author    Sage Weil <sage@newdream.net>    2011-09-20 03:23:10 +0200
committer Sage Weil <sage@newdream.net>    2011-09-20 03:23:10 +0200
commit    46cfda7b8245d4f44d1194943cccb379840f3aa8 (patch)
tree      ddb662b3710efeff2377f23aaa5e67ab68c7f31d /src/osd/OSD.h
parent    osd: set reply version for dup requests (diff)
osd: preserve ordering when throttling races with missing/degraded requeue
When we delay an op because the op_queue is full, we can violate the op order:

 - op1 comes in, waits because object is missing
 - op2 comes in, throttles on op queue
 - op1 is requeued (no longer missing)
 - queue drains, op2 happens
 - op1 happens

To avoid this, if we delay, requeue ourselves... after whatever else is on the queue.

Fixes: #1490
Signed-off-by: Sage Weil <sage@newdream.net>
Diffstat (limited to '')
-rw-r--r--  src/osd/OSD.h  7
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/src/osd/OSD.h b/src/osd/OSD.h
index 9f54484da21..7eb7afec59e 100644
--- a/src/osd/OSD.h
+++ b/src/osd/OSD.h
@@ -303,6 +303,11 @@ private:
     finished.splice(finished.end(), ls);
     finished_lock.Unlock();
   }
+  void take_waiter(Message *o) {
+    finished_lock.Lock();
+    finished.push_back(o);
+    finished_lock.Unlock();
+  }
   void push_waiters(list<class Message*>& ls) {
     assert(osd_lock.is_locked());  // currently, at least. be careful if we change this (see #743)
     finished_lock.Lock();
@@ -351,7 +356,7 @@ private:
   Cond op_queue_cond;
   void wait_for_no_ops();
-  void throttle_op_queue();
+  bool throttle_op_queue(Message *op);
   void enqueue_op(PG *pg, Message *op);
   void dequeue_op(PG *pg);
   static void static_dequeueop(OSD *o, PG *pg) {
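The header diff only shows the two new declarations; the OSD.cc side of the change is not part of this commit view. Below is a minimal standalone C++ sketch, not the Ceph code itself, of how the pieces could fit together: if throttle_op_queue() had to wait because the op queue was full, the op is handed to take_waiter() so it is redispatched after anything requeued in the meantime, which is what preserves the ordering described in the commit message. The MiniOSD class, its members, main(), and the function bodies are assumptions for illustration; only the names take_waiter() and throttle_op_queue(Message*) come from the diff.

// Minimal standalone sketch (assumed; only the two declarations above come
// from the diff).  Blocking on the full queue is elided for brevity.
#include <iostream>
#include <list>
#include <mutex>
#include <string>

struct Message { std::string name; };

class MiniOSD {                          // hypothetical stand-in for OSD
  std::mutex finished_lock;
  std::list<Message*> finished;          // ops waiting to be (re)dispatched, in order

  std::mutex op_queue_lock;
  unsigned op_queue_len = 1;             // pretend the op queue is already full
  unsigned max_queue_len = 1;

public:
  // counterpart of the new OSD::take_waiter(): append one op to the waiter list
  void take_waiter(Message *o) {
    std::lock_guard<std::mutex> l(finished_lock);
    finished.push_back(o);
  }

  // counterpart of the new signature bool throttle_op_queue(Message *op):
  // return true if the op may proceed now, false if it was requeued behind
  // whatever else accumulated while we were throttled.
  bool throttle_op_queue(Message *op) {
    std::unique_lock<std::mutex> l(op_queue_lock);
    if (op_queue_len < max_queue_len)
      return true;                       // no delay, ordering unaffected
    // We would have blocked here; another op (e.g. op1, no longer missing)
    // may have been requeued meanwhile.  Re-append ourselves so we run
    // after it instead of jumping ahead of it.
    l.unlock();
    take_waiter(op);
    return false;
  }

  void drain() {                         // dispatch everything in FIFO order
    std::lock_guard<std::mutex> l(finished_lock);
    for (Message *m : finished)
      std::cout << "dispatch " << m->name << "\n";
    finished.clear();
  }
};

int main() {
  MiniOSD osd;
  Message op1{"op1"}, op2{"op2"};

  osd.take_waiter(&op1);                 // op1 requeued: its object recovered
  if (osd.throttle_op_queue(&op2))       // queue full, so op2 is requeued instead
    std::cout << "dispatch op2 early (would break ordering)\n";

  osd.drain();                           // prints op1 then op2: order preserved
}

Returning bool lets the caller in the dispatch path drop the op (it will come back via the waiter list) rather than processing it immediately, which is the ordering guarantee the commit message describes.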