Queue and pop precisely the same amount of work in flush_requests to ensure all work is cleared and that an uneven add/remove is not possible.
diff --git a/main.c b/main.c
index ddd3d7f..1e22e74 100644
--- a/main.c
+++ b/main.c
@@ -719,27 +719,24 @@ static bool discard_request(void)
 
 static void flush_requests(void)
 {
-	unsigned int i;
+	int i, extra;
 
-	/* Queue a whole batch of new requests */
-	for (i = 0; i < opt_queue; i++) {
+	extra = requests_queued();
+	for (i = 0; i < extra; i++) {
+		/* Queue a whole batch of new requests */
 		if (unlikely(!queue_request())) {
 			applog(LOG_ERR, "Failed to queue requests in flush_requests");
 			kill_work();
-			return;
+			break;
 		}
-	}
-
-	/* Pop off the old requests. Cancelling the requests would be better
-	 * but is tricky */
-	while (requests_queued() > opt_queue) {
+		/* Pop off the old requests. Cancelling the requests would be better
+		 * but is tricky */
 		if (unlikely(!discard_request())) {
 			applog(LOG_ERR, "Failed to discard requests in flush_requests");
 			kill_work();
-			return;
+			break;
 		}
 	}
-
 }
 
 static bool get_work(struct work *work, bool queued)
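
For reference, a minimal standalone sketch of the loop behaviour the patch establishes. The counter-based queue and the queue_request()/discard_request() stubs here are hypothetical stand-ins (the real functions talk to the work queue in main.c); the point is the invariant that each iteration queues exactly one fresh request and pops exactly one stale one, so the queue depth is the same before and after the flush. Using break rather than return also means a failure on either side stops queueing and discarding at the same point, preserving the 1:1 pairing.

/*
 * Minimal sketch of the patched flush_requests() loop.
 * The queue is just a counter standing in for the real work queue;
 * queue_request() and discard_request() are hypothetical stubs.
 */
#include <stdbool.h>
#include <stdio.h>

static int queued = 5;	/* pretend five stale requests are queued */

static int requests_queued(void) { return queued; }
static bool queue_request(void) { queued++; return true; }
static bool discard_request(void) { queued--; return true; }

static void flush_requests(void)
{
	int i, extra;

	extra = requests_queued();
	for (i = 0; i < extra; i++) {
		/* Queue one fresh request... */
		if (!queue_request())
			break;
		/* ...and pop one stale request, keeping the depth constant. */
		if (!discard_request())
			break;
	}
}

int main(void)
{
	printf("before flush: %d queued\n", requests_queued());
	flush_requests();
	printf("after flush:  %d queued\n", requests_queued());
	return 0;
}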