Put locking around the queued count to ensure we're never left without work.
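For reference, the locking pattern the patch introduces is a plain mutex-guarded counter: every access to total_queued, reads included, goes through qd_lock, so the count is never observed mid-update. Below is a minimal standalone sketch of that pattern; the worker() driver and its iteration count are hypothetical scaffolding for demonstration, not code from cpu-miner.c:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t qd_lock = PTHREAD_MUTEX_INITIALIZER;
static int total_queued;

/* Writers and readers all take qd_lock, as in the patch */
static void inc_queued(void)
{
	pthread_mutex_lock(&qd_lock);
	total_queued++;
	pthread_mutex_unlock(&qd_lock);
}

static void dec_queued(void)
{
	pthread_mutex_lock(&qd_lock);
	total_queued--;
	pthread_mutex_unlock(&qd_lock);
}

static int requests_queued(void)
{
	int ret;

	pthread_mutex_lock(&qd_lock);
	ret = total_queued;
	pthread_mutex_unlock(&qd_lock);
	return ret;
}

/* Hypothetical driver: two threads hammer the counter; with the lock
 * in place the final count is always exactly zero */
static void *worker(void *arg)
{
	int i;

	(void)arg;
	for (i = 0; i < 100000; i++) {
		inc_queued();
		dec_queued();
	}
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, NULL);
	pthread_create(&t2, NULL, worker, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("queued at exit: %d\n", requests_queued());
	return 0;
}

Build with cc -pthread. Drop the locking and the unsynchronised increments and decrements can interleave, leaving a nonzero result at exit.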
diff --git a/cpu-miner.c b/cpu-miner.c
index 80704dc..3000d97 100644
--- a/cpu-miner.c
+++ b/cpu-miner.c
@@ -146,11 +146,12 @@ int longpoll_thr_id;
 struct work_restart *work_restart = NULL;
 pthread_mutex_t time_lock;
 static pthread_mutex_t hash_lock;
+static pthread_mutex_t qd_lock;
 static double total_mhashes_done;
 static struct timeval total_tv_start, total_tv_end;
 static int accepted, rejected;
 int hw_errors;
-
+static int total_queued;
 
 struct option_help {
 	const char *name;
@@ -690,6 +691,32 @@ out_unlock:
 	pthread_mutex_unlock(&hash_lock);
 }
 
+/* This is overkill, but at least we'll know accurately how much work is
+ * queued to prevent ever being left without work */
+static void inc_queued(void)
+{
+	pthread_mutex_lock(&qd_lock);
+	total_queued++;
+	pthread_mutex_unlock(&qd_lock);
+}
+
+static void dec_queued(void)
+{
+	pthread_mutex_lock(&qd_lock);
+	total_queued--;
+	pthread_mutex_unlock(&qd_lock);
+}
+
+static int requests_queued(void)
+{
+	int ret;
+
+	pthread_mutex_lock(&qd_lock);
+	ret = total_queued;
+	pthread_mutex_unlock(&qd_lock);
+	return ret;
+}
+
 /* All work is queued flagged as being for thread 0 and then the mining thread
  * flags it as its own */
 static bool queue_request(void)
@@ -710,6 +737,7 @@ static bool queue_request(void)
 		workio_cmd_free(wc);
 		return false;
 	}
+	inc_queued();
 	return true;
 }
 
@@ -718,10 +746,15 @@ static bool discard_request(void)
 	struct thr_info *thr = &thr_info[0];
 	struct work *work_heap;
 
+	/* Just in case we fell in a hole and missed a queue filling */
+	if (unlikely(!requests_queued()))
+		return true;
+
 	work_heap = tq_pop(thr->q, NULL);
 	if (unlikely(!work_heap))
 		return false;
 
 	free(work_heap);
+	dec_queued();
 	return true;
 }
@@ -740,13 +773,14 @@ static void flush_requests(void)
 	/* Pop off the old requests. Cancelling the requests would be better
 	 * but is tricky */
-	for (i = 0; i < opt_queue; i++) {
+	while (requests_queued() > opt_queue) {
 		if (unlikely(!discard_request())) {
 			applog(LOG_ERR, "Failed to discard requests in flush_requests");
 			kill_work();
 			return;
 		}
 	}
+
 }
 
 static bool get_work(struct work *work, bool queued)
@@ -755,13 +789,16 @@ static bool get_work(struct work *work, bool queued)
 	struct work *work_heap;
 	bool ret = false;
 
-	if (unlikely(!queued && !queue_request()))
+	if (!queued || !requests_queued()) {
+		if (unlikely(!queue_request()))
 		goto out;
+	}
 
 	/* wait for 1st response, or get cached response */
 	work_heap = tq_pop(thr->q, NULL);
 	if (unlikely(!work_heap))
 		goto out;
+	dec_queued();
 
 	memcpy(work, work_heap, sizeof(*work));
 	memcpy(current_block, work->data, 36);
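
The comment in the patch calls this "overkill", and for a single int counter that is fair: the mutex buys an accurate snapshot, not any ordering between queueing and popping. On a compiler with C11 atomics the same three helpers could be written lock-free; this is a sketch of that alternative, not what cpu-miner.c does (the code predates wide <stdatomic.h> support):

#include <stdatomic.h>

static atomic_int total_queued;

static void inc_queued(void)
{
	atomic_fetch_add(&total_queued, 1);
}

static void dec_queued(void)
{
	atomic_fetch_sub(&total_queued, 1);
}

static int requests_queued(void)
{
	return atomic_load(&total_queued);
}

Either way, the tracked count is what enables the behavioural changes above: the old flush_requests() loop unconditionally popped opt_queue requests, while the new one drains only the excess above opt_queue, and get_work() now queues a fresh request whenever the caller hasn't queued one or the count says none are pending.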