A logic error in the queueing of work ended up generating more stale blocks. There is a small chance that a longpoll is signalled right at the start, which would lead to a deadlock, so check for first work before restart.
diff --git a/cpu-miner.c b/cpu-miner.c
index 72e0a6d..d3527e1 100644
--- a/cpu-miner.c
+++ b/cpu-miner.c
@@ -728,7 +728,6 @@ static bool get_work(struct work *work)
bool ret = false;
unsigned int i;
-get_new:
if (unlikely(!queue_request()))
goto out;
@@ -737,31 +736,29 @@ get_new:
if (unlikely(!work_heap))
goto out;
+ if (unlikely(first_work)) {
+ first_work = false;
+ /* send for extra work requests for the next time get_work
+ * is called. */
+ for (i = 1; i < opt_queue; i++) {
+ if (unlikely(!queue_request()))
+ goto out_free;
+ }
+ }
+
if (unlikely(work_restart[opt_n_threads + gpu_threads].restart)) {
work_restart[opt_n_threads + gpu_threads].restart = 0;
- free(work_heap);
if (opt_debug)
applog(LOG_DEBUG, "New block detected, discarding old work");
for (i = 1; i < opt_queue; i++) {
+ free(work_heap);
+ if (unlikely(!queue_request()))
+ goto out;
/* Pop off all the work. Cancelling the requests would
* be better but tricky. */
work_heap = tq_pop(thr->q, NULL);
if (unlikely(!work_heap))
goto out;
- free(work_heap);
- if (unlikely(!queue_request()))
- goto out;
- }
- goto get_new;
- }
-
- if (unlikely(first_work)) {
- first_work = false;
- /* send for extra work requests for the next time get_work
- * is called. */
- for (i = 1; i < opt_queue; i++) {
- if (unlikely(!queue_request()))
- goto out_free;
}
}