The longpoll blanking of the current_block data may not be happening before the work is converted, making the converted work appear to be a detected block change. Blank the current block before submitting the work and stop passing the longpoll bool around.
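In short, the blanking moves out of flush_requests() and into the longpoll branch itself, ahead of the thread restart. A minimal sketch of the new ordering, simplified from the diff below (the surrounding longpoll parsing and locking in main.c is omitted):

	/* Forget the old block before anything else can compare against it... */
	memcpy(current_block, blank, 36);
	/* ...then flush the stale queued requests and restart the miners. */
	restart_threads();

With current_block already blanked, work converted from the longpoll response should no longer look like a separately detected block change.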
diff --git a/main.c b/main.c
index da60991..4254565 100644
--- a/main.c
+++ b/main.c
@@ -1783,15 +1783,13 @@ static void discard_staged(void)
 	total_discarded++;
 }
 
-static void flush_requests(bool longpoll)
+static void flush_requests(void)
 {
 	struct pool *pool = current_pool();
 	int i, stale;
 
 	/* We should have one fresh work item staged from the block change. */
 	stale = requests_staged() - 1;
-	if (longpoll)
-		memcpy(current_block, blank, 36);
 
 	/* Temporarily increase the staged count so that get_work thinks there
 	 * is work available instead of making threads reuse existing work */
@@ -2366,12 +2364,12 @@ out:
 }
 #endif /* HAVE_OPENCL */
 
-static void restart_threads(bool longpoll)
+static void restart_threads(void)
 {
 	int i;
 
 	/* Discard old queued requests and get new ones */
-	flush_requests(longpoll);
+	flush_requests();
 
 	for (i = 0; i < mining_threads; i++)
 		work_restart[i].restart = 1;
@@ -2463,7 +2461,8 @@ next_path:
 		    !strncmp(longpoll_block, current_block, 36))) {
 			new_blocks++;
 			applog(LOG_WARNING, "LONGPOLL detected new block on network, waiting on fresh work");
-			restart_threads(true);
+			memcpy(current_block, blank, 36);
+			restart_threads();
 		} else
 			applog(LOG_WARNING, "LONGPOLL received after new block already detected");
@@ -2636,7 +2635,7 @@ static void *watchdog_thread(void *userdata)
 		}
 
 		if (unlikely(work_restart[watchdog_thr_id].restart)) {
-			restart_threads(false);
+			restart_threads();
 			work_restart[watchdog_thr_id].restart = 0;
 		}
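For reference, restart_threads() as it reads after the patch, pieced together from the hunks above (the elided code in main.c is unchanged); both remaining callers, the longpoll handler and the watchdog thread, now use the same parameterless call:

	static void restart_threads(void)
	{
		int i;

		/* Discard old queued requests and get new ones */
		flush_requests();

		for (i = 0; i < mining_threads; i++)
			work_restart[i].restart = 1;
	}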