Discard accumulated work when longpoll indicates a new block.
diff --git a/cpu-miner.c b/cpu-miner.c
index 131495a..9edd6f5 100644
--- a/cpu-miner.c
+++ b/cpu-miner.c
@@ -612,12 +612,13 @@ static void hashmeter(int thr_id, struct timeval *diff,
 		total_mhashes_done / total_secs, accepted, rejected);
 }
 
+static struct work *work_heap = NULL;
+
 /* Since we always have one extra work item queued, set the thread id to 0
  * for all the work and just give the work to the first thread that requests
  * work */
 static bool get_work(struct work *work)
 {
-	static struct work *work_heap = NULL;
 	struct thr_info *thr = &thr_info[0];
 	struct workio_cmd *wc;
 	bool ret = false;
@@ -636,7 +637,7 @@ static bool get_work(struct work *work)
 		goto out;
 	}
 
-	/* work_heap is a static var so it is protected by get_lock */
+	/* work_heap is protected by get_lock */
 	pthread_mutex_lock(&get_lock);
 	if (likely(work_heap)) {
 		memcpy(work, work_heap, sizeof(*work));
@@ -1004,6 +1005,14 @@ static void restart_threads(void)
 
 	for (i = 0; i < opt_n_threads + nDevs; i++)
 		work_restart[i].restart = 1;
+
+	/* If longpoll has detected a new block, we should discard any queued
+	 * work in work_heap */
+	pthread_mutex_lock(&get_lock);
+	if (likely(work_heap)) {
+		free(work_heap);
+		work_heap = NULL;
+	}
+	pthread_mutex_unlock(&get_lock);
 }
 
 static void *longpoll_thread(void *userdata)
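
The change boils down to one pattern: a single queued work item is cached behind get_lock, and the longpoll path flushes that cache the moment a new block appears, so no thread can be handed work built on a stale block. The sketch below is a minimal, self-contained illustration of that pattern, not the miner's actual code: struct work is reduced to a stub, and take_work(), queue_work(), and flush_stale_work() are hypothetical names standing in for get_work() and restart_threads().

	/* A minimal sketch of the pattern above, assuming stub types and
	 * hypothetical helper names; not the miner's real API. */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdlib.h>
	#include <string.h>

	struct work { unsigned char data[128]; };	/* stub work item */

	static struct work *work_heap;	/* one queued item, or NULL */
	static pthread_mutex_t get_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Consumer side: hand out the cached item if one is queued. */
	static bool take_work(struct work *out)
	{
		bool have = false;

		pthread_mutex_lock(&get_lock);
		if (work_heap) {
			memcpy(out, work_heap, sizeof(*out));
			free(work_heap);
			work_heap = NULL;
			have = true;
		}
		pthread_mutex_unlock(&get_lock);
		return have;
	}

	/* Producer side: cache a freshly fetched item for the next caller. */
	static void queue_work(const struct work *in)
	{
		struct work *copy = malloc(sizeof(*copy));

		if (!copy)
			return;
		memcpy(copy, in, sizeof(*copy));
		pthread_mutex_lock(&get_lock);
		free(work_heap);	/* drop anything already queued */
		work_heap = copy;
		pthread_mutex_unlock(&get_lock);
	}

	/* Longpoll path: a new block makes cached work stale, so drop it,
	 * mirroring the free()/NULL added to restart_threads() above. */
	static void flush_stale_work(void)
	{
		pthread_mutex_lock(&get_lock);
		free(work_heap);
		work_heap = NULL;
		pthread_mutex_unlock(&get_lock);
	}

This also shows why the patch hoists work_heap from a function-static inside get_work() to file scope: only at file scope can the longpoll/restart path reach the pointer to free it, while the locking contract (every access under get_lock) stays unchanged.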