As work is sorted by age, we can discard the oldest work at regular intervals, keeping only one of the newest work items per mining thread.
diff --git a/cgminer.c b/cgminer.c
index 43fc206..17b2db6 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -3625,6 +3625,9 @@ static struct work *make_clone(struct work *work)
memcpy(work_clone, work, sizeof(struct work));
work_clone->clone = true;
work_clone->longpoll = false;
+ /* Make cloned work appear slightly older to bias towards keeping the
+ * master work item which can be further rolled */
+ work_clone->tv_staged.tv_sec -= 1;
return work_clone;
}
@@ -4312,6 +4315,23 @@ static void *watchpool_thread(void __maybe_unused *userdata)
return NULL;
}
+/* Work is sorted according to age, so discard the oldest work items, leaving
+ * only 1 staged work item per mining thread */
+static void age_work(void)
+{
+ int discarded = 0;
+
+ while (requests_staged() > mining_threads) {
+ struct work *work = hash_pop(NULL);
+
+ if (unlikely(!work))
+ break;
+ discard_work(work);
+ discarded++;
+ }
+ if (discarded)
+ applog(LOG_DEBUG, "Aged %d work items", discarded);
+}
/* Makes sure the hashmeter keeps going even if mining threads stall, updates
* the screen at regular intervals, and restarts threads if they appear to have
@@ -4334,6 +4354,8 @@ static void *watchdog_thread(void __maybe_unused *userdata)
if (requests_queued() < opt_queue)
queue_request(NULL, false);
+ age_work();
+
hashmeter(-1, &zero_tv, 0);
#ifdef HAVE_CURSES