With better bounds on the amount of work cloned, there is no need to age work; ageing was also picking off master work items that could have been further rolled.
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38
diff --git a/cgminer.c b/cgminer.c
index 4f88f9f..92f9ad8 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -4331,24 +4331,6 @@ static void *watchpool_thread(void __maybe_unused *userdata)
return NULL;
}
-/* Work is sorted according to age, so discard the oldest work items, leaving
- * only 1 staged work item per mining thread */
-static void age_work(void)
-{
- int discarded = 0;
-
- while (requests_staged() > mining_threads * 4 / 3 + opt_queue) {
- struct work *work = hash_pop(NULL);
-
- if (unlikely(!work))
- break;
- discard_work(work);
- discarded++;
- }
- if (discarded)
- applog(LOG_DEBUG, "Aged %d work items", discarded);
-}
-
/* Makes sure the hashmeter keeps going even if mining threads stall, updates
* the screen at regular intervals, and restarts threads if they appear to have
* died. */
@@ -4370,8 +4352,6 @@ static void *watchdog_thread(void __maybe_unused *userdata)
if (requests_queued() < opt_queue)
queue_request(NULL, false);
- age_work();
-
hashmeter(-1, &zero_tv, 0);
#ifdef HAVE_CURSES