Queue one request for each staged request removed, keeping the staged request count optimal at all times.
diff --git a/cgminer.c b/cgminer.c
index a662c1f..1ab0649 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -2610,7 +2610,8 @@ static void discard_stale(void)
if (stale) {
applog(LOG_DEBUG, "Discarded %d stales that didn't match current hash", stale);
- queue_request(NULL, false);
+ while (stale-- > 0)
+ queue_request(NULL, false);
}
}
@@ -3780,7 +3781,6 @@ bool queue_request(struct thr_info *thr, bool needed)
static struct work *hash_pop(const struct timespec *abstime)
{
struct work *work = NULL;
- bool queue = false;
int rc = 0;
mutex_lock(stgd_lock);
@@ -3791,13 +3791,10 @@ static struct work *hash_pop(const struct timespec *abstime)
work = staged_work;
HASH_DEL(staged_work, work);
work->pool->staged--;
- if (HASH_COUNT(staged_work) < (unsigned int)mining_threads)
- queue = true;
}
mutex_unlock(stgd_lock);
- if (queue)
- queue_request(NULL, false);
+ queue_request(NULL, false);
return work;
}