Test that we have enough work queued for pools with and without rolltime capability.
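In short, queue_request() now asks for more work unless three conditions all hold: at least one queued request per mining thread, more queued requests than the staged ntime-rolled work plus the --queue setting, and a recent enough last request (within 2/3 of the scan time, with a 5 second floor). Below is a rough standalone sketch of that test, using hypothetical placeholder values for the globals (opt_scantime, opt_queue, mining_threads, staged_extras); it is an illustration of the condition in the diff that follows, not actual cgminer code.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical stand-ins for cgminer's globals, for illustration only. */
static int opt_scantime = 60;	/* --scan-time, in seconds */
static int opt_queue = 1;	/* --queue, extra work items to keep queued */
static int mining_threads = 4;
static int staged_extras = 2;	/* staged work obtained by rolling ntime */
static time_t requested_tv_sec;	/* when work was last requested */

/* Sketch of the early-return test added by this commit: true means
 * "enough work is already queued, skip requesting more for now". */
static bool have_enough_work(int rq, time_t now_sec)
{
	time_t scan_post;

	/* Ask for more work every 2/3 of the scan time (never less than 5s)
	 * so that all queued work does not expire at the same time. */
	scan_post = opt_scantime * 2 / 3;
	if (scan_post < 5)
		scan_post = 5;

	/* Enough work for pools without rolltime (one request per thread)
	 * and enough original, un-rolled work for pools with rolltime. */
	return rq >= mining_threads &&
	       rq > staged_extras + opt_queue &&
	       now_sec - requested_tv_sec < scan_post;
}

int main(void)
{
	time_t now = time(NULL);

	requested_tv_sec = now - 10;	/* pretend we last asked 10 seconds ago */
	printf("enough work queued: %s\n",
	       have_enough_work(6, now) ? "yes" : "no");
	return 0;
}

Spreading requests over 2/3 of the scan time keeps the queued work from all expiring at once, which is the point of the scan_post floor in the patch below.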
diff --git a/cgminer.c b/cgminer.c
index 5b68658..aff264b 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -3512,20 +3512,27 @@ static void pool_resus(struct pool *pool)
 		switch_pools(NULL);
 }
 
-static long requested_tv_sec;
+static time_t requested_tv_sec;
 
 static bool queue_request(struct thr_info *thr, bool needed)
 {
 	int rq = requests_queued();
 	struct workio_cmd *wc;
 	struct timeval now;
+	time_t scan_post;
+
+	/* Grab more work every 2/3 of the scan time to avoid all work expiring
+	 * at the same time */
+	scan_post = opt_scantime * 2 / 3;
+	if (scan_post < 5)
+		scan_post = 5;
 
 	gettimeofday(&now, NULL);
 
-	/* Space out retrieval of extra work according to the number of mining
-	 * threads */
-	if (rq >= mining_threads + staged_extras &&
-	    (now.tv_sec - requested_tv_sec) < opt_scantime / (mining_threads + 1))
+	/* Test to make sure we have enough work for pools without rolltime
+	 * and enough original work for pools with rolltime */
+	if (rq >= mining_threads && rq > staged_extras + opt_queue &&
+	    now.tv_sec - requested_tv_sec < scan_post)
 		return true;
 
 	/* fill out work request message */
@@ -4329,7 +4336,7 @@ static void age_work(void)
 {
 	int discarded = 0;
 
-	while (requests_staged() > mining_threads * 4 / 3) {
+	while (requests_staged() > mining_threads * 4 / 3 + opt_queue) {
 		struct work *work = hash_pop(NULL);
 
 		if (unlikely(!work))
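The second hunk makes the matching change in age_work(): the ceiling on staged work is raised by opt_queue so the extra items requested via --queue are not discarded as soon as they are staged. A minimal illustration of the new cap follows; the helper function is hypothetical, only the expression mirrors the diff.

/* Illustrative helper, not cgminer code: the number of staged work items
 * age_work() keeps before it starts discarding the oldest ones. */
int staged_cap(int mining_threads, int opt_queue)
{
	/* Old cap:  mining_threads * 4 / 3
	 * New cap:  also keep the user-requested --queue depth. */
	return mining_threads * 4 / 3 + opt_queue;
}

With 4 mining threads and --queue 2, for example, the cap rises from 5 to 7 staged items.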