Clean up the pool switching so it no longer depends on whether the work can roll or not, by setting a lagging flag and then the idle flag.
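The patch relies on pool_tset() and pool_tclear() so that the lagging warning, the pool switch in pool_died(), and the pool_resus() message each fire only once per outage. Those helpers are not part of this diff; the sketch below shows their assumed semantics (test-and-set / test-and-clear of a pool flag under the pool's lock, returning the previous value) and is illustrative only — the actual helpers and struct pool in main.c/miner.h may differ in detail.

#include <pthread.h>
#include <stdbool.h>

struct pool {
	pthread_mutex_t pool_lock;	/* protects the boolean state flags */
	bool idle;			/* pool considered dead, triggers a pool switch */
	bool lagging;			/* pool slow to supply work, but still alive */
};

/* Set *var and return its previous value, so the caller can act only on the
 * false -> true transition (e.g. log a warning exactly once). */
static bool pool_tset(struct pool *pool, bool *var)
{
	bool ret;

	pthread_mutex_lock(&pool->pool_lock);
	ret = *var;
	*var = true;
	pthread_mutex_unlock(&pool->pool_lock);
	return ret;
}

/* Clear *var and return its previous value, so the caller can act only on the
 * true -> false transition (e.g. announce the pool's recovery exactly once). */
static bool pool_tclear(struct pool *pool, bool *var)
{
	bool ret;

	pthread_mutex_lock(&pool->pool_lock);
	ret = *var;
	*var = false;
	pthread_mutex_unlock(&pool->pool_lock);
	return ret;
}

With those semantics, pool_died() below only logs and switches pools the first time a pool is flagged idle, and the separate lagging flag lets the work-fetch path complain about a slow pool without treating it as dead.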
diff --git a/main.c b/main.c
index fb37d81..e998c92 100644
--- a/main.c
+++ b/main.c
@@ -2852,9 +2852,11 @@ static bool pool_active(struct pool *pool, bool pinging)
 
 static void pool_died(struct pool *pool)
 {
-	applog(LOG_WARNING, "Pool %d %s not responding!", pool->pool_no, pool->rpc_url);
-	gettimeofday(&pool->tv_idle, NULL);
-	switch_pools(NULL);
+	if (!pool_tset(pool, &pool->idle)) {
+		applog(LOG_WARNING, "Pool %d %s not responding!", pool->pool_no, pool->rpc_url);
+		gettimeofday(&pool->tv_idle, NULL);
+		switch_pools(NULL);
+	}
 }
 
 static void pool_resus(struct pool *pool)
@@ -3053,30 +3055,18 @@ retry:
 	}
 	requested = false;
 
-	if (!requests_staged() && can_roll(work)) {
-		/* Only print this message once each time we shift to localgen */
-		if (!pool_tset(pool, &pool->idle)) {
-			applog(LOG_WARNING, "Pool %d not providing work fast enough, generating work locally",
+	if (!requests_staged()) {
+		if (!pool_tset(pool, &pool->lagging)) {
+			applog(LOG_WARNING, "Pool %d not providing work fast enough",
 				pool->pool_no);
 			pool->localgen_occasions++;
 			total_lo++;
-			gettimeofday(&pool->tv_idle, NULL);
-		} else {
-			struct timeval tv_now, diff;
-
-			gettimeofday(&tv_now, NULL);
-			timeval_subtract(&diff, &tv_now, &pool->tv_idle);
-			/* Attempt to switch pools if this one has been unresponsive for >half
-			 * a block's duration */
-			if (diff.tv_sec > 300) {
-				pool_died(pool);
-				goto retry;
-			}
 		}
-
-		roll_work(work);
-		ret = true;
-		goto out;
+		if (can_roll(work)) {
+			roll_work(work);
+			ret = true;
+			goto out;
+		}
 	}
 
 	gettimeofday(&now, NULL);
@@ -3088,10 +3078,7 @@ retry:
 	/* wait for 1st response, or get cached response */
 	work_heap = tq_pop(getq, &abstime);
 	if (unlikely(!work_heap)) {
-		/* Attempt to switch pools if this one has mandatory work that
-		 * has timed out or does not support rolltime */
-		pool->localgen_occasions++;
-		total_lo++;
+		/* Attempt to switch pools if this one times out */
 		pool_died(pool);
 		goto retry;
 	}
@@ -3104,8 +3091,11 @@ retry:
 	pool = work_heap->pool;
 	/* If we make it here we have succeeded in getting fresh work */
-	if (pool_tclear(pool, &pool->idle))
-		pool_resus(pool);
+	if (!work_heap->mined) {
+		pool_tclear(pool, &pool->lagging);
+		if (pool_tclear(pool, &pool->idle))
+			pool_resus(pool);
+	}
 
 	memcpy(work, work_heap, sizeof(*work));
diff --git a/miner.h b/miner.h
index 49bce03..47b8437 100644
--- a/miner.h
+++ b/miner.h
@@ -310,6 +310,7 @@ struct pool {
 	int accepted, rejected;
 	bool submit_fail;
 	bool idle;
+	bool lagging;
 	bool probed;
 	bool enabled;
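Taken together, the change splits "pool is slow" (lagging: warn once, bump the localgen counters, keep rolling the existing work if possible) from "pool is dead" (idle: warn once, record tv_idle, switch pools), and the first piece of genuinely fresh, un-mined work clears both, calling pool_resus() only when the pool had actually been marked idle. A toy walk-through of that once-only behaviour, reusing the struct pool and helper sketches above (illustrative, not cgminer code):

#include <stdio.h>

int main(void)
{
	static struct pool p = { .pool_lock = PTHREAD_MUTEX_INITIALIZER };

	/* Work runs dry: only the first call reports the lagging pool. */
	if (!pool_tset(&p, &p.lagging))
		puts("pool 0 not providing work fast enough");
	if (!pool_tset(&p, &p.lagging))
		puts("never reached: the repeat warning is suppressed");

	/* getwork times out: only the first call would log and switch pools. */
	if (!pool_tset(&p, &p.idle))
		puts("pool 0 not responding, switching pools");

	/* Fresh work arrives: clear lagging, resuscitate only if it was idle. */
	pool_tclear(&p, &p.lagging);
	if (pool_tclear(&p, &p.idle))
		puts("pool 0 alive again");

	return 0;
}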