Track queued and staged work per pool once again for future use.
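
inc_queued() and dec_queued() now take the pool as an argument and maintain a per-pool queued count alongside total_queued under qd_lock. Each pool's staged count is incremented when work is pushed into the staged_work hash and decremented when stale work is discarded or work is popped. The queued_getworks throttle in queue_request() is dropped as part of this change. A standalone sketch of the locking pattern, and one hypothetical consumer of the new fields, follows the diff.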
diff --git a/cgminer.c b/cgminer.c
index 29362c9..31ab609 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -2252,17 +2252,19 @@ static void push_curl_entry(struct curl_ent *ce, struct pool *pool)
/* This is overkill, but at least we'll know accurately how much work is
* queued to prevent ever being left without work */
-static void inc_queued(void)
+static void inc_queued(struct pool *pool)
{
mutex_lock(&qd_lock);
total_queued++;
+ pool->queued++;
mutex_unlock(&qd_lock);
}
-static void dec_queued(void)
+static void dec_queued(struct pool *pool)
{
mutex_lock(&qd_lock);
total_queued--;
+ pool->queued--;
mutex_unlock(&qd_lock);
}
@@ -2422,7 +2424,7 @@ retry:
lagging = true;
pool = ret_work->pool = select_pool(lagging);
- inc_queued();
+ inc_queued(pool);
if (!ce)
ce = pop_curl_entry(pool);
@@ -2430,7 +2432,7 @@ retry:
/* Check that we haven't staged work via other threads while
* waiting for a curl entry */
if (total_staged() >= maxq) {
- dec_queued();
+ dec_queued(pool);
free_work(ret_work);
goto out;
}
@@ -2440,7 +2442,7 @@ retry:
/* pause, then restart work-request loop */
applog(LOG_DEBUG, "json_rpc_call failed on get work, retrying");
lagging = true;
- dec_queued();
+ dec_queued(pool);
free_work(ret_work);
goto retry;
}
@@ -2725,6 +2727,7 @@ static void discard_stale(void)
HASH_ITER(hh, staged_work, work, tmp) {
if (stale_work(work, false)) {
HASH_DEL(staged_work, work);
+ work->pool->staged--;
discard_work(work);
stale++;
}
@@ -2931,9 +2934,11 @@ static bool hash_push(struct work *work)
pthread_cond_signal(&getq->cond);
mutex_unlock(stgd_lock);
+ work->pool->staged++;
+
if (work->queued) {
work->queued = false;
- dec_queued();
+ dec_queued(work->pool);
}
return rc;
@@ -3926,17 +3931,7 @@ static void pool_resus(struct pool *pool)
static bool queue_request(struct thr_info *thr, bool needed)
{
struct workio_cmd *wc;
- bool doq = true;
- mutex_lock(&control_lock);
- if (queued_getworks > (mining_threads + opt_queue) * 2)
- doq = false;
- else
- queued_getworks++;
- mutex_unlock(&control_lock);
- if (!doq)
- return true;
-
/* fill out work request message */
wc = calloc(1, sizeof(*wc));
if (unlikely(!wc)) {
@@ -3981,6 +3976,7 @@ static struct work *hash_pop(const struct timespec *abstime)
} else
work = staged_work;
HASH_DEL(staged_work, work);
+ work->pool->staged--;
if (work_rollable(work))
staged_rollable--;
}
diff --git a/miner.h b/miner.h
index 91fe7ce..2141724 100644
--- a/miner.h
+++ b/miner.h
@@ -732,6 +732,9 @@ struct pool {
int solved;
int diff1;
+ int queued;
+ int staged;
+
bool submit_fail;
bool idle;
bool lagging;
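
For reference, below is a minimal standalone sketch (not cgminer code) of the pattern this commit applies: the same mutex that guards total_queued also guards each pool's queued counter, so the two can never be observed out of step. The pool_queued() accessor and the main() harness are hypothetical illustrations of how the new field might be read later; neither is part of this commit.

/* Sketch only: mirrors the inc_queued()/dec_queued() locking pattern
 * from the diff above. struct pool is reduced to the two new fields. */
#include <pthread.h>
#include <stdio.h>

struct pool {
	int queued;	/* work requested from this pool, not yet staged */
	int staged;	/* work staged from this pool, not yet popped */
};

static pthread_mutex_t qd_lock = PTHREAD_MUTEX_INITIALIZER;
static int total_queued;

static void inc_queued(struct pool *pool)
{
	pthread_mutex_lock(&qd_lock);
	total_queued++;
	pool->queued++;
	pthread_mutex_unlock(&qd_lock);
}

static void dec_queued(struct pool *pool)
{
	pthread_mutex_lock(&qd_lock);
	total_queued--;
	pool->queued--;
	pthread_mutex_unlock(&qd_lock);
}

/* Hypothetical consumer of the new field: snapshot one pool's share of
 * the queue under the same lock the writers take. */
static int pool_queued(struct pool *pool)
{
	int ret;

	pthread_mutex_lock(&qd_lock);
	ret = pool->queued;
	pthread_mutex_unlock(&qd_lock);
	return ret;
}

int main(void)
{
	struct pool pool = { 0, 0 };

	inc_queued(&pool);
	inc_queued(&pool);
	dec_queued(&pool);
	printf("total %d, pool %d\n", total_queued, pool_queued(&pool));
	return 0;
}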