Reimplement the per-thread getwork count with proper accounting at get_work().
diff --git a/main.c b/main.c
index 57fee5c..25d12db 100644
--- a/main.c
+++ b/main.c
@@ -1096,6 +1096,8 @@ static bool get_upstream_work(struct work *work, bool lagging)
work->pool = pool;
total_getworks++;
pool->getwork_requested++;
+ if (work->thr)
+ work->thr->cgpu->getworks++;
json_decref(val);
out:
@@ -1220,6 +1222,11 @@ static void *get_work_thread(void *userdata)
goto out;
}
+ if (wc->thr)
+ ret_work->thr = wc->thr;
+ else
+ ret_work->thr = NULL;
+
/* obtain new work from bitcoin via JSON-RPC */
while (!get_upstream_work(ret_work, wc->lagging)) {
if (unlikely((opt_retries >= 0) && (++failures > opt_retries))) {
@@ -2268,7 +2275,7 @@ static void pool_resus(struct pool *pool)
switch_pools(NULL);
}
-static bool queue_request(void)
+static bool queue_request(struct thr_info *thr)
{
int maxq = opt_queue + mining_threads;
struct workio_cmd *wc;
@@ -2290,8 +2297,10 @@ static bool queue_request(void)
}
wc->cmd = WC_GET_WORK;
- /* The get work does not belong to any thread */
- wc->thr = NULL;
+ if (thr)
+ wc->thr = thr;
+ else
+ wc->thr = NULL;
/* If we've queued more than 2/3 of the maximum and still have no
* staged work, consider the system lagging and allow work to be
@@ -2363,7 +2372,7 @@ static void flush_requests(void)
for (i = 0; i < stale; i++) {
/* Queue a whole batch of new requests */
- if (unlikely(!queue_request())) {
+ if (unlikely(!queue_request(NULL))) {
applog(LOG_ERR, "Failed to queue requests in flush_requests");
kill_work();
break;
@@ -2445,7 +2454,7 @@ static bool get_work(struct work *work, bool requested, struct thr_info *thr,
thread_reportout(thr);
retry:
pool = current_pool();
- if (unlikely(!requested && !queue_request())) {
+ if (unlikely(!requested && !queue_request(thr))) {
applog(LOG_WARNING, "Failed to queue_request in get_work");
goto out;
}
@@ -2673,7 +2682,6 @@ static void *miner_thread(void *userdata)
"mining thread %d", thr_id);
goto out;
}
- mythr->cgpu->getworks++;
needs_work = requested = false;
total_hashes = 0;
max_nonce = work.blk.nonce + hashes_done;
@@ -2794,7 +2802,7 @@ static void *miner_thread(void *userdata)
timeval_subtract(&diff, &tv_end, &tv_workstart);
if (!requested && (diff.tv_sec >= request_interval)) {
thread_reportout(mythr);
- if (unlikely(!queue_request())) {
+ if (unlikely(!queue_request(mythr))) {
applog(LOG_ERR, "Failed to queue_request in miner_thread %d", thr_id);
goto out;
}
@@ -2997,7 +3005,6 @@ static void *gpuminer_thread(void *userdata)
"gpu mining thread %d", thr_id);
goto out;
}
- mythr->cgpu->getworks++;
requested = false;
precalc_hash(&work->blk, (uint32_t *)(work->midstate), (uint32_t *)(work->data + 64));
work->blk.nonce = 0;
@@ -3048,7 +3055,6 @@ static void *gpuminer_thread(void *userdata)
"gpu mining thread %d", thr_id);
goto out;
}
- mythr->cgpu->getworks++;
requested = false;
precalc_hash(&work->blk, (uint32_t *)(work->midstate), (uint32_t *)(work->data + 64));
@@ -3104,7 +3110,7 @@ static void *gpuminer_thread(void *userdata)
#endif
if (diff.tv_sec > request_interval || work->blk.nonce > request_nonce) {
thread_reportout(mythr);
- if (unlikely(!queue_request())) {
+ if (unlikely(!queue_request(mythr))) {
applog(LOG_ERR, "Failed to queue_request in gpuminer_thread %d", thr_id);
goto out;
}
@@ -3438,7 +3444,7 @@ static void *watchdog_thread(void *userdata)
sleep(interval);
if (requests_queued() < opt_queue)
- queue_request();
+ queue_request(NULL);
hashmeter(-1, &zero_tv, 0);
diff --git a/miner.h b/miner.h
index e6421b9..9a2f570 100644
--- a/miner.h
+++ b/miner.h
@@ -325,6 +325,7 @@ struct work {
uint32_t valid;
dev_blk_ctx blk;
+ struct thr_info *thr;
int thr_id;
struct pool *pool;
struct timeval tv_staged;