Move the decision about whether to queue further work upstream, before threads are spawned, basing it on fine-grained per-pool stats, and increment the queued count immediately.
diff --git a/cgminer.c b/cgminer.c
index 31ab609..856537c 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -70,7 +70,7 @@ struct workio_cmd {
enum workio_commands cmd;
struct thr_info *thr;
struct work *work;
- bool needed;
+ struct pool *pool;
};
struct strategies strategies[] = {
@@ -187,7 +187,6 @@ int hw_errors;
int total_accepted, total_rejected, total_diff1;
int total_getworks, total_stale, total_discarded;
static int total_queued, staged_rollable;
-static int queued_getworks;
unsigned int new_blocks;
static unsigned int work_block;
unsigned int found_blocks;
@@ -2383,31 +2382,25 @@ out:
return cloned;
}
+static bool queue_request(struct thr_info *thr, bool needed);
+
static void *get_work_thread(void *userdata)
{
struct workio_cmd *wc = (struct workio_cmd *)userdata;
- int ts, tq, maxq = opt_queue + mining_threads;
struct pool *pool = current_pool();
struct work *ret_work= NULL;
struct curl_ent *ce = NULL;
- bool lagging = false;
pthread_detach(pthread_self());
applog(LOG_DEBUG, "Creating extra get work thread");
-retry:
- tq = global_queued();
- ts = total_staged();
-
- if (ts >= maxq)
- goto out;
-
- if (ts >= opt_queue && tq >= maxq)
- goto out;
+ pool = wc->pool;
- if (clone_available())
+ if (clone_available()) {
+ dec_queued(pool);
goto out;
+ }
ret_work = make_work();
if (wc->thr)
@@ -2419,32 +2412,19 @@ retry:
get_benchmark_work(ret_work);
ret_work->queued = true;
} else {
-
- if (!ts)
- lagging = true;
- pool = ret_work->pool = select_pool(lagging);
-
- inc_queued(pool);
+ ret_work->pool = wc->pool;
if (!ce)
ce = pop_curl_entry(pool);
- /* Check that we haven't staged work via other threads while
- * waiting for a curl entry */
- if (total_staged() >= maxq) {
- dec_queued(pool);
- free_work(ret_work);
- goto out;
- }
-
/* obtain new work from bitcoin via JSON-RPC */
if (!get_upstream_work(ret_work, ce->curl)) {
/* pause, then restart work-request loop */
applog(LOG_DEBUG, "json_rpc_call failed on get work, retrying");
- lagging = true;
dec_queued(pool);
+ queue_request(ret_work->thr, true);
free_work(ret_work);
- goto retry;
+ goto out;
}
ret_work->queued = true;
@@ -2463,9 +2443,6 @@ out:
workio_cmd_free(wc);
if (ce)
push_curl_entry(ce, pool);
- mutex_lock(&control_lock);
- queued_getworks--;
- mutex_unlock(&control_lock);
return NULL;
}
@@ -2626,8 +2603,6 @@ static struct pool *priority_pool(int choice)
return ret;
}
-static bool queue_request(struct thr_info *thr, bool needed);
-
void switch_pools(struct pool *selected)
{
struct pool *pool, *last_pool;
@@ -3930,8 +3905,28 @@ static void pool_resus(struct pool *pool)
static bool queue_request(struct thr_info *thr, bool needed)
{
+ int ts, tq, maxq = opt_queue + mining_threads;
+ struct pool *pool, *cp;
struct workio_cmd *wc;
+ ts = total_staged();
+ tq = global_queued();
+ if (ts && ts + tq >= maxq)
+ return true;
+
+ cp = current_pool();
+ if ((!needed || opt_fail_only) && (cp->staged + cp->queued >= maxq))
+ return true;
+
+ if (needed && !ts)
+ pool = select_pool(true);
+ else
+ pool = cp;
+ if (pool->staged + pool->queued >= maxq)
+ return true;
+
+ inc_queued(pool);
+
/* fill out work request message */
wc = calloc(1, sizeof(*wc));
if (unlikely(!wc)) {
@@ -3941,7 +3936,7 @@ static bool queue_request(struct thr_info *thr, bool needed)
wc->cmd = WC_GET_WORK;
wc->thr = thr;
- wc->needed = needed;
+ wc->pool = pool;
applog(LOG_DEBUG, "Queueing getwork request to work thread");