Avoid getting more work if, by the time the getwork thread is spawned, we find ourselves with enough work already queued or staged.
diff --git a/cgminer.c b/cgminer.c
index 1ab0649..2aaf4d4 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -2272,20 +2272,39 @@ static int global_queued(void)
 	return ret;
 }
 
+static bool enough_work(void)
+{
+	int cq, cs, ts, tq, maxq = opt_queue + mining_threads;
+
+	cq = current_queued();
+	cs = current_staged();
+	ts = total_staged();
+	tq = global_queued();
+
+	if (((cs || cq >= opt_queue) && ts >= maxq) ||
+	    ((cs || cq) && tq >= maxq))
+		return true;
+	return false;
+}
+
 /* ce and pool may appear uninitialised at push_curl_entry, but they're always
  * set when we don't have opt_benchmark enabled */
 static void *get_work_thread(void *userdata)
 {
 	struct workio_cmd *wc = (struct workio_cmd *)userdata;
-	struct curl_ent * uninitialised_var(ce);
 	struct pool * uninitialised_var(pool);
-	struct work *ret_work = make_work();
+	struct curl_ent *ce = NULL;
+	struct work *ret_work;
 	int failures = 0;
 
 	pthread_detach(pthread_self());
 
 	applog(LOG_DEBUG, "Creating extra get work thread");
 
+	if (enough_work())
+		goto out;
+
+	ret_work = make_work();
 	if (wc->thr)
 		ret_work->thr = wc->thr;
 	else
@@ -2330,7 +2349,7 @@ static void *get_work_thread(void *userdata)
 out:
 	workio_cmd_free(wc);
 
-	if (!opt_benchmark)
+	if (ce)
 		push_curl_entry(ce, pool);
 	return NULL;
 }
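
For readability, here is the gating check restated outside the patch: the extra getwork thread gives up fetching once the queued/staged counters already cover opt_queue plus one work item per mining thread. The block below is a minimal standalone sketch, not cgminer code; opt_queue, mining_threads and the *_stub values are hard-coded stand-ins for the real current_queued(), current_staged(), total_staged() and global_queued() counters.

/* Minimal sketch (not cgminer code): the enough_work() threshold with
 * the queue counters replaced by hard-coded stand-ins. */
#include <stdbool.h>
#include <stdio.h>

static int opt_queue = 1;	/* stand-in for the --queue setting */
static int mining_threads = 2;	/* stand-in for the detected thread count */

/* Stand-ins for current_queued(), current_staged(), total_staged()
 * and global_queued(). */
static int cq_stub = 1, cs_stub = 1, ts_stub = 3, tq_stub = 3;

static bool enough_work(void)
{
	int cq = cq_stub, cs = cs_stub, ts = ts_stub, tq = tq_stub;
	int maxq = opt_queue + mining_threads;

	/* Either the current pool has work queued/staged and the total
	 * staged covers maxq, or something is queued/staged and the
	 * global queue already covers maxq. */
	if (((cs || cq >= opt_queue) && ts >= maxq) ||
	    ((cs || cq) && tq >= maxq))
		return true;
	return false;
}

int main(void)
{
	printf("enough_work: %s\n",
	       enough_work() ? "yes, skip the fetch" : "no, fetch more");
	return 0;
}

With these stand-in values maxq is 3, the first clause is satisfied, and the spawned getwork thread would take the goto out path without fetching anything.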
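
The second hunk, changing if (!opt_benchmark) to if (ce), follows from the new early exit: when enough_work() triggers the goto, the function returns before a curl entry is ever taken in the elided middle of the function, so the cleanup has to key on whether ce was actually set. Below is a generic sketch of that acquire-then-guarded-release pattern, using hypothetical acquire_entry()/release_entry() helpers rather than cgminer's curl entry handling.

/* Sketch of the "initialise to NULL, release only what was acquired"
 * pattern; acquire_entry()/release_entry() are hypothetical stand-ins,
 * not cgminer's curl entry functions. */
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int id;
};

static struct entry *acquire_entry(void)
{
	struct entry *e = malloc(sizeof(*e));

	if (e)
		e->id = 42;
	return e;
}

static void release_entry(struct entry *e)
{
	free(e);
}

static void worker(int already_done)
{
	struct entry *e = NULL;	/* stays NULL on the early-exit path */

	if (already_done)
		goto out;

	e = acquire_entry();
	if (e)
		printf("using entry %d\n", e->id);
out:
	if (e)	/* release only if acquisition actually happened */
		release_entry(e);
}

int main(void)
{
	worker(1);	/* early exit: nothing acquired, nothing released */
	worker(0);	/* normal path: acquire, use, release */
	return 0;
}

The same reasoning covers the patch: with ce initialised to NULL, guarding push_curl_entry() on ce is safe on every path, including the benchmark case the old !opt_benchmark test was there for.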