Roll back to 45f0ac7b482abe9d9d7c4644c286df6e70924145
diff --git a/cgminer.c b/cgminer.c
index d530db6..89db177 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -2382,8 +2382,8 @@ out:
static void *get_work_thread(void *userdata)
{
- int ts, tq, oq = opt_queue * mining_threads, maxq = oq + mining_threads;
struct workio_cmd *wc = (struct workio_cmd *)userdata;
+ int ts, tq, maxq = opt_queue + mining_threads;
struct pool *pool = current_pool();
struct work *ret_work= NULL;
struct curl_ent *ce = NULL;
@@ -2400,7 +2400,7 @@ retry:
if (ts >= maxq)
goto out;
- if (ts >= oq && tq >= maxq)
+ if (ts >= opt_queue && tq >= maxq - 1)
goto out;
if (clone_available())
@@ -3970,9 +3970,9 @@ static bool reuse_work(struct work *work)
/* Clones work by rolling it if possible, and returning a clone instead of the
* original work item which gets staged again to possibly be rolled again in
* the future */
-static struct work *clone_lpwork(struct work *work)
+static struct work *clone_work(struct work *work)
{
- int oq = opt_queue * mining_threads, mrs = mining_threads + oq;
+ int mrs = mining_threads + opt_queue - total_staged();
struct work *work_clone;
bool cloned;
@@ -4075,6 +4075,7 @@ retry:
pool_resus(pool);
}
+ work_heap = clone_work(work_heap);
memcpy(work, work_heap, sizeof(struct work));
free_work(work_heap);
@@ -4413,7 +4414,7 @@ static void convert_to_work(json_t *val, int rolltime, struct pool *pool)
return;
}
- work = clone_lpwork(work);
+ work = clone_work(work);
applog(LOG_DEBUG, "Pushing converted work to stage thread");