Count extra longpoll work items in staged_extras so that we still queue more work when regular work items are queued as longpolls.
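
The idea is that cloned and longpoll work items sit in the staged hash table without representing fresh getwork fetches, so they are tracked in a single staged_extras counter. Below is a minimal standalone sketch of that counting scheme, assuming uthash.h is available (as it is in cgminer); the struct is trimmed to the fields the counter logic touches, and stage_work()/unstage_work() are hypothetical helpers standing in for hash_push()/hash_pop():

#include <stdbool.h>
#include <stdio.h>
#include "uthash.h"

struct work {
	int id;
	bool clone;     /* locally cloned copy of fetched work */
	bool longpoll;  /* work delivered by a longpoll response */
	UT_hash_handle hh;
};

static struct work *staged_work;
static int staged_extras;	/* clones + longpoll items currently staged */

/* Stage a work item, counting clones and longpolls as "extras" so the
 * request-spacing check does not mistake them for regular fetched work. */
static void stage_work(struct work *work)
{
	HASH_ADD_INT(staged_work, id, work);
	if (work->clone || work->longpoll)
		++staged_extras;
}

/* Unstage a work item, keeping the extras counter in sync. */
static void unstage_work(struct work *work)
{
	HASH_DEL(staged_work, work);
	if (work->clone || work->longpoll)
		--staged_extras;
}

int main(void)
{
	struct work a = { .id = 1, .longpoll = true };
	struct work b = { .id = 2 };

	stage_work(&a);
	stage_work(&b);
	printf("staged=%u extras=%d\n", HASH_COUNT(staged_work), staged_extras);
	unstage_work(&a);
	printf("staged=%u extras=%d\n", HASH_COUNT(staged_work), staged_extras);
	return 0;
}
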
diff --git a/cgminer.c b/cgminer.c
index 786eb6d..b710f73 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -216,7 +216,7 @@ struct thread_q *getq;
static int total_work;
struct work *staged_work = NULL;
-static int staged_clones;
+static int staged_extras;
struct schedtime {
bool enable;
@@ -2062,8 +2062,8 @@ static int discard_stale(void)
HASH_ITER(hh, staged_work, work, tmp) {
if (stale_work(work, false)) {
HASH_DEL(staged_work, work);
- if (work->clone)
- --staged_clones;
+ if (work->clone || work->longpoll)
+ --staged_extras;
discard_work(work);
stale++;
}
@@ -2205,8 +2205,8 @@ static bool hash_push(struct work *work)
if (likely(!getq->frozen)) {
HASH_ADD_INT(staged_work, id, work);
HASH_SORT(staged_work, tv_sort);
- if (work->clone)
- ++staged_clones;
+ if (work->clone || work->longpoll)
+ ++staged_extras;
} else
rc = false;
pthread_cond_signal(&getq->cond);
@@ -3121,7 +3121,7 @@ static bool queue_request(struct thr_info *thr, bool needed)
/* Space out retrieval of extra work according to the number of mining
* threads */
- if (rq >= mining_threads + staged_clones &&
+ if (rq >= mining_threads + staged_extras &&
(now.tv_sec - requested_tv_sec) < opt_scantime / (mining_threads + 1))
return true;
@@ -3170,8 +3170,8 @@ static struct work *hash_pop(const struct timespec *abstime)
if (HASH_COUNT(staged_work)) {
work = staged_work;
HASH_DEL(staged_work, work);
- if (work->clone)
- --staged_clones;
+ if (work->clone || work->longpoll)
+ --staged_extras;
}
mutex_unlock(stgd_lock);
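
The queue_request() hunk is where the counter matters: extras inflate the staged count without representing fresh fetches, so they are added to the threshold before the early return. A rough sketch of that check follows, under the assumption that rq counts outstanding requests and requested_tv_sec records when work was last requested (both appear in the patch context; the helper name and its parameter list are invented for illustration):

/* Hypothetical extraction of the spacing test in queue_request().
 * Raising the threshold by staged_extras keeps longpoll/clone items
 * from suppressing requests for genuinely new work. */
static bool enough_work_queued(int rq, int mining_threads, int staged_extras,
			       time_t now_sec, time_t requested_tv_sec,
			       int opt_scantime)
{
	if (rq >= mining_threads + staged_extras &&
	    (now_sec - requested_tv_sec) < opt_scantime / (mining_threads + 1))
		return true;	/* enough genuine work in flight; skip requesting */
	return false;
}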