Further simplify the total_queued counting mechanism and perform all dec_queued calls from a single location.
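
As background for the diff below, here is a minimal standalone sketch (not cgminer source; the struct fields and helpers are simplified stand-ins) of the accounting this change aims for: total_queued goes up when a getwork is queued and now comes back down in exactly one place, discard_work(), and only for work that was genuinely queued rather than cloned locally.

/* Standalone sketch of the consolidated total_queued accounting.
 * Types and helpers are simplified placeholders, not cgminer's own. */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct work {
	bool clone;	/* locally cloned work; it never consumed a queued getwork */
	bool rolls;	/* stand-in flags mirroring the fields tested in the diff */
	bool mined;
};

static unsigned int total_queued;
static unsigned int total_discarded;

/* Done lockless in cgminer since this is not a critical value */
static void inc_queued(void)
{
	total_queued++;
}

static void dec_queued(void)
{
	total_queued--;
}

static void discard_work(struct work *work)
{
	/* The single place where the queued count is dropped. */
	if (!work->clone)
		dec_queued();

	if (!work->clone && !work->rolls && !work->mined)
		total_discarded++;

	free(work);	/* stands in for free_work() */
}

int main(void)
{
	struct work *queued = calloc(1, sizeof(*queued));
	struct work *cloned = calloc(1, sizeof(*cloned));

	cloned->clone = true;

	inc_queued();		/* a real getwork request was queued	*/
	discard_work(queued);	/* stale: counter drops back to zero	*/
	discard_work(cloned);	/* clone: counter is left untouched	*/

	printf("total_queued=%u total_discarded=%u\n",
	       total_queued, total_discarded);
	return 0;
}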
diff --git a/cgminer.c b/cgminer.c
index 0381b04..483331a 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -2371,18 +2371,6 @@ void switch_pools(struct pool *selected)
 	mutex_unlock(&lp_lock);
 }
 
-static void discard_work(struct work *work)
-{
-	if (!work->clone && !work->rolls && !work->mined) {
-		if (work->pool)
-			work->pool->discarded_work++;
-		total_discarded++;
-		applog(LOG_DEBUG, "Discarded work");
-	} else
-		applog(LOG_DEBUG, "Discarded cloned or rolled work");
-	free_work(work);
-}
-
 /* Done lockless since this is not a critical value */
 static inline void inc_queued(void)
 {
@@ -2395,15 +2383,25 @@ static inline void dec_queued(void)
 	total_queued--;
 }
 
-static int requests_queued(void)
+static void discard_work(struct work *work)
 {
-	return requests_staged() - staged_extras;
+	if (!work->clone)
+		dec_queued();
+
+	if (!work->clone && !work->rolls && !work->mined) {
+		if (work->pool)
+			work->pool->discarded_work++;
+		total_discarded++;
+		applog(LOG_DEBUG, "Discarded work");
+	} else
+		applog(LOG_DEBUG, "Discarded cloned or rolled work");
+	free_work(work);
 }
 
 static int discard_stale(void)
 {
 	struct work *work, *tmp;
-	int i, stale = 0;
+	int stale = 0;
 
 	mutex_lock(stgd_lock);
 	HASH_ITER(hh, staged_work, work, tmp) {
@@ -2419,10 +2417,6 @@ static int discard_stale(void)
 	applog(LOG_DEBUG, "Discarded %d stales that didn't match current hash", stale);
 
-	/* Dec queued outside the loop to not have recursive locks */
-	for (i = 0; i < stale; i++)
-		dec_queued();
-
 	return stale;
 }
@@ -3498,11 +3492,11 @@ static void pool_resus(struct pool *pool)
 
 static bool queue_request(struct thr_info *thr, bool needed)
 {
-	int rs = requests_staged(), rq = requests_queued();
+	int rs = requests_staged();
 	struct workio_cmd *wc;
 
-	if ((rq >= mining_threads || (rq >= opt_queue && rs >= mining_threads)) &&
-	    total_queued >= opt_queue)
+	if ((total_queued >= opt_queue && rs >= mining_threads) ||
+	    total_queued >= mining_threads)
 		return true;
 
 	/* fill out work request message */
@@ -3521,7 +3515,7 @@ static bool queue_request(struct thr_info *thr, bool needed)
 	/* If we're queueing work faster than we can stage it, consider the
 	 * system lagging and allow work to be gathered from another pool if
 	 * possible */
-	if (rq && needed && !requests_staged() && !opt_fail_only)
+	if (total_queued && needed && !rs && !opt_fail_only)
 		wc->lagging = true;
 
 	applog(LOG_DEBUG, "Queueing getwork request to work thread");
@@ -3671,7 +3665,7 @@ static bool get_work(struct work *work, bool requested, struct thr_info *thr,
 	}
 retry:
 	pool = current_pool();
-	if (!requested || requests_queued() < opt_queue) {
+	if (!requested || total_queued < opt_queue) {
 		if (unlikely(!queue_request(thr, true))) {
 			applog(LOG_WARNING, "Failed to queue_request in get_work");
 			goto out;
@@ -3721,7 +3715,6 @@ retry:
 	}
 
 	if (stale_work(work_heap, false)) {
-		dec_queued();
 		discard_work(work_heap);
 		goto retry;
 	}
@@ -3737,8 +3730,6 @@ retry:
 	work_heap = clone_work(work_heap);
 	memcpy(work, work_heap, sizeof(struct work));
 	free_work(work_heap);
-	if (!work->clone)
-		dec_queued();
 	ret = true;
 out:
@@ -4336,8 +4327,7 @@ static void *watchdog_thread(void __maybe_unused *userdata)
 		struct timeval now;
 
 		sleep(interval);
 
-		if (requests_queued() < opt_queue || total_queued < opt_queue)
-			queue_request(NULL, false);
+		queue_request(NULL, false);
 
 		age_work();
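
For reference, a standalone sketch of the reworked queue_request() early-return, using assumed values for opt_queue, mining_threads and the staged count; it models only the condition shown in the queue_request() hunk above, nothing else from the function.

/* Standalone sketch of the new queue_request() gate: skip queueing another
 * getwork when the queued count covers the configured queue depth while
 * enough work is staged for every mining thread, or when the queue alone
 * already covers all threads.  The globals below are stand-ins. */
#include <stdbool.h>
#include <stdio.h>

static int total_queued;
static int opt_queue = 1;
static int mining_threads = 2;

static int requests_staged(void)
{
	return 2;	/* pretend two works sit in the staged hash */
}

/* Mirrors the early-return condition in the patched queue_request(). */
static bool enough_queued(void)
{
	int rs = requests_staged();

	return (total_queued >= opt_queue && rs >= mining_threads) ||
		total_queued >= mining_threads;
}

int main(void)
{
	for (total_queued = 0; total_queued <= 2; total_queued++)
		printf("total_queued=%d -> %s\n", total_queued,
		       enough_queued() ? "skip request" : "queue another getwork");
	return 0;
}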