Further simplify the queue request mechanism.
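
This collapses the per-pool queued/staged bookkeeping into the global
counters alone, all guarded by the one stgd_lock. A minimal standalone
sketch of the resulting pattern (the counter and lock names mirror the
patch; the pthread scaffolding, the stage_work() helper and main() are
illustrative assumptions, and total_staged here is a plain counter
standing in for cgminer's hashed staged_work list):

    #include <pthread.h>

    /* One mutex guards both global counters; no per-pool fields remain. */
    static pthread_mutex_t stgd_lock = PTHREAD_MUTEX_INITIALIZER;
    static int total_queued;   /* getwork requests in flight */
    static int total_staged;   /* stand-in for the staged_work hash count */

    static void inc_queued(void)
    {
        pthread_mutex_lock(&stgd_lock);
        total_queued++;
        pthread_mutex_unlock(&stgd_lock);
    }

    /* Call with stgd_lock held; clamps at zero, as in the patch. */
    static void __dec_queued(void)
    {
        if (total_queued)
            total_queued--;
    }

    static void dec_queued(void)
    {
        pthread_mutex_lock(&stgd_lock);
        __dec_queued();
        pthread_mutex_unlock(&stgd_lock);
    }

    /* A queued request becoming staged work flips both counters under
     * the same lock, so readers always see a consistent picture. */
    static void stage_work(void)
    {
        pthread_mutex_lock(&stgd_lock);
        __dec_queued();
        total_staged++;
        pthread_mutex_unlock(&stgd_lock);
    }

    int main(void)
    {
        inc_queued();   /* queue_request() entry */
        stage_work();   /* hash_push() success path */
        inc_queued();
        dec_queued();   /* queue_request() bail-out path */
        return 0;
    }
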
diff --git a/cgminer.c b/cgminer.c
index f09d57b..525614c 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -1355,23 +1355,6 @@ static int pending_staged(void)
return ret;
}
-static int pool_staged(struct pool *pool)
-{
- int ret;
-
- mutex_lock(stgd_lock);
- ret = pool->staged;
- mutex_unlock(stgd_lock);
- return ret;
-}
-
-static int current_staged(void)
-{
- struct pool *pool = current_pool();
-
- return pool_staged(pool);
-}
-
#ifdef HAVE_CURSES
WINDOW *mainwin, *statuswin, *logwin;
#endif
@@ -2240,43 +2223,26 @@ static void push_curl_entry(struct curl_ent *ce, struct pool *pool)
/* This is overkill, but at least we'll know accurately how much work is
* queued to prevent ever being left without work */
-static void inc_queued(struct pool *pool)
+static void inc_queued(void)
{
mutex_lock(stgd_lock);
- if (likely(pool))
- pool->queued++;
total_queued++;
mutex_unlock(stgd_lock);
}
-static void __dec_queued(struct pool *pool)
+static void __dec_queued(void)
{
- if (!total_queued)
- return;
-
- if (likely(pool))
- pool->queued--;
- total_queued--;
+ if (total_queued)
+ total_queued--;
}
-static void dec_queued(struct pool *pool)
+static void dec_queued(void)
{
mutex_lock(stgd_lock);
- __dec_queued(pool);
+ __dec_queued();
mutex_unlock(stgd_lock);
}
-static int current_queued(void)
-{
- struct pool *pool = current_pool();
- int ret;
-
- mutex_lock(stgd_lock);
- ret = pool->queued;
- mutex_unlock(stgd_lock);
- return ret;
-}
-
static int global_queued(void)
{
int ret;
@@ -2617,7 +2583,6 @@ static void discard_stale(void)
HASH_ITER(hh, staged_work, work, tmp) {
if (stale_work(work, false)) {
HASH_DEL(staged_work, work);
- work->pool->staged--;
discard_work(work);
stale++;
}
@@ -2789,8 +2754,7 @@ static bool hash_push(struct work *work)
mutex_lock(stgd_lock);
if (likely(!getq->frozen)) {
HASH_ADD_INT(staged_work, id, work);
- work->pool->staged++;
- __dec_queued(work->pool);
+ __dec_queued();
HASH_SORT(staged_work, tv_sort);
} else
rc = false;
@@ -3823,34 +3787,25 @@ static bool clone_available(void)
bool queue_request(struct thr_info *thr, bool needed)
{
- int cq, cs, ts, tq, maxq;
- bool lag, ret, qing;
struct workio_cmd *wc;
+ bool lag, ret, qing;
+ int ps, ts, maxq;
- inc_queued(NULL);
+ inc_queued();
maxq = opt_queue + mining_threads;
lag = ret = qing = false;
- cq = current_queued();
- cs = current_staged();
- ts = pending_staged();
- tq = global_queued();
+ ps = pending_staged();
+ ts = total_staged();
- if (needed && cq >= maxq && !ts && !opt_fail_only) {
- /* If we're queueing work faster than we can stage it, consider
- * the system lagging and allow work to be gathered from
- * another pool if possible */
- lag = true;
- } else {
- /* Test to make sure we have enough work for pools without rolltime
- * and enough original work for pools with rolltime */
- if (((cs || cq >= opt_queue) && ts >= maxq) ||
- ((cs || cq) && tq >= maxq)) {
- ret = true;
- goto out;
- }
+ if (ps >= maxq) {
+ ret = true;
+ goto out;
}
+ if (needed && !ts && !opt_fail_only)
+ lag = true;
+
/* fill out work request message */
wc = calloc(1, sizeof(*wc));
if (unlikely(!wc)) {
@@ -3873,7 +3828,7 @@ bool queue_request(struct thr_info *thr, bool needed)
qing = ret = true;
out:
if (!qing)
- dec_queued(NULL);
+ dec_queued();
return true;
}
@@ -3889,7 +3844,6 @@ static struct work *hash_pop(const struct timespec *abstime)
if (HASH_COUNT(staged_work)) {
work = staged_work;
HASH_DEL(staged_work, work);
- work->pool->staged--;
}
mutex_unlock(stgd_lock);
@@ -3952,7 +3906,7 @@ static bool get_work(struct work *work, bool requested, struct thr_info *thr,
struct timespec abstime = {0, 0};
struct timeval now;
struct work *work_heap;
- int failures = 0, cq;
+ int failures = 0, tq;
struct pool *pool;
/* Tell the watchdog thread this thread is waiting on getwork and
@@ -3965,10 +3919,10 @@ static bool get_work(struct work *work, bool requested, struct thr_info *thr,
return true;
}
- cq = current_queued();
+ tq = global_queued();
retry:
pool = current_pool();
- if (!requested || cq < opt_queue) {
+ if (!requested || tq < opt_queue) {
if (unlikely(!queue_request(thr, true))) {
applog(LOG_WARNING, "Failed to queue_request in get_work");
goto out;
@@ -3981,7 +3935,7 @@ retry:
goto out;
}
- if (!pool->lagging && requested && !newreq && !pool_staged(pool) && cq >= mining_threads + opt_queue) {
+ if (!pool->lagging && requested && !newreq && !total_staged() && pending_staged() >= mining_threads + opt_queue) {
struct cgpu_info *cgpu = thr->cgpu;
bool stalled = true;
int i;
diff --git a/miner.h b/miner.h
index 5afa071..dc94b5a 100644
--- a/miner.h
+++ b/miner.h
@@ -720,8 +720,6 @@ struct pool {
int accepted, rejected;
int seq_rejects;
int solved;
- int queued;
- int staged;
bool submit_fail;
bool idle;
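
Design note: queue_request() now throttles on a single comparison,
pending_staged() against opt_queue + mining_threads, where it previously
juggled four interacting counters (cq, cs, ts, tq), and the lagging
heuristic reduces to "a caller needs work, nothing is staged, and
opt_fail_only is off". Dropping pool->queued and pool->staged also
removes two fields that had to be kept in sync under stgd_lock across
hash_push(), hash_pop() and discard_stale().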