Simplify the total_queued count to those staged but not cloned, and remove the locking since it is no longer a critical value. Clone only the anticipated difference, since there will be a lag from the value returned by requests_staged(). Keep a 1/3 buffer of extra work items when ageing them. A condensed sketch of the new arithmetic follows before the diff.
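
For orientation, here is a self-contained sketch of the scheduling arithmetic this patch introduces. The variables, thresholds and sample values are stand-ins for cgminer's total_queued, staged_extras, requests_staged(), mining_threads and opt_queue, not the real globals, and the functions only model the conditions added by the diff below:

#include <stdio.h>

/* Stand-ins for cgminer's globals; the values are hypothetical. */
static int total_queued;     /* advisory count of outstanding requests; lockless after this patch */
static int staged_extras;    /* cloned/rolled work sitting in the staging queue */
static int staged;           /* what requests_staged() would report */
static int mining_threads = 12;
static int opt_queue = 1;

/* The counter is advisory now, so it is bumped without a lock. */
static inline void inc_queued(void) { total_queued++; }
static inline void dec_queued(void) { if (total_queued > 0) total_queued--; }

/* "Queued" now means staged work that is not a clone. */
static int requests_queued(void) { return staged - staged_extras; }

/* queue_request(): skip asking the pool for more work when enough is pending. */
static int have_enough_work(void)
{
	int rs = staged, rq = requests_queued();

	return rq >= mining_threads || (rq >= opt_queue && rs >= mining_threads);
}

/* clone_work(): only roll the anticipated difference, since requests_staged()
 * lags behind what the mining threads are about to consume. */
static int clone_budget(void)
{
	int mrs = mining_threads - staged;

	return mrs > 0 ? mrs : 0;
}

/* age_work(): keep a 1/3 buffer of extra staged items before discarding old ones. */
static int age_threshold(void) { return mining_threads * 4 / 3; }

int main(void)
{
	inc_queued();
	dec_queued();
	staged = 10;
	staged_extras = 4;
	printf("queued=%d enough=%d clone_budget=%d age_threshold=%d\n",
	       requests_queued(), have_enough_work(), clone_budget(), age_threshold());
	/* With 12 mining threads: queued=6 enough=0 clone_budget=2 age_threshold=16 */
	return 0;
}

Dropping qd_lock follows from the comment in the diff itself: total_queued is no longer a critical value, it only influences how eagerly extra work is requested, so a racy increment or decrement can at worst skew the pacing of work requests by one item.
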
diff --git a/cgminer.c b/cgminer.c
index 17b2db6..2d789d8 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -161,7 +161,6 @@ static int total_threads;
struct work_restart *work_restart = NULL;
static pthread_mutex_t hash_lock;
-static pthread_mutex_t qd_lock;
static pthread_mutex_t *stgd_lock;
#ifdef HAVE_CURSES
static pthread_mutex_t curses_lock;
@@ -2364,11 +2363,6 @@ void switch_pools(struct pool *selected)
if (pool != last_pool)
applog(LOG_WARNING, "Switching to %s", pool->rpc_url);
- /* Reset the queued amount to allow more to be queued for the new pool */
- mutex_lock(&qd_lock);
- total_queued = 0;
- mutex_unlock(&qd_lock);
-
mutex_lock(&lp_lock);
pthread_cond_broadcast(&lp_cond);
mutex_unlock(&lp_lock);
@@ -2386,31 +2380,21 @@ static void discard_work(struct work *work)
free_work(work);
}
-/* This is overkill, but at least we'll know accurately how much work is
- * queued to prevent ever being left without work */
-static void inc_queued(void)
+/* Done lockless since this is not a critical value */
+static inline void inc_queued(void)
{
- mutex_lock(&qd_lock);
total_queued++;
- mutex_unlock(&qd_lock);
}
-static void dec_queued(void)
+static inline void dec_queued(void)
{
- mutex_lock(&qd_lock);
- if (total_queued > 0)
+ if (likely(total_queued > 0))
total_queued--;
- mutex_unlock(&qd_lock);
}
static int requests_queued(void)
{
- int ret;
-
- mutex_lock(&qd_lock);
- ret = total_queued;
- mutex_unlock(&qd_lock);
- return ret;
+ return requests_staged() - staged_extras;
}
static int discard_stale(void)
@@ -3509,20 +3493,12 @@ static void pool_resus(struct pool *pool)
switch_pools(NULL);
}
-static long requested_tv_sec;
-
static bool queue_request(struct thr_info *thr, bool needed)
{
- int rq = requests_queued();
+ int rs = requests_staged(), rq = requests_queued();
struct workio_cmd *wc;
- struct timeval now;
-
- gettimeofday(&now, NULL);
- /* Space out retrieval of extra work according to the number of mining
- * threads */
- if (rq >= mining_threads + staged_extras &&
- (now.tv_sec - requested_tv_sec) < opt_scantime / (mining_threads + 1))
+ if (rq >= mining_threads || (rq >= opt_queue && rs >= mining_threads))
return true;
/* fill out work request message */
@@ -3553,7 +3529,6 @@ static bool queue_request(struct thr_info *thr, bool needed)
return false;
}
- requested_tv_sec = now.tv_sec;
inc_queued();
return true;
}
@@ -3637,11 +3612,17 @@ static struct work *make_clone(struct work *work)
* the future */
static struct work *clone_work(struct work *work)
{
+ int mrs = mining_threads - requests_staged();
struct work *work_clone;
- bool cloned = false;
+ bool cloned;
+
+ if (mrs < 1)
+ return work;
+
+ cloned = false;
work_clone = make_clone(work);
- while (requests_staged() < mining_threads && can_roll(work) && should_roll(work)) {
+ while (mrs-- > 0 && can_roll(work) && should_roll(work)) {
applog(LOG_DEBUG, "Pushing rolled converted work to stage thread");
if (unlikely(!stage_work(work_clone))) {
cloned = false;
@@ -3699,7 +3680,7 @@ retry:
goto out;
}
- if (!pool->lagging && requested && !newreq && !requests_staged() && requests_queued() >= mining_threads) {
+ if (!pool->lagging && requested && !newreq && !requests_staged()) {
struct cgpu_info *cgpu = thr->cgpu;
bool stalled = true;
int i;
@@ -4321,7 +4302,7 @@ static void age_work(void)
{
int discarded = 0;
- while (requests_staged() > mining_threads) {
+ while (requests_staged() > mining_threads * 4 / 3) {
struct work *work = hash_pop(NULL);
if (unlikely(!work))
@@ -4905,7 +4886,6 @@ int main(int argc, char *argv[])
#endif
mutex_init(&hash_lock);
- mutex_init(&qd_lock);
#ifdef HAVE_CURSES
mutex_init(&curses_lock);
#endif