Commit 53269a97f30c363ef92a522fe1cb7d8f7bd61a9a

Con Kolivas 2012-06-24T21:57:49

Revert "Simplify the total_queued count to those staged not cloned and remove the locking since it's no longer a critical value." This reverts commit 9f811c528f6eefbca5f16c92181783f756e3a68f.

diff --git a/cgminer.c b/cgminer.c
index 67691eb..7475364 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -161,6 +161,7 @@ static int total_threads;
 struct work_restart *work_restart = NULL;
 
 static pthread_mutex_t hash_lock;
+static pthread_mutex_t qd_lock;
 static pthread_mutex_t *stgd_lock;
 #ifdef HAVE_CURSES
 static pthread_mutex_t curses_lock;
@@ -2366,6 +2367,11 @@ void switch_pools(struct pool *selected)
 	if (pool != last_pool)
 		applog(LOG_WARNING, "Switching to %s", pool->rpc_url);
 
+	/* Reset the queued amount to allow more to be queued for the new pool */
+	mutex_lock(&qd_lock);
+	total_queued = 0;
+	mutex_unlock(&qd_lock);
+
 	mutex_lock(&lp_lock);
 	pthread_cond_broadcast(&lp_cond);
 	mutex_unlock(&lp_lock);
@@ -2383,21 +2389,31 @@ static void discard_work(struct work *work)
 	free_work(work);
 }
 
-/* Done lockless since this is not a critical value */
-static inline void inc_queued(void)
+/* This is overkill, but at least we'll know accurately how much work is
+ * queued to prevent ever being left without work */
+static void inc_queued(void)
 {
+	mutex_lock(&qd_lock);
 	total_queued++;
+	mutex_unlock(&qd_lock);
 }
 
-static inline void dec_queued(void)
+static void dec_queued(void)
 {
-	if (likely(total_queued > 0))
+	mutex_lock(&qd_lock);
+	if (total_queued > 0)
 		total_queued--;
+	mutex_unlock(&qd_lock);
 }
 
 static int requests_queued(void)
 {
-	return requests_staged() - staged_extras;
+	int ret;
+
+	mutex_lock(&qd_lock);
+	ret = total_queued;
+	mutex_unlock(&qd_lock);
+	return ret;
 }
 
 static int discard_stale(void)
@@ -3496,12 +3512,20 @@ static void pool_resus(struct pool *pool)
 		switch_pools(NULL);
 }
 
+static long requested_tv_sec;
+
 static bool queue_request(struct thr_info *thr, bool needed)
 {
-	int rs = requests_staged(), rq = requests_queued();
+	int rq = requests_queued();
 	struct workio_cmd *wc;
+	struct timeval now;
+
+	gettimeofday(&now, NULL);
 
-	if (rq >= mining_threads || (rq >= opt_queue && rs >= mining_threads))
+	/* Space out retrieval of extra work according to the number of mining
+	 * threads */
+	if (rq >= mining_threads + staged_extras &&
+	    (now.tv_sec - requested_tv_sec) < opt_scantime / (mining_threads + 1))
 		return true;
 
 	/* fill out work request message */
@@ -3532,6 +3556,7 @@ static bool queue_request(struct thr_info *thr, bool needed)
 		return false;
 	}
 
+	requested_tv_sec = now.tv_sec;
 	inc_queued();
 	return true;
 }
@@ -3615,17 +3640,11 @@ static struct work *make_clone(struct work *work)
  * the future */
 static struct work *clone_work(struct work *work)
 {
-	int mrs = mining_threads - requests_staged();
 	struct work *work_clone;
-	bool cloned;
-
-	if (mrs < 1)
-		return work;
-
-	cloned = false;
+	bool cloned = false;
 
 	work_clone = make_clone(work);
-	while (mrs-- > 0 && can_roll(work) && should_roll(work)) {
+	while (requests_staged() < mining_threads && can_roll(work) && should_roll(work)) {
 		applog(LOG_DEBUG, "Pushing rolled converted work to stage thread");
 		if (unlikely(!stage_work(work_clone))) {
 			cloned = false;
@@ -3683,7 +3702,7 @@ retry:
 		goto out;
 	}
 
-	if (!pool->lagging && requested && !newreq && !requests_staged()) {
+	if (!pool->lagging && requested && !newreq && !requests_staged() && requests_queued() >= mining_threads) {
 		struct cgpu_info *cgpu = thr->cgpu;
 		bool stalled = true;
 		int i;
@@ -4305,7 +4324,7 @@ static void age_work(void)
 {
 	int discarded = 0;
 
-	while (requests_staged() > mining_threads * 4 / 3) {
+	while (requests_staged() > mining_threads) {
 		struct work *work = hash_pop(NULL);
 
 		if (unlikely(!work))
@@ -4889,6 +4908,7 @@ int main(int argc, char *argv[])
 #endif
 
 	mutex_init(&hash_lock);
+	mutex_init(&qd_lock);
 #ifdef HAVE_CURSES
 	mutex_init(&curses_lock);
 #endif
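
The other half of the revert restores time-based spacing of work requests in queue_request(): once enough work is queued (mining_threads + staged_extras), further requests are suppressed until opt_scantime / (mining_threads + 1) seconds have passed since the last one, so retrieval of extra work is spread evenly across the scan interval. A minimal standalone sketch of that check follows; the variable values are hypothetical, should_skip_request() is an invented name rather than a cgminer function, and the real patch updates requested_tv_sec only after the work request is actually queued:

#include <stdbool.h>
#include <stdio.h>
#include <sys/time.h>

static int mining_threads = 4;	/* hypothetical values for illustration */
static int staged_extras;
static int opt_scantime = 60;
static long requested_tv_sec;	/* tv_sec of the most recent work request */

/* Return true if a new work request should be suppressed for now */
static bool should_skip_request(int rq)
{
	struct timeval now;

	gettimeofday(&now, NULL);

	/* Enough work is queued and the spacing interval has not elapsed */
	if (rq >= mining_threads + staged_extras &&
	    (now.tv_sec - requested_tv_sec) < opt_scantime / (mining_threads + 1))
		return true;

	/* Otherwise record when this request went out and let it proceed */
	requested_tv_sec = now.tv_sec;
	return false;
}

int main(void)
{
	/* First call: nothing requested yet, so the request proceeds */
	printf("skip: %d\n", should_skip_request(10));	/* prints "skip: 0" */
	/* Immediately after, with 10 queued, the request is suppressed */
	printf("skip: %d\n", should_skip_request(10));	/* prints "skip: 1" */
	return 0;
}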