Commit 74cd6548a939c681502c61ec87677d6435a3c5df

Con Kolivas 2012-06-24T22:00:37

Use a static base measurement of the difference between mining_threads and requests_staged() to decide how many items to clone, since requests_staged may not climb while rolling.
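
A minimal sketch of the pattern this commit adopts, with a hypothetical
stand-in for requests_staged() and printf() in place of the real
stage_work()/rolling machinery: measure the shortfall once and count that
snapshot down, rather than re-polling a counter that may not climb while
work is being rolled.

    #include <stdio.h>

    static int mining_threads = 8;
    static int staged = 3;            /* stand-in for the staged-work counter */

    /* Stand-in for cgminer's requests_staged(); assumed here to return a
     * count that does not climb while work is being rolled. */
    static int requests_staged(void)
    {
        return staged;
    }

    int main(void)
    {
        /* Static base measurement taken once, as in the new code: the
         * shortfall between mining threads and staged requests. */
        int mrs = mining_threads - requests_staged();

        if (mrs < 1)
            return 0;                 /* enough work already staged */

        /* Count the snapshot down instead of re-polling requests_staged(),
         * so exactly the measured shortfall gets cloned. */
        while (mrs-- > 0)
            printf("stage one rolled clone, %d to go\n", mrs);

        return 0;
    }

Counting down the snapshot bounds the loop to exactly the measured
shortfall, whereas the old condition re-read requests_staged() on every
iteration and relied on it climbing as clones were staged.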

diff --git a/cgminer.c b/cgminer.c
index 0b0f457..7d7af4d 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -3640,11 +3640,16 @@ static struct work *make_clone(struct work *work)
  * the future */
 static struct work *clone_work(struct work *work)
 {
+	int mrs = mining_threads - requests_staged();
 	struct work *work_clone;
-	bool cloned = false;
+	bool cloned;
 
+	if (mrs < 1)
+		return work;
+
+	cloned = false;
 	work_clone = make_clone(work);
-	while (requests_staged() < mining_threads && can_roll(work) && should_roll(work)) {
+	while (mrs-- > 0 && can_roll(work) && should_roll(work)) {
 		applog(LOG_DEBUG, "Pushing rolled converted work to stage thread");
 		if (unlikely(!stage_work(work_clone))) {
 			cloned = false;