Commit 522f620c89b5f152f86a2916b0dca7b71b2a5005

Con Kolivas 2012-06-27T22:34:46

Check that we don't exhaust the entire unsigned 32-bit ntime range when rolling time, to cope with extremely high hashrates.

diff --git a/cgminer.c b/cgminer.c
index 4774797..0c4e0c3 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -3646,13 +3646,17 @@ static inline bool can_roll(struct work *work)
 	return (work->pool && !stale_work(work, false) && work->rolltime && !work->clone);
 }
 
-static void roll_work(struct work *work)
+static bool roll_work(struct work *work)
 {
 	uint32_t *work_ntime;
 	uint32_t ntime;
 
 	work_ntime = (uint32_t *)(work->data + 68);
 	ntime = be32toh(*work_ntime);
+	if (unlikely(ntime == 0xFFFFFFFF)) {
+		applog(LOG_DEBUG, "Exhausted ntime space, cannot roll work");
+		return false;
+	}
 	ntime++;
 	*work_ntime = htobe32(ntime);
 	local_work++;
@@ -3663,13 +3667,14 @@ static void roll_work(struct work *work)
 	/* This is now a different work item so it needs a different ID for the
 	 * hashtable */
 	work->id = total_work++;
+	return true;
 }
 
 static bool reuse_work(struct work *work)
 {
 	if (can_roll(work) && should_roll(work)) {
-		roll_work(work);
-		return true;
+		if (likely(roll_work(work)))
+			return true;
 	}
 	return false;
 }
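
For reference, below is a minimal, self-contained sketch of the guarded ntime roll that this commit introduces. It follows the diff above (the ntime field is the big-endian 32-bit value at byte offset 68 of the block header), but uses an illustrative dummy struct and hand-rolled big-endian helpers in place of cgminer's struct work and the be32toh()/htobe32() conversions; none of the names here are cgminer's API.

/* Illustrative sketch of the ntime-rolling overflow guard.
 * Offsets and semantics follow the diff above; the struct and
 * helper names are hypothetical, not cgminer's. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_work {
	unsigned char data[128];	/* block header bytes, as in struct work */
};

/* Read the big-endian ntime field at offset 68 of the header. */
static uint32_t read_ntime(const struct demo_work *work)
{
	const unsigned char *p = work->data + 68;

	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  | (uint32_t)p[3];
}

/* Write ntime back in big-endian byte order. */
static void write_ntime(struct demo_work *work, uint32_t ntime)
{
	unsigned char *p = work->data + 68;

	p[0] = ntime >> 24;
	p[1] = ntime >> 16;
	p[2] = ntime >> 8;
	p[3] = ntime;
}

/* Mirror of the patched roll_work(): refuse to roll once ntime has hit
 * the top of the unsigned 32-bit range, otherwise bump it by one second. */
static bool demo_roll_work(struct demo_work *work)
{
	uint32_t ntime = read_ntime(work);

	if (ntime == 0xFFFFFFFF)
		return false;	/* ntime space exhausted; caller must fetch new work */
	write_ntime(work, ntime + 1);
	return true;
}

int main(void)
{
	struct demo_work work;

	memset(&work, 0, sizeof(work));
	write_ntime(&work, 0xFFFFFFFE);

	printf("first roll:  %s\n", demo_roll_work(&work) ? "ok" : "exhausted");
	printf("second roll: %s\n", demo_roll_work(&work) ? "ok" : "exhausted");
	return 0;
}

The key point mirrored from the patch is that roll_work() now reports failure instead of silently wrapping ntime back to zero, so a caller such as reuse_work() can fall through and request fresh work once the 32-bit ntime space is exhausted.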