Check that we don't exhaust the entire unsigned 32-bit ntime range when rolling time to cope with extremely high hashrates.
diff --git a/cgminer.c b/cgminer.c
index 4774797..0c4e0c3 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -3646,13 +3646,17 @@ static inline bool can_roll(struct work *work)
 	return (work->pool && !stale_work(work, false) && work->rolltime && !work->clone);
 }
 
-static void roll_work(struct work *work)
+static bool roll_work(struct work *work)
 {
 	uint32_t *work_ntime;
 	uint32_t ntime;
 
 	work_ntime = (uint32_t *)(work->data + 68);
 	ntime = be32toh(*work_ntime);
+	if (unlikely(ntime == 0xFFFFFFFF)) {
+		applog(LOG_DEBUG, "Exhausted ntime space, cannot roll work");
+		return false;
+	}
 	ntime++;
 	*work_ntime = htobe32(ntime);
 	local_work++;
@@ -3663,13 +3667,14 @@ static void roll_work(struct work *work)
 	/* This is now a different work item so it needs a different ID for the
 	 * hashtable */
 	work->id = total_work++;
+	return true;
 }
 
 static bool reuse_work(struct work *work)
 {
 	if (can_roll(work) && should_roll(work)) {
-		roll_work(work);
-		return true;
+		if (likely(roll_work(work)))
+			return true;
 	}
 	return false;
 }
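
For illustration only, a minimal standalone sketch of the same guard, outside cgminer and with hypothetical names (roll_ntime), showing that the increment stops at 0xFFFFFFFF instead of wrapping back to zero:

/* Standalone sketch, not part of the patch: mirrors the guarded ntime
 * increment above with illustrative names. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Returns false instead of wrapping once the 32-bit ntime space is used up. */
static bool roll_ntime(uint32_t *ntime)
{
	if (*ntime == 0xFFFFFFFFu)
		return false;	/* exhausted; the caller must fetch fresh work */
	(*ntime)++;
	return true;
}

int main(void)
{
	uint32_t ntime = 0xFFFFFFFEu;

	while (roll_ntime(&ntime))
		printf("rolled to 0x%08x\n", ntime);
	printf("ntime space exhausted at 0x%08x\n", ntime);
	return 0;
}

In the patch, reuse_work() plays the role of that caller: when roll_work() reports exhaustion it falls through to return false, so fresh work is requested rather than reusing a wrapped timestamp.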