Go back to cloning available work under staged lock.
diff --git a/cgminer.c b/cgminer.c
index b08ad3a..56b0535 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -2999,14 +2999,14 @@ static struct work *make_clone(struct work *work)
static void stage_work(struct work *work);
-/* Called with stgd_lock held */
-static bool __clone_available(void)
+static bool clone_available(void)
{
struct work *work, *tmp;
bool cloned = false;
+ mutex_lock(stgd_lock);
if (!staged_rollable)
- goto out;
+ goto out_unlock;
HASH_ITER(hh, staged_work, work, tmp) {
if (can_roll(work) && should_roll(work)) {
@@ -3022,7 +3022,9 @@ static bool __clone_available(void)
}
}
-out:
+out_unlock:
+ mutex_unlock(stgd_lock);
+
return cloned;
}
@@ -6864,7 +6866,7 @@ retry:
continue;
}
- if (__clone_available()) {
+ if (clone_available()) {
applog(LOG_DEBUG, "Cloned getwork work");
free_work(work);
continue;