Avoid recursive locks in fill_queue.

get_work is a blocking call and may re-enter code that takes
cgpu->qlock, so calling it with the write lock held risks taking the
lock recursively. Check whether the device needs more work under a
read lock, drop the lock before calling get_work, and take the write
lock only to add the returned work item to the queued_work hashlist.
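
As an illustration, here is a minimal self-contained sketch of the
same check-then-queue pattern, using a raw pthread rwlock and a plain
linked list in place of cgminer's rd_lock/wr_lock wrappers and the
uthash queued_work hashlist (fetch_work_blocking and fill are stand-in
names for this sketch, not cgminer functions):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct work {
        int id;
        struct work *next;
    };

    static pthread_rwlock_t qlock = PTHREAD_RWLOCK_INITIALIZER;
    static struct work *queued; /* stand-in for cgpu->queued_work */

    /* Stand-in for get_work: imagine this blocks waiting on the pool,
     * which is why it must never be called while qlock is held. */
    static struct work *fetch_work_blocking(void)
    {
        struct work *w = calloc(1, sizeof(*w));

        if (!w)
            abort(); /* keep the sketch simple: never return NULL */
        return w;
    }

    static void fill(void)
    {
        bool need_work;

        /* Hold the read lock only long enough to sample the state. */
        pthread_rwlock_rdlock(&qlock);
        need_work = (queued == NULL);
        pthread_rwlock_unlock(&qlock);

        if (need_work) {
            /* The blocking fetch runs with no lock held. */
            struct work *w = fetch_work_blocking();

            /* The write lock covers only the insertion itself. */
            pthread_rwlock_wrlock(&qlock);
            w->next = queued;
            queued = w;
            pthread_rwlock_unlock(&qlock);
        }
    }

The check and the insert are no longer atomic, but that is fine for
fill_queue: it keeps looping until the driver's queue_full hook
reports that the device has enough work.
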
diff --git a/cgminer.c b/cgminer.c
index 98d735f..bbba3a1 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -5728,15 +5728,21 @@ static void fill_queue(struct thr_info *mythr, struct cgpu_info *cgpu, struct de
 {
 	thread_reportout(mythr);
 	do {
-		struct work *work;
-		wr_lock(&cgpu->qlock);
-		if (HASH_COUNT(cgpu->queued_work) == cgpu->queued_count) {
-			work = get_work(mythr, thr_id);
-			work->device_diff = MIN(drv->max_diff, work->work_difficulty);
+		bool need_work;
+
+		rd_lock(&cgpu->qlock);
+		need_work = (HASH_COUNT(cgpu->queued_work) == cgpu->queued_count);
+		rd_unlock(&cgpu->qlock);
+
+		/* get_work is a blocking function so do it outside of lock
+		 * to prevent deadlocks with other locks. */
+		if (need_work) {
+			struct work *work = get_work(mythr, thr_id);
+			wr_lock(&cgpu->qlock);
 			HASH_ADD_INT(cgpu->queued_work, id, work);
+			wr_unlock(&cgpu->qlock);
 		}
-		wr_unlock(&cgpu->qlock);
 		/* The queue_full function should be used by the driver to
 		 * actually place work items on the physical device if it
 		 * does have a queue. */