Unlock the avalon qlock while sending tasks so the lock is not held for an extended period.
diff --git a/driver-avalon.c b/driver-avalon.c
index d51b88f..8a892d8 100644
--- a/driver-avalon.c
+++ b/driver-avalon.c
@@ -1004,11 +1004,13 @@ static void *avalon_get_results(void *userdata)
return NULL;
}
-static void avalon_rotate_array(struct cgpu_info *avalon)
+static void avalon_rotate_array(struct cgpu_info *avalon, struct avalon_info *info)
{
+ mutex_lock(&info->qlock);
avalon->queued = 0;
if (++avalon->work_array >= AVALON_ARRAY_SIZE)
avalon->work_array = 0;
+ mutex_unlock(&info->qlock);
}
static void bitburner_rotate_array(struct cgpu_info *avalon)
@@ -1117,7 +1119,6 @@ static void *avalon_send_tasks(void *userdata)
us_timeout = 0x100000000ll / info->asic_count / info->frequency;
cgsleep_prepare_r(&ts_start);
- mutex_lock(&info->qlock);
start_count = avalon->work_array * avalon_get_work_count;
end_count = start_count + avalon_get_work_count;
for (i = start_count, j = 0; i < end_count; i++, j++) {
@@ -1128,6 +1129,7 @@ static void *avalon_send_tasks(void *userdata)
break;
}
+ mutex_lock(&info->qlock);
if (likely(j < avalon->queued && !info->overheat && avalon->works[i])) {
avalon_init_task(&at, 0, 0, info->fan_pwm,
info->timeout, info->asic_count,
@@ -1148,6 +1150,7 @@ static void *avalon_send_tasks(void *userdata)
* idling any miners. */
avalon_reset_auto(info);
}
+ mutex_unlock(&info->qlock);
ret = avalon_send_task(&at, avalon, info);
@@ -1160,8 +1163,7 @@ static void *avalon_send_tasks(void *userdata)
}
}
- avalon_rotate_array(avalon);
- mutex_unlock(&info->qlock);
+ avalon_rotate_array(avalon, info);
cgsem_post(&info->qsem);