Stagger the restart of every subsequent thread per device, so all devices get busy before we worry about fetching work for their extra threads.
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54
diff --git a/cgminer.c b/cgminer.c
index db944ac..1d88107 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -3248,8 +3248,22 @@ void *miner_thread(void *userdata)
gettimeofday(&tv_start, NULL);
hashes = api->scanhash(mythr, work, work->blk.nonce + max_nonce);
- if (unlikely(work_restart[thr_id].restart))
+ if (unlikely(work_restart[thr_id].restart)) {
+
+ /* Apart from device_thread 0, we stagger the
+ * starting of every next thread to try and get
+ * all devices busy before worrying about
+ * getting work for their extra threads */
+ if (mythr->device_thread) {
+ struct timespec rgtp;
+
+ rgtp.tv_sec = 0;
+ rgtp.tv_nsec = 250 * mythr->device_thread * 1000000;
+ nanosleep(&rgtp, NULL);
+ }
break;
+ }
+
if (unlikely(!hashes))
goto out;
hashes_done += hashes;
@@ -4317,10 +4331,12 @@ retry_pools:
k = 0;
for (i = 0; i < total_devices; ++i) {
struct cgpu_info *cgpu = devices[i];
+
for (j = 0; j < cgpu->threads; ++j, ++k) {
thr = &thr_info[k];
thr->id = k;
thr->cgpu = cgpu;
+ thr->device_thread = j;
thr->q = tq_new();
if (!thr->q)
diff --git a/miner.h b/miner.h
index e028533..7f32829 100644
--- a/miner.h
+++ b/miner.h
@@ -318,6 +318,8 @@ struct thread_q {
struct thr_info {
int id;
+ int device_thread;
+
pthread_t pth;
struct thread_q *q;
struct cgpu_info *cgpu;