Create a fill_queue function that builds a hashtable of work items, adding as many as the device driver requires until it flags the queue full.
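
For reference, a driver-supplied queue_full callback only has to report whether the device wants more work queued. A minimal sketch of such a callback, assuming a hypothetical EXAMPLE_MAX_QUEUED limit that is not part of this patch, and using the rd_lock/rd_unlock counterparts of the wr_lock pair fill_queue itself uses:

#define EXAMPLE_MAX_QUEUED 10	/* illustrative limit, not from this patch */

/* Hypothetical driver callback: report the queue as full once the
 * queued_work hashtable holds EXAMPLE_MAX_QUEUED items. HASH_COUNT is
 * uthash's element counter. */
static bool example_queue_full(struct cgpu_info *cgpu)
{
	bool full;

	rd_lock(&cgpu->qlock);
	full = HASH_COUNT(cgpu->queued_work) >= EXAMPLE_MAX_QUEUED;
	rd_unlock(&cgpu->qlock);
	return full;
}

Drivers that leave queue_full NULL get the default installed in fill_device_api below, which always returns true, so fill_queue adds exactly one work item per pass.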
diff --git a/cgminer.c b/cgminer.c
index 6c28a4d..c9f02e7 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -5595,6 +5595,25 @@ static void hash_sole_work(struct thr_info *mythr)
}
}
+/* Create a hashtable of work items for devices with a queue. The device
+ * driver must have a custom queue_full function or it will default to true
+ * and put only one work item in the queue. Work items should not be removed
+ * from this hashtable until they are no longer in use anywhere. Once a work
+ * item is physically queued on the device itself, the work->queued flag
+ * should be set under cgpu->qlock write lock to prevent it being dereferenced
+ * while still in use. */
+static void fill_queue(struct thr_info *mythr, struct cgpu_info *cgpu, struct device_drv *drv, const int thr_id)
+{
+ thread_reportout(mythr);
+ do {
+ struct work *work = get_work(mythr, thr_id);
+
+ wr_lock(&cgpu->qlock);
+ HASH_ADD_INT(cgpu->queued_work, id, work);
+ wr_unlock(&cgpu->qlock);
+ } while (!drv->queue_full(cgpu));
+}
+
/* This version of hash work is for devices that are fast enough to always
* perform a full nonce range and need a queue to maintain the device busy.
* Work creation and destruction is not done from within this function
@@ -5614,7 +5633,7 @@ static void hash_queued_work(struct thr_info *mythr)
mythr->work_restart = false;
- //fill_queue(mythr, cgpu, drv, thr_id);
+ fill_queue(mythr, cgpu, drv, thr_id);
thread_reportin(mythr);
hashes = drv->scanwork(mythr);
@@ -6589,6 +6608,8 @@ void fill_device_api(struct cgpu_info *cgpu)
drv->thread_shutdown = &noop_thread_shutdown;
if (!drv->thread_enable)
drv->thread_enable = &noop_thread_enable;
+ if (!drv->queue_full)
+ drv->queue_full = &noop_get_stats;
}
void enable_device(struct cgpu_info *cgpu)
@@ -6612,6 +6633,9 @@ void enable_device(struct cgpu_info *cgpu)
}
#endif
fill_device_api(cgpu);
+
+ rwlock_init(&cgpu->qlock);
+ cgpu->queued_work = NULL;
}
struct _cgpu_devid_counter {
diff --git a/miner.h b/miner.h
index f68707a..14d5338 100644
--- a/miner.h
+++ b/miner.h
@@ -299,6 +299,8 @@ struct device_drv {
bool (*prepare_work)(struct thr_info *, struct work *);
int64_t (*scanhash)(struct thr_info *, struct work *, int64_t);
int64_t (*scanwork)(struct thr_info *);
+ bool (*queue_full)(struct cgpu_info *);
+
void (*hw_error)(struct thr_info *);
void (*thread_shutdown)(struct thr_info *);
void (*thread_enable)(struct thr_info *);
@@ -500,6 +502,9 @@ struct cgpu_info {
int dev_throttle_count;
struct cgminer_stats cgminer_stats;
+
+ pthread_rwlock_t qlock;
+ struct work *queued_work;
};
extern bool add_cgpu(struct cgpu_info*);
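
Usage note, not part of the patch: once fill_queue has populated cgpu->queued_work, the driver's scanwork is expected to take items from the hashtable and set work->queued under the write lock, as the comment above fill_queue requires. A hypothetical sketch of that driver side follows (example_next_queued and the selection policy are assumptions, not code from this commit; it also presumes struct work carries the queued flag the comment refers to):

/* Hypothetical driver-side helper: find a work item not yet sent to the
 * hardware, flag it under the write lock per fill_queue's comment, and
 * return it. HASH_ITER is uthash's deletion-safe iterator over the same
 * hh handle that HASH_ADD_INT uses. */
static struct work *example_next_queued(struct cgpu_info *cgpu)
{
	struct work *work, *tmp, *ret = NULL;

	wr_lock(&cgpu->qlock);
	HASH_ITER(hh, cgpu->queued_work, work, tmp) {
		if (!work->queued) {
			work->queued = true;	/* set while holding the write lock */
			ret = work;
			break;
		}
	}
	wr_unlock(&cgpu->qlock);
	return ret;
}

Design note: the default queue_full installed in fill_device_api reuses noop_get_stats, which fits because it shares the bool (*)(struct cgpu_info *) signature and simply returns true, matching the "default to true" behaviour described in fill_queue's comment.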