Simplify queued hashtable by storing unqueued work separately in a single pointer.
diff --git a/cgminer.c b/cgminer.c
index 159d765..fb739f2 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -6341,55 +6341,39 @@ static void hash_sole_work(struct thr_info *mythr)
cgpu->deven = DEV_DISABLED;
}
-/* Create a hashtable of work items for devices with a queue. The device
- * driver must have a custom queue_full function or it will default to true
- * and put only one work item in the queue. Work items should not be removed
- * from this hashtable until they are no longer in use anywhere. Once a work
- * item is physically queued on the device itself, the work->queued flag
- * should be set under cgpu->qlock write lock to prevent it being dereferenced
- * while still in use. */
+/* Put a new unqueued work item in cgpu->unqueued_work under cgpu->qlock until
+ * the driver tells us it's full, so that it may extract the work item using
+ * the get_queued() function, which adds it to the hashtable on
+ * cgpu->queued_work. */
static void fill_queue(struct thr_info *mythr, struct cgpu_info *cgpu, struct device_drv *drv, const int thr_id)
{
do {
- bool need_work;
-
- rd_lock(&cgpu->qlock);
- need_work = (HASH_COUNT(cgpu->queued_work) == cgpu->queued_count);
- rd_unlock(&cgpu->qlock);
-
- if (need_work) {
- struct work *work = get_work(mythr, thr_id);
-
- wr_lock(&cgpu->qlock);
- HASH_ADD_INT(cgpu->queued_work, id, work);
- wr_unlock(&cgpu->qlock);
- }
+ wr_lock(&cgpu->qlock);
+ if (!cgpu->unqueued_work)
+ cgpu->unqueued_work = get_work(mythr, thr_id);
+ wr_unlock(&cgpu->qlock);
/* The queue_full function should be used by the driver to
* actually place work items on the physical device if it
* does have a queue. */
} while (!drv->queue_full(cgpu));
}
-/* This function is for retrieving one work item from the queued hashtable of
- * available work items that are not yet physically on a device (which is
- * flagged with the work->queued bool). Code using this function must be able
- * to handle NULL as a return which implies there is no work available. */
+/* This function is for retrieving one work item from the unqueued pointer and
+ * adding it to the hashtable of queued work. Code using this function must be
+ * able to handle NULL as a return which implies there is no work available. */
struct work *get_queued(struct cgpu_info *cgpu)
{
- struct work *work, *tmp, *ret = NULL;
+ struct work *work = NULL;
wr_lock(&cgpu->qlock);
- HASH_ITER(hh, cgpu->queued_work, work, tmp) {
- if (!work->queued) {
- work->queued = true;
- cgpu->queued_count++;
- ret = work;
- break;
- }
+ if (cgpu->unqueued_work) {
+ work = cgpu->unqueued_work;
+ HASH_ADD_INT(cgpu->queued_work, id, work);
+ cgpu->unqueued_work = NULL;
}
wr_unlock(&cgpu->qlock);
- return ret;
+ return work;
}
/* This function is for finding an already queued work item in the
@@ -6402,8 +6386,7 @@ struct work *__find_work_bymidstate(struct work *que, char *midstate, size_t mid
struct work *work, *tmp, *ret = NULL;
HASH_ITER(hh, que, work, tmp) {
- if (work->queued &&
- memcmp(work->midstate, midstate, midstatelen) == 0 &&
+ if (memcmp(work->midstate, midstate, midstatelen) == 0 &&
memcmp(work->data + offset, data, datalen) == 0) {
ret = work;
break;
@@ -6443,8 +6426,7 @@ struct work *clone_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate
void __work_completed(struct cgpu_info *cgpu, struct work *work)
{
- if (work->queued)
- cgpu->queued_count--;
+ cgpu->queued_count--;
HASH_DEL(cgpu->queued_work, work);
}
/* This function should be used by queued device drivers when they're sure
@@ -6475,23 +6457,17 @@ struct work *take_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate,
static void flush_queue(struct cgpu_info *cgpu)
{
- struct work *work, *tmp;
- int discarded = 0;
+ struct work *work = NULL;
wr_lock(&cgpu->qlock);
- HASH_ITER(hh, cgpu->queued_work, work, tmp) {
- /* Can only discard the work items if they're not physically
- * queued on the device. */
- if (!work->queued) {
- HASH_DEL(cgpu->queued_work, work);
- discard_work(work);
- discarded++;
- }
- }
+ work = cgpu->unqueued_work;
+ cgpu->unqueued_work = NULL;
wr_unlock(&cgpu->qlock);
- if (discarded)
- applog(LOG_DEBUG, "Discarded %d queued work items", discarded);
+ if (work) {
+ free_work(work);
+ applog(LOG_DEBUG, "Discarded queued work item");
+ }
}
/* This version of hash work is for devices that are fast enough to always
diff --git a/driver-bflsc.c b/driver-bflsc.c
index c9011ba..a64e552 100644
--- a/driver-bflsc.c
+++ b/driver-bflsc.c
@@ -948,7 +948,7 @@ static void flush_one_dev(struct cgpu_info *bflsc, int dev)
rd_lock(&bflsc->qlock);
HASH_ITER(hh, bflsc->queued_work, work, tmp) {
- if (work->queued && work->subid == dev) {
+ if (work->subid == dev) {
// devflag is used to flag stale work
work->devflag = true;
did = true;
diff --git a/driver-klondike.c b/driver-klondike.c
index 5545b43..4d99f43 100644
--- a/driver-klondike.c
+++ b/driver-klondike.c
@@ -720,7 +720,7 @@ static void klondike_check_nonce(struct cgpu_info *klncgpu, KLIST *kitem)
cgtime(&tv_now);
rd_lock(&(klncgpu->qlock));
HASH_ITER(hh, klncgpu->queued_work, look, tmp) {
- if (look->queued && ms_tdiff(&tv_now, &(look->tv_stamp)) < OLD_WORK_MS &&
+ if (ms_tdiff(&tv_now, &(look->tv_stamp)) < OLD_WORK_MS &&
(look->subid == (kline->wr.dev*256 + kline->wr.workid))) {
work = look;
break;
@@ -1026,13 +1026,11 @@ static bool klondike_send_work(struct cgpu_info *klncgpu, int dev, struct work *
cgtime(&tv_old);
wr_lock(&klncgpu->qlock);
HASH_ITER(hh, klncgpu->queued_work, look, tmp) {
- if (look->queued) {
- if (ms_tdiff(&tv_old, &(look->tv_stamp)) > OLD_WORK_MS) {
- __work_completed(klncgpu, look);
- free_work(look);
- } else
- wque_size++;
- }
+ if (ms_tdiff(&tv_old, &(look->tv_stamp)) > OLD_WORK_MS) {
+ __work_completed(klncgpu, look);
+ free_work(look);
+ } else
+ wque_size++;
}
wr_unlock(&klncgpu->qlock);
diff --git a/miner.h b/miner.h
index 44b7e33..915d7f8 100644
--- a/miner.h
+++ b/miner.h
@@ -574,6 +574,7 @@ struct cgpu_info {
pthread_rwlock_t qlock;
struct work *queued_work;
+ struct work *unqueued_work;
unsigned int queued_count;
bool shutdown;
@@ -1381,7 +1382,6 @@ struct work {
bool stale;
bool mandatory;
bool block;
- bool queued;
bool stratum;
char *job_id;