Move queueing of one request to a separate function in preparation for variable-length queues.
diff --git a/cpu-miner.c b/cpu-miner.c
index e340efc..117353d 100644
--- a/cpu-miner.c
+++ b/cpu-miner.c
@@ -693,19 +693,17 @@ static void hashmeter(int thr_id, struct timeval *diff,
 	local_mhashes_done = 0;
 }
 
-static bool get_work(struct work *work)
+/* All work is queued as being for thread 0; the mining thread that takes it
+ * then flags it as its own */
+static bool queue_request(void)
 {
 	struct thr_info *thr = &thr_info[0];
-	struct work *work_heap;
 	struct workio_cmd *wc;
-	bool ret = false;
-	static bool first_work = true;
 
-get_new:
 	/* fill out work request message */
 	wc = calloc(1, sizeof(*wc));
 	if (unlikely(!wc))
-		goto out;
+		return false;
 
 	wc->cmd = WC_GET_WORK;
 	wc->thr = thr;
@@ -713,8 +711,21 @@ get_new:
 	/* send work request to workio thread */
 	if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
 		workio_cmd_free(wc);
-		goto out;
+		return false;
 	}
+	return true;
+}
+
+static bool get_work(struct work *work)
+{
+	struct thr_info *thr = &thr_info[0];
+	struct work *work_heap;
+	bool ret = false;
+	static bool first_work = true;
+
+get_new:
+	if (unlikely(!queue_request()))
+		goto out;
 
 	/* wait for 1st response, or get cached response */
 	work_heap = tq_pop(thr->q, NULL);
@@ -733,17 +744,8 @@ get_new:
 		first_work = false;
 		/* send for another work request for the next time get_work
 		 * is called. */
-		wc = calloc(1, sizeof(*wc));
-		if (unlikely(!wc))
-			goto out_free;
-
-		wc->cmd = WC_GET_WORK;
-		wc->thr = thr;
-
-		if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
-			workio_cmd_free(wc);
+		if (unlikely(!queue_request()))
 			goto out_free;
-		}
 	}
 
 	memcpy(work, work_heap, sizeof(*work));
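
For reference, below is a minimal self-contained C sketch of the shape this change works toward. The queue array, tq_push_stub() and the depth of 3 are stand-ins invented for illustration, not cpu-miner's real thr_info/tq_push machinery; the point is that once queue_request() allocates and pushes exactly one request, a variable-length queue reduces to calling it in a loop.

/* Illustrative sketch only: models the one-request-per-call pattern of
 * queue_request() with a toy queue instead of cpu-miner's tq_push(). */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define WC_GET_WORK 1
#define QUEUE_MAX   64

struct workio_cmd {
	int cmd;	/* request type; WC_GET_WORK in the real code */
	int thr_id;	/* queued as thread 0, per the new comment above */
};

static struct workio_cmd *queue[QUEUE_MAX];
static int queue_len;

/* Toy stand-in for tq_push(): append one command to a fixed array. */
static bool tq_push_stub(struct workio_cmd *wc)
{
	if (queue_len >= QUEUE_MAX)
		return false;
	queue[queue_len++] = wc;
	return true;
}

/* Mirrors the extracted queue_request(): allocate one command, fill it
 * out, push it, and report success; the caller decides how many to queue. */
static bool queue_request(void)
{
	struct workio_cmd *wc = calloc(1, sizeof(*wc));

	if (!wc)
		return false;
	wc->cmd = WC_GET_WORK;
	wc->thr_id = 0;
	if (!tq_push_stub(wc)) {
		free(wc);
		return false;
	}
	return true;
}

int main(void)
{
	int i;

	/* A variable-length queue is now just a loop; the depth (3 here)
	 * could come from a command line option. */
	for (i = 0; i < 3; i++)
		if (!queue_request())
			return 1;
	printf("%d requests queued\n", queue_len);
	while (queue_len)
		free(queue[--queue_len]);
	return 0;
}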