Make fetching of work asynchronous from the mining threads' requests by always having one work item queued. This prevents drops in hash rate when getting work from a pool that is slow to respond. Use a local static struct work in get_work() to queue one extra work item.
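The pattern is easier to see in isolation than in the full diff below. What follows is a minimal standalone sketch of the one-item prefetch this commit implements, assuming nothing from cpu-miner itself: the single-slot mailboxes, pool_thread(), and the trivial struct work are illustrative stand-ins for the workio thread and its tq_push()/tq_pop() queues, not the real code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct work { int id; };

/* Single-slot blocking mailboxes standing in for the workio queues. */
struct mailbox {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool full;
	struct work item;
};

static void mb_put(struct mailbox *mb, const struct work *w)
{
	pthread_mutex_lock(&mb->lock);
	while (mb->full)
		pthread_cond_wait(&mb->cond, &mb->lock);
	mb->item = *w;
	mb->full = true;
	pthread_cond_broadcast(&mb->cond);
	pthread_mutex_unlock(&mb->lock);
}

static void mb_get(struct mailbox *mb, struct work *w)
{
	pthread_mutex_lock(&mb->lock);
	while (!mb->full)
		pthread_cond_wait(&mb->cond, &mb->lock);
	*w = mb->item;
	mb->full = false;
	pthread_cond_broadcast(&mb->cond);
	pthread_mutex_unlock(&mb->lock);
}

static struct mailbox requests = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false, { 0 } };
static struct mailbox responses = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false, { 0 } };

/* Stand-in for the workio thread: answer each request after a slow
 * round trip to the pool. */
static void *pool_thread(void *arg)
{
	struct work req, resp = { 0 };

	(void)arg;
	for (;;) {
		mb_get(&requests, &req);
		sleep(1);		/* simulated pool latency */
		resp.id++;
		mb_put(&responses, &resp);
	}
	return NULL;
}

static pthread_mutex_t get_lock = PTHREAD_MUTEX_INITIALIZER;

/* Same shape as the patched get_work(): fire off a request first, hand
 * out the item fetched by the previous call, then collect the response
 * that was requested one whole mining run ago. */
static bool get_work(struct work *work)
{
	static struct work work_heap;	/* the one queued item */
	static bool primed;
	struct work req = { 0 };

	mb_put(&requests, &req);	/* request work for a *later* call */

	pthread_mutex_lock(&get_lock);
	if (primed) {
		*work = work_heap;		/* no waiting on the pool here */
		mb_get(&responses, &work_heap);	/* already queued, returns fast */
	} else {
		/* First call: block once for the initial item; it is handed
		 * out and also seeds the cache, so an extra request keeps
		 * the pipeline one item ahead from now on. */
		mb_get(&responses, &work_heap);
		*work = work_heap;
		mb_put(&requests, &req);
		primed = true;
	}
	pthread_mutex_unlock(&get_lock);
	return true;
}

int main(void)
{
	pthread_t pool;
	struct work w;
	int i;

	pthread_create(&pool, NULL, pool_thread, NULL);
	for (i = 0; i < 5; i++) {
		get_work(&w);
		printf("mining on work %d\n", w.id);
		sleep(2);		/* simulated mining run */
	}
	return 0;
}

Because each call pushes its request before blocking, the mb_get() that refills the cache collects a response requested a full mining run earlier, so in the steady state it returns almost immediately no matter how slow the pool is.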
diff --git a/cpu-miner.c b/cpu-miner.c
index 86e5944..3bfd2d0 100644
--- a/cpu-miner.c
+++ b/cpu-miner.c
@@ -141,6 +141,7 @@ int longpoll_thr_id;
 struct work_restart *work_restart = NULL;
 pthread_mutex_t time_lock;
 static pthread_mutex_t hash_lock;
+static pthread_mutex_t get_lock;
 static double total_mhashes_done;
 static struct timeval total_tv_start, total_tv_end;
 static int accepted, rejected;
@@ -591,35 +592,70 @@ static void hashmeter(int thr_id, struct timeval *diff,
 		total_mhashes_done / total_secs, accepted, rejected);
 }
 
-static bool get_work(struct thr_info *thr, struct work *work)
+/* Since we always have one extra work item queued, set the thread id to 0
+ * for all the work and just give the work to the first thread that requests
+ * work */
+static bool get_work(struct work *work)
 {
+	static struct work *work_heap = NULL;
+	struct thr_info *thr = &thr_info[0];
 	struct workio_cmd *wc;
-	struct work *work_heap;
+	bool ret = false;
 
 	/* fill out work request message */
 	wc = calloc(1, sizeof(*wc));
-	if (!wc)
-		return false;
+	if (unlikely(!wc))
+		goto out;
 
 	wc->cmd = WC_GET_WORK;
 	wc->thr = thr;
 
 	/* send work request to workio thread */
-	if (!tq_push(thr_info[work_thr_id].q, wc)) {
+	if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
 		workio_cmd_free(wc);
-		return false;
+		goto out;
 	}
 
-	/* wait for response, a unit of work */
-	work_heap = tq_pop(thr->q, NULL);
-	if (!work_heap)
-		return false;
+	/* work_heap is a static var so it is protected by get_lock */
+	pthread_mutex_lock(&get_lock);
+	if (likely(work_heap)) {
+		memcpy(work, work_heap, sizeof(*work));
+		/* Wait for next response, a unit of work - it should be queued */
+		free(work_heap);
+		work_heap = tq_pop(thr->q, NULL);
+	} else {
+		/* wait for 1st response, or 1st response after failure */
+		work_heap = tq_pop(thr->q, NULL);
+		if (unlikely(!work_heap))
+			goto out_unlock;
+
+		/* send for another work request for the next time get_work
+		 * is called. */
+		wc = calloc(1, sizeof(*wc));
+		if (unlikely(!wc)) {
+			free(work_heap);
+			work_heap = NULL;
+			goto out_unlock;
+		}
 
-	/* copy returned work into storage provided by caller */
-	memcpy(work, work_heap, sizeof(*work));
-	free(work_heap);
+		wc->cmd = WC_GET_WORK;
+		wc->thr = thr;
 
-	return true;
+		if (unlikely(!tq_push(thr_info[work_thr_id].q, wc))) {
+			workio_cmd_free(wc);
+			free(work_heap);
+			work_heap = NULL;
+			goto out_unlock;
+		}
+		/* give the caller a copy of this first response; it also
+		 * stays cached in work_heap as the queued item */
+		memcpy(work, work_heap, sizeof(*work));
+	}
+	ret = true;
+out_unlock:
+	pthread_mutex_unlock(&get_lock);
+out:
+	return ret;
 }
 
 static bool submit_work(struct thr_info *thr, const struct work *work_in)
@@ -689,7 +722,7 @@ static void *miner_thread(void *userdata)
 		bool rc;
 
 		/* obtain new work from internal workio thread */
-		if (unlikely(!get_work(mythr, &work))) {
+		if (unlikely(!get_work(&work))) {
 			applog(LOG_ERR, "work retrieval failed, exiting "
 				"mining thread %d", mythr->id);
 			goto out;
@@ -875,7 +908,7 @@ static void *gpuminer_thread(void *userdata)
 		if (need_work) {
 			gettimeofday(&tv_workstart, NULL);
 			/* obtain new work from internal workio thread */
-			if (unlikely(!get_work(mythr, work))) {
+			if (unlikely(!get_work(work))) {
 				applog(LOG_ERR, "work retrieval failed, exiting "
 					"gpu mining thread %d", mythr->id);
 				goto out;
@@ -1238,6 +1271,8 @@ int main (int argc, char *argv[])
 		return 1;
 	if (unlikely(pthread_mutex_init(&hash_lock, NULL)))
 		return 1;
+	if (unlikely(pthread_mutex_init(&get_lock, NULL)))
+		return 1;
 
 #ifdef HAVE_SYSLOG_H
 	if (use_syslog)
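In the steady state, the tq_pop() that refills work_heap collects the response to a request pushed one get_work() call earlier (a full unit of mining), so it normally returns without blocking; a miner thread only waits out a whole pool round trip on the very first call, or on the first call after a failure has reset work_heap to NULL. The single get_lock means miner threads briefly serialize on the copy-and-refill step, but that critical section is normally short compared to the pool latency it hides.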