Abstract out work cloning and clone up to $mining_threads copies whenever a rollable work item is found, returning a clone instead.
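
To make the new flow easier to follow, here is a minimal, compilable model of the clone-and-stage pattern this commit introduces. It is a sketch only: struct work is reduced to a couple of fields, and make_work/stage_work/can_roll/should_roll/roll_work are simplified stand-ins rather than cgminer's real functions; mining_threads is assumed to be 4 for the demo.

    /* Minimal, self-contained model of the clone_work() flow added below.
     * The struct fields and the helper stubs are simplified stand-ins,
     * not cgminer's real implementations. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int mining_threads = 4;          /* assumption: 4 mining threads */

    struct work {
        int ntime_offset;                   /* what roll_work() advances */
        bool clone;
        bool longpoll;
    };

    static struct work *make_work(void) { return calloc(1, sizeof(struct work)); }
    static void free_work(struct work *w) { free(w); }

    static bool can_roll(const struct work *w) { return w->ntime_offset < 10; }
    static bool should_roll(const struct work *w) { (void)w; return true; }
    static void roll_work(struct work *w) { w->ntime_offset++; }

    /* Stand-in for pushing work to the stage thread; it takes ownership. */
    static bool stage_work(struct work *w)
    {
        printf("staged: ntime_offset=%d clone=%d\n", w->ntime_offset, w->clone);
        free_work(w);
        return true;
    }

    static struct work *make_clone(struct work *work)
    {
        struct work *work_clone = make_work();

        memcpy(work_clone, work, sizeof(struct work));
        work_clone->clone = true;
        work_clone->longpoll = false;
        return work_clone;
    }

    /* Same control flow as the clone_work() in the diff: stage up to
     * mining_threads rolled clones, then either return the last clone
     * (re-staging the still-rollable original) or return the original. */
    static struct work *clone_work(struct work *work)
    {
        struct work *work_clone = make_clone(work);
        bool cloned = false;
        int rolled = 0;

        while (rolled++ < mining_threads && can_roll(work) && should_roll(work)) {
            if (!stage_work(work_clone)) {
                cloned = false;
                break;
            }
            roll_work(work);
            work_clone = make_clone(work);
            cloned = true;
        }

        if (cloned) {
            stage_work(work);
            return work_clone;
        }
        free_work(work_clone);
        return work;
    }

    int main(void)
    {
        struct work *work = make_work();

        work = clone_work(work);        /* original is staged or returned */
        printf("caller got %s\n", work->clone ? "a clone" : "the original");
        free_work(work);
        return 0;
    }
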
diff --git a/cgminer.c b/cgminer.c
index 1cd9e09..bc2c931 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -3618,6 +3618,48 @@ static bool reuse_work(struct work *work)
return false;
}
+static struct work *make_clone(struct work *work)
+{
+ struct work *work_clone = make_work();
+
+ memcpy(work_clone, work, sizeof(struct work));
+ work_clone->clone = true;
+ work_clone->longpoll = false;
+
+ return work_clone;
+}
+
+/* Clones work by rolling it if possible, and returning a clone instead of the
+ * original work item which gets staged again to possibly be rolled again in
+ * the future */
+static struct work *clone_work(struct work *work)
+{
+ struct work *work_clone;
+ bool cloned = false;
+ int rolled = 0;
+
+ work_clone = make_clone(work);
+ while (rolled++ < mining_threads && can_roll(work) && should_roll(work)) {
+ applog(LOG_DEBUG, "Pushing rolled converted work to stage thread");
+ if (unlikely(!stage_work(work_clone))) {
+ cloned = false;
+ break;
+ }
+ roll_work(work);
+ work_clone = make_clone(work);
+ cloned = true;
+ }
+
+ if (cloned) {
+ stage_work(work);
+ return work_clone;
+ }
+
+ free_work(work_clone);
+
+ return work;
+}
+
static bool get_work(struct work *work, bool requested, struct thr_info *thr,
const int thr_id)
{
@@ -3702,18 +3744,11 @@ retry:
pool_resus(pool);
}
- memcpy(work, work_heap, sizeof(*work));
-
- /* Hand out a clone if we can roll this work item */
- if (reuse_work(work_heap)) {
- applog(LOG_DEBUG, "Pushing divided work to get queue head");
-
- stage_work(work_heap);
- work->clone = true;
- } else {
+ work_heap = clone_work(work_heap);
+ memcpy(work, work_heap, sizeof(struct work));
+ free_work(work_heap);
+ if (!work->clone)
dec_queued();
- free_work(work_heap);
- }
ret = true;
out:
@@ -4039,8 +4074,7 @@ enum {
/* Stage another work item from the work returned in a longpoll */
static void convert_to_work(json_t *val, int rolltime, struct pool *pool)
{
- struct work *work, *work_clone;
- int rolled = 0;
+ struct work *work;
bool rc;
work = make_work();
@@ -4073,18 +4107,7 @@ static void convert_to_work(json_t *val, int rolltime, struct pool *pool)
return;
}
- work_clone = make_work();
- memcpy(work_clone, work, sizeof(struct work));
- while (reuse_work(work) && rolled++ < mining_threads) {
- work_clone->clone = true;
- work_clone->longpoll = false;
- applog(LOG_DEBUG, "Pushing rolled converted work to stage thread");
- if (unlikely(!stage_work(work_clone)))
- break;
- work_clone = make_work();
- memcpy(work_clone, work, sizeof(struct work));
- }
- free_work(work_clone);
+ work = clone_work(work);
applog(LOG_DEBUG, "Pushing converted work to stage thread");
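
For reference, the consumer path in get_work() now reduces to roughly the following shape (a paraphrase of the hunk above with explanatory comments, not a verbatim excerpt):

    work_heap = clone_work(work_heap);      /* rolled clone, or the original if it cannot be rolled */
    memcpy(work, work_heap, sizeof(struct work));
    free_work(work_heap);                   /* the caller keeps only its private copy */
    if (!work->clone)
        dec_queued();                       /* only an original work item counts against the queue */

convert_to_work() gets the same treatment: its hand-rolled clone loop is replaced by a single clone_work() call before the longpoll work is pushed to the stage thread.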