Put spacing around locking code for clarity.
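
The change is whitespace-only: each lock/unlock critical section gains a
blank line before and/or after it so the locked region stands out from the
surrounding code. A minimal sketch of the resulting style, written with
plain pthreads so it compiles on its own (illustrative only -- the names
example_lock, shared_value and read_shared_value are invented for this
sketch; cgminer itself uses its cg_rlock()/cg_runlock() and
mutex_lock()/mutex_unlock() wrappers around the same pattern):

	#include <pthread.h>

	static pthread_rwlock_t example_lock = PTHREAD_RWLOCK_INITIALIZER;
	static int shared_value;

	int read_shared_value(void)
	{
		int ret;

		pthread_rwlock_rdlock(&example_lock);
		ret = shared_value;	/* touch shared state only while locked */
		pthread_rwlock_unlock(&example_lock);

		return ret;
	}

Keeping the critical section visually isolated makes it easier to audit
exactly what runs under the lock and to spot early returns that would
leave it held.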
diff --git a/cgminer.c b/cgminer.c
index 3eeaa40..8470cf9 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -398,6 +398,7 @@ struct thr_info *get_thread(int thr_id)
rd_lock(&mining_thr_lock);
thr = mining_thr[thr_id];
rd_unlock(&mining_thr_lock);
+
return thr;
}
@@ -415,6 +416,7 @@ struct cgpu_info *get_devices(int id)
rd_lock(&devices_lock);
cgpu = devices[id];
rd_unlock(&devices_lock);
+
return cgpu;
}
@@ -455,6 +457,7 @@ static void sharelog(const char*disposition, const struct work*work)
ret = fwrite(s, rv, 1, sharelog_file);
fflush(sharelog_file);
mutex_unlock(&sharelog_lock);
+
if (ret != 1)
applog(LOG_ERR, "sharelog fwrite error");
}
@@ -500,6 +503,7 @@ static bool pool_tset(struct pool *pool, bool *var)
ret = *var;
*var = true;
mutex_unlock(&pool->pool_lock);
+
return ret;
}
@@ -511,6 +515,7 @@ bool pool_tclear(struct pool *pool, bool *var)
ret = *var;
*var = false;
mutex_unlock(&pool->pool_lock);
+
return ret;
}
@@ -521,6 +526,7 @@ struct pool *current_pool(void)
cg_rlock(&control_lock);
pool = currentpool;
cg_runlock(&control_lock);
+
return pool;
}
@@ -790,6 +796,7 @@ static void load_temp_cutoffs()
devices[i]->cutofftemp = opt_cutofftemp;
}
rd_unlock(&devices_lock);
+
return;
}
if (device <= 1) {
@@ -1491,9 +1498,11 @@ static struct work *make_work(void)
if (unlikely(!work))
quit(1, "Failed to calloc work in make_work");
+
cg_wlock(&control_lock);
work->id = total_work++;
cg_wunlock(&control_lock);
+
return work;
}
@@ -1886,6 +1895,7 @@ static int total_staged(void)
mutex_lock(stgd_lock);
ret = __total_staged();
mutex_unlock(stgd_lock);
+
return ret;
}
@@ -3158,10 +3168,12 @@ static bool stale_work(struct work *work, bool share)
}
same_job = true;
+
cg_rlock(&pool->data_lock);
if (strcmp(work->job_id, pool->swork.job_id))
same_job = false;
cg_runlock(&pool->data_lock);
+
if (!same_job) {
applog(LOG_DEBUG, "Work stale due to stratum job_id mismatch");
return true;
@@ -3205,6 +3217,7 @@ static uint64_t share_diff(const struct work *work)
if (unlikely(!d64))
d64 = 1;
ret = diffone / d64;
+
cg_wlock(&control_lock);
if (ret > best_diff) {
best_diff = ret;
@@ -3213,6 +3226,7 @@ static uint64_t share_diff(const struct work *work)
if (ret > work->pool->best_diff)
work->pool->best_diff = ret;
cg_wunlock(&control_lock);
+
return ret;
}
@@ -3277,6 +3291,7 @@ static void *submit_work_thread(void *userdata)
total_diff_stale += work->work_difficulty;
pool->diff_stale += work->work_difficulty;
mutex_unlock(&stats_lock);
+
goto out;
}
work->stale = true;
@@ -3316,10 +3331,12 @@ static void *submit_work_thread(void *userdata)
if (likely(stratum_send(pool, s, strlen(s)))) {
if (pool_tclear(pool, &pool->submit_fail))
applog(LOG_WARNING, "Pool %d communication resumed, submitting work", pool->pool_no);
+
mutex_lock(&sshare_lock);
HASH_ADD_INT(stratum_shares, id, sshare);
pool->sshares++;
mutex_unlock(&sshare_lock);
+
applog(LOG_DEBUG, "Successfully submitted, adding to stratum_shares db");
submitted = true;
break;
@@ -3365,6 +3382,7 @@ static void *submit_work_thread(void *userdata)
total_diff_stale += work->work_difficulty;
pool->diff_stale += work->work_difficulty;
mutex_unlock(&stats_lock);
+
break;
}
@@ -3605,6 +3623,7 @@ static bool block_exists(char *hexstr)
rd_lock(&blk_lock);
HASH_FIND_STR(blocks, hexstr, s);
rd_unlock(&blk_lock);
+
if (s)
return true;
return false;
@@ -3693,6 +3712,7 @@ static bool test_work_current(struct work *work)
quit (1, "test_work_current OOM");
strcpy(s->hash, hexstr);
s->block_no = new_blocks++;
+
wr_lock(&blk_lock);
/* Only keep the last hour's worth of blocks in memory since
* work from blocks before this is virtually impossible and we
@@ -3709,6 +3729,7 @@ static bool test_work_current(struct work *work)
HASH_ADD_STR(blocks, hash, s);
set_blockdiff(work);
wr_unlock(&blk_lock);
+
if (deleted_block)
applog(LOG_DEBUG, "Deleted block %d from database", deleted_block);
set_curblock(hexstr, work->data);
@@ -4701,6 +4722,7 @@ static void hashmeter(int thr_id, struct timeval *diff,
local_mhashes_done = 0;
out_unlock:
mutex_unlock(&hash_lock);
+
if (showlog) {
if (!curses_active) {
printf("%s \r", statusline);
@@ -4763,6 +4785,7 @@ static bool parse_stratum_response(struct pool *pool, char *s)
}
id = json_integer_value(id_val);
+
mutex_lock(&sshare_lock);
HASH_FIND_INT(stratum_shares, &id, sshare);
if (sshare) {
@@ -4770,6 +4793,7 @@ static bool parse_stratum_response(struct pool *pool, char *s)
pool->sshares--;
}
mutex_unlock(&sshare_lock);
+
if (!sshare) {
if (json_is_true(res_val))
applog(LOG_NOTICE, "Accepted untracked stratum share from pool %d", pool->pool_no);
@@ -4840,6 +4864,7 @@ static int cp_prio(void)
cg_rlock(&control_lock);
prio = currentpool->prio;
cg_runlock(&control_lock);
+
return prio;
}
@@ -4901,6 +4926,7 @@ static bool supports_resume(struct pool *pool)
cg_rlock(&pool->data_lock);
ret = (pool->sessionid != NULL);
cg_runlock(&pool->data_lock);
+
return ret;
}
@@ -5819,6 +5845,7 @@ void work_completed(struct cgpu_info *cgpu, struct work *work)
cgpu->queued_count--;
HASH_DEL(cgpu->queued_work, work);
wr_unlock(&cgpu->qlock);
+
free_work(work);
}
@@ -6159,6 +6186,7 @@ static void reap_curl(struct pool *pool)
int reaped = 0;
gettimeofday(&now, NULL);
+
mutex_lock(&pool->pool_lock);
list_for_each_entry_safe(ent, iter, &pool->curlring, node) {
if (pool->curls < 2)
@@ -6172,6 +6200,7 @@ static void reap_curl(struct pool *pool)
}
}
mutex_unlock(&pool->pool_lock);
+
if (reaped)
applog(LOG_DEBUG, "Reaped %d curl%s from pool %d", reaped, reaped > 1 ? "s" : "", pool->pool_no);
}
@@ -6294,6 +6323,7 @@ static void *watchdog_thread(void __maybe_unused *userdata)
applog(LOG_WARNING, "Will restart execution as scheduled at %02d:%02d",
schedstart.tm.tm_hour, schedstart.tm.tm_min);
sched_paused = true;
+
rd_lock(&mining_thr_lock);
for (i = 0; i < mining_threads; i++)
mining_thr[i]->pause = true;
@@ -6577,6 +6607,7 @@ static void *test_pool_thread(void *arg)
pools_active = true;
}
cg_wunlock(&control_lock);
+
pool_resus(pool);
} else
pool_died(pool);
@@ -6890,9 +6921,11 @@ void fill_device_drv(struct cgpu_info *cgpu)
void enable_device(struct cgpu_info *cgpu)
{
cgpu->deven = DEV_ENABLED;
+
wr_lock(&devices_lock);
devices[cgpu->cgminer_id = cgminer_id_count++] = cgpu;
wr_unlock(&devices_lock);
+
if (hotplug_mode) {
new_threads += cgpu->threads;
#ifdef HAVE_CURSES
@@ -6935,9 +6968,11 @@ bool add_cgpu(struct cgpu_info*cgpu)
cgpu->device_id = d->lastid = 0;
HASH_ADD_STR(devids, name, d);
}
+
wr_lock(&devices_lock);
devices = realloc(devices, sizeof(struct cgpu_info *) * (total_devices + new_devices + 2));
wr_unlock(&devices_lock);
+
if (hotplug_mode)
devices[total_devices + new_devices++] = cgpu;
else
@@ -6976,6 +7011,7 @@ static void hotplug_process()
wr_lock(&mining_thr_lock);
mining_thr = realloc(mining_thr, sizeof(thr) * (mining_threads + new_threads + 1));
wr_unlock(&mining_thr_lock);
+
if (!mining_thr)
quit(1, "Failed to hotplug realloc mining_thr");
for (i = 0; i < new_threads; i++) {