commit 8d22ca1ac1087963846b5c5c5a436edac84f1fec
Author: Con Kolivas
Date:   2013-04-15T11:42:16

Put spacing around locking code for clarity.
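
The convention applied throughout is to set each lock/unlock critical
section off with blank lines, so the span over which each lock is held
stands out from the unrelated code around it. The diff below applies this
to cgminer's own wrappers (mutex_lock, rd_lock/wr_lock, cg_rlock/cg_wlock);
here is a minimal sketch of the same pattern using plain pthreads (the
counter_lock/next_count names are illustrative only, not from cgminer):

    #include <pthread.h>

    static pthread_mutex_t counter_lock = PTHREAD_MUTEX_INITIALIZER;
    static int counter;

    /* Illustrative only: the blank lines isolate the critical section,
     * making it obvious at a glance where the lock is taken and released
     * and that nothing else runs while it is held. */
    static int next_count(void)
    {
        int ret;

        pthread_mutex_lock(&counter_lock);
        ret = ++counter;
        pthread_mutex_unlock(&counter_lock);

        return ret;
    }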

diff --git a/cgminer.c b/cgminer.c
index 3eeaa40..8470cf9 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -398,6 +398,7 @@ struct thr_info *get_thread(int thr_id)
 	rd_lock(&mining_thr_lock);
 	thr = mining_thr[thr_id];
 	rd_unlock(&mining_thr_lock);
+
 	return thr;
 }
 
@@ -415,6 +416,7 @@ struct cgpu_info *get_devices(int id)
 	rd_lock(&devices_lock);
 	cgpu = devices[id];
 	rd_unlock(&devices_lock);
+
 	return cgpu;
 }
 
@@ -455,6 +457,7 @@ static void sharelog(const char*disposition, const struct work*work)
 	ret = fwrite(s, rv, 1, sharelog_file);
 	fflush(sharelog_file);
 	mutex_unlock(&sharelog_lock);
+
 	if (ret != 1)
 		applog(LOG_ERR, "sharelog fwrite error");
 }
@@ -500,6 +503,7 @@ static bool pool_tset(struct pool *pool, bool *var)
 	ret = *var;
 	*var = true;
 	mutex_unlock(&pool->pool_lock);
+
 	return ret;
 }
 
@@ -511,6 +515,7 @@ bool pool_tclear(struct pool *pool, bool *var)
 	ret = *var;
 	*var = false;
 	mutex_unlock(&pool->pool_lock);
+
 	return ret;
 }
 
@@ -521,6 +526,7 @@ struct pool *current_pool(void)
 	cg_rlock(&control_lock);
 	pool = currentpool;
 	cg_runlock(&control_lock);
+
 	return pool;
 }
 
@@ -790,6 +796,7 @@ static void load_temp_cutoffs()
 				devices[i]->cutofftemp = opt_cutofftemp;
 		}
 		rd_unlock(&devices_lock);
+
 		return;
 	}
 	if (device <= 1) {
@@ -1491,9 +1498,11 @@ static struct work *make_work(void)
 
 	if (unlikely(!work))
 		quit(1, "Failed to calloc work in make_work");
+
 	cg_wlock(&control_lock);
 	work->id = total_work++;
 	cg_wunlock(&control_lock);
+
 	return work;
 }
 
@@ -1886,6 +1895,7 @@ static int total_staged(void)
 	mutex_lock(stgd_lock);
 	ret = __total_staged();
 	mutex_unlock(stgd_lock);
+
 	return ret;
 }
 
@@ -3158,10 +3168,12 @@ static bool stale_work(struct work *work, bool share)
 		}
 
 		same_job = true;
+
 		cg_rlock(&pool->data_lock);
 		if (strcmp(work->job_id, pool->swork.job_id))
 			same_job = false;
 		cg_runlock(&pool->data_lock);
+
 		if (!same_job) {
 			applog(LOG_DEBUG, "Work stale due to stratum job_id mismatch");
 			return true;
@@ -3205,6 +3217,7 @@ static uint64_t share_diff(const struct work *work)
 	if (unlikely(!d64))
 		d64 = 1;
 	ret = diffone / d64;
+
 	cg_wlock(&control_lock);
 	if (ret > best_diff) {
 		best_diff = ret;
@@ -3213,6 +3226,7 @@ static uint64_t share_diff(const struct work *work)
 	if (ret > work->pool->best_diff)
 		work->pool->best_diff = ret;
 	cg_wunlock(&control_lock);
+
 	return ret;
 }
 
@@ -3277,6 +3291,7 @@ static void *submit_work_thread(void *userdata)
 			total_diff_stale += work->work_difficulty;
 			pool->diff_stale += work->work_difficulty;
 			mutex_unlock(&stats_lock);
+
 			goto out;
 		}
 		work->stale = true;
@@ -3316,10 +3331,12 @@ static void *submit_work_thread(void *userdata)
 			if (likely(stratum_send(pool, s, strlen(s)))) {
 				if (pool_tclear(pool, &pool->submit_fail))
 					applog(LOG_WARNING, "Pool %d communication resumed, submitting work", pool->pool_no);
+
 				mutex_lock(&sshare_lock);
 				HASH_ADD_INT(stratum_shares, id, sshare);
 				pool->sshares++;
 				mutex_unlock(&sshare_lock);
+
 				applog(LOG_DEBUG, "Successfully submitted, adding to stratum_shares db");
 				submitted = true;
 				break;
@@ -3365,6 +3382,7 @@ static void *submit_work_thread(void *userdata)
 			total_diff_stale += work->work_difficulty;
 			pool->diff_stale += work->work_difficulty;
 			mutex_unlock(&stats_lock);
+
 			break;
 		}
 
@@ -3605,6 +3623,7 @@ static bool block_exists(char *hexstr)
 	rd_lock(&blk_lock);
 	HASH_FIND_STR(blocks, hexstr, s);
 	rd_unlock(&blk_lock);
+
 	if (s)
 		return true;
 	return false;
@@ -3693,6 +3712,7 @@ static bool test_work_current(struct work *work)
 			quit (1, "test_work_current OOM");
 		strcpy(s->hash, hexstr);
 		s->block_no = new_blocks++;
+
 		wr_lock(&blk_lock);
 		/* Only keep the last hour's worth of blocks in memory since
 		 * work from blocks before this is virtually impossible and we
@@ -3709,6 +3729,7 @@ static bool test_work_current(struct work *work)
 		HASH_ADD_STR(blocks, hash, s);
 		set_blockdiff(work);
 		wr_unlock(&blk_lock);
+
 		if (deleted_block)
 			applog(LOG_DEBUG, "Deleted block %d from database", deleted_block);
 		set_curblock(hexstr, work->data);
@@ -4701,6 +4722,7 @@ static void hashmeter(int thr_id, struct timeval *diff,
 	local_mhashes_done = 0;
 out_unlock:
 	mutex_unlock(&hash_lock);
+
 	if (showlog) {
 		if (!curses_active) {
 			printf("%s          \r", statusline);
@@ -4763,6 +4785,7 @@ static bool parse_stratum_response(struct pool *pool, char *s)
 	}
 
 	id = json_integer_value(id_val);
+
 	mutex_lock(&sshare_lock);
 	HASH_FIND_INT(stratum_shares, &id, sshare);
 	if (sshare) {
@@ -4770,6 +4793,7 @@ static bool parse_stratum_response(struct pool *pool, char *s)
 		pool->sshares--;
 	}
 	mutex_unlock(&sshare_lock);
+
 	if (!sshare) {
 		if (json_is_true(res_val))
 			applog(LOG_NOTICE, "Accepted untracked stratum share from pool %d", pool->pool_no);
@@ -4840,6 +4864,7 @@ static int cp_prio(void)
 	cg_rlock(&control_lock);
 	prio = currentpool->prio;
 	cg_runlock(&control_lock);
+
 	return prio;
 }
 
@@ -4901,6 +4926,7 @@ static bool supports_resume(struct pool *pool)
 	cg_rlock(&pool->data_lock);
 	ret = (pool->sessionid != NULL);
 	cg_runlock(&pool->data_lock);
+
 	return ret;
 }
 
@@ -5819,6 +5845,7 @@ void work_completed(struct cgpu_info *cgpu, struct work *work)
 		cgpu->queued_count--;
 	HASH_DEL(cgpu->queued_work, work);
 	wr_unlock(&cgpu->qlock);
+
 	free_work(work);
 }
 
@@ -6159,6 +6186,7 @@ static void reap_curl(struct pool *pool)
 	int reaped = 0;
 
 	gettimeofday(&now, NULL);
+
 	mutex_lock(&pool->pool_lock);
 	list_for_each_entry_safe(ent, iter, &pool->curlring, node) {
 		if (pool->curls < 2)
@@ -6172,6 +6200,7 @@ static void reap_curl(struct pool *pool)
 		}
 	}
 	mutex_unlock(&pool->pool_lock);
+
 	if (reaped)
 		applog(LOG_DEBUG, "Reaped %d curl%s from pool %d", reaped, reaped > 1 ? "s" : "", pool->pool_no);
 }
@@ -6294,6 +6323,7 @@ static void *watchdog_thread(void __maybe_unused *userdata)
 			applog(LOG_WARNING, "Will restart execution as scheduled at %02d:%02d",
 			       schedstart.tm.tm_hour, schedstart.tm.tm_min);
 			sched_paused = true;
+
 			rd_lock(&mining_thr_lock);
 			for (i = 0; i < mining_threads; i++)
 				mining_thr[i]->pause = true;
@@ -6577,6 +6607,7 @@ static void *test_pool_thread(void *arg)
 			pools_active = true;
 		}
 		cg_wunlock(&control_lock);
+
 		pool_resus(pool);
 	} else
 		pool_died(pool);
@@ -6890,9 +6921,11 @@ void fill_device_drv(struct cgpu_info *cgpu)
 void enable_device(struct cgpu_info *cgpu)
 {
 	cgpu->deven = DEV_ENABLED;
+
 	wr_lock(&devices_lock);
 	devices[cgpu->cgminer_id = cgminer_id_count++] = cgpu;
 	wr_unlock(&devices_lock);
+
 	if (hotplug_mode) {
 		new_threads += cgpu->threads;
 #ifdef HAVE_CURSES
@@ -6935,9 +6968,11 @@ bool add_cgpu(struct cgpu_info*cgpu)
 		cgpu->device_id = d->lastid = 0;
 		HASH_ADD_STR(devids, name, d);
 	}
+
 	wr_lock(&devices_lock);
 	devices = realloc(devices, sizeof(struct cgpu_info *) * (total_devices + new_devices + 2));
 	wr_unlock(&devices_lock);
+
 	if (hotplug_mode)
 		devices[total_devices + new_devices++] = cgpu;
 	else
@@ -6976,6 +7011,7 @@ static void hotplug_process()
 	wr_lock(&mining_thr_lock);
 	mining_thr = realloc(mining_thr, sizeof(thr) * (mining_threads + new_threads + 1));
 	wr_unlock(&mining_thr_lock);
+
 	if (!mining_thr)
 		quit(1, "Failed to hotplug realloc mining_thr");
 	for (i = 0; i < new_threads; i++) {