Minimise locking and unlocking when getting counts by reusing shared mutex lock functions.
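The change follows a simple pattern; below is a minimal, self-contained sketch of it using plain pthread mutexes and made-up names (count_lock, __get_queued, report_counts) rather than cgminer's own qd_lock/stgd_lock and mutex_lock()/mutex_unlock() wrappers. Each count gets a lock-free double-underscore accessor that assumes the mutex is already held, the existing public function becomes a thin locked wrapper around it, and a caller needing several counts takes the mutex once and reuses the unlocked helpers instead of locking and unlocking per value.

#include <pthread.h>
#include <stdio.h>

/* Illustrative counters guarded by one mutex, standing in for the
 * queued/staged counts that cgminer protects with qd_lock/stgd_lock. */
static pthread_mutex_t count_lock = PTHREAD_MUTEX_INITIALIZER;
static int queued_count;
static int staged_count;

/* Unlocked accessors: the double-underscore prefix marks helpers that
 * must only be called with count_lock already held. */
static int __get_queued(void)
{
	return queued_count;
}

static int __get_staged(void)
{
	return staged_count;
}

/* The old public interface remains as a thin locked wrapper. */
static int get_queued(void)
{
	int ret;

	pthread_mutex_lock(&count_lock);
	ret = __get_queued();
	pthread_mutex_unlock(&count_lock);
	return ret;
}

/* A caller that needs several counts takes the mutex once and reuses
 * the unlocked helpers, rather than locking once per value. */
static void report_counts(void)
{
	int q, s;

	pthread_mutex_lock(&count_lock);
	q = __get_queued();
	s = __get_staged();
	pthread_mutex_unlock(&count_lock);

	printf("queued=%d staged=%d\n", q, s);
}

int main(void)
{
	queued_count = 3;
	staged_count = 5;
	report_counts();
	printf("queued alone=%d\n", get_queued());
	return 0;
}

In the diff below, enough_work() and queue_request() apply exactly this change: one acquisition of qd_lock covers both queued counts and one acquisition of stgd_lock covers both staged counts, where previously each of the four counts locked and unlocked its mutex separately.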
diff --git a/cgminer.c b/cgminer.c
index 2aaf4d4..e9a9f78 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -1334,16 +1334,26 @@ void decay_time(double *f, double fadd)
*f = (fadd + *f * 0.58) / 1.58;
}
+static int __total_staged(void)
+{
+ return HASH_COUNT(staged_work);
+}
+
static int total_staged(void)
{
int ret;
mutex_lock(stgd_lock);
- ret = HASH_COUNT(staged_work);
+ ret = __total_staged();
mutex_unlock(stgd_lock);
return ret;
}
+static int __pool_staged(struct pool *pool)
+{
+ return pool->staged;
+}
+
static int pool_staged(struct pool *pool)
{
int ret;
@@ -1354,13 +1364,6 @@ static int pool_staged(struct pool *pool)
return ret;
}
-static int current_staged(void)
-{
- struct pool *pool = current_pool();
-
- return pool_staged(pool);
-}
-
#ifdef HAVE_CURSES
WINDOW *mainwin, *statuswin, *logwin;
#endif
@@ -2251,23 +2254,33 @@ static void dec_queued(struct pool *pool)
mutex_unlock(&qd_lock);
}
+static int __pool_queued(struct pool *pool)
+{
+ return pool->queued;
+}
+
static int current_queued(void)
{
struct pool *pool = current_pool();
int ret;
mutex_lock(&qd_lock);
- ret = pool->queued;
+ ret = __pool_queued(pool);
mutex_unlock(&qd_lock);
return ret;
}
+static int __global_queued(void)
+{
+ return total_queued;
+}
+
static int global_queued(void)
{
int ret;
mutex_lock(&qd_lock);
- ret = total_queued;
+ ret = __global_queued();
mutex_unlock(&qd_lock);
return ret;
}
@@ -2275,11 +2288,17 @@ static int global_queued(void)
static bool enough_work(void)
{
int cq, cs, ts, tq, maxq = opt_queue + mining_threads;
+ struct pool *pool = current_pool();
- cq = current_queued();
- cs = current_staged();
- ts = total_staged();
- tq = global_queued();
+ mutex_lock(&qd_lock);
+ cq = __pool_queued(pool);
+ tq = __global_queued();
+ mutex_unlock(&qd_lock);
+
+ mutex_lock(stgd_lock);
+ cs = __pool_staged(pool);
+ ts = __total_staged();
+ mutex_unlock(stgd_lock);
if (((cs || cq >= opt_queue) && ts >= maxq) ||
((cs || cq) && tq >= maxq))
@@ -3753,13 +3772,19 @@ static void pool_resus(struct pool *pool)
bool queue_request(struct thr_info *thr, bool needed)
{
int cq, cs, ts, tq, maxq = opt_queue + mining_threads;
+ struct pool *pool = current_pool();
struct workio_cmd *wc;
bool lag = false;
- cq = current_queued();
- cs = current_staged();
- ts = total_staged();
- tq = global_queued();
+ mutex_lock(&qd_lock);
+ cq = __pool_queued(pool);
+ tq = __global_queued();
+ mutex_unlock(&qd_lock);
+
+ mutex_lock(stgd_lock);
+ cs = __pool_staged(pool);
+ ts = __total_staged();
+ mutex_unlock(stgd_lock);
if (needed && cq >= maxq && !ts && !opt_fail_only) {
/* If we're queueing work faster than we can stage it, consider