Merge branch 'master' of git://github.com/ckolivas/cgminer.git
diff --git a/adl.c b/adl.c
index a920d95..473cba0 100644
--- a/adl.c
+++ b/adl.c
@@ -1065,6 +1065,11 @@ static bool fan_autotune(int gpu, int temp, int fanpercent, int lasttemp, bool *
if (newpercent != fanpercent) {
applog(LOG_INFO, "Setting GPU %d fan percentage to %d", gpu, newpercent);
set_fanspeed(gpu, newpercent);
+ /* If the fanspeed is going down and we're below the top speed,
+ * consider the fan optimal to prevent minute changes in
+ * fanspeed delaying GPU engine speed changes */
+ if (newpercent < fanpercent && *fan_window)
+ return true;
return false;
}
return true;
diff --git a/cgminer.c b/cgminer.c
index f0b9924..7ea9b4c 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -1625,7 +1625,6 @@ static bool submit_upstream_work(const struct work *work, CURL *curl)
bool rolltime;
uint32_t *hash32;
char hashshow[64+1] = "";
- bool isblock;
#ifdef __BIG_ENDIAN__
int swapcounter = 0;
@@ -1666,17 +1665,9 @@ static bool submit_upstream_work(const struct work *work, CURL *curl)
res = json_object_get(val, "result");
if (!QUIET) {
-#ifndef MIPSEB
-// This one segfaults on my router for some reason
- isblock = regeneratehash(work);
- if (unlikely(isblock)) {
- pool->solved++;
- found_blocks++;
- }
hash32 = (uint32_t *)(work->hash);
sprintf(hashshow, "%08lx.%08lx%s", (unsigned long)(hash32[6]), (unsigned long)(hash32[5]),
- isblock ? " BLOCK!" : "");
-#endif
+ work->block? " BLOCK!" : "");
}
/* Theoretically threads could race when modifying accepted and
@@ -1829,6 +1820,7 @@ static void get_benchmark_work(struct work *work)
size_t min_size = (work_size < bench_size ? work_size : bench_size);
memset(work, 0, sizeof(work));
memcpy(work, &bench_block, min_size);
+ work->mandatory = true;
}
static bool get_upstream_work(struct work *work, CURL *curl)
@@ -2164,7 +2156,7 @@ static bool stale_work(struct work *work, bool share)
struct timeval now;
struct pool *pool;
- if (opt_benchmark)
+ if (work->mandatory)
return false;
gettimeofday(&now, NULL);
@@ -2184,6 +2176,16 @@ static bool stale_work(struct work *work, bool share)
return false;
}
+static void check_solve(struct work *work)
+{
+ work->block = regeneratehash(work);
+ if (unlikely(work->block)) {
+ work->pool->solved++;
+ found_blocks++;
+ work->mandatory = true;
+ applog(LOG_NOTICE, "Found block for pool %d!", work->pool->pool_no);
+ }
+}
static void *submit_work_thread(void *userdata)
{
@@ -2197,6 +2199,8 @@ static void *submit_work_thread(void *userdata)
applog(LOG_DEBUG, "Creating extra submit work thread");
+ check_solve(work);
+
if (stale_work(work, true)) {
if (opt_submit_stale)
applog(LOG_NOTICE, "Stale share detected, submitting as user requested");
@@ -2280,7 +2284,7 @@ static struct pool *priority_pool(int choice)
void switch_pools(struct pool *selected)
{
struct pool *pool, *last_pool;
- int i, pool_no;
+ int i, pool_no, next_pool;
mutex_lock(&control_lock);
last_pool = currentpool;
@@ -2313,13 +2317,22 @@ void switch_pools(struct pool *selected)
/* Both of these simply increment and cycle */
case POOL_ROUNDROBIN:
case POOL_ROTATE:
- if (selected) {
+ if (selected && !selected->idle) {
pool_no = selected->pool_no;
break;
}
- pool_no++;
- if (pool_no >= total_pools)
- pool_no = 0;
+ next_pool = pool_no;
+ /* Select the next alive pool */
+ for (i = 1; i < total_pools; i++) {
+ next_pool++;
+ if (next_pool >= total_pools)
+ next_pool = 0;
+ pool = pools[next_pool];
+ if (!pool->idle && pool->enabled == POOL_ENABLED) {
+ pool_no = next_pool;
+ break;
+ }
+ }
break;
default:
break;
@@ -2479,7 +2492,7 @@ static void test_work_current(struct work *work)
{
char *hexstr;
- if (opt_benchmark)
+ if (work->mandatory)
return;
hexstr = bin2hex(work->data, 18);
@@ -4010,6 +4023,9 @@ static void convert_to_work(json_t *val, bool rolltime, struct pool *pool)
work->rolltime = rolltime;
work->longpoll = true;
+ if (pool->enabled == POOL_REJECTING)
+ work->mandatory = true;
+
/* We'll be checking this work item twice, but we already know it's
* from a new block so explicitly force the new block detection now
* rather than waiting for it to hit the stage thread. This also
@@ -4069,7 +4085,7 @@ static struct pool *select_longpoll_pool(struct pool *cp)
*/
static void wait_lpcurrent(struct pool *pool)
{
- if (pool->enabled == POOL_REJECTING)
+ if (pool->enabled == POOL_REJECTING || pool_strategy == POOL_LOADBALANCE)
return;
while (pool != current_pool()) {
diff --git a/miner.h b/miner.h
index 4e65f22..d01e213 100644
--- a/miner.h
+++ b/miner.h
@@ -741,6 +741,8 @@ struct work {
bool rolltime;
bool longpoll;
bool stale;
+ bool mandatory;
+ bool block;
unsigned int work_block;
int id;