Cull all the early queue requests, since we now queue a request every time work is popped.
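
For context, the model this commit moves to can be sketched roughly as follows: the pop path itself asks for replacement work, so callers (restart_threads, the watchdog, miner_thread, the bitforce driver) no longer need to pre-request. This is only an illustrative sketch under that assumption, not cgminer's actual get_work()/queue_request() implementation; the names work_queue, pop_work and request_more_work below are hypothetical.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical staged-work queue; cgminer's real structures differ. */
struct work_item {
	struct work_item *next;
	int id;
};

struct work_queue {
	struct work_item *head;
	int requests_in_flight;
};

/* Stand-in for queue_request(): ask the pool layer for one more work item. */
static void request_more_work(struct work_queue *q)
{
	q->requests_in_flight++;
}

/* The key idea: every pop triggers a fresh request, so the queue keeps
 * refilling itself and no caller has to issue an early request. */
static struct work_item *pop_work(struct work_queue *q)
{
	struct work_item *w = q->head;

	if (w)
		q->head = w->next;
	request_more_work(q);
	return w;
}

int main(void)
{
	struct work_item w = { NULL, 1 };
	struct work_queue q = { &w, 0 };
	struct work_item *popped = pop_work(&q);

	printf("popped work %d, requests in flight: %d\n",
	       popped ? popped->id : -1, q.requests_in_flight);
	return 0;
}

With that behaviour in the pop path, the explicit queue_request() calls scattered through the code become redundant, which is what the diff below removes.
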
diff --git a/cgminer.c b/cgminer.c
index 9ae2389..affa56f 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -2630,6 +2630,8 @@ static struct pool *priority_pool(int choice)
return ret;
}
+static bool queue_request(struct thr_info *thr, bool needed);
+
void switch_pools(struct pool *selected)
{
struct pool *pool, *last_pool;
@@ -2719,8 +2721,6 @@ static void discard_work(struct work *work)
free_work(work);
}
-bool queue_request(struct thr_info *thr, bool needed);
-
static void discard_stale(void)
{
struct work *work, *tmp;
@@ -2781,8 +2781,6 @@ static void restart_threads(void)
/* Discard staged work that is now stale */
discard_stale();
- queue_request(NULL, true);
-
for (i = 0; i < mining_threads; i++)
thr_info[i].work_restart = true;
@@ -3930,7 +3928,7 @@ static void pool_resus(struct pool *pool)
switch_pools(NULL);
}
-bool queue_request(struct thr_info *thr, bool needed)
+static bool queue_request(struct thr_info *thr, bool needed)
{
struct workio_cmd *wc;
@@ -4370,22 +4368,6 @@ void *miner_thread(void *userdata)
}
timersub(&tv_end, &tv_workstart, &wdiff);
- if (!requested) {
- if (wdiff.tv_sec > request_interval || work->blk.nonce > request_nonce) {
- thread_reportout(mythr);
- if (unlikely(!queue_request(mythr, false))) {
- applog(LOG_ERR, "Failed to queue_request in miner_thread %d", thr_id);
-
- cgpu->device_last_not_well = time(NULL);
- cgpu->device_not_well_reason = REASON_THREAD_FAIL_QUEUE;
- cgpu->thread_fail_queue_count++;
-
- goto out;
- }
- thread_reportin(mythr);
- requested = true;
- }
- }
if (unlikely((long)sdiff.tv_sec < cycle)) {
int mult;
@@ -4721,8 +4703,6 @@ static void *watchdog_thread(void __maybe_unused *userdata)
discard_stale();
- queue_request(NULL, false);
-
hashmeter(-1, &zero_tv, 0);
#ifdef HAVE_CURSES
diff --git a/driver-bitforce.c b/driver-bitforce.c
index ca6b8b0..d80f664 100644
--- a/driver-bitforce.c
+++ b/driver-bitforce.c
@@ -602,35 +602,15 @@ static void biforce_thread_enable(struct thr_info *thr)
static int64_t bitforce_scanhash(struct thr_info *thr, struct work *work, int64_t __maybe_unused max_nonce)
{
struct cgpu_info *bitforce = thr->cgpu;
- unsigned int sleep_time;
bool send_ret;
int64_t ret;
send_ret = bitforce_send_work(thr, work);
- if (!bitforce->nonce_range) {
- /* Initially wait 2/3 of the average cycle time so we can request more
- work before full scan is up */
- sleep_time = (2 * bitforce->sleep_ms) / 3;
- if (!restart_wait(sleep_time))
- return 0;
-
- bitforce->wait_ms = sleep_time;
- queue_request(thr, false);
-
- /* Now wait athe final 1/3rd; no bitforce should be finished by now */
- sleep_time = bitforce->sleep_ms - sleep_time;
- if (!restart_wait(sleep_time))
- return 0;
-
- bitforce->wait_ms += sleep_time;
- } else {
- sleep_time = bitforce->sleep_ms;
- if (!restart_wait(sleep_time))
- return 0;
+ if (!restart_wait(bitforce->sleep_ms))
+ return 0;
- bitforce->wait_ms = sleep_time;
- }
+ bitforce->wait_ms = bitforce->sleep_ms;
if (send_ret) {
bitforce->polling = true;
diff --git a/miner.h b/miner.h
index d79578e..51da4a2 100644
--- a/miner.h
+++ b/miner.h
@@ -599,7 +599,6 @@ extern pthread_mutex_t restart_lock;
extern pthread_cond_t restart_cond;
extern void thread_reportin(struct thr_info *thr);
-extern bool queue_request(struct thr_info *thr, bool needed);
extern int restart_wait(unsigned int mstime);
extern void kill_work(void);