Silence unused parameter warnings.
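The warnings are silenced with a __maybe_unused annotation on the parameters that are intentionally ignored, rather than by casting them to void; the one exception is abandon_work(), where the unused thr_id argument is dropped outright and the caller updated. A minimal sketch of how such a macro is commonly defined for GCC-compatible compilers follows; the real definition lives elsewhere in the tree (miner.h), so the exact guards shown here are an assumption:

    /* Sketch only: a typical __maybe_unused wrapper for GCC-compatible
     * compilers; the guards in cgminer's miner.h may differ. */
    #ifdef __GNUC__
    #define __maybe_unused __attribute__((unused))
    #else
    #define __maybe_unused
    #endif
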
diff --git a/cgminer.c b/cgminer.c
index 1d88107..d744305 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -854,7 +854,7 @@ static char *parse_config(json_t *config, bool fileconf)
return NULL;
}
-static char *load_config(const char *arg, void *unused)
+static char *load_config(const char *arg, void __maybe_unused *unused)
{
json_error_t err;
json_t *config;
@@ -2599,7 +2599,7 @@ retry:
opt_loginput = false;
}
-static void *input_thread(void *userdata)
+static void *input_thread(void __maybe_unused *userdata)
{
pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
@@ -2979,7 +2979,8 @@ static void roll_work(struct work *work)
/* Recycle the work at a higher starting res_nonce if we know the thread we're
* giving it to will not finish scanning it. We keep the master copy to be
* recycled more rapidly and discard the clone to avoid repeating work */
-static bool divide_work(struct timeval *now, struct work *work, uint32_t hash_div)
+static bool divide_work(struct timeval __maybe_unused *now, struct work *work,
+ uint32_t __maybe_unused hash_div)
{
if (can_roll(work) && should_roll(work)) {
roll_work(work);
@@ -3183,7 +3184,7 @@ bool submit_nonce(struct thr_info *thr, struct work *work, uint32_t nonce)
return submit_work_sync(thr, work);
}
-static inline bool abandon_work(int thr_id, struct work *work, struct timeval *wdiff, uint64_t hashes)
+static inline bool abandon_work(struct work *work, struct timeval *wdiff, uint64_t hashes)
{
if (wdiff->tv_sec > opt_scantime ||
work->blk.nonce >= MAXTHREADS - hashes ||
@@ -3334,7 +3335,7 @@ void *miner_thread(void *userdata)
if (can_roll(work) && should_roll(work))
roll_work(work);
- } while (!abandon_work(thr_id, work, &wdiff, hashes));
+ } while (!abandon_work(work, &wdiff, hashes));
}
out:
@@ -3527,7 +3528,7 @@ void reinit_device(struct cgpu_info *cgpu)
/* Makes sure the hashmeter keeps going even if mining threads stall, updates
* the screen at regular intervals, and restarts threads if they appear to have
* died. */
-static void *watchdog_thread(void *userdata)
+static void *watchdog_thread(void __maybe_unused *userdata)
{
const unsigned int interval = 3;
static struct timeval rotate_tv;
diff --git a/device-gpu.c b/device-gpu.c
index 8dfe92d..a74c51a 100644
--- a/device-gpu.c
+++ b/device-gpu.c
@@ -1081,13 +1081,14 @@ static void opencl_free_work(struct thr_info *thr, struct work *work)
}
}
-static bool opencl_prepare_work(struct thr_info *thr, struct work *work)
+static bool opencl_prepare_work(struct thr_info __maybe_unused *thr, struct work *work)
{
precalc_hash(&work->blk, (uint32_t *)(work->midstate), (uint32_t *)(work->data + 64));
return true;
}
-static uint64_t opencl_scanhash(struct thr_info *thr, struct work *work, uint64_t max_nonce)
+static uint64_t opencl_scanhash(struct thr_info *thr, struct work *work,
+ uint64_t __maybe_unused max_nonce)
{
const int thr_id = thr->id;
struct opencl_thread_data *thrdata = thr->cgpu_data;
diff --git a/util.c b/util.c
index b68ba20..e05e450 100644
--- a/util.c
+++ b/util.c
@@ -239,7 +239,8 @@ out:
}
#ifdef CURL_HAS_SOCKOPT
-int json_rpc_call_sockopt_cb(void *userdata, curl_socket_t fd, curlsocktype purpose)
+int json_rpc_call_sockopt_cb(void __maybe_unused *userdata, curl_socket_t fd,
+ curlsocktype __maybe_unused purpose)
{
int keepalive = 1;
int tcp_keepcnt = 5;
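
For reference, a hypothetical self-contained demo of the effect (the function names and the local macro definition are illustrative, not taken from cgminer): built with gcc -Wall -Wextra -c demo.c, the first function draws an unused-parameter warning while the annotated one compiles quietly.

    /* Hypothetical demo, not part of the patch. */
    #define __maybe_unused __attribute__((unused))

    static int noisy(void *userdata)                 /* warning: unused parameter 'userdata' */
    {
            return 0;
    }

    static int quiet(void __maybe_unused *userdata)  /* no warning */
    {
            return 0;
    }

    int main(void)
    {
            return noisy(0) + quiet(0);
    }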