ocl: correct applog() format specifier types
diff --git a/ocl.c b/ocl.c
index 8a15912..8709f4f 100644
--- a/ocl.c
+++ b/ocl.c
@@ -363,14 +363,14 @@ _clState *initCl(unsigned int gpu, char *name, size_t nameSize)
applog(LOG_ERR, "Error %d: Failed to clGetDeviceInfo when trying to get CL_DEVICE_MAX_WORK_GROUP_SIZE", status);
return NULL;
}
- applog(LOG_DEBUG, "Max work group size reported %d", clState->max_work_size);
+ applog(LOG_DEBUG, "Max work group size reported %zu", clState->max_work_size);
status = clGetDeviceInfo(devices[gpu], CL_DEVICE_MAX_MEM_ALLOC_SIZE , sizeof(cl_ulong), (void *)&cgpu->max_alloc, NULL);
if (status != CL_SUCCESS) {
applog(LOG_ERR, "Error %d: Failed to clGetDeviceInfo when trying to get CL_DEVICE_MAX_MEM_ALLOC_SIZE", status);
return NULL;
}
- applog(LOG_DEBUG, "Max mem alloc size is %u", cgpu->max_alloc);
+ applog(LOG_DEBUG, "Max mem alloc size is %lu", (long unsigned int)(cgpu->max_alloc));
/* Create binary filename based on parameters passed to opencl
* compiler to ensure we only load a binary that matches what would
@@ -495,7 +495,7 @@ _clState *initCl(unsigned int gpu, char *name, size_t nameSize)
if (cgpu->thread_concurrency > cgpu->shaders * 5)
cgpu->thread_concurrency = cgpu->shaders * 5;
}
- applog(LOG_DEBUG, "GPU %d: selecting thread concurrency of %u",gpu, cgpu->thread_concurrency);
+ applog(LOG_DEBUG, "GPU %d: selecting thread concurrency of %zu", gpu, cgpu->thread_concurrency);
} else
cgpu->thread_concurrency = cgpu->opt_tc;
}
@@ -610,7 +610,7 @@ build:
sprintf(CompilerOptions, "-D WORKSIZE=%d -D VECTORS%d -D WORKVEC=%d",
(int)clState->wsize, clState->vwidth, (int)clState->wsize * clState->vwidth);
}
- applog(LOG_DEBUG, "Setting worksize to %d", clState->wsize);
+ applog(LOG_DEBUG, "Setting worksize to %zu", clState->wsize);
if (clState->vwidth > 1)
applog(LOG_DEBUG, "Patched source to suit %d vectors", clState->vwidth);
@@ -689,7 +689,7 @@ build:
break;
/* copy over all of the generated binaries. */
- applog(LOG_DEBUG, "Binary size for gpu %d found in binary slot %d: %d", gpu, slot, binary_sizes[slot]);
+ applog(LOG_DEBUG, "Binary size for gpu %d found in binary slot %d: %zu", gpu, slot, binary_sizes[slot]);
if (!binary_sizes[slot]) {
applog(LOG_ERR, "OpenCL compiler generated a zero sized binary, FAIL!");
return NULL;
@@ -771,7 +771,7 @@ built:
free(binaries);
free(binary_sizes);
- applog(LOG_INFO, "Initialising kernel %s with%s bitalign, %d vectors and worksize %d",
+ applog(LOG_INFO, "Initialising kernel %s with%s bitalign, %d vectors and worksize %zu",
filename, clState->hasBitAlign ? "" : "out", clState->vwidth, clState->wsize);
if (!prog_built) {
@@ -804,10 +804,11 @@ built:
/* Use the max alloc value which has been rounded to a power of
* 2 greater >= required amount earlier */
if (bufsize > cgpu->max_alloc) {
- applog(LOG_WARNING, "Maximum buffer memory device %d supports says %u", gpu, cgpu->max_alloc);
- applog(LOG_WARNING, "Your scrypt settings come to %u", bufsize);
+ applog(LOG_WARNING, "Maximum buffer memory device %d supports says %lu",
+ gpu, (long unsigned int)(cgpu->max_alloc));
+ applog(LOG_WARNING, "Your scrypt settings come to %zu", bufsize);
}
- applog(LOG_DEBUG, "Creating scrypt buffer sized %u", bufsize);
+ applog(LOG_DEBUG, "Creating scrypt buffer sized %zu", bufsize);
clState->padbufsize = bufsize;
/* This buffer is weird and might work to some degree even if