Provide a wrapper for aligning lengths of size_t to 4 byte boundaries.
diff --git a/cgminer.c b/cgminer.c
index 2fe8ee6..32fd47a 100644
--- a/cgminer.c
+++ b/cgminer.c
@@ -1395,16 +1395,16 @@ void free_work(struct work *work)
* entered under gbt_lock */
static void __build_gbt_coinbase(struct pool *pool)
{
- int cbt_len, cal_len, orig_len;
unsigned char *coinbase;
+ int cbt_len, orig_len;
uint8_t *extra_len;
+ size_t cal_len;
cbt_len = strlen(pool->coinbasetxn) / 2;
pool->coinbase_len = cbt_len + 4;
/* We add 4 bytes of extra data corresponding to nonce2 of stratum */
cal_len = pool->coinbase_len + 1;
- if (cal_len % 4)
- cal_len += 4 - (cal_len % 4);
+ align_len(&cal_len);
coinbase = calloc(cal_len, 1);
hex2bin(coinbase, pool->coinbasetxn, 42);
extra_len = (uint8_t *)(coinbase + 41);
@@ -1428,7 +1428,8 @@ static bool __build_gbt_txns(struct pool *pool, json_t *res_val)
{
json_t *txn_array;
bool ret = false;
- int i, cal_len;
+ size_t cal_len;
+ int i;
free(pool->txn_hashes);
pool->txn_hashes = NULL;
@@ -1454,8 +1455,7 @@ static bool __build_gbt_txns(struct pool *pool, json_t *res_val)
unsigned char *txn_bin;
cal_len = txn_len;
- if (cal_len % 4)
- cal_len += 4 - (cal_len % 4);
+ align_len(&cal_len);
txn_bin = calloc(cal_len, 1);
if (unlikely(!txn_bin))
quit(1, "Failed to calloc txn_bin in __build_gbt_txns");
@@ -5088,6 +5088,7 @@ static void gen_stratum_work(struct pool *pool, struct work *work)
unsigned char *coinbase, merkle_root[32], merkle_sha[64], *merkle_hash;
int len, cb1_len, n1_len, cb2_len, i;
uint32_t *data32, *swap32;
+ size_t alloc_len;
char *header;
mutex_lock(&pool->pool_lock);
@@ -5099,7 +5100,9 @@ static void gen_stratum_work(struct pool *pool, struct work *work)
n1_len = strlen(pool->nonce1) / 2;
cb2_len = strlen(pool->swork.coinbase2) / 2;
len = cb1_len + n1_len + pool->n2size + cb2_len;
- coinbase = alloca(len + 1);
+ alloc_len = len;
+ align_len(&alloc_len);
+ coinbase = alloca(alloc_len);
hex2bin(coinbase, pool->swork.coinbase1, cb1_len);
hex2bin(coinbase + cb1_len, pool->nonce1, n1_len);
hex2bin(coinbase + cb1_len + n1_len, work->nonce2, pool->n2size);
diff --git a/util.c b/util.c
index e628af4..386298a 100644
--- a/util.c
+++ b/util.c
@@ -1543,8 +1543,7 @@ void *realloc_strcat(char *ptr, char *s)
return ptr;
len += old + 1;
- if (len % 4)
- len += 4 - (len % 4);
+ align_len(&len);
ret = malloc(len);
if (unlikely(!ret))
diff --git a/util.h b/util.h
index 620dcda..ade0b2e 100644
--- a/util.h
+++ b/util.h
@@ -57,4 +57,11 @@ void dev_error(struct cgpu_info *dev, enum dev_reason reason);
void *realloc_strcat(char *ptr, char *s);
void RenameThread(const char* name);
+/* Align a size_t to 4 byte boundaries for fussy arches */
+static inline void align_len(size_t *len)
+{
+ if (*len % 4)
+ *len += 4 - (*len % 4);
+}
+
#endif /* __UTIL_H__ */