Merge pull request #94 from fperrad/20171018_cast: restore previous cast
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109
diff --git a/bn_mp_div_2d.c b/bn_mp_div_2d.c
index 00b4a63..aeaa8f2 100644
--- a/bn_mp_div_2d.c
+++ b/bn_mp_div_2d.c
@@ -54,7 +54,7 @@ int mp_div_2d(const mp_int *a, int b, mp_int *c, mp_int *d)
mp_digit *tmpc, mask, shift;
/* mask */
- mask = (1uL << D) - 1uL;
+ mask = ((mp_digit)1 << D) - 1uL;
/* shift for lsb */
shift = (mp_digit)DIGIT_BIT - D;
diff --git a/bn_mp_div_d.c b/bn_mp_div_d.c
index 5acdc31..2124bcc 100644
--- a/bn_mp_div_d.c
+++ b/bn_mp_div_d.c
@@ -25,7 +25,7 @@ static int s_is_power_of_two(mp_digit b, int *p)
}
for (x = 0; x < DIGIT_BIT; x++) {
- if (b == (1uL<<(mp_digit)x)) {
+ if (b == ((mp_digit)1<<(mp_digit)x)) {
*p = x;
return 1;
}
@@ -60,7 +60,7 @@ int mp_div_d(const mp_int *a, mp_digit b, mp_int *c, mp_digit *d)
/* power of two ? */
if (s_is_power_of_two(b, &ix) == 1) {
if (d != NULL) {
- *d = a->dp[0] & ((1uL<<(mp_digit)ix) - 1uL);
+ *d = a->dp[0] & (((mp_digit)1<<(mp_digit)ix) - 1uL);
}
if (c != NULL) {
return mp_div_2d(a, ix, c, NULL);
diff --git a/bn_mp_expt_d_ex.c b/bn_mp_expt_d_ex.c
index d02012f..d363d59 100644
--- a/bn_mp_expt_d_ex.c
+++ b/bn_mp_expt_d_ex.c
@@ -60,7 +60,7 @@ int mp_expt_d_ex(const mp_int *a, mp_digit b, mp_int *c, int fast)
}
/* if the bit is set multiply */
- if ((b & (1uL << (DIGIT_BIT - 1))) != 0u) {
+ if ((b & ((mp_digit)1 << (DIGIT_BIT - 1))) != 0u) {
if ((res = mp_mul(c, &g, c)) != MP_OKAY) {
mp_clear(&g);
return res;
diff --git a/bn_mp_mod_2d.c b/bn_mp_mod_2d.c
index e48fc19..7a74746 100644
--- a/bn_mp_mod_2d.c
+++ b/bn_mp_mod_2d.c
@@ -43,7 +43,7 @@ int mp_mod_2d(const mp_int *a, int b, mp_int *c)
}
/* clear the digit that is not completely outside/inside the modulus */
c->dp[b / DIGIT_BIT] &=
- (1uL << (mp_digit)(b % DIGIT_BIT)) - 1uL;
+ ((mp_digit)1 << (mp_digit)(b % DIGIT_BIT)) - (mp_digit)1;
mp_clamp(c);
return MP_OKAY;
}
diff --git a/bn_mp_mul_2d.c b/bn_mp_mul_2d.c
index ceac909..96aef85 100644
--- a/bn_mp_mul_2d.c
+++ b/bn_mp_mul_2d.c
@@ -48,7 +48,7 @@ int mp_mul_2d(const mp_int *a, int b, mp_int *c)
int x;
/* bitmask for carries */
- mask = (1uL << d) - 1uL;
+ mask = ((mp_digit)1 << d) - (mp_digit)1;
/* shift for msbs */
shift = (mp_digit)DIGIT_BIT - d;
diff --git a/bn_mp_prime_next_prime.c b/bn_mp_prime_next_prime.c
index 948e97e..b106a74 100644
--- a/bn_mp_prime_next_prime.c
+++ b/bn_mp_prime_next_prime.c
@@ -131,7 +131,7 @@ int mp_prime_next_prime(mp_int *a, int t, int bbs_style)
y = 1;
}
}
- } while ((y == 1) && (step < ((1uL << DIGIT_BIT) - kstep)));
+ } while ((y == 1) && (step < (((mp_digit)1 << DIGIT_BIT) - kstep)));
/* add the step */
if ((err = mp_add_d(a, step, a)) != MP_OKAY) {
@@ -139,7 +139,7 @@ int mp_prime_next_prime(mp_int *a, int t, int bbs_style)
}
/* if didn't pass sieve and step == MAX then skip test */
- if ((y == 1) && (step >= ((1uL << DIGIT_BIT) - kstep))) {
+ if ((y == 1) && (step >= (((mp_digit)1 << DIGIT_BIT) - kstep))) {
continue;
}
diff --git a/bn_mp_reduce.c b/bn_mp_reduce.c
index 5f72a01..5b1d405 100644
--- a/bn_mp_reduce.c
+++ b/bn_mp_reduce.c
@@ -33,7 +33,7 @@ int mp_reduce(mp_int *x, const mp_int *m, const mp_int *mu)
mp_rshd(&q, um - 1);
/* according to HAC this optimization is ok */
- if ((mp_digit)um > (1uL << (DIGIT_BIT - 1))) {
+ if ((mp_digit)um > ((mp_digit)1 << (DIGIT_BIT - 1))) {
if ((res = mp_mul(&q, mu, &q)) != MP_OKAY) {
goto CLEANUP;
}