| -rw-r--r-- | lib/math/div64.c | 24 |
1 file changed, 12 insertions, 12 deletions
diff --git a/lib/math/div64.c b/lib/math/div64.c
index bf77b9843175e8..0ebff850fd4dd0 100644
--- a/lib/math/div64.c
+++ b/lib/math/div64.c
@@ -184,10 +184,10 @@ u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
 EXPORT_SYMBOL(iter_div_u64_rem);
 
 #ifndef mul_u64_u64_div_u64
-u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
+u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 d)
 {
 	if (ilog2(a) + ilog2(b) <= 62)
-		return div64_u64(a * b, c);
+		return div64_u64(a * b, d);
 
 #if defined(__SIZEOF_INT128__)
 
@@ -212,37 +212,37 @@ u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
 
 #endif
 
-	/* make sure c is not zero, trigger runtime exception otherwise */
-	if (unlikely(c == 0)) {
+	/* make sure d is not zero, trigger runtime exception otherwise */
+	if (unlikely(d == 0)) {
 		unsigned long zero = 0;
 
 		OPTIMIZER_HIDE_VAR(zero);
 		return ~0UL/zero;
 	}
 
-	int shift = __builtin_ctzll(c);
+	int shift = __builtin_ctzll(d);
 
 	/* try reducing the fraction in case the dividend becomes <= 64 bits */
 	if ((n_hi >> shift) == 0) {
 		u64 n = shift ? (n_lo >> shift) | (n_hi << (64 - shift)) : n_lo;
 
-		return div64_u64(n, c >> shift);
+		return div64_u64(n, d >> shift);
 		/*
 		 * The remainder value if needed would be:
-		 *   res = div64_u64_rem(n, c >> shift, &rem);
+		 *   res = div64_u64_rem(n, d >> shift, &rem);
 		 *   rem = (rem << shift) + (n_lo - (n << shift));
 		 */
 	}
 
-	if (n_hi >= c) {
+	if (n_hi >= d) {
 		/* overflow: result is unrepresentable in a u64 */
 		return -1;
 	}
 
 	/* Do the full 128 by 64 bits division */
 
-	shift = __builtin_clzll(c);
-	c <<= shift;
+	shift = __builtin_clzll(d);
+	d <<= shift;
 
 	int p = 64 + shift;
 	u64 res = 0;
@@ -257,8 +257,8 @@ u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
 		n_hi <<= shift;
 		n_hi |= n_lo >> (64 - shift);
 		n_lo <<= shift;
-		if (carry || (n_hi >= c)) {
-			n_hi -= c;
+		if (carry || (n_hi >= d)) {
+			n_hi -= d;
 			res |= 1ULL << p;
 		}
 	} while (n_hi);
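To see how the renamed divisor parameter `d` flows through the checks above, here is a minimal userspace sketch. It is not kernel code and does not reproduce the kernel's bitwise long-division loop: it only mirrors the trailing-zero reduction and the `n_hi >= d` overflow test from the hunk above, then falls back to compiler-provided `unsigned __int128` division, and compares the result against a straight 128-bit reference. The function name `mul_div_sketch` and the test values are illustrative assumptions; it requires a compiler with `__int128` support and a nonzero divisor.

```c
/*
 * Userspace sketch (not kernel code): simplified version of the paths
 * shown in the diff above, checked against unsigned __int128 division.
 * Assumes d != 0 and a compiler providing __int128.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t mul_div_sketch(uint64_t a, uint64_t b, uint64_t d)
{
	unsigned __int128 prod = (unsigned __int128)a * b;
	uint64_t n_hi = (uint64_t)(prod >> 64);
	uint64_t n_lo = (uint64_t)prod;

	/*
	 * Strip common trailing zero bits from the divisor, as in the patch:
	 * if the reduced dividend fits in 64 bits, one native division suffices.
	 */
	int shift = __builtin_ctzll(d);
	if ((n_hi >> shift) == 0) {
		uint64_t n = shift ? (n_lo >> shift) | (n_hi << (64 - shift)) : n_lo;
		return n / (d >> shift);
	}

	/* Overflow: the quotient does not fit in a u64 (mirrors the patch). */
	if (n_hi >= d)
		return (uint64_t)-1;

	/*
	 * The kernel continues with a shift-and-subtract 128-by-64 division;
	 * this sketch just uses the 128-bit reference division instead.
	 */
	return (uint64_t)(prod / d);
}

int main(void)
{
	struct { uint64_t a, b, d; } cases[] = {
		{ 3, 5, 4 },                           /* small operands, fast path */
		{ UINT64_MAX, 1000, 4096 },            /* divisor with trailing zeros */
		{ UINT64_MAX, UINT64_MAX, UINT64_MAX } /* large operands */
	};

	for (unsigned i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
		uint64_t a = cases[i].a, b = cases[i].b, d = cases[i].d;
		uint64_t want = (uint64_t)(((unsigned __int128)a * b) / d);
		uint64_t got = mul_div_sketch(a, b, d);

		printf("%llu * %llu / %llu = %llu (ref %llu)\n",
		       (unsigned long long)a, (unsigned long long)b,
		       (unsigned long long)d, (unsigned long long)got,
		       (unsigned long long)want);
		assert(got == want);
	}
	return 0;
}
```

The sketch keeps the same path-selection order as the patch (reduce first, then test for overflow), so stepping through it with the sample inputs shows which branch a given `(a, b, d)` triple would take in the kernel helper.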
