#ifndef _ASM_GENERIC_DIV64_H
    #define _ASM_GENERIC_DIV64_H
    /*
     * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
     * Based on former asm-ppc/div64.h and asm-m68knommu/div64.h
 *
     * Optimization for constant divisors on 32-bit machines:
     * Copyright (C) 2006-2015 Nicolas Pitre
 *
     * The semantics of do_div() are:
     *
     * uint32_t do_div(uint64_t *n, uint32_t base)
 * {
 *	uint32_t remainder = *n % base;
 *	*n = *n / base;
 *	return remainder;
 * }
     *
     * NOTE: macro parameter n is evaluated multiple times,
     *       beware of side effects!
     */
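/*
 * Illustrative usage sketch (not part of the original header): do_div()
 * divides in place and returns the remainder, e.g. when splitting a
 * 64-bit nanosecond count into seconds and leftover nanoseconds:
 *
 *	uint64_t ns = 1234567890123ULL;
 *	uint32_t rem = do_div(ns, 1000000000);
 *	// ns == 1234 (seconds), rem == 567890123 (nanoseconds)
 */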
    
    #include <linux/types.h>
    
    #include <linux/compiler.h>
    
    #if BITS_PER_LONG == 64
    
    # define do_div(n,base) ({					\
    	uint32_t __base = (base);				\
    	uint32_t __rem;						\
    	__rem = ((uint64_t)(n)) % __base;			\
    	(n) = ((uint64_t)(n)) / __base;				\
    	__rem;							\
     })
    
    #elif BITS_PER_LONG == 32
    
    #include <linux/log2.h>
    
    /*
     * If the divisor happens to be constant, we determine the appropriate
     * inverse at compile time to turn the division into a few inline
     * multiplications which ought to be much faster. And yet only if compiling
     * with a sufficiently recent gcc version to perform proper 64-bit constant
     * propagation.
     *
     * (It is unfortunate that gcc doesn't perform all this internally.)
     */
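/*
 * Illustrative aside (not from the original header): the same
 * multiply-by-reciprocal trick at 32-bit scale.  To divide a 32-bit x by
 * the constant 10, precompute m = ceil(2^35 / 10) = 0xCCCCCCCD and use
 *
 *	q = (uint32_t)(((uint64_t)x * 0xCCCCCCCDULL) >> 35);
 *
 * which equals x / 10 for every 32-bit x.  __div64_const32() below applies
 * the same idea one level up: a 64-bit dividend and a 128-bit intermediate
 * product assembled from 32 x 32 -> 64 partial products.
 */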
    
    #ifndef __div64_const32_is_OK
    #define __div64_const32_is_OK (__GNUC__ >= 4)
    #endif
    
    #define __div64_const32(n, ___b)					\
    ({									\
    	/*								\
    	 * Multiplication by reciprocal of b: n / b = n * (p / b) / p	\
    	 *								\
    	 * We rely on the fact that most of this code gets optimized	\
    	 * away at compile time due to constant propagation and only	\
    	 * a few multiplication instructions should remain.		\
    	 * Hence this monstrous macro (static inline doesn't always	\
    	 * do the trick here).						\
    	 */								\
    	uint64_t ___res, ___x, ___t, ___m, ___n = (n);			\
    	uint32_t ___p, ___bias;						\
    									\
    	/* determine MSB of b */					\
    	___p = 1 << ilog2(___b);					\
    									\
    	/* compute m = ((p << 64) + b - 1) / b */			\
    	___m = (~0ULL / ___b) * ___p;					\
    	___m += (((~0ULL % ___b + 1) * ___p) + ___b - 1) / ___b;	\
    									\
    	/* one less than the dividend with highest result */		\
    	___x = ~0ULL / ___b * ___b - 1;					\
    									\
    	/* test our ___m with res = m * x / (p << 64) */		\
    	___res = ((___m & 0xffffffff) * (___x & 0xffffffff)) >> 32;	\
    	___t = ___res += (___m & 0xffffffff) * (___x >> 32);		\
    	___res += (___x & 0xffffffff) * (___m >> 32);			\
    	___t = (___res < ___t) ? (1ULL << 32) : 0;			\
    	___res = (___res >> 32) + ___t;					\
    	___res += (___m >> 32) * (___x >> 32);				\
    	___res /= ___p;							\
    									\
    	/* Now sanitize and optimize what we've got. */			\
    	if (~0ULL % (___b / (___b & -___b)) == 0) {			\
    		/* special case, can be simplified to ... */		\
    		___n /= (___b & -___b);					\
    		___m = ~0ULL / (___b / (___b & -___b));			\
    		___p = 1;						\
    		___bias = 1;						\
    	} else if (___res != ___x / ___b) {				\
    		/*							\
    		 * We can't get away without a bias to compensate	\
    		 * for bit truncation errors.  To avoid it we'd need an	\
    		 * additional bit to represent m which would overflow	\
    		 * a 64-bit variable.					\
    		 *							\
    		 * Instead we do m = p / b and n / b = (n * m + m) / p.	\
    		 */							\
    		___bias = 1;						\
    		/* Compute m = (p << 64) / b */				\
    		___m = (~0ULL / ___b) * ___p;				\
    		___m += ((~0ULL % ___b + 1) * ___p) / ___b;		\
    	} else {							\
    		/*							\
    		 * Reduce m / p, and try to clear bit 31 of m when	\
    		 * possible, otherwise that'll need extra overflow	\
    		 * handling later.					\
    		 */							\
    		uint32_t ___bits = -(___m & -___m);			\
    		___bits |= ___m >> 32;					\
    		___bits = (~___bits) << 1;				\
    		/*							\
		 * If ___bits == 0 then setting bit 31 is unavoidable.	\
    		 * Simply apply the maximum possible reduction in that	\
    		 * case. Otherwise the MSB of ___bits indicates the	\
    		 * best reduction we should apply.			\
    		 */							\
    		if (!___bits) {						\
    			___p /= (___m & -___m);				\
    			___m /= (___m & -___m);				\
    		} else {						\
    			___p >>= ilog2(___bits);			\
    			___m >>= ilog2(___bits);			\
    		}							\
    		/* No bias needed. */					\
    		___bias = 0;						\
    	}								\
    									\
    	/*								\
    	 * Now we have a combination of 2 conditions:			\
    	 *								\
    	 * 1) whether or not we need to apply a bias, and		\
    	 *								\
    	 * 2) whether or not there might be an overflow in the cross	\
    	 *    product determined by (___m & ((1 << 63) | (1 << 31))).	\
    	 *								\
    	 * Select the best way to do (m_bias + m * n) / (1 << 64).	\
    	 * From now on there will be actual runtime code generated.	\
    	 */								\
    	___res = __arch_xprod_64(___m, ___n, ___bias);			\
    									\
    	___res /= ___p;							\
    })
    
    #ifndef __arch_xprod_64
    /*
     * Default C implementation for __arch_xprod_64()
     *
     * Prototype: uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
     * Semantic:  retval = ((bias ? m : 0) + m * n) >> 64
     *
     * The product is a 128-bit value, scaled down to 64 bits.
     * Assuming constant propagation to optimize away unused conditional code.
     * Architectures may provide their own optimized assembly implementation.
     */
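/*
 * Illustrative note (not from the original header): with m and n split
 * into 32-bit halves, the 128-bit product decomposes as
 *
 *	m * n = (m_hi * n_hi << 64)
 *	      + (m_hi * n_lo << 32) + (m_lo * n_hi << 32)
 *	      +  m_lo * n_lo
 *
 * so (m * n) >> 64 is m_hi * n_hi plus the carries that propagate out of
 * bit 63 from the two middle terms and the low term.  The default
 * implementation below tracks those carries with 64-bit arithmetic only,
 * and the optional bias is folded into the low partial product.
 */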
    static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
    {
    	uint32_t m_lo = m;
    	uint32_t m_hi = m >> 32;
    	uint32_t n_lo = n;
    	uint32_t n_hi = n >> 32;
    	uint64_t res, tmp;
    
    	if (!bias) {
    		res = ((uint64_t)m_lo * n_lo) >> 32;
    	} else if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
    		/* there can't be any overflow here */
    		res = (m + (uint64_t)m_lo * n_lo) >> 32;
    	} else {
    		res = m + (uint64_t)m_lo * n_lo;
    		tmp = (res < m) ? (1ULL << 32) : 0;
    		res = (res >> 32) + tmp;
    	}
    
    	if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
    		/* there can't be any overflow here */
    		res += (uint64_t)m_lo * n_hi;
    		res += (uint64_t)m_hi * n_lo;
    		res >>= 32;
    	} else {
    		tmp = res += (uint64_t)m_lo * n_hi;
    		res += (uint64_t)m_hi * n_lo;
    		tmp = (res < tmp) ? (1ULL << 32) : 0;
    		res = (res >> 32) + tmp;
    	}
    
    	res += (uint64_t)m_hi * n_hi;
    
    	return res;
    }
    #endif
    
    #ifndef __div64_32
    
extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
    #endif
    
    
/* The unnecessary pointer compare is there
 * to check for type safety (n must be 64-bit).
 */
    # define do_div(n,base) ({				\
    	uint32_t __base = (base);			\
    	uint32_t __rem;					\
	(void)(((typeof((n)) *)0) == ((uint64_t *)0));	\
    	if (__builtin_constant_p(__base) &&		\
    	    is_power_of_2(__base)) {			\
    		__rem = (n) & (__base - 1);		\
    		(n) >>= ilog2(__base);			\
    	} else if (__div64_const32_is_OK &&		\
    		   __builtin_constant_p(__base) &&	\
    		   __base != 0) {			\
    		uint32_t __res_lo, __n_lo = (n);	\
    		(n) = __div64_const32(n, __base);	\
    		/* the remainder can be computed with 32-bit regs */ \
    		__res_lo = (n);				\
    		__rem = __n_lo - __res_lo * __base;	\
	} else if (likely(((n) >> 32) == 0)) {		\
		__rem = (uint32_t)(n) % __base;		\
		(n) = (uint32_t)(n) / __base;		\
	} else						\
		__rem = __div64_32(&(n), __base);	\
	__rem;						\
 })
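/*
 * Illustrative note (not from the original header): the pointer compare
 * at the top of do_div() turns misuse into a compile-time diagnostic
 * instead of silent truncation, e.g.
 *
 *	uint32_t x = 100;
 *	do_div(x, 7);	// typically warns about comparing distinct pointer types
 *
 * because (uint32_t *)0 == (uint64_t *)0 compares incompatible pointers.
 */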
    
    
    #else /* BITS_PER_LONG == ?? */
    
    # error do_div() does not yet support the C64
    
    #endif /* BITS_PER_LONG */
    
    
/* Wrapper for do_div(). Doesn't modify the dividend and returns
 * the result, not the remainder.
 */
    static inline uint64_t lldiv(uint64_t dividend, uint32_t divisor)
    {
    	uint64_t __res = dividend;
    	do_div(__res, divisor);
    	return(__res);
    }
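/*
 * Illustrative usage sketch (not part of the original header): unlike
 * do_div(), lldiv() leaves its argument untouched and returns the
 * quotient:
 *
 *	uint64_t bytes = 1536;
 *	uint64_t kib = lldiv(bytes, 1024);	// kib == 1, bytes still 1536
 */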
    
    
    #endif /* _ASM_GENERIC_DIV64_H */