/* gmp-impl.h -- internal GMP declarations (continuation of a larger header). */
/* Internal mpn entrypoints.  Each "#define name __MPN(name)" maps the
   internal name onto its prefixed linkage symbol to keep the global
   namespace clean; the prototype that follows uses the plain name.
   Semantics live in the corresponding source files, not here.  */
__GMP_DECLSPEC mp_limb_t mpn_mul_1c __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t));
#define mpn_mul_2 __MPN(mul_2)
/* NOTE(review): uses the older _PROTO wrapper and lacks __GMP_DECLSPEC,
   unlike its neighbours -- looks historical; __GMP_DECLSPEC affects DLL
   export, so confirm before unifying.  */
mp_limb_t mpn_mul_2 _PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
#define mpn_mul_basecase __MPN(mul_basecase)
__GMP_DECLSPEC void mpn_mul_basecase __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t));
#define mpn_sqr_n __MPN(sqr_n)
__GMP_DECLSPEC void mpn_sqr_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t));
#define mpn_sqr_basecase __MPN(sqr_basecase)
__GMP_DECLSPEC void mpn_sqr_basecase __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t));
#define mpn_sub_nc __MPN(sub_nc)
__GMP_DECLSPEC mp_limb_t mpn_sub_nc __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t));
#define mpn_submul_1c __MPN(submul_1c)
__GMP_DECLSPEC mp_limb_t mpn_submul_1c __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t));
/* Convenience pointer type for the random-state struct.  */
typedef __gmp_randstate_struct *gmp_randstate_ptr;
#define _gmp_rand __gmp_rand
/* Internal random generator; presumably fills the mp_ptr with the
   requested number of random bits -- confirm against its definition.  */
__GMP_DECLSPEC void _gmp_rand _PROTO ((mp_ptr, gmp_randstate_t, unsigned long int));
/* __gmp_rands is the global state for the old-style random functions, and
is also used in the test programs (hence the __GMP_DECLSPEC).
There's no seeding here, so mpz_random etc will generate the same
sequence every time. This is not unlike the C library random functions
if you don't seed them, so perhaps it's acceptable. Digging up a seed
from /dev/random or the like would work on many systems, but might
encourage a false confidence, since it'd be pretty much impossible to do
something that would work reliably everywhere. In any case the new style
functions are recommended to applications which care about randomness, so
the old functions aren't too important. */
/* Nonzero once __gmp_rands has been initialized.  Plain char with no
   locking, so the lazy initialization in RANDS is not safe against
   concurrent first use.  */
__GMP_DECLSPEC extern char __gmp_rands_initialized;
__GMP_DECLSPEC extern gmp_randstate_t __gmp_rands;
/* Expression evaluating to __gmp_rands, lazily running
   gmp_randinit_default on first use.  The comma operator (with dummy 0
   results on both conditional arms) keeps the whole thing usable wherever
   an expression is wanted.  */
#define RANDS \
((__gmp_rands_initialized ? 0 \
: (__gmp_rands_initialized = 1, \
gmp_randinit_default (__gmp_rands), 0)), \
__gmp_rands)
/* Used by the test programs: release the memory held by the old-style
   global random state, if it was ever initialized, and mark it
   uninitialized again so a later RANDS re-creates it.  */
#define RANDS_CLEAR()                           \
  do {                                          \
    if (__gmp_rands_initialized != 0)           \
      {                                         \
        gmp_randclear (__gmp_rands);            \
        __gmp_rands_initialized = 0;            \
      }                                         \
  } while (0)
/* kara uses n+1 limbs of temporary space and then recurses with the
balance, so need (n+1) + (ceil(n/2)+1) + (ceil(n/4)+1) + ... */
/* The BITS_PER_MP_LIMB term presumably bounds the log2(n)-many "+1"
   terms of that series -- TODO confirm against the kara source.  */
#define MPN_KARA_MUL_N_TSIZE(n) (2*((n)+BITS_PER_MP_LIMB))
#define MPN_KARA_SQR_N_TSIZE(n) (2*((n)+BITS_PER_MP_LIMB))
/* toom3 uses 4*(ceil(n/3)) of temporary space and then recurses with the
balance either into itself or kara. The following might be
overestimates. */
#define MPN_TOOM3_MUL_N_TSIZE(n) (2*(n) + 3*BITS_PER_MP_LIMB)
#define MPN_TOOM3_SQR_N_TSIZE(n) (2*(n) + 3*BITS_PER_MP_LIMB)
/* need 2 so that n2>=1 */
#define MPN_KARA_MUL_N_MINSIZE 2
#define MPN_KARA_SQR_N_MINSIZE 2
/* Need l>=1, ls>=1, and 2*ls > l (the latter for the tD MPN_INCR_U) */
#define MPN_TOOM3_MUL_N_MINSIZE 11
#define MPN_TOOM3_SQR_N_MINSIZE 11
/* Recursive multiplication kernels (Karatsuba, Toom-3), FFT multiply,
   and division entrypoints.  Only linkage (__MPN prefixing) and
   prototypes are established here.  */
#define mpn_sqr_diagonal __MPN(sqr_diagonal)
void mpn_sqr_diagonal _PROTO ((mp_ptr, mp_srcptr, mp_size_t));
#define mpn_kara_mul_n __MPN(kara_mul_n)
/* Trailing mp_ptr is scratch, sized per MPN_KARA_MUL_N_TSIZE above.  */
void mpn_kara_mul_n _PROTO((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_ptr));
#define mpn_kara_sqr_n __MPN(kara_sqr_n)
void mpn_kara_sqr_n _PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_ptr));
#define mpn_toom3_mul_n __MPN(toom3_mul_n)
/* Trailing mp_ptr is scratch, sized per MPN_TOOM3_MUL_N_TSIZE above.  */
void mpn_toom3_mul_n _PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t,mp_ptr));
#define mpn_toom3_sqr_n __MPN(toom3_sqr_n)
void mpn_toom3_sqr_n _PROTO((mp_ptr, mp_srcptr, mp_size_t, mp_ptr));
#define mpn_fft_best_k __MPN(fft_best_k)
/* ATTRIBUTE_CONST: result depends only on the arguments.  */
int mpn_fft_best_k _PROTO ((mp_size_t n, int sqr)) ATTRIBUTE_CONST;
#define mpn_mul_fft __MPN(mul_fft)
void mpn_mul_fft _PROTO ((mp_ptr op, mp_size_t pl,
mp_srcptr n, mp_size_t nl,
mp_srcptr m, mp_size_t ml,
int k));
#define mpn_mul_fft_full __MPN(mul_fft_full)
void mpn_mul_fft_full _PROTO ((mp_ptr op,
mp_srcptr n, mp_size_t nl,
mp_srcptr m, mp_size_t ml));
#define mpn_fft_next_size __MPN(fft_next_size)
mp_size_t mpn_fft_next_size _PROTO ((mp_size_t pl, int k)) ATTRIBUTE_CONST;
#define mpn_sb_divrem_mn __MPN(sb_divrem_mn)
/* "sb" presumably = schoolbook, "dc" = divide-and-conquer division.  */
mp_limb_t mpn_sb_divrem_mn _PROTO ((mp_ptr, mp_ptr, mp_size_t,
mp_srcptr, mp_size_t));
#define mpn_dc_divrem_n __MPN(dc_divrem_n)
mp_limb_t mpn_dc_divrem_n _PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t));
/* #define mpn_tdiv_q __MPN(tdiv_q) */
/* void mpn_tdiv_q _PROTO ((mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t)); */
#define mpz_divexact_gcd __gmpz_divexact_gcd
/* Exact division; name suggests d is a gcd of the operands -- confirm
   the precondition against its definition.  */
void mpz_divexact_gcd _PROTO ((mpz_ptr q, mpz_srcptr a, mpz_srcptr d));
#define mpz_inp_str_nowhite __gmpz_inp_str_nowhite
/* Prototype only available when gmp.h saw <stdio.h> (FILE in scope).  */
#ifdef _GMP_H_HAVE_FILE
size_t mpz_inp_str_nowhite _PROTO ((mpz_ptr x, FILE *stream, int base, int c, size_t nread));
#endif
#define mpn_divisible_p __MPN(divisible_p)
/* __GMP_ATTRIBUTE_PURE: no side effects; result from args and memory.  */
int mpn_divisible_p _PROTO ((mp_srcptr ap, mp_size_t asize,
mp_srcptr dp, mp_size_t dsize)) __GMP_ATTRIBUTE_PURE;
#define mpn_rootrem __gmpn_rootrem
mp_size_t mpn_rootrem _PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t));
/* from gmp.h */
/* power/powerpc: reuse gmp.h's __GMPN_COPY_INCR, whose loop shape suits
   those CPUs (see the MPN_ZERO block comment further below), adding the
   internal assertions.  */
#if HAVE_HOST_CPU_FAMILY_power || HAVE_HOST_CPU_FAMILY_powerpc
#define MPN_COPY_INCR(dst, src, size) \
do { \
ASSERT ((size) >= 0); \
ASSERT (MPN_SAME_OR_INCR_P (dst, src, size)); \
__GMPN_COPY_INCR (dst, src, size); \
} while (0)
#endif
/* Cray: a plain int index (faster there, per the inline comment) plus an
   ivdep pragma so the compiler can vectorize despite the pointer
   arguments.  Note (n) is re-evaluated every loop iteration.  */
#if defined (_CRAY)
#define MPN_COPY_INCR(dst, src, n) \
do { \
int __i; /* Faster on some Crays with plain int */ \
_Pragma ("_CRI ivdep"); \
for (__i = 0; __i < (n); __i++) \
(dst)[__i] = (src)[__i]; \
} while (0)
#endif
/* Native assembler incrementing copy, if the configuration provides one.  */
#define mpn_copyi __MPN(copyi)
void mpn_copyi _PROTO ((mp_ptr, mp_srcptr, mp_size_t));
/* Prefer native mpn_copyi for MPN_COPY_INCR when no CPU-specific macro
   above has already claimed the name.  */
#if ! defined (MPN_COPY_INCR) && HAVE_NATIVE_mpn_copyi
#define MPN_COPY_INCR(dst, src, size) \
do { \
ASSERT ((size) >= 0); \
ASSERT (MPN_SAME_OR_INCR_P (dst, src, size)); \
mpn_copyi (dst, src, size); \
} while (0)
#endif
/* Copy N limbs from SRC to DST incrementing, N==0 allowed. */
/* Generic fallback.  The next limb is loaded before the current one is
   stored (a one-deep software pipeline), which can help scheduling on
   simple compilers.  Overlap is allowed only as permitted by
   MPN_SAME_OR_INCR_P (defined elsewhere).  */
#if ! defined (MPN_COPY_INCR)
#define MPN_COPY_INCR(dst, src, n) \
do { \
ASSERT ((n) >= 0); \
ASSERT (MPN_SAME_OR_INCR_P (dst, src, n)); \
if ((n) != 0) \
{ \
mp_size_t __n = (n) - 1; \
mp_ptr __dst = (dst); \
mp_srcptr __src = (src); \
mp_limb_t __x; \
__x = *__src++; \
if (__n != 0) \
{ \
do \
{ \
*__dst++ = __x; \
__x = *__src++; \
} \
while (--__n); \
} \
*__dst++ = __x; \
} \
} while (0)
#endif
/* As per __GMPN_COPY_INCR in gmp.h. */
/* power/powerpc decrementing copy, written with pre-decrement so the
   compiler can form a tight stu/bdnz style loop (see the MPN_ZERO block
   comment further below for background).  The size argument is expanded
   several times, so callers should pass a simple expression.  */
#if HAVE_HOST_CPU_FAMILY_power || HAVE_HOST_CPU_FAMILY_powerpc
#define MPN_COPY_DECR(dst, src, size) \
do { \
ASSERT ((size) >= 0); \
ASSERT (MPN_SAME_OR_DECR_P (dst, src, size)); \
if ((size) != 0) \
{ \
mp_ptr __dst = (dst) + (size); \
mp_srcptr __src = (src) + (size); \
mp_size_t __size = (size); \
do \
*--__dst = *--__src; \
while (--__size != 0); \
} \
} while (0)
#endif
/* Cray decrementing copy; same vectorization notes as the incrementing
   Cray variant above.  (n) is re-evaluated only once here, in the
   initializer.  */
#if defined (_CRAY)
#define MPN_COPY_DECR(dst, src, n) \
do { \
int __i; /* Faster on some Crays with plain int */ \
_Pragma ("_CRI ivdep"); \
for (__i = (n) - 1; __i >= 0; __i--) \
(dst)[__i] = (src)[__i]; \
} while (0)
#endif
/* Native assembler decrementing copy, if the configuration provides one.  */
#define mpn_copyd __MPN(copyd)
void mpn_copyd _PROTO ((mp_ptr, mp_srcptr, mp_size_t));
/* Prefer native mpn_copyd for MPN_COPY_DECR when no CPU-specific macro
   above has already claimed the name.  */
#if ! defined (MPN_COPY_DECR) && HAVE_NATIVE_mpn_copyd
#define MPN_COPY_DECR(dst, src, size) \
do { \
ASSERT ((size) >= 0); \
ASSERT (MPN_SAME_OR_DECR_P (dst, src, size)); \
mpn_copyd (dst, src, size); \
} while (0)
#endif
/* Copy N limbs from SRC to DST decrementing, N==0 allowed. */
/* Generic fallback, mirror image of the incrementing version: walk down
   from the top limb with the same one-deep load-ahead pipeline.
   NOTE(review): for n==1 the final *__src-- leaves __src one before the
   array start; merely forming such a pointer is formally UB in ISO C,
   though this code relies on it being harmless in practice.  */
#if ! defined (MPN_COPY_DECR)
#define MPN_COPY_DECR(dst, src, n) \
do { \
ASSERT ((n) >= 0); \
ASSERT (MPN_SAME_OR_DECR_P (dst, src, n)); \
if ((n) != 0) \
{ \
mp_size_t __n = (n) - 1; \
mp_ptr __dst = (dst) + __n; \
mp_srcptr __src = (src) + __n; \
mp_limb_t __x; \
__x = *__src--; \
if (__n != 0) \
{ \
do \
{ \
*__dst-- = __x; \
__x = *__src--; \
} \
while (--__n); \
} \
*__dst-- = __x; \
} \
} while (0)
#endif
/* Copy n limbs where the operands are asserted not to partially overlap
   (MPN_SAME_OR_SEPARATE_P is defined elsewhere; presumably dst==src
   exactly or fully disjoint).  Direction then doesn't matter, so the
   incrementing copy is used.  */
#ifndef MPN_COPY
#define MPN_COPY(d,s,n) \
do { \
ASSERT (MPN_SAME_OR_SEPARATE_P (d, s, n)); \
MPN_COPY_INCR (d, s, n); \
} while (0)
#endif
/* Set {dst,size} to the limbs of {src,size} in reverse order. */
/* src is walked down from its top limb while dst walks up; any overlap
   is forbidden (asserted).  CRAY_Pragma (defined elsewhere) emits the
   ivdep hint on Crays and is presumably a no-op elsewhere.  */
#define MPN_REVERSE(dst, src, size) \
do { \
mp_ptr __dst = (dst); \
mp_size_t __size = (size); \
mp_srcptr __src = (src) + __size - 1; \
mp_size_t __i; \
ASSERT ((size) >= 0); \
ASSERT (! MPN_OVERLAP_P (dst, size, src, size)); \
CRAY_Pragma ("_CRI ivdep"); \
for (__i = 0; __i < __size; __i++) \
{ \
*__dst = *__src; \
__dst++; \
__src--; \
} \
} while (0)
/* Zero n limbs at dst.
For power and powerpc we want an inline stu/bdnz loop for zeroing. On
ppc630 for instance this is optimal since it can sustain only 1 store per
cycle.
gcc 2.95.x (for powerpc64 -maix64, or powerpc32) doesn't recognise the
"for" loop in the generic code below can become stu/bdnz. The do/while
here helps it get to that. The same caveat about plain -mpowerpc64 mode
applies here as to __GMPN_COPY_INCR in gmp.h.
xlc 3.1 already generates stu/bdnz from the generic C, and does so from
this loop too.
Enhancement: GLIBC does some trickery with dcbz to zero whole cache lines
at a time. MPN_ZERO isn't all that important in GMP, so it might be more
trouble than it's worth to do the same, though perhaps a call to memset
would be good when on a GNU system. */
#if HAVE_HOST_CPU_FAMILY_power || HAVE_HOST_CPU_FAMILY_powerpc
/* The (dst)-1 start plus pre-increment gives the *++p store shape the
   comment above says gcc turns into stu/bdnz.  */
#define MPN_ZERO(dst, n) \
do { \
ASSERT ((n) >= 0); \
if ((n) != 0) \
{ \
mp_ptr __dst = (dst) - 1; \
mp_size_t __n = (n); \
do \
*++__dst = 0; \
while (--__n); \
} \
} while (0)
#endif
/* Generic MPN_ZERO fallback: zero n limbs at dst, n==0 allowed.  The
   do/while shape mirrors the power/powerpc variant above (per its block
   comment, it helps some compilers form a tight store loop).
   NOTE(review): the tail of this macro had been overwritten by stray
   non-source text (code-viewer UI junk) that left the header
   syntactically invalid; the conventional closing braces, the while(0)
   terminator, and the #endif are restored here.  */
#ifndef MPN_ZERO
#define MPN_ZERO(dst, n) \
do { \
ASSERT ((n) >= 0); \
if ((n) != 0) \
{ \
mp_ptr __dst = (dst); \
mp_size_t __n = (n); \
do \
*__dst++ = 0; \
while (--__n); \
} \
} while (0)
#endif