📄 c_lip.c
字号:
#include <NTL/lip.h>
#include <NTL/IsFinite.h>

#include <stdlib.h>
#include <stdio.h>
#include <math.h>

#if (defined(NTL_CXX_ONLY) && !defined(__cplusplus))
#error "CXX_ONLY flag set...must use C++ compiler"
#endif

/* Fast test to determine if allocation is necessary.
 * The word at c[-1] stores (allocated_size << 1 | flag), so the
 * allocated length is recovered by shifting right one bit. */
#define MustAlloc(c, len) (!(c) || ((c)[-1] >> 1) < (len))

/* Print a fatal-error message to stderr and abort.
 * Never returns. */
static void zhalt(const char *c)
{
   fprintf(stderr,"fatal error:\n %s\nexit...\n",c);
   fflush(stderr);
   abort();
}

/* Crossover points (in digits) above which the GMP routines
 * are used instead of the native LIP routines. */
#define NTL_GMP_MUL_CROSS (4)
#define NTL_GMP_SQR_CROSS (4)
#define NTL_GMP_DIV_CROSS (4)
#define NTL_GMP_REM_CROSS (4)
#define NTL_GMP_QREM_CROSS (4)
#define NTL_GMP_SQRT_CROSS (2)
#define NTL_GMP_GCD_CROSS (1)
#define NTL_GMP_XGCD_CROSS (1)
#define NTL_GMP_INVMOD_CROSS (1)

#ifdef NTL_GMP_HACK

/* using GMP */

#ifdef NTL_SINGLE_MUL
#error "sorry...not implemented"
#endif

#if (NTL_NBITS != NTL_NBITS_MAX)
#error "sorry...not implemented"
#endif

int _ntl_gmp_hack = 1;

#include <gmp.h>

/* Scratch limb buffer shared by the GMP-backed routines. */
static mp_limb_t *gspace_data = 0;
static long gspace_size = 0;

/* Ensure the scratch buffer holds at least n limbs.
 * Grows geometrically (factor ~1.1 plus slack) to amortize
 * reallocation cost; halts the program on allocation failure. */
static void alloc_gspace(long n)
{
   if (n <= gspace_size) return;

   if (n <= gspace_size*1.1)
      n = ((long) (gspace_size*1.1)) + 10;

   if (gspace_data)
      gspace_data = (mp_limb_t *) realloc(gspace_data, n*sizeof(mp_limb_t));
   else
      gspace_data = (mp_limb_t *) malloc(n*sizeof(mp_limb_t));

   if (!gspace_data) zhalt("alloc_gspace: out of memory");

   gspace_size = n;
}

#define NTL_GSPACE(n) \
{ long _n = (n); if (_n > gspace_size) alloc_gspace(_n); }

/* Shared mpz_t temporaries, lazily initialized via GT_INIT. */
static mpz_t gt_1, gt_2, gt_3, gt_4, gt_5;
static long gt_init = 0;

/* One-time initialization of the shared mpz temporaries. */
static
void do_gt_init(void)
{
   gt_init = 1;
   mpz_init(gt_1);
   mpz_init(gt_2);
   mpz_init(gt_3);
   mpz_init(gt_4);
   mpz_init(gt_5);
}

#define GT_INIT { if (!gt_init) do_gt_init(); }

#include "lip_gmp_aux.c"

/* Check that we really have it...otherwise, some compilers
 * only issue warnings for missing functions. */

#ifndef HAVE_LIP_GMP_AUX
#error "do not have lip_gmp_aux.h"
#endif

/* Convert a LIP big integer x (sign-magnitude: x[0] is the signed
 * digit count, digits follow) into the GMP integer y.
 * A null x, or the representation {1, 0}, is treated as zero. */
static
void lip_to_mpz(const long *x, mpz_t y)
{
   long sx;
   long neg;
   long gsx;

   if (!x || (x[0] == 1 && x[1] == 0)) {
      mpz_set_ui(y, 0);
      return;
   }

   sx = x[0];
   if (sx < 0) {
      neg = 1;
      sx = -sx;
   }
   else
      neg = 0;

   gsx = L_TO_G(sx);

   if (gsx > y->_mp_alloc) _mpz_realloc(y, gsx);

   lip_to_gmp(x+1, y->_mp_d, sx);
   /* strip high zero limbs; GMP requires a normalized size */
   while (y->_mp_d[gsx-1] == 0) gsx--;
   if (neg) gsx = -gsx;
   y->_mp_size = gsx;
}

/* Convert the GMP integer y into the LIP big integer *x,
 * reallocating *x if it is too small (MustAlloc test). */
static
void mpz_to_lip(long **x, mpz_t y)
{
   long gsy;
   long neg;
   long sx;
   long *xp;

   gsy = y->_mp_size;

   if (gsy == 0) {
      _ntl_zzero(x);
      return;
   }

   if (gsy < 0) {
      neg = 1;
      gsy = -gsy;
   }
   else
      neg = 0;

   sx = G_TO_L(gsy);

   xp = *x;
   if (MustAlloc(xp, sx)) {
      _ntl_zsetlength(x, sx);
      xp = *x;
   }

   gmp_to_lip1(xp+1, y->_mp_d, sx);
   /* strip high zero digits; LIP requires a normalized length */
   while (xp[sx] == 0) sx--;
   if (neg) sx = -sx;
   xp[0] = sx;
}

/* Compute *x = a^e mod n using GMP's mpz_powm.
 * A negative exponent is handled by inverting the result mod n;
 * halts if the modular inverse does not exist. */
void _ntl_gmp_powermod(long **x, long *a, long *e, long *n)
{
   long neg = 0;

   GT_INIT

   lip_to_mpz(a, gt_2);
   lip_to_mpz(e, gt_3);
   lip_to_mpz(n, gt_4);

   if (mpz_sgn(gt_3) < 0) {
      mpz_neg(gt_3, gt_3);
      neg = 1;
   }

   mpz_powm(gt_1, gt_2, gt_3, gt_4);

   /* This fixes a bug in GMP 3.0.1 */
   if (mpz_cmp(gt_1, gt_4) >= 0) mpz_sub(gt_1, gt_1, gt_4);

   if (neg) {
      if (!mpz_invert(gt_1, gt_1, gt_4))
         zhalt("Modular inverse not defined");
   }

   mpz_to_lip(x, gt_1);
}

#else

/* not using GMP */

int _ntl_gmp_hack = 0;

#endif

#define DENORMALIZE NTL_FDOUBLE_PRECISION

/* _ntl_zsetlength allocates a multiple of MIN_SETL digits */
#define MIN_SETL (4)

#if (defined(NTL_SINGLE_MUL) && !defined(NTL_FAST_INT_MUL))
/* low word of a*b via double arithmetic (single-mul representation) */
#define MulLo(rres,a,b) \
{ \
   double _x = ((double) a) * ((double) b) + (DENORMALIZE); \
   unsigned long _x_lo; \
   NTL_FetchLo(_x_lo, _x); \
   rres = _x_lo; \
}
#else
#define MulLo(rres,a,b) rres = (a)*(b)
#endif

#if (defined(NTL_SINGLE_MUL))

/* (a, t) <- low/high radix-2^26 digits of a + t + b*d,
 * extracted from the IEEE double representation. */
#define zaddmulp(a,b,d,t) \
{ \
   long _a = (a), _b = (b), _d = (d), _t = (t); \
   unsigned long __lhi, __llo;\
   double __lx = ((double) ((_a) + (_t))) + ((double) _b)*((double) _d); \
   __lx += (DENORMALIZE); \
   NTL_FetchHiLo(__lhi,__llo,__lx);\
   __lhi = ((__lhi<<6)|(__llo>>26)) & 0x3FFFFFF; \
__llo &= 0x3FFFFFF; \ (a) = __llo;\ (t) = __lhi;\}#define zxmulp(a,b,d,t) \{ \ long _b = (b), _d = (d), _t = (t); \ unsigned long __lhi, __llo;\ double __lx = ((double) ((_t))) + ((double) _b)*((double) _d); \ __lx += (DENORMALIZE); \ NTL_FetchHiLo(__lhi,__llo,__lx);\ __lhi = ((__lhi<<6)|(__llo>>26)) & 0x3FFFFFF; \ __llo &= 0x3FFFFFF; \ (a) = __llo;\ (t) = __lhi;\}#define zaddmulpsq(a,b,t) \{ \ long _a = (a), _b = (b); \ unsigned long __lhi, __llo; \ double __lx = ((double) (_a)) + ((double) _b)*((double) _b); \ __lx += (DENORMALIZE); \ NTL_FetchHiLo(__lhi,__llo,__lx);\ __lhi = ((__lhi<<6)|(__llo>>26)) & 0x3FFFFFF; \ __llo &= 0x3FFFFFF; \ (a) = __llo;\ (t) = __lhi;\}#else#if (defined(NTL_LONG_LONG))#ifndef NTL_LONG_LONG_TYPE#define NTL_LONG_LONG_TYPE long long#endif#define zaddmulp(a, b, d, t) { \ NTL_LONG_LONG_TYPE _pp = ((NTL_LONG_LONG_TYPE) (b)) * ((NTL_LONG_LONG_TYPE) (d)) + ((t)+(a)); \ (a) = ((long)(_pp)) & NTL_RADIXM; \ (t) = (long) (_pp >> NTL_NBITS); \} #define zxmulp(a, b, d, t) { \ NTL_LONG_LONG_TYPE _pp = ((NTL_LONG_LONG_TYPE) (b)) * ((NTL_LONG_LONG_TYPE) (d)) + (t); \ (a) = ((long)(_pp)) & NTL_RADIXM; \ (t) = (long) (_pp >> NTL_NBITS); \} #define zaddmulpsq(a,b,t) { \ NTL_LONG_LONG_TYPE _pp = ((NTL_LONG_LONG_TYPE) (b)) * ((NTL_LONG_LONG_TYPE) (b)) + (a); \ (a) = ((long)(_pp)) & NTL_RADIXM; \ (t) = (long) (_pp >> NTL_NBITS); \}#elif (defined(NTL_AVOID_FLOAT)) #define zaddmulp( a, b, d, t) { \ unsigned long _b1 = b & NTL_RADIXROOTM; \ unsigned long _d1 = d & NTL_RADIXROOTM; \ unsigned long _bd,_b1d1,_m,_aa= (a) + (t); \ unsigned long _ld = (d>>NTL_NBITSH); \ unsigned long _lb = (b>>NTL_NBITSH); \ \ _bd=_lb*_ld; \ _b1d1=_b1*_d1; \ _m=(_lb+_b1)*(_ld+_d1) - _bd - _b1d1; \ _aa += ( _b1d1+ ((_m&NTL_RADIXROOTM)<<NTL_NBITSH)); \ (t) = (_aa >> NTL_NBITS) + _bd + (_m>>NTL_NBITSH); \ (a) = _aa & NTL_RADIXM; \}#define zxmulp( a, b, d, t) { \ unsigned long _b1 = b & NTL_RADIXROOTM; \ unsigned long _d1 = d & NTL_RADIXROOTM; \ unsigned long _bd,_b1d1,_m,_aa= (t); \ 
unsigned long _ld = (d>>NTL_NBITSH); \ unsigned long _lb = (b>>NTL_NBITSH); \ \ _bd=_lb*_ld; \ _b1d1=_b1*_d1; \ _m=(_lb+_b1)*(_ld+_d1) - _bd - _b1d1; \ _aa += ( _b1d1+ ((_m&NTL_RADIXROOTM)<<NTL_NBITSH)); \ (t) = (_aa >> NTL_NBITS) + _bd + (_m>>NTL_NBITSH); \ (a) = _aa & NTL_RADIXM; \}#define zaddmulpsq(_a, _b, _t) \{ \ long _lb = (_b); \ long _b1 = (_b) & NTL_RADIXROOTM; \ long _aa = (_a) + _b1 * _b1; \ \ _b1 = (_b1 * (_lb >>= NTL_NBITSH) << 1) + (_aa >> NTL_NBITSH); \ _aa = (_aa & NTL_RADIXROOTM) + ((_b1 & NTL_RADIXROOTM) << NTL_NBITSH); \ (_t) = _lb * _lb + (_b1 >> NTL_NBITSH) + (_aa >> NTL_NBITS); \ (_a) = (_aa & NTL_RADIXM); \}#else#if (NTL_BITS_PER_LONG <= NTL_NBITS + 2)#if (NTL_ARITH_RIGHT_SHIFT)/* value right-shifted is -1..1 */#define zaddmulp(a, b, d, t) \{ \ long _a = (a), _b = (b), _d = (d), _t = (t); \ long _t1 = _b*_d; \ long _t2 = (long) ( ((double) _b)*(((double) _d)*NTL_FRADIX_INV) ); \ _t2 = _t2 + ((_t1 - (_t2 << NTL_NBITS)) >> NTL_NBITS); \ _t1 = (_t1 & NTL_RADIXM) + _a +_t; \ (t) = _t2 + (((unsigned long)_t1) >> NTL_NBITS); \ (a) = _t1 & NTL_RADIXM; \}#define zxmulp(a, b, d, t) \{ \ long _b = (b), _d = (d), _t = (t); \ long _t1 = _b*_d + _t; \ long _t2 = (long) ( ((double) _b)*(((double) _d)*NTL_FRADIX_INV) ) - 1; \ (t) = _t2 + (((unsigned long)(_t1 - (_t2 << NTL_NBITS))) >> NTL_NBITS); \ (a) = _t1 & NTL_RADIXM; \}/* value shifted is -1..1 */#define zaddmulpsq(a, b, t) \{ \ long _a = (a), _b = (b); \ long _t1 = _b*_b; \ long _t2 = (long) ( ((double) _b)*(((double) _b)*NTL_FRADIX_INV) ); \ _t2 = _t2 + ((_t1 - (_t2 << NTL_NBITS)) >> NTL_NBITS); \ _t1 = (_t1 & NTL_RADIXM) + _a; \ (t) = _t2 + (((unsigned long)_t1) >> NTL_NBITS); \ (a) = _t1 & NTL_RADIXM; \}#define zam_decl double _ds; long _hi, _lo, _s;#define zam_init(b,s) \{ \ long _b = (b); \ _s = (s); \ _ds = ((_s << 1)+1)*(NTL_FRADIX_INV/2.0); \ _lo = _b*_s; \ _hi = (long) (((double) _b)*_ds); \}/* value shifted is 0..3 */#define zam_loop(a,t,nb) \{ \ long _a = (a), _t = (t), _nb = (nb); \ long 
_vv; \ double _yy; \ _vv = _nb*_s; \ _yy = ((double) _nb)*_ds; \ _lo = _lo + _a + _t; \ _hi--; \ (t) = _hi + (((unsigned long)(_lo - (_hi<<NTL_NBITS))) >> NTL_NBITS); \ (a) = _lo & NTL_RADIXM; \ _lo = _vv; \ _hi = (long) _yy; \}/* shift is -1..+1 */#define zsx_loop(a,t,nb) \{ \ long _t = (t), _nb = (nb); \ long _vv; \ double _yy; \ _vv = _nb*_s; \ _yy = ((double) _nb)*_ds; \ _lo = _lo + _t; \ (t) = _hi + ((_lo - (_hi<<NTL_NBITS)) >> NTL_NBITS); \ (a) = _lo & NTL_RADIXM; \ _lo = _vv; \ _hi = (long) _yy; \}/* value shifted is -2..+1 */#define zam_subloop(a,t,nb) \{ \ long _a = (a), _t = (t), _nb = (nb); \ long _vv; \ double _yy; \ _vv = _nb*_s; \ _yy = ((double) _nb)*_ds; \ _lo = _a + _t - _lo; \ (t) = ((_lo + (_hi<<NTL_NBITS)) >> NTL_NBITS) - _hi; \ (a) = _lo & NTL_RADIXM; \ _lo = _vv; \ _hi = (long) _yy; \}/* value shifted is 0..3 */#define zam_finish(a,t) \{ \ long _a = (a), _t = (t); \ _lo = _lo + _a + _t; \ _hi--; \ (t) = _hi + (((unsigned long)(_lo - (_hi<<NTL_NBITS))) >> NTL_NBITS); \ (a) = _lo & NTL_RADIXM; \}/* value shifted is -1..+1 */#define zsx_finish(a,t) \{ \ long _t = (t); \ _lo = _lo + _t; \ (t) = _hi + ((_lo - (_hi<<NTL_NBITS)) >> NTL_NBITS); \ (a) = _lo & NTL_RADIXM; \}/* value shifted is -2..+1 */#define zam_subfinish(a,t) \{ \ long _a = (a), _t = (t); \ _lo = _a + _t - _lo; \ (t) = ((_lo + (_hi<<NTL_NBITS)) >> NTL_NBITS) - _hi; \ (a) = _lo & NTL_RADIXM; \}#else/* right shift is not arithmetic *//* value right-shifted is 0..2 */#define zaddmulp(a, b, d, t) \{ \ long _a = (a), _b = (b), _d = (d), _t = (t); \ long _t1 = _b*_d; \ long _t2 = (long) ( ((double) _b)*(((double) _d)*NTL_FRADIX_INV) ) - 1; \ _t2 = _t2 + ( ((unsigned long) (_t1 - (_t2 << NTL_NBITS))) >> NTL_NBITS ); \ _t1 = (_t1 & NTL_RADIXM) + _a +_t; \ (t) = _t2 + (((unsigned long)_t1) >> NTL_NBITS); \ (a) = _t1 & NTL_RADIXM; \}#define zxmulp(a, b, d, t) \{ \ long _b = (b), _d = (d), _t = (t); \ long _t1 = _b*_d + _t; \ long _t2 = (long) ( ((double) _b)*(((double) _d)*NTL_FRADIX_INV) ) - 
1; \ (t) = _t2 + (((unsigned long)(_t1 - (_t2 << NTL_NBITS))) >> NTL_NBITS); \ (a) = _t1 & NTL_RADIXM; \}/* value shifted is 0..2 */#define zaddmulpsq(a, b, t) \{ \ long _a = (a), _b = (b); \ long _t1 = _b*_b; \ long _t2 = (long) ( ((double) _b)*(((double) _b)*NTL_FRADIX_INV) ) - 1; \ _t2 = _t2 + ( ((unsigned long) (_t1 - (_t2 << NTL_NBITS))) >> NTL_NBITS ); \ _t1 = (_t1 & NTL_RADIXM) + _a; \ (t) = _t2 + (((unsigned long)_t1) >> NTL_NBITS); \ (a) = _t1 & NTL_RADIXM; \}#define zam_decl double _ds; long _hi, _lo, _s;#define zam_init(b,s) \{ \ long _b = (b); \ _s = (s); \ _ds = ((_s << 1)+1)*(NTL_FRADIX_INV/2.0); \ _lo = _b*_s; \ _hi = (long) (((double) _b)*_ds); \}/* value shifted is 0..3 */#define zam_loop(a,t,nb) \{ \ long _a = (a), _t = (t), _nb = (nb); \ long _vv; \ double _yy; \ _vv = _nb*_s; \ _yy = ((double) _nb)*_ds; \ _lo = _lo + _a + _t; \ _hi--; \ (t) = _hi + (((unsigned long)(_lo - (_hi<<NTL_NBITS))) >> NTL_NBITS); \ (a) = _lo & NTL_RADIXM; \ _lo = _vv; \ _hi = (long) _yy; \}/* value shifted is 0..2 */#define zsx_loop(a,t,nb) \{ \ long _t = (t), _nb = (nb); \ long _vv; \ double _yy; \ _vv = _nb*_s; \ _yy = ((double) _nb)*_ds; \ _lo = _lo + _t; \ _hi--; \ (t) = _hi + (((unsigned long)(_lo - (_hi<<NTL_NBITS))) >> NTL_NBITS); \ (a) = _lo & NTL_RADIXM; \ _lo = _vv; \ _hi = (long) _yy; \}/* value shifted is 0..3 */#define zam_subloop(a,t,nb) \{ \ long _a = (a), _t = (t), _nb = (nb); \ long _vv; \ double _yy; \ _vv = _nb*_s; \ _yy = ((double) _nb)*_ds; \ _hi += 2; \ _lo = _a + _t - _lo; \ (t) = (((unsigned long)(_lo + (_hi<<NTL_NBITS))) >> NTL_NBITS) - _hi; \ (a) = _lo & NTL_RADIXM; \ _lo = _vv; \ _hi = (long) _yy; \}/* value shifted is 0..3 */#define zam_finish(a,t) \{ \ long _a = (a), _t = (t); \ _lo = _lo + _a + _t; \ _hi--; \ (t) = _hi + (((unsigned long)(_lo - (_hi<<NTL_NBITS))) >> NTL_NBITS); \ (a) = _lo & NTL_RADIXM; \}/* value shifted is 0..2 */#define zsx_finish(a,t) \{ \ long _a = (a), _t = (t); \ _lo = _lo + _t; \ _hi--; \ (t) = _hi + (((unsigned 
long)(_lo - (_hi<<NTL_NBITS))) >> NTL_NBITS); \ (a) = _lo & NTL_RADIXM; \}/* value shifted is 0..3 */#define zam_subfinish(a,t) \{ \ long _a = (a), _t = (t); \ _hi += 2; \ _lo = _a + _t - _lo; \ (t) = (((unsigned long)(_lo + (_hi<<NTL_NBITS))) >> NTL_NBITS) - _hi; \ (a) = _lo & NTL_RADIXM; \}#endif #else/* NTL_BITS_PER_LONG > NTL_NBITS + 2, and certain optimizations can be made. Useful on 64-bit machines. */#if (NTL_ARITH_RIGHT_SHIFT)/* shift is -1..+3 */#define zaddmulp(a, b, d, t) \{ \ long _a = (a), _b = (b), _d = (d), _t = (t); \ long _t1 = _b*_d + _a + _t; \ long _t2 = (long) ( ((double) _b)*(((double) _d)*NTL_FRADIX_INV) ); \ (t) = _t2 + ((_t1 - (_t2 << NTL_NBITS)) >> NTL_NBITS); \ (a) = _t1 & NTL_RADIXM; \}#define zxmulp(a, b, d, t) \{ \ long _b = (b), _d = (d), _t = (t); \ long _t1 = _b*_d + _t; \ long _t2 = (long) ( ((double) _b)*(((double) _d)*NTL_FRADIX_INV) ); \ (t) = _t2 + ((_t1 - (_t2 << NTL_NBITS)) >> NTL_NBITS); \ (a) = _t1 & NTL_RADIXM; \}/* shift is -1..+2 */#define zaddmulpsq(a, b, t) \{ \ long _a = (a), _b = (b), _t = (t); \ long _t1 = _b*_b + _a; \ long _t2 = (long) ( ((double) _b)*(((double) _b)*NTL_FRADIX_INV) ); \ (t) = _t2 + ((_t1 - (_t2 << NTL_NBITS)) >> NTL_NBITS); \ (a) = _t1 & NTL_RADIXM; \}#define zam_decl double _ds; long _hi, _lo, _s;#define zam_init(b,s) \{ \ long _b = (b); \ _s = (s); \ _ds = _s*NTL_FRADIX_INV; \
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -