📄 g_lip_impl.h
字号:
_ntl_gcopy(r, rr);
}
/*
* DIRT: this implementation of _ntl_gsqrts relies crucially
* on the assumption that the number of bits per limb_t is at least
* equal to the number of bits per long.
*/
long _ntl_gsqrts(long n)
{
   mp_limb_t ndata, rdata;

   /* Returns floor(sqrt(n)) for a nonnegative single-precision n. */

   if (n == 0) {
      return 0;
   }

   /* FIX: the error message previously named "_ntl_sqrts"; report the
      actual function name so diagnostics point to the right routine. */
   if (n < 0) ghalt("negative argument to _ntl_gsqrts");

   ndata = n;

   /* One-limb square root: rdata = floor(sqrt(ndata)); passing 0 for the
      remainder pointer discards the remainder. Relies on the assumption
      (see DIRT note above) that a limb holds at least as many bits as a
      long, so n fits in a single limb. */
   mpn_sqrtrem(&rdata, 0, &ndata, 1);

   return rdata;
}
void _ntl_ggcd(_ntl_gbigint m1, _ntl_gbigint m2, _ntl_gbigint *r)
{
   static _ntl_gbigint s1 = 0, s2 = 0, res = 0;
   long shift1, shift2, shift_min, bits1, bits2, sz1, sz2, sz_res;

   /* Work on |m1| and |m2| in static scratch; the result is nonnegative. */
   _ntl_gcopy(m1, &s1);
   _ntl_gabs(&s1);
   _ntl_gcopy(m2, &s2);
   _ntl_gabs(&s2);

   /* gcd(0, x) = |x| */
   if (ZEROP(s1)) {
      _ntl_gcopy(s2, r);
      return;
   }
   if (ZEROP(s2)) {
      _ntl_gcopy(s1, r);
      return;
   }

   /* Strip trailing zero bits from each operand; the shared power of two
      (the smaller of the two shift counts) is multiplied back in at the
      end. */
   shift1 = _ntl_gmakeodd(&s1);
   shift2 = _ntl_gmakeodd(&s2);
   shift_min = (shift1 <= shift2) ? shift1 : shift2;

   bits1 = _ntl_g2log(s1);
   bits2 = _ntl_g2log(s2);

   sz1 = SIZE(s1);
   sz2 = SIZE(s2);
   sz_res = (sz1 >= sz2) ? sz1 : sz2;

   /* set to max: gmp documentation is unclear on this point */
   _ntl_gsetlength(&res, sz_res);

   /* Pass the operand with at least as many bits as the first argument
      of mpn_gcd. */
   if (bits1 >= bits2)
      SIZE(res) = mpn_gcd(DATA(res), DATA(s1), sz1, DATA(s2), sz2);
   else
      SIZE(res) = mpn_gcd(DATA(res), DATA(s2), sz2, DATA(s1), sz1);

   /* Restore the common power of two removed above. */
   _ntl_glshift(res, shift_min, &res);

   _ntl_gcopy(res, r);
}
/*
 * gxxeucl: core extended-Euclidean routine (Lehmer-style algorithm with
 * double-precision quotient approximation).
 *
 * NOTE(review): judging from its use in the disabled _ntl_gexteucl below
 * (which reconstructs the second cofactor via (d - a*xa)/b), this computes
 * *uu = gcd-chain final remainder of (ain, nin) and *invv a cofactor with
 * ain*(*invv) congruent to *uu mod nin — confirm against callers.
 * Returns 0 if the final remainder equals 1 (inputs coprime), 1 otherwise.
 */
static long
gxxeucl(
_ntl_gbigint ain,
_ntl_gbigint nin,
_ntl_gbigint *invv,
_ntl_gbigint *uu
)
{
/* static scratch bigints, reused across calls (not reentrant) */
static _ntl_gbigint a = 0;
static _ntl_gbigint n = 0;
static _ntl_gbigint q = 0;
static _ntl_gbigint w = 0;
static _ntl_gbigint x = 0;
static _ntl_gbigint y = 0;
static _ntl_gbigint z = 0;
_ntl_gbigint inv = *invv;
_ntl_gbigint u = *uu;
long diff;
long ilo;
long sa;
long sn;
long temp;
long e;
long fast;
long parity;
long gotthem;
mp_limb_t *p;
long try11;  /* candidate 2x2 update matrix, accumulated in the fast loop */
long try12;
long try21;
long try22;
long got11;  /* last snapshot of the matrix confirmed safe to apply */
long got12;
long got21;
long got22;
double hi;
double lo;
double dt;
double fhi, fhi1;
double flo, flo1;
double num;
double den;
double dirt;
/* allocate every scratch bigint to the larger of the two input sizes */
_ntl_gsetlength(&a, (e = (SIZE(ain) > SIZE(nin) ? SIZE(ain) : SIZE(nin))));
_ntl_gsetlength(&n, e);
_ntl_gsetlength(&q, e);
_ntl_gsetlength(&w, e);
_ntl_gsetlength(&x, e);
_ntl_gsetlength(&y, e);
_ntl_gsetlength(&z, e);
_ntl_gsetlength(&inv, e);
*invv = inv;
_ntl_gsetlength(&u, e);
*uu = u;
/* fudge factors bracketing floating-point rounding error in the
   quotient approximations computed below */
fhi1 = 1.0 + ((double) 32.0)/NTL_FDOUBLE_PRECISION;
flo1 = 1.0 - ((double) 32.0)/NTL_FDOUBLE_PRECISION;
fhi = 1.0 + ((double) 8.0)/NTL_FDOUBLE_PRECISION;
flo = 1.0 - ((double) 8.0)/NTL_FDOUBLE_PRECISION;
_ntl_gcopy(ain, &a);
_ntl_gcopy(nin, &n);
/* (a, n) runs the remainder sequence; (inv, w) accumulates cofactors */
_ntl_gone(&inv);
_ntl_gzero(&w);
while (SIZE(n) > 0)
{
gotthem = 0;
sa = SIZE(a);
sn = SIZE(n);
diff = sa - sn;
if (!diff || diff == 1)
{
/* operands are close in size: attempt Lehmer's speedup on
   double-precision approximations of the leading limbs */
sa = SIZE(a);
p = DATA(a) + (sa-1);
/* num ~ top two/three limbs of a */
num = (double) (*p) * NTL_ZZ_FRADIX;
if (sa > 1)
num += (*(--p));
num *= NTL_ZZ_FRADIX;
if (sa > 2)
num += (*(p - 1));
sn = SIZE(n);
p = DATA(n) + (sn-1);
/* den ~ top two/three limbs of n */
den = (double) (*p) * NTL_ZZ_FRADIX;
if (sn > 1)
den += (*(--p));
den *= NTL_ZZ_FRADIX;
if (sn > 2)
den += (*(p - 1));
/* hi and lo bracket the true quotient a/n from above and below */
hi = fhi1 * (num + 1.0) / den;
lo = flo1 * num / (den + 1.0);
if (diff > 0)
{
hi *= NTL_ZZ_FRADIX;
lo *= NTL_ZZ_FRADIX;
}
/* start from the identity matrix: no quotients accumulated yet */
try11 = 1;
try12 = 0;
try21 = 0;
try22 = 1;
parity = 1;
fast = 1;
while (fast > 0)
{
parity = 1 - parity;
if (hi >= NTL_SP_BOUND)
fast = 0;
else
{
ilo = (long)lo;
dirt = hi - ilo;
/* bail out when the approximations no longer pin down the next
   quotient (hi and lo straddle an integer, or precision is gone) */
if (dirt < 1.0/NTL_FDOUBLE_PRECISION || !ilo || ilo < (long)hi)
fast = 0;
else
{
/* continued-fraction step with quotient ilo; update the brackets */
dt = lo-ilo;
lo = flo / dirt;
if (dt > 1.0/NTL_FDOUBLE_PRECISION)
hi = fhi / dt;
else
hi = NTL_SP_BOUND;
/* fold ilo into the 2x2 matrix, guarding each entry against
   single-precision overflow */
temp = try11;
try11 = try21;
if ((NTL_WSP_BOUND - temp) / ilo < try21)
fast = 0;
else
try21 = temp + ilo * try21;
temp = try12;
try12 = try22;
if ((NTL_WSP_BOUND - temp) / ilo < try22)
fast = 0;
else
try22 = temp + ilo * try22;
/* snapshot only on alternate iterations; the add/sub pattern
   applied below relies on this parity */
if ((fast > 0) && (parity > 0))
{
gotthem = 1;
got11 = try11;
got12 = try12;
got21 = try21;
got22 = try22;
}
}
}
}
}
if (gotthem)
{
/* apply the accumulated matrix to the cofactor pair (inv, w)
   and to the remainder pair (a, n) */
_ntl_gsmul(inv, got11, &x);
_ntl_gsmul(w, got12, &y);
_ntl_gsmul(inv, got21, &z);
_ntl_gsmul(w, got22, &w);
_ntl_gadd(x, y, &inv);
_ntl_gadd(z, w, &w);
_ntl_gsmul(a, got11, &x);
_ntl_gsmul(n, got12, &y);
_ntl_gsmul(a, got21, &z);
_ntl_gsmul(n, got22, &n);
_ntl_gsub(x, y, &a);
_ntl_gsub(n, z, &n);
}
else
{
/* slow path: one or two full-precision division steps */
_ntl_gdiv(a, n, &q, &a);
_ntl_gmul(q, w, &x);
_ntl_gadd(inv, x, &inv);
if (!ZEROP(a))
{
_ntl_gdiv(n, a, &q, &n);
_ntl_gmul(q, inv, &x);
_ntl_gadd(w, x, &w);
}
else
{
/* remainder reached zero after the first division: swap roles and
   negate the cofactor to keep the sign convention */
_ntl_gcopy(n, &a);
_ntl_gzero(&n);
_ntl_gcopy(w, &inv);
_ntl_gnegate(&inv);
}
}
}
/* e == 0 iff the final remainder (the gcd) is exactly 1 */
if (_ntl_gscompare(a, 1) == 0)
e = 0;
else
e = 1;
_ntl_gcopy(a, &u);
*invv = inv;
*uu = u;
return (e);
}
#if 0
/*
 * Retired implementation of _ntl_gexteucl, kept disabled for reference.
 * Computes d = gcd(aa, bb) with cofactors xa, xb satisfying
 * aa*xa + bb*xb = d, by running gxxeucl on |aa|, |bb| and then solving
 * for xb by exact division. Superseded by the mpn_gcdext-based version
 * that follows.
 */
void
_ntl_gexteucl(
_ntl_gbigint aa,
_ntl_gbigint *xa,
_ntl_gbigint bb,
_ntl_gbigint *xb,
_ntl_gbigint *d
)
{
static _ntl_gbigint modcon = 0;
static _ntl_gbigint a=0, b=0;
long anegative = 0;
long bnegative = 0;
/* work on absolute values; remember the signs to fix the cofactors */
_ntl_gcopy(aa, &a);
_ntl_gcopy(bb, &b);
if (a && SIZE(a) < 0) {
anegative = 1;
SIZE(a) = -SIZE(a);
}
else
anegative = 0;
if (b && SIZE(b) < 0) {
bnegative = 1;
SIZE(b) = -SIZE(b);
}
else
bnegative = 0;
/* trivial cases: gcd(a, 0) = a with cofactors (1, 0), and symmetrically */
if (ZEROP(b))
{
_ntl_gone(xa);
_ntl_gzero(xb);
_ntl_gcopy(a, d);
goto done;
}
if (ZEROP(a))
{
_ntl_gzero(xa);
_ntl_gone(xb);
_ntl_gcopy(b, d);
goto done;
}
gxxeucl(a, b, xa, d);
/* xb = (d - a*xa) / b; the division must be exact, which doubles
   as a sanity check on gxxeucl's output */
_ntl_gmul(a, *xa, xb);
_ntl_gsub(*d, *xb, xb);
_ntl_gdiv(*xb, b, xb, &modcon);
if (!ZEROP(modcon))
{
ghalt("non-zero remainder in _ntl_gexteucl BUG");
}
/* undo the sign normalization on the cofactors */
done:
if (anegative)
{
_ntl_gnegate(xa);
}
if (bnegative)
{
_ntl_gnegate(xb);
}
}
#endif
/*
 * _ntl_gexteucl: extended gcd.
 * Sets *dp = gcd(ain, bin) (nonnegative) and cofactors *xap, *xbp with
 * ain*(*xap) + bin*(*xbp) = *dp; the identity is verified below by an
 * exact-division check.
 */
void
_ntl_gexteucl(
_ntl_gbigint ain,
_ntl_gbigint *xap,
_ntl_gbigint bin,
_ntl_gbigint *xbp,
_ntl_gbigint *dp
)
{
/* gcd(a, 0) = |a| with cofactors (sign(a), 0) */
if (ZEROP(bin)) {
long asign = _ntl_gsign(ain);
_ntl_gcopy(ain, dp);
_ntl_gabs(dp);
_ntl_gintoz( (asign >= 0 ? 1 : -1), xap);
_ntl_gzero(xbp);
}
/* gcd(0, b) = |b| with cofactors (0, sign(b)) */
else if (ZEROP(ain)) {
long bsign = _ntl_gsign(bin);
_ntl_gcopy(bin, dp);
_ntl_gabs(dp);
_ntl_gzero(xap);
_ntl_gintoz(bsign, xbp);
}
else {
/* static scratch, reused across calls (not reentrant) */
static _ntl_gbigint a = 0, b = 0, xa = 0, xb = 0, d = 0, tmp = 0;
long sa, aneg, sb, bneg, rev;
mp_limb_t *adata, *bdata, *ddata, *xadata;
mp_size_t sxa, sd;
/* absolute sizes and sign flags of the two inputs */
GET_SIZE_NEG(sa, aneg, ain);
GET_SIZE_NEG(sb, bneg, bin);
_ntl_gsetlength(&a, sa+1); /* +1 because mpn_gcdext may need it */
_ntl_gcopy(ain, &a);
_ntl_gsetlength(&b, sb+1); /* +1 because mpn_gcdext may need it */
_ntl_gcopy(bin, &b);
adata = DATA(a);
bdata = DATA(b);
/* order the operands so |first| >= |second| for mpn_gcdext; remember
   (rev) so the cofactors can be swapped back afterwards */
if (sa < sb || (sa == sb && mpn_cmp(adata, bdata, sa) < 0)) {
SWAP_BIGINT(ain, bin);
SWAP_LONG(sa, sb);
SWAP_LONG(aneg, bneg);
SWAP_LIMB_PTR(adata, bdata);
rev = 1;
}
else
rev = 0;
_ntl_gsetlength(&d, sa+1); /* +1 because mpn_gcdext may need it...
documentation is unclear, but this is
what is done in mpz_gcdext */
_ntl_gsetlength(&xa, sa+1); /* ditto */
ddata = DATA(d);
xadata = DATA(xa);
/* mpn_gcdext returns the gcd and only the cofactor of the first
   (larger) operand */
sd = mpn_gcdext(ddata, xadata, &sxa, adata, sa, bdata, sb);
SIZE(d) = sd;
SIZE(xa) = sxa;
/* mpn_gcdext worked on magnitudes; flip the cofactor if ain < 0 */
if (aneg) _ntl_gnegate(&xa);
/* recover xb = (d - ain*xa)/bin; the division must be exact, which
   doubles as an internal consistency check */
_ntl_gmul(ain, xa, &tmp);
_ntl_gsub(d, tmp, &tmp);
_ntl_gdiv(tmp, bin, &xb, &tmp);
if (!ZEROP(tmp)) ghalt("internal bug in _ntl_gexteucl");
/* undo the operand swap on the cofactor pair */
if (rev) SWAP_BIGINT(xa, xb);
_ntl_gcopy(xa, xap);
_ntl_gcopy(xb, xbp);
_ntl_gcopy(d, dp);
}
}
/*
 * _ntl_ginv: modular inverse.
 * Requires nin > 1 and 0 <= ain < nin (halts otherwise).
 * If gcd(ain, nin) == 1, sets *invv = ain^{-1} mod nin, normalized to
 * [0, nin), and returns 0. Otherwise sets *invv = gcd(ain, nin) and
 * returns 1.
 */
long _ntl_ginv(_ntl_gbigint ain, _ntl_gbigint nin, _ntl_gbigint *invv)
{
/* static scratch, reused across calls (not reentrant) */
static _ntl_gbigint u = 0;
static _ntl_gbigint d = 0;
static _ntl_gbigint a = 0;
static _ntl_gbigint n = 0;
long sz;
long sd;
mp_size_t su;
if (_ntl_gscompare(nin, 1) <= 0) {
ghalt("InvMod: second input <= 1");
}
if (_ntl_gsign(ain) < 0) {
ghalt("InvMod: first input negative");
}
if (_ntl_gcompare(ain, nin) >= 0) {
ghalt("InvMod: first input too big");
}
/* reserve room for ain+nin plus the extra limb mpn_gcdext may need */
sz = SIZE(nin) + 2;
if (MustAlloc(a, sz))
_ntl_gsetlength(&a, sz);
if (MustAlloc(n, sz))
_ntl_gsetlength(&n, sz);
if (MustAlloc(d, sz))
_ntl_gsetlength(&d, sz);
if (MustAlloc(u, sz))
_ntl_gsetlength(&u, sz);
_ntl_gadd(ain, nin, &a);
_ntl_gcopy(nin, &n);
/* We apply mpn_gcdext to (a, n) = (ain+nin, nin), because that function
 * only computes the co-factor of the larger input. This way, we avoid
 * a multiplication and a division.
 */
sd = mpn_gcdext(DATA(d), DATA(u), &su, DATA(a), SIZE(a), DATA(n), SIZE(n));
SIZE(d) = sd;
SIZE(u) = su;
if (ONEP(d)) {
/*
 * We make sure that u is in range 0..n-1, just in case
 * GMP is sloppy.
 */
while (_ntl_gsign(u) < 0) _ntl_gadd(u, nin, &u);
while (_ntl_gcompare(u, nin) >= 0) _ntl_gsub(u, nin, &u);
_ntl_gcopy(u, invv);
return 0;
}
else {
/* not invertible: hand back the gcd so the caller can report it */
_ntl_gcopy(d, invv);
return 1;
}
}
void
_ntl_ginvmod(
   _ntl_gbigint a,
   _ntl_gbigint n,
   _ntl_gbigint *c
)
{
   /* c = a^{-1} mod n; halts when the inverse does not exist. */
   long failed = _ntl_ginv(a, n, c);
   if (failed)
      ghalt("undefined inverse in _ntl_ginvmod");
}
void
_ntl_gaddmod(
   _ntl_gbigint a,
   _ntl_gbigint b,
   _ntl_gbigint n,
   _ntl_gbigint *c
)
{
   /* c = (a + b) mod n, assuming a and b are already reduced (the sum is
      brought back into range with at most one subtraction of n). */
   if (*c == n) {
      /* output aliases the modulus: accumulate in scratch first */
      static _ntl_gbigint scratch = 0;
      _ntl_gadd(a, b, &scratch);
      if (_ntl_gcompare(scratch, n) >= 0)
         _ntl_gsubpos(scratch, n, c);
      else
         _ntl_gcopy(scratch, c);
   }
   else {
      _ntl_gadd(a, b, c);
      if (_ntl_gcompare(*c, n) >= 0)
         _ntl_gsubpos(*c, n, c);
   }
}
void
_ntl_gsubmod(
   _ntl_gbigint a,
   _ntl_gbigint b,
   _ntl_gbigint n,
   _ntl_gbigint *c
)
{
   /* c = (a - b) mod n, assuming a and b are already reduced. */
   static _ntl_gbigint scratch = 0;
   long cmp = _ntl_gcompare(a, b);

   if (cmp > 0) {
      /* a > b: plain nonnegative subtraction */
      _ntl_gsubpos(a, b, c);
   }
   else if (cmp == 0) {
      _ntl_gzero(c);
   }
   else {
      /* a < b: compute (n + a) - b to stay nonnegative */
      _ntl_gadd(n, a, &scratch);
      _ntl_gsubpos(scratch, b, c);
   }
}
void
_ntl_gsmulmod(
   _ntl_gbigint a,
   long d,
   _ntl_gbigint n,
   _ntl_gbigint *c
)
{
   /* c = (a * d) mod n, where d is single precision. */
   static _ntl_gbigint prod = 0;
   _ntl_gsmul(a, d, &prod);
   _ntl_gmod(prod, n, c);
}
void
_ntl_gmulmod(
   _ntl_gbigint a,
   _ntl_gbigint b,
   _ntl_gbigint n,
   _ntl_gbigint *c
)
{
   /* c = (a * b) mod n: full product into scratch, then reduce. */
   static _ntl_gbigint prod = 0;
   _ntl_gmul(a, b, &prod);
   _ntl_gmod(prod, n, c);
}
void
_ntl_gsqmod(
   _ntl_gbigint a,
   _ntl_gbigint n,
   _ntl_gbigint *c
)
{
   /* c = a^2 mod n, delegated to the general modular multiply. */
   _ntl_gmulmod(a, a, n, c);
}
double _ntl_gdoub_aux(_ntl_gbigint n)
{
   mp_limb_t *limbs;
   long sn, nneg, i;
   double acc;

   /* Convert n to a double by Horner evaluation over its limbs,
      most significant limb first; a null handle converts to 0. */
   if (!n)
      return 0.0;

   GET_SIZE_NEG(sn, nneg, n);
   limbs = DATA(n);

   acc = 0.0;
   for (i = sn - 1; i >= 0; i--)
      acc = acc * NTL_ZZ_FRADIX + ((double) limbs[i]);

   return nneg ? -acc : acc;
}
long _ntl_ground_correction(_ntl_gbigint a, long k, long residual)
{
long direction;
long p;
long sgn;
long bl;
mp_limb_t wh;
long i;
mp_limb_t *adata;
if (SIZE(a) > 0)
sgn = 1;
else
sgn = -1;
adata = DATA(a);
p = k - 1;
bl = (p/NTL_ZZ_NBITS);
wh = ((mp_limb_t) 1) << (p - NTL_ZZ_NBITS*bl);
if (adata[bl] & wh) {
/* bit is 1...we have to see if lower bits are all 0
in order to implement "round to even" */
if (adata[bl] & (wh - ((mp_limb_t) 1)))
direction = 1;
else {
i = bl - 1;
while (i >= 0 && adata[i] == 0) i--;
if (i >= 0)
direction = 1;
else
direction = 0;
}
/* use residual to break ties */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -