/* g_lip_impl.h */
fhi1 = 1.0 + ((double) 32.0)/NTL_FDOUBLE_PRECISION;
flo1 = 1.0 - ((double) 32.0)/NTL_FDOUBLE_PRECISION;
fhi = 1.0 + ((double) 8.0)/NTL_FDOUBLE_PRECISION;
flo = 1.0 - ((double) 8.0)/NTL_FDOUBLE_PRECISION;
_ntl_gcopy(ain, &a);
_ntl_gcopy(nin, &n);
_ntl_gone(&inv);
_ntl_gzero(&w);
while (SIZE(n) > 0)
{
gotthem = 0;
sa = SIZE(a);
sn = SIZE(n);
diff = sa - sn;
if (!diff || diff == 1)
{
sa = SIZE(a);
p = DATA(a) + (sa-1);
num = (double) (*p) * NTL_ZZ_FRADIX;
if (sa > 1)
num += (*(--p));
num *= NTL_ZZ_FRADIX;
if (sa > 2)
num += (*(p - 1));
sn = SIZE(n);
p = DATA(n) + (sn-1);
den = (double) (*p) * NTL_ZZ_FRADIX;
if (sn > 1)
den += (*(--p));
den *= NTL_ZZ_FRADIX;
if (sn > 2)
den += (*(p - 1));
hi = fhi1 * (num + 1.0) / den;
lo = flo1 * num / (den + 1.0);
if (diff > 0)
{
hi *= NTL_ZZ_FRADIX;
lo *= NTL_ZZ_FRADIX;
}
try11 = 1;
try12 = 0;
try21 = 0;
try22 = 1;
parity = 1;
fast = 1;
while (fast > 0)
{
parity = 1 - parity;
if (hi >= NTL_SP_BOUND)
fast = 0;
else
{
ilo = (long)lo;
dirt = hi - ilo;
if (dirt <= 0 || !ilo || ilo < (long)hi)
fast = 0;
else
{
dt = lo-ilo;
lo = flo / dirt;
if (dt > 0)
hi = fhi / dt;
else
hi = NTL_SP_BOUND;
temp = try11;
try11 = try21;
if ((NTL_WSP_BOUND - temp) / ilo < try21)
fast = 0;
else
try21 = temp + ilo * try21;
temp = try12;
try12 = try22;
if ((NTL_WSP_BOUND - temp) / ilo < try22)
fast = 0;
else
try22 = temp + ilo * try22;
if ((fast > 0) && (parity > 0))
{
gotthem = 1;
got11 = try11;
got12 = try12;
got21 = try21;
got22 = try22;
}
}
}
}
}
if (gotthem)
{
_ntl_gsmul(inv, got11, &x);
_ntl_gsmul(w, got12, &y);
_ntl_gsmul(inv, got21, &z);
_ntl_gsmul(w, got22, &w);
_ntl_gadd(x, y, &inv);
_ntl_gadd(z, w, &w);
_ntl_gsmul(a, got11, &x);
_ntl_gsmul(n, got12, &y);
_ntl_gsmul(a, got21, &z);
_ntl_gsmul(n, got22, &n);
_ntl_gsub(x, y, &a);
_ntl_gsub(n, z, &n);
}
else
{
_ntl_gdiv(a, n, &q, &a);
_ntl_gmul(q, w, &x);
_ntl_gadd(inv, x, &inv);
if (!ZEROP(a))
{
_ntl_gdiv(n, a, &q, &n);
_ntl_gmul(q, inv, &x);
_ntl_gadd(w, x, &w);
}
else
{
_ntl_gcopy(n, &a);
_ntl_gzero(&n);
_ntl_gcopy(w, &inv);
_ntl_gnegate(&inv);
}
}
}
if (_ntl_gscompare(a, 1) == 0)
e = 0;
else
e = 1;
_ntl_gcopy(a, &u);
*invv = inv;
*uu = u;
return (e);
}
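/*
 * Hedged aside (not part of NTL): the single-precision loop above is a
 * Lehmer-style step.  num and den approximate a and n from their leading
 * limbs, and the true partial quotient floor(a/n) is bracketed by
 * lo = flo1*num/(den+1) and hi = fhi1*(num+1)/den; the fudge factors
 * fhi1/flo1 pad the bracket against floating-point rounding.  A quotient
 * digit is accepted only while the bracket pins down a single integer.
 * A stand-alone sketch of that test follows; the helper name is
 * hypothetical, 2^52 stands in for NTL_FDOUBLE_PRECISION, and 2^30 stands
 * in for NTL_SP_BOUND.
 */
static int quotient_digit_sketch(double num, double den, long *q)
{
   const double eps = 32.0 / 4503599627370496.0;  /* 32/2^52, cf. fhi1/flo1 */
   double lo = (1.0 - eps) * num / (den + 1.0);   /* certain lower estimate */
   double hi = (1.0 + eps) * (num + 1.0) / den;   /* certain upper estimate */
   long ilo = (long) lo;
   if (hi < 1073741824.0 && ilo > 0 && ilo == (long) hi) {
      *q = ilo;        /* bracket pins down one integer: digit is correct */
      return 1;
   }
   return 0;           /* otherwise fall back to an exact long division   */
}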
#if 0
void
_ntl_gexteucl(
_ntl_gbigint aa,
_ntl_gbigint *xa,
_ntl_gbigint bb,
_ntl_gbigint *xb,
_ntl_gbigint *d
)
{
static _ntl_gbigint modcon = 0;
static _ntl_gbigint a=0, b=0;
long anegative = 0;
long bnegative = 0;
_ntl_gcopy(aa, &a);
_ntl_gcopy(bb, &b);
if (a && SIZE(a) < 0) {
anegative = 1;
SIZE(a) = -SIZE(a);
}
else
anegative = 0;
if (b && SIZE(b) < 0) {
bnegative = 1;
SIZE(b) = -SIZE(b);
}
else
bnegative = 0;
if (ZEROP(b))
{
_ntl_gone(xa);
_ntl_gzero(xb);
_ntl_gcopy(a, d);
goto done;
}
if (ZEROP(a))
{
_ntl_gzero(xa);
_ntl_gone(xb);
_ntl_gcopy(b, d);
goto done;
}
gxxeucl(a, b, xa, d);
_ntl_gmul(a, *xa, xb);
_ntl_gsub(*d, *xb, xb);
_ntl_gdiv(*xb, b, xb, &modcon);
if (!ZEROP(modcon))
{
ghalt("non-zero remainder in _ntl_gexteucl BUG");
}
done:
if (anegative)
{
_ntl_gnegate(xa);
}
if (bnegative)
{
_ntl_gnegate(xb);
}
}
#endif
void
_ntl_gexteucl(
_ntl_gbigint ain,
_ntl_gbigint *xap,
_ntl_gbigint bin,
_ntl_gbigint *xbp,
_ntl_gbigint *dp
)
{
if (ZEROP(bin)) {
long asign = _ntl_gsign(ain);
_ntl_gcopy(ain, dp);
_ntl_gabs(dp);
_ntl_gintoz( (asign >= 0 ? 1 : -1), xap);
_ntl_gzero(xbp);
}
else if (ZEROP(ain)) {
long bsign = _ntl_gsign(bin);
_ntl_gcopy(bin, dp);
_ntl_gabs(dp);
_ntl_gzero(xap);
_ntl_gintoz(bsign, xbp);
}
else {
static _ntl_gbigint a = 0, b = 0, xa = 0, xb = 0, d = 0, tmp = 0;
long sa, aneg, sb, bneg, rev;
mp_limb_t *adata, *bdata, *ddata, *xadata;
mp_size_t sxa, sd;
GET_SIZE_NEG(sa, aneg, ain);
GET_SIZE_NEG(sb, bneg, bin);
_ntl_gsetlength(&a, sa+1); /* +1 because mpn_gcdext may need it */
_ntl_gcopy(ain, &a);
_ntl_gsetlength(&b, sb+1); /* +1 because mpn_gcdext may need it */
_ntl_gcopy(bin, &b);
adata = DATA(a);
bdata = DATA(b);
if (sa < sb || (sa == sb && mpn_cmp(adata, bdata, sa) < 0)) {
SWAP_BIGINT(ain, bin);
SWAP_LONG(sa, sb);
SWAP_LONG(aneg, bneg);
SWAP_LIMB_PTR(adata, bdata);
rev = 1;
}
else
rev = 0;
_ntl_gsetlength(&d, sa+1); /* +1 because mpn_gcdext may need it...
documentation is unclear, but this is
what is done in mpz_gcdext */
_ntl_gsetlength(&xa, sa+1); /* ditto */
ddata = DATA(d);
xadata = DATA(xa);
sd = mpn_gcdext(ddata, xadata, &sxa, adata, sa, bdata, sb);
SIZE(d) = sd;
SIZE(xa) = sxa;
if (aneg) _ntl_gnegate(&xa);
_ntl_gmul(ain, xa, &tmp);
_ntl_gsub(d, tmp, &tmp);
_ntl_gdiv(tmp, bin, &xb, &tmp);
if (!ZEROP(tmp)) ghalt("internal bug in _ntl_gexteucl");
if (rev) SWAP_BIGINT(xa, xb);
_ntl_gcopy(xa, xap);
_ntl_gcopy(xb, xbp);
_ntl_gcopy(d, dp);
}
}
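/*
 * Hedged aside (not NTL code): mpn_gcdext only returns the cofactor of the
 * first (larger) operand, so the routine above recovers the second one from
 * the Bezout identity d = a*xa + b*xb, i.e. xb = (d - a*xa)/b, an exact
 * division.  A stand-alone mpz-level sketch of that recovery; the helper
 * name is hypothetical and <gmp.h> is assumed to be available.
 */
static void recover_second_cofactor_sketch(mpz_t xb,
                                           const mpz_t a, const mpz_t xa,
                                           const mpz_t b, const mpz_t d)
{
   mpz_t t;
   mpz_init(t);
   mpz_mul(t, a, xa);        /* t  = a*xa            */
   mpz_sub(t, d, t);         /* t  = d - a*xa        */
   mpz_divexact(xb, t, b);   /* xb = (d - a*xa) / b  */
   mpz_clear(t);
}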
long _ntl_ginv(_ntl_gbigint ain, _ntl_gbigint nin, _ntl_gbigint *invv)
{
static _ntl_gbigint u = 0;
static _ntl_gbigint d = 0;
static _ntl_gbigint a = 0;
static _ntl_gbigint n = 0;
long sz;
long sd;
mp_size_t su;
if (_ntl_gscompare(nin, 1) <= 0) {
ghalt("InvMod: second input <= 1");
}
if (_ntl_gsign(ain) < 0) {
ghalt("InvMod: first input negative");
}
if (_ntl_gcompare(ain, nin) >= 0) {
ghalt("InvMod: first input too big");
}
sz = SIZE(nin) + 2;
if (MustAlloc(a, sz))
_ntl_gsetlength(&a, sz);
if (MustAlloc(n, sz))
_ntl_gsetlength(&n, sz);
if (MustAlloc(d, sz))
_ntl_gsetlength(&d, sz);
if (MustAlloc(u, sz))
_ntl_gsetlength(&u, sz);
_ntl_gadd(ain, nin, &a);
_ntl_gcopy(nin, &n);
/* We apply mpn_gcdext to (a, n) = (ain+nin, nin), because that function
* only computes the co-factor of the larger input. This way, we avoid
* a multiplication and a division.
*/
sd = mpn_gcdext(DATA(d), DATA(u), &su, DATA(a), SIZE(a), DATA(n), SIZE(n));
SIZE(d) = sd;
SIZE(u) = su;
if (ONEP(d)) {
if (_ntl_gsign(u) < 0) _ntl_gadd(u, nin, &u);
_ntl_gcopy(u, invv);
return 0;
}
else {
_ntl_gcopy(d, invv);
return 1;
}
}
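/*
 * Hedged aside (not NTL code): why gcdext on (a+n, n) yields the inverse of
 * a mod n.  If u*(a+n) + v*n = 1, then u*a == 1 (mod n), so u reduced into
 * [0, n) is a^{-1} mod n, and no extra multiplication or division is needed
 * to turn the cofactor of the larger operand into the answer.  A stand-alone
 * mpz-level sketch; the helper name is hypothetical and <gmp.h> is assumed.
 */
static int invmod_via_shifted_gcdext_sketch(mpz_t inv,
                                            const mpz_t a, const mpz_t n)
{
   mpz_t s, g, u, v;
   int ok;
   mpz_init(s); mpz_init(g); mpz_init(u); mpz_init(v);
   mpz_add(s, a, n);               /* s = a + n, so s >= n                    */
   mpz_gcdext(g, u, v, s, n);      /* g = u*s + v*n                           */
   ok = (mpz_cmp_ui(g, 1) == 0);   /* invertible iff gcd(a, n) == 1           */
   if (ok)
      mpz_mod(inv, u, n);          /* u*(a+n) == u*a (mod n); reduce to [0,n) */
   mpz_clear(s); mpz_clear(g); mpz_clear(u); mpz_clear(v);
   return ok;                      /* 1 if the inverse exists, 0 otherwise    */
}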
void
_ntl_ginvmod(
_ntl_gbigint a,
_ntl_gbigint n,
_ntl_gbigint *c
)
{
if (_ntl_ginv(a, n, c))
ghalt("undefined inverse in _ntl_ginvmod");
}
void
_ntl_gaddmod(
_ntl_gbigint a,
_ntl_gbigint b,
_ntl_gbigint n,
_ntl_gbigint *c
)
{
if (*c != n) {
_ntl_gadd(a, b, c);
if (_ntl_gcompare(*c, n) >= 0)
_ntl_gsubpos(*c, n, c);
}
else {
static _ntl_gbigint mem = 0;
_ntl_gadd(a, b, &mem);
if (_ntl_gcompare(mem, n) >= 0)
_ntl_gsubpos(mem, n, c);
_ntl_gcopy(mem, c);
}
}
void
_ntl_gsubmod(
_ntl_gbigint a,
_ntl_gbigint b,
_ntl_gbigint n,
_ntl_gbigint *c
)
{
static _ntl_gbigint mem = 0;
long cmp;
if ((cmp=_ntl_gcompare(a, b)) < 0) {
_ntl_gadd(n, a, &mem);
_ntl_gsubpos(mem, b, c);
} else if (!cmp)
_ntl_gzero(c);
else
_ntl_gsubpos(a, b, c);
}
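/*
 * Hedged aside (not NTL code): the two routines above use the classical
 * add-or-subtract-with-one-correction pattern, assuming both operands are
 * already reduced mod n.  A single-word sketch of the same pattern, written
 * so that no intermediate value overflows even when n is close to the word
 * size; the helper names are hypothetical.
 */
static unsigned long addmod_word_sketch(unsigned long a, unsigned long b,
                                        unsigned long n)
{
   unsigned long gap = n - b;              /* b < n, so gap >= 1           */
   return (a >= gap) ? a - gap : a + b;    /* a - gap equals a + b - n     */
}
static unsigned long submod_word_sketch(unsigned long a, unsigned long b,
                                        unsigned long n)
{
   return (a >= b) ? a - b : a + (n - b);  /* borrow one n when a < b      */
}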
void
_ntl_gsmulmod(
_ntl_gbigint a,
long d,
_ntl_gbigint n,
_ntl_gbigint *c
)
{
static _ntl_gbigint mem = 0;
_ntl_gsmul(a, d, &mem);
_ntl_gmod(mem, n, c);
}
void
_ntl_gmulmod(
_ntl_gbigint a,
_ntl_gbigint b,
_ntl_gbigint n,
_ntl_gbigint *c
)
{
static _ntl_gbigint mem = 0;
_ntl_gmul(a, b, &mem);
_ntl_gmod(mem, n, c);
}
void
_ntl_gsqmod(
_ntl_gbigint a,
_ntl_gbigint n,
_ntl_gbigint *c
)
{
_ntl_gmulmod(a, a, n, c);
}
double _ntl_gdoub_aux(_ntl_gbigint n)
{
double res;
mp_limb_t *ndata;
long i, sn, nneg;
if (!n)
return ((double) 0);
GET_SIZE_NEG(sn, nneg, n);
ndata = DATA(n);
res = 0;
for (i = sn-1; i >= 0; i--)
res = res * NTL_ZZ_FRADIX + ((double) ndata[i]);
if (nneg) res = -res;
return res;
}
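/*
 * Hedged aside (not NTL code): the loop above is Horner's rule in the limb
 * radix, res = ((d[k-1]*R + d[k-2])*R + ...)*R + d[0].  A stand-alone sketch
 * for a plain array of 64-bit limbs (R = 2^64); the helper name is
 * hypothetical and <stdint.h> is assumed.
 */
static double limbs_to_double_sketch(const uint64_t *d, long k)
{
   const double radix = 18446744073709551616.0;   /* 2^64 */
   double res = 0.0;
   long i;
   for (i = k - 1; i >= 0; i--)
      res = res * radix + (double) d[i];          /* Horner step */
   return res;
}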
long _ntl_ground_correction(_ntl_gbigint a, long k, long residual)
{
long direction;
long p;
long sgn;
long bl;
mp_limb_t wh;
long i;
mp_limb_t *adata;
if (SIZE(a) > 0)
sgn = 1;
else
sgn = -1;
adata = DATA(a);
p = k - 1;
bl = (p/NTL_ZZ_NBITS);
wh = ((mp_limb_t) 1) << (p - NTL_ZZ_NBITS*bl);
if (adata[bl] & wh) {
/* bit is 1...we have to see if lower bits are all 0
in order to implement "round to even" */
if (adata[bl] & (wh - ((mp_limb_t) 1)))
direction = 1;
else {
i = bl - 1;
while (i >= 0 && adata[i] == 0) i--;
if (i >= 0)
direction = 1;
else
direction = 0;
}
/* use residual to break ties */
if (direction == 0 && residual != 0) {
if (residual == sgn)
direction = 1;
else
direction = -1;
}
if (direction == 0) {
/* round to even */
wh = wh << 1;
if (wh == 0) {
wh = 1;
bl++;
}
if (adata[bl] & wh)
direction = 1;
else
direction = -1;
}
}
else
direction = -1;
if (direction == 1)
return sgn;
return 0;
}
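/*
 * Hedged aside (not NTL code): the decision above is round-to-nearest with
 * ties broken to even (plus the "residual" tie-break), applied to the k bits
 * being discarded.  A stand-alone sketch of the basic ties-to-even rule on a
 * single word, assuming 1 <= k < 64; the helper name is hypothetical.
 */
static unsigned long round_word_sketch(unsigned long w, int k)
{
   unsigned long keep = w >> k;                  /* bits retained           */
   unsigned long half = 1UL << (k - 1);          /* halfway point           */
   unsigned long low  = w & ((1UL << k) - 1UL);  /* bits discarded          */
   if (low > half) return keep + 1;              /* above halfway: round up */
   if (low < half) return keep;                  /* below halfway: truncate */
   return keep + (keep & 1UL);                   /* exact tie: go to even   */
}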
double _ntl_gdoub(_ntl_gbigint n)
{
static _ntl_gbigint tmp = 0;
long s;
long shamt;
long correction;
double x;
s = _ntl_g2log(n);
shamt = s - NTL_DOUBLE_PRECISION;
if (shamt <= 0)
return _ntl_gdoub_aux(n);
_ntl_grshift(n, shamt, &tmp);
correction = _ntl_ground_correction(n, shamt, 0);
if (correction) _ntl_gsadd(tmp, correction, &tmp);
x = _ntl_gdoub_aux(tmp);
/* We could just write x = ldexp(x, shamt); however, if long's
* are bigger than int's, there is the possibility that shamt would be
* truncated. We could check for this and raise an error, but
* it is preferable to do it this way to get +/- infinity, if
* possible. */
while (shamt > 1024) {
x = ldexp(x, 1024);
shamt -= 1024;
}
x = ldexp(x, shamt);
return x;
}
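/*
 * Hedged aside (not NTL code): the chunked ldexp calls above keep the int
 * exponent argument in range and let an oversized shift overflow gracefully
 * to +/- infinity.  A stand-alone sketch that also handles negative shifts;
 * the helper name is hypothetical and <math.h> is assumed.
 */
static double ldexp_long_sketch(double x, long shamt)
{
   while (shamt > 1024) {          /* scale up in safe chunks   */
      x = ldexp(x, 1024);
      shamt -= 1024;
   }
   while (shamt < -1024) {         /* scale down in safe chunks */
      x = ldexp(x, -1024);
      shamt += 1024;
   }
   return ldexp(x, (int) shamt);   /* remainder fits in an int  */
}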
double _ntl_glog(_ntl_gbigint n)
{
static _ntl_gbigint tmp = 0;
static double log_2;
static long init = 0;
long s;
long shamt;
long correction;
double x;
if (!init) {
log_2 = log(2.0);
init = 1;
}
if (_ntl_gsign(n) <= 0)
ghalt("log argument <= 0");
s = _ntl_g2log(n);
shamt = s - NTL_DOUBLE_PRECISION;
if (shamt <= 0)
return log(_ntl_gdoub_aux(n));
_ntl_grshift(n, shamt, &tmp);
correction = _ntl_ground_correction(n, shamt, 0);
if (correction) _ntl_gsadd(tmp, correction, &tmp);
x = _ntl_gdoub_aux(tmp);
return log(x) + shamt*log_2;
}
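/*
 * Hedged aside (not NTL code): the routine above uses
 * log(n) = log(n / 2^shamt) + shamt*log(2) so that only a double-sized
 * mantissa ever reaches log().  The same identity for an ordinary double,
 * using frexp to split off the exponent; the helper name is hypothetical
 * and <math.h> is assumed.
 */
static double log_via_frexp_sketch(double x)
{
   int e;
   double m = frexp(x, &e);        /* x = m * 2^e with 0.5 <= m < 1, x > 0 */
   return log(m) + e * log(2.0);   /* log(x) = log(m) + e*log(2)           */
}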
/* To implement _ntl_gdoubtoz, I've implemented essentially the
* same algorithm as in LIP, processing in blocks of
* NTL_SP_NBITS bits, rather than NTL_ZZ_NBITS.
* This conversion is rather delicate, and I don't want to
* make any new assumptions about the underlying arithmetic.
* This implementation should be quite portable. */
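/*
 * Hedged aside (not NTL code): before the real routine below, a stand-alone
 * sketch of the same block-by-block idea for plain C types.  The double is
 * scaled down until it is < 1, then bits are peeled off B at a time from the
 * top and accumulated into an integer.  The 64-bit accumulator limits it to
 * values below 2^64; B = 30 stands in for NTL_SP_NBITS, the helper name is
 * hypothetical, and <stdint.h> plus <math.h> are assumed.
 */
static uint64_t double_to_u64_blocks_sketch(double a)
{
   const int B = 30;
   const double bound = (double) (1L << B);     /* 2^B                     */
   int blocks = 0;
   uint64_t r = 0;
   long t;
   a = floor(a);
   while (a >= 1.0) {                           /* count blocks and scale  */
      a = a / bound;                            /* exact: power-of-2 scale */
      blocks++;
   }
   while (blocks > 0) {                         /* now 0 <= a < 1          */
      a = a * bound;                            /* next B bits above point */
      t = (long) a;                             /* extract them            */
      a = a - (double) t;
      r = (r << B) | (uint64_t) t;              /* append to accumulator   */
      blocks--;
   }
   return r;
}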
void _ntl_gdoubtoz(double a, _ntl_gbigint *xx)
{
static _ntl_gbigint x = 0;
long neg, i, t, sz;
a = floor(a);
if (!_ntl_IsFinite(&a))
ghalt("_ntl_gdoubtoz: attempt to convert non-finite value");
if (a < 0) {
a = -a;
neg = 1;
}
else
neg = 0;
if (a == 0) {
_ntl_gzero(xx);
return;
}
sz = 0;
while (a >= 1) {
a = a*(1.0/NTL_SP_FBOUND);
sz++;
}
i = 0;
_ntl_gzero(&x);