/*
Signed arbitrary length long int package.
Copyright Arjen K. Lenstra, 1989-2000
Bugs, questions, suggestions, additions, whatever, to
Arjen K. Lenstra
1 North Gate Road
Mendham, NJ 07945-3104
email arjen.lenstra@citicorp.com
fax 973 543 5094
This version (free-LIP v1.1) is based on the LIP used in the RSA-129
project, with a few bug fixes and minor enhancements. Permission is
granted for free re-distribution for non-commercial uses only.
*/
#ifdef WIN32
#include <windows.h>
#include <time.h>
#else
#include <sys/time.h>
#include <sys/resource.h>
#endif
#include <malloc.h>
#include <stddef.h>
#include <math.h>
#include <stdio.h>
#include <string.h>
#include "lip.h"
#ifndef KAR_DEPTH
# define KAR_DEPTH 20
#endif
#ifndef KAR_MUL_CROV
# define KAR_MUL_CROV 30
#endif
#ifndef KAR_SQU_CROV
# define KAR_SQU_CROV 30
#endif
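/*
Karatsuba tuning knobs (interpretation inferred from kar_mul/kar_sq
below): KAR_DEPTH bounds the recursion depth, while KAR_MUL_CROV and
KAR_SQU_CROV are the operand lengths below which multiplication and
squaring fall back to the quadratic schoolbook code.  All three can be
overridden on the compiler command line, e.g. (illustrative values,
best found by timing on the target machine):

cc -DKAR_MUL_CROV=40 -DKAR_SQU_CROV=50 -c lip.c
*/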
#if (!ILLEGAL)
/*Function prototypes for static internal functions.*/
static void zsubmul(
long r,
verylong a,
verylong b
);
static void zalphasubmul(
long r,
verylong a,
verylong b
);
static void hidetimer(
FILE *f,
long w
);
static void zaddls(
verylong a,
verylong b,
verylong c
);
static void zmstartint(
verylong n
);
static void zmkeep(
verylong n
);
static void zmback(
void
);
static void kar_mul(
verylong a,
verylong b,
verylong *c,
long shi
);
static void kar_sq(
verylong a,
verylong *c,
long shi
);
static long zxxeucl(
verylong ain,
verylong nin,
verylong *invv,
verylong *uu
);
static void mont_init_odd_power(
verylong a,
verylong *a_sq,
long m,
verylong **odd_a_power
);
static void init_odd_power(
verylong a,
verylong n,
verylong *a_sq,
long m,
verylong **odd_a_power
);
static void zsmexp(
long a,
verylong e,
verylong *bb
);
static void zpshift(
void
);
static long zrootnewton(
verylong a,
verylong *b,
long n
);
static long ph1set(
verylong n,
verylong *rap,
verylong *alpha,
verylong *mu,
verylong *x,
verylong *f
);
static long ph1exp(
verylong n,
verylong *x,
verylong e,
verylong rap,
verylong *f
);
static long ph1(
verylong n,
verylong *x,
long m,
verylong rap,
verylong *f
);
static long ph1toph2(
verylong n,
verylong *x,
verylong *y,
verylong *ra,
verylong alpha,
verylong mu,
verylong *f
);
static long ph2squ(
verylong n,
verylong x1s,
verylong y1s,
verylong *x2,
verylong *y2,
verylong ra,
verylong *f
);
static long ph2mul(
verylong n,
verylong x1s,
verylong y1s,
verylong x2s,
verylong y2s,
verylong *x3,
verylong *y3,
verylong ra,
verylong *f
);
static long ph2exp(
verylong n,
verylong x1,
verylong yy1,
verylong *x2,
verylong *y2,
long e,
verylong ra,
verylong *f
);
static long ph2set(
verylong n,
verylong x,
verylong y,
long te,
verylong ra,
verylong *f
);
static void ph2prec(
verylong n,
long n1,
long j
);
static void ph2eval(
verylong n,
long ind,
long n1,
long j,
verylong *result
);
static long ph2(
verylong n,
verylong inx,
verylong iny,
long m,
long te,
verylong ra,
verylong *f
);
static void message(
char *str,
double t,
FILE *fp
);
#ifdef DOUBLES_LOW_HIGH
#define LO_WD 0
#define HI_WD 1
#else
#define LO_WD 1
#define HI_WD 0
#endif
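/*
LO_WD and HI_WD select which 32-bit half of an in-memory double holds
the low-order mantissa bits, so DOUBLES_LOW_HIGH should be defined on
little-endian targets.  A quick porting check one might compile
separately (illustrative, not part of LIP; assumes 32-bit unsigned
int, as the SINGLE_MUL code below does):

#include <stdio.h>
int main(void)
{
    union { double d; unsigned int w[2]; } u;
    u.d = 1.0;
    printf(u.w[0] == 0 ? "define DOUBLES_LOW_HIGH\n"
                       : "leave DOUBLES_LOW_HIGH undefined\n");
    return 0;
}
*/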
/* globals for Montgomery multiplication */
static verylong zn = 0;
static verylong zoldzn = 0;
static verylong zr = 0;
static verylong zrr = 0;
static verylong zrrr = 0;
static verylong znm = 0;
static long znotinternal = 0;
static long zntop = 0;
#ifdef PLAIN_OR_KARAT
static long zninv1 = 0;
static long zninv2 = 0;
#else
static long zninv = 0;
#endif
#ifdef SINGLE_MUL
#define ExtractHiLo(Hi,Lo,x) \
{ \
double t=x+(4503599627370496.0); \
unsigned int *it = (unsigned int *)&t; \
Lo = it[LO_WD]; \
Hi = ((it[HI_WD]<<6)|(it[LO_WD]>>26)); \
Lo &= 0x3FFFFFF; \
Hi &= 0x3FFFFFF; \
}
#define zaddmulp(a,b,d,t) \
{ \
double __lx = ((double) ((*a) + (*t))) + ((double) b)*((double) d); \
register long __lhi = 0, __llo = 0;\
ExtractHiLo(__lhi,__llo,__lx);\
(*a) = __llo;\
(*t) = __lhi;\
}
#define zaddmulpsq(a,b,t) \
{ \
double __lx = ((double) (*a)) + ((double) b)*((double) b); \
register long __lhi = 0, __llo = 0;\
ExtractHiLo(__lhi,__llo,__lx);\
(*a) = __llo;\
(*t) = __lhi;\
}
typedef union { double d; unsigned long rep[2]; } d_or_rep;
#define FetchHiLo(hi,lo,x) \
{ \
d_or_rep _l_xx; \
_l_xx.d = x; \
hi = _l_xx.rep[HI_WD]; \
lo = _l_xx.rep[LO_WD]; \
}
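/*
Both ExtractHiLo and FetchHiLo rely on 4503599627370496.0 = 2^52:
adding it to an integer-valued double x with 0 <= x < 2^52 forces the
bits of x into the low end of the 52-bit mantissa, so the two 32-bit
words of the double can be read back as plain integers.  With
x = hi*2^26 + lo, the low word holds lo plus the bottom 6 bits of hi,
and the remaining 20 bits of hi sit in the low bits of the high word,
hence

    lo = low_word & 0x3FFFFFF
    hi = ((high_word << 6) | (low_word >> 26)) & 0x3FFFFFF

The operands in zaddmulp never reach 2^52 (b*d + a + t with all four
inputs below 2^26), so no mantissa bits are lost.
*/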
static
void zaddmul(
long ams,
long *ama,
long *amb
)
{
register long carry = 0;
register long i = (*amb++);
register double dams = (double) ams;
register double xx;
register double yy;
register unsigned long lo_wd, lo;
register unsigned long hi_wd, hi;
xx = ((double) (*amb))*dams + 4503599627370496.0;
for (; i > 1; i--)
{
yy = ((double) (*(++amb)))*dams +4503599627370496.0;
FetchHiLo(hi_wd, lo_wd, xx);
lo = lo_wd & 0x3FFFFFF;
hi = ((hi_wd<<6)|(lo_wd>>26)) & 0x3FFFFFF;
lo = lo + (*ama) + carry;
*ama = lo & 0x3FFFFFF;
carry = hi + (lo >> 26);
ama++;
xx = yy;
}
FetchHiLo(hi_wd, lo_wd, xx);
lo = lo_wd & 0x3FFFFFF;
hi = ((hi_wd<<6)|(lo_wd>>26)) & 0x3FFFFFF;
lo = lo + (*ama) + carry;
*ama = lo & 0x3FFFFFF;
carry = hi + (lo >> 26);
ama++;
*ama += carry;
}
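/*
zaddmul adds ams times the number at amb into the one at ama, in radix
2^26; the first word of amb is its digit count.  The loop is software
pipelined: the product for the next digit (yy) is issued before the
current one (xx) is unpacked, hiding floating-point latency.  Each
step maintains

    ama[i] + 2^26 * carry' = ams * amb[i] + old ama[i] + carry

and the final carry lands in the word past the last digit of ama,
which callers must have allocated.
*/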
static
void zmmulp(long *ama)
{
register long carry = 0;
register long i = (*zn);
register long *amb = (zn+1);
register double dams = (double) ((zninv*(*ama))&RADIXM);
double xx;
register double yy;
register unsigned long lo_wd, lo;
register unsigned long hi_wd, hi;
xx = ((double) (*amb))*dams + 4503599627370496.0;
for (; i > 1; i--)
{
yy = ((double) (*(++amb)))*dams +4503599627370496.0;
FetchHiLo(hi_wd, lo_wd, xx);
lo = lo_wd & 0x3FFFFFF;
hi = ((hi_wd<<6)|(lo_wd>>26)) & 0x3FFFFFF;
lo = lo + (*ama) + carry;
*ama = lo & 0x3FFFFFF;
carry = hi + (lo >> 26);
ama++;
xx = yy;
}
FetchHiLo(hi_wd, lo_wd, xx);
lo = lo_wd & 0x3FFFFFF;
hi = ((hi_wd<<6)|(lo_wd>>26)) & 0x3FFFFFF;
lo = lo + (*ama) + carry;
*ama = lo & 0x3FFFFFF;
carry = hi + (lo >> 26);
ama++;
if (((*ama += carry) & RADIX) > 0 )
{
(*ama++) &= RADIXM;
(*ama)++;
}
}
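/*
zmmulp is one digit step of Montgomery reduction: dams is chosen as
zninv * ama[0] mod RADIX, where zninv, filled in by the Montgomery
setup code, holds -zn^{-1} mod RADIX, so adding dams * zn makes the
low digit of ama zero.  The caller can then drop that digit, dividing
by RADIX without changing the residue mod zn; the trailing fix-up
propagates a final carry so the top digits stay normalized.
*/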
static
void zaddmulsq(
long ams,
long *ama,
long *amb
)
{
register long carry = 0;
register long i = ams;
register double dams = (double) (*amb);
double xx;
register double yy;
register unsigned long lo_wd,lo;
register unsigned long hi_wd,hi;
if (!ams) return;
xx = ((double) (*(++amb)))*dams + 4503599627370496.0;
for (; i > 1; i--)
{
yy = ((double) (*(++amb)))*dams + 4503599627370496.0;
FetchHiLo(hi_wd, lo_wd, xx);
lo = lo_wd & 0x3FFFFFF;
hi = ((hi_wd <<6)|(lo_wd >>26)) & 0x3FFFFFF;
lo = lo + (*ama) + carry;
*ama = lo & 0x3FFFFFF;
carry = hi + (lo >> 26);
ama++;
xx = yy;
}
if (i==1)
{
FetchHiLo(hi_wd, lo_wd, xx);
lo = lo_wd & 0x3FFFFFF;
hi = ((hi_wd <<6)|(lo_wd >>26)) & 0x3FFFFFF;
lo = lo + (*ama) + carry;
*ama = lo & 0x3FFFFFF;
carry = hi + (lo >> 26);
ama++;
}
*ama += carry;
}
static
void zsubmul(
long ams,
long *ama,
long *amb
)
{
register long carry = RADIX;
register long i = (*amb++);
register double dams = (double) (RADIX-ams);
double xx;
register double yy;
register unsigned long lo_wd,lo;
register unsigned long hi_wd,hi;
xx = ((double) (*amb))*dams + 4503599627370496.0;
for (; i > 1; i--)
{
yy = ((double) (*(amb+1)))*dams + 4503599627370496.0;
FetchHiLo(hi_wd, lo_wd, xx);
lo = lo_wd & 0x3FFFFFF;
hi = ((hi_wd<<6)|(lo_wd>>26)) & 0x3FFFFFF;
lo = lo + (*ama) + carry;
*ama = lo & 0x3FFFFFF;
carry = hi + (lo >> 26);
ama++;
carry += RADIXM - (*(amb++));
xx = yy;
}
FetchHiLo(hi_wd, lo_wd, xx);
lo = lo_wd & 0x3FFFFFF;
hi = ((hi_wd <<6)|(lo_wd >>26)) & 0x3FFFFFF;
lo = lo + (*ama) + carry;
*ama = lo & 0x3FFFFFF;
carry = hi + (lo >> 26);
ama++;
carry += RADIXM - (*amb);
*ama += carry - RADIX;
}
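/*
zsubmul computes a -= r * b without a separate borrow pipeline: each
digit uses -r*b[i] = (RADIX - r)*b[i] - RADIX*b[i], the -RADIX*b[i]
term is deferred into the next digit's carry as the complement
RADIXM - b[i], and the running bias (carry starts at RADIX, the final
step subtracts RADIX again) telescopes to zero.  Every intermediate
value thus stays non-negative while the net effect is exactly -r*b;
as in zaddmul, the top word of a is left unnormalized.
*/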
#elif KARAT
/*
This definition of zaddmulp and zaddmulpsq assumes nothing
*/
#define zaddmulp( a, b, d, t) { \
register unsigned long b1 = b & RADIXROOTM; \
register unsigned long d1 = d & RADIXROOTM; \
register unsigned long bd,b1d1,m,aa= (*a) + (*t); \
register unsigned long ld = (d>>NBITSH); \
register unsigned long lb = (b>>NBITSH); \
\
bd=lb*ld; \
b1d1=b1*d1; \
m=(lb+b1)*(ld+d1) - bd - b1d1; \
aa += ( b1d1+ ((m&RADIXROOTM)<<NBITSH)); \
(*t) = (aa >> NBITS) + bd + (m>>NBITSH); \
(*a) = aa & RADIXM; \
}
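/*
Digit-level Karatsuba: writing b = lb*2^NBITSH + b1 and
d = ld*2^NBITSH + d1,

    b*d = bd*2^NBITS + m*2^NBITSH + b1d1

with bd = lb*ld, b1d1 = b1*d1, and
m = (lb+b1)*(ld+d1) - bd - b1d1 = lb*d1 + b1*ld.  Three half-width
multiplications instead of the four used by the schoolbook zaddmulp
in the PLAIN branch, at the cost of a few extra additions.
*/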
#define zaddmulpsq(_a, _b, _t) \
{ \
register long lb = (_b); \
register long b1 = (_b) & RADIXROOTM; \
register long aa = *(_a) + b1 * b1; \
\
b1 = (b1 * (lb >>= NBITSH) << 1) + (aa >> NBITSH); \
aa = (aa & RADIXROOTM) + ((b1 & RADIXROOTM) << NBITSH); \
*(_t) = lb * lb + (b1 >> NBITSH) + (aa >> NBITS); \
*(_a) = (aa & RADIXM); \
}
#define zmmulp(mmpa) \
{ \
register verylong lmmpa = (mmpa); \
register verylong lmmpb = (zn); \
register long lmmi = (*lmmpa) >> NBITSH; \
register long lmmd = (*lmmpa) & RADIXROOTM; \
long zmmtemp = 0; \
\
lmmd = (zninv1 * lmmd + (((zninv1 * lmmi + zninv2 * lmmd) & RADIXROOTM) << NBITSH)) & RADIXM; \
for (lmmi = *lmmpb++; lmmi > 0; lmmi--) \
{ \
zaddmulp(lmmpa, *lmmpb, lmmd, &zmmtemp); \
lmmpa++; \
lmmpb++; \
} \
if (((*lmmpa += zmmtemp) & RADIX) > 0) \
{ \
(*lmmpa++) &= RADIXM; \
(*lmmpa)++; \
} \
}
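/*
Same Montgomery digit step as in the other branches, but here the
factor -zn^{-1} * ama[0] mod RADIX is assembled from the half-width
pieces zninv1 (low) and zninv2 (high) of the precomputed inverse, so
that every multiplication keeps to NBITSH-bit operands, matching the
half-width digit products used by zaddmulp above.
*/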
#define zaddmul(ams, ama, amb) \
{ \
register long lami; \
register long lams = (ams); \
register verylong lama = (ama); \
register verylong lamb = (amb); \
long lamcarry = 0; \
\
for (lami = (*lamb++); lami > 0; lami--) \
{ \
zaddmulp(lama, *lamb, lams, &lamcarry); \
/* Be careful, the last lama is unnormalized */ \
lama++; \
lamb++; \
} \
*lama += lamcarry; \
}
#define zaddmulsq(sql, sqa, sqb) \
{ \
register long lsqi = (sql); \
register long lsqs = *(sqb); \
register verylong lsqa = (sqa); \
register verylong lsqb = (sqb); \
long lsqcarry = 0; \
\
lsqb++; \
for (; lsqi > 0; lsqi--) \
{ \
zaddmulp(lsqa, *lsqb, lsqs, &lsqcarry); \
lsqa++; \
lsqb++; \
} \
*lsqa += lsqcarry; \
/* Be careful, the last lsqa is unnormalized */ \
}
static void
zsubmul(
long r,
verylong a,
verylong b
)
{
register long rd = RADIX - r;
register long i;
long carry = RADIX;
for (i = (*b++); i > 0; i--)
{
zaddmulp(a, *b, rd, &carry);
a++;
carry += RADIXM - (*b++);
}
*a += carry - RADIX; /* unnormalized */
}
#elif PLAIN
/*
This definition of zaddmulp and zaddmulpsq assumes nothing
*/
#define zaddmulp(_a, _b, _d, _t) \
{ \
register long lb = (_b); \
register long ld = (_d); \
register long b1 = (_b) & RADIXROOTM; \
register long d1 = (_d) & RADIXROOTM; \
register long aa = *(_a) + b1 * d1; \
\
b1 = b1 * (ld >>= NBITSH) + d1 * (lb >>= NBITSH) + (aa >> NBITSH); \
aa = (aa & RADIXROOTM) + ((b1 & RADIXROOTM) << NBITSH) + *(_t); \
*(_t) = ld * lb + (b1 >> NBITSH) + (aa >> NBITS); \
*(_a) = (aa & RADIXM); \
}
#define zaddmulpsq(_a, _b, _t) \
{ \
register long lb = (_b); \
register long b1 = (_b) & RADIXROOTM; \
register long aa = *(_a) + b1 * b1; \
\
b1 = (b1 * (lb >>= NBITSH) << 1) + (aa >> NBITSH); \
aa = (aa & RADIXROOTM) + ((b1 & RADIXROOTM) << NBITSH); \
*(_t) = lb * lb + (b1 >> NBITSH) + (aa >> NBITS); \
*(_a) = (aa & RADIXM); \
}
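/*
Schoolbook digit product: with b = lb*2^NBITSH + b1 and
d = ld*2^NBITSH + d1,

    b*d = lb*ld*2^NBITS + (lb*d1 + ld*b1)*2^NBITSH + b1*d1,

four half-width multiplications.  zaddmulpsq exploits b = d: the two
cross products coincide, so it doubles b1*lb and gets away with three.
*/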
#define zmmulp(mmpa) \
{ \
register verylong lmmpa = (mmpa); \
register verylong lmmpb = (zn); \
register long lmmi = (*lmmpa) >> NBITSH; \
register long lmmd = (*lmmpa) & RADIXROOTM; \
long zmmtemp = 0; \
\
lmmd = (zninv1 * lmmd + (((zninv1 * lmmi + zninv2 * lmmd) & RADIXROOTM) << NBITSH)) & RADIXM; \
for (lmmi = *lmmpb++; lmmi > 0; lmmi--) \
{ \
zaddmulp(lmmpa, *lmmpb, lmmd, &zmmtemp); \
lmmpa++; \
lmmpb++; \
} \
if (((*lmmpa += zmmtemp) & RADIX) > 0) \
{ \
(*lmmpa++) &= RADIXM; \
(*lmmpa)++; \
} \
}
#define zaddmul(ams, ama, amb) \
{ \
register long lami; \
register long lams = (ams); \
register verylong lama = (ama); \
register verylong lamb = (amb); \
long lamcarry = 0; \
\
for (lami = (*lamb++); lami > 0; lami--) \
{ \
zaddmulp(lama, *lamb, lams, &lamcarry); \
/* Be careful, the last lama is unnormalized */ \
lama++; \
lamb++; \
} \
*lama += lamcarry; \
}
#define zaddmulsq(sql, sqa, sqb) \
{ \
register long lsqi = (sql); \
register long lsqs = *(sqb); \
register verylong lsqa = (sqa); \
register verylong lsqb = (sqb); \
long lsqcarry = 0; \
\
lsqb++; \
for (; lsqi > 0; lsqi--) \
{ \
zaddmulp(lsqa, *lsqb, lsqs, &lsqcarry); \
lsqa++; \
lsqb++; \
} \
*lsqa += lsqcarry; \
/* Be careful, the last lsqa is unnormalized */ \
}
static void
zsubmul(
long r,
verylong a,
verylong b
)
{
register long rd = RADIX - r;
register long i;
long carry = RADIX;
for (i = (*b++); i > 0; i--)
{
zaddmulp(a, *b, rd, &carry);
a++;
carry += RADIXM - (*b++);
}
*a += carry - RADIX; /* unnormalized */
}
#else
/*
This definition of zaddmulp and zaddmulpsq presumes a two's complement
machine in which integer overflow is ignored and where double
precision arithmetic is fast. The 0.25 allows round to nearest
or round towards zero (value being rounded should be integer
except for roundoff.)
DO NOT USE FOR alpha because of fradix_inv (unless NBITS is small)
*/
static double fradix_inv = 1.0 / RADIX; /* Global constant */
#define zaddmulp(_a, _b, _d, _t) \
{ \
register long at = *(_a) + *(_t); \
register long aa = (at + (_b) * (_d)) & RADIXM; \
\
*(_t) = (long) (0.25 + fradix_inv * (((double) (at - aa)) \
+ ((double) (_b)) * ((double) (_d)))); \
*(_a) = aa; \
}
#define zaddmulpsq(_a, _b, _t) \
{ \
register long at = *(_a); \
register long aa = (at + (_b) * (_b)) & RADIXM; \
\
*(_t) = (long) (0.25 + fradix_inv * (((double) (at - aa)) \
+ ((double) (_b)) * ((double) (_b)))); \
*(_a) = aa; \
}
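/*
Why the recovery works: at + b*d is congruent to aa mod RADIX by
construction, so (at - aa) + b*d is an exact multiple of RADIX, and
the true carry is that quantity divided by RADIX.  The division is
done as a multiplication by fradix_inv; for moderate NBITS any
floating-point rounding error is far smaller than 0.25, so adding
0.25 and truncating to long yields the exact carry under round to
nearest or round toward zero.  For large NBITS the error can grow,
hence the alpha warning above.
*/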
#define zmmulp(mmpa) \
{ \
register verylong lmmpa = (mmpa); \
register verylong lmmpb = (zn); \
register long lmmi = (*lmmpa); \
register long lmmd; \
long zmmtemp = 0; \
\
lmmd = (zninv * lmmi) & RADIXM; \
for (lmmi = *lmmpb++; lmmi > 0; lmmi--) \
{ \
{ \
zaddmulp(lmmpa, *lmmpb, lmmd, &zmmtemp); \
lmmpa++; \
lmmpb++; \
} \
if (((*lmmpa += zmmtemp) & RADIX) > 0) \
{ \
(*lmmpa++) &= RADIXM; \
(*lmmpa)++; \
} \
}