// zz.h (excerpt) -- from NTL, a high-performance, portable C++ library
// providing data structures and algorithms for manipulating arbitrary-length
// integers, and for vectors, matrices, and polynomials over the integers and
// over finite fields.

}

#endif


#elif (!defined(NTL_CLEAN_SPMM))


/*
 * The default MulMod code.
 */
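
// The quotient q is an estimate of floor(a*b/n) obtained in double-precision
// arithmetic; rounding can make it off by one in either direction, so
// res = a*b - q*n lies in (-n, 2n) and a single correction step brings it
// into [0, n).  The branch-free variant relies on the arithmetic right shift
// of the sign bit: (res >> (NTL_BITS_PER_LONG-1)) is all ones when res is
// negative and zero otherwise, so "res += (res >> (NTL_BITS_PER_LONG-1)) & n"
// adds n exactly when res < 0.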

static inline long MulMod(long a, long b, long n)
{
   long q, res;

   q  = (long) ((((double) a) * ((double) b)) / ((double) n)); 
   res = a*b - q*n;
#if (NTL_ARITH_RIGHT_SHIFT && defined(NTL_AVOID_BRANCHING))
   res += (res >> (NTL_BITS_PER_LONG-1)) & n;
   res -= n;
   res += (res >> (NTL_BITS_PER_LONG-1)) & n;
#else
   if (res >= n)
      res -= n;
   else if (res < 0)
      res += n;
#endif
   return res;
}

static inline long MulMod(long a, long b, long n, double ninv)
{
   long q, res;

   q  = (long) ((((double) a) * ((double) b)) * ninv); 
   res = a*b - q*n;
#if (NTL_ARITH_RIGHT_SHIFT && defined(NTL_AVOID_BRANCHING))
   res += (res >> (NTL_BITS_PER_LONG-1)) & n;
   res -= n;
   res += (res >> (NTL_BITS_PER_LONG-1)) & n;
#else
   if (res >= n)
      res -= n;
   else if (res < 0)
      res += n;
#endif
   return res;
}


static inline long MulMod2(long a, long b, long n, double bninv)
{
   long q, res;

   q  = (long) (((double) a) * bninv);
   res = a*b - q*n;
#if (NTL_ARITH_RIGHT_SHIFT && defined(NTL_AVOID_BRANCHING))
   res += (res >> (NTL_BITS_PER_LONG-1)) & n;
   res -= n;
   res += (res >> (NTL_BITS_PER_LONG-1)) & n;
#else
   if (res >= n)
      res -= n;
   else if (res < 0)
      res += n;
#endif
   return res;
}

static inline long MulDivRem(long& qq, long a, long b, long n, double bninv)
{
   long q, res;

   q  = (long) (((double) a) * bninv);
   res = a*b - q*n;
   if (res >= n) {
      res -= n;
      q++;
   } else if (res < 0) {
      res += n;
      q--;
   }

   qq = q;
   return res;
}

#else

/*
 * NTL_CLEAN_SPMM set: these versions of MulMod are completely portable,
 * assuming IEEE floating point arithmetic.
 */
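
// Portability here comes from doing the product and the correction in
// unsigned arithmetic: unsigned long wraps around modulo 2^NTL_BITS_PER_LONG
// (no signed-overflow undefined behavior), and negative intermediate values
// are detected by testing the top bit (res >> (NTL_BITS_PER_LONG-1)) rather
// than by relying on an arithmetic right shift of a signed quantity.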

static inline long MulMod(long a, long b, long n)
{  
   long q;
   unsigned long res;

   q  = (long) ((((double) a) * ((double) b)) / ((double) n)); 

   res = ((unsigned long) a)*((unsigned long) b) - 
         ((unsigned long) q)*((unsigned long) n);

#if (defined(NTL_AVOID_BRANCHING))
   res += (-(res >> (NTL_BITS_PER_LONG-1))) & ((unsigned long) n);
   res -= ((unsigned long) n);
   res += (-(res >> (NTL_BITS_PER_LONG-1))) & ((unsigned long) n);
#else
   if (res >> (NTL_BITS_PER_LONG-1))
      res += ((unsigned long) n);
   else if (((long) res) >= n)
      res -= ((unsigned long) n);
#endif
 
   return ((long) res);
}

static inline long MulMod(long a, long b, long n, double ninv)
{
   long q; 
   unsigned long res;

   q  = (long) ((((double) a) * ((double) b)) * ninv); 

   res = ((unsigned long) a)*((unsigned long) b) - 
         ((unsigned long) q)*((unsigned long) n);

#if (defined(NTL_AVOID_BRANCHING))
   res += (-(res >> (NTL_BITS_PER_LONG-1))) & ((unsigned long) n);
   res -= ((unsigned long) n);
   res += (-(res >> (NTL_BITS_PER_LONG-1))) & ((unsigned long) n);
#else
   if (res >> (NTL_BITS_PER_LONG-1))
      res += ((unsigned long) n);
   else if (((long) res) >= n)
      res -= ((unsigned long) n);
#endif
 
   return ((long) res);
}


static inline long MulMod2(long a, long b, long n, double bninv)
{
   long q;
   unsigned long res;

   q  = (long) (((double) a) * bninv);

   res = ((unsigned long) a)*((unsigned long) b) - 
         ((unsigned long) q)*((unsigned long) n);

#if (defined(NTL_AVOID_BRANCHING))
   res += (-(res >> (NTL_BITS_PER_LONG-1))) & ((unsigned long) n);
   res -= ((unsigned long) n);
   res += (-(res >> (NTL_BITS_PER_LONG-1))) & ((unsigned long) n);
#else
   if (res >> (NTL_BITS_PER_LONG-1))
      res += ((unsigned long) n);
   else if (((long) res) >= n)
      res -= ((unsigned long) n);
#endif

 
   return ((long) res);
}

static inline long MulDivRem(long& qq, long a, long b, long n, double bninv)
{
   long q; 
   unsigned long res;

   q  = (long) (((double) a) * bninv);
   res = ((unsigned long) a)*((unsigned long) b) - 
         ((unsigned long) q)*((unsigned long) n);

   if (res >> (NTL_BITS_PER_LONG-1)) {
      res += n;
      q--;
   } else if (((long) res) >= n) {
      res -= n;
      q++;
   }

   qq = q;
   return ((long) res);
}


#endif




// These MulMod routines (with preconditioning) are sometimes
// significantly faster.  There are four possible implementations:
//  - default: uses MulMod2 above (lots of floating point)
//  - NTL_SPMM_ULL: uses unsigned long long (if possible)
//  - NTL_SPMM_ASM: uses assembly language (if possible)
//  - NTL_SPMM_UL: uses only unsigned long arithmetic (portable, slower).
// An illustrative usage sketch appears after the closing #endif of these
// alternatives, below.

#if (!defined(NTL_SINGLE_MUL) && (defined(NTL_SPMM_ULL) || defined(NTL_SPMM_ASM)))


// unsigned long long / asm versions

typedef unsigned long mulmod_precon_t;

#define NTL_SPMM_VEC_T vec_ulong
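
// In these versions, PrepMulModPrecon(b, n, ninv) computes an integer
// approximation of b * 2^NTL_BITS_PER_LONG / n: it forms
// q ~ floor(b * 2^NTL_SP_NBITS / n) (corrected exactly via the remainder r),
// then shifts it up by NTL_BITS_PER_LONG - NTL_SP_NBITS bits.  MulModPrecon
// then recovers q ~ floor(a*b/n) with a single high-word multiplication
// MulHiUL(a, bninv); since this estimate never overshoots, only the
// "res >= n" correction is needed.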

#if (!defined(NTL_CLEAN_SPMM))

static inline unsigned long PrepMulModPrecon(long b, long n, double ninv)
{
   long q, r;

   q  = (long) ( (((double) b) * NTL_SP_FBOUND) * ninv ); 
   r = (b << NTL_SP_NBITS) - q*n;

#if (NTL_ARITH_RIGHT_SHIFT && defined(NTL_AVOID_BRANCHING))
   q += 1 + (r >> (NTL_BITS_PER_LONG-1)) + ((r - n) >> (NTL_BITS_PER_LONG-1));
#else
   if (r >= n)
      q++;
   else if (r < 0)
      q--;
#endif

   return ((unsigned long) q) << (NTL_BITS_PER_LONG - NTL_SP_NBITS);
}


#else

/*
 * clean int version -- this should be completely portable.
 */
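
// As in the clean MulMod code above, portability follows from doing the
// arithmetic on unsigned long values (well-defined wraparound) and testing
// the top bit explicitly instead of right-shifting a signed quantity.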


static inline unsigned long PrepMulModPrecon(long b, long n, double ninv)
{
   unsigned long q, r;

   q = (long) ( (((double) b) * NTL_SP_FBOUND) * ninv ); 
   r = (((unsigned long) b) << NTL_SP_NBITS ) - q * ((unsigned long) n);

#if (defined(NTL_AVOID_BRANCHING))
   q += 1UL - (r >> (NTL_BITS_PER_LONG-1)) - ((r - ((unsigned long) n)) >> (NTL_BITS_PER_LONG-1));
#else
   if (r >> (NTL_BITS_PER_LONG-1))
      q--;
   else if (((long) r) >= n)
      q++;
#endif

   return q << (NTL_BITS_PER_LONG - NTL_SP_NBITS);
}



#endif




#if (defined(NTL_SPMM_ULL))

// MulHiUL(a, b): high word of the double-length product a*b, computed via
// the double-width unsigned type NTL_ULL_TYPE.
static inline unsigned long MulHiUL(unsigned long a, unsigned long b)
{
   return (((NTL_ULL_TYPE)(a)) * ((NTL_ULL_TYPE)(b))) >> NTL_BITS_PER_LONG;
} 

#else 

// assembly code versions

#include <NTL/SPMM_ASM.h>


#endif





   

#if (!defined(NTL_CLEAN_SPMM))



static inline long MulModPrecon(long a, long b, long n, unsigned long bninv)
{
   long q, res;
   
   q = (long) MulHiUL(a, bninv);

   res = a*b - q*n;
#if (NTL_ARITH_RIGHT_SHIFT && defined(NTL_AVOID_BRANCHING))
   res -= n;
   res += (res >> (NTL_BITS_PER_LONG-1)) & n;
#else
   if (res >= n)
      res -= n;
#endif
   return res;
}


#else

static inline long MulModPrecon(long a, long b, long n, unsigned long bninv)
{
   unsigned long q, res;

   
   q = MulHiUL(a, bninv);

   res = ((unsigned long) a)*((unsigned long) b) - q*((unsigned long) n);

#if (defined(NTL_AVOID_BRANCHING))
   res -= ((unsigned long) n);
   res += (-(res >> (NTL_BITS_PER_LONG-1))) & ((unsigned long) n);
#else
   if (((long) res) >= n)
      res -= ((unsigned long) n);
#endif

   return (long) res;
}

#endif



#elif (!defined(NTL_SINGLE_MUL) && defined(NTL_SPMM_UL))

// plain, portable (but slower) int version

typedef long mulmod_precon_t;

#define NTL_SPMM_VEC_T vec_long
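
// In this version the preconditioner is kept as a plain long:
// PrepMulModPrecon returns q ~ floor(b * 2^NTL_SP_NBITS / n) without the
// final shift, and MulHiSP(a, bninv) below computes
// floor(a * bninv / 2^NTL_SP_NBITS) using only half-word products, so no
// double-length integer type is required.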


#if (!defined(NTL_CLEAN_SPMM))

static inline long PrepMulModPrecon(long b, long n, double ninv)
{
   long q, r;

   q  = (long) ( (((double) b) * NTL_SP_FBOUND) * ninv ); 
   r = (b << NTL_SP_NBITS) - q*n;


#if (NTL_ARITH_RIGHT_SHIFT && defined(NTL_AVOID_BRANCHING))
   q += 1 + (r >> (NTL_BITS_PER_LONG-1)) + ((r - n) >> (NTL_BITS_PER_LONG-1));
#else
   if (r >= n)
      q++;
   else if (r < 0)
      q--;
#endif

   return q;
}


#else

static inline long PrepMulModPrecon(long b, long n, double ninv)
{
   unsigned long q, r;

   q = (long) ( (((double) b) * NTL_SP_FBOUND) * ninv ); 
   r = (((unsigned long) b) << NTL_SP_NBITS ) - q * ((unsigned long) n);

#if (defined(NTL_AVOID_BRANCHING))
   q += 1UL - (r >> (NTL_BITS_PER_LONG-1)) - ((r - ((unsigned long) n)) >> (NTL_BITS_PER_LONG-1));
#else
   if (r >> (NTL_BITS_PER_LONG-1))
      q--;
   else if (((long) r) >= n)
      q++;
#endif

   return ((long) q);
}


#endif




// MulHiSP(b, d): floor(b*d / 2^NTL_SP_NBITS) for single-precision inputs,
// computed with a Karatsuba-style split into half-words so that no partial
// product overflows an unsigned long.
static inline long MulHiSP(long b, long d)
{
   unsigned long _b1 = b & ((1UL << (NTL_SP_NBITS/2)) - 1UL);
   unsigned long _d1 = d & ((1UL << (NTL_SP_NBITS/2)) - 1UL);
   unsigned long _bd,_b1d1,_m,_aa;
   unsigned long _ld = (d>>(NTL_SP_NBITS/2));
   unsigned long _lb = (b>>(NTL_SP_NBITS/2));

   _bd=_lb*_ld;
   _b1d1=_b1*_d1;
   _m=(_lb+_b1)*(_ld+_d1) - _bd - _b1d1;
   _aa = ( _b1d1+ ((_m&((1UL << (NTL_SP_NBITS/2)) - 1UL))<<(NTL_SP_NBITS/2)));
   return (_aa >> NTL_SP_NBITS) + _bd + (_m>>(NTL_SP_NBITS/2));
}


#if (!defined(NTL_CLEAN_SPMM))

static inline long MulModPrecon(long a, long b, long n, long bninv)
{

   long q, res;

   q = MulHiSP(a, bninv);

   res = a*b - q*n;
#if (NTL_ARITH_RIGHT_SHIFT && defined(NTL_AVOID_BRANCHING))
   res -= n;
   res += (res >> (NTL_BITS_PER_LONG-1)) & n;
#else
   if (res >= n)
      res -= n;
#endif
   return res;
}



#else


static inline long MulModPrecon(long a, long b, long n, long bninv)
{

   unsigned long q, res;

   q = MulHiSP(a, bninv);

   res = ((unsigned long) a)*((unsigned long) b) - q*((unsigned long) n);

#if (defined(NTL_AVOID_BRANCHING))
   res -= ((unsigned long) n);
   res += (-(res >> (NTL_BITS_PER_LONG-1))) & ((unsigned long) n);
#else
   if (((long) res) >= n)
      res -= ((unsigned long) n);
#endif

   return (long) res;
}


#endif




#else

// default, double version

typedef double mulmod_precon_t;

#define NTL_SPMM_VEC_T vec_double

static inline double PrepMulModPrecon(long b, long n, double ninv)
{
   return ((double) b) * ninv;
}
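
// Here the preconditioner is simply the double-precision quotient b/n
// (assuming, as elsewhere in this header, that ninv = 1/((double) n));
// MulMod2 then forms q = (long) (a * bninv) ~ floor(a*b/n) and applies the
// usual correction.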

static inline long MulModPrecon(long a, long b, long n, double bninv)
{
   return MulMod2(a, b, n, bninv);
}


#endif
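
// Illustrative sketch (not part of NTL): how the preconditioned routines
// above are meant to be used.  The scalar c is fixed across the loop, so
// PrepMulModPrecon is called once and its result is reused by every
// MulModPrecon call.  Here ninv is assumed to be 1/((double) n), the same
// convention MulMod(a, b, n, ninv) relies on above; the function name and
// signature are for illustration only.
static inline void ExampleScaleVecMod(long *x, long len, long c,
                                      long n, double ninv)
{
   mulmod_precon_t cpinv = PrepMulModPrecon(c, n, ninv);
   long i;

   for (i = 0; i < len; i++)
      x[i] = MulModPrecon(x[i], c, n, cpinv);
}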


long InvMod(long a, long n);
// computes a^{-1} mod n.  Error is raised if undefined.

long PowerMod(long a, long e, long n);
// computes a^e mod n, e >= 0
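
// For example, a modular quotient a/b mod n (with gcd(b, n) = 1) can be
// computed as MulMod(a, InvMod(b, n), n).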


NTL_CLOSE_NNS


#endif
