
math64.h

From the full source of the well-known Helix RealPlayer media player for the Symbian mobile OS.

/* If your compiler can emit 64-bit instructions and your CPU supports
   them, define this. */
// #define USE_64BIT_INSNS

#define HAVE_PLATFORM_MACROS

/* Compute a * b / c, using 64-bit intermediate result */
static __inline__ int MulDiv64(int a, int b, int c)
{
	int res ;
#ifdef USE_64BIT_INSNS
	__asm__ volatile ("mulhd %0,%2,%3\n\t"
					  "divd %0,%0,%1"
					  : "=&r" (res) : "r" (c), "%r" (a), "r" (b) ) ;
#else
	res = (int)(((double)a*(double)b - (double)(c>>1)) / (double)c) ;
#endif
	return res ;
}
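/* A minimal portable sketch of the truncating a*b/c that the asm path
   computes, assuming a C99 'long long' ('MulDiv64_ref' is an illustrative
   name). E.g. MulDiv64_ref(1<<30, 3, 6) = (3LL<<30)/6 = 1<<29. */
#if 0
static __inline__ int MulDiv64_ref(int a, int b, int c)
{
	long long t = (long long)a * (long long)b ;	/* exact 64-bit product */
	return (int)(t / c) ;	/* truncating signed division */
}
#endif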

/* Compute (a * b) >> 32, using 64-bit intermediate result */
static __inline__ int MulShift32(int a, int b)
{
	int res ;
	__asm__ ("mulhw %0,%1,%2" : "=r" (res) : "%r" (a) , "r" (b) ) ;
	return res ;
}
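/* Equivalently, in portable C (a sketch assuming 'long long'):
   MulShift32(a,b) == (int)(((long long)a * b) >> 32); e.g.
   MulShift32(1<<16, 1<<16) = 1. */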

/* Compute (a * b) >> 30, using 64-bit intermediate result */
static __inline__ int MulShift30(int a, int b)
{
	int res ;
#ifdef USE_64BIT_INSNS
	__asm__ ("mulhd %0,%1,%2\n\t"
			 "srd %0,30" : "=r" (res) : "%r" (a), "r" (b) ) ;
#else
	res = MulShift32(a,b) << 2 ;
#endif
	return res ;
}

/* Compute (a * b) >> 31, using 64-bit intermediate result */
static __inline__ int MulShift31(int a, int b)
{
	int res ;
#ifdef USE_64BIT_INSNS
	__asm__ ("mulhd %0,%1,%2\n\t"
			 "srd %0,31" : "=r" (res) : "%r" (a), "r" (b) ) ;
#else
	res = MulShift32(a,b) << 1 ;
#endif
	return res ;
}
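/* Note on the two fallbacks above: shifting the MulShift32 result left
   clears the reconstructed low bit(s). E.g. for a = b = 3<<15, the exact
   (a*b)>>30 is 9, but MulShift32(a,b)<<2 yields 8. This is usually
   acceptable in fixed-point DSP code where operands carry headroom. */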

/* Compute (a * b) >> n, using 64-bit intermediate result */
static __inline__ int MulShiftN(int a, int b, int n)
{
	int res ;

#ifdef USE_64BIT_INSNS
	__asm__ ("mulhd %0,%1,%2\n\t"
			 "srd %0,%3" : "=&r" (res) : "%r" (a), "r" (b), "r" (n) ) ;
#else
	unsigned int temp ;

	__asm__ ("mullw %0,%1,%2" : "=r" (temp) : "%r" (a) , "r" (b) ) ;

	res = temp >> n ;

	__asm__ ("mulhw %0,%1,%2" : "=r" (temp) : "%r" (a) , "r" (b) ) ;

	res |= (temp << (32-n)) ;
#endif
	return res ;
}
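/* A portable sketch of the same computation, assuming 'long long' and
   0 < n < 32 (n == 0 would make the (32-n) shift above undefined in C): */
#if 0
static __inline__ int MulShiftN_ref(int a, int b, int n)
{
	return (int)(((long long)a * (long long)b) >> n) ;
}
#endif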

#ifdef TIMING
#include <stdio.h>	/* for the printf in TOCK() */
static unsigned int tick,tock ;
inline void fTICK()
{ register int t ; __asm__ ( "mftb %0" : "=r" (t) ) ; tick = t ; }
inline void fTOCK()
{ register int t ; __asm__ ( "mftb %0" : "=r" (t) ) ; tock = t ;
  if (tock < tick) {
  	tock += 65536 ; tick -= 65536 ; 
  }
}

#define TICK() fTICK()
#define TOCK(nsamples) ( fTOCK() , printf("cycles = %4.1f\n",4.0f*(tock-tick)/(float)(nsamples)), \
	tock-tick )

#endif // TIMING
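/* A usage sketch for the timing macros; 'ProcessBlock' and 'buf' are
   hypothetical names, and the 4.0f factor in TOCK suggests a timebase
   that ticks once every four core cycles:

	TICK() ;
	ProcessBlock(buf, nsamples) ;
	TOCK(nsamples) ;
*/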

#endif //  defined(__GNUC__) && defined(__POWERPC__)

///////////////////////////////////////////////////////////////////////////////////////
// EVC3.0 / ARM
///////////////////////////////////////////////////////////////////////////////////////

#if (defined(_ARM) && defined(_MSC_VER))

/* EVC does not allow us to use inline assembly. Thus, you'll only see prototypes here.
 */

#define HAVE_PLATFORM_MACROS

/* Compute a * b / c, using 64-bit intermediate result. Since the ARM does not have
   a division instruction, we code a totally lame C version here. TODO wschildbach
 */
static __inline int MulDiv64(int a, int b, int c)
{
  __int64 t = (__int64)a * (__int64)b ;
  return (int)(t / c) ;
}

/* Compute (a * b) >> 32, using 64-bit intermediate result */
#ifdef __cplusplus
extern "C" {
#endif // __cplusplus
int MulShift32(int a, int b);

/* Compute (a * b) >> 31, using 64-bit intermediate result */
int MulShift31(int a, int b);

/* Compute (a * b) >> 30, using 64-bit intermediate result */
int MulShift30(int a, int b);

/* Compute (a * b) >> n, using 64-bit intermediate result */
int MulShiftN(int a, int b, int n);
#ifdef __cplusplus
}
#endif // __cplusplus
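/* A portable sketch of what the prototypes above are expected to compute,
   assuming MSVC's __int64; the shipped implementations presumably live in
   a separate assembly source: */
#if 0
int MulShift32(int a, int b)
{
	return (int)(((__int64)a * (__int64)b) >> 32) ;
}
#endif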

#endif // (defined(_ARM) && defined(_MSC_VER))

///////////////////////////////////////////////////////////////////////////////////////
// GNUC / ARM
///////////////////////////////////////////////////////////////////////////////////////

#if (defined(_ARM) && defined(__GNUC__))

#define HAVE_PLATFORM_MACROS

#if defined(__MARM_THUMB__)

/* Compute a * b / c, using 64-bit intermediate result. Since the ARM does not have
   a division instruction, we code a totally lame C version here. TODO wschildbach
 */
static __inline int MulDiv64(int a, int b, int c)
{
  long long t = (long long)a * (long long)b ;
  return (int)(t / c) ;
}

/* Compute (a * b) >> 32, using 64-bit intermediate result */
static __inline__ int MulShift32(int x, int y)
{
    SymInt64 a = x;
    SymInt64 b = y;
    a *= b;
    a >>= 32;
    return a.Low();
}

/* Compute (a * b) >> 31, using 64-bit intermediate result */
static __inline__ int MulShift31(int x, int y)
{
    SymInt64 a = x;
    SymInt64 b = y;
    a *= b;
    a >>= 31;
    return a.Low();
}

/* Compute (a * b) >> 30, using 64-bit intermediate result */
static __inline__ int MulShift30(int x, int y)
{
    SymInt64 a = x;
    SymInt64 b = y;
    a *= b;
    a >>= 30;
    return a.Low();
}

/* Compute (a * b) >> n, using 64-bit intermediate result */
static __inline__ int MulShiftN(int x, int y, int n)
{
    SymInt64 a = x;
    SymInt64 b = y;
    a *= b;
    a >>= n;
    return a.Low();
}
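/* SymInt64 is assumed here to be a Symbian-style 64-bit integer class
   defined elsewhere in this source tree, with operator*=, operator>>= and
   a Low() accessor returning the least significant 32 bits. */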

#define HAVE_FASTABS
static __inline int FASTABS(int x)
{
    if (x >= 0)
	return x;
    return -x;
}

#else

/* Compute a * b / c, using 64-bit intermediate result. Since the ARM does not have
   a division instruction, we code a totally lame C version here. TODO wschildbach
 */
static __inline int MulDiv64(int a, int b, int c)
{
  long long t = (long long)a * (long long)b ;
  return (int)(t / c) ;
}

/* Compute (a * b) >> 32, using 64-bit intermediate result */
static __inline__ int MulShift32(int x, int y)
{
  int zlow ;
  __asm__ volatile ("smull %0,%1,%2,%3" : "=&r" (zlow), "=r" (x) : "%r" (y), "1" (x)) ;
  return x ;
}

/* Compute (a * b) >> 31, using 64-bit intermediate result */
static __inline__ int MulShift31(int x, int y)
{
  int zlow ;
  __asm__ volatile ("smull %0,%1,%2,%3" : "=&r" (zlow), "=r" (x) : "%r" (y), "1" (x)) ;
  __asm__ volatile ("mov %0,%1, lsr #31" : "=r" (zlow) : "r" (zlow)) ;
  __asm__ volatile ("orr %0,%1,%2, lsl #1" : "=r" (x) : "r" (zlow), "r" (x)) ;
  return x ;
}

/* Compute (a * b) >> 30, using 64-bit intermediate result */
static __inline__ int MulShift30(int x, int y)
{
  int zlow ;
  __asm__ volatile ("smull %0,%1,%2,%3" : "=&r" (zlow), "=r" (x) : "%r" (y), "1" (x)) ;
  __asm__ volatile ("mov %0,%1, lsr #30" : "=r" (zlow) : "r" (zlow)) ;
  __asm__ volatile ("orr %0,%1,%2, lsl #2" : "=r" (x) : "r" (zlow), "r" (x)) ;
  return x ;
}

/* Compute (a * b) >> n, using 64-bit intermediate result */
static __inline__ int MulShiftN(int x, int y, int n)
{
  int zlow ;
  __asm__ volatile ("smull %0,%1,%2,%3" : "=&r" (zlow), "=r" (x) : "%r" (y), "1" (x)) ;
  __asm__ volatile ("mov %0,%1, lsr %2" : "=r" (zlow) : "r" (zlow), "r" (n)) ;
  __asm__ volatile ("orr %0,%1,%2, lsl %3" : "=r" (x) : "r" (zlow), "r" (x), "r" (32-n)) ;
  return x ;
}
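/* How the three instructions above combine: smull leaves the 64-bit
   product split as {high word in x, low word in zlow}; the mov/orr pair
   then forms (zlow >> n) | (x << (32-n)), i.e. bits n..n+31 of the
   product. E.g. for x = y = 1<<14 and n = 8 the product is 1<<28, so the
   low word contributes (1<<28)>>8 = 1<<20 and the high word contributes 0.
   As with the other MulShiftN variants, 0 < n < 32 is assumed. */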

#define HAVE_FASTABS
static __inline int FASTABS(int x)
{
  int s;

  // s = x ^ (x >> 31)
  __asm__ volatile ("eor %0, %1, %1, asr #31" : "=r" (s) : "r" (x)) ;
  // x = s - (x >> 31)
  __asm__ volatile ("sub %0, %1, %2, asr #31" : "=r" (x) : "r" (s), "r" (x)) ;

  return x;
}

#endif // defined(__MARM_THUMB__)

#endif // (defined(_ARM) && defined(__GNUC__))

///////////////////////////////////////////////////////////////////////////////////////
// ARM_ADS / ARM
///////////////////////////////////////////////////////////////////////////////////////

#if defined(ARM_ADS)

static __inline int MulShift32(int x, int y)
{
    /* JR - important rules for smull RdLo, RdHi, Rm, Rs:
     *        RdHi and Rm can't be the same register
     *        RdLo and Rm can't be the same register
     *        RdHi and RdLo can't be the same register
     *      for MulShift32(x,y), x = R0 and y = R1
     *      therefore we do y*x instead of x*y so that top 32 can go in R0 (the return register)
     */
    int zlow;
    __asm {
        smull   zlow, x, y, x
    }

    return x;
}

static __inline int MulShift31(int x, int y)
{
    /* JR - store result in z (instead of reusing zlow) so that the compiler optimizes properly */
    int zlow, z;

    __asm {
        smull   zlow, x, y, x
        mov     zlow, zlow, lsr #31
        orr     z, zlow, x, lsl #1
    }

    return z;
}

static __inline int MulShift30(int x, int y)
{
    /* JR - store result in z (instead of reusing zlow) so that the compiler optimizes properly */
    int zlow, z;

    __asm {
        smull   zlow, x, y, x
        mov     zlow, zlow, lsr #30
        orr     z, zlow, x, lsl #2
    }

    return z;
}

#define HAVE_FASTABS
static __inline int FASTABS(int x) 
{
	int s;

	__asm {
		 eor	s, x, x, asr #31
		 sub	x, s, x, asr #31 
	}

	return x;
}

#endif // defined(ARM_ADS)

///////////////////////////////////////////////////////////////////////////////////////
// platform independent implementations
///////////////////////////////////////////////////////////////////////////////////////

#ifndef ASSERT
#define ASSERT(x)
#endif

#ifndef TICK
#define TICK()
#endif

#ifndef TOCK
#define TOCK(nsamples) 1
#endif

#ifndef HAVE_FASTABS
static __inline int FASTABS(int x) 
{
	int sign;

	sign = x >> 31;
	x ^= sign;
	x -= sign;

	return x;
}
#endif
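/* Why the branchless idiom works: 'sign' is 0 when x >= 0 and all ones
   (-1) when x < 0, so the xor/subtract pair leaves non-negative x alone
   and computes ~x + 1 = -x otherwise. A minimal self-test sketch: */
#if 0
#include <assert.h>
static void FASTABS_selftest(void)
{
	assert(FASTABS(0) == 0) ;
	assert(FASTABS(-5) == 5) ;		/* sign = -1: (-5 ^ -1) - (-1) = 4 + 1 */
	assert(FASTABS(12345) == 12345) ;
}
#endif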
