📄 qpel.h
字号:
case 14:
Ops_Copy->H_Pass(tmp, src, 17, stride, rounding);
Ops->V_Pass_Avrg_Up( dst, tmp, 16, stride, rounding);
break;
case 15:
Ops_Copy->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
break;
}
}
/* Quarter-pel interpolation of a 16x8 block.
 *
 * cur      - frame buffer receiving the interpolated block
 * refn     - reference frame the prediction is fetched from
 * refh     - scratch area (at least 16 x stride bytes); refv/refhv are
 *            unused here and kept only for a uniform call signature
 * x, y     - block position in full-pel units
 * dx, dy   - motion vector in quarter-pel units (the >>2 below recovers
 *            the full-pel offset; the low two bits select the filter)
 * stride   - line stride of all buffers
 * rounding - rounding control forwarded to the filter primitives
 */
static void __inline
interpolate16x8_quarterpel(uint8_t * const cur,
							uint8_t * const refn,
							uint8_t * const refh,
							uint8_t * const refv,
							uint8_t * const refhv,
							const uint32_t x, const uint32_t y,
							const int32_t dx, const int dy,
							const uint32_t stride,
							const uint32_t rounding)
{
	/* Full-pel source position: block origin plus the integer part of
	 * the quarter-pel motion vector. */
	const int32_t xFull = ((int)x*4 + dx) >> 2;
	const int32_t yFull = ((int)y*4 + dy) >> 2;
	/* Fractional bits of dx (low pair) and dy (high pair) pick one of
	 * the 16 interpolation modes. */
	const int32_t mode = (dx&3) | ((dy&3)<<2);
	const XVID_QP_FUNCS * const ops = xvid_QP_Funcs;

	uint8_t * const out = cur + y * stride + x;
	const uint8_t * const ref = refn + yFull * (int)stride + xFull;
	uint8_t * const scratch = refh; /* we need at least a 16 x stride scratch block */

	switch (mode) {
	case 0:
		/* Integer position: plain copy, done as two 8-wide halves. */
		transfer8x8_copy(out, ref, stride);
		transfer8x8_copy(out+8, ref+8, stride);
		break;
	case 1:
		ops->H_Pass_Avrg(out, ref, 8, stride, rounding);
		break;
	case 2:
		ops->H_Pass(out, ref, 8, stride, rounding);
		break;
	case 3:
		ops->H_Pass_Avrg_Up(out, ref, 8, stride, rounding);
		break;
	case 4:
		ops->V_Pass_Avrg_8(out, ref, 16, stride, rounding);
		break;
	case 5:
		/* Mixed modes: horizontal filter into scratch (one extra row
		 * for the vertical tap), then vertical filter into out. */
		ops->H_Pass_Avrg(scratch, ref, 9, stride, rounding);
		ops->V_Pass_Avrg_8(out, scratch, 16, stride, rounding);
		break;
	case 6:
		ops->H_Pass(scratch, ref, 9, stride, rounding);
		ops->V_Pass_Avrg_8(out, scratch, 16, stride, rounding);
		break;
	case 7:
		ops->H_Pass_Avrg_Up(scratch, ref, 9, stride, rounding);
		ops->V_Pass_Avrg_8(out, scratch, 16, stride, rounding);
		break;
	case 8:
		ops->V_Pass_8(out, ref, 16, stride, rounding);
		break;
	case 9:
		ops->H_Pass_Avrg(scratch, ref, 9, stride, rounding);
		ops->V_Pass_8(out, scratch, 16, stride, rounding);
		break;
	case 10:
		ops->H_Pass(scratch, ref, 9, stride, rounding);
		ops->V_Pass_8(out, scratch, 16, stride, rounding);
		break;
	case 11:
		ops->H_Pass_Avrg_Up(scratch, ref, 9, stride, rounding);
		ops->V_Pass_8(out, scratch, 16, stride, rounding);
		break;
	case 12:
		ops->V_Pass_Avrg_Up_8(out, ref, 16, stride, rounding);
		break;
	case 13:
		ops->H_Pass_Avrg(scratch, ref, 9, stride, rounding);
		ops->V_Pass_Avrg_Up_8(out, scratch, 16, stride, rounding);
		break;
	case 14:
		ops->H_Pass(scratch, ref, 9, stride, rounding);
		ops->V_Pass_Avrg_Up_8(out, scratch, 16, stride, rounding);
		break;
	case 15:
		ops->H_Pass_Avrg_Up(scratch, ref, 9, stride, rounding);
		ops->V_Pass_Avrg_Up_8(out, scratch, 16, stride, rounding);
		break;
	}
}
/* Quarter-pel interpolation of an 8x8 block.
 *
 * cur      - frame buffer receiving the interpolated block
 * refn     - reference frame the prediction is fetched from
 * refh     - scratch area (at least 16 x stride bytes); refv/refhv are
 *            unused here and kept only for a uniform call signature
 * x, y     - block position in full-pel units
 * dx, dy   - motion vector in quarter-pel units (the >>2 below recovers
 *            the full-pel offset; the low two bits select the filter)
 * stride   - line stride of all buffers
 * rounding - rounding control forwarded to the filter primitives
 */
static void __inline
interpolate8x8_quarterpel(uint8_t * const cur,
							uint8_t * const refn,
							uint8_t * const refh,
							uint8_t * const refv,
							uint8_t * const refhv,
							const uint32_t x, const uint32_t y,
							const int32_t dx, const int dy,
							const uint32_t stride,
							const uint32_t rounding)
{
	/* Full-pel source position: block origin plus the integer part of
	 * the quarter-pel motion vector. */
	const int32_t xFull = ((int)x*4 + dx) >> 2;
	const int32_t yFull = ((int)y*4 + dy) >> 2;
	/* Fractional bits of dx (low pair) and dy (high pair) pick one of
	 * the 16 interpolation modes. */
	const int32_t mode = (dx&3) | ((dy&3)<<2);
	const XVID_QP_FUNCS * const ops = xvid_QP_Funcs;

	uint8_t * const out = cur + y * stride + x;
	const uint8_t * const ref = refn + yFull * (int)stride + xFull;
	uint8_t * const scratch = refh; /* we need at least a 16 x stride scratch block */

	switch (mode) {
	case 0:
		/* Integer position: plain copy. */
		transfer8x8_copy(out, ref, stride);
		break;
	case 1:
		ops->H_Pass_Avrg_8(out, ref, 8, stride, rounding);
		break;
	case 2:
		ops->H_Pass_8(out, ref, 8, stride, rounding);
		break;
	case 3:
		ops->H_Pass_Avrg_Up_8(out, ref, 8, stride, rounding);
		break;
	case 4:
		ops->V_Pass_Avrg_8(out, ref, 8, stride, rounding);
		break;
	case 5:
		/* Mixed modes: horizontal filter into scratch (one extra row
		 * for the vertical tap), then vertical filter into out. */
		ops->H_Pass_Avrg_8(scratch, ref, 9, stride, rounding);
		ops->V_Pass_Avrg_8(out, scratch, 8, stride, rounding);
		break;
	case 6:
		ops->H_Pass_8(scratch, ref, 9, stride, rounding);
		ops->V_Pass_Avrg_8(out, scratch, 8, stride, rounding);
		break;
	case 7:
		ops->H_Pass_Avrg_Up_8(scratch, ref, 9, stride, rounding);
		ops->V_Pass_Avrg_8(out, scratch, 8, stride, rounding);
		break;
	case 8:
		ops->V_Pass_8(out, ref, 8, stride, rounding);
		break;
	case 9:
		ops->H_Pass_Avrg_8(scratch, ref, 9, stride, rounding);
		ops->V_Pass_8(out, scratch, 8, stride, rounding);
		break;
	case 10:
		ops->H_Pass_8(scratch, ref, 9, stride, rounding);
		ops->V_Pass_8(out, scratch, 8, stride, rounding);
		break;
	case 11:
		ops->H_Pass_Avrg_Up_8(scratch, ref, 9, stride, rounding);
		ops->V_Pass_8(out, scratch, 8, stride, rounding);
		break;
	case 12:
		ops->V_Pass_Avrg_Up_8(out, ref, 8, stride, rounding);
		break;
	case 13:
		ops->H_Pass_Avrg_8(scratch, ref, 9, stride, rounding);
		ops->V_Pass_Avrg_Up_8(out, scratch, 8, stride, rounding);
		break;
	case 14:
		ops->H_Pass_8(scratch, ref, 9, stride, rounding);
		ops->V_Pass_Avrg_Up_8(out, scratch, 8, stride, rounding);
		break;
	case 15:
		ops->H_Pass_Avrg_Up_8(scratch, ref, 9, stride, rounding);
		ops->V_Pass_Avrg_Up_8(out, scratch, 8, stride, rounding);
		break;
	}
}
/* Quarter-pel interpolation of an 8x8 block, averaged into `cur`
 * ("add" variant: the final pass comes from xvid_QP_Add_Funcs, which
 * blends with the destination instead of overwriting it).
 *
 * cur      - frame buffer the interpolated block is averaged into
 * refn     - reference frame the prediction is fetched from
 * refh     - scratch area (at least 16 x stride bytes); refv/refhv are
 *            unused here and kept only for a uniform call signature
 * x, y     - block position in full-pel units
 * dx, dy   - motion vector in quarter-pel units (the >>2 below recovers
 *            the full-pel offset; the low two bits select the filter)
 * stride   - line stride of all buffers
 * rounding - rounding control forwarded to the filter primitives
 */
static void __inline
interpolate8x8_add_quarterpel(uint8_t * const cur,
							uint8_t * const refn,
							uint8_t * const refh,
							uint8_t * const refv,
							uint8_t * const refhv,
							const uint32_t x, const uint32_t y,
							const int32_t dx, const int dy,
							const uint32_t stride,
							const uint32_t rounding)
{
	/* Full-pel source position: block origin plus the integer part of
	 * the quarter-pel motion vector. */
	const int32_t xFull = ((int)x*4 + dx) >> 2;
	const int32_t yFull = ((int)y*4 + dy) >> 2;
	/* Fractional bits of dx (low pair) and dy (high pair) pick one of
	 * the 16 interpolation modes. */
	const int32_t mode = (dx&3) | ((dy&3)<<2);
	/* Blending table for the pass that touches the destination,
	 * plain store table for the intermediate horizontal pass. */
	const XVID_QP_FUNCS * const ops_add = xvid_QP_Add_Funcs;
	const XVID_QP_FUNCS * const ops_put = xvid_QP_Funcs;

	uint8_t * const out = cur + y * stride + x;
	const uint8_t * const ref = refn + yFull * (int)stride + xFull;
	uint8_t * const scratch = refh; /* we need at least a 16 x stride scratch block */

	switch (mode) {
	case 0:
		/* Misleading function name, there is no halfpel involved
		 * just dst and src averaging with rounding=0 */
		interpolate8x8_halfpel_add(out, ref, stride, rounding);
		break;
	case 1:
		ops_add->H_Pass_Avrg_8(out, ref, 8, stride, rounding);
		break;
	case 2:
		ops_add->H_Pass_8(out, ref, 8, stride, rounding);
		break;
	case 3:
		ops_add->H_Pass_Avrg_Up_8(out, ref, 8, stride, rounding);
		break;
	case 4:
		ops_add->V_Pass_Avrg_8(out, ref, 8, stride, rounding);
		break;
	case 5:
		/* Mixed modes: plain-store horizontal pass into scratch (one
		 * extra row for the vertical tap), then blending vertical pass. */
		ops_put->H_Pass_Avrg_8(scratch, ref, 9, stride, rounding);
		ops_add->V_Pass_Avrg_8(out, scratch, 8, stride, rounding);
		break;
	case 6:
		ops_put->H_Pass_8(scratch, ref, 9, stride, rounding);
		ops_add->V_Pass_Avrg_8(out, scratch, 8, stride, rounding);
		break;
	case 7:
		ops_put->H_Pass_Avrg_Up_8(scratch, ref, 9, stride, rounding);
		ops_add->V_Pass_Avrg_8(out, scratch, 8, stride, rounding);
		break;
	case 8:
		ops_add->V_Pass_8(out, ref, 8, stride, rounding);
		break;
	case 9:
		ops_put->H_Pass_Avrg_8(scratch, ref, 9, stride, rounding);
		ops_add->V_Pass_8(out, scratch, 8, stride, rounding);
		break;
	case 10:
		ops_put->H_Pass_8(scratch, ref, 9, stride, rounding);
		ops_add->V_Pass_8(out, scratch, 8, stride, rounding);
		break;
	case 11:
		ops_put->H_Pass_Avrg_Up_8(scratch, ref, 9, stride, rounding);
		ops_add->V_Pass_8(out, scratch, 8, stride, rounding);
		break;
	case 12:
		ops_add->V_Pass_Avrg_Up_8(out, ref, 8, stride, rounding);
		break;
	case 13:
		ops_put->H_Pass_Avrg_8(scratch, ref, 9, stride, rounding);
		ops_add->V_Pass_Avrg_Up_8(out, scratch, 8, stride, rounding);
		break;
	case 14:
		ops_put->H_Pass_8(scratch, ref, 9, stride, rounding);
		ops_add->V_Pass_Avrg_Up_8(out, scratch, 8, stride, rounding);
		break;
	case 15:
		ops_put->H_Pass_Avrg_Up_8(scratch, ref, 9, stride, rounding);
		ops_add->V_Pass_Avrg_Up_8(out, scratch, 8, stride, rounding);
		break;
	}
}
#endif /* _XVID_QPEL_H_ */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -