📄 h264.c
    const int16_t * const B= h->mv_cache[list][ index8 - 8 ];
    const int16_t * C;
    int diagonal_ref, match_count;

    assert(part_width==1 || part_width==2 || part_width==4);

/* mv_cache
  B . . A T T T T
  U . . L . . , .
  U . . L . . . .
  U . . L . . , .
  . . . L . . . .
*/

    diagonal_ref= fetch_diagonal_mv(h, &C, index8, list, part_width);
    match_count= (diagonal_ref==ref) + (top_ref==ref) + (left_ref==ref);

    if(match_count > 1){ //most common
        *mx= mid_pred(A[0], B[0], C[0]);
        *my= mid_pred(A[1], B[1], C[1]);
    }else if(match_count==1){
        if(left_ref==ref){
            *mx= A[0];
            *my= A[1];
        }else if(top_ref==ref){
            *mx= B[0];
            *my= B[1];
        }else{
            *mx= C[0];
            *my= C[1];
        }
    }else{
        if(top_ref == PART_NOT_AVAILABLE && diagonal_ref == PART_NOT_AVAILABLE && left_ref != PART_NOT_AVAILABLE){
            *mx= A[0];
            *my= A[1];
        }else{
            *mx= mid_pred(A[0], B[0], C[0]);
            *my= mid_pred(A[1], B[1], C[1]);
        }
    }

    tprintf("pred_motion (%2d %2d %2d) (%2d %2d %2d) (%2d %2d %2d) -> (%2d %2d %2d) at %2d %2d %d list %d\n", top_ref, B[0], B[1], diagonal_ref, C[0], C[1], left_ref, A[0], A[1], ref, *mx, *my, h->s.mb_x, h->s.mb_y, n, list);
}

/**
 * gets the directionally predicted 16x8 MV.
 * @param n the block index
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static inline void pred_16x8_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){
    if(n==0){
        const int top_ref=       h->ref_cache[list][ scan8[0] - 8 ];
        const int16_t * const B= h->mv_cache[list][ scan8[0] - 8 ];

        tprintf("pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d", top_ref, B[0], B[1], h->s.mb_x, h->s.mb_y, n, list);

        if(top_ref == ref){
            *mx= B[0];
            *my= B[1];
            return;
        }
    }else{
        const int left_ref=      h->ref_cache[list][ scan8[8] - 1 ];
        const int16_t * const A= h->mv_cache[list][ scan8[8] - 1 ];

        tprintf("pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list);

        if(left_ref == ref){
            *mx= A[0];
            *my= A[1];
            return;
        }
    }

    //RARE
    pred_motion(h, n, 4, list, ref, mx, my);
}
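/*
 * Illustrative sketch, not part of h264.c: the prediction above is the
 * component-wise median of the left (A), top (B) and diagonal (C) motion
 * vectors whenever at least two of those neighbours use the wanted
 * reference.  A minimal stand-alone median of three ints (the job mid_pred()
 * does here) could look like the helper below; median3 is a hypothetical
 * name for illustration only, not an FFmpeg function.
 */
static int median3(int a, int b, int c){
    if(a > b){ int t= a; a= b; b= t; }  /* ensure a <= b           */
    if(b > c) b= c;                     /* b = min(max(a,b), c)    */
    return a > b ? a : b;               /* median of the 3 inputs  */
}
/* e.g. A=(4,-2), B=(6,0), C=(3,-1) -> predicted MV = (median3 per component) = (4,-1) */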
/**
 * gets the directionally predicted 8x16 MV.
 * @param n the block index
 * @param mx the x component of the predicted motion vector
 * @param my the y component of the predicted motion vector
 */
static inline void pred_8x16_motion(H264Context * const h, int n, int list, int ref, int * const mx, int * const my){
    if(n==0){
        const int left_ref=      h->ref_cache[list][ scan8[0] - 1 ];
        const int16_t * const A= h->mv_cache[list][ scan8[0] - 1 ];

        tprintf("pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d", left_ref, A[0], A[1], h->s.mb_x, h->s.mb_y, n, list);

        if(left_ref == ref){
            *mx= A[0];
            *my= A[1];
            return;
        }
    }else{
        const int16_t * C;
        int diagonal_ref;

        diagonal_ref= fetch_diagonal_mv(h, &C, scan8[4], list, 2);

        tprintf("pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d", diagonal_ref, C[0], C[1], h->s.mb_x, h->s.mb_y, n, list);

        if(diagonal_ref == ref){
            *mx= C[0];
            *my= C[1];
            return;
        }
    }

    //RARE
    pred_motion(h, n, 2, list, ref, mx, my);
}

static inline void pred_pskip_motion(H264Context * const h, int * const mx, int * const my){
    const int top_ref = h->ref_cache[0][ scan8[0] - 8 ];
    const int left_ref= h->ref_cache[0][ scan8[0] - 1 ];

    tprintf("pred_pskip: (%d) (%d) at %2d %2d", top_ref, left_ref, h->s.mb_x, h->s.mb_y);

    if(top_ref == PART_NOT_AVAILABLE || left_ref == PART_NOT_AVAILABLE
       || (top_ref == 0  && *(uint32_t*)h->mv_cache[0][ scan8[0] - 8 ] == 0)
       || (left_ref == 0 && *(uint32_t*)h->mv_cache[0][ scan8[0] - 1 ] == 0)){
        *mx = *my = 0;
        return;
    }

    pred_motion(h, 0, 4, 0, 0, mx, my);
    return;
}

static inline void write_back_motion(H264Context *h, int mb_type){
    MpegEncContext * const s = &h->s;
    const int b_xy = 4*s->mb_x + 4*s->mb_y*h->b_stride;
    const int b8_xy= 2*s->mb_x + 2*s->mb_y*h->b8_stride;
    int list;

    for(list=0; list<2; list++){
        int y;
        if((!IS_8X8(mb_type)) && !USES_LIST(mb_type, list)){
            if(1){ //FIXME skip or never read if mb_type doesn't use it
                for(y=0; y<4; y++){
                    *(uint64_t*)s->current_picture.motion_val[list][b_xy + 0 + y*h->b_stride]=
                    *(uint64_t*)s->current_picture.motion_val[list][b_xy + 2 + y*h->b_stride]= 0;
                }
                for(y=0; y<2; y++){
                    *(uint16_t*)s->current_picture.motion_val[list][b8_xy + y*h->b8_stride]= (LIST_NOT_USED&0xFF)*0x0101;
                }
            }
            continue; //FIXME direct mode ...
        }

        for(y=0; y<4; y++){
            *(uint64_t*)s->current_picture.motion_val[list][b_xy + 0 + y*h->b_stride]= *(uint64_t*)h->mv_cache[list][scan8[0]+0 + 8*y];
            *(uint64_t*)s->current_picture.motion_val[list][b_xy + 2 + y*h->b_stride]= *(uint64_t*)h->mv_cache[list][scan8[0]+2 + 8*y];
        }
        for(y=0; y<2; y++){
            s->current_picture.ref_index[list][b8_xy + 0 + y*h->b8_stride]= h->ref_cache[list][scan8[0]+0 + 16*y];
            s->current_picture.ref_index[list][b8_xy + 1 + y*h->b8_stride]= h->ref_cache[list][scan8[0]+2 + 16*y];
        }
    }
}
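/*
 * Illustrative sketch, not part of h264.c: pred_pskip_motion() above checks
 * "both MV components are zero" with a single 32-bit compare, relying on the
 * two int16_t components of an mv_cache entry occupying 4 contiguous bytes.
 * A cast-free version of the same test; mv_is_zero is a hypothetical helper
 * name, and <stdint.h>/<string.h> are assumed to be included earlier in the
 * file.
 */
static int mv_is_zero(const int16_t mv[2]){
    uint32_t packed;
    memcpy(&packed, mv, sizeof(packed)); /* copy both components at once       */
    return packed == 0;                  /* zero iff mv[0]==0 && mv[1]==0      */
}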
/**
 * Decodes a network abstraction layer unit.
 * @param consumed is the number of bytes used as input
 * @param length is the length of the array
 * @param dst_length is the number of decoded bytes FIXME here or a decode rbsp trailing?
 * @returns decoded bytes, might be src+1 if no escapes
 */
static uint8_t *decode_nal(H264Context *h, uint8_t *src, int *dst_length, int *consumed, int length){
    int i, si, di;
    uint8_t *dst;

//    src[0]&0x80;              //forbidden bit
    h->nal_ref_idc= src[0]>>5;
    h->nal_unit_type= src[0]&0x1F;

    src++; length--;
#if 0
    for(i=0; i<length; i++)
        printf("%2X ", src[i]);
#endif
    for(i=0; i+1<length; i+=2){
        if(src[i]) continue;
        if(i>0 && src[i-1]==0) i--;
        if(i+2<length && src[i+1]==0 && src[i+2]<=3){
            if(src[i+2]!=3){
                /* startcode, so we must be past the end */
                length=i;
            }
            break;
        }
    }

    if(i>=length-1){ //no escaped 0
        *dst_length= length;
        *consumed= length+1; //+1 for the header
        return src;
    }

    h->rbsp_buffer= av_fast_realloc(h->rbsp_buffer, &h->rbsp_buffer_size, length);
    dst= h->rbsp_buffer;

//printf("decoding esc\n");
    si=di=0;
    while(si<length){
        //remove escapes (very rare 1:2^22)
        if(si+2<length && src[si]==0 && src[si+1]==0 && src[si+2]<=3){
            if(src[si+2]==3){ //escape
                dst[di++]= 0;
                dst[di++]= 0;
                si+=3;
            }else //next start code
                break;
        }

        dst[di++]= src[si++];
    }

    *dst_length= di;
    *consumed= si + 1; //+1 for the header
//FIXME store exact number of bits in the getbitcontext (it's needed for decoding)
    return dst;
}

/**
 * @param src the data which should be escaped
 * @param dst the target buffer, dst+1 == src is allowed as a special case
 * @param length the length of the src data
 * @param dst_length the length of the dst array
 * @returns length of escaped data in bytes or -1 if an error occurred
 */
static int encode_nal(H264Context *h, uint8_t *dst, uint8_t *src, int length, int dst_length){
    int i, escape_count, si, di;
    uint8_t *temp;

    assert(length>=0);
    assert(dst_length>0);

    dst[0]= (h->nal_ref_idc<<5) + h->nal_unit_type;

    if(length==0) return 1;

    escape_count= 0;
    for(i=0; i<length; i+=2){
        if(src[i]) continue;
        if(i>0 && src[i-1]==0)
            i--;
        if(i+2<length && src[i+1]==0 && src[i+2]<=3){
            escape_count++;
            i+=2;
        }
    }

    if(escape_count==0){
        if(dst+1 != src)
            memcpy(dst+1, src, length);
        return length + 1;
    }

    if(length + escape_count + 1 > dst_length)
        return -1; //this should be damn rare (hopefully)

    h->rbsp_buffer= av_fast_realloc(h->rbsp_buffer, &h->rbsp_buffer_size, length + escape_count);
    temp= h->rbsp_buffer;
//printf("encoding esc\n");

    si= 0;
    di= 0;
    while(si < length){
        if(si+2<length && src[si]==0 && src[si+1]==0 && src[si+2]<=3){
            temp[di++]= 0; si++;
            temp[di++]= 0; si++;
            temp[di++]= 3;
            temp[di++]= src[si++];
        }
        else
            temp[di++]= src[si++];
    }
    memcpy(dst+1, temp, length+escape_count);

    assert(di == length+escape_count);

    return di + 1;
}

/**
 * write 1,10,100,1000,... for alignment; yes, it's exactly inverse to mpeg4
 */
static void encode_rbsp_trailing(PutBitContext *pb){
    int length;
    put_bits(pb, 1, 1);
    length= (-get_bit_count(pb))&7;
    if(length) put_bits(pb, length, 0);
}

/**
 * identifies the exact end of the bitstream
 * @return the length of the trailing, or 0 if damaged
 */
static int decode_rbsp_trailing(uint8_t *src){
    int v= *src;
    int r;

    tprintf("rbsp trailing %X\n", v);

    for(r=1; r<9; r++){
        if(v&1) return r;
        v>>=1;
    }
    return 0;
}
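/*
 * Worked example (not from the original source) of what decode_nal() and
 * encode_nal() above do: H.264 emulation prevention inserts a 0x03 byte
 * whenever the raw payload would otherwise contain 00 00 00, 00 00 01,
 * 00 00 02 or 00 00 03, and the decoder strips that byte again.
 *
 *   RBSP payload    :  12 00 00 01 34
 *   escaped NAL body:  12 00 00 03 01 34
 *
 * decode_rbsp_trailing() then reports how many bits of the last payload byte
 * are trailing (the stop bit plus zero padding): a final byte of 0xA1
 * (....0001) gives 1, while 0x80 (10000000) gives 8.
 */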
/**
 * idct transforms the 16 dc values and dequantizes them.
 * @param qp quantization parameter
 */
static void h264_luma_dc_dequant_idct_c(DCTELEM *block, int qp){
    const int qmul= dequant_coeff[qp][0];
#define stride 16
    int i;
    int temp[16]; //FIXME check if this is a good idea
    static const int x_offset[4]={0, 1*stride, 4* stride, 5*stride};
    static const int y_offset[4]={0, 2*stride, 8* stride, 10*stride};

//memset(block, 64, 2*256);
//return;
    for(i=0; i<4; i++){
        const int offset= y_offset[i];
        const int z0= block[offset+stride*0] + block[offset+stride*4];
        const int z1= block[offset+stride*0] - block[offset+stride*4];
        const int z2= block[offset+stride*1] - block[offset+stride*5];
        const int z3= block[offset+stride*1] + block[offset+stride*5];

        temp[4*i+0]= z0+z3;
        temp[4*i+1]= z1+z2;
        temp[4*i+2]= z1-z2;
        temp[4*i+3]= z0-z3;
    }

    for(i=0; i<4; i++){
        const int offset= x_offset[i];
        const int z0= temp[4*0+i] + temp[4*2+i];
        const int z1= temp[4*0+i] - temp[4*2+i];
        const int z2= temp[4*1+i] - temp[4*3+i];
        const int z3= temp[4*1+i] + temp[4*3+i];

        block[stride*0 +offset]= ((z0 + z3)*qmul + 2)>>2; //FIXME think about merging this into decode_residual
        block[stride*2 +offset]= ((z1 + z2)*qmul + 2)>>2;
        block[stride*8 +offset]= ((z1 - z2)*qmul + 2)>>2;
        block[stride*10+offset]= ((z0 - z3)*qmul + 2)>>2;
    }
}

/**
 * dct transforms the 16 dc values.
 * @param qp quantization parameter ??? FIXME
 */
static void h264_luma_dc_dct_c(DCTELEM *block/*, int qp*/){
//    const int qmul= dequant_coeff[qp][0];
    int i;
    int temp[16]; //FIXME check if this is a good idea
    static const int x_offset[4]={0, 1*stride, 4* stride, 5*stride};
    static const int y_offset[4]={0, 2*stride, 8* stride, 10*stride};

    for(i=0; i<4; i++){
        const int offset= y_offset[i];
        const int z0= block[offset+stride*0] + block[offset+stride*4];
        const int z1= block[offset+stride*0] - block[offset+stride*4];
        const int z2= block[offset+stride*1] - block[offset+stride*5];
        const int z3= block[offset+stride*1] + block[offset+stride*5];

        temp[4*i+0]= z0+z3;
        temp[4*i+1]= z1+z2;
        temp[4*i+2]= z1-z2;
        temp[4*i+3]= z0-z3;
    }

    for(i=0; i<4; i++){
        const int offset= x_offset[i];
        const int z0= temp[4*0+i] + temp[4*2+i];