/*
***********************************************************************
* COPYRIGHT AND WARRANTY INFORMATION
*
* Copyright 2001, International Telecommunications Union, Geneva
*
* DISCLAIMER OF WARRANTY
*
* These software programs are available to the user without any
* license fee or royalty on an "as is" basis. The ITU disclaims
* any and all warranties, whether express, implied, or
* statutory, including any implied warranties of merchantability
* or of fitness for a particular purpose. In no event shall the
* contributor or the ITU be liable for any incidental, punitive, or
* consequential damages of any kind whatsoever arising from the
* use of these programs.
*
* This disclaimer of warranty extends to the user of these programs
* and user's customers, employees, agents, transferees, successors,
* and assigns.
*
* The ITU does not represent or warrant that the programs furnished
* hereunder are free of infringement of any third-party patents.
* Commercial implementations of ITU-T Recommendations, including
* shareware, may be subject to royalty fees to patent holders.
* Information regarding the ITU-T patent policy is available from
* the ITU Web site at http://www.itu.int.
*
* THIS IS NOT A GRANT OF PATENT RIGHTS - SEE THE ITU-T PATENT POLICY.
************************************************************************
*/
/*!
*************************************************************************************
* \file decoder.c
*
* \brief
* Contains functions that implement the "decoders in the encoder" concept for the
* rate-distortion optimization with losses.
* \date
* October 22nd, 2001
*
* \author
* Main contributors (see contributors.h for copyright, address and
* affiliation details)
* - Dimitrios Kontopodis <dkonto@eikon.tum.de>
*************************************************************************************
*/
#include <stdlib.h>
#include <memory.h>
#include "global.h"
#include "refbuf.h"
#include "rdopt.h"
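
/*
 * Illustrative sketch (not part of the original file): one way the per-decoder
 * reconstructions stored in decY[][][] by decode_one_macroblock() below can be
 * folded into an expected luminance distortion for the current macroblock, in
 * the spirit of the "decoders in the encoder" RD optimization. It measures the
 * mismatch between the encoder reconstruction imgY[][] and each simulated
 * decoder's reconstruction; the number of simulated decoders is passed in
 * explicitly here (in the encoder it would come from the configuration), and
 * the helper name is hypothetical.
 */
static double Expected_MB_Distortion(int num_decoders)
{
  int d, i, j;
  double dist = 0.0;

  for (d = 0; d < num_decoders; d++)
    for (j = 0; j < MB_BLOCK_SIZE; j++)
      for (i = 0; i < MB_BLOCK_SIZE; i++)
      {
        int diff = imgY[img->pix_y + j][img->pix_x + i]
                 - decY[d][img->pix_y + j][img->pix_x + i];
        dist += (double)(diff * diff);
      }

  /* Average over the simulated decoders to approximate the expectation */
  return dist / (double)num_decoders;
}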
/*!
*************************************************************************************
* \brief
* decodes one macroblock at one simulated decoder
*
* \param decoder
* The id of the decoder
*
* \param mode
* encoding mode of the MB
*
* \param ref
* reference frame index
*
* \note
* Gives the expected value of one MB at the simulated decoder. This is done
* based on the stored reconstructed residue resY[][], the reconstructed values
* imgY[][] and the motion vectors. The decoded MB is written to decY[][].
*************************************************************************************
*/
void decode_one_macroblock(int decoder, int mode, int ref)
{
  int i,j,block_y,block_x;
  int ref_inx;
  int mv[2][BLOCK_MULTIPLE][BLOCK_MULTIPLE];
  int resY_tmp[MB_BLOCK_SIZE][MB_BLOCK_SIZE];

  int inter = (mode >= MBMODE_INTER16x16 && mode <= MBMODE_INTER4x4 && img->type!=B_IMG);

  if (img->number==0)
  {
    /* First frame: the simulated decoder starts from the encoder's reconstruction */
    for(i=0;i<MB_BLOCK_SIZE;i++)
      for(j=0;j<MB_BLOCK_SIZE;j++)
        decY[decoder][img->pix_y+j][img->pix_x+i]=imgY[img->pix_y+j][img->pix_x+i];
  }
  else
  {
    if (mode==MBMODE_COPY)
    {
      /* Copy (skip) mode: zero residue */
      for(i=0;i<MB_BLOCK_SIZE;i++)
        for(j=0;j<MB_BLOCK_SIZE;j++)
          resY_tmp[j][i]=0;

      /* Set motion vectors to zero */
      for (block_y=0; block_y<BLOCK_MULTIPLE; block_y++)
        for (block_x=0; block_x<BLOCK_MULTIPLE; block_x++)
          for (i=0;i<2;i++)
            mv[i][block_y][block_x]=0;
    }
    else
    {
      /* Copy motion vectors and residues to the local arrays mv and resY_tmp */
      for (block_y=0; block_y<BLOCK_MULTIPLE; block_y++)
        for (block_x=0; block_x<BLOCK_MULTIPLE; block_x++)
          for (i=0;i<2;i++)
            mv[i][block_y][block_x]=tmp_mv[i][img->block_y+block_y][img->block_x+block_x+4];

      for(i=0;i<MB_BLOCK_SIZE;i++)
        for(j=0;j<MB_BLOCK_SIZE;j++)
          resY_tmp[j][i]=resY[j][i];
    }

    /* Decode Luminance */
    if (inter || mode==MBMODE_COPY)
    {
      for (block_y=img->block_y ; block_y < img->block_y+BLOCK_MULTIPLE ; block_y++)
        for (block_x=img->block_x ; block_x < img->block_x+BLOCK_MULTIPLE ; block_x++)
        {
          ref_inx = (img->number-ref-1)%img->no_multpred;

          Get_Reference_Block(decref[decoder][ref_inx],
                              block_y, block_x,
                              mv[0][block_y-img->block_y][block_x-img->block_x],
                              mv[1][block_y-img->block_y][block_x-img->block_x],
                              RefBlock);

          for (j=0;j<BLOCK_SIZE;j++)
            for (i=0;i<BLOCK_SIZE;i++)
            {
              /* Consistency check: compare the pixel interpolated from the
                 decoder's reference with the encoder's upsampled reference
                 mref. The assignment below repeats the value ref_inx already
                 holds, so the check has no side effect. */
              if (RefBlock[j][i] != UMVPelY_14 (mref[ref_inx],
                    (block_y*4+j)*4+mv[1][block_y-img->block_y][block_x-img->block_x],
                    (block_x*4+i)*4+mv[0][block_y-img->block_y][block_x-img->block_x]))
                ref_inx = (img->number-ref-1)%img->no_multpred;

              decY[decoder][block_y*BLOCK_SIZE + j][block_x*BLOCK_SIZE + i] =
                resY_tmp[(block_y-img->block_y)*BLOCK_SIZE + j]
                        [(block_x-img->block_x)*BLOCK_SIZE + i]
                + RefBlock[j][i];
            }
        }
    }
    else
    {
      /* Intra Refresh - Assume no spatial prediction */
      for (j=0;j<MB_BLOCK_SIZE;j++)
        for (i=0;i<MB_BLOCK_SIZE;i++)
          decY[decoder][img->pix_y + j][img->pix_x + i] = imgY[img->pix_y + j][img->pix_x + i];
    }
  }
}
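
/*
 * Usage sketch (illustrative, not part of the original file): the encoder-side
 * RD loop is expected to call decode_one_macroblock() once per simulated
 * decoder for the mode/reference under test. The helper name is hypothetical
 * and num_decoders is passed explicitly here, whereas the encoder would take
 * it from its configuration.
 */
static void Decode_MB_At_All_Decoders(int num_decoders, int mode, int ref)
{
  int d;

  for (d = 0; d < num_decoders; d++)
    decode_one_macroblock(d, mode, ref);
}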
/*!
*************************************************************************************
* \brief
* Finds the reference MB given the decoded reference frame
* \note
* This is based on the function UnifiedOneForthPix, modified for use in the
* "many decoders in the encoder" RD optimization. In that setting we do not
* want to keep fully upsampled reference frames for all decoders, so we
* upsample only when it is necessary.
* \param imY
* The frame to be upsampled
* \param block_y
* The row of the block whose prediction we want to find
* \param block_x
* The column of the block whose prediction we want to find
* \param mvhor
* Motion vector, horizontal part
* \param mvver
* Motion vector, vertical part
* \param out
* Output: The prediction for the block (block_y, block_x)
*************************************************************************************
*/
void Get_Reference_Block(byte **imY,
                         int block_y,
                         int block_x,
                         int mvhor,
                         int mvver,
                         byte **out)
{
  int i,j,y,x;

  /* Top-left corner of the block in 1/4-pel units; mvhor/mvver are in 1/4-pel units */
  y = block_y * BLOCK_SIZE * 4 + mvver;
  x = block_x * BLOCK_SIZE * 4 + mvhor;

  for (j=0; j<BLOCK_SIZE; j++)
    for (i=0; i<BLOCK_SIZE; i++)
      out[j][i] = Get_Reference_Pixel(imY,
                                      max(0,min(img->mvert, y+j*4)),
                                      max(0,min(img->mhor, x+i*4)));
}
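
/*
 * Worked example for the addressing above (values chosen for illustration):
 * with BLOCK_SIZE = 4, block_x = 2 and mvhor = 6 (in 1/4-pel units),
 * x = 2*4*4 + 6 = 38. For pixel column i = 1 the coordinate passed on is
 * x + 1*4 = 42, which Get_Reference_Pixel() splits into integer column
 * 42/4 = 10 with sub-pel offset dx = 42 & 3 = 2, i.e. a half-pel position.
 */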
/*!
*************************************************************************************
* \brief
* Finds a pixel (y,x) of the upsampled reference frame
* \note
* This is based on the function UnifiedOneForthPix, modified for use in the
* "many decoders in the encoder" RD optimization. In that setting we do not
* want to keep fully upsampled reference frames for all decoders, so we
* upsample only when it is necessary.
*************************************************************************************
*/
byte Get_Reference_Pixel(byte **imY, int y_pos, int x_pos)
{
  int dx, x;
  int dy, y;
  int maxold_x,maxold_y;
  int result = 0, result1, result2;
  int pres_x;
  int pres_y;
  int tmp_res[6];

  static const int COEF[6] = {
    1, -5, 20, 20, -5, 1
  };

  /* Split the 1/4-pel coordinates into an integer-pel position and a sub-pel offset */
  dx = x_pos&3;
  dy = y_pos&3;
  x_pos = (x_pos-dx)/4;
  y_pos = (y_pos-dy)/4;

  maxold_x = img->width-1;
  maxold_y = img->height-1;

  if (dx == 0 && dy == 0) {  /* fullpel position */
    result = imY[max(0,min(maxold_y,y_pos))][max(0,min(maxold_x,x_pos))];
  }
  else if (dx == 3 && dy == 3) {  /* funny position */
    result = (imY[max(0,min(maxold_y,y_pos))  ][max(0,min(maxold_x,x_pos))  ]+
              imY[max(0,min(maxold_y,y_pos))  ][max(0,min(maxold_x,x_pos+1))]+
              imY[max(0,min(maxold_y,y_pos+1))][max(0,min(maxold_x,x_pos+1))]+
              imY[max(0,min(maxold_y,y_pos+1))][max(0,min(maxold_x,x_pos))  ]+2)/4;
  }
  else {  /* other positions */
    if (dy == 0) {
      /* Interpolate along the row with the 6-tap filter; the taps sum to 32,
         hence the +16 rounding offset and the /32 normalization. */
      pres_y = max(0,min(maxold_y,y_pos));
      for(x=-2;x<4;x++) {
        pres_x = max(0,min(maxold_x,x_pos+x));
        result += imY[pres_y][pres_x]*COEF[x+2];
      }
      result = max(0, min(255, (result+16)/32));