// vconvert.cxx
  return TRUE;
}

PSTANDARD_COLOUR_CONVERTER(Grey,YUV420P)
{
  return GreytoYUV420P(srcFrameBuffer, dstFrameBuffer, bytesReturned);
}

PSTANDARD_COLOUR_CONVERTER(RGB24,YUV420P)
{
  return RGBtoYUV420P(srcFrameBuffer, dstFrameBuffer, bytesReturned, 3, 0, 2);
}

PSTANDARD_COLOUR_CONVERTER(BGR24,YUV420P)
{
  return RGBtoYUV420P(srcFrameBuffer, dstFrameBuffer, bytesReturned, 3, 2, 0);
}

PSTANDARD_COLOUR_CONVERTER(RGB32,YUV420P)
{
  return RGBtoYUV420P(srcFrameBuffer, dstFrameBuffer, bytesReturned, 4, 0, 2);
}

PSTANDARD_COLOUR_CONVERTER(BGR32,YUV420P)
{
  return RGBtoYUV420P(srcFrameBuffer, dstFrameBuffer, bytesReturned, 4, 2, 0);
}
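// The RGBtoYUV420P() helper used by the five converters above is defined
// elsewhere in this file; its last three arguments appear to be the bytes per
// pixel (3 or 4) and the offsets of the red and blue channels within each
// pixel. As a rough illustration only -- not necessarily the exact
// coefficients the helper uses -- the per-pixel colour-space math is typically
// the BT.601 integer approximation sketched below (SampleRGBPixelToYUV is a
// hypothetical name, not part of PStandardColourConverter).
static inline void SampleRGBPixelToYUV(const BYTE * pixel,
                                       unsigned redOffset, unsigned blueOffset,
                                       BYTE & Y, BYTE & U, BYTE & V)
{
  int r = pixel[redOffset];
  int g = pixel[1];            // green is at offset 1 in all four orderings above
  int b = pixel[blueOffset];
  Y = (BYTE)((( 66*r + 129*g +  25*b + 128) >> 8) +  16);
  U = (BYTE)(((-38*r -  74*g + 112*b + 128) >> 8) + 128);
  V = (BYTE)(((112*r -  94*g -  18*b + 128) >> 8) + 128);
}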
/*
 * Format YUY2 or YUV422 (non-planar):
 *
 * off: 0  Y00 U00 Y01 V00 Y02 U01 Y03 V01
 * off: 8  Y10 U10 Y11 V10 Y12 U11 Y13 V11
 * off:16  Y20 U20 Y21 V20 Y22 U21 Y23 V21
 * off:24  Y30 U30 Y31 V30 Y32 U31 Y33 V31
 * length: 32 bytes
 *
 * Format YUV420P:
 * off: 00  Y00 Y01 Y02 Y03
 * off: 04  Y10 Y11 Y12 Y13
 * off: 08  Y20 Y21 Y22 Y23
 * off: 12  Y30 Y31 Y32 Y33
 * off: 16  U00 U02 U20 U22
 * off: 20  V00 V02 V20 V22
 *
 * So we lose some chroma information when converting YUY2 to YUV420P.
 *
 * NOTE: This algorithm works only if the width and the height are even.
 */
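// Worked size check for the layout described above (illustrative helpers
// only, not part of the class): a w x h frame takes 2 bytes per pixel in
// packed YUY2 but only 3/2 bytes per pixel in planar YUV420P, which is why
// half of the chroma samples have to be discarded.
static inline unsigned YUY2FrameBytes(unsigned w, unsigned h)    { return w * h * 2; }
static inline unsigned YUV420PFrameBytes(unsigned w, unsigned h) { return w * h * 3 / 2; }
// For the 4x4 example above: YUY2FrameBytes(4,4) == 32, YUV420PFrameBytes(4,4) == 24.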
void PStandardColourConverter::YUY2toYUV420PSameSize(const BYTE *yuy2, BYTE *yuv420p) const
{
  const BYTE *s;
  BYTE *y, *u, *v;
  unsigned int x, h;
  int npixels = srcFrameWidth * srcFrameHeight;

  s = yuy2;
  y = yuv420p;
  u = yuv420p + npixels;
  v = u + npixels/4;

  for (h=0; h<srcFrameHeight; h+=2) {
    /* Copy the first line keeping all information */
    for (x=0; x<srcFrameWidth; x+=2) {
      *y++ = *s++;
      *u++ = *s++;
      *y++ = *s++;
      *v++ = *s++;
    }
    /* Copy the second line discarding u and v information */
    for (x=0; x<srcFrameWidth; x+=2) {
      *y++ = *s++;
      s++;
      *y++ = *s++;
      s++;
    }
  }
}
/*
 * Format YUY2 or YUV422 (non-planar):
 *
 * off: 0  Y00 U00 Y01 V00 Y02 U01 Y03 V01
 * off: 8  Y10 U10 Y11 V10 Y12 U11 Y13 V11
 * off:16  Y20 U20 Y21 V20 Y22 U21 Y23 V21
 * off:24  Y30 U30 Y31 V30 Y32 U31 Y33 V31
 * length: 32 bytes
 *
 * Format YUV420P:
 * off: 00  Y00 Y01 Y02 Y03
 * off: 04  Y10 Y11 Y12 Y13
 * off: 08  Y20 Y21 Y22 Y23
 * off: 12  Y30 Y31 Y32 Y33
 * off: 16  U00 U02 U20 U22
 * off: 20  V00 V02 V20 V22
 *
 * So we lose some chroma information when converting YUY2 to YUV420P.
 *
 * NOTE: This algorithm works only if the width and the height are even.
 */
void PStandardColourConverter::YUY2toYUV420PWithResize(const BYTE *yuy2, BYTE *yuv420p) const
{
  const BYTE *s;
  BYTE *y, *u, *v;
  unsigned int x, h;
  unsigned int npixels = srcFrameWidth * srcFrameHeight;

  s = yuy2;
  y = yuv420p;
  u = yuv420p + npixels;
  v = u + npixels/4;

  if ((dstFrameWidth * dstFrameHeight) > npixels) {

    // Destination is bigger than the source: no subsampling.
    // Place the source in the middle of the destination and pad with black.
    unsigned int yOffset = (dstFrameHeight - srcFrameHeight)/2;
    unsigned int xOffset = (dstFrameWidth - srcFrameWidth)/2;
    unsigned int bpixels = yOffset * dstFrameWidth;

    /* Top border */
    memset(y, BLACK_Y, bpixels);   y += bpixels;
    memset(u, BLACK_U, bpixels/4); u += bpixels/4;
    memset(v, BLACK_V, bpixels/4); v += bpixels/4;

    for (h=0; h<srcFrameHeight; h+=2) {
      /* Left border */
      memset(y, BLACK_Y, xOffset);   y += xOffset;
      memset(u, BLACK_U, xOffset/2); u += xOffset/2;
      memset(v, BLACK_V, xOffset/2); v += xOffset/2;

      /* Copy the first line keeping all information */
      for (x=0; x<srcFrameWidth; x+=2) {
        *y++ = *s++;
        *u++ = *s++;
        *y++ = *s++;
        *v++ = *s++;
      }
      /* Right border of this line and left border of the next line */
      for (x=0; x<xOffset*2; x++)
        *y++ = BLACK_Y;
      /* Copy the second line discarding u and v information */
      for (x=0; x<srcFrameWidth; x+=2) {
        *y++ = *s++;
        s++;
        *y++ = *s++;
        s++;
      }
      /* Fill the border with black (right side) */
      memset(y, BLACK_Y, xOffset);   y += xOffset;
      memset(u, BLACK_U, xOffset/2); u += xOffset/2;
      memset(v, BLACK_V, xOffset/2); v += xOffset/2;
    }
    /* Bottom border */
    memset(y, BLACK_Y, bpixels);
    memset(u, BLACK_U, bpixels/4);
    memset(v, BLACK_V, bpixels/4);

  } else {

    // Source is bigger than the destination: subsample using fixed-point
    // stepping. Luma is taken nearest-neighbour; chroma is averaged between
    // two adjacent source lines.
#define FIX_FLOAT 12
    unsigned int dx = (srcFrameWidth<<FIX_FLOAT)/dstFrameWidth;
    unsigned int dy = (srcFrameHeight<<FIX_FLOAT)/dstFrameHeight;
    unsigned int fy, fx;

    for (fy=0, h=0; h<dstFrameHeight; h+=2, fy+=dy*2) {
      /* Copy the first line with U & V */
      unsigned int yy  = fy>>FIX_FLOAT;
      unsigned int yy2 = (fy+dy)>>FIX_FLOAT;
      const unsigned char *line1, *line2;
      unsigned char lastU, lastV;

      line1 = s + (yy*2*srcFrameWidth);
      line2 = s + (yy2*2*srcFrameWidth);
      lastU = line1[1];   /* U of the first Y U Y V group */
      lastV = line1[3];   /* V of the first Y U Y V group */

      for (fx=0, x=0; x<dstFrameWidth; x+=2, fx+=dx*2) {
        unsigned int xx = (fx>>FIX_FLOAT)*2;
        *y++ = line1[xx];               /* Y sits on the even byte of each pair */
        if ((xx&2) == 0) {
          *u++ = lastU = (line1[xx+1] + line2[xx+1])/2;
          *v++ = lastV = (line1[xx+3] + line2[xx+3])/2;
        }
        else {
          *u++ = lastU;
          *v++ = lastV = (line1[xx+1] + line2[xx+1])/2;
        }

        xx = ((fx+dx)>>FIX_FLOAT)*2;
        *y++ = line1[xx];
        if ((xx&2) == 0)
          lastU = (line1[xx+1] + line2[xx+1])/2;
        else
          lastV = (line1[xx+1] + line2[xx+1])/2;
      }

      /* Copy the second line without U & V */
      for (fx=0, x=0; x<dstFrameWidth; x++, fx+=dx) {
        unsigned int xx = (fx>>FIX_FLOAT)*2;
        *y++ = line2[xx];
      }
    } /* end of for (fy=0, h=0; h<dstFrameHeight; h+=2, fy+=dy*2) */
  }
}
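// The FIX_FLOAT arithmetic above is plain 20.12 fixed point: the destination
// index is multiplied by a pre-computed step and shifted back down to obtain
// the nearest source index. A minimal standalone sketch of that mapping
// (hypothetical helper, not called by the converter itself):
static inline unsigned FixedPointSourceIndex(unsigned dstIndex,
                                             unsigned srcSize, unsigned dstSize)
{
  unsigned step = (srcSize << FIX_FLOAT) / dstSize;  // source units per destination unit
  return (dstIndex * step) >> FIX_FLOAT;             // truncate back to an integer index
}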
PSTANDARD_COLOUR_CONVERTER(YUY2,YUV420P)
{
  const BYTE *yuy2 = srcFrameBuffer;
  BYTE *yuv420p = dstFrameBuffer;

  if ((srcFrameWidth | dstFrameWidth | srcFrameHeight | dstFrameHeight) & 1) {
    PTRACE(2,"PColCnv\tError in YUY2 to YUV420P converter, all sizes must be even.");
    return FALSE;
  }

  if ((srcFrameWidth == dstFrameWidth) && (srcFrameHeight == dstFrameHeight)) {
    YUY2toYUV420PSameSize(yuy2, yuv420p);
  } else {
    YUY2toYUV420PWithResize(yuy2, yuv420p);
  }

  if (bytesReturned != NULL)
    *bytesReturned = dstFrameBytes;

  return TRUE;
}
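/* For context, converters such as the one above are not normally constructed
 * directly; application code goes through the PColourConverter factory and
 * then calls Convert() on the returned object. A rough usage sketch -- the
 * exact Create()/Convert() signatures are assumptions for this PWLib vintage,
 * see ptlib/vconvert.h:
 *
 *   PColourConverter * conv =
 *       PColourConverter::Create("YUY2", "YUV420P", frameWidth, frameHeight);
 *   PINDEX bytes;
 *   if (conv != NULL && conv->Convert(yuy2Buffer, yuv420pBuffer, &bytes))
 *     ;  // yuv420pBuffer now holds frameWidth*frameHeight*3/2 bytes
 *   delete conv;
 */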
// Consider a YUV422 image of 8x2 pixels.
//
// A plane of Y values:   A B C D E F G H
//                        I J K L M N O P
//
// A plane of U values:   1 . 2 . 3 . 4 .
//                        5 . 6 . 7 . 8 .
//
// A plane of V values:   1 . 2 . 3 . 4 .
//                        5 . 6 . 7 . 8 .
//
// YUV422 is stored packed as Y U Y V, so this 8x2 image (16 pixels)
// requires 32 bytes of storage.
//
// The image has two possible transformations:
//   padded                  (src smaller than dst)
//   subsampled and padded   (src bigger than dst)
void PStandardColourConverter::ResizeYUV422(const BYTE * src, BYTE * dest) const
{
  DWORD *result = (DWORD *)dest;
  DWORD black   = (DWORD)(BLACK_U<<24) + (BLACK_Y<<16) + (BLACK_U<<8) + BLACK_Y;
  unsigned maxIndex = dstFrameWidth*dstFrameHeight/2;

  if ((dstFrameWidth*dstFrameHeight) > (srcFrameWidth*srcFrameHeight)) {

    // Fill the whole destination with black, two pixels (one DWORD) at a time.
    for (unsigned i = 0; i < maxIndex; i++)
      *result++ = black;

    // Destination is bigger than the source: no subsampling.
    // Place the source in the middle of the destination. At 2 bytes per
    // pixel the factor of two cancels the division by two needed for
    // centring, so these offsets are already in bytes.
    unsigned yOffset = dstFrameHeight - srcFrameHeight;
    unsigned xOffset = dstFrameWidth - srcFrameWidth;

    BYTE *s_ptr, *d_ptr;
    d_ptr = (yOffset * dstFrameWidth) + xOffset + dest;
    s_ptr = (BYTE *)src;
    for (unsigned y = 0; y < srcFrameHeight; y++) {
      memcpy(d_ptr, s_ptr, srcFrameWidth*2);
      d_ptr += 2*dstFrameWidth;
      s_ptr += 2*srcFrameWidth;
    }

  } else {

    // Source is bigger than the destination: subsample.
    unsigned subSample  = 1 + (srcFrameHeight/dstFrameHeight);
    unsigned yOffset    = dstFrameHeight - (srcFrameHeight/subSample);
    unsigned xOffset    = dstFrameWidth - (srcFrameWidth/subSample);
    unsigned subSample2 = subSample*2;

    DWORD *s_ptr = (DWORD *)src;
    DWORD *d_ptr = (DWORD *)dest + ((yOffset * dstFrameWidth) + xOffset)/4;
    DWORD *sl_ptr, *dl_ptr;

    for (unsigned y = 0; y < srcFrameHeight; y += subSample) {
      sl_ptr = s_ptr;
      dl_ptr = d_ptr;
      for (unsigned x = 0; x < srcFrameWidth; x += subSample2) {
        *dl_ptr++ = *sl_ptr;
        sl_ptr += subSample;
      }
      d_ptr += dstFrameWidth/2;
      s_ptr += srcFrameWidth*subSample/2;
    }
  }
}
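// The DWORD copies in ResizeYUV422() above treat two horizontally adjacent
// pixels as one 32-bit Y0 U Y1 V group, and the "black" fill constant assumes
// a little-endian byte order. An illustrative packing helper (hypothetical,
// not used by the converter):
static inline DWORD PackYUY2Pair(BYTE y0, BYTE u, BYTE y1, BYTE v)
{
  // On a little-endian machine the lowest byte is stored first,
  // giving the in-memory sequence Y0 U Y1 V.
  return (DWORD)y0 | ((DWORD)u << 8) | ((DWORD)y1 << 16) | ((DWORD)v << 24);
}
// PackYUY2Pair(BLACK_Y, BLACK_U, BLACK_Y, BLACK_V) reproduces the fill value
// used above whenever BLACK_U and BLACK_V are equal.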
PSTANDARD_COLOUR_CONVERTER(YUV422,YUV422)
{
  if (bytesReturned != NULL)
    *bytesReturned = dstFrameBytes;

  if (srcFrameBuffer == dstFrameBuffer)
    return TRUE;

  if ((srcFrameWidth == dstFrameWidth) && (srcFrameHeight == dstFrameHeight))
    memcpy(dstFrameBuffer, srcFrameBuffer, srcFrameWidth*srcFrameHeight*2);
  else
    ResizeYUV422(srcFrameBuffer, dstFrameBuffer);

  return TRUE;
}
// copied from OpenMCU
static void ConvertQCIFToCIF(const void * _src, void * _dst)
{
  BYTE * src = (BYTE *)_src;
  BYTE * dst = (BYTE *)_dst;
  //BYTE * dstEnd = dst + CIF_SIZE;
  int y, x;
  BYTE * srcRow;

  // copy Y
  for (y = 0; y < QCIF_HEIGHT; y++) {
    srcRow = src;
    for (x = 0; x < QCIF_WIDTH; x++) {
      dst[0] = dst[1] = *srcRow++;
      dst += 2;
    }
    srcRow = src;
    for (x = 0; x < QCIF_WIDTH; x++) {
      dst[0] = dst[1] = *srcRow++;
      dst += 2;
    }
    src += QCIF_WIDTH;
  }

  // copy U
  for (y = 0; y < QCIF_HEIGHT/2; y++) {
    srcRow = src;
    for (x = 0; x < QCIF_WIDTH/2; x++) {
      dst[0] = dst[1] = *srcRow++;
      dst += 2;
    }
    srcRow = src;
    for (x = 0; x < QCIF_WIDTH/2; x++) {
      dst[0] = dst[1] = *srcRow++;
      dst += 2;
    }
    src += QCIF_WIDTH/2;
  }

  // copy V
  for (y = 0; y < QCIF_HEIGHT/2; y++) {
    srcRow = src;
    for (x = 0; x < QCIF_WIDTH/2; x++) {
      dst[0] = dst[1] = *srcRow++;
      dst += 2;
    }
    srcRow = src;
    for (x = 0; x < QCIF_WIDTH/2; x++) {
      dst[0] = dst[1] = *srcRow++;
      dst += 2;
    }
    src += QCIF_WIDTH/2;
  }
}
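// ConvertQCIFToCIF() above is a straight pixel-doubling: QCIF is 176x144 and
// CIF is 352x288, i.e. exactly twice the size in each direction, so every
// source byte in each plane is written to a 2x2 block of the destination
// (each row is emitted twice, and each sample twice within a row).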
// Consider a YUV420P image of 4x4 pixels.
//
// A plane of Y values:   A B C D
//                        E F G H
//                        I J K L
//                        M N O P
//
// A plane of U values:   1 . 2 .
//                        . . . .
//                        3 . 4 .
//                        . . . .
//
// A plane of V values:   1 . 2 .
//                        . . . .
//                        3 . 4 .
//                        . . . .
//
// YUV420P is stored as all Y (w*h bytes), then U (w*h/4), then V (w*h/4);
// thus, a 4x4 image requires 24 bytes of storage.
//
// The image has two possible transformations:
//   padded                  (src smaller than dst)
//   subsampled and padded   (src bigger than dst)
void PStandardColourConverter::ResizeYUV420P(const BYTE * src, BYTE * dest) const
{
  unsigned int i, y, x, npixels;
  BYTE *d;
  const BYTE *s;

  npixels = dstFrameWidth * dstFrameHeight;

  if ((dstFrameWidth*dstFrameHeight) > (srcFrameWidth*srcFrameHeight)) {

    if (srcFrameWidth == QCIF_WIDTH && srcFrameHeight == QCIF_HEIGHT &&
        dstFrameWidth == CIF_WIDTH  && dstFrameHeight == CIF_HEIGHT) {
      ConvertQCIFToCIF(src, dest);
    }