// h263codec.cxx
  padding[2] = 0;

  // Decode values from the RTP H263 header
  if (src.GetPayloadType() == RTP_DataFrame::H263) { // RFC 2190
    context->flags |= CODEC_FLAG_RFC2190;
  }
  else
    return FALSE;

  int got_picture, len;
  len = ff.AvcodecDecodeVideo(context, picture, &got_picture, payload, payload_size);

  if (!src.GetMarker()) // Have not built an entire frame yet
    return TRUE;

  // The marker bit ends the frame: a final call with no data lets the
  // decoder emit the assembled picture
  len = ff.AvcodecDecodeVideo(context, picture, &got_picture, NULL, -1);
  if (len < 0) {
    PTRACE(1, "H263\tError while decoding frame");
    return TRUE; // And hope the error condition will fix itself
  }

  if (got_picture) {
    PTRACE(5, "H263\tDecoded frame (" << len << " bytes) into image "
           << context->width << "x" << context->height);

    // H.263 could change picture size at any time
    if (context->width == 0 || context->height == 0) {
      PTRACE(1, "H263\tImage dimension is 0");
      return TRUE; // And hope the error condition will fix itself
    }

    if (frameWidth != (unsigned)context->width || frameHeight != (unsigned)context->height) {
      frameWidth = context->width;
      frameHeight = context->height;
    }

    // Packed YUV420P output frame: 12 bits per pixel
    PINDEX frameBytes = (frameWidth * frameHeight * 12) / 8;
    RTP_DataFrame * pkt = new RTP_DataFrame(sizeof(FrameHeader) + frameBytes);
    FrameHeader * header = (FrameHeader *)pkt->GetPayloadPtr();
    header->x = header->y = 0;
    header->width = frameWidth;
    header->height = frameHeight;

    int size = frameWidth * frameHeight;
    if (picture->data[1] == picture->data[0] + size
        && picture->data[2] == picture->data[1] + (size >> 2))
      // The decoder's planes are already contiguous: copy in one go
      memcpy(header->data, picture->data[0], frameBytes);
    else {
      // The planes are padded: copy them row by row into the packed layout
      unsigned char *dstRow = header->data;
      for (int i = 0; i < 3; i++) {
        unsigned char *srcRow = picture->data[i];
        int dst_stride = i ? frameWidth >> 1 : frameWidth;  // chroma is half width
        int src_stride = picture->linesize[i];
        int h = i ? frameHeight >> 1 : frameHeight;         // chroma is half height
        if (src_stride == dst_stride) {
          memcpy(dstRow, srcRow, dst_stride*h);
          dstRow += dst_stride*h;
        } else {
          while (h--) {
            memcpy(dstRow, srcRow, dst_stride);
            dstRow += dst_stride;
            srcRow += src_stride;
          }
        }
      }
    }

    pkt->SetPayloadSize(frameBytes);
    pkt->SetPayloadType(RTP_DataFrame::MaxPayloadType);
    pkt->SetTimestamp(src.GetTimestamp());
    pkt->SetMarker(TRUE);
    dst.Append(pkt);

    frameNum++;
  }

  return TRUE;
}
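// A minimal sketch of the packed YUV420P arithmetic the copy above relies
// on (illustrative only; this helper is hypothetical and nothing calls it):
static inline unsigned ExampleYUV420PFrameBytes(unsigned width, unsigned height)
{
  unsigned lumaBytes   = width * height;  // Y plane, full resolution
  unsigned chromaBytes = lumaBytes / 4;   // U and V planes, 2x2 subsampled
  // Plane offsets inside header->data: Y at 0, U at lumaBytes,
  // V at lumaBytes + chromaBytes
  return lumaBytes + 2 * chromaBytes;     // == (width * height * 12) / 8
}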
//////////////////////////////////////////////////////////////////////////////
Opal_YUV420P_H263::Opal_YUV420P_H263()
  : OpalVideoTranscoder(OpalYUV420P, OpalH263)
{
  if (!ff.IsLoaded())
    return;

  encodedPackets.DisallowDeleteObjects();
  unusedPackets.DisallowDeleteObjects();

  if ((codec = ff.AvcodecFindEncoder(CODEC_ID_H263)) == NULL) {
    PTRACE(1, "H263\tCodec not found for encoder");
    return;
  }

  // default to CIF resolution
  frameWidth = 352;
  frameHeight = 288;

  context = ff.AvcodecAllocContext();
  if (context == NULL) {
    PTRACE(1, "H263\tFailed to allocate context for encoder");
    return;
  }

  picture = ff.AvcodecAllocFrame();
  if (picture == NULL) {
    PTRACE(1, "H263\tFailed to allocate frame for encoder");
    return;
  }

  context->codec = NULL;

  // set some reasonable values for quality as default
  videoQuality = 10;
  videoQMin = 4;
  videoQMax = 24;

  frameNum = 0;

  PTRACE(3, "Codec\tH263 encoder created");
}
Opal_YUV420P_H263::~Opal_YUV420P_H263()
{
  if (ff.IsLoaded()) {
    CloseCodec();

    ff.AvcodecFree(context);
    ff.AvcodecFree(picture);

    encodedPackets.AllowDeleteObjects(TRUE);
    unusedPackets.AllowDeleteObjects(TRUE);
  }
}
BOOL Opal_YUV420P_H263::OpenCodec()
{
  // avoid copying input/output
  context->flags |= CODEC_FLAG_INPUT_PRESERVED; // we guarantee to preserve input for max_b_frames+1 frames
  context->flags |= CODEC_FLAG_EMU_EDGE;        // don't draw edges

  context->width  = frameWidth;
  context->height = frameHeight;

  picture->linesize[0] = frameWidth;
  picture->linesize[1] = frameWidth / 2;
  picture->linesize[2] = frameWidth / 2;
  picture->quality = (float)videoQuality;

  int bitRate = 256000;
  context->bit_rate = (bitRate * 3) >> 2;       // average bit rate
  context->bit_rate_tolerance = bitRate << 3;
  context->rc_min_rate = 0;                     // minimum bitrate
  context->rc_max_rate = bitRate;               // maximum bitrate
  context->mb_qmin = context->qmin = videoQMin;
  context->mb_qmax = context->qmax = videoQMax;
  context->max_qdiff = 3;                       // max q difference between frames
  context->rc_qsquish = 0;                      // limit q by clipping
  context->rc_eq = "tex^qComp";                 // rate control equation
  context->qcompress = 0.5;                     // qscale factor between easy & hard scenes (0.0-1.0)
  context->i_quant_factor = (float)-0.6;        // qscale factor between p and i frames
  context->i_quant_offset = (float)0.0;         // qscale offset between p and i frames
  // context->b_quant_factor = (float)1.25;     // qscale factor between ip and b frames
  // context->b_quant_offset = (float)1.25;     // qscale offset between ip and b frames

  context->flags |= CODEC_FLAG_PASS1;

  context->mb_decision = FF_MB_DECISION_SIMPLE; // choose only one MB type at a time
  context->me_method = ME_EPZS;
  context->me_subpel_quality = 8;

  context->frame_rate_base = 1;
  context->frame_rate = 15;
  context->gop_size = 64;

  context->flags &= ~CODEC_FLAG_H263P_UMV;
  context->flags &= ~CODEC_FLAG_4MV;
  context->max_b_frames = 0;
  context->flags &= ~CODEC_FLAG_H263P_AIC;      // advanced intra coding (not handled by H323_FFH263Capability)
  context->flags |= CODEC_FLAG_RFC2190;

  // Packetise inside the codec: it invokes rtp_callback per slice of
  // roughly rtp_payload_size bytes
  context->rtp_mode = 1;
  context->rtp_payload_size = 750;
  context->rtp_callback = &Opal_YUV420P_H263::RtpCallback;
  context->opaque = this; // used to separate out packets from different encode threads

  if (ff.AvcodecOpen(context, codec) < 0) {
    PTRACE(1, "H263\tFailed to open H.263 encoder");
    return FALSE;
  }

  return TRUE;
}
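// Worked example of the rate-control arithmetic above, for the fixed
// 256 kbit/s target (the values follow directly from the expressions;
// this commentary is not part of the original file):
//   bit_rate           = (256000 * 3) >> 2 = 192000   (average: 3/4 of target)
//   bit_rate_tolerance = 256000 << 3       = 2048000  (wide short-term swings allowed)
//   rc_max_rate        = 256000            (hard ceiling at the target rate)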
void Opal_YUV420P_H263::CloseCodec()
{
  if (context != NULL) {
    if (context->codec != NULL) {
      ff.AvcodecClose(context);
      PTRACE(5, "H263\tClosed H.263 encoder");
    }
  }
}
void Opal_YUV420P_H263::RtpCallback(void *data, int data_size, void *hdr, int hdr_size, void *priv_data)
{
  Opal_YUV420P_H263 *c = (Opal_YUV420P_H263 *)priv_data;

  // Recycle a packet from the unused pool if one is available
  H263Packet *p = c->unusedPackets.GetSize() > 0 ? (H263Packet *)c->unusedPackets.RemoveAt(0)
                                                 : new H263Packet();
  p->Store(data, data_size, hdr, hdr_size);
  c->encodedPackets.Append(p);
}
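// During AvcodecEncodeVideo the codec fires RtpCallback once per packetised
// slice, using the opaque pointer to find the owning encoder, so
// encodedPackets fills up as a side effect of the encode call. A minimal
// standalone sketch of the same producer pattern (illustrative only:
// ExampleSink and its slice vector are hypothetical and unused here):
#include <vector>

struct ExampleSink {
  std::vector< std::vector<unsigned char> > slices;

  static void Callback(void *data, int size, void * /*hdr*/, int /*hdrSize*/, void *priv)
  {
    ExampleSink *self = (ExampleSink *)priv;
    const unsigned char *bytes = (const unsigned char *)data;
    // Copy the slice out immediately; the codec may reuse its buffer
    self->slices.push_back(std::vector<unsigned char>(bytes, bytes + size));
  }
};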
static struct { int width; int height; } s_vidFrameSize[] = {
  {    0,    0 },  // forbidden
  {  128,   96 },  // SQCIF
  {  176,  144 },  // QCIF
  {  352,  288 },  // CIF
  {  704,  576 },  // 4CIF
  { 1408, 1152 },  // 16CIF
  {    0,    0 },  // reserved
  {    0,    0 },  // extended PTYPE
};
int Opal_YUV420P_H263::GetStdSize(int _width, int _height)
{
  int sizeIndex;
  for (sizeIndex = SQCIF; sizeIndex < NumStdSizes; ++sizeIndex) {
    if (s_vidFrameSize[sizeIndex].width == _width && s_vidFrameSize[sizeIndex].height == _height)
      return sizeIndex;
  }
  return UnknownStdSize;
}
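// Usage sketch (illustrative only; the calls below are not in the original
// file): only the standard H.263 source formats resolve to an index, so the
// caller must treat everything else as fatal.
//
//   GetStdSize(352, 288);  // == CIF
//   GetStdSize(320, 240);  // == UnknownStdSize; ConvertFrames closes down on this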
PINDEX Opal_YUV420P_H263::GetOptimalDataFrameSize(BOOL input) const
{
  // Input is a raw YUV420P frame (12 bits/pixel); output is capped by maxOutputSize
  return input ? ((frameWidth * frameHeight * 12) / 8) : maxOutputSize;
}
BOOL Opal_YUV420P_H263::ConvertFrames(const RTP_DataFrame & src, RTP_DataFrameList & dst)
{
  if (!ff.IsLoaded())
    return FALSE;

  PWaitAndSignal mutex(updateMutex);

  dst.RemoveAll();

  if (src.GetPayloadSize() < (PINDEX)sizeof(FrameHeader)) {
    PTRACE(1, "H263\tVideo grab too small, closing down video transmission thread.");
    return FALSE;
  }

  FrameHeader * header = (FrameHeader *)src.GetPayloadPtr();
  if (header->x != 0 || header->y != 0) {
    PTRACE(1, "H263\tVideo grab of partial frame unsupported, closing down video transmission thread.");
    return FALSE;
  }

  // (Re)open the codec on the first frame or whenever the grab size changes
  if (frameNum == 0 || frameWidth != header->width || frameHeight != header->height) {
    int sizeIndex = GetStdSize(header->width, header->height);
    if (sizeIndex == UnknownStdSize) {
      PTRACE(3, "H263\tCannot resize to " << header->width << "x" << header->height
             << " (non-standard format), closing down video transmission thread.");
      return FALSE;
    }

    frameWidth = header->width;
    frameHeight = header->height;

    rawFrameLen = (frameWidth * frameHeight * 12) / 8;
    rawFrameBuffer.SetSize(rawFrameLen + FF_INPUT_BUFFER_PADDING_SIZE);
    memset(rawFrameBuffer.GetPointer() + rawFrameLen, 0, FF_INPUT_BUFFER_PADDING_SIZE);

    encFrameLen = rawFrameLen;           // this could be set to some lower value
    encFrameBuffer.SetSize(encFrameLen); // encoded video frame

    CloseCodec();
    if (!OpenCodec())
      return FALSE;
  }

  // get payload and ensure correct padding: avcodec requires
  // FF_INPUT_BUFFER_PADDING_SIZE zeroed bytes beyond the data it reads
  unsigned char * payload;
  if (src.GetHeaderSize() + src.GetPayloadSize() + FF_INPUT_BUFFER_PADDING_SIZE > src.GetSize()) {
    payload = (unsigned char *)rawFrameBuffer.GetPointer();
    memcpy(payload, header->data, rawFrameLen);
  }
  else
    payload = header->data;

  // Point the three picture planes into the packed YUV420P payload
  int size = frameWidth * frameHeight;
  picture->data[0] = payload;
  picture->data[1] = picture->data[0] + size;
  picture->data[2] = picture->data[1] + (size / 4);

  PTRACE(5, "H263\tEncoding video: context @ " << context << ", picture @ " << picture
         << ", buffer @ " << (void *)encFrameBuffer.GetPointer() << " (" << encFrameLen << " bytes)");

#if PTRACING
  PTime encTime;
  int out_size =
#endif
  ff.AvcodecEncodeVideo(context, encFrameBuffer.GetPointer(), encFrameLen, picture);
#if PTRACING
  PTRACE(5, "H263\tEncoded " << out_size << " bytes from " << frameWidth << "x" << frameHeight
         << " in " << (PTime() - encTime).GetMilliSeconds() << " ms");
#endif

  if (encodedPackets.GetSize() == 0) {
    PTRACE(1, "H263\tEncoder internal error - there should be outstanding packets at this point");
    return TRUE; // And hope the error condition will fix itself
  }

  // Move the packets queued by RtpCallback into the output list
  while (encodedPackets.GetSize() > 0) {
    RTP_DataFrame * pkt = new RTP_DataFrame(2048);
    unsigned length = maxOutputSize;
    H263Packet *p = (H263Packet *)encodedPackets.RemoveAt(0);
    if (p->Read(length, *pkt)) {
      pkt->SetPayloadType(RTP_DataFrame::H263);
      pkt->SetTimestamp(src.GetTimestamp());
      dst.Append(pkt);
    }
    else
      delete pkt; // nothing stored in it, so don't leak the frame
    unusedPackets.Append(p);
  }

  // The last packet of the frame carries the RTP marker bit
  if (dst.GetSize() > 0)
    dst[dst.GetSize()-1].SetMarker(TRUE);

  frameNum++; // increment the number of frames encoded

  PTRACE(6, "H263\tEncoded " << src.GetPayloadSize() << " bytes of YUV420P raw data into "
         << dst.GetSize() << " RTP frame(s)");

  return TRUE;
}
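// End-to-end usage sketch (illustrative only; the wrapper code below is not
// part of the original file and assumes an already-constructed transcoder):
//
//   RTP_DataFrame grab(sizeof(FrameHeader) + (352 * 288 * 12) / 8);
//   FrameHeader * hdr = (FrameHeader *)grab.GetPayloadPtr();
//   hdr->x = hdr->y = 0;
//   hdr->width = 352;   // must be a standard size (see GetStdSize)
//   hdr->height = 288;
//   // ... fill hdr->data with packed YUV420P pixels ...
//
//   RTP_DataFrameList packets;
//   if (transcoder.ConvertFrames(grab, packets)) {
//     // packets now holds RFC 2190 H.263 RTP frames;
//     // the last one carries the marker bit
//   }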
#endif // RFC2190_AVCODEC
#endif // NO_OPAL_VIDEO