
📄 libffmpegplugin.cpp

📁 A graphical user interface environment released by Trolltech; it can be compiled on the qt-embedded-2.3.7 platform as an embedded graphical user interface system.
💻 CPP
📖 Page 1 of 3
                    return FALSE;
                }
                if ( ptr && tmpSamplesRead < AVCODEC_MAX_AUDIO_FRAME_SIZE*4 ) {
                    ret = avcodec_decode_audio(audioCodecContext, (short*)(tmpSamples + tmpSamplesRead), &bytesRead, ptr, len);
                }
            }
            if ( bytesRead > 0 )
                tmpSamplesRead += bytesRead;
            else if ( bytesRead < 0 )
                qDebug("read count < 0, %i", bytesRead );
            if ( ret < 0 ) {
                qDebug("Error while decoding audio stream");
                if ( pkt ) {
                    av_free_packet(&pkt->pkt);
                    delete pkt;
                }
                return FALSE;
            }
            ptr += ret;
            len -= ret;
        }
        if ( pkt ) {
            av_free_packet(&pkt->pkt);
            delete pkt;
        }
        tmpBufCount = bufferedSamplesCount_fact + tmpSamplesRead * 44100;
    }

/*
    // Attempt to reduce a memcpy by doing the audio_resample directly in to the output
    // buffer and then with any remaining output, resample it in to a buffer for later
    tmpSamplesRead /= 2 * audioCodecContext->channels;
    if ( bufferedSamplesCount ) {
        if ( bufferedSamplesCount <= samples ) {
            memcpy( output, bufferedSamples, bufferedSamplesCount*2*channels );
            output += bufferedSamplesCount * channels;
            samples -= bufferedSamplesCount;
            bufferedSamplesCount = 0;
            int srcSampleCount = samples * audioCodecContext->sample_rate / 44100;
            audio_resample( audioScaleContext, (short*)output, (short*)tmpSamples, srcSampleCount );
            audio_resample( audioScaleContext, (short*)bufferedSamples, (short*)tmpSamples + srcSampleCount * 2, tmpSamplesRead - srcSampleCount );
            bufferedSamplesCount += (tmpSamplesRead - srcSampleCount) * 44100 / audioCodecContext->sample_rate;
        } else {
            samplesRead = samples;
            memcpy( output, bufferedSamples, samples*2*channels );
            bufferedSamplesCount -= samples;
            int blength = bufferedSamplesCount*2*channels;
            memmove( bufferedSamples, bufferedSamples + samples*2*channels, blength );
            audio_resample( audioScaleContext, (short*)bufferedSamples + bufferedSamplesCount*channels, (short*)tmpSamples, tmpSamplesRead );
            bufferedSamplesCount += tmpSamplesRead * 44100 / audioCodecContext->sample_rate;
        }
    }
*/

    tmpSamplesRead /= 2 * audioCodecContext->channels;
    bufferedSamplesCount += audio_resample( audioScaleContext, (short*)bufferedSamples + bufferedSamplesCount*channels, (short*)tmpSamples, tmpSamplesRead );
    samplesRead = samples;
    int slength = samples*2*channels;
    memcpy( output, bufferedSamples, slength );
    if ( currentAudioTimeStamp > 1 )
        currentAudioTimeStamp += samples * 1000 / 44100;
    bufferedSamplesCount -= samples;
    int blength = bufferedSamplesCount*2*channels;
    memmove( bufferedSamples, bufferedSamples + slength, blength );
    return TRUE;
}

int LibFFMpegPlugin::videoStreams()
{
    return (videoCodecContext) ? 1 : 0;
}

int LibFFMpegPlugin::videoWidth( int )
{
    return (videoCodecContext) ? videoCodecContext->width : 0;
}

int LibFFMpegPlugin::videoHeight( int )
{
    return (videoCodecContext) ? videoCodecContext->height : 0;
}

double LibFFMpegPlugin::videoFrameRate( int )
{
    return (videoCodecContext) ? (double)videoCodecContext->frame_rate / FRAME_RATE_BASE : 1.0;
}

int LibFFMpegPlugin::videoFrames( int )
{
    return -1;
}

bool LibFFMpegPlugin::videoSetFrame( long, int )
{
    return FALSE;
}

long LibFFMpegPlugin::videoGetFrame( int )
{
    return -1;
}

bool LibFFMpegPlugin::videoReadFrame( unsigned char **, int, int, int, int, ColorFormat, int )
{
    return FALSE;
}

bool LibFFMpegPlugin::videoReadScaledFrame( unsigned char **output_rows, int, int, int in_w, int in_h, int out_w, int out_h, ColorFormat fmt, int )
{
    AutoLockUnlockMutex lock( &videoMutex );

    int colorMode = -1;
    switch ( fmt ) {
        case RGB565:   colorMode = MODE_16_RGB; break;
        case BGR565:   colorMode = MODE_16_BGR; break;
        case RGBA8888: colorMode = MODE_32_RGB; break;
        case BGRA8888: colorMode = MODE_32_BGR; break;
    };

    if ( colorMode != scaleContextDepth ) {
        scaleContextDepth = colorMode;
        videoScaleContext = yuv2rgb_factory_init( colorMode, 0, 0 );
    }

    int lineStride = (uchar*)output_rows[1] - (uchar*)output_rows[0];

    if ( !videoCodecContext || !videoCodecContext->codec ) {
        qDebug("No video decoder for stream");
        return 1;
    }

    if ( skipNext ) {
        skipNext--;
        return 0;
    }

    int got_picture = 0;
    while ( !got_picture ) {
        MediaPacket *pkt = getAnotherPacket( videoStream );
        if ( !pkt ) {
            qDebug("Video EOF");
            return 1; // EOF
        }
        while ( pkt->len > 0 && !got_picture ) {
            int ret = avcodec_decode_video(videoCodecContext, &picture, &got_picture, pkt->ptr, pkt->len);
            if ( got_picture ) {
                pkt->frameInPacket++;
                if ( currentVideoTimeStamp )
                    currentVideoTimeStamp += msecPerFrame;
                frame = videoCodecContext->frame_number;
//              qDebug("got picture: %i", frame );

                // Check if any colour space conversion variables have changed
                // since the last decoded frame which will require
                // re-initialising the colour space tables
                if ( scaleContextInputWidth != in_w ||
                    scaleContextInputHeight != in_h ||
                    scaleContextPicture1Width != picture.linesize[0] ||
                    scaleContextPicture2Width != picture.linesize[1] ||
                    scaleContextOutputWidth != out_w ||
                    scaleContextOutputHeight != out_h ||
                    scaleContextLineStride != lineStride ||
                    scaleContextFormat != videoCodecContext->pix_fmt ) {
                    scaleContextInputWidth = in_w;
                    scaleContextInputHeight = in_h;
                    scaleContextPicture1Width = picture.linesize[0];
                    scaleContextPicture2Width = picture.linesize[1];
                    scaleContextOutputWidth = out_w;
                    scaleContextOutputHeight = out_h;
                    scaleContextLineStride = lineStride;
                    scaleContextFormat = videoCodecContext->pix_fmt;
                    int format = 0;
                    switch ( videoCodecContext->pix_fmt ) {
                        case PIX_FMT_YUV444P:
                            format = FORMAT_YUV444;
                            break;
                        case PIX_FMT_YUV422P:
                            format = FORMAT_YUV422;
                            break;
                        case PIX_FMT_YUV420P:
                            format = FORMAT_YUV420;
                            break;
                    };
                    qDebug("reconfiguring scale context");
                    videoScaleContext->converter->configure( videoScaleContext->converter,
                        in_w, in_h, picture.linesize[0], picture.linesize[1], out_w, out_h, lineStride, format );
//                  qDebug("configured yuv convert context with - input: %i x %i  pic lines: %i %i, output: %i x %i, linestride: %i", in_w, in_h, picture.linesize[0], picture.linesize[1], out_w, out_h, lineStride );
                }
                videoScaleContext->converter->yuv2rgb_fun( videoScaleContext->converter, (uint8_t*)output_rows[0], picture.data[0], picture.data[1], picture.data[2] );
            }
            if ( ret < 0 ) {
                qDebug("Error while decoding stream");
                removeCurrentVideoPacket();
                return 1;
            }
            pkt->ptr += ret;
            pkt->len -= ret;
        }
        if ( pkt->len == 0 )
            removeCurrentVideoPacket(); // Remove from list when done with it
    }

    return 0;
}

bool LibFFMpegPlugin::videoReadYUVFrame( char *, char *, char *, int, int, int, int, int )
{
    return FALSE;
}

double LibFFMpegPlugin::getTime()
{
    return -1;
}

bool LibFFMpegPlugin::setSMP( int )
{
    return FALSE;
}

bool LibFFMpegPlugin::setMMX( bool )
{
    return FALSE;
}

bool LibFFMpegPlugin::supportsAudio()
{
    return TRUE;
}

bool LibFFMpegPlugin::supportsVideo()
{
    return TRUE;
}

bool LibFFMpegPlugin::supportsYUV()
{
    return FALSE;
}

bool LibFFMpegPlugin::supportsMMX()
{
    return FALSE;
}

bool LibFFMpegPlugin::supportsSMP()
{
    return FALSE;
}

bool LibFFMpegPlugin::supportsStereo()
{
    return TRUE;
}

bool LibFFMpegPlugin::supportsScaling()
{
    return TRUE;
}

long LibFFMpegPlugin::getPlayTime()
{
    return -1;
}

bool LibFFMpegPlugin::supportsStreaming()
{
    return TRUE;
}

bool LibFFMpegPlugin::canStreamURL( const QUrl& url, const QString& mimetype )
{
    QString fileName = url.toString( true, false );
    // Support file://
    if ( fileName.left(7).lower() == "file://" )
        return true;
    // Support http://
    if ( fileName.left(7).lower() == "http://" )
        return true;
    // Support rtsp://
    if ( fileName.left(7).lower() == "rtsp://" )
        return true;
    // Does not support mms://
    if ( fileName.left(6).lower() == "mms://" )
        return false;
    // All others assumed not supported
    return false;
}

bool LibFFMpegPlugin::openURL( const QUrl& url, const QString& mimetype )
{
    fileInit();
    streamingFlag = TRUE;
    haveTotalTimeCache = FALSE;
    QString fileName = url.toString( false, false );
    qDebug("opening url %s", fileName.latin1() );

    // open the input file with generic libav function
    if ( av_open_input_file(&streamContext, fileName.latin1(), NULL, 0, 0) < 0 ) {
        strInfo = qApp->translate( "LibFFMpegPlugin", "Error: Could not open url, URL: " ) + fileName;
        qDebug( "%s", strInfo.latin1() );
        return FALSE;
    }
    qDebug("opened url %s", fileName.latin1() );

    // Decode first frames to get stream parameters (for some stream types like mpeg)
    if ( av_find_stream_info(streamContext) < 0 ) {
        qDebug("Error getting parameters for file %s", fileName.latin1() );
        return FALSE;
    }
    qDebug("initing url %s", fileName.latin1() );

    // update the current parameters so that they match the one of the input stream
    for ( int i = 0; i < streamContext->nb_streams; i++ ) {
        //printf( "searching: %i\n", i );
        AVCodecContext *enc = &streamContext->streams[i]->codec;
        enc->codec = avcodec_find_decoder( enc->codec_id );
        //printf( "decoder found: %s\n", enc->codec->name );
        if ( !enc->codec )
            qDebug("Unsupported codec for input stream");
        else if ( avcodec_open( enc, enc->codec ) < 0 )
            qDebug("Error while opening codec for input stream");
        else {
            switch (enc->codec_type) {
                case CODEC_TYPE_AUDIO:
                    qDebug("setting audio stream with id: %i", i);
                    audioStream = i;
                    audioCodecContext = enc;
                    break;
                case CODEC_TYPE_VIDEO:
                    qDebug("setting video stream with id: %i", i);
                    videoStream = i;
                    videoCodecContext = enc;
                    break;
                default:
                    qDebug("unknown stream type");
                    break;
            }
        }
    }

    if ( audioCodecContext )
        audioScaleContext = audio_resample_init( 2, audioCodecContext->channels, 44100, audioCodecContext->sample_rate );
    if ( videoCodecContext )
        videoCodecContext->hurry_up = 0;
    if ( videoCodecContext && videoCodecContext->frame_rate )
        msecPerFrame = (1000 * FRAME_RATE_BASE) / videoCodecContext->frame_rate;
    else
        msecPerFrame = 1000 / 25;

    qDebug("finished opening %s", fileName.latin1() );

    return true;
}

bool LibFFMpegPlugin::streamed()
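The listing above breaks off mid-declaration at LibFFMpegPlugin::streamed(), which continues on page 2 of 3. For readers unfamiliar with the early FFmpeg API this plugin is written against, the stand-alone sketch below walks through the same open/probe/stream-selection sequence used by LibFFMpegPlugin::openURL() (av_open_input_file, av_find_stream_info, avcodec_find_decoder, avcodec_open). It is illustrative only: the main() driver, the printf reporting, the include style, and calls such as av_register_all() and av_close_input_file() are assumptions about a typical build against that era of libavformat/libavcodec, not code taken from the plugin.

// Hypothetical stand-alone probe; NOT part of libffmpegplugin.cpp.
// Assumes the same pre-0.5 libavformat/libavcodec API used by the listing,
// with the headers on the include path as in the plugin's own build.
extern "C" {
#include "avformat.h"
#include "avcodec.h"
}
#include <stdio.h>

int main( int argc, char **argv )
{
    if ( argc < 2 ) {
        fprintf( stderr, "usage: %s <file-or-url>\n", argv[0] );
        return 1;
    }

    av_register_all();                          // register demuxers and decoders (old API)

    AVFormatContext *ctx = 0;
    if ( av_open_input_file( &ctx, argv[1], NULL, 0, 0 ) < 0 ) {
        fprintf( stderr, "could not open %s\n", argv[1] );
        return 1;
    }
    if ( av_find_stream_info( ctx ) < 0 ) {     // decode a few frames to fill in stream parameters
        fprintf( stderr, "could not read stream info\n" );
        return 1;
    }

    // Same stream walk as LibFFMpegPlugin::openURL(): find a decoder for each
    // stream, open it, and note whether the stream carries audio or video.
    for ( int i = 0; i < ctx->nb_streams; i++ ) {
        AVCodecContext *enc = &ctx->streams[i]->codec;   // embedded context in this API version
        AVCodec *codec = avcodec_find_decoder( enc->codec_id );
        if ( !codec ) {
            printf( "stream %d: unsupported codec\n", i );
            continue;
        }
        if ( avcodec_open( enc, codec ) < 0 ) {
            printf( "stream %d: could not open codec %s\n", i, codec->name );
            continue;
        }
        if ( enc->codec_type == CODEC_TYPE_VIDEO )
            printf( "stream %d: video, %dx%d (%s)\n", i, enc->width, enc->height, codec->name );
        else if ( enc->codec_type == CODEC_TYPE_AUDIO )
            printf( "stream %d: audio, %d Hz, %d ch (%s)\n", i, enc->sample_rate, enc->channels, codec->name );
        else
            printf( "stream %d: other (%s)\n", i, codec->name );
    }

    av_close_input_file( ctx );
    return 0;
}

As in openURL(), the first audio and video streams for which a decoder opens successfully are the ones a player would feed into the audio_resample and yuv2rgb paths shown earlier in the listing.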
