⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 libffmpegplugin.cpp

📁 Trolltech公司发布的图形界面开发框架。可在qt-embedded-2.3.7平台上编译为嵌入式图形界面应用程序。
💻 CPP
📖 第 1 页 / 共 3 页
字号:
{    return streamingFlag;}bool LibFFMpegPlugin::syncAvailable(){    return TRUE;}bool LibFFMpegPlugin::sync(){    if ( !streamContext ) {	qDebug("No file open");	return FALSE;    }    if ( !videoCodecContext ) {	printf("no context\n");	return FALSE;    }    if ( !videoStreams() )	return TRUE;     AutoLockUnlockMutex lock( &videoMutex );    int packetCount = waitingVideoPackets.count();    if ( packetCount > 1000 ) {	// We are way too far behind, need to altogether drop packets we	// are behind and try to pick up from somewhere reasonable	for ( int i = 0; i < packetCount - 10; i++ )	    removeCurrentVideoPacket();	qDebug("got really far behind");    }    bool haveBothTimeStamps = ( currentVideoTimeStamp && currentAudioTimeStamp );    bool keepDecoding = TRUE;    if ( haveBothTimeStamps ) {//	qDebug("have both time stamps %li %li", (long)currentVideoTimeStamp, (long)currentAudioTimeStamp );	// Are we too far ahead with the video?	if ( currentVideoTimeStamp > currentAudioTimeStamp + AVSyncSlack ) {	    printf("slow down video\n");	    skipNext++;	    // The w38.mpg example has a crazy video time stamp on the first video packet	    // which would cause the logic here to think we are 1000s of frames behind.	    // What is required is to decode more video packets till we get a sane video	    // time stamp but this doesn't happen if we think we are miles ahead with the	    // video because of a bad time stamp. 
The line below ensures if we do get too	    // ahead with the video we can't stall and stop decoding for more than a 	    // single frame at a time in case we get crazy time stamps.//	    currentVideoTimeStamp = currentAudioTimeStamp;	    return TRUE;	}	keepDecoding = ( currentAudioTimeStamp > currentVideoTimeStamp + AVSyncSlack );	if ( keepDecoding ) {//	    qDebug("audio ahead, decode more video 1");	}    } else {//	qDebug("packets - video: %i audio: %i", waitingVideoPackets.count(), waitingAudioPackets.count() );	if ( waitingAudioPackets.count() >= 1 ) {	    printf("slow down video\n");	    skipNext++;	    return TRUE;	}	keepDecoding = ( waitingVideoPackets.count() > 1 );	if ( keepDecoding ) {//	    qDebug("audio ahead, decode more video 2");	}    }    // Quickly skip over packets if we are *really* far behind (ie one second behind)    if ( haveBothTimeStamps ) {       int maxPackets = 10;       while ( currentAudioTimeStamp > currentVideoTimeStamp + 1000 && maxPackets ) {           maxPackets--;           //qDebug("catching up another frame");           MediaPacket *pkt = getAnotherPacket( videoStream );           if ( !pkt ) {               qDebug("Video EOF");               return FALSE; // EOF           }           removeCurrentVideoPacket(); // Remove from list when done with it       }    }    int maxFrames = 10;    // Try to consume up the waiting video packets so we get back in sync with the audio    while ( keepDecoding ) {//	qDebug("catching up another packet");	MediaPacket *pkt = getAnotherPacket( videoStream );	if ( !pkt ) {	    qDebug("Video EOF");	    return FALSE;	}	while ( pkt->len > 0 && maxFrames && (!haveBothTimeStamps || ( currentAudioTimeStamp > currentVideoTimeStamp + 70 )) ) {	    int got_picture = 0;	    // when set to 1 during decoding, b frames will be skiped when	    // set to 2 idct/dequant will be skipped too	    int oldHurryUp = videoCodecContext->hurry_up;	    videoCodecContext->hurry_up = 1;	    int ret = 
avcodec_decode_video(videoCodecContext, &picture, &got_picture, pkt->ptr, pkt->len);	    if ( got_picture ) {		pkt->frameInPacket++;		if ( currentVideoTimeStamp )		    currentVideoTimeStamp += msecPerFrame;		droppedFrames++;		qDebug("frames dropped: %i", droppedFrames);		maxFrames--;	    }	    videoCodecContext->hurry_up = oldHurryUp;	    if ( ret < 0 ) {		qDebug("Error while decoding stream");		av_free_packet(&pkt->pkt);		delete pkt;	    }	    pkt->ptr += ret;	    pkt->len -= ret;	}	if ( pkt->len == 0 ) 	    removeCurrentVideoPacket(); // Remove from list when done with it	if ( haveBothTimeStamps ) {	    // Guard against crazy audio or video time stamps by limiting the frames to maxFrames	    // otherwise they could cause this code to want to catch up with an oasis timestamp way in the future	    if ( maxFrames <= 0 ) {		currentVideoTimeStamp = currentAudioTimeStamp;		keepDecoding = FALSE;	    } else		keepDecoding = ( currentAudioTimeStamp > currentVideoTimeStamp + AVSyncSlack );	    if ( keepDecoding ) {//		qDebug("audio ahead, decode more video 3");	    }	} else {	    if ( maxFrames <= 0 ) {		keepDecoding = FALSE;	    } else		keepDecoding = ( waitingVideoPackets.count() > 1 );	}    }    return TRUE;}bool LibFFMpegPlugin::seekAvailable(){    return !streamingFlag;}bool LibFFMpegPlugin::seek( long pos ){    qDebug("LibFFMpegPlugin::seek");    if ( !streamContext ) {	qDebug("No file open");	return FALSE;    }    AutoLockUnlockMutex audioLock( &audioMutex );    AutoLockUnlockMutex videoLock( &videoMutex );    flushAudioPackets();    flushVideoPackets();    {	AutoLockUnlockMutex lock( &pluginMutex );	if ( audioCodecContext )	    avcodec_flush_buffers( audioCodecContext );	if ( videoCodecContext )	    avcodec_flush_buffers( videoCodecContext );	// Seek in to the file	if ( pos > 1000 )	    url_fseek( &streamContext->pb, pos - 1000, SEEK_SET );	else	    url_fseek( &streamContext->pb, 0, SEEK_SET );	// Reset these after seeking till we start to get new ones again	
currentVideoTimeStamp = 0;	currentAudioTimeStamp = 0;	bufferedSamplesCount = 0;    }    if ( !videoStreams() )	return TRUE;    // Sync up the input with the packets so    // we are ready to get the next video frame    // We have to sync through a keyframe, but not    // all codecs have keyframes or some have keyframes    // which are far apart depending on the encoder which created the file.    // So instead of hoping to find a keyframe, we decode 25 frames so we    // are reasonably confident we have enough picture complete by then    // to resume decoding from.    int framesToDecode = 25;    while ( framesToDecode > 0 ) {	MediaPacket *pkt = getAnotherPacket( videoStream );	if ( !pkt ) {	    qDebug("Video EOF");	    return FALSE;	}	while (pkt->len > 0) {	    int got_pic;	    int ret = avcodec_decode_video(videoCodecContext, &picture, &got_pic, pkt->ptr, pkt->len);	    if ( got_pic ) {		pkt->frameInPacket++;		if ( currentVideoTimeStamp )		    currentVideoTimeStamp += msecPerFrame;		framesToDecode--;	    }	    if ( ret < 0 ) {		qDebug("Error while decoding stream");		av_free_packet(&pkt->pkt);		delete pkt;	    }	    pkt->ptr += ret;	    pkt->len -= ret;	}	if ( pkt->len == 0 ) 	    removeCurrentVideoPacket(); // Remove from list when done with it    }    bool haveBothTimeStamps = ( currentVideoTimeStamp && currentAudioTimeStamp );    if ( haveBothTimeStamps ) {	// ### Throw away audio packets till we are in time with the video packets        flushAudioPackets();	bufferedSamplesCount = 0;    } else {        flushAudioPackets();	bufferedSamplesCount = 0;    }    return TRUE;}bool LibFFMpegPlugin::tellAvailable(){    return !streamingFlag;}long LibFFMpegPlugin::tell(){    return url_ftell( &streamContext->pb );}bool LibFFMpegPlugin::lengthAvailable(){    return !streamingFlag;}long LibFFMpegPlugin::length(){    return fileLength;}bool LibFFMpegPlugin::totalTimeAvailable(){    return haveTotalTimeCache;}long LibFFMpegPlugin::totalTime(){    return totalTimeCache;}bool 
LibFFMpegPlugin::currentTimeAvailable(){    bool haveTimeStamp = (currentAudioTimeStamp != 0) || (currentVideoTimeStamp != 0);    bool canDerivePosition = lengthAvailable() && tellAvailable() && totalTimeAvailable();    return haveTimeStamp || canDerivePosition;}long LibFFMpegPlugin::currentTime(){    if (currentAudioTimeStamp != 0)       return currentAudioTimeStamp;    if (currentVideoTimeStamp != 0)       return currentVideoTimeStamp;    bool canDerivePosition = lengthAvailable() && tellAvailable() && totalTimeAvailable();    if ( canDerivePosition && fileLength )       return ((long long)totalTime() * tell()) / fileLength;    return -1;}// Remove from list when done with itvoid LibFFMpegPlugin::removeCurrentVideoPacket(){    AutoLockUnlockMutex lock( &pluginMutex );    MediaPacket *pkt = waitingVideoPackets.take(0);    framesInLastPacket = pkt->frameInPacket;    if ( pkt ) {	av_free_packet(&pkt->pkt);	delete pkt;    }}void LibFFMpegPlugin::flushVideoPackets(){    AutoLockUnlockMutex lock( &pluginMutex );    if ( videoCodecContext )	avcodec_flush_buffers( videoCodecContext );    while ( waitingVideoPackets.first() ) {	MediaPacket *pkt = waitingVideoPackets.take();	if ( pkt ) {	    av_free_packet(&pkt->pkt);	    delete pkt;	}    }}void LibFFMpegPlugin::flushAudioPackets(){    AutoLockUnlockMutex lock( &pluginMutex );    if ( audioCodecContext )	avcodec_flush_buffers( audioCodecContext );    while ( waitingAudioPackets.first() ) {	MediaPacket *pkt = waitingAudioPackets.take();	if ( pkt ) {	    av_free_packet(&pkt->pkt);	    delete pkt;	}    }}MediaPacket *LibFFMpegPlugin::getAnotherPacket( int stream ){    AutoLockUnlockMutex lock( &pluginMutex );    if ( stream == videoStream ) 	if ( waitingVideoPackets.first() ) {	    MediaPacket *pkt = waitingVideoPackets.first();	    if ( pkt->pkt.pts ) {		currentPacketTimeStamp = pkt->pkt.pts / 100;		currentVideoTimeStamp = currentPacketTimeStamp + (pkt->frameInPacket * msecPerFrame);//		qDebug("got time stamp: %li queued 
video (%i left)", currentPacketTimeStamp, waitingVideoPackets.count() );	    } else if ( currentVideoTimeStamp ) {		currentVideoTimeStamp = currentPacketTimeStamp + (framesInLastPacket * msecPerFrame);	    }	    return pkt;	}    if ( stream == audioStream ) 	if ( waitingAudioPackets.first() ) {	    MediaPacket *pkt = waitingAudioPackets.take();	    if ( pkt->pkt.pts ) {		currentPacketTimeStamp = pkt->pkt.pts / 100; // convert to milliseconds		currentAudioTimeStamp = currentPacketTimeStamp;//		qDebug("got time stamp: %li queued audio (%i left)", currentPacketTimeStamp, waitingAudioPackets.count() );	    }	    return pkt;	}    // Buffer up some more packets    for ( int i = 0; i < 100; i++ ) {	MediaPacket *pkt = new MediaPacket;	pkt->pkt.pts = 0;	pkt->frameInPacket = 0;	// read a packet from input	if (av_read_packet(streamContext, &pkt->pkt) < 0) {	    delete pkt;	    return 0; // EOF	}	pkt->len = pkt->pkt.size;	pkt->ptr = pkt->pkt.data;	if ( pkt->pkt.stream_index == stream && stream == audioStream ) {	    if ( pkt->pkt.pts ) {		currentPacketTimeStamp = pkt->pkt.pts / 100;		currentAudioTimeStamp = currentPacketTimeStamp;//		qDebug("got time stamp: %li audio", currentPacketTimeStamp );	    }	    return pkt;	} else if ( pkt->pkt.stream_index == stream && stream == videoStream ) {	    waitingVideoPackets.append( pkt );	    if ( pkt->pkt.pts ) {		currentPacketTimeStamp = pkt->pkt.pts / 100;		currentVideoTimeStamp = currentPacketTimeStamp;//		qDebug("got time stamp: %li video", currentPacketTimeStamp );	    }	    return pkt;	}	if ( pkt->pkt.stream_index == videoStream ) 	    waitingVideoPackets.append( pkt );	if ( pkt->pkt.stream_index == audioStream ) 	    waitingAudioPackets.append( pkt );    }    return 0;}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -