📄 playcommon.cpp
Font size:
// ---------------------------------------------------------------------------
// NOTE(review): This chunk was pasted with all original newlines collapsed;
// it has been re-wrapped here (tokens unchanged) so that the `//` comments
// regain their intended scope.  The head of main() (before this switch) and
// the tail of continueAfterPLAY() (after the last line) are outside this
// chunk.  Style matches the LIVE555 "openRTSP" client code.
// ---------------------------------------------------------------------------

case 's': { // specify initial seek time (trick play)
  double arg;
  // Reject non-numeric or negative seek times; usage() presumably exits.
  if (sscanf(argv[2], "%lg", &arg) != 1 || arg < 0) {
    usage();
  }
  initialSeekTime = arg;
  ++argv; --argc; // consume the option's value argument
  break;
}
case 'z': { // scale (trick play)
  float arg;
  // A scale of exactly 0 is meaningless (no playback direction), so reject it.
  if (sscanf(argv[2], "%g", &arg) != 1 || arg == 0.0f) {
    usage();
  }
  scale = arg;
  ++argv; --argc; // consume the option's value argument
  break;
}
default: {
  usage();
  break;
}
}
++argv; --argc; // advance past the option letter itself
}

// After option parsing, exactly one positional argument (the stream URL)
// must remain (argv[0] is the program name, argv[1] the URL).
if (argc != 2) usage();

// --- Sanity-check mutually exclusive / interdependent options: ---
if (outputQuickTimeFile && outputAVIFile) {
  *env << "The -i and -q (or -4) flags cannot both be used!\n";
  usage();
}
Boolean outputCompositeFile = outputQuickTimeFile || outputAVIFile;
if (!createReceivers && outputCompositeFile) {
  *env << "The -r and -q (or -4 or -i) flags cannot both be used!\n";
  usage();
}
// A composite (QuickTime/AVI) output needs video geometry and frame rate;
// warn when the defaults are being assumed rather than explicitly set.
if (outputCompositeFile && !movieWidthOptionSet) {
  *env << "Warning: The -q, -4 or -i option was used, but not -w. Assuming a video width of " << movieWidth << " pixels\n";
}
if (outputCompositeFile && !movieHeightOptionSet) {
  *env << "Warning: The -q, -4 or -i option was used, but not -h. Assuming a video height of " << movieHeight << " pixels\n";
}
if (outputCompositeFile && !movieFPSOptionSet) {
  *env << "Warning: The -q, -4 or -i option was used, but not -f. Assuming a video frame rate of " << movieFPS << " frames-per-second\n";
}
if (audioOnly && videoOnly) {
  *env << "The -a and -v flags cannot both be used!\n";
  usage();
}
if (sendOptionsRequestOnly && !sendOptionsRequest) {
  *env << "The -o and -O flags cannot both be used!\n";
  usage();
}
if (tunnelOverHTTPPortNum > 0) {
  if (streamUsingTCP) {
    *env << "The -t and -T flags cannot both be used!\n";
    usage();
  } else {
    // RTSP-over-HTTP tunneling implies TCP transport.
    streamUsingTCP = True;
  }
}
if (!createReceivers && notifyOnPacketArrival) {
  *env << "Warning: Because we're not receiving stream data, the -n flag has no effect\n";
}
if (durationSlop < 0) {
  // This parameter wasn't set, so use a default value.
  // If we're measuring QOS stats, then don't add any slop, to avoid
  // having 'empty' measurement intervals at the end.
  durationSlop = qosMeasurementIntervalMS > 0 ?
  0.0 : 5.0;
}

streamURL = argv[1];

// Create our client object:
ourClient = createClient(*env, streamURL, verbosityLevel, progName);
if (ourClient == NULL) {
  *env << "Failed to create " << clientProtocolName << " client: " << env->getResultMsg() << "\n";
  shutdown(); // NOTE(review): code below assumes shutdown() does not return — confirm
}

if (sendOptionsRequest) {
  // Begin by sending an "OPTIONS" command:
  getOptions(continueAfterOPTIONS);
} else {
  // Skip OPTIONS and proceed directly as if it had succeeded.
  continueAfterOPTIONS(NULL, 0, NULL);
}

// All subsequent activity takes place within the event loop:
env->taskScheduler().doEventLoop(); // does not return

return 0; // only to prevent compiler warning
}

// Continuation invoked after the RTSP "OPTIONS" response (or immediately,
// with resultCode 0, when -o was not given).  Reports the result if the user
// asked for OPTIONS only (-O), otherwise proceeds to DESCRIBE.
// Takes ownership of 'resultString' (deleted below).
void continueAfterOPTIONS(RTSPClient*, int resultCode, char* resultString) {
  if (sendOptionsRequestOnly) {
    if (resultCode != 0) {
      *env << clientProtocolName << " \"OPTIONS\" request failed: " << resultString << "\n";
    } else {
      *env << clientProtocolName << " \"OPTIONS\" request returned: " << resultString << "\n";
    }
    shutdown(); // OPTIONS-only mode ends here
  }
  delete[] resultString;

  // Next, get a SDP description for the stream:
  getSDPDescription(continueAfterDESCRIBE);
}

// Continuation invoked after the RTSP "DESCRIBE" response.  On success,
// 'resultString' holds the SDP description (ownership transferred; deleted
// below).  Builds a MediaSession from it, then creates an RTP receiver for
// each (selected) subsession before moving on to SETUP.
void continueAfterDESCRIBE(RTSPClient*, int resultCode, char* resultString) {
  if (resultCode != 0) {
    *env << "Failed to get a SDP description from URL \"" << streamURL << "\": " << resultString << "\n";
    shutdown();
  }

  char* sdpDescription = resultString;
  *env << "Opened URL \"" << streamURL << "\", returning a SDP description:\n" << sdpDescription << "\n";

  // Create a media session object from this SDP description:
  session = MediaSession::createNew(*env, sdpDescription);
  delete[] sdpDescription;
  if (session == NULL) {
    *env << "Failed to create a MediaSession object from the SDP description: " << env->getResultMsg() << "\n";
    shutdown();
  } else if (!session->hasSubsessions()) {
    *env << "This session has no media subsessions (i.e., \"m=\" lines)\n";
    shutdown();
  }

  // Then, setup the "RTPSource"s for the session:
  MediaSubsessionIterator iter(*session);
  // NOTE(review): these locals shadow the file-scope 'subsession' and
  // 'madeProgress' globals declared further down in this file.
  MediaSubsession *subsession;
  Boolean madeProgress = False;
  char const* singleMediumToTest = singleMedium;
  while
  ((subsession = iter.next()) != NULL) {
    // If we've asked to receive only a single medium, then check this now:
    if (singleMediumToTest != NULL) {
      if (strcmp(subsession->mediumName(), singleMediumToTest) != 0) {
        *env << "Ignoring \"" << subsession->mediumName() << "/" << subsession->codecName() << "\" subsession, because we've asked to receive a single " << singleMedium << " session only\n";
        continue;
      } else {
        // Receive this subsession only
        singleMediumToTest = "xxxxx"; // this hack ensures that we get only 1 subsession of this type
      }
    }

    if (desiredPortNum != 0) {
      // Assign consecutive even port numbers (RTP uses port pairs: RTP+RTCP).
      subsession->setClientPortNum(desiredPortNum);
      desiredPortNum += 2;
    }

    if (createReceivers) {
      if (!subsession->initiate(simpleRTPoffsetArg)) {
        *env << "Unable to create receiver for \"" << subsession->mediumName() << "/" << subsession->codecName() << "\" subsession: " << env->getResultMsg() << "\n";
      } else {
        *env << "Created receiver for \"" << subsession->mediumName() << "/" << subsession->codecName() << "\" subsession (client ports " << subsession->clientPortNum() << "-" << subsession->clientPortNum()+1 << ")\n";
        madeProgress = True;

        if (subsession->rtpSource() != NULL) {
          // Because we're saving the incoming data, rather than playing
          // it in real time, allow an especially large time threshold
          // (1 second) for reordering misordered incoming packets:
          unsigned const thresh = 1000000; // 1 second
          subsession->rtpSource()->setPacketReorderingThresholdTime(thresh);

          // Set the RTP source's OS socket buffer size as appropriate - either if we were explicitly asked (using -B),
          // or if the desired FileSink buffer size happens to be larger than the current OS socket buffer size.
          // (The latter case is a heuristic, on the assumption that if the user asked for a large FileSink buffer size,
          // then the input data rate may be large enough to justify increasing the OS socket buffer size also.)
          int socketNum = subsession->rtpSource()->RTPgs()->socketNum();
          unsigned curBufferSize = getReceiveBufferSize(*env, socketNum);
          if (socketInputBufferSize > 0 || fileSinkBufferSize > curBufferSize) {
            unsigned newBufferSize = socketInputBufferSize > 0 ? socketInputBufferSize : fileSinkBufferSize;
            newBufferSize = setReceiveBufferTo(*env, socketNum, newBufferSize);
            if (socketInputBufferSize > 0) { // The user explicitly asked for the new socket buffer size; announce it:
              *env << "Changed socket receive buffer size for the \"" << subsession->mediumName() << "/" << subsession->codecName() << "\" subsession from " << curBufferSize << " to " << newBufferSize << " bytes\n";
            }
          }
        }
      }
    } else {
      // -r mode: we don't receive, but each subsession still needs a
      // client port for the SETUP request.
      if (subsession->clientPortNum() == 0) {
        *env << "No client port was specified for the \"" << subsession->mediumName() << "/" << subsession->codecName() << "\" subsession. (Try adding the \"-p <portNum>\" option.)\n";
      } else {
        madeProgress = True;
      }
    }
  }
  if (!madeProgress) shutdown();

  // Perform additional 'setup' on each subsession, before playing them:
  setupStreams();
}

// File-scope state shared by the SETUP continuation and setupStreams():
// the subsession currently being set up, and whether any SETUP succeeded.
MediaSubsession *subsession;
Boolean madeProgress = False;

// Continuation invoked after each RTSP "SETUP" response; reports the result
// for the current 'subsession', then chains back into setupStreams() to
// set up the next one.
void continueAfterSETUP(RTSPClient*, int resultCode, char* resultString) {
  if (resultCode == 0) {
    *env << "Setup \"" << subsession->mediumName() << "/" << subsession->codecName() << "\" subsession (client ports " << subsession->clientPortNum() << "-" << subsession->clientPortNum()+1 << ")\n";
    madeProgress = True;
  } else {
    *env << "Failed to setup \"" << subsession->mediumName() << "/" << subsession->codecName() << "\" subsession: " << env->getResultMsg() << "\n";
  }

  // Set up the next subsession, if any:
  setupStreams();
}

// Issues one SETUP per call (asynchronously, resuming via continueAfterSETUP)
// until all subsessions are set up; then creates the output sink(s), computes
// the play range from -s/-z/-d, and starts the session playing.
void setupStreams() {
  // Persistent iterator so each re-entry resumes where the last SETUP left off.
  static MediaSubsessionIterator* setupIter = NULL;
  if (setupIter == NULL) setupIter = new MediaSubsessionIterator(*session);
  while ((subsession = setupIter->next()) != NULL) {
    // We have another subsession left to set up:
    if (subsession->clientPortNum() == 0) continue; // port # was not set
    setupSubsession(subsession, streamUsingTCP,
    continueAfterSETUP);
    return; // continueAfterSETUP() will call us again
  }

  // We're done setting up subsessions.
  delete setupIter;
  if (!madeProgress) shutdown();

  // Create output files:
  if (createReceivers) {
    if (outputQuickTimeFile) {
      // Create a "QuickTimeFileSink", to write to 'stdout':
      qtOut = QuickTimeFileSink::createNew(*env, *session, "stdout", fileSinkBufferSize, movieWidth, movieHeight, movieFPS, packetLossCompensate, syncStreams, generateHintTracks, generateMP4Format);
      if (qtOut == NULL) {
        *env << "Failed to create QuickTime file sink for stdout: " << env->getResultMsg();
        shutdown();
      }
      qtOut->startPlaying(sessionAfterPlaying, NULL);
    } else if (outputAVIFile) {
      // Create an "AVIFileSink", to write to 'stdout':
      aviOut = AVIFileSink::createNew(*env, *session, "stdout", fileSinkBufferSize, movieWidth, movieHeight, movieFPS, packetLossCompensate);
      if (aviOut == NULL) {
        *env << "Failed to create AVI file sink for stdout: " << env->getResultMsg();
        shutdown();
      }
      aviOut->startPlaying(sessionAfterPlaying, NULL);
    } else {
      // Create and start "FileSink"s for each subsession:
      madeProgress = False;
      MediaSubsessionIterator iter(*session);
      while ((subsession = iter.next()) != NULL) {
        if (subsession->readSource() == NULL) continue; // was not initiated

        // Create an output file for each desired stream:
        char outFileName[1000];
        if (singleMedium == NULL) {
          // Output file name is
          // "<filename-prefix><medium_name>-<codec_name>-<counter>"
          static unsigned streamCounter = 0;
          snprintf(outFileName, sizeof outFileName, "%s%s-%s-%d", fileNamePrefix, subsession->mediumName(), subsession->codecName(), ++streamCounter);
        } else {
          // NOTE(review): constant copy — safe, but snprintf would be consistent.
          sprintf(outFileName, "stdout");
        }
        FileSink* fileSink;
        if (strcmp(subsession->mediumName(), "audio") == 0 && (strcmp(subsession->codecName(), "AMR") == 0 || strcmp(subsession->codecName(), "AMR-WB") == 0)) {
          // For AMR audio streams, we use a special sink that inserts AMR frame hdrs:
          fileSink = AMRAudioFileSink::createNew(*env, outFileName, fileSinkBufferSize, oneFilePerFrame);
        } else if
        (strcmp(subsession->mediumName(), "video") == 0 && (strcmp(subsession->codecName(), "H264") == 0)) {
          // For H.264 video stream, we use a special sink that insert start_codes:
          fileSink = H264VideoFileSink::createNew(*env, outFileName, fileSinkBufferSize, oneFilePerFrame);
        } else {
          // Normal case:
          fileSink = FileSink::createNew(*env, outFileName, fileSinkBufferSize, oneFilePerFrame);
        }
        subsession->sink = fileSink;
        if (subsession->sink == NULL) {
          *env << "Failed to create FileSink for \"" << outFileName << "\": " << env->getResultMsg() << "\n";
        } else {
          if (singleMedium == NULL) {
            *env << "Created output file: \"" << outFileName << "\"\n";
          } else {
            *env << "Outputting data from the \"" << subsession->mediumName() << "/" << subsession->codecName() << "\" subsession to 'stdout'\n";
          }

          if (strcmp(subsession->mediumName(), "video") == 0 && strcmp(subsession->codecName(), "MP4V-ES") == 0 && subsession->fmtp_config() != NULL) {
            // For MPEG-4 video RTP streams, the 'config' information
            // from the SDP description contains useful VOL etc. headers.
            // Insert this data at the front of the output file:
            unsigned configLen;
            unsigned char* configData = parseGeneralConfigStr(subsession->fmtp_config(), configLen);
            struct timeval timeNow;
            gettimeofday(&timeNow, NULL);
            fileSink->addData(configData, configLen, timeNow);
            delete[] configData;
          }

          subsession->sink->startPlaying(*(subsession->readSource()), subsessionAfterPlaying, subsession);

          // Also set a handler to be called if a RTCP "BYE" arrives
          // for this subsession:
          if (subsession->rtcpInstance() != NULL) {
            subsession->rtcpInstance()->setByeHandler(subsessionByeHandler, subsession);
          }
          madeProgress = True;
        }
      }
      if (!madeProgress) shutdown();
    }
  }

  // Finally, start playing each subsession, to start the data flow:
  // Derive a duration when none was given: forward play (-z > 0) runs to the
  // SDP end time; reverse play runs back from the initial seek point.
  if (duration == 0) {
    if (scale > 0) duration = session->playEndTime() - initialSeekTime; // use SDP end time
    else if (scale < 0) duration = initialSeekTime;
  }
  if (duration < 0) duration = 0.0;

  endTime = initialSeekTime;
  if (scale > 0) {
    if (duration <= 0) endTime = -1.0f; // -1 => open-ended play
    else endTime = initialSeekTime + duration;
  } else {
    // Reverse play: end point lies *before* the seek point, clamped at 0.
    endTime = initialSeekTime - duration;
    if (endTime < 0) endTime = 0.0f;
  }

  startPlayingSession(session, initialSeekTime, endTime, scale, continueAfterPLAY);
}

// Continuation invoked after the RTSP "PLAY" response.  Starts optional QOS
// measurement and, for a bounded duration, schedules the session-end timer.
// NOTE(review): this function is truncated at the end of the pasted chunk.
void continueAfterPLAY(RTSPClient*, int resultCode, char* resultString) {
  if (resultCode != 0) {
    *env << "Failed to start playing session: " << resultString << "\n";
    shutdown();
  } else {
    *env << "Started playing session\n";
  }

  if (qosMeasurementIntervalMS > 0) {
    // Begin periodic QOS measurements:
    beginQOSMeasurement();
  }

  // Figure out how long to delay (if at all) before shutting down, or
  // repeating the playing
  Boolean timerIsBeingUsed = False;
  double secondsToDelay = duration;
  if (duration > 0) {
    timerIsBeingUsed = True;
    double absScale = scale > 0 ?
    scale : -scale; // ASSERT: scale != 0
    // Wall-clock time scales inversely with playback speed; add the slop.
    secondsToDelay = duration/absScale + durationSlop;

    int64_t uSecsToDelay = (int64_t)(secondsToDelay*1000000.0);
    sessionTimerTask = env->taskScheduler().scheduleDelayedTask(uSecsToDelay, (TaskFunc*)sessionTimerHandler, (void*)NULL);
  }

  char const* actionString = createReceivers? "Receiving streamed data":"Data is being streamed";
  if (timerIsBeingUsed) {
    *env << actionString << " (for up to " << secondsToDelay
⌨️ Keyboard shortcuts
Copy code
Ctrl + C
Search code
Ctrl + F
Full-screen mode
F11
Toggle theme
Ctrl + Shift + D
Show shortcuts
?
Increase font size
Ctrl + =
Decrease font size
Ctrl + -