📄 mpeg2ps.c
uint8_t mpeg2ps_get_video_stream_mp4_type (mpeg2ps_t *ps, uint streamno)
{
  if (invalid_video_streamno(ps, streamno)) {
    return 0;
  }
  if (ps->video_streams[streamno]->have_h264) {
    return MP4_PRIVATE_VIDEO_TYPE;
  }
  if (ps->video_streams[streamno]->have_mpeg2) {
    return mpeg2_profile_to_mp4_track_type(ps->video_streams[streamno]->video_profile);
  }
  return MP4_MPEG1_VIDEO_TYPE;
}

static bool invalid_audio_streamno (mpeg2ps_t *ps, uint streamno)
{
  if (streamno >= NUM_ELEMENTS_IN_ARRAY(ps->audio_streams)) return true;
  if (ps->audio_streams[streamno] == NULL) return true;
  return false;
}

uint32_t mpeg2ps_get_audio_stream_count (mpeg2ps_t *ps)
{
  return ps->audio_cnt;
}

const char *mpeg2ps_get_audio_stream_name (mpeg2ps_t *ps, uint streamno)
{
  if (invalid_audio_streamno(ps, streamno)) {
    return "none";
  }
  if (ps->audio_streams[streamno]->m_stream_id >= 0xc0) {
    switch (ps->audio_streams[streamno]->version) {
    case 3:
      switch (ps->audio_streams[streamno]->layer) {
      case 3: return "MP1 layer 1";
      case 2: return "MP1 layer 2";
      case 1: return "MP1 layer 3";
      }
      break;
    case 2:
      switch (ps->audio_streams[streamno]->layer) {
      case 3: return "MP2 layer 1";
      case 2: return "MP2 layer 2";
      case 1: return "MP2 layer 3";
      }
      break;
    case 0:
      switch (ps->audio_streams[streamno]->layer) {
      case 3: return "MP2.5 layer 1";
      case 2: return "MP2.5 layer 2";
      case 1: return "MP2.5 layer 3";
      }
      break;
    }
    return "unknown mpeg layer";
  }
  if (ps->audio_streams[streamno]->m_substream_id >= 0x80 &&
      ps->audio_streams[streamno]->m_substream_id < 0x90)
    return "AC3";
  return "LPCM";
}

mpeg2ps_audio_type_t mpeg2ps_get_audio_stream_type (mpeg2ps_t *ps, uint streamno)
{
  if (invalid_audio_streamno(ps, streamno)) {
    return MPEG_AUDIO_UNKNOWN;
  }
  if (ps->audio_streams[streamno]->m_stream_id >= 0xc0) {
    return MPEG_AUDIO_MPEG;
  }
  if (ps->audio_streams[streamno]->m_substream_id >= 0x80 &&
      ps->audio_streams[streamno]->m_substream_id < 0x90)
    return MPEG_AUDIO_AC3;
  return MPEG_AUDIO_LPCM;
}

uint32_t mpeg2ps_get_audio_stream_sample_freq (mpeg2ps_t *ps, uint streamno)
{
  if (invalid_audio_streamno(ps, streamno)) {
    return 0;
  }
  return ps->audio_streams[streamno]->freq;
}

uint32_t mpeg2ps_get_audio_stream_channels (mpeg2ps_t *ps, uint streamno)
{
  if (invalid_audio_streamno(ps, streamno)) {
    return 0;
  }
  return ps->audio_streams[streamno]->channels;
}

uint32_t mpeg2ps_get_audio_stream_bitrate (mpeg2ps_t *ps, uint streamno)
{
  if (invalid_audio_streamno(ps, streamno)) {
    return 0;
  }
  return ps->audio_streams[streamno]->bitrate;
}
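/*
 * Usage sketch: listing the audio streams of an already-open handle with the
 * getters above.  This is a minimal illustration, assuming the public header
 * is "mpeg2ps.h"; dump_audio_streams is a hypothetical caller, not part of
 * the library, and "ps" is expected to come from mpeg2ps_init() below.
 */
#include <stdio.h>
#include "mpeg2ps.h"   /* assumed public header for this library */

void dump_audio_streams (mpeg2ps_t *ps)
{
  uint32_t ix, cnt = mpeg2ps_get_audio_stream_count(ps);

  for (ix = 0; ix < cnt; ix++) {
    printf("audio %u: %s, %u Hz, %u channel(s), %u bps\n",
           ix,
           mpeg2ps_get_audio_stream_name(ps, ix),
           mpeg2ps_get_audio_stream_sample_freq(ps, ix),
           mpeg2ps_get_audio_stream_channels(ps, ix),
           mpeg2ps_get_audio_stream_bitrate(ps, ix));
  }
}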
mpeg2ps_t *mpeg2ps_init (const char *filename)
{
  mpeg2ps_t *ps = MALLOC_STRUCTURE(mpeg2ps_t);
#if 0
  uint8_t local[4];
  uint32_t hdr;
#endif

  if (ps == NULL) {
    return NULL;
  }
  memset(ps, 0, sizeof(*ps));
  ps->fd = file_open(filename);
  if (file_okay(ps->fd) == false) {
    free(ps);
    return NULL;
  }

#if 0
  file_read_bytes(ps->fd, local, 4);
  hdr = convert32(local);
  // this should accept all pes headers with 0xba or greater - so,
  // we'll handle pack streams, or pes streams.
  if (((hdr & MPEG2_PS_START_MASK) != MPEG2_PS_START) ||
      (hdr < MPEG2_PS_PACKSTART)) {
    free(ps);
    return NULL;
  }
#endif

  ps->filename = strdup(filename);
  mpeg2ps_scan_file(ps);
  if (ps->video_cnt == 0 && ps->audio_cnt == 0) {
    mpeg2ps_close(ps);
    return NULL;
  }
  return ps;
}

void mpeg2ps_close (mpeg2ps_t *ps)
{
  uint ix;
  if (ps == NULL) return;
  for (ix = 0; ix < ps->video_cnt; ix++) {
    mpeg2ps_stream_destroy(ps->video_streams[ix]);
    ps->video_streams[ix] = NULL;
  }
  for (ix = 0; ix < ps->audio_cnt; ix++) {
    mpeg2ps_stream_destroy(ps->audio_streams[ix]);
    ps->audio_streams[ix] = NULL;
  }

  CHECK_AND_FREE(ps->filename);
  if (ps->fd != FDNULL) file_close(ps->fd);
  free(ps);
}
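/*
 * Usage sketch: the open/close lifecycle.  mpeg2ps_init() returns NULL when
 * the file cannot be opened or contains no recognizable streams, so the only
 * cleanup the caller owes is mpeg2ps_close() on a non-NULL handle.
 * demo_open_close is a hypothetical caller and assumes the includes from the
 * first sketch.
 */
int demo_open_close (const char *fname)
{
  mpeg2ps_t *ps = mpeg2ps_init(fname);

  if (ps == NULL) {
    fprintf(stderr, "%s: not a usable MPEG program stream\n", fname);
    return -1;
  }
  printf("%u audio stream(s)\n", mpeg2ps_get_audio_stream_count(ps));
  dump_audio_streams(ps);   /* helper from the sketch above */
  mpeg2ps_close(ps);
  return 0;
}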
/*
 * check_fd_for_stream will make sure we have a fd for the stream we're
 * trying to read - we use a different fd for each stream
 */

/*
 * stream_convert_frame_ts_to_msec - given a "read" frame, we'll
 * calculate the msec and freq timestamps.  This can be called more
 * than 1 time, if needed, without changing any variables, such as
 * frames_since_last_ts, which gets updated in advance_frame
 */
static uint64_t stream_convert_frame_ts_to_msec (mpeg2ps_stream_t *sptr,
                                                 mpeg2ps_ts_type_t ts_type,
                                                 uint64_t base_dts,
                                                 uint32_t *freq_ts)
{
  uint64_t calc_ts;
  uint frames_since_last = 0;
  uint64_t freq_conv;

  calc_ts = sptr->last_ts;
  if (sptr->frame_ts.have_dts) calc_ts = sptr->frame_ts.dts;
  else if (sptr->frame_ts.have_pts) calc_ts = sptr->frame_ts.pts; // use the pts when only a pts is present
  else frames_since_last = sptr->frames_since_last_ts + 1;

  if (freq_ts != NULL) {
#if 0
    printf("base dts "U64" "U64" %d %u %u\n",
           base_dts, calc_ts, frames_since_last,
           sptr->samples_per_frame, sptr->freq);
#endif
    freq_conv = calc_ts - base_dts;
    freq_conv *= sptr->freq;
    freq_conv /= 90000;
    freq_conv += frames_since_last * sptr->samples_per_frame;
    *freq_ts = freq_conv & 0xffffffff;
  }
  return convert_ts(sptr, ts_type, calc_ts, base_dts, frames_since_last);
}

/*
 * mpeg2ps_get_video_frame - gets the next frame
 */
bool mpeg2ps_get_video_frame (mpeg2ps_t *ps, uint streamno,
                              uint8_t **buffer, uint32_t *buflen,
                              uint8_t *frame_type,
                              mpeg2ps_ts_type_t ts_type,
                              uint64_t *timestamp)
{
  mpeg2ps_stream_t *sptr;

  if (invalid_video_streamno(ps, streamno)) return false;

  sptr = ps->video_streams[streamno];
  check_fd_for_stream(ps, sptr);

  if (sptr->have_frame_loaded == false) {
    // if we don't have the frame in the buffer (like after a seek),
    // read the next frame
    if (mpeg2ps_stream_read_frame(sptr, buffer, buflen, false) == false) {
      return false;
    }
  } else {
    *buffer = sptr->pes_buffer + sptr->pes_buffer_on;
    *buflen = sptr->frame_len;
  }

  // determine frame type
  if (frame_type != NULL) {
    *frame_type = MP4AV_Mpeg3PictHdrType(sptr->pes_buffer + sptr->pict_header_offset);
  }

  // and the timestamp
  if (timestamp != NULL) {
    *timestamp = stream_convert_frame_ts_to_msec(sptr, ts_type, ps->first_dts, NULL);
  }

  // finally, indicate that we read this frame - get ready for the next one.
  advance_frame(sptr);
  return true;
}

// see above comments
bool mpeg2ps_get_audio_frame (mpeg2ps_t *ps, uint streamno,
                              uint8_t **buffer, uint32_t *buflen,
                              mpeg2ps_ts_type_t ts_type,
                              uint32_t *freq_timestamp,
                              uint64_t *timestamp)
{
  mpeg2ps_stream_t *sptr;
  uint64_t ts;

  if (invalid_audio_streamno(ps, streamno)) return false;

  sptr = ps->audio_streams[streamno];
  check_fd_for_stream(ps, sptr);

  if (sptr->have_frame_loaded == false) {
    if (mpeg2ps_stream_read_frame(sptr, buffer, buflen, false) == false)
      return false;
  } else {
    *buffer = sptr->pes_buffer + sptr->pes_buffer_on;
    *buflen = sptr->frame_len;
  }

  if (timestamp != NULL || freq_timestamp != NULL) {
    ts = stream_convert_frame_ts_to_msec(sptr, ts_type, ps->first_dts, freq_timestamp);
    if (timestamp != NULL) {
      *timestamp = ts;
    }
  }

  advance_frame(sptr);
  return true;
}
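/*
 * Usage sketch: draining the first video stream.  mpeg2ps_get_video_frame()
 * hands back a pointer into the library's internal PES buffer, so each frame
 * should be consumed before the next call.  demo_read_video is a hypothetical
 * caller, assumes the includes from the first sketch, and assumes stream 0
 * exists.
 */
void demo_read_video (mpeg2ps_t *ps)
{
  uint8_t *frame;
  uint32_t frame_len;
  uint8_t frame_type;
  uint64_t msec;

  while (mpeg2ps_get_video_frame(ps, 0, &frame, &frame_len,
                                 &frame_type, TS_MSEC, &msec)) {
    printf("frame type %d, %u bytes at "U64" msec\n",
           frame_type, frame_len, msec);
  }
}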
/***************************************************************************
 * seek routines
 ***************************************************************************/

/*
 * mpeg2ps_binary_seek - look for a pts that's close to the one that
 * we're looking for.  We have a start ts and location, an end ts and
 * location, and what we're looking for
 */
static void mpeg2ps_binary_seek (mpeg2ps_t *ps, mpeg2ps_stream_t *sptr,
                                 uint64_t search_dts,
                                 uint64_t start_dts, off_t start_loc,
                                 uint64_t end_dts, off_t end_loc)
{
  uint64_t dts_perc;
  off_t loc;
  uint16_t pes_len;
  bool have_ts = false;
  off_t found_loc;
  uint64_t found_dts;

  mpeg2ps_message(LOG_DEBUG, "bin search "U64, search_dts);
  while (1) {
    /*
     * It's not a binary search as much as using a percentage between
     * the start and end dts to start.  We subtract off a bit, so we
     * approach from the beginning of the file - we're more likely to
     * hit a pts that way
     */
    dts_perc = (search_dts - start_dts) * 1000 / (end_dts - start_dts);
    dts_perc -= dts_perc % 10;

    loc = ((end_loc - start_loc) * dts_perc) / 1000;
    if (loc == start_loc || loc == end_loc) return;

    clear_stream_buffer(sptr);
    file_seek_to(sptr->m_fd, start_loc + loc);
    mpeg2ps_message(LOG_DEBUG,
                    "start dl "U64" "U64" end dl "U64" "U64" new "U64,
                    start_dts, start_loc, end_dts, end_loc, loc);

    // we'll look for the next pes header for this stream that has a ts.
    do {
      if (search_for_next_pes_header(sptr, &pes_len, &have_ts, &found_loc) == false) {
        return;
      }
      if (have_ts == false) {
        file_skip_bytes(sptr->m_fd, pes_len);
      }
    } while (have_ts == false);

    // record that spot...
    mpeg2ps_record_pts(sptr, found_loc, &sptr->next_pes_ts);

    found_dts = sptr->next_pes_ts.have_dts ?
      sptr->next_pes_ts.dts : sptr->next_pes_ts.pts;
    mpeg2ps_message(LOG_DEBUG, "found dts "U64" loc "U64, found_dts, loc);

    /*
     * Now, if we're before the search ts, and within 5 seconds,
     * we'll say we're close enough
     */
    if (found_dts + (5 * 90000) > search_dts &&
        found_dts < search_dts) {
      file_seek_to(sptr->m_fd, found_loc);
      return; // found it - we can seek from here
    }

    /*
     * otherwise, move the head or the tail (most likely the head).
     */
    if (found_dts > search_dts) {
      if (found_dts >= end_dts) {
        file_seek_to(sptr->m_fd, found_loc);
        return;
      }
      end_loc = found_loc;
      end_dts = found_dts;
    } else {
      if (found_dts <= start_dts) {
        file_seek_to(sptr->m_fd, found_loc);
        return;
      }
      start_loc = found_loc;
      start_dts = found_dts;
    }
  }
}
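/*
 * Illustration of the interpolation step above (hypothetical helper, not part
 * of the library).  With start_dts 0 at offset 0 and end_dts 600*90000 at
 * offset 600 MB, searching for dts 91*90000 gives 151 -> 150 thousandths,
 * i.e. a first guess of 90 MB, deliberately a little before the proportional
 * 91 MB point so the scan approaches the target from the front.
 */
off_t interpolate_seek_guess (uint64_t search_dts,
                              uint64_t start_dts, off_t start_loc,
                              uint64_t end_dts, off_t end_loc)
{
  uint64_t dts_perc = (search_dts - start_dts) * 1000 / (end_dts - start_dts);
  dts_perc -= dts_perc % 10;   /* bias toward the start of the range */
  return start_loc + ((end_loc - start_loc) * (off_t)dts_perc) / 1000;
}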
/*
 * mpeg2ps_seek_frame - seek to the next timestamp after the search timestamp
 * First, find a close DTS (usually minus 5 seconds or closer), then
 * read frames until we get the frame after the timestamp.
 */
static bool mpeg2ps_seek_frame (mpeg2ps_t *ps, mpeg2ps_stream_t *sptr,
                                uint64_t search_msec_timestamp)
{
  uint64_t dts;
  mpeg2ps_record_pes_t *rec;
  uint64_t msec_ts;
  uint8_t *buffer;
  uint32_t buflen;

  check_fd_for_stream(ps, sptr);
  clear_stream_buffer(sptr);

  if (search_msec_timestamp <= 1000) { // first second, start from begin...
    file_seek_to(sptr->m_fd, sptr->first_pes_loc);
    return true;
  }

  dts = search_msec_timestamp * 90; // 1000 timescale to 90000 timescale
  dts += ps->first_dts;

  mpeg2ps_message(LOG_DEBUG, "%x seek msec "U64" dts "U64,
                  sptr->m_stream_id, search_msec_timestamp, dts);

  /*
   * see if the recorded data has anything close
   */
  rec = search_for_ts(sptr, dts);
  if (rec != NULL) {
    // see if it is close
    mpeg2ps_message(LOG_DEBUG, "found rec dts "U64" loc "U64,
                    rec->dts, rec->location);
    // if we're plus or minus a second, seek to that.
    if (rec->dts + 90000 >= dts && rec->dts <= dts + 90000) {
      file_seek_to(sptr->m_fd, rec->location);
      return true;
    }
    // at this point, rec is > a distance.  If within 5 or so seconds,
    // skip
    if (rec->dts > dts) {
      mpeg2ps_message(LOG_ERR, "stream %x seek frame error dts "U64" rec "U64,
                      sptr->m_stream_id, dts, rec->dts);
      return false;
    }
    if (rec->dts + (5 * 90000) < dts) {
      // more than 5 seconds away - skip and search
      if (rec->next_rec == NULL) {
        mpeg2ps_binary_seek(ps, sptr, dts,
                            rec->dts, rec->location,
                            sptr->end_dts, sptr->end_dts_loc);
      } else {
        mpeg2ps_binary_seek(ps, sptr, dts,
                            rec->dts, rec->location,
                            rec->next_rec->dts, rec->next_rec->location);
      }
    }
    // otherwise, frame by frame search...
  } else {
    // we weren't able to find anything from the recording
    mpeg2ps_binary_seek(ps, sptr, dts,
                        sptr->start_dts, sptr->first_pes_loc,
                        sptr->end_dts, sptr->end_dts_loc);
  }

  /*
   * Now, the fun part - read frames until we're just past the time
   */
  clear_stream_buffer(sptr); // clear out any data, so we can read it
  do {
    if (mpeg2ps_stream_read_frame(sptr, &buffer, &buflen, false) == false)
      return false;
    msec_ts = stream_convert_frame_ts_to_msec(sptr, TS_MSEC, ps->first_dts, NULL);
    mpeg2ps_message(LOG_DEBUG, "%x read ts "U64, sptr->m_stream_id, msec_ts);
    if (msec_ts < search_msec_timestamp) {
      // only advance the frame if we're not greater than the timestamp
      advance_frame(sptr);
    }
  } while (msec_ts < search_msec_timestamp);
  return true;
}

/*
 * mpeg2ps_seek_video_frame - seek to the location that we're interested
 * in, then scroll up to the next I frame
 */
bool mpeg2ps_seek_video_frame (mpeg2ps_t *ps, uint streamno,
                               uint64_t msec_timestamp)
{
  // off_t closest_pes;
  uint8_t frame_type;
  mpeg2ps_stream_t *sptr;
  uint8_t *buffer;
  uint32_t buflen;
  uint64_t msec_ts;

  if (invalid_video_streamno(ps, streamno)) return false;

  sptr = ps->video_streams[streamno];
  if (mpeg2ps_seek_frame(ps, sptr, msec_timestamp) == false) return false;

  if (sptr->have_frame_loaded == false) {
    mpeg2ps_message(LOG_CRIT, "no frame loaded after search");
    return false;
  }

  /*
   * read forward until we find the next I frame
   */
  if (sptr->have_h264) {
    while (h264_access_unit_is_sync(sptr->pes_buffer + sptr->pict_header_offset,
                                    sptr->pes_buffer_size - sptr->pict_header_offset) == false) {
      advance_frame(sptr);
      if (mpeg2ps_stream_read_frame(sptr, &buffer, &buflen, false) == false)
        return false;
      msec_ts = stream_convert_frame_ts_to_msec(sptr, TS_MSEC, ps->first_dts, NULL);
      mpeg2ps_message(LOG_DEBUG, "read ts "U64, msec_ts);
    }
  } else {
    frame_type = MP4AV_Mpeg3PictHdrType(sptr->pes_buffer + sptr->pict_header_offset);
    while (frame_type != 1) {
      advance_frame(sptr);
      if (mpeg2ps_stream_read_frame(sptr, &buffer, &buflen, false) == false)
        return false;
      frame_type = MP4AV_Mpeg3PictHdrType(sptr->pes_buffer + sptr->pict_header_offset);
      msec_ts = stream_convert_frame_ts_to_msec(sptr, TS_MSEC, ps->first_dts, NULL);
      mpeg2ps_message(LOG_DEBUG, "read ts "U64" type %d", msec_ts, frame_type);
    }
  }
  return true;
}

/*
 * mpeg2ps_seek_audio_frame - go to the closest audio frame after the
 * timestamp
 */
bool mpeg2ps_seek_audio_frame (mpeg2ps_t *ps, uint streamno,
                               uint64_t msec_timestamp)
{
  // off_t closest_pes;
  mpeg2ps_stream_t *sptr;

  if (invalid_audio_streamno(ps, streamno)) return false;

  sptr = ps->audio_streams[streamno];
  if (mpeg2ps_seek_frame(ps, sptr, msec_timestamp) == false)
    return false;
  return true;
}
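/*
 * Usage sketch: random access.  Seeking the video stream lands on the next
 * I-frame (or H.264 sync point) at or after the requested millisecond offset;
 * the audio stream is sought separately because every stream keeps its own
 * file descriptor.  demo_seek is a hypothetical caller using stream 0 of each
 * type and the includes from the first sketch.
 */
bool demo_seek (mpeg2ps_t *ps, uint64_t msec)
{
  if (mpeg2ps_seek_video_frame(ps, 0, msec) == false)
    return false;
  if (mpeg2ps_seek_audio_frame(ps, 0, msec) == false)
    return false;
  // the next mpeg2ps_get_*_frame() calls will start at the new position
  return true;
}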