mpeg2audio.cc
/*
 * File: mpeg2audio.cc
 * By: Alex Theo de Jong
 * Created: February 1996
 * Description:
 *   MPEG 2 Audio class. The MPEG 2 Audio decoder reads from the stream
 *   buffer, decodes and plays the audio on the SUN Solaris or SGI Irix
 *   audio device.
 */
#define RELEASE "1.2, November 1996"

#ifdef __GNUG__
#pragma implementation
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <iomanip.h>
#include <fstream.h>
#include <sys/errno.h>
#include <unistd.h>

#ifdef IRIX
#include <dmedia/audio.h>
#endif
#ifdef SOLARIS
#include <sys/audioio.h>
#endif
#ifdef LINUX
#include <sys/soundcard.h>
#endif

#include "athread.hh"
#include "error.hh"
#include "debug.hh"
#include "util.hh"
#include "sync.hh"
#include "mpeg2const.hh"
#include "mpeg2buff.hh"
#include "mpeg2audio.hh"

extern int decode_ac3(AudioStream*, Synchronization*);

/*
 *
 * Mpeg2Audio
 *
 */

Mpeg2Audio::Mpeg2Audio(Mpeg2Buffer* input_buffer, Synchronization* s,
                       int audio, int c, char** v) :
  verbose_mode(True), filter_check(False), stdout_mode(False),
  which_channels(both), use_speaker(False), use_headphone(False),
  use_line_out(False), use_own_scalefactor(False),
  argc(c), terminate(0), terminated(0)
{
  sync = s;
  if (input_buffer)
    stream = new AudioStream(input_buffer, sync);
  else
    stream = 0;
  if (argc)
    argv = v;
  else
    argv = 0;
  header = new Header;
  buffer = 0;
  filter1 = 0;
  filter2 = 0;

  // audio == 1 starts the MPEG audio decoder thread, otherwise the AC-3 decoder thread
  if (audio == 1) {
#ifdef DEBUG
    Mpeg2Audio::player(this);
#else
    if (athr_create((void*(*)(void*))Mpeg2Audio::player, this, &thread_id) < 0){
      error("could not create audio player");
      athr_exit(0);
    }
#endif
  }
  else {
#ifdef DEBUG
    Mpeg2Audio::ac3_player(this);
#else
    if (athr_create((void*(*)(void*))Mpeg2Audio::ac3_player, this, &thread_id) < 0){
      error("could not create audio player");
      athr_exit(0);
    }
#endif
  }

  // raise the priority of the audio thread slightly above its creator
  sched_param param;
  int policy;
  if (athr_getschedparam(thread_id, &policy, &param) < 0){
    error("could not get thread priority");
  }
#ifdef LINUX
  param.sched_priority += 2;
  // policy = SCHED_RR;
  TRACER("AUDIOPRIORITY=" << param.sched_priority << "(" << param.sched_priority-2 << ")");
#else
  param.prio += 2;
  TRACER("AUDIOPRIORITY=" << param.prio << "(" << param.prio-2 << ")");
#endif
  if (athr_setschedparam(thread_id, policy, &param) < 0){
    error("could not set thread priority");
  }
}

Mpeg2Audio::~Mpeg2Audio()
{
  TRACER("Mpeg2Audio::~Mpeg2Audio()");
  // terminate=1;
  if (!terminated){
    TRACER("waiting for audio thread ...");
    athr_join(thread_id);
    // In order to play a TS stream without audio it shouldn't wait here.
    // This is a bug that needs to be fixed somewhere else.
    // Just a temporary fix. Alex 05/27/96
  }
  TRACER("audio thread done\nDeleting audio stuff ... ");
  delete stream;
  delete header;
  delete buffer;
  delete filter1;
  delete filter2;
  TRACER("audio thread done");
}

void* Mpeg2Audio::player(Mpeg2Audio* base)
{
  base->terminated = 0;
  base->terminate = 0;
  if (base->options())
    base->play();
  base->terminated = 1;
  TRACER("audio thread terminated");
  athr_exit(0);
  return 0;
}

void* Mpeg2Audio::ac3_player(Mpeg2Audio* base)
{
  base->terminated = 0;
  base->terminate = 0;
  decode_ac3(base->stream, base->sync);
  base->terminated = 1;
  TRACER("audio thread terminated");
  athr_exit(0);
  return 0;
}

void Mpeg2Audio::usage(const char* name)
{
  msg("usage: "); msg(name); message(" <filename> [options]");
  message("options:");
  message("\t-q\tquiet");
  message("\t-l\tdecode only the left channel");
  message("\t-r\tdecode only the right channel");
  message("\t-ux\tsend audio signal to speaker(x:s), headphone (x:h), line out (x:l)");
  message("\t-c\tcheck for filter range violations");
  message("\t-fn\tuse this (n) scalefactor instead of the default value 32768");
  message("");
  message("\tMPEG 2 Audio Player\n");
  msg("\tversion "); message(RELEASE);
  message("\tAlex Theo de Jong (e-mail: alex.dejong@nist.gov)\n");
  message("\tMulti-Media and Digital Video Group");
  message("\tNational Institute of Standards and Technology");
  message("\tGaithersburg, Md, U.S.A.\n\n");
  message("");
  message("Original code by:");
  message("\tTobias Bading");
  message("\tBerlin University of Technology, Germany");
}

int Mpeg2Audio::options()
{
  if (argc < 2 || argv[1][0] == '-'){
    usage(argv[0]);
    return 0;
  }
  filename = argv[1];
  for (int i = 2; i < argc; i++){
    if (argv[i][0] == '-' && argv[i][1])
      switch (argv[i][1]){
      case 'q': verbose_mode = False; break;
      case 's': stdout_mode = True; break;
      case 'l': which_channels = left; break;
      case 'r': which_channels = right; break;
      case 'u':
        switch (argv[i][2]){
        case 's': use_speaker = True; break;
        case 'h': use_headphone = True; break;
        case 'l': use_line_out = True; break;
        }
        break;
      case 'c': filter_check = True; break;
      case 'f': use_own_scalefactor = True; scalefactor = atoi(&argv[i][2]); break;
      default:  msg("unknown option "); msg(argv[i]); message(" - ignored");
      }
  }
  if (!(use_speaker || use_headphone || use_line_out))
    use_speaker = True;
  return 1;
}

int Mpeg2Audio::play()
{
  bool read_ready = False, write_ready = False;

  if (!stream)
    stream = new AudioStream(filename);   // read from file
  if (!header->read_header(stream, &crc)) {
    error("no header found!");
    athr_exit(0);
  }

  // get info from header of first frame:
  layer = header->layer();
  if ((mode = header->mode()) == single_channel)
    which_channels = left;
  sample_frequency = header->sample_frequency();

  // create filter(s):
  if (use_own_scalefactor)
    filter1 = new SynthesisFilter(0, scalefactor);
  else
    filter1 = new SynthesisFilter(0);
  if (mode != single_channel && which_channels == both)
    if (use_own_scalefactor)
      filter2 = new SynthesisFilter(1, scalefactor);
    else
      filter2 = new SynthesisFilter(1);

  // create buffer:
  if (stdout_mode)
#ifdef IRIX
    buffer = 0;
#else
    if (mode == single_channel || which_channels != both)
      buffer = new ShortObuffer(1);
    else
      buffer = new ShortObuffer(2);
#endif
  else
#ifdef IRIX
    if (mode == single_channel || which_channels != both)
      buffer = new IrixObuffer(1, header);
    else
      buffer = new IrixObuffer(2, header);
#else
#ifdef SOLARIS
    if (SparcObuffer::class_suitable()){
      if (mode == single_channel || which_channels != both)
        buffer = new SparcObuffer(1, header, use_speaker, use_headphone, use_line_out);
      else
        buffer = new SparcObuffer(2, header, use_speaker, use_headphone, use_line_out);
    }
    else {
      error("Sorry, no suitable audio device detected, please use stdout mode");
      athr_exit(0);
    }
#else
#ifdef LINUX
    if (LinuxObuffer::class_suitable()){
      if (mode == single_channel || which_channels != both)
        buffer = new LinuxObuffer(1, header);
      else
        buffer = new LinuxObuffer(2, header);
    }
    else {
      error("Sorry, no suitable audio device detected, please use stdout mode");
      athr_exit(0);
    }
#endif // !LINUX
#endif // !SOLARIS
#endif // !IRIX

  if (!buffer){
    error("no audio device available");
    return 0;
  }

  if (verbose_mode){
    // print information about the stream
    char *name = strrchr(filename, '/');
    if (name)
      ++name;
    else
      name = filename;
    cerr << name << " is a layer " << header->layer_string() << ' '
         << header->mode_string() << " MPEG audio stream with";
    if (!header->checksums())
      cerr << "out";
    cerr << " checksums.\nThe sample frequency is "
         << header->sample_frequency_string()
         << " at a bitrate of " << header->bitrate_string() << ".\n"
            "This stream is ";
    if (header->original())
      cerr << "an original";
    else
      cerr << "a copy";
    cerr << " and is ";
    if (!header->copyright())
      cerr << "not ";
    cerr << "copyright protected.\n";
  }

  // main decoding loop: one iteration per audio frame
  do {
    // is there a change in important parameters?
    // (bitrate switching is allowed)
    if (header->layer() != layer){
      // layer switching is allowed
      if (verbose_mode)
        cerr << "switching to layer " << header->layer_string() << ".\n";
      layer = header->layer();
    }
    if ((mode == single_channel && header->mode() != single_channel) ||
        (mode != single_channel && header->mode() == single_channel)){
      // switching from single channel to stereo or vice versa is not allowed
      error("illegal switch from single channel to stereo or vice versa!");
      athr_exit(0);
    }
    if (header->sample_frequency() != sample_frequency){
      // switching the sample frequency is not allowed
      error("sorry, can't switch the sample frequency in the middle of the stream!");
      athr_exit(0);
    }

    unsigned int i = 0;

    // create subband objects:
    if (header->layer() == 1){
      if (header->mode() == single_channel)
        for (i = 0; i < header->number_of_subbands(); ++i)
          subbands[i] = new SubbandLayer1(i);
      else if (header->mode() == joint_stereo){
        for (i = 0; i < header->intensity_stereo_bound(); ++i)
          subbands[i] = new SubbandLayer1Stereo(i);
        for (; i < header->number_of_subbands(); ++i)
          subbands[i] = new SubbandLayer1IntensityStereo(i);
      }
      else
        for (i = 0; i < header->number_of_subbands(); ++i)
          subbands[i] = new SubbandLayer1Stereo(i);
    }
    else if (header->layer() == 2){
      if (header->mode() == single_channel)
        for (i = 0; i < header->number_of_subbands(); ++i)
          subbands[i] = new SubbandLayer2(i);
      else if (header->mode() == joint_stereo){
        for (i = 0; i < header->intensity_stereo_bound(); ++i)
          subbands[i] = new SubbandLayer2Stereo(i);
        for (; i < header->number_of_subbands(); ++i)
          subbands[i] = new SubbandLayer2IntensityStereo(i);
      }
      else
        for (i = 0; i < header->number_of_subbands(); ++i)
          subbands[i] = new SubbandLayer2Stereo(i);
    }
    else {
      error("sorry, layer 3 not implemented!");
      athr_exit(0);
    }

    // start to read audio data:
    for (i = 0; i < header->number_of_subbands(); ++i)
      subbands[i]->read_allocation(stream, header, crc);
    if (header->layer() == 2)
      for (i = 0; i < header->number_of_subbands(); ++i)
        ((SubbandLayer2 *)subbands[i])->read_scalefactor_selection(stream, crc);

    if (!crc || header->checksum_ok()){
      // no checksums or checksum ok, continue reading from stream:
      for (i = 0; i < header->number_of_subbands(); ++i)
        subbands[i]->read_scalefactor(stream, header);
      do {
        for (i = 0; i < header->number_of_subbands(); ++i)
          read_ready = subbands[i]->read_sampledata(stream);
        do {
          for (i = 0; i < header->number_of_subbands(); ++i)
            write_ready = subbands[i]->put_next_sample(which_channels, filter1, filter2);
          filter1->calculate_pcm_samples(buffer);
          if (which_channels == both && header->mode() != single_channel)
            filter2->calculate_pcm_samples(buffer);
        } while (!write_ready);
      } while (!read_ready);
      if (sync)
        sync->wait(2);           // wait for PTS, id=2, if <0, end of file
      buffer->write_buffer(1);   // write to stdout
    }
    else
      // Sh*t! Wrong crc checksum in frame!
      cerr << "WARNING: frame contains wrong crc checksum! (throwing frame away)\n";

    for (i = 0; i < header->number_of_subbands(); ++i)
      delete subbands[i];
  } while (!terminate && header->read_header(stream, &crc) && (!sync || !sync->done(2)));

  uint32 range_violations = filter1->violations();
  if (mode != single_channel && which_channels == both)
    range_violations += filter2->violations();
  if (filter_check){
    // check whether (one of) the filter(s) produced values not in [-1.0, 1.0]:
    if (range_violations){
      cerr << range_violations << " range violations have occurred!\n";
      if (stdout_mode)
        cerr << "If you notice these violations,\n";
      else
        cerr << "If you have noticed these violations,\n";
      cerr << "please use the -f option with the value ";
      if (mode != single_channel && which_channels == both &&
          filter2->hardest_violation() > filter1->hardest_violation())
        cerr << filter2->recommended_scalefactor();
      else
        cerr << filter1->recommended_scalefactor();
      cerr << "\nor a greater value up to 32768 and try again.\n";
    }
  }

  if (verbose_mode){
    // print playtime of stream:
    real playtime = filter1->seconds_played(Header::frequency(sample_frequency));
    uint32 minutes = (uint32)(playtime / 60.0);
    uint32 seconds = (uint32)playtime - minutes * 60;
    uint32 centiseconds = (uint32)((playtime - (real)(minutes * 60) - (real)seconds) * 100.0);
    cerr << "end of stream, playtime: " << minutes << ':'
         << setw(2) << setfill('0') << seconds << '.'
         << setw(2) << setfill('0') << centiseconds << '\n';
  }
  return 0;
}
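/*
 * Example invocation (a sketch based only on the options parsed in
 * Mpeg2Audio::options() above; the program name "maudio" and the input
 * file name are illustrative, not part of this source):
 *
 *   maudio stream.mp2 -r -us -c -f30000
 *
 * This decodes only the right channel, routes the signal to the speaker,
 * checks for synthesis-filter range violations, and uses 30000 instead of
 * the default scalefactor of 32768.
 */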