player_example.c

mediastreamer2 is an open-source library for streaming media over a network.

Language: C
Page 1 of 2
/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggTheora SOFTWARE CODEC SOURCE CODE.   *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
 *                                                                  *
 * THE Theora SOURCE CODE IS COPYRIGHT (C) 2002-2003                *
 * by the Xiph.Org Foundation http://www.xiph.org/                  *
 *                                                                  *
 ********************************************************************

  function: example SDL player application; plays Ogg Theora files (with
            optional Vorbis audio second stream)
  last mod: $Id: player_example.c 11442 2006-05-27 17:28:08Z giles $

 ********************************************************************/

/* far more complex than most Ogg 'example' programs.  The complexity
   of maintaining A/V sync is pretty much unavoidable.  It's necessary
   to actually have audio/video playback to make the hard audio clock
   sync actually work.  If there's audio playback, there might as well
   be simple video playback as well...

   A simple 'demux and write back streams' would have been easier,
   it's true. */

#define _GNU_SOURCE
#define _LARGEFILE_SOURCE
#define _LARGEFILE64_SOURCE
#define _FILE_OFFSET_BITS 64

#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#ifndef _REENTRANT
# define _REENTRANT
#endif

#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <math.h>
#include <signal.h>
#include "theora/theora.h"
#include "vorbis/codec.h"
#include <SDL.h>

/* yes, this makes us OSS-specific for now. None of SDL, libao, libao2
   give us any way to determine hardware timing, and since the
   hard/kernel buffer is going to be most of or > a second, that's
   just a little bit important */
#if defined(__FreeBSD__)
#include <machine/soundcard.h>
#define AUDIO_DEVICE "/dev/audio"
#elif defined(__NetBSD__) || defined(__OpenBSD__)
#include <soundcard.h>
#define AUDIO_DEVICE "/dev/audio"
#else
#include <sys/soundcard.h>
#define AUDIO_DEVICE "/dev/dsp"
#endif
#include <sys/ioctl.h>

/* Helper; just grab some more compressed bitstream and sync it for
   page extraction */
int buffer_data(FILE *in,ogg_sync_state *oy){
  char *buffer=ogg_sync_buffer(oy,4096);
  int bytes=fread(buffer,1,4096,in);
  ogg_sync_wrote(oy,bytes);
  return(bytes);
}

/* never forget that globals are a one-way ticket to Hell */
/* Ogg and codec state for demux/decode */
ogg_sync_state   oy;
ogg_page         og;
ogg_stream_state vo;
ogg_stream_state to;
theora_info      ti;
theora_comment   tc;
theora_state     td;
vorbis_info      vi;
vorbis_dsp_state vd;
vorbis_block     vb;
vorbis_comment   vc;

int              theora_p=0;
int              vorbis_p=0;
int              stateflag=0;

/* SDL Video playback structures */
SDL_Surface *screen;
SDL_Overlay *yuv_overlay;
SDL_Rect rect;

/* single frame video buffering */
int          videobuf_ready=0;
ogg_int64_t  videobuf_granulepos=-1;
double       videobuf_time=0;

/* single audio fragment audio buffering */
int          audiobuf_fill=0;
int          audiobuf_ready=0;
ogg_int16_t *audiobuf;
ogg_int64_t  audiobuf_granulepos=0; /* time position of last sample */

/* audio / video synchronization tracking:

Since this will make it to Google at some point and lots of people
search for how to do this, a quick rundown of a practical A/V sync
strategy under Linux [the UNIX where Everything Is Hard].  Naturally,
this works on other platforms using OSS for sound as well.

In OSS, we don't have reliable access to any precise information on
the exact current playback position (that, of course would have been
too easy; the kernel folks like to keep us app people working hard
doing simple things that should have been solved once and abstracted
long ago).  Hopefully ALSA solves this a little better; we'll probably
use that once ALSA is the standard in the stable kernel.

We can't use the system clock for a/v sync because audio is hard
synced to its own clock, and both the system and audio clocks suffer
from wobble, drift, and a lack of accuracy that can be guaranteed to
add a reliable percent or so of error.  After ten seconds, that's
100ms.  We can't drift by half a second every minute.

Although OSS can't generally tell us where the audio playback pointer
is, we do know that if we work in complete audio fragments and keep
the kernel buffer full, a blocking select on the audio buffer will
give us a writable fragment immediately after playback finishes with
it.  We assume at that point that we know the exact number of bytes in
the kernel buffer that have not been played (total fragments minus
one) and calculate clock drift between audio and system then (and only
then).  Damp the sync correction fraction, apply, and walla: A
reliable A/V clock that even works if it's interrupted. */
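/* Editor's illustrative sketch -- NOT part of the original
   player_example.c.  It restates the calibration arithmetic described
   in the comment above with hypothetical parameter names: given the
   granulepos of the last sample we queued and the number of bytes
   still waiting to be played (our own fragment buffer plus the kernel
   buffer, minus the fragment that just came free), work back to the
   sample hitting the DAC right now and pin the system clock to the
   moment sample 0 started playing.  The real code further down does
   the same thing using the globals audiobuf_granulepos, audiobuf_fill,
   audiofd_totalsize and audiofd_fragsize. */
static ogg_int64_t sketch_calibrate_ms(ogg_int64_t last_queued_sample,
                                       long unplayed_bytes,
                                       int channels,int rate){
  struct timeval tv;
  ogg_int64_t now_ms,playing_sample;
  gettimeofday(&tv,0);
  now_ms=tv.tv_sec*1000+tv.tv_usec/1000;
  /* 16-bit audio: two bytes per channel per sample frame */
  playing_sample=last_queued_sample-unplayed_bytes/2/channels;
  /* wall-clock time at which sample 0 started playing; the player's
     clock is then simply (now - this calibration value) */
  return now_ms-1000*playing_sample/rate;
}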
long         audiofd_totalsize=-1;
int          audiofd_fragsize;      /* read and write only complete fragments
                                       so that SNDCTL_DSP_GETOSPACE is
                                       accurate immediately after a bank
                                       switch */
int          audiofd=-1;
ogg_int64_t  audiofd_timer_calibrate=-1;

static void open_audio(){
  audio_buf_info info;
  int format=AFMT_S16_NE; /* host endian */
  int channels=vi.channels;
  int rate=vi.rate;
  int ret;

  audiofd=open(AUDIO_DEVICE,O_RDWR);
  if(audiofd<0){
    fprintf(stderr,"Could not open audio device " AUDIO_DEVICE ".\n");
    exit(1);
  }

  ret=ioctl(audiofd,SNDCTL_DSP_SETFMT,&format);
  if(ret){
    fprintf(stderr,"Could not set 16 bit host-endian playback\n");
    exit(1);
  }
  ret=ioctl(audiofd,SNDCTL_DSP_CHANNELS,&channels);
  if(ret){
    fprintf(stderr,"Could not set %d channel playback\n",channels);
    exit(1);
  }
  ret=ioctl(audiofd,SNDCTL_DSP_SPEED,&rate);
  if(ret){
    fprintf(stderr,"Could not set %d Hz playback\n",rate);
    exit(1);
  }

  ioctl(audiofd,SNDCTL_DSP_GETOSPACE,&info);
  audiofd_fragsize=info.fragsize;
  audiofd_totalsize=info.fragstotal*info.fragsize;

  audiobuf=malloc(audiofd_fragsize);
}

static void audio_close(void){
  if(audiofd>-1){
    ioctl(audiofd,SNDCTL_DSP_RESET,NULL);
    close(audiofd);
    free(audiobuf);
  }
}

/* call this only immediately after unblocking from a full kernel
   having a newly empty fragment or at the point of DMA restart */
void audio_calibrate_timer(int restart){
  struct timeval tv;
  ogg_int64_t current_sample;
  ogg_int64_t new_time;

  gettimeofday(&tv,0);
  new_time=tv.tv_sec*1000+tv.tv_usec/1000;

  if(restart){
    current_sample=audiobuf_granulepos-audiobuf_fill/2/vi.channels;
  }else
    current_sample=audiobuf_granulepos-
      (audiobuf_fill+audiofd_totalsize-audiofd_fragsize)/2/vi.channels;

  new_time-=1000*current_sample/vi.rate;

  audiofd_timer_calibrate=new_time;
}

/* get relative time since beginning playback, compensating for A/V
   drift */
double get_time(){
  static ogg_int64_t last=0;
  static ogg_int64_t up=0;
  ogg_int64_t now;
  struct timeval tv;

  gettimeofday(&tv,0);
  now=tv.tv_sec*1000+tv.tv_usec/1000;

  if(audiofd_timer_calibrate==-1)audiofd_timer_calibrate=last=now;

  if(audiofd<0){
    /* no audio timer to worry about, we can just use the system clock */
    /* only one complication: If the process is suspended, we should
       reset timing to account for the gap in play time.  Do it the
       easy/hack way */
    if(now-last>1000)audiofd_timer_calibrate+=(now-last);
    last=now;
  }

  if(now-up>200){
    double timebase=(now-audiofd_timer_calibrate)*.001;
    int hundredths=timebase*100-(long)timebase*100;
    int seconds=(long)timebase%60;
    int minutes=((long)timebase/60)%60;
    int hours=(long)timebase/3600;

    fprintf(stderr,"   Playing: %d:%02d:%02d.%02d                       \r",
            hours,minutes,seconds,hundredths);
    up=now;
  }

  return (now-audiofd_timer_calibrate)*.001;
}

/* write a fragment to the OSS kernel audio API, but only if we can
   stuff in a whole fragment without blocking */
void audio_write_nonblocking(void){

  if(audiobuf_ready){
    audio_buf_info info;
    long bytes;

    ioctl(audiofd,SNDCTL_DSP_GETOSPACE,&info);
    bytes=info.bytes;
    if(bytes>=audiofd_fragsize){
      if(bytes==audiofd_totalsize)audio_calibrate_timer(1);

      while(1){
        bytes=write(audiofd,audiobuf+(audiofd_fragsize-audiobuf_fill),
                    audiofd_fragsize);

        if(bytes>0){

          if(bytes!=audiobuf_fill){
            /* shouldn't actually be possible... but eh */
            audiobuf_fill-=bytes;
          }else
            break;
        }
      }

      audiobuf_fill=0;
      audiobuf_ready=0;

    }
  }
}

/* clean quit on Ctrl-C for SDL and thread shutdown as per SDL example
   (we don't use any threads, but libSDL does) */
int got_sigint=0;
static void sigint_handler (int signal) {
  got_sigint = 1;
}

static void open_video(void){
  if ( SDL_Init(SDL_INIT_VIDEO) < 0 ) {
    fprintf(stderr, "Unable to init SDL: %s\n", SDL_GetError());
    exit(1);
  }

  screen = SDL_SetVideoMode(ti.frame_width, ti.frame_height, 0, SDL_SWSURFACE);
  if ( screen == NULL ) {
    fprintf(stderr, "Unable to set %dx%d video: %s\n",
            ti.frame_width,ti.frame_height,SDL_GetError());
    exit(1);
  }

  yuv_overlay = SDL_CreateYUVOverlay(ti.frame_width, ti.frame_height,
                                     SDL_YV12_OVERLAY,
                                     screen);
  if ( yuv_overlay == NULL ) {
    fprintf(stderr, "SDL: Couldn't create SDL_yuv_overlay: %s\n",
            SDL_GetError());
    exit(1);
  }
  rect.x = 0;
  rect.y = 0;
  rect.w = ti.frame_width;
  rect.h = ti.frame_height;

  SDL_DisplayYUVOverlay(yuv_overlay, &rect);
}

static void video_write(void){
  int i;
  yuv_buffer yuv;
  int crop_offset;
  theora_decode_YUVout(&td,&yuv);

  /* Lock SDL_yuv_overlay */
  if ( SDL_MUSTLOCK(screen) ) {
    if ( SDL_LockSurface(screen) < 0 ) return;
  }
  if (SDL_LockYUVOverlay(yuv_overlay) < 0) return;

  /* let's draw the data (*yuv[3]) on a SDL screen (*screen) */
  /* deal with border stride */
  /* reverse u and v for SDL */
  /* and crop input properly, respecting the encoded frame rect */
  crop_offset=ti.offset_x+yuv.y_stride*ti.offset_y;
  for(i=0;i<yuv_overlay->h;i++)
    memcpy(yuv_overlay->pixels[0]+yuv_overlay->pitches[0]*i,
           yuv.y+crop_offset+yuv.y_stride*i,
           yuv_overlay->w);
  crop_offset=(ti.offset_x/2)+(yuv.uv_stride)*(ti.offset_y/2);
  for(i=0;i<yuv_overlay->h/2;i++){
    memcpy(yuv_overlay->pixels[1]+yuv_overlay->pitches[1]*i,
           yuv.v+crop_offset+yuv.uv_stride*i,
           yuv_overlay->w/2);
    memcpy(yuv_overlay->pixels[2]+yuv_overlay->pitches[2]*i,
           yuv.u+crop_offset+yuv.uv_stride*i,
           yuv_overlay->w/2);
  }

  /* Unlock SDL_yuv_overlay */
  if ( SDL_MUSTLOCK(screen) ) {
    SDL_UnlockSurface(screen);
  }
  SDL_UnlockYUVOverlay(yuv_overlay);

  /* Show, baby, show! */
  SDL_DisplayYUVOverlay(yuv_overlay, &rect);

}

/* dump the theora (or vorbis) comment header */
static int dump_comments(theora_comment *tc){
  int i, len;
  char *value;
  FILE *out=stdout;

  fprintf(out,"Encoded by %s\n",tc->vendor);
  if(tc->comments){
    fprintf(out, "theora comment header:\n");
    for(i=0;i<tc->comments;i++){
      if(tc->user_comments[i]){
