// nativeoggparser.java
//
//    @Override
//    public void stop()
//    {
//        try
//        {
//            source.stop();
//        } catch (IOException e)
//        {
//            logger.log(Level.WARNING, "" + e, e);
//        }
//    }

    @Override
    public boolean isPositionable()
    {
        return false; // TODO
    }

//    @Override
//    public Time setPosition(Time where, int rounding)
//    {
//        synchronized (OGG_SYNC_OBJ)
//        {
//
//        }
//    }

    @Override
    public boolean isRandomAccess()
    {
        return super.isRandomAccess(); // TODO: can we determine this from the data source?
    }

    public static VideoFormat convertCodecPixelFormat(theora_info ti)
    {
        // resulting format based on what YUVConverter will return. Depends on a bit of internal
        // knowledge of how YUVConverter works.
        // TODO: we are ignoring any cropping here.

        final Dimension size = new Dimension(ti.width, ti.height);
        final int maxDataLength = ti.width * ti.height;
        final Class dataType = int[].class;
        final int bitsPerPixel = 32;
        final float frameRate = (float) ti.fps_numerator / (float) ti.fps_denominator;

        final int red;
        final int green;
        final int blue;

        // YUVConverter returns TYPE_INT_RGB
        final int bufferedImageType = BufferedImage.TYPE_INT_RGB;

        if (bufferedImageType == BufferedImage.TYPE_INT_BGR)
        {
            // TODO: test
            red = 0xFF;
            green = 0xFF00;
            blue = 0xFF0000;
        }
        else if (bufferedImageType == BufferedImage.TYPE_INT_RGB)
        {
            red = 0xFF0000;
            green = 0xFF00;
            blue = 0xFF;
        }
        else if (bufferedImageType == BufferedImage.TYPE_INT_ARGB)
        {
            red = 0xFF0000;
            green = 0xFF00;
            blue = 0xFF;
            // just ignore alpha
        }
        else
            throw new IllegalArgumentException("Unsupported buffered image type: " + bufferedImageType);

        return new RGBFormat(size, maxDataLength, dataType, frameRate, bitsPerPixel, red, green, blue);
    }

    public static AudioFormat convertCodecAudioFormat(vorbis_info vi)
    {
        return new AudioFormat(AudioFormat.LINEAR, vi.rate.floatValue(), 16, vi.channels,
                AudioFormat.LITTLE_ENDIAN, AudioFormat.SIGNED);
    }

    private boolean eomAudio; // set to true on end of media
    private boolean eomVideo; // set to true on end of media

    private int videoFrameNo = -1;

    private void nextVideoBuffer() throws IOException
    {
        synchronized (OGG_SYNC_OBJ)
        {
            int i;
            int j;

            while (theora_p != 0 && videobuf_ready == 0)
            {
                /* theora is one in, one out... */
                if (OGG.ogg_stream_packetout(to, op) > 0)
                {
                    ++videoFrameNo;

                    final int ret = THEORA.theora_decode_packetin(td, op);
                    if (ret < 0)
                        throw new IOException("theora_decode_packetin failed: " + ret);

                    videobuf_granulepos = td.granulepos;
                    videobuf_time = THEORA.theora_granule_time(td, videobuf_granulepos);
                    if (videobuf_time == 0.0)
                    {
                        // TODO: for some reason, some videos, like Apollo_15_liftoff_from_inside_LM.ogg
                        // (available from wikimedia) always report the videobuf_time as zero. So we'll
                        // just calculate it based on the frame rate and the frame number.
                        videobuf_time = (double) videoFrameNo * (double) ti.fps_denominator / (double) ti.fps_numerator;
                    }

//                    /* is it already too old to be useful? This is only actually
//                    useful cosmetically after a SIGSTOP. Note that we have to
//                    decode the frame even if we don't show it (for now) due to
//                    keyframing. Soon enough libtheora will be able to deal
//                    with non-keyframe seeks. */
//
//                    if(videobuf_time>=get_time())
                    videobuf_ready = 1;
                }
                else
                    break;
            }

//            if(videobuf_ready == 0 && audiobuf_ready == 0 &&
//            feof(infile))break;

            if (videobuf_ready == 0)
            {
                /* no data yet for somebody. Grab another page */
                int bytes = buffer_data(oy);
                if (bytes < 0)
                {
                    eomVideo = true;
                }
                while (OGG.ogg_sync_pageout(oy, og) > 0)
                {
                    queue_page(og);
                }
            }

//            /* If playback has begun, top audio buffer off immediately. */
//            if(stateflag != 0) audio_write_nonblocking();
//
//            /* are we at or past time for this video frame? */
//            if(stateflag != 0 && videobuf_ready != 0
//                    // && videobuf_time<=get_time()
//                    )
//            {
//                video_write();
//                videobuf_ready=0;
//            }

            /*
             * if our buffers either don't exist or are ready to go, we can
             * begin playback
             */
            if ((theora_p == 0 || videobuf_ready != 0))
                stateflag = 1;

//            /* same if we've run out of input */
//            if(feof(infile))stateflag=1;
        }
    }
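    // nextVideoBuffer() and nextAudioBuffer() both follow the demux/decode loop of the
    // libtheora/libvorbis example players: try to pull one packet out of the codec's logical
    // stream and decode it; if no packet is ready, read more raw bytes from the DataSource with
    // buffer_data(oy) and route each complete page to its logical stream with queue_page(og).
    // A negative byte count from buffer_data() marks end of media (eomVideo / eomAudio).
    // In rough outline (a sketch of the shared shape only; the identifiers below are
    // placeholders, not additional fields or methods of this class):
    //
    //     while (decoderPresent && bufferNotReady) {
    //         if (OGG.ogg_stream_packetout(stream, op) > 0)
    //             decodePacket(op);          // theora_decode_packetin / vorbis_synthesis(+blockin)
    //         else
    //             break;                     // no complete packet; need another page
    //     }
    //     if (bufferNotReady) {
    //         int bytes = buffer_data(oy);   // pull more bytes from the PullSourceStream
    //         if (bytes < 0) eom = true;     // end of media
    //         while (OGG.ogg_sync_pageout(oy, og) > 0)
    //             queue_page(og);            // hand the page to the right ogg_stream_state
    //     }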
    private void nextAudioBuffer() throws IOException
    {
        synchronized (OGG_SYNC_OBJ)
        {
            int i;
            int j;

            /*
             * we want a video and audio frame ready to go at all times. If we
             * have to buffer incoming, buffer the compressed data (ie, let ogg
             * do the buffering)
             */
            while (vorbis_p != 0 && audiobuf_ready == 0)
            {
                int ret;
                final PointerByReference pcm = new PointerByReference();

                /* if there's pending, decoded audio, grab it */
                if ((ret = VORBIS.vorbis_synthesis_pcmout(vd, pcm)) > 0)
                {
                    final Pointer ppChannels = pcm.getValue();
                    final Pointer[] pChannels = ppChannels.getPointerArray(0, vi.channels);
                    final float[][] floatArrays = new float[pChannels.length][];
                    for (int k = 0; k < pChannels.length; ++k)
                    {
                        floatArrays[k] = pChannels[k].getFloatArray(0, ret);
                    }

                    int count = audiobuf_fill / 2;
                    final int maxsamples = (audiofd_fragsize - audiobuf_fill) / 2 / vi.channels;
                    for (i = 0; i < ret && i < maxsamples; i++)
                    {
                        for (j = 0; j < vi.channels; j++)
                        {
                            int val = Math.round(floatArrays[j][i] * 32767.f);
                            if (val > 32767)
                                val = 32767;
                            if (val < -32768)
                                val = -32768;
                            audiobuf[count++] = (short) val;
                        }
                    }

                    VORBIS.vorbis_synthesis_read(vd, i);
                    audiobuf_fill += i * vi.channels * 2;

                    if (audiobuf_fill == audiofd_fragsize)
                        audiobuf_ready = 1;

                    if (vd.granulepos >= 0)
                        audiobuf_granulepos = vd.granulepos - ret + i;
                    else
                        audiobuf_granulepos += i;
                }
                else
                {
                    /* no pending audio; is there a pending packet to decode? */
                    if (OGG.ogg_stream_packetout(vo, op) > 0)
                    {
                        if (VORBIS.vorbis_synthesis(vb, op) == 0) /* test for success! */
                            VORBIS.vorbis_synthesis_blockin(vd, vb);
                    }
                    else /* we need more data; break out to suck in another page */
                        break;
                }
            }

//            if(videobuf_ready == 0 && audiobuf_ready == 0 &&
//            feof(infile))break;

            if (audiobuf_ready == 0)
            {
                /* no data yet for somebody. Grab another page */
                int bytes = buffer_data(oy);
                if (bytes < 0)
                {
                    eomAudio = true;
                }
                while (OGG.ogg_sync_pageout(oy, og) > 0)
                {
                    queue_page(og);
                }
            }

//            /* If playback has begun, top audio buffer off immediately. */
//            if(stateflag != 0) audio_write_nonblocking();
//
//            /* are we at or past time for this video frame? */
//            if(stateflag != 0 && videobuf_ready != 0
//                    // && videobuf_time<=get_time()
//                    )
//            {
//                video_write();
//                videobuf_ready=0;
//            }

            /*
             * if our buffers either don't exist or are ready to go, we can
             * begin playback
             */
            if ((vorbis_p == 0 || audiobuf_ready != 0))
                stateflag = 1;

//            /* same if we've run out of input */
//            if(feof(infile))stateflag=1;
        }
    }

    private abstract class PullSourceStreamTrack extends AbstractTrack
    {
        public abstract void deallocate();
    }
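    // The two concrete tracks below expose the demultiplexed streams through the JMF Track API.
    // VideoTrack wraps the Theora stream: readFrame() decodes under OGG_SYNC_OBJ, converts the
    // yuv_buffer to a BufferedImage with YUVConverter and wraps it via ImageToBuffer, reporting
    // the RGBFormat built by convertCodecPixelFormat(). AudioTrack wraps the Vorbis stream and
    // emits 16-bit signed little-endian PCM matching convertCodecAudioFormat(). Both return a
    // discard buffer when no data is ready yet, and an EOM buffer once the source is exhausted.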
    private class VideoTrack extends PullSourceStreamTrack
    {
        // TODO: track listener

        private final VideoFormat format;

        public VideoTrack() throws ResourceUnavailableException
        {
            super();

            synchronized (OGG_SYNC_OBJ)
            {
                // set format
                format = convertCodecPixelFormat(ti);
            }
        }

        @Override
        public void deallocate()
        {
        }

        /**
         *
         * @return nanos skipped, 0 if unable to skip.
         * @throws IOException
         */
        public long skipNanos(long nanos) throws IOException
        {
            return 0; // TODO
        }

        public boolean canSkipNanos()
        {
            return false;
        }

        @Override
        public Format getFormat()
        {
            return format;
        }

//        TODO: from JAVADOC:
//        This method might block if the data for a complete frame is not available. It might
//        also block if the stream contains intervening data for a different interleaved Track.
//        Once the other Track is read by a readFrame call from a different thread, this method
//        can read the frame. If the intervening Track has been disabled, data for that Track is
//        read and discarded.
//
//        Note: This scenario is necessary only if a PullDataSource Demultiplexer implementation
//        wants to avoid buffering data locally and copying the data to the Buffer passed in as a
//        parameter. Implementations might decide to buffer data and not block (if possible) and
//        incur data copy overhead.

        @Override
        public void readFrame(Buffer buffer)
        {
            synchronized (OGG_SYNC_OBJ)
            {
                try
                {
                    nextVideoBuffer();
                } catch (IOException e)
                {
                    buffer.setLength(0);
                    buffer.setDiscard(true);
                    throw new RuntimeException(e); // TODO: how to handle?
                }

                /* are we at or past time for this video frame? */
                if (stateflag != 0 && videobuf_ready != 0
                        // && videobuf_time<=get_time()
                        )
                {
                    final yuv_buffer yuv = new yuv_buffer();
                    THEORA.theora_decode_YUVout(td, yuv);

                    final BufferedImage bi = YUVConverter.toBufferedImage(yuv, ti);
                    final Buffer b = ImageToBuffer.createBuffer(bi, format.getFrameRate());

                    buffer.setData(b.getData());
                    buffer.setLength(b.getLength());
                    buffer.setOffset(b.getOffset());
                    buffer.setEOM(false);
                    buffer.setDiscard(false);
                    buffer.setTimeStamp((long) secondsToNanos(videobuf_time));
                    //System.out.println("Generated video buffer");

                    videobuf_ready = 0;
                }
                else
                {
                    buffer.setEOM(eomVideo);
                    buffer.setLength(0);
                    if (!eomVideo)
                        buffer.setDiscard(true);
                }
            }
        }

        @Override
        public Time mapFrameToTime(int frameNumber)
        {
            return TIME_UNKNOWN;
        }

        @Override
        public int mapTimeToFrame(Time t)
        {
            return FRAME_UNKNOWN;
        }

        @Override
        public Time getDuration()
        {
            return Duration.DURATION_UNKNOWN; // TODO
        }
    }
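    // The audio path fills audiobuf (in nextAudioBuffer()) with interleaved 16-bit samples:
    // each float returned by vorbis_synthesis_pcmout() is scaled by 32767 and clamped to the
    // signed 16-bit range, roughly:
    //
    //     int val = Math.round(sample * 32767.f);
    //     if (val > 32767) val = 32767;
    //     if (val < -32768) val = -32768;
    //
    // AudioTrack.readFrame() then re-packs those shorts into little-endian bytes so the data
    // matches the AudioFormat.LITTLE_ENDIAN / AudioFormat.SIGNED format reported by
    // convertCodecAudioFormat(). Video timestamps come from theora_granule_time() (or, when that
    // reports zero, from the frame number divided by the frame rate); audio timestamps are still
    // a placeholder (see the TODO on setTimeStamp below).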
    private class AudioTrack extends PullSourceStreamTrack
    {
        // TODO: track listener

        private final AudioFormat format;

        public AudioTrack() throws ResourceUnavailableException
        {
            super();

            audiofd_fragsize = 10000; // TODO: this is just a hack
            audiobuf = new short[audiofd_fragsize / 2]; // audiofd_fragsize is in bytes, so divide by two to get shorts

            synchronized (OGG_SYNC_OBJ)
            {
                format = convertCodecAudioFormat(vi);
            }
        }

        @Override
        public void deallocate()
        {
        }

        // TODO: implement seeking using av_seek_frame

        /**
         *
         * @return nanos skipped, 0 if unable to skip.
         * @throws IOException
         */
        public long skipNanos(long nanos) throws IOException
        {
            return 0;
        }

        public boolean canSkipNanos()
        {
            return false;
        }

        @Override
        public Format getFormat()
        {
            return format;
        }

//        TODO: from JAVADOC:
//        This method might block if the data for a complete frame is not available. It might
//        also block if the stream contains intervening data for a different interleaved Track.
//        Once the other Track is read by a readFrame call from a different thread, this method
//        can read the frame. If the intervening Track has been disabled, data for that Track is
//        read and discarded.
//
//        Note: This scenario is necessary only if a PullDataSource Demultiplexer implementation
//        wants to avoid buffering data locally and copying the data to the Buffer passed in as a
//        parameter. Implementations might decide to buffer data and not block (if possible) and
//        incur data copy overhead.

        @Override
        public void readFrame(Buffer buffer)
        {
            synchronized (OGG_SYNC_OBJ)
            {
                try
                {
                    nextAudioBuffer(); // TODO: this often generates discard buffers, we could be smarter about it. Same for video.
                } catch (IOException e)
                {
                    buffer.setLength(0);
                    buffer.setDiscard(true);
                    throw new RuntimeException(e); // TODO: how to handle?
                }

                /* If playback has begun, top audio buffer off immediately. */
                if (stateflag == 0)
                {
                    buffer.setEOM(eomAudio);
                    buffer.setLength(0);
                    if (!eomAudio)
                        buffer.setDiscard(true);
                    return;
                }
                else
                {
                    if (audiobuf_ready == 0)
                    {
                        buffer.setEOM(eomAudio);
                        buffer.setLength(0);
                        if (!eomAudio)
                            buffer.setDiscard(true);
                        //System.out.println("Generated discard buffer: ");
                        return;
                    }
                    else
                    {
                        // convert from short array to byte array.
                        // TODO: inefficient, should just store in byte array to begin with.
                        final byte[] data = new byte[audiobuf.length * 2];
                        for (int i = 0; i < audiobuf.length; ++i)
                        {
                            // little-endian:
                            data[i * 2] = (byte) (audiobuf[i] & 0xff);
                            data[i * 2 + 1] = (byte) ((audiobuf[i] >> 8) & 0xff);
                        }

                        buffer.setData(data);
                        buffer.setLength(data.length);
                        buffer.setOffset(0);
                        buffer.setEOM(false);
                        buffer.setDiscard(false);
                        buffer.setTimeStamp(System.currentTimeMillis()); // TODO
                        //System.out.println("Generated audio buffer: " + data.length);

                        audiobuf_fill = 0;
                        audiobuf_ready = 0;
                    }
                }
            }
        }

        @Override
        public Time mapFrameToTime(int frameNumber)
        {
            return TIME_UNKNOWN;
        }

        @Override
        public int mapTimeToFrame(Time t)
        {
            return FRAME_UNKNOWN;
        }

        @Override
        public Time getDuration()
        {
            return Duration.DURATION_UNKNOWN; // TODO
        }
    }

    private static final double secondsToNanos(double secs)
    {
        return secs * 1000000000.0;
    }
}
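// Rough usage sketch. It assumes this file's enclosing class is the NativeOggParser
// Demultiplexer from FMJ's theora-java bindings and that it is constructed directly here;
// caller-side variable names are illustrative only:
//
//     Demultiplexer demux = new NativeOggParser();
//     demux.setSource(dataSource); // a javax.media.protocol.DataSource for the .ogg resource
//     demux.open();
//     demux.start();
//     Track[] tracks = demux.getTracks();
//     Buffer buf = new Buffer();
//     do
//     {
//         tracks[0].readFrame(buf); // may yield discard buffers until a frame is ready
//     } while (!buf.isEOM());
//     demux.close();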