📄 JavaSoundRenderer.java
package net.sf.fmj.media.renderer.audio;

import java.util.HashMap;
import java.util.List;
import java.util.Vector;
import java.util.logging.Logger;

import javax.media.Buffer;
import javax.media.Codec;
import javax.media.Format;
import javax.media.Renderer;
import javax.media.ResourceUnavailableException;
import javax.media.format.AudioFormat;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.BooleanControl;
import javax.sound.sampled.CompoundControl;
import javax.sound.sampled.Control;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.FloatControl;
import javax.sound.sampled.LineUnavailableException;
import javax.sound.sampled.Mixer;
import javax.sound.sampled.SourceDataLine;
import javax.sound.sampled.AudioFormat.Encoding;
import javax.sound.sampled.Control.Type;

import javazoom.spi.mpeg.sampled.file.MpegAudioFormat;
import javazoom.spi.mpeg.sampled.file.MpegEncoding;
import javazoom.spi.vorbis.sampled.file.VorbisAudioFormat;
import javazoom.spi.vorbis.sampled.file.VorbisEncoding;

import net.sf.fmj.utility.LoggerSingleton;
import net.sf.fmj.utility.ObjectCollection;

/**
 * net.sf.fmj.media.renderer.audio.JavaSoundRenderer
 *
 * @author Warren Bloomer
 */
public class JavaSoundRenderer implements Renderer {

    private static final Logger logger = LoggerSingleton.logger;

    private String name = "FMJ Audio Renderer";

    /** the selected mixer to use */
    private Mixer mixer;

    /** the DataLine to write audio data to. */
    private SourceDataLine sourceLine;

    /** javax.media version of audio format */
    private AudioFormat inputFormat;

    /** javax.sound version of audio format */
    private javax.sound.sampled.AudioFormat sampledFormat;

    /** set of controls */
    private final ObjectCollection controls = new ObjectCollection();

    // To support ULAW, we use a codec which can convert from ULAW to LINEAR.
    // JMF's renderer can do this, although it may be overkill to use a codec.
    // TODO: support ULAW directly by simply converting the samples.
    // Same for ALAW.
    private Codec codec; // in case we need to do any conversions
    private final Buffer codecBuffer = new Buffer();

    /* ----------------------- Renderer interface ------------------------- */

    /**
     * Returns the name of the plugin.
     */
    public String getName() {
        return name;
    }

    private Format[] supportedInputFormats = new Format[] {
            new AudioFormat(AudioFormat.LINEAR, -1.0, -1, -1, -1, -1, -1, -1.0, Format.byteArray),
            new AudioFormat(AudioFormat.ULAW, -1.0, -1, -1, -1, -1, -1, -1.0, Format.byteArray), // TODO: our codec doesn't support all ULAW input formats.
            new AudioFormat(AudioFormat.ALAW, -1.0, -1, -1, -1, -1, -1, -1.0, Format.byteArray), // TODO: our codec doesn't support all ALAW input formats.
    };

    /**
     * Get the supported input formats. For now we return the generic LINEAR,
     * ULAW and ALAW formats above; the commented-out code below would instead
     * return all the formats handled by the current default mixer.
     */
    public Format[] getSupportedInputFormats() {
        return supportedInputFormats; // JMF doesn't return all the details.

//        Vector supportedFormats = new Vector();
//
//        Mixer.Info mixerInfo = null; // default mixer
//        Mixer mixer = AudioSystem.getMixer(mixerInfo);
//
//        Line.Info[] lineInfos = mixer.getSourceLineInfo();
//        for (int i = 0; i < lineInfos.length; i++) {
//            DataLine.Info lineInfo = (DataLine.Info) lineInfos[i];
//            javax.sound.sampled.AudioFormat[] formats = lineInfo.getFormats();
//            for (int j = 0; j < formats.length; j++) {
//                AudioFormat format = convertFormat(formats[j]);
//                supportedFormats.add(format);
//            }
//        }
//
//        return (Format[]) supportedFormats.toArray(new Format[]{});
    }

    public Format setInputFormat(Format format) {
        logger.info("Setting input format to: " + format);
        if (!(format instanceof AudioFormat)) {
            return null;
        }
        this.inputFormat = (AudioFormat) format;
        return inputFormat;
    }

    public Object getControl(String controlType) {
        return controls.getControl(controlType);
    }

    public Object[] getControls() {
        return controls.getControls();
    }

    /**
     * Open the plugin. Must be called after the formats have been determined
     * and before "process" is called.
     *
     * Opens the DataLine.
     */
    public void open() throws ResourceUnavailableException {
        javax.sound.sampled.AudioFormat audioFormat = convertFormat(inputFormat);
        logger.info("opening with javax.sound format: " + audioFormat);

        try {
            if (!inputFormat.getEncoding().equals(AudioFormat.LINEAR)) {
                logger.info("JavaSoundRenderer: Audio format is not linear, creating conversion");

                if (inputFormat.getEncoding().equals(AudioFormat.ULAW))
                    codec = new net.sf.fmj.media.codec.audio.ulaw.Decoder(); // much more efficient than JavaSoundCodec
                else if (inputFormat.getEncoding().equals(AudioFormat.ALAW))
                    codec = new net.sf.fmj.media.codec.audio.alaw.Decoder(); // much more efficient than JavaSoundCodec
                else
                    throw new ResourceUnavailableException("Unsupported input format encoding: " + inputFormat.getEncoding());
                //codec = new net.sf.fmj.media.codec.JavaSoundCodec();

                codec.setInputFormat(inputFormat);
                final Format[] outputFormats = codec.getSupportedOutputFormats(inputFormat);
                if (outputFormats.length < 1)
                    throw new ResourceUnavailableException("Unable to get an output format for input format: " + inputFormat);
                final AudioFormat codecOutputFormat = (AudioFormat) outputFormats[0]; // TODO: choose the best quality one.
                codec.setOutputFormat(codecOutputFormat);
                audioFormat = convertFormat(codecOutputFormat);
                codec.open();
                logger.info("JavaSoundRenderer: Audio format is not linear, created conversion from " + inputFormat + " to " + codecOutputFormat);
            }

            sourceLine = getSourceDataLine(audioFormat);
            sourceLine.open(audioFormat);

            {
                FloatControl gainFloatControl = null;
                BooleanControl muteBooleanControl = null;

                try {
                    gainFloatControl = (FloatControl) sourceLine.getControl(FloatControl.Type.MASTER_GAIN);
                } catch (Exception e) {
                    e.printStackTrace();
                }
                try {
                    muteBooleanControl = (BooleanControl) sourceLine.getControl(BooleanControl.Type.MUTE);
                } catch (Exception e) {
                    e.printStackTrace();
                }
                // TODO: add other controls

                JavaSoundGainControl gainControl = new JavaSoundGainControl(
                        gainFloatControl, muteBooleanControl);
                controls.addControl(gainControl);
            }

            logControls(sourceLine.getControls());
        } catch (LineUnavailableException e) {
            throw new ResourceUnavailableException(e.getMessage());
        }
    }

    /**
     * Created to work around the fact that AudioSystem.getSourceDataLine is not
     * available in 1.4.
     */
    private static SourceDataLine getSourceDataLine(javax.sound.sampled.AudioFormat format) throws LineUnavailableException {
        // 1.5:
        // return AudioSystem.getSourceDataLine(format);
        DataLine.Info info = new DataLine.Info(SourceDataLine.class, format);
        return (SourceDataLine) AudioSystem.getLine(info);
    }

    /**
     * Free the data line.
     */
    public void close() {
        logger.info("closing...");
        controls.clear();
        if (codec != null) {
            codec.close();
            codec = null;
        }
        sourceLine.close();
        sourceLine = null;
    }

    /**
     * Reset the state of the plugin. The reset method is typically called if
     * the end of media is reached or the media is repositioned.
     */
    public void reset() {
        logger.info("resetting...");
    }

    /**
     * Start the rendering process.
     */
    public void start() {
        logger.info("starting...");
        sourceLine.start();
    }

    /**
     * Stop the rendering process.
     */
    public void stop() {
        logger.info("stopping...");
        sourceLine.stop();
    }

    // The problem with not blocking is that we can get choppy audio. This would
    // be solved, theoretically, by having the filter graph infrastructure
    // pre-buffer some data. The other problem with non-blocking is that the
    // filter graph has to repeatedly call process, and it has no idea when it
    // can call again and have some input consumed. This is, I think, kind of a
    // rough spot in the JMF architecture: the filter graph could sleep, but how
    // long should it sleep?
    // The problem with blocking is that (if we allow it, which we don't) stop
    // will interrupt any write to sourceLine, and basically, data will be lost.
    // This will result in a gap in the audio upon start. If we don't interrupt
    // with a stop, then the track can only fully stop after process has written
    // all of the data.
    private static final boolean NON_BLOCKING = false;

    /**
     * Write the buffer to the SourceDataLine.
     */
    public int process(Buffer buffer) {
        // if we need to convert the format, do so using the codec.
        if (codec != null) {
            final int codecResult = codec.process(buffer, codecBuffer);
            if (codecResult == BUFFER_PROCESSED_FAILED)
                return BUFFER_PROCESSED_FAILED;
            if (codecResult == OUTPUT_BUFFER_NOT_FILLED)
                return BUFFER_PROCESSED_OK;
            buffer = codecBuffer;
        }

        int length = buffer.getLength();
        int offset = buffer.getOffset();

        final Format format = buffer.getFormat();
        final Class type = format.getDataType();
        if (type != Format.byteArray) {
            return BUFFER_PROCESSED_FAILED;
        }

        final byte[] data = (byte[]) buffer.getData();

        final boolean bufferNotConsumed;
        final int newBufferLength; // only applicable if bufferNotConsumed
        final int newBufferOffset; // only applicable if bufferNotConsumed

        if (NON_BLOCKING) {
            // TODO: handle sourceLine.available(). This code currently causes choppy audio.
            if (length > sourceLine.available()) {
                // we should only write sourceLine.available() bytes, then return INPUT_BUFFER_NOT_CONSUMED.
                length = sourceLine.available(); // don't try to write more than available
                bufferNotConsumed = true;
                newBufferLength = buffer.getLength() - length;
                newBufferOffset = buffer.getOffset() + length;
            } else {
                bufferNotConsumed = false;
                newBufferLength = length;
                newBufferOffset = offset;
            }
        } else {
            bufferNotConsumed = false;
            newBufferLength = 0;
            newBufferOffset = 0;
        }

        if (length == 0) {
            logger.finer("Buffer has zero length, flags = " + buffer.getFlags());
        }

        // make sure all the bytes are written.
        // (The listing was cut off at this point; what follows is a sketch of
        // the standard blocking-write completion, consistent with the
        // variables and comments above, not the verbatim original.)
        while (length > 0) {
            final int written = sourceLine.write(data, offset, length);
            if (written == 0) {
                // the line was stopped or closed mid-write; give up rather than spin.
                return BUFFER_PROCESSED_FAILED;
            }
            offset += written;
            length -= written;
        }

        if (bufferNotConsumed) {
            // hand the unwritten remainder back to the filter graph.
            buffer.setOffset(newBufferOffset);
            buffer.setLength(newBufferLength);
            return INPUT_BUFFER_NOT_CONSUMED;
        }

        return BUFFER_PROCESSED_OK;
    }

    /**
     * Convert a javax.media audio format to a javax.sound.sampled audio format.
     * (The original method body was also cut off in this listing; this is a
     * minimal sketch covering the linear PCM case relied on by open() above.)
     */
    private static javax.sound.sampled.AudioFormat convertFormat(AudioFormat format) {
        return new javax.sound.sampled.AudioFormat(
                (float) format.getSampleRate(),
                format.getSampleSizeInBits(),
                format.getChannels(),
                format.getSigned() == AudioFormat.SIGNED,
                format.getEndian() == AudioFormat.BIG_ENDIAN);
    }

    /**
     * Log the controls offered by the line. (Also cut off in this listing;
     * minimal sketch.)
     */
    private static void logControls(Control[] lineControls) {
        for (int i = 0; i < lineControls.length; i++) {
            logger.fine("Line control: " + lineControls[i]);
        }
    }
}
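For context, below is a minimal sketch of how a caller might drive this renderer directly, outside a full FMJ/JMF filter graph. The RendererDemo class name, the 8 kHz mono format values, and the silent PCM buffer are illustrative assumptions, not part of the original listing:

import javax.media.Buffer;
import javax.media.format.AudioFormat;

import net.sf.fmj.media.renderer.audio.JavaSoundRenderer;

public class RendererDemo {
    public static void main(String[] args) throws Exception {
        JavaSoundRenderer renderer = new JavaSoundRenderer();

        // 8 kHz, 16-bit, mono, little-endian, signed linear PCM (illustrative values).
        AudioFormat format = new AudioFormat(AudioFormat.LINEAR, 8000.0, 16, 1,
                AudioFormat.LITTLE_ENDIAN, AudioFormat.SIGNED);
        renderer.setInputFormat(format);

        renderer.open();  // converts the format and opens the SourceDataLine
        renderer.start(); // starts the line so writes are audible

        // One second of silence as stand-in media data.
        byte[] pcm = new byte[8000 * 2];
        Buffer buffer = new Buffer();
        buffer.setFormat(format);
        buffer.setData(pcm);
        buffer.setOffset(0);
        buffer.setLength(pcm.length);
        renderer.process(buffer); // blocks until the line has accepted all bytes

        renderer.stop();
        renderer.close();
    }
}

Because NON_BLOCKING is false in the renderer, process() blocks until the SourceDataLine has accepted every byte, so the demo needs no retry loop; a filter graph driving this renderer with non-blocking writes enabled would instead have to handle an INPUT_BUFFER_NOT_CONSUMED return and resubmit the buffer.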