/** * Convert the audio data into an AudioInputStream of the proper AudioFormat. * * @param audioFormat * the format of the audio data. * @return an AudioInputStream from which the synthesised audio data can be read. * @throws IOException * if a problem occurred with the temporary file (only applies when using files as temporary storage). */ public AudioInputStream convertToAudioInputStream(AudioFormat audioFormat) throws IOException { if (ram) { assert os instanceof ByteArrayOutputStream; assert f == null; byte[] audioData = ((ByteArrayOutputStream) os).toByteArray(); // logger.debug("Total of " + audioData.length + " bytes of audio data for this section."); return new AudioInputStream(new ByteArrayInputStream(audioData), audioFormat, audioData.length / audioFormat.getFrameSize()); } else { assert os instanceof FileOutputStream; assert f != null; os.close(); long byteLength = f.length(); return new AudioInputStream(new FileInputStream(f), audioFormat, byteLength / audioFormat.getFrameSize()); } }
/**
 * Construct an audio input stream from which <code>duration</code> seconds of silence can be read.
 *
 * @param duration
 *            the desired duration of the silence, in seconds
 * @param format
 *            the desired audio format of the audio input stream. getFrameSize() and getFrameRate() must return meaningful
 *            values.
 */
public SilenceAudioInputStream(double duration, AudioFormat format) {
    // Truncate to a whole number of frames FIRST and derive the byte count from that,
    // so the buffer length is always an exact multiple of the frame size and matches
    // the declared frame length. Truncating bytes and frames independently — as in
    // (int)(frameSize * frameRate * duration) vs. (long)(frameRate * duration) —
    // can disagree by a partial frame when frameRate * duration is not an integer.
    super(new ByteArrayInputStream(new byte[(int) ((long) (format.getFrameRate() * duration) * format.getFrameSize())]),
            format, (long) (format.getFrameRate() * duration));
}
/** * Convert the audio data into an AudioInputStream of the proper AudioFormat. * * @param audioFormat * the format of the audio data. * @return an AudioInputStream from which the synthesised audio data can be read. * @throws IOException * if a problem occurred with the temporary file (only applies when using files as temporary storage). */ public AudioInputStream convertToAudioInputStream(AudioFormat audioFormat) throws IOException { if (ram) { assert os instanceof ByteArrayOutputStream; assert f == null; byte[] audioData = ((ByteArrayOutputStream) os).toByteArray(); // logger.debug("Total of " + audioData.length + " bytes of audio data for this section."); return new AudioInputStream(new ByteArrayInputStream(audioData), audioFormat, audioData.length / audioFormat.getFrameSize()); } else { assert os instanceof FileOutputStream; assert f != null; os.close(); long byteLength = f.length(); return new AudioInputStream(new FileInputStream(f), audioFormat, byteLength / audioFormat.getFrameSize()); } }
/**
 * Construct an audio input stream from which <code>duration</code> seconds of silence can be read.
 *
 * @param duration
 *            the desired duration of the silence, in seconds
 * @param format
 *            the desired audio format of the audio input stream. getFrameSize() and getFrameRate() must return meaningful
 *            values.
 */
public SilenceAudioInputStream(double duration, AudioFormat format) {
    // Compute the frame count first, then derive the byte count from it, so the
    // silence buffer is always a whole number of frames and agrees with the
    // declared frame length. The previous form truncated the byte count and the
    // frame count independently, which could differ by a partial frame whenever
    // frameRate * duration was fractional.
    super(new ByteArrayInputStream(new byte[(int) ((long) (format.getFrameRate() * duration) * format.getFrameSize())]),
            format, (long) (format.getFrameRate() * duration));
}
/**
 * Turns the AudioInputStream into a 16bit, SIGNED_PCM, little endian audio stream that preserves the original sample
 * rate of the AudioInputStream. NOTE: this assumes the frame size can be only 1 or 2 bytes. The AudioInputStream
 * is left in a state of having all of its data being read.
 *
 * @param ais stream to convert
 * @return result array, one short per frame
 * @throws IOException if error occurred while reading the stream
 */
static public short[] toSignedPCM(AudioInputStream ais) throws IOException {
    AudioFormat aisFormat = ais.getFormat();
    int frameSize = aisFormat.getFrameSize();
    // Drain the whole stream first: available() only reports the bytes readable
    // without blocking (it may under-report the total length and under-allocate
    // the result), and read(byte[]) may return short reads that would leave a
    // frame half-filled.
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    byte[] chunk = new byte[8192];
    int n;
    while ((n = ais.read(chunk)) != -1) {
        buffer.write(chunk, 0, n);
    }
    byte[] data = buffer.toByteArray();
    short[] shorts = new short[data.length / frameSize];
    byte[] frame = new byte[frameSize];
    for (int i = 0; i < shorts.length; i++) {
        System.arraycopy(data, i * frameSize, frame, 0, frameSize);
        shorts[i] = bytesToShort(aisFormat, frame);
    }
    return shorts;
}
int nFrameSize = format.getFrameSize(); long totalBytesToRead = (long) (millis * format.getFrameRate() * nFrameSize / 1000); if (totalBytesToRead % nFrameSize != 0) { / format.getFrameSize()); try { AudioSystem.write(audioInputStream, m_targetType, m_file);
int nFrameSize = format.getFrameSize(); long totalBytesToRead = (long) (millis * format.getFrameRate() * nFrameSize / 1000); if (totalBytesToRead % nFrameSize != 0) { / format.getFrameSize()); try { AudioSystem.write(audioInputStream, m_targetType, m_file);
/**
 * Writes the current stream to disc; override this method if you want to take
 * additional action on file writes
 *
 * @param wavName name of the file to be written
 */
protected void writeFile(String wavName) {
    AudioFormat wavFormat = new AudioFormat(sampleRate, bitsPerSample, 1, isSigned, true);
    AudioFileFormat.Type outputType = getTargetType("wav");
    byte[] audioBytes = baos.toByteArray();
    AudioInputStream ais = new AudioInputStream(new ByteArrayInputStream(audioBytes), wavFormat,
            audioBytes.length / wavFormat.getFrameSize());
    File targetFile = new File(wavName);
    // Best-effort contract: silently skip file types the platform cannot write.
    if (!AudioSystem.isFileTypeSupported(outputType, ais)) {
        return;
    }
    try {
        AudioSystem.write(ais, outputType, targetFile);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
}
int nFrameSize = format.getFrameSize(); long totalBytesToRead = (long) (millis * format.getFrameRate() * nFrameSize / 1000); if (totalBytesToRead % nFrameSize != 0) { / format.getFrameSize()); if (audioProcessor != null) { audioInputStream = audioProcessor.apply(audioInputStream);
int nFrameSize = format.getFrameSize(); long totalBytesToRead = (long) (millis * format.getFrameRate() * nFrameSize / 1000); if (totalBytesToRead % nFrameSize != 0) { / format.getFrameSize()); if (audioProcessor != null) { audioInputStream = audioProcessor.apply(audioInputStream);
short result = 0; Encoding encoding = format.getEncoding(); int frameSize = format.getFrameSize();
/**
 * Writes a time slice of an audio file to a new file in the same audio file format.
 *
 * @param file the source audio file
 * @param dstPath path of the destination file to create
 * @param offset start of the chunk, presumably in milliseconds (the per-millisecond
 *            byte factor below implies this) — TODO confirm against callers
 * @param length duration of the chunk, in the same unit as {@code offset}
 * @throws UnsupportedAudioFileException if the source file format is not recognised
 * @throws IOException if reading or writing fails
 */
private static void dumpStreamChunk(File file, String dstPath, long offset, long length)
        throws UnsupportedAudioFileException, IOException {
    AudioFileFormat fileFormat = AudioSystem.getAudioFileFormat(file);
    AudioFormat audioFormat = fileFormat.getFormat();
    // Bytes of audio data per millisecond (frameSize * frames-per-ms).
    int bytesPerMilli = Math.round(audioFormat.getFrameSize() * audioFormat.getFrameRate() / 1000);
    // try-with-resources: the streams were previously leaked if write() threw.
    try (AudioInputStream inputStream = AudioSystem.getAudioInputStream(file)) {
        // skip() may skip fewer bytes than requested; loop until done or EOF.
        long toSkip = offset * bytesPerMilli;
        while (toSkip > 0) {
            long skipped = inputStream.skip(toSkip);
            if (skipped <= 0) {
                break; // end of stream before the requested offset
            }
            toSkip -= skipped;
        }
        // The AudioInputStream length argument is in FRAMES, not bytes; the old
        // code passed length * bytesPerMilli (bytes), over-declaring the chunk
        // length by a factor of the frame size.
        long chunkFrames = (long) (length * audioFormat.getFrameRate() / 1000);
        try (AudioInputStream chunkStream = new AudioInputStream(inputStream, audioFormat, chunkFrames)) {
            AudioSystem.write(chunkStream, fileFormat.getType(), new File(dstPath));
        }
    }
}
}
/** * * @param input * input * @param inputMode * if AudioPlayer.STEREO, average both input streams; if AudioPlayer.LEFT_ONLY, use only the left channel; if * AudioPlayer.RIGHT_ONLY, use only the right channel. */ public MonoAudioInputStream(AudioInputStream input, int inputMode) { super(input, input.getFormat(), input.getFrameLength()); this.newFormat = new AudioFormat(input.getFormat().getEncoding(), input.getFormat().getSampleRate(), input.getFormat() .getSampleSizeInBits(), 1, input.getFormat().getFrameSize() / input.getFormat().getChannels(), input.getFormat() .getFrameRate(), input.getFormat().isBigEndian()); this.inputChannels = input.getFormat().getChannels(); if (inputChannels < 2) throw new IllegalArgumentException("expected more than one input channel!"); this.inputMode = inputMode; if (inputMode == AudioPlayer.MONO) throw new IllegalArgumentException("expected non-mono input mode"); }
/** * * @param input * input * @param inputMode * if AudioPlayer.STEREO, average both input streams; if AudioPlayer.LEFT_ONLY, use only the left channel; if * AudioPlayer.RIGHT_ONLY, use only the right channel. */ public MonoAudioInputStream(AudioInputStream input, int inputMode) { super(input, input.getFormat(), input.getFrameLength()); this.newFormat = new AudioFormat(input.getFormat().getEncoding(), input.getFormat().getSampleRate(), input.getFormat() .getSampleSizeInBits(), 1, input.getFormat().getFrameSize() / input.getFormat().getChannels(), input.getFormat() .getFrameRate(), input.getFormat().isBigEndian()); this.inputChannels = input.getFormat().getChannels(); if (inputChannels < 2) throw new IllegalArgumentException("expected more than one input channel!"); this.inputMode = inputMode; if (inputMode == AudioPlayer.MONO) throw new IllegalArgumentException("expected non-mono input mode"); }
AudioFormat sampleRateConvFormat = new AudioFormat(ais.getFormat().getEncoding(), targetFormat.getSampleRate(), ais.getFormat().getSampleSizeInBits(), ais.getFormat().getChannels(), ais.getFormat().getFrameSize(), ais.getFormat().getFrameRate(), ais.getFormat().isBigEndian()); try { AudioInputStream intermedStream = AudioSystem.getAudioInputStream(sampleRateConvFormat, ais);
/** * * @param input * input * @param outputMode * as defined in AudioPlayer: STEREO, LEFT_ONLY or RIGHT_ONLY. */ public StereoAudioInputStream(AudioInputStream input, int outputMode) { super(input, input.getFormat(), input.getFrameLength()); this.newFormat = new AudioFormat(input.getFormat().getEncoding(), input.getFormat().getSampleRate(), input.getFormat() .getSampleSizeInBits(), 2, 2 * input.getFormat().getFrameSize() / input.getFormat().getChannels(), input .getFormat().getFrameRate(), input.getFormat().isBigEndian()); this.inputChannels = input.getFormat().getChannels(); this.outputMode = outputMode; }
/** * * @param input * input * @param outputMode * as defined in AudioPlayer: STEREO, LEFT_ONLY or RIGHT_ONLY. */ public StereoAudioInputStream(AudioInputStream input, int outputMode) { super(input, input.getFormat(), input.getFrameLength()); this.newFormat = new AudioFormat(input.getFormat().getEncoding(), input.getFormat().getSampleRate(), input.getFormat() .getSampleSizeInBits(), 2, 2 * input.getFormat().getFrameSize() / input.getFormat().getChannels(), input .getFormat().getFrameRate(), input.getFormat().isBigEndian()); this.inputChannels = input.getFormat().getChannels(); this.outputMode = outputMode; }
AudioFormat sampleRateConvFormat = new AudioFormat(ais.getFormat().getEncoding(), targetFormat.getSampleRate(), ais.getFormat().getSampleSizeInBits(), ais.getFormat().getChannels(), ais.getFormat().getFrameSize(), ais.getFormat().getFrameRate(), ais.getFormat().isBigEndian()); try { AudioInputStream intermedStream = AudioSystem.getAudioInputStream(sampleRateConvFormat, ais);
byte[] samples = new byte[(int) audio.getFrameLength() * audioformat.getFrameSize()]; audio.read(samples); return samples;
byte[] samples = new byte[(int) audio.getFrameLength() * audioformat.getFrameSize()]; audio.read(samples); return samples;