List of usage examples for `javax.sound.sampled.AudioFormat.getChannels()`.
public int getChannels()
From source file:Main.java
public static void main(String[] argv) throws Exception { AudioInputStream stream = AudioSystem.getAudioInputStream(new File("audiofile")); // From URL//from ww w.j av a 2 s . c o m // stream = AudioSystem.getAudioInputStream(new URL( // "http://hostname/audiofile")); AudioFormat format = stream.getFormat(); if (format.getEncoding() != AudioFormat.Encoding.PCM_SIGNED) { format = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, format.getSampleRate(), format.getSampleSizeInBits() * 2, format.getChannels(), format.getFrameSize() * 2, format.getFrameRate(), true); // big endian stream = AudioSystem.getAudioInputStream(format, stream); } DataLine.Info info = new DataLine.Info(Clip.class, stream.getFormat(), ((int) stream.getFrameLength() * format.getFrameSize())); Clip clip = (Clip) AudioSystem.getLine(info); clip.open(stream); clip.start(); }
From source file:Main.java
public static void main(String[] argv) throws Exception { AudioInputStream stream = AudioSystem.getAudioInputStream(new File("audiofile")); // stream = AudioSystem.getAudioInputStream(new URL( // "http://hostname/audiofile")); AudioFormat format = stream.getFormat(); if (format.getEncoding() != AudioFormat.Encoding.PCM_SIGNED) { format = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, format.getSampleRate(), format.getSampleSizeInBits() * 2, format.getChannels(), format.getFrameSize() * 2, format.getFrameRate(), true); // big endian stream = AudioSystem.getAudioInputStream(format, stream); }// ww w . j a v a2 s. com SourceDataLine.Info info = new DataLine.Info(SourceDataLine.class, stream.getFormat(), ((int) stream.getFrameLength() * format.getFrameSize())); SourceDataLine line = (SourceDataLine) AudioSystem.getLine(info); line.open(stream.getFormat()); line.start(); int numRead = 0; byte[] buf = new byte[line.getBufferSize()]; while ((numRead = stream.read(buf, 0, buf.length)) >= 0) { int offset = 0; while (offset < numRead) { offset += line.write(buf, offset, numRead - offset); } } line.drain(); line.stop(); }
From source file:com.andrewkroh.cicso.rtp.AudioFileStreamer.java
private static AudioFormat toAlawFormat(AudioFormat source) { Preconditions.checkNotNull(source, "Source AudioFormat cannot be null."); return new AudioFormat(AudioFormat.Encoding.ALAW, source.getSampleRate(), 8, // sample size in bits source.getChannels(), 1, // frame size in bytes source.getFrameRate(), source.isBigEndian()); }
From source file:com.andrewkroh.cicso.rtp.AudioFileStreamer.java
private static AudioFormat toUlawFormat(AudioFormat source) { Preconditions.checkNotNull(source, "Source AudioFormat cannot be null."); return new AudioFormat(AudioFormat.Encoding.ULAW, source.getSampleRate(), 8, // sample size in bits source.getChannels(), 1, // frame size in bytes source.getFrameRate(), source.isBigEndian()); }
From source file:com.andrewkroh.cicso.rtp.AudioFileStreamer.java
/**
 * Utility method to convert an {@link AudioFormat} object to a String.
 * {@code AudioFormat} does implement a toString method, but its output
 * varies depending upon the contents. I find it more useful to always
 * print the value of all fields.
 *
 * @param format {@code AudioFormat} to convert to a String
 * @return {@code AudioFormat} object as a String
 */
private static String audioFormatToString(AudioFormat format) {
    // ToStringBuilder (Apache Commons Lang) renders every field in a
    // fixed order regardless of the format's contents.
    return new ToStringBuilder(format).append("encoding", format.getEncoding())
            .append("sampleRate", format.getSampleRate())
            .append("sampleSizeInBits", format.getSampleSizeInBits()).append("channels", format.getChannels())
            .append("frameSize", format.getFrameSize()).append("frameRate", format.getFrameRate())
            .append("isBigEndian", format.isBigEndian()).toString();
}
From source file:com.andrewkroh.cicso.rtp.AudioFileStreamer.java
private static AudioFormat toPcm16Format(AudioFormat source, boolean bigEndianOutput) { Preconditions.checkNotNull(source, "Source AudioFormat cannot be null."); return new AudioFormat(AudioFormat.Encoding.PCM_UNSIGNED, source.getSampleRate(), 16, // sample size in bits source.getChannels(), 2, // frame size in bytes source.getFrameRate(), bigEndianOutput); }
From source file:iristk.speech.nuance9.BaseRecognizer.java
public static String getEncoding(AudioFormat format) throws IllegalArgumentException { if (format.getFrameRate() != 8000 && format.getFrameRate() != 16000) throw new IllegalArgumentException("Can only process 8khz or 16khz"); if (format.isBigEndian()) throw new IllegalArgumentException("Can only process little-endian"); if (format.getChannels() != 1) throw new IllegalArgumentException("Can only process mono sound"); if (format.getEncoding() == Encoding.ULAW) return "audio/basic;rate=8000"; else if (format.getEncoding() == Encoding.PCM_SIGNED) { if (format.getFrameSize() != 2) throw new IllegalArgumentException("Can only process 16 bit PCM sound"); return "audio/L16;rate=8000"; } else//from w w w . j a va2s. c o m throw new IllegalArgumentException("Bad audio encoding: " + format.getEncoding()); }
From source file:io.github.dsheirer.record.wave.WaveWriter.java
/**
 * Creates a WAVE "fmt " chunk describing the given audio format.
 *
 * <p>Layout (RIFF, little-endian): chunk id, chunk length, format code,
 * channel count, sample rate, byte rate, block align, bits per sample.
 * The returned buffer is rewound to position 0 so it can be written out
 * directly.
 *
 * @param format audio format to describe (assumed uncompressed PCM)
 * @return 24-byte buffer containing the chunk, positioned at 0
 */
public static ByteBuffer getFormatChunk(AudioFormat format) {
    ByteBuffer header = ByteBuffer.allocate(24).order(ByteOrder.LITTLE_ENDIAN);

    // Format descriptor: id + declared length of the remaining fields.
    // NOTE(review): getBytes() uses the platform default charset; the
    // chunk id should be ASCII, so getBytes(US_ASCII) would be safer --
    // confirm the constant's content before changing.
    header.put(FORMAT_CHUNK_ID.getBytes());
    header.putInt(FORMAT_CHUNK_LENGTH);
    header.putShort(FORMAT_UNCOMPRESSED_PCM);
    header.putShort((short) format.getChannels());
    header.putInt((int) format.getSampleRate());

    // Byte Rate = sample rate * channels * bits per sample / 8
    int frameByteRate = format.getChannels() * format.getSampleSizeInBits() / 8;
    int byteRate = (int) (format.getSampleRate() * frameByteRate);
    header.putInt(byteRate);

    // Block Align: bytes per sample frame across all channels.
    header.putShort((short) frameByteRate);

    // Bits per Sample
    header.putShort((short) format.getSampleSizeInBits());

    // Reset the buffer pointer to 0
    header.position(0);

    return header;
}
From source file:Main.java
/**
 * Reads sampled audio data from the specified URL and plays it, blocking
 * until playback completes. Non-PCM input is transcoded to 16-bit signed
 * little-endian PCM when the source format has no directly supported line.
 *
 * @param url location of the audio resource
 * @throws IOException                   on read failure
 * @throws UnsupportedAudioFileException if the resource is not recognized audio
 * @throws LineUnavailableException      if no output line can be opened
 */
public static void streamSampledAudio(URL url)
        throws IOException, UnsupportedAudioFileException, LineUnavailableException {
    AudioInputStream ain = null; // We read audio data from here
    SourceDataLine line = null; // And write it here.
    try {
        // Get an audio input stream from the URL
        ain = AudioSystem.getAudioInputStream(url);

        // Get information about the format of the stream
        AudioFormat format = ain.getFormat();
        DataLine.Info info = new DataLine.Info(SourceDataLine.class, format);

        // If the format is not supported directly (i.e. if it is not PCM
        // encoded), then try to transcode it to PCM.
        if (!AudioSystem.isLineSupported(info)) {
            // This is the PCM format we want to transcode to: 16-bit
            // signed little-endian at the source's sample rate and
            // channel count.
            AudioFormat pcm = new AudioFormat(format.getSampleRate(), 16, format.getChannels(), true, false);

            // Get a wrapper stream around the input stream that does the
            // transcoding for us.
            ain = AudioSystem.getAudioInputStream(pcm, ain);

            // Update the format and info variables for the transcoded data
            format = ain.getFormat();
            info = new DataLine.Info(SourceDataLine.class, format);
        }

        // Open the line through which we'll play the streaming audio.
        line = (SourceDataLine) AudioSystem.getLine(info);
        line.open(format);

        // Allocate a buffer large enough to hold 4k audio frames; the
        // SourceDataLine also has its own internal buffer.
        int framesize = format.getFrameSize();
        byte[] buffer = new byte[4 * 1024 * framesize]; // the buffer
        int numbytes = 0; // how many bytes currently buffered

        // We haven't started the line yet.
        boolean started = false;

        for (;;) { // We'll exit the loop when we reach the end of stream
            // First, read some bytes from the input stream.
            int bytesread = ain.read(buffer, numbytes, buffer.length - numbytes);

            // If there were no more bytes to read, we're done.
            if (bytesread == -1)
                break;
            numbytes += bytesread;

            // Now that we've got some audio data to write to the line,
            // start the line, so it will play that data as we write it.
            if (!started) {
                line.start();
                started = true;
            }

            // We must write bytes to the line in an integer multiple of
            // the framesize. So figure out how many bytes we'll write.
            int bytestowrite = (numbytes / framesize) * framesize;

            // Now write the bytes. The line will buffer them and play
            // them. This call will block until all bytes are written.
            line.write(buffer, 0, bytestowrite);

            // If we didn't have an integer multiple of the frame size,
            // then copy the remaining bytes to the start of the buffer
            // so the next read appends after them.
            int remaining = numbytes - bytestowrite;
            if (remaining > 0)
                System.arraycopy(buffer, bytestowrite, buffer, 0, remaining);
            numbytes = remaining;
        }

        // Now block until all buffered sound finishes playing.
        line.drain();
    } finally { // Always relinquish the resources we use
        if (line != null)
            line.close();
        if (ain != null)
            ain.close();
    }
}
From source file:edu.tsinghua.lumaqq.Sounder.java
/** * /*from w w w . jav a 2 s . com*/ * @param filename * @return */ private boolean loadSound(String filename) { // ?? File file = new File(filename); try { currentSound = AudioSystem.getAudioInputStream(file); } catch (Exception e) { try { FileInputStream is = new FileInputStream(file); currentSound = new BufferedInputStream(is, 1024); } catch (Exception ex) { log.error(ex.getMessage()); currentSound = null; return false; } } // ?????? if (currentSound instanceof AudioInputStream) { try { AudioInputStream stream = (AudioInputStream) currentSound; AudioFormat format = stream.getFormat(); // ?? ALAW/ULAW ? ALAW/ULAW ?? PCM if ((format.getEncoding() == AudioFormat.Encoding.ULAW) || (format.getEncoding() == AudioFormat.Encoding.ALAW)) { AudioFormat tmp = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, format.getSampleRate(), format.getSampleSizeInBits() * 2, format.getChannels(), format.getFrameSize() * 2, format.getFrameRate(), true); stream = AudioSystem.getAudioInputStream(tmp, stream); format = tmp; } DataLine.Info info = new DataLine.Info(Clip.class, stream.getFormat(), ((int) stream.getFrameLength() * format.getFrameSize())); Clip clip = (Clip) AudioSystem.getLine(info); clip.open(stream); currentSound = clip; } catch (Exception ex) { log.error(ex.getMessage()); currentSound = null; return false; } } else if (currentSound instanceof Sequence || currentSound instanceof BufferedInputStream) { try { sequencer.open(); if (currentSound instanceof Sequence) { sequencer.setSequence((Sequence) currentSound); } else { sequencer.setSequence((BufferedInputStream) currentSound); } log.trace("Sequence Created"); } catch (InvalidMidiDataException imde) { log.error("???"); currentSound = null; return false; } catch (Exception ex) { log.error(ex.getMessage()); currentSound = null; return false; } } return true; }