Usage examples for javax.sound.sampled.AudioFormat.getChannels()
public int getChannels()
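Returns the number of channels in this format: 1 for mono, 2 for stereo, or AudioSystem.NOT_SPECIFIED if the channel count is unknown.

Before the source-file examples, a minimal standalone sketch of the call; the class name and the file name "sound.wav" are placeholders, not taken from the examples below:

import java.io.File;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

public class GetChannelsDemo {
    public static void main(String[] args) throws Exception {
        // Open the file, read its format, and query the channel count.
        try (AudioInputStream in = AudioSystem.getAudioInputStream(new File("sound.wav"))) {
            AudioFormat format = in.getFormat();
            int channels = format.getChannels();
            if (channels == AudioSystem.NOT_SPECIFIED) {
                System.out.println("Channel count not specified");
            } else {
                System.out.println("Channels: " + channels); // 1 = mono, 2 = stereo
            }
        }
    }
}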
From source file:SimpleSoundPlayer.java
public boolean loadSound(Object object) {
    duration = 0.0;
    currentName = ((File) object).getName();
    try {
        currentSound = AudioSystem.getAudioInputStream((File) object);
    } catch (Exception e1) {
        try {
            FileInputStream is = new FileInputStream((File) object);
            currentSound = new BufferedInputStream(is, 1024);
        } catch (Exception e3) {
            e3.printStackTrace();
            currentSound = null;
            return false;
        }
    }
    // user pressed stop or changed tabs while loading
    if (sequencer == null) {
        currentSound = null;
        return false;
    }
    if (currentSound instanceof AudioInputStream) {
        try {
            AudioInputStream stream = (AudioInputStream) currentSound;
            AudioFormat format = stream.getFormat();
            /**
             * we can't yet open the device for ALAW/ULAW playback,
             * convert ALAW/ULAW to PCM
             */
            if ((format.getEncoding() == AudioFormat.Encoding.ULAW)
                    || (format.getEncoding() == AudioFormat.Encoding.ALAW)) {
                AudioFormat tmp = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
                        format.getSampleRate(),
                        format.getSampleSizeInBits() * 2,
                        format.getChannels(),
                        format.getFrameSize() * 2,
                        format.getFrameRate(), true);
                stream = AudioSystem.getAudioInputStream(tmp, stream);
                format = tmp;
            }
            DataLine.Info info = new DataLine.Info(Clip.class, stream.getFormat(),
                    ((int) stream.getFrameLength() * format.getFrameSize()));
            Clip clip = (Clip) AudioSystem.getLine(info);
            clip.addLineListener(this);
            clip.open(stream);
            currentSound = clip;
            // seekSlider.setMaximum((int) stream.getFrameLength());
        } catch (Exception ex) {
            ex.printStackTrace();
            currentSound = null;
            return false;
        }
    } else if (currentSound instanceof Sequence || currentSound instanceof BufferedInputStream) {
        try {
            sequencer.open();
            if (currentSound instanceof Sequence) {
                sequencer.setSequence((Sequence) currentSound);
            } else {
                sequencer.setSequence((BufferedInputStream) currentSound);
            }
        } catch (InvalidMidiDataException imde) {
            System.out.println("Unsupported audio file.");
            currentSound = null;
            return false;
        } catch (Exception ex) {
            ex.printStackTrace();
            currentSound = null;
            return false;
        }
    }
    duration = getDuration();
    return true;
}
From source file:com.limegroup.gnutella.gui.mp3.BasicPlayer.java
/**
 * Inits a DataLine.
 *
 * We check if the line supports Gain and Pan controls.
 *
 * From the AudioInputStream, i.e. from the sound file, we
 * fetch information about the format of the audio data. This
 * information includes the sampling frequency, the number of
 * channels and the size of the samples, and is needed to ask
 * JavaSound for a suitable output line for this audio file.
 * Furthermore, we have to give JavaSound a hint about how
 * big the internal buffer for the line should be. Here,
 * we say AudioSystem.NOT_SPECIFIED, signaling that we don't
 * care about the exact size. JavaSound will use some default
 * value for the buffer size.
 */
private void createLine() throws LineUnavailableException {
    if (m_line == null) {
        AudioFormat sourceFormat = m_audioInputStream.getFormat();
        if (LOG.isDebugEnabled())
            LOG.debug("Source format : " + sourceFormat);
        AudioFormat targetFormat = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
                sourceFormat.getSampleRate(), 16, sourceFormat.getChannels(),
                sourceFormat.getChannels() * 2, sourceFormat.getSampleRate(), false);
        if (LOG.isDebugEnabled())
            LOG.debug("Target format: " + targetFormat);
        m_audioInputStream = AudioSystem.getAudioInputStream(targetFormat, m_audioInputStream);
        AudioFormat audioFormat = m_audioInputStream.getFormat();
        if (LOG.isDebugEnabled())
            LOG.debug("Create Line : " + audioFormat);
        DataLine.Info info = new DataLine.Info(SourceDataLine.class, audioFormat,
                AudioSystem.NOT_SPECIFIED);
        m_line = (SourceDataLine) AudioSystem.getLine(info);
        /*-- Display supported controls --*/
        Control[] c = m_line.getControls();
        for (int p = 0; p < c.length; p++) {
            if (LOG.isDebugEnabled())
                LOG.debug("Controls : " + c[p].toString());
        }
        /*-- Is Gain Control supported ? --*/
        if (m_line.isControlSupported(FloatControl.Type.MASTER_GAIN)) {
            m_gainControl = (FloatControl) m_line.getControl(FloatControl.Type.MASTER_GAIN);
            if (LOG.isDebugEnabled())
                LOG.debug("Master Gain Control : [" + m_gainControl.getMinimum() + ","
                        + m_gainControl.getMaximum() + "]," + m_gainControl.getPrecision());
        }
        /*-- Is Pan control supported ? --*/
        if (m_line.isControlSupported(FloatControl.Type.PAN)) {
            m_panControl = (FloatControl) m_line.getControl(FloatControl.Type.PAN);
            if (LOG.isDebugEnabled())
                LOG.debug("Pan Control : [" + m_panControl.getMinimum() + ","
                        + m_panControl.getMaximum() + "]," + m_panControl.getPrecision());
        }
    }
}
From source file:com.player.BasicMP3Player.java
/**
 * Inits a DataLine.
 * We check if the line supports Gain and Pan controls. From the AudioInputStream, i.e. from the
 * sound file, we fetch information about the format of the audio data. This information includes
 * the sampling frequency, the number of channels and the size of the samples, and is needed to
 * ask JavaSound for a suitable output line for this audio file. Furthermore, we have to give
 * JavaSound a hint about how big the internal buffer for the line should be. Here, we say
 * AudioSystem.NOT_SPECIFIED, signaling that we don't care about the exact size. JavaSound
 * will use some default value for the buffer size.
 */
private void createLine() throws LineUnavailableException {
    log.info("Create Line");
    if (m_line == null) {
        AudioFormat sourceFormat = m_audioInputStream.getFormat();
        log.info("Create Line : Source format : " + sourceFormat.toString());
        AudioFormat targetFormat = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
                sourceFormat.getSampleRate(), 16, sourceFormat.getChannels(),
                sourceFormat.getChannels() * 2, sourceFormat.getSampleRate(), false);
        log.info("Create Line : Target format: " + targetFormat);
        // Keep a reference to the encoded stream for progress notification.
        m_encodedaudioInputStream = m_audioInputStream;
        try {
            // Get total length in bytes of the encoded stream.
            encodedLength = m_encodedaudioInputStream.available();
        } catch (IOException e) {
            log.error("Cannot get m_encodedaudioInputStream.available()", e);
        }
        // Create decoded stream.
        m_audioInputStream = AudioSystem.getAudioInputStream(targetFormat, m_audioInputStream);
        AudioFormat audioFormat = m_audioInputStream.getFormat();
        DataLine.Info info = new DataLine.Info(SourceDataLine.class, audioFormat,
                AudioSystem.NOT_SPECIFIED);
        m_line = (SourceDataLine) AudioSystem.getLine(info);
        /*-- Display supported controls --*/
        Control[] c = m_line.getControls();
        for (int p = 0; p < c.length; p++) {
            log.debug("Controls : " + c[p].toString());
        }
        /*-- Is Gain Control supported ? --*/
        if (m_line.isControlSupported(FloatControl.Type.MASTER_GAIN)) {
            m_gainControl = (FloatControl) m_line.getControl(FloatControl.Type.MASTER_GAIN);
            log.info("Master Gain Control : [" + m_gainControl.getMinimum() + ","
                    + m_gainControl.getMaximum() + "] " + m_gainControl.getPrecision());
        }
        /*-- Is Pan control supported ? --*/
        if (m_line.isControlSupported(FloatControl.Type.PAN)) {
            m_panControl = (FloatControl) m_line.getControl(FloatControl.Type.PAN);
            log.info("Pan Control : [" + m_panControl.getMinimum() + ","
                    + m_panControl.getMaximum() + "] " + m_panControl.getPrecision());
        }
    }
}
From source file:com.player.BasicMP3Player.java
/**
 * Inits AudioInputStream and AudioFileFormat from the data source.
 *
 * @throws BasicPlayerException
 */
private void initAudioInputStream() throws BasicPlayerException {
    try {
        reset();
        notifyEvent(BasicPlayerEvent.OPENING, getEncodedStreamPosition(), -1, m_dataSource);
        if (m_dataSource instanceof URL) {
            initAudioInputStream((URL) m_dataSource);
        } else if (m_dataSource instanceof File) {
            initAudioInputStream((File) m_dataSource);
        } else if (m_dataSource instanceof InputStream) {
            initAudioInputStream((InputStream) m_dataSource);
        }
        createLine();
        // Notify listeners with AudioFileFormat properties.
        Map properties = null;
        if (m_audioFileFormat instanceof TAudioFileFormat) {
            // Tritonus SPI compliant audio file format.
            properties = ((TAudioFileFormat) m_audioFileFormat).properties();
            // Copy the Map because the original is immutable.
            properties = deepCopy(properties);
        } else
            properties = new HashMap();
        // Add JavaSound properties.
        if (m_audioFileFormat.getByteLength() > 0)
            properties.put("audio.length.bytes", new Integer(m_audioFileFormat.getByteLength()));
        if (m_audioFileFormat.getFrameLength() > 0)
            properties.put("audio.length.frames", new Integer(m_audioFileFormat.getFrameLength()));
        if (m_audioFileFormat.getType() != null)
            properties.put("audio.type", (m_audioFileFormat.getType().toString()));
        // Audio format.
        AudioFormat audioFormat = m_audioFileFormat.getFormat();
        if (audioFormat.getFrameRate() > 0)
            properties.put("audio.framerate.fps", new Float(audioFormat.getFrameRate()));
        if (audioFormat.getFrameSize() > 0)
            properties.put("audio.framesize.bytes", new Integer(audioFormat.getFrameSize()));
        if (audioFormat.getSampleRate() > 0)
            properties.put("audio.samplerate.hz", new Float(audioFormat.getSampleRate()));
        if (audioFormat.getSampleSizeInBits() > 0)
            properties.put("audio.samplesize.bits", new Integer(audioFormat.getSampleSizeInBits()));
        if (audioFormat.getChannels() > 0)
            properties.put("audio.channels", new Integer(audioFormat.getChannels()));
        if (audioFormat instanceof TAudioFormat) {
            // Tritonus SPI compliant audio format.
            Map addproperties = ((TAudioFormat) audioFormat).properties();
            properties.putAll(addproperties);
        }
        Iterator it = m_listeners.iterator();
        while (it.hasNext()) {
            BasicPlayerListener bpl = (BasicPlayerListener) it.next();
            bpl.opened(m_dataSource, properties);
        }
        m_status = OPENED;
        notifyEvent(BasicPlayerEvent.OPENED, getEncodedStreamPosition(), -1, null);
    } catch (LineUnavailableException e) {
        throw new BasicPlayerException(e);
    } catch (UnsupportedAudioFileException e) {
        throw new BasicPlayerException(e);
    } catch (IOException e) {
        throw new BasicPlayerException(e);
    }
}
From source file:BasicPlayer.java
/**
 * Inits AudioInputStream and AudioFileFormat from the data source.
 *
 * @throws BasicPlayerException
 */
protected void initAudioInputStream() throws BasicPlayerException {
    try {
        reset();
        notifyEvent(BasicPlayerEvent.OPENING, getEncodedStreamPosition(), -1, m_dataSource);
        if (m_dataSource instanceof URL) {
            initAudioInputStream((URL) m_dataSource);
        } else if (m_dataSource instanceof File) {
            initAudioInputStream((File) m_dataSource);
        } else if (m_dataSource instanceof InputStream) {
            initAudioInputStream((InputStream) m_dataSource);
        }
        createLine();
        // Notify listeners with AudioFileFormat properties.
        Map properties = null;
        if (m_audioFileFormat instanceof TAudioFileFormat) {
            // Tritonus SPI compliant audio file format.
            properties = ((TAudioFileFormat) m_audioFileFormat).properties();
            // Copy the Map because the original is immutable.
            properties = deepCopy(properties);
        } else
            properties = new HashMap();
        // Add JavaSound properties.
        if (m_audioFileFormat.getByteLength() > 0)
            properties.put("audio.length.bytes", new Integer(m_audioFileFormat.getByteLength()));
        if (m_audioFileFormat.getFrameLength() > 0)
            properties.put("audio.length.frames", new Integer(m_audioFileFormat.getFrameLength()));
        if (m_audioFileFormat.getType() != null)
            properties.put("audio.type", (m_audioFileFormat.getType().toString()));
        // Audio format.
        AudioFormat audioFormat = m_audioFileFormat.getFormat();
        if (audioFormat.getFrameRate() > 0)
            properties.put("audio.framerate.fps", new Float(audioFormat.getFrameRate()));
        if (audioFormat.getFrameSize() > 0)
            properties.put("audio.framesize.bytes", new Integer(audioFormat.getFrameSize()));
        if (audioFormat.getSampleRate() > 0)
            properties.put("audio.samplerate.hz", new Float(audioFormat.getSampleRate()));
        if (audioFormat.getSampleSizeInBits() > 0)
            properties.put("audio.samplesize.bits", new Integer(audioFormat.getSampleSizeInBits()));
        if (audioFormat.getChannels() > 0)
            properties.put("audio.channels", new Integer(audioFormat.getChannels()));
        if (audioFormat instanceof TAudioFormat) {
            // Tritonus SPI compliant audio format.
            Map addproperties = ((TAudioFormat) audioFormat).properties();
            properties.putAll(addproperties);
        }
        // Add SourceDataLine
        properties.put("basicplayer.sourcedataline", m_line);
        Iterator it = m_listeners.iterator();
        while (it.hasNext()) {
            BasicPlayerListener bpl = (BasicPlayerListener) it.next();
            bpl.opened(m_dataSource, properties);
        }
        m_status = OPENED;
        notifyEvent(BasicPlayerEvent.OPENED, getEncodedStreamPosition(), -1, null);
    } catch (LineUnavailableException e) {
        throw new BasicPlayerException(e);
    } catch (UnsupportedAudioFileException e) {
        throw new BasicPlayerException(e);
    } catch (IOException e) {
        throw new BasicPlayerException(e);
    }
}
From source file:BasicPlayer.java
/**
 * Inits a DataLine.
 *
 * We check if the line supports Gain and Pan controls.
 *
 * From the AudioInputStream, i.e. from the sound file, we
 * fetch information about the format of the audio data. This
 * information includes the sampling frequency, the number of
 * channels and the size of the samples, and is needed to ask
 * JavaSound for a suitable output line for this audio file.
 * Furthermore, we have to give JavaSound a hint about how
 * big the internal buffer for the line should be. Here,
 * we say AudioSystem.NOT_SPECIFIED, signaling that we don't
 * care about the exact size. JavaSound will use some default
 * value for the buffer size.
 */
protected void createLine() throws LineUnavailableException {
    log.info("Create Line");
    if (m_line == null) {
        AudioFormat sourceFormat = m_audioInputStream.getFormat();
        log.info("Create Line : Source format : " + sourceFormat.toString());
        int nSampleSizeInBits = sourceFormat.getSampleSizeInBits();
        if (nSampleSizeInBits <= 0)
            nSampleSizeInBits = 16;
        if ((sourceFormat.getEncoding() == AudioFormat.Encoding.ULAW)
                || (sourceFormat.getEncoding() == AudioFormat.Encoding.ALAW))
            nSampleSizeInBits = 16;
        if (nSampleSizeInBits != 8)
            nSampleSizeInBits = 16;
        AudioFormat targetFormat = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
                sourceFormat.getSampleRate(), nSampleSizeInBits, sourceFormat.getChannels(),
                sourceFormat.getChannels() * (nSampleSizeInBits / 8),
                sourceFormat.getSampleRate(), false);
        log.info("Create Line : Target format: " + targetFormat);
        // Keep a reference to the encoded stream for progress notification.
        m_encodedaudioInputStream = m_audioInputStream;
        try {
            // Get total length in bytes of the encoded stream.
            encodedLength = m_encodedaudioInputStream.available();
        } catch (IOException e) {
            log.error("Cannot get m_encodedaudioInputStream.available()", e);
        }
        // Create decoded stream.
        m_audioInputStream = AudioSystem.getAudioInputStream(targetFormat, m_audioInputStream);
        AudioFormat audioFormat = m_audioInputStream.getFormat();
        DataLine.Info info = new DataLine.Info(SourceDataLine.class, audioFormat,
                AudioSystem.NOT_SPECIFIED);
        Mixer mixer = getMixer(m_mixerName);
        if (mixer != null) {
            log.info("Mixer : " + mixer.getMixerInfo().toString());
            m_line = (SourceDataLine) mixer.getLine(info);
        } else {
            m_line = (SourceDataLine) AudioSystem.getLine(info);
            m_mixerName = null;
        }
        log.info("Line : " + m_line.toString());
        log.debug("Line Info : " + m_line.getLineInfo().toString());
        log.debug("Line AudioFormat: " + m_line.getFormat().toString());
    }
}
From source file:com.skratchdot.electribe.model.esx.impl.SampleImpl.java
/**
 * @param file
 * @throws EsxException
 */
protected SampleImpl(File file) throws EsxException {
    super();
    init();
    // Declare our streams and formats
    AudioFormat audioFormatEncoded;
    AudioFormat audioFormatDecoded;
    AudioInputStream audioInputStreamEncoded;
    AudioInputStream audioInputStreamDecoded;
    try {
        // Initialize our streams and formats
        audioInputStreamEncoded = AudioSystem.getAudioInputStream(file);
        audioFormatEncoded = audioInputStreamEncoded.getFormat();
        audioFormatDecoded = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
                audioFormatEncoded.getSampleRate(), 16, audioFormatEncoded.getChannels(),
                audioFormatEncoded.getChannels() * 2, audioFormatEncoded.getSampleRate(), true);
        audioInputStreamDecoded = AudioSystem.getAudioInputStream(audioFormatDecoded,
                audioInputStreamEncoded);
        // We have a decoded stereo audio stream
        // Now we need to get the stream info into a list we can manipulate
        byte[] audioData = new byte[4096];
        int nBytesRead = 0;
        long nTotalBytesRead = 0;
        List<Byte> audioDataListChannel1 = new ArrayList<Byte>();
        List<Byte> audioDataListChannel2 = new ArrayList<Byte>();
        boolean isAudioDataStereo = false;
        // Set isAudioDataStereo
        if (audioFormatEncoded.getChannels() == 1) {
            isAudioDataStereo = false;
        } else if (audioFormatEncoded.getChannels() == 2) {
            isAudioDataStereo = true;
        } else {
            throw new EsxException("Sample has too many channels: " + file.getAbsolutePath());
        }
        // Convert stream to list. This needs to be optimized. Converting
        // a byte at a time is probably too slow...
        while (nBytesRead >= 0) {
            nBytesRead = audioInputStreamDecoded.read(audioData, 0, audioData.length);
            // If we aren't at the end of the stream
            if (nBytesRead > 0) {
                for (int i = 0; i < nBytesRead; i++) {
                    // MONO
                    if (!isAudioDataStereo) {
                        audioDataListChannel1.add(audioData[i]);
                        audioDataListChannel2.add(audioData[i]);
                    }
                    // STEREO (LEFT)
                    else if (nTotalBytesRead % 4 < 2) {
                        audioDataListChannel1.add(audioData[i]);
                    }
                    // STEREO (RIGHT)
                    else {
                        audioDataListChannel2.add(audioData[i]);
                    }
                    // Update the total amount of bytes we've read
                    nTotalBytesRead++;
                }
            }
            // Throw Exception if sample is too big
            if (nTotalBytesRead > EsxUtil.MAX_SAMPLE_MEM_IN_BYTES) {
                throw new EsxException("Sample is too big: " + file.getAbsolutePath());
            }
        }
        // Set member variables
        int frameLength = audioDataListChannel1.size() / 2;
        this.setNumberOfSampleFrames(frameLength);
        this.setEnd(frameLength - 1);
        this.setLoopStart(frameLength - 1);
        this.setSampleRate((int) audioFormatEncoded.getSampleRate());
        this.setAudioDataChannel1(EsxUtil.listToByteArray(audioDataListChannel1));
        this.setAudioDataChannel2(EsxUtil.listToByteArray(audioDataListChannel2));
        this.setStereoOriginal(isAudioDataStereo);
        // Set calculated Sample Tune (from Sample Rate)
        SampleTune newSampleTune = EsxFactory.eINSTANCE.createSampleTune();
        float newFloat = newSampleTune.calculateSampleTuneFromSampleRate(this.getSampleRate());
        newSampleTune.setValue(newFloat);
        this.setSampleTune(newSampleTune);
        // Set name
        String newSampleName = new String();
        newSampleName = StringUtils.left(StringUtils.trim(file.getName()), 8);
        this.setName(newSampleName);
        // Attempt to set loopStart and End from .wav smpl chunk
        if (file.getAbsolutePath().toLowerCase().endsWith(".wav")) {
            try {
                RIFFWave riffWave = WavFactory.eINSTANCE.createRIFFWave(file);
                ChunkSampler chunkSampler = (ChunkSampler) riffWave
                        .getFirstChunkByEClass(WavPackage.Literals.CHUNK_SAMPLER);
                if (chunkSampler != null && chunkSampler.getSampleLoops().size() > 0) {
                    SampleLoop sampleLoop = chunkSampler.getSampleLoops().get(0);
                    Long tempLoopStart = sampleLoop.getStart();
                    Long tempLoopEnd = sampleLoop.getEnd();
                    if (tempLoopStart < this.getEnd() && tempLoopStart >= 0) {
                        this.setLoopStart(tempLoopStart.intValue());
                    }
                    if (tempLoopEnd < this.getEnd() && tempLoopEnd > this.getLoopStart()) {
                        this.setEnd(tempLoopEnd.intValue());
                    }
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    } catch (UnsupportedAudioFileException e) {
        e.printStackTrace();
        throw new EsxException("Invalid audio file: " + file.getAbsolutePath());
    } catch (IOException e) {
        e.printStackTrace();
        throw new EsxException("Invalid audio file: " + file.getAbsolutePath());
    } catch (Exception e) {
        e.printStackTrace();
        throw new EsxException("Invalid audio file: " + file.getAbsolutePath());
    }
}
From source file:org.apache.tika.parser.audio.AudioParser.java
public void parse(InputStream stream, ContentHandler handler, Metadata metadata,
        ParseContext context) throws IOException, SAXException, TikaException {
    // AudioSystem expects the stream to support the mark feature
    if (!stream.markSupported()) {
        stream = new BufferedInputStream(stream);
    }
    stream = new SkipFullyInputStream(stream);
    try {
        AudioFileFormat fileFormat = AudioSystem.getAudioFileFormat(stream);
        Type type = fileFormat.getType();
        if (type == Type.AIFC || type == Type.AIFF) {
            metadata.set(Metadata.CONTENT_TYPE, "audio/x-aiff");
        } else if (type == Type.AU || type == Type.SND) {
            metadata.set(Metadata.CONTENT_TYPE, "audio/basic");
        } else if (type == Type.WAVE) {
            metadata.set(Metadata.CONTENT_TYPE, "audio/vnd.wave");
        }
        AudioFormat audioFormat = fileFormat.getFormat();
        int channels = audioFormat.getChannels();
        if (channels != AudioSystem.NOT_SPECIFIED) {
            metadata.set("channels", String.valueOf(channels));
            // TODO: Use XMPDM.TRACKS? (see also frame rate in AudioFormat)
        }
        float rate = audioFormat.getSampleRate();
        if (rate != AudioSystem.NOT_SPECIFIED) {
            metadata.set("samplerate", String.valueOf(rate));
            metadata.set(XMPDM.AUDIO_SAMPLE_RATE, Integer.toString((int) rate));
        }
        int bits = audioFormat.getSampleSizeInBits();
        if (bits != AudioSystem.NOT_SPECIFIED) {
            metadata.set("bits", String.valueOf(bits));
            if (bits == 8) {
                metadata.set(XMPDM.AUDIO_SAMPLE_TYPE, "8Int");
            } else if (bits == 16) {
                metadata.set(XMPDM.AUDIO_SAMPLE_TYPE, "16Int");
            } else if (bits == 32) {
                metadata.set(XMPDM.AUDIO_SAMPLE_TYPE, "32Int");
            }
        }
        metadata.set("encoding", audioFormat.getEncoding().toString());
        // Javadoc suggests that some of the following properties might
        // be available, but I had no success in finding any:
        // "duration" Long playback duration of the file in microseconds
        // "author" String name of the author of this file
        // "title" String title of this file
        // "copyright" String copyright message
        // "date" Date date of the recording or release
        // "comment" String an arbitrary text
        addMetadata(metadata, fileFormat.properties());
        addMetadata(metadata, audioFormat.properties());
    } catch (UnsupportedAudioFileException e) {
        // There is no way to know whether this exception was
        // caused by the document being corrupted or by the format
        // just being unsupported. So we do nothing.
    }
    XHTMLContentHandler xhtml = new XHTMLContentHandler(handler, metadata);
    xhtml.startDocument();
    xhtml.endDocument();
}
From source file:org.jcodec.codecs.wav.WavHeader.java
public static WavHeader create(AudioFormat af, int size) {
    WavHeader w = emptyWavHeader();
    w.dataSize = size;
    FmtChunk fmt = new FmtChunk();
    int bitsPerSample = af.getSampleSizeInBits();
    int bytesPerSample = bitsPerSample / 8;
    int sampleRate = (int) af.getSampleRate();
    w.fmt.bitsPerSample = (short) bitsPerSample;
    w.fmt.blockAlign = (short) (af.getFrameSize());
    w.fmt.byteRate = (int) af.getFrameRate() * af.getFrameSize();
    w.fmt.numChannels = (short) af.getChannels();
    w.fmt.sampleRate = (int) af.getSampleRate();
    return w;
}
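A closing note on the arithmetic that recurs in the examples above: for PCM encodings the frame size equals channels times bytes per sample, which is why several examples pass sourceFormat.getChannels() * 2 as the frame size of a 16-bit target format. A minimal sketch verifying that identity (the class name is just for illustration):

import javax.sound.sampled.AudioFormat;

public class FrameSizeCheck {
    public static void main(String[] args) {
        // 44.1 kHz, 16-bit, stereo, signed PCM, little-endian.
        AudioFormat af = new AudioFormat(44100f, 16, 2, true, false);
        // For PCM, frame size = channels * bytes per sample (here 2 * 2 = 4).
        int expected = af.getChannels() * (af.getSampleSizeInBits() / 8);
        System.out.println("getFrameSize()=" + af.getFrameSize()
                + ", channels*bytesPerSample=" + expected);
    }
}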