Usage examples for javax.sound.sampled.AudioFormat.getFrameRate()

public float getFrameRate()

Returns the frame rate of this format in frames per second, or AudioSystem.NOT_SPECIFIED (-1) if the rate is unknown. For PCM encodings, one frame holds one sample per channel, so the frame rate equals the sample rate.
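A minimal, self-contained sketch comparing the two rates (the file path is illustrative):

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import java.io.File;

public class FrameRateDemo {
    public static void main(String[] args) throws Exception {
        // Open any file supported by the installed audio readers
        AudioInputStream in = AudioSystem.getAudioInputStream(new File("sound.wav"));
        AudioFormat format = in.getFormat();
        // For PCM these two values match; -1 means the rate is unspecified
        System.out.println("frame rate:  " + format.getFrameRate());
        System.out.println("sample rate: " + format.getSampleRate());
        in.close();
    }
}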
From source file:sx.blah.discord.api.internal.DiscordUtils.java
/**
 * Converts an {@link AudioInputStream} to 48000 Hz 16-bit stereo signed big-endian PCM format.
 *
 * @param stream The original stream.
 * @return The PCM-encoded stream.
 */
public static AudioInputStream getPCMStream(AudioInputStream stream) {
    AudioFormat baseFormat = stream.getFormat();

    // Convert to PCM first. If the data is already PCM, this changes nothing.
    AudioFormat toPCM = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
            baseFormat.getSampleRate(), // AudioConnection.OPUS_SAMPLE_RATE,
            baseFormat.getSampleSizeInBits() != -1 ? baseFormat.getSampleSizeInBits() : 16,
            baseFormat.getChannels(),
            // If we are given a frame size, use it. Otherwise, assume 16 bits (2 bytes) per channel.
            baseFormat.getFrameSize() != -1 ? baseFormat.getFrameSize() : 2 * baseFormat.getChannels(),
            baseFormat.getFrameRate() != -1 ? baseFormat.getFrameRate() : baseFormat.getSampleRate(),
            baseFormat.isBigEndian());
    AudioInputStream pcmStream = AudioSystem.getAudioInputStream(toPCM, stream);

    // Then resample to 48000 Hz and ensure the data is big-endian.
    AudioFormat audioFormat = new AudioFormat(toPCM.getEncoding(), OpusUtil.OPUS_SAMPLE_RATE,
            toPCM.getSampleSizeInBits(), toPCM.getChannels(), toPCM.getFrameSize(), toPCM.getFrameRate(),
            true);
    return AudioSystem.getAudioInputStream(audioFormat, pcmStream);
}
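A hypothetical call site for the method above, assuming OpusUtil.OPUS_SAMPLE_RATE is 48000 as the doc comment states; because the output is PCM, its frame rate equals its 48 kHz sample rate:

AudioInputStream source = AudioSystem.getAudioInputStream(new File("clip.wav")); // illustrative path
AudioInputStream pcm = DiscordUtils.getPCMStream(source);
System.out.println(pcm.getFormat().getFrameRate()); // 48000.0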
From source file:com.arkatay.yada.codec.AudioRecorder.java
/**
 * Creates a new instance of AudioRecorder with the audio format that the
 * input voice data is using.
 *
 * @param audioFormat the audio format used by the recorded voice data
 */
public AudioRecorder(AudioFormat audioFormat, boolean injectSilence) {
    // Create a logger for this class
    log = LogFactory.getLog(getClass());

    this.audioFormat = audioFormat;
    this.injectSilence = injectSilence;

    hashInteger = new HashInteger();
    recorderMap = new HashMap<HashInteger, AudioChannelRecorder>(32);
    silenceBuffer = new byte[SILENCE_BUFFER_SIZE];

    // Fill silence buffer
    for (int i = 0; i < SILENCE_BUFFER_SIZE; i++) {
        silenceBuffer[i] = 0;
    }

    // Calculate the data rate per millisecond (frame rate * frame size / 1000);
    // note this is bytes per millisecond, despite the field name.
    samplesPerMillisecond = (int) (audioFormat.getFrameRate() * audioFormat.getFrameSize() / 1000);
}
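A short check of that arithmetic under assumed illustrative values (16-bit mono PCM at 8 kHz, so the frame size is 2 bytes):

AudioFormat fmt = new AudioFormat(8000f, 16, 1, true, false); // frame size = 2 bytes
int bytesPerMs = (int) (fmt.getFrameRate() * fmt.getFrameSize() / 1000);
System.out.println(bytesPerMs); // 8000 frames/s * 2 bytes / 1000 = 16 bytes per millisecond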
From source file:edu.tsinghua.lumaqq.Sounder.java
/**
 * Loads the sound from the given file, as a Clip for sampled audio or
 * via the sequencer for MIDI data.
 *
 * @param filename the path of the sound file to load
 * @return true if the sound was loaded successfully, false otherwise
 */
private boolean loadSound(String filename) {
    // Open the sound file
    File file = new File(filename);
    try {
        currentSound = AudioSystem.getAudioInputStream(file);
    } catch (Exception e) {
        try {
            FileInputStream is = new FileInputStream(file);
            currentSound = new BufferedInputStream(is, 1024);
        } catch (Exception ex) {
            log.error(ex.getMessage());
            currentSound = null;
            return false;
        }
    }

    // Decide how to play the sound back based on the stream type
    if (currentSound instanceof AudioInputStream) {
        try {
            AudioInputStream stream = (AudioInputStream) currentSound;
            AudioFormat format = stream.getFormat();

            // The device cannot be opened for ALAW/ULAW playback, so convert ALAW/ULAW to PCM
            if ((format.getEncoding() == AudioFormat.Encoding.ULAW)
                    || (format.getEncoding() == AudioFormat.Encoding.ALAW)) {
                AudioFormat tmp = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, format.getSampleRate(),
                        format.getSampleSizeInBits() * 2, format.getChannels(), format.getFrameSize() * 2,
                        format.getFrameRate(), true);
                stream = AudioSystem.getAudioInputStream(tmp, stream);
                format = tmp;
            }
            DataLine.Info info = new DataLine.Info(Clip.class, stream.getFormat(),
                    ((int) stream.getFrameLength() * format.getFrameSize()));
            Clip clip = (Clip) AudioSystem.getLine(info);
            clip.open(stream);
            currentSound = clip;
        } catch (Exception ex) {
            log.error(ex.getMessage());
            currentSound = null;
            return false;
        }
    } else if (currentSound instanceof Sequence || currentSound instanceof BufferedInputStream) {
        try {
            sequencer.open();
            if (currentSound instanceof Sequence) {
                sequencer.setSequence((Sequence) currentSound);
            } else {
                sequencer.setSequence((BufferedInputStream) currentSound);
            }
            log.trace("Sequence Created");
        } catch (InvalidMidiDataException imde) {
            log.error("Unsupported audio file.");
            currentSound = null;
            return false;
        } catch (Exception ex) {
            log.error(ex.getMessage());
            currentSound = null;
            return false;
        }
    }
    return true;
}
From source file:com.player.BasicMP3Player.java
/**
 * Inits AudioInputStream and AudioFileFormat from the data source.
 *
 * @throws BasicPlayerException
 */
private void initAudioInputStream() throws BasicPlayerException {
    try {
        reset();
        notifyEvent(BasicPlayerEvent.OPENING, getEncodedStreamPosition(), -1, m_dataSource);
        if (m_dataSource instanceof URL) {
            initAudioInputStream((URL) m_dataSource);
        } else if (m_dataSource instanceof File) {
            initAudioInputStream((File) m_dataSource);
        } else if (m_dataSource instanceof InputStream) {
            initAudioInputStream((InputStream) m_dataSource);
        }
        createLine();
        // Notify listeners with AudioFileFormat properties.
        Map properties = null;
        if (m_audioFileFormat instanceof TAudioFileFormat) {
            // Tritonus SPI compliant audio file format.
            properties = ((TAudioFileFormat) m_audioFileFormat).properties();
            // Clone the Map because it is not mutable.
            properties = deepCopy(properties);
        } else {
            properties = new HashMap();
        }
        // Add JavaSound properties.
        if (m_audioFileFormat.getByteLength() > 0)
            properties.put("audio.length.bytes", new Integer(m_audioFileFormat.getByteLength()));
        if (m_audioFileFormat.getFrameLength() > 0)
            properties.put("audio.length.frames", new Integer(m_audioFileFormat.getFrameLength()));
        if (m_audioFileFormat.getType() != null)
            properties.put("audio.type", (m_audioFileFormat.getType().toString()));
        // Audio format.
        AudioFormat audioFormat = m_audioFileFormat.getFormat();
        if (audioFormat.getFrameRate() > 0)
            properties.put("audio.framerate.fps", new Float(audioFormat.getFrameRate()));
        if (audioFormat.getFrameSize() > 0)
            properties.put("audio.framesize.bytes", new Integer(audioFormat.getFrameSize()));
        if (audioFormat.getSampleRate() > 0)
            properties.put("audio.samplerate.hz", new Float(audioFormat.getSampleRate()));
        if (audioFormat.getSampleSizeInBits() > 0)
            properties.put("audio.samplesize.bits", new Integer(audioFormat.getSampleSizeInBits()));
        if (audioFormat.getChannels() > 0)
            properties.put("audio.channels", new Integer(audioFormat.getChannels()));
        if (audioFormat instanceof TAudioFormat) {
            // Tritonus SPI compliant audio format.
            Map addproperties = ((TAudioFormat) audioFormat).properties();
            properties.putAll(addproperties);
        }
        Iterator it = m_listeners.iterator();
        while (it.hasNext()) {
            BasicPlayerListener bpl = (BasicPlayerListener) it.next();
            bpl.opened(m_dataSource, properties);
        }
        m_status = OPENED;
        notifyEvent(BasicPlayerEvent.OPENED, getEncodedStreamPosition(), -1, null);
    } catch (LineUnavailableException e) {
        throw new BasicPlayerException(e);
    } catch (UnsupportedAudioFileException e) {
        throw new BasicPlayerException(e);
    } catch (IOException e) {
        throw new BasicPlayerException(e);
    }
}
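The property-population pattern above can be distilled into a standalone helper. This is a sketch reusing the same keys, not part of the BasicMP3Player API; the guards exist because the getters return AudioSystem.NOT_SPECIFIED (-1) when a value is unknown:

import javax.sound.sampled.AudioFormat;
import java.util.HashMap;
import java.util.Map;

public final class FormatProperties {
    public static Map<String, Object> describe(AudioFormat format) {
        Map<String, Object> properties = new HashMap<>();
        if (format.getFrameRate() > 0)
            properties.put("audio.framerate.fps", format.getFrameRate());
        if (format.getFrameSize() > 0)
            properties.put("audio.framesize.bytes", format.getFrameSize());
        if (format.getSampleRate() > 0)
            properties.put("audio.samplerate.hz", format.getSampleRate());
        if (format.getSampleSizeInBits() > 0)
            properties.put("audio.samplesize.bits", format.getSampleSizeInBits());
        if (format.getChannels() > 0)
            properties.put("audio.channels", format.getChannels());
        return properties;
    }
}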
From source file:SimpleSoundPlayer.java
public boolean loadSound(Object object) {
    duration = 0.0;
    currentName = ((File) object).getName();
    try {
        currentSound = AudioSystem.getAudioInputStream((File) object);
    } catch (Exception e1) {
        try {
            FileInputStream is = new FileInputStream((File) object);
            currentSound = new BufferedInputStream(is, 1024);
        } catch (Exception e3) {
            e3.printStackTrace();
            currentSound = null;
            return false;
        }
    }

    // user pressed stop or changed tabs while loading
    if (sequencer == null) {
        currentSound = null;
        return false;
    }

    if (currentSound instanceof AudioInputStream) {
        try {
            AudioInputStream stream = (AudioInputStream) currentSound;
            AudioFormat format = stream.getFormat();

            /*
             * we can't yet open the device for ALAW/ULAW playback, convert
             * ALAW/ULAW to PCM
             */
            if ((format.getEncoding() == AudioFormat.Encoding.ULAW)
                    || (format.getEncoding() == AudioFormat.Encoding.ALAW)) {
                AudioFormat tmp = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, format.getSampleRate(),
                        format.getSampleSizeInBits() * 2, format.getChannels(), format.getFrameSize() * 2,
                        format.getFrameRate(), true);
                stream = AudioSystem.getAudioInputStream(tmp, stream);
                format = tmp;
            }
            DataLine.Info info = new DataLine.Info(Clip.class, stream.getFormat(),
                    ((int) stream.getFrameLength() * format.getFrameSize()));
            Clip clip = (Clip) AudioSystem.getLine(info);
            clip.addLineListener(this);
            clip.open(stream);
            currentSound = clip;
            // seekSlider.setMaximum((int) stream.getFrameLength());
        } catch (Exception ex) {
            ex.printStackTrace();
            currentSound = null;
            return false;
        }
    } else if (currentSound instanceof Sequence || currentSound instanceof BufferedInputStream) {
        try {
            sequencer.open();
            if (currentSound instanceof Sequence) {
                sequencer.setSequence((Sequence) currentSound);
            } else {
                sequencer.setSequence((BufferedInputStream) currentSound);
            }
        } catch (InvalidMidiDataException imde) {
            System.out.println("Unsupported audio file.");
            currentSound = null;
            return false;
        } catch (Exception ex) {
            ex.printStackTrace();
            currentSound = null;
            return false;
        }
    }

    duration = getDuration();
    return true;
}
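getDuration() itself is not shown, but for a Clip-backed stream the duration follows directly from the frame count and the frame rate. A rough sketch, assuming the stream and format variables from the method above:

long frames = stream.getFrameLength();
double seconds = frames / (double) format.getFrameRate(); // frames / (frames per second)
// The DataLine.Info above sizes the Clip buffer the same way: frame count * frame size in bytes
int bufferBytes = (int) (frames * format.getFrameSize());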
From source file:BasicPlayer.java
/**
 * Inits AudioInputStream and AudioFileFormat from the data source.
 *
 * @throws BasicPlayerException
 */
protected void initAudioInputStream() throws BasicPlayerException {
    try {
        reset();
        notifyEvent(BasicPlayerEvent.OPENING, getEncodedStreamPosition(), -1, m_dataSource);
        if (m_dataSource instanceof URL) {
            initAudioInputStream((URL) m_dataSource);
        } else if (m_dataSource instanceof File) {
            initAudioInputStream((File) m_dataSource);
        } else if (m_dataSource instanceof InputStream) {
            initAudioInputStream((InputStream) m_dataSource);
        }
        createLine();
        // Notify listeners with AudioFileFormat properties.
        Map properties = null;
        if (m_audioFileFormat instanceof TAudioFileFormat) {
            // Tritonus SPI compliant audio file format.
            properties = ((TAudioFileFormat) m_audioFileFormat).properties();
            // Clone the Map because it is not mutable.
            properties = deepCopy(properties);
        } else {
            properties = new HashMap();
        }
        // Add JavaSound properties.
        if (m_audioFileFormat.getByteLength() > 0)
            properties.put("audio.length.bytes", new Integer(m_audioFileFormat.getByteLength()));
        if (m_audioFileFormat.getFrameLength() > 0)
            properties.put("audio.length.frames", new Integer(m_audioFileFormat.getFrameLength()));
        if (m_audioFileFormat.getType() != null)
            properties.put("audio.type", (m_audioFileFormat.getType().toString()));
        // Audio format.
        AudioFormat audioFormat = m_audioFileFormat.getFormat();
        if (audioFormat.getFrameRate() > 0)
            properties.put("audio.framerate.fps", new Float(audioFormat.getFrameRate()));
        if (audioFormat.getFrameSize() > 0)
            properties.put("audio.framesize.bytes", new Integer(audioFormat.getFrameSize()));
        if (audioFormat.getSampleRate() > 0)
            properties.put("audio.samplerate.hz", new Float(audioFormat.getSampleRate()));
        if (audioFormat.getSampleSizeInBits() > 0)
            properties.put("audio.samplesize.bits", new Integer(audioFormat.getSampleSizeInBits()));
        if (audioFormat.getChannels() > 0)
            properties.put("audio.channels", new Integer(audioFormat.getChannels()));
        if (audioFormat instanceof TAudioFormat) {
            // Tritonus SPI compliant audio format.
            Map addproperties = ((TAudioFormat) audioFormat).properties();
            properties.putAll(addproperties);
        }
        // Add SourceDataLine
        properties.put("basicplayer.sourcedataline", m_line);
        Iterator it = m_listeners.iterator();
        while (it.hasNext()) {
            BasicPlayerListener bpl = (BasicPlayerListener) it.next();
            bpl.opened(m_dataSource, properties);
        }
        m_status = OPENED;
        notifyEvent(BasicPlayerEvent.OPENED, getEncodedStreamPosition(), -1, null);
    } catch (LineUnavailableException e) {
        throw new BasicPlayerException(e);
    } catch (UnsupportedAudioFileException e) {
        throw new BasicPlayerException(e);
    } catch (IOException e) {
        throw new BasicPlayerException(e);
    }
}
From source file:com.opensmile.maven.EmoRecService.java
private double getAudioDuration(String filename) {
    File file = new File(filename);
    AudioInputStream audioInputStream;
    float durationInSeconds = 0;
    try {
        audioInputStream = AudioSystem.getAudioInputStream(file);
        AudioFormat format = audioInputStream.getFormat();
        long audioFileLength = file.length();
        int frameSize = format.getFrameSize();
        float frameRate = format.getFrameRate();
        durationInSeconds = (audioFileLength / (frameSize * frameRate));
    } catch (UnsupportedAudioFileException | IOException e) {
        e.printStackTrace();
    }
    return durationInSeconds;
}
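Worked through with CD-quality values: 16-bit stereo at 44100 frames per second gives a 4-byte frame, so 1,764,000 bytes of audio is 1,764,000 / (4 * 44100) = 10 seconds. Note that file.length() also counts the container header, so the method above slightly overestimates; when the stream reports its frame length, dividing by the frame rate avoids that. A sketch under the same setup:

AudioInputStream in = AudioSystem.getAudioInputStream(file);
long frames = in.getFrameLength(); // audio frames only, no header bytes
if (frames != AudioSystem.NOT_SPECIFIED) {
    double seconds = frames / in.getFormat().getFrameRate();
    System.out.println(seconds + " s");
}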
From source file:com.arkatay.yada.codec.AudioEncoder.java
public void startModule(TargetDataLine inputLine, int audioFormatIndex) throws LineUnavailableException {
    capturedFrameSizeInNanos = 20L * millisToNanos;

    if (state != STATE_OFF)
        throw new IllegalStateException("Trying to re-start the encoder");

    // Check bounds
    AudioFormat[] audioFormats = getSupportedAudioFormats();
    if (audioFormatIndex < 0 || audioFormatIndex >= audioFormats.length)
        throw new LineUnavailableException("Audio format array out of bounds");

    // Get format
    AudioFormat audioFormat = audioFormats[audioFormatIndex];

    // Create line if created internally
    if (inputLine == null) {
        inputLine = createLine(audioFormat);
    }
    // Validate the audio format if external
    else if (!audioFormat.matches(inputLine.getFormat())) {
        throw new LineUnavailableException("Audio format not supported");
    }

    this.inputLine = inputLine;
    this.audioFormatIndex = audioFormatIndex;

    // Call init on the sub-class implementation
    init();

    // Calculate the size in bytes of one captured frame (20 ms of audio)
    capturedFrameSizeInBytes = (int) (audioFormat.getFrameRate() * audioFormat.getFrameSize()
            * capturedFrameSizeInNanos / (1000 * millisToNanos));
    diffTimeNanosLimit = diffTimeMillisLimit * millisToNanos;

    // Open the input line; the wanted buffer size is N times as big as the frame size
    inputLine.open(audioFormat, 4 * capturedFrameSizeInBytes);
    inputLineBufferSize = inputLine.getBufferSize();
    log.debug("Input line is open with buffer size " + inputLineBufferSize);

    // Create a buffer for the captured frame
    captureBuffer = new byte[capturedFrameSizeInBytes];

    // Go to state idle
    state = STATE_IDLE;

    // Start the capturing thread, it will block until startProcessing is called
    thread.start();
}
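The frame-size calculation above reduces to frame rate * frame size * 0.020 for a 20 ms capture frame. A quick check with assumed illustrative values (16-bit mono PCM at 48 kHz, so the frame size is 2 bytes):

AudioFormat fmt = new AudioFormat(48000f, 16, 1, true, false); // frame size = 2 bytes
int frameMillis = 20;
int bytesPerCapturedFrame = (int) (fmt.getFrameRate() * fmt.getFrameSize() * frameMillis / 1000);
System.out.println(bytesPerCapturedFrame); // 48000 * 2 * 0.020 = 1920 bytes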