Example usage for javax.sound.sampled AudioFormat AudioFormat

Introduction

On this page you can find example usage of the javax.sound.sampled AudioFormat constructor.

Prototype

public AudioFormat(Encoding encoding, float sampleRate, int sampleSizeInBits, int channels, int frameSize,
        float frameRate, boolean bigEndian) 

Document

Constructs an AudioFormat with the given parameters.
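
For instance, a CD-style format of 44100 Hz, 16-bit, stereo, signed little-endian PCM can be constructed as follows. For PCM the frame size is the channel count times the bytes per sample, and the frame rate equals the sample rate:

AudioFormat cdQuality = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, // encoding
        44100.0f, // sample rate in Hz
        16, // sample size in bits
        2, // channels (stereo)
        4, // frame size in bytes: 2 channels * 2 bytes per sample
        44100.0f, // frame rate: equals sample rate for PCM
        false); // little-endian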

Usage

From source file:com.andrewkroh.cicso.rtp.AudioFileStreamer.java

private static AudioFormat toAlawFormat(AudioFormat source) {
    Preconditions.checkNotNull(source, "Source AudioFormat cannot be null.");

    return new AudioFormat(AudioFormat.Encoding.ALAW, source.getSampleRate(), 8, // sample size in bits
            source.getChannels(), source.getChannels(), // frame size in bytes: 1 byte per channel
            source.getFrameRate(), source.isBigEndian());
}

From source file:com.andrewkroh.cicso.rtp.AudioFileStreamer.java

private static AudioFormat toPcm16Format(AudioFormat source, boolean bigEndianOutput) {
    Preconditions.checkNotNull(source, "Source AudioFormat cannot be null.");

    return new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, // 16-bit PCM is conventionally signed
            source.getSampleRate(), 16, // sample size in bits
            source.getChannels(), source.getChannels() * 2, // frame size in bytes: 2 bytes per channel
            source.getFrameRate(), bigEndianOutput);
}

From source file:com.andrewkroh.cicso.rtp.AudioFileStreamer.java

private static AudioFormat toUlawFormat(AudioFormat source) {
    Preconditions.checkNotNull(source, "Source AudioFormat cannot be null.");

    return new AudioFormat(AudioFormat.Encoding.ULAW, source.getSampleRate(), 8, // sample size in bits
            source.getChannels(), source.getChannels(), // frame size in bytes: 1 byte per channel
            source.getFrameRate(), source.isBigEndian());
}
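
A minimal sketch of how one of these target formats might be applied to transcode a stream (the input.wav path is a placeholder and exception handling is omitted; conversion support varies by platform, so it is checked first):

AudioInputStream source = AudioSystem.getAudioInputStream(new File("input.wav"));
AudioFormat ulaw = toUlawFormat(source.getFormat());
if (AudioSystem.isConversionSupported(ulaw, source.getFormat())) {
    // The returned stream yields 8-bit u-law frames at the source sample rate.
    AudioInputStream converted = AudioSystem.getAudioInputStream(ulaw, source);
}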

From source file:com.player.BasicMP3Player.java

/**
 * Inits a DataLine.
 * We check if the line supports Gain and Pan controls. From the AudioInputStream, i.e. from the
 * sound file, we fetch information about the format of the audio data. This information includes
 * the sampling frequency, the number of channels and the size of the samples, and is needed to
 * ask JavaSound for a suitable output line for this audio file. Furthermore, we have to give
 * JavaSound a hint about how big the internal buffer for the line should be. Here, we pass
 * AudioSystem.NOT_SPECIFIED, signaling that we don't care about the exact size; JavaSound
 * will use some default value for the buffer size.
 */
private void createLine() throws LineUnavailableException {
    log.info("Create Line");
    if (m_line == null) {
        AudioFormat sourceFormat = m_audioInputStream.getFormat();
        log.info("Create Line : Source format : " + sourceFormat.toString());
        AudioFormat targetFormat = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
                sourceFormat.getSampleRate(), 16, sourceFormat.getChannels(), sourceFormat.getChannels() * 2,
                sourceFormat.getSampleRate(), false);
        log.info("Create Line : Target format: " + targetFormat);
        // Keep a reference on encoded stream to progress notification.
        m_encodedaudioInputStream = m_audioInputStream;
        try {
            // Get total length in bytes of the encoded stream.
            encodedLength = m_encodedaudioInputStream.available();
        } catch (IOException e) {
            log.error("Cannot get m_encodedaudioInputStream.available()", e);
        }
        // Create decoded stream.
        m_audioInputStream = AudioSystem.getAudioInputStream(targetFormat, m_audioInputStream);
        AudioFormat audioFormat = m_audioInputStream.getFormat();
        DataLine.Info info = new DataLine.Info(SourceDataLine.class, audioFormat, AudioSystem.NOT_SPECIFIED);
        m_line = (SourceDataLine) AudioSystem.getLine(info);

        /*-- Display supported controls --*/
        Control[] c = m_line.getControls();
        for (int p = 0; p < c.length; p++) {
            log.debug("Controls : " + c[p].toString());
        }

        /*-- Is Gain Control supported ? --*/
        if (m_line.isControlSupported(FloatControl.Type.MASTER_GAIN)) {
            m_gainControl = (FloatControl) m_line.getControl(FloatControl.Type.MASTER_GAIN);
            log.info("Master Gain Control : [" + m_gainControl.getMinimum() + "," + m_gainControl.getMaximum()
                    + "] " + m_gainControl.getPrecision());
        }

        /*-- Is Pan control supported ? --*/
        if (m_line.isControlSupported(FloatControl.Type.PAN)) {
            m_panControl = (FloatControl) m_line.getControl(FloatControl.Type.PAN);
            log.info("Pan Control : [" + m_panControl.getMinimum() + "," + m_panControl.getMaximum() + "] "
                    + m_panControl.getPrecision());
        }
    }
}
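
Once m_gainControl is populated as above, the volume could be adjusted in decibels. A minimal sketch (the -6.0f attenuation is an arbitrary example), clamping to the range the control reports:

if (m_gainControl != null) {
    float gainDb = -6.0f; // example: attenuate by 6 dB
    gainDb = Math.max(m_gainControl.getMinimum(), Math.min(m_gainControl.getMaximum(), gainDb));
    m_gainControl.setValue(gainDb);
}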

From source file:BasicPlayer.java

/**
 * Inits a DataLine.
 *
 * We check if the line supports Gain and Pan controls.
 *
 * From the AudioInputStream, i.e. from the sound file, we
 * fetch information about the format of the audio data. This
 * information includes the sampling frequency, the number of
 * channels and the size of the samples, and is needed to ask
 * JavaSound for a suitable output line for this audio file.
 * Furthermore, we have to give JavaSound a hint about how
 * big the internal buffer for the line should be. Here,
 * we pass AudioSystem.NOT_SPECIFIED, signaling that we don't
 * care about the exact size; JavaSound will use some default
 * value for the buffer size.
 */
protected void createLine() throws LineUnavailableException {
    log.info("Create Line");
    if (m_line == null) {
        AudioFormat sourceFormat = m_audioInputStream.getFormat();
        log.info("Create Line : Source format : " + sourceFormat.toString());
        int nSampleSizeInBits = sourceFormat.getSampleSizeInBits();
        if (nSampleSizeInBits <= 0)
            nSampleSizeInBits = 16;
        if ((sourceFormat.getEncoding() == AudioFormat.Encoding.ULAW)
                || (sourceFormat.getEncoding() == AudioFormat.Encoding.ALAW))
            nSampleSizeInBits = 16;
        if (nSampleSizeInBits != 8)
            nSampleSizeInBits = 16;
        AudioFormat targetFormat = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
                sourceFormat.getSampleRate(), nSampleSizeInBits, sourceFormat.getChannels(),
                sourceFormat.getChannels() * (nSampleSizeInBits / 8), sourceFormat.getSampleRate(), false);
        log.info("Create Line : Target format: " + targetFormat);
        // Keep a reference on encoded stream to progress notification.
        m_encodedaudioInputStream = m_audioInputStream;
        try {
            // Get total length in bytes of the encoded stream.
            encodedLength = m_encodedaudioInputStream.available();
        } catch (IOException e) {
            log.error("Cannot get m_encodedaudioInputStream.available()", e);
        }
        // Create decoded stream.
        m_audioInputStream = AudioSystem.getAudioInputStream(targetFormat, m_audioInputStream);
        AudioFormat audioFormat = m_audioInputStream.getFormat();
        DataLine.Info info = new DataLine.Info(SourceDataLine.class, audioFormat, AudioSystem.NOT_SPECIFIED);
        Mixer mixer = getMixer(m_mixerName);
        if (mixer != null) {
            log.info("Mixer : " + mixer.getMixerInfo().toString());
            m_line = (SourceDataLine) mixer.getLine(info);
        } else {
            m_line = (SourceDataLine) AudioSystem.getLine(info);
            m_mixerName = null;
        }
        log.info("Line : " + m_line.toString());
        log.debug("Line Info : " + m_line.getLineInfo().toString());
        log.debug("Line AudioFormat: " + m_line.getFormat().toString());
    }
}

From source file:com.skratchdot.electribe.model.esx.impl.SampleImpl.java

/**
 * @param file the audio file to load the sample from
 * @throws EsxException
 */
protected SampleImpl(File file) throws EsxException {
    super();
    init();

    // Declare our streams and formats
    AudioFormat audioFormatEncoded;
    AudioFormat audioFormatDecoded;
    AudioInputStream audioInputStreamEncoded;
    AudioInputStream audioInputStreamDecoded;

    try {
        // Initialize our streams and formats
        audioInputStreamEncoded = AudioSystem.getAudioInputStream(file);
        audioFormatEncoded = audioInputStreamEncoded.getFormat();
        audioFormatDecoded = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
                audioFormatEncoded.getSampleRate(), 16, audioFormatEncoded.getChannels(),
                audioFormatEncoded.getChannels() * 2, audioFormatEncoded.getSampleRate(), true);
        audioInputStreamDecoded = AudioSystem.getAudioInputStream(audioFormatDecoded, audioInputStreamEncoded);

        // We have a decoded stereo audio stream
        // Now we need to get the stream info into a list we can manipulate
        byte[] audioData = new byte[4096];
        int nBytesRead = 0;
        long nTotalBytesRead = 0;
        List<Byte> audioDataListChannel1 = new ArrayList<Byte>();
        List<Byte> audioDataListChannel2 = new ArrayList<Byte>();
        boolean isAudioDataStereo = false;

        // Set isAudioDataStereo
        if (audioFormatEncoded.getChannels() == 1) {
            isAudioDataStereo = false;
        } else if (audioFormatEncoded.getChannels() == 2) {
            isAudioDataStereo = true;
        } else {
            throw new EsxException("Sample has too many channels: " + file.getAbsolutePath());
        }

        // Convert stream to list. This needs to be optimized. Converting
        // a byte at a time is probably too slow...
        while (nBytesRead >= 0) {
            nBytesRead = audioInputStreamDecoded.read(audioData, 0, audioData.length);

            // If we aren't at the end of the stream
            if (nBytesRead > 0) {
                for (int i = 0; i < nBytesRead; i++) {
                    // MONO
                    if (!isAudioDataStereo) {
                        audioDataListChannel1.add(audioData[i]);
                        audioDataListChannel2.add(audioData[i]);
                    }
                    // STEREO (LEFT)
                    else if (nTotalBytesRead % 4 < 2) {
                        audioDataListChannel1.add(audioData[i]);
                    }
                    // STEREO (RIGHT)
                    else {
                        audioDataListChannel2.add(audioData[i]);
                    }

                    // Update the total amount of bytes we've read
                    nTotalBytesRead++;
                }
            }

            // Throw Exception if sample is too big
            if (nTotalBytesRead > EsxUtil.MAX_SAMPLE_MEM_IN_BYTES) {
                throw new EsxException("Sample is too big: " + file.getAbsolutePath());
            }
        }

        // Set member variables
        int frameLength = audioDataListChannel1.size() / 2;
        this.setNumberOfSampleFrames(frameLength);
        this.setEnd(frameLength - 1);
        this.setLoopStart(frameLength - 1);
        this.setSampleRate((int) audioFormatEncoded.getSampleRate());
        this.setAudioDataChannel1(EsxUtil.listToByteArray(audioDataListChannel1));
        this.setAudioDataChannel2(EsxUtil.listToByteArray(audioDataListChannel2));
        this.setStereoOriginal(isAudioDataStereo);

        // Set calculated Sample Tune (from Sample Rate)
        SampleTune newSampleTune = EsxFactory.eINSTANCE.createSampleTune();
        float newFloat = newSampleTune.calculateSampleTuneFromSampleRate(this.getSampleRate());
        newSampleTune.setValue(newFloat);
        this.setSampleTune(newSampleTune);

        // Set name
        String newSampleName = StringUtils.left(StringUtils.trim(file.getName()), 8);
        this.setName(newSampleName);

        // Attempt to set loopStart and End from .wav smpl chunk
        if (file.getAbsolutePath().toLowerCase().endsWith(".wav")) {
            try {
                RIFFWave riffWave = WavFactory.eINSTANCE.createRIFFWave(file);
                ChunkSampler chunkSampler = (ChunkSampler) riffWave
                        .getFirstChunkByEClass(WavPackage.Literals.CHUNK_SAMPLER);
                if (chunkSampler != null && chunkSampler.getSampleLoops().size() > 0) {
                    SampleLoop sampleLoop = chunkSampler.getSampleLoops().get(0);
                    Long tempLoopStart = sampleLoop.getStart();
                    Long tempLoopEnd = sampleLoop.getEnd();
                    if (tempLoopStart < this.getEnd() && tempLoopStart >= 0) {
                        this.setLoopStart(tempLoopStart.intValue());
                    }
                    if (tempLoopEnd < this.getEnd() && tempLoopEnd > this.getLoopStart()) {
                        this.setEnd(tempLoopEnd.intValue());
                    }
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
        }

    } catch (UnsupportedAudioFileException e) {
        e.printStackTrace();
        throw new EsxException("Invalid audio file: " + file.getAbsolutePath());
    } catch (IOException e) {
        e.printStackTrace();
        throw new EsxException("Invalid audio file: " + file.getAbsolutePath());
    } catch (Exception e) {
        e.printStackTrace();
        throw new EsxException("Invalid audio file: " + file.getAbsolutePath());
    }
}
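
The in-code comment above notes that converting a byte at a time "is probably too slow". A sketch of a bulk alternative for the stereo case (mono would write each sample to both buffers); it relies on AudioInputStream.read returning whole frames, so each 4-byte stereo frame (two left bytes, then two right bytes) arrives intact:

ByteArrayOutputStream ch1 = new ByteArrayOutputStream();
ByteArrayOutputStream ch2 = new ByteArrayOutputStream();
byte[] buf = new byte[4096];
int n;
while ((n = audioInputStreamDecoded.read(buf, 0, buf.length)) > 0) {
    for (int i = 0; i + 3 < n; i += 4) { // one 16-bit stereo frame = 4 bytes
        ch1.write(buf, i, 2); // left sample
        ch2.write(buf, i + 2, 2); // right sample
    }
}
// ch1.toByteArray() and ch2.toByteArray() replace the per-byte List<Byte> copies.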

From source file:sx.blah.discord.api.internal.DiscordUtils.java

/**
 * Converts an {@link AudioInputStream} to 48000 Hz, 16-bit, signed, big-endian PCM,
 * keeping the source's channel count.
 *
 * @param stream The original stream.
 * @return The PCM-encoded stream.
 */
public static AudioInputStream getPCMStream(AudioInputStream stream) {
    AudioFormat baseFormat = stream.getFormat();

    //Converts first to PCM data. If the data is already PCM data, this will not change anything.
    AudioFormat toPCM = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, baseFormat.getSampleRate(),
            //AudioConnection.OPUS_SAMPLE_RATE,
            baseFormat.getSampleSizeInBits() != -1 ? baseFormat.getSampleSizeInBits() : 16,
            baseFormat.getChannels(),
            //If we are given a frame size, use it. Otherwise, assume 16 bits (2 bytes) per channel.
            baseFormat.getFrameSize() != -1 ? baseFormat.getFrameSize() : 2 * baseFormat.getChannels(),
            baseFormat.getFrameRate() != -1 ? baseFormat.getFrameRate() : baseFormat.getSampleRate(),
            baseFormat.isBigEndian());
    AudioInputStream pcmStream = AudioSystem.getAudioInputStream(toPCM, stream);

    //Then resamples to a sample rate of 48000 Hz and ensures that the data is big-endian.
    AudioFormat audioFormat = new AudioFormat(toPCM.getEncoding(), OpusUtil.OPUS_SAMPLE_RATE,
            toPCM.getSampleSizeInBits(), toPCM.getChannels(), toPCM.getFrameSize(), toPCM.getFrameRate(), true);

    return AudioSystem.getAudioInputStream(audioFormat, pcmStream);
}
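
A hypothetical caller could then read fixed-size chunks from the converted stream, for example 20 ms of audio per read for an Opus encoder (clip.wav is a placeholder path; exception handling omitted):

AudioInputStream pcm = getPCMStream(AudioSystem.getAudioInputStream(new File("clip.wav")));
AudioFormat fmt = pcm.getFormat();
byte[] chunk = new byte[fmt.getFrameSize() * (int) (fmt.getFrameRate() / 50)]; // 20 ms of frames
int read;
while ((read = pcm.read(chunk, 0, chunk.length)) > 0) {
    // hand the first 'read' bytes of 'chunk' to the Opus encoder
}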