Example usage for javax.sound.sampled AudioFormat getSampleRate

Introduction

This page lists usage examples for the javax.sound.sampled AudioFormat.getSampleRate() method, collected from open source projects.

Prototype

public float getSampleRate() 

Document

Obtains the sample rate.
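
A minimal sketch of querying the sample rate of an audio file (the file name is a placeholder; AudioSystem.getAudioFileFormat can throw UnsupportedAudioFileException or IOException):

AudioFileFormat fileFormat = AudioSystem.getAudioFileFormat(new File("clip.wav"));
float sampleRate = fileFormat.getFormat().getSampleRate();
// AudioSystem.NOT_SPECIFIED is returned when the rate is unknown.
System.out.println("Sample rate: " + sampleRate + " Hz");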

Usage

From source file:com.skratchdot.electribe.model.esx.impl.SampleImpl.java

/**
 * @param file
 * @throws EsxException
 */
protected SampleImpl(File file) throws EsxException {
    super();
    init();

    // Declare our streams and formats
    AudioFormat audioFormatEncoded;
    AudioFormat audioFormatDecoded;
    AudioInputStream audioInputStreamEncoded;
    AudioInputStream audioInputStreamDecoded;

    try {
        // Initialize our streams and formats
        audioInputStreamEncoded = AudioSystem.getAudioInputStream(file);
        audioFormatEncoded = audioInputStreamEncoded.getFormat();
        audioFormatDecoded = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
                audioFormatEncoded.getSampleRate(), 16, audioFormatEncoded.getChannels(),
                audioFormatEncoded.getChannels() * 2, audioFormatEncoded.getSampleRate(), true);
        audioInputStreamDecoded = AudioSystem.getAudioInputStream(audioFormatDecoded, audioInputStreamEncoded);

        // We have a decoded stereo audio stream
        // Now we need to get the stream info into a list we can manipulate
        byte[] audioData = new byte[4096];
        int nBytesRead = 0;
        long nTotalBytesRead = 0;
        List<Byte> audioDataListChannel1 = new ArrayList<Byte>();
        List<Byte> audioDataListChannel2 = new ArrayList<Byte>();
        boolean isAudioDataStereo = false;

        // Set isAudioDataStereo
        if (audioFormatEncoded.getChannels() == 1) {
            isAudioDataStereo = false;
        } else if (audioFormatEncoded.getChannels() == 2) {
            isAudioDataStereo = true;
        } else {
            throw new EsxException("Sample has too many channels: " + file.getAbsolutePath());
        }

        // Convert stream to list. This needs to be optimized. Converting
        // a byte at a time is probably too slow...
        while (nBytesRead >= 0) {
            nBytesRead = audioInputStreamDecoded.read(audioData, 0, audioData.length);

            // If we aren't at the end of the stream
            if (nBytesRead > 0) {
                for (int i = 0; i < nBytesRead; i++) {
                    // MONO
                    if (!isAudioDataStereo) {
                        audioDataListChannel1.add(audioData[i]);
                        audioDataListChannel2.add(audioData[i]);
                    }
                    // STEREO (LEFT)
                    else if (nTotalBytesRead % 4 < 2) {
                        audioDataListChannel1.add(audioData[i]);
                    }
                    // STEREO (RIGHT)
                    else {
                        audioDataListChannel2.add(audioData[i]);
                    }

                    // Update the total amount of bytes we've read
                    nTotalBytesRead++;
                }
            }

            // Throw Exception if sample is too big
            if (nTotalBytesRead > EsxUtil.MAX_SAMPLE_MEM_IN_BYTES) {
                throw new EsxException("Sample is too big: " + file.getAbsolutePath());
            }
        }

        // Set member variables
        int frameLength = audioDataListChannel1.size() / 2;
        this.setNumberOfSampleFrames(frameLength);
        this.setEnd(frameLength - 1);
        this.setLoopStart(frameLength - 1);
        this.setSampleRate((int) audioFormatEncoded.getSampleRate());
        this.setAudioDataChannel1(EsxUtil.listToByteArray(audioDataListChannel1));
        this.setAudioDataChannel2(EsxUtil.listToByteArray(audioDataListChannel2));
        this.setStereoOriginal(isAudioDataStereo);

        // Set calculated Sample Tune (from Sample Rate)
        SampleTune newSampleTune = EsxFactory.eINSTANCE.createSampleTune();
        float newFloat = newSampleTune.calculateSampleTuneFromSampleRate(this.getSampleRate());
        newSampleTune.setValue(newFloat);
        this.setSampleTune(newSampleTune);

        // Set name
        String newSampleName = new String();
        newSampleName = StringUtils.left(StringUtils.trim(file.getName()), 8);
        this.setName(newSampleName);

        // Attempt to set loopStart and End from .wav smpl chunk
        if (file.getAbsolutePath().toLowerCase().endsWith(".wav")) {
            try {
                RIFFWave riffWave = WavFactory.eINSTANCE.createRIFFWave(file);
                ChunkSampler chunkSampler = (ChunkSampler) riffWave
                        .getFirstChunkByEClass(WavPackage.Literals.CHUNK_SAMPLER);
                if (chunkSampler != null && chunkSampler.getSampleLoops().size() > 0) {
                    SampleLoop sampleLoop = chunkSampler.getSampleLoops().get(0);
                    Long tempLoopStart = sampleLoop.getStart();
                    Long tempLoopEnd = sampleLoop.getEnd();
                    if (tempLoopStart < this.getEnd() && tempLoopStart >= 0) {
                        this.setLoopStart(tempLoopStart.intValue());
                    }
                    if (tempLoopEnd < this.getEnd() && tempLoopEnd > this.getLoopStart()) {
                        this.setEnd(tempLoopEnd.intValue());
                    }
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
        }

    } catch (UnsupportedAudioFileException e) {
        e.printStackTrace();
        throw new EsxException("Invalid audio file: " + file.getAbsolutePath());
    } catch (IOException e) {
        e.printStackTrace();
        throw new EsxException("Invalid audio file: " + file.getAbsolutePath());
    } catch (Exception e) {
        e.printStackTrace();
        throw new EsxException("Invalid audio file: " + file.getAbsolutePath());
    }
}
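
The comment in the read loop above notes that boxing one byte at a time into a List<Byte> is slow. A hedged sketch of a faster variant that collects each channel in a java.io.ByteArrayOutputStream (the helper name splitChannels is hypothetical, not part of SampleImpl):

// Splits a decoded 16-bit stream into two per-channel byte arrays without boxing each byte.
static byte[][] splitChannels(AudioInputStream decoded, boolean stereo) throws IOException {
    ByteArrayOutputStream left = new ByteArrayOutputStream();
    ByteArrayOutputStream right = new ByteArrayOutputStream();
    byte[] buffer = new byte[4096];
    long total = 0;
    int read;
    while ((read = decoded.read(buffer, 0, buffer.length)) > 0) {
        for (int i = 0; i < read; i++, total++) {
            if (!stereo) {
                left.write(buffer[i]);   // mono: duplicate the byte into both channels
                right.write(buffer[i]);
            } else if (total % 4 < 2) {
                left.write(buffer[i]);   // bytes 0-1 of each 4-byte stereo frame: left
            } else {
                right.write(buffer[i]);  // bytes 2-3 of each 4-byte stereo frame: right
            }
        }
    }
    return new byte[][] { left.toByteArray(), right.toByteArray() };
}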

From source file:org.apache.tika.parser.audio.AudioParser.java

public void parse(InputStream stream, ContentHandler handler, Metadata metadata, ParseContext context)
        throws IOException, SAXException, TikaException {
    // AudioSystem expects the stream to support the mark feature
    if (!stream.markSupported()) {
        stream = new BufferedInputStream(stream);
    }
    stream = new SkipFullyInputStream(stream);
    try {
        AudioFileFormat fileFormat = AudioSystem.getAudioFileFormat(stream);
        Type type = fileFormat.getType();
        if (type == Type.AIFC || type == Type.AIFF) {
            metadata.set(Metadata.CONTENT_TYPE, "audio/x-aiff");
        } else if (type == Type.AU || type == Type.SND) {
            metadata.set(Metadata.CONTENT_TYPE, "audio/basic");
        } else if (type == Type.WAVE) {
            metadata.set(Metadata.CONTENT_TYPE, "audio/vnd.wave");
        }

        AudioFormat audioFormat = fileFormat.getFormat();
        int channels = audioFormat.getChannels();
        if (channels != AudioSystem.NOT_SPECIFIED) {
            metadata.set("channels", String.valueOf(channels));
            // TODO: Use XMPDM.TRACKS? (see also frame rate in AudioFormat)
        }
        float rate = audioFormat.getSampleRate();
        if (rate != AudioSystem.NOT_SPECIFIED) {
            metadata.set("samplerate", String.valueOf(rate));
            metadata.set(XMPDM.AUDIO_SAMPLE_RATE, Integer.toString((int) rate));
        }
        int bits = audioFormat.getSampleSizeInBits();
        if (bits != AudioSystem.NOT_SPECIFIED) {
            metadata.set("bits", String.valueOf(bits));
            if (bits == 8) {
                metadata.set(XMPDM.AUDIO_SAMPLE_TYPE, "8Int");
            } else if (bits == 16) {
                metadata.set(XMPDM.AUDIO_SAMPLE_TYPE, "16Int");
            } else if (bits == 32) {
                metadata.set(XMPDM.AUDIO_SAMPLE_TYPE, "32Int");
            }
        }
        metadata.set("encoding", audioFormat.getEncoding().toString());

        // Javadoc suggests that some of the following properties might
        // be available, but I had no success in finding any:

        // "duration" Long playback duration of the file in microseconds
        // "author" String name of the author of this file
        // "title" String title of this file
        // "copyright" String copyright message
        // "date" Date date of the recording or release
        // "comment" String an arbitrary text

        addMetadata(metadata, fileFormat.properties());
        addMetadata(metadata, audioFormat.properties());
    } catch (UnsupportedAudioFileException e) {
        // There is no way to know whether this exception was
        // caused by the document being corrupted or by the format
        // just being unsupported. So we do nothing.
    }

    XHTMLContentHandler xhtml = new XHTMLContentHandler(handler, metadata);
    xhtml.startDocument();
    xhtml.endDocument();
}
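
A brief usage sketch of this parser (the file name is a placeholder, org.xml.sax.helpers.DefaultHandler stands in for a real ContentHandler, and the surrounding method is assumed to declare the parser's checked exceptions):

AudioParser parser = new AudioParser();
Metadata metadata = new Metadata();
try (InputStream stream = new FileInputStream("clip.wav")) {
    parser.parse(stream, new DefaultHandler(), metadata, new ParseContext());
}
System.out.println(metadata.get("samplerate") + " Hz, " + metadata.get("channels") + " channel(s)");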

From source file:org.jcodec.codecs.wav.WavHeader.java

public static WavHeader create(AudioFormat af, int size) {
    WavHeader w = emptyWavHeader();
    w.dataSize = size;
    FmtChunk fmt = new FmtChunk();
    int bitsPerSample = af.getSampleSizeInBits();
    int bytesPerSample = bitsPerSample / 8;
    int sampleRate = (int) af.getSampleRate();
    w.fmt.bitsPerSample = (short) bitsPerSample;
    w.fmt.blockAlign = (short) (af.getFrameSize());
    w.fmt.byteRate = (int) af.getFrameRate() * af.getFrameSize();
    w.fmt.numChannels = (short) af.getChannels();
    w.fmt.sampleRate = (int) af.getSampleRate();
    return w;
}
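
A hedged usage sketch for create: 16-bit, 44.1 kHz stereo little-endian PCM has a 4-byte frame, so the resulting byteRate is 44100 * 4 = 176400 bytes per second (the one-second dataSize is illustrative):

AudioFormat cdFormat = new AudioFormat(44100f, 16, 2, true, false); // signed, little-endian PCM
int dataSize = 44100 * 4;                                           // one second: 44100 frames * 4 bytes
WavHeader header = WavHeader.create(cdFormat, dataSize);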

From source file:org.snitko.app.playback.PlaySound.java

private AudioFormat getOutFormat(AudioFormat inFormat) {
    final int ch = inFormat.getChannels();
    final float rate = inFormat.getSampleRate();
    return new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, rate, 16, ch, ch * 2, rate, false);
}
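
A sketch of how such a decoded format is commonly fed to a playback line (not taken from PlaySound itself; the file name is a placeholder, a service provider for the compressed format is assumed to be on the classpath, and the surrounding method is assumed to declare the checked audio exceptions):

AudioInputStream in = AudioSystem.getAudioInputStream(new File("music.ogg"));
AudioFormat outFormat = getOutFormat(in.getFormat());
try (AudioInputStream pcm = AudioSystem.getAudioInputStream(outFormat, in);
        SourceDataLine line = AudioSystem.getSourceDataLine(outFormat)) {
    line.open(outFormat);
    line.start();
    byte[] buffer = new byte[4096];
    int n;
    while ((n = pcm.read(buffer, 0, buffer.length)) != -1) {
        line.write(buffer, 0, n);
    }
    line.drain(); // block until the queued audio has finished playing
}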

From source file:org.yccheok.jstock.chat.Utils.java

public static void playSound(final Sound sound) {
    if (sounds.size() == 0) {
        for (Sound s : Sound.values()) {
            AudioInputStream stream = null;
            Clip clip = null;

            try {
                switch (s) {
                case ALERT:
                    stream = AudioSystem
                            .getAudioInputStream(new File(Utils.getSoundsDirectory() + "alert.wav"));
                    break;
                case LOGIN:
                    stream = AudioSystem
                            .getAudioInputStream(new File(Utils.getSoundsDirectory() + "login.wav"));
                    break;
                case LOGOUT:
                    stream = AudioSystem
                            .getAudioInputStream(new File(Utils.getSoundsDirectory() + "logout.wav"));
                    break;
                case RECEIVE:
                    stream = AudioSystem
                            .getAudioInputStream(new File(Utils.getSoundsDirectory() + "receive.wav"));
                    break;
                case SEND:
                    stream = AudioSystem.getAudioInputStream(new File(Utils.getSoundsDirectory() + "send.wav"));
                    break;
                default:
                    throw new java.lang.IllegalArgumentException("Missing case " + sound);
                }

                // At present, ALAW and ULAW encodings must be converted
                // to PCM_SIGNED before they can be played
                AudioFormat format = stream.getFormat();
                if (format.getEncoding() != AudioFormat.Encoding.PCM_SIGNED) {
                    format = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, format.getSampleRate(),
                            format.getSampleSizeInBits() * 2, format.getChannels(), format.getFrameSize() * 2,
                            format.getFrameRate(), true); // big endian
                    stream = AudioSystem.getAudioInputStream(format, stream);
                }

                // Create the clip
                DataLine.Info info = new DataLine.Info(Clip.class, stream.getFormat(),
                        ((int) stream.getFrameLength() * format.getFrameSize()));
                clip = (Clip) AudioSystem.getLine(info);

                // This method does not return until the audio file is completely loaded
                clip.open(stream);
                clip.drain();
                sounds.put(s, clip);
            } catch (MalformedURLException e) {
                log.error(null, e);
            } catch (IOException e) {
                log.error(null, e);
            } catch (LineUnavailableException e) {
                log.error(null, e);
            } catch (UnsupportedAudioFileException e) {
                log.error(null, e);
            } finally {
            }
        }

    }
    soundPool.execute(new Runnable() {
        @Override
        public void run() {
            Clip clip = sounds.get(sound);

            if (clip == null) {
                return;
            }

            clip.stop();
            clip.flush();
            clip.setFramePosition(0);
            clip.loop(0);
            // Wait for the sound to finish.
            //while (clip.isRunning()) {
            //    try {
            //        Thread.sleep(1);
            //    } catch (InterruptedException ex) {
            //        log.error(null, ex);
            //    }
            //}
        }
    });
}

From source file:sec_algo.aud_sec.java

public BufferedWriter getAudioStream() {
    FileInputStream fin = null;
    BufferedWriter audstream = null;

    try {
        fin = new FileInputStream(this.file);
        //           audstream = new BufferedWriter(new FileWriter(returnFileName()+"_ex."+returnFileExt()));
        //           byte contents[] = new byte[100];
        //           while(fin.read(contents)!= -1){
        //               System.out.println("reading & writing from file");
        //               for(byte b : contents)
        //                   for(int x = 0; x < 8; x++)
        //                       audstream.write(b>>x & 1);
        //           }
        //           System.out.println("Finished!");
        //           System.out.println("audstream contents: " + audstream.toString());
        byte[] header = new byte[8];
        fin.read(header);
        fin.close();
        //           System.out.println("header bytes: " + Arrays.toString(header));
        ArrayList<String> bitstring = new ArrayList<String>();
        for (int i = 0; i < header.length; i++)
            bitstring.add(String.format("%8s", Integer.toBinaryString(header[i] & 0xFF)).replace(' ', '0'));
        System.out.print("bit input: [/");
        for (int i = 0; i < bitstring.size(); i++) {
            System.out.print(bitstring.get(i) + " ");
        }
        System.out.println("]/");

        System.out.println(bitstring.get(0) + " " + bitstring.get(1) + " " + bitstring.get(2));
        System.out.println("Bitrate index: " + bitstring.get(2).substring(0, 4));

        AudioInputStream in = AudioSystem.getAudioInputStream(this.file);
        AudioInputStream din = null;
        AudioFormat baseFormat = in.getFormat();
        AudioFormat decodedFormat = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, baseFormat.getSampleRate(),
                getBitrate(bitstring.get(2).substring(0, 4)), baseFormat.getChannels(),
                baseFormat.getChannels() * 2, baseFormat.getSampleRate(), false);
        din = AudioSystem.getAudioInputStream(decodedFormat, in);
        int size = din.available();
        byte[] bytaud = new byte[size];
        din.read(bytaud);
        bitstring = new ArrayList<String>();
        for (int i = 0; i < header.length; i++)
            bitstring.add(String.format("%8s", Integer.toBinaryString(header[i] & 0xFF)).replace(' ', '0'));
        System.out.print("bit input: [/");
        for (int i = 0; i < bitstring.size(); i++) {
            System.out.print(bitstring.get(i) + " ");
        }
        System.out.println("]/");
        in.close();
        din.close();
    } catch (Exception e) {
        e.printStackTrace();
    }
    return audstream;
}

From source file:sx.blah.discord.api.internal.DiscordUtils.java

/**
 * Converts an {@link AudioInputStream} to 48000Hz 16 bit stereo signed Big Endian PCM format.
 *
 * @param stream The original stream.
 * @return The PCM encoded stream.
 */
public static AudioInputStream getPCMStream(AudioInputStream stream) {
    AudioFormat baseFormat = stream.getFormat();

    //Converts first to PCM data. If the data is already PCM data, this will not change anything.
    AudioFormat toPCM = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, baseFormat.getSampleRate(),
            //AudioConnection.OPUS_SAMPLE_RATE,
            baseFormat.getSampleSizeInBits() != -1 ? baseFormat.getSampleSizeInBits() : 16,
            baseFormat.getChannels(),
            //If we are given a frame size, use it. Otherwise, assume 16 bits (2 8bit shorts) per channel.
            baseFormat.getFrameSize() != -1 ? baseFormat.getFrameSize() : 2 * baseFormat.getChannels(),
            baseFormat.getFrameRate() != -1 ? baseFormat.getFrameRate() : baseFormat.getSampleRate(),
            baseFormat.isBigEndian());
    AudioInputStream pcmStream = AudioSystem.getAudioInputStream(toPCM, stream);

    //Then resamples to a sample rate of 48000hz and ensures that data is Big Endian.
    AudioFormat audioFormat = new AudioFormat(toPCM.getEncoding(), OpusUtil.OPUS_SAMPLE_RATE,
            toPCM.getSampleSizeInBits(), toPCM.getChannels(), toPCM.getFrameSize(), toPCM.getFrameRate(), true);

    return AudioSystem.getAudioInputStream(audioFormat, pcmStream);
}
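
A brief usage sketch (the input file is a placeholder; decoding a compressed source such as MP3 assumes a matching service provider is on the classpath):

AudioInputStream source = AudioSystem.getAudioInputStream(new File("voice.mp3"));
AudioInputStream pcm = DiscordUtils.getPCMStream(source);
// pcm now delivers 48000 Hz, big-endian, signed PCM suitable for Opus encoding.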

From source file:xtrememp.tag.GenericInfo.java

/**
 * Load info from AudioFileFormat.
 *
 * @param aff
 * @throws javax.sound.sampled.UnsupportedAudioFileException
 */
protected void loadInfo(AudioFileFormat aff) throws UnsupportedAudioFileException {
    encodingType = aff.getType().toString();
    AudioFormat audioFormat = aff.getFormat();
    channelsAsNumber = audioFormat.getChannels();
    sampleRateAsNumber = (int) audioFormat.getSampleRate();
    bitspersample = audioFormat.getSampleSizeInBits();
    framesize = audioFormat.getFrameSize();
    bitRateAsNumber = Math.round(bitspersample * sampleRateAsNumber * channelsAsNumber / 1000);
}
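
The bit-rate formula above assumes uncompressed PCM: for 16-bit, 44100 Hz stereo audio it yields 16 * 44100 * 2 / 1000 = 1411 kbit/s. For compressed formats, where getSampleSizeInBits() may return AudioSystem.NOT_SPECIFIED, the encoded bit rate would have to be obtained elsewhere, for example from the format's properties map when the decoder provides it.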