Example usage for javax.sound.sampled AudioFormat getSampleRate

List of usage examples for javax.sound.sampled AudioFormat getSampleRate

Introduction

On this page you can find an example of how to use javax.sound.sampled AudioFormat getSampleRate.

Prototype

public float getSampleRate() 

Source Link

Document

Obtains the sample rate.

Usage

From source file:Main.java

/**
 * Loads an audio file, transcoding to signed big-endian PCM when necessary,
 * and plays it through a {@link Clip}, blocking until playback completes.
 *
 * @param argv unused command line arguments
 * @throws Exception if the file cannot be read, decoded, or played
 */
public static void main(String[] argv) throws Exception {
    AudioInputStream stream = AudioSystem.getAudioInputStream(new File("audiofile"));

    // From URL:
    // stream = AudioSystem.getAudioInputStream(new URL("http://hostname/audiofile"));

    AudioFormat format = stream.getFormat();
    if (format.getEncoding() != AudioFormat.Encoding.PCM_SIGNED) {
        // Transcode to 16-bit signed big-endian PCM.
        // NOTE(review): doubling the sample and frame size assumes an 8-bit
        // source such as ULAW/ALAW — confirm for other encodings.
        format = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, format.getSampleRate(),
                format.getSampleSizeInBits() * 2, format.getChannels(), format.getFrameSize() * 2,
                format.getFrameRate(), true); // big endian
        stream = AudioSystem.getAudioInputStream(format, stream);
    }

    // FIX: cast AFTER multiplying. The original cast the long frame length to
    // int first, so the buffer size was computed from a possibly-truncated value.
    DataLine.Info info = new DataLine.Info(Clip.class, stream.getFormat(),
            (int) (stream.getFrameLength() * format.getFrameSize()));
    Clip clip = (Clip) AudioSystem.getLine(info);

    clip.open(stream);
    clip.start();

    // FIX: block until playback finishes, then release the line and the
    // stream. The original returned immediately after start(), so the JVM
    // could exit before any audio was heard, and nothing was ever closed.
    clip.drain();
    clip.close();
    stream.close();
}

From source file:Main.java

/**
 * Streams an audio file to the speakers through a {@link SourceDataLine},
 * transcoding to signed big-endian PCM when the source encoding is not PCM.
 *
 * @param argv unused command line arguments
 * @throws Exception if the file cannot be read, decoded, or played
 */
public static void main(String[] argv) throws Exception {
    AudioInputStream stream = AudioSystem.getAudioInputStream(new File("audiofile"));
    // From URL:
    // stream = AudioSystem.getAudioInputStream(new URL("http://hostname/audiofile"));

    AudioFormat format = stream.getFormat();
    if (format.getEncoding() != AudioFormat.Encoding.PCM_SIGNED) {
        // NOTE(review): doubling the sample and frame size assumes an 8-bit
        // source such as ULAW/ALAW — confirm for other encodings.
        format = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, format.getSampleRate(),
                format.getSampleSizeInBits() * 2, format.getChannels(), format.getFrameSize() * 2,
                format.getFrameRate(), true); // big endian
        stream = AudioSystem.getAudioInputStream(format, stream);
    }

    // FIX: cast AFTER multiplying; the original truncated the long frame
    // length to int before computing the buffer size.
    DataLine.Info info = new DataLine.Info(SourceDataLine.class, stream.getFormat(),
            (int) (stream.getFrameLength() * format.getFrameSize()));
    SourceDataLine line = (SourceDataLine) AudioSystem.getLine(info);
    line.open(stream.getFormat());
    line.start();

    int numRead;
    byte[] buf = new byte[line.getBufferSize()];
    while ((numRead = stream.read(buf, 0, buf.length)) >= 0) {
        // write() may accept fewer bytes than offered; loop until all queued.
        int offset = 0;
        while (offset < numRead) {
            offset += line.write(buf, offset, numRead - offset);
        }
    }
    line.drain();
    line.stop();
    // FIX: release the audio line and the input stream (leaked in original).
    line.close();
    stream.close();
}

From source file:marytts.signalproc.effects.VocalTractLinearScalerEffect.java

/**
 * Command line interface to the vocal tract linear scaler effect.
 * /*  ww w.  ja  v  a 2  s .c  o  m*/
 * @param args the command line arguments. Exactly two arguments are expected:
 * (1) the factor by which to scale the vocal tract (between 0.25 = very long and 4.0 = very short vocal tract);
 * (2) the filename of the wav file to modify.
 * Will produce a file basename_factor.wav, where basename is the filename without the extension.
 * @throws Exception if processing fails for some reason.
 */
public static void main(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.println(
                "Usage: java " + VocalTractLinearScalerEffect.class.getName() + " <factor> <filename>");
        System.exit(1);
    }
    float factor = Float.parseFloat(args[0]);
    String filename = args[1];
    AudioDoubleDataSource input = new AudioDoubleDataSource(
            AudioSystem.getAudioInputStream(new File(filename)));
    AudioFormat format = input.getAudioFormat();
    VocalTractLinearScalerEffect effect = new VocalTractLinearScalerEffect((int) format.getSampleRate());
    DoubleDataSource output = effect.apply(input, "amount:" + factor);
    DDSAudioInputStream audioOut = new DDSAudioInputStream(output, format);
    String outFilename = FilenameUtils.removeExtension(filename) + "_" + factor + ".wav";
    AudioSystem.write(audioOut, AudioFileFormat.Type.WAVE, new File(outFilename));
    System.out.println("Created file " + outFilename);
}

From source file:Main.java

/**
 * Captures one buffer of microphone audio and plays it back.
 *
 * @param args unused command line arguments
 * @throws Exception if the audio lines cannot be opened or the threads are interrupted
 */
public static void main(String args[]) throws Exception {
    final ByteArrayOutputStream out = new ByteArrayOutputStream();

    // 8 kHz, 8-bit, mono, signed, big-endian PCM.
    float sampleRate = 8000;
    int sampleSizeInBits = 8;
    int channels = 1;
    boolean signed = true;
    boolean bigEndian = true;
    final AudioFormat format = new AudioFormat(sampleRate, sampleSizeInBits, channels, signed, bigEndian);

    DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);
    final TargetDataLine line = (TargetDataLine) AudioSystem.getLine(info);
    line.open(format);
    line.start();

    Runnable runner = new Runnable() {
        // One second of audio: sampleRate frames * frameSize bytes per frame.
        int bufferSize = (int) format.getSampleRate() * format.getFrameSize();

        byte buffer[] = new byte[bufferSize];

        public void run() {
            try {
                // Capture a single buffer's worth of audio.
                int count = line.read(buffer, 0, buffer.length);
                if (count > 0) {
                    out.write(buffer, 0, count);
                }

                out.close();
            } catch (IOException e) {
                System.err.println("I/O problems: " + e);
                System.exit(-1);
            }
        }
    };
    Thread captureThread = new Thread(runner);
    captureThread.start();

    // FIX: wait for the capture thread to finish before reading its output.
    // The original called out.toByteArray() immediately after start(), racing
    // the capture thread and almost always seeing an empty array. Also stop
    // and release the capture line once it is no longer needed.
    captureThread.join();
    line.stop();
    line.close();

    byte audio[] = out.toByteArray();
    InputStream input = new ByteArrayInputStream(audio);
    // FIX: build a DataLine.Info for SourceDataLine. The original reused the
    // TargetDataLine info, so AudioSystem.getLine() returned a TargetDataLine
    // and the cast to SourceDataLine threw ClassCastException.
    DataLine.Info playbackInfo = new DataLine.Info(SourceDataLine.class, format);
    final SourceDataLine line1 = (SourceDataLine) AudioSystem.getLine(playbackInfo);
    final AudioInputStream ais = new AudioInputStream(input, format, audio.length / format.getFrameSize());
    line1.open(format);
    line1.start();

    runner = new Runnable() {
        int bufferSize = (int) format.getSampleRate() * format.getFrameSize();

        byte buffer[] = new byte[bufferSize];

        public void run() {
            try {
                int count;
                while ((count = ais.read(buffer, 0, buffer.length)) != -1) {
                    if (count > 0) {
                        line1.write(buffer, 0, count);
                    }
                }
                line1.drain();
                line1.close();
            } catch (IOException e) {
                System.err.println("I/O problems: " + e);
                System.exit(-3);
            }
        }
    };
    Thread playThread = new Thread(runner);
    playThread.start();
    // Wait for playback to complete before main returns.
    playThread.join();
}

From source file:com.andrewkroh.cicso.rtp.AudioFileStreamer.java

/**
 * Utility method to convert an {@link AudioFormat} object to a String.
 * {@code AudioFormat} does implement a toString method, but its output
 * varies depending upon the contents. This version always prints the
 * value of all fields.
 *
 * @param format
 *            {@code AudioFormat} to convert to a String
 * @return {@code AudioFormat} object as a String
 */
private static String audioFormatToString(AudioFormat format) {
    return new ToStringBuilder(format).append("encoding", format.getEncoding())
            .append("sampleRate", format.getSampleRate())
            .append("sampleSizeInBits", format.getSampleSizeInBits()).append("channels", format.getChannels())
            .append("frameSize", format.getFrameSize()).append("frameRate", format.getFrameRate())
            .append("isBigEndian", format.isBigEndian()).toString();
}

From source file:com.andrewkroh.cicso.rtp.AudioFileStreamer.java

/**
 * Derives an 8-bit A-law {@link AudioFormat} from {@code source}, keeping
 * its sample rate, frame rate, channel count, and endianness.
 *
 * @param source format to derive from; must not be null
 * @return A-law format with 8-bit samples and 1-byte frames
 */
private static AudioFormat toAlawFormat(AudioFormat source) {
    Preconditions.checkNotNull(source, "Source AudioFormat cannot be null.");

    final int sampleSizeInBits = 8;
    final int frameSizeInBytes = 1;
    return new AudioFormat(AudioFormat.Encoding.ALAW, source.getSampleRate(), sampleSizeInBits,
            source.getChannels(), frameSizeInBytes, source.getFrameRate(), source.isBigEndian());
}

From source file:com.andrewkroh.cicso.rtp.AudioFileStreamer.java

/**
 * Derives an 8-bit mu-law {@link AudioFormat} from {@code source}, keeping
 * its sample rate, frame rate, channel count, and endianness.
 *
 * @param source format to derive from; must not be null
 * @return mu-law format with 8-bit samples and 1-byte frames
 */
private static AudioFormat toUlawFormat(AudioFormat source) {
    Preconditions.checkNotNull(source, "Source AudioFormat cannot be null.");

    final int sampleSizeInBits = 8;
    final int frameSizeInBytes = 1;
    return new AudioFormat(AudioFormat.Encoding.ULAW, source.getSampleRate(), sampleSizeInBits,
            source.getChannels(), frameSizeInBytes, source.getFrameRate(), source.isBigEndian());
}

From source file:com.andrewkroh.cicso.rtp.AudioFileStreamer.java

/**
 * Returns the number of samples that represent the specified time period,
 * rounded up to the next whole sample.
 *
 * @param outputFormat
 *            format of the data, needed to obtain the sample rate
 * @param timePeriod
 *            period of time
 * @param timeUnit
 *            TimeUnit of the {@code timePeriod}
 * @return number of samples that represent the specified time period
 */
@VisibleForTesting
public static int getNumberOfSamplesPerTimePeriod(AudioFormat outputFormat, long timePeriod,
        TimeUnit timeUnit) {
    // Convert the period to seconds, then scale by the sample rate.
    double periodInSeconds = timeUnit.toNanos(timePeriod) / 1_000_000_000.0;
    return (int) Math.ceil(periodInSeconds * outputFormat.getSampleRate());
}

From source file:com.andrewkroh.cicso.rtp.AudioFileStreamer.java

/**
 * Derives a 16-bit PCM {@link AudioFormat} from {@code source}, keeping its
 * sample rate, frame rate, and channel count.
 *
 * NOTE(review): this uses PCM_UNSIGNED, which is unusual for 16-bit audio;
 * confirm PCM_SIGNED was not intended.
 *
 * @param source format to derive from; must not be null
 * @param bigEndianOutput whether the output samples are big-endian
 * @return 16-bit PCM format with 2-byte frames
 */
private static AudioFormat toPcm16Format(AudioFormat source, boolean bigEndianOutput) {
    Preconditions.checkNotNull(source, "Source AudioFormat cannot be null.");

    final int sampleSizeInBits = 16;
    final int frameSizeInBytes = 2;
    return new AudioFormat(AudioFormat.Encoding.PCM_UNSIGNED, source.getSampleRate(), sampleSizeInBits,
            source.getChannels(), frameSizeInBytes, source.getFrameRate(), bigEndianOutput);
}

From source file:Main.java

/**
 * Reads sampled audio data from the specified URL and plays it to the
 * default output line, blocking until playback completes. Non-PCM data is
 * transcoded to 16-bit signed little-endian PCM before playback.
 *
 * @param url location of the audio resource to play
 * @throws IOException if the stream cannot be read
 * @throws UnsupportedAudioFileException if the data is not in a recognized audio file format
 * @throws LineUnavailableException if no matching output line can be opened
 */
public static void streamSampledAudio(URL url)
        throws IOException, UnsupportedAudioFileException, LineUnavailableException {
    AudioInputStream ain = null; // We read audio data from here
    SourceDataLine line = null; // And write it here.

    try {
        // Get an audio input stream from the URL
        ain = AudioSystem.getAudioInputStream(url);

        // Get information about the format of the stream
        AudioFormat format = ain.getFormat();
        DataLine.Info info = new DataLine.Info(SourceDataLine.class, format);

        // If the format is not supported directly (i.e. if it is not PCM
        // encoded), then try to transcode it to PCM.
        if (!AudioSystem.isLineSupported(info)) {
            // This is the PCM format we want to transcode to:
            // 16-bit signed little-endian, at the source's sample rate
            // and channel count.
            AudioFormat pcm = new AudioFormat(format.getSampleRate(), 16, format.getChannels(), true, false);

            // Get a wrapper stream around the input stream that does the
            // transcoding for us.
            ain = AudioSystem.getAudioInputStream(pcm, ain);

            // Update the format and info variables for the transcoded data
            format = ain.getFormat();
            info = new DataLine.Info(SourceDataLine.class, format);
        }

        // Open the line through which we'll play the streaming audio.
        line = (SourceDataLine) AudioSystem.getLine(info);
        line.open(format);

        // Allocate a buffer for reading from the input stream and writing
        // to the line. Make it large enough to hold 4k audio frames.
        // Note that the SourceDataLine also has its own internal buffer.
        int framesize = format.getFrameSize();
        byte[] buffer = new byte[4 * 1024 * framesize]; // the buffer
        int numbytes = 0; // how many bytes are buffered but not yet written

        // We haven't started the line yet.
        boolean started = false;

        for (;;) { // We'll exit the loop when we reach the end of stream
            // First, read some bytes from the input stream, appending after
            // any partial frame carried over from the previous iteration.
            int bytesread = ain.read(buffer, numbytes, buffer.length - numbytes);
            // If there were no more bytes to read, we're done.
            if (bytesread == -1)
                break;
            numbytes += bytesread;

            // Now that we've got some audio data, to write to the line,
            // start the line, so it will play that data as we write it.
            if (!started) {
                line.start();
                started = true;
            }

            // We must write bytes to the line in an integer multiple of
            // the framesize. So figure out how many bytes we'll write.
            int bytestowrite = (numbytes / framesize) * framesize;

            // Now write the bytes. The line will buffer them and play
            // them. This call will block until all bytes are written.
            line.write(buffer, 0, bytestowrite);

            // If we didn't have an integer multiple of the frame size,
            // then copy the remaining bytes to the start of the buffer
            // so the next read appends after them.
            int remaining = numbytes - bytestowrite;
            if (remaining > 0)
                System.arraycopy(buffer, bytestowrite, buffer, 0, remaining);
            numbytes = remaining;
        }

        // Now block until all buffered sound finishes playing.
        line.drain();
    } finally { // Always relinquish the resources we use
        if (line != null)
            line.close();
        if (ain != null)
            ain.close();
    }
}