Example usage for javax.sound.sampled AudioFormat AudioFormat

Introduction

This page collects example usages of the javax.sound.sampled AudioFormat constructor.

Prototype

public AudioFormat(float sampleRate, int sampleSizeInBits, int channels, boolean signed, boolean bigEndian) 

Document

Constructs an AudioFormat with a linear PCM encoding and the given parameters.
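For instance, a CD-quality stereo format can be constructed like this (a minimal, self-contained sketch; the parameter values are illustrative):

import javax.sound.sampled.AudioFormat;

public class FormatDemo {
    public static void main(String[] args) {
        // 44.1 kHz sample rate, 16-bit samples, 2 channels, signed, little-endian PCM
        AudioFormat cdQuality = new AudioFormat(44100f, 16, 2, true, false);
        System.out.println(cdQuality); // prints the derived encoding, frame size and byte order
    }
}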

Usage

From source file:Main.java

public static void main(String args[]) throws Exception {
    final ByteArrayOutputStream out = new ByteArrayOutputStream();
    float sampleRate = 8000;
    int sampleSizeInBits = 8;
    int channels = 1;
    boolean signed = true;
    boolean bigEndian = true;
    final AudioFormat format = new AudioFormat(sampleRate, sampleSizeInBits, channels, signed, bigEndian);
    DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);
    final TargetDataLine line = (TargetDataLine) AudioSystem.getLine(info);
    line.open(format);
    line.start();
    Runnable runner = new Runnable() {
        int bufferSize = (int) format.getSampleRate() * format.getFrameSize(); // one second of audio

        byte buffer[] = new byte[bufferSize];

        public void run() {
            try {

                int count = line.read(buffer, 0, buffer.length);
                if (count > 0) {
                    out.write(buffer, 0, count);
                }

                out.close();
            } catch (IOException e) {
                System.err.println("I/O problems: " + e);
                System.exit(-1);
            }
        }
    };
    Thread captureThread = new Thread(runner);
    captureThread.start();
    // Wait for the one-second capture to finish; without this join,
    // out.toByteArray() below would run before any audio was recorded.
    captureThread.join();
    line.stop();
    line.close();

    byte audio[] = out.toByteArray();
    InputStream input = new ByteArrayInputStream(audio);
    DataLine.Info playbackInfo = new DataLine.Info(SourceDataLine.class, format);
    final SourceDataLine line1 = (SourceDataLine) AudioSystem.getLine(playbackInfo);
    final AudioInputStream ais = new AudioInputStream(input, format, audio.length / format.getFrameSize());
    line1.open(format);
    line1.start();

    runner = new Runnable() {
        int bufferSize = (int) format.getSampleRate() * format.getFrameSize();

        byte buffer[] = new byte[bufferSize];

        public void run() {
            try {
                int count;
                while ((count = ais.read(buffer, 0, buffer.length)) != -1) {
                    if (count > 0) {
                        line1.write(buffer, 0, count);
                    }
                }
                line1.drain();
                line1.close();
            } catch (IOException e) {
                System.err.println("I/O problems: " + e);
                System.exit(-3);
            }
        }
    };
    Thread playThread = new Thread(runner);
    playThread.start();

}

From source file:com.music.tools.ScaleTester.java

public static void main(String[] args) {
    System.out.println(
            "Usage: java ScaleTester <fundamental frequency> <chromatic scale size> <scale size> <use ET>");
    final AudioFormat af = new AudioFormat(sampleRate, 16, 1, true, true);
    try {
        fundamentalFreq = getArgument(args, 0, FUNDAMENTAL_FREQUENCY, Double.class);
        int pitchesInChromaticScale = getArgument(args, 1, CHROMATIC_SCALE_SILZE, Integer.class);

        List<Double> harmonicFrequencies = new ArrayList<>();
        List<String> ratios = new ArrayList<>();
        Set<Double> frequencies = new HashSet<Double>();
        frequencies.add(fundamentalFreq);
        int octaveMultiplier = 2;
        for (int i = 2; i < 100; i++) {
            // Exclude the 7th harmonic TODO exclude the 11th as well?
            // http://www.phy.mtu.edu/~suits/badnote.html
            if (i % 7 == 0) {
                continue;
            }
            double actualFreq = fundamentalFreq * i;
            double closestTonicRatio = actualFreq / (fundamentalFreq * octaveMultiplier);
            if (closestTonicRatio < 1 || closestTonicRatio > 2) {
                octaveMultiplier *= 2;
            }
            double closestTonic = actualFreq - actualFreq % (fundamentalFreq * octaveMultiplier);
            double normalizedFreq = fundamentalFreq * (actualFreq / closestTonic);

            harmonicFrequencies.add(actualFreq);
            frequencies.add(normalizedFreq);
            if (frequencies.size() == pitchesInChromaticScale) {
                break;
            }
        }

        System.out.println("Harmonic (overtone) frequencies: " + harmonicFrequencies);
        System.out.println("Transposed harmonic frequencies: " + frequencies);

        List<Double> chromaticScale = new ArrayList<>(frequencies);
        Collections.sort(chromaticScale);

        // find the "perfect" interval (e.g. perfect fifth)
        int perfectIntervalIndex = 0;
        int idx = 0;
        for (Iterator<Double> it = chromaticScale.iterator(); it.hasNext();) {
            Double noteFreq = it.next();
            long[] fraction = findCommonFraction(noteFreq / fundamentalFreq);
            fractionCache.put(noteFreq, fraction);
            if (fraction[0] == 3 && fraction[1] == 2) {
                perfectIntervalIndex = idx;
                System.out.println("Perfect interval (3/2) idx: " + perfectIntervalIndex);
            }
            idx++;
            ratios.add(Arrays.toString(fraction));
        }
        System.out.println("Ratios to fundemental frequency: " + ratios);

        if (getBooleanArgument(args, 4, USE_ET)) {
            chromaticScale = temper(chromaticScale);
        }

        System.out.println();
        System.out.println("Chromatic scale: " + chromaticScale);

        Set<Double> scaleSet = new HashSet<Double>();
        scaleSet.add(chromaticScale.get(0));
        idx = 0;
        List<Double> orderedInCircle = new ArrayList<>();
        // now go around the circle of perfect intervals and put the notes
        // in order
        while (orderedInCircle.size() < chromaticScale.size()) {
            orderedInCircle.add(chromaticScale.get(idx));
            idx += perfectIntervalIndex;
            idx = idx % chromaticScale.size();
        }
        System.out.println("Pitches Ordered in circle of perfect intervals: " + orderedInCircle);

        List<Double> scale = new ArrayList<Double>(scaleSet);
        int currentIdxInCircle = orderedInCircle.size() - 1; // start with
                                                             // the last
                                                             // note in the
                                                             // circle
        int scaleSize = getArgument(args, 3, SCALE_SIZE, Integer.class);
        while (scale.size() < scaleSize) {
            double pitch = orderedInCircle.get(currentIdxInCircle % orderedInCircle.size());
            if (!scale.contains(pitch)) {
                scale.add(pitch);
            }
            currentIdxInCircle++;
        }
        Collections.sort(scale);

        System.out.println("Scale: " + scale);

        SourceDataLine line = AudioSystem.getSourceDataLine(af);
        line.open(af);
        line.start();

        Double[] scaleFrequencies = scale.toArray(new Double[scale.size()]);

        // first play the whole scale
        WaveMelodyGenerator.playScale(line, scaleFrequencies);
        // then generate a random melody in the scale
        WaveMelodyGenerator.playMelody(line, scaleFrequencies);

        line.drain();
        line.close();
    } catch (Exception e) {
        e.printStackTrace();
    }
}

From source file:org.jcodec.player.filters.http.MediaInfoParser.java

private static AudioFormat parseFormat(JSONObject format) {
    return new AudioFormat(((Number) format.get("sampleRate")).intValue(),
            ((Number) format.get("sampleSizeInBits")).intValue(), ((Number) format.get("channels")).intValue(),
            true, (Boolean) format.get("bigEndian"));
}
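For reference, a sketch of the media-info JSON shape this parser expects; the field names come from the code above, while the values are hypothetical:

// {
//   "sampleRate": 48000,
//   "sampleSizeInBits": 16,
//   "channels": 2,
//   "bigEndian": false
// }
// parseFormat would then return a signed, little-endian 48 kHz, 16-bit, stereo PCM format.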

From source file:org.snitko.app.record.SoundRecorder.java

/**
 * Defines an audio format. Uses a wideband telephony format (16 kHz, 16-bit, mono) for now.
 */
private AudioFormat getAudioFormat() {
    float sampleRate = 16000.0F;
    int sampleSizeInBits = 16;
    int channels = 1;
    boolean signed = true;
    boolean bigEndian = false;
    return new AudioFormat(sampleRate, sampleSizeInBits, channels, signed, bigEndian);
}
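A format built this way is typically handed to AudioSystem to open a capture line; a minimal sketch (assuming this helper is in scope and the hardware supports 16 kHz mono PCM):

    AudioFormat format = getAudioFormat();
    DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);
    TargetDataLine line = (TargetDataLine) AudioSystem.getLine(info); // throws LineUnavailableException if unsupported
    line.open(format);
    line.start();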

From source file:Main.java

/** Read sampled audio data from the specified URL and play it */
public static void streamSampledAudio(URL url)
        throws IOException, UnsupportedAudioFileException, LineUnavailableException {
    AudioInputStream ain = null; // We read audio data from here
    SourceDataLine line = null; // And write it here.

    try {
        // Get an audio input stream from the URL
        ain = AudioSystem.getAudioInputStream(url);

        // Get information about the format of the stream
        AudioFormat format = ain.getFormat();
        DataLine.Info info = new DataLine.Info(SourceDataLine.class, format);

        // If the format is not supported directly (i.e. if it is not PCM
        // encoded), then try to transcode it to PCM.
        if (!AudioSystem.isLineSupported(info)) {
            // This is the PCM format we want to transcode to.
            // The parameters here are audio format details that you
            // shouldn't need to understand for casual use.
            AudioFormat pcm = new AudioFormat(format.getSampleRate(), 16, format.getChannels(), true, false);

            // Get a wrapper stream around the input stream that does the
            // transcoding for us.
            ain = AudioSystem.getAudioInputStream(pcm, ain);

            // Update the format and info variables for the transcoded data
            format = ain.getFormat();
            info = new DataLine.Info(SourceDataLine.class, format);
        }

        // Open the line through which we'll play the streaming audio.
        line = (SourceDataLine) AudioSystem.getLine(info);
        line.open(format);

        // Allocate a buffer for reading from the input stream and writing
        // to the line. Make it large enough to hold 4k audio frames.
        // Note that the SourceDataLine also has its own internal buffer.
        int framesize = format.getFrameSize();
        byte[] buffer = new byte[4 * 1024 * framesize]; // holds 4k frames of audio data
        int numbytes = 0; // number of valid bytes currently in the buffer

        // We haven't started the line yet.
        boolean started = false;

        for (;;) { // We'll exit the loop when we reach the end of stream
            // First, read some bytes from the input stream.
            int bytesread = ain.read(buffer, numbytes, buffer.length - numbytes);
            // If there were no more bytes to read, we're done.
            if (bytesread == -1)
                break;
            numbytes += bytesread;

            // Now that we've got some audio data, to write to the line,
            // start the line, so it will play that data as we write it.
            if (!started) {
                line.start();
                started = true;
            }

            // We must write bytes to the line in an integer multiple of
            // the framesize. So figure out how many bytes we'll write.
            int bytestowrite = (numbytes / framesize) * framesize;

            // Now write the bytes. The line will buffer them and play
            // them. This call will block until all bytes are written.
            line.write(buffer, 0, bytestowrite);

            // If we didn't have an integer multiple of the frame size,
            // then copy the remaining bytes to the start of the buffer.
            int remaining = numbytes - bytestowrite;
            if (remaining > 0)
                System.arraycopy(buffer, bytestowrite, buffer, 0, remaining);
            numbytes = remaining;
        }

        // Now block until all buffered sound finishes playing.
        line.drain();
    } finally { // Always relinquish the resources we use
        if (line != null)
            line.close();
        if (ain != null)
            ain.close();
    }
}

From source file:org.sipfoundry.voicemail.EmailFormatterTest.java

private void makeWaves(File wavFile, byte filler, int length) throws IOException {
    byte[] fill = new byte[length];
    Arrays.fill(fill, filler);
    AudioFormat format = new AudioFormat(8000, 16, 1, true, false);
    // The AudioInputStream length argument is measured in sample frames, not bytes
    AudioInputStream ais = new AudioInputStream(new ByteArrayInputStream(fill), format,
            fill.length / format.getFrameSize());
    AudioSystem.write(ais, AudioFileFormat.Type.WAVE, wavFile);
}

From source file:com.github.woz_dialog.ros_woz_dialog_project.TTSHTTPClient.java

public void synthesise(String utterance) throws Exception {

    try {
        log.fine("calling Nuance server to synthesise utterance \"" + utterance + "\"");

        HttpPost httppost = new HttpPost(ttsURI);
        httppost.addHeader("Content-Type", "text/plain");
        httppost.addHeader("Accept", "audio/x-wav;codec=pcm;bit=16;rate=16000");
        HttpEntity entity = new StringEntity(utterance);

        //HttpEntity entity = new ByteArrayEntity(utterance.getBytes("UTF-8"));

        httppost.setEntity(entity);

        HttpResponse response = ttsClient.execute(httppost);

        HttpEntity resEntity = response.getEntity();

        if (resEntity == null || response.getStatusLine().getStatusCode() != 200) {
            System.out.println("Response status: " + response.getStatusLine());
            throw new Exception("Response status: " + response.getStatusLine());
        }

        format = new AudioFormat(16000, 16, 1, true, false);

        System.out.println(response.getStatusLine().getStatusCode());

        data = new byte[0];
        write(resEntity.getContent());
        httppost.releaseConnection();

        //Get the file path
        String basepath = System.getProperty("user.home");
        basepath = basepath + "/wav/" + LANGUAGE + "/" + VOICE;
        File dir = new File(basepath);

        if (!dir.exists()) {
            // attempt to create the directory here
            boolean successful = dir.mkdirs();
            if (successful) {
                System.out.println("directory was created successfully");
            } else {
                log.severe("failed trying to create the directory");
                throw new Exception("failed trying to create the directory");
            }
            // note: no early return here, so a freshly created directory
            // still receives the synthesised WAV file below
        }

        String fullpath = basepath + "/" + utterance.toLowerCase() + ".wav";

        //Record the sound
        generateFile(data, new File(fullpath));

        //Play the received sound

        SourceDataLine line = AudioSystem.getSourceDataLine(format);

        line.open(format);
        line.start();

        rewind();

        int nBytesRead = 0;
        byte[] abData = new byte[512 * 16];

        while (nBytesRead != -1) {
            nBytesRead = read(abData, 0, abData.length);

            if (nBytesRead >= 0) {
                line.write(abData, 0, nBytesRead);
            }
        }

        line.drain();
        if (line.isOpen()) {
            line.close();
        }

    } catch (LineUnavailableException e) {
        log.warning("Audio line is unavailable: " + e);
        throw e;
    }

}

From source file:opendial.plugins.NuanceSpeech.java

/**
 * Synthesises the provided utterance (first looking at the cache of existing
 * synthesised speech, and starting the generation if none is present yet).
 *
 * @param utterance the utterance to synthesise
 */
private void synthesise(String utterance) {

    String systemSpeechVar = system.getSettings().systemSpeech;

    SpeechData outputSpeech;
    if (ttsCache.containsKey(utterance)) {
        outputSpeech = ttsCache.get(utterance);
        outputSpeech.rewind();
    } else {
        AudioFormat format = new AudioFormat(16000, 16, 1, true, false);
        outputSpeech = new SpeechData(format);
        new Thread(() -> synthesise(utterance, outputSpeech)).start();
    }

    currentSynthesis.add(outputSpeech);
    new Thread(() -> {
        while (!currentSynthesis.get(0).equals(outputSpeech)) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                // ignored; keep polling until this utterance reaches the head of the queue
            }
        }
        system.addContent(systemSpeechVar, outputSpeech);
        currentSynthesis.remove(0);
    }).start();
}

From source file:com.stainlesscode.mediapipeline.Engine.java

protected void initializeOutputLayer()
        throws InstantiationException, IllegalAccessException, ClassNotFoundException {
    if (LogUtil.isDebugEnabled())
        LogUtil.debug("entering initializeOutputLayer");

    if (videoOutput == null) {
        LogUtil.warn("No VideoOutput specified, creating a default (but it's probably not visible anywhere)");
        videoOutput = VideoOutputFactory.createVideoOutput(engineConfiguration);
        if (videoOutput instanceof MediaPlayerEventListener) {
            this.addMediaPlayerEventListener(((MediaPlayerEventListener) videoOutput));
        }
    }

    videoOutput.init(engineRuntime);

    if (audioOutput == null) {
        audioOutput = AudioOutputFactory.createAudioOutput(engineConfiguration);
        if (audioOutput instanceof MediaPlayerEventListener) {
            this.addMediaPlayerEventListener(((MediaPlayerEventListener) audioOutput));
        }
    }

    AudioFormat format = new AudioFormat(engineRuntime.getAudioCoder().getSampleRate(),
            (int) IAudioSamples.findSampleBitDepth(engineRuntime.getAudioCoder().getSampleFormat()),
            engineRuntime.getAudioCoder().getChannels(), true, false);

    audioOutput.init(engineRuntime, format);

    audioPlayer = new DefaultAudioPlayer(audioOutput, engineRuntime);

    videoPlayer = new MediaPlayerEventAwareVideoPlayer(videoOutput, engineRuntime);

    if (videoPlayer instanceof MediaPlayerEventListener) {
        this.addMediaPlayerEventListener(((MediaPlayerEventListener) videoPlayer));
    }

    if (LogUtil.isDebugEnabled())
        LogUtil.debug("leaving initializeOutputLayer");
}