Example usage for javax.sound.sampled AudioFormat getEncoding

List of usage examples for javax.sound.sampled AudioFormat getEncoding

Introduction

On this page you can find example usages of javax.sound.sampled.AudioFormat.getEncoding().

Prototype

public Encoding getEncoding() 

Document

Obtains the type of encoding for sounds in this format.
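
Before the collected examples, here is a minimal, self-contained sketch of the typical pattern (the file path is a placeholder): read a stream, inspect its encoding, and convert to signed PCM when needed.

import java.io.File;

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

public class GetEncodingDemo {
    public static void main(String[] args) throws Exception {
        // "sound.wav" is a placeholder path.
        AudioInputStream in = AudioSystem.getAudioInputStream(new File("sound.wav"));
        AudioFormat format = in.getFormat();
        System.out.println("Encoding: " + format.getEncoding());

        // Convert to signed PCM if the source uses some other encoding.
        if (!AudioFormat.Encoding.PCM_SIGNED.equals(format.getEncoding())) {
            in = AudioSystem.getAudioInputStream(AudioFormat.Encoding.PCM_SIGNED, in);
            System.out.println("Converted encoding: " + in.getFormat().getEncoding());
        }
        in.close();
    }
}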

Usage

From source file:Main.java

public static void main(String[] argv) throws Exception {
    AudioInputStream stream = AudioSystem.getAudioInputStream(new File("audiofile"));

    // Alternatively, read from a URL:
    // stream = AudioSystem.getAudioInputStream(new URL(
    //         "http://hostname/audiofile"));

    AudioFormat format = stream.getFormat();
    if (format.getEncoding() != AudioFormat.Encoding.PCM_SIGNED) {
        format = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, format.getSampleRate(),
                format.getSampleSizeInBits() * 2, format.getChannels(), format.getFrameSize() * 2,
                format.getFrameRate(), true); // big endian
        stream = AudioSystem.getAudioInputStream(format, stream);
    }

    DataLine.Info info = new DataLine.Info(Clip.class, stream.getFormat(),
            ((int) stream.getFrameLength() * format.getFrameSize()));
    Clip clip = (Clip) AudioSystem.getLine(info);

    clip.open(stream);

    clip.start();
}

From source file:Main.java

public static void main(String[] argv) throws Exception {
    AudioInputStream stream = AudioSystem.getAudioInputStream(new File("audiofile"));
    //    stream = AudioSystem.getAudioInputStream(new URL(
    //      "http://hostname/audiofile"));

    AudioFormat format = stream.getFormat();
    if (format.getEncoding() != AudioFormat.Encoding.PCM_SIGNED) {
        format = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, format.getSampleRate(),
                format.getSampleSizeInBits() * 2, format.getChannels(), format.getFrameSize() * 2,
                format.getFrameRate(), true); // big endian
        stream = AudioSystem.getAudioInputStream(format, stream);
    }

    DataLine.Info info = new DataLine.Info(SourceDataLine.class, stream.getFormat(),
            ((int) stream.getFrameLength() * format.getFrameSize()));
    SourceDataLine line = (SourceDataLine) AudioSystem.getLine(info);
    line.open(stream.getFormat());
    line.start();

    int numRead = 0;
    byte[] buf = new byte[line.getBufferSize()];
    while ((numRead = stream.read(buf, 0, buf.length)) >= 0) {
        int offset = 0;
        while (offset < numRead) {
            offset += line.write(buf, offset, numRead - offset);
        }
    }
    line.drain();
    line.stop();
}

From source file:com.andrewkroh.cicso.rtp.AudioFileStreamer.java

/**
 * Utility method to convert an {@link AudioFormat} object to a String.
 * {@code AudioFormat} does implement a toString method, but its output
 * varies depending upon the contents. I find it more useful to always
 * print the value of all fields.
 *
 * @param format
 *            {@code AudioFormat} to convert to a String
 * @return {@code AudioFormat} object as a String
 */
private static String audioFormatToString(AudioFormat format) {
    return new ToStringBuilder(format).append("encoding", format.getEncoding())
            .append("sampleRate", format.getSampleRate())
            .append("sampleSizeInBits", format.getSampleSizeInBits()).append("channels", format.getChannels())
            .append("frameSize", format.getFrameSize()).append("frameRate", format.getFrameRate())
            .append("isBigEndian", format.isBigEndian()).toString();
}
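
A hedged usage sketch (the format values are arbitrary; ToStringBuilder here is Apache Commons Lang's, as used by the class above):

// Hypothetical caller: print every field of a CD-quality PCM format.
AudioFormat cdQuality = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
        44100f, 16, 2, 4, 44100f, false);
System.out.println(audioFormatToString(cdQuality));
// ToStringBuilder's default style yields something like:
// javax.sound.sampled.AudioFormat@1a2b3c[encoding=PCM_SIGNED,sampleRate=44100.0,...]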

From source file:it.univpm.deit.semedia.musicuri.core.Toolset.java

/**
 * Extracts/encodes the AudioSignatureDS for a given audio file
 * @param file the audio file to encode 
 * @return a string containing the whole XML-formatted MPEG-7 description document
 */
public static String createMPEG7Description(File file) throws IOException {
    if (isSupportedAudioFile(file)) {
        System.out.println("Extracting Query Audio Signature");
        String xmlString = null;
        Config configuration = new ConfigDefault();
        configuration.enableAll(false);
        configuration.setValue("AudioSignature", "enable", true);
        configuration.setValue("AudioSignature", "decimation", 32);
        //System.out.println("File: " + file.getName());

        AudioInputStream ais = null;
        try {
            ais = AudioSystem.getAudioInputStream(file);
            AudioFormat f = ais.getFormat();
            if (f.getEncoding() != AudioFormat.Encoding.PCM_SIGNED) {
                System.out.println("Converting Audio stream format");
                ais = AudioSystem.getAudioInputStream(AudioFormat.Encoding.PCM_SIGNED, ais);
                f = ais.getFormat();
            }

            String workingDir = getCWD();
            String tempFilename = workingDir + "/temp.wav";
            AudioSystem.write(ais, AudioFileFormat.Type.WAVE, new File(tempFilename));

            File tmpFile = new File(tempFilename);
            AudioInFloatSampled audioin = new AudioInFloatSampled(tmpFile);

            String str = tmpFile.getCanonicalPath();
            String[] ar = { str };
            //xmlString = Encoder.fromWAVtoXML(ar);

            // gather information about audio file
            MP7MediaInformation media_info = new MP7MediaInformation();
            media_info.setFileSize(tmpFile.length());

            AudioFormat format = audioin.getSourceFormat();
            media_info.setSample(format.getSampleRate(), format.getSampleSizeInBits());
            media_info.setNumberOfChannels(audioin.isMono() ? 1 : 2);

            // create mpeg-7 writer
            MP7Writer mp7writer = new MP7Writer();
            mp7writer.setMediaInformation(media_info);

            // create encoder
            Encoder encoder = null;

            Config config = new ConfigDefault();
            config.enableAll(false);
            config.setValue("AudioSignature", "enable", true);
            config.setValue("AudioSignature", "decimation", 32);
            encoder = new Encoder(audioin.getSampleRate(), mp7writer, config);
            //encoder.addTimeElapsedListener(new Ticker(System.err));

            // copy audio signal from source to encoder
            long oldtime = System.currentTimeMillis();
            float[] audio;
            while ((audio = audioin.get()) != null) {
                if (!audioin.isMono())
                    audio = AudioInFloat.getMono(audio);
                encoder.put(audio);
            }
            encoder.flush();
            System.out.println("Extraction Time     : " + (System.currentTimeMillis() - oldtime) + " ms");

            // whole MPEG-7 description into a string
            xmlString = mp7writer.toString();
            //System.out.println( xmlString )

        } catch (Exception e) {
            e.printStackTrace(System.err);
        } finally {
            // Close the stream now that the description has been written.
            if (ais != null) {
                ais.close();
            }
        }

        return xmlString;
    } else {
        System.out.println("Unsupported audio file format");
        return null;
    }
}

From source file:iristk.speech.nuance9.BaseRecognizer.java

public static String getEncoding(AudioFormat format) throws IllegalArgumentException {
    if (format.getFrameRate() != 8000 && format.getFrameRate() != 16000)
        throw new IllegalArgumentException("Can only process 8khz or 16khz");
    if (format.isBigEndian())
        throw new IllegalArgumentException("Can only process little-endian");
    if (format.getChannels() != 1)
        throw new IllegalArgumentException("Can only process mono sound");
    if (format.getEncoding() == Encoding.ULAW)
        return "audio/basic;rate=8000";
    else if (format.getEncoding() == Encoding.PCM_SIGNED) {
        if (format.getFrameSize() != 2)
            throw new IllegalArgumentException("Can only process 16 bit PCM sound");
        return "audio/L16;rate=8000";
    } else
        throw new IllegalArgumentException("Bad audio encoding: " + format.getEncoding());
}
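
A hedged usage sketch of this mapping (the AudioFormat below is an assumption constructed to satisfy every guard):

// 8 kHz, 16-bit signed PCM, little-endian, mono: passes all the checks above.
AudioFormat pcm = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
        8000f, 16, 1, 2, 8000f, false);
System.out.println(BaseRecognizer.getEncoding(pcm)); // prints audio/L16;rate=8000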

From source file:edu.tsinghua.lumaqq.Sounder.java

/**
 * Loads a sound file, converting ULAW/ALAW sampled audio to PCM and
 * passing MIDI data to the sequencer.
 *
 * @param filename path of the sound file to load
 * @return true if the sound was loaded successfully, false otherwise
 */
private boolean loadSound(String filename) {
    // Open the file: first as sampled audio, falling back to a plain buffered stream
    File file = new File(filename);
    try {
        currentSound = AudioSystem.getAudioInputStream(file);
    } catch (Exception e) {
        try {
            FileInputStream is = new FileInputStream(file);
            currentSound = new BufferedInputStream(is, 1024);
        } catch (Exception ex) {
            log.error(ex.getMessage());
            currentSound = null;
            return false;
        }
    }

    // Handle the stream according to its type: sampled audio or MIDI sequence
    if (currentSound instanceof AudioInputStream) {
        try {
            AudioInputStream stream = (AudioInputStream) currentSound;
            AudioFormat format = stream.getFormat();

            // The device cannot be opened for ALAW/ULAW playback, so convert ALAW/ULAW to PCM
            if ((format.getEncoding() == AudioFormat.Encoding.ULAW)
                    || (format.getEncoding() == AudioFormat.Encoding.ALAW)) {
                AudioFormat tmp = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, format.getSampleRate(),
                        format.getSampleSizeInBits() * 2, format.getChannels(), format.getFrameSize() * 2,
                        format.getFrameRate(), true);
                stream = AudioSystem.getAudioInputStream(tmp, stream);
                format = tmp;
            }
            DataLine.Info info = new DataLine.Info(Clip.class, stream.getFormat(),
                    ((int) stream.getFrameLength() * format.getFrameSize()));

            Clip clip = (Clip) AudioSystem.getLine(info);
            clip.open(stream);
            currentSound = clip;
        } catch (Exception ex) {
            log.error(ex.getMessage());
            currentSound = null;
            return false;
        }
    } else if (currentSound instanceof Sequence || currentSound instanceof BufferedInputStream) {
        try {
            sequencer.open();
            if (currentSound instanceof Sequence) {
                sequencer.setSequence((Sequence) currentSound);
            } else {
                sequencer.setSequence((BufferedInputStream) currentSound);
            }
            log.trace("Sequence Created");
        } catch (InvalidMidiDataException imde) {
            log.error("???");
            currentSound = null;
            return false;
        } catch (Exception ex) {
            log.error(ex.getMessage());
            currentSound = null;
            return false;
        }
    }

    return true;
}

From source file:SimpleSoundPlayer.java

public boolean loadSound(Object object) {
    duration = 0.0;

    currentName = ((File) object).getName();
    try {
        currentSound = AudioSystem.getAudioInputStream((File) object);
    } catch (Exception e1) {
        try {
            FileInputStream is = new FileInputStream((File) object);
            currentSound = new BufferedInputStream(is, 1024);
        } catch (Exception e3) {
            e3.printStackTrace();
            currentSound = null;
            return false;
        }
    }

    // user pressed stop or changed tabs while loading
    if (sequencer == null) {
        currentSound = null;
        return false;
    }

    if (currentSound instanceof AudioInputStream) {
        try {
            AudioInputStream stream = (AudioInputStream) currentSound;
            AudioFormat format = stream.getFormat();

            /**
             * we can't yet open the device for ALAW/ULAW playback, convert
             * ALAW/ULAW to PCM
             */

            if ((format.getEncoding() == AudioFormat.Encoding.ULAW)
                    || (format.getEncoding() == AudioFormat.Encoding.ALAW)) {
                AudioFormat tmp = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, format.getSampleRate(),
                        format.getSampleSizeInBits() * 2, format.getChannels(), format.getFrameSize() * 2,
                        format.getFrameRate(), true);
                stream = AudioSystem.getAudioInputStream(tmp, stream);
                format = tmp;
            }
            DataLine.Info info = new DataLine.Info(Clip.class, stream.getFormat(),
                    ((int) stream.getFrameLength() * format.getFrameSize()));

            Clip clip = (Clip) AudioSystem.getLine(info);
            clip.addLineListener(this);
            clip.open(stream);
            currentSound = clip;
            // seekSlider.setMaximum((int) stream.getFrameLength());
        } catch (Exception ex) {
            ex.printStackTrace();
            currentSound = null;
            return false;
        }
    } else if (currentSound instanceof Sequence || currentSound instanceof BufferedInputStream) {
        try {
            sequencer.open();
            if (currentSound instanceof Sequence) {
                sequencer.setSequence((Sequence) currentSound);
            } else {
                sequencer.setSequence((BufferedInputStream) currentSound);
            }

        } catch (InvalidMidiDataException imde) {
            System.out.println("Unsupported audio file.");
            currentSound = null;
            return false;
        } catch (Exception ex) {
            ex.printStackTrace();
            currentSound = null;
            return false;
        }
    }

    duration = getDuration();

    return true;
}

From source file:BasicPlayer.java

/**
 * Inits a DataLine.<br>
 *
 * We check if the line supports Gain and Pan controls.
 *
 * From the AudioInputStream, i.e. from the sound file, we
 * fetch information about the format of the audio data. This
 * information includes the sampling frequency, the number of
 * channels and the size of the samples, and is needed to ask
 * JavaSound for a suitable output line for this audio file.
 * Furthermore, we have to give JavaSound a hint about how
 * big the internal buffer for the line should be. Here,
 * we say AudioSystem.NOT_SPECIFIED, signaling that we don't
 * care about the exact size. JavaSound will use some default
 * value for the buffer size.
 */
protected void createLine() throws LineUnavailableException {
    log.info("Create Line");
    if (m_line == null) {
        AudioFormat sourceFormat = m_audioInputStream.getFormat();
        log.info("Create Line : Source format : " + sourceFormat.toString());
        int nSampleSizeInBits = sourceFormat.getSampleSizeInBits();
        if (nSampleSizeInBits <= 0)
            nSampleSizeInBits = 16;
        if ((sourceFormat.getEncoding() == AudioFormat.Encoding.ULAW)
                || (sourceFormat.getEncoding() == AudioFormat.Encoding.ALAW))
            nSampleSizeInBits = 16;
        if (nSampleSizeInBits != 8)
            nSampleSizeInBits = 16;
        AudioFormat targetFormat = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
                sourceFormat.getSampleRate(), nSampleSizeInBits, sourceFormat.getChannels(),
                sourceFormat.getChannels() * (nSampleSizeInBits / 8), sourceFormat.getSampleRate(), false);
        log.info("Create Line : Target format: " + targetFormat);
        // Keep a reference on encoded stream to progress notification.
        m_encodedaudioInputStream = m_audioInputStream;
        try {
            // Get total length in bytes of the encoded stream.
            encodedLength = m_encodedaudioInputStream.available();
        } catch (IOException e) {
            log.error("Cannot get m_encodedaudioInputStream.available()", e);
        }
        // Create decoded stream.
        m_audioInputStream = AudioSystem.getAudioInputStream(targetFormat, m_audioInputStream);
        AudioFormat audioFormat = m_audioInputStream.getFormat();
        DataLine.Info info = new DataLine.Info(SourceDataLine.class, audioFormat, AudioSystem.NOT_SPECIFIED);
        Mixer mixer = getMixer(m_mixerName);
        if (mixer != null) {
            log.info("Mixer : " + mixer.getMixerInfo().toString());
            m_line = (SourceDataLine) mixer.getLine(info);
        } else {
            m_line = (SourceDataLine) AudioSystem.getLine(info);
            m_mixerName = null;
        }
        log.info("Line : " + m_line.toString());
        log.debug("Line Info : " + m_line.getLineInfo().toString());
        log.debug("Line AudioFormat: " + m_line.getFormat().toString());
    }
}

From source file:org.apache.tika.parser.audio.AudioParser.java

public void parse(InputStream stream, ContentHandler handler, Metadata metadata, ParseContext context)
        throws IOException, SAXException, TikaException {
    // AudioSystem expects the stream to support the mark feature
    if (!stream.markSupported()) {
        stream = new BufferedInputStream(stream);
    }
    stream = new SkipFullyInputStream(stream);
    try {
        AudioFileFormat fileFormat = AudioSystem.getAudioFileFormat(stream);
        Type type = fileFormat.getType();
        if (type == Type.AIFC || type == Type.AIFF) {
            metadata.set(Metadata.CONTENT_TYPE, "audio/x-aiff");
        } else if (type == Type.AU || type == Type.SND) {
            metadata.set(Metadata.CONTENT_TYPE, "audio/basic");
        } else if (type == Type.WAVE) {
            metadata.set(Metadata.CONTENT_TYPE, "audio/vnd.wave");
        }

        AudioFormat audioFormat = fileFormat.getFormat();
        int channels = audioFormat.getChannels();
        if (channels != AudioSystem.NOT_SPECIFIED) {
            metadata.set("channels", String.valueOf(channels));
            // TODO: Use XMPDM.TRACKS? (see also frame rate in AudioFormat)
        }
        float rate = audioFormat.getSampleRate();
        if (rate != AudioSystem.NOT_SPECIFIED) {
            metadata.set("samplerate", String.valueOf(rate));
            metadata.set(XMPDM.AUDIO_SAMPLE_RATE, Integer.toString((int) rate));
        }
        int bits = audioFormat.getSampleSizeInBits();
        if (bits != AudioSystem.NOT_SPECIFIED) {
            metadata.set("bits", String.valueOf(bits));
            if (bits == 8) {
                metadata.set(XMPDM.AUDIO_SAMPLE_TYPE, "8Int");
            } else if (bits == 16) {
                metadata.set(XMPDM.AUDIO_SAMPLE_TYPE, "16Int");
            } else if (bits == 32) {
                metadata.set(XMPDM.AUDIO_SAMPLE_TYPE, "32Int");
            }
        }
        metadata.set("encoding", audioFormat.getEncoding().toString());

        // Javadoc suggests that some of the following properties might
        // be available, but I had no success in finding any:

        // "duration" Long playback duration of the file in microseconds
        // "author" String name of the author of this file
        // "title" String title of this file
        // "copyright" String copyright message
        // "date" Date date of the recording or release
        // "comment" String an arbitrary text

        addMetadata(metadata, fileFormat.properties());
        addMetadata(metadata, audioFormat.properties());
    } catch (UnsupportedAudioFileException e) {
        // There is no way to know whether this exception was
        // caused by the document being corrupted or by the format
        // just being unsupported. So we do nothing.
    }

    XHTMLContentHandler xhtml = new XHTMLContentHandler(handler, metadata);
    xhtml.startDocument();
    xhtml.endDocument();
}