Usage examples for the javax.sound.sampled.AudioInputStream constructor
public AudioInputStream(InputStream stream, AudioFormat format, long length)
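The length argument is counted in sample frames, not bytes; pass AudioSystem.NOT_SPECIFIED when the length is unknown. A minimal, self-contained sketch (the 8 kHz, 16-bit mono format and the silent buffer are illustrative assumptions, not taken from the examples below):

import java.io.ByteArrayInputStream;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;

public class ConstructorSketch {
    public static void main(String[] args) throws Exception {
        // One second of 8 kHz, 16-bit, mono, signed, little-endian silence.
        byte[] pcm = new byte[16000];
        AudioFormat format = new AudioFormat(8000f, 16, 1, true, false);
        // The constructor's length parameter is in frames: total bytes / frame size.
        long frames = pcm.length / format.getFrameSize();
        AudioInputStream ais = new AudioInputStream(new ByteArrayInputStream(pcm), format, frames);
        System.out.println("frame length: " + ais.getFrameLength()); // prints 8000
    }
}

Several of the examples below hinge on this frames-versus-bytes distinction.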
From source file:Main.java
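Captures about one second of audio from the microphone into a byte array, then wraps the recorded bytes in an AudioInputStream for playback through a SourceDataLine.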
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.SourceDataLine;
import javax.sound.sampled.TargetDataLine;

public class Main {
    public static void main(String[] args) throws Exception {
        final ByteArrayOutputStream out = new ByteArrayOutputStream();
        float sampleRate = 8000;
        int sampleSizeInBits = 8;
        int channels = 1;
        boolean signed = true;
        boolean bigEndian = true;
        final AudioFormat format = new AudioFormat(sampleRate, sampleSizeInBits, channels, signed, bigEndian);

        // Capture roughly one second of audio from the microphone.
        DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);
        final TargetDataLine line = (TargetDataLine) AudioSystem.getLine(info);
        line.open(format);
        line.start();
        Runnable runner = new Runnable() {
            int bufferSize = (int) format.getSampleRate() * format.getFrameSize();
            byte[] buffer = new byte[bufferSize];

            public void run() {
                try {
                    int count = line.read(buffer, 0, buffer.length);
                    if (count > 0) {
                        out.write(buffer, 0, count);
                    }
                    out.close();
                } catch (IOException e) {
                    System.err.println("I/O problems: " + e);
                    System.exit(-1);
                }
            }
        };
        Thread captureThread = new Thread(runner);
        captureThread.start();
        captureThread.join(); // wait until the capture finishes before playing back

        // Wrap the recorded bytes in an AudioInputStream; the length is in frames.
        byte[] audio = out.toByteArray();
        InputStream input = new ByteArrayInputStream(audio);
        final AudioInputStream ais = new AudioInputStream(input, format, audio.length / format.getFrameSize());

        // The playback line needs its own SourceDataLine DataLine.Info.
        DataLine.Info playbackInfo = new DataLine.Info(SourceDataLine.class, format);
        final SourceDataLine line1 = (SourceDataLine) AudioSystem.getLine(playbackInfo);
        line1.open(format);
        line1.start();
        runner = new Runnable() {
            int bufferSize = (int) format.getSampleRate() * format.getFrameSize();
            byte[] buffer = new byte[bufferSize];

            public void run() {
                try {
                    int count;
                    while ((count = ais.read(buffer, 0, buffer.length)) != -1) {
                        if (count > 0) {
                            line1.write(buffer, 0, count);
                        }
                    }
                    line1.drain();
                    line1.close();
                } catch (IOException e) {
                    System.err.println("I/O problems: " + e);
                    System.exit(-3);
                }
            }
        };
        Thread playThread = new Thread(runner);
        playThread.start();
    }
}
From source file:org.sipfoundry.voicemail.EmailFormatterTest.java
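Builds a test WAV file by filling a byte array with a constant value and writing it out through an AudioInputStream.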
private void makeWaves(File wavFile, byte filler, int length) throws IOException {
    byte[] fill = new byte[length];
    for (int i = 0; i < length; i++) {
        fill[i] = filler;
    }
    // The format is 16-bit mono, so each frame is two bytes: the stream
    // length must be given in frames, not bytes.
    AudioFormat format = new AudioFormat(8000, 16, 1, true, false);
    AudioInputStream ais = new AudioInputStream(new ByteArrayInputStream(fill), format,
            fill.length / format.getFrameSize());
    AudioSystem.write(ais, AudioFileFormat.Type.WAVE, wavFile);
}
From source file:com.gameminers.mav.firstrun.TeachSphinxThread.java
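Records spoken training prompts for CMU Sphinx, wrapping each captured utterance in an AudioInputStream and saving it as a WAV file.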
@Override
public void run() {
    try {
        File training = new File(Mav.configDir, "training-data");
        training.mkdirs();
        while (Mav.silentFrames < 30) {
            sleep(100);
        }
        Mav.listening = true;
        InputStream prompts = ClassLoader.getSystemResourceAsStream("resources/sphinx/train/arcticAll.prompts");
        List<String> arctic = IOUtils.readLines(prompts);
        IOUtils.closeQuietly(prompts);
        Mav.audioManager.playClip("listen1");
        byte[] buf = new byte[2048];
        int start = 0;
        int end = 21;
        AudioInputStream in = Mav.audioManager.getSource().getAudioInputStream();
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        while (true) {
            for (int i = start; i < end; i++) {
                baos.reset();
                String prompt = arctic.get(i);
                RenderState.setText("\u00A7LRead this aloud:\n" + Fonts.wrapStringToFit(
                        prompt.substring(prompt.indexOf(':') + 1), Fonts.base[1], Display.getWidth()));
                File file = new File(training, prompt.substring(0, prompt.indexOf(':')) + ".wav");
                file.createNewFile();
                int read = 0;
                // Discard buffers until the user starts speaking.
                while (Mav.silentListenFrames > 0) {
                    read = in.read(buf);
                }
                baos.write(buf, 0, read);
                // Record until enough consecutive silent frames indicate the
                // user has stopped speaking.
                while (Mav.silentListenFrames < 60) {
                    read = in.read(buf);
                    if (read == -1) {
                        RenderState.setText("\u00A7LAn error occurred\nUnexpected end of stream\nPlease restart Mav");
                        RenderState.targetHue = 0;
                        return;
                    }
                    baos.write(buf, 0, read);
                }
                // The frame count assumes two-byte (16-bit mono) frames.
                AudioSystem.write(new AudioInputStream(new ByteArrayInputStream(baos.toByteArray()),
                        in.getFormat(), baos.size() / 2), AudioFileFormat.Type.WAVE, file);
                Mav.audioManager.playClip("notif2");
            }
            Mav.ttsInterface.say(Mav.phoneticUserName
                    + ", that should be enough for now. Do you want to keep training anyway?");
            RenderState.setText("\u00A7LOkay, " + Mav.userName
                    + "\nI think that should be\nenough. Do you want to\nkeep training anyway?\n\u00A7s(Say 'Yes' or 'No' out loud)");
            break;
            //start = end + 1;
            //end += 20;
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
From source file:it.sardegnaricerche.voiceid.sr.VCluster.java
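Cuts the segments belonging to a voice cluster out of a source WAV file and concatenates them, via chained AudioInputStreams, into one per-cluster file.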
public void trimSegments(File inputFile) throws IOException {
    String base = Utils.getBasename(inputFile);
    File mydir = new File(base);
    mydir.mkdirs();
    String mywav = mydir.getAbsolutePath() + "/" + this.getLabel() + ".wav";
    AudioFileFormat fileFormat = null;
    AudioInputStream inputStream = null;
    AudioInputStream shortenedStream = null;
    AudioInputStream current = null;
    int bytesPerSecond = 0;
    long framesOfAudioToCopy = 0;
    wavFile = new File(mywav);
    try {
        fileFormat = AudioSystem.getAudioFileFormat(inputFile);
        AudioFormat format = fileFormat.getFormat();
        boolean firstTime = true;
        for (VSegment s : this.getSegments()) {
            bytesPerSecond = format.getFrameSize() * (int) format.getFrameRate();
            // Re-open the source file and skip to the start of this segment.
            inputStream = AudioSystem.getAudioInputStream(inputFile);
            inputStream.skip((int) (s.getStart() * 100) * bytesPerSecond / 100);
            framesOfAudioToCopy = (int) (s.getDuration() * 100) * (int) format.getFrameRate() / 100;
            if (firstTime) {
                shortenedStream = new AudioInputStream(inputStream, format, framesOfAudioToCopy);
            } else {
                // Chain the previous result and the new segment into one
                // stream whose frame length is the running total.
                current = new AudioInputStream(inputStream, format, framesOfAudioToCopy);
                shortenedStream = new AudioInputStream(new SequenceInputStream(shortenedStream, current),
                        format, shortenedStream.getFrameLength() + framesOfAudioToCopy);
            }
            firstTime = false;
        }
        AudioSystem.write(shortenedStream, fileFormat.getType(), wavFile);
    } catch (Exception e) {
        logger.severe(e.getMessage());
        e.printStackTrace();
    } finally {
        if (inputStream != null) {
            try {
                inputStream.close();
            } catch (Exception e) {
                logger.severe(e.getMessage());
            }
        }
        if (shortenedStream != null) {
            try {
                shortenedStream.close();
            } catch (Exception e) {
                logger.severe(e.getMessage());
            }
        }
        if (current != null) {
            try {
                current.close();
            } catch (Exception e) {
                logger.severe(e.getMessage());
            }
        }
    }
    logger.fine("filename: " + wavFile.getAbsolutePath());
}
From source file:be.tarsos.transcoder.ffmpeg.FFMPEGExecutor.java
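Wraps the standard output of an ffmpeg decoding subprocess in an AudioInputStream of unspecified length, after skipping the WAV header.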
public AudioInputStream pipe(Attributes attributes) throws EncoderException {
    String pipeEnvironment;
    String pipeArgument;
    File pipeLogFile;
    int pipeBuffer;
    if (System.getProperty("os.name").indexOf("indows") > 0) {
        pipeEnvironment = "cmd.exe";
        pipeArgument = "/C";
    } else {
        pipeEnvironment = "/bin/bash";
        pipeArgument = "-c";
    }
    pipeLogFile = new File("decoder_log.txt");
    // Buffer 1/4 second of audio.
    pipeBuffer = attributes.getSamplingRate() / 4;
    AudioFormat audioFormat = Encoder.getTargetAudioFormat(attributes);
    String command = toString();
    ProcessBuilder pb = new ProcessBuilder(pipeEnvironment, pipeArgument, command);
    pb.redirectError(Redirect.appendTo(pipeLogFile));
    LOG.fine("Starting piped decoding process");
    final Process process;
    try {
        process = pb.start();
    } catch (IOException e1) {
        throw new EncoderException("Problem starting piped sub process: " + e1.getMessage());
    }
    InputStream stdOut = new BufferedInputStream(process.getInputStream(), pipeBuffer);
    // Read and ignore the 46-byte WAV header; only the PCM samples are piped
    // into the AudioInputStream.
    byte[] header = new byte[46];
    double sleepSeconds = 0;
    double timeoutLimit = 20; // seconds
    try {
        while (stdOut.available() < header.length) {
            try {
                Thread.sleep(100);
                sleepSeconds += 0.1;
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
            if (sleepSeconds > timeoutLimit) {
                throw new Error("Could not read from pipe within " + timeoutLimit + " seconds: timeout!");
            }
        }
        int bytesRead = stdOut.read(header);
        if (bytesRead != header.length) {
            throw new EncoderException(
                    "Could not read complete WAV-header from pipe. This could result in mis-aligned frames!");
        }
    } catch (IOException e1) {
        throw new EncoderException("Problem reading from piped sub process: " + e1.getMessage());
    }
    // The stream length is not known in advance, so pass AudioSystem.NOT_SPECIFIED.
    final AudioInputStream audioStream = new AudioInputStream(stdOut, audioFormat, AudioSystem.NOT_SPECIFIED);
    // This thread waits for the end of the subprocess.
    new Thread(new Runnable() {
        public void run() {
            try {
                process.waitFor();
                LOG.fine("Finished piped decoding process");
            } catch (InterruptedException e) {
                LOG.severe("Interrupted while waiting for sub process exit.");
                e.printStackTrace();
            }
        }
    }, "Decoding Pipe Reader").start();
    return audioStream;
}
From source file:edu.mit.csail.sls.wami.relay.WamiRelay.java
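Tees the recognizer's audio input into a byte buffer so the raw utterance can be retrieved and logged once recognition completes.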
/**
 * Delegates recognition to the {@link IRecognizer} associated with this
 * relay, providing the appropriate callbacks.
 *
 * @param audioIn
 *            The audio input stream to recognize
 * @throws RecognizerException
 *             On recognition error
 * @throws IOException
 *             On error reading from the audioIn stream
 */
public void recognize(AudioInputStream audioIn) throws RecognizerException, IOException {
    final ByteArrayOutputStream audioByteStream = new ByteArrayOutputStream();
    final AudioFormat audioFormat = audioIn.getFormat();
    // Tee the audio so a copy of everything the recognizer reads is kept
    // in audioByteStream.
    TeeInputStream tee = new TeeInputStream(audioIn, audioByteStream, true);
    AudioInputStream forkedStream = new AudioInputStream(tee, audioFormat, AudioSystem.NOT_SPECIFIED);
    if (recognizer == null) {
        throw new RecognizerException("No recognizer specified!");
    } else if (wamiApp == null) {
        throw new RecognizerException("No wami app specified!");
    }
    recognizer.recognize(forkedStream, new IRecognitionListener() {
        private long startedTimestamp;

        public void onRecognitionResult(final IRecognitionResult result) {
            // If the result is final, switch over the audio stream before
            // delegating, so that getLastRecordedAudio() works properly
            // inside onRecognitionResult().
            long timestampMillis = System.currentTimeMillis();
            if (!result.isIncremental()) {
                try {
                    audioByteStream.close();
                } catch (IOException e) {
                    e.printStackTrace(); // shouldn't occur
                }
                synchronized (lastAudioLock) {
                    lastAudioBytes = audioByteStream.toByteArray();
                    lastAudioFormat = audioFormat;
                }
            }
            wamiApp.onRecognitionResult(result);
            logEvent(result, timestampMillis);
            if (!result.isIncremental()) {
                logUtterance(audioByteStream.toByteArray(), audioFormat, startedTimestamp);
            }
        }

        public void onRecognitionStarted() {
            startedTimestamp = System.currentTimeMillis();
            logEvent(new RecognitionStartedLogEvent(), startedTimestamp);
            wamiApp.onRecognitionStarted();
        }
    });
}
From source file:org.sipfoundry.voicemail.mailbox.AbstractMailboxManager.java
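Concatenates two audio files of the same format by chaining them with a SequenceInputStream whose combined frame length is the sum of the two clips.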
protected void concatAudio(File newFile, File orig1, File orig2) throws Exception {
    String operation = "dunno";
    AudioInputStream clip1 = null;
    AudioInputStream clip2 = null;
    AudioInputStream concatStream = null;
    try {
        operation = "getting AudioInputStream from " + orig1.getPath();
        clip1 = AudioSystem.getAudioInputStream(orig1);
        operation = "getting AudioInputStream from " + orig2.getPath();
        clip2 = AudioSystem.getAudioInputStream(orig2);
        operation = "building SequenceInputStream";
        // Chain the two clips; the combined frame length is the sum of both.
        concatStream = new AudioInputStream(new SequenceInputStream(clip1, clip2), clip1.getFormat(),
                clip1.getFrameLength() + clip2.getFrameLength());
        operation = "writing SequenceInputStream to " + newFile.getPath();
        AudioSystem.write(concatStream, AudioFileFormat.Type.WAVE, newFile);
        LOG.info("VmMessage::concatAudio created combined file " + newFile.getPath());
    } catch (Exception e) {
        String trouble = "VmMessage::concatAudio Problem while " + operation;
        throw new Exception(trouble, e);
    } finally {
        IOUtils.closeQuietly(clip1);
        IOUtils.closeQuietly(clip2);
        IOUtils.closeQuietly(concatStream);
    }
}
From source file:org.sipfoundry.voicemail.VmMessage.java
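An earlier variant of the concatAudio() helper above, without the finally-block stream cleanup.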
/**
 * Combine two WAV files into one bigger one.
 *
 * @param newFile
 * @param orig1
 * @param orig2
 * @throws Exception
 */
static void concatAudio(File newFile, File orig1, File orig2) throws Exception {
    String operation = "dunno";
    try {
        operation = "getting AudioInputStream from " + orig1.getPath();
        AudioInputStream clip1 = AudioSystem.getAudioInputStream(orig1);
        operation = "getting AudioInputStream from " + orig2.getPath();
        AudioInputStream clip2 = AudioSystem.getAudioInputStream(orig2);
        operation = "building SequenceInputStream";
        AudioInputStream concatStream = new AudioInputStream(new SequenceInputStream(clip1, clip2),
                clip1.getFormat(), clip1.getFrameLength() + clip2.getFrameLength());
        operation = "writing SequenceInputStream to " + newFile.getPath();
        AudioSystem.write(concatStream, AudioFileFormat.Type.WAVE, newFile);
        LOG.info("VmMessage::concatAudio created combined file " + newFile.getPath());
    } catch (Exception e) {
        String trouble = "VmMessage::concatAudio Problem while " + operation;
        // LOG.error(trouble, e);
        throw new Exception(trouble, e);
    }
}