Example usage for android.media AudioTrack AudioTrack

Introduction

On this page you can find example usages of the android.media AudioTrack constructor, collected from open-source projects.

Prototype

private AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes, int mode,
            int sessionId, boolean offload) throws IllegalArgumentException 

Usage

From source file:Main.java

public static AudioTrack createTrack(int samplingRate) {
    AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, samplingRate, AudioFormat.CHANNEL_OUT_STEREO,
            AudioFormat.ENCODING_DEFAULT, samplingRate, AudioTrack.MODE_STREAM);
    return track;
}
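
This example passes samplingRate as bufferSizeInBytes, which happens to be large enough at common rates but is not guaranteed. A safer sketch of the same idea queries AudioTrack.getMinBufferSize() first (ENCODING_PCM_16BIT is substituted for ENCODING_DEFAULT here because getMinBufferSize needs a concrete encoding):

public static AudioTrack createTrackChecked(int samplingRate) {
    // getMinBufferSize returns a negative error code for unsupported configurations
    int minBuf = AudioTrack.getMinBufferSize(samplingRate,
            AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT);
    if (minBuf <= 0) {
        throw new IllegalArgumentException("unsupported sampling rate: " + samplingRate);
    }
    return new AudioTrack(AudioManager.STREAM_MUSIC, samplingRate,
            AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT,
            minBuf, AudioTrack.MODE_STREAM);
}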

From source file:zlyh.dmitry.recaller.threading.PlayBlockThread.java

@Override
public void run() {
    AudioTrack audioTrack = null;
    FileInputStream in = null;

    try {
        File rawpcm = new File(path);
        if (!rawpcm.exists()) {
            this.interrupt();
        }

        togglePlaying(true);

        final int audioLength = (int) rawpcm.length();
        // Note: this queries AudioRecord.getMinBufferSize for a playback track;
        // AudioTrack.getMinBufferSize with CHANNEL_OUT_MONO is the matching call.
        final int minBufferSize = AudioRecord.getMinBufferSize(RecordRunnable.frequency,
                AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
        audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, RecordRunnable.frequency,
                AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT, minBufferSize,
                AudioTrack.MODE_STREAM);

        final int block = 256 * 1024;
        byte[] byteData = new byte[block];

        try {
            in = new FileInputStream(rawpcm);
        } catch (FileNotFoundException e) {
            e.printStackTrace();
            this.interrupt();
        }

        if (in != null) {
            try {
                int bytesread = 0;
                int offset;
                audioTrack.play();
                while (bytesread < audioLength && !isInterrupted()) {
                    offset = in.read(byteData, 0, block);
                    if (offset != -1) {
                        audioTrack.write(byteData, 0, offset);
                        bytesread += offset;
                    } else {
                        break;
                    }
                }
                in.close();

                togglePlaying(false);

                if (audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING) {
                    audioTrack.stop();
                }

                if (audioTrack.getState() == AudioTrack.STATE_INITIALIZED) {
                    audioTrack.release();
                }
            } catch (Exception e) {
                e.printStackTrace();
                try {
                    in.close();
                } catch (IOException e1) {
                    e1.printStackTrace();
                }

                if (audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING) {
                    audioTrack.stop();
                }
                if (audioTrack.getState() == AudioTrack.STATE_INITIALIZED) {
                    audioTrack.release();
                }
                togglePlaying(false);

            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        if (audioTrack != null) {
            if (audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING) {
                audioTrack.stop();
            }
            if (audioTrack.getState() == AudioTrack.STATE_INITIALIZED) {
                audioTrack.release();
            }
        }

        if (in != null) {
            try {
                in.close();
            } catch (IOException e1) {
                e1.printStackTrace();
            }
        }
        togglePlaying(false);

    }

}
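
The cleanup above is repeated across three different catch paths. A sketch of the same playback loop with try-with-resources and a single finally block (path, RecordRunnable.frequency and togglePlaying are assumed from the surrounding class; AudioTrack.getMinBufferSize replaces the AudioRecord call since this sizes a playback buffer):

@Override
public void run() {
    AudioTrack track = null;
    togglePlaying(true);
    try (FileInputStream in = new FileInputStream(new File(path))) {
        int minBufferSize = AudioTrack.getMinBufferSize(RecordRunnable.frequency,
                AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
        track = new AudioTrack(AudioManager.STREAM_MUSIC, RecordRunnable.frequency,
                AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
                minBufferSize, AudioTrack.MODE_STREAM);
        track.play();

        byte[] buffer = new byte[256 * 1024];
        int read;
        while (!isInterrupted() && (read = in.read(buffer)) != -1) {
            track.write(buffer, 0, read);
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        // one cleanup path regardless of how the loop exited
        if (track != null) {
            if (track.getPlayState() == AudioTrack.PLAYSTATE_PLAYING) {
                track.stop();
            }
            track.release();
        }
        togglePlaying(false);
    }
}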

From source file:com.ibm.watson.developer_cloud.android.text_to_speech.v1.TTSUtility.java

private void initPlayer() {
    stopTtsPlayer();
    // IMPORTANT: minimum required buffer size for the successful creation of an AudioTrack instance in streaming mode.
    int bufferSize = AudioTrack.getMinBufferSize(sampleRate, AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT);

    synchronized (this) {
        audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate, AudioFormat.CHANNEL_OUT_MONO,
                AudioFormat.ENCODING_PCM_16BIT, bufferSize, AudioTrack.MODE_STREAM);
        // the reference is never null right after new; check that initialization succeeded instead
        if (audioTrack.getState() == AudioTrack.STATE_INITIALIZED)
            audioTrack.play();
    }
}

From source file:net.sf.asap.Player.java

public void run() {
    int config = info.getChannels() == 1 ? AudioFormat.CHANNEL_CONFIGURATION_MONO
            : AudioFormat.CHANNEL_CONFIGURATION_STEREO;
    int len = AudioTrack.getMinBufferSize(ASAP.SAMPLE_RATE, config, AudioFormat.ENCODING_PCM_8BIT);
    if (len < 16384)
        len = 16384;
    final byte[] buffer = new byte[len];
    audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, ASAP.SAMPLE_RATE, config,
            AudioFormat.ENCODING_PCM_8BIT, len, AudioTrack.MODE_STREAM);
    audioTrack.play();

    for (;;) {
        synchronized (this) {
            if (len < buffer.length || isPaused()) {
                try {
                    wait();
                } catch (InterruptedException ex) {
                }
            }
            if (stop) {
                audioTrack.stop();
                return;
            }
        }
        synchronized (asap) {
            len = asap.generate(buffer, buffer.length, ASAPSampleFormat.U8);
        }
        audioTrack.write(buffer, 0, len);
    }
}

From source file:com.example.rttytranslator.Dsp_service.java

public void startAudio() {
    if (!_enableDecoder)
        return;

    //boolean mic = this.getPackageManager().hasSystemFeature(PackageManager.FEATURE_MICROPHONE);

    System.out.println("isRecording: " + isRecording);

    if (!isRecording) {
        isRecording = true;

        buffsize = AudioRecord.getMinBufferSize(8000, AudioFormat.CHANNEL_IN_MONO,
                AudioFormat.ENCODING_PCM_16BIT);
        buffsize = Math.max(buffsize, 3000);

        mRecorder = new AudioRecord(AudioSource.MIC, 8000, AudioFormat.CHANNEL_IN_MONO,
                AudioFormat.ENCODING_PCM_16BIT, buffsize);

        mPlayer = new AudioTrack(AudioManager.STREAM_MUSIC, 8000, AudioFormat.CHANNEL_OUT_MONO,
                AudioFormat.ENCODING_PCM_16BIT, 2 * buffsize, AudioTrack.MODE_STREAM);

        if (enableEcho) {
            AudioManager manager = (AudioManager) getSystemService(Context.AUDIO_SERVICE);
            manager.setMode(AudioManager.MODE_IN_CALL);
            manager.setSpeakerphoneOn(true);
        }

        if (mRecorder.getState() != AudioRecord.STATE_INITIALIZED) {

            mRecorder = new AudioRecord(AudioSource.DEFAULT, 8000, AudioFormat.CHANNEL_IN_MONO,
                    AudioFormat.ENCODING_PCM_16BIT, buffsize);

        }

        mRecorder.startRecording();
        System.out.println("STARTING THREAD");
        Thread ct = new captureThread();

        ct.start();
    }
}

From source file:com.tt.engtrain.showcontent.ContentListItemActivity.java

@Override
protected void onCreate(final Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_baselist);
    mSpeechEvaluator = SpeechEvaluator.createEvaluator(ContentListItemActivity.this, null);
    mToast = Toast.makeText(this, "", Toast.LENGTH_SHORT);
    int iMinBufSize = AudioTrack.getMinBufferSize(8000, AudioFormat.CHANNEL_CONFIGURATION_STEREO,
            AudioFormat.ENCODING_PCM_16BIT);
    audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, 8000, AudioFormat.CHANNEL_CONFIGURATION_STEREO,
            AudioFormat.ENCODING_PCM_16BIT, iMinBufSize, AudioTrack.MODE_STREAM);
    mTts = SpeechSynthesizer.createSynthesizer(this, mTtsInitListener);
    initData();
    initListView();
    // initspker();

}

From source file:net.reichholf.dreamdroid.fragment.SignalFragment.java

void playSound(double freqOfTone) {
    double duration = 0.1; // seconds
    int sampleRate = 8000; // samples per second

    double dnumSamples = duration * sampleRate;
    dnumSamples = Math.ceil(dnumSamples);
    int numSamples = (int) dnumSamples;
    double sample[] = new double[numSamples];
    byte generatedSnd[] = new byte[2 * numSamples];

    for (int i = 0; i < numSamples; ++i) { // Fill the sample array
        sample[i] = Math.sin(freqOfTone * 2 * Math.PI * i / (sampleRate));
    }

    // convert to 16 bit pcm sound array
    // assumes the sample buffer is normalized.
    int idx = 0;
    int i = 0;

    int ramp = numSamples / 20; // amplitude ramp length, as a fraction of the sample count

    for (i = 0; i < numSamples; ++i) { // ramp the amplitude up and down to avoid clicks
        if (i < ramp) {
            double dVal = sample[i];
            // Ramp up to maximum
            final short val = (short) ((dVal * 32767 * i / ramp));
            // in 16 bit wav PCM, first byte is the low order byte
            generatedSnd[idx++] = (byte) (val & 0x00ff);
            generatedSnd[idx++] = (byte) ((val & 0xff00) >>> 8);
        } else if (i < numSamples - ramp) {
            // Max amplitude for most of the samples
            double dVal = sample[i];
            // scale to maximum amplitude
            final short val = (short) ((dVal * 32767));
            // in 16 bit wav PCM, first byte is the low order byte
            generatedSnd[idx++] = (byte) (val & 0x00ff);
            generatedSnd[idx++] = (byte) ((val & 0xff00) >>> 8);
        } else {
            double dVal = sample[i];
            // Ramp down to zero
            final short val = (short) ((dVal * 32767 * (numSamples - i) / ramp));
            // in 16 bit wav PCM, first byte is the low order byte
            generatedSnd[idx++] = (byte) (val & 0x00ff);
            generatedSnd[idx++] = (byte) ((val & 0xff00) >>> 8);
        }
    }

    AudioTrack audioTrack = null; // Get audio track
    try {
        audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate, AudioFormat.CHANNEL_OUT_MONO,
                AudioFormat.ENCODING_PCM_16BIT, (int) numSamples * 2, AudioTrack.MODE_STATIC);
        // Load the track
        audioTrack.write(generatedSnd, 0, generatedSnd.length);
        audioTrack.play(); // Play the track
    } catch (Exception e) {
        // ignored: if the track could not be created, the loop below treats playback as finished
    }

    int x = 0;
    do { // Monitor playback to find out when it is done
        if (audioTrack != null)
            x = audioTrack.getPlaybackHeadPosition();
        else
            x = numSamples;
    } while (x < numSamples);

    if (audioTrack != null)
        audioTrack.release(); // Track play done. Release track.
}
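
The do-while above busy-waits on getPlaybackHeadPosition(). A sketch that avoids polling sets a playback marker instead; it assumes the same audioTrack and numSamples as above (mono PCM has one sample per frame, so numSamples is also the frame count):

// fires once the playback head passes the last frame
audioTrack.setNotificationMarkerPosition(numSamples);
audioTrack.setPlaybackPositionUpdateListener(new AudioTrack.OnPlaybackPositionUpdateListener() {
    @Override
    public void onMarkerReached(AudioTrack track) {
        track.release(); // playback finished; free the native resources
    }

    @Override
    public void onPeriodicNotification(AudioTrack track) {
        // unused in this sketch
    }
});
audioTrack.play();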

From source file:uk.co.armedpineapple.cth.SDLActivity.java

public static Object audioInit(int sampleRate, boolean is16Bit, boolean isStereo, int desiredFrames) {
    int channelConfig = isStereo ? AudioFormat.CHANNEL_OUT_STEREO : AudioFormat.CHANNEL_OUT_MONO;
    int audioFormat = is16Bit ? AudioFormat.ENCODING_PCM_16BIT : AudioFormat.ENCODING_PCM_8BIT;
    int frameSize = (isStereo ? 2 : 1) * (is16Bit ? 2 : 1);

    Log.v("SDL", "SDL audio: wanted " + (isStereo ? "stereo" : "mono") + " " + (is16Bit ? "16-bit" : "8-bit")
            + " " + (sampleRate / 1000f) + "kHz, " + desiredFrames + " frames buffer");

    // Let the user pick a larger buffer if they really want -- but ye
    // gods they probably shouldn't, the minimums are horrifyingly high
    // latency already
    desiredFrames = Math.max(desiredFrames,
            (AudioTrack.getMinBufferSize(sampleRate, channelConfig, audioFormat) + frameSize - 1) / frameSize);

    mAudioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate, channelConfig, audioFormat,
            desiredFrames * frameSize, AudioTrack.MODE_STREAM);

    audioStartThread();

    Log.v("SDL",
            "SDL audio: got " + ((mAudioTrack.getChannelCount() >= 2) ? "stereo" : "mono") + " "
                    + ((mAudioTrack.getAudioFormat() == AudioFormat.ENCODING_PCM_16BIT) ? "16-bit" : "8-bit")
                    + " " + (mAudioTrack.getSampleRate() / 1000f) + "kHz, " + desiredFrames + " frames buffer");

    if (is16Bit) {
        audioBuffer = new short[desiredFrames * (isStereo ? 2 : 1)];
    } else {
        audioBuffer = new byte[desiredFrames * (isStereo ? 2 : 1)];
    }
    return audioBuffer;
}

From source file:net.sf.asap.PlayerService.java

public void run() {
    // read file
    String filename = uri.getPath();
    byte[] module = new byte[ASAPInfo.MAX_MODULE_LENGTH];
    int moduleLen;
    try {
        InputStream is;
        switch (uri.getScheme()) {
        case "file":
            if (Util.isZip(filename)) {
                String zipFilename = filename;
                filename = uri.getFragment();
                is = new ZipInputStream(zipFilename, filename);
            } else
                is = new FileInputStream(filename);
            break;
        case "http":
            is = httpGet(uri);
            break;
        default:
            throw new FileNotFoundException(uri.toString());
        }
        moduleLen = Util.readAndClose(is, module);
    } catch (IOException ex) {
        showError(R.string.error_reading_file);
        return;
    }

    // load file
    try {
        asap.load(filename, module, moduleLen);
        info = asap.getInfo();
        switch (song) {
        case SONG_DEFAULT:
            song = info.getDefaultSong();
            break;
        case SONG_LAST:
            song = info.getSongs() - 1;
            break;
        default:
            break;
        }
        playSong();
    } catch (Exception ex) {
        showError(R.string.invalid_file);
        return;
    }

    PendingIntent contentIntent = PendingIntent.getActivity(this, 0, new Intent(this, Player.class), 0);
    String title = info.getTitleOrFilename();
    Notification notification = new Notification(R.drawable.icon, title, System.currentTimeMillis());
    notification.flags |= Notification.FLAG_ONGOING_EVENT;
    notification.setLatestEventInfo(this, title, info.getAuthor(), contentIntent);
    startForegroundCompat(NOTIFICATION_ID, notification);

    // playback
    int channelConfig = info.getChannels() == 1 ? AudioFormat.CHANNEL_CONFIGURATION_MONO
            : AudioFormat.CHANNEL_CONFIGURATION_STEREO;
    int bufferLen = AudioTrack.getMinBufferSize(ASAP.SAMPLE_RATE, channelConfig,
            AudioFormat.ENCODING_PCM_16BIT) >> 1;
    if (bufferLen < 16384)
        bufferLen = 16384;
    final byte[] byteBuffer = new byte[bufferLen << 1];
    final short[] shortBuffer = new short[bufferLen];
    audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, ASAP.SAMPLE_RATE, channelConfig,
            AudioFormat.ENCODING_PCM_16BIT, bufferLen << 1, AudioTrack.MODE_STREAM);
    audioTrack.play();

    for (;;) {
        synchronized (this) {
            if (bufferLen < shortBuffer.length || isPaused()) {
                try {
                    wait();
                } catch (InterruptedException ex) {
                }
            }
            if (stop) {
                audioTrack.stop();
                return;
            }
        }
        synchronized (asap) {
            int pos = seekPosition;
            if (pos >= 0) {
                seekPosition = -1;
                try {
                    asap.seek(pos);
                } catch (Exception ex) {
                }
            }
            bufferLen = asap.generate(byteBuffer, byteBuffer.length, ASAPSampleFormat.S16_L_E) >> 1;
        }
        // reassemble little-endian 16-bit samples from the byte buffer
        for (int i = 0; i < bufferLen; i++)
            shortBuffer[i] = (short) ((byteBuffer[i << 1] & 0xff) | byteBuffer[i << 1 | 1] << 8);
        audioTrack.write(shortBuffer, 0, bufferLen);
    }
}
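
The inner for loop above assembles little-endian 16-bit samples by hand. An equivalent bulk conversion with java.nio, operating on the same byteBuffer, shortBuffer and bufferLen, would be:

// requires: import java.nio.ByteBuffer; import java.nio.ByteOrder;
ByteBuffer.wrap(byteBuffer, 0, bufferLen << 1)
        .order(ByteOrder.LITTLE_ENDIAN)
        .asShortBuffer()
        .get(shortBuffer, 0, bufferLen);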

From source file:com.brejza.matt.habmodem.Dsp_service.java

public void startAudio() {
    if (!_enableDecoder)
        return;

    boolean mic = this.getPackageManager().hasSystemFeature(PackageManager.FEATURE_MICROPHONE);

    System.out.println("isRecording: " + isRecording);
    logEvent("Starting Audio. Mic available: " + mic, false);
    if (!isRecording) {
        isRecording = true;

        buffsize = AudioRecord.getMinBufferSize(8000, AudioFormat.CHANNEL_IN_MONO,
                AudioFormat.ENCODING_PCM_16BIT);
        buffsize = Math.max(buffsize, 3000);

        mRecorder = new AudioRecord(AudioSource.MIC, 8000, AudioFormat.CHANNEL_IN_MONO,
                AudioFormat.ENCODING_PCM_16BIT, buffsize);

        mPlayer = new AudioTrack(AudioManager.STREAM_MUSIC, 8000, AudioFormat.CHANNEL_OUT_MONO,
                AudioFormat.ENCODING_PCM_16BIT, 2 * buffsize, AudioTrack.MODE_STREAM);

        if (enableEcho) {
            AudioManager manager = (AudioManager) getSystemService(Context.AUDIO_SERVICE);
            manager.setMode(AudioManager.MODE_IN_CALL);
            manager.setSpeakerphoneOn(true);
        }

        if (mRecorder.getState() != AudioRecord.STATE_INITIALIZED) {

            mRecorder = new AudioRecord(AudioSource.DEFAULT, 8000, AudioFormat.CHANNEL_IN_MONO,
                    AudioFormat.ENCODING_PCM_16BIT, buffsize);

            if (mRecorder.getState() != AudioRecord.STATE_INITIALIZED) {
                logEvent("Error - Could not initialise audio", true);
                return;
            }
            logEvent("Using default audio source", false);
        }

        mRecorder.startRecording();
        System.out.println("STARTING THREAD");
        Thread ct = new captureThread();
        logEvent("Starting Audio Thread.", false);
        setDecoderRunningNotification();
        ct.start();
    }
}