Example usage for java.util.concurrent LinkedBlockingQueue LinkedBlockingQueue

List of usage examples for java.util.concurrent LinkedBlockingQueue LinkedBlockingQueue

Introduction

In this page you can find the example usage for java.util.concurrent LinkedBlockingQueue LinkedBlockingQueue.

Prototype

public LinkedBlockingQueue() 

Source Link

Document

Creates a LinkedBlockingQueue with a capacity of Integer.MAX_VALUE.

Usage

From source file:com.clickha.nifi.processors.FetchFileTransferV2.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final StopWatch stopWatch = new StopWatch(true);
    final String host = context.getProperty(HOSTNAME).evaluateAttributeExpressions(flowFile).getValue();
    final int port = context.getProperty(UNDEFAULTED_PORT).evaluateAttributeExpressions(flowFile).asInteger();
    final String filename = context.getProperty(REMOTE_FILENAME).evaluateAttributeExpressions(flowFile)
            .getValue();

    // Try to get a pooled FileTransfer connection for this (host, port) pair.
    BlockingQueue<FileTransferIdleWrapper> transferQueue;
    synchronized (fileTransferMap) {
        final Tuple<String, Integer> tuple = new Tuple<>(host, port);

        transferQueue = fileTransferMap.get(tuple);
        if (transferQueue == null) {
            transferQueue = new LinkedBlockingQueue<>();
            fileTransferMap.put(tuple, transferQueue);
        }

        // periodically close idle connections
        if (System.currentTimeMillis() - lastClearTime > IDLE_CONNECTION_MILLIS) {
            closeConnections(false);
            lastClearTime = System.currentTimeMillis();
        }
    }

    // we have a queue of FileTransfer Objects. Get one from the queue or create a new one.
    FileTransferV2 transfer;
    FileTransferIdleWrapper transferWrapper = transferQueue.poll();
    if (transferWrapper == null) {
        transfer = createFileTransfer(context);
    } else {
        transfer = transferWrapper.getFileTransfer();
    }

    // Pull data from remote system.
    final InputStream in;
    try {
        in = transfer.getInputStream(filename, flowFile);

        flowFile = session.write(flowFile, new OutputStreamCallback() {
            @Override
            public void process(final OutputStream out) throws IOException {
                StreamUtils.copy(in, out);
                transfer.flush();
            }
        });
    } catch (final FileNotFoundException e) {
        // BUGFIX: the connection is still healthy on this path, so return it to the
        // pool instead of leaking it on the early return.
        transferQueue.offer(new FileTransferIdleWrapper(transfer, System.nanoTime()));
        getLogger().error(
                "Failed to fetch content for {} from filename {} on remote host {} because the file could not be found on the remote system; routing to {}",
                new Object[] { flowFile, filename, host, REL_NOT_FOUND.getName() });
        session.transfer(session.penalize(flowFile), REL_NOT_FOUND);
        session.getProvenanceReporter().route(flowFile, REL_NOT_FOUND);
        return;
    } catch (final PermissionDeniedException e) {
        // BUGFIX: likewise, requeue the healthy connection before returning.
        transferQueue.offer(new FileTransferIdleWrapper(transfer, System.nanoTime()));
        getLogger().error(
                "Failed to fetch content for {} from filename {} on remote host {} due to insufficient permissions; routing to {}",
                new Object[] { flowFile, filename, host, REL_PERMISSION_DENIED.getName() });
        session.transfer(session.penalize(flowFile), REL_PERMISSION_DENIED);
        session.getProvenanceReporter().route(flowFile, REL_PERMISSION_DENIED);
        return;
    } catch (final ProcessException | IOException e) {
        // Communication failure: the connection may be broken, so close it rather
        // than return it to the pool.
        try {
            transfer.close();
        } catch (final IOException e1) {
            getLogger().warn("Failed to close connection to {}:{} due to {}",
                    new Object[] { host, port, e.toString() }, e);
        }

        getLogger().error(
                "Failed to fetch content for {} from filename {} on remote host {}:{} due to {}; routing to comms.failure",
                new Object[] { flowFile, filename, host, port, e.toString() }, e);
        session.transfer(session.penalize(flowFile), REL_COMMS_FAILURE);
        return;
    }

    // Add FlowFile attributes describing where the content came from.
    final String protocolName = transfer.getProtocolName();
    final Map<String, String> attributes = new HashMap<>();
    attributes.put(protocolName + ".remote.host", host);
    attributes.put(protocolName + ".remote.port", String.valueOf(port));
    attributes.put(protocolName + ".remote.filename", filename);

    if (filename.contains("/")) {
        final String path = StringUtils.substringBeforeLast(filename, "/");
        final String filenameOnly = StringUtils.substringAfterLast(filename, "/");
        attributes.put(CoreAttributes.PATH.key(), path);
        attributes.put(CoreAttributes.FILENAME.key(), filenameOnly);
    } else {
        attributes.put(CoreAttributes.FILENAME.key(), filename);
    }
    flowFile = session.putAllAttributes(flowFile, attributes);

    // emit provenance event and transfer FlowFile
    session.getProvenanceReporter().fetch(flowFile, protocolName + "://" + host + ":" + port + "/" + filename,
            stopWatch.getElapsed(TimeUnit.MILLISECONDS));
    session.transfer(flowFile, REL_SUCCESS);

    // it is critical that we commit the session before moving/deleting the remote file. Otherwise, we could have a situation where
    // we ingest the data, delete/move the remote file, and then NiFi dies/is shut down before the session is committed. This would
    // result in data loss! If we commit the session first, we are safe.
    session.commit();

    try {
        final String completionStrategy = context.getProperty(COMPLETION_STRATEGY).getValue();
        if (COMPLETION_DELETE.getValue().equalsIgnoreCase(completionStrategy)) {
            try {
                transfer.deleteFile(null, filename);
            } catch (final FileNotFoundException e) {
                // file doesn't exist -- effectively the same as removing it. Move on.
            } catch (final IOException ioe) {
                getLogger().warn(
                        "Successfully fetched the content for {} from {}:{}{} but failed to remove the remote file due to {}",
                        new Object[] { flowFile, host, port, filename, ioe }, ioe);
            }
        } else if (COMPLETION_MOVE.getValue().equalsIgnoreCase(completionStrategy)) {
            String targetDir = context.getProperty(MOVE_DESTINATION_DIR).evaluateAttributeExpressions(flowFile)
                    .getValue();
            if (!targetDir.endsWith("/")) {
                targetDir = targetDir + "/";
            }
            final String simpleFilename = StringUtils.substringAfterLast(filename, "/");
            final String target = targetDir + simpleFilename;

            try {
                transfer.rename(filename, target);
            } catch (final IOException ioe) {
                getLogger().warn(
                        "Successfully fetched the content for {} from {}:{}{} but failed to rename the remote file due to {}",
                        new Object[] { flowFile, host, port, filename, ioe }, ioe);
            }
        }
    } finally {
        // BUGFIX: only return the connection to the pool once we are completely done
        // with it. The original offered it back immediately after the fetch, which let
        // another onTrigger thread poll and use the same connection while this thread
        // was still using it for the DELETE/MOVE completion strategy.
        transferQueue.offer(new FileTransferIdleWrapper(transfer, System.nanoTime()));
    }
}

From source file:com.lucidtechnics.blackboard.Blackboard.java

/**
 * Initializes the Blackboard workspace server.
 *
 * <p>Scans the configured apps home directory (created on demand), expecting the
 * layout {@code <appsHome>/<app>/<workspace>/<eventDir>}, configures each workspace
 * and its event plans, then creates the executors used by the server.
 *
 * @throws RuntimeException if {@code blackboard.apps.home} exists but is not a directory
 */
private void init() {
    java.io.File appsDirectory = new java.io.File(getAppsHome());

    // Create the apps home on first run.
    if (!appsDirectory.exists()) {
        appsDirectory.mkdirs();
    }

    if (!appsDirectory.isDirectory()) {
        throw new RuntimeException(
                "Directory: " + getAppsHome() + " as set in blackboard.apps.home is not a directory");
    }

    // listFiles() returns null on an I/O error; treat that as "no entries"
    // instead of letting the original NullPointerException escape.
    java.io.File[] appDirectories = appsDirectory.listFiles();
    if (appDirectories == null) {
        appDirectories = new java.io.File[0];
    }

    for (java.io.File appDirectory : appDirectories) {
        if (!appDirectory.isDirectory()) {
            continue;
        }
        String appName = appDirectory.getName();

        if (logger.isInfoEnabled()) {
            logger.info("Configuring app: " + appName);
        }

        java.io.File[] workspaceDirectories = appDirectory.listFiles();
        if (workspaceDirectories == null) {
            workspaceDirectories = new java.io.File[0];
        }

        for (java.io.File workspaceDirectory : workspaceDirectories) {
            if (!workspaceDirectory.isDirectory()) {
                continue;
            }
            String workspaceName = workspaceDirectory.getName();

            if (logger.isInfoEnabled()) {
                logger.info("Processing workspace: " + workspaceName);
            }

            java.io.File[] eventDirectories = workspaceDirectory.listFiles();
            if (eventDirectories == null) {
                eventDirectories = new java.io.File[0];
            }

            WorkspaceConfiguration workspaceConfiguration = configureWorkspace(appName, workspaceName,
                    workspaceDirectory);

            for (java.io.File eventDirectory : eventDirectories) {
                if (eventDirectory.isDirectory()) {
                    processEventPlans(appName, workspaceName, workspaceConfiguration, eventDirectory);
                }
            }
        }
    }

    if (logger.isInfoEnabled()) {
        logger.info("Loaded event configurations: " + getEventToWorkspaceMap());
    }

    // Single-threaded executors backed by unbounded queues. BUGFIX: the queues were
    // raw types; ThreadPoolExecutor expects BlockingQueue<Runnable>.
    setBlackboardExecutor(
            new ThreadPoolExecutor(1, 1, 100, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>()));

    setScheduledBlackboardExecutor(new ScheduledThreadPoolExecutor(getMaxScheduledBlackboardThread()));

    // One single-threaded executor per workspace slot (inclusive upper bound is
    // intentional, mirroring the original behavior).
    for (int i = 0; i <= getMaxWorkspaceThread(); i++) {
        getWorkspaceExecutorMap().put(i,
                new ThreadPoolExecutor(1, 1, 100, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>()));
    }

    setPersistenceExecutor(new ThreadPoolExecutor(getMaxPersistenceThread(), getMaxPersistenceThread(), 100,
            TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>()));

    if (logger.isInfoEnabled()) {
        logger.info("Blackboard Workspace Server Initialization Inception.");
        logger.info("Apache 2.0 Open Source License.");
        logger.info("Copyright Owner - Lucid Technics, LLC.");
        logger.info("Authors - Bediako Ntodi George and David Yuctan Hodge.");
        logger.info("Initialization was successful.");
    }

    org.apache.jcs.JCS.setConfigFilename("/blackboard.ccf");
}

From source file:com.twitter.distributedlog.admin.DistributedLogAdmin.java

/**
 * Checks the given streams concurrently and collects the candidates that need repair.
 *
 * <p>Spawns {@code concurrency} worker threads that drain a shared queue of stream
 * names, calling {@code checkStream} for each and recording non-null candidates.
 *
 * @return map of stream name to its {@link StreamCandidate}
 * @throws IOException if any stream was left unchecked (e.g. a worker failed)
 */
private static Map<String, StreamCandidate> checkStreams(
        final com.twitter.distributedlog.DistributedLogManagerFactory factory, final Collection<String> streams,
        final ExecutorService executorService, final BookKeeperClient bkc, final String digestpw,
        final int concurrency) throws IOException {
    final LinkedBlockingQueue<String> streamQueue = new LinkedBlockingQueue<String>();
    streamQueue.addAll(streams);
    final Map<String, StreamCandidate> candidateMap = new ConcurrentSkipListMap<String, StreamCandidate>();
    final AtomicInteger numPendingStreams = new AtomicInteger(streams.size());
    final CountDownLatch doneLatch = new CountDownLatch(1);
    Runnable checkRunnable = new Runnable() {
        @Override
        public void run() {
            // BUGFIX: the original used isEmpty() followed by a blocking take(),
            // a check-then-act race: if another worker drained the last element
            // in between, take() would block forever (until the final interrupt
            // sweep). poll() atomically returns null when the queue is empty.
            String stream;
            while ((stream = streamQueue.poll()) != null) {
                StreamCandidate candidate;
                try {
                    LOG.info("Checking stream {}.", stream);
                    candidate = checkStream(factory, stream, executorService, bkc, digestpw);
                    LOG.info("Checked stream {} - {}.", stream, candidate);
                } catch (IOException e) {
                    LOG.error("Error on checking stream {} : ", stream, e);
                    // Unblock the waiter; the pending count below will surface the failure.
                    doneLatch.countDown();
                    break;
                }
                if (null != candidate) {
                    candidateMap.put(stream, candidate);
                }
                if (numPendingStreams.decrementAndGet() == 0) {
                    doneLatch.countDown();
                }
            }
        }
    };
    Thread[] threads = new Thread[concurrency];
    for (int i = 0; i < concurrency; i++) {
        threads[i] = new Thread(checkRunnable, "check-thread-" + i);
        threads[i].start();
    }
    try {
        doneLatch.await();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
    if (numPendingStreams.get() != 0) {
        throw new IOException(numPendingStreams.get() + " streams left w/o checked");
    }
    // Interrupt and join all workers before returning.
    for (int i = 0; i < concurrency; i++) {
        threads[i].interrupt();
        try {
            threads[i].join();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
    return candidateMap;
}

From source file:com.offbynull.portmapper.natpmp.NatPmpController.java

private <T extends NatPmpResponse> T attemptRequest(ByteBuffer sendBuffer, int attempt, Creator<T> creator)
        throws InterruptedException {

    // Responses captured by the listener are handed to this thread via the queue.
    final LinkedBlockingQueue<ByteBuffer> incomingPackets = new LinkedBlockingQueue<>();

    UdpCommunicatorListener listener = new UdpCommunicatorListener() {

        @Override
        public void incomingPacket(InetSocketAddress sourceAddress, DatagramChannel channel,
                ByteBuffer packet) {
            // Ignore traffic that did not arrive on our unicast channel.
            if (channel != unicastChannel) {
                return;
            }

            incomingPackets.add(packet);
        }
    };

    // Per the NAT-PMP spec, the timeout doubles on each retry, starting at 250ms:
    // attempt 1 -> 250ms, attempt 2 -> 500ms, attempt 3 -> 1000ms, attempt 4 -> 2000ms, ...
    try {
        communicator.addListener(listener);
        communicator.send(unicastChannel, gateway, sendBuffer);

        int timeoutMillis = (1 << (attempt - 1)) * 250; // NOPMD

        T parsedResponse = null;

        final long deadline = System.currentTimeMillis() + timeoutMillis;
        while (true) {
            long remaining = deadline - System.currentTimeMillis();
            if (remaining <= 0L) {
                break; // deadline passed without a parseable response
            }

            ByteBuffer received = incomingPackets.poll(remaining, TimeUnit.MILLISECONDS);
            if (received == null) {
                continue; // timed out; loop re-checks the deadline and exits
            }

            // creator returns null for packets that don't parse as the expected response
            parsedResponse = creator.create(received);
            if (parsedResponse != null) {
                break;
            }
        }

        return parsedResponse;
    } finally {
        communicator.removeListener(listener);
    }
}

From source file:test.com.azaptree.services.executor.ThreadPoolExecutorTest.java

@Test
public void testThreadPoolConfigHashCode() {
    // Equal configurations must produce equal hash codes.
    Assert.assertEquals(new ThreadPoolConfig("azap", true).hashCode(),
            new ThreadPoolConfig("azap", true).hashCode());
    Assert.assertEquals(new ThreadPoolConfig("azap").hashCode(), new ThreadPoolConfig("azap").hashCode());
    Assert.assertEquals(new ThreadPoolConfig().hashCode(), new ThreadPoolConfig().hashCode());
    Assert.assertNotEquals(new ThreadPoolConfig().hashCode(), new ThreadPoolConfig("azap").hashCode());

    // Two configs mutated identically must still agree on hashCode().
    final ThreadPoolConfig first = new ThreadPoolConfig("azap", 10, 50, true);
    first.setAllowCoreThreadTimeOut(true);
    first.setWorkQueue(new LinkedBlockingQueue<Runnable>());

    final ThreadPoolConfig second = new ThreadPoolConfig("azap", 10, 50, true);
    second.setAllowCoreThreadTimeOut(true);
    second.setWorkQueue(new LinkedBlockingQueue<Runnable>());

    Assert.assertEquals(first.hashCode(), second.hashCode());
}

From source file:org.apache.bookkeeper.metadata.etcd.EtcdLedgerManagerTest.java

@Test
public void testRegisterLedgerMetadataListener() throws Exception {
    // Clock-based id keeps each test run on a fresh ledger.
    long ledgerId = System.currentTimeMillis();

    // create a ledger metadata
    LedgerMetadata metadata = LedgerMetadataBuilder.create().withEnsembleSize(3).withWriteQuorumSize(3)
            .withAckQuorumSize(2).withPassword("test-password".getBytes(UTF_8))
            .withDigestType(DigestType.CRC32C.toApiDigestType()).newEnsembleEntry(0L, createNumBookies(3))
            .build();
    result(lm.createLedgerMetadata(ledgerId, metadata));
    Versioned<LedgerMetadata> readMetadata = lm.readLedgerMetadata(ledgerId).get();
    log.info("Create ledger metadata : {}", readMetadata.getValue());

    // register first listener; notifications are captured in a blocking queue so
    // the test can wait for them deterministically

    LinkedBlockingQueue<Versioned<LedgerMetadata>> metadataQueue1 = new LinkedBlockingQueue<>();
    LedgerMetadataListener listener1 = (lid, m) -> {
        log.info("[listener1] Received ledger {} metadata : {}", lid, m);
        metadataQueue1.add(m);
    };
    log.info("Registered first listener for ledger {}", ledgerId);
    lm.registerLedgerMetadataListener(ledgerId, listener1);
    // we should receive a metadata notification when a ledger is created
    Versioned<LedgerMetadata> notifiedMetadata = metadataQueue1.take();
    assertEquals(readMetadata, notifiedMetadata);
    ValueStream<LedgerMetadata> lms = lm.getLedgerMetadataStream(ledgerId);
    assertNotNull(lms.waitUntilWatched());
    assertNotNull(result(lms.waitUntilWatched()));

    // register second listener

    LinkedBlockingQueue<Versioned<LedgerMetadata>> metadataQueue2 = new LinkedBlockingQueue<>();
    LedgerMetadataListener listener2 = (lid, m) -> {
        log.info("[listener2] Received ledger {} metadata : {}", lid, m);
        metadataQueue2.add(m);
    };
    log.info("Registered second listener for ledger {}", ledgerId);
    lm.registerLedgerMetadataListener(ledgerId, listener2);
    Versioned<LedgerMetadata> notifiedMetadata2 = metadataQueue2.take();
    assertEquals(readMetadata, notifiedMetadata2);
    assertNotNull(lm.getLedgerMetadataStream(ledgerId));

    // update the metadata: both registered listeners should be notified
    lm.writeLedgerMetadata(ledgerId,
            LedgerMetadataBuilder.from(metadata).newEnsembleEntry(10L, createNumBookies(3)).build(),
            notifiedMetadata.getVersion()).get();
    readMetadata = lm.readLedgerMetadata(ledgerId).get();
    assertEquals(readMetadata, metadataQueue1.take());
    assertEquals(readMetadata, metadataQueue2.take());
    lms = lm.getLedgerMetadataStream(ledgerId);
    assertNotNull(lms);
    assertEquals(2, lms.getNumConsumers());

    // remove listener2; the stream should drop to a single consumer
    lm.unregisterLedgerMetadataListener(ledgerId, listener2);
    lms = lm.getLedgerMetadataStream(ledgerId);
    assertNotNull(lms);
    assertEquals(1, lms.getNumConsumers());

    // update the metadata again: only listener1 should be notified now
    lm.writeLedgerMetadata(ledgerId,
            LedgerMetadataBuilder.from(metadata).newEnsembleEntry(20L, createNumBookies(3)).build(),
            readMetadata.getVersion()).get();
    readMetadata = lm.readLedgerMetadata(ledgerId).get();
    assertEquals(readMetadata, metadataQueue1.take());
    assertNull(metadataQueue2.poll());

    // remove listener1
    lm.unregisterLedgerMetadataListener(ledgerId, listener1);
    // the value stream will be removed
    while (lm.getLedgerMetadataStream(ledgerId) != null) {
        TimeUnit.MILLISECONDS.sleep(100);
    }
    assertEquals(0, lms.getNumConsumers());

    // update the metadata again: no listeners remain, so no notifications
    lm.writeLedgerMetadata(ledgerId,
            LedgerMetadataBuilder.from(metadata).newEnsembleEntry(30L, createNumBookies(3)).build(),
            readMetadata.getVersion()).get();
    readMetadata = lm.readLedgerMetadata(ledgerId).get();
    assertNull(metadataQueue1.poll());
    assertNull(metadataQueue2.poll());

    // re-registering delivers the current metadata immediately
    log.info("Registered first listener for ledger {} again", ledgerId);
    lm.registerLedgerMetadataListener(ledgerId, listener1);
    notifiedMetadata = metadataQueue1.take();
    assertEquals(readMetadata, notifiedMetadata);
    lms = lm.getLedgerMetadataStream(ledgerId);
    assertNotNull(lms);
    assertEquals(1, lms.getNumConsumers());

    // delete the ledger
    lm.removeLedgerMetadata(ledgerId, readMetadata.getVersion()).get();
    // the listener will eventually be removed
    while (lm.getLedgerMetadataStream(ledgerId) != null) {
        TimeUnit.MILLISECONDS.sleep(100);
    }
    // NOTE(review): this asserts on the stale `lms` handle captured before deletion,
    // which still reports its last consumer count — presumably intentional; confirm.
    assertEquals(1, lms.getNumConsumers());
    assertNull(metadataQueue1.poll());
    assertNull(metadataQueue2.poll());
}

From source file:org.apache.bookkeeper.common.util.OrderedExecutor.java

/**
 * Creates a single-threaded executor using the given thread factory. The work queue
 * is a busy-wait MPSC queue when {@code enableBusyWait} is set, otherwise a plain
 * JDK {@link LinkedBlockingQueue}.
 */
protected ThreadPoolExecutor createSingleThreadExecutor(ThreadFactory factory) {
    final BlockingQueue<Runnable> workQueue;
    if (enableBusyWait) {
        // Busy-wait polling strategy; fall back to the default capacity when no
        // explicit task limit is configured.
        final int capacity = maxTasksInQueue > 0 ? maxTasksInQueue : DEFAULT_MAX_ARRAY_QUEUE_SIZE;
        workQueue = new BlockingMpscQueue<>(capacity);
    } else {
        // By default, use regular JDK LinkedBlockingQueue
        workQueue = new LinkedBlockingQueue<>();
    }
    return new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, workQueue, factory);
}

From source file:org.apache.axis2.transport.http.server.HttpFactory.java

/**
 * Create the queue used to hold incoming requests when requestCoreThreadPoolSize threads are busy.
 * Default is an unbounded queue./*from   ww w. j  a v  a  2  s  .  co m*/
 */
public BlockingQueue newRequestBlockingQueue() {
    return new LinkedBlockingQueue();
}

From source file:org.opencron.server.service.ExecuteService.java

/**
 * ? /*  ww  w .j ava  2 s . c om*/
 */
public void batchExecuteJob(final Long userId, String command, String agentIds) {
    final Queue<JobVo> jobQueue = new LinkedBlockingQueue<JobVo>();

    String[] arrayIds = agentIds.split(";");
    for (String agentId : arrayIds) {
        Agent agent = agentService.getAgent(Long.parseLong(agentId));
        JobVo jobVo = new JobVo(userId, command, agent);
        jobQueue.add(jobVo);
    }

    Thread jobThread = new Thread(new Runnable() {
        @Override
        public void run() {
            for (final JobVo jobVo : jobQueue) {
                //?(?,?)
                Thread thread = new Thread(new Runnable() {
                    public void run() {
                        executeSingleJob(jobVo, userId);
                    }
                });
                thread.start();
            }
        }
    });
    jobThread.start();
}

From source file:com.obviousengine.android.focus.ZslFocusCamera.java

/**
 * Instantiates a new camera based on the Camera 2 API.
 *
 * <p>Sets up the camera and listener handler threads, the JPEG-encoding thread
 * pool, the zero-shutter-lag capture manager, and the capture {@link ImageReader}.
 *
 * @param device The underlying Camera 2 device.
 * @param characteristics The device's characteristics.
 * @param pictureSize the size of the final image to be taken.
 */
ZslFocusCamera(CameraDevice device, CameraCharacteristics characteristics, Size pictureSize) {
    Timber.v("Creating new ZslFocusCamera");

    this.device = device;
    this.characteristics = characteristics;
    fullSizeAspectRatio = calculateFullSizeAspectRatio(characteristics);

    // Dedicated thread for camera operations.
    cameraThread = new HandlerThread("FocusCamera");
    // If this thread stalls, it will delay viewfinder frames.
    cameraThread.setPriority(Thread.MAX_PRIORITY);
    cameraThread.start();
    cameraHandler = new Handler(cameraThread.getLooper());

    // Separate thread for listener callbacks, so they cannot stall the camera thread.
    cameraListenerThread = new HandlerThread("FocusCamera-Listener");
    cameraListenerThread.start();
    cameraListenerHandler = new Handler(cameraListenerThread.getLooper());

    // TODO: Encoding on multiple cores results in preview jank due to
    // excessive GC.
    int numEncodingCores = Utils.getNumCpuCores();
    imageSaverThreadPool = new ThreadPoolExecutor(numEncodingCores, numEncodingCores, 10, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>());

    // Capture manager buffers up to MAX_CAPTURE_IMAGES frames for zero-shutter-lag.
    captureManager = new ImageCaptureManager(MAX_CAPTURE_IMAGES, cameraListenerHandler, imageSaverThreadPool);
    captureManager.setCaptureReadyListener(new ImageCaptureManager.CaptureReadyListener() {
        @Override
        public void onReadyStateChange(boolean capturePossible) {
            readyStateManager.setInput(ReadyStateRequirement.CAPTURE_MANAGER_READY, capturePossible);
        }
    });

    // Listen for changes to auto focus state and dispatch to
    // focusStateListener.
    captureManager.addMetadataChangeListener(CaptureResult.CONTROL_AF_STATE,
            new ImageCaptureManager.MetadataChangeListener() {
                @Override
                public void onImageMetadataChange(Key<?> key, Object oldValue, Object newValue,
                        CaptureResult result) {
                    // No listener registered yet; nothing to notify.
                    if (focusStateListener == null) {
                        return;
                    }
                    focusStateListener.onFocusStatusUpdate(
                            AutoFocusHelper.stateFromCamera2State(result.get(CaptureResult.CONTROL_AF_STATE)),
                            result.getFrameNumber());
                }
            });

    // Allocate the image reader to store all images received from the
    // camera.
    if (pictureSize == null) {
        // TODO The default should be selected by the caller, and
        // pictureSize should never be null.
        pictureSize = getDefaultPictureSize();
    }
    captureImageReader = ImageReader.newInstance(pictureSize.getWidth(), pictureSize.getHeight(),
            CAPTURE_IMAGE_FORMAT, MAX_CAPTURE_IMAGES);

    // Incoming frames go straight to the capture manager on the camera thread.
    captureImageReader.setOnImageAvailableListener(captureManager, cameraHandler);
    mediaActionSound.load(MediaActionSound.SHUTTER_CLICK);
}