Example usage for the java.util.concurrent.ArrayBlockingQueue constructor ArrayBlockingQueue(int capacity)

Introduction

This page collects usage examples for the java.util.concurrent.ArrayBlockingQueue(int capacity) constructor, drawn from a variety of open-source projects.

Prototype

public ArrayBlockingQueue(int capacity) 

Document

Creates an ArrayBlockingQueue with the given (fixed) capacity and default access policy.
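
A minimal, self-contained sketch of the constructor in isolation (not taken from the examples below): the capacity is fixed at construction time, put() blocks while the queue is full, and offer() returns false instead of blocking.

import java.util.concurrent.ArrayBlockingQueue;

public class BoundedQueueDemo {
    public static void main(String[] args) throws InterruptedException {
        // Fixed capacity of 2, default (non-fair) access policy
        ArrayBlockingQueue<String> queue = new ArrayBlockingQueue<>(2);

        queue.put("first");                       // succeeds immediately
        queue.put("second");                      // queue is now at capacity
        boolean accepted = queue.offer("third");  // returns false rather than blocking

        System.out.println("third accepted: " + accepted); // false
        System.out.println(queue.take());                  // "first" (FIFO order)
        System.out.println(queue.take());                  // "second"
    }
}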

Usage

From source file:com.alibaba.otter.node.etl.common.pipe.impl.http.archive.ArchiveBean.java

public void afterPropertiesSet() throws Exception {
    executor = new ThreadPoolExecutor(poolSize, poolSize, 0L, TimeUnit.MILLISECONDS,
            new ArrayBlockingQueue<Runnable>(poolSize * 4), new NamedThreadFactory(WORKER_NAME),
            new ThreadPoolExecutor.CallerRunsPolicy());
}
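
Here the bounded queue (capacity poolSize * 4) plus CallerRunsPolicy gives the pool built-in backpressure: once the queue fills, extra submissions run on the submitting thread instead of being dropped. A small runnable sketch of that behavior, with hypothetical pool and queue sizes rather than ArchiveBean's actual configuration:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class CallerRunsDemo {
    public static void main(String[] args) {
        ThreadPoolExecutor executor = new ThreadPoolExecutor(2, 2, 0L, TimeUnit.MILLISECONDS,
                new ArrayBlockingQueue<Runnable>(4),             // bounded work queue
                new ThreadPoolExecutor.CallerRunsPolicy());      // overflow runs on the submitter

        for (int i = 0; i < 20; i++) {
            final int taskId = i;
            executor.execute(() -> System.out.println(
                    "task " + taskId + " on " + Thread.currentThread().getName()));
        }
        executor.shutdown();
    }
}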

From source file:org.wso2.carbon.registry.eventing.services.EventingServiceImpl.java

private synchronized void setupExecutorService() {
    if (executor == null) {
        executor = new ThreadPoolExecutor(25, 150, 1000, TimeUnit.NANOSECONDS,
                new ArrayBlockingQueue<Runnable>(100));
    }
}

From source file:API.amazon.mws.feeds.service.MarketplaceWebServiceClient.java

/**
 * Constructs MarketplaceWebServiceClient with AWS Access Key ID, AWS Secret Key
 * and MarketplaceWebServiceConfig. Use MarketplaceWebServiceConfig to pass additional
 * configuration that affects how service is being called.
 *
 * @param awsAccessKeyId
 *          AWS Access Key ID
 * @param awsSecretAccessKey
 *          AWS Secret Access Key
 * @param config
 *          Additional configuration options
 */
@SuppressWarnings("serial")
public MarketplaceWebServiceClient(String awsAccessKeyId, String awsSecretAccessKey, String applicationName,
        String applicationVersion, MarketplaceWebServiceConfig config) {
    this.awsAccessKeyId = awsAccessKeyId;
    this.awsSecretAccessKey = awsSecretAccessKey;
    this.config = config;
    this.httpClient = configureHttpClient(applicationName, applicationVersion);
    this.asyncExecutor = new ThreadPoolExecutor(config.getMaxAsyncThreads(), config.getMaxAsyncThreads(), 60L,
            TimeUnit.SECONDS, new ArrayBlockingQueue<Runnable>(config.getMaxAsyncQueueSize()) {

                @Override
                public boolean offer(Runnable task) {
                    log.debug("Maximum number of concurrent threads reached, queuing task...");
                    return super.offer(task);
                }
            }, new ThreadFactory() {

                private final AtomicInteger threadNumber = new AtomicInteger(1);

                public Thread newThread(Runnable task) {
                    Thread thread = new Thread(task,
                            "MarketplaceWebServiceClient-Thread-" + threadNumber.getAndIncrement());
                    thread.setDaemon(true);
                    if (thread.getPriority() != Thread.NORM_PRIORITY) {
                        thread.setPriority(Thread.NORM_PRIORITY);
                    }
                    log.debug("ThreadFactory created new thread: " + thread.getName());
                    return thread;
                }
            }, new RejectedExecutionHandler() {

                public void rejectedExecution(Runnable task, ThreadPoolExecutor executor) {
                    log.debug("Maximum number of concurrent threads reached, and queue is full. "
                            + "Running task in the calling thread..." + Thread.currentThread().getName());
                    if (!executor.isShutdown()) {
                        task.run();
                    }
                }
            });
}
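
The client above subclasses ArrayBlockingQueue anonymously so that every task the executor queues is logged before being handed to the normal bounded offer(). A stripped-down sketch of the same pattern (the sizes and System.out logging are placeholders, not the AWS client's real configuration):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class LoggingQueueDemo {
    @SuppressWarnings("serial")
    public static ThreadPoolExecutor newLoggingExecutor(int threads, int queueSize) {
        return new ThreadPoolExecutor(threads, threads, 60L, TimeUnit.SECONDS,
                new ArrayBlockingQueue<Runnable>(queueSize) {
                    @Override
                    public boolean offer(Runnable task) {
                        // The executor is queuing the task because all pool threads are busy
                        System.out.println("Queuing task, queue depth = " + size());
                        return super.offer(task);
                    }
                });
    }
}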

From source file:com.eucalyptus.blockstorage.S3SnapshotTransfer.java

/**
 * Compresses the snapshot and uploads it to a bucket in the object storage gateway as a single or multipart upload, based on the configuration in
 * {@link StorageInfo}. The bucket name should be configured before invoking this method; it can be looked up and initialized by {@link #prepareForUpload()} or
 * explicitly set using {@link #setBucketName(String)}.
 *
 * @param sourceFileName
 *            absolute path to the snapshot on the file system
 */
@Override
public void upload(String sourceFileName) throws SnapshotTransferException {
    validateInput(); // Validate input
    loadTransferConfig(); // Load the transfer configuration parameters from database
    SnapshotProgressCallback progressCallback = new SnapshotProgressCallback(snapshotId); // Setup the progress callback

    Boolean error = Boolean.FALSE;
    ArrayBlockingQueue<SnapshotPart> partQueue = null;
    SnapshotPart part = null;
    SnapshotUploadInfo snapUploadInfo = null;
    Future<List<PartETag>> uploadPartsFuture = null;
    Future<String> completeUploadFuture = null;

    byte[] buffer = new byte[READ_BUFFER_SIZE];
    Long readOffset = 0L;
    Long bytesRead = 0L;
    Long bytesWritten = 0L;
    int len;
    int partNumber = 1;

    try {
        // Get the uncompressed file size for uploading as metadata
        Long uncompressedSize = getFileSize(sourceFileName);

        // Setup the snapshot and part entities.
        snapUploadInfo = SnapshotUploadInfo.create(snapshotId, bucketName, keyName);
        Path zipFilePath = Files.createTempFile(keyName + '-', '-' + String.valueOf(partNumber));
        part = SnapshotPart.createPart(snapUploadInfo, zipFilePath.toString(), partNumber, readOffset);

        FileInputStream inputStream = new FileInputStream(sourceFileName);
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        GZIPOutputStream gzipStream = new GZIPOutputStream(baos);
        FileOutputStream outputStream = new FileOutputStream(zipFilePath.toString());

        try {
            LOG.debug("Reading snapshot " + snapshotId + " and compressing it to disk in chunks of size "
                    + partSize + " bytes or greater");
            while ((len = inputStream.read(buffer)) > 0) {
                bytesRead += len;
                gzipStream.write(buffer, 0, len);

                if ((bytesWritten + baos.size()) < partSize) {
                    baos.writeTo(outputStream);
                    bytesWritten += baos.size();
                    baos.reset();
                } else {
                    gzipStream.close();
                    baos.writeTo(outputStream); // Order is important: closing the gzip stream first flushes its remaining compressed bytes into baos
                    bytesWritten += baos.size();
                    baos.reset();
                    outputStream.close();

                    if (partNumber > 1) {// Update the part status
                        part = part.updateStateCreated(bytesWritten, bytesRead, Boolean.FALSE);
                    } else {// Initialize multipart upload only once after the first part is created
                        LOG.info("Uploading snapshot " + snapshotId
                                + " to objectstorage using multipart upload");
                        progressCallback.setUploadSize(uncompressedSize);
                        uploadId = initiateMulitpartUpload(uncompressedSize);
                        snapUploadInfo = snapUploadInfo.updateUploadId(uploadId);
                        part = part.updateStateCreated(uploadId, bytesWritten, bytesRead, Boolean.FALSE);
                        partQueue = new ArrayBlockingQueue<SnapshotPart>(queueSize);
                        uploadPartsFuture = Threads.enqueue(serviceConfig, UploadPartTask.class, poolSize,
                                new UploadPartTask(partQueue, progressCallback));
                    }

                    // Check for the future task before adding part to the queue.
                    if (uploadPartsFuture != null && uploadPartsFuture.isDone()) {
                        // This task shouldn't be done until the last part is added. If it is done at this point, then something might have gone wrong
                        throw new SnapshotUploadPartException(
                                "Error uploading parts, aborting part creation process. Check previous log messages for the exact error");
                    }

                    // Add part to the queue
                    partQueue.put(part);

                    // Prep the metadata for the next part
                    readOffset += bytesRead;
                    bytesRead = 0L;
                    bytesWritten = 0L;

                    // Setup the part entity for next part
                    zipFilePath = Files.createTempFile(keyName + '-', '-' + String.valueOf((++partNumber)));
                    part = SnapshotPart.createPart(snapUploadInfo, zipFilePath.toString(), partNumber,
                            readOffset);

                    gzipStream = new GZIPOutputStream(baos);
                    outputStream = new FileOutputStream(zipFilePath.toString());
                }
            }

            gzipStream.close();
            baos.writeTo(outputStream);
            bytesWritten += baos.size();
            baos.reset();
            outputStream.close();
            inputStream.close();

            // Update the part status
            part = part.updateStateCreated(bytesWritten, bytesRead, Boolean.TRUE);

            // Update the snapshot upload info status
            snapUploadInfo = snapUploadInfo.updateStateCreatedParts(partNumber);
        } catch (Exception e) {
            LOG.error("Failed to upload " + snapshotId + " due to: ", e);
            error = Boolean.TRUE;
            throw new SnapshotTransferException("Failed to upload " + snapshotId + " due to: ", e);
        } finally {
            if (inputStream != null) {
                inputStream.close();
            }
            if (gzipStream != null) {
                gzipStream.close();
            }
            if (outputStream != null) {
                outputStream.close();
            }
            baos.reset();
        }

        if (partNumber > 1) {
            // Check for the future task before adding the last part to the queue.
            if (uploadPartsFuture != null && uploadPartsFuture.isDone()) {
                // This task shouldn't be done until the last part is added. If it is done at this point, then something might have gone wrong
                throw new SnapshotUploadPartException(
                        "Error uploading parts, aborting part upload process. Check previous log messages for the exact error");
            }
            // Add the last part to the queue
            partQueue.put(part);
            // Kick off the completion task
            completeUploadFuture = Threads.enqueue(serviceConfig, CompleteMpuTask.class, poolSize,
                    new CompleteMpuTask(uploadPartsFuture, snapUploadInfo, partNumber));
        } else {
            try {
                LOG.info("Uploading snapshot " + snapshotId
                        + " to objectstorage as a single object. Compressed size of snapshot (" + bytesWritten
                        + " bytes) is less than minimum part size (" + partSize
                        + " bytes) for multipart upload");
                PutObjectResult putResult = uploadSnapshotAsSingleObject(zipFilePath.toString(), bytesWritten,
                        uncompressedSize, progressCallback);
                markSnapshotAvailable();
                try {
                    part = part.updateStateUploaded(putResult.getETag());
                    snapUploadInfo = snapUploadInfo.updateStateUploaded(putResult.getETag());
                } catch (Exception e) {
                    LOG.debug("Failed to update status in DB for " + snapUploadInfo);
                }
                LOG.info("Uploaded snapshot " + snapshotId + " to objectstorage");
            } catch (Exception e) {
                error = Boolean.TRUE;
                LOG.error("Failed to upload snapshot " + snapshotId + " due to: ", e);
                throw new SnapshotTransferException("Failed to upload snapshot " + snapshotId + " due to: ", e);
            } finally {
                deleteFile(zipFilePath);
            }
        }
    } catch (SnapshotTransferException e) {
        error = Boolean.TRUE;
        throw e;
    } catch (Exception e) {
        error = Boolean.TRUE;
        LOG.error("Failed to upload snapshot " + snapshotId + " due to: ", e);
        throw new SnapshotTransferException("Failed to upload snapshot " + snapshotId + " due to: ", e);
    } finally {
        if (error) {
            abortUpload(snapUploadInfo);
            if (uploadPartsFuture != null && !uploadPartsFuture.isDone()) {
                uploadPartsFuture.cancel(true);
            }
            if (completeUploadFuture != null && !completeUploadFuture.isDone()) {
                completeUploadFuture.cancel(true);
            }
        }
    }
}
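
The upload is a producer/consumer pipeline: the reading loop put()s finished parts onto the bounded partQueue while an UploadPartTask drains it, so compression never runs too far ahead of the uploads. A minimal, generic sketch of that hand-off, with plain strings standing in for SnapshotPart and a sentinel value marking the last part:

import java.util.concurrent.ArrayBlockingQueue;

public class PartPipelineDemo {
    private static final String LAST_PART = "LAST";   // sentinel marking the end of the stream

    public static void main(String[] args) throws InterruptedException {
        ArrayBlockingQueue<String> partQueue = new ArrayBlockingQueue<>(3);

        Thread uploader = new Thread(() -> {
            try {
                String part;
                while (!(part = partQueue.take()).equals(LAST_PART)) {
                    System.out.println("uploading " + part);   // stand-in for the real upload call
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        uploader.start();

        for (int partNumber = 1; partNumber <= 10; partNumber++) {
            partQueue.put("part-" + partNumber);   // blocks if the uploader falls behind
        }
        partQueue.put(LAST_PART);
        uploader.join();
    }
}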

From source file:net.sf.xfd.provider.PublicProvider.java

final boolean checkAccess(Uri uri, String grantMode, String necessaryMode) {
    try {
        verifyMac(uri, grantMode, necessaryMode);

        return true;
    } catch (FileNotFoundException fnfe) {
        final Context context = getContext();

        assert context != null;

        ObjectIntMap<String> decisions = null;

        final String caller = getCallingPackage();

        if (!TextUtils.isEmpty(caller)) {
            decisions = accessCache.get(uri.getPath());
            if (decisions == null) {
                decisions = new ObjectIntHashMap<>();
            } else {
                //noinspection SynchronizationOnLocalVariableOrMethodParameter
                synchronized (decisions) {
                    final int decision = decisions.get(caller);

                    switch (decision) {
                    case RESPONSE_ALLOW:
                        return true;
                    }
                }
            }
        }

        final ArrayBlockingQueue<Bundle> queue = new ArrayBlockingQueue<>(1);

        //noinspection RestrictedApi
        final ResultReceiver receiver = new ResultReceiver(new Handler(Looper.getMainLooper())) {
            @Override
            protected void onReceiveResult(int resultCode, Bundle resultData) {
                try {
                    queue.offer(resultData, 4, TimeUnit.SECONDS);
                } catch (InterruptedException ignored) {
                }
            }
        };

        try {
            final Intent intent = authActivityIntent(context);

            if (intent == null)
                return false;

            final Bundle result;

            final CharSequence resolved = base.resolve(uri.getPath());

            // try to ensure that no more than one dialog can appear at once
            uxLock.lockInterruptibly();
            try {
                context.startActivity(intent.putExtra(EXTRA_MODE, necessaryMode).putExtra(EXTRA_CALLER, caller)
                        .putExtra(EXTRA_UID, Binder.getCallingUid()).putExtra(EXTRA_CALLBACK, receiver)
                        .putExtra(EXTRA_PATH, resolved));

                result = queue.poll(10, TimeUnit.SECONDS);
            } finally {
                uxLock.unlock();
            }

            int decision = RESPONSE_DENY;

            if (result != null) {
                decision = result.getInt(EXTRA_RESPONSE, -1);
            }

            if (decision == RESPONSE_ALLOW) {
                if (decisions != null) {
                    //noinspection SynchronizationOnLocalVariableOrMethodParameter
                    synchronized (decisions) {
                        decisions.put(caller, RESPONSE_ALLOW);

                        accessCache.put(uri.getPath(), decisions);
                    }
                }

                return true;
            }
        } catch (InterruptedException ignored) {
        }
    }

    return false;
}
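
The one-element queue above acts as a rendezvous between the provider's binder thread and the permission dialog: the provider waits up to 10 seconds with a timed poll(), and the ResultReceiver hands the Bundle back with a timed offer(). A minimal sketch of that timed hand-off, using a String in place of the Bundle:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.TimeUnit;

public class TimedHandoffDemo {
    public static void main(String[] args) throws InterruptedException {
        ArrayBlockingQueue<String> queue = new ArrayBlockingQueue<>(1);

        Thread responder = new Thread(() -> {
            try {
                Thread.sleep(500);                           // simulate the user answering a dialog
                queue.offer("ALLOW", 4, TimeUnit.SECONDS);   // give up if nobody is waiting
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        responder.start();

        String decision = queue.poll(10, TimeUnit.SECONDS);  // null if no answer arrives in time
        System.out.println(decision != null ? decision : "DENY (timed out)");
        responder.join();
    }
}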

From source file:alma.acs.nc.AcsEventSubscriberImplBase.java

/**
 * Subclass may override, but must call super.createConnectionAction().
 */
protected void createConnectionAction(EventDispatcher evtDispatcher, ErrorReporter errRep,
        SCInstance scInstance, Collection<TriggerEvent> derivedEvents) throws AcsJStateMachineActionEx {
    eventHandlingExecutor = new ThreadPoolExecutor(0, 1, 1L, TimeUnit.MINUTES,
            new ArrayBlockingQueue<Runnable>(EVENT_QUEUE_CAPACITY), services.getThreadFactory(),
            new ThreadPoolExecutor.AbortPolicy());
}

From source file:com.facebook.LinkBench.LinkBenchDriverInj.java

void sendrequests() throws IOException, InterruptedException, Throwable {

    if (!doRequest) {
        logger.info("Skipping request phase per the cmdline arg");
        return;
    }

    // config info for requests
    nrequesters = ConfigUtil.getInt(props, Config.NUM_REQUESTERS);
    if (nrequesters == 0) {
        logger.info("NO REQUEST PHASE CONFIGURED. ");
        return;
    }
    List<LinkBenchRequestInj> requesters = new LinkedList<LinkBenchRequestInj>();

    RequestProgress progress = LinkBenchRequestInj.createProgress(logger, props);

    Random masterRandom = createMasterRNG(props, Config.REQUEST_RANDOM_SEED);
    requestrate = ConfigUtil.getLong(props, Config.REQUEST_RATE, 0L);
    maxTime = ConfigUtil.getLong(props, Config.MAX_TIME);

    genQueue = new ArrayBlockingQueue<Long>(1000000); // TODO: queue capacity (1000000) should come from Config

    statsQueue = new ArrayBlockingQueue<StatMessage>(1000000); // TODO: queue capacity (1000000) should come from Config

    // create GlobalStats thread

    GlobalStats gs = new GlobalStats(statsQueue, props, csvStreamFile);
    Thread t = new Thread(gs, "Global Stats Thread");
    t.start();

    // create requesters
    for (int i = 0; i < nrequesters; i++) {
        Stores stores = initStores();
        LinkBenchRequestInj l = new LinkBenchRequestInj(stores.linkStore, stores.nodeStore, props,
                csvStreamFile, progress, new Random(masterRandom.nextLong()), i, nrequesters, genQueue,
                statsQueue);
        requesters.add(l);
    }
    progress.startTimer();
    // run requesters
    concurrentExec(requesters, true, new Random(masterRandom.nextLong()));

    // stop Thread with global statistics
    t.interrupt();
    t.join();
    gs.printQuantileStats();

    long finishTime = System.currentTimeMillis();
    // Calculate duration accounting for warmup time
    long benchmarkTime = finishTime - progress.getBenchmarkStartTime();

    long requestsdone = 0;
    int abortedRequesters = 0;
    // wait for requesters
    for (LinkBenchRequestInj requester : requesters) {
        requestsdone += requester.getRequestsDone();
        if (requester.didAbort()) {
            abortedRequesters++;
        }
    }

    logger.info("REQUEST PHASE COMPLETED. " + requestsdone + " requests done in " + (benchmarkTime / 1000)
            + " seconds." + " Requests/second = " + (1000 * requestsdone) / benchmarkTime);
    if (abortedRequesters > 0) {
        logger.error(String.format(
                "Benchmark did not complete cleanly: %d/%d "
                        + "request threads aborted.  See error log entries for details.",
                abortedRequesters, nrequesters));
    }
}

From source file:io.teak.sdk.TeakNotification.java

/**
 * Cancel a push notification that was scheduled with {@link TeakNotification#scheduleNotification(String, String, long)}
 *
 * @param scheduleId the id returned by {@link TeakNotification#scheduleNotification(String, String, long)} for the notification to cancel
 * @return a FutureTask that resolves to the id of the canceled notification (or an empty string on failure), or null if Teak is disabled or scheduleId is missing
 */
@SuppressWarnings("unused")
public static FutureTask<String> cancelNotification(final String scheduleId) {
    if (!Teak.isEnabled()) {
        Log.e(LOG_TAG, "Teak is disabled, ignoring cancelNotification().");
        return null;
    }

    if (scheduleId == null || scheduleId.isEmpty()) {
        Log.e(LOG_TAG, "scheduleId cannot be null or empty");
        return null;
    }

    final ArrayBlockingQueue<String> q = new ArrayBlockingQueue<>(1);
    final FutureTask<String> ret = new FutureTask<>(new Callable<String>() {
        public String call() {
            try {
                return q.take();
            } catch (InterruptedException e) {
                Log.e(LOG_TAG, Log.getStackTraceString(e));
            }
            return null;
        }
    });

    Session.whenUserIdIsReadyRun(new Session.SessionRunnable() {
        @Override
        public void run(Session session) {
            HashMap<String, Object> payload = new HashMap<>();
            payload.put("id", scheduleId);

            new Request("/me/cancel_local_notify.json", payload, session) {
                @Override
                protected void done(int responseCode, String responseBody) {
                    try {
                        JSONObject response = new JSONObject(responseBody);
                        if (response.getString("status").equals("ok")) {
                            q.offer(response.getJSONObject("event").getString("id"));
                        } else {
                            q.offer("");
                        }
                    } catch (Exception ignored) {
                        q.offer("");
                    }
                    ret.run();
                }
            }.run();
        }
    });

    return ret;
}
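
cancelNotification() bridges an asynchronous HTTP callback to a synchronous FutureTask by parking the response in a one-element queue that the FutureTask's Callable blocks on. A simplified sketch of that bridge, with the network request replaced by a hypothetical background thread:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.Callable;
import java.util.concurrent.FutureTask;

public class CallbackToFutureDemo {
    public static void main(String[] args) throws Exception {
        final ArrayBlockingQueue<String> q = new ArrayBlockingQueue<>(1);
        final FutureTask<String> result = new FutureTask<>(new Callable<String>() {
            public String call() throws InterruptedException {
                return q.take();   // blocks until the callback delivers a value
            }
        });

        // Stand-in for the asynchronous request; its completion callback fills the queue
        new Thread(() -> {
            q.offer("event-123");  // the "response"
            result.run();          // run the Callable; take() returns immediately now
        }).start();

        System.out.println(result.get());   // prints "event-123"
    }
}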

From source file:com.prod.intelligent7.engineautostart.ConnectDaemonService.java

boolean confirmHasMailBox() {
    if (outBoundMailBox == null) {
        outBoundMailBox = new ArrayBlockingQueue<String>(mailBoxLimit);
        if (mDaemon != null)
            mDaemon.setOutBoundDataQ(outBoundMailBox);
    }
    //outBoundMailBox=mDaemon.getOutBoundDataQ();
    return true;
}

From source file:org.apache.hadoop.net.unix.TestDomainSocket.java

/**
 * Test a simple client/server interaction.
 *
 * @throws IOException
 */
void testClientServer1(final Class<? extends WriteStrategy> writeStrategyClass,
        final Class<? extends ReadStrategy> readStrategyClass, final DomainSocket preConnectedSockets[])
        throws Exception {
    final String TEST_PATH = new File(sockDir.getDir(), "test_sock_client_server1").getAbsolutePath();
    final byte clientMsg1[] = new byte[] { 0x1, 0x2, 0x3, 0x4, 0x5, 0x6 };
    final byte serverMsg1[] = new byte[] { 0x9, 0x8, 0x7, 0x6, 0x5 };
    final byte clientMsg2 = 0x45;
    final ArrayBlockingQueue<Throwable> threadResults = new ArrayBlockingQueue<Throwable>(2);
    final DomainSocket serv = (preConnectedSockets != null) ? null : DomainSocket.bindAndListen(TEST_PATH);
    Thread serverThread = new Thread() {
        public void run() {
            // Run server
            DomainSocket conn = null;
            try {
                conn = preConnectedSockets != null ? preConnectedSockets[0] : serv.accept();
                byte in1[] = new byte[clientMsg1.length];
                ReadStrategy reader = readStrategyClass.newInstance();
                reader.init(conn);
                reader.readFully(in1, 0, in1.length);
                Assert.assertTrue(Arrays.equals(clientMsg1, in1));
                WriteStrategy writer = writeStrategyClass.newInstance();
                writer.init(conn);
                writer.write(serverMsg1);
                InputStream connInputStream = conn.getInputStream();
                int in2 = connInputStream.read();
                Assert.assertEquals((int) clientMsg2, in2);
                conn.close();
            } catch (Throwable e) {
                threadResults.add(e);
                Assert.fail(e.getMessage());
            }
            threadResults.add(new Success());
        }
    };
    serverThread.start();

    Thread clientThread = new Thread() {
        public void run() {
            try {
                DomainSocket client = preConnectedSockets != null ? preConnectedSockets[1]
                        : DomainSocket.connect(TEST_PATH);
                WriteStrategy writer = writeStrategyClass.newInstance();
                writer.init(client);
                writer.write(clientMsg1);
                ReadStrategy reader = readStrategyClass.newInstance();
                reader.init(client);
                byte in1[] = new byte[serverMsg1.length];
                reader.readFully(in1, 0, in1.length);
                Assert.assertTrue(Arrays.equals(serverMsg1, in1));
                OutputStream clientOutputStream = client.getOutputStream();
                clientOutputStream.write(clientMsg2);
                client.close();
            } catch (Throwable e) {
                threadResults.add(e);
            }
            threadResults.add(new Success());
        }
    };
    clientThread.start();

    for (int i = 0; i < 2; i++) {
        Throwable t = threadResults.take();
        if (!(t instanceof Success)) {
            Assert.fail(t.getMessage() + ExceptionUtils.getStackTrace(t));
        }
    }
    serverThread.join(120000);
    clientThread.join(120000);
    if (serv != null) {
        serv.close();
    }
}
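
The test sizes the ArrayBlockingQueue<Throwable> to the number of worker threads so the main thread can simply take() one result per worker, blocking until each has reported either an exception or a Success marker. A minimal sketch of that result-collection pattern, with a trivial marker class standing in for the test's Success type:

import java.util.concurrent.ArrayBlockingQueue;

public class ThreadResultsDemo {
    // Marker meaning "this worker finished without throwing"
    private static final class Success extends Throwable {}

    public static void main(String[] args) throws InterruptedException {
        final int workers = 2;
        final ArrayBlockingQueue<Throwable> results = new ArrayBlockingQueue<>(workers);

        for (int i = 0; i < workers; i++) {
            final int id = i;
            new Thread(() -> {
                try {
                    if (id == 1) {
                        throw new IllegalStateException("worker " + id + " failed");
                    }
                    results.add(new Success());
                } catch (Throwable t) {
                    results.add(t);   // one result per worker, so add() never overflows
                }
            }).start();
        }

        for (int i = 0; i < workers; i++) {
            Throwable t = results.take();   // block until every worker has reported
            System.out.println(t instanceof Success ? "success" : "failure: " + t.getMessage());
        }
    }
}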