Example usage for java.util.concurrent LinkedBlockingQueue LinkedBlockingQueue

Introduction

On this page you can find example usages of the no-argument java.util.concurrent.LinkedBlockingQueue constructor.

Prototype

public LinkedBlockingQueue() 

Document

Creates a LinkedBlockingQueue with a capacity of Integer.MAX_VALUE.
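
As a quick illustration (a minimal sketch; the class name and queue contents below are made up for this example), the no-argument constructor yields an effectively unbounded queue: put() never blocks for capacity, while take() blocks until an element is available.

import java.util.concurrent.LinkedBlockingQueue;

public class LinkedBlockingQueueExample {
    public static void main(String[] args) throws InterruptedException {
        // No capacity argument: the queue is bounded only by Integer.MAX_VALUE.
        LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<String>();

        // put() does not block for capacity on an effectively unbounded queue.
        queue.put("first");
        queue.put("second");

        // take() blocks until an element is available; here elements are already queued.
        System.out.println(queue.take()); // prints "first" (FIFO order)
        System.out.println(queue.size()); // prints 1
    }
}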

Usage

From source file:ca.ualberta.cmput301w14t08.geochan.managers.ThreadManager.java

/**
 * Private constructor due to singleton pattern.
 */
private ThreadManager() {
    commentListCache = new LruCache<String, CommentList>(MAXIMUM_CACHE_SIZE);
    getImageCache = new LruCache<String, Bitmap>(MAXIMUM_CACHE_SIZE);
    getPOICache = new LruCache<String, String>(MAXIMUM_CACHE_SIZE);

    getCommentListRunnableQueue = new LinkedBlockingQueue<Runnable>();
    getCommentsRunnableQueue = new LinkedBlockingQueue<Runnable>();
    postImageRunnableQueue = new LinkedBlockingQueue<Runnable>();
    postRunnableQueue = new LinkedBlockingQueue<Runnable>();
    updateRunnableQueue = new LinkedBlockingQueue<Runnable>();
    getImageRunnableQueue = new LinkedBlockingQueue<Runnable>();
    getThreadCommentsRunnableQueue = new LinkedBlockingQueue<Runnable>();
    getPOIRunnableQueue = new LinkedBlockingQueue<Runnable>();

    getCommentsTaskQueue = new LinkedBlockingQueue<GetCommentsTask>();
    postTaskQueue = new LinkedBlockingQueue<PostTask>();
    getImageTaskQueue = new LinkedBlockingQueue<GetImageTask>();
    getThreadCommentsTaskQueue = new LinkedBlockingQueue<GetThreadCommentsTask>();
    getPOITaskQueue = new LinkedBlockingQueue<GetPOITask>();

    getCommentListPool = new ThreadPoolExecutor(CORE_POOL_SIZE, MAXIMUM_POOL_SIZE, KEEP_ALIVE_TIME,
            KEEP_ALIVE_TIME_UNIT, getCommentListRunnableQueue);
    getCommentsPool = new ThreadPoolExecutor(CORE_POOL_SIZE, MAXIMUM_POOL_SIZE, KEEP_ALIVE_TIME,
            KEEP_ALIVE_TIME_UNIT, getCommentsRunnableQueue);
    postImagePool = new ThreadPoolExecutor(CORE_POOL_SIZE, MAXIMUM_POOL_SIZE, KEEP_ALIVE_TIME,
            KEEP_ALIVE_TIME_UNIT, postImageRunnableQueue);
    postPool = new ThreadPoolExecutor(CORE_POOL_SIZE, MAXIMUM_POOL_SIZE, KEEP_ALIVE_TIME, KEEP_ALIVE_TIME_UNIT,
            postRunnableQueue);
    updatePool = new ThreadPoolExecutor(CORE_POOL_SIZE, MAXIMUM_POOL_SIZE, KEEP_ALIVE_TIME,
            KEEP_ALIVE_TIME_UNIT, updateRunnableQueue);
    getImagePool = new ThreadPoolExecutor(CORE_POOL_SIZE, MAXIMUM_POOL_SIZE, KEEP_ALIVE_TIME,
            KEEP_ALIVE_TIME_UNIT, getImageRunnableQueue);
    getThreadCommentsPool = new ThreadPoolExecutor(CORE_POOL_SIZE, MAXIMUM_POOL_SIZE, KEEP_ALIVE_TIME,
            KEEP_ALIVE_TIME_UNIT, getThreadCommentsRunnableQueue);
    getPOIPool = new ThreadPoolExecutor(CORE_POOL_SIZE, MAXIMUM_POOL_SIZE, KEEP_ALIVE_TIME,
            KEEP_ALIVE_TIME_UNIT, getPOIRunnableQueue);

    handler = new Handler(Looper.getMainLooper()) {

        @Override
        public void handleMessage(Message inputMessage) {
            switch (inputMessage.what) {
            case POST_TASK_COMPLETE:
                PostTask postTaskComplete = (PostTask) inputMessage.obj;
                if (postTaskComplete.getDialog() != null) {
                    postTaskComplete.getDialog().dismiss();
                }
                ThreadComment threadComment = postTaskComplete.getThreadComment();
                if (threadComment != null) {
                    if (!postTaskComplete.isEdit()) {
                        // Update the model and sort accordingly
                        ThreadList.addThread(threadComment);
                        SortUtil.sortThreads(PreferencesManager.getInstance().getThreadSort(),
                                ThreadList.getThreads());
                    }
                    FragmentActivity activity = (FragmentActivity) context;
                    ThreadListFragment fragment = (ThreadListFragment) activity.getSupportFragmentManager()
                            .findFragmentByTag("threadListFrag");
                    if (fragment != null) {
                        fragment.finishReload();
                    }
                }
                break;

            case GET_THREADS_COMPLETE:
                GetThreadCommentsTask threadTask = (GetThreadCommentsTask) inputMessage.obj;
                threadTask.getFragment().finishReload();
                recycleGetThreadCommentsTask(threadTask);
                break;

            case GET_THREADS_FAILED:
                GetThreadCommentsTask threadTaskFail = (GetThreadCommentsTask) inputMessage.obj;
                threadTaskFail.getFragment().finishReload();
                recycleGetThreadCommentsTask(threadTaskFail);
                break;

            case GET_COMMENTS_COMPLETE:
                GetCommentsTask task = (GetCommentsTask) inputMessage.obj;
                task.getFragment().finishReload();
                recycleCommentsTask(task);
                break;

            case GET_COMMENTS_FAILED:
                GetCommentsTask taskFail = (GetCommentsTask) inputMessage.obj;
                taskFail.getFragment().finishReload();
                recycleCommentsTask(taskFail);
                break;

            case GET_COMMENT_LIST_RUNNING:
                break;

            case GET_COMMENT_LIST_FAILED:
                GetCommentsTask taskListFail = (GetCommentsTask) inputMessage.obj;
                taskListFail.getFragment().finishReload();
                recycleCommentsTask(taskListFail);
                break;

            case GET_IMAGE_RUNNING:
                GetImageTask imageTask = (GetImageTask) inputMessage.obj;
                if (imageTask.getDialog() != null) {
                    imageTask.getDialog().show();
                }
                break;

            case GET_IMAGE_FAILED:
                GetImageTask imageTaskFail = (GetImageTask) inputMessage.obj;
                if (imageTaskFail.getDialog() != null) {
                    imageTaskFail.getDialog().dismiss();
                }
                recycleGetImageTask(imageTaskFail);
                break;

            case GET_IMAGE_COMPLETE:
                GetImageTask imageTaskComplete = (GetImageTask) inputMessage.obj;
                if (imageTaskComplete.getDialog() != null) {
                    imageTaskComplete.getDialog().dismiss();
                }
                Bitmap bitmap = imageTaskComplete.getImageCache();
                String id = imageTaskComplete.getId();
                ImageView view = imageTaskComplete.getmImageWeakRef().get();
                if (view != null) {
                    view.setImageBitmap(bitmap);
                }
                CacheManager.getInstance().serializeImage(bitmap, id);
                recycleGetImageTask(imageTaskComplete);
                break;

            case GET_POI_RUNNING:
                GetPOITask poiTaskRunning = (GetPOITask) inputMessage.obj;
                if (poiTaskRunning.getDialog() != null) {
                    poiTaskRunning.getDialog().show();
                }
                break;

            case GET_POI_COMPLETE:
                GetPOITask poiTaskComplete = (GetPOITask) inputMessage.obj;
                if (poiTaskComplete.getDialog() != null) {
                    poiTaskComplete.getDialog().dismiss();
                }
                if (poiTaskComplete.getMarker() != null) {
                    poiTaskComplete.getMarker().setSubDescription((poiTaskComplete.getPOICache()));
                    poiTaskComplete.getMarker().showInfoWindow();

                }
                poiTaskComplete.getLocation().setLocationDescription(poiTaskComplete.getPOICache());
                recycleGetPOITask(poiTaskComplete);
                break;

            case GET_POI_FAILED:
                GetPOITask poiTaskFailed = (GetPOITask) inputMessage.obj;
                if (poiTaskFailed.getDialog() != null) {
                    poiTaskFailed.getDialog().dismiss();
                }
                if (poiTaskFailed.getMarker() != null) {
                    poiTaskFailed.getMarker().setSubDescription(("Unknown Location"));
                    poiTaskFailed.getMarker().showInfoWindow();
                }
                poiTaskFailed.getLocation().setLocationDescription("Unknown Location");
                recycleGetPOITask(poiTaskFailed);
                break;

            case POST_GET_POI_RUNNING:
                PostTask postPoiTaskRunning = (PostTask) inputMessage.obj;
                if (postPoiTaskRunning.getDialog() != null) {
                    postPoiTaskRunning.getDialog().show();
                }
                break;

            case POST_GET_POI_COMPLETE:
                PostTask postPoiTaskComplete = (PostTask) inputMessage.obj;
                if (postPoiTaskComplete.getDialog() != null) {
                    postPoiTaskComplete.getDialog().setMessage("Posting to Server");
                }
                break;

            case POST_GET_POI_FAILED:
                PostTask postPoiTaskFailed = (PostTask) inputMessage.obj;
                if (postPoiTaskFailed.getDialog() != null) {
                    postPoiTaskFailed.getDialog().dismiss();
                }
                break;

            case UPDATE_FAILED:
                PostTask postTaskUpdateFailed = (PostTask) inputMessage.obj;
                if (postTaskUpdateFailed.getDialog() != null) {
                    postTaskUpdateFailed.getDialog().dismiss();
                }
                break;

            case POST_FAILED:
                PostTask postTaskFailed = (PostTask) inputMessage.obj;
                if (postTaskFailed.getDialog() != null) {
                    postTaskFailed.getDialog().dismiss();
                }
                break;

            case POST_RUNNING:
                PostTask postTaskRun = (PostTask) inputMessage.obj;
                if (postTaskRun.getDialog() != null && !postTaskRun.getDialog().isShowing()) {
                    postTaskRun.getDialog().show();
                }
                break;

            case POST_IMAGE_FAILED:
                PostTask postTaskImageFailed = (PostTask) inputMessage.obj;
                if (postTaskImageFailed.getDialog() != null) {
                    postTaskImageFailed.getDialog().dismiss();
                }
                break;

            default:
                super.handleMessage(inputMessage);
                break;
            }
        }
    };
}

From source file:io.druid.data.input.impl.PrefetchableTextFilesFirehoseFactory.java

@Override
public Firehose connect(StringInputRowParser firehoseParser, File temporaryDirectory) throws IOException {
    if (maxCacheCapacityBytes == 0 && maxFetchCapacityBytes == 0) {
        return super.connect(firehoseParser, temporaryDirectory);
    }

    if (objects == null) {
        objects = ImmutableList.copyOf(Preconditions.checkNotNull(initObjects(), "objects"));
    }

    Preconditions.checkState(temporaryDirectory.exists(), "temporaryDirectory[%s] does not exist",
            temporaryDirectory);
    Preconditions.checkState(temporaryDirectory.isDirectory(), "temporaryDirectory[%s] is not a directory",
            temporaryDirectory);

    // fetchExecutor is responsible for background data fetching
    final ExecutorService fetchExecutor = createFetchExecutor();

    return new FileIteratingFirehose(new Iterator<LineIterator>() {
        // When prefetching is enabled, fetchFiles and nextFetchIndex are updated by the fetchExecutor thread, but
        // read by both the main thread (in hasNext()) and the fetchExecutor thread (in fetch()). To guarantee that
        // fetchFiles and nextFetchIndex are updated atomically, this lock must be held before updating
        // them.
        private final Object fetchLock = new Object();
        private final LinkedBlockingQueue<FetchedFile> fetchFiles = new LinkedBlockingQueue<>();

        // Number of bytes of the currently fetched files.
        // This is updated when a file is successfully fetched or a fetched file is deleted.
        private final AtomicLong fetchedBytes = new AtomicLong(0);
        private final boolean cacheInitialized;
        private final boolean prefetchEnabled;

        private Future<Void> fetchFuture;
        private int cacheIterateIndex;
        // nextFetchIndex indicates which object should be downloaded when fetch is triggered.
        private int nextFetchIndex;

        {
            cacheInitialized = totalCachedBytes > 0;
            prefetchEnabled = maxFetchCapacityBytes > 0;

            if (cacheInitialized) {
                nextFetchIndex = cacheFiles.size();
            }
            if (prefetchEnabled) {
                fetchIfNeeded(totalCachedBytes);
            }
        }

        private void fetchIfNeeded(long remainingBytes) {
            if ((fetchFuture == null || fetchFuture.isDone()) && remainingBytes <= prefetchTriggerBytes) {
                fetchFuture = fetchExecutor.submit(() -> {
                    fetch();
                    return null;
                });
            }
        }

        /**
         * Fetch objects to a local disk up to {@link PrefetchableTextFilesFirehoseFactory#maxFetchCapacityBytes}.
         * This method is not thread safe and must be called by a single thread. Note that even if
         * {@link PrefetchableTextFilesFirehoseFactory#maxFetchCapacityBytes} is 0, at least 1 file is always fetched.
         * This is for simplifying design, and should be improved when our client implementations for cloud storages
         * like S3 support range scan.
         */
        private void fetch() throws Exception {
            for (int i = nextFetchIndex; i < objects.size()
                    && fetchedBytes.get() <= maxFetchCapacityBytes; i++) {
                final ObjectType object = objects.get(i);
                LOG.info("Fetching object[%s], fetchedBytes[%d]", object, fetchedBytes.get());
                final File outFile = File.createTempFile(FETCH_FILE_PREFIX, null, temporaryDirectory);
                fetchedBytes.addAndGet(download(object, outFile, 0));
                synchronized (fetchLock) {
                    fetchFiles.put(new FetchedFile(object, outFile));
                    nextFetchIndex++;
                }
            }
        }

        /**
         * Downloads an object. It retries downloading up to {@link PrefetchableTextFilesFirehoseFactory#maxFetchRetry}
         * times and throws an exception if every attempt fails.
         *
         * @param object   an object to be downloaded
         * @param outFile  a file in which the object data is stored
         * @param tryCount current retry count
         *
         * @return number of downloaded bytes
         *
         * @throws IOException
         */
        private long download(ObjectType object, File outFile, int tryCount) throws IOException {
            try (final InputStream is = openObjectStream(object);
                    final CountingOutputStream cos = new CountingOutputStream(new FileOutputStream(outFile))) {
                IOUtils.copy(is, cos);
                return cos.getCount();
            } catch (IOException e) {
                final int nextTry = tryCount + 1;
                if (!Thread.currentThread().isInterrupted() && nextTry < maxFetchRetry) {
                    LOG.error(e, "Failed to download object[%s], retrying (%d of %d)", object, nextTry,
                            maxFetchRetry);
                    outFile.delete();
                    return download(object, outFile, nextTry);
                } else {
                    LOG.error(e, "Failed to download object[%s], retries exhausted, aborting", object);
                    throw e;
                }
            }
        }

        @Override
        public boolean hasNext() {
            synchronized (fetchLock) {
                return (cacheInitialized && cacheIterateIndex < cacheFiles.size()) || !fetchFiles.isEmpty()
                        || nextFetchIndex < objects.size();
            }
        }

        @Override
        public LineIterator next() {
            if (!hasNext()) {
                throw new NoSuchElementException();
            }

            // If fetch() fails, hasNext() always returns true because nextFetchIndex must be smaller than the number
            // of objects, which means next() is always called. The method below checks whether fetch() threw an
            // exception and propagates it if one exists.
            checkFetchException();

            final OpenedObject openedObject;

            try {
                // Check cache first
                if (cacheInitialized && cacheIterateIndex < cacheFiles.size()) {
                    final FetchedFile fetchedFile = cacheFiles.get(cacheIterateIndex++);
                    openedObject = new OpenedObject(fetchedFile, getNoopCloser());
                } else if (prefetchEnabled) {
                    openedObject = openObjectFromLocal();
                } else {
                    openedObject = openObjectFromRemote();
                }

                final InputStream stream = wrapObjectStream(openedObject.object, openedObject.objectStream);

                return new ResourceCloseableLineIterator(new InputStreamReader(stream, Charsets.UTF_8),
                        openedObject.resourceCloser);
            } catch (IOException e) {
                throw Throwables.propagate(e);
            }
        }

        private void checkFetchException() {
            if (fetchFuture != null && fetchFuture.isDone()) {
                try {
                    fetchFuture.get();
                    fetchFuture = null;
                } catch (InterruptedException | ExecutionException e) {
                    throw Throwables.propagate(e);
                }
            }
        }

        private OpenedObject openObjectFromLocal() throws IOException {
            final FetchedFile fetchedFile;
            final Closeable resourceCloser;

            if (!fetchFiles.isEmpty()) {
                // If there are already fetched files, use them
                fetchedFile = fetchFiles.poll();
                resourceCloser = cacheIfPossibleAndGetCloser(fetchedFile, fetchedBytes);
                fetchIfNeeded(fetchedBytes.get());
            } else {
                // Otherwise, wait for fetching
                try {
                    fetchIfNeeded(fetchedBytes.get());
                    fetchedFile = fetchFiles.poll(fetchTimeout, TimeUnit.MILLISECONDS);
                    if (fetchedFile == null) {
                        // Check whether the latest fetch failed
                        checkFetchException();
                        // Or throw a timeout exception
                        throw new RuntimeException(new TimeoutException());
                    }
                    resourceCloser = cacheIfPossibleAndGetCloser(fetchedFile, fetchedBytes);
                    // trigger fetch again for subsequent next() calls
                    fetchIfNeeded(fetchedBytes.get());
                } catch (InterruptedException e) {
                    throw Throwables.propagate(e);
                }
            }
            return new OpenedObject(fetchedFile, resourceCloser);
        }

        private OpenedObject openObjectFromRemote() throws IOException {
            final OpenedObject openedObject;
            final Closeable resourceCloser = getNoopCloser();

            if (totalCachedBytes < maxCacheCapacityBytes) {
                LOG.info("Caching object[%s]", objects.get(nextFetchIndex));
                try {
                    // Since maxFetchCapacityBytes is 0, at most one file is fetched.
                    fetch();
                    FetchedFile fetchedFile = fetchFiles.poll();
                    if (fetchedFile == null) {
                        throw new ISE("Cannot fetch object[%s]", objects.get(nextFetchIndex));
                    }
                    cacheIfPossible(fetchedFile);
                    fetchedBytes.addAndGet(-fetchedFile.length());
                    openedObject = new OpenedObject(fetchedFile, resourceCloser);
                } catch (Exception e) {
                    throw Throwables.propagate(e);
                }
            } else {
                final ObjectType object = objects.get(nextFetchIndex++);
                LOG.info("Reading object[%s]", object);
                openedObject = new OpenedObject(object, openObjectStream(object), resourceCloser);
            }
            return openedObject;
        }
    }, firehoseParser, () -> {
        fetchExecutor.shutdownNow();
        try {
            Preconditions.checkState(fetchExecutor.awaitTermination(fetchTimeout, TimeUnit.MILLISECONDS));
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new ISE("Failed to shutdown fetch executor during close");
        }
    });
}

From source file:groovyx.net.http.AsyncHTTPBuilder.java

/**
 * Initializes threading parameters for the HTTPClient's
 * {@link ThreadSafeClientConnManager}, and this class' ThreadPoolExecutor.
 */
protected void initThreadPools(final int poolSize, final ExecutorService threadPool) {
    if (poolSize < 1)
        throw new IllegalArgumentException("poolSize may not be < 1");
    // Create and initialize HTTP parameters
    HttpParams params = client != null ? client.getParams() : new BasicHttpParams();
    ConnManagerParams.setMaxTotalConnections(params, poolSize);
    ConnManagerParams.setMaxConnectionsPerRoute(params, new ConnPerRouteBean(poolSize));

    HttpProtocolParams.setVersion(params, HttpVersion.HTTP_1_1);

    // Create and initialize scheme registry
    SchemeRegistry schemeRegistry = new SchemeRegistry();
    schemeRegistry.register(new Scheme("http", PlainSocketFactory.getSocketFactory(), 80));
    schemeRegistry.register(new Scheme("https", SSLSocketFactory.getSocketFactory(), 443));

    ClientConnectionManager cm = new ThreadSafeClientConnManager(params, schemeRegistry);
    super.client = new DefaultHttpClient(cm, params);

    this.threadPool = threadPool != null ? threadPool
            : new ThreadPoolExecutor(poolSize, poolSize, 120, TimeUnit.SECONDS,
                    new LinkedBlockingQueue<Runnable>());
}

From source file:org.apache.axis2.transport.xmpp.XMPPListener.java

/**
 * Start a pool of Workers. For each connection in connectionFactories,
 * assign a packet listener. This packet listener will trigger when a
 * message arrives.
 */
public void start() throws AxisFault {
    // create thread pool of workers
    ExecutorService workerPool = new ThreadPoolExecutor(1, WORKERS_MAX_THREADS, WORKER_KEEP_ALIVE, TIME_UNIT,
            new LinkedBlockingQueue<Runnable>(), new org.apache.axis2.util.threadpool.DefaultThreadFactory(
                    new ThreadGroup("XMPP Worker thread group"), "XMPPWorker"));

    Iterator iter = connectionFactories.values().iterator();
    while (iter.hasNext()) {
        XMPPConnectionFactory connectionFactory = (XMPPConnectionFactory) iter.next();
        XMPPPacketListener xmppPacketListener = new XMPPPacketListener(connectionFactory,
                this.configurationContext, workerPool);
        connectionFactory.listen(xmppPacketListener);
    }
}

From source file:com.google.gplus.provider.TestGPlusUserActivityCollector.java

/**
 * Creates a randomized activity and randomized date range.
 * The activity feed is separated into three chunks,
 * |. . . data too recent to be in date range . . .||. . . data in date range. . .||. . . data too old to be in date range|
 * [index 0, ............................................................................................., index length-1]
 * Inside of those chunks data has no order, but the list is ordered by those three chunks.
 *
 * The test checks whether the number of activities inside the date range makes it onto the output queue.
 */
@Test
@Repeat(iterations = 3)
public void testWithBeforeAndAfterDates() throws InterruptedException {
    //initialize counts assuming no date ranges will be used
    int numActivities = randomIntBetween(0, 1000);
    int numActivitiesInDateRange = numActivities;
    int numberOutOfRange = 0;
    int numBeforeRange = 0;
    int numAfterRange = 0;
    //determine if date ranges will be used
    DateTime beforeDate = null;
    DateTime afterDate = null;
    if (randomInt() % 2 == 0) {
        beforeDate = DateTime.now().minusDays(randomIntBetween(1, 5));
    }
    if (randomInt() % 2 == 0) {
        if (beforeDate == null) {
            afterDate = DateTime.now().minusDays(randomIntBetween(1, 10));
        } else {
            afterDate = beforeDate.minusDays(randomIntBetween(1, 10));
        }
    }
    //update counts if date ranges are going to be used.
    if (beforeDate != null || afterDate != null) { //assign amount to be in range
        numActivitiesInDateRange = randomIntBetween(0, numActivities);
        numberOutOfRange = numActivities - numActivitiesInDateRange;
    }
    if (beforeDate == null && afterDate != null) { //assign all out of range to be before the start of the range
        numBeforeRange = numberOutOfRange;
    } else if (beforeDate != null && afterDate == null) { //assign all out of range to be after the start of the range
        numAfterRange = numberOutOfRange;
    } else if (beforeDate != null && afterDate != null) { //assign half before range and half after the range
        numAfterRange = (numberOutOfRange / 2) + (numberOutOfRange % 2);
        numBeforeRange = numberOutOfRange / 2;
    }

    Plus plus = createMockPlus(numBeforeRange, numAfterRange, numActivitiesInDateRange, afterDate, beforeDate);
    BackOffStrategy strategy = new ConstantTimeBackOffStrategy(1);
    BlockingQueue<StreamsDatum> datums = new LinkedBlockingQueue<>();
    UserInfo userInfo = new UserInfo();
    userInfo.setUserId("A");
    userInfo.setAfterDate(afterDate);
    userInfo.setBeforeDate(beforeDate);
    GPlusUserActivityCollector collector = new GPlusUserActivityCollector(plus, datums, strategy, userInfo);
    collector.run();

    assertEquals(numActivitiesInDateRange, datums.size());
    while (!datums.isEmpty()) {
        StreamsDatum datum = datums.take();
        assertNotNull(datum);
        assertNotNull(datum.getDocument());
        assertTrue(datum.getDocument() instanceof String);
        assertTrue(((String) datum.getDocument()).contains(IN_RANGE_IDENTIFIER)); //only in range documents are on the out going queue.
    }
}

From source file:org.apache.ambari.server.state.services.AlertNoticeDispatchService.java

/**
 * Constructor.
 */
public AlertNoticeDispatchService() {
    m_executor = new ThreadPoolExecutor(0, 2, 5L, TimeUnit.MINUTES, new LinkedBlockingQueue<Runnable>(),
            new AlertDispatchThreadFactory(), new ThreadPoolExecutor.CallerRunsPolicy());

    GsonBuilder gsonBuilder = new GsonBuilder();
    gsonBuilder.registerTypeAdapter(AlertTargetProperties.class, new AlertTargetPropertyDeserializer());

    m_gson = gsonBuilder.create();
}

From source file:org.kaaproject.kaa.server.verifiers.facebook.verifier.FacebookUserVerifier.java

@Override
public void start() {
    LOG.info("facebook user verifier started");
    tokenVerifiersPool = new ThreadPoolExecutor(0, configuration.getMaxParallelConnections(),
            MAX_SEC_FACEBOOK_REQUEST_TIME, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>());
    PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager();
    httpClient = HttpClients.custom().setConnectionManager(connectionManager).build();
    // Increase max total connection
    connectionManager.setMaxTotal(configuration.getMaxParallelConnections());
}

From source file:org.obm.opush.PingHandlerTest.java

@Test
@Ignore("OBMFULL-4125")
public void test3BlockingClient() throws Exception {
    prepareMockNoChange(Arrays.asList(users.jaures));

    opushServer.start();

    OPClient opClient = testUtils.buildWBXMLOpushClient(users.jaures, opushServer.getHttpPort(), httpClient);

    ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(20, 20, 1, TimeUnit.MINUTES,
            new LinkedBlockingQueue<Runnable>());

    Stopwatch stopwatch = Stopwatch.createStarted();

    List<Future<Document>> futures = new ArrayList<Future<Document>>();
    for (int i = 0; i < 4; ++i) {
        futures.add(queuePingCommand(opClient, users.jaures, threadPoolExecutor));
    }

    for (Future<Document> f : futures) {
        Document response = f.get();
        checkNoChangeResponse(response);
    }

    checkExecutionTime(2, 5, stopwatch);
}

From source file:au.org.ala.spatial.analysis.layers.LayerDistanceIndex.java

/**
 * @param threadcount    number of threads to run analysis.
 * @param onlyThesePairs array of distances to run as fieldId1 + " " +
 *                       fieldId2 where fieldId1.compareTo(fieldId2) &lt; 0 or null for all missing
 *                       distances.
 * @throws InterruptedException
 */
public void occurrencesUpdate(int threadcount, String[] onlyThesePairs) throws InterruptedException {

    //create distances file if it does not exist.
    File layerDistancesFile = new File(IntersectConfig.getAlaspatialOutputPath() + LAYER_DISTANCE_FILE);
    if (!layerDistancesFile.exists()) {
        FileWriter fw = null;
        try {
            fw = new FileWriter(layerDistancesFile);
            fw.flush();
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        } finally {
            if (fw != null) {
                try {
                    fw.close();
                } catch (Exception e) {
                    logger.error(e.getMessage(), e);
                }
            }
        }
    }

    Map<String, Double> map = loadDistances();

    LinkedBlockingQueue<String> todo = new LinkedBlockingQueue<String>();

    if (onlyThesePairs != null && onlyThesePairs.length > 0) {
        for (String s : onlyThesePairs) {
            todo.add(s);
        }
    } else {
        //find all environmental layer analysis files
        File root = new File(IntersectConfig.getAlaspatialOutputPath());
        File[] dirs = root.listFiles(new FileFilter() {

            @Override
            public boolean accept(File pathname) {
                return pathname != null && pathname.isDirectory();
            }
        });

        HashMap<String, String> domains = new HashMap<String, String>();
        for (File dir : dirs) {
            //iterate through files so we get everything
            File[] files = new File(dir.getPath()).listFiles(new FileFilter() {

                @Override
                public boolean accept(File pathname) {
                    return pathname.getName().endsWith(".grd") && pathname.getName().startsWith("el");
                }
            });

            for (int i = 0; i < files.length; i++) {
                for (int j = i + 1; j < files.length; j++) {
                    String file1 = files[i].getName().replace(".grd", "");
                    String file2 = files[j].getName().replace(".grd", "");

                    //only operate on file names that are valid fields
                    if (Client.getFieldDao().getFieldById(file1) != null
                            && Client.getFieldDao().getFieldById(file2) != null) {

                        String domain1 = domains.get(file1);
                        if (domain1 == null) {
                            String pid1 = Client.getFieldDao().getFieldById(file1).getSpid();
                            domain1 = Client.getLayerDao().getLayerById(Integer.parseInt(pid1)).getdomain();
                            domains.put(file1, domain1);
                        }
                        String domain2 = domains.get(file2);
                        if (domain2 == null) {
                            String pid2 = Client.getFieldDao().getFieldById(file2).getSpid();
                            domain2 = Client.getLayerDao().getLayerById(Integer.parseInt(pid2)).getdomain();
                            domains.put(file2, domain2);
                        }

                        String key = (file1.compareTo(file2) < 0) ? file1 + " " + file2 : file2 + " " + file1;

                        //domain test
                        if (isSameDomain(parseDomain(domain1), parseDomain(domain2))) {
                            if (!map.containsKey(key) && !todo.contains(key)) {
                                todo.put(key);
                            }
                        }
                    }
                }
            }
        }
    }

    LinkedBlockingQueue<String> toDisk = new LinkedBlockingQueue<String>();
    CountDownLatch cdl = new CountDownLatch(todo.size());
    CalcThread[] threads = new CalcThread[threadcount];
    for (int i = 0; i < threadcount; i++) {
        threads[i] = new CalcThread(cdl, todo, toDisk);
        threads[i].start();
    }

    ToDiskThread toDiskThread = new ToDiskThread(
            IntersectConfig.getAlaspatialOutputPath() + LAYER_DISTANCE_FILE, toDisk);
    toDiskThread.start();

    cdl.await();

    for (int i = 0; i < threadcount; i++) {
        threads[i].interrupt();
    }

    toDiskThread.interrupt();
}

From source file:com.chinamobile.bcbsp.bspcontroller.QueueManager.java

/**
 * Get the jobs in all the wait queues.
 * @return the jobs from all wait queues
 */
public Collection<JobInProgress> getJobs() {
    Collection<JobInProgress> jobs = new LinkedBlockingQueue<JobInProgress>();
    String[] waitQueues = { "HIGHER_WAIT_QUEUE", "HIGH_WAIT_QUEUE", "NORMAL_WAIT_QUEUE", "LOW_WAIT_QUEUE",
            "LOWER_WAIT_QUEUE" };
    for (String waitQueue : waitQueues) {
        jobs.addAll(findQueue(waitQueue).getJobs());
    }
    return jobs;
}