Example usage for java.util.concurrent ArrayBlockingQueue ArrayBlockingQueue(int capacity)

Introduction

This page collects example usages of the java.util.concurrent ArrayBlockingQueue(int capacity) constructor.

Prototype

public ArrayBlockingQueue(int capacity) 

Document

Creates an ArrayBlockingQueue with the given (fixed) capacity and default access policy.
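
As a quick, self-contained illustration (not taken from any of the projects below), the following sketch shows the two properties the description names: the capacity is fixed at construction, and elements are handled FIFO under the default (non-fair) access policy. All values are arbitrary.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class BoundedQueueDemo {
    public static void main(String[] args) throws InterruptedException {
        // Capacity is fixed at construction time and can never grow.
        BlockingQueue<String> queue = new ArrayBlockingQueue<>(2);

        queue.put("first"); // succeeds
        queue.put("second"); // succeeds; the queue is now full

        // offer() returns false instead of blocking when the queue is full.
        System.out.println("third accepted? " + queue.offer("third")); // false

        System.out.println(queue.take()); // "first" (FIFO order)
    }
}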

Usage

From source file:com.datatorrent.contrib.apachelog.ApacheLogInputGenerator.java

@Override
public void setup(OperatorContext arg0) {
    holdingBuffer = new ArrayBlockingQueue<String>(bufferSize);
    try {
        ipAddress = readLines(ipAddressFile);
        List<String> urlByteStatus = readLines(urlFile);
        referers = readLines(refererFile);
        agents = readLines(agentFile);
        // drop the first entry if it is a comment line starting with #
        if (urlByteStatus.get(0).startsWith("#")) {
            urlByteStatus.remove(0);
        }

        LOG.info("Number of IP Addresses: {}", ipAddress.size());
        LOG.info("Number of URLs: {}", urlByteStatus.size());
        LOG.info("Number of Referers: {}", referers.size());
        LOG.info("Number of User Agents: {}", agents.size());
        url = new ArrayList<String>();
        bytes = new ArrayList<Integer>();
        status = new ArrayList<Integer>();
        StringTokenizer token;
        for (String str : urlByteStatus) {
            token = new StringTokenizer(str, delimiter);
            url.add(token.nextToken().trim());
            bytes.add(Integer.parseInt(token.nextToken().trim()));
            status.add(Integer.parseInt(token.nextToken().trim()));
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }

    random = new Random();
    ipAddressCount = ipAddress.size();
    agentsCount = agents.size();
    urlCount = url.size();
    refererCount = referers.size();
    sdf = new SimpleDateFormat("dd/MMM/yyyy:HH:mm:ss Z");
}
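
The setup() above only constructs the bounded holdingBuffer; the producer/consumer handoff it implies might look like the minimal standalone sketch below. The capacity of 1024 and the 10,000-line workload are illustrative stand-ins, not values from the project.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class HoldingBufferSketch {
    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> holdingBuffer = new ArrayBlockingQueue<>(1024);

        Thread generator = new Thread(() -> {
            try {
                for (int i = 0; i < 10_000; i++) {
                    // put() blocks once the buffer is full, so the generator
                    // can never run ahead of the consumer by more than 1024 lines.
                    holdingBuffer.put("log line " + i);
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        generator.start();

        // Consumer side: drain exactly the number of lines produced.
        for (int drained = 0; drained < 10_000; drained++) {
            holdingBuffer.take();
        }
        generator.join();
        System.out.println("buffer empty: " + holdingBuffer.isEmpty()); // true
    }
}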

From source file:org.sbs.goodcrawler.storage.PendingStore.java

/**
 * Restores any previously persisted store state from disk; otherwise a
 * fresh bounded queue is created.
 */
private void init() {
    File file = new File(PropertyConfigurationHelper.getInstance().getString("status.save.path", "status")
            + File.separator + "stores.good");
    if (file.exists()) {
        try {
            FileInputStream fisUrl = new FileInputStream(file);
            ObjectInputStream oisUrl = new ObjectInputStream(fisUrl);
            instance = (PendingStore) oisUrl.readObject();
            oisUrl.close();
            fisUrl.close();
            Queue = instance.Queue;
            failure = instance.failure;
            success = instance.success;
            count = instance.count;
            ignored = instance.ignored;
            System.out.println("recovery store queue..." + Queue.size());
        } catch (IOException | ClassNotFoundException e) {
            // FileNotFoundException is an IOException, so one multi-catch suffices
            e.printStackTrace();
        }
    }
    if (null == Queue)
        Queue = new ArrayBlockingQueue<>(PropertyConfigurationHelper.getInstance()
                .getInt(GlobalConstants.pendingStoreMessgeQueueSize, 2000));
}

From source file:com.kurento.kmf.media.PointerDetectorFilterTest.java

/**
 * @throws InterruptedException
 */
@Test
public void testWindowEvents() throws InterruptedException {

    PointerDetectorWindowMediaParam window0 = new PointerDetectorWindowMediaParam("window0", 50, 50, 200, 50);

    PointerDetectorWindowMediaParam window1 = new PointerDetectorWindowMediaParam("window1", 50, 50, 200, 150);

    filter.addWindow(window0);
    filter.addWindow(window1);

    final BlockingQueue<WindowInEvent> eventsIn = new ArrayBlockingQueue<WindowInEvent>(1);

    final BlockingQueue<WindowOutEvent> eventsOut = new ArrayBlockingQueue<WindowOutEvent>(1);

    filter.addWindowInListener(new MediaEventListener<WindowInEvent>() {

        @Override
        public void onEvent(WindowInEvent event) {
            eventsIn.add(event);
        }
    });

    filter.addWindowOutListener(new MediaEventListener<WindowOutEvent>() {

        @Override
        public void onEvent(WindowOutEvent event) {
            eventsOut.add(event);
        }
    });

    player.play();
    Assert.assertTrue("window0".equals(eventsIn.poll(20, SECONDS).getWindowId()));
    Assert.assertTrue("window0".equals(eventsOut.poll(5, SECONDS).getWindowId()));

    player.stop();
}
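
The pattern above, capturing asynchronous listener callbacks in a small ArrayBlockingQueue and turning them into synchronous assertions via poll(timeout), works outside Kurento as well. A stripped-down sketch, with a plain String standing in for the event type:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

public class EventCaptureSketch {
    public static void main(String[] args) throws InterruptedException {
        // Capacity 1 is enough when each event is consumed before the next fires.
        BlockingQueue<String> events = new ArrayBlockingQueue<>(1);

        // Stand-in for a listener callback arriving on another thread.
        Thread asyncSource = new Thread(() -> events.add("window0"));
        asyncSource.start();

        // poll(timeout) turns the asynchronous callback into a synchronous
        // checkpoint; it returns null if nothing arrives in time.
        String event = events.poll(5, TimeUnit.SECONDS);
        System.out.println("received: " + event); // "window0"
        asyncSource.join();
    }
}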

From source file:edu.vt.middleware.cas.ldap.LoadDriver.java

public LoadDriver(final int sampleCount, final int workerCount, final File credentialsFile,
        final ApplicationContext context) {

    this.credentialsFile = credentialsFile;
    this.resultExecutor = Executors.newSingleThreadExecutor();
    this.workExecutor = Executors.newFixedThreadPool(workerCount);
    this.context = context;
    final AuthenticationHandler handler = this.context.getBean(AuthenticationHandler.class);
    if (handler == null) {
        throw new IllegalStateException("AuthenticationHandler bean not found.");
    }
    if (!handler.supports(new UsernamePasswordCredentials())) {
        throw new IllegalStateException("AuthenticationHandler bean does not support password authentication");
    }
    this.state.setWorkQueue(new ArrayBlockingQueue<UsernamePasswordCredentials>(sampleCount));
    this.state.setResultQueue(new ArrayBlockingQueue<Sample>(sampleCount));
    this.state.setAuthenticationHandler(handler);
}

From source file:org.apache.hadoop.yarn.server.resourcemanager.security.JWTSecurityHandler.java

public JWTSecurityHandler(RMContext rmContext, RMAppSecurityManager rmAppSecurityManager) {
    this.rmContext = rmContext;
    this.rmAppSecurityManager = rmAppSecurityManager;
    this.renewalTasks = new ConcurrentHashMap<>();
    this.invalidationEvents = new ArrayBlockingQueue<JWTInvalidationEvent>(INVALIDATION_EVENTS_QUEUE_SIZE);
    this.eventHandler = rmContext.getDispatcher().getEventHandler();
    this.random = new RandomDataGenerator();
}

From source file:org.apache.hadoop.raid.IADecoder.java

/**
 * @param forRecovery determines the type of this decoder: for recovery or for degraded read
 *   (someday these may be combined into the same function)
 */
public IADecoder(Configuration conf, int stripeSize, int paritySize, boolean forRecovery) {
    super(conf, stripeSize, paritySize);
    LOG.info("initial decoder: k=" + stripeSize + " m=" + paritySize + " bufSize:" + bufSize);

    inputs = new FSDataInputStream[stripeSize + paritySize];

    threadNum = conf.getInt("hdfs.raid.decoder.threadnum", 1);

    //data queue, input to decode
    this.q = new BlockingQueue[threadNum];
    for (int i = 0; i < threadNum; i++)
        q[i] = new ArrayBlockingQueue<DecodePackage>(1024 / paritySize);

    //signal queue, decode to output
    this.p = new BlockingQueue[threadNum];
    for (int i = 0; i < threadNum; i++)
        p[i] = new ArrayBlockingQueue<Integer>(100);

    Thread[] ds = new Thread[threadNum];
    for (int i = 0; i < threadNum; i++) {
        if (forRecovery) {
            IARecoveryDecoder decoder = new IARecoveryDecoder(i);
            ds[i] = new Thread(decoder);
        } else {
            IADegradedReadDecoder decoder = new IADegradedReadDecoder(i);
            ds[i] = new Thread(decoder);
        }
        ds[i].start();
    }

    LOG.info("IADecoder 27/1");

}
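
The array-of-queues layout above gives each decode thread a private bounded queue, so workers never contend on a single shared queue. A compact standalone sketch of that sharding pattern (the thread count and capacities here are arbitrary):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class ShardedQueueSketch {
    public static void main(String[] args) throws InterruptedException {
        int threadNum = 4;

        // One bounded queue per worker; the raw array mirrors the snippet
        // above, since arrays of parameterized types cannot be created directly.
        @SuppressWarnings("unchecked")
        BlockingQueue<Integer>[] q = new BlockingQueue[threadNum];
        for (int i = 0; i < threadNum; i++) {
            q[i] = new ArrayBlockingQueue<>(16);
        }

        Thread[] workers = new Thread[threadNum];
        for (int i = 0; i < threadNum; i++) {
            final int id = i;
            workers[i] = new Thread(() -> {
                try {
                    // Each worker drains only its own shard.
                    int item = q[id].take();
                    System.out.println("worker " + id + " got " + item);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            });
            workers[i].start();
        }

        // Route each item to a fixed shard.
        for (int item = 0; item < threadNum; item++) {
            q[item % threadNum].put(item);
        }
        for (Thread w : workers) {
            w.join();
        }
    }
}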

From source file:org.mitre.mpf.mst.TestSystemStress3.java

/**
 * This test intentionally runs one file per job.
 */
@Test(timeout = 180 * MINUTES)
public void runFaceOcvDetectImageManyJobs() throws Exception {
    testCtr++;
    log.info("Beginning test #{} runFaceOcvDetectImageManyJobs()", testCtr);
    IOFileFilter fileFilter = FileFilterUtils.and(FileFilterUtils.fileFileFilter(),
            FileFilterUtils.suffixFileFilter(".jpg"));

    int numExtractors = 6; // number of extractors on Jenkins (* number of nodes, now 1)
    //        int numExtractors = 2;  // number of extractors on local VM * 1 node

    // for testing on local VM only
    //        Collection<File> files = FileUtils.listFiles(new File(getClass().getClassLoader().getResource("samples/face").getFile()),
    //            fileFilter, null);

    // for testing on Jenkins
    // 10,000 jpgs
    Collection<File> files = FileUtils.listFiles(new File("/mpfdata/datasets/mugshots_10000"), fileFilter,
            null);

    BlockingQueue<File> fQueue = new ArrayBlockingQueue<File>(files.size());
    for (File file : files) {
        fQueue.put(file);
    }
    ExecutorService executor = Executors.newFixedThreadPool(numExtractors);
    JobRunner[] jobRunners = new JobRunner[numExtractors];
    for (int i = 0; i < numExtractors; i++) {
        jobRunners[i] = new JobRunner(fQueue);
        executor.submit(jobRunners[i]);
    }
    executor.shutdown();
    executor.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);

    // Note: JUnit does not format SLF4J-style {} placeholders in assertion messages.
    Assert.assertEquals(
            "Number of files to process doesn't match actual number of jobs run (one job/file)",
            files.size(), manyJobsNumFilesProcessed);
    log.info("Successfully ran {} jobs for {} files, one file per job, without a hiccup",
            manyJobsNumFilesProcessed, files.size());
    log.info("Finished test runFaceOcvDetectImageManyJobs()");
}
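
Because the queue above is created with capacity files.size() and filled before any worker starts, every put() succeeds immediately and the queue serves as a pre-filled, thread-safe work list that the fixed pool drains with non-blocking poll(). A self-contained sketch of that pattern (100 items and 4 workers are arbitrary stand-ins):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class PrefilledQueueSketch {
    public static void main(String[] args) throws InterruptedException {
        int items = 100, workers = 4;

        // Sized to the full workload, so every put() below succeeds at once.
        BlockingQueue<Integer> work = new ArrayBlockingQueue<>(items);
        for (int i = 0; i < items; i++) {
            work.put(i);
        }

        ExecutorService executor = Executors.newFixedThreadPool(workers);
        for (int i = 0; i < workers; i++) {
            executor.submit(() -> {
                // Non-blocking poll() lets each worker exit cleanly
                // once the shared queue is exhausted.
                Integer item;
                while ((item = work.poll()) != null) {
                    // stand-in for running one job per item
                }
            });
        }
        executor.shutdown();
        executor.awaitTermination(1, TimeUnit.MINUTES);
        System.out.println("remaining: " + work.size()); // 0
    }
}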

From source file:org.sbs.goodcrawler.urlmanager.PendingUrls.java

/**
 * Restores any previously persisted URL queue state from disk; otherwise a
 * fresh bounded queue is created.
 */
private void init() {
    File file = new File(PropertyConfigurationHelper.getInstance().getString("status.save.path", "status")
            + File.separator + "urls.good");
    if (file.exists()) {
        try {
            FileInputStream fisUrl = new FileInputStream(file);
            ObjectInputStream oisUrl = new ObjectInputStream(fisUrl);
            instance = (PendingUrls) oisUrl.readObject();
            oisUrl.close();
            fisUrl.close();
            Queue = instance.Queue;
            failure = instance.failure;
            success = instance.success;
            urlCount = instance.urlCount;
            ignored = instance.ignored;
            System.out.println("recovery url queue..." + Queue.size());
        } catch (IOException | ClassNotFoundException e) {
            // FileNotFoundException is an IOException, so one multi-catch suffices
            e.printStackTrace();
        }
    }
    if (null == Queue)
        Queue = new ArrayBlockingQueue<>(PropertyConfigurationHelper.getInstance()
                .getInt(GlobalConstants.pendingUrlsQueueSize, 1000000));
}

From source file:org.apache.hadoop.raid.JRSEncoder.java

protected void encodeFileToStream(FileSystem fs, Path srcFile, long srcSize, long blockSize, OutputStream out,
        Progressable reporter) throws IOException {
    // (disable) One parity block can be written directly to out, rest to local files.
    //tmpOuts[0] = out;

    //File[] tmpFiles = new File[paritySize];
    byte[][] bufs = new byte[paritySize][];

    /*
     * Signal queue used to trigger output.
     * (A blocking queue may not strictly be needed here; adjust in the future.)
     */
    BlockingQueue<byte[]> closedBuf = new ArrayBlockingQueue<byte[]>(14);

    /*
     * Output thread
     */
    DataSender ds = new DataSender(closedBuf, out, blockSize, srcSize);
    Thread dst = new Thread(ds);
    dst.start();

    // Loop over stripes in the file.
    for (long stripeStart = 0; stripeStart < srcSize; stripeStart += blockSize * stripeSize) {
        reporter.progress();

        LOG.info("Starting encoding of stripe " + srcFile + ":" + stripeStart);

        /*
         * create temp file to write parity block (one file for each block)
         */
        for (int i = 0; i < paritySize; i++) {
            //tmpFiles[i] = File.createTempFile("parity", "_" + i); 
            //LOG.info("Created tmp file " + tmpFiles[i]);
            //tmpFiles[i].deleteOnExit();
            bufs[i] = new byte[(int) blockSize];
        }

        // Create input streams for blocks in the stripe.
        InputStream[] blocks = stripeInputs(fs, srcFile, stripeStart, srcSize, blockSize);

        /*
         * encode data
         */
        encodeStripe(blocks, stripeStart, blockSize, bufs, reporter);

        /*
         * trigger output
         */
        for (int i = 0; i < paritySize; i++) {
            try {
                closedBuf.put(bufs[i]);
            } catch (InterruptedException e) {
                // restore the interrupt status rather than swallowing it
                Thread.currentThread().interrupt();
            }
            reporter.progress();
        }
    }

    try {
        //waiting for the end of output
        dst.join();
    } catch (InterruptedException e) {
        LOG.info("thread join interrupted");
    }
}
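
The closedBuf queue above is what bounds memory: once 14 parity buffers are pending, put() blocks and encoding pauses until the DataSender catches up. The standalone sketch below shows the same bounded handoff; the capacity of 4 and the POISON end-of-stream marker are illustrative additions, not part of the original code.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class BoundedHandoffSketch {
    // Hypothetical sentinel signaling end-of-stream (compared by reference).
    static final byte[] POISON = new byte[0];

    public static void main(String[] args) throws InterruptedException {
        // Caps how many encoded buffers can be in flight, bounding memory
        // no matter how far the producer runs ahead of the sender.
        BlockingQueue<byte[]> closedBuf = new ArrayBlockingQueue<>(4);

        Thread sender = new Thread(() -> {
            try {
                byte[] buf;
                while ((buf = closedBuf.take()) != POISON) {
                    // stand-in for writing buf to the output stream
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        sender.start();

        for (int i = 0; i < 20; i++) {
            closedBuf.put(new byte[1024]); // blocks while 4 buffers are pending
        }
        closedBuf.put(POISON);
        sender.join();
        System.out.println("all buffers sent");
    }
}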

From source file:nlp.mediawiki.parser.MultistreamBzip2XmlDumpParser.java

/**
 * Full constructor.
 * @param index the index file containing all the indices of blocks
 * @param pages the page file containing all the multistreams
 * @param numThreads the number of worker threads to use in addition to the current one
 * @param batchsize the size of a batch
 */
public MultistreamBzip2XmlDumpParser(File index, File pages, int batchsize, int numThreads) {
    this.indexFile = index;
    this.pageFile = pages;
    this.workers = new Worker[numThreads];
    this.blocks = new ArrayBlockingQueue<PageBlock>(numThreads * 3);
    this.pageReader = new PageReader(new IndexReader(index, pages, numThreads * 3), pages);
    this.batchsize = batchsize;
}