Example usage for java.util.concurrent ArrayBlockingQueue ArrayBlockingQueue

Introduction

On this page you can find example usage for the java.util.concurrent ArrayBlockingQueue constructor ArrayBlockingQueue(int capacity).

Prototype

public ArrayBlockingQueue(int capacity) 

Document

Creates an ArrayBlockingQueue with the given (fixed) capacity and default access policy.
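
A minimal standalone sketch of what the single-argument constructor gives you (class and item names are illustrative, not from any of the projects below): put blocks once the fixed capacity is reached, and take blocks while the queue is empty.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class BoundedHandoff {
    public static void main(String[] args) throws InterruptedException {
        // Fixed capacity of 2; default (non-fair) access policy.
        BlockingQueue<String> queue = new ArrayBlockingQueue<>(2);

        Thread producer = new Thread(() -> {
            try {
                for (int i = 0; i < 5; i++) {
                    queue.put("item-" + i); // blocks while the queue is full
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        producer.start();

        for (int i = 0; i < 5; i++) {
            System.out.println(queue.take()); // blocks while the queue is empty
        }
        producer.join();
    }
}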

Usage

From source file:org.activiti.engine.impl.asyncexecutor.DefaultAsyncJobExecutor.java

protected void initAsyncJobExecutionThreadPool() {
    if (threadPoolQueue == null) {
        log.info("Creating thread pool queue of size {}", queueSize);
        threadPoolQueue = new ArrayBlockingQueue<Runnable>(queueSize);
    }

    if (executorService == null) {
        log.info("Creating executor service with corePoolSize {}, maxPoolSize {} and keepAliveTime {}",
                corePoolSize, maxPoolSize, keepAliveTime);

        BasicThreadFactory threadFactory = new BasicThreadFactory.Builder()
                .namingPattern("activiti-async-job-executor-thread-%d").build();
        executorService = new ThreadPoolExecutor(corePoolSize, maxPoolSize, keepAliveTime,
                TimeUnit.MILLISECONDS, threadPoolQueue, threadFactory);
    }
}
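
The Activiti executor above backs its ThreadPoolExecutor with the bounded queue, which means submissions arriving while the queue is full and all maxPoolSize threads are busy are rejected with a RejectedExecutionException under the default policy. A sketch of one common alternative, with illustrative pool and queue sizes (this is not Activiti's configuration): install CallerRunsPolicy so saturation throttles the submitting thread instead of failing the task.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedPoolSketch {
    public static void main(String[] args) {
        // Illustrative sizes: 4 core threads, 8 max, 60s keep-alive, room for 100 queued tasks.
        BlockingQueue<Runnable> workQueue = new ArrayBlockingQueue<>(100);
        ThreadPoolExecutor executor = new ThreadPoolExecutor(
                4, 8, 60L, TimeUnit.SECONDS, workQueue,
                // When the queue is full and every thread is busy, run the task on the
                // submitting thread instead of throwing RejectedExecutionException.
                new ThreadPoolExecutor.CallerRunsPolicy());

        for (int i = 0; i < 1000; i++) {
            final int n = i;
            executor.execute(() -> System.out.println("task " + n));
        }
        executor.shutdown();
    }
}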

From source file:org.eclipse.smila.connectivity.framework.crawler.web.WebCrawler.java

/**
 * {@inheritDoc}
 */
@Override
public void initialize(final DataSourceConnectionConfig config)
        throws CrawlerException, CrawlerCriticalException {
    _log.info("Initializing WebCrawler...");
    synchronized (_openedMonitor) {
        if (_opened) {
            throw new CrawlerCriticalException(
                    "Crawler is busy (it should not happen because new instances are created by ComponentFactories)");
        }
        _opened = true;
    }

    _dataReferenceRecords = new HashMap<ConnectivityId, Record>();
    _records = new HashMap<ConnectivityId, Record>();

    _performanceCounters = new CrawlerPerformanceCounterHelper<WebCrawlerPerformanceAgent>(config, hashCode(),
            WebCrawlerPerformanceAgent.class);
    _queue = new ArrayBlockingQueue<Record>(CAPACITY);
    _forceClosing = false;
    _producerRunning = true;

    _dataSourceID = config.getDataSourceID();
    final Attributes attributes = config.getAttributes();
    final List<IAttribute> attrs = attributes.getAttribute();
    _attributes = attrs.toArray(new Attribute[attrs.size()]);

    final Process process = (Process) config.getProcess();
    _webSites = process.getWebSite().iterator();

    // _webSiteIterator = new WebSiteIterator(_webSites.next());
    initDataFolder();
    initializeNextSite();

    _crawlThread = new CrawlingProducerThread();
    _crawlThread.start();

    _log.debug("WebCrawler indexer started");
}

From source file:mazewar.Mazewar.java

/**
 * The place where all the pieces are put together.
 */
public Mazewar(String zkServer, int zkPort, int port, String name, String game, boolean robot) {
    super("ECE419 Mazewar");
    consolePrintLn("ECE419 Mazewar started!");

    /* Set up parent */
    ZK_PARENT += game;

    // Throw up a dialog to get the GUIClient name.
    if (name != null) {
        clientId = name;
    } else {
        clientId = JOptionPane.showInputDialog("Enter your name");
    }
    if ((clientId == null) || (clientId.length() == 0)) {
        Mazewar.quit();
    }

    /* Connect to ZooKeeper and get sequencer details */
    List<ClientNode> nodeList = null;
    try {
        zkWatcher = new ZkWatcher();
        zkConnected = new CountDownLatch(1);
        zooKeeper = new ZooKeeper(zkServer + ":" + zkPort, ZK_TIMEOUT, new Watcher() {
            @Override
            public void process(WatchedEvent event) {
                /* Release Lock if ZooKeeper is connected */
                if (event.getState() == SyncConnected) {
                    zkConnected.countDown();
                } else {
                    System.err.println("Could not connect to ZooKeeper!");
                    System.exit(0);
                }
            }
        });
        zkConnected.await();

        /* Successfully connected, now create our node on ZooKeeper */
        zooKeeper.create(Joiner.on('/').join(ZK_PARENT, clientId),
                Joiner.on(':').join(InetAddress.getLocalHost().getHostAddress(), port).getBytes(),
                ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL);

        /* Get Seed from Parent */
        mazeSeed = Long.parseLong(new String(zooKeeper.getData(ZK_PARENT, false, null)));

        /* Initialize Sequence Number */
        sequenceNumber = new AtomicInteger(zooKeeper.exists(ZK_PARENT, false).getVersion());

        /* Get list of nodes */
        nodeList = ClientNode.sortList(zooKeeper.getChildren(ZK_PARENT, false));
    } catch (Exception e) {
        e.printStackTrace();
        System.exit(1);
    }

    // Create the maze
    maze = new MazeImpl(new Point(mazeWidth, mazeHeight), mazeSeed);
    assert (maze != null);

    // Have the ScoreTableModel listen to the maze to find
    // out how to adjust scores.
    ScoreTableModel scoreModel = new ScoreTableModel();
    assert (scoreModel != null);
    maze.addMazeListener(scoreModel);

    /* Initialize packet queue */
    packetQueue = new ArrayBlockingQueue<MazePacket>(QUEUE_SIZE);
    sequencedQueue = new PriorityBlockingQueue<MazePacket>(QUEUE_SIZE, new Comparator<MazePacket>() {
        @Override
        public int compare(MazePacket o1, MazePacket o2) {
            return o1.sequenceNumber.compareTo(o2.sequenceNumber);
        }
    });

    /* Inject Event Bus into Client */
    Client.setEventBus(eventBus);

    /* Initialize ZMQ Context */
    context = ZMQ.context(2);

    /* Set up publisher */
    publisher = context.socket(ZMQ.PUB);
    publisher.bind("tcp://*:" + port);
    System.out.println("ZeroMQ Publisher Bound On: " + port);

    try {
        Thread.sleep(100);
    } catch (Exception e) {
        e.printStackTrace();
    }

    /* Set up subscriber */
    subscriber = context.socket(ZMQ.SUB);
    subscriber.subscribe(ArrayUtils.EMPTY_BYTE_ARRAY);

    clients = new ConcurrentHashMap<String, Client>();
    try {
        for (ClientNode client : nodeList) {
            if (client.getName().equals(clientId)) {
                clientPath = ZK_PARENT + "/" + client.getPath();
                guiClient = robot ? new RobotClient(clientId) : new GUIClient(clientId);
                clients.put(clientId, guiClient);
                maze.addClient(guiClient);
                eventBus.register(guiClient);
                subscriber.connect("tcp://" + new String(zooKeeper.getData(clientPath, false, null)));
            } else {
                addRemoteClient(client);
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    }

    checkNotNull(guiClient, "Should have received our clientId in CLIENTS list!");

    // Create the GUIClient and connect it to the KeyListener queue
    this.addKeyListener(guiClient);
    this.isRobot = robot;

    // Use braces to force constructors not to be called at the beginning of the
    // constructor.
    /*{
    maze.addClient(new RobotClient("Norby"));
    maze.addClient(new RobotClient("Robbie"));
    maze.addClient(new RobotClient("Clango"));
    maze.addClient(new RobotClient("Marvin"));
    }*/

    // Create the panel that will display the maze.
    overheadPanel = new OverheadMazePanel(maze, guiClient);
    assert (overheadPanel != null);
    maze.addMazeListener(overheadPanel);

    // Don't allow editing the console from the GUI
    console.setEditable(false);
    console.setFocusable(false);
    console.setBorder(BorderFactory.createTitledBorder(BorderFactory.createEtchedBorder()));

    // Allow the console to scroll by putting it in a scrollpane
    JScrollPane consoleScrollPane = new JScrollPane(console);
    assert (consoleScrollPane != null);
    consoleScrollPane
            .setBorder(BorderFactory.createTitledBorder(BorderFactory.createEtchedBorder(), "Console"));

    // Create the score table
    scoreTable = new JTable(scoreModel);
    assert (scoreTable != null);
    scoreTable.setFocusable(false);
    scoreTable.setRowSelectionAllowed(false);

    // Allow the score table to scroll too.
    JScrollPane scoreScrollPane = new JScrollPane(scoreTable);
    assert (scoreScrollPane != null);
    scoreScrollPane.setBorder(BorderFactory.createTitledBorder(BorderFactory.createEtchedBorder(), "Scores"));

    // Create the layout manager
    GridBagLayout layout = new GridBagLayout();
    GridBagConstraints c = new GridBagConstraints();
    getContentPane().setLayout(layout);

    // Define the constraints on the components.
    c.fill = GridBagConstraints.BOTH;
    c.weightx = 1.0;
    c.weighty = 3.0;
    c.gridwidth = GridBagConstraints.REMAINDER;
    layout.setConstraints(overheadPanel, c);
    c.gridwidth = GridBagConstraints.RELATIVE;
    c.weightx = 2.0;
    c.weighty = 1.0;
    layout.setConstraints(consoleScrollPane, c);
    c.gridwidth = GridBagConstraints.REMAINDER;
    c.weightx = 1.0;
    layout.setConstraints(scoreScrollPane, c);

    // Add the components
    getContentPane().add(overheadPanel);
    getContentPane().add(consoleScrollPane);
    getContentPane().add(scoreScrollPane);

    // Pack everything neatly.
    pack();

    // Let the magic begin.
    setVisible(true);
    overheadPanel.repaint();
    this.requestFocusInWindow();
}

From source file:com.prod.intelligent7.engineautostart.ConnectDaemonService.java

void startDaemon() {
    mDaemon = null;
    String mHost = getSharedPreferences(MainActivity.package_name + ".profile", MODE_PRIVATE)
            .getString(SERVER_IP, "220.134.85.189");
    //start a thread to talk to server every minute

    String mPort = getSharedPreferences(MainActivity.package_name + ".profile", MODE_PRIVATE)
            .getString(SERVER_PORT, "9696");
    //start a thread to talk to server every minute
    if (mHost.charAt(0) == '-') {
        mHost = getResources().getString(R.string.prod_server);
        mPort = getResources().getString(R.string.port);
    }
    mDaemon = TcpConnectDaemon.getInstance(mHost, Integer.parseInt(mPort));//
    //new TcpConnectDaemon(mHost, Integer.parseInt(mPort));
    if (mDaemon == null)
        return;
    mDaemon.setModeInterval(TcpConnectDaemon.MODE_REPEAT, serverHeartBit);

    if (outBoundMailBox == null)
        outBoundMailBox = new ArrayBlockingQueue<String>(mailBoxLimit);
    mDaemon.setOutBoundDataQ(outBoundMailBox);
    /* new change mailbox should be hold by me not the postman
    // so that the box is still there even the man quit
    Vector<String> keep=null;
    if (outBoundMailBox!= null && outBoundMailBox.size() > 0)
    {
    keep=new Vector<String>();
    while (outBoundMailBox.size()> 0)
    {
        try {
            keep.add(outBoundMailBox.take());
        } catch(InterruptedException e){}
    }
    }
    outBoundMailBox=mDaemon.getOutBoundDataQ();
    if (keep!=null && keep.size() > 0)
    {
    for (int i=0; i<keep.size(); i++){
        try {
            outBoundMailBox.put(keep.get(i));
        } catch(InterruptedException e){}
    }
    keep.clear();
    keep=null;
    }
    */
    mDaemon.attachToService(this);
    //mDaemon.setUrgentMailBox(urgentMailBox);
    mDaemon.addListener(this);
    mDaemon.start();
    // need start schedule too; MainActivity.N_BOOT_PARAMS, nBootParam); //HH:MM-on minutes-off minutes-cycle last for minutes
    // MainActivity.ONE_BOOT_PARAMS, bootParam); //  yy/mm/dd:hh:mm-last for minutes
}

From source file:ubic.gemma.core.loader.genome.gene.ncbi.NcbiGeneLoader.java

private void load(String geneInfoFile, String gene2AccFile, String geneHistoryFile, String geneEnsemblFile,
        Collection<Taxon> supportedTaxa) {
    /*
     * In case this is reused.
     */
    this.generatorDone.set(false);
    this.converterDone.set(false);
    this.loaderDone.set(false);

    NcbiGeneDomainObjectGenerator sdog = new NcbiGeneDomainObjectGenerator(supportedTaxa);
    sdog.setDoDownload(doDownload);
    sdog.setProducerDoneFlag(generatorDone);
    sdog.setStartingNcbiId(startingNcbiId);

    NcbiGeneConverter converter = new NcbiGeneConverter();
    converter.setSourceDoneFlag(generatorDone);
    converter.setProducerDoneFlag(converterDone);

    // create queue for GeneInfo objects
    final BlockingQueue<NcbiGeneData> geneInfoQueue = new ArrayBlockingQueue<>(NcbiGeneLoader.QUEUE_SIZE);
    final BlockingQueue<Gene> geneQueue = new ArrayBlockingQueue<>(NcbiGeneLoader.QUEUE_SIZE);

    // Threaded producer - loading files into queue as GeneInfo objects
    if (StringUtils.isEmpty(geneInfoFile) || StringUtils.isEmpty(gene2AccFile)) {
        sdog.generate(geneInfoQueue);
    } else {
        sdog.generateLocal(geneInfoFile, gene2AccFile, geneHistoryFile, geneEnsemblFile, geneInfoQueue);
    }

    // Threaded consumer/producer - consumes GeneInfo objects and generates
    // Gene/GeneProduct/DatabaseEntry entries
    converter.convert(geneInfoQueue, geneQueue);

    // Threaded consumer. Consumes Gene objects and persists them into
    // the database
    this.load(geneQueue);

    // update taxon table to indicate that now there are genes loaded for that taxa.
    // all or nothing so that if fails for some taxa then no taxa will be updated.
    this.updateTaxaWithGenesUsable(sdog.getSupportedTaxaWithNCBIGenes());
}
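
The loader above chains two bounded queues into a generate -> convert -> load pipeline and uses AtomicBoolean flags to signal when each producer has finished. A compact sketch of that flag-plus-queue handshake (names, sizes and the Integer payload are illustrative, not Gemma's types): the consumer keeps draining until the producer reports done and the queue is empty.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

public class PipelineSketch {
    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<Integer> queue = new ArrayBlockingQueue<>(1000);
        AtomicBoolean producerDone = new AtomicBoolean(false);

        Thread producer = new Thread(() -> {
            try {
                for (int i = 0; i < 10; i++) {
                    queue.put(i);
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            } finally {
                producerDone.set(true); // completion flag instead of a sentinel element
            }
        });
        producer.start();

        // Keep consuming until the producer is done AND nothing is left in the queue.
        while (!(producerDone.get() && queue.isEmpty())) {
            Integer item = queue.poll(100, TimeUnit.MILLISECONDS);
            if (item != null) {
                System.out.println("converted " + item);
            }
        }
        producer.join();
    }
}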

From source file:org.protempa.Executor.java

void execute() throws QueryException {
    try {
        Thread retrieveDataThread;
        Thread doProcessThread;
        synchronized (this) {
            if (this.canceled) {
                return;
            }
            log(Level.INFO, "Processing data");
            DataStreamingEvent doProcessPoisonPill = new DataStreamingEvent("poison", Collections.emptyList());
            QueueObject hqrPoisonPill = new QueueObject();
            BlockingQueue<DataStreamingEvent> doProcessQueue = new ArrayBlockingQueue<>(1000);
            BlockingQueue<QueueObject> hqrQueue = new ArrayBlockingQueue<>(1000);
            retrieveDataThread = new RetrieveDataThread(doProcessQueue, doProcessPoisonPill);
            doProcessThread = new DoProcessThread(doProcessQueue, hqrQueue, doProcessPoisonPill, hqrPoisonPill,
                    retrieveDataThread);
            this.handleQueryResultThread = new HandleQueryResultThread(hqrQueue, hqrPoisonPill,
                    doProcessThread);
            retrieveDataThread.start();
            doProcessThread.start();
            this.handleQueryResultThread.start();
        }

        try {
            retrieveDataThread.join();
            log(Level.INFO, "Done retrieving data");
        } catch (InterruptedException ex) {
            log(Level.FINER, "Protempa producer thread join interrupted", ex);
        }
        try {
            doProcessThread.join();
            log(Level.INFO, "Done processing data");
        } catch (InterruptedException ex) {
            log(Level.FINER, "Protempa consumer thread join interrupted", ex);
        }
        try {
            this.handleQueryResultThread.join();
            log(Level.INFO, "Done outputting results");
        } catch (InterruptedException ex) {
            log(Level.FINER, "Protempa consumer thread join interrupted", ex);
        }

        if (!exceptions.isEmpty()) {
            throw exceptions.get(0);
        }
    } catch (QueryException ex) {
        this.failed = true;
        throw ex;
    }
}
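
The Protempa executor above shuts its pipeline down by pushing "poison pill" sentinel objects through each queue. A self-contained sketch of that pattern over an ArrayBlockingQueue (names and the capacity are illustrative, not Protempa's): the consumer stops when it takes the sentinel, so no separate "done" flag is needed.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class PoisonPillSketch {
    // A dedicated sentinel instance; reference equality marks the end of the stream.
    private static final String POISON = new String("POISON");

    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> queue = new ArrayBlockingQueue<>(1000);

        Thread consumer = new Thread(() -> {
            try {
                while (true) {
                    String item = queue.take();
                    if (item == POISON) { // sentinel received: no more data will arrive
                        break;
                    }
                    System.out.println("processed " + item);
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        consumer.start();

        for (int i = 0; i < 10; i++) {
            queue.put("event-" + i);
        }
        queue.put(POISON); // tell the consumer to stop
        consumer.join();
    }
}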

From source file:ubic.gemma.core.apps.ArrayDesignBlatCli.java

@Override
protected Exception doWork(String[] args) {
    Exception err = this.processCommandLine(args);
    if (err != null)
        return err;

    final Date skipIfLastRunLaterThan = this.getLimitingDate();

    if (!this.getArrayDesignsToProcess().isEmpty()) {

        if (this.blatResultFile != null && this.getArrayDesignsToProcess().size() > 1) {
            throw new IllegalArgumentException(
                    "Cannot provide a blat result file when multiple arrays are being analyzed");
        }

        for (ArrayDesign arrayDesign : this.getArrayDesignsToProcess()) {
            if (!this.shouldRun(skipIfLastRunLaterThan, arrayDesign, ArrayDesignSequenceAnalysisEvent.class)) {
                AbstractCLI.log
                        .warn(arrayDesign + " was last run more recently than " + skipIfLastRunLaterThan);
                return null;
            }

            arrayDesign = this.thaw(arrayDesign);
            Collection<BlatResult> persistedResults;
            try {
                if (this.blatResultFile != null) {
                    Collection<BlatResult> blatResults = this.getBlatResultsFromFile(arrayDesign);

                    if (blatResults == null || blatResults.size() == 0) {
                        throw new IllegalStateException("No blat results in file!");
                    }

                    AbstractCLI.log.info("Got " + blatResults.size() + " blat records");
                    persistedResults = arrayDesignSequenceAlignmentService.processArrayDesign(arrayDesign,
                            taxon, blatResults);
                    this.audit(arrayDesign, "BLAT results read from file: " + blatResultFile);
                    this.updateMergedOrSubsumed(arrayDesign);

                } else {
                    // Run blat from scratch.
                    persistedResults = arrayDesignSequenceAlignmentService.processArrayDesign(arrayDesign,
                            this.sensitive);
                    this.audit(arrayDesign, "Based on a fresh alignment analysis; BLAT score threshold was "
                            + this.blatScoreThreshold + "; sensitive mode was " + this.sensitive);
                    this.updateMergedOrSubsumed(arrayDesign);
                }
                AbstractCLI.log.info("Persisted " + persistedResults.size() + " results");
            } catch (IOException e) {
                this.errorObjects.add(e);
            }
        }

    } else if (taxon != null) {

        Collection<ArrayDesign> allArrayDesigns = getArrayDesignService().findByTaxon(taxon);
        AbstractCLI.log.warn("*** Running BLAT for all " + taxon.getCommonName() + " Array designs *** ["
                + allArrayDesigns.size() + " items]");

        final SecurityContext context = SecurityContextHolder.getContext();

        // split over multiple threads so we can multiplex. Put the array designs in a queue.

        /*
         * Here is our task runner.
         */
        class BlatCliConsumer extends Consumer {

            private BlatCliConsumer(BlockingQueue<ArrayDesign> q) {
                super(q, context);
            }

            @Override
            void consume(ArrayDesign x) {
                if (!ArrayDesignBlatCli.this.shouldRun(skipIfLastRunLaterThan, x,
                        ArrayDesignSequenceAnalysisEvent.class)) {
                    return;
                }
                x = getArrayDesignService().thaw(x);

                ArrayDesignBlatCli.this.processArrayDesign(x);

            }
        }

        BlockingQueue<ArrayDesign> arrayDesigns = new ArrayBlockingQueue<>(allArrayDesigns.size());
        arrayDesigns.addAll(allArrayDesigns);

        Collection<Thread> threads = new ArrayList<>();
        for (int i = 0; i < this.numThreads; i++) {
            Consumer c1 = new BlatCliConsumer(arrayDesigns);
            Thread k = new Thread(c1);
            threads.add(k);
            k.start();
        }

        this.waitForThreadPoolCompletion(threads);

        /*
         * All done
         */
        this.summarizeProcessing();

    } else {
        exitwithError();
    }

    return null;
}

From source file:org.codice.ddf.commands.catalog.IngestCommand.java

@Override
protected Object executeWithSubject() throws Exception {

    final CatalogFacade catalog = getCatalog();
    final File inputFile = new File(filePath);

    if (!inputFile.exists()) {
        printErrorMessage("File or directory [" + filePath + "] must exist.");
        console.println("If the file does indeed exist, try putting the path in quotes.");
        return null;
    }

    if (deprecatedBatchSize != DEFAULT_BATCH_SIZE) {
        // user specified the old style batch size, so use that
        printErrorMessage(
                "Batch size positional argument is DEPRECATED, please use --batchsize option instead.");
        batchSize = deprecatedBatchSize;
    }

    if (batchSize <= 0) {
        printErrorMessage(
                "A batch size of [" + batchSize + "] was supplied. Batch size must be greater than 0.");
        return null;
    }

    if (!StringUtils.isEmpty(failedDir)) {
        failedIngestDirectory = new File(failedDir);
        if (!verifyFailedIngestDirectory()) {
            return null;
        }

        /**
         * Batch size is always set to 1, when using an Ingest Failure Directory.  If a batch size is specified by the user, issue 
         * a warning stating that a batch size of 1 will be used.
         */
        if (batchSize != DEFAULT_BATCH_SIZE) {
            console.println("WARNING: An ingest failure directory was supplied in addition to a batch size of "
                    + batchSize
                    + ". When using an ingest failure directory, the batch size must be 1. Setting batch size to 1.");
        }

        batchSize = 1;
    }

    BundleContext bundleContext = getBundleContext();
    if (!DEFAULT_TRANSFORMER_ID.equals(transformerId)) {
        ServiceReference[] refs = null;

        try {
            refs = bundleContext.getServiceReferences(InputTransformer.class.getName(),
                    "(|" + "(" + Constants.SERVICE_ID + "=" + transformerId + ")" + ")");
        } catch (InvalidSyntaxException e) {
            throw new IllegalArgumentException("Invalid transformer transformerId: " + transformerId, e);
        }

        if (refs == null || refs.length == 0) {
            throw new IllegalArgumentException("Transformer " + transformerId + " not found");
        } else {
            transformer = (InputTransformer) bundleContext.getService(refs[0]);
        }
    }

    Stream<Path> ingestStream = Files.walk(inputFile.toPath(), FileVisitOption.FOLLOW_LINKS);

    int totalFiles = (inputFile.isDirectory()) ? inputFile.list().length : 1;
    fileCount.getAndSet(totalFiles);

    final ArrayBlockingQueue<Metacard> metacardQueue = new ArrayBlockingQueue<>(batchSize * multithreaded);

    ExecutorService queueExecutor = Executors.newSingleThreadExecutor();

    final long start = System.currentTimeMillis();

    printProgressAndFlush(start, fileCount.get(), 0);

    queueExecutor.submit(() -> buildQueue(ingestStream, metacardQueue, start));

    final ScheduledExecutorService batchScheduler = Executors.newSingleThreadScheduledExecutor();

    BlockingQueue<Runnable> blockingQueue = new ArrayBlockingQueue<>(multithreaded);
    RejectedExecutionHandler rejectedExecutionHandler = new ThreadPoolExecutor.CallerRunsPolicy();
    ExecutorService executorService = new ThreadPoolExecutor(multithreaded, multithreaded, 0L,
            TimeUnit.MILLISECONDS, blockingQueue, rejectedExecutionHandler);

    submitToCatalog(batchScheduler, executorService, metacardQueue, catalog, start);

    while (!doneBuildingQueue.get() || processingThreads.get() != 0) {
        try {
            TimeUnit.SECONDS.sleep(2);
        } catch (InterruptedException e) {
            LOGGER.error("Ingest 'Waiting for processing to finish' thread interrupted: {}", e);
        }
    }

    try {
        queueExecutor.shutdown();
        executorService.shutdown();
        batchScheduler.shutdown();
    } catch (SecurityException e) {
        LOGGER.error("Executor service shutdown was not permitted: {}", e);
    }

    printProgressAndFlush(start, fileCount.get(), ingestCount.get() + ignoreCount.get());
    long end = System.currentTimeMillis();
    console.println();
    String elapsedTime = timeFormatter.print(new Period(start, end).withMillis(0));

    console.println();
    console.printf(" %d file(s) ingested in %s %n", ingestCount.get(), elapsedTime);

    LOGGER.info("{} file(s) ingested in {} [{} records/sec]", ingestCount.get(), elapsedTime,
            calculateRecordsPerSecond(ingestCount.get(), start, end));
    INGEST_LOGGER.info("{} file(s) ingested in {} [{} records/sec]", ingestCount.get(), elapsedTime,
            calculateRecordsPerSecond(ingestCount.get(), start, end));

    if (fileCount.get() != ingestCount.get()) {
        console.println();
        if ((fileCount.get() - ingestCount.get() - ignoreCount.get()) >= 1) {
            String failedAmount = Integer.toString(fileCount.get() - ingestCount.get() - ignoreCount.get());
            printErrorMessage(
                    failedAmount + " file(s) failed to be ingested.  See the ingest log for more details.");
            INGEST_LOGGER.warn("{} files(s) failed to be ingested.", failedAmount);
        }
        if (ignoreList != null) {
            String ignoredAmount = Integer.toString(ignoreCount.get());
            printColor(Ansi.Color.YELLOW,
                    ignoredAmount + " file(s) ignored.  See the ingest log for more details.");
            INGEST_LOGGER.warn("{} files(s) were ignored.", ignoredAmount);
        }
    }
    console.println();

    return null;
}

From source file:org.apache.hadoop.hive.metastore.hbase.HBaseImport.java

private int init(String... args) throws ParseException {
    Options options = new Options();

    doAll = doKerberos = false;
    parallel = 1;
    batchSize = 1000;

    options.addOption(
            OptionBuilder.withLongOpt("all").withDescription("Import the full metastore").create('a'));

    options.addOption(OptionBuilder.withLongOpt("batchsize")
            .withDescription("Number of partitions to read and write in a batch, defaults to 1000").hasArg()
            .create('b'));

    options.addOption(OptionBuilder.withLongOpt("database").withDescription("Import a single database")
            .hasArgs().create('d'));

    options.addOption(OptionBuilder.withLongOpt("help").withDescription("You're looking at it").create('h'));

    options.addOption(OptionBuilder.withLongOpt("function").withDescription("Import a single function")
            .hasArgs().create('f'));

    options.addOption(OptionBuilder.withLongOpt("kerberos")
            .withDescription("Import all kerberos related objects (master key, tokens)").create('k'));

    options.addOption(OptionBuilder.withLongOpt("parallel")
            .withDescription(
                    "Parallel factor for loading (only applied to tables and partitions), " + "defaults to 1")
            .hasArg().create('p'));

    options.addOption(
            OptionBuilder.withLongOpt("role").withDescription("Import a single role").hasArgs().create('r'));

    options.addOption(OptionBuilder.withLongOpt("tables").withDescription("Import a single tables").hasArgs()
            .create('t'));

    CommandLine cli = new GnuParser().parse(options, args);

    // Process help, if it was asked for, this must be done first
    if (cli.hasOption('h')) {
        printHelp(options);
        return 1;
    }

    boolean hasCmd = false;
    // Now process the other command line args
    if (cli.hasOption('a')) {
        hasCmd = true;
        doAll = true;
    }
    if (cli.hasOption('b')) {
        batchSize = Integer.parseInt(cli.getOptionValue('b'));
    }
    if (cli.hasOption('d')) {
        hasCmd = true;
        dbsToImport = Arrays.asList(cli.getOptionValues('d'));
    }
    if (cli.hasOption('f')) {
        hasCmd = true;
        functionsToImport = Arrays.asList(cli.getOptionValues('f'));
    }
    if (cli.hasOption('p')) {
        parallel = Integer.parseInt(cli.getOptionValue('p'));
    }
    if (cli.hasOption('r')) {
        hasCmd = true;
        rolesToImport = Arrays.asList(cli.getOptionValues('r'));
    }
    if (cli.hasOption('k')) {
        doKerberos = true;
    }
    if (cli.hasOption('t')) {
        hasCmd = true;
        tablesToImport = Arrays.asList(cli.getOptionValues('t'));
    }
    if (!hasCmd) {
        printHelp(options);
        return 1;
    }

    dbs = new ArrayList<>();
    // We don't want to bound the size of the table queue because we keep it all in memory
    partitionedTables = new LinkedBlockingQueue<>();
    tableNameQueue = new LinkedBlockingQueue<>();
    indexNameQueue = new LinkedBlockingQueue<>();

    // Bound the size of this queue so we don't get too much in memory.
    partQueue = new ArrayBlockingQueue<>(parallel * 2);
    return 0;
}

From source file:it.geosolutions.geobatch.flow.file.FileBasedFlowManager.java

/**
 * @param flowCfg
 * @throws IOException
 */
private void initialize(FileBasedFlowConfiguration flowCfg, File geoBatchConfigDir, File geoBatchTempDir)
        throws Exception, NullPointerException {

    this.initialized = false;

    this.name = flowCfg.getName();
    this.description = flowCfg.getDescription();

    flowConfigDir = initConfigDir(flowCfg, geoBatchConfigDir);
    flowTempDir = initTempDir(flowCfg, geoBatchTempDir);

    // get global config
    final GBSettingsCatalog settingsCatalog = CatalogHolder.getSettingsCatalog();
    final GBSettings settings;
    final FlowSettings fs;
    settings = settingsCatalog.find("FLOW");
    if ((settings != null) && (settings instanceof FlowSettings)) {
        fs = (FlowSettings) settings;
    } else {
        fs = new FlowSettings();
        // store the file for further flow loads
        settingsCatalog.save(fs);
    }

    this.keepConsumers = flowCfg.isKeepConsumers();
    if (fs.isKeepConsumers() && keepConsumers == null)
        this.keepConsumers = true;
    else
        this.keepConsumers = false;

    this.maxStoredConsumers = flowCfg.getMaxStoredConsumers();
    if (maxStoredConsumers == null || maxStoredConsumers < 1) {
        this.maxStoredConsumers = fs.getMaxStoredConsumers();
    }

    final int queueSize = (flowCfg.getWorkQueueSize() > 0) ? flowCfg.getWorkQueueSize() : fs.getWorkQueueSize();
    final int corePoolSize = (flowCfg.getCorePoolSize() > 0) ? flowCfg.getCorePoolSize() : fs.getCorePoolSize();
    final int maximumPoolSize = (flowCfg.getMaximumPoolSize() > 0) ? flowCfg.getMaximumPoolSize()
            : fs.getMaximumPoolSize();
    final long keepAlive = (flowCfg.getKeepAliveTime() > 0) ? flowCfg.getKeepAliveTime()
            : fs.getKeepAliveTime(); // seconds

    final BlockingQueue<Runnable> queue = new ArrayBlockingQueue<Runnable>(queueSize);

    this.executor = new ThreadPoolExecutor(corePoolSize, maximumPoolSize, keepAlive, TimeUnit.SECONDS, queue);

    this.paused = false;
    this.terminationRequest = false;
    this.autorun = flowCfg.isAutorun();
    if (this.autorun) {
        if (LOGGER.isInfoEnabled()) {
            LOGGER.info("Automatic Flow Startup for '" + getId() + "'");
        }
        this.resume();
    }
}