Example usage for java.util.concurrent ExecutorService isTerminated

Introduction

On this page you can find usage examples for java.util.concurrent.ExecutorService.isTerminated(), drawn from open-source projects.

Prototype

boolean isTerminated();

Document

Returns true if all tasks have completed following shut down.
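
Before the project-specific examples, here is a minimal, self-contained sketch of the canonical pattern (the class name and tasks are illustrative, not taken from any source below): shut the pool down, then wait until isTerminated() reports that every task has finished. Note that isTerminated() never returns true unless shutdown() or shutdownNow() was called first.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class IsTerminatedDemo {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 8; i++) {
            final int taskId = i;
            executor.execute(() -> System.out.println("task " + taskId));
        }

        executor.shutdown(); // stop accepting new tasks; queued tasks still run

        // Poll until the pool has fully drained. awaitTermination() blocks
        // with a timeout, which is gentler than spinning on isTerminated().
        while (!executor.isTerminated()) {
            executor.awaitTermination(100, TimeUnit.MILLISECONDS);
        }
        System.out.println("all tasks completed: " + executor.isTerminated());
    }
}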

Usage

From source file: org.apache.sentry.tests.e2e.dbprovider.TestConcurrentClients.java

/**
 * Test that when concurrent HS2 clients talk to the server,
 * privileges are correctly created and updated.
 * @throws Exception
 */
@Test
public void testConccurentHS2Client() throws Exception {
    ExecutorService executor = Executors.newFixedThreadPool(NUM_OF_THREADS);
    final TestRuntimeState state = new TestRuntimeState();

    for (int i = 0; i < NUM_OF_TASKS; i++) {
        executor.execute(new Runnable() {
            @Override
            public void run() {
                LOGGER.info("Starting tests: create role, show role, create db and tbl, and create partitions");
                if (state.failed) {
                    return;
                }
                try {
                    Long startTime = System.currentTimeMillis();
                    Long elapsedTime = 0L;
                    while (Long.compare(elapsedTime, HS2_CLIENT_TEST_DURATION_MS) <= 0) {
                        String randStr = randomString(5);
                        String test_role = "test_role_" + randStr;
                        String test_db = "test_db_" + randStr;
                        String test_tb = "test_tb_" + randStr;
                        LOGGER.info("Start to test sentry with hs2 client with role " + test_role);
                        adminCreateRole(test_role);
                        adminShowRole(test_role);
                        createDbTb(ADMIN1, test_db, test_tb);
                        adminGrant(test_db, test_tb, test_role, USERGROUP1);
                        createPartition(USER1_1, test_db, test_tb);
                        adminCleanUp(test_db, test_role);
                        elapsedTime = System.currentTimeMillis() - startTime;
                        LOGGER.info("elapsedTime = " + elapsedTime);
                    }
                    state.setNumSuccess();
                } catch (Exception e) {
                    LOGGER.error("Exception: " + e);
                    state.setFirstException(e);
                }
            }
        });
    }
    executor.shutdown();
    while (!executor.isTerminated()) {
        Thread.sleep(1000); // poll once per second
    }
    Throwable ex = state.getFirstException();
    assertFalse(ex == null ? "Test failed" : ex.toString(), state.failed);
    assertEquals(NUM_OF_TASKS, state.getNumSuccess());
}

From source file: com.bittorrent.mpetazzoni.client.SharedTorrent.java

/**
 * Build this torrent's pieces array.
 *
 * <p>
 * Hash and verify any potentially present local data and create this
 * torrent's pieces array from their respective hash provided in the
 * torrent meta-info.
 * </p>
 *
 * <p>
 * This function should be called soon after the constructor to initialize
 * the pieces array.
 * </p>
 */
public synchronized void init() throws InterruptedException, IOException {
    if (this.isInitialized()) {
        throw new IllegalStateException("Torrent was already initialized!");
    }

    int threads = getHashingThreadsCount();
    int nPieces = (int) (Math.ceil((double) this.getSize() / this.pieceLength));
    int step = 10;

    this.pieces = new Piece[nPieces];
    this.completedPieces = new BitSet(nPieces);
    this.piecesHashes.clear();

    ExecutorService executor = Executors.newFixedThreadPool(threads);
    List<Future<Piece>> results = new LinkedList<Future<Piece>>();

    try {
        logger.info("Analyzing local data for {} with {} threads ({} pieces)...",
                new Object[] { this.getName(), threads, nPieces });
        for (int idx = 0; idx < nPieces; idx++) {
            byte[] hash = new byte[Torrent.PIECE_HASH_SIZE];
            this.piecesHashes.get(hash);

            // The last piece may be shorter than the torrent's global piece
            // length. Let's make sure we get the right piece length in any
            // situation.
            long off = ((long) idx) * this.pieceLength;
            long len = Math.min(this.bucket.size() - off, this.pieceLength);

            this.pieces[idx] = new Piece(this.bucket, idx, off, len, hash, this.isSeeder());

            Callable<Piece> hasher = new Piece.CallableHasher(this.pieces[idx]);
            results.add(executor.submit(hasher));

            if (results.size() >= threads) {
                this.validatePieces(results);
            }

            if (idx / (float) nPieces * 100f > step) {
                logger.info("  ... {}% complete", step);
                step += 10;
            }
        }

        this.validatePieces(results);
    } finally {
        // Request orderly executor shutdown and wait for hashing tasks to
        // complete.
        executor.shutdown();
        while (!executor.isTerminated()) {
            if (this.stop) {
                throw new InterruptedException("Torrent data analysis interrupted.");
            }

            Thread.sleep(10);
        }
    }

    logger.debug("{}: we have {}/{} bytes ({}%) [{}/{} pieces].",
            new Object[] { this.getName(), (this.getSize() - this.left), this.getSize(),
                    String.format("%.1f", (100f * (1f - this.left / (float) this.getSize()))),
                    this.completedPieces.cardinality(), this.pieces.length });
    this.initialized = true;
}

From source file: com.paniclauncher.data.Settings.java

/**
 * This checks the server's hashes.xml file and downloads any new/updated files that differ from what the user has.
 */
private void checkForUpdatedFiles() {
    String hashes = null;
    while (hashes == null) {
        hashes = Utils.urlToString(getFileURL("launcher/hashes.xml"));
        if (hashes == null) {
            boolean changed = disableServerGetNext(); // Disable the server and get the next one
            if (!changed) {
                this.offlineMode = true;
                return;
            }
        }
    }
    ArrayList<PanicLauncherDownloadable> downloads = new ArrayList<PanicLauncherDownloadable>();
    try {
        DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
        DocumentBuilder builder = factory.newDocumentBuilder();
        Document document = builder.parse(new InputSource(new StringReader(hashes)));
        document.getDocumentElement().normalize();
        NodeList nodeList = document.getElementsByTagName("hash");
        for (int i = 0; i < nodeList.getLength(); i++) {
            Node node = nodeList.item(i);
            if (node.getNodeType() == Node.ELEMENT_NODE) {
                Element element = (Element) node;
                String name = element.getAttribute("name");
                String type = element.getAttribute("type");
                String md5 = element.getAttribute("md5");
                File file = null;
                if (type.equalsIgnoreCase("Root")) {
                    file = new File(configsDir, name);
                } else if (type.equalsIgnoreCase("Images")) {
                    file = new File(imagesDir, name);
                    name = "images/" + name;
                } else if (type.equalsIgnoreCase("Skins")) {
                    file = new File(skinsDir, name);
                    name = "skins/" + name;
                } else if (type.equalsIgnoreCase("Languages")) {
                    file = new File(languagesDir, name);
                    name = "languages/" + name;
                } else if (type.equalsIgnoreCase("Libraries")) {
                    file = new File(librariesDir, name);
                    name = "libraries/" + name;
                } else if (type.equalsIgnoreCase("Launcher")) {
                    String version = element.getAttribute("version");
                    if (!getVersion().equalsIgnoreCase(version)) {
                        if (getVersion().equalsIgnoreCase("%VERSION%")) {
                            continue;
                        } else {
                            log("Update to Launcher found. Current version: " + this.version + ", New version: "
                                    + version);
                            downloadUpdate();
                        }
                    } else {
                        continue;
                    }
                } else {
                    continue; // Don't know what to do with this file so ignore it
                }
                boolean download = false; // If we have to download the file or not
                if (!file.exists()) {
                    download = true; // File doesn't exist so download it
                } else {
                    if (!Utils.getMD5(file).equalsIgnoreCase(md5)) {
                        download = true; // MD5 hashes don't match so download it
                    }
                }

                if (download) {
                    if (!file.canWrite()) {
                        file.delete();
                    }
                    downloads.add(new PanicLauncherDownloadable("launcher/" + name, file, md5));
                    log("Downloading: " + name);
                }
            }
        }
    } catch (SAXException e) {
        this.console.logStackTrace(e);
    } catch (ParserConfigurationException e) {
        this.console.logStackTrace(e);
    } catch (IOException e) {
        this.console.logStackTrace(e);
    }
    ExecutorService executor = Executors.newFixedThreadPool(8);
    for (PanicLauncherDownloadable download : downloads) {
        executor.execute(download);
    }
    executor.shutdown();
    while (!executor.isTerminated()) {
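        // Tight spin: burns a CPU core until all downloads finish; see the gentler sketch below.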
    }
}
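
The empty loop above busy-waits at full CPU until the pool drains. A gentler variant (a sketch, not from the PanicLauncher source; the 30-minute ceiling is an assumed value, and it needs an import of java.util.concurrent.TimeUnit) blocks on awaitTermination instead:

    executor.shutdown();
    try {
        // Block until the downloads finish, up to an assumed 30-minute ceiling.
        if (!executor.awaitTermination(30, TimeUnit.MINUTES)) {
            executor.shutdownNow(); // give up and interrupt any stragglers
        }
    } catch (InterruptedException e) {
        executor.shutdownNow();
        Thread.currentThread().interrupt(); // restore the interrupt flag
    }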

From source file: com.p2p.peercds.client.SharedTorrent.java

/**
 * Build this torrent's pieces array.
 *
 * <p>
 * Hash and verify any potentially present local data and create this
 * torrent's pieces array from their respective hash provided in the
 * torrent meta-info.
 * </p>
 *
 * <p>
 * This function should be called soon after the constructor to initialize
 * the pieces array.
 * </p>
 */
public synchronized void init() throws InterruptedException, IOException {
    //      if (this.isInitialized()) {
    //         throw new IllegalStateException("Torrent was already initialized!");
    //      }

    int threads = getHashingThreadsCount();
    int nPieces = (int) (Math.ceil((double) this.getSize() / this.pieceLength));
    int step = 10;

    this.pieces = new Piece[nPieces];
    this.completedPieces = new BitSet(nPieces);
    this.piecesHashes.clear();
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    List<Future<Piece>> results = new LinkedList<Future<Piece>>();

    try {
        logger.info("Analyzing local data for {} with {} threads ({} pieces)...",
                new Object[] { this.getName(), threads, nPieces });
        for (int idx = 0; idx < nPieces; idx++) {
            byte[] hash = new byte[PIECE_HASH_SIZE];
            this.piecesHashes.get(hash);

            // The last piece may be shorter than the torrent's global piece
            // length. Let's make sure we get the right piece length in any
            // situation.
            long off = ((long) idx) * this.pieceLength;
            long len = Math.min(this.bucket.size() - off, this.pieceLength);

            this.pieces[idx] = new Piece(this.bucket, idx, off, len, hash, this.isSeeder());

            Callable<Piece> hasher = new Piece.CallableHasher(this.pieces[idx]);
            results.add(executor.submit(hasher));

            if (results.size() >= threads) {
                this.validatePieces(results);
            }

            if (idx / (float) nPieces * 100f > step) {
                logger.info("  ... {}% complete", step);
                step += 10;
            }
        }

        this.validatePieces(results);
    } finally {
        // Request orderly executor shutdown and wait for hashing tasks to
        // complete.
        executor.shutdown();
        while (!executor.isTerminated()) {
            if (this.stop) {
                throw new InterruptedException("Torrent data analysis interrupted.");
            }

            Thread.sleep(10);
        }
    }

    logger.debug("{}: we have {}/{} bytes ({}%) [{}/{} pieces].",
            new Object[] { this.getName(), (this.getSize() - this.left), this.getSize(),
                    String.format("%.1f", (100f * (1f - this.left / (float) this.getSize()))),
                    this.completedPieces.cardinality(), this.pieces.length });
    this.initialized = true;
}

From source file: org.apache.hadoop.hbase.master.assignment.SplitTableRegionProcedure.java

/**
 * Create Split directory
 * @param env MasterProcedureEnv
 * @throws IOException
 */
private Pair<Integer, Integer> splitStoreFiles(final MasterProcedureEnv env, final HRegionFileSystem regionFs)
        throws IOException {
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    final Configuration conf = env.getMasterConfiguration();
    // The following code sets up a thread pool executor with as many slots as
    // there are files to split. It then fires up everything, waits for
    // completion, and finally checks for any exception.
    //
    // Note: splitStoreFiles creates daughter region dirs under the parent splits dir
    // Nothing to unroll here if failure -- re-run createSplitsDir will
    // clean this up.
    int nbFiles = 0;
    final Map<String, Collection<StoreFileInfo>> files = new HashMap<String, Collection<StoreFileInfo>>(
            regionFs.getFamilies().size());
    for (String family : regionFs.getFamilies()) {
        Collection<StoreFileInfo> sfis = regionFs.getStoreFiles(family);
        if (sfis == null)
            continue;
        Collection<StoreFileInfo> filteredSfis = null;
        for (StoreFileInfo sfi : sfis) {
            // Filter. There is a lag cleaning up compacted reference files. They get cleared
            // after a delay in case outstanding Scanners still have references. Because of this,
            // the listing of the Store content may have straggler reference files. Skip these.
            // It should be safe to skip references at this point because we checked above with
            // the region if it thinks it is splittable and if we are here, it thinks it is
            // splittable.
            if (sfi.isReference()) {
                LOG.info("Skipping split of " + sfi + "; presuming ready for archiving.");
                continue;
            }
            if (filteredSfis == null) {
                filteredSfis = new ArrayList<StoreFileInfo>(sfis.size());
                files.put(family, filteredSfis);
            }
            filteredSfis.add(sfi);
            nbFiles++;
        }
    }
    if (nbFiles == 0) {
        // no file needs to be split.
        return new Pair<Integer, Integer>(0, 0);
    }
    // Max #threads is the smaller of the number of storefiles or the default max determined above.
    int maxThreads = Math.min(
            conf.getInt(HConstants.REGION_SPLIT_THREADS_MAX,
                    conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT)),
            nbFiles);
    LOG.info("pid=" + getProcId() + " splitting " + nbFiles + " storefiles, region="
            + getParentRegion().getShortNameToLog() + ", threads=" + maxThreads);
    final ExecutorService threadPool = Executors.newFixedThreadPool(maxThreads,
            Threads.getNamedThreadFactory("StoreFileSplitter-%1$d"));
    final List<Future<Pair<Path, Path>>> futures = new ArrayList<Future<Pair<Path, Path>>>(nbFiles);

    // Split each store file.
    final TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
    for (Map.Entry<String, Collection<StoreFileInfo>> e : files.entrySet()) {
        byte[] familyName = Bytes.toBytes(e.getKey());
        final ColumnFamilyDescriptor hcd = htd.getColumnFamily(familyName);
        final Collection<StoreFileInfo> storeFiles = e.getValue();
        if (storeFiles != null && storeFiles.size() > 0) {
            final CacheConfig cacheConf = new CacheConfig(conf, hcd);
            for (StoreFileInfo storeFileInfo : storeFiles) {
                StoreFileSplitter sfs = new StoreFileSplitter(regionFs, familyName, new HStoreFile(
                        mfs.getFileSystem(), storeFileInfo, conf, cacheConf, hcd.getBloomFilterType(), true));
                futures.add(threadPool.submit(sfs));
            }
        }
    }
    // Shutdown the pool
    threadPool.shutdown();

    // Wait for all the tasks to finish
    long fileSplitTimeout = conf.getLong("hbase.master.fileSplitTimeout", 30000);
    try {
        boolean stillRunning = !threadPool.awaitTermination(fileSplitTimeout, TimeUnit.MILLISECONDS);
        if (stillRunning) {
            threadPool.shutdownNow();
            // wait for the thread to shutdown completely.
            while (!threadPool.isTerminated()) {
                Thread.sleep(50);
            }
            throw new IOException(
                    "Took too long to split the" + " files and create the references, aborting split");
        }
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }

    int daughterA = 0;
    int daughterB = 0;
    // Look for any exception
    for (Future<Pair<Path, Path>> future : futures) {
        try {
            Pair<Path, Path> p = future.get();
            daughterA += p.getFirst() != null ? 1 : 0;
            daughterB += p.getSecond() != null ? 1 : 0;
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
            throw new IOException(e);
        }
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("pid=" + getProcId() + " split storefiles for region " + getParentRegion().getShortNameToLog()
                + " Daughter A: " + daughterA + " storefiles, Daughter B: " + daughterB + " storefiles.");
    }
    return new Pair<Integer, Integer>(daughterA, daughterB);
}

From source file: org.codice.ddf.commands.catalog.DumpCommand.java

@Override
protected Object executeWithSubject() throws Exception {
    final File dumpDir = new File(dirPath);

    if (!dumpDir.exists()) {
        printErrorMessage("Directory [" + dirPath + "] must exist.");
        console.println("If the directory does indeed exist, try putting the path in quotes.");
        return null;
    }

    if (!dumpDir.isDirectory()) {
        printErrorMessage("Path [" + dirPath + "] must be a directory.");
        return null;
    }

    if (!DEFAULT_TRANSFORMER_ID.matches(transformerId)) {
        transformers = getTransformers();
        if (transformers == null) {
            console.println(transformerId + " is an invalid metacard transformer.");
            return null;
        }
    }

    CatalogFacade catalog = getCatalog();
    FilterBuilder builder = getFilterBuilder();

    Filter createdFilter = null;
    if ((createdAfter != null) && (createdBefore != null)) {
        DateTime createStartDateTime = DateTime.parse(createdAfter);
        DateTime createEndDateTime = DateTime.parse(createdBefore);
        createdFilter = builder.attribute(Metacard.CREATED).is().during().dates(createStartDateTime.toDate(),
                createEndDateTime.toDate());
    } else if (createdAfter != null) {
        DateTime createStartDateTime = DateTime.parse(createdAfter);
        createdFilter = builder.attribute(Metacard.CREATED).is().after().date(createStartDateTime.toDate());
    } else if (createdBefore != null) {
        DateTime createEndDateTime = DateTime.parse(createdBefore);
        createdFilter = builder.attribute(Metacard.CREATED).is().before().date(createEndDateTime.toDate());
    }

    Filter modifiedFilter = null;
    if ((modifiedAfter != null) && (modifiedBefore != null)) {
        DateTime modifiedStartDateTime = DateTime.parse(modifiedAfter);
        DateTime modifiedEndDateTime = DateTime.parse(modifiedBefore);
        modifiedFilter = builder.attribute(Metacard.MODIFIED).is().during()
                .dates(modifiedStartDateTime.toDate(), modifiedEndDateTime.toDate());
    } else if (modifiedAfter != null) {
        DateTime modifiedStartDateTime = DateTime.parse(modifiedAfter);
        modifiedFilter = builder.attribute(Metacard.MODIFIED).is().after().date(modifiedStartDateTime.toDate());
    } else if (modifiedBefore != null) {
        DateTime modifiedEndDateTime = DateTime.parse(modifiedBefore);
        modifiedFilter = builder.attribute(Metacard.MODIFIED).is().before().date(modifiedEndDateTime.toDate());
    }

    Filter filter = null;
    if ((createdFilter != null) && (modifiedFilter != null)) {
        // Filter by both created and modified dates
        filter = builder.allOf(createdFilter, modifiedFilter);
    } else if (createdFilter != null) {
        // Only filter by created date
        filter = createdFilter;
    } else if (modifiedFilter != null) {
        // Only filter by modified date
        filter = modifiedFilter;
    } else {
        // Don't filter by date range
        filter = builder.attribute(Metacard.ID).is().like().text(WILDCARD);
    }

    if (cqlFilter != null) {
        filter = CQL.toFilter(cqlFilter);
    }

    QueryImpl query = new QueryImpl(filter);
    query.setRequestsTotalResultsCount(false);
    query.setPageSize(pageSize);

    Map<String, Serializable> props = new HashMap<String, Serializable>();
    // Avoid caching all results while dumping with native query mode
    props.put("mode", "native");

    final AtomicLong resultCount = new AtomicLong(0);
    long start = System.currentTimeMillis();

    SourceResponse response = catalog.query(new QueryRequestImpl(query, props));

    BlockingQueue<Runnable> blockingQueue = new ArrayBlockingQueue<Runnable>(multithreaded);
    RejectedExecutionHandler rejectedExecutionHandler = new ThreadPoolExecutor.CallerRunsPolicy();
    final ExecutorService executorService = new ThreadPoolExecutor(multithreaded, multithreaded, 0L,
            TimeUnit.MILLISECONDS, blockingQueue, rejectedExecutionHandler);

    while (response.getResults().size() > 0) {
        response = catalog.query(new QueryRequestImpl(query, props));

        if (multithreaded > 1) {
            final List<Result> results = new ArrayList<Result>(response.getResults());
            executorService.submit(new Runnable() {
                @Override
                public void run() {
                    boolean transformationFailed = false;
                    for (final Result result : results) {
                        Metacard metacard = result.getMetacard();
                        try {
                            exportMetacard(dumpDir, metacard);
                        } catch (IOException | CatalogTransformerException e) {
                            transformationFailed = true;
                            LOGGER.debug("Failed to dump metacard {}", metacard.getId(), e);
                            executorService.shutdownNow();
                        }
                        printStatus(resultCount.incrementAndGet());
                    }
                    if (transformationFailed) {
                        LOGGER.error(
                                "One or more metacards failed to transform. Enable debug log for more details.");
                    }
                }
            });
        } else {
            for (final Result result : response.getResults()) {
                Metacard metacard = result.getMetacard();
                exportMetacard(dumpDir, metacard);
                printStatus(resultCount.incrementAndGet());
            }
        }

        if (response.getResults().size() < pageSize || pageSize == -1) {
            break;
        }

        if (pageSize > 0) {
            query.setStartIndex(query.getStartIndex() + pageSize);
        }
    }

    executorService.shutdown();

    while (!executorService.isTerminated()) {
        try {
            TimeUnit.MILLISECONDS.sleep(100);
        } catch (InterruptedException e) {
            // ignore
        }
    }

    long end = System.currentTimeMillis();
    String elapsedTime = timeFormatter.print(new Period(start, end).withMillis(0));
    console.printf(" %d file(s) dumped in %s\t%n", resultCount.get(), elapsedTime);
    LOGGER.info("{} file(s) dumped in {}", resultCount.get(), elapsedTime);
    console.println();

    return null;
}

From source file: org.openiam.idm.srvc.synch.srcadapter.CSVAdapter.java

@Override
public SyncResponse startSynch(final SynchConfig config, SynchReviewEntity sourceReview,
        final SynchReviewEntity resultReview) {

    log.debug("CSV startSynch CALLED.^^^^^^^^");
    System.out.println("CSV startSynch CALLED.^^^^^^^^");

    SyncResponse res = new SyncResponse(ResponseStatus.SUCCESS);

    SynchReview review = null;
    if (sourceReview != null) {
        review = synchReviewDozerConverter.convertToDTO(sourceReview, false);
    }
    LineObject rowHeaderForReport = null;
    InputStream input = null;

    try {
        final ValidationScript validationScript = StringUtils.isNotEmpty(config.getValidationRule())
                ? SynchScriptFactory.createValidationScript(config, review)
                : null;
        final List<TransformScript> transformScripts = SynchScriptFactory.createTransformationScript(config,
                review);
        final MatchObjectRule matchRule = matchRuleFactory.create(config.getCustomMatchRule()); // check if matchRule exists

        if (validationScript == null || transformScripts == null || matchRule == null) {
            res = new SyncResponse(ResponseStatus.FAILURE);
            res.setErrorText("The problem in initialization of CSVAdapter, please check validationScript= "
                    + validationScript + ", transformScripts=" + transformScripts + ", matchRule=" + matchRule
                    + " all must be set!");
            res.setErrorCode(ResponseCode.INVALID_ARGUMENTS);
            return res;
        }

        if (sourceReview != null && !sourceReview.isSourceRejected()) {
            return startSynchReview(config, sourceReview, resultReview, validationScript, transformScripts,
                    matchRule);
        }

        CSVHelper parser;
        String csvFileName = config.getFileName();
        if (useRemoteFilestorage) {
            input = remoteFileStorageManager.downloadFile(SYNC_DIR, csvFileName);
            parser = new CSVHelper(input, "UTF-8");
        } else {
            String fileName = uploadRoot + File.separator + SYNC_DIR + File.separator + csvFileName;
            input = new FileInputStream(fileName);
            parser = new CSVHelper(input, "UTF-8", CSVStrategy.EXCEL_STRATEGY);
        }

        final String[][] rows = parser.getAllValues();

        //Get Header
        final LineObject rowHeader = populateTemplate(rows[0]);
        rowHeaderForReport = rowHeader;
        if (rows.length > 1) {

            int part = rows.length / THREAD_COUNT;
            int remains = rows.length - part * THREAD_COUNT;

            List<Part> partsList = new ArrayList<Part>();
            for (int i = 0; i < THREAD_COUNT; i++) {
                if (i != THREAD_COUNT - 1) {
                    partsList.add(new Part(i * part, (i + 1) * part));
                } else {
                    partsList.add(new Part(i * part, (i + 1) * part + remains));
                }
            }

            final Counter counter = new Counter();
            ExecutorService executor = Executors.newFixedThreadPool(THREAD_COUNT);
            List<Future<Integer>> list = new ArrayList<Future<Integer>>();

            final String[][] rowsWithoutHeader = Arrays.copyOfRange(rows, 1, rows.length);
            for (final Part p : partsList) {
                Callable<Integer> worker = new Callable<Integer>() {
                    @Override
                    public Integer call() throws Exception {
                        System.out.println("======= CSV Adapter Part [" + p.getStartIndx() + "; "
                                + p.getEndIndx() + "] started.");

                        int number = 0;
                        String[][] rowsForProcessing = Arrays.copyOfRange(rowsWithoutHeader, p.getStartIndx(),
                                p.getEndIndx());
                        for (String[] row : rowsForProcessing) {
                            LineObject rowObj = rowHeader.copy();
                            populateRowObject(rowObj, row);
                            processLineObject(rowObj, config, resultReview, validationScript, transformScripts,
                                    matchRule);
                            number = counter.increment();
                            System.out.println("======= CSV Adapter Part [" + p.getStartIndx() + "; "
                                    + p.getEndIndx() + "]  counter.increment = " + number);
                        }
                        System.out.println("======= CSV Adapter Part [" + p.getStartIndx() + "; "
                                + p.getEndIndx() + "] finished.");
                        return number;
                    }
                };
                Future<Integer> submit = executor.submit(worker);
                list.add(submit);
            }

            // This will make the executor accept no new threads
            // and finish all existing threads in the queue
            executor.shutdown();
            // Wait until all threads are finished
            while (!executor.isTerminated()) {
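                // Tight spin: consumes a CPU core until the pool drains; awaitTermination would block instead.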
            }
            Integer set = 0;
            for (Future<Integer> future : list) {
                try {
                    set += future.get();
                } catch (InterruptedException e) {
                    log.warn(e.getMessage());
                } catch (ExecutionException e) {
                    log.warn("CSVAdapter: future.get() throw problem message");
                }
            }
            System.out.println("CSV ================= All Processed records = " + set);
        }

    } catch (ClassNotFoundException cnfe) {
        log.error(cnfe);
        res = new SyncResponse(ResponseStatus.FAILURE);
        res.setErrorCode(ResponseCode.CLASS_NOT_FOUND);
        return res;
    } catch (FileNotFoundException fe) {
        fe.printStackTrace();
        log.error(fe);
        //            auditBuilder.addAttribute(AuditAttributeName.DESCRIPTION, "FileNotFoundException: "+fe.getMessage());
        //            auditLogProvider.persist(auditBuilder);
        SyncResponse resp = new SyncResponse(ResponseStatus.FAILURE);
        resp.setErrorCode(ResponseCode.FILE_EXCEPTION);
        log.debug("CSV SYNCHRONIZATION COMPLETE WITH ERRORS ^^^^^^^^");
        return resp;

    } catch (IOException io) {
        io.printStackTrace();
        /*
        synchStartLog.updateSynchAttributes("FAIL", ResponseCode.IO_EXCEPTION.toString(), io.toString());
        auditHelper.logEvent(synchStartLog);
        */
        SyncResponse resp = new SyncResponse(ResponseStatus.FAILURE);
        resp.setErrorCode(ResponseCode.IO_EXCEPTION);
        log.debug("CSV SYNCHRONIZATION COMPLETE WITH ERRORS ^^^^^^^^");
        return resp;
    } catch (SftpException sftpe) {
        log.error(sftpe);
        /*
        synchStartLog.updateSynchAttributes("FAIL", ResponseCode.FILE_EXCEPTION.toString(), sftpe.toString());
        auditHelper.logEvent(synchStartLog);
        */
        SyncResponse resp = new SyncResponse(ResponseStatus.FAILURE);
        resp.setErrorCode(ResponseCode.FILE_EXCEPTION);
        sftpe.printStackTrace();
        log.debug("CSV SYNCHRONIZATION COMPLETE WITH ERRORS ^^^^^^^^");
    } catch (JSchException jsche) {
        log.error(jsche);
        /*
        synchStartLog.updateSynchAttributes("FAIL", ResponseCode.FILE_EXCEPTION.toString(), jsche.toString());
        auditHelper.logEvent(synchStartLog);
        */
        SyncResponse resp = new SyncResponse(ResponseStatus.FAILURE);
        resp.setErrorCode(ResponseCode.FILE_EXCEPTION);
        jsche.printStackTrace();
        log.debug("CSV SYNCHRONIZATION COMPLETE WITH ERRORS ^^^^^^^^");
    } finally {
        if (resultReview != null) {
            if (CollectionUtils.isNotEmpty(resultReview.getReviewRecords())) { // add header row
                resultReview.addRecord(generateSynchReviewRecord(rowHeaderForReport, true));
            }
        }
        if (input != null) {
            try {
                input.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }

    log.debug("CSV SYNCHRONIZATION COMPLETE^^^^^^^^");

    //        auditBuilder.addAttribute(AuditAttributeName.DESCRIPTION, "CSV SYNCHRONIZATION COMPLETE^^^^^^^^");
    return new SyncResponse(ResponseStatus.SUCCESS);
}

From source file: com.thruzero.common.web.model.container.builder.xml.XmlPanelSetBuilder.java

protected PanelSet buildConcurrently() throws Exception {
    PanelSet result = new PanelSet(panelSetId);

    if (!panelNodes.isEmpty()) {
        // Build the panels in parallel (e.g., RSS Feed panels should be created in parallel).
        ExecutorService executorService = Executors.newFixedThreadPool(panelNodes.size());
        logHelper.logExecutorServiceCreated(panelSetId);

        final Map<String, AbstractPanel> panels = new HashMap<String, AbstractPanel>();
        for (final InfoNodeElement panelNode : panelNodes) {
            final AbstractXmlPanelBuilder panelBuilder = panelBuilderTypeRegistry
                    .createBuilder(panelNode.getName(), panelNode);
            final String panelKey = Integer.toHexString(panelNode.hashCode());

            if (panelBuilder == null) {
                panels.put(panelKey, new ErrorHtmlPanel("error", "Panel ERROR",
                        "PanelBuilder not found for panel type " + panelNode.getName()));
            } else {
                //logger.debug("  - prepare to build: " + panelNode.getName());
                executorService.execute(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            AbstractPanel panel = panelBuilder.build();
                            panels.put(panelKey, panel);
                        } catch (Exception e) {
                            panels.put(panelKey, panelBuilder.buildErrorPanel(panelBuilder.getPanelId(),
                                    "Panel ERROR",
                                    "PanelBuilder encountered an Exception: " + e.getClass().getSimpleName()));
                        }
                    }

                    @Override
                    public String toString() {
                        return panelBuilder.getPanelInfoForError();
                    }
                });
            }
        }

        // Wait for all panels to be built
        executorService.shutdown();
        logHelper.logExecutorServiceShutdown(panelSetId);
        try {
            executorService.awaitTermination(timeoutInSeconds, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            // ignore (handled below)
            logHelper.logExecutorServiceInterrupted(panelSetId);
        }

        if (executorService.isTerminated()) {
            logHelper.logExecutorServiceIsTerminated(panelSetId);
        } else {
            logHelper.logExecutorServiceIsNotTerminated(executorService, executorService.shutdownNow(),
                    panelSetId);
        }

        // add panels in the same order as defined
        for (InfoNodeElement panelNode : panelNodes) {
            String panelKey = Integer.toHexString(panelNode.hashCode());
            AbstractPanel panel = panels.get(panelKey);
            if (panel == null) {
                // if it wasn't added to the panels map, then there must have been a timeout error
                AbstractXmlPanelBuilder panelBuilder = panelBuilderTypeRegistry
                        .createBuilder(panelNode.getName(), panelNode);

                result.addPanel(panelBuilder.buildErrorPanel(panelKey, "Panel ERROR",
                        "PanelBuilder encountered a timeout error: " + panelNode.getName()));
            } else {
                result.addPanel(panel);
            }
        }
    }
    logHelper.logPanelSetCompleted(panelSetId);

    return result;
}

From source file: plaid.compilerjava.CompilerCore.java

private void generateCode(List<CompilationUnit> cus, final PackageRep plaidpath) throws Exception {
    if (cc.isVerbose()) {
        System.out.println("Generating code.");
    }

    final List<File> allFiles = new ArrayList<File>();
    ExecutorService taskPool = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
    for (final CompilationUnit cu : cus) {
        if (!cc.forceRecompile()) {
            boolean rebuild = false;
            for (Decl d : cu.getDecls()) {
                //System.out.println(d);
                StringBuilder packageName = new StringBuilder();
                for (String s : cu.getPackageName()) {
                    packageName.append(s);
                    packageName.append(System.getProperty("file.separator"));
                }
                File targetFile = new File(cc.getTempDir() + System.getProperty("file.separator") + packageName
                        + d.getName() + ".java");
                if (!targetFile.exists() || targetFile.lastModified() < cu.getSourceFile().lastModified()) {
                    rebuild = true;
                    break;
                }
            }
            if (!rebuild) {
                if (cc.isVerbose()) {
                    System.out.println("file up-to-date : " + cu.getSourceFile());
                }
                continue;
            }
            if (cc.isVerbose()) {
                System.out.println("Rebuild: " + cu.getSourceFile());
            }
        }
        Callable<Object> task = new Callable<Object>() {
            @Override
            public Object call() throws Exception {
                try {
                    if (cc.isVerbose())
                        System.out.println("Generating code for:\n" + cu);
                    List<File> fileList = cu.codegen(cc, plaidpath);

                    synchronized (allFiles) {
                        allFiles.addAll(fileList);
                    }
                } catch (PlaidException p) {
                    System.err.println("Error while compiling " + cu.getSourceFile().toString() + ":");
                    System.err.println("");
                    printExceptionInformation(p);
                }
                return null;
            }
        };
        taskPool.submit(task);
    }
    taskPool.shutdown();
    while (!taskPool.isTerminated()) {
        taskPool.awaitTermination(1, TimeUnit.MINUTES);
    }

    if (!cc.isKeepTemporaryFiles()) {
        for (File f : allFiles) {
            f.deleteOnExit();
        }
    }

    if (cc.isVerbose()) {
        System.out.println("invoke Java compiler");
    }
    if (cc.isInvokeCompiler() && allFiles.size() > 0) {
        JavaCompiler compiler = ToolProvider.getSystemJavaCompiler();
        StandardJavaFileManager fileManager = compiler.getStandardFileManager(null, null, null);
        Iterable<? extends JavaFileObject> fileObjects = fileManager.getJavaFileObjectsFromFiles(allFiles);

        List<String> optionList = new ArrayList<String>();
        optionList.addAll(Arrays.asList("-target", "1.5"));
        // Set compiler's classpath to be same as the runtime's
        optionList.addAll(Arrays.asList("-classpath", System.getProperty("java.class.path")));
        // TODO: Add a separate compiler flag for this.
        optionList.addAll(Arrays.asList("-d", cc.getOutputDir()));
        //         optionList.add("-verbose");

        // Invoke the compiler
        CompilationTask task = compiler.getTask(null, null, null, optionList, null, fileObjects);
        Boolean resultCode = task.call();
        if (!resultCode.booleanValue())
            throw new RuntimeException("Error while compiling generated Java files.");
    }
}

From source file: com.splout.db.integration.TestMultiThreadedQueryAndDeploy.java

@Test
@Ignore // Causes some non-deterministic problems, to be analyzed
public void test() throws Throwable {
    FileUtils.deleteDirectory(new File(TMP_FOLDER));
    new File(TMP_FOLDER).mkdirs();

    createSploutEnsemble(N_QNODES, N_DNODES);
    String[] qNodeAddresses = new String[N_QNODES];
    for (int i = 0; i < N_QNODES; i++) {
        qNodeAddresses[i] = getqNodes().get(i).getAddress();
    }

    final SploutClient client = new SploutClient(qNodeAddresses);
    final Tablespace testTablespace = createTestTablespace(N_DNODES);
    final Random random = new Random(SEED);
    final AtomicBoolean failed = new AtomicBoolean(false);
    final AtomicInteger iteration = new AtomicInteger(0);
    final Set<Integer> iterationsSeen = new HashSet<Integer>();

    deployIteration(0, random, client, testTablespace);

    for (QNode qnode : getqNodes()) {
        // Make sure all QNodes are aware of the first deploy.
        // There might be some delay as they have to receive notifications via Hazelcast etc.
        long waitedSoFar = 0;
        QueryStatus status = null;
        SploutClient perQNodeClient = new SploutClient(qnode.getAddress());
        do {
            status = perQNodeClient.query(TABLESPACE, "0", "SELECT * FROM " + TABLE + ";", null);
            Thread.sleep(100);
            waitedSoFar += 100;
            if (waitedSoFar > 5000) {
                throw new AssertionError("Waiting too much on a test condition");
            }
        } while (status == null || status.getError() != null);
        log.info("QNode [" + qnode.getAddress() + "] is ready to serve deploy 0.");
    }

    try {
        // Business logic here
        ExecutorService service = Executors.newFixedThreadPool(N_THREADS);

        // These threads will continuously perform queries and check that the results are consistent.
        // They will also count how many deploys have happened since the beginning.
        for (int i = 0; i < N_THREADS; i++) {
            service.submit(new Runnable() {
                @Override
                public void run() {
                    try {
                        while (true) {
                            int randomDNode = Math.abs(random.nextInt()) % N_DNODES;
                            QueryStatus status = client.query(TABLESPACE, (randomDNode * 10) + "",
                                    "SELECT * FROM " + TABLE + ";", null);
                            log.info("Query status -> " + status);
                            assertEquals(1, status.getResult().size());
                            Map<String, Object> jsonResult = (Map<String, Object>) status.getResult().get(0);
                            Integer seenIteration = (Integer) jsonResult.get("iteration");
                            synchronized (iterationsSeen) {
                                iterationsSeen.add(seenIteration);
                            }
                            assertTrue(seenIteration <= iteration.get());
                            assertEquals(randomDNode, jsonResult.get("dnode"));
                            Thread.sleep(100);
                        }
                    } catch (InterruptedException ie) {
                        // Bye bye
                        log.info("Bye bye!");
                    } catch (Throwable e) {
                        e.printStackTrace();
                        failed.set(true);
                    }
                }
            });
        }

        final SploutConfiguration config = SploutConfiguration.getTestConfig();
        final int iterationsToPerform = config.getInt(QNodeProperties.VERSIONS_PER_TABLESPACE) + 5;
        for (int i = 0; i < iterationsToPerform; i++) {
            iteration.incrementAndGet();
            log.info("Deploy iteration: " + iteration.get());
            deployIteration(iteration.get(), random, client, testTablespace);

            new TestUtils.NotWaitingForeverCondition() {
                @Override
                public boolean endCondition() {
                    synchronized (iterationsSeen) {
                        return iterationsSeen.size() == (iteration.get() + 1);
                    }
                }
            }.waitAtMost(5000);
        }

        assertEquals(false, failed.get());

        service.shutdownNow(); // will interrupt all threads
        while (!service.isTerminated()) {
            Thread.sleep(100);
        }

        CoordinationStructures coord = TestUtils.getCoordinationStructures(config);
        assertNotNull(coord.getCopyVersionsBeingServed().get(TABLESPACE));

        // Assert that there is only MAX_VERSIONS versions of the tablespace (due to old version cleanup)
        new TestUtils.NotWaitingForeverCondition() {

            @Override
            public boolean endCondition() {
                QNodeHandler handler = (QNodeHandler) qNodes.get(0).getHandler();
                int seenVersions = 0;
                for (Map.Entry<TablespaceVersion, Tablespace> tablespaceVersion : handler.getContext()
                        .getTablespaceVersionsMap().entrySet()) {
                    if (tablespaceVersion.getKey().getTablespace().equals(TABLESPACE)) {
                        seenVersions++;
                    }
                }
                return seenVersions <= config.getInt(QNodeProperties.VERSIONS_PER_TABLESPACE);
            }
        }.waitAtMost(5000);
    } finally {
        closeSploutEnsemble();
        FileUtils.deleteDirectory(new File(TMP_FOLDER));
    }
}