Example usage for java.util.concurrent ForkJoinPool ForkJoinPool

List of usage examples for java.util.concurrent ForkJoinPool ForkJoinPool

Introduction

On this page you can find example usage for java.util.concurrent ForkJoinPool ForkJoinPool.

Prototype

private ForkJoinPool(byte forCommonPoolOnly) 

Source Link

Document

Constructor for the common pool, using parameters possibly overridden by system properties.
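
Note that the prototype above is the JDK-internal constructor used to build the common pool; the usage examples below all create their own pools through the public ForkJoinPool() and ForkJoinPool(int parallelism) constructors. The following is a minimal, self-contained sketch of that pattern; the SumTask class, array size, and split threshold are illustrative assumptions, not taken from any of the source files below.

import java.util.Arrays;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.RecursiveTask;

public class ForkJoinPoolExample {

    // Hypothetical divide-and-conquer task: sums a slice of an int array,
    // splitting it in half until the slice is small enough to sum directly.
    static class SumTask extends RecursiveTask<Long> {
        private final int[] data;
        private final int from, to;

        SumTask(int[] data, int from, int to) {
            this.data = data;
            this.from = from;
            this.to = to;
        }

        @Override
        protected Long compute() {
            if (to - from <= 1_000) {
                long sum = 0;
                for (int i = from; i < to; i++) {
                    sum += data[i];
                }
                return sum;
            }
            int mid = (from + to) >>> 1;
            SumTask left = new SumTask(data, from, mid);
            SumTask right = new SumTask(data, mid, to);
            left.fork();                          // run the left half asynchronously
            return right.compute() + left.join(); // compute the right half, then join the left
        }
    }

    public static void main(String[] args) {
        int[] data = new int[100_000];
        Arrays.fill(data, 1);

        // Size the pool to the available processors, as most of the examples below do.
        ForkJoinPool pool = new ForkJoinPool(Runtime.getRuntime().availableProcessors());
        try {
            long sum = pool.invoke(new SumTask(data, 0, data.length));
            System.out.println("Sum = " + sum);   // expected: 100000
        } finally {
            pool.shutdown();
        }
    }
}

The examples that pass Runtime.getRuntime().availableProcessors() explicitly could equivalently use the no-argument constructor, which defaults to that parallelism level.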

Usage

From source file:com.homeadvisor.kafdrop.service.CuratorKafkaMonitor.java

@PostConstruct
public void start() throws Exception {
    try {
        kafkaVersion = new Version(properties.getKafkaVersion());
    } catch (Exception ex) {
        throw new IllegalStateException("Invalid kafka version: " + properties.getKafkaVersion(), ex);
    }

    threadPool = new ForkJoinPool(properties.getThreadPoolSize());

    FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
    backOffPolicy.setBackOffPeriod(properties.getRetry().getBackoffMillis());

    final SimpleRetryPolicy retryPolicy = new SimpleRetryPolicy(properties.getRetry().getMaxAttempts(),
            ImmutableMap.of(InterruptedException.class, false, Exception.class, true));

    retryTemplate = new RetryTemplate();
    retryTemplate.setBackOffPolicy(backOffPolicy);
    retryTemplate.setRetryPolicy(retryPolicy);

    cacheInitCounter.set(4);

    brokerPathCache = new PathChildrenCache(curatorFramework, ZkUtils.BrokerIdsPath(), true);
    brokerPathCache.getListenable().addListener(new BrokerListener());
    brokerPathCache.getListenable().addListener((f, e) -> {
        if (e.getType() == PathChildrenCacheEvent.Type.INITIALIZED) {
            cacheInitCounter.decrementAndGet();
            LOG.info("Broker cache initialized");
        }
    });
    brokerPathCache.start(StartMode.POST_INITIALIZED_EVENT);

    topicConfigPathCache = new PathChildrenCache(curatorFramework, ZkUtils.TopicConfigPath(), true);
    topicConfigPathCache.getListenable().addListener((f, e) -> {
        if (e.getType() == PathChildrenCacheEvent.Type.INITIALIZED) {
            cacheInitCounter.decrementAndGet();
            LOG.info("Topic configuration cache initialized");
        }
    });
    topicConfigPathCache.start(StartMode.POST_INITIALIZED_EVENT);

    topicTreeCache = new TreeCache(curatorFramework, ZkUtils.BrokerTopicsPath());
    topicTreeCache.getListenable().addListener((client, event) -> {
        if (event.getType() == TreeCacheEvent.Type.INITIALIZED) {
            cacheInitCounter.decrementAndGet();
            LOG.info("Topic tree cache initialized");
        }
    });
    topicTreeCache.start();

    consumerTreeCache = new TreeCache(curatorFramework, ZkUtils.ConsumersPath());
    consumerTreeCache.getListenable().addListener((client, event) -> {
        if (event.getType() == TreeCacheEvent.Type.INITIALIZED) {
            cacheInitCounter.decrementAndGet();
            LOG.info("Consumer tree cache initialized");
        }
    });
    consumerTreeCache.start();

    controllerNodeCache = new NodeCache(curatorFramework, ZkUtils.ControllerPath());
    controllerNodeCache.getListenable().addListener(this::updateController);
    controllerNodeCache.start(true);
    updateController();
}

From source file:com.chingo247.structureapi.plan.StructurePlanManager.java

public void reload(final boolean verbose) {
    synchronized (plans) {
        plans.clear();

    }

    // Make dirs if not exist!
    planDirectory.mkdirs();

    StructureAPI.getInstance().getEventDispatcher().dispatchEvent(new StructurePlansReloadEvent());

    // If it isn't null and there are still processes running... terminate them
    if (forkJoinPool != null && !forkJoinPool.isShutdown()) {
        forkJoinPool.shutdown();
    }

    forkJoinPool = new ForkJoinPool(parallelism);
    StructureAPI.getInstance().getExecutor().submit(new Runnable() {

        @Override
        public void run() {
            try {
                StructurePlanReader reader = new StructurePlanReader();
                List<IStructurePlan> plansList = reader.readDirectory(planDirectory, verbose, forkJoinPool);
                for (IStructurePlan plan : plansList) {
                    boolean exists = getPlan(plan.getId()) != null;
                    if (exists) {
                        continue; // it's exactly the same plan...
                    }
                    putPlan(plan);
                }
                if (!forkJoinPool.isShutdown()) {
                    forkJoinPool.shutdown();
                }
                StructureAPI.getInstance().getEventDispatcher().dispatchEvent(new StructurePlansLoadedEvent());
            } catch (Exception ex) {
                LOG.log(Level.SEVERE, ex.getMessage(), ex);
            }
        }
    });

}

From source file:learn.jersey.services.MultiThreadedClientExample.java

@Override
public int run(String[] args) throws Exception {

    if (args.length < 1 || args.length > 2) {
        System.out.println("Usage: " + this.getClass().getName() + " tableName [num_operations]");
        return -1;
    }

    final TableName tableName = TableName.valueOf(args[0]);
    int numOperations = DEFAULT_NUM_OPERATIONS;

    // the second arg is the number of operations to send.
    if (args.length == 2) {
        numOperations = Integer.parseInt(args[1]);
    }

    // Threads for the client only.
    //
    // We don't want to mix hbase and business logic.
    //
    ExecutorService service = new ForkJoinPool(threads * 2);

    // Create two different connections showing how it's possible to
    // separate different types of requests onto different connections
    final Connection writeConnection = ConnectionFactory.createConnection(getConf(), service);
    final Connection readConnection = ConnectionFactory.createConnection(getConf(), service);

    // At this point the entire cache for the region locations is full.
    // Only do this if the number of regions in a table is easy to fit into
    // memory.
    //
    // If you are interacting with more than 25k regions on a client then
    // it's probably not good
    // to do this at all.
    warmUpConnectionCache(readConnection, tableName);
    warmUpConnectionCache(writeConnection, tableName);

    List<Future<Boolean>> futures = new ArrayList<>(numOperations);
    for (int i = 0; i < numOperations; i++) {
        double r = ThreadLocalRandom.current().nextDouble();
        Future<Boolean> f;

        // For the sake of generating some synthetic load this queues
        // some different callables.
        // These callables are meant to represent real work done by your
        // application.
        if (r < .30) {
            f = internalPool.submit(new WriteExampleCallable(writeConnection, tableName));
        } else if (r < .50) {
            f = internalPool.submit(new SingleWriteExampleCallable(writeConnection, tableName));
        } else {
            f = internalPool.submit(new ReadExampleCallable(writeConnection, tableName));
        }
        futures.add(f);
    }

    // Wait a long time for all the reads/writes to complete
    for (Future<Boolean> f : futures) {
        f.get(10, TimeUnit.MINUTES);
    }

    // Clean up after ourselves for cleanliness
    internalPool.shutdownNow();
    service.shutdownNow();
    return 0;
}

From source file:org.apache.hadoop.hbase.client.example.MultiThreadedClientExample.java

@Override
public int run(String[] args) throws Exception {

    if (args.length < 1 || args.length > 2) {
        System.out.println("Usage: " + this.getClass().getName() + " tableName [num_operations]");
        return -1;
    }

    final TableName tableName = TableName.valueOf(args[0]);
    int numOperations = DEFAULT_NUM_OPERATIONS;

    // the second arg is the number of operations to send.
    if (args.length == 2) {
        numOperations = Integer.parseInt(args[1]);
    }

    // Threads for the client only.
    //
    // We don't want to mix hbase and business logic.
    //
    ExecutorService service = new ForkJoinPool(threads * 2);

    // Create two different connections showing how it's possible to
    // separate different types of requests onto different connections
    final Connection writeConnection = ConnectionFactory.createConnection(getConf(), service);
    final Connection readConnection = ConnectionFactory.createConnection(getConf(), service);

    // At this point the entire cache for the region locations is full.
    // Only do this if the number of regions in a table is easy to fit into memory.
    //
    // If you are interacting with more than 25k regions on a client then it's probably not good
    // to do this at all.
    warmUpConnectionCache(readConnection, tableName);
    warmUpConnectionCache(writeConnection, tableName);

    List<Future<Boolean>> futures = new ArrayList<>(numOperations);
    for (int i = 0; i < numOperations; i++) {
        double r = ThreadLocalRandom.current().nextDouble();
        Future<Boolean> f;

        // For the sake of generating some synthetic load this queues
        // some different callables.
        // These callables are meant to represent real work done by your application.
        if (r < .30) {
            f = internalPool.submit(new WriteExampleCallable(writeConnection, tableName));
        } else if (r < .50) {
            f = internalPool.submit(new SingleWriteExampleCallable(writeConnection, tableName));
        } else {
            f = internalPool.submit(new ReadExampleCallable(writeConnection, tableName));
        }
        futures.add(f);
    }

    // Wait a long time for all the reads/writes to complete
    for (Future<Boolean> f : futures) {
        f.get(10, TimeUnit.MINUTES);
    }

    // Clean up after ourselves for cleanliness
    internalPool.shutdownNow();
    service.shutdownNow();
    return 0;
}

From source file:com.hygenics.parser.SpecifiedDump.java

/**
 * Runs the Dump
 */
public void run() {

    if (archive) {
        if (tables.keySet().size() > 0) {

            Archiver zip = new Archiver();
            String basefile = tables.keySet().iterator().next().split("\\|")[1];

            if (basefile.trim().length() > 0) {
                zip.setBasedirectory(basefile);
                zip.setZipDirectory(basefile + "archive.zip");
                zip.setAvoidanceString(".zip|archive");
                zip.setDelFiles(true);
                zip.run();
            }
        }
    }

    int dumped = 0;
    ForkJoinPool fjp = new ForkJoinPool(Runtime.getRuntime().availableProcessors());
    boolean checkedTables = (this.tablesMustHave == null);
    for (String tf : tables.keySet()) {
        String[] split = tf.split("\\|");
        log.info("Dumping for " + split[0]);
        String schema = null;
        try {
            schema = split[0].split("\\.")[0];

            if (!checkedTables) {
                ArrayList<String> mustHaveTemp = (ArrayList<String>) this.tablesMustHave.clone();
                ArrayList<String> existingTables = this.template.getJsonData(
                        "SELECT table_name FROM information_schema.tables WHERE table_schema ILIKE '%" + schema
                                + "%'");
                for (String tdict : existingTables) {
                    String table = JsonObject.readFrom(tdict).get("table_name").asString();
                    if (mustHaveTemp.contains(table)) {
                        mustHaveTemp.remove(table);

                        // get count
                        if (this.template.getCount(schema + "." + table) == 0) {
                            try {
                                throw new MissingData(
                                        "Data Missing from Required Table: " + schema + "." + table);
                            } catch (MissingData e) {
                                e.printStackTrace();
                            }
                        }
                    }
                }

                if (mustHaveTemp.size() > 0) {
                    log.error("Drop Schema " + schema + "  is missing the following tables:\n");
                    for (String table : mustHaveTemp) {
                        log.error(table + "\n");
                    }

                    try {
                        throw new TableMissingException();
                    } catch (TableMissingException e) {
                        e.printStackTrace();
                        System.exit(-1);
                    }
                }

            }

        } catch (IndexOutOfBoundsException e) {
            try {
                throw new SQLMalformedException("FATAL ERROR: Table name " + split[0] + " malformed");
            } catch (SQLMalformedException e2) {
                e2.printStackTrace();
                System.exit(-1);
            }
        }

        log.info("Checking  table: " + split[0] + "&& schema: " + schema);
        if (template.checkTable(split[0], schema)) {
            if (template.getCount(schema + "." + split[0].replace(schema + ".", "")) > 0) {
                Set<String> keys = tables.get(tf).keySet();
                String sql;
                String select = "SELECT ";
                String distinct = null;
                String attrs = null;
                String where = null;
                String group = null;
                String order = null;

                /**
                 * SET THE ATTRIBUTES, WHICH CAN BE SPECIFIED WITH:
                 * distinct - for concatenating the distinct part of the query
                 * not0     - for specifying that the length must be greater than 0 in the WHERE clause
                 * group    - for grouping the attribute
                 * notnull  - for specifying that the attribute cannot be null
                 * orderby  - for specifying our one order attribute
                 */
                for (String k : keys) {
                    if (k.toLowerCase().contains("distinct")) {
                        distinct = (distinct == null)
                                ? "distinct on(" + tables.get(tf).get(k).replaceAll("\\sas.*", "")
                                : distinct + "," + tables.get(tf).get(k).replaceAll("\\sas.*", "");
                    }

                    if (k.toLowerCase().contains("group")) {
                        group = (group == null) ? "GROUP BY " + tables.get(tf).get(k).replaceAll("\\sas.*", "")
                                : group + "," + tables.get(tf).get(k).replaceAll("\\sas.*", "");
                    }

                    if (k.toLowerCase().contains("not0")) {
                        if (k.contains("not0OR")) {
                            where = (where == null)
                                    ? "WHERE length(" + tables.get(tf).get(k).replaceAll("\\sas.*", "")
                                            + ") >0 "
                                    : where + "OR length(" + tables.get(tf).get(k).replaceAll("\\sas.*", "")
                                            + ")";
                        } else {
                            where = (where == null)
                                    ? "WHERE length(" + tables.get(tf).get(k).replaceAll("\\sas.*", "")
                                            + ") >0 "
                                    : where + "AND length(" + tables.get(tf).get(k).replaceAll("\\sas.*", "")
                                            + ")";
                        }
                    }

                    if (k.toLowerCase().contains("notnull")) {
                        if (k.toLowerCase().contains("notnullor")) {
                            where = (where == null)
                                    ? "WHERE " + tables.get(tf).get(k).replaceAll("\\sas.*", "")
                                            + " IS NOT NULL"
                                    : where + " OR " + tables.get(tf).get(k).replaceAll("\\sas.*", "")
                                            + " IS NOT NULL";
                        } else {
                            where = (where == null)
                                    ? "WHERE " + tables.get(tf).get(k).replaceAll("\\sas.*", "")
                                            + " IS NOT NULL"
                                    : where + " AND " + tables.get(tf).get(k).replaceAll("\\sas.*", "")
                                            + " IS NOT NULL";
                        }
                    }

                    if (k.toLowerCase().contains("order")) {
                        if (k.toLowerCase().contains("orderdesc")) {
                            order = (order == null)
                                    ? "ORDER BY " + tables.get(tf).get(k).replaceAll("\\sas.*", "") + " ASC"
                                    : order;
                        } else {
                            order = (order == null)
                                    ? "ORDER BY " + tables.get(tf).get(k).replaceAll("\\sas.*", "") + " DESC"
                                    : order;
                        }
                    }

                    String field = tables.get(tf).get(k);
                    if (k.toLowerCase().contains("attr")) {
                        if (unicoderemove == true) {
                            field = "trim(replace(regexp_replace(" + field
                                    + ",'[^\\u0020-\\u007e,\\(\\);\\-\\[\\]]+',' '),'" + this.delimiter + "','"
                                    + this.replacedel + "')) as " + field;
                        } else {
                            field = "trim(replace(" + field + ",'" + this.delimiter + "','" + this.replacedel
                                    + "'))";
                        }

                        attrs = (attrs == null) ? field : attrs + "," + field;
                    }
                }

                select = (distinct == null) ? select : select.trim() + " " + distinct.trim() + ")";
                select += " " + attrs.trim();
                select += " FROM " + split[0].trim();
                select = (where == null) ? select : select.trim() + " " + where.trim();
                select = (group == null) ? select : select.trim() + " " + group.trim();
                select = (order == null) ? select : select.trim() + " " + order.trim();

                if (extracondition != null) {
                    select += (select.contains(" WHERE ") == true) ? " AND" + extracondition
                            : " WHERE " + extracondition;
                }

                select = select.trim();

                log.info("Dump Select Command: " + select);

                sql = "COPY  (" + select + ") TO STDOUT WITH DELIMITER '" + delimiter.trim()
                        + "' NULL as '' CSV HEADER";
                fjp.execute(new ToFile(sql, split[1].trim()));

                select = "SELECT ";
                distinct = null;
                attrs = null;
                where = null;
                group = null;
                order = null;
                dumped += 1;
            } else {
                try {
                    throw new NoDataException("No Data found in " + split[0]);
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        } else {
            try {
                throw new SQLMalformedException("WARNING: Table " + split[0] + " is missing");
            } catch (SQLMalformedException e) {
                e.printStackTrace();
            }
        }
    }

    try {
        fjp.awaitTermination(60000, TimeUnit.MILLISECONDS);
        fjp.shutdown();
    } catch (InterruptedException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

    if (dumped == 0) {
        log.error("No Date found in any tables");
        System.exit(-1);
    }

}

From source file:org.csanchez.jenkins.plugins.kubernetes.KubernetesTestUtil.java

/**
 * Delete pods with matching labels
 * 
 * @param client
 * @param labels
 * @param wait
 *            wait some time for pods to finish
 * @return whether any pod was deleted
 * @throws Exception
 */
public static boolean deletePods(KubernetesClient client, Map<String, String> labels, boolean wait)
        throws Exception {

    if (client != null) {

        // wait for 90 seconds for all pods to be terminated
        if (wait) {
            LOGGER.log(INFO, "Waiting for pods to terminate");
            ForkJoinPool forkJoinPool = new ForkJoinPool(1);
            try {
                forkJoinPool.submit(() -> IntStream.range(1, 1_000_000).anyMatch(i -> {
                    try {
                        FilterWatchListDeletable<Pod, PodList, Boolean, Watch, Watcher<Pod>> pods = client
                                .pods().withLabels(labels);
                        LOGGER.log(INFO, "Still waiting for pods to terminate: {0}", print(pods));
                        boolean allTerminated = pods.list().getItems().isEmpty();
                        if (allTerminated) {
                            LOGGER.log(INFO, "All pods are terminated: {0}", print(pods));
                        } else {
                            LOGGER.log(INFO, "Still waiting for pods to terminate: {0}", print(pods));
                            Thread.sleep(5000);
                        }
                        return allTerminated;
                    } catch (InterruptedException e) {
                        LOGGER.log(INFO, "Waiting for pods to terminate - interrupted");
                        return true;
                    }
                })).get(90, TimeUnit.SECONDS);
            } catch (TimeoutException e) {
                LOGGER.log(INFO, "Waiting for pods to terminate - timed out");
                // job not done in interval
            }
        }

        FilterWatchListDeletable<Pod, PodList, Boolean, Watch, Watcher<Pod>> pods = client.pods()
                .withLabels(labels);
        if (!pods.list().getItems().isEmpty()) {
            LOGGER.log(WARNING, "Deleting leftover pods: {0}", print(pods));
            if (Boolean.TRUE.equals(pods.delete())) {
                return true;
            }

        }
    }
    return false;
}

From source file:com.hygenics.parser.KVParser.java

private void sendToDb(ArrayList<String> json, boolean split) {
    if (json.size() > 0)
        log.info("Records to Add: " + json.size());

    if (split) {

        ForkJoinPool f2 = new ForkJoinPool(
                (Runtime.getRuntime().availableProcessors() + ((int) Math.ceil(procs * qnums))));
        ArrayList<String> l;
        int size = (int) Math.ceil(json.size() / qnums);
        for (int conn = 0; conn < qnums; conn++) {
            l = new ArrayList<String>();
            if (((conn + 1) * size) < json.size()) {
                l.addAll(json.subList((conn * size), ((conn + 1) * size)));

            } else {

                l.addAll(json.subList((conn * size), json.size()));
            }
            f2.execute(new SplitPost(template, l));
        }
        int w = 0;
        while (f2.isQuiescent() == false && f2.getActiveThreadCount() > 0) {
            w++;
        }

        f2.shutdown();

        int incrementor = 0;

        while (f2.isShutdown() == false && f2.getActiveThreadCount() > 0 && f2.isQuiescent() == false) {
            incrementor++;
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
            log.info("Shutting Down" + incrementor);
        }

        l = null;
        f2 = null;

    } else {
        for (String j : json) {

            boolean valid = false;

            try {
                Json.read(j);
                valid = true;
            } catch (Exception e) {
                log.info("ERROR: JSON NOT FORMATTED PROPERLY");
                System.out.println(j);
            }

            try {

                this.template.postSingleJson(j);
            } catch (Exception e) {
                log.info("Failed to Post");
                log.error(j);
                e.printStackTrace();
            }
        }
    }

}

From source file:com.hygenics.parser.SpecDumpWithReference.java

/**
 * Runs the Dump
 */
public void run() {

    if (archive) {
        Archiver zip = new Archiver();
        String[] barr = baseFile.split("\\/");
        String basefile = "";
        for (int i = 0; i < barr.length - 1; i++) {
            basefile += (i == 0) ? barr[i] : "/" + barr[i];
        }
        if (basefile.trim().length() > 0) {
            zip.setBasedirectory(basefile);
            zip.setZipDirectory(basefile + "archive.zip");
            zip.setAvoidanceString(".zip|archive");
            zip.setDelFiles(true);
            zip.run();
        }
    }

    int dumped = 0;
    log.info("Tables Found: " + tables.size());
    ForkJoinPool fjp = new ForkJoinPool(Runtime.getRuntime().availableProcessors());
    boolean checkedTables = (this.tablesMustHave == null);
    for (String tf : tables.keySet()) {
        String[] split = (this.baseschema + "." + tf + "|" + this.baseFile + tf).split("\\|");
        log.info("Dumping for " + split[0]);
        String schema = null;
        try {
            schema = split[0].split("\\.")[0];

            if (!checkedTables) {
                ArrayList<String> mustHaveTemp = (ArrayList<String>) this.tablesMustHave.clone();
                ArrayList<String> existingTables = this.template.getJsonData(
                        "SELECT table_name FROM information_schema.tables WHERE table_schema ILIKE '%" + schema
                                + "%'");
                for (String tdict : existingTables) {

                    String table = Json.parse(tdict).asObject().get("table_name").asString();
                    if (mustHaveTemp.contains(table)) {
                        mustHaveTemp.remove(table);

                        // get count
                        if (this.template.getCount(schema + "." + table) == 0) {
                            try {
                                throw new MissingData(
                                        "Data Missing from Required Table: " + schema + "." + table);
                            } catch (MissingData e) {
                                e.printStackTrace();
                                if (tablesMustHave.contains(table)) {
                                    log.error("Critical Table Missing Data! Terminating!");
                                    System.exit(-1);
                                }
                            }
                        }

                    }
                }

                if (mustHaveTemp.size() > 0) {
                    log.error("Drop Schema " + schema + "  is missing the following tables:\n");
                    for (String table : mustHaveTemp) {
                        log.error(table + "\n");
                    }

                    try {
                        throw new TableMissingException();
                    } catch (TableMissingException e) {
                        e.printStackTrace();
                        System.exit(-1);
                    }
                }
            }

        } catch (IndexOutOfBoundsException e) {
            try {
                throw new SQLMalformedException("FATAL ERROR: Table name " + split[0] + " malformed");
            } catch (SQLMalformedException e2) {
                e2.printStackTrace();
                System.exit(-1);
            }
        }

        log.info("Checking  table: " + split[0] + "&& schema: " + schema);

        if (template.checkTable(split[0], schema)) {
            // check if there are records

            if (template.getCount(schema + "." + split[0].replace(schema + ".", "")) > 0) {
                dumped += 1;
                Set<String> keys = tables.get(tf).keySet();
                String sql;
                String select = "SELECT ";
                String distinct = null;
                String attrs = null;
                String where = null;
                String group = null;
                String order = null;

                /**
                 * SET THE ATTRIBUTES, WHICH CAN BE SPECIFIED WITH:
                 * distinct - for concatenating the distinct part of the query
                 * not0     - for specifying that the length must be greater than 0 in the WHERE clause
                 * group    - for grouping the attribute
                 * notnull  - for specifying that the attribute cannot be null
                 * orderby  - for specifying our one order attribute
                 */
                for (String k : keys) {
                    if (k.toLowerCase().contains("distinct")) {
                        distinct = (distinct == null)
                                ? "distinct on(" + tables.get(tf).get(k).replaceAll("\\sas.*", "")
                                : distinct + "," + tables.get(tf).get(k).replaceAll("\\sas.*", "");
                    }

                    if (k.toLowerCase().contains("group")) {
                        group = (group == null) ? "GROUP BY " + tables.get(tf).get(k).replaceAll("\\sas.*", "")
                                : group + "," + tables.get(tf).get(k).replaceAll("\\sas.*", "");
                    }

                    if (k.toLowerCase().contains("not0")) {
                        if (k.contains("not0OR")) {
                            where = (where == null)
                                    ? "WHERE length(" + tables.get(tf).get(k).replaceAll("\\sas.*", "")
                                            + ") >0 "
                                    : where + "OR length(" + tables.get(tf).get(k).replaceAll("\\sas.*", "")
                                            + ")";
                        } else {
                            where = (where == null)
                                    ? "WHERE length(" + tables.get(tf).get(k).replaceAll("\\sas.*", "")
                                            + ") >0 "
                                    : where + "AND length(" + tables.get(tf).get(k).replaceAll("\\sas.*", "")
                                            + ")";
                        }
                    }

                    if (k.toLowerCase().contains("notnull")) {
                        if (k.toLowerCase().contains("notnullor")) {
                            where = (where == null)
                                    ? "WHERE " + tables.get(tf).get(k).replaceAll("\\sas.*", "")
                                            + " IS NOT NULL"
                                    : where + " OR " + tables.get(tf).get(k).replaceAll("\\sas.*", "")
                                            + " IS NOT NULL";
                        } else {
                            where = (where == null)
                                    ? "WHERE " + tables.get(tf).get(k).replaceAll("\\sas.*", "")
                                            + " IS NOT NULL"
                                    : where + " AND " + tables.get(tf).get(k).replaceAll("\\sas.*", "")
                                            + " IS NOT NULL";
                        }
                    }

                    if (k.toLowerCase().contains("order")) {
                        if (k.toLowerCase().contains("orderdesc")) {
                            order = (order == null)
                                    ? "ORDER BY " + tables.get(tf).get(k).replaceAll("\\sas.*", "") + " ASC"
                                    : order;
                        } else {
                            order = (order == null)
                                    ? "ORDER BY " + tables.get(tf).get(k).replaceAll("\\sas.*", "") + " DESC"
                                    : order;
                        }
                    }

                    String field = tables.get(tf).get(k);
                    if (k.toLowerCase().contains("attr")) {
                        if (unicoderemove == true) {
                            field = "regexp_replace(trim(replace(regexp_replace(cast(" + field + " as text)"
                                    + ",'[^\\u0020-\\u007e,\\(\\);\\-\\[\\]]+',' '),'" + this.delimiter + "','"
                                    + this.replacedel + "')),'[\\r|\\n]+','   ','gm') as " + field;
                        } else {
                            field = "regexp_replace(trim(replace(cast(" + field + " as text),'" + this.delimiter
                                    + "','" + this.replacedel + "')),'[\\r|\\n]+','   ','gm')";
                        }

                        attrs = (attrs == null) ? field : attrs + "," + field;
                    }
                }

                select = (distinct == null) ? select : select.trim() + " " + distinct.trim() + ")";
                select += " " + attrs.trim();
                select += " FROM " + split[0].trim();
                select = (where == null) ? select : select.trim() + " " + where.trim();
                select = (group == null) ? select : select.trim() + " " + group.trim();
                select = (order == null) ? select : select.trim() + " " + order.trim();

                if (extracondition != null) {
                    select += (select.contains(" WHERE ") == true) ? " AND" + extracondition
                            : " WHERE " + extracondition;
                }

                select = select.trim();

                log.info("Dump Select Command: " + select);

                sql = "COPY  (" + select + ") TO STDOUT WITH DELIMITER '" + delimiter.trim()
                        + "' NULL as '' CSV HEADER";
                fjp.execute(new ToFile(sql, split[1].trim()));

                select = "SELECT ";
                distinct = null;
                attrs = null;
                where = null;
                group = null;
                order = null;
            } else {
                try {

                    throw new NoDataException("WARNING: Table " + split[0] + " has no Data");

                } catch (NoDataException e) {
                    e.printStackTrace();
                    if (tablesMustHave != null && tablesMustHave.contains(split[0])) {
                        log.error("Table is a Must Have Table by has not Data. Terminating!");
                        System.exit(-1);
                    }
                }
            }
        } else {
            try {
                throw new SQLMalformedException("WARNING: Table " + split[0] + " is missing");
            } catch (SQLMalformedException e) {
                e.printStackTrace();
            }
        }
    }

    try {
        fjp.awaitTermination(60000, TimeUnit.MILLISECONDS);
        fjp.shutdown();
    } catch (InterruptedException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

    if (dumped == 0) {
        log.info("No Data Found in any Table");
        System.exit(-1);
    }

}

From source file:com.hygenics.parser.JDump.java

private void toFile() {
    ArrayList<String> archs = new ArrayList<String>();
    List<Future<ArrayList<String>>> qfutures;
    Set<Callable<ArrayList<String>>> qcollect = new HashSet<Callable<ArrayList<String>>>(4);

    ForkJoinPool fjp = new ForkJoinPool((int) Math.ceil(Runtime.getRuntime().availableProcessors() * procnum));

    int dumped = 0;

    if (archive) {
        log.info("Cleaning");
        for (String k : fpaths.keySet()) {
            String fpath = "";

            for (String ofp : fpaths.get(k).keySet()) {
                fpath = ofp;
            }

            if (fpath.length() > 0) {
                String[] barr = fpath.split("\\/");
                String basefile = "";
                Archiver zip = new Archiver();
                for (int i = 0; i < barr.length - 1; i++) {
                    basefile += (i == 0) ? barr[i] : "/" + barr[i];
                }
                if (basefile.trim().length() > 0) {
                    zip.setBasedirectory(basefile);
                    zip.setZipDirectory(basefile + "archive.zip");
                    zip.setAvoidanceString(".zip|archive");
                    zip.setDelFiles(true);
                    zip.run();
                }
            }
        }
    }

    log.info("Dumping");
    for (String table : fpaths.keySet()) {
        int offset = 0;
        if (template.checkTable(table, table.split("\\.")[0])) {
            if (template.getCount(table) > 0) {
                log.info("Dumping for " + table);
                // get header
                String select = "SELECT * FROM " + table;
                String fpath = null;
                ArrayList<String> jsons;
                String condition;
                int w = 0;
                int start = offset;
                int chunksize = (int) Math.ceil(pullsize / qnum);

                // get fpath
                for (String ofp : fpaths.get(table).keySet()) {
                    start = fpaths.get(table).get(ofp);
                    fpath = ofp;
                }

                // perform write
                if (headers != null && fpath != null) {
                    List<String> headersList = headers.get(table);

                    String output = null;
                    boolean existed = true;

                    if (addFileDate) {
                        fpath = fpath
                                + Calendar.getInstance().getTime().toString().trim().replaceAll(":|\\s", "")
                                + ".txt";
                    }

                    // check to see if file should be created
                    if (!new File(fpath).exists()) {

                        try {
                            new File(fpath).createNewFile();
                        } catch (IOException e) {
                            e.printStackTrace();
                        }
                        existed = false;
                    }

                    // check to see if file must be recreated
                    if (!append) {

                        File f = new File(fpath);
                        f.delete();
                        try {
                            f.createNewFile();
                        } catch (IOException e) {
                            e.printStackTrace();
                        }
                    }

                    if (headersList != null && (append == false || existed == false)) {
                        for (String header : headersList) {
                            output = (output == null) ? StringEscapeUtils.unescapeXml(header)
                                    : output + delimeter + StringEscapeUtils.unescapeXml(header);
                        }
                    }

                    do {

                        // get records
                        jsons = new ArrayList<String>(pullsize);
                        log.info("Looking for Pages.");
                        for (int conn = 0; conn < qnum; conn++) {
                            // create condition
                            condition = " WHERE " + pullid + " >= " + (start + (conn * chunksize)) + " AND "
                                    + pullid + " < " + Integer.toString(start + (chunksize * (conn + 1)));

                            if (extracondition != null) {
                                condition += " " + extracondition.trim();
                            }

                            // get queries
                            qcollect.add(new SplitQuery(template, (select + condition)));
                            log.info("Fetching " + select + condition);
                        }

                        start += (chunksize * qnum);

                        qfutures = fjp.invokeAll(qcollect);

                        w = 0;
                        while (fjp.getActiveThreadCount() > 0 && fjp.isQuiescent() == false) {
                            w++;
                        }
                        log.info("Waited for " + w + " cycles");

                        for (Future<ArrayList<String>> f : qfutures) {
                            try {

                                ArrayList<String> test = f.get();
                                if (test != null) {
                                    if (test.size() > 0) {
                                        jsons.addAll(test);
                                    }
                                }

                                if (f.isDone() == false) {
                                    f.cancel(true);
                                }

                                f = null;
                            } catch (Exception e) {
                                log.warn("Encoding Error!");
                                e.printStackTrace();
                            }
                        }
                        qcollect = new HashSet<Callable<ArrayList<String>>>(4);
                        qfutures = null;
                        log.info("Finished Getting Pages");

                        // post records to the file
                        try (FileWriter fw = new FileWriter(new File(fpath), true)) {
                            // get and write headers

                            if (jsons.size() > 0) {
                                fw.write(output + "\n");
                                // write data
                                for (String json : jsons) {
                                    output = null;
                                    JsonObject jo = JsonObject.readFrom(json);
                                    if (jo.size() >= headersList.size()) { // allows trimming of table to key aspects
                                        output = null;

                                        for (String key : headers.get(table)) {

                                            if (jo.get(key.toLowerCase()) != null) {
                                                String data = StringEscapeUtils
                                                        .unescapeXml(jo.get(key.toLowerCase()).asString());

                                                if (replacementPattern != null) {
                                                    data = data.replaceAll(replacementPattern, "");
                                                    data = data.replace(delimeter, delimreplace);
                                                }

                                                output = (output == null)
                                                        ? data.replaceAll("[^\u0020-\u0070 ]+", "")
                                                        : output + delimeter
                                                                + data.replaceAll("[^\u0020-\u0070 ]+", "");
                                            } else {
                                                output += delimeter;
                                            }
                                        }

                                        if (output != null && output.trim().length() > headersList.size()) {
                                            fw.write(output + "\n");
                                        }
                                    } else {
                                        if (jsons.size() == 0) {
                                            Log.info(
                                                    "Number of Headers and Keys from Json Array and Headers List Impossible to Match");
                                            try {
                                                throw new MismatchException(
                                                        "Number of Headers: " + headersList.size()
                                                                + " && Number of Keys: " + jo.size());
                                            } catch (MismatchException e) {
                                                e.printStackTrace();
                                            }
                                        }
                                    }

                                    output = null;
                                }
                            } else {
                                log.info("EOF FOUND! No New Records in This Iteration....Stopping.");
                            }
                        } catch (IOException e) {
                            e.printStackTrace();
                        }

                    } while (jsons.size() > 0);

                } else {
                    try {
                        throw new NullPointerException(
                                "No Headers Input to Class. Please Create the Requisite Map.");
                    } catch (NullPointerException e) {
                        e.printStackTrace();
                    }
                }
                dumped += 1;
            } else {
                try {
                    throw new NoDataException("No Data Found in Table " + table);
                } catch (NoDataException e) {
                    e.printStackTrace();
                }
            }
        } else {
            log.info("Missing Table " + table);
            try {
                throw new NullPointerException("Table " + table + " Does Not Exist!!!");
            } catch (NullPointerException e) {
                e.printStackTrace();
            }
        }
    } // end LOOP

    if (!fjp.isShutdown()) {
        fjp.shutdownNow();
    }

    if (dumped == 0) {
        log.error("No Data Found in Any Table");
        System.exit(-1);
    }
}

From source file:MSUmpire.PeptidePeakClusterDetection.PDHandlerBase.java

protected void FindAllMzTracePeakCurves(ScanCollection scanCollection) throws IOException {
    //        final HashSet<String> IncludedHashMap = new HashSet<>();

    //        Logger.getRootLogger().info("Processing all scans to detect possible m/z peak curves....");
    Logger.getRootLogger().info("Processing all scans to detect possible m/z peak curves and");
    Logger.getRootLogger().info("Smoothing detected signals......");
    float preRT = 0f;

    //Loop for each scan in the ScanCollection
    final ArrayList<ForkJoinTask<ArrayList<PeakCurve>>> ftemp = new ArrayList<>();
    final ForkJoinPool fjp = new ForkJoinPool(NoCPUs);
    final int idx_end = scanCollection.GetScanNoArray(MSlevel).size();

    final int[] ia = new int[idx_end + 1];
    ia[0] = 0;
    for (int idx = 0; idx < idx_end; idx++) {
        final int scanNO = scanCollection.GetScanNoArray(MSlevel).get(idx);
        final ScanData sd = scanCollection.GetScan(scanNO);
        ia[idx + 1] = sd.Data.size() + ia[idx];
    }

    final boolean[] included = new boolean[ia[ia.length - 1]];
    if (step == -1)
        step = fjp.getParallelism() * 32;
    long peakCurvesCount = 0;
    for (int idx = 0; idx < idx_end; idx++) {
        int scanNO = scanCollection.GetScanNoArray(MSlevel).get(idx);
        ScanData scanData = scanCollection.GetScan(scanNO);

        //If we are doing targeted peak detection and the RT of current scan is not in the range of targeted list, jump to the next scan 
        if (TargetedOnly && !FoundInInclusionRTList(scanData.RetentionTime)) {
            continue;
        }
        if (idx == 0) {
            preRT = scanData.RetentionTime - 0.01f;
        }
        for (int i = 0; i < scanData.PointCount(); i++) {
            XYData peak = scanData.Data.get(i);
            //If we are doing targeted peak detection and the RT and m/z of current peak is not in the range of targeted list, jump to the next peak 
            if (TargetedOnly && !FoundInInclusionMZList(scanData.RetentionTime, peak.getX())) {
                continue;
            }

            if (peak.getX() < parameter.MinMZ) {
                continue;
            }

            //Check if the current peak has been included in previously developed peak curves
            //                if (!IncludedHashMap.contains(scanNO + "_" + peak.getX())) {//The peak hasn't been included
            final int id_scanNO_peak = int_id(ia, idx, i);
            if (!included[id_scanNO_peak]) {//The peak hasn't been included
                //The current peak will be the starting peak of a new peak curve
                //Add it to the hash table

                //                    IncludedHashMap.add(scanNO + "_" + peak.getX());
                included[id_scanNO_peak] = true;

                float startmz = peak.getX();
                float startint = peak.getY();

                //Find the maximum peak within PPM window as the starting peak
                for (int j = i + 1; j < scanData.PointCount(); j++) {
                    XYData currentpeak = scanData.Data.get(j);
                    final int id_scanNO_currentpeak = int_id(ia, idx, j);
                    if (!included[id_scanNO_currentpeak]) {
                        //                        if (!IncludedHashMap.contains(scanNO + "_" + currentpeak.getX())) {
                        if (InstrumentParameter.CalcPPM(currentpeak.getX(), startmz) <= PPM) {
                            included[id_scanNO_currentpeak] = true;
                            //                                IncludedHashMap.add(scanNO + "_" + currentpeak.getX());

                            if (currentpeak.getY() >= startint) {
                                startmz = currentpeak.getX();
                                startint = currentpeak.getY();
                            }
                        } else {
                            break;
                        }
                    }
                }

                //Initialize a new peak curve
                PeakCurve Peakcurve = new PeakCurve(parameter);
                //Add a background peak
                Peakcurve.AddPeak(preRT, startmz, scanData.background);
                //Add the starting peak
                Peakcurve.AddPeak(scanData.RetentionTime, startmz, startint);
                Peakcurve.StartScan = scanNO;

                int missedScan = 0;
                float endrt = scanData.RetentionTime;
                int endScan = scanData.ScanNum;
                float bk = 0f;

                //Starting from the next scan, find the following peaks given the starting peak
                for (int idx2 = idx + 1; idx2 < scanCollection.GetScanNoArray(MSlevel).size()
                        && (missedScan < parameter.NoMissedScan /*|| (TargetedOnly && Peakcurve.RTWidth()<parameter.MaxCurveRTRange)*/); idx2++) {
                    int scanNO2 = scanCollection.GetScanNoArray(MSlevel).get(idx2);
                    ScanData scanData2 = scanCollection.GetScan(scanNO2);

                    endrt = scanData2.RetentionTime;
                    endScan = scanData2.ScanNum;
                    bk = scanData2.background;
                    float currentmz = 0f;
                    float currentint = 0f;

                    //If the scan is empty
                    if (scanData2.PointCount() == 0) {
                        if (parameter.FillGapByBK) {
                            Peakcurve.AddPeak(scanData2.RetentionTime, Peakcurve.TargetMz,
                                    scanData2.background);
                        }
                        missedScan++;
                        continue;
                    }

                    //Find the m/z index 
                    int mzidx = scanData2.GetLowerIndexOfX(Peakcurve.TargetMz);
                    for (int pkidx = mzidx; pkidx < scanData2.Data.size(); pkidx++) {
                        XYData currentpeak = scanData2.Data.get(pkidx);
                        if (currentpeak.getX() < parameter.MinMZ) {
                            continue;
                        }
                        //Check if the peak has been included or not
                        final int int_id_scanNO2_currentpeak = int_id(ia, idx2, pkidx);
                        //                            if (!included.get(int_id_scanNO2_currentpeak)) {
                        if (!included[int_id_scanNO2_currentpeak]) {
                            if (InstrumentParameter.CalcPPM(currentpeak.getX(), Peakcurve.TargetMz) > PPM) {
                                if (currentpeak.getX() > Peakcurve.TargetMz) {
                                    break;
                                }
                            } else {
                                //////////The peak is in the ppm window, select the highest peak
                                included[int_id_scanNO2_currentpeak] = true;
                                //                                    IncludedHashMap.add(scanNO2 + "_" + currentpeak.getX());
                                if (currentint < currentpeak.getY()) {
                                    currentmz = currentpeak.getX();
                                    currentint = currentpeak.getY();
                                }
                            }
                        }
                    }

                    //No peak in the PPM window has been found
                    if (currentmz == 0f) {
                        if (parameter.FillGapByBK) {
                            Peakcurve.AddPeak(scanData2.RetentionTime, Peakcurve.TargetMz,
                                    scanData2.background);
                        }
                        missedScan++;
                    } else {
                        missedScan = 0;
                        Peakcurve.AddPeak(scanData2.RetentionTime, currentmz, currentint);
                    }
                }
                Peakcurve.AddPeak(endrt, Peakcurve.TargetMz, bk);
                Peakcurve.EndScan = endScan;

                //First check if the peak curve is in targeted list
                if (FoundInInclusionList(Peakcurve.TargetMz, Peakcurve.StartRT(), Peakcurve.EndRT())) {
                    //                        LCMSPeakBase.UnSortedPeakCurves.add(Peakcurve);
                    ++peakCurvesCount;
                    ftemp.add(fjp.submit(new PeakCurveSmoothingUnit(Peakcurve, parameter)));
                    //Then check if the peak curve passes the criteria
                } else if (Peakcurve.GetRawSNR() > LCMSPeakBase.SNR
                        && Peakcurve.GetPeakList().size() >= parameter.MinPeakPerPeakCurve + 2) {
                    //                        LCMSPeakBase.UnSortedPeakCurves.add(Peakcurve);
                    ++peakCurvesCount;
                    ftemp.add(fjp.submit(new PeakCurveSmoothingUnit(Peakcurve, parameter)));
                } else {
                    Peakcurve = null;
                }
            }
        }
        preRT = scanData.RetentionTime;
        if (ReleaseScans) {
            scanData.dispose();
        }
        /** the if statement below does PeakCurveSmoothing() and ClearRawPeaks()
         */
        final boolean last_iter = idx + 1 == idx_end;
        if (ftemp.size() == step || last_iter) {
            final List<ForkJoinTask<ArrayList<PeakCurve>>> ftemp_sublist_view = last_iter ? ftemp
                    : ftemp.subList(0, step / 2);
            for (final Future<ArrayList<PeakCurve>> f : ftemp_sublist_view) {
                try {
                    LCMSPeakBase.UnSortedPeakCurves.addAll(f.get());
                } catch (InterruptedException | ExecutionException e) {
                    throw new RuntimeException(e);
                }
            }
            ftemp_sublist_view.clear();
            if (!last_iter && fjp.getActiveThreadCount() < fjp.getParallelism()) {
                //                    System.out.println("PeakCurveSmoothingUnit: fjp.getActiveThreadCount()\t"+fjp.getActiveThreadCount()+"\t"+step);
                step *= 2;
            }
        }
    }
    assert ftemp.isEmpty();
    //System.out.print("PSM removed (PeakCurve generation):" + PSMRemoved );

    int i = 1;
    //Assign peak curve index
    for (PeakCurve peakCurve : LCMSPeakBase.UnSortedPeakCurves) {
        peakCurve.Index = i++;
    }

    System.gc();
    //        Logger.getRootLogger().info(LCMSPeakBase.UnSortedPeakCurves.size() + " Peak curves found (Memory usage:" + Math.round((Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()) / 1048576) + "MB)");
    Logger.getRootLogger()
            .info(peakCurvesCount + " Peak curves found (Memory usage:"
                    + Math.round(
                            (Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()) / 1048576)
                    + "MB)");
}