Example usage for java.util List addAll

Introduction

On this page you can find example usage of java.util List addAll.

Prototype

boolean addAll(Collection<? extends E> c);

Document

Appends all of the elements in the specified collection to the end of this list, in the order that they are returned by the specified collection's iterator (optional operation).
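
A minimal, self-contained sketch of the behavior described above (the list contents are made up for illustration):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class AddAllDemo {
    public static void main(String[] args) {
        List<String> target = new ArrayList<>(Arrays.asList("a", "b"));
        List<String> extra = Arrays.asList("c", "d");

        // Appends every element of 'extra' to the end of 'target',
        // in the order returned by extra's iterator.
        boolean changed = target.addAll(extra);

        System.out.println(changed); // true: the list was modified
        System.out.println(target);  // [a, b, c, d]
    }
}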

Usage

From source file:com.jlgranda.fede.ejb.mail.reader.FacturaElectronicaMailReader.java

public static void main(String[] args) throws MessagingException, IOException, Exception {

    FacturaElectronicaMailReader famr = new FacturaElectronicaMailReader();
    String server = "jlgranda.com";
    String username = "1104499049@jlgranda.com";
    String password = "FKR5oznrtVwnEirkrbl4rmeba0mFCmYh";

    String proto = "TLS";
    IMAPClient client = new IMAPClient(server, username, password);
    String contentType = null;
    StringReader reader = null;
    Address[] fromAddress = null;
    String messageContent = "";
    String attachFiles = "";
    org.apache.james.mime4j.dom.Multipart multiPart = null;
    MimeBodyPart part = null;
    int i = 0;
    String partContentType = null;
    String partName = null;
    String from = "";
    String subject = "";
    String sentDate = "";
    int contadorFacturasLeidas = 0;
    List<FacturaReader> result = new ArrayList<>(); // Store invoice links, if applicable

    boolean facturaEncontrada = false;
    int index = 0;
    int numberOfParts = 0;
    String[] token = null;
    MessageBuilder builder = new DefaultMessageBuilder();
    ByteArrayOutputStream os = null;
    EmailHelper emailHelper = new EmailHelper();
    for (Message message : client.getMessages("inbox", false)) {
        //System.out.println("Message #" + email.fullMail(message) + ":");

        //attachFiles = "";
        fromAddress = message.getFrom();
        from = fromAddress[0].toString();
        subject = message.getSubject();
        sentDate = message.getSentDate() != null ? message.getSentDate().toString() : "";
        if (subject.contains(
                "Fwd: Ghost - Doc. Electrónico: 2201201601180145025300120010320000336391234567819")) {
            System.out.println("--------------------------------------" + (index++)
                    + "-----------------------------------------");
            System.out.println("From: " + fromAddress);
            System.out.println("Subject: " + subject);
            try {
                org.apache.james.mime4j.dom.Message mime4jMessage = builder
                        .parseMessage(new ByteArrayInputStream(emailHelper.fullMail(message).getBytes()));
                result.addAll(famr.handleMessage(mime4jMessage));
            } catch (org.apache.james.mime4j.MimeIOException mioe) {
                mioe.printStackTrace();
            } catch (org.apache.james.mime4j.MimeException me) {
                me.printStackTrace();
            }
            System.out
                    .println("-------------------------------------------------------------------------------");
        }
    }
    System.err.println("Facturas encontradas>> " + result.size());
    client.close();

}

From source file:com.ibm.watson.catalyst.corpus.tfidf.ApplyTemplate.java

public static void main(String[] args) {

    System.out.println("Loading Corpus.");
    JsonNode root;
    TermCorpus c;
    JsonNode documents;
    try (InputStream in = new FileInputStream(new File("tfidf-health-1.json"))) {
        root = MAPPER.readTree(in);
        documents = root.get("documents");
        TermCorpusBuilder cb = new TermCorpusBuilder();
        cb.setDocumentCombiner(0, 0);
        cb.setJson(new File("health-corpus.json"));
        c = cb.build();
    } catch (IOException e) {
        // FileNotFoundException and JsonProcessingException are both IOExceptions,
        // so a single catch block covers all three failure modes.
        e.printStackTrace();
        return;
    }
    System.out.println("Corpus loaded.");

    List<TemplateMatch> matches = new ArrayList<TemplateMatch>();
    Iterator<TermDocument> documentIterator = c.getDocuments().iterator();

    int index = 0;
    for (JsonNode document : documents) {
        Pattern p1 = Template.getTemplatePattern(document, "\\b(an? |the )?(\\w+ ){0,4}",
                "( \\w+)?(?= is (an?|one|the)\\b)");
        if (p1.toString().equals("\\b(an? |the )?(\\w+ ){0,4}()( \\w+)?(?= is (an?|one|the)\\b)"))
            continue;
        Pattern p2 = Template.getTemplatePattern(document, "^(\\w+ ){0,2}",
                "( \\w+){0,1}?(?=( can| may)? causes?\\b)");
        Pattern p3 = Template.getTemplatePattern(document, "(?<=the use of )(\\w+ ){0,3}",
                "( \\w+| ){0,2}?(?=( (and|does|in|for|can|is|as|to|of)\\b|\\.))");
        Pattern p4 = Template.getTemplatePattern(document, "^(\\w+ ){0,3}",
                "( \\w+){0,1}(?=( can| may) leads? to\\b)");
        Pattern p5 = Template.getTemplatePattern(document, "(?<=\\bthe risk of )(\\w+ ){0,3}",
                "( (disease|stroke|attack|cancer))?\\b");
        Pattern p6 = Template.getTemplatePattern(document, "(\\w{3,} ){0,3}",
                "( (disease|stroke|attack|cancer))?(?= is caused by\\b)");
        Pattern p7 = Template.getTemplatePattern(document, "(?<= is caused by )(\\w+ ){0,10}", "");
        Pattern p8 = Template.getTemplatePattern(document, "\\b", "( \\w{4,})(?= can be used)");
        Pattern p9 = Template.getTemplatePattern(document, "(?<= can be used )(\\w+ ){0,10}", "\\b");
        TermDocument d = documentIterator.next();

        DocumentMatcher dm = new DocumentMatcher(d);
        matches.addAll(dm.getParagraphMatches(p1, "What is ", "?"));
        matches.addAll(dm.getParagraphMatches(p2, "What does ", " cause?"));
        matches.addAll(dm.getParagraphMatches(p3, "How is ", " used?"));
        matches.addAll(dm.getParagraphMatches(p4, "What can ", " lead to?"));
        matches.addAll(dm.getParagraphMatches(p5, "What impacts the risk of ", "?"));
        matches.addAll(dm.getParagraphMatches(p6, "What causes ", "?"));
        matches.addAll(dm.getParagraphMatches(p7, "What is caused by ", "?"));
        matches.addAll(dm.getParagraphMatches(p8, "How can ", " be used?"));
        matches.addAll(dm.getParagraphMatches(p9, "What can be used ", "?"));
        System.out.print("Progress: " + ((100 * ++index) / documents.size()) + "%\r");
    }
    System.out.println();

    List<TemplateMatch> condensedMatches = new ArrayList<TemplateMatch>();

    for (TemplateMatch match : matches) {
        boolean merged = false;
        for (TemplateMatch baseMatch : condensedMatches) {
            if (match.sameQuestion(baseMatch)) {
                baseMatch.addAnswers(match);
                merged = true;
                break;
            }
        }
        // Only append as a new entry when no existing entry shares the question;
        // otherwise the answers were already merged above.
        if (!merged) {
            condensedMatches.add(match);
        }
    }

    try (BufferedWriter bw = new BufferedWriter(new FileWriter("health-questions.txt"))) {
        for (TemplateMatch match : condensedMatches) {
            bw.write(match.toString());
        }
        bw.write("\n");
    } catch (IOException e) {
        e.printStackTrace();
    }

    System.out.println("Done and generated: " + condensedMatches.size());

}
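
The condensing loop above is a merge-or-append idiom: fold a match into an existing entry with the same question, otherwise append it as new. A minimal runnable sketch of the pattern, where Match is a hypothetical stand-in for TemplateMatch with sameQuestion and addAnswers mirroring the methods used in the example:

import java.util.ArrayList;
import java.util.List;

public class CondenseDemo {
    // Hypothetical stand-in for TemplateMatch: one question with its answers.
    static class Match {
        final String question;
        final List<String> answers = new ArrayList<>();

        Match(String question, String answer) {
            this.question = question;
            answers.add(answer);
        }

        boolean sameQuestion(Match other) {
            return question.equals(other.question);
        }

        void addAnswers(Match other) {
            answers.addAll(other.answers);
        }
    }

    public static void main(String[] args) {
        List<Match> matches = new ArrayList<>();
        matches.add(new Match("What is X?", "a thing"));
        matches.add(new Match("What is X?", "another thing"));
        matches.add(new Match("What causes Y?", "Z"));

        List<Match> condensed = new ArrayList<>();
        for (Match m : matches) {
            boolean merged = false;
            for (Match base : condensed) {
                if (m.sameQuestion(base)) {
                    base.addAnswers(m); // fold duplicate questions together
                    merged = true;
                    break;
                }
            }
            if (!merged) {
                condensed.add(m);
            }
        }
        System.out.println(condensed.size()); // 2
    }
}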

From source file:com.act.lcms.v2.MZCollisionCounter.java

public static void main(String[] args) throws Exception {
    CLIUtil cliUtil = new CLIUtil(MassChargeCalculator.class, HELP_MESSAGE, OPTION_BUILDERS);
    CommandLine cl = cliUtil.parseCommandLine(args);

    File inputFile = new File(cl.getOptionValue(OPTION_INPUT_INCHI_LIST));
    if (!inputFile.exists()) {
        cliUtil.failWithMessage("Input file at does not exist at %s", inputFile.getAbsolutePath());
    }

    List<MassChargeCalculator.MZSource> sources = new ArrayList<>();
    try (BufferedReader reader = new BufferedReader(new FileReader(inputFile))) {
        String line;
        while ((line = reader.readLine()) != null) {
            line = line.trim();
            sources.add(new MassChargeCalculator.MZSource(line));
            if (sources.size() % 1000 == 0) {
                LOGGER.info("Loaded %d sources from input file", sources.size());
            }
        }
    }

    Set<String> considerIons = Collections.emptySet();
    if (cl.hasOption(OPTION_ONLY_CONSIDER_IONS)) {
        List<String> ions = Arrays.asList(cl.getOptionValues(OPTION_ONLY_CONSIDER_IONS));
        LOGGER.info("Only considering ions for m/z calculation: %s", StringUtils.join(ions, ", "));
        considerIons = new HashSet<>(ions);
    }

    TSVWriter<String, Long> tsvWriter = new TSVWriter<>(Arrays.asList("collisions", "count"));
    tsvWriter.open(new File(cl.getOptionValue(OPTION_OUTPUT_FILE)));

    try {
        LOGGER.info("Loaded %d sources in total from input file", sources.size());

        MassChargeCalculator.MassChargeMap mzMap = MassChargeCalculator.makeMassChargeMap(sources,
                considerIons);

        if (!cl.hasOption(OPTION_COUNT_WINDOW_INTERSECTIONS)) {
            // Do an exact analysis of the m/z collisions if windowing is not specified.

            LOGGER.info("Computing precise collision histogram.");
            Iterable<Double> mzs = mzMap.ionMZIter();
            Map<Integer, Long> collisionHistogram = histogram(
                    StreamSupport.stream(mzs.spliterator(), false).map(mz -> { // See comment about Iterable below.
                        try {
                            return mzMap.ionMZToMZSources(mz).size();
                        } catch (NoSuchElementException e) {
                            LOGGER.error("Caught no such element exception for mz %f: %s", mz, e.getMessage());
                            throw e;
                        }
                    }));
            List<Integer> sortedCollisions = new ArrayList<>(collisionHistogram.keySet());
            Collections.sort(sortedCollisions);
            for (Integer collision : sortedCollisions) {
                tsvWriter.append(new HashMap<String, Long>() {
                    {
                        put("collisions", collision.longValue());
                        put("count", collisionHistogram.get(collision));
                    }
                });
            }
        } else {
            /* After some deliberation (thanks Gil!), the windowed variant of this calculation counts the number of
             * structures whose 0.01 Da m/z windows (for some set of ions) overlap with each other.
             *
             * For example, let's assume we have five total input structures, and are only searching for one ion.  Let's
             * also assume that three of those structures have m/z A and the remaining two have m/z B.  The windows might
             * look like this in the m/z domain:
             * |----A----|
             *        |----B----|
             * Because A represents three structures and overlaps with B, which represents two, we assign A a count of 5--
             * this is the number of structures we believe could fall into the range of A given our current peak calling
             * approach.  Similarly, B is assigned a count of 5, as the possibility for collision/confusion is symmetric.
             *
             * Note that this is an over-approximation of collisions, as we could more precisely only consider intersections
             * when the exact m/z of B falls within the window around A and vice versa.  However, because we have observed
             * cases where the MS sensor doesn't report structures at exactly the m/z we predict, we employ this weaker
             * definition of intersection to give a slightly pessimistic view of what confusions might be possible. */
            // Compute windows for every m/z.  We don't care about the original mz values since we just want the count.
            List<Double> mzs = mzMap.ionMZsSorted();

            final Double windowHalfWidth;
            if (cl.hasOption(OPTION_WINDOW_HALFWIDTH)) {
                // Don't use get with default for this option, as we want the exact FP value of the default tolerance.
                windowHalfWidth = Double.valueOf(cl.getOptionValue(OPTION_WINDOW_HALFWIDTH));
            } else {
                windowHalfWidth = DEFAULT_WINDOW_TOLERANCE;
            }

            /* Window = (lower bound, upper bound), counter of represented m/z's that collide with this window, and number
             * of representative structures (which will be used in counting collisions). */
            LinkedList<CollisionWindow> allWindows = new LinkedList<CollisionWindow>() {
                {
                    for (Double mz : mzs) {
                        // CPU for memory trade-off: don't re-compute the window bounds over and over and over and over and over.
                        try {
                            add(new CollisionWindow(mz, windowHalfWidth, mzMap.ionMZToMZSources(mz).size()));
                        } catch (NoSuchElementException e) {
                            LOGGER.error("Caught no such element exception for mz %f: %s", mz, e.getMessage());
                            throw e;
                        }
                    }
                }
            };

            // Sweep line time!  The window ranges are the interesting points.  We just accumulate overlap counts as we go.
            LinkedList<CollisionWindow> workingSet = new LinkedList<>();
            List<CollisionWindow> finished = new LinkedList<>();

            while (allWindows.size() > 0) {
                CollisionWindow thisWindow = allWindows.pop();
                // Remove any windows from the working set that don't overlap with the next window.
                while (workingSet.size() > 0 && workingSet.peekFirst().getMaxMZ() < thisWindow.getMinMZ()) {
                    finished.add(workingSet.pop());
                }

                for (CollisionWindow w : workingSet) {
                    /* Add the size of the new overlapping window's structure count to each of the windows in the working set,
                     * which represents the number of possible confused structures that fall within the overlapping region.
                     * We exclude the window itself as it should already have counted the colliding structures it represents. */
                    w.getAccumulator().add(thisWindow.getStructureCount());

                    /* Reciprocally, add the structure counts of all windows with which the current window overlaps to it. */
                    thisWindow.getAccumulator().add(w.getStructureCount());
                }

                // Now that accumulation is complete, we can safely add the current window.
                workingSet.add(thisWindow);
            }

            // All the interesting events are done, so drop the remaining windows into the finished set.
            finished.addAll(workingSet);

            Map<Long, Long> collisionHistogram = histogram(
                    finished.stream().map(w -> w.getAccumulator().longValue()));
            List<Long> sortedCollisions = new ArrayList<>(collisionHistogram.keySet());
            Collections.sort(sortedCollisions);
            for (Long collision : sortedCollisions) {
                tsvWriter.append(new HashMap<String, Long>() {
                    {
                        put("collisions", collision);
                        put("count", collisionHistogram.get(collision));
                    }
                });
            }
        }
    } finally {
        if (tsvWriter != null) {
            tsvWriter.close();
        }
    }
}
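
The windowed branch above is a sweep line over windows sorted by m/z: windows that end before the next one begins are retired, every window still open overlaps the newcomer, and finished.addAll(workingSet) drains whatever remains when the sweep ends. A minimal sketch of the same sweep, where Window is a made-up stand-in for CollisionWindow:

import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;

public class SweepDemo {
    // Made-up stand-in for CollisionWindow: a fixed-width m/z window.
    static class Window {
        final double lo, hi;
        int overlaps = 0; // how many other windows intersect this one

        Window(double mz, double halfWidth) {
            this.lo = mz - halfWidth;
            this.hi = mz + halfWidth;
        }
    }

    public static void main(String[] args) {
        double halfWidth = 0.01;
        // Must be sorted ascending; with a fixed width both bounds are then sorted,
        // so checking only the first working-set entry below is sufficient.
        double[] sortedMZs = { 1.000, 1.005, 2.000 };

        LinkedList<Window> allWindows = new LinkedList<>();
        for (double mz : sortedMZs) {
            allWindows.add(new Window(mz, halfWidth));
        }

        LinkedList<Window> workingSet = new LinkedList<>();
        List<Window> finished = new ArrayList<>();

        while (!allWindows.isEmpty()) {
            Window next = allWindows.pop();
            // Retire windows that end before the next one begins.
            while (!workingSet.isEmpty() && workingSet.peekFirst().hi < next.lo) {
                finished.add(workingSet.pop());
            }
            // Everything still in the working set overlaps 'next'.
            for (Window w : workingSet) {
                w.overlaps++;
                next.overlaps++;
            }
            workingSet.add(next);
        }
        finished.addAll(workingSet); // drain whatever is still open

        for (Window w : finished) {
            System.out.printf("[%.3f, %.3f] overlaps=%d%n", w.lo, w.hi, w.overlaps);
        }
    }
}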

From source file:com.act.lcms.db.analysis.PathwayProductAnalysis.java

public static void main(String[] args) throws Exception {
    Options opts = new Options();
    for (Option.Builder b : OPTION_BUILDERS) {
        opts.addOption(b.build());
    }

    CommandLine cl = null;
    try {
        CommandLineParser parser = new DefaultParser();
        cl = parser.parse(opts, args);
    } catch (ParseException e) {
        System.err.format("Argument parsing failed: %s\n", e.getMessage());
        HELP_FORMATTER.printHelp(LoadPlateCompositionIntoDB.class.getCanonicalName(), HELP_MESSAGE, opts, null,
                true);
        System.exit(1);
    }

    if (cl.hasOption("help")) {
        HELP_FORMATTER.printHelp(LoadPlateCompositionIntoDB.class.getCanonicalName(), HELP_MESSAGE, opts, null,
                true);
        return;
    }

    File lcmsDir = new File(cl.getOptionValue(OPTION_DIRECTORY));
    if (!lcmsDir.isDirectory()) {
        System.err.format("File at %s is not a directory\n", lcmsDir.getAbsolutePath());
        HELP_FORMATTER.printHelp(LoadPlateCompositionIntoDB.class.getCanonicalName(), HELP_MESSAGE, opts, null,
                true);
        System.exit(1);
    }

    Double fontScale = null;
    if (cl.hasOption("font-scale")) {
        try {
            fontScale = Double.parseDouble(cl.getOptionValue("font-scale"));
        } catch (IllegalArgumentException e) {
            System.err.format("Argument for font-scale must be a floating point number.\n");
            System.exit(1);
        }
    }

    try (DB db = DB.openDBFromCLI(cl)) {
        Set<Integer> takeSamplesFromPlateIds = null;
        if (cl.hasOption(OPTION_FILTER_BY_PLATE_BARCODE)) {
            String[] plateBarcodes = cl.getOptionValues(OPTION_FILTER_BY_PLATE_BARCODE);
            System.out.format("Considering only sample wells in plates: %s\n",
                    StringUtils.join(plateBarcodes, ", "));
            takeSamplesFromPlateIds = new HashSet<>(plateBarcodes.length);
            for (String plateBarcode : plateBarcodes) {
                Plate p = Plate.getPlateByBarcode(db, plateBarcode);
                if (p == null) {
                    System.err.format("WARNING: unable to find plate in DB with barcode %s\n", plateBarcode);
                } else {
                    takeSamplesFromPlateIds.add(p.getId());
                }
            }
            // Allow filtering on barcode even if we couldn't find any in the DB.
        }

        System.out.format("Loading/updating LCMS scan files into DB\n");
        ScanFile.insertOrUpdateScanFilesInDirectory(db, lcmsDir);

        System.out.format("Processing LCMS scans\n");
        Pair<List<LCMSWell>, Set<Integer>> positiveWellsAndPlateIds = Utils.extractWellsAndPlateIds(db,
                cl.getOptionValues(OPTION_STRAINS), cl.getOptionValues(OPTION_CONSTRUCT),
                takeSamplesFromPlateIds, false);
        List<LCMSWell> positiveWells = positiveWellsAndPlateIds.getLeft();
        if (positiveWells.size() == 0) {
            throw new RuntimeException("Found no LCMS wells for specified strains/constructs");
        }
        // Only take negative samples from the plates where we found the positive samples.
        Pair<List<LCMSWell>, Set<Integer>> negativeWellsAndPlateIds = Utils.extractWellsAndPlateIds(db,
                cl.getOptionValues(OPTION_NEGATIVE_STRAINS), cl.getOptionValues(OPTION_NEGATIVE_CONSTRUCTS),
                positiveWellsAndPlateIds.getRight(), true);
        List<LCMSWell> negativeWells = negativeWellsAndPlateIds.getLeft();
        if (negativeWells == null || negativeWells.size() == 0) {
            System.err.format("WARNING: no valid negative samples found in same plates as positive samples\n");
        }

        // Extract the chemicals in the pathway and their product masses, then look up info on those chemicals
        List<Pair<ChemicalAssociatedWithPathway, Double>> productMasses = Utils
                .extractMassesForChemicalsAssociatedWithConstruct(db, cl.getOptionValue(OPTION_CONSTRUCT));
        List<Pair<String, Double>> searchMZs = new ArrayList<>(productMasses.size());
        List<ChemicalAssociatedWithPathway> pathwayChems = new ArrayList<>(productMasses.size());
        for (Pair<ChemicalAssociatedWithPathway, Double> productMass : productMasses) {
            String chemName = productMass.getLeft().getChemical();
            searchMZs.add(Pair.of(chemName, productMass.getRight()));
            pathwayChems.add(productMass.getLeft());
        }
        System.out.format("Searching for intermediate/side-reaction products:\n");
        for (Pair<String, Double> searchMZ : searchMZs) {
            System.out.format("  %s: %.3f\n", searchMZ.getLeft(), searchMZ.getRight());
        }

        // Look up the standard by name.
        List<StandardWell> standardWells = new ArrayList<>();
        if (cl.hasOption(OPTION_STANDARD_WELLS)) {
            Plate standardPlate = Plate.getPlateByBarcode(db, cl.getOptionValue(OPTION_STANDARD_PLATE_BARCODE));
            Map<Integer, StandardWell> pathwayIdToStandardWell = extractStandardWellsFromOptionsList(db,
                    pathwayChems, cl.getOptionValues(OPTION_STANDARD_WELLS), standardPlate);
            for (ChemicalAssociatedWithPathway c : pathwayChems) { // TODO: we can avoid this loop.
                StandardWell well = pathwayIdToStandardWell.get(c.getId());
                if (well != null) {
                    standardWells.add(well);
                }
            }
        } else {
            for (ChemicalAssociatedWithPathway c : pathwayChems) {
                String standardName = c.getChemical();
                System.out.format("Searching for well containing standard %s\n", standardName);
                List<StandardWell> wells = StandardIonAnalysis.getStandardWellsForChemical(db, c.getChemical());
                if (wells != null) {
                    standardWells.addAll(wells);
                }
            }
        }

        boolean useFineGrainedMZ = cl.hasOption("fine-grained-mz");
        boolean useSNR = cl.hasOption(OPTION_USE_SNR);

        /* Process the standard, positive, and negative wells, producing ScanData containers that will allow them to be
         * iterated over for graph writing. We do not need to specify granular includeIons and excludeIons since
         * this would not take advantage of our caching strategy which uses a list of metlin ions as an index. */
        HashMap<Integer, Plate> plateCache = new HashMap<>();
        Pair<List<ScanData<StandardWell>>, Double> allStandardScans = AnalysisHelper.processScans(db, lcmsDir,
                searchMZs, ScanData.KIND.STANDARD, plateCache, standardWells, useFineGrainedMZ, EMPTY_SET,
                EMPTY_SET, useSNR);
        Pair<List<ScanData<LCMSWell>>, Double> allPositiveScans = AnalysisHelper.processScans(db, lcmsDir,
                searchMZs, ScanData.KIND.POS_SAMPLE, plateCache, positiveWells, useFineGrainedMZ, EMPTY_SET,
                EMPTY_SET, useSNR);
        Pair<List<ScanData<LCMSWell>>, Double> allNegativeScans = AnalysisHelper.processScans(db, lcmsDir,
                searchMZs, ScanData.KIND.NEG_CONTROL, plateCache, negativeWells, useFineGrainedMZ, EMPTY_SET,
                EMPTY_SET, useSNR);

        String fmt = "pdf";
        String outImg = cl.getOptionValue(OPTION_OUTPUT_PREFIX) + "." + fmt;
        String outData = cl.getOptionValue(OPTION_OUTPUT_PREFIX) + ".data";
        String outAnalysis = cl.getOptionValue(OPTION_OUTPUT_PREFIX) + ".tsv";

        System.err.format("Writing combined scan data to %s and graphs to %s\n", outData, outImg);
        String plottingDirectory = cl.getOptionValue(OPTION_PLOTTING_DIR);

        List<ScanData<LCMSWell>> posNegWells = new ArrayList<>();
        posNegWells.addAll(allPositiveScans.getLeft());
        posNegWells.addAll(allNegativeScans.getLeft());

        Map<Integer, String> searchIons;
        if (cl.hasOption(OPTION_PATHWAY_SEARCH_IONS)) {
            searchIons = extractPathwayStepIons(pathwayChems, cl.getOptionValues(OPTION_PATHWAY_SEARCH_IONS),
                    cl.getOptionValue(OPTION_SEARCH_ION, "M+H"));
            /* This is pretty lazy, but works with the existing API.  Extract all selected ions for all search masses when
             * performing the scan, then filter down to the desired ions for the plot at the end.
             * TODO: specify the masses and scans per sample rather than batching everything together.  It might be slower,
             * but it'll be clearer to read. */
        } else {
            // We need to make sure that the standard metlin ion we choose is consistent with the ion modes of
            // the given positive, negative and standard scan files. For example, we should not pick a negative
            // metlin ion if all our available positive control scan files are in the positive ion mode.
            Map<Integer, Pair<Boolean, Boolean>> ionModes = new HashMap<>();
            for (ChemicalAssociatedWithPathway chemical : pathwayChems) {
                boolean isPositiveScanPresent = false;
                boolean isNegativeScanPresent = false;

                for (ScanData<StandardWell> scan : allStandardScans.getLeft()) {
                    if (chemical.getChemical().equals(scan.getWell().getChemical())
                            && chemical.getChemical().equals(scan.getTargetChemicalName())) {
                        if (MS1.IonMode.valueOf(
                                scan.getScanFile().getMode().toString().toUpperCase()) == MS1.IonMode.POS) {
                            isPositiveScanPresent = true;
                        }

                        if (MS1.IonMode.valueOf(
                                scan.getScanFile().getMode().toString().toUpperCase()) == MS1.IonMode.NEG) {
                            isNegativeScanPresent = true;
                        }
                    }
                }

                for (ScanData<LCMSWell> scan : posNegWells) {
                    if (chemical.getChemical().equals(scan.getWell().getChemical())
                            && chemical.getChemical().equals(scan.getTargetChemicalName())) {
                        if (MS1.IonMode.valueOf(
                                scan.getScanFile().getMode().toString().toUpperCase()) == MS1.IonMode.POS) {
                            isPositiveScanPresent = true;
                        }

                        if (MS1.IonMode.valueOf(
                                scan.getScanFile().getMode().toString().toUpperCase()) == MS1.IonMode.NEG) {
                            isNegativeScanPresent = true;
                        }
                    }
                }

                ionModes.put(chemical.getId(), Pair.of(isPositiveScanPresent, isNegativeScanPresent));
            }

            // Sort standard wells so that MeOH- and water-based media come first and anything
            // derived from yeast media is demoted to the end. The yeast-media analysis depends
            // on the water/MeOH results, so those media must be processed first.
            Collections.sort(standardWells, new Comparator<StandardWell>() {
                @Override
                public int compare(StandardWell o1, StandardWell o2) {
                    boolean o1Yeast = StandardWell.doesMediaContainYeastExtract(o1.getMedia());
                    boolean o2Yeast = StandardWell.doesMediaContainYeastExtract(o2.getMedia());
                    // false sorts before true, so yeast-extract media land at the end; this also
                    // keeps the comparator antisymmetric, as the Comparator contract requires.
                    return Boolean.compare(o1Yeast, o2Yeast);
                }
            });

            searchIons = extractPathwayStepIonsFromStandardIonAnalysis(pathwayChems, lcmsDir, db, standardWells,
                    plottingDirectory, ionModes);
        }

        produceLCMSPathwayHeatmaps(lcmsDir, outData, outImg, outAnalysis, pathwayChems, allStandardScans,
                allPositiveScans, allNegativeScans, fontScale, cl.hasOption(OPTION_USE_HEATMAP), searchIons);
    }
}

From source file:com.inkubator.common.util.NewMain.java

/**
 * @param args the command line arguments
 */
public static void main(String[] args) throws IOException {

    File file1 = new File(
            "C:\\Users\\deni.fahri\\AppData\\Roaming\\Skype\\My Skype Received Files\\JSON_Ek\\Surabaya\\Page1.txt");
    File file2 = new File(
            "C:\\Users\\deni.fahri\\AppData\\Roaming\\Skype\\My Skype Received Files\\JSON_Ek\\Surabaya\\Page2.txt");
    //        File file3 = new File("C:\\Users\\deni.fahri\\AppData\\Roaming\\Skype\\My Skype Received Files\\json\\json\\menado\\page3.txt");
    File file3 = new File(
            "C:\\Users\\deni.fahri\\AppData\\Roaming\\Skype\\My Skype Received Files\\JSON_Ek\\Surabaya\\Page3.txt");
    File file4 = new File(
            "C:\\Users\\deni.fahri\\AppData\\Roaming\\Skype\\My Skype Received Files\\JSON_Ek\\Surabaya\\Page4.txt");
    File file5 = new File(
            "C:\\Users\\deni.fahri\\AppData\\Roaming\\Skype\\My Skype Received Files\\JSON_Ek\\Surabaya\\Page5.txt");
    File file6 = new File(
            "C:\\Users\\deni.fahri\\AppData\\Roaming\\Skype\\My Skype Received Files\\JSON_Ek\\Surabaya\\Page6.txt");
    //        File file7 = new File("C:\\Users\\deni.fahri\\AppData\\Roaming\\Skype\\My Skype Received Files\\Bandung\\Bandung\\Page 7.txt");
    //        File file8 = new File("C:\\Users\\deni.fahri\\AppData\\Roaming\\Skype\\My Skype Received Files\\Bandung\\Bandung\\Page 8.txt");
    //        File file9 = new File("C:\\Users\\deni.fahri\\AppData\\Roaming\\Skype\\My Skype Received Files\\Bandung\\Bandung\\Page 9.txt");
    //        File file10 = new File("C:\\Users\\deni.fahri\\AppData\\Roaming\\Skype\\My Skype Received Files\\Bandung\\Bandung\\Page 10.txt");
    //        File file11 = new File("C:\\Users\\deni.fahri\\AppData\\Roaming\\Skype\\My Skype Received Files\\Bandung\\Bandung\\Page 11.txt");
    //        File file12 = new File("C:\\Users\\deni.fahri\\AppData\\Roaming\\Skype\\My Skype Received Files\\Bandung\\Bandung\\Page 12.txt");
    //        File file13 = new File("C:\\Users\\deni.fahri\\AppData\\Roaming\\Skype\\My Skype Received Files\\Bandung\\Bandung\\Page 13.txt");
    //        File file14 = new File("C:\\Users\\deni.fahri\\AppData\\Roaming\\Skype\\My Skype Received Files\\Bandung\\Bandung\\Page 14.txt");
    //        File file15 = new File("C:\\Users\\deni.fahri\\AppData\\Roaming\\Skype\\My Skype Received Files\\Bandung\\Bandung\\Page 15.txt");
    //        File file16 = new File("C:\\Users\\deni.fahri\\Downloads\\page16.txt");

    //        File file2 = new File("C:\\Users\\deni.fahri\\Documents\\hasil.txt");
    String agoda = FilesUtil.getAsStringFromFile(file1);
    String agoda1 = FilesUtil.getAsStringFromFile(file2);
    String agoda2 = FilesUtil.getAsStringFromFile(file3);
    String agoda3 = FilesUtil.getAsStringFromFile(file4);
    String agoda4 = FilesUtil.getAsStringFromFile(file5);
    String agoda5 = FilesUtil.getAsStringFromFile(file6);
    //        String agoda6 = FilesUtil.getAsStringFromFile(file7);
    //        String agoda7 = FilesUtil.getAsStringFromFile(file8);
    //        String agoda8 = FilesUtil.getAsStringFromFile(file9);
    //        String agoda9 = FilesUtil.getAsStringFromFile(file10);
    //        String agoda10 = FilesUtil.getAsStringFromFile(file11);
    //        String agoda11 = FilesUtil.getAsStringFromFile(file12);
    //        String agoda12 = FilesUtil.getAsStringFromFile(file13);
    //        String agoda13 = FilesUtil.getAsStringFromFile(file14);
    //        String agoda14 = FilesUtil.getAsStringFromFile(file15);
    //        String agoda15 = FilesUtil.getAsStringFromFile(file16);
    ////        System.out.println(" Test Nya adalah :" + agoda);
    ////        String a=StringUtils.substringAfter("\"HotelTranslatedName\":", agoda);
    ////        System.out.println(" hasil; "+a);
    ////        // TODO code application logic here
    ////        System.out.println("Nilai " + JsonConverter.getValueByKeyStatic(agoda, "HotelTranslatedName"));
    TypeToken<List<HotelModel>> token = new TypeToken<List<HotelModel>>() {
    };
    Gson gson = new GsonBuilder().create();
    //        List<HotelModel> data = new ArrayList<>();
    //        HotelModel hotelModel = new HotelModel();
    //        hotelModel.setAddress("sdfsdffsfsdfsdfdsfdsf");
    //        hotelModel.setAccommodationName("Aku");
    //        HotelModel hotelModel1 = new HotelModel();
    //        hotelModel1.setAddress("sdfsdffsfsdfsdfdsfdsf");
    //        hotelModel1.setAccommodationName("Avvvku");
    //        HotelModel hotelModel2 = new HotelModel();
    //        hotelModel2.setAddress("sdfsdffsfsdfsdfdsfdsf");
    //        hotelModel2.setAccommodationName("Akvvvu");
    //        data.add(hotelModel);
    //        data.add(hotelModel1);
    //        data.add(hotelModel2);
    //        String json = gson.toJson(data);
    List<HotelModel> total = new ArrayList<>();
    List<HotelModel> data1 = new ArrayList<>();
    List<HotelModel> data2 = new ArrayList<>();
    List<HotelModel> data3 = new ArrayList<>();
    List<HotelModel> data4 = new ArrayList<>();
    List<HotelModel> data5 = new ArrayList<>();
    List<HotelModel> data6 = new ArrayList<>();
    List<HotelModel> data7 = new ArrayList<>();
    List<HotelModel> data8 = new ArrayList<>();
    List<HotelModel> data9 = new ArrayList<>();
    List<HotelModel> data10 = new ArrayList<>();
    List<HotelModel> data11 = new ArrayList<>();
    List<HotelModel> data12 = new ArrayList<>();
    List<HotelModel> data13 = new ArrayList<>();
    List<HotelModel> data14 = new ArrayList<>();
    List<HotelModel> data15 = new ArrayList<>();
    List<HotelModel> data16 = new ArrayList<>();

    data1 = gson.fromJson(agoda, token.getType());
    data2 = gson.fromJson(agoda1, token.getType());
    data3 = gson.fromJson(agoda2, token.getType());
    data4 = gson.fromJson(agoda3, token.getType());
    data5 = gson.fromJson(agoda4, token.getType());
    data6 = gson.fromJson(agoda5, token.getType());
    //        data7 = gson.fromJson(agoda6, token.getType());
    //        data8 = gson.fromJson(agoda7, token.getType());
    //        data9 = gson.fromJson(agoda8, token.getType());
    //        data10 = gson.fromJson(agoda9, token.getType());
    //        data11 = gson.fromJson(agoda10, token.getType());
    //        data12 = gson.fromJson(agoda11, token.getType());
    //        data13 = gson.fromJson(agoda12, token.getType());
    //        data14 = gson.fromJson(agoda13, token.getType());
    //        data15 = gson.fromJson(agoda14, token.getType());
    //        data16 = gson.fromJson(agoda15, token.getType());
    total.addAll(data1);
    total.addAll(data2);
    total.addAll(data3);
    total.addAll(data4);
    total.addAll(data5);
    total.addAll(data6);
    //        total.addAll(data7);
    //        total.addAll(data8);
    //        total.addAll(data9);
    //        total.addAll(data10);
    //        total.addAll(data11);
    //        total.addAll(data12);
    //        total.addAll(data13);
    //        total.addAll(data14);
    //        total.addAll(data15);
    //        total.addAll(data16);
    System.out.println(" Ukurannn nya " + total.size());

    //        System.out.println(" Ukurannya " + data2.size());
    for (HotelModel mode : total) {
        System.out.println(mode);
    }
    //        HotelModel hotelModel = gson.fromJson(agoda, HotelModel.class);
    //        String Data = hotelModel.getHotelTranslatedName() + ";" + hotelModel.getStarRating() + ";" + hotelModel.getAddress() + ";" + hotelModel.getIsFreeWifi();
    //        FilesUtil.writeToFileFromString(file2, Data);
    //        System.out.println(hotelModel);
    //
    HSSFWorkbook workbook = new HSSFWorkbook();
    HSSFSheet sheet = workbook.createSheet("Agoda Data Hotel Surabaya");

    ////
    TreeMap<String, Object[]> datatoExel = new TreeMap<>();
    int i = 1;
    //        datatoExel.put("1", new Object[]{"Hotel Agoda Jakarta"});
    datatoExel.put("1", new Object[] { "Nama Hotel", "Arena", "Alamat", "Rating", "Apakah Gratis Wifi",
            "Harga Mulai Dari", "Longitude", "Latitude" });
    for (HotelModel mode : total) {
        datatoExel.put(String.valueOf(i + 1),
                new Object[] { mode.getHotelTranslatedName(), mode.getAreaName(), mode.getAddress(),
                        mode.getStarRating(), mode.getIsFreeWifi(),
                        mode.getTextPrice() + " " + mode.getCurrencyCode(), mode.getCoordinate().getLongitude(),
                        mode.getCoordinate().getLatitude() });
        i++;
    }
    //
    ////          int i=1;
    ////        for (HotelModel mode : data2) {
    ////             datatoExel.put(String.valueOf(i), new Object[]{1d, "John", 1500000d});
    //////        }
    ////       
    ////        datatoExel.put("4", new Object[]{3d, "Dean", 700000d});
    ////
    Set<String> keyset = datatoExel.keySet();
    int rownum = 0;
    for (String key : keyset) {
        Row row = sheet.createRow(rownum++);
        Object[] objArr = datatoExel.get(key);
        int cellnum = 0;
        for (Object obj : objArr) {
            Cell cell = row.createCell(cellnum++);
            if (obj instanceof Date) {
                cell.setCellValue((Date) obj);
            } else if (obj instanceof Boolean) {
                cell.setCellValue((Boolean) obj);
            } else if (obj instanceof String) {
                cell.setCellValue((String) obj);
            } else if (obj instanceof Double) {
                cell.setCellValue((Double) obj);
            }
        }
    }

    try (FileOutputStream out = new FileOutputStream(
            new File("C:\\Users\\deni.fahri\\Documents\\Surabaya.xls"))) {
        workbook.write(out);
        System.out.println("Excel written successfully..");
    } catch (IOException e) {
        // FileNotFoundException is an IOException, so one catch covers both.
        e.printStackTrace();
    }
}
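
The sixteen parallel file/String/List variables above boil down to one parse-and-merge loop. A sketch of the same pattern, assuming the example's HotelModel and FilesUtil classes are on the classpath; the loop form is an illustrative rewrite, not the original code:

import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
import java.io.File;
import java.util.ArrayList;
import java.util.List;

public class MergePagesDemo {
    public static void main(String[] args) {
        Gson gson = new Gson();
        TypeToken<List<HotelModel>> token = new TypeToken<List<HotelModel>>() {
        };
        String dir = "C:\\Users\\deni.fahri\\AppData\\Roaming\\Skype\\My Skype Received Files\\JSON_Ek\\Surabaya\\";

        List<HotelModel> total = new ArrayList<>();
        for (int page = 1; page <= 6; page++) {
            String json = FilesUtil.getAsStringFromFile(new File(dir + "Page" + page + ".txt"));
            List<HotelModel> pageData = gson.fromJson(json, token.getType());
            if (pageData != null) { // fromJson returns null for empty input
                total.addAll(pageData); // merge each page's hotels into one list
            }
        }
        System.out.println("Total hotels: " + total.size());
    }
}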

From source file:squash.tools.FakeBookingCreator.java

public static void main(String[] args) throws IOException {
    int numberOfDays = 21;
    int numberOfCourts = 5;
    int maxCourtSpan = 5;
    int numberOfSlots = 16;
    int maxSlotSpan = 3;
    int minSurnameLength = 2;
    int maxSurnameLength = 20;
    int minBookingsPerDay = 0;
    int maxBookingsPerDay = 8;
    LocalDate startDate = LocalDate.of(2016, 7, 5);

    DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd");
    List<Booking> bookings = new ArrayList<>();
    for (LocalDate date = startDate; date.isBefore(startDate.plusDays(numberOfDays)); date = date.plusDays(1)) {
        int numBookings = ThreadLocalRandom.current().nextInt(minBookingsPerDay, maxBookingsPerDay + 1);
        List<Booking> daysBookings = new ArrayList<>();
        for (int bookingIndex = 0; bookingIndex < numBookings; bookingIndex++) {
            String player1 = RandomStringUtils.randomAlphabetic(1) + "." + RandomStringUtils.randomAlphabetic(
                    ThreadLocalRandom.current().nextInt(minSurnameLength, maxSurnameLength + 1));
            String player2 = RandomStringUtils.randomAlphabetic(1) + "." + RandomStringUtils.randomAlphabetic(
                    ThreadLocalRandom.current().nextInt(minSurnameLength, maxSurnameLength + 1));

            Set<ImmutablePair<Integer, Integer>> bookedCourts = new HashSet<>();
            daysBookings.forEach((booking) -> {
                addBookingToSet(booking, bookedCourts);
            });

            Booking booking;
            Set<ImmutablePair<Integer, Integer>> courtsToBook = new HashSet<>();
            do {
                // Loop until we create a booking of free courts
                int court = ThreadLocalRandom.current().nextInt(1, numberOfCourts + 1);
                int courtSpan = ThreadLocalRandom.current().nextInt(1,
                        Math.min(maxCourtSpan + 1, numberOfCourts - court + 2));
                int slot = ThreadLocalRandom.current().nextInt(1, numberOfSlots + 1);
                int slotSpan = ThreadLocalRandom.current().nextInt(1,
                        Math.min(maxSlotSpan + 1, numberOfSlots - slot + 2));
                booking = new Booking(court, courtSpan, slot, slotSpan, player1 + "/" + player2);
                booking.setDate(date.format(formatter));
                courtsToBook.clear();
                addBookingToSet(booking, courtsToBook);
            } while (!Sets.intersection(courtsToBook, bookedCourts).isEmpty());

            daysBookings.add(booking);
        }
        bookings.addAll(daysBookings);
    }

    // Encode bookings as JSON
    // Create the node factory that gives us nodes.
    JsonNodeFactory factory = new JsonNodeFactory(false);
    // Create a json factory to write the treenode as json.
    JsonFactory jsonFactory = new JsonFactory();
    ObjectNode rootNode = factory.objectNode();

    ArrayNode bookingsNode = rootNode.putArray("bookings");
    for (int i = 0; i < bookings.size(); i++) {
        Booking booking = bookings.get(i);
        ObjectNode bookingNode = factory.objectNode();
        bookingNode.put("court", booking.getCourt());
        bookingNode.put("courtSpan", booking.getCourtSpan());
        bookingNode.put("slot", booking.getSlot());
        bookingNode.put("slotSpan", booking.getSlotSpan());
        bookingNode.put("name", booking.getName());
        bookingNode.put("date", booking.getDate());
        bookingsNode.add(bookingNode);
    }
    // Add empty booking rules array - just so restore works
    rootNode.putArray("bookingRules");
    rootNode.put("clearBeforeRestore", true);

    try (JsonGenerator generator = jsonFactory.createGenerator(new File("FakeBookings.json"),
            JsonEncoding.UTF8)) {
        ObjectMapper mapper = new ObjectMapper();
        mapper.setSerializationInclusion(Include.NON_EMPTY);
        mapper.setSerializationInclusion(Include.NON_NULL);
        mapper.writeTree(generator, rootNode);
    }
}

From source file:com.thesmartweb.swebrank.Main.java

/**
 * @param args the command line arguments
 */
public static void main(String[] args) {
    Path input_path = Paths.get("//mnt//var//DBs//inputsL10//nba//");//input directory
    String output_parent_directory = "//mnt//var//DBs//outputsConfL10//nba//";//output directory
    String config_path = "//mnt//var//DBs//config//";//input directory
    //---Disable apache log manually----
    //System.setProperty("org.apache.commons.logging.Log","org.apache.commons.logging.impl.NoOpLog");
    System.setProperty("org.apache.commons.logging.Log", "org.apache.commons.logging.impl.Log4JLogger");
    //--------------Domain that is searched----------
    String domain = "";
    //------------------search engine related options----------------------
    List<String> queries = null;
    int results_number = 0;//the number of results that are returned from each search engine
    List<Boolean> enginechoice = null;
    //list element #0. True/False Bing
    //list element #1. True/False Google
    //list element #2. True/False Yahoo!
    //list element #3. True/False Merged
    //-----------Moz options---------------------
    List<Boolean> mozMetrics = null;
    //The list is going to contain the moz related input in the following order
    //list element #1. True/False, True we use Moz API, false not
    //list element #2. True if we use Domain Authority
    //list element #3. True if we use External MozRank
    //list element #4. True if we use MozRank
    //list element #5. True if we use MozTrust
    //list element #6. True if we use Subdomain MozRank
    //list element #7. True if we use Page Authority
    //only one is used (the first to be set to true)
    boolean moz_threshold_option = false;//set to true we use the threshold
    Double moz_threshold = 0.0;//if we want to have a threshold in moz
    int top_count_moz = 0;//if we want to get the moz top-something results
    //---------------Semantic Analysis method----------------
    List<Boolean> ContentSemantics = null;
    int SensebotConcepts = 0;//define the amount of concepts that sensebot is going to recognize
    List<Double> SWebRankSettings = null;
    //------(string)directory is going to be used later-----
    String output_child_directory;
    //-------we get all the paths of the txt (input) files from the input directory-------
    DataManipulation getfiles = new DataManipulation();//class responsible for the extraction of paths
    Collection<File> inputs_files;//array to include the paths of the txt files
    inputs_files = getfiles.getinputfiles(input_path.toString(), "txt");//method to retrieve all the path of the input documents
    //------------read the txt files------------
    for (File input : inputs_files) {
        ReadInput ri = new ReadInput();//function to read the input
        boolean check_reading_input = ri.perform(input);
        if (check_reading_input) {
            domain = ri.domain;
            //----------
            queries = ri.queries;
            results_number = ri.results_number;
            enginechoice = ri.enginechoice;
            //------------
            mozMetrics = ri.mozMetrics;
            moz_threshold_option = ri.moz_threshold_option;
            moz_threshold = ri.moz_threshold.doubleValue();
            //---------------
            ContentSemantics = ri.ContentSemantics;
            SWebRankSettings = ri.SWebRankSettings;
        }
        int top_visible = 0;//option to set the amount of results you can get in the merged search engine
        //------if we choose to use a Moz metric or Visibility score for our ranking, we need to set the results_number for the search engines to its max which is 50 
        //-----we set the top results number for moz or Visibility rank----
        if (mozMetrics.get(0) || enginechoice.get(3)) {
            if (mozMetrics.get(0)) {
                top_count_moz = results_number;
            } //if moz is true, top_count_moz gets the value of result number
            if (enginechoice.get(3)) {
                top_visible = results_number;
            } //if merged engine is true, top_visible gets the value of result number
            results_number = 50;//this is the max amount of results that you can get from the search engine APIs
        }
        //-----if we want to use Moz we should check first if it works
        if (mozMetrics.get(0)) {
            Moz Moz = new Moz();
            //---if it works, moz remains true, otherwise it is set to false
            mozMetrics.add(0, Moz.check(config_path));
            //if it is false and we have chosen to use Visibility score with Moz, we reset back to the standard settings (ranking and not merged)
            //therefore, we reset the number of results from 50 to the top_count_moz which contained the original number of results
            if (!mozMetrics.get(0)) {
                if (!enginechoice.get(3)) {
                    results_number = top_count_moz;
                }
            }
        }
        //----------we set the wordLists that we are going to use---------------------
        List<String> finalList = new ArrayList<String>();//finalList is going to contain all the content in the end
        Total_analysis ta = new Total_analysis();//we call total analysis
        int iteration_counter = 0;//the iteration_counter is used in order to count the number of iterations of the algorithm and to be checked with perf_limit
        //this list of arraylists  is going to contain all the wordLists that are produced for every term of the String[] query,
        //in order to calculate the NGD scores between every term of the wordList and the term that was used as query in order to produce the specific wordList
        List<ArrayList<String>> array_wordLists = new ArrayList<>();
        List<String> wordList_previous = new ArrayList<>();
        List<String> wordList_new = new ArrayList<>();
        double convergence = 0;//we create the convergence percentage and initialize it
        String conv_percentages = "";//string that contains all the convergence percentages
        DataManipulation wordsmanipulation = new DataManipulation();//method to manipulate various word data (String, list<String>, etc)
        do { //if we run the algorithm for the 1st time we already have the query so we skip the loop below that produces the new array of query
            if (iteration_counter != 0) {
                wordList_previous = wordList_new;
                //we add the previous wordList to the finalList
                finalList = wordsmanipulation.AddAList(wordList_previous, finalList);
                List<String> query_new_list_total = new ArrayList<>();
                int iteration_previous = iteration_counter - 1;
                Combinations_Engine cn = new Combinations_Engine();//call the class to combine the terms produced
                for (String query : queries) {
                    List<String> ids = new ArrayList<>();
                    if (enginechoice.get(0)) {
                        String id = domain + "/" + query + "/bing" + "/" + iteration_previous;
                        ids.add(id);
                    }
                    if (enginechoice.get(1)) {
                        String id = domain + "/" + query + "/google" + "/" + iteration_previous;
                        ids.add(id);
                    }
                    if (enginechoice.get(2)) {
                        String id = domain + "/" + query + "/yahoo" + "/" + iteration_previous;
                        ids.add(id);
                    }
                    ElasticGetWordList ESget = new ElasticGetWordList();//we call this class to get the wordlist from the Elastic Search
                    List<String> maxWords = ESget.getMaxWords(ids, SWebRankSettings.get(9).intValue(),
                            config_path);//we are going to get a max amount of words
                    int query_index = queries.indexOf(query);
                    int size_query_new = SWebRankSettings.get(10).intValue();//the amount of new queries we are willing to create
                    //we create the new queries for every query of the previous round by combining the words produced from this query
                    List<String> query_new_list = cn.perform(maxWords, SWebRankSettings.get(7), queries,
                            SWebRankSettings.get(6), query_index, size_query_new, config_path);
                    //we add the list of new queries to the total list that contains all the new queries
                    query_new_list_total.addAll(query_new_list);
                    System.out.println("query pointer=" + query_index + "");
                }
                //---------------------the following cleans a list from null and duplicates
                query_new_list_total = wordsmanipulation.clearListString(query_new_list_total);
                //--------------we create the new directory that our files are going to be saved 
                String txt_directory = FilenameUtils.getBaseName(input.getName());
                output_child_directory = output_parent_directory + txt_directory + "_level_" + iteration_counter
                        + "//";
                //----------------append the wordlist to a file------------------
                wordsmanipulation.AppendWordList(query_new_list_total,
                        output_child_directory + "queries_" + iteration_counter + ".txt");
                if (query_new_list_total.size() < 1) {
                    break;
                } //if we don't create new queries we end the while loop
                //total analysis' function is going to do all the work and return back what we need
                ta = new Total_analysis();
                ta.perform(wordList_previous, iteration_counter, output_child_directory, domain, enginechoice,
                        query_new_list_total, results_number, top_visible, mozMetrics, moz_threshold_option,
                        moz_threshold.doubleValue(), top_count_moz, ContentSemantics, SensebotConcepts,
                        SWebRankSettings, config_path);
                //we get the array of wordlists
                array_wordLists = ta.getarray_wordLists();
                //get the wordlist that includes all the new queries
                wordList_new = ta.getwordList_total();
                //---------------------the following cleans a list from null and duplicates-------------
                wordList_new = wordsmanipulation.clearListString(wordList_new);
                //----------------append the wordlist to a file--------------------
                wordsmanipulation.AppendWordList(wordList_new, output_child_directory + "wordList.txt");
                //the convergence percentage of this iteration
                convergence = ta.getConvergence();//we are going to use convergence score to check the convergence
                //a string that contains all the convergence percentage for each round separated by \n character
                conv_percentages = conv_percentages + "\n" + convergence;
                //a file that is going to include the convergence percentages
                wordsmanipulation.AppendString(conv_percentages,
                        output_child_directory + "convergence_percentage.txt");
                //we add the new wordList to the finalList
                finalList = wordsmanipulation.AddAList(wordList_new, finalList);
                //we set the query array to be equal to the query new total that we have created
                queries = query_new_list_total;
                //we increment the iteration_counter in order to count the iterations of the algorithm and to use the perf_limit
                iteration_counter++;
            } else {//the following source code is performed on the 1st run of the loop
                    //------------we extract the parent path of the file 
                String txt_directory = FilenameUtils.getBaseName(input.getName());
                //----------we create a string that is going to be used for the corresponding directory of outputs
                output_child_directory = output_parent_directory + txt_directory + "_level_" + iteration_counter
                        + "//";
                //we call total analysis function performOld
                ta.perform(wordList_new, iteration_counter, output_child_directory, domain, enginechoice,
                        queries, results_number, top_visible, mozMetrics, moz_threshold_option,
                        moz_threshold.doubleValue(), top_count_moz, ContentSemantics, SensebotConcepts,
                        SWebRankSettings, config_path);
                //we get the array of wordlists
                array_wordLists = ta.getarray_wordLists();
                //get the wordlist that includes all the new queries
                wordList_new = ta.getwordList_total();
                //---------------------the following cleans a list from null and duplicates
                wordList_new = wordsmanipulation.clearListString(wordList_new);
                //----------------append the wordlist to a file
                wordsmanipulation.AppendWordList(wordList_new, output_child_directory + "wordList.txt");
                //-----------------------------------------
                iteration_counter++;//increase the iteration_counter that counts the iterations of the algorithm
            }
        } while (convergence < SWebRankSettings.get(5).doubleValue()
                && iteration_counter < SWebRankSettings.get(8).intValue());//while the convergence percentage is below the limit and the iteration_counter below the performance limit
        if (iteration_counter == 1) {
            finalList = wordsmanipulation.AddAList(wordList_new, finalList);
        }
        //--------------------content List----------------
        if (!finalList.isEmpty()) {
            //---------------------the following cleans the final list from null and duplicates
            finalList = wordsmanipulation.clearListString(finalList);
            //write the keywords to a file
            boolean flag_file = false;//boolean flag to declare successful write to file
            flag_file = wordsmanipulation.AppendWordList(finalList,
                    output_parent_directory + "total_content.txt");
            if (!flag_file) {
                System.out.print("can not create the content file for: " + output_parent_directory
                        + "total_content.txt");
            }
        }
        //we are going to save the total content with its convergence on the ElasticSearch cluster in a separate index
        //Node node = nodeBuilder().client(true).clusterName("lshrankldacluster").node();
        //Client client = node.client();
        //get the elastic search indexes in a list
        List<String> elasticIndexes = ri.GetKeyFile(config_path, "elasticSearchIndexes");
        Settings settings = ImmutableSettings.settingsBuilder().put("cluster.name", "lshrankldacluster")
                .build();
        Client client = new TransportClient(settings)
                .addTransportAddress(new InetSocketTransportAddress("localhost", 9300));
        JSONObject objEngineLevel = new JSONObject();
        objEngineLevel.put("TotalContent", finalList);//we save the total content
        objEngineLevel.put("Convergences", conv_percentages);//we save the convergence percentages
        IndexRequest indexReq = new IndexRequest(elasticIndexes.get(0), "content", domain);//the domain serves as the document id
        indexReq.source(objEngineLevel);
        IndexResponse indexRes = client.index(indexReq).actionGet();
        //node.close();
        client.close();
        //----------------------convergence percentages writing to file---------------
        //use the conv_percentages string
        if (conv_percentages.length() != 0) {
            boolean flag_file = false;//flag indicating a successful write to the file
            flag_file = wordsmanipulation.AppendString(conv_percentages,
                    output_parent_directory + "convergence_percentages.txt");
            if (!flag_file) {
                System.out.print("can not create the convergence file for: " + output_parent_directory
                        + "convergence_percentages.txt");
            }
        }
    }
}
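
The clearListString/AddAList helpers above implement an accumulate-then-dedupe pattern that plain List.addAll plus a LinkedHashSet also provides. A minimal standalone sketch of that pattern (names below are illustrative, not taken from the source above):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;

public class AccumulateKeywords {
    public static void main(String[] args) {
        List<String> finalList = new ArrayList<>();
        //each iteration of the algorithm contributes a fresh batch of keywords
        finalList.addAll(Arrays.asList("lda", "rank", "query"));
        finalList.addAll(Arrays.asList("rank", "topic", "lda"));
        //drop duplicates while keeping first-seen order (clearListString additionally strips nulls)
        finalList = new ArrayList<>(new LinkedHashSet<>(finalList));
        System.out.println(finalList); //[lda, rank, query, topic]
    }
}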

From source file:de.intranda.goobi.plugins.CSICMixedImport.java

public static void main(String[] args) throws PreferencesException, WriteException {
    CSICMixedImport converter = new CSICMixedImport();
    converter.prefs = new Prefs();

    try {
        converter.prefs.loadPrefs("/opt/digiverso/goobi/rulesets/rulesetCSIC.xml");
    } catch (PreferencesException e) {
        logger.error(e.getMessage(), e);
    }

    converter.setImportFolder("output/");
    List<Record> records = new ArrayList<Record>();
    if (!converter.exportFolder.isDirectory()) {
        logger.warn("No export directory found. Aborting");
        return;
    }

    // converter.createFileformat(file);
    File file = new File("/mnt/csic/0017_FACN/000009800_9800001020.xml");
    converter.setFile(file);
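    //addAll appends every record parsed from this file to the shared list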
    records.addAll(converter.generateRecordsFromFile());
    // }

    List<ImportObject> results = converter.generateFiles(records);

    for (ImportObject record : results) {
        System.out.println(record.getProcessTitle() + " \t \t " + record.getImportReturnValue());
    }
}
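
The main above converts a single hard-coded file; the same records.addAll call scales to a whole folder by accumulating per-file results. A minimal standalone sketch, with parse standing in for generateRecordsFromFile:

import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class CollectPerFile {
    //stand-in for converter.generateRecordsFromFile()
    static List<String> parse(File f) {
        return Arrays.asList(f.getName() + "#1", f.getName() + "#2");
    }

    public static void main(String[] args) {
        List<String> records = new ArrayList<>();
        for (File f : new File[] { new File("a.xml"), new File("b.xml") }) {
            records.addAll(parse(f)); //append every record from this file
        }
        System.out.println(records); //[a.xml#1, a.xml#2, b.xml#1, b.xml#2]
    }
}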

From source file:fmiquerytest.Coordinates.java

public static void main(String[] args) {
    df_short.setTimeZone(tz);
    df_iso.setTimeZone(tz);
    df_daycode.setTimeZone(tz);
    DecimalFormatSymbols otherSymbols = new DecimalFormatSymbols();
    otherSymbols.setDecimalSeparator('.');
    df_fiveDecimal.setDecimalFormatSymbols(otherSymbols);
    String startTime = df_short.format(new Date(startTimeMillis));
    System.out.println("startTime: " + startTime);

    //Clean up old weather data 
    //**********************************************************************
    FileSystemTools.cleanupOldWeatherData(daysToStoreWeatherData);

    //Google query
    //**********************************************************************
    if (gShare.equals("")) {
        Scanner input = new Scanner(System.in);
        System.out.println("Paste Google Directions Share:");
        gShare = input.nextLine();
    }
    String gQuery = Parser.getQueryFromShare(gShare);
    System.out.println("Google query URL: " + gQuery);

    //Check if we already have this route
    //Valid only if the route option is 0 (default),
    //because otherwise we cannot be sure the saved route matches the requested alternative
    List<routeStep> gSteps = new ArrayList<>();
    if (FileSystemTools.isSavedRoute(gQuery) && gRouteOption == 0) {
        System.out.println("Route found from saved list. Loading.");
        gSteps = FileSystemTools.loadSavedRoute(gQuery);
    } else {
        gSteps = Parser.getSteps(gQuery);
        if (gRouteOption == 0) {
            System.out.println("Saving new route to list.");
            FileSystemTools.saveRoute(gQuery, gSteps);
        }
    }

    //Compile route table with current settings
    //**********************************************************************
    List<routeStep> routeData = RouteTools.compileRoute(gSteps, refreshInterval);
    String endTime = df_short.format(new Date(startTimeMillis + routeDur * 1000));
    System.out.println("endTime: " + endTime);
    //The FMI forecast covers only 48h - warn if the route extends past that
    //Or is it 54h? http://ilmatieteenlaitos.fi/avoin-data-saaennustedata-hirlam
    if (((startTimeMillis + routeDur * 1000) - System.currentTimeMillis()) / (1000 * 60 * 60) > 48) {
        System.out.println("**************************************************" + newLine + "WARNING:" + newLine
                + "Weather forecast available only for 48 hours" + newLine
                + "**************************************************");
    }

    //Prepare time and file variables
    //**********************************************************************
    String nowAsISO = df_iso.format(new Date());
    System.out.println("Start ISO time: " + nowAsISO);
    double timeMarginal = routeDur * 1.2 + 3600;
    String endTimeForFmi = df_iso.format(new Date(startTimeMillis + (intValue(timeMarginal)) * 1000));
    String endTimeForFile = df_iso.format(new Date(startTimeMillis + (intValue(routeDur + 3600)) * 1000));
    System.out.println("End ISO time:   " + endTimeForFmi);
    String fmiParam = new StringBuilder("&starttime=").append(nowAsISO).append("&endtime=")
            .append(endTimeForFmi).toString();
    File weatherDataFileNameFirst = new File("weather" + nowAsISO.replaceAll("[^A-Za-z0-9 ]", "") + ".txt");
    File weatherDataFileNameLast = new File("weather" + endTimeForFmi.replaceAll("[^A-Za-z0-9 ]", "") + ".txt");
    File weatherDataFileNameStart = new File(
            "weather" + (df_iso.format(new Date(startTimeMillis))).replaceAll("[^A-Za-z0-9 ]", "") + ".txt");
    File weatherDataFileNameEnd = new File("weather" + endTimeForFile.replaceAll("[^A-Za-z0-9 ]", "") + ".txt");
    List<stationData> allStations = new ArrayList<>();
    List<stationData> fmiData = new ArrayList<>();
    List<String> savedFileTimes = new ArrayList<>();

    //**********************************************************************
    //Check if we already have the weather data
    //**********************************************************************
    if (!weatherDataFileNameStart.exists() || !weatherDataFileNameEnd.exists()) {
        //FMI query
        //**********************************************************************
        String fmiCities = new StringBuilder(fmiBase).append(fmiKey).append(fmiMiddle).append(fmiQueryCities)
                .append(fmiParam).toString();
        String fmiObsStations = new StringBuilder(fmiBase).append(fmiKey).append(fmiMiddle)
                .append(fmiQueryObsStations).append(fmiParam).toString();
        //System.out.println("FMI cities URL: "+fmiCities);
        //System.out.println("FMI obsstations URL: "+fmiObsStations);

        //Collect weather data from FMI
        //**********************************************************************
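        //List.addAll appends the stations parsed from each FMI query, concatenating both result sets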
        System.out.print("FMI data:" + newLine + fmiCities + newLine + "Loading and processing...");
        fmiData.addAll(Parser.getStations(fmiCities));
        System.out.println("SUCCESS.");
        System.out.print("FMI data:" + newLine + fmiObsStations + newLine + "Loading and processing...");
        fmiData.addAll(Parser.getStations(fmiObsStations));
        System.out.println("SUCCESS.");

        //Get unique stations
        //**********************************************************************
        List<stationData> uniqueStations = ToolBox.getUniqueStations(fmiData);
        System.out.println("Parsed stations count: " + uniqueStations.size());

        //Save or load stations
        //**********************************************************************
        List<stationData> savedStations = new ArrayList<>();
        if (!stationFileName.exists()) {
            //Save current parsed stations to file
            FileSystemTools.saveObjectToFile(uniqueStations, stationFileName);
        } else {
            //Or if the stations were already saved, load them
            System.out.println("Station information file found: " + stationFileName);
            System.out.print("Loading...");
            savedStations = FileSystemTools.loadStationsFromFile(stationFileName);
            System.out.println("DONE.");
            System.out.println("Loaded stations count: " + savedStations.size());
        }

        //Merge station information
        //**********************************************************************
        System.out.println("Merging station information.");
        savedStations.addAll(uniqueStations);
        allStations = ToolBox.getUniqueStations(savedStations);
        System.out.println("Merged stations count: " + allStations.size());

        //Find names for stations
        //**********************************************************************
        String gMapsGeoCode = "https://maps.googleapis.com/maps/api/geocode/xml?latlng=";
        //for (stationData station : allStations){
        for (int i = 0; i < allStations.size(); i++) {
            if (allStations.get(i).stationName.equals("")) {
                gQuery = new StringBuilder(gMapsGeoCode).append(allStations.get(i).stationLocation.Lat)
                        .append(",").append(allStations.get(i).stationLocation.Lon).append("&key=").append(gKey)
                        .toString();
                System.out.println("Google query URL: " + gQuery);

                allStations.get(i).stationName = Parser.getStationName(gQuery);
            }
        }
        //System.out.println("Station names parsed.");
        Collections.sort(allStations);

        //Print stations and separate them for saving
        //**********************************************************************
        List<stationData> onlyStations = new ArrayList<>();
        //int indeksi = 0;
        List<weatherData> weatherPoint = new ArrayList<>();
        weatherPoint.add(0, new weatherData("", "", ""));
        for (stationData station : allStations) {
            //System.out.format("%-4s%-30s%-10s%-10s%n",
            //                    indeksi,station.stationName,station.stationLocation.Lat,station.stationLocation.Lon);
            //++indeksi;
            onlyStations.add(new stationData(station.stationLocation, station.stationName, weatherPoint));
        }

        //Save station names
        //**********************************************************************
        System.out.println("Saving station names.");
        FileSystemTools.saveObjectToFile(onlyStations, stationFileName);

        //Save weather dataset
        //**********************************************************************
        //Compute file names between start and end
        System.out.println("Saving weather data...");
        long currentTimeAsDouble = System.currentTimeMillis();
        int hoursPassed = intValue(Math.floor((currentTimeAsDouble - startTimeMillis) / 1000.0 / 60 / 60));
        File weatherDataFileNameTemp = weatherDataFileNameFirst;
        while (!weatherDataFileNameTemp.equals(weatherDataFileNameLast)) {
            String savedFileTime = df_iso.format(new Date(startTimeMillis + ((hoursPassed * 3600) * 1000)));
            savedFileTimes.add(savedFileTime);
            weatherDataFileNameTemp = new File(
                    "weather" + savedFileTime.replaceAll("[^A-Za-z0-9 ]", "") + ".txt");
            //System.out.println("Weather data file: "+weatherDataFileNameTemp);
            //We may not actually want this guard
            //if (!weatherDataFileNameTemp.exists()){
            List<stationData> thisHourWeather = FileSystemTools.extractHourOfWeatherData(savedFileTime,
                    fmiData);
            //System.out.println("Saving: "+weatherDataFileNameTemp);
            FileSystemTools.saveObjectToFile(thisHourWeather, weatherDataFileNameTemp);
            //}
            ++hoursPassed;
        }
    }
    //If the weather data was saved, the stations were definitely saved as well
    //**********************************************************************
    else {
        System.out.println("Loading weather data...");
        File weatherDataFileNameTemp = weatherDataFileNameStart;
        int hoursPassed = 0;
        while (!weatherDataFileNameTemp.equals(weatherDataFileNameEnd)) {
            String savedFileTime = df_iso.format(new Date(startTimeMillis + ((hoursPassed * 3600) * 1000)));
            savedFileTimes.add(savedFileTime);
            weatherDataFileNameTemp = new File(
                    "weather" + savedFileTime.replaceAll("[^A-Za-z0-9 ]", "") + ".txt");
            System.out.println("Weather data file: " + weatherDataFileNameTemp);
            if (weatherDataFileNameTemp.exists()) {
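                //addAll appends each hourly file's stations to the combined dataset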
                fmiData.addAll(FileSystemTools.loadStationsFromFile(weatherDataFileNameTemp));
            }
            ++hoursPassed;
        }
        allStations = FileSystemTools.loadStationsFromFile(stationFileName);
        System.out.println("DONE.");
    }

    //Find closest weather stations in route points and extract their data
    //**********************************************************************
    System.out.println("Calculating nearest stations in route points:");
    List<Integer> neededStations = new ArrayList<>();
    for (routeStep step : routeData) {
        distance[] stationDistances = RouteTools.calculateStationDistances(step.StartLocation, allStations);
        System.out.format("%-6s%.5f, %.5f  ", "Step: ", step.StartLocation.Lat, step.StartLocation.Lon);
        for (int i = 0; i < 1; i++) {
            System.out.format("%-9s%-5s%-20s%.5f%n", "Station: ", stationDistances[i].stationNum,
                    allStations.get(stationDistances[i].stationNum).stationName,
                    stationDistances[i].stationDistance);
        }
        neededStations.add(stationDistances[0].stationNum);
    }
    System.out.println("Needed stations: " + neededStations.toString().trim());
    //Remove duplicates from needed stations list
    Set<Integer> uniqueEntries = new HashSet<Integer>(neededStations);
    //Extract weather data from needed stations
    Map routeWeather = WeatherTools.extractNeededStations(uniqueEntries, fmiData, allStations);

    //Find what fields we have
    List<String> allParameters = new ArrayList<>();
    for (int i = 0; i < fmiData.size(); ++i) {
        allParameters.add(fmiData.get(i).weatherData.get(0).parameterName);
    }
    Set<String> uniqueParameters = new HashSet<String>(allParameters);
    //debug stub: uncomment the println to dump every station/time/parameter value
    for (String par : uniqueParameters) {
        for (Integer num : uniqueEntries) {
            for (String time : savedFileTimes) {
                //System.out.format("%-5s%-25s%-35s%s%n",num,time,par,routeWeather.get(num+"-"+time+"-"+par));
            }
        }
    }

    // Build the final data table
    //**********************************************************************
    List<stepWeather> stepDataBase = RouteTools.combineRouteDatabase(routeData, neededStations, allStations);

    //Find sunrise and sunset times during the route
    //**********************************************************************
    List<String> sunEvents = DayLightTime.calculateSunEvents(stepDataBase);
    for (String s : sunEvents) {
        System.out.println(s.replaceAll(",", "."));
    }

    //Make a webpage to show the weather data
    //**********************************************************************
    WeatherTools.makeResultHtml(stepDataBase, allStations, routeWeather, sunEvents);
}
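
Two addAll idioms from this example are worth isolating: repeated addAll calls that merge partial datasets (fmiData, savedStations), and a set constructed from the merged list to drop duplicates (neededStations). A minimal standalone sketch:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class MergeAndDedupe {
    public static void main(String[] args) {
        List<Integer> needed = new ArrayList<>();
        needed.addAll(Arrays.asList(3, 1, 3)); //nearest station per route step
        needed.addAll(Arrays.asList(1, 7));
        Set<Integer> unique = new HashSet<>(needed); //duplicates collapse here
        System.out.println(unique.size()); //3
    }
}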

From source file:Main.java

/** Appends every element of source to destination (producer extends, consumer super). */
public static <T> void addAll(List<? extends T> source, List<? super T> destination) {
    destination.addAll(source);
}
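
The wildcards follow the producer-extends, consumer-super rule: the source may hold any subtype of T, the destination any supertype, so the helper accepts the widest possible pairings. A usage fragment (assumes the java.util imports):

List<Integer> ints = Arrays.asList(1, 2, 3);
List<Number> numbers = new ArrayList<>();
addAll(ints, numbers); //Integer source into a Number destination
List<Object> objects = new ArrayList<>();
addAll(ints, objects); //...or into a List<Object>; T is inferred per call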