Example usage for java.util TreeMap firstKey


Introduction

On this page you can find example usages of java.util.TreeMap.firstKey(), collected from open-source projects.

Prototype

public K firstKey() 
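
firstKey() returns the first (lowest) key currently in the map, according to the map's ordering, and throws NoSuchElementException if the map is empty. A minimal, self-contained sketch:

import java.util.NoSuchElementException;
import java.util.TreeMap;

public class FirstKeyDemo {
    public static void main(String[] args) {
        TreeMap<Integer, String> map = new TreeMap<>();
        map.put(30, "thirty");
        map.put(10, "ten");
        map.put(20, "twenty");

        // Lowest key according to the map's ordering.
        System.out.println(map.firstKey()); // 10

        // firstKey() throws on an empty map, so guard or catch.
        map.clear();
        try {
            map.firstKey();
        } catch (NoSuchElementException e) {
            System.out.println("empty map has no first key");
        }
    }
}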

Usage

From source file: org.biomart.configurator.controller.MartController.java

/**
 * @param ss
 * @param fksToBeDropped
 * @param dmd
 * @param schema
 * @param catalog
 * @throws SQLException
 * @throws DataModelException
 */
public void synchroniseKeysUsingDMD(final SourceSchema ss, final Collection<ForeignKey> fksToBeDropped,
        final DatabaseMetaData dmd, final String schema, final String catalog)
        throws SQLException, DataModelException {
    Log.debug("Running DMD key synchronisation");
    // Loop through all the tables in the database, which is the same
    // as looping through all the primary keys.
    Log.debug("Finding tables");
    for (final Iterator<Table> i = ss.getTables().iterator(); i.hasNext();) {

        // Obtain the table and its primary key.
        final SourceTable pkTable = (SourceTable) i.next();
        final PrimaryKey pk = pkTable.getPrimaryKey();
        // Skip all tables which have no primary key.
        if (pk == null)
            continue;

        Log.debug("Processing primary key " + pk);

        // Make a list of relations that already exist in this schema,
        // from some previous run. Any relations that are left in this
        // list by the end of the loop for this table no longer exist in
        // the database, and will be dropped.
        final Collection<Relation> relationsToBeDropped = new TreeSet<Relation>(pk.getRelations()); // Tree for order

        // Identify all foreign keys in the database metadata that refer
        // to the current primary key.
        Log.debug("Finding referring foreign keys");
        String searchCatalog = catalog;
        String searchSchema = schema;
        final ResultSet dbTblFKCols = dmd.getExportedKeys(searchCatalog, searchSchema, pkTable.getName());

        // Loop through the results. There will be one result row per
        // column per key, so we need to build up a set of key columns
        // in a map.
        // The map keys represent the column position within a key. Each
        // map value is a list of columns. In essence the map is a 2-D
        // representation of the foreign keys which refer to this PK,
        // with the keys of the map (Y-axis) representing the column
        // position in the FK, and the values of the map (X-axis)
        // representing each individual FK. In all cases, FK columns are
        // assumed to be in the same order as the PK columns. The map is
        // sorted by key column position.
        // An assumption is made that the query will return columns from
        // the FK in the same order as all other FKs, ie. all column 1s
        // will be returned before any 2s, and then all 2s will be
        // returned
        // in the same order as the 1s they are associated with, etc.
        final TreeMap<Short, List<Column>> dbFKs = new TreeMap<Short, List<Column>>();
        while (dbTblFKCols.next()) {
            final String fkTblName = dbTblFKCols.getString("FKTABLE_NAME");
            final String fkColName = dbTblFKCols.getString("FKCOLUMN_NAME");
            final Short fkColSeq = Short.valueOf(dbTblFKCols.getShort("KEY_SEQ"));
            if (fkTblName != null && fkTblName.contains("$")) { // exclude ORACLE's temporary tables (unlikely to be
                                                                // found here though)
                continue;
            }

            // Note the column.
            if (!dbFKs.containsKey(fkColSeq))
                dbFKs.put(fkColSeq, new ArrayList<Column>());
            // In some dbs, FKs can be invalid, so we need to check
            // them.
            final Table fkTbl = ss.getTableByName(fkTblName);
            if (fkTbl != null) {
                final Column fkCol = (Column) fkTbl.getColumnByName(fkColName);
                if (fkCol != null)
                    (dbFKs.get(fkColSeq)).add(fkCol);
            }
        }
        dbTblFKCols.close();

        // Sort foreign keys by name (case insensitive)
        for (List<Column> columnList : dbFKs.values()) {
            Collections.sort(columnList);
        }

        // Only construct FKs if we actually found any.
        if (!dbFKs.isEmpty()) {
            // Identify the sequence of the first column, which may be 0
            // or 1, depending on database implementation.
            final int firstColSeq = dbFKs.firstKey().intValue();

            // How many columns are in the PK?
            final int pkColCount = pkTable.getPrimaryKey().getColumns().size();

            // How many FKs do we have?
            final int fkCount = dbFKs.get(dbFKs.firstKey()).size();

            // Loop through the FKs, and construct each one at a time.
            for (int j = 0; j < fkCount; j++) {
                // Set up an array to hold the FK columns.
                final List<Column> candidateFKColumns = new ArrayList<Column>();

                // For each FK column name, look up the actual column in
                // the table.
                for (final Iterator<Map.Entry<Short, List<Column>>> k = dbFKs.entrySet().iterator(); k
                        .hasNext();) {
                    final Map.Entry<Short, List<Column>> entry = k.next();
                    final Short keySeq = (Short) entry.getKey();
                    // Convert the db-specific column index to a
                    // 0-indexed figure for the array of fk columns.
                    final int fkColSeq = keySeq.intValue() - firstColSeq;
                    candidateFKColumns.add((Column) (entry.getValue()).get(j));
                }

                // Create a template foreign key based around the set
                // of candidate columns we found.
                ForeignKey fkObject;
                try {
                    List<Column> columns = new ArrayList<Column>();
                    for (int k = 0; k < candidateFKColumns.size(); k++) {
                        columns.add(candidateFKColumns.get(k));
                    }
                    fkObject = new ForeignKey(columns);
                    // new KeyController(fkObject);
                } catch (final Throwable t) {
                    throw new BioMartError(t);
                }
                final Table fkTable = fkObject.getTable();

                // If any FK already exists on the target table with the
                // same columns in the same order, then reuse it.
                boolean fkAlreadyExists = false;
                for (final Iterator<ForeignKey> f = fkTable.getForeignKeys().iterator(); f.hasNext()
                        && !fkAlreadyExists;) {
                    final ForeignKey candidateFK = f.next();
                    if (candidateFK.equals(fkObject)) {
                        // Found one. Reuse it!
                        fkObject = candidateFK;
                        // Update the status to indicate that the FK is
                        // backed by the database, if previously it was
                        // handmade.
                        if (fkObject.getStatus().equals(ComponentStatus.HANDMADE))
                            fkObject.setStatus(ComponentStatus.INFERRED);
                        // Remove the FK from the list to be dropped
                        // later, as it definitely exists now.
                        fksToBeDropped.remove(candidateFK);
                        // Flag the key as existing.
                        fkAlreadyExists = true;
                    }
                }

                // Has the key been reused, or is it a new one?
                if (!fkAlreadyExists)
                    try {
                        fkTable.getForeignKeys().add(fkObject);
                        // fkTable.getForeignKeys().add(fk);
                    } catch (final Throwable t) {
                        throw new BioMartError(t);
                    }

                // Work out whether the relation from the FK to
                // the PK should be 1:M or 1:1. The rule is that
                // it will be 1:M in all cases except where the
                // FK table has a PK with identical columns to
                // the FK, in which case it is 1:1, as the FK
                // is unique.
                Cardinality card = Cardinality.MANY_A;
                final PrimaryKey fkPK = fkTable.getPrimaryKey();
                if (fkPK != null && fkObject.getColumns().equals(fkPK.getColumns()))
                    card = Cardinality.ONE;

                // Check to see if it already has a relation.
                boolean relationExists = false;
                for (final Iterator<Relation> f = fkObject.getRelations().iterator(); f.hasNext();) {
                    // Obtain the next relation.
                    final Relation candidateRel = f.next();

                    // a) a relation already exists between the FK
                    // and the PK.
                    if (candidateRel.getOtherKey(fkObject).equals(pk)) {
                        // If cardinality matches, make it
                        // inferred. If doesn't match, make it
                        // modified and update original cardinality.
                        try {
                            if (card.equals(candidateRel.getCardinality())) {
                                if (!candidateRel.getStatus().equals(ComponentStatus.INFERRED_INCORRECT))
                                    candidateRel.setStatus(ComponentStatus.INFERRED);
                            } else {
                                if (!candidateRel.getStatus().equals(ComponentStatus.INFERRED_INCORRECT))
                                    candidateRel.setStatus(ComponentStatus.MODIFIED);
                                candidateRel.setOriginalCardinality(card);
                            }
                        } catch (final AssociationException ae) {
                            throw new BioMartError(ae);
                        }
                        // Don't drop it at the end of the loop.
                        relationsToBeDropped.remove(candidateRel);
                        // Say we've found it.
                        relationExists = true;
                    }

                    // b) a handmade relation exists elsewhere which
                    // should not be dropped. All other relations
                    // elsewhere will be dropped.
                    else if (candidateRel.getStatus().equals(ComponentStatus.HANDMADE))
                        // Don't drop it at the end of the loop.
                        relationsToBeDropped.remove(candidateRel);
                }

                // If relation did not already exist, create it.
                if (!relationExists && !pk.equals(fkObject)) {
                    // Establish the relation.
                    try {
                        new RelationSource(pk, fkObject, card);
                        // pk.getObject().addRelation(relation);
                        // fk.getObject().addRelation(relation);
                    } catch (final Throwable t) {
                        throw new BioMartError(t);
                    }
                }
            }
        }

        // Remove any relations that we didn't find in the database (but
        // leave the handmade ones behind).
        for (final Iterator<Relation> j = relationsToBeDropped.iterator(); j.hasNext();) {
            final Relation r = j.next();
            if (r.getStatus().equals(ComponentStatus.HANDMADE))
                continue;
            r.getFirstKey().removeRelation(r);
            r.getSecondKey().removeRelation(r);
        }
    }
}
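
The example above leans on the TreeMap in two ways: firstKey() reveals whether the driver numbers KEY_SEQ from 0 or 1, and the sorted iteration order guarantees that FK columns come out in key-sequence order. Below is a standalone sketch of that normalization using plain column-name strings in place of BioMart's Column type; the column names are hypothetical:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.TreeMap;

public class KeySeqNormalization {
    public static void main(String[] args) {
        // KEY_SEQ -> columns at that position; the j-th entry of each list
        // belongs to the j-th foreign key.
        TreeMap<Short, List<String>> dbFKs = new TreeMap<>();
        dbFKs.put((short) 1, Arrays.asList("order_id", "invoice_id"));
        dbFKs.put((short) 2, Arrays.asList("order_line", "invoice_line"));

        // Some drivers number KEY_SEQ from 0, others from 1; firstKey() tells us which.
        final int firstColSeq = dbFKs.firstKey().intValue();

        // One FK per list entry at the first key position.
        final int fkCount = dbFKs.get(dbFKs.firstKey()).size();
        for (int j = 0; j < fkCount; j++) {
            List<String> fkColumns = new ArrayList<>();
            for (List<String> columnsAtPosition : dbFKs.values()) {
                // TreeMap iterates in ascending KEY_SEQ order.
                fkColumns.add(columnsAtPosition.get(j));
            }
            System.out.println("FK " + j + ": " + fkColumns + " (seq base " + firstColSeq + ")");
        }
    }
}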

From source file: org.apache.pdfbox.pdfparser.NonSequentialPDFParser.java

/**
 * Will parse every object necessary to load a single page from the pdf document.
 * We try our best to order objects according to offset in file before reading
 * to minimize seek operations.
 * 
 * @param dict the COSObject from the parent pages.
 * @param excludeObjects dictionary object reference entries with these names will not be parsed
 * 
 * @throws IOException
 */
private void parseDictObjects(COSDictionary dict, COSName... excludeObjects) throws IOException {
    // ---- create queue for objects waiting for further parsing
    final Queue<COSBase> toBeParsedList = new LinkedList<COSBase>();
    // offset ordered object map
    final TreeMap<Long, List<COSObject>> objToBeParsed = new TreeMap<Long, List<COSObject>>();
    // in case of compressed objects offset points to stmObj
    final Set<Long> parsedObjects = new HashSet<Long>();
    final Set<Long> addedObjects = new HashSet<Long>();

    // ---- add objects not to be parsed to list of already parsed objects
    if (excludeObjects != null) {
        for (COSName objName : excludeObjects) {
            COSBase baseObj = dict.getItem(objName);
            if (baseObj instanceof COSObject) {
                parsedObjects.add(getObjectId((COSObject) baseObj));
            }
        }
    }

    addNewToList(toBeParsedList, dict.getValues(), addedObjects);

    // ---- go through objects to be parsed
    while (!(toBeParsedList.isEmpty() && objToBeParsed.isEmpty())) {
        // -- first get all COSObject from other kind of objects and
        //    put them in objToBeParsed; afterwards toBeParsedList is empty
        COSBase baseObj;
        while ((baseObj = toBeParsedList.poll()) != null) {
            if (baseObj instanceof COSStream) {
                addNewToList(toBeParsedList, ((COSStream) baseObj).getValues(), addedObjects);
            } else if (baseObj instanceof COSDictionary) {
                addNewToList(toBeParsedList, ((COSDictionary) baseObj).getValues(), addedObjects);
            } else if (baseObj instanceof COSArray) {
                final Iterator<COSBase> arrIter = ((COSArray) baseObj).iterator();
                while (arrIter.hasNext()) {
                    addNewToList(toBeParsedList, arrIter.next(), addedObjects);
                }
            } else if (baseObj instanceof COSObject) {
                COSObject obj = (COSObject) baseObj;
                long objId = getObjectId(obj);
                COSObjectKey objKey = new COSObjectKey(obj.getObjectNumber().intValue(),
                        obj.getGenerationNumber().intValue());

                if (!(parsedObjects.contains(objId) /*|| document.hasObjectInPool( objKey ) */ )) {
                    Long fileOffset = xrefTrailerResolver.getXrefTable().get(objKey);
                    //  it is allowed that object references point to null, thus we have to test
                    if (fileOffset != null) {
                        if (fileOffset > 0) {
                            objToBeParsed.put(fileOffset, Collections.singletonList(obj));
                        } else {
                            // negative offset means we have a compressed object within object stream;
                            // get offset of object stream
                            fileOffset = xrefTrailerResolver.getXrefTable()
                                    .get(new COSObjectKey(-fileOffset, 0));
                            if ((fileOffset == null) || (fileOffset <= 0)) {
                                throw new IOException(
                                        "Invalid object stream xref object reference: " + fileOffset);
                            }

                            List<COSObject> stmObjects = objToBeParsed.get(fileOffset);
                            if (stmObjects == null) {
                                objToBeParsed.put(fileOffset, stmObjects = new ArrayList<COSObject>());
                            }
                            stmObjects.add(obj);
                        }
                    } else {
                        // NULL object
                        COSObject pdfObject = document.getObjectFromPool(objKey);
                        pdfObject.setObject(COSNull.NULL);
                    }
                }
            }
        }

        // ---- read first COSObject with smallest offset;
        //      resulting object will be added to toBeParsedList
        if (objToBeParsed.isEmpty()) {
            break;
        }

        for (COSObject obj : objToBeParsed.remove(objToBeParsed.firstKey())) {
            COSBase parsedObj = parseObjectDynamically(obj, false);

            obj.setObject(parsedObj);
            addNewToList(toBeParsedList, parsedObj, addedObjects);

            parsedObjects.add(getObjectId(obj));
        }
    }
}
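
The TreeMap here acts as an offset-ordered worklist: objects are keyed by their file offset, and remove(objToBeParsed.firstKey()) always hands back the batch at the smallest offset, so the parser reads the file roughly front to back. A minimal sketch of that pattern, with hypothetical object labels standing in for COSObjects:

import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;

public class OffsetOrderedWorklist {
    public static void main(String[] args) {
        // file offset -> objects stored at that offset
        TreeMap<Long, List<String>> toParse = new TreeMap<>();
        add(toParse, 9200L, "obj 12");
        add(toParse, 1024L, "obj 3");
        add(toParse, 9200L, "obj 13"); // objects in one stream share an offset

        // Drain in ascending offset order to minimize seeks.
        while (!toParse.isEmpty()) {
            long offset = toParse.firstKey();
            for (String obj : toParse.remove(offset)) {
                System.out.println("parse " + obj + " @ " + offset);
            }
        }
    }

    private static void add(TreeMap<Long, List<String>> map, long offset, String obj) {
        map.computeIfAbsent(offset, k -> new ArrayList<>()).add(obj);
    }
}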

From source file: org.apache.pdfbox.pdfparser.COSParser.java

/**
 * Will parse every object necessary to load a single page from the pdf document. We try our
 * best to order objects according to offset in file before reading to minimize seek operations.
 *
 * @param dict the COSObject from the parent pages.
 * @param excludeObjects dictionary object reference entries with these names will not be parsed
 *
 * @throws IOException if something went wrong
 */
protected void parseDictObjects(COSDictionary dict, COSName... excludeObjects) throws IOException {
    // ---- create queue for objects waiting for further parsing
    final Queue<COSBase> toBeParsedList = new LinkedList<COSBase>();
    // offset ordered object map
    final TreeMap<Long, List<COSObject>> objToBeParsed = new TreeMap<Long, List<COSObject>>();
    // in case of compressed objects offset points to stmObj
    final Set<Long> parsedObjects = new HashSet<Long>();
    final Set<Long> addedObjects = new HashSet<Long>();

    addExcludedToList(excludeObjects, dict, parsedObjects);
    addNewToList(toBeParsedList, dict.getValues(), addedObjects);

    // ---- go through objects to be parsed
    while (!(toBeParsedList.isEmpty() && objToBeParsed.isEmpty())) {
        // -- first get all COSObject from other kind of objects and
        // put them in objToBeParsed; afterwards toBeParsedList is empty
        COSBase baseObj;
        while ((baseObj = toBeParsedList.poll()) != null) {
            if (baseObj instanceof COSDictionary) {
                addNewToList(toBeParsedList, ((COSDictionary) baseObj).getValues(), addedObjects);
            } else if (baseObj instanceof COSArray) {
                final Iterator<COSBase> arrIter = ((COSArray) baseObj).iterator();
                while (arrIter.hasNext()) {
                    addNewToList(toBeParsedList, arrIter.next(), addedObjects);
                }
            } else if (baseObj instanceof COSObject) {
                COSObject obj = (COSObject) baseObj;
                long objId = getObjectId(obj);
                COSObjectKey objKey = new COSObjectKey(obj.getObjectNumber(), obj.getGenerationNumber());

                if (!parsedObjects.contains(objId)) {
                    Long fileOffset = xrefTrailerResolver.getXrefTable().get(objKey);
                    // it is allowed that object references point to null,
                    // thus we have to test
                    if (fileOffset != null && fileOffset != 0) {
                        if (fileOffset > 0) {
                            objToBeParsed.put(fileOffset, Collections.singletonList(obj));
                        } else {
                            // negative offset means we have a compressed
                            // object within object stream;
                            // get offset of object stream
                            fileOffset = xrefTrailerResolver.getXrefTable()
                                    .get(new COSObjectKey((int) -fileOffset, 0));
                            if ((fileOffset == null) || (fileOffset <= 0)) {
                                throw new IOException("Invalid object stream xref object reference for key '"
                                        + objKey + "': " + fileOffset);
                            }

                            List<COSObject> stmObjects = objToBeParsed.get(fileOffset);
                            if (stmObjects == null) {
                                stmObjects = new ArrayList<COSObject>();
                                objToBeParsed.put(fileOffset, stmObjects);
                            }
                            stmObjects.add(obj);
                        }
                    } else {
                        // NULL object
                        COSObject pdfObject = document.getObjectFromPool(objKey);
                        pdfObject.setObject(COSNull.NULL);
                    }
                }
            }
        }

        // ---- read first COSObject with smallest offset
        // resulting object will be added to toBeParsedList
        if (objToBeParsed.isEmpty()) {
            break;
        }

        for (COSObject obj : objToBeParsed.remove(objToBeParsed.firstKey())) {
            COSBase parsedObj = parseObjectDynamically(obj, false);

            obj.setObject(parsedObj);
            addNewToList(toBeParsedList, parsedObj, addedObjects);

            parsedObjects.add(getObjectId(obj));
        }
    }
}
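
Both PDFBox variants fetch the smallest entry in two steps, firstKey() followed by remove(). NavigableMap also offers pollFirstEntry(), which retrieves and removes the first entry in a single call and returns null instead of throwing once the map is empty; a small sketch of the equivalence (not PDFBox code):

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class PollFirstEntryDemo {
    public static void main(String[] args) {
        TreeMap<Long, List<String>> objToBeParsed = new TreeMap<>();
        objToBeParsed.put(512L, Collections.singletonList("obj A"));
        objToBeParsed.put(64L, Collections.singletonList("obj B"));

        // Retrieve-and-remove the lowest-offset entry in one call.
        Map.Entry<Long, List<String>> first;
        while ((first = objToBeParsed.pollFirstEntry()) != null) {
            System.out.println(first.getKey() + " -> " + first.getValue());
        }
    }
}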

From source file: snpviewer.SnpViewer.java

public void writeRegionToFile(final String chromosome, final double start, final double end) {
    /* get coordinates of selection and report back;
     * write SNPs in region to file
     */
    FileChooser fileChooser = new FileChooser();
    FileChooser.ExtensionFilter extFilter = new FileChooser.ExtensionFilter("Excel (*.xlsx)", "*.xlsx");
    fileChooser.getExtensionFilters().add(extFilter);
    fileChooser.setTitle("Write region to Excel file (.xlsx)...");
    File rFile = fileChooser.showSaveDialog(mainWindow);
    if (rFile == null) {
        return;
    } else if (!rFile.getName().endsWith(".xlsx")) {
        rFile = new File(rFile.getAbsolutePath() + ".xlsx");
    }
    final File regionFile = rFile;
    final Task<Boolean> writeTask = new Task<Boolean>() {
        @Override
        protected Boolean call() throws Exception {
            try {

                updateProgress(-1, -1);
                ArrayList<SnpFile> bothFiles = new ArrayList<>();
                bothFiles.addAll(affFiles);
                bothFiles.addAll(unFiles);
                TreeMap<Integer, HashMap<String, String>> coordMap = new TreeMap<>();
                /* coordMap: key is position; the inner map's key is the
                 * input filename and its value is the call
                 */
                HashMap<Integer, String> coordToId = new HashMap<>();
                double progress = 0;
                double total = bothFiles.size() * 5;
                try {
                    BufferedOutputStream out = new BufferedOutputStream(new FileOutputStream(regionFile));
                    Workbook wb = new XSSFWorkbook();
                    Sheet sheet = wb.createSheet();
                    int rowNo = 0;
                    Row row = sheet.createRow(rowNo++);
                    for (SnpFile f : bothFiles) {
                        if (isCancelled()) {
                            return false;
                        }
                        updateProgress(++progress, total);
                        updateMessage("Reading region in " + f.inputFile.getName());
                        List<SnpFile.SnpLine> lines = f.getSnpsInRegion(chromosome, (int) start, (int) end);
                        for (SnpFile.SnpLine snpLine : lines) {
                            if (isCancelled()) {
                                return false;
                            }
                            Integer coord = snpLine.getPosition();
                            if (!coordMap.containsKey(coord)) {
                                coordMap.put(coord, new HashMap<String, String>());
                            }
                            String filename = f.inputFile.getName();
                            String rsId = snpLine.getId();
                            String call = snpLine.getCall();
                            coordMap.get(coord).put(filename, call);
                            coordToId.put(coord, rsId);
                        }
                    }
                    Cell cell = row.createCell(0);
                    cell.setCellValue(
                            "chr" + chromosome + ":" + coordMap.firstKey() + "-" + coordMap.lastKey());
                    row = sheet.createRow(rowNo++);
                    cell = row.createCell(0);
                    cell.setCellValue(
                            coordToId.get(coordMap.firstKey()) + ";" + coordToId.get(coordMap.lastKey()));
                    row = sheet.createRow(rowNo++);
                    int colNo = 0;
                    cell = row.createCell(colNo++);
                    cell.setCellValue("Position");
                    cell = row.createCell(colNo++);
                    cell.setCellValue("rsID");
                    for (SnpFile f : bothFiles) {
                        cell = row.createCell(colNo++);
                        if (f.getSampleName() != null && f.getSampleName().length() > 0) {
                            cell.setCellValue(f.getSampleName());
                        } else {
                            cell.setCellValue(f.getInputFileName());
                        }
                    }
                    progress = coordMap.size();
                    total = 5 * coordMap.size();
                    updateMessage("Writing region to file...");
                    for (Entry current : coordMap.entrySet()) {
                        if (isCancelled()) {
                            return false;
                        }
                        progress += 4;
                        updateProgress(progress, total);
                        row = sheet.createRow(rowNo++);
                        colNo = 0;
                        Integer coord = (Integer) current.getKey();
                        cell = row.createCell(colNo++);
                        cell.setCellValue(coord);
                        String rsId = coordToId.get(coord);
                        cell = row.createCell(colNo++);
                        cell.setCellValue(rsId);
                        HashMap<String, String> fileToCall = (HashMap<String, String>) current.getValue();
                        for (SnpFile f : bothFiles) {
                            cell = row.createCell(colNo++);
                            if (fileToCall.containsKey(f.inputFile.getName())) {
                                cell.setCellValue(fileToCall.get(f.inputFile.getName()));
                            } else {
                                cell.setCellValue("-");
                            }
                        }
                    }
                    CellRangeAddress[] regions = { new CellRangeAddress(0, rowNo, 2, 2 + bothFiles.size()) };
                    SheetConditionalFormatting sheetCF = sheet.getSheetConditionalFormatting();

                    ConditionalFormattingRule rule1 = sheetCF
                            .createConditionalFormattingRule(ComparisonOperator.EQUAL, "\"AA\"");
                    PatternFormatting fill1 = rule1.createPatternFormatting();
                    fill1.setFillBackgroundColor(IndexedColors.LIGHT_GREEN.index);
                    fill1.setFillPattern(PatternFormatting.SOLID_FOREGROUND);
                    ConditionalFormattingRule rule2 = sheetCF
                            .createConditionalFormattingRule(ComparisonOperator.EQUAL, "\"BB\"");
                    PatternFormatting fill2 = rule2.createPatternFormatting();
                    fill2.setFillBackgroundColor(IndexedColors.PALE_BLUE.index);
                    fill2.setFillPattern(PatternFormatting.SOLID_FOREGROUND);
                    ConditionalFormattingRule rule3 = sheetCF
                            .createConditionalFormattingRule(ComparisonOperator.EQUAL, "\"AB\"");
                    PatternFormatting fill3 = rule3.createPatternFormatting();
                    fill3.setFillBackgroundColor(IndexedColors.ROSE.index);
                    fill3.setFillPattern(PatternFormatting.SOLID_FOREGROUND);
                    sheetCF.addConditionalFormatting(regions, rule3, rule2);
                    sheetCF.addConditionalFormatting(regions, rule1);
                    wb.write(out);
                    out.close();
                    return true;
                } catch (IOException ex) {
                    return false;
                }
            } catch (Exception ex) {
                return false;
            }
        }
    };//end of task

    setProgressMode(true);
    progressBar.progressProperty().bind(writeTask.progressProperty());
    progressMessage.textProperty().bind(writeTask.messageProperty());
    writeTask.setOnSucceeded(new EventHandler<WorkerStateEvent>() {
        @Override
        public void handle(WorkerStateEvent e) {
            if (Boolean.TRUE.equals(e.getSource().getValue())) {
                Dialogs.showInformationDialog(null,
                        "Region written to file " + "(" + regionFile.getName() + ") successfully",
                        "Region Written", "SNP Viewer");
            } else {
                Dialogs.showErrorDialog(null, "Region write failed.", "Write Failed", "SNP Viewer");
            }
            setProgressMode(false);
            progressBar.progressProperty().unbind();
            progressBar.progressProperty().set(0);
            progressMessage.textProperty().unbind();
            progressMessage.setText("");
            progressTitle.setText("");

        }

    });
    writeTask.setOnFailed(new EventHandler<WorkerStateEvent>() {
        @Override
        public void handle(WorkerStateEvent e) {
            setProgressMode(false);
            progressBar.progressProperty().unbind();
            progressBar.progressProperty().set(0);
            progressMessage.textProperty().unbind();
            progressMessage.setText("");
            progressTitle.setText("Region write failed!");
            Dialogs.showErrorDialog(null, "Error writing region to file\n", "Region write error", "SNP Viewer",
                    e.getSource().getException());

        }

    });
    writeTask.setOnCancelled(new EventHandler<WorkerStateEvent>() {
        @Override
        public void handle(WorkerStateEvent e) {
            progressMessage.setText("Region write cancelled");
            progressTitle.setText("Cancelled");
            setProgressMode(false);
            progressBar.progressProperty().unbind();
            progressBar.progressProperty().set(0);
            Dialogs.showErrorDialog(null, "Error writing region to file\n", "Region write error", "SNP Viewer");
        }

    });
    cancelButton.setOnAction(new EventHandler<ActionEvent>() {
        @Override
        public void handle(ActionEvent actionEvent) {
            writeTask.cancel();

        }
    });
    progressTitle.setText("Writing region to .xlsx file");
    new Thread(writeTask).start();
}
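
Because coordMap is sorted by position, coordMap.firstKey() and coordMap.lastKey() give the region bounds for the header row without any scanning. A minimal sketch of that header construction, with a hypothetical chromosome and positions:

import java.util.TreeMap;

public class RegionLabel {
    public static void main(String[] args) {
        // position -> genotype call at that position (hypothetical values)
        TreeMap<Integer, String> coordMap = new TreeMap<>();
        coordMap.put(15_000_000, "AA");
        coordMap.put(15_250_000, "AB");
        coordMap.put(14_800_000, "BB");

        // firstKey()/lastKey() bound the region in O(log n).
        String header = "chr7:" + coordMap.firstKey() + "-" + coordMap.lastKey();
        System.out.println(header); // chr7:14800000-15250000
    }
}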

From source file: nl.rivm.cib.episim.model.disease.infection.MSEIRSPlot.java

@Override
public void start(final Stage stage) {
    final SIRConfig conf = ConfigFactory.create(SIRConfig.class);
    final double[] t = conf.t();
    final long[] pop = conf.population();
    final double n0 = Arrays.stream(pop).sum();
    final String[] colors = conf.colors(), colors2 = conf.colors2();

    final Pane plot = new Pane();
    plot.setPrefSize(400, 300);
    plot.setMinSize(50, 50);

    final NumberAxis xAxis = new NumberAxis(t[0], t[1], (t[1] - t[0]) / 10);
    final NumberAxis yAxis = new NumberAxis(0, n0, n0 / 10);
    final Pane axes = new Pane();
    axes.prefHeightProperty().bind(plot.heightProperty());
    axes.prefWidthProperty().bind(plot.widthProperty());

    xAxis.setSide(Side.BOTTOM);
    xAxis.setMinorTickVisible(false);
    xAxis.setPrefWidth(axes.getPrefWidth());
    xAxis.prefWidthProperty().bind(axes.widthProperty());
    xAxis.layoutYProperty().bind(axes.heightProperty());

    yAxis.setSide(Side.LEFT);
    yAxis.setMinorTickVisible(false);
    yAxis.setPrefHeight(axes.getPrefHeight());
    yAxis.prefHeightProperty().bind(axes.heightProperty());
    yAxis.layoutXProperty().bind(Bindings.subtract(1, yAxis.widthProperty()));
    axes.getChildren().setAll(xAxis, yAxis);

    final Label lbl = new Label(String.format("R0=%.1f, recovery=%.1ft\nSIR(0)=%s", conf.reproduction(),
            conf.recovery(), Arrays.toString(pop)));
    lbl.setTextAlignment(TextAlignment.CENTER);
    lbl.setTextFill(Color.WHITE);

    final Path[] deterministic = { new Path(), new Path(), new Path() };
    IntStream.range(0, pop.length).forEach(i -> {
        final Color color = Color.valueOf(colors[i]);
        final Path path = deterministic[i];
        path.setStroke(color.deriveColor(0, 1, 1, 0.6));
        path.setStrokeWidth(2);
        path.setClip(new Rectangle(0, 0, plot.getPrefWidth(), plot.getPrefHeight()));
    });

    plot.getChildren().setAll(axes);

    // fill paths with integration estimates
    final double xl = xAxis.getLowerBound(), sx = plot.getPrefWidth() / (xAxis.getUpperBound() - xl),
            yh = plot.getPrefHeight(), sy = yh / (yAxis.getUpperBound() - yAxis.getLowerBound());
    final TreeMap<Double, Integer> iDeterministic = new TreeMap<>();

    MSEIRSTest.deterministic(conf, () -> new DormandPrince853Integrator(1.0E-8, 10, 1.0E-20, 1.0E-20))
            .subscribe(yt -> {
                iDeterministic.put(yt.getKey(), deterministic[0].getElements().size());
                final double[] y = yt.getValue();
                final double x = (yt.getKey() - xl) * sx;
                for (int i = 0; i < y.length; i++) {
                    final double yi = yh - y[i] * sy;
                    final PathElement di = deterministic[i].getElements().isEmpty() ? new MoveTo(x, yi)
                            : new LineTo(x, yi);
                    deterministic[i].getElements().add(di);
                }
            }, e -> LOG.error("Problem", e), () -> plot.getChildren().addAll(deterministic));

    final Path[] stochasticTau = { new Path(), new Path(), new Path() };
    IntStream.range(0, pop.length).forEach(i -> {
        final Color color = Color.valueOf(colors[i]);
        final Path path = stochasticTau[i];
        path.setStroke(color);
        path.setStrokeWidth(1);
        path.setClip(new Rectangle(0, 0, plot.getPrefWidth(), plot.getPrefHeight()));
    });

    final TreeMap<Double, Integer> iStochasticTau = new TreeMap<>();
    MSEIRSTest.stochasticGillespie(conf).subscribe(yt -> {
        final double x = (yt.getKey() - xl) * sx;
        iStochasticTau.put(yt.getKey(), stochasticTau[0].getElements().size());
        final long[] y = yt.getValue();
        for (int i = 0; i < y.length; i++) {
            final double yi = yh - y[i] * sy;
            final ObservableList<PathElement> path = stochasticTau[i].getElements();
            if (path.isEmpty()) {
                path.add(new MoveTo(x, yi)); // first
            } else {
                final PathElement last = path.get(path.size() - 1);
                final double y_prev = last instanceof MoveTo ? ((MoveTo) last).getY() : ((LineTo) last).getY();
                path.add(new LineTo(x, y_prev));
                path.add(new LineTo(x, yi));
            }
        }
    }, e -> LOG.error("Problem", e), () -> plot.getChildren().addAll(stochasticTau));

    final Path[] stochasticRes = { new Path(), new Path(), new Path() };
    IntStream.range(0, pop.length).forEach(i -> {
        final Color color = Color.valueOf(colors2[i]);
        final Path path = stochasticRes[i];
        path.setStroke(color);
        path.setStrokeWidth(1);
        path.setClip(new Rectangle(0, 0, plot.getPrefWidth(), plot.getPrefHeight()));
    });

    final TreeMap<Double, Integer> iStochasticRes = new TreeMap<>();
    MSEIRSTest.stochasticSellke(conf).subscribe(yt -> {
        final double x = (yt.getKey() - xl) * sx;
        iStochasticRes.put(yt.getKey(), stochasticRes[0].getElements().size());
        final long[] y = yt.getValue();
        for (int i = 0; i < y.length; i++) {
            final double yi = yh - y[i] * sy;
            final ObservableList<PathElement> path = stochasticRes[i].getElements();
            if (path.isEmpty()) {
                path.add(new MoveTo(x, yi)); // first
            } else {
                final PathElement last = path.get(path.size() - 1);
                final double y_prev = last instanceof MoveTo ? ((MoveTo) last).getY() : ((LineTo) last).getY();
                path.add(new LineTo(x, y_prev));
                path.add(new LineTo(x, yi));
            }
        }
    }, e -> LOG.error("Problem", e), () -> plot.getChildren().addAll(stochasticRes));

    // auto-scale on stage/plot resize 
    // FIXME scaling around wrong origin, use ScatterChart?
    //         xAxis.widthProperty()
    //               .addListener( (ChangeListener<Number>) ( observable,
    //                  oldValue, newValue ) ->
    //               {
    //                  final double scale = ((Double) newValue)
    //                        / plot.getPrefWidth();
    //                  plot.getChildren().filtered( n -> n instanceof Path )
    //                        .forEach( n ->
    //                        {
    //                           final Path path = (Path) n;
    //                           path.setScaleX( scale );
    //                           path.setTranslateX( (path
    //                                 .getBoundsInParent().getWidth()
    //                                 - path.getLayoutBounds().getWidth())
    //                                 / 2 );
    //                        } );
    //               } );
    //         plot.heightProperty()
    //               .addListener( (ChangeListener<Number>) ( observable,
    //                  oldValue, newValue ) ->
    //               {
    //                  final double scale = ((Double) newValue)
    //                        / plot.getPrefHeight();
    //                  plot.getChildren().filtered( n -> n instanceof Path )
    //                        .forEach( n ->
    //                        {
    //                           final Path path = (Path) n;
    //                           path.setScaleY( scale );
    //                           path.setTranslateY(
    //                                 (path.getBoundsInParent()
    //                                       .getHeight() * (scale - 1))
    //                                       / 2 );
    //                        } );
    //               } );

    final StackPane layout = new StackPane(lbl, plot);
    layout.setAlignment(Pos.TOP_CENTER);
    layout.setPadding(new Insets(50));
    layout.setStyle("-fx-background-color: rgb(35, 39, 50);");

    final Line vertiCross = new Line();
    vertiCross.setStroke(Color.SILVER);
    vertiCross.setStrokeWidth(1);
    vertiCross.setVisible(false);
    axes.getChildren().add(vertiCross);

    final Tooltip tip = new Tooltip("");
    tip.setAutoHide(false);
    tip.hide();
    axes.setOnMouseExited(ev -> tip.hide());
    axes.setOnMouseMoved(ev -> {
        final Double x = (Double) xAxis.getValueForDisplay(ev.getX());
        if (x > xAxis.getUpperBound() || x < xAxis.getLowerBound()) {
            tip.hide();
            vertiCross.setVisible(false);
            return;
        }
        final Double y = (Double) yAxis.getValueForDisplay(ev.getY());
        if (y > yAxis.getUpperBound() || y < yAxis.getLowerBound()) {
            tip.hide();
            vertiCross.setVisible(false);
            return;
        }
        final double xs = xAxis.getDisplayPosition(x);
        vertiCross.setStartX(xs);
        vertiCross.setStartY(yAxis.getDisplayPosition(0));
        vertiCross.setEndX(xs);
        vertiCross.setEndY(yAxis.getDisplayPosition(yAxis.getUpperBound()));
        vertiCross.setVisible(true);
        final int i = (iDeterministic.firstKey() > x ? iDeterministic.firstEntry()
                : iDeterministic.floorEntry(x)).getValue();
        final Object[] yi = Arrays.stream(deterministic).mapToDouble(p -> getY(p, i))
                .mapToObj(yAxis::getValueForDisplay).map(n -> DecimalUtil.toScale(n, 1)).toArray();
        final int j = (iStochasticTau.firstKey() > x ? iStochasticTau.firstEntry()
                : iStochasticTau.floorEntry(x)).getValue();
        final Object[] yj = Arrays.stream(stochasticTau).mapToDouble(p -> getY(p, j))
                .mapToObj(yAxis::getValueForDisplay).map(n -> DecimalUtil.toScale(n, 0)).toArray();
        final int k = (iStochasticRes.firstKey() > x ? iStochasticRes.firstEntry()
                : iStochasticRes.floorEntry(x)).getValue();
        final Object[] yk = Arrays.stream(stochasticRes).mapToDouble(p -> getY(p, k))
                .mapToObj(yAxis::getValueForDisplay).map(n -> DecimalUtil.toScale(n, 0)).toArray();
        final String txt = String.format("SIR(t=%.1f)\n" + "~det%s\n" + "~tau%s\n" + "~res%s", x,
                Arrays.toString(yi), Arrays.toString(yj), Arrays.toString(yk));

        tip.setText(txt);
        tip.show(axes, ev.getScreenX() - ev.getSceneX() + xs, ev.getScreenY() + 15);
    });

    try {
        stage.getIcons().add(new Image(FileUtil.toInputStream("icon.jpg")));
    } catch (final IOException e) {
        LOG.error("Problem", e);
    }
    stage.setTitle("Deterministic vs. Stochastic");
    stage.setScene(new Scene(layout, Color.rgb(35, 39, 50)));
    //         stage.setOnHidden( ev -> tip.hide() );
    stage.show();
}
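
The tooltip handler above uses the pattern map.firstKey() > x ? map.firstEntry() : map.floorEntry(x), which guards the floor lookup: floorEntry(x) returns null when x lies before the first key, so falling back to firstEntry() keeps the lookup total. A standalone sketch with made-up data:

import java.util.Map;
import java.util.TreeMap;

public class GuardedFloorLookup {
    public static void main(String[] args) {
        // time -> index into a path's element list (hypothetical values)
        TreeMap<Double, Integer> index = new TreeMap<>();
        index.put(0.0, 0);
        index.put(2.5, 10);
        index.put(5.0, 20);

        double x = -1.0; // query left of the first key

        // floorEntry(x) would be null here; fall back to the first entry.
        Map.Entry<Double, Integer> e = index.firstKey() > x ? index.firstEntry() : index.floorEntry(x);
        System.out.println(e.getKey() + " -> " + e.getValue()); // 0.0 -> 0
    }
}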

From source file: io.warp10.continuum.gts.GTSHelper.java

public static List<GeoTimeSerie> chunk(GeoTimeSerie gts, long lastchunk, long chunkwidth, long chunkcount,
        String chunklabel, boolean keepempty, long overlap) throws WarpScriptException {

    if (overlap < 0 || overlap > chunkwidth) {
        throw new WarpScriptException("Overlap cannot exceed chunk width.");
    }

    //
    // Check if 'chunklabel' exists in the GTS labels
    //

    Metadata metadata = gts.getMetadata();

    if (metadata.getLabels().containsKey(chunklabel)) {
        throw new WarpScriptException(
                "Cannot operate on Geo Time Series which already have a label named '" + chunklabel + "'");
    }

    TreeMap<Long, GeoTimeSerie> chunks = new TreeMap<Long, GeoTimeSerie>();

    //
    // If GTS is bucketized, make sure bucketspan is less than boxwidth
    //

    boolean bucketized = GTSHelper.isBucketized(gts);

    if (bucketized) {
        if (gts.bucketspan > chunkwidth) {
            throw new WarpScriptException(
                    "Cannot operate on Geo Time Series with a bucketspan greater than the chunk width.");
        }
    } else {
        // GTS is not bucketized and has 0 values; if lastchunk is 0 as well, return an
        // empty list as we are unable to produce chunks
        if (0 == gts.values && 0L == lastchunk) {
            return new ArrayList<GeoTimeSerie>();
        }
    }

    //
    // Set chunkcount to Integer.MAX_VALUE if it's 0
    //

    boolean zeroChunkCount = false;

    if (0 == chunkcount) {
        chunkcount = Integer.MAX_VALUE;
        zeroChunkCount = true;
    }

    //
    // Sort timestamps in reverse order so we can produce all chunks in O(n)
    //

    GTSHelper.sort(gts, true);

    //
    // Loop on the chunks
    //

    // Index in the timestamp array
    int idx = 0;

    long bucketspan = gts.bucketspan;
    int bucketcount = gts.bucketcount;
    long lastbucket = gts.lastbucket;

    //
    // If lastchunk is 0, use lastbucket or the most recent tick
    //

    if (0 == lastchunk) {
        if (isBucketized(gts)) {
            lastchunk = lastbucket;
        } else {
            // Use the most recent tick
            lastchunk = gts.ticks[0];
            // Make sure lastchunk is aligned on 'chunkwidth' boundary
            if (0 != (lastchunk % chunkwidth)) {
                lastchunk = lastchunk - (lastchunk % chunkwidth) + chunkwidth;
            }
        }
    }

    for (long i = 0; i < chunkcount; i++) {

        // If we have no more values and were not specified a chunk count, exit the loop, we're done
        if (idx >= gts.values && zeroChunkCount) {
            break;
        }

        // Compute chunk bounds
        long chunkend = lastchunk - i * chunkwidth;
        long chunkstart = chunkend - chunkwidth + 1;

        GeoTimeSerie chunkgts = new GeoTimeSerie(lastbucket, bucketcount, bucketspan, 16);

        // Set metadata for the GTS
        chunkgts.setMetadata(metadata);
        // Add 'chunklabel'
        chunkgts.getMetadata().putToLabels(chunklabel, Long.toString(chunkend));

        if (bucketized) {
            // Chunk is outside the GTS, it will be empty 
            if (lastbucket < chunkstart || chunkend <= lastbucket - (bucketcount * bucketspan)) {
                // Add the (empty) chunk if keepempty is true
                if (keepempty || overlap > 0) {
                    chunks.put(chunkend, chunkgts);
                }
                continue;
            }

            // Set the bucketized parameters in the GTS

            // If bucketspan does not divide chunkwidth, chunks won't be bucketized

            if (0 == chunkwidth % bucketspan) {
                chunkgts.bucketspan = bucketspan;
                chunkgts.lastbucket = chunkend;
                chunkgts.bucketcount = (int) ((chunkend - chunkstart + 1) / bucketspan);
            } else {
                chunkgts.bucketspan = 0L;
                chunkgts.lastbucket = 0L;
                chunkgts.bucketcount = 0;
            }
        }

        //
        // Add the datapoints which fall within the current chunk
        //

        // Advance until the current tick is at or before 'chunkend'
        while (idx < gts.values && gts.ticks[idx] > chunkend) {
            idx++;
        }

        // We've exhausted the values
        if (idx >= gts.values) {
            // only add chunk if it's not empty or empty with 'keepempty' set to true
            if (0 != chunkgts.values || (keepempty || overlap > 0)) {
                chunks.put(chunkend, chunkgts);
            }
            continue;
        }

        // The current tick is before the beginning of the current chunk
        if (gts.ticks[idx] < chunkstart) {
            // only add chunk if it's not empty or empty with 'keepempty' set to true
            if (0 != chunkgts.values || (keepempty || overlap > 0)) {
                chunks.put(chunkend, chunkgts);
            }
            continue;
        }

        while (idx < gts.values && gts.ticks[idx] >= chunkstart) {
            GTSHelper.setValue(chunkgts, GTSHelper.tickAtIndex(gts, idx), GTSHelper.locationAtIndex(gts, idx),
                    GTSHelper.elevationAtIndex(gts, idx), GTSHelper.valueAtIndex(gts, idx), false);
            idx++;
        }

        // only add chunk if it's not empty or empty with 'keepempty' set to true
        if (0 != chunkgts.values || (keepempty || overlap > 0)) {
            chunks.put(chunkend, chunkgts);
        }
    }

    //
    // Handle overlapping if need be.
    // We need to iterate over all ticks and add datapoints to each GTS they belong to
    //

    if (overlap > 0) {

        //
        // Check if we need to add a first and a last chunk
        //

        long ts = GTSHelper.tickAtIndex(gts, 0);

        if (ts <= chunks.firstKey() - chunkwidth) {
            Entry<Long, GeoTimeSerie> currentFirst = chunks.firstEntry();
            GeoTimeSerie firstChunk = currentFirst.getValue().cloneEmpty();
            if (GTSHelper.isBucketized(currentFirst.getValue())) {
                firstChunk.lastbucket = firstChunk.lastbucket - firstChunk.bucketspan;
            }
            chunks.put(currentFirst.getKey() - chunkwidth, firstChunk);
        }

        ts = GTSHelper.tickAtIndex(gts, gts.values - 1);

        if (ts >= chunks.lastKey() - chunkwidth + 1 - overlap) {
            Entry<Long, GeoTimeSerie> currentLast = chunks.lastEntry();
            GeoTimeSerie lastChunk = currentLast.getValue().cloneEmpty();
            if (GTSHelper.isBucketized(currentLast.getValue())) {
                lastChunk.lastbucket = lastChunk.lastbucket + lastChunk.bucketspan;
            }
            chunks.put(currentLast.getKey() + chunkwidth, lastChunk);
        }

        //
        // Put all entries in a list so we can access them randomly
        //

        List<Entry<Long, GeoTimeSerie>> allchunks = new ArrayList<Entry<Long, GeoTimeSerie>>(chunks.entrySet());

        int[] currentSizes = new int[allchunks.size()];

        for (int i = 0; i < currentSizes.length; i++) {
            currentSizes[i] = allchunks.get(i).getValue().values;
        }

        //
        // Iterate over chunks, completing with prev and next overlaps
        // Remember the timestamps are in reverse order so far.
        //

        for (int i = 0; i < allchunks.size(); i++) {
            GeoTimeSerie current = allchunks.get(i).getValue();
            long lowerBound = allchunks.get(i).getKey() - chunkwidth + 1 - overlap;
            long upperBound = allchunks.get(i).getKey() + overlap;
            if (i > 0) {
                GeoTimeSerie prev = allchunks.get(i - 1).getValue();
                for (int j = 0; j < currentSizes[i - 1]; j++) {
                    long timestamp = GTSHelper.tickAtIndex(prev, j);
                    if (timestamp < lowerBound) {
                        break;
                    }
                    GTSHelper.setValue(current, timestamp, GTSHelper.locationAtIndex(prev, j),
                            GTSHelper.elevationAtIndex(prev, j), GTSHelper.valueAtIndex(prev, j), false);
                }
            }
            if (i < allchunks.size() - 1) {
                GeoTimeSerie next = allchunks.get(i + 1).getValue();
                for (int j = currentSizes[i + 1] - 1; j >= 0; j--) {
                    long timestamp = GTSHelper.tickAtIndex(next, j);
                    if (timestamp > upperBound) {
                        break;
                    }
                    GTSHelper.setValue(current, timestamp, GTSHelper.locationAtIndex(next, j),
                            GTSHelper.elevationAtIndex(next, j), GTSHelper.valueAtIndex(next, j), false);
                }
            }
        }
    }

    List<GeoTimeSerie> result = new ArrayList<GeoTimeSerie>();

    for (GeoTimeSerie g : chunks.values()) {
        if (!keepempty && 0 == g.values) {
            continue;
        }
        result.add(g);
    }

    return result;
}
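
In the overlap branch, chunks.firstKey()/firstEntry() and lastKey()/lastEntry() let the code grow the sorted chunk map by one chunk on each side before redistributing datapoints. A reduced sketch of that boundary extension, with placeholder chunk values:

import java.util.TreeMap;

public class BoundaryChunks {
    public static void main(String[] args) {
        long chunkwidth = 100;
        TreeMap<Long, String> chunks = new TreeMap<>();
        chunks.put(200L, "chunk@200");
        chunks.put(300L, "chunk@300");

        // Overlap may spill past both ends; extend the map by one chunk
        // keyed relative to the current first and last keys.
        chunks.put(chunks.firstKey() - chunkwidth, "boundary chunk");
        chunks.put(chunks.lastKey() + chunkwidth, "boundary chunk");

        System.out.println(chunks.keySet()); // [100, 200, 300, 400]
    }
}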