Example usage for java.util.Collection.clear()

List of usage examples for java.util.Collection.clear()

Introduction

This page collects example usages of java.util.Collection.clear() from real-world source files.

Prototype

void clear();

Document

Removes all of the elements from this collection (optional operation).
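
Before the usage examples below, a minimal sketch of the contract (assuming Java 9+ for List.of): clear() empties a mutable collection in place, while a collection that does not support the optional operation throws UnsupportedOperationException.

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

public class ClearDemo {
    public static void main(String[] args) {
        Collection<String> names = new ArrayList<>(List.of("alice", "bob"));
        names.clear();                       // removes every element
        System.out.println(names.isEmpty()); // true

        // clear() is an optional operation: immutable collections reject it
        Collection<String> fixed = List.of("alice", "bob");
        try {
            fixed.clear();
        } catch (UnsupportedOperationException e) {
            System.out.println("clear() not supported on immutable collections");
        }
    }
}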

Usage

From source file:com.nextep.designer.dbgm.services.impl.DataService.java

@Override
public void loadDataLinesFromRepository(IDataSet dataSet, IProgressMonitor m) {
    SubMonitor monitor = SubMonitor.convert(m, 100000);

    // Check if already loaded
    if (dataSet.getStorageHandle() != null || dataSet.getUID() == null) {
        return;
    } else {
        storageService.createDataSetStorage(dataSet);
    }

    // Make sure that data set version is tagged
    Session session = HibernateUtil.getInstance().getSandBoxSession();
    final IVersionInfo version = VersionHelper.getVersionInfo(dataSet);
    Long versionTag = version.getVersionTag();
    long computedVersionTag = VersionHelper.computeVersion(version);
    if (versionTag == null || versionTag.longValue() != computedVersionTag) {
        version.setVersionTag(computedVersionTag);
        CorePlugin.getIdentifiableDao().save(version, true, session, true);
    }
    // Connecting explicitly to repository
    Connection repoConn = null;
    PreparedStatement stmt = null;
    ResultSet rset = null;
    // Building reference map to avoid instantiating references
    final Map<Long, IReference> colRefMap = new HashMap<Long, IReference>();
    for (IReference colRef : dataSet.getColumnsRef()) {
        colRefMap.put(colRef.getUID().rawId(), colRef);
    }
    // Building version tree id list
    List<Long> idList = buildVersionIdHistoryList(version);
    try {
        repoConn = getRepositoryConnection();
        monitor.subTask(DBGMMessages.getString("service.data.executingRepositoryQuery")); //$NON-NLS-1$
        final String selectStmt = buildSelectRepositoryValuesStmt(idList);
        stmt = repoConn.prepareStatement(selectStmt);
        // "SELECT dlc.dset_row_id, dlv.column_refid, dlv.column_value "
        // + "FROM dbgm_dset_rows dlc LEFT JOIN dbgm_dset_rows dln "
        // + "       ON dln.dset_refid = dlc.dset_refid "
        // + "      AND dln.dset_row_id = dlc.dset_row_id "
        // + "      AND dln.version_tag > dlc.version_tag "
        // + "      AND dln.version_tag <= ? "
        // + "  JOIN dbgm_dset_row_values dlv ON dlv.drow_id = dlc.drow_id "
        // + "WHERE dlc.dset_refid = ? AND dlc.version_tag <= ? "
        // + "AND dln.dset_refid IS NULL ");
        // "ORDER BY dlc.dlin_no, dlc.version_tag" );
        final long setRefId = version.getReference().getUID().rawId();
        int colIndex = 1;
        stmt.setLong(colIndex++, setRefId);
        for (Long id : idList) {
            stmt.setLong(colIndex++, id);
        }
        stmt.setLong(colIndex++, setRefId);
        // stmt.setLong(4, versionTag);
        rset = stmt.executeQuery();
        monitor.worked(1);
        IDataLine line = typedObjectFactory.create(IDataLine.class);
        long lineId;
        boolean isEmpty = true;

        // Preparing line buffer
        Collection<IDataLine> bufferedLines = new ArrayList<IDataLine>(LINE_BUFFER_SIZE);
        long counter = 0;
        while (rset.next()) {
            if (monitor.isCanceled()) {
                return;
            }
            lineId = rset.getLong(1);

            // If new row id, new line
            if (line.getRowId() != lineId && !isEmpty) {
                bufferedLines.add(line);
                if (bufferedLines.size() >= LINE_BUFFER_SIZE) {
                    counter += LINE_BUFFER_SIZE;
                    monitor.subTask(
                            MessageFormat.format(DBGMMessages.getString("service.data.loadedLines"), counter)); //$NON-NLS-1$
                    monitor.worked(LINE_BUFFER_SIZE);
                    addDataline(dataSet, bufferedLines.toArray(new IDataLine[bufferedLines.size()]));
                    bufferedLines.clear();
                }
                line = typedObjectFactory.create(IDataLine.class);
            }
            line.setRowId(lineId);
            isEmpty = false;
            final long colRefId = rset.getLong(2);
            final String strValue = rset.getString(3);
            final IReference colRef = colRefMap.get(colRefId);

            /*
             * We might have unresolved column reference when the column has been removed from
             * the dataset. In this case we simply ignore the value.
             */
            if (colRef != null) {
                final Object value = storageService.decodeValue(colRef, strValue);
                // Preparing column value
                final IColumnValue colValue = typedObjectFactory.create(IColumnValue.class);
                colValue.setDataLine(line);
                colValue.setColumnRef(colRef);
                colValue.setValue(value);
                line.addColumnValue(colValue);
            }
        }
        if (!isEmpty) {
            bufferedLines.add(line);
            addDataline(dataSet, bufferedLines.toArray(new IDataLine[bufferedLines.size()]));
        }
        monitor.done();
    } catch (SQLException e) {
        throw new ErrorException(
                DBGMMessages.getString("service.data.loadRepositoryDataSetError") + e.getMessage(), e); //$NON-NLS-1$
    } finally {
        safeClose(rset, stmt, repoConn, true);
    }
    handleDataSetStructuralChanges(dataSet);
}
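
The example above accumulates lines in bufferedLines and flushes them in batches, calling clear() to reuse the same backing list. A minimal standalone sketch of that buffer-and-clear idiom (BATCH_SIZE and flush are illustrative names, not part of the original API):

import java.util.ArrayList;
import java.util.Collection;

class BatchLoader {
    private static final int BATCH_SIZE = 1000; // illustrative value

    void load(Iterable<String> rows) {
        Collection<String> buffer = new ArrayList<>(BATCH_SIZE);
        for (String row : rows) {
            buffer.add(row);
            if (buffer.size() >= BATCH_SIZE) {
                flush(buffer);
                buffer.clear(); // reuse the same backing list for the next batch
            }
        }
        if (!buffer.isEmpty()) {
            flush(buffer); // final partial batch
        }
    }

    void flush(Collection<String> batch) {
        // hypothetical sink; the original code calls addDataline(...) here
        System.out.println("flushing " + batch.size() + " rows");
    }
}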

From source file:edu.cmu.ark.QuestionTransducer.java

/**
 * This method removes question objects that have duplicate yields (i.e., output strings). It
 * goes in order so that higher ranked questions, which are expected to appear first, will
 * remain.
 * 
 * @param givenQuestions
 */
public static void removeDuplicateQuestions(Collection<Question> givenQuestions) {
    Map<String, Question> yieldMap = new HashMap<String, Question>();
    String yield;

    //add questions that used NP Clarification first
    for (Question q : givenQuestions) {
        if (q.getFeatureValue("performedNPClarification") == 0.0) {
            continue;
        }
        yield = q.getTree().yield().toString();
        if (yieldMap.containsKey(yield)) {
            if (GlobalProperties.getDebug())
                System.err.println("Removing duplicate: " + yield);
            continue;
        }

        yieldMap.put(yield, q);
    }

    //now add any new questions that don't involve NP Clarification
    for (Question q : givenQuestions) {
        if (q.getFeatureValue("performedNPClarification") == 1.0) {
            continue;
        }
        yield = q.getTree().yield().toString();
        if (yieldMap.containsKey(yield)) {
            if (GlobalProperties.getDebug())
                System.err.println("Removing duplicate: " + yield);

            //if a previous question that involved NP Clarification has the same yield (i.e., text),
            //then mark it as using NP Clarification for the answer only
            Question other = yieldMap.get(yield);
            if (other.getFeatureValue("performedNPClarification") == 1.0
                    && other.getSourceSentenceNumber() == q.getSourceSentenceNumber()) {
                //other.setFeatureValue("performedNPClarificationAnswerOnly", 1.0);
                other.setFeatureValue("performedNPClarification", 0.0);
            }
            continue;
        }

        yieldMap.put(yield, q);
    }

    givenQuestions.clear();
    givenQuestions.addAll(yieldMap.values());
}
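
The clear()/addAll(...) pair at the end is what makes the deduplication visible to the caller: reassigning the givenQuestions parameter would only rebind the local variable. A minimal sketch of the same in-place replacement idiom, assuming a string key instead of a parse-tree yield:

import java.util.ArrayList;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

class DedupInPlace {
    // Keeps the first occurrence of each key, mutating the caller's collection.
    static void dedupByKey(Collection<String> items) {
        Map<String, String> seen = new LinkedHashMap<>();
        for (String item : items) {
            seen.putIfAbsent(item.toLowerCase(), item);
        }
        items.clear();               // empty the caller's collection...
        items.addAll(seen.values()); // ...and refill it with the survivors
    }

    public static void main(String[] args) {
        Collection<String> words = new ArrayList<>(List.of("Ant", "ant", "Bee"));
        dedupByKey(words);
        System.out.println(words); // [Ant, Bee]
    }
}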

From source file:edu.ksu.cis.indus.staticanalyses.concurrency.independence.IndependenceDetectionCLI.java

/**
 * Executes atomicity detection algorithm according to given option.
 *
 * @param cl is the command line.
 * @pre cl != null
 * @param <T> dummy type parameter.
 */
private <T extends ITokens<T, Value>> void execute(final CommandLine cl) {
    setInfoLogger(LOGGER);

    final String _tagName = "AtomicityDetection:FA";
    final IValueAnalyzer<Value> _aa = OFAnalyzer.getFSOSAnalyzer(_tagName,
            TokenUtil.<T, Value, Type>getTokenManager(new SootValueTypeManager()), getStmtGraphFactory());
    final ValueAnalyzerBasedProcessingController _pc = new ValueAnalyzerBasedProcessingController();
    final Collection<IProcessor> _processors = new ArrayList<IProcessor>();
    final PairManager _pairManager = new PairManager(false, true);
    final CallGraphInfo _cgi = new CallGraphInfo(new PairManager(false, true));
    final OFABasedCallInfoCollector _callGraphInfoCollector = new OFABasedCallInfoCollector();
    final IThreadGraphInfo _tgi = new ThreadGraph(_cgi, new CFGAnalysis(_cgi, getBbm()), _pairManager);
    final ValueAnalyzerBasedProcessingController _cgipc = new ValueAnalyzerBasedProcessingController();
    final OneAllStmtSequenceRetriever _ssr = new OneAllStmtSequenceRetriever();

    _ssr.setStmtGraphFactory(getStmtGraphFactory());

    _pc.setStmtSequencesRetriever(_ssr);
    _pc.setAnalyzer(_aa);
    _pc.setProcessingFilter(new TagBasedProcessingFilter(_tagName));

    _cgipc.setStmtSequencesRetriever(_ssr);
    _cgipc.setAnalyzer(_aa);
    _cgipc.setProcessingFilter(new CGBasedProcessingFilter(_cgi));

    final Map _info = new HashMap();
    _info.put(ICallGraphInfo.ID, _cgi);
    _info.put(IThreadGraphInfo.ID, _tgi);
    _info.put(PairManager.ID, _pairManager);
    _info.put(IEnvironment.ID, _aa.getEnvironment());
    _info.put(IValueAnalyzer.ID, _aa);

    final EquivalenceClassBasedEscapeAnalysis _ecba = new EquivalenceClassBasedEscapeAnalysis(_cgi, null,
            getBbm());
    final IEscapeInfo _escapeInfo = _ecba.getEscapeInfo();
    _info.put(IEscapeInfo.ID, _escapeInfo);

    initialize();
    _aa.analyze(getEnvironment(), getRootMethods());

    _callGraphInfoCollector.reset();
    _processors.clear();
    _processors.add(_callGraphInfoCollector);
    _pc.reset();
    _pc.driveProcessors(_processors);
    _cgi.reset();
    _cgi.createCallGraphInfo(_callGraphInfoCollector.getCallInfo());
    writeInfo("CALL GRAPH:\n" + _cgi.toString());

    _processors.clear();
    ((ThreadGraph) _tgi).reset();
    _processors.add((IProcessor) _tgi);
    _cgipc.reset();
    _cgipc.driveProcessors(_processors);
    writeInfo("THREAD GRAPH:\n" + ((ThreadGraph) _tgi).toString());

    final AnalysesController _ac = new AnalysesController(_info, _cgipc, getBbm());
    _ac.addAnalyses(EquivalenceClassBasedEscapeAnalysis.ID, Collections.singleton(_ecba));
    _ac.initialize();
    _ac.execute();
    writeInfo("END: Escape analysis");

    detector.setEscapeAnalysis(_escapeInfo);
    detector.hookup(_cgipc);
    _cgipc.process();
    detector.unhook(_cgipc);
    writeInfo("BEGIN: Independent statement detection");

    final String _optionValue = cl.getOptionValue("scheme", "tag-stmt");

    if (_optionValue.equals("tag-region")) {
        final IndependentRegionDetector _regionDetector = new IndependentRegionDetector();
        _regionDetector.setAtomicityDetector(detector);
        _regionDetector.setBasicBlockGraphMgr(getBbm());
        _regionDetector.hookup(_cgipc);
        _cgipc.process();
        _regionDetector.unhook(_cgipc);
        insertAtomicBoundaries(_regionDetector, _cgi);
    } else {
        annotateAtomicStmts(_cgi);
    }
    writeInfo("END: Independent region detection");
    dumpJimpleAndClassFiles(outputDir, true, false);
}

From source file:org.apache.ant.compress.taskdefs.ArchiveBase.java

/**
 * Checks whether the target is more recent than the resources
 * that shall be added to it.
 *
 * <p>Will only ever be invoked if the target exists.</p>
 *
 * @param src the resources that have been found as sources, may
 * be modified in "update" mode to remove entries that are up to
 * date
 * @param existingEntries the target archive as fileset
 *
 * @return true if the target is up-to-date
 */
protected boolean isUpToDate(Collection/*<ResourceWithFlags>*/ src, ArchiveFileSet existingEntries)
        throws IOException {

    final Resource[] srcResources = new Resource[src.size()];
    int index = 0;
    for (Iterator i = src.iterator(); i.hasNext();) {
        ResourceWithFlags r = (ResourceWithFlags) i.next();
        srcResources[index++] = new MappedResource(r.getResource(), new MergingMapper(r.getName()));
    }
    Resource[] outOfDate = ResourceUtils.selectOutOfDateSources(this, srcResources, new IdentityMapper(),
            existingEntries.getDirectoryScanner(getProject()));
    if (outOfDate.length > 0 && Mode.UPDATE.equals(getMode().getValue())) {
        HashSet/*<String>*/ oodNames = new HashSet/*<String>*/();
        for (int i = 0; i < outOfDate.length; i++) {
            oodNames.add(outOfDate[i].getName());
        }
        List/*<ResourceWithFlags>*/ copy = new LinkedList/*<ResourceWithFlags>*/(src);
        src.clear();
        for (Iterator i = copy.iterator(); i.hasNext();) {
            ResourceWithFlags r = (ResourceWithFlags) i.next();
            if (oodNames.contains(r.getName())) {
                src.add(r);
            }
        }
    }
    return outOfDate.length == 0;
}

From source file:de.vandermeer.asciithemes.TA_Grid.java

@Override
default StrBuilder toDoc() {
    Collection<Object> content = new ArrayList<>();
    ArrayList<StrBuilder> normalGrid = null;
    ArrayList<StrBuilder> strongGrid = null;
    ArrayList<StrBuilder> heavyGrid = null;
    ArrayList<StrBuilder> lightGrid = null;
    ArrayList<StrBuilder> exampleGrid = null;

    int lightRule = this.hasRuleSet(TA_GridConfig.RULESET_LIGHT) ? TA_GridConfig.RULESET_LIGHT
            : TA_GridConfig.RULESET_NORMAL;
    int strongRule = this.hasRuleSet(TA_GridConfig.RULESET_STRONG) ? TA_GridConfig.RULESET_STRONG
            : TA_GridConfig.RULESET_NORMAL;
    int heavyRule = this.hasRuleSet(TA_GridConfig.RULESET_HEAVY) ? TA_GridConfig.RULESET_HEAVY : strongRule;

    content.add(TA_GridConfig.RULESET_NORMAL);
    content.add(Pair.of(TA_GridConfig.RULESET_NORMAL, new String[][] { new String[] { " ", " ", " ", " " } }));
    content.add(TA_GridConfig.RULESET_NORMAL);
    content.add(Pair.of(TA_GridConfig.RULESET_NORMAL, new String[][] { new String[] { " ", " ", " ", " " } }));
    content.add(TA_GridConfig.RULESET_NORMAL);
    normalGrid = this.addGrid(content, TA_GridOptions.THEME_FULL_GRID);

    if (this.hasRuleSet(TA_GridConfig.RULESET_STRONG)) {
        content.clear();
        content.add(TA_GridConfig.RULESET_STRONG);
        content.add(
                Pair.of(TA_GridConfig.RULESET_STRONG, new String[][] { new String[] { " ", " ", " ", " " } }));
        content.add(TA_GridConfig.RULESET_STRONG);
        content.add(
                Pair.of(TA_GridConfig.RULESET_STRONG, new String[][] { new String[] { " ", " ", " ", " " } }));
        content.add(TA_GridConfig.RULESET_STRONG);
        strongGrid = this.addGrid(content, TA_GridOptions.THEME_FULL_GRID);
    }

    if (this.hasRuleSet(TA_GridConfig.RULESET_HEAVY)) {
        content.clear();
        content.add(TA_GridConfig.RULESET_HEAVY);
        content.add(
                Pair.of(TA_GridConfig.RULESET_HEAVY, new String[][] { new String[] { " ", " ", " ", " " } }));
        content.add(TA_GridConfig.RULESET_HEAVY);
        content.add(
                Pair.of(TA_GridConfig.RULESET_HEAVY, new String[][] { new String[] { " ", " ", " ", " " } }));
        content.add(TA_GridConfig.RULESET_HEAVY);
        heavyGrid = this.addGrid(content, TA_GridOptions.THEME_FULL_GRID);
    }

    if (this.hasRuleSet(TA_GridConfig.RULESET_LIGHT)) {
        content.clear();
        content.add(TA_GridConfig.RULESET_LIGHT);
        content.add(
                Pair.of(TA_GridConfig.RULESET_LIGHT, new String[][] { new String[] { " ", " ", " ", " " } }));
        content.add(TA_GridConfig.RULESET_LIGHT);
        content.add(
                Pair.of(TA_GridConfig.RULESET_LIGHT, new String[][] { new String[] { " ", " ", " ", " " } }));
        content.add(TA_GridConfig.RULESET_LIGHT);
        lightGrid = this.addGrid(content, TA_GridOptions.THEME_FULL_GRID);
    }

    content.clear();
    content.add(heavyRule);
    content.add(Pair.of(strongRule, new String[][] { new String[] { " h1  ", " h2  ", " h3  ", " h4  " } }));
    content.add(strongRule);
    content.add(Pair.of(strongRule, new String[][] { new String[] { " c11 to c14 w/col-span " } }
    //                  new String[][]{new String[]{" c11 ", " c12 ", " c13 ", " c14 "}}
    ));
    content.add(TA_GridConfig.RULESET_NORMAL);
    content.add(Pair.of(strongRule,
            //                  new String[][]{new String[]{" c21 & c22 ", " c23 & c24 "}}
            new String[][] { new String[] { " c21 ", " c22 ", " c23 ", " c24 " } }));
    content.add(lightRule);
    content.add(Pair.of(strongRule, new String[][] { new String[] { " c31 - c32 ", " c33 - c34 " } }
    //                  new String[][]{new String[]{" c31 ", " c32 ", " c33 ", " c34 "}}
    ));
    content.add(strongRule);
    content.add(Pair.of(strongRule, new String[][] { new String[] { " f1  ", " f2  ", " f3  ", " f4  " } }));
    content.add(heavyRule);
    exampleGrid = this.addGrid(content, TA_GridOptions.THEME_FULL_GRID);

    String space = "      ";

    if (this.hasRuleSet(TA_GridConfig.RULESET_STRONG)) {
        exampleGrid.add(0, new StrBuilder().append("Normal         Strong         Example"));
    } else if (this.hasRuleSet(TA_GridConfig.RULESET_HEAVY)) {
        exampleGrid.add(0, new StrBuilder().append("Normal                        Example"));
    } else {
        exampleGrid.add(0, new StrBuilder().append("Normal         Example"));
    }
    for (int i = 0; i < normalGrid.size(); i++) {
        if (this.hasRuleSet(TA_GridConfig.RULESET_STRONG)) {
            exampleGrid.get(i + 1).insert(0, new StrBuilder().append(normalGrid.get(i)).append(space)
                    .append(strongGrid.get(i)).append(space));
        } else {
            exampleGrid.get(i + 1).insert(0, new StrBuilder().append(normalGrid.get(i)));
        }
    }

    exampleGrid.get(6).insert(0, new StrBuilder().append("                              "));
    if (this.hasRuleSet(TA_GridConfig.RULESET_LIGHT) && this.hasRuleSet(TA_GridConfig.RULESET_HEAVY)) {
        exampleGrid.get(7).insert(0, new StrBuilder().append("Light          Heavy          "));
    } else if (this.hasRuleSet(TA_GridConfig.RULESET_HEAVY)) {
        exampleGrid.get(7).insert(0, new StrBuilder().append("               Heavy          "));
    } else if (this.hasRuleSet(TA_GridConfig.RULESET_LIGHT)) {
        exampleGrid.get(7).insert(0, new StrBuilder().append("Light                         "));
    } else {
        exampleGrid.get(7).insert(0, new StrBuilder().append("                              "));
    }
    for (int i = 8; i < exampleGrid.size(); i++) {
        if (this.hasRuleSet(TA_GridConfig.RULESET_LIGHT) && this.hasRuleSet(TA_GridConfig.RULESET_HEAVY)) {
            exampleGrid.get(i).insert(0, new StrBuilder().append(lightGrid.get(i - 8)).append(space)
                    .append(heavyGrid.get(i - 8)).append(space));
        } else if (this.hasRuleSet(TA_GridConfig.RULESET_HEAVY)) {
            exampleGrid.get(i).insert(0, new StrBuilder().append(space).append(space).append("   ")
                    .append(heavyGrid.get(i - 8)).append(space));
        } else if (this.hasRuleSet(TA_GridConfig.RULESET_LIGHT)) {
            exampleGrid.get(i).insert(0, new StrBuilder().append(lightGrid.get(i - 8)).append(space)
                    .append(space).append(space).append("   "));
        } else {
            exampleGrid.get(i).insert(0, new StrBuilder().append("                              "));
        }
    }
    if (this.hasRuleSet(TA_GridConfig.RULESET_LIGHT) && this.hasRuleSet(TA_GridConfig.RULESET_HEAVY)) {
        exampleGrid.add(new StrBuilder().append(lightGrid.get(lightGrid.size() - 1)).append(space)
                .append(heavyGrid.get(heavyGrid.size() - 1)));
    } else if (this.hasRuleSet(TA_GridConfig.RULESET_HEAVY)) {
        exampleGrid.add(new StrBuilder().append(space).append(space).append("   ")
                .append(heavyGrid.get(heavyGrid.size() - 1)));
    } else if (this.hasRuleSet(TA_GridConfig.RULESET_LIGHT)) {
        exampleGrid.add(new StrBuilder().append(lightGrid.get(lightGrid.size() - 1)));
    }

    return new StrBuilder().appendWithSeparators(exampleGrid, "\n");
}

From source file:org.geotools.data.complex.DataAccessMappingFeatureIterator.java

/**
 * Return all matching properties from provided root attribute and xPath.
 *
 * @param root
 *            The root attribute to start searching from
 * @param xpath
 *            The xPath matching the attribute
 * @return The matching attributes collection
 */
private Collection<Property> getProperties(ComplexAttribute root, StepList xpath) {

    final StepList steps = new StepList(xpath);

    Iterator<Step> stepsIterator = steps.iterator();
    Collection<Property> properties = null;
    Step step = null;
    if (stepsIterator.hasNext()) {
        step = stepsIterator.next();
        properties = root.getProperties(Types.toTypeName(step.getName()));
    }

    while (stepsIterator.hasNext()) {
        step = stepsIterator.next();
        Collection<Property> nestedProperties = new ArrayList<Property>();
        for (Property property : properties) {
            assert property instanceof ComplexAttribute;
            Collection<Property> tempProperties = ((ComplexAttribute) property)
                    .getProperties(Types.toTypeName(step.getName()));
            if (!tempProperties.isEmpty()) {
                nestedProperties.addAll(tempProperties);
            }
        }
        properties.clear();
        if (nestedProperties.isEmpty()) {
            return properties;
        }
        properties.addAll(nestedProperties);
    }
    return properties;
}
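
Each loop iteration replaces the working set with the next level's matches via clear()/addAll(...). A minimal sketch of this frontier-replacement walk over a simple tree (Node and children are illustrative, not the GeoTools types):

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

class PathWalk {
    static final class Node {
        final String name;
        final List<Node> children = new ArrayList<>();
        Node(String name) { this.name = name; }
    }

    // Returns the nodes reached by following the given name path from root.
    static Collection<Node> walk(Node root, List<String> path) {
        Collection<Node> frontier = new ArrayList<>(List.of(root));
        for (String step : path) {
            Collection<Node> next = new ArrayList<>();
            for (Node n : frontier) {
                for (Node child : n.children) {
                    if (child.name.equals(step)) {
                        next.add(child);
                    }
                }
            }
            frontier.clear();      // drop the current level...
            frontier.addAll(next); // ...and continue from its matches
            if (frontier.isEmpty()) {
                break; // no match at this step
            }
        }
        return frontier;
    }
}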

From source file:org.kuali.student.ap.coursesearch.service.impl.CourseDetailsViewHelperServiceImpl.java

/**
 * Finds the information on the primary instructor from a list of instructors
 *
 * @param instructors - List of instructors participating in activity
 * @return Information on the primary instructor from the list
 */
private OfferingInstructorInfo findDisplayInstructor(List<OfferingInstructorInfo> instructors) {
    OfferingInstructorInfo result = null;

    // If list of instructors is empty return null
    if (instructors != null && !instructors.isEmpty()) {

        // Build the display name for the Instructor
        Collection<OfferingInstructorInfo> highestInstEffortInstructors = new ArrayList<OfferingInstructorInfo>();
        float highestInstEffortComparison = 0f;

        // find instructors with highest participation from the list
        for (OfferingInstructorInfo instructor : instructors) {

            // Only instructors with participation are considered
            if (instructor.getPercentageEffort() != null) {

                // If participation is strictly higher than the current highest, restart the list with this instructor
                if (instructor.getPercentageEffort() > highestInstEffortComparison) {
                    highestInstEffortInstructors.clear();
                    highestInstEffortComparison = instructor.getPercentageEffort();
                    highestInstEffortInstructors.add(instructor);
                }

                // If participation is equal to current highest add instructor to current list
                else if (instructor.getPercentageEffort() == highestInstEffortComparison) {
                    highestInstEffortInstructors.add(instructor);
                }
            }
        }

        // Select instructor
        if (highestInstEffortInstructors.isEmpty()) {
            return result;
        } else if (highestInstEffortInstructors.size() == 1) {
            // If only one candidate remains, use it
            result = highestInstEffortInstructors.iterator().next();
        } else {

            // If multiple instructors with highest participation get first alphabetically
            List<String> names = new ArrayList<String>(highestInstEffortInstructors.size());
            Map<String, OfferingInstructorInfo> nameMap = new HashMap<String, OfferingInstructorInfo>(
                    highestInstEffortInstructors.size());
            for (OfferingInstructorInfo oiInfo : highestInstEffortInstructors) {
                names.add(oiInfo.getPersonName());
                nameMap.put(oiInfo.getPersonName(), oiInfo);
            }
            Collections.sort(names);
            result = nameMap.get(names.get(0));
        }
    }

    return result;
}
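
The clear() call above implements a running arg-max with ties: a strictly higher effort resets the candidate list, while an equal effort joins it. A compact sketch of the same idiom over plain values:

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

class MaxWithTies {
    // Collects all values tied for the maximum, in encounter order.
    static Collection<Integer> maxima(List<Integer> values) {
        Collection<Integer> best = new ArrayList<>();
        int bestValue = Integer.MIN_VALUE;
        for (int v : values) {
            if (v > bestValue) {
                best.clear();  // a strictly better value invalidates all candidates
                bestValue = v;
                best.add(v);
            } else if (v == bestValue) {
                best.add(v);   // a tie joins the candidate set
            }
        }
        return best;
    }

    public static void main(String[] args) {
        System.out.println(maxima(List.of(3, 7, 7, 2))); // [7, 7]
    }
}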

From source file:org.geotools.gce.imagemosaic.ImageMosaicPostgisIndexOnlineTest.java

/**
 * Complex test for Postgis indexing on db.
 *
 * @throws Exception
 */
@Test
public void testSortingAndLimiting() throws Exception {
    final File workDir = new File(TestData.file(this, "."), tempFolderName2);
    assertTrue(workDir.mkdir());
    FileUtils.copyFile(TestData.file(this, "watertemp.zip"), new File(workDir, "watertemp.zip"));
    TestData.unzipFile(this, tempFolderName2 + "/watertemp.zip");
    final URL timeElevURL = TestData.url(this, tempFolderName2);

    //place datastore.properties file in the dir for the indexing
    FileWriter out = null;
    try {
        out = new FileWriter(new File(TestData.file(this, "."), tempFolderName2 + "/datastore.properties"));

        final Set<Object> keyset = fixture.keySet();
        for (Object key : keyset) {
            final String key_ = (String) key;
            final String value = fixture.getProperty(key_);
            out.write(key_.replace(" ", "\\ ") + "=" + value.replace(" ", "\\ ") + "\n");
        }
        out.flush();
    } finally {
        if (out != null) {
            IOUtils.closeQuietly(out);
        }
    }

    // now start the test
    final AbstractGridFormat format = TestUtils.getFormat(timeElevURL);
    assertNotNull(format);
    ImageMosaicReader reader = TestUtils.getReader(timeElevURL, format);
    assertNotNull(reader);

    final String[] metadataNames = reader.getMetadataNames();
    assertNotNull(metadataNames);
    assertEquals(12, metadataNames.length);

    assertEquals("true", reader.getMetadataValue("HAS_TIME_DOMAIN"));
    assertEquals("true", reader.getMetadataValue("HAS_ELEVATION_DOMAIN"));

    // dispose and create new reader
    reader.dispose();
    final MyImageMosaicReader reader1 = new MyImageMosaicReader(timeElevURL);
    final RasterManager rasterManager = reader1.getRasterManager(reader1.getGridCoverageNames()[0]);

    // query
    final SimpleFeatureType type = rasterManager.granuleCatalog.getType("waterTempPG2");
    Query query = null;
    if (type != null) {
        // creating query
        query = new Query(type.getTypeName());

        // sorting and limiting
        // max number of elements
        query.setMaxFeatures(1);

        // sorting
        final SortBy[] clauses = new SortBy[] {
                new SortByImpl(FeatureUtilities.DEFAULT_FILTER_FACTORY.property("ingestion"),
                        SortOrder.DESCENDING),
                new SortByImpl(FeatureUtilities.DEFAULT_FILTER_FACTORY.property("elevation"),
                        SortOrder.ASCENDING), };
        query.setSortBy(clauses);

    }

    // checking that we get a single feature and that feature is correct
    final Collection<GranuleDescriptor> features = new ArrayList<GranuleDescriptor>();
    rasterManager.getGranuleDescriptors(query, new GranuleCatalogVisitor() {

        @Override
        public void visit(GranuleDescriptor granule, Object o) {
            features.add(granule);

        }
    });
    assertEquals(features.size(), 1);
    GranuleDescriptor granule = features.iterator().next();
    SimpleFeature sf = granule.getOriginator();
    assertNotNull(sf);
    Object ingestion = sf.getAttribute("ingestion");
    assertTrue(ingestion instanceof Timestamp);
    final GregorianCalendar gc = new GregorianCalendar(TimeZone.getTimeZone("GMT"));
    gc.setTimeInMillis(1225497600000L);
    assertEquals(0, (((Timestamp) ingestion).compareTo(gc.getTime())));
    Object elevation = sf.getAttribute("elevation");
    assertTrue(elevation instanceof Integer);
    assertEquals(((Integer) elevation).intValue(), 0);

    // Reverting order (the previous timestamp shouldn't match anymore)
    final SortBy[] clauses = new SortBy[] {
            new SortByImpl(FeatureUtilities.DEFAULT_FILTER_FACTORY.property("ingestion"), SortOrder.ASCENDING),
            new SortByImpl(FeatureUtilities.DEFAULT_FILTER_FACTORY.property("elevation"),
                    SortOrder.DESCENDING), };
    query.setSortBy(clauses);

    // checking that we get a single feature and that feature is correct
    features.clear();
    rasterManager.getGranuleDescriptors(query, new GranuleCatalogVisitor() {

        @Override
        public void visit(GranuleDescriptor granule, Object o) {
            features.add(granule);

        }
    });
    assertEquals(features.size(), 1);
    granule = features.iterator().next();
    sf = granule.getOriginator();
    assertNotNull(sf);
    ingestion = sf.getAttribute("ingestion");
    assertTrue(ingestion instanceof Timestamp);
    assertNotSame(0, (((Timestamp) ingestion).compareTo(gc.getTime())));
    elevation = sf.getAttribute("elevation");
    assertTrue(elevation instanceof Integer);
    assertNotSame(((Integer) elevation).intValue(), 0);

}

From source file:eu.medsea.util.EncodingGuesser.java

/**
 * Get a Collection of all the possible encodings this byte array could be used to represent.
 * @param data
 * @return the Collection of possible encodings from the supported encodings
 */
public static Collection getPossibleEncodings(byte[] data) {

    Collection possibleEncodings = new TreeSet();
    if (data == null || data.length == 0) {
        return possibleEncodings;
    }

    // We may have to take account of a BOM (Byte Order Mark) as this could be present at the beginning of
    // the source byte array. These sequences may match valid bytes at the beginning of binary data but this shouldn't
    // match any encodings anyway.

    String encoding = null;
    for (Iterator it = supportedEncodings.iterator(); it.hasNext();) {
        // This will eliminate encodings it can't possibly be from the supported encodings
        // by converting the source byte array to a String using each encoding in turn and
        // then getting the resultant byte array and checking it against the passed in data.

        try {
            // One problem to overcome is that the passed in data may be terminated by an
            // incomplete character for the current encoding so we need to remove the last character
            // then get the resulting bytes and only match this against the source byte array.

            encoding = (String) it.next();

            // Check if this encoding has a known bom and if so does it match the beginning of the data array ?
            // returns either 0 or the length of the bom
            int lengthBOM = getLengthBOM(encoding, data);

            // Don't use the BOM when constructing the String
            String test = new String(getByteArraySubArray(data, lengthBOM, data.length - lengthBOM), encoding);

            // Only remove the last character if the String is more than 1 character long
            if (test.length() > 1) {
                // Remove the last (possibly incomplete) character from the test string.
                test = test.substring(0, test.length() - 1);
            }

            // This is the byte array we will compare with the passed in source array copy
            byte[] compare = null;
            try {
                compare = test.getBytes(encoding);
            } catch (UnsupportedOperationException ignore) {
                continue;
            }

            // Check if source and destination byte arrays are equal
            if (!compareByteArrays(data, lengthBOM, compare, 0, compare.length)) {
                // doesn't match, so ignore this encoding as it is unlikely to be correct
                // even if it does contain valid text data.
                continue;
            }

            // If we get this far and the lengthBOM is not 0 then we have a match for this encoding.
            if (lengthBOM != 0) {
                // We know we have a perfect match for this encoding so ditch the rest and return just this one
                possibleEncodings.clear();
                possibleEncodings.add(encoding);
                return possibleEncodings;
            }

            // This is a possible match.
            possibleEncodings.add(encoding);
        } catch (UnsupportedEncodingException uee) {
            log.error("The encoding [" + encoding + "] is not supported by your JVM.");
        } catch (Exception e) {
            // Log the error but carry on with the next encoding
            log.error(e.getLocalizedMessage(), e);
        }
    }
    return possibleEncodings;
}
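
When a BOM match proves the encoding, the method discards every accumulated candidate with clear() and returns the single certain answer. A minimal sketch of that short-circuit idiom, where the equality test stands in for a BOM-style proof:

import java.util.Collection;
import java.util.List;
import java.util.TreeSet;

class CandidateSearch {
    // Collect plausible candidates, but collapse to one on a definitive match.
    static Collection<String> candidates(List<String> options, String definitive) {
        Collection<String> result = new TreeSet<>();
        for (String option : options) {
            if (option.equals(definitive)) { // stand-in for a BOM-style proof
                result.clear();              // certainty trumps the accumulated guesses
                result.add(option);
                return result;
            }
            result.add(option);
        }
        return result;
    }
}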

From source file:ubic.gemma.core.ontology.OntologyServiceImpl.java

@Override
public Map<String, CharacteristicValueObject> countObsoleteOccurrences(int start, int stop, int step) {
    Map<String, CharacteristicValueObject> vos = new HashMap<>();

    int minId = start;
    int maxId = step;

    int nullCnt = 0;
    int obsoleteCnt = 0;

    // Loading all characteristics in steps
    while (maxId < stop) {

        OntologyServiceImpl.log.info("Checking characteristics with IDs between " + minId + " and " + maxId);

        List<Long> ids = new ArrayList<>(step);
        for (int i = minId; i < maxId + 1; i++) {
            ids.add((long) i);
        }

        minId = maxId + 1;
        maxId += step;

        Collection<Characteristic> chars = characteristicService.load(ids);

        if (chars == null || chars.isEmpty()) {
            OntologyServiceImpl.log.info("No characteristics in the current ID range, moving on.");
            continue;
        }
        OntologyServiceImpl.log.info(
                "Found " + chars.size() + " characteristics in the current ID range, checking for obsoletes.");

        // Detect obsoletes
        for (Characteristic ch : chars) {
            if (StringUtils.isBlank(ch.getValueUri())) {
                nullCnt++;
            } else if (this.isObsolete(ch.getValueUri())) {
                String key = this.foundValueKey(ch);
                if (!vos.containsKey(key)) {
                    vos.put(key, new CharacteristicValueObject(ch));
                }
                vos.get(key).incrementOccurrenceCount();
                obsoleteCnt++;
                OntologyServiceImpl.log
                        .info("Found obsolete term: " + ch.getValue() + " / " + ch.getValueUri());
            }
        }

        ids.clear();
        chars.clear();
    }

    OntologyServiceImpl.log.info("Terms with empty uri: " + nullCnt);
    OntologyServiceImpl.log.info("Obsolete terms found: " + obsoleteCnt);

    return vos;
}