Example usage for java.util.TreeMap.isEmpty()

List of usage examples for java.util.TreeMap.isEmpty()

Introduction

On this page you can find usage examples for java.util.TreeMap.isEmpty().

Prototype

boolean isEmpty();

Document

Returns true if this map contains no key-value mappings.
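
As a quick, self-contained illustration of this behavior (a minimal sketch, not taken from the examples below):

import java.util.TreeMap;

public class IsEmptyDemo {
    public static void main(String[] args) {
        TreeMap<String, Integer> map = new TreeMap<>();
        System.out.println(map.isEmpty()); // true: no key-value mappings yet
        map.put("a", 1);
        System.out.println(map.isEmpty()); // false: one mapping present
        map.remove("a");
        System.out.println(map.isEmpty()); // true again once the mapping is removed
    }
}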

Usage

From source file: com.att.aro.core.packetanalysis.impl.VideoUsageAnalysisImpl.java

private void updateSegments() {
    log.info("updateSegments()");
    TreeMap<String, Integer> segmentList;
    Integer segment;
    if (videoUsage != null) {
        for (AROManifest manifest : videoUsage.getManifests()) {
            if (manifest.getDuration() > 0 || !manifest.getSegmentEventList().isEmpty()) {
                segmentList = manifest.getSegmentList();
                if (segmentList != null && !segmentList.isEmpty()) {
                    for (VideoEvent videoEvent : manifest.getVideoEventList().values()) {
                        // key = generateVideoEventKey(segment, timestamp, videoEvent.getQuality());
                        if (videoEvent.getSegment() < 0) {
                            String key = "";
                            if (videoEvent.getVed().getDateTime() != null) {
                                key = String.format("%s.%s", videoEvent.getVed().getDateTime(),
                                        videoEvent.getVed().getExtension());
                            } else if (videoEvent.getVed().getSegmentReference() != null) {
                                key = videoEvent.getVed().getSegmentReference();
                            }
                            segment = segmentList.get(key);
                            if (segment != null) {
                                videoEvent.setSegment(segment);
                            }
                        }
                        if (videoEvent.getDuration() <= 0) {
                            videoEvent.setDuration(manifest.getDuration());
                        }
                    }
                }
            }
        }
    }
}

From source file: org.finra.herd.dao.helper.EmrPricingHelper.java

/**
 * Finds all the clusters that are within the range of lowest core instance price.
 * <p>
 * For example, if the core prices are 0.30, 0.32, 0.34, and 0.36, and the threshold value is 0.1 (10%), then the lowest core price range should be [0.30, 0.33].
 * The upper bound is derived by calculating 0.30 * (1 + 0.1) = 0.33.
 *
 * @param emrClusterPrices the list of clusters to select from
 * @param lowestCoreInstancePriceThresholdPercentage the threshold value that defines the range of lowest core instance price
 *
 * @return the list of clusters that fall in lowest core instance price range
 */
List<EmrClusterPriceDto> getEmrClusterPricesWithinLowestCoreInstancePriceThreshold(
        final List<EmrClusterPriceDto> emrClusterPrices,
        final BigDecimal lowestCoreInstancePriceThresholdPercentage) {
    // Builds a tree map that has the core instance price as the key, and the list of pricing with the same core instance price as the value. The tree map
    // is automatically sorted, so it is easy to find the lowest core instance price range.
    TreeMap<BigDecimal, List<EmrClusterPriceDto>> emrClusterPriceMapKeyedByCoreInstancePrice = new TreeMap<>();
    for (final EmrClusterPriceDto emrClusterPriceDto : emrClusterPrices) {
        final BigDecimal coreInstancePrice = getEmrClusterCoreInstancePrice(emrClusterPriceDto);
        if (emrClusterPriceMapKeyedByCoreInstancePrice.containsKey(coreInstancePrice)) {
            emrClusterPriceMapKeyedByCoreInstancePrice.get(coreInstancePrice).add(emrClusterPriceDto);
        } else {
            List<EmrClusterPriceDto> emrClusterPriceList = new ArrayList<>();
            emrClusterPriceList.add(emrClusterPriceDto);
            emrClusterPriceMapKeyedByCoreInstancePrice.put(coreInstancePrice, emrClusterPriceList);
        }
    }

    // Log all the information in the tree map
    LOGGER.info("All available EMR clusters keyed by core instance price: availableEmrClusters={}",
            jsonHelper.objectToJson(emrClusterPriceMapKeyedByCoreInstancePrice));

    // Finds the list of pricing in the range of the lowest core instance price
    List<EmrClusterPriceDto> lowestCoreInstancePriceEmrClusters = new ArrayList<>();
    if (!emrClusterPriceMapKeyedByCoreInstancePrice.isEmpty()) {
        // calculate the lowest core instance price range
        final BigDecimal lowestCoreInstancePriceLowerBound = emrClusterPriceMapKeyedByCoreInstancePrice
                .firstEntry().getKey();
        final BigDecimal lowestCoreInstancePriceUpperBound = lowestCoreInstancePriceLowerBound
                .multiply(BigDecimal.ONE.add(lowestCoreInstancePriceThresholdPercentage));

        LOGGER.info("emrClusterLowestCoreInstancePriceRange={}", jsonHelper.objectToJson(
                Arrays.asList(lowestCoreInstancePriceLowerBound, lowestCoreInstancePriceUpperBound)));

        for (final Map.Entry<BigDecimal, List<EmrClusterPriceDto>> entry : emrClusterPriceMapKeyedByCoreInstancePrice
                .entrySet()) {
            final BigDecimal coreInstancePrice = entry.getKey();
            // Does it fall into the lowest price range? Add it to the list.
            // There is no need to check the lower bound here, since the tree map is sorted, and lower bound is the lowest core price in the tree map.
            if (coreInstancePrice.compareTo(lowestCoreInstancePriceUpperBound) <= 0) {
                lowestCoreInstancePriceEmrClusters.addAll(entry.getValue());
            } else {
                // Since the tree map is sorted in ascending order, we do not need to check the rest of the entries in the map.
                break;
            }
        }
    }
    return lowestCoreInstancePriceEmrClusters;
}
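
The worked example in the javadoc above can be reproduced with a standalone sketch of the same TreeMap technique. The Dto types and helpers are replaced with plain BigDecimal prices and hypothetical cluster names, so this illustrates the algorithm rather than the herd API:

import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;

public class LowestPriceRangeDemo {
    public static void main(String[] args) {
        // Core prices from the javadoc example: 0.30, 0.32, 0.34, 0.36; threshold 10%.
        TreeMap<BigDecimal, String> priceToCluster = new TreeMap<>();
        priceToCluster.put(new BigDecimal("0.30"), "cluster-a");
        priceToCluster.put(new BigDecimal("0.32"), "cluster-b");
        priceToCluster.put(new BigDecimal("0.34"), "cluster-c");
        priceToCluster.put(new BigDecimal("0.36"), "cluster-d");

        List<String> selected = new ArrayList<>();
        if (!priceToCluster.isEmpty()) {
            BigDecimal lowerBound = priceToCluster.firstKey();
            // Upper bound: 0.30 * (1 + 0.1) = 0.33
            BigDecimal upperBound = lowerBound.multiply(BigDecimal.ONE.add(new BigDecimal("0.1")));
            // headMap(toKey, true) is the sorted view of all entries with key <= toKey,
            // expressing the same "stop once the sorted keys exceed the bound" logic
            // as the explicit loop-and-break above.
            selected.addAll(priceToCluster.headMap(upperBound, true).values());
        }
        System.out.println(selected); // [cluster-a, cluster-b]
    }
}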

From source file: org.apache.hadoop.mapred.HFSPScheduler.java

private void assignSizeBasedTasks(TaskType type, HelperForType helper,
        TreeMap<JobDurationInfo, JobInProgress> sizeBasedJobs,
        TreeMap<JobDurationInfo, TaskStatuses> taskStatusesSizeBased) throws IOException {

    final boolean isMap = type == TaskType.MAP;
    int totClaimedSlots = 0;

    // StringBuilder builder = new StringBuilder("SBJobs(");
    // builder.append(type).append("): [");
    // boolean first = true;
    // for (Entry<JobDurationInfo,JobInProgress> jip : sizeBasedJobs.entrySet())
    // {
    // if (first)
    // first = false;
    // else
    // builder.append(",");
    // builder.append(jip.getValue().getJobID())
    // .append(" -> ")
    // .append(jip.getKey().getPhaseDuration())
    // .append("/")
    // .append(jip.getKey().getPhaseTotalDuration())
    // .append(" p: ")
    // .append(this.getNumPendingNewTasks(jip.getValue(), type))
    // .append(" r: ")
    // .append(this.getNumRunningTasks(jip.getValue(), type))
    // .append(" f: ")
    // .append(this.getNumFinishedTasks(jip.getValue(), type));
    // }
    // builder.append("]");
    // LOG.debug(builder.toString());

    for (Entry<JobDurationInfo, JobInProgress> entry : sizeBasedJobs.entrySet()) {

        JobInProgress jip = entry.getValue();
        JobDurationInfo jdi = entry.getKey();
        TaskStatuses taskStatuses = taskStatusesSizeBased.get(jdi);

        if (!this.isJobReadyForTypeScheduling(jip, type)) {
            if (LOG.isDebugEnabled() && jip.getStatus().getRunState() != JobStatus.SUCCEEDED) {
                LOG.debug(
                        "SIZEBASED(" + jip.getJobID() + ":" + type + "):" + "job is not ready for scheduling ("
                                + "status: " + JobStatus.getJobRunState(jip.getStatus().getRunState())
                                + ", mapProgress: " + jip.getStatus().mapProgress() + ", reduceProgress: "
                                + jip.getStatus().reduceProgress() + ", scheduleReduces: "
                                + jip.scheduleReduces() + ")");
            }
            continue;
        }

        // NEW
        int pendingNewTasks = this.getNumPendingNewTasks(jip, type);
        int pendingResumableTasks = (taskStatuses == null) ? 0 : taskStatuses.suspendedTaskStatuses.size();

        int totAvailableSizeBasedSlots = helper.totAvailableSizeBasedSlots();

        // missing slots for resumable
        int missingResumableSlots = 0;
        if (pendingResumableTasks > 0 && pendingResumableTasks > totAvailableSizeBasedSlots) {
            if (totAvailableSizeBasedSlots <= 0)
                missingResumableSlots = pendingResumableTasks;
            else
                missingResumableSlots = pendingResumableTasks - totAvailableSizeBasedSlots;
            totAvailableSizeBasedSlots = (pendingResumableTasks > totAvailableSizeBasedSlots) ? 0
                    : totAvailableSizeBasedSlots - pendingResumableTasks;
        }

        int missingNewSlots = 0;
        if (pendingNewTasks > 0 && pendingNewTasks > totAvailableSizeBasedSlots) {
            if (totAvailableSizeBasedSlots <= 0)
                missingNewSlots = pendingNewTasks;
            else
                missingNewSlots = pendingNewTasks - totAvailableSizeBasedSlots;
            totAvailableSizeBasedSlots = (pendingNewTasks > totAvailableSizeBasedSlots) ? 0
                    : totAvailableSizeBasedSlots - pendingNewTasks;
        }

        TreeMap<TaskAttemptID, TaskStatus> suspended = null;
        if (taskStatuses != null)
            suspended = taskStatuses.suspendedTaskStatuses;

        if (pendingNewTasks > 0 || pendingResumableTasks > 0 || (suspended != null && !suspended.isEmpty())) {
            LOG.debug(jip.getJobID() + ":" + type + " (d: " + jdi.getPhaseDuration() + "/"
                    + jdi.getPhaseTotalDuration() + "):" + " pendingNewTasks: " + pendingNewTasks
                    + " pendingResumableTasks: " + pendingResumableTasks
                    // + " notResumableTasksOnThisTT: " + notResumableTasks
                    + " totAvailableSizeBasedSlots: "
                    + (helper.totAvailableSizeBasedSlots() <= 0 ? 0 : helper.totAvailableSizeBasedSlots())
                    + " currAvailableSlots: " + helper.currAvailableSlots + " => missingNewSlots: "
                    + missingNewSlots + " missingResumableSlots: " + missingResumableSlots);
        }

        if (this.preemptionStrategy.isPreemptionActive()
                && (missingNewSlots > 0 || missingResumableSlots > 0)) {
            ClaimedSlots claimedSlots = this.claimSlots(helper, Phase.SIZE_BASED, jip, missingNewSlots,
                    missingResumableSlots, totClaimedSlots, sizeBasedJobs, taskStatusesSizeBased);

            totClaimedSlots += claimedSlots.getNumPreemptedForNewTasks()
                    + claimedSlots.getNumPreemptedForResumableTasks();

            LOG.debug(jip.getJobID() + " taskStatusesOnTT: " + taskStatusesSizeBased.get(jdi)
                    + " pendingNewTasks: " + pendingNewTasks + " pendingResumableTasks: "
                    + pendingResumableTasks + " missingNewSlots: " + missingNewSlots
                    + " missingResumableSlots: " + missingResumableSlots);
        }

        while (pendingNewTasks > 0 || pendingResumableTasks > 0
                || (suspended != null && !suspended.isEmpty())) {

            if (helper.currAvailableSlots <= 0) {
                LOG.debug("SIZEBASED(" + jip.getJobID() + ":" + type + "):" + " no slots available on "
                        + taskHelper.ttStatus.getTrackerName());
                return;
            }

            LOG.debug("SIZEBASED(" + jip.getJobID() + ":" + type + "):" + " totAvailableSizeBasedSlots(): "
                    + helper.totAvailableSizeBasedSlots() + " pendingNewTasks: " + pendingNewTasks
                    + " pendingResumableTasks: " + pendingResumableTasks + " suspended("
                    + (suspended == null ? 0 : suspended.size()) + "): " + suspended);

            if (this.preemptionStrategy.isPreemptionActive() && (suspended != null && !suspended.isEmpty())) {
                TaskStatus toResume = suspended.remove(suspended.firstKey());
                // LOG.debug("RESUME: " + toResume.getTaskID() + " " +
                // toResume.getRunState());
                TaskAttemptID tAID = toResume.getTaskID();
                JobInProgress rJIP = this.taskTrackerManager.getJob(tAID.getTaskID().getJobID());
                TaskInProgress tip = rJIP.getTaskInProgress(tAID.getTaskID());
                if (this.preemptionStrategy.resume(tip, toResume)) {
                    taskHelper.resume(tAID, Phase.SIZE_BASED);
                    pendingResumableTasks -= 1;
                } else {
                    LOG.debug("SIZEBASED(" + jip.getJobID() + ":" + type + "):" + " cannot resume " + tAID
                            + " on " + taskHelper.ttStatus.getTrackerName());
                }
            } else {

                Task task = this.obtainNewTask(jip, taskHelper.ttStatus, isMap, taskHelper.currentTime);

                if (task == null) {
                    LOG.debug("SIZEBASED(" + jip.getJobID() + ":" + type + "):"
                            + " cannot obtain slot for new task on " + taskHelper.ttStatus.getTrackerName()
                            + " (#pendingNew: " + pendingNewTasks + ", #pendingResumable: "
                            + pendingResumableTasks + ", #free_" + type + "_slots: " + helper.currAvailableSlots
                            + ")");
                    break;
                }

                taskHelper.slotObtained(task, Phase.SIZE_BASED);
                pendingNewTasks -= 1;
            }
        }
    }
}

From source file: org.apache.pdfbox.pdfparser.NonSequentialPDFParser.java

/**
 * Will parse every object necessary to load a single page from the PDF document.
 * We try our best to order objects according to their offset in the file before
 * reading, to minimize seek operations.
 * 
 * @param dict the COSObject from the parent pages.
 * @param excludeObjects dictionary object reference entries with these names will not be parsed
 * 
 * @throws IOException
 */
private void parseDictObjects(COSDictionary dict, COSName... excludeObjects) throws IOException {
    // ---- create queue for objects waiting for further parsing
    final Queue<COSBase> toBeParsedList = new LinkedList<COSBase>();
    // offset ordered object map
    final TreeMap<Long, List<COSObject>> objToBeParsed = new TreeMap<Long, List<COSObject>>();
    // in case of compressed objects offset points to stmObj
    final Set<Long> parsedObjects = new HashSet<Long>();
    final Set<Long> addedObjects = new HashSet<Long>();

    // ---- add objects not to be parsed to list of already parsed objects
    if (excludeObjects != null) {
        for (COSName objName : excludeObjects) {
            COSBase baseObj = dict.getItem(objName);
            if (baseObj instanceof COSObject) {
                parsedObjects.add(getObjectId((COSObject) baseObj));
            }
        }
    }

    addNewToList(toBeParsedList, dict.getValues(), addedObjects);

    // ---- go through objects to be parsed
    while (!(toBeParsedList.isEmpty() && objToBeParsed.isEmpty())) {
        // -- first get all COSObject from other kind of objects and
        //    put them in objToBeParsed; afterwards toBeParsedList is empty
        COSBase baseObj;
        while ((baseObj = toBeParsedList.poll()) != null) {
            if (baseObj instanceof COSStream) {
                addNewToList(toBeParsedList, ((COSStream) baseObj).getValues(), addedObjects);
            } else if (baseObj instanceof COSDictionary) {
                addNewToList(toBeParsedList, ((COSDictionary) baseObj).getValues(), addedObjects);
            } else if (baseObj instanceof COSArray) {
                final Iterator<COSBase> arrIter = ((COSArray) baseObj).iterator();
                while (arrIter.hasNext()) {
                    addNewToList(toBeParsedList, arrIter.next(), addedObjects);
                }
            } else if (baseObj instanceof COSObject) {
                COSObject obj = (COSObject) baseObj;
                long objId = getObjectId(obj);
                COSObjectKey objKey = new COSObjectKey(obj.getObjectNumber().intValue(),
                        obj.getGenerationNumber().intValue());

                if (!(parsedObjects.contains(objId) /*|| document.hasObjectInPool( objKey ) */ )) {
                    Long fileOffset = xrefTrailerResolver.getXrefTable().get(objKey);
                    //  it is allowed that object references point to null, thus we have to test
                    if (fileOffset != null) {
                        if (fileOffset > 0) {
                            objToBeParsed.put(fileOffset, Collections.singletonList(obj));
                        } else {
                            // negative offset means we have a compressed object within object stream;
                            // get offset of object stream
                            fileOffset = xrefTrailerResolver.getXrefTable()
                                    .get(new COSObjectKey(-fileOffset, 0));
                            if ((fileOffset == null) || (fileOffset <= 0)) {
                                throw new IOException(
                                        "Invalid object stream xref object reference: " + fileOffset);
                            }

                            List<COSObject> stmObjects = objToBeParsed.get(fileOffset);
                            if (stmObjects == null) {
                                objToBeParsed.put(fileOffset, stmObjects = new ArrayList<COSObject>());
                            }
                            stmObjects.add(obj);
                        }
                    } else {
                        // NULL object
                        COSObject pdfObject = document.getObjectFromPool(objKey);
                        pdfObject.setObject(COSNull.NULL);
                    }
                }
            }
        }

        // ---- read first COSObject with smallest offset;
        //      resulting object will be added to toBeParsedList
        if (objToBeParsed.isEmpty()) {
            break;
        }

        for (COSObject obj : objToBeParsed.remove(objToBeParsed.firstKey())) {
            COSBase parsedObj = parseObjectDynamically(obj, false);

            obj.setObject(parsedObj);
            addNewToList(toBeParsedList, parsedObj, addedObjects);

            parsedObjects.add(getObjectId(obj));
        }
    }
}
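
The heart of this method is a TreeMap used as an offset-ordered work queue, drained with isEmpty()/firstKey()/remove(). Reduced to a standalone sketch with the PDFBox types swapped for plain placeholders:

import java.util.Arrays;
import java.util.List;
import java.util.TreeMap;

public class OffsetQueueDemo {
    public static void main(String[] args) {
        // Map file offsets to the objects waiting to be parsed at that offset.
        TreeMap<Long, List<String>> objToBeParsed = new TreeMap<>();
        objToBeParsed.put(900L, Arrays.asList("obj-3"));
        objToBeParsed.put(100L, Arrays.asList("obj-1"));
        objToBeParsed.put(400L, Arrays.asList("obj-2a", "obj-2b"));

        // Draining in ascending key order means the underlying file is read
        // front-to-back, which is what minimizes seek operations.
        while (!objToBeParsed.isEmpty()) {
            for (String obj : objToBeParsed.remove(objToBeParsed.firstKey())) {
                System.out.println("parse " + obj);
            }
        }
        // Prints obj-1, obj-2a, obj-2b, obj-3
    }
}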

From source file: hydrograph.ui.dataviewer.filter.FilterConditionsDialog.java

private boolean storeGroupSelection(TableViewer tableViewer,
        TreeMap<Integer, List<List<Integer>>> groupSelectionMap) {

    boolean retVal = false;
    List<List<Integer>> grpList = new ArrayList<>();
    List<Integer> selectionList = new ArrayList<>();

    TableItem[] items = tableViewer.getTable().getItems();

    for (TableItem tableItem : items) {
        Button button = (Button) tableItem.getData(GROUP_CHECKBOX);
        if (button.getSelection()) {
            selectionList.add(tableViewer.getTable().indexOf(tableItem));
        }
    }

    if (groupSelectionMap.isEmpty()) {
        grpList.add(selectionList);
        groupSelectionMap.put(0, grpList);
        retVal = true;
    } else {
        if (FilterHelper.INSTANCE.validateUserGroupSelection(groupSelectionMap, selectionList)) {
            if (FilterHelper.INSTANCE.isColumnModifiable(groupSelectionMap, selectionList)) {
                retVal = true;
            } else {
                grpList.add(selectionList);
                Map<Integer, List<List<Integer>>> tempMap = new TreeMap<>();
                tempMap.putAll(groupSelectionMap);
                groupSelectionMap.clear();
                groupSelectionMap.put(0, grpList);
                for (int i = 0; i < tempMap.size(); i++) {
                    groupSelectionMap.put(i + 1, tempMap.get(i));
                }
                retVal = true;
                FilterHelper.INSTANCE.rearrangeGroups(groupSelectionMap, selectionList);
            }
        }
    }
    return retVal;
}

From source file: beproject.MainGUI.java

void createTagCloud() throws SQLException {
    TreeMap<String, Integer> tmp = getFrequentWords();
    Cloud cld = new Cloud();
    JPanel tmpPanel = new JPanel();
    FlowLayout t1 = new FlowLayout();
    tmpPanel.setPreferredSize(new Dimension(512, 512));
    tmpPanel.setLayout(t1);
    tmpPanel.setBounds(0, 0, 512, 512);
    //FlowLayout lm=(FlowLayout) tmpPanel.getLayout();
    for (int i = 0; i < 40 && !tmp.isEmpty(); i++) {
        Map.Entry<String, Integer> mp = tmp.pollFirstEntry();
        Tag t = new Tag(mp.getKey(), mp.getValue());
        cld.addTag(t);
    }
    Random rand = new Random();
    for (Tag tag : cld.tags()) {
        final JLabel label = new JLabel(tag.getName());
        label.setOpaque(false);
        label.setFont(label.getFont().deriveFont(rand.nextFloat() * 39));
        label.setForeground(new Color(rand.nextInt()));
        tmpPanel.add(label);
    }
    if (tagCloudPanel == null) {
        tagCloudPanel = new JScrollPane(tmpPanel, JScrollPane.VERTICAL_SCROLLBAR_ALWAYS,
                JScrollPane.HORIZONTAL_SCROLLBAR_NEVER);
    } else {
        jPanel3.remove(tagCloudPanel);
        jPanel3.validate();
        tagCloudPanel = new JScrollPane(tmpPanel, JScrollPane.VERTICAL_SCROLLBAR_ALWAYS,
                JScrollPane.HORIZONTAL_SCROLLBAR_NEVER);
    }
    //tagCloudPanel.setLayout(new ScrollPaneLayout());
    //tagCloudPanel.setAutoscrolls(true);
    tmpPanel.validate();
    tagCloudPanel.validate();
    jPanel3.add(tagCloudPanel, BorderLayout.CENTER);
    jPanel3.validate();

}

From source file: com.globocom.grou.report.ts.opentsdb.OpenTSDBClient.java

@SuppressWarnings("unchecked")
@Override
public Map<String, Double> makeReport(Test test) {
    final TreeMap<String, Double> mapOfResult = new TreeMap<>();
    ArrayList<HashMap<String, Object>> metrics = Optional.ofNullable(metrics(test)).orElse(new ArrayList<>());
    metrics.stream().filter(metric -> Objects.nonNull(metric.get("metric"))).forEach(metric -> {
        String key = (String) metric.get("metric");
        String aggr = (String) metric.get("aggr");
        int durationTimeMillis = test.getDurationTimeMillis();
        Map<String, Double> dps = Optional.ofNullable((Map<String, Double>) metric.get("dps"))
                .orElse(Collections.emptyMap());
        final AtomicDouble reduceSum = new AtomicDouble(0.0);
        final AtomicDouble reduceMax = new AtomicDouble(0.0);
        dps.entrySet().stream().mapToDouble(Map.Entry::getValue).forEach(delta -> {
            reduceSum.addAndGet(delta);
            if (reduceMax.get() < delta)
                reduceMax.set(delta);
        });
        double value = reduceSum.get();
        double max = reduceMax.get();
        if (!Double.isNaN(value)) {
            if ("sum".equals(aggr)) {
                int durationTimeSecs = durationTimeMillis / 1000;
                double avg = value / (double) durationTimeSecs;
                mapOfResult.put(key + " (total)", formatValue(value));
                mapOfResult.put(key + " (avg tps)", formatValue(avg));
                mapOfResult.put(key + " (max tps)",
                        formatValue(max / Math.max(1.0, (double) durationTimeSecs / (double) NUM_SAMPLES)));
            } else {
                value = value / (double) dps.size();
                mapOfResult.put(key, formatValue(value));
            }
        }
    });
    if (mapOfResult.isEmpty())
        LOGGER.error("Test {}.{}: makeReport return NULL", test.getProject(), test.getName());
    return mapOfResult;
}

From source file: com.unboundid.scim2.common.utils.JsonDiff.java

/**
 * Removes the value from an ArrayNode that matches the provided node.
 *
 * @param sourceValue The sourceValue node to match.
 * @param targetValues The ArrayNode containing the values to remove from.
 * @return The matching value that was removed or {@code null} if no matching
 *         value was found.
 */
private JsonNode removeMatchingValue(final JsonNode sourceValue, final ArrayNode targetValues) {
    if (sourceValue.isObject()) {
        // Find a target value that has the most fields in common with the source
        // and identical values. Common fields that are also one of the SCIM
        // standard multi-valued sub-attributes (i.e. type, value, etc.) carry a
        // higher weight when determining the best matching value.
        TreeMap<Integer, Integer> matchScoreToIndex = new TreeMap<Integer, Integer>();
        for (int i = 0; i < targetValues.size(); i++) {
            JsonNode targetValue = targetValues.get(i);
            if (targetValue.isObject()) {
                int matchScore = 0;
                Iterator<String> si = sourceValue.fieldNames();
                while (si.hasNext()) {
                    String field = si.next();
                    if (sourceValue.get(field).equals(targetValue.path(field))) {
                        if (field.equals("value") || field.equals("$ref")) {
                            // These fields have the highest chance of having unique values.
                            matchScore += 3;
                        } else if (field.equals("type") || field.equals("display")) {
                            // These fields should mostly be unique.
                            matchScore += 2;
                        } else if (field.equals("primary")) {
                            // This field will definitely not be unique.
                            matchScore += 0;
                        } else {
                            // Not one of the normative fields. Use the default weight.
                            matchScore += 1;
                        }
                    }
                }
                // Only consider the match if there is not already a match with the
                // same score. This will prefer matches at the same index in the array.
                if (matchScore > 0 && !matchScoreToIndex.containsKey(matchScore)) {
                    matchScoreToIndex.put(matchScore, i);
                }
            }
        }
        if (!matchScoreToIndex.isEmpty()) {
            return targetValues.remove(matchScoreToIndex.lastEntry().getValue());
        }
    } else {
        // Find an exact match
        for (int i = 0; i < targetValues.size(); i++) {
            if (JsonUtils.compareTo(sourceValue, targetValues.get(i), null) == 0) {
                return targetValues.remove(i);
            }
        }
    }

    // Can't find a match at all.
    return null;
}
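
Here the TreeMap acts as a best-match selector: candidate scores are the sorted keys, isEmpty() guards the no-match case, and lastEntry() yields the index with the highest score. A standalone sketch of that idea, using a hypothetical shared-prefix score instead of the SCIM field weighting:

import java.util.Arrays;
import java.util.List;
import java.util.TreeMap;

public class BestMatchDemo {
    public static void main(String[] args) {
        List<String> candidates = Arrays.asList("apple", "apricot", "banana");
        String source = "aprium";

        // Keep only the first candidate seen for each score, as the code above does.
        TreeMap<Integer, Integer> matchScoreToIndex = new TreeMap<>();
        for (int i = 0; i < candidates.size(); i++) {
            int score = sharedPrefixLength(source, candidates.get(i));
            if (score > 0 && !matchScoreToIndex.containsKey(score)) {
                matchScoreToIndex.put(score, i);
            }
        }

        if (!matchScoreToIndex.isEmpty()) {
            // lastEntry() holds the highest score seen.
            System.out.println(candidates.get(matchScoreToIndex.lastEntry().getValue())); // apricot
        }
    }

    private static int sharedPrefixLength(String a, String b) {
        int n = Math.min(a.length(), b.length());
        int i = 0;
        while (i < n && a.charAt(i) == b.charAt(i)) {
            i++;
        }
        return i;
    }
}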

From source file: org.apache.pdfbox.pdfparser.COSParser.java

/**
 * Will parse every object necessary to load a single page from the PDF document. We try our
 * best to order objects according to their offset in the file before reading, to minimize seek operations.
 *
 * @param dict the COSObject from the parent pages.
 * @param excludeObjects dictionary object reference entries with these names will not be parsed
 *
 * @throws IOException if something went wrong
 */
protected void parseDictObjects(COSDictionary dict, COSName... excludeObjects) throws IOException {
    // ---- create queue for objects waiting for further parsing
    final Queue<COSBase> toBeParsedList = new LinkedList<COSBase>();
    // offset ordered object map
    final TreeMap<Long, List<COSObject>> objToBeParsed = new TreeMap<Long, List<COSObject>>();
    // in case of compressed objects offset points to stmObj
    final Set<Long> parsedObjects = new HashSet<Long>();
    final Set<Long> addedObjects = new HashSet<Long>();

    addExcludedToList(excludeObjects, dict, parsedObjects);
    addNewToList(toBeParsedList, dict.getValues(), addedObjects);

    // ---- go through objects to be parsed
    while (!(toBeParsedList.isEmpty() && objToBeParsed.isEmpty())) {
        // -- first get all COSObject from other kind of objects and
        // put them in objToBeParsed; afterwards toBeParsedList is empty
        COSBase baseObj;
        while ((baseObj = toBeParsedList.poll()) != null) {
            if (baseObj instanceof COSDictionary) {
                addNewToList(toBeParsedList, ((COSDictionary) baseObj).getValues(), addedObjects);
            } else if (baseObj instanceof COSArray) {
                final Iterator<COSBase> arrIter = ((COSArray) baseObj).iterator();
                while (arrIter.hasNext()) {
                    addNewToList(toBeParsedList, arrIter.next(), addedObjects);
                }
            } else if (baseObj instanceof COSObject) {
                COSObject obj = (COSObject) baseObj;
                long objId = getObjectId(obj);
                COSObjectKey objKey = new COSObjectKey(obj.getObjectNumber(), obj.getGenerationNumber());

                if (!parsedObjects.contains(objId)) {
                    Long fileOffset = xrefTrailerResolver.getXrefTable().get(objKey);
                    // it is allowed that object references point to null,
                    // thus we have to test
                    if (fileOffset != null && fileOffset != 0) {
                        if (fileOffset > 0) {
                            objToBeParsed.put(fileOffset, Collections.singletonList(obj));
                        } else {
                            // negative offset means we have a compressed
                            // object within object stream;
                            // get offset of object stream
                            fileOffset = xrefTrailerResolver.getXrefTable()
                                    .get(new COSObjectKey((int) -fileOffset, 0));
                            if ((fileOffset == null) || (fileOffset <= 0)) {
                                throw new IOException("Invalid object stream xref object reference for key '"
                                        + objKey + "': " + fileOffset);
                            }

                            List<COSObject> stmObjects = objToBeParsed.get(fileOffset);
                            if (stmObjects == null) {
                                stmObjects = new ArrayList<COSObject>();
                                objToBeParsed.put(fileOffset, stmObjects);
                            }
                            stmObjects.add(obj);
                        }
                    } else {
                        // NULL object
                        COSObject pdfObject = document.getObjectFromPool(objKey);
                        pdfObject.setObject(COSNull.NULL);
                    }
                }
            }
        }

        // ---- read first COSObject with smallest offset
        // resulting object will be added to toBeParsedList
        if (objToBeParsed.isEmpty()) {
            break;
        }

        for (COSObject obj : objToBeParsed.remove(objToBeParsed.firstKey())) {
            COSBase parsedObj = parseObjectDynamically(obj, false);

            obj.setObject(parsedObj);
            addNewToList(toBeParsedList, parsedObj, addedObjects);

            parsedObjects.add(getObjectId(obj));
        }
    }
}

From source file: org.sleuthkit.autopsy.experimental.autoingest.FileExporterSettingsPanel.java

/**
 * Read the settings from disk.
 */
public void load() {
    try {
        FileExportSettings settings = FileExportSettings.load();
        if (settings != null) {
            Path path = settings.getFilesRootDirectory();
            if (path != null) {
                tbRootDirectory.setText(path.toString());
            }
            path = settings.getReportsRootDirectory();
            if (path != null) {
                tbReportDirectory.setText(path.toString());
            }
            TreeMap<String, FileExportRuleSet> treeMap = settings.getRuleSets();
            if (treeMap != null && !treeMap.isEmpty()) {
                exportRuleSet = treeMap.firstEntry().getValue();
            }
            boolean answer = settings.getFileExportEnabledState();
            setEnabledState(answer);
            cbEnableFileExport.setSelected(answer);
        }
        return;
    } catch (FileExportSettings.PersistenceException ex) {
        logger.log(Level.INFO, "Unable to load rule settings: {0}", ex.getMessage()); //NON-NLS
    }
    setEnabledState(false);
    cbEnableFileExport.setSelected(false);
}