Example usage for java.util TreeMap putAll

List of usage examples for java.util TreeMap putAll

Introduction

On this page you can find example usages of java.util.TreeMap putAll, taken from the source files listed below.

Prototype

public void putAll(Map<? extends K, ? extends V> map) 

Document

Copies all of the mappings from the specified map to this map.
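
A minimal sketch of the call (illustrative names, not taken from any of the projects below): copying an unsorted HashMap into a TreeMap re-sorts the entries according to the TreeMap's ordering.

import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;

public class PutAllExample {
    public static void main(String[] args) {
        Map<String, Integer> unsorted = new HashMap<>();
        unsorted.put("banana", 2);
        unsorted.put("apple", 5);
        unsorted.put("cherry", 1);

        // putAll copies every mapping; the TreeMap keeps its own key ordering
        // (natural String order here), independent of the source map's order.
        TreeMap<String, Integer> sorted = new TreeMap<>();
        sorted.putAll(unsorted);

        System.out.println(sorted); // {apple=5, banana=2, cherry=1}
    }
}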

Usage

From source file:com.cyberway.issue.crawler.admin.StatisticsTracker.java

/**
 * Sort the entries of the given HashMap in descending order by their
 * values, which must be longs wrapped with <code>LongWrapper</code>.
 * <p>
 * Elements are sorted by value from largest to smallest. Equal values are
 * sorted in an arbitrary, but consistent manner by their keys. Only items
 * with identical value and key are considered equal.
 *
 * If the passed-in map requires access to be synchronized, the caller
 * should ensure this synchronization. 
 * 
 * @param mapOfLongWrapperValues
 *            Assumes values are wrapped with LongWrapper.
 * @return a sorted map containing the same mappings as the input map.
 */
public TreeMap<String, LongWrapper> getReverseSortedCopy(
        final Map<String, LongWrapper> mapOfLongWrapperValues) {
    TreeMap<String, LongWrapper> sortedMap = new TreeMap<String, LongWrapper>(new Comparator<String>() {
        public int compare(String e1, String e2) {
            long firstVal = mapOfLongWrapperValues.get(e1).longValue;
            long secondVal = mapOfLongWrapperValues.get(e2).longValue;
            if (firstVal < secondVal) {
                return 1;
            }
            if (secondVal < firstVal) {
                return -1;
            }
            // If the values are the same, sort by keys.
            return e1.compareTo(e2);
        }
    });
    try {
        sortedMap.putAll(mapOfLongWrapperValues);
    } catch (UnsupportedOperationException e) {
        // OK, fall back to copying the entries one at a time.
        for (String key : mapOfLongWrapperValues.keySet()) {
            sortedMap.put(key, mapOfLongWrapperValues.get(key));
        }
    }
    return sortedMap;
}

From source file:beproject.MainGUI.java

TreeMap<String, Integer> getFrequentWords() throws SQLException {
    HashMap<String, Integer> i = new HashMap<>();
    ResultSet rs = stmt
            .executeQuery("select tweet from tweets where moviename='" + movieName + "' LIMIT 0, 10000");
    while (rs.next()) {
        String[] data = rs.getString(1).toLowerCase().split("[ \t\n\'\";?!,]");
        for (String tmp : data) {
            if (tmp.contains("http") || tmp.length() < 4)
                continue;
            Integer a = i.putIfAbsent(tmp, 1);
            if (a != null) {
                i.put(tmp, a + 1);
            }
        }
    }

    ValueComparator bvc = new ValueComparator(i);
    TreeMap<String, Integer> sorted_map = new TreeMap<>(bvc);
    sorted_map.putAll(i);
    return sorted_map;
}
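
The ValueComparator used above is not shown in this snippet; a plausible sketch (an assumption, not the project's actual class) orders keys by their counts in descending order and breaks ties on the key itself, so that no two distinct keys compare as equal and the TreeMap keeps every entry:

import java.util.Comparator;
import java.util.Map;

// Sketch of a count-based comparator as commonly paired with TreeMap.putAll.
// Larger counts come first; ties fall back to key order so the ordering is
// consistent with equals (otherwise the TreeMap would silently drop keys).
class ValueComparator implements Comparator<String> {
    private final Map<String, Integer> counts;

    ValueComparator(Map<String, Integer> counts) {
        this.counts = counts;
    }

    @Override
    public int compare(String a, String b) {
        int byCount = counts.get(b).compareTo(counts.get(a)); // descending
        return byCount != 0 ? byCount : a.compareTo(b);
    }
}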

From source file:org.archive.crawler.admin.StatisticsTracker.java

/**
 * Sort the entries of the given HashMap in descending order by their
 * values, which must be longs wrapped with <code>AtomicLong</code>.
 * <p>
 * Elements are sorted by value from largest to smallest. Equal values are
 * sorted in an arbitrary, but consistent manner by their keys. Only items
 * with identical value and key are considered equal.
 *
 * If the passed-in map requires access to be synchronized, the caller
 * should ensure this synchronization. 
 * 
 * @param mapOfAtomicLongValues
 *            Assumes values are wrapped with AtomicLong.
 * @return a sorted map containing the same mappings as the input map.
 */
public TreeMap<String, AtomicLong> getReverseSortedCopy(final Map<String, AtomicLong> mapOfAtomicLongValues) {
    TreeMap<String, AtomicLong> sortedMap = new TreeMap<String, AtomicLong>(new Comparator<String>() {
        public int compare(String e1, String e2) {
            long firstVal = mapOfAtomicLongValues.get(e1).get();
            long secondVal = mapOfAtomicLongValues.get(e2).get();
            if (firstVal < secondVal) {
                return 1;
            }
            if (secondVal < firstVal) {
                return -1;
            }
            // If the values are the same, sort by keys.
            return e1.compareTo(e2);
        }
    });
    try {
        sortedMap.putAll(mapOfAtomicLongValues);
    } catch (UnsupportedOperationException e) {
        for (String key : mapOfAtomicLongValues.keySet()) {
            sortedMap.put(key, mapOfAtomicLongValues.get(key));
        }
    }
    return sortedMap;
}

From source file:org.apache.ambari.server.controller.AmbariCustomCommandExecutionHelper.java

Map<String, String> createDefaultHostParams(Cluster cluster) {
    StackId stackId = cluster.getDesiredStackVersion();
    TreeMap<String, String> hostLevelParams = new TreeMap<String, String>();
    hostLevelParams.put(JDK_LOCATION, managementController.getJdkResourceUrl());
    hostLevelParams.put(JAVA_HOME, managementController.getJavaHome());
    hostLevelParams.put(JDK_NAME, managementController.getJDKName());
    hostLevelParams.put(JCE_NAME, managementController.getJCEName());
    hostLevelParams.put(STACK_NAME, stackId.getStackName());
    hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
    hostLevelParams.put(DB_NAME, managementController.getServerDB());
    hostLevelParams.put(MYSQL_JDBC_URL, managementController.getMysqljdbcUrl());
    hostLevelParams.put(ORACLE_JDBC_URL, managementController.getOjdbcUrl());
    hostLevelParams.put(DB_DRIVER_FILENAME, configs.getMySQLJarName());
    hostLevelParams.putAll(managementController.getRcaParameters());

    return hostLevelParams;
}

From source file:org.lockss.config.Tdb.java

/** Print a full description of all elements in the Tdb */
public void prettyPrint(PrintStream ps) {
    ps.println("Tdb");
    TreeMap<String, TdbPublisher> sorted = new TreeMap<String, TdbPublisher>(
            CatalogueOrderComparator.SINGLETON);
    sorted.putAll(getAllTdbPublishers());
    for (TdbPublisher tdbPublisher : sorted.values()) {
        tdbPublisher.prettyPrint(ps, 2);
    }
}

From source file:org.talend.mdm.webapp.browserecords.server.actions.BrowseRecordsAction.java

public static List<ItemBaseModel> getViewsListOrderedByLabels(Map<String, String> unsortedViewsMap) {
    TreeMap<String, String> sortedViewsByLabelsMap = new TreeMap<String, String>(
            new ViewLabelComparator(unsortedViewsMap));
    sortedViewsByLabelsMap.putAll(unsortedViewsMap);

    List<ItemBaseModel> viewsList = new ArrayList<ItemBaseModel>();
    for (String viewName : sortedViewsByLabelsMap.keySet()) {
        String viewLabel = unsortedViewsMap.get(viewName);
        ItemBaseModel bm = new ItemBaseModel();
        bm.set("name", viewLabel); //$NON-NLS-1$
        bm.set("value", viewName); //$NON-NLS-1$
        viewsList.add(bm);
    }
    return viewsList;
}
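
ViewLabelComparator is likewise not included in the snippet; a minimal sketch (hypothetical, assuming views are ordered by their display labels) might look like this:

import java.util.Comparator;
import java.util.Map;

// Sketch: orders view names by their labels in the given map, falling back to
// the name itself so that distinct views never compare as equal.
class ViewLabelComparator implements Comparator<String> {
    private final Map<String, String> labelsByName;

    ViewLabelComparator(Map<String, String> labelsByName) {
        this.labelsByName = labelsByName;
    }

    @Override
    public int compare(String name1, String name2) {
        int byLabel = labelsByName.get(name1).compareTo(labelsByName.get(name2));
        return byLabel != 0 ? byLabel : name1.compareTo(name2);
    }
}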

From source file:org.apache.hadoop.mapred.HFSPScheduler.java

@Override
public List<Task> assignTasks(TaskTracker taskTracker) throws IOException {

    this.update();

    taskHelper.init(taskTracker.getStatus());

    // Update time waited for local maps for jobs skipped on last heartbeat
    if (this.delayEnabled)
        this.updateLocalityWaitTimes(taskHelper.currentTime);

    for (TaskType type : TASK_TYPES) {

        HelperForType helper = taskHelper.helper(type);

        if (!this.preemptionStrategy.isPreemptionActive() && helper.currAvailableSlots == 0) {
            // LOG.debug("assign(" + taskTracker.getTrackerName() + ", " + type
            // + "): no slots available");
            continue;
        }

        TreeSet<JobInProgress> trainJobs = new TreeSet<JobInProgress>(
                type == TaskType.MAP ? TRAIN_COMPARATOR_MAP : TRAIN_COMPARATOR_REDUCE);

        Collection<JobInProgress> trainJips = this.getJobs(QueueType.TRAIN, type);
        synchronized (trainJips) {
            trainJobs.addAll(trainJips);
        }

        TreeMap<JobDurationInfo, JobInProgress> sizeBasedJobs = new TreeMap<JobDurationInfo, JobInProgress>(
                JOB_DURATION_COMPARATOR);

        TreeMap<JobDurationInfo, JobInProgress> jobQueue = this.getSizeBasedJobQueue(type);
        synchronized (jobQueue) {
            sizeBasedJobs.putAll(jobQueue);
        }

        TreeMap<JobDurationInfo, TaskStatuses> taskStatusesSizeBased = helper.taskStatusesSizeBased;

        if (helper.doTrainScheduling) {
            assignTrainTasks(type, helper, trainJobs, sizeBasedJobs, taskStatusesSizeBased);
        }

        if (helper.doSizeBasedScheduling) {
            assignSizeBasedTasks(type, helper, sizeBasedJobs, taskStatusesSizeBased);
        }

    }

    if (LOG.isDebugEnabled()) {
        taskHelper.logInfos(LOG);
    }

    return (List<Task>) taskHelper.result.clone();
}
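
The synchronized blocks above copy the shared train and size-based queues into local collections before working on them; a minimal sketch of that snapshot pattern, with illustrative names:

import java.util.Collections;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;

public class SnapshotUnderLock {
    public static void main(String[] args) {
        // A shared queue guarded by the synchronized-wrapper's lock.
        SortedMap<String, Integer> shared =
                Collections.synchronizedSortedMap(new TreeMap<String, Integer>());
        shared.put("job-1", 10);
        shared.put("job-2", 5);

        // Copy under the lock, then iterate the private snapshot so the
        // rest of the scheduling pass does not hold the shared lock.
        TreeMap<String, Integer> snapshot = new TreeMap<>();
        synchronized (shared) {
            snapshot.putAll(shared);
        }

        for (Map.Entry<String, Integer> e : snapshot.entrySet()) {
            System.out.println(e.getKey() + " -> " + e.getValue());
        }
    }
}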

From source file:com.datatorrent.contrib.hdht.HDHTWriter.java

/**
 * Flush changes from write cache to disk. New data files will be written and meta data replaced atomically. The flush
 * frequency determines availability of changes to external readers.
 *
 * @throws IOException
 */
private void writeDataFiles(Bucket bucket) throws IOException {
    BucketIOStats ioStats = getOrCretaStats(bucket.bucketKey);
    LOG.debug("Writing data files in bucket {}", bucket.bucketKey);
    // copy meta data on write
    BucketMeta bucketMetaCopy = kryo.copy(getMeta(bucket.bucketKey));

    /** Process purge requests before flushing data from the cache to maintain
     * the order of purge and put operations. This makes sure that purged data
     * is removed from the files before new data is added to them. */
    HashSet<String> filesToDelete = Sets.newHashSet();
    bucketMetaCopy = processPurge(bucket, bucketMetaCopy, filesToDelete);

    // bucket keys by file
    TreeMap<Slice, BucketFileMeta> bucketSeqStarts = bucketMetaCopy.files;
    Map<BucketFileMeta, Map<Slice, Slice>> modifiedFiles = Maps.newHashMap();

    for (Map.Entry<Slice, byte[]> entry : bucket.frozenWriteCache.entrySet()) {
        // find file for key
        Map.Entry<Slice, BucketFileMeta> floorEntry = bucketSeqStarts.floorEntry(entry.getKey());
        BucketFileMeta floorFile;
        if (floorEntry != null) {
            floorFile = floorEntry.getValue();
        } else {
            floorEntry = bucketSeqStarts.firstEntry();
            if (floorEntry == null || floorEntry.getValue().name != null) {
                // no existing file or file with higher key
                floorFile = new BucketFileMeta();
            } else {
                // placeholder for new keys, move start key
                floorFile = floorEntry.getValue();
                bucketSeqStarts.remove(floorEntry.getKey());
            }
            floorFile.startKey = entry.getKey();
            if (floorFile.startKey.length != floorFile.startKey.buffer.length) {
                // normalize key for serialization
                floorFile.startKey = new Slice(floorFile.startKey.toByteArray());
            }
            bucketSeqStarts.put(floorFile.startKey, floorFile);
        }

        Map<Slice, Slice> fileUpdates = modifiedFiles.get(floorFile);
        if (fileUpdates == null) {
            modifiedFiles.put(floorFile, fileUpdates = Maps.newHashMap());
        }
        fileUpdates.put(entry.getKey(), new Slice(entry.getValue()));
    }

    // write modified files
    for (Map.Entry<BucketFileMeta, Map<Slice, Slice>> fileEntry : modifiedFiles.entrySet()) {
        BucketFileMeta fileMeta = fileEntry.getKey();
        TreeMap<Slice, Slice> fileData = new TreeMap<Slice, Slice>(getKeyComparator());

        if (fileMeta.name != null) {
            // load existing file
            long start = System.currentTimeMillis();
            FileReader reader = store.getReader(bucket.bucketKey, fileMeta.name);
            reader.readFully(fileData);
            ioStats.dataBytesRead += store.getFileSize(bucket.bucketKey, fileMeta.name);
            ioStats.dataReadTime += System.currentTimeMillis() - start;
            /* these keys are re-written */
            ioStats.dataKeysRewritten += fileData.size();
            ioStats.filesReadInCurrentWriteCycle++;
            ioStats.dataFilesRead++;
            reader.close();
            filesToDelete.add(fileMeta.name);
        }

        // apply updates
        fileData.putAll(fileEntry.getValue());
        // new file
        writeFile(bucket, bucketMetaCopy, fileData);
    }

    LOG.debug("Files written {} files read {}", ioStats.filesWroteInCurrentWriteCycle,
            ioStats.filesReadInCurrentWriteCycle);
    // flush meta data for new files
    try {
        LOG.debug("Writing {} with {} file entries", FNAME_META, bucketMetaCopy.files.size());
        OutputStream os = store.getOutputStream(bucket.bucketKey, FNAME_META + ".new");
        Output output = new Output(os);
        bucketMetaCopy.committedWid = bucket.committedLSN;
        bucketMetaCopy.recoveryStartWalPosition = bucket.recoveryStartWalPosition;
        kryo.writeClassAndObject(output, bucketMetaCopy);
        output.close();
        os.close();
        store.rename(bucket.bucketKey, FNAME_META + ".new", FNAME_META);
    } catch (IOException e) {
        throw new RuntimeException("Failed to write bucket meta data " + bucket.bucketKey, e);
    }

    // clear pending changes
    ioStats.dataKeysWritten += bucket.frozenWriteCache.size();
    // switch to new version
    this.metaCache.put(bucket.bucketKey, bucketMetaCopy);

    // delete old files
    for (String fileName : filesToDelete) {
        store.delete(bucket.bucketKey, fileName);
    }
    invalidateReader(bucket.bucketKey, filesToDelete);
    // clearing cache after invalidating readers
    bucket.frozenWriteCache.clear();

    // cleanup WAL files which are not needed anymore.
    minimumRecoveryWalPosition = bucketMetaCopy.recoveryStartWalPosition;
    for (Long bucketId : this.bucketKeys) {
        BucketMeta meta = getMeta(bucketId);
        if (meta.recoveryStartWalPosition.fileId < minimumRecoveryWalPosition.fileId
                || (meta.recoveryStartWalPosition.fileId == minimumRecoveryWalPosition.fileId
                        && meta.recoveryStartWalPosition.offset < minimumRecoveryWalPosition.offset)) {
            minimumRecoveryWalPosition = meta.recoveryStartWalPosition;
        }
    }
    this.wal.cleanup(minimumRecoveryWalPosition.fileId);
    ioStats.filesReadInCurrentWriteCycle = 0;
    ioStats.filesWroteInCurrentWriteCycle = 0;
}

From source file:delfos.group.results.groupevaluationmeasures.MAE_byMemberStdDev.java

@Override
public GroupEvaluationMeasureResult getMeasureResult(GroupRecommenderSystemResult groupRecommenderSystemResult,
        DatasetLoader<? extends Rating> originalDatasetLoader, RelevanceCriteria relevanceCriteria,
        DatasetLoader<? extends Rating> trainingDatasetLoader,
        DatasetLoader<? extends Rating> testDatasetLoader) {

    TreeMap<Integer, MeanIterative> maeAllMembers = new TreeMap<>();

    for (GroupOfUsers groupOfUsers : groupRecommenderSystemResult.getGroupsOfUsers()) {
        Collection<Recommendation> groupRecommendations = groupRecommenderSystemResult
                .getGroupOutput(groupOfUsers).getRecommendations().getRecommendations();

        if (groupRecommendations.isEmpty()) {
            continue;
        }
        MeanIterative maeGroup = new MeanIterative();
        Map<Integer, MeanIterative> maeMembers = new TreeMap<>();
        for (User member : groupOfUsers.getMembers()) {
            maeMembers.put(member.getId(), new MeanIterative());
        }

        Map<Integer, Map<Integer, ? extends Rating>> groupTrueRatings = new TreeMap<>();

        groupOfUsers.getIdMembers().stream().forEach((idUser) -> {
            try {
                groupTrueRatings.put(idUser, testDatasetLoader.getRatingsDataset().getUserRatingsRated(idUser));
            } catch (UserNotFound ex) {
                ERROR_CODES.USER_NOT_FOUND.exit(ex);
            }
        });

        for (Recommendation recommendation : groupRecommendations) {
            if (Double.isNaN(recommendation.getPreference().doubleValue())) {
                continue;
            }
            int idItem = recommendation.getItem().getId();
            for (int idUser : groupOfUsers.getIdMembers()) {
                if (groupTrueRatings.get(idUser).containsKey(idItem)) {
                    double trueRating = groupTrueRatings.get(idUser).get(idItem).getRatingValue().doubleValue();
                    double predicted = recommendation.getPreference().doubleValue();
                    double absoluteError = Math.abs(predicted - trueRating);

                    maeGroup.addValue(absoluteError);
                    maeMembers.get(idUser).addValue(absoluteError);
                }
            }
        }

        maeAllMembers.putAll(maeMembers);

    }

    double[] maeByMember = maeAllMembers.values().parallelStream()
            .mapToDouble(meanMember -> meanMember.getMean()).filter(value -> !Double.isNaN(value)).toArray();

    double maeByMemberStdDev = new StandardDeviation().evaluate(maeByMember);

    if (maeByMember.length == 0) {
        return new GroupEvaluationMeasureResult(this, Double.NaN);
    } else {
        return new GroupEvaluationMeasureResult(this, maeByMemberStdDev);
    }
}

From source file:org.apache.hadoop.mapred.HFSPScheduler.java

/**
 * Sort the job duration queues based on their current values.
 *
 * @param type
 */
private void sortSizeBasedQueue(TaskType type) {
    TreeMap<JobDurationInfo, JobInProgress> newQueue = new TreeMap<JobDurationInfo, JobInProgress>(
            HFSPScheduler.JOB_DURATION_COMPARATOR);
    Map<JobDurationInfo, JobInProgress> oldQueue = this.getSizeBasedJobQueue(type);

    if (LOG.isDebugEnabled()) { // TODO: deleteme
        HashMap<JobID, JobDurationInfo> jdis = new HashMap<JobID, JobDurationInfo>();
        for (Entry<JobDurationInfo, JobInProgress> entry : oldQueue.entrySet()) {
            JobDurationInfo jdi = entry.getKey();
            assert !jdis.containsKey(jdi.getJobID()) : String.format("%s %s %s", jdi.getJobID(),
                    jdis.get(jdi.getJobID()), jdi);
            jdis.put(jdi.getJobID(), jdi);
        }
    }

    int oldSize = oldQueue.size();
    synchronized (oldQueue) {
        newQueue.putAll(oldQueue);
        oldQueue.clear();

        // FIXME: putAll not working with comparator, don't know why
        for (Entry<JobDurationInfo, JobInProgress> entry : newQueue.entrySet()) {
            oldQueue.put(entry.getKey(), entry.getValue());
        }
    }
    assert oldSize == oldQueue.size() : String.format("oldSize: %s newSize: %s", oldSize, oldQueue.size());

    // if (LOG.isDebugEnabled()) {
    // StringBuilder builder = new StringBuilder("time update on " +
    // "SizeBasedQueue(").append(type).append( "): [");
    // boolean first = true;
    // for (Entry<JobDurationInfo, JobInProgress> entry : oldQueue.entrySet()) {
    // if (first)
    // first = false;
    // else
    // builder.append(", ");
    // builder.append(entry.getKey().getPhaseDuration())
    // .append(" -> ").append(entry.getValue().getJobID());
    // }
    // builder.append("]");
    // LOG.debug(builder.toString());
    // }
}
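
One plausible explanation for the FIXME above (an assumption based on how OpenJDK's TreeMap.putAll is commonly implemented, not something the snippet states): when the destination TreeMap is empty and the source is a SortedMap whose comparator compares equal to the destination's, putAll copies the source's existing iteration order without re-invoking the comparator. If the values the comparator reads have changed since the source was built, as the job duration estimates do here, the copy keeps the stale order; re-inserting entry by entry, as the loop above does, forces a fresh comparison for every key. A small sketch of the effect:

import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;

public class PutAllStaleOrderDemo {
    public static void main(String[] args) {
        Map<String, Integer> duration = new HashMap<>();
        duration.put("jobA", 1);
        duration.put("jobB", 2);

        // Comparator backed by mutable external state (like job duration infos).
        Comparator<String> byDuration = Comparator.comparing(duration::get);

        TreeMap<String, String> source = new TreeMap<>(byDuration);
        source.put("jobA", "A");
        source.put("jobB", "B");           // order at insertion time: jobA, jobB

        duration.put("jobA", 10);          // by current durations the order is now jobB, jobA

        TreeMap<String, String> copy = new TreeMap<>(byDuration);
        copy.putAll(source);               // empty destination + same comparator:
                                           // the source's old order may be copied verbatim
        System.out.println(copy.keySet()); // [jobA, jobB], not [jobB, jobA]

        TreeMap<String, String> rebuilt = new TreeMap<>(byDuration);
        for (Map.Entry<String, String> e : source.entrySet()) {
            rebuilt.put(e.getKey(), e.getValue()); // per-entry put re-compares the keys
        }
        System.out.println(rebuilt.keySet()); // [jobB, jobA]
    }
}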