Example usage for java.util HashMap values

List of usage examples for java.util HashMap values

Introduction

On this page you can find example usage for java.util.HashMap.values().

Prototype

public Collection<V> values() 

Document

Returns a Collection view of the values contained in this map.
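
Before the usage examples from real projects below, here is a minimal, hypothetical sketch (the class and variable names are illustrative and not taken from any of the source files listed). It shows that values() can be iterated directly, and that the returned Collection is a live view backed by the map, so removing an element from the view removes the corresponding entry from the map.

import java.util.Collection;
import java.util.HashMap;

public class HashMapValuesDemo {
    public static void main(String[] args) {
        HashMap<String, Integer> scores = new HashMap<>();
        scores.put("alice", 10);
        scores.put("bob", 20);
        scores.put("carol", 20);

        // Iterate over the value view directly
        for (Integer score : scores.values()) {
            System.out.println(score);
        }

        // The view is backed by the map: removing from the view removes an entry
        Collection<Integer> view = scores.values();
        view.remove(20);                    // removes one entry whose value is 20
        System.out.println(scores.size());  // prints 2
    }
}

Note that the collection returned by values() does not support add or addAll; new entries must be added through the map itself with put.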

Usage

From source file:com.tacitknowledge.util.migration.jdbc.DistributedJdbcMigrationLauncherFactoryTest.java

/**
 * Make sure the right patches go in the right spot
 *
 * @throws Exception if anything goes wrong
 */
public void testDistributedMigrationContextTargetting() throws Exception {
    int currentPatchLevel = 3;
    HashMap controlledSystems = ((DistributedMigrationProcess) launcher.getMigrationProcess())
            .getControlledSystems();
    // set the patch info store to report the current patch level
    setReportedPatchLevel(controlledSystems.values(), currentPatchLevel);
    // Now do the migrations, and make sure we get the right number of events
    MigrationProcess process = launcher.getMigrationProcess();
    process.setMigrationRunnerStrategy(new OrderedMigrationRunnerStrategy());
    process.doMigrations(MockBuilder.getPatchInfoStore(currentPatchLevel), context);

    // The orders schema has four tasks that should go, make sure they did
    JdbcMigrationLauncher ordersLauncher = (JdbcMigrationLauncher) controlledSystems.get("orders");
    // FIXME need to test multiple contexts
    TestDataSourceMigrationContext ordersContext = (TestDataSourceMigrationContext) ordersLauncher.getContexts()
            .keySet().iterator().next();
    assertEquals("orders", ordersContext.getSystemName());
    assertTrue(ordersContext.hasExecuted("TestTask1"));
    assertTrue(ordersContext.hasExecuted("TestTask2"));
    assertTrue(ordersContext.hasExecuted("TestTask3"));
    assertTrue(ordersContext.hasExecuted("TestTask4"));

    // The core schema has three tasks that should not go, make sure they exist but did not go
    JdbcMigrationLauncher coreLauncher = (JdbcMigrationLauncher) controlledSystems.get("core");
    // FIXME need to test multiple contexts
    TestDataSourceMigrationContext coreContext = (TestDataSourceMigrationContext) coreLauncher.getContexts()
            .keySet().iterator().next();
    assertEquals(3, coreLauncher.getMigrationProcess().getMigrationTasks().size());
    assertEquals("core", coreContext.getSystemName());
    assertFalse(coreContext.hasExecuted("patch0001_first_patch"));
    assertFalse(coreContext.hasExecuted("patch0002_second_patch"));
    assertFalse(coreContext.hasExecuted("patch0003_third_patch"));
}

From source file:org.apache.hadoop.hive.ql.parse.MapReduceCompiler.java

private void breakTaskTree(Task<? extends Serializable> task) {

    if (task instanceof ExecDriver) {
        HashMap<String, Operator<? extends OperatorDesc>> opMap = ((MapredWork) task.getWork()).getMapWork()
                .getAliasToWork();
        if (!opMap.isEmpty()) {
            for (Operator<? extends OperatorDesc> op : opMap.values()) {
                breakOperatorTree(op);
            }
        }
    } else if (task instanceof ConditionalTask) {
        List<Task<? extends Serializable>> listTasks = ((ConditionalTask) task).getListTasks();
        for (Task<? extends Serializable> tsk : listTasks) {
            breakTaskTree(tsk);
        }
    }

    if (task.getChildTasks() == null) {
        return;
    }

    for (Task<? extends Serializable> childTask : task.getChildTasks()) {
        breakTaskTree(childTask);
    }
}

From source file:com.georgeme.Act_Circle.java

private void setUpMap() {
    mMap.setOnMarkerDragListener(this);
    mMap.setOnMapLongClickListener(this);

    if (this.getIntent().getSerializableExtra("result") != null) {
        HashMap<Integer, User> result = (HashMap<Integer, User>) this.getIntent()
                .getSerializableExtra("result");
        if (result.size() == 0) {
            sampleData();
        }
        for (User u : result.values()) {
            mUserLatLng.add(
                    new MapUser(new LatLng(Double.valueOf(u.getLantitude()), Double.valueOf(u.getLongitude())),
                            u.getName()));
        }
    } else {
        sampleData();
    }

    updateCenterLatLng();

    // Move the map so that it is centered on the initial circle
    mMap.moveCamera(CameraUpdateFactory.newLatLngZoom(mUserLatLng.get(0).marker.getPosition(), SCALE));
}

From source file:com.jaeksoft.searchlib.crawler.file.process.CrawlFileThread.java

private FileInstanceAbstract[] checkDirectory(FileInstanceAbstract fileInstance)
        throws SearchLibException, URISyntaxException, IOException {

    // Load directory from Index
    HashMap<String, FileInfo> indexFileMap = new HashMap<String, FileInfo>();
    fileManager.getFileInfoList(fileInstance.getURI(), indexFileMap);

    boolean withSubDir = filePathItem.isWithSubDir();

    // If the filePathItem does not support subdir
    if (!withSubDir)
        for (FileInfo fileInfo : indexFileMap.values())
            if (fileInfo.getFileType() == FileTypeEnum.directory)
                smartDelete(crawlQueue, fileInfo);

    // Remove existing files from the map
    FileInstanceAbstract[] files = withSubDir ? fileInstance.listFilesAndDirectories()
            : fileInstance.listFilesOnly();
    if (files != null)
        for (FileInstanceAbstract file : files)
            indexFileMap.remove(file.getURI().toASCIIString());

    // The files that remain in the map can be removed
    if (indexFileMap.size() > 0)
        for (FileInfo fileInfo : indexFileMap.values())
            smartDelete(crawlQueue, fileInfo);

    return files;
}

From source file:org.apache.hadoop.raid.TestRaidHistogram.java

/**
 * Runs three stages. Each stage spawns nPercents threads.
 * Each thread iterates $rounds rounds and sends random numbers for
 * each monitor dir to the raidnode, covering both succeeded and failed files.
 * Two windows are set: the first window covers stage 3 only,
 * and the second window covers stages 2 and 3 only.
 * Calling getBlockFixStatus should filter out all stage 1 points,
 * and the histogram counts for the second window should be double those of
 * the first window.
 */
public void testHistograms() throws Exception {
    int rounds = 10000;
    int range = 1000000;
    int dividedRange = range / 1000;
    float step = 1.0f / nPercents;
    try {
        mySetup();
        cnode = RaidNode.createRaidNode(null, conf);
        ArrayList<Float> percents = new ArrayList<Float>();

        for (int i = 0; i <= nPercents; i++) {
            percents.add(step * i);
        }
        Collections.shuffle(percents);
        // submit some old data
        sendRecoveryTimes(nPercents, range * (nPercents + 1), range, rounds);
        Thread.sleep(100);
        long ckpTime1 = System.currentTimeMillis();

        sendRecoveryTimes(nPercents, 0, range, rounds);
        Thread.sleep(100);
        long ckpTime2 = System.currentTimeMillis();

        sendRecoveryTimes(nPercents, 0, range, rounds);
        long endTime = System.currentTimeMillis();
        ArrayList<Long> newWindows = new ArrayList<Long>();
        newWindows.add(endTime - ckpTime2);
        newWindows.add(endTime - ckpTime1);
        HashMap<String, RaidHistogram> recoveryTimes = cnode.blockIntegrityMonitor.getRecoveryTimes();
        for (RaidHistogram histogram : recoveryTimes.values()) {
            histogram.setNewWindows(newWindows);
        }
        for (int i = 0; i <= monitorDirs.length; i++) {
            String monitorDir;
            if (i < monitorDirs.length) {
                monitorDir = monitorDirs[i];
            } else {
                monitorDir = BlockIntegrityMonitor.OTHERS;
            }
            assertEquals("Stale entries are not filtered", rounds * nPercents * 3 * 2,
                    cnode.blockIntegrityMonitor.getNumberOfPoints(monitorDir));
            TreeMap<Long, BlockFixStatus> status = cnode.blockIntegrityMonitor.getBlockFixStatus(monitorDir,
                    nPercents, percents, endTime);
            assertTrue(status.containsKey(newWindows.get(0)));
            assertTrue(status.containsKey(newWindows.get(1)));
            BlockFixStatus bfs = status.get(newWindows.get(0));
            assertEquals("Stale entries are not filtered", rounds * nPercents * 2 * 2,
                    cnode.blockIntegrityMonitor.getNumberOfPoints(monitorDir));
            // Verify failed recovered files for the first window
            assertEquals("The number of failed recovery files should match", rounds * nPercents,
                    bfs.failedPaths);
            // Verify histogram for the first window
            assertEquals(nPercents, bfs.counters.length);
            for (int j = 0; j < nPercents; j++) {
                assertEquals(rounds, bfs.counters[j]);
            }
            // Verify percent values for the first window
            assertEquals(nPercents + 1, bfs.percentValues.length);
            assertEquals(0, bfs.percentValues[0]);
            for (int j = 1; j <= nPercents; j++) {
                assertEquals(dividedRange * j - 1, bfs.percentValues[j]);
            }
            bfs = status.get(newWindows.get(1));
            // Verify failed recovered files for the second window
            assertEquals("The number of failed recovery files should match", rounds * nPercents,
                    bfs.failedPaths);
            // Verify histogram for the second window
            assertEquals(nPercents, bfs.counters.length);
            for (int j = 0; j < nPercents; j++) {
                assertEquals(rounds * 2, bfs.counters[j]);
            }
            // Verify percent values for the second window
            assertEquals(nPercents + 1, bfs.percentValues.length);
            assertEquals(0, bfs.percentValues[0]);
            for (int j = 1; j <= nPercents; j++) {
                assertEquals(dividedRange * j - 1, bfs.percentValues[j]);
            }
        }
    } finally {
        myTearDown();
    }
}

From source file:iudex.da.ContentUpdater.java

protected void update(List<UniMap> references, Connection conn) throws SQLException {
    final HashMap<String, UniMap> uhashes = new HashMap<String, UniMap>(references.size());
    final String qry = formatSelect(references, uhashes);
    final UpdateQueryRunner runner = new UpdateQueryRunner();
    runner.query(conn, qry, new RefUpdateHandler(uhashes));

    final ArrayList<UniMap> remains = new ArrayList<UniMap>(uhashes.size());
    for (UniMap rem : uhashes.values()) {
        UniMap out = _transformer.transformReference(rem, null);
        if (out != null)
            remains.add(out);
    }
    if (remains.size() > 0)
        write(remains, conn);
}

From source file:com.ibm.bi.dml.runtime.controlprogram.parfor.RemoteDPParForMR.java

/**
 * The result file contains a hierarchy of workerID-resultvar (incl. filename). We
 * deduplicate on the workerID. Without JVM reuse each task refers to a unique workerID,
 * so we will not find any duplicates. With JVM reuse, however, each slot refers to a
 * workerID, and there are duplicate filenames due to partial aggregation and overwrite
 * of fname (the RemoteParWorkerMapper ensures uniqueness of those files independent of
 * the runtime implementation).
 * 
 * @param job 
 * @param fname
 * @return
 * @throws DMLRuntimeException
 */
@SuppressWarnings("deprecation")
public static LocalVariableMap[] readResultFile(JobConf job, String fname)
        throws DMLRuntimeException, IOException {
    HashMap<Long, LocalVariableMap> tmp = new HashMap<Long, LocalVariableMap>();

    FileSystem fs = FileSystem.get(job);
    Path path = new Path(fname);
    LongWritable key = new LongWritable(); //workerID
    Text value = new Text(); //serialized var header (incl filename)

    int countAll = 0;
    for (Path lpath : MatrixReader.getSequenceFilePaths(fs, path)) {
        SequenceFile.Reader reader = new SequenceFile.Reader(FileSystem.get(job), lpath, job);
        try {
            while (reader.next(key, value)) {
                //System.out.println("key="+key.get()+", value="+value.toString());
                if (!tmp.containsKey(key.get()))
                    tmp.put(key.get(), new LocalVariableMap());
                Object[] dat = ProgramConverter.parseDataObject(value.toString());
                tmp.get(key.get()).put((String) dat[0], (Data) dat[1]);
                countAll++;
            }
        } finally {
            if (reader != null)
                reader.close();
        }
    }

    LOG.debug("Num remote worker results (before deduplication): " + countAll);
    LOG.debug("Num remote worker results: " + tmp.size());

    //create return array
    return tmp.values().toArray(new LocalVariableMap[0]);
}

From source file:eu.stratosphere.pact.runtime.hash.HashFunctionCollisionBenchmark.java

/**
 * Create histogram over bucket sizes
 *
 * @param map
 *            Map to be analyzed
 * @param level
 *            Level on which the map is located
 * @return The total count of hashed values in the map
 */
private int collectStatistics(HashMap<Integer, Object> map, int level) {
    SortedMap<Integer, Integer> bucketSizesForLevel = bucketSizesPerLevel.get(level);

    Iterator<Object> bucketIterator = map.values().iterator();
    int bucketCount = 0;
    int totalValueCount = 0;

    while (bucketIterator.hasNext()) {
        bucketCount++;

        Integer hashValuesInBucket;
        // If we are already on the deepest level, get the count in the bucket,
        // otherwise recursively examine the subtree
        if (level == maxLevel - 1) {
            hashValuesInBucket = (Integer) bucketIterator.next();
        } else {
            @SuppressWarnings("unchecked")
            HashMap<Integer, Object> nextMap = (HashMap<Integer, Object>) bucketIterator.next();
            hashValuesInBucket = collectStatistics(nextMap, level + 1);
        }
        totalValueCount += hashValuesInBucket;
        Integer countOfBucketSizes = bucketSizesForLevel.get(hashValuesInBucket);
        if (countOfBucketSizes == null) {
            countOfBucketSizes = 1;
        } else {
            countOfBucketSizes += 1;
        }
        bucketSizesForLevel.put(hashValuesInBucket, countOfBucketSizes);
    }

    Integer countOfEmptyBuckets = bucketSizesForLevel.get(0);
    if (countOfEmptyBuckets == null) {
        countOfEmptyBuckets = rangeCalculators[level].getBucketCount() - bucketCount;
    } else {
        countOfEmptyBuckets += rangeCalculators[level].getBucketCount() - bucketCount;
    }
    bucketSizesForLevel.put(0, countOfEmptyBuckets);

    return totalValueCount;
}

From source file:afest.datastructures.tree.decision.erts.informationfunctions.GeneralizedNormalizedShannonEntropy.java

@Override
public <T extends ITrainingPoint<R, C>> double getScore(Collection<T> set, ISplit<R> split) {
    HashMap<Boolean, ArrayList<T>> splitSeparation = InformationFunctionsUtils.performSplit(set, split);

    HashMap<Boolean, Integer> countSeparation = new HashMap<Boolean, Integer>();
    for (Boolean key : splitSeparation.keySet()) {
        ArrayList<T> elements = splitSeparation.get(key);
        countSeparation.put(key, elements.size());
    }

    HashMap<C, Integer> countContent = groupElementsByContent(set);
    HashMap<C, Integer> countContentTrue = groupElementsByContent(splitSeparation.get(true));
    HashMap<C, Integer> countContentFalse = groupElementsByContent(splitSeparation.get(false));

    double ht = getEntropy(countSeparation, set.size());
    double hc = getEntropy(countContent, set.size());

    double dSize = (double) set.size();
    double pTrue = countSeparation.get(true) / dSize;
    double hct = 0;
    for (Integer count : countContentTrue.values()) {
        double prob1 = count / dSize;
        double prob2 = prob1 / pTrue;
        hct -= prob1 * MathUtils.log(2, prob2);
    }
    for (Integer count : countContentFalse.values()) {
        double prob1 = count / dSize;
        double prob2 = 1 - (prob1 / pTrue); // pFalse
        hct -= prob1 * MathUtils.log(2, prob2);
    }

    // Mutual Information
    double itc = hc - hct;

    // Normalization
    double ctc = 2 * itc / (hc + ht);

    return ctc;
}

From source file:ch.admin.suis.msghandler.sender.SenderSessionImpl.java

/**
 * For each message, create the ZIP and the envelope.
 *
 * @param pairs        The messages
 * @param outboxTmpDir The temp outbox, to prepare our messages
 */
private ArrayList<Message> prepareMessages(HashMap<Integer, Message> pairs, File outboxTmpDir) {
    ArrayList<Message> messages = new ArrayList<>();
    for (Message message : pairs.values()) {
        // Skips any message that contains files that are too new according to the config.
        if (!areFilesOldEnough(message)) {
            continue;
        }

        // create the envelope file
        try {
            message.setEnvelopeFile(createEnvelope(outboxTmpDir, message));
            message.setDataFile(ZipUtils.compress(outboxTmpDir, message.getFiles()));
        } catch (IOException | SAXException e) {
            warnMessageFailure(message, e);
            notSent.add(message);
            continue; // try another message
        }

        // if everything is OK, add the message to the result
        messages.add(message);

        LOG.info(MessageFormat.format("the message {0} is ready to be forwarded to the Sedex adapter",
                message.getMessageId()));
    }
    return messages;
}