Example usage for java.util Set clear

Introduction

This page collects real-world usage examples of java.util.Set.clear().

Prototype

void clear();

Document

Removes all of the elements from this set (optional operation).
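
As a quick, self-contained sketch before the real-world usages below (the class name is illustrative): clear() empties the set in place, and because it is an optional operation, unmodifiable sets such as those returned by Collections.unmodifiableSet reject it with UnsupportedOperationException.

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class SetClearExample {
    public static void main(String[] args) {
        Set<String> names = new HashSet<String>();
        names.add("alice");
        names.add("bob");

        names.clear(); // removes every element; the set itself remains usable
        System.out.println(names.isEmpty()); // true

        // clear() is optional: unmodifiable views always reject it
        Set<String> frozen = Collections.unmodifiableSet(names);
        try {
            frozen.clear();
        } catch (UnsupportedOperationException expected) {
            System.out.println("unmodifiable sets do not support clear()");
        }
    }
}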

Usage

From source file:org.apache.hadoop.mapred.lib.CombineFileInputFormat.java

/**
 * Return all the splits in the specified set of paths
 */
private void getMoreSplits(JobConf job, Collection<LocatedFileStatus> stats, long maxSize, long minSizeNode,
        long minSizeRack, long maxNumBlocksPerSplit, List<CombineFileSplit> splits) throws IOException {

    // all blocks for all the files in input set
    OneFileInfo[] files;

    // mapping from a rack name to the list of blocks it has
    HashMap<String, List<OneBlockInfo>> rackToBlocks = new HashMap<String, List<OneBlockInfo>>();

    // mapping from a block to the nodes on which it has replicas
    HashMap<OneBlockInfo, String[]> blockToNodes = new HashMap<OneBlockInfo, String[]>();

    // mapping from a node to the list of blocks that it contains
    HashMap<String, List<OneBlockInfo>> nodeToBlocks = new HashMap<String, List<OneBlockInfo>>();

    if (stats.isEmpty()) {
        return;
    }
    files = new OneFileInfo[stats.size()];

    // populate all the blocks for all files
    long totLength = 0;
    int fileIndex = 0;
    for (LocatedFileStatus oneStatus : stats) {
        files[fileIndex] = new OneFileInfo(oneStatus, job,
                isSplitable(FileSystem.get(job), oneStatus.getPath()), rackToBlocks, blockToNodes, nodeToBlocks,
                rackToNodes, maxSize);
        totLength += files[fileIndex].getLength();
        fileIndex++;
    }

    // Sort the blocks on each node from biggest to smallest by size to
    // encourage more node-local single block splits
    sortBlocksBySize(nodeToBlocks);

    ArrayList<OneBlockInfo> validBlocks = new ArrayList<OneBlockInfo>();
    Set<String> nodes = new HashSet<String>();
    long curSplitSize = 0;

    // process all nodes and create splits that are local
    // to a node.
    for (Iterator<Map.Entry<String, List<OneBlockInfo>>> iter = nodeToBlocks.entrySet().iterator(); iter
            .hasNext();) {

        Map.Entry<String, List<OneBlockInfo>> one = iter.next();
        nodes.add(one.getKey());
        List<OneBlockInfo> blocksInNode = one.getValue();

        // for each block, copy it into validBlocks. Delete it from
        // blockToNodes so that the same block does not appear in
        // two different splits.
        for (OneBlockInfo oneblock : blocksInNode) {
            if (blockToNodes.containsKey(oneblock)) {
                validBlocks.add(oneblock);
                blockToNodes.remove(oneblock);
                curSplitSize += oneblock.length;

                // if the accumulated split size exceeds the maximum, then
                // create this split.
                if ((maxSize != 0 && curSplitSize >= maxSize)
                        || (maxNumBlocksPerSplit > 0 && validBlocks.size() >= maxNumBlocksPerSplit)) {
                    // create an input split and add it to the splits array
                    // if only one block, add all the node replicas
                    if (validBlocks.size() == 1) {
                        Set<String> blockLocalNodes = new HashSet<String>(
                                Arrays.asList(validBlocks.get(0).hosts));
                        addCreatedSplit(job, splits, blockLocalNodes, validBlocks);
                        addStatsForSplitType(SplitType.SINGLE_BLOCK_LOCAL, curSplitSize, blockLocalNodes.size(),
                                validBlocks.size());
                    } else {
                        addCreatedSplit(job, splits, nodes, validBlocks);
                        addStatsForSplitType(SplitType.NODE_LOCAL, curSplitSize, nodes.size(),
                                validBlocks.size());
                    }
                    curSplitSize = 0;
                    validBlocks.clear();
                }
            }
        }
        // if there were any blocks left over and their combined size is
        // larger than minSizeNode, then combine them into one split.
        // Otherwise add them back to the unprocessed pool. It is likely
        // that they will be combined with other blocks from the same rack later on.
        if (minSizeNode != 0 && curSplitSize >= minSizeNode) {
            // create an input split and add it to the splits array
            addCreatedSplit(job, splits, nodes, validBlocks);
            addStatsForSplitType(SplitType.NODE_LOCAL_LEFTOVER, curSplitSize, nodes.size(), validBlocks.size());
        } else {
            for (OneBlockInfo oneblock : validBlocks) {
                blockToNodes.put(oneblock, oneblock.hosts);
            }
        }
        validBlocks.clear();
        nodes.clear();
        curSplitSize = 0;
    }

    // if blocks in a rack are below the specified minimum size, then keep them
    // in 'overflow'. After the processing of all racks is complete, these overflow
    // blocks will be combined into splits.
    ArrayList<OneBlockInfo> overflowBlocks = new ArrayList<OneBlockInfo>();
    Set<String> racks = new HashSet<String>();

    // Process all racks over and over again until there is no more work to do.
    boolean noRacksMadeSplit = false;
    while (blockToNodes.size() > 0) {

        // Create one split for this rack before moving over to the next rack.
        // Come back to this rack after creating a single split for each of the
        // remaining racks.
        // Process one rack location at a time, combining all possible blocks
        // that reside on this rack into one split (constrained by the minimum
        // and maximum split size).

        // Iterate over all racks.  Add to the overflow blocks only if at least
        // one pass over all the racks was completed without adding any splits
        long splitsAddedOnAllRacks = 0;
        for (Iterator<Map.Entry<String, List<OneBlockInfo>>> iter = rackToBlocks.entrySet().iterator(); iter
                .hasNext();) {

            Map.Entry<String, List<OneBlockInfo>> one = iter.next();
            racks.add(one.getKey());
            List<OneBlockInfo> blocks = one.getValue();

            // for each block, copy it into validBlocks. Delete it from
            // blockToNodes so that the same block does not appear in
            // two different splits.
            boolean createdSplit = false;
            for (OneBlockInfo oneblock : blocks) {
                if (blockToNodes.containsKey(oneblock)) {
                    validBlocks.add(oneblock);
                    blockToNodes.remove(oneblock);
                    curSplitSize += oneblock.length;

                    // if the accumulated split size exceeds the maximum, then
                    // create this split.
                    if ((maxSize != 0 && curSplitSize >= maxSize)
                            || (maxNumBlocksPerSplit > 0 && validBlocks.size() >= maxNumBlocksPerSplit)) {
                        // create an input split and add it to the splits array
                        addCreatedSplit(job, splits, getHosts(racks), validBlocks);
                        addStatsForSplitType(SplitType.RACK_LOCAL, curSplitSize, getHosts(racks).size(),
                                validBlocks.size());
                        createdSplit = true;
                        ++splitsAddedOnAllRacks;
                        break;
                    }
                }
            }

            // if we created a split, then just go to the next rack
            if (createdSplit) {
                curSplitSize = 0;
                validBlocks.clear();
                racks.clear();
                continue;
            }

            if (!validBlocks.isEmpty()) {
                if (minSizeRack != 0 && curSplitSize >= minSizeRack) {
                    // if there is a minimum size specified, then create a single split
                    // otherwise, store these blocks into overflow data structure
                    addCreatedSplit(job, splits, getHosts(racks), validBlocks);
                    addStatsForSplitType(SplitType.RACK_LOCAL_LEFTOVER, curSplitSize, getHosts(racks).size(),
                            validBlocks.size());
                    ++splitsAddedOnAllRacks;
                } else if (!noRacksMadeSplit) {
                    // Add the blocks back if a pass over all racks found at least one
                    // split or this is the first pass
                    for (OneBlockInfo oneblock : validBlocks) {
                        blockToNodes.put(oneblock, oneblock.hosts);
                    }
                } else {
                    // There were a few blocks in this rack that remained to be processed.
                    // Keep them in 'overflow' block list. These will be combined later.
                    overflowBlocks.addAll(validBlocks);
                }
            }
            curSplitSize = 0;
            validBlocks.clear();
            racks.clear();
        }

        if (splitsAddedOnAllRacks == 0) {
            noRacksMadeSplit = true;
        }
    }

    assert blockToNodes.isEmpty();
    assert curSplitSize == 0;
    assert validBlocks.isEmpty();
    assert racks.isEmpty();

    // Process all overflow blocks
    for (OneBlockInfo oneblock : overflowBlocks) {
        validBlocks.add(oneblock);
        curSplitSize += oneblock.length;

        // This might cause an existing rack location to be re-added,
        // but it should be OK because racks is a Set.
        for (int i = 0; i < oneblock.racks.length; i++) {
            racks.add(oneblock.racks[i]);
        }

        // if the accumulated split size exceeds the maximum, then
        // create this split.
        if ((maxSize != 0 && curSplitSize >= maxSize)
                || (maxNumBlocksPerSplit > 0 && validBlocks.size() >= maxNumBlocksPerSplit)) {
            // create an input split and add it to the splits array
            addCreatedSplit(job, splits, getHosts(racks), validBlocks);
            addStatsForSplitType(SplitType.OVERFLOW, curSplitSize, getHosts(racks).size(), validBlocks.size());
            curSplitSize = 0;
            validBlocks.clear();
            racks.clear();
        }
    }

    // Process any remaining blocks, if any.
    if (!validBlocks.isEmpty()) {
        addCreatedSplit(job, splits, getHosts(racks), validBlocks);
        addStatsForSplitType(SplitType.OVERFLOW_LEFTOVER, curSplitSize, getHosts(racks).size(),
                validBlocks.size());
    }
}
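
The loop above follows an accumulate-flush-clear pattern: blocks collect in validBlocks (and node names in nodes) until a threshold is reached, a split is emitted, and the same collections are cleared and reused instead of being reallocated. This only works because the emitted split no longer shares state with the accumulator. A standalone sketch of the pattern (the threshold and element type are illustrative, not taken from Hadoop):

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class AccumulateFlushClear {
    // Illustrative threshold, not a Hadoop constant.
    private static final int MAX_BATCH = 3;

    public static List<Set<Integer>> batch(List<Integer> items) {
        List<Set<Integer>> batches = new ArrayList<Set<Integer>>();
        Set<Integer> current = new HashSet<Integer>();
        for (Integer item : items) {
            current.add(item);
            if (current.size() >= MAX_BATCH) {
                // copy before clearing, so the emitted batch is independent
                batches.add(new HashSet<Integer>(current));
                current.clear(); // reuse the accumulator for the next batch
            }
        }
        if (!current.isEmpty()) {
            batches.add(new HashSet<Integer>(current)); // leftover batch
        }
        return batches;
    }
}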

From source file:com.google.gwt.emultest.java.util.TreeMapTest.java

public void testEntrySet() {
    K[] keys = getSortedKeys();
    V[] values = getSortedValues();
    NavigableMap<K, V> map = createNavigableMap();
    map.put(keys[0], values[0]);
    map.put(keys[1], values[1]);
    map.put(keys[2], values[2]);

    Set<Map.Entry<K, V>> entries = map.entrySet();
    Iterator<Map.Entry<K, V>> entrySetIterator = entries.iterator();
    assertEquals(3, entries.size());
    assertEquals(keys[0] + "=" + values[0], entrySetIterator.next().toString());
    while (entrySetIterator.hasNext()) {
        Map.Entry<K, V> entry = entrySetIterator.next();
        assertTrue(map.get(entry.getKey()) == entry.getValue());
    }

    assertEquals(map.size(), entries.size());
    _assertEquals(entries, map.entrySet());
    map.clear();
    assertEquals(map.size(), entries.size());
    _assertEquals(entries, map.entrySet());
    map.put(keys[0], values[0]);
    assertEquals(map.size(), entries.size());
    _assertEquals(entries, map.entrySet());
    entries.clear();
    assertEquals(map.size(), entries.size());
    _assertEquals(entries, map.entrySet());

    map.put(keys[1], values[1]);
    map.put(keys[2], values[2]);
    Iterator<Entry<K, V>> it = entries.iterator();
    while (it.hasNext()) {
        Map.Entry<K, V> entry = it.next();
        map.containsKey(entry.getKey());
        map.containsValue(entry.getValue());
        it.remove();
    }
    try {
        it.next();
        fail("should throw NoSuchElementException");
    } catch (NoSuchElementException expected) {
    }
    _assertEmpty(map);
}

From source file:com.google.gwt.emultest.java.util.TreeMapTest.java

public void testSubMap_entrySet() {
    K[] keys = getSortedKeys();
    V[] values = getSortedValues();
    NavigableMap<K, V> map = createNavigableMap();
    map.put(keys[0], values[0]);
    map.put(keys[1], values[1]);
    map.put(keys[2], values[2]);
    map.put(keys[3], values[3]);

    SortedMap<K, V> subMap = map.subMap(keys[1], keys[3]);
    Set<Entry<K, V>> entries = subMap.entrySet();
    assertEquals(2, subMap.size());
    assertEquals(subMap.size(), entries.size());
    assertFalse(entries.contains(new SimpleEntry<K, V>(keys[0], values[0])));
    assertTrue(entries.contains(new SimpleEntry<K, V>(keys[1], values[1])));
    assertTrue(entries.contains(new SimpleEntry<K, V>(keys[2], values[2])));
    assertFalse(entries.contains(new SimpleEntry<K, V>(keys[3], values[3])));

    entries.remove(new SimpleEntry<K, V>(keys[1], values[1]));
    assertEquals(3, map.size());
    assertEquals(subMap.size(), entries.size());
    assertFalse(entries.contains(new SimpleEntry<K, V>(keys[1], values[1])));
    assertFalse(subMap.containsKey(keys[1]));
    assertFalse(subMap.containsValue(values[1]));

    entries.clear();
    assertEquals(2, map.size());
    assertEquals(subMap.size(), entries.size());
    assertTrue(entries.isEmpty());
    assertTrue(subMap.isEmpty());

    subMap.put(keys[2], values[2]);
    assertEquals(1, subMap.size());
    assertEquals(subMap.size(), entries.size());

    subMap.put(keys[1], values[1]);
    Iterator<Entry<K, V>> it = entries.iterator();
    while (it.hasNext()) {
        Map.Entry<K, V> entry = it.next();
        subMap.containsKey(entry.getKey());
        subMap.containsValue(entry.getValue());
        it.remove();
    }
    try {
        it.next();
        fail("should throw NoSuchElementException");
    } catch (NoSuchElementException expected) {
    }
    assertEquals(2, map.size());
    assertEquals(0, subMap.size());
    assertEquals(subMap.size(), entries.size());

    map = createNavigableMap();
    Set<Entry<K, V>> entrySet = map.entrySet();
    map.put(keys[0], values[0]);
    map.put(keys[1], values[1]);
    map.put(keys[2], values[2]);
    assertEquals(map.size(), entrySet.size());
    _assertEquals(entrySet, map.entrySet());
    map.clear();
    assertEquals(map.size(), entrySet.size());
    _assertEquals(entrySet, map.entrySet());
    map.put(keys[0], values[0]);
    assertEquals(map.size(), entrySet.size());
    _assertEquals(entrySet, map.entrySet());
    entrySet.clear();
    assertEquals(map.size(), entrySet.size());
    _assertEquals(entrySet, map.entrySet());
}
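
Both TreeMap tests above depend on entrySet() returning a live view of the map: entries.clear() empties the backing map, map.clear() empties the view, and the view picks up later insertions. A minimal sketch of that write-through behavior:

import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

public class EntrySetViewExample {
    public static void main(String[] args) {
        Map<String, Integer> map = new TreeMap<String, Integer>();
        map.put("a", 1);
        map.put("b", 2);

        Set<Map.Entry<String, Integer>> entries = map.entrySet();
        entries.clear(); // clears the backing map, not just the view
        System.out.println(map.isEmpty()); // true

        map.put("c", 3);
        System.out.println(entries.size()); // 1 -- the view reflects new entries
    }
}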

From source file:edu.stanford.mobisocial.dungbeetle.MessagingManagerThread.java

@Override
public void run() {
    ProfileScanningObjHandler profileScanningObjHandler = new ProfileScanningObjHandler();
    Process.setThreadPriority(Process.THREAD_PRIORITY_BACKGROUND);
    Set<Long> notSendingObjects = new HashSet<Long>();
    if (DBG)
        Log.i(TAG, "Running...");
    mMessenger.init();
    long max_sent = -1;
    while (!interrupted()) {
        mOco.waitForChange();
        mOco.clearChanged();
        Cursor objs = mHelper.queryUnsentObjects(max_sent);
        try {
            Log.i(TAG, "Sending " + objs.getCount() + " objects...");
            if (objs.moveToFirst())
                do {
                    Long objId = objs.getLong(objs.getColumnIndexOrThrow(DbObject._ID));
                    String jsonSrc = objs.getString(objs.getColumnIndexOrThrow(DbObject.JSON));

                    max_sent = objId.longValue();
                    JSONObject json = null;
                    if (jsonSrc != null) {
                        try {
                            json = new JSONObject(jsonSrc);
                        } catch (JSONException e) {
                            Log.e(TAG, "bad json", e);
                        }
                    } else {
                        json = new JSONObject();
                    }

                    if (json != null) {
                        /*
                         * If we updated the latest feed here, there would be a
                         * race condition between putting a message into the db
                         * and actually having a connection to send it (which is
                         * here); when other people send you messages, the
                         * processing gets out of order. So instead we update
                         * "latest" immediately when messages are added to the db,
                         * inside DBHelper.java addToFeed().
                         */
                        // mFeedModifiedObjHandler.handleObj(mContext,
                        // feedUri, objId);

                        // TODO: Don't be fooled! This is not truly an
                        // EncodedObj
                        // and does not yet have a hash.
                        DbObj signedObj = App.instance().getMusubi().objForId(objId);
                        if (signedObj == null) {
                            Log.e(TAG, "Error, object " + objId + " not found in database");
                            notSendingObjects.add(objId);
                            continue;
                        }
                        DbEntryHandler h = DbObjects.getObjHandler(json);
                        h.afterDbInsertion(mContext, signedObj);

                        // TODO: Constraint error thrown for now b/c local
                        // user not in contacts
                        profileScanningObjHandler.handleObj(mContext, h, signedObj);
                    }

                    OutgoingMessage m = new OutgoingMsg(objs);
                    if (m.contents().getRecipients().isEmpty()) {
                        Log.w(TAG, "No addressees for direct message " + objId);
                        notSendingObjects.add(objId);
                    } else {
                        mMessenger.sendMessage(m);
                    }
                } while (objs.moveToNext());
            if (notSendingObjects.size() > 0) {
                if (DBG)
                    Log.d(TAG, "Marking " + notSendingObjects.size() + " objects sent");
                mHelper.markObjectsAsSent(notSendingObjects);
                notSendingObjects.clear();
            }
        } catch (Exception e) {
            if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.FROYO) {
                Log.wtf(TAG, "error running notify loop", e);
            } else {
                Log.e(TAG, "error running notify loop", e);
            }
        } finally {
            objs.close();
        }
    }
    mHelper.close();
}
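
In the loop above, notSendingObjects is a reusable batch set: IDs of objects that cannot be delivered accumulate across one pass over the cursor, are flushed in a single markObjectsAsSent call so they will not be retried, and the set is then cleared so the next wake-up of the thread starts from an empty batch without allocating a new collection.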

From source file:fr.gouv.vitam.cases.DbRequest.java

/**
 * Compute the final result from the list of per-step results.
 *
 * @param useCache 
 * @param results
 * @return the final result
 * @throws IllegalAccessException
 * @throws InstantiationException
 */
public ResultInterface finalizeResults(boolean useCache, final List<ResultInterface> results)
        throws InstantiationException, IllegalAccessException {
    // Algorithm
    // Paths = 0
    // current = last(results).current
    // if (current is "full path" paths) => build result from current + return result (end)
    // Paths = current
    // For each result from end to start
    // if (result.current is "full path" paths or result.maxLevel == 1) => futureStop = true
    // current = Paths
    // Paths = 0
    // For each node in current
    // Parents = 0;
    // Foreach p in result.current
    // if (first(node).AllParents intersect last(p) not 0) => Parents.add(p)
    // Foreach p in Parents => Paths.add(p # node) eventually using subpath if not immediate
    // if (futureStop) => break loop on result
    // build result from Paths + return result (end)
    if (results.isEmpty()) {
        LOGGER.error("No List of results");
        return null;
    }
    useCache |= defaultUseCache;
    final Set<String> paths = new HashSet<String>();
    final Set<String> current = new HashSet<String>();
    ResultInterface finalresult = results.get(results.size() - 1);
    ResultInterface result = finalresult;
    if (!result.isLoaded()) {
        ResultInterface result2 = mdAccess.reload(result.getId());
        if (result2 == null) {
            LOGGER.error("Cannot load final result! =" + result.getId());
            return null;
        }
        result = result2;
        finalresult = result;
        if (GlobalDatas.PRINT_REQUEST) {
            LOGGER.warn("Finalize: " + result);
        }
    }
    if (result.getCurrentDaip().isEmpty()) {
        LOGGER.error("No DAip in last element: " + (results.size() - 1));
        return null;
    }
    if (UUID.isMultipleUUID(result.getCurrentDaip().iterator().next())) {
        Set<String> pathfinal = checkStartup(results.get(0), result.getCurrentDaip());
        if (pathfinal.size() < result.getCurrentDaip().size()) {
            finalresult.setCurrentDaip(pathfinal);
            finalresult.updateMinMax();
            if (simulate) {
                finalresult.setLoaded(true);
                finalresult.putBeforeSave();
                LOGGER.info("FinalizeResult: {}", finalresult);
            } else if (useCache) {
                finalresult.save(mdAccess);
            }
        } else if (useCache) {
            finalresult.updateTtl(mdAccess);
        }
        return finalresult;
    }
    paths.addAll(result.getCurrentDaip());
    int lastlevel = result.getMinLevel();
    for (int rank = results.size() - 2; rank >= 1; rank--) {
        result = results.get(rank);
        if (!result.isLoaded()) {
            ResultInterface result2 = mdAccess.reload(result.getId());
            if (result2 == null) {
                LOGGER.error("Cannot load final result! =" + result.getId());
                return null;
            }
            result = result2;
        }
        if (useCache) {
            result.updateTtl(mdAccess);
        }
        if (result.getMinLevel() > lastlevel) {
            if (GlobalDatas.PRINT_REQUEST) {
                LOGGER.warn("Ignore step: at rank: " + rank + " = " + result.getCurrentDaip() + " since "
                        + result.getMinLevel() + " > " + lastlevel);
            }
            continue;
        }
        lastlevel = result.getMinLevel();
        if (GlobalDatas.PRINT_REQUEST) {
            LOGGER.warn(
                    "Finalize step: from " + paths + "\n\tat rank: " + rank + " = " + result.getCurrentDaip());
        }
        if (result.getCurrentDaip().isEmpty()) {
            LOGGER.error("No DAip in rank: " + rank);
            return null;
        }
        boolean futureStop = (UUID.isMultipleUUID(result.getCurrentDaip().iterator().next()));
        futureStop |= result.getMaxLevel() == 1;
        current.addAll(paths);
        paths.clear();
        if (simulate) {
            for (final String node : current) {
                for (final String p : result.getCurrentDaip()) {
                    paths.add(p + node);
                }
            }
        } else {
            for (final String node : current) {
                checkParents(node, result, paths);
            }
        }
        current.clear();
        if (futureStop) {
            // Stop recursivity since path is a full path
            break;
        }
    }
    if (GlobalDatas.PRINT_REQUEST) {
        LOGGER.warn("Finalize last step: " + paths);
    }
    if (paths.isEmpty()) {
        LOGGER.error("No Final PATH");
        return null;
    }
    // Last check is with startup nodes (should we ?)
    result = results.get(0);
    Set<String> lastResult = checkStartup(result, paths);
    paths.clear();
    if (lastResult.isEmpty()) {
        LOGGER.error("No DAip in LastResult");
        return null;
    }
    finalresult.setCurrentDaip(lastResult);
    finalresult.updateMinMax();
    if (simulate) {
        finalresult.setLoaded(true);
        finalresult.putBeforeSave();
        LOGGER.info("FinalizeResult: {}", finalresult);
    } else if (useCache && finalresult.getId() != null) {
        finalresult.save(mdAccess);
    }
    if (GlobalDatas.PRINT_REQUEST) {
        LOGGER.warn("FINALRESULT: " + finalresult);
    }
    return finalresult;
}

From source file:fr.gouv.vitam.mdbes.DbRequest.java

/**
 * Compute the final result from the list of per-step results.
 *
 * @param useCache 
 * @param results
 * @return the final result
 * @throws IllegalAccessException
 * @throws InstantiationException
 */
public ResultInterface finalizeResults(boolean useCache, final List<ResultInterface> results)
        throws InstantiationException, IllegalAccessException {
    // Algorithm
    // Paths = 0
    // current = last(results).current
    // if (current is "full path" paths) => build result from current + return result (end)
    // Paths = current
    // For each result from end to start
    // if (result.current is "full path" paths or result.maxLevel == 1) => futureStop = true
    // current = Paths
    // Paths = 0
    // For each node in current
    // Parents = 0;
    // Foreach p in result.current
    // if (first(node).AllParents intersect last(p) not 0) => Parents.add(p)
    // Foreach p in Parents => Paths.add(p # node) eventually using subpath if not immediate
    // if (futureStop) => break loop on result
    // build result from Paths + return result (end)
    if (results.isEmpty()) {
        LOGGER.error("No List of results");
        return null;
    }
    boolean specUseCache = useCache || defaultUseCache;
    final Set<String> paths = new HashSet<String>();
    final Set<String> current = new HashSet<String>();
    ResultInterface finalresult = results.get(results.size() - 1);
    ResultInterface result = finalresult;
    if (!result.isLoaded()) {
        ResultInterface result2 = mdAccess.reload(result.getId());
        if (result2 == null) {
            LOGGER.error("Cannot load final result! =" + result.getId());
            return null;
        }
        result = result2;
        finalresult = result;
        if (GlobalDatas.PRINT_REQUEST) {
            LOGGER.warn("Finalize: " + result);
        }
    }
    if (result.getCurrentDaip().isEmpty()) {
        LOGGER.error("No DAip in last element: " + (results.size() - 1));
        return null;
    }
    if (UUID.isMultipleUUID(result.getCurrentDaip().iterator().next())) {
        Set<String> pathfinal = checkStartup(results.get(0), result.getCurrentDaip());
        if (pathfinal.size() < result.getCurrentDaip().size()) {
            finalresult.setCurrentDaip(pathfinal);
            finalresult.updateMinMax();
            if (simulate) {
                finalresult.setLoaded(true);
                finalresult.putBeforeSave();
                LOGGER.info("FinalizeResult: {}", finalresult);
            } else if (specUseCache) {
                finalresult.save(mdAccess);
            }
        } else if (specUseCache) {
            finalresult.updateTtl(mdAccess);
        }
        return finalresult;
    }
    paths.addAll(result.getCurrentDaip());
    int lastlevel = result.getMinLevel();
    for (int rank = results.size() - 2; rank >= 1; rank--) {
        result = results.get(rank);
        if (!result.isLoaded()) {
            ResultInterface result2 = mdAccess.reload(result.getId());
            if (result2 == null) {
                LOGGER.error("Cannot load final result! =" + result.getId());
                return null;
            }
            result = result2;
        }
        if (specUseCache) {
            result.updateTtl(mdAccess);
        }
        if (result.getMinLevel() > lastlevel) {
            if (GlobalDatas.PRINT_REQUEST) {
                LOGGER.warn("Ignore step: at rank: " + rank + " = " + result.getCurrentDaip() + " since "
                        + result.getMinLevel() + " > " + lastlevel);
            }
            continue;
        }
        lastlevel = result.getMinLevel();
        if (GlobalDatas.PRINT_REQUEST) {
            LOGGER.warn(
                    "Finalize step: from " + paths + "\n\tat rank: " + rank + " = " + result.getCurrentDaip());
        }
        if (result.getCurrentDaip().isEmpty()) {
            LOGGER.error("No DAip in rank: " + rank);
            return null;
        }
        boolean futureStop = (UUID.isMultipleUUID(result.getCurrentDaip().iterator().next()));
        futureStop |= result.getMaxLevel() == 1;
        current.addAll(paths);
        paths.clear();
        if (simulate) {
            for (final String node : current) {
                for (final String p : result.getCurrentDaip()) {
                    paths.add(p + node);
                }
            }
        } else {
            for (final String node : current) {
                checkParents(node, result, paths);
            }
        }
        current.clear();
        if (futureStop) {
            // Stop recursivity since path is a full path
            break;
        }
    }
    if (GlobalDatas.PRINT_REQUEST) {
        LOGGER.warn("Finalize last step: " + paths);
    }
    if (paths.isEmpty()) {
        LOGGER.error("No Final PATH");
        return null;
    }
    // Last check is with startup nodes (should we ?)
    result = results.get(0);
    Set<String> lastResult = checkStartup(result, paths);
    paths.clear();
    if (lastResult.isEmpty()) {
        LOGGER.error("No DAip in LastResult");
        return null;
    }
    finalresult.setCurrentDaip(lastResult);
    finalresult.updateMinMax();
    if (simulate) {
        finalresult.setLoaded(true);
        finalresult.putBeforeSave();
        LOGGER.info("FinalizeResult: {}", finalresult);
    } else if (specUseCache && finalresult.getId() != null) {
        finalresult.save(mdAccess);
    }
    if (GlobalDatas.PRINT_REQUEST) {
        LOGGER.warn("FINALRESULT: " + finalresult);
    }
    return finalresult;
}
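
The two DbRequest variants above are nearly identical; the interesting difference is how the cache flag is handled. The fr.gouv.vitam.cases version reassigns its useCache parameter in place (useCache |= defaultUseCache), while the fr.gouv.vitam.mdbes version derives a separate local (boolean specUseCache = useCache || defaultUseCache) and leaves the parameter untouched. The second style is generally preferable, since the argument keeps its original meaning for the whole method body.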

From source file:com.github.heuermh.personalgenome.client.converter.JacksonPersonalGenomeConverter.java

@Override
public List<Relative> parseRelatives(final InputStream inputStream) {
    checkNotNull(inputStream);
    JsonParser parser = null;
    try {
        parser = jsonFactory.createParser(inputStream);
        parser.nextToken();

        List<Relative> relatives = new ArrayList<Relative>();

        String profileId = null;
        String matchId = null;
        double similarity = 0.0d;
        int sharedSegments = 0;
        Relationship relationship = null;
        Relationship userRelationship = null;
        Set<Relationship> range = new HashSet<Relationship>();

        while (parser.nextToken() != JsonToken.END_OBJECT) {
            String field = parser.getCurrentName();
            parser.nextToken();

            if ("id".equals(field)) {
                profileId = parser.getText();
            } else if ("relatives".equals(field)) {
                while (parser.nextToken() != JsonToken.END_ARRAY) {
                    while (parser.nextToken() != JsonToken.END_OBJECT) {
                        String relativeField = parser.getCurrentName();
                        parser.nextToken();

                        if ("match_id".equals(relativeField)) {
                            matchId = parser.getText();
                        } else if ("similarity".equals(relativeField)) {
                            similarity = Double.parseDouble(parser.getText());
                        } else if ("shared_segments".equals(relativeField)) {
                            sharedSegments = parser.getIntValue();
                        } else if ("relationship".equals(relativeField)) {
                            relationship = Relationship.fromDescription(parser.getText());
                        } else if ("user_relationship_code".equals(relativeField)) {
                            String code = parser.getText();
                            userRelationship = "null".equals(code) ? null
                                    : Relationship.fromCode(Integer.parseInt(code));
                        } else if ("predicted_relationship_code".equals(relativeField)) {
                            if (relationship == null) {
                                String code = parser.getText();
                                relationship = "null".equals(code) ? null
                                        : Relationship.fromCode(Integer.parseInt(code));
                            }
                        } else if ("range".equals(relativeField)) {
                            while (parser.nextToken() != JsonToken.END_ARRAY) {
                                range.add(Relationship.fromDescription(parser.getText()));
                            }
                        }
                        // ignored nested fields
                        else if ("family_locations".equals(relativeField)) {
                            while (parser.nextToken() != JsonToken.END_ARRAY) {
                                // ignore
                            }
                        } else if ("family_surnames".equals(relativeField)) {
                            while (parser.nextToken() != JsonToken.END_ARRAY) {
                                // ignore
                            }
                        } else if ("profile_picture_urls".equals(relativeField)) {
                            while (parser.nextToken() != JsonToken.END_OBJECT) {
                                // ignore
                            }
                        }
                    }
                }
                relatives.add(new Relative(profileId, matchId, similarity, sharedSegments, relationship,
                        userRelationship, range));
                matchId = null;
                similarity = 0.0d;
                sharedSegments = 0;
                relationship = null;
                userRelationship = null;
                range.clear();
            }
        }
        return relatives;
    } catch (IOException e) {
        logger.warn("could not parse relatives", e);
    } finally {
        try {
            inputStream.close();
        } catch (Exception e) {
            // ignored
        }
        try {
            if (parser != null) {
                parser.close();
            }
        } catch (Exception e) {
            // ignored
        }
    }
    return null;
}
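
One subtlety in the parser above: range is passed into the Relative constructor and then cleared so it can be reused for the next relative. That is safe only if Relative copies the set internally; if it stored the reference, range.clear() would retroactively empty every Relative built so far. A defensive variant (hypothetical, assuming nothing about Relative's internals) hands each Relative its own copy:

// Hypothetical defensive variant: copy before reuse, so clearing the
// shared accumulator cannot mutate already-constructed objects.
relatives.add(new Relative(profileId, matchId, similarity, sharedSegments,
        relationship, userRelationship, new HashSet<Relationship>(range)));
range.clear(); // now safe regardless of what Relative does with the set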

From source file:com.globalsight.ling.tm3.core.TuStorage.java

/**
 * For a create job leveraging the TM, the previous/next hash is not considered.
 */
public List<TM3Tuv<T>> getExactMatches(Connection conn, T key, TM3Locale sourceLocale,
        Set<? extends TM3Locale> targetLocales, Map<TM3Attribute, Object> inlineAttributes,
        Map<TM3Attribute, String> customAttributes, boolean lookupTarget, boolean locking, List<Long> tm3TmIds)
        throws SQLException {
    // avoid an awkward case in getExactMatchStatement
    if (targetLocales != null && targetLocales.isEmpty()) {
        return Collections.emptyList();
    }

    //#1. Find all possible candidates, no max number limitation...
    StatementBuilder sb = getExactMatchStatement(key, sourceLocale, targetLocales, inlineAttributes,
            lookupTarget, tm3TmIds);
    if (customAttributes.size() > 0) {
        sb = getAttributeMatchWrapper(sb, customAttributes);
    }
    PreparedStatement ps = sb.toPreparedStatement(conn);
    ResultSet rs = SQLUtil.execQuery(ps);
    Set<Long> tuvIds = new HashSet<Long>();
    List<Long> tuIds = new ArrayList<Long>();
    HashMap<Long, Long> tuId2srcTuvId = new HashMap<Long, Long>();
    while (rs.next()) {
        tuvIds.add(rs.getLong(1));
        tuIds.add(rs.getLong(2));
        tuId2srcTuvId.put(rs.getLong(2), rs.getLong(1));
    }
    ps.close();

    //#2. If there are too many, we need to reduce them for performance...
    boolean isCandidateFiltered = false;
    int max = 200;
    if (targetLocales != null && targetLocales.size() * 10 > max) {
        max = targetLocales.size() * 10;
    }
    String sid = getSidFromInlineAttributes(inlineAttributes);
    if (tuvIds.size() > max) {
        isCandidateFiltered = true;
        if (logger.isDebugEnabled()) {
            logger.debug("Candidate exact matches tuvIds number: " + tuvIds.size());
        }
        //#2.1 previous/next hash matched candidates must be returned ...
        Set<Long> hashMatchedTuvIds = new HashSet<Long>();
        List<Long> hashMatchedTuIds = new ArrayList<Long>();
        BaseTmTuv srcTuv = ((GSTuvData) key).getSrcTuv();
        long preHash = srcTuv.getPreviousHash();
        long nextHash = srcTuv.getNextHash();
        if (preHash != -1 && nextHash != -1) {
            sb = new StatementBuilder("SELECT tuv.id, tuv.tuId FROM ");
            sb.append(getStorage().getTuvTableName()).append(" AS tuv, ");
            sb.append(getStorage().getTuvExtTableName()).append(" AS ext");
            sb.append(" WHERE tuv.id = ext.tuvId");
            sb.append(" AND tuv.tuId = ext.tuId");
            sb.append(" AND ext.previousHash = ?").addValue(preHash);
            sb.append(" AND ext.nextHash = ?").addValue(nextHash);
            if (sid != null && sid.trim().length() > 0) {
                sb.append(" AND ext.sid = ?").addValue(sid);
            }
            sb.append(" AND tuv.tmId IN").append(SQLUtil.longGroup(tm3TmIds));
            sb.append(" AND tuv.id IN").append(SQLUtil.longGroup(new ArrayList<Long>(tuvIds)));

            ps = sb.toPreparedStatement(conn);
            rs = SQLUtil.execQuery(ps);
            while (rs.next()) {
                hashMatchedTuvIds.add(rs.getLong(1));
                hashMatchedTuIds.add(rs.getLong(2));
            }
            ps.close();
        }

        //#2.2 Ensure returning at most 10 for every locale...
        List<Long> targetLocaleIds = new ArrayList<Long>();
        if (targetLocales != null) {
            for (TM3Locale locale : targetLocales) {
                targetLocaleIds.add(locale.getId());
            }
        }
        sb = new StatementBuilder("SELECT tuv.tuId, tuv.localeId, COUNT(id) FROM ")
                .append(getStorage().getTuvTableName()).append(" AS tuv, ")
                .append(getStorage().getTuvExtTableName()).append(" AS ext");
        sb.append(" WHERE tuv.id = ext.tuvId").append(" AND tuv.tmId IN").append(SQLUtil.longGroup(tm3TmIds))
                .append(" AND tuv.tuId IN").append(SQLUtil.longGroup(tuIds));
        if (targetLocaleIds.size() > 0) {
            sb.append(" AND tuv.localeId IN").append(SQLUtil.longGroup(targetLocaleIds));
        }
        sb.append(" GROUP BY tuv.tuId, tuv.localeId");

        tuvIds.clear();
        tuIds.clear();
        tuvIds.addAll(hashMatchedTuvIds);
        tuIds.addAll(hashMatchedTuIds);

        long tuId = -1;
        long localeId = -1;
        HashMap<Long, Long> locale2tuvNum = new HashMap<Long, Long>();
        ps = sb.toPreparedStatement(conn);
        rs = SQLUtil.execQuery(ps);
        while (rs.next()) {
            tuId = rs.getLong(1);
            localeId = rs.getLong(2);
            Long tuvNum = locale2tuvNum.get(localeId);
            if (tuvNum == null) {
                tuvNum = 0L;
            }
            if (tuvNum < 10) {
                tuIds.add(tuId);
                tuvIds.add(tuId2srcTuvId.get(tuId));// add source tuvId!!!
                locale2tuvNum.put(localeId, tuvNum + rs.getLong(3));
            }
        }
        ps.close();
    }

    // If SID is not empty/null, need filter the candidate TUs here.
    // To avoid slow query, we filter TUs via 2 queries.
    if (!isCandidateFiltered && tuvIds.size() > 0 && sid != null && sid.length() > 0) {
        sb = new StatementBuilder("SELECT tuvId, tuId FROM ").append(getStorage().getTuvExtTableName())
                .append(" WHERE tmId IN").append(SQLUtil.longGroup(tm3TmIds)).append(" AND sid = ?")
                .addValue(sid).append(" AND tuvId IN ").append(SQLUtil.longGroup(new ArrayList<Long>(tuvIds)));
        ps = sb.toPreparedStatement(conn);
        rs = SQLUtil.execQuery(ps);
        tuvIds.clear();
        tuIds.clear();
        while (rs.next()) {
            tuvIds.add(rs.getLong(1));
            tuIds.add(rs.getLong(2));
        }
        ps.close();
    }

    // XXX Could save a query by having the lookup fetch the TU row, or even
    // the TU row + TUV source row (and lazily load rest...)
    List<TM3Tu<T>> tus = getTu(conn, tuIds, locking);
    List<TM3Tuv<T>> tuvs = new ArrayList<TM3Tuv<T>>();
    for (TM3Tu<T> tu : tus) {
        for (TM3Tuv<T> tuv : tu.getAllTuv()) {
            // XXX For safety we should probably be comparing keys here.
            // However, just testing string equality on the serialized
            // form can sometimes produce false negatives (for instance,
            // this is true in GS due to optional attributes on
            // the <segment> markup. TM3TuvData needs an extra method
            // to make this comparison. Since this only matters if
            // there is a hash collision, I'm not going to worry about
            // it for now.
            if (tuvIds.contains(tuv.getId())) {
                tuvs.add(tuv);
            }
        }
    }

    return tuvs;
}
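
getExactMatches repeatedly narrows its candidate sets in place: tuvIds.clear() and tuIds.clear() are followed by re-adding only the rows that survive the next query. A compact standalone sketch of that filter-in-place idiom, with a predicate standing in for the SQL round trip (in modern Java, Collection.removeIf covers the simple case; the clear-and-refill form matches code like the above, where the survivors come back from elsewhere):

import java.util.HashSet;
import java.util.Set;
import java.util.function.Predicate;

public class FilterInPlace {
    // Keep only the elements that pass the check, reusing the same set
    // (mirrors clearing tuvIds/tuIds and re-adding the filtered rows).
    public static <T> void retainMatching(Set<T> set, Predicate<T> keep) {
        Set<T> survivors = new HashSet<T>();
        for (T t : set) {
            if (keep.test(t)) {
                survivors.add(t);
            }
        }
        set.clear();
        set.addAll(survivors);
    }
}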

From source file:ar.com.daidalos.afiledialog.StorageUtils.java

public static List<StorageUtils.StorageInfo> getStorageList() {
    Set<Pair<String, String>> mountDevs = new HashSet<>();
    List<StorageInfo> storageInfos = new ArrayList<>();

    String primaryMountPoint = Environment.getExternalStorageDirectory().getPath();

    BufferedReader bufReader = null;
    String regDevices = ".*(/dev/block/vold|/dev/fuse).*";
    String regBadMountPoints = ".*(secure|asec|obb).*";
    String regFileSystems = ".*(vfat|ntfs|exfat|fat32|ext3|ext4|fuse).*";

    try {
        bufReader = new BufferedReader(new FileReader("/proc/self/mountinfo"));
        Log.d(TAG, "/proc/self/mountinfo");

        String line;
        while ((line = bufReader.readLine()) != null) {
            Log.d(TAG, line);

            List<String> columns = Arrays.asList(line.split(" "));

            // see https://www.kernel.org/doc/Documentation/filesystems/proc.txt
            // the part  "3.5  /proc/<pid>/mountinfo - Information about mounts"

            // mount ID:  unique identifier of the mount (may be reused after umount)
            //columns.get(0);

            // parent ID:  ID of parent (or of self for the top of the mount tree)
            //columns.get(1);

            // major:minor:  value of st_dev for files on filesystem
            String majorMinor = columns.get(2);

            // root:  root of the mount within the filesystem
            String rootOfMount = columns.get(3);

            // mount point:  mount point relative to the process's root
            String mountPoint = columns.get(4);

            // mount options:  per mount options
            String mountOptions = columns.get(5);

            // optional fields:  zero or more fields of the form "tag[:value]"
            int i = 6;
            for (; i < columns.size(); ++i) {
                // separator:  marks the end of the optional fields
                if (columns.get(i).equals("-")) {
                    break;
                }
            }

            // filesystem type:  name of filesystem of the form "type[.subtype]"
            String filesystemType = null;
            if (i + 1 < columns.size()) {
                filesystemType = columns.get(i + 1);
            }

            // mount source:  filesystem specific information or "none"
            String mountSource = null;
            if (i + 2 < columns.size()) {
                mountSource = columns.get(i + 2);
                if (mountSource.equals("none")) {
                    mountSource = null;
                }
            }

            // super options:  per super block options
            //if (i + 3 < columns.size()) {
            //    columns.get(i + 3);
            //}

            // mount point
            if (mountPoint.matches(regBadMountPoints)) {
                continue;
            }

            // device
            if (null == mountSource || !mountSource.matches(regDevices)) {
                continue;
            }

            // file system
            if (null == filesystemType || !filesystemType.matches(regFileSystems)) {
                continue;
            }

            // mount flags
            List<String> flags = Arrays.asList(mountOptions.split(","));

            boolean writable = flags.contains("rw");
            boolean readonly = flags.contains("ro");

            if (!writable && !readonly) {
                continue;
            }

            File mountDir = new File(mountPoint);

            if (!mountDir.exists() || !mountDir.isDirectory() || !mountDir.canWrite()) {
                continue;
            }

            Pair<String, String> device = Pair.create(majorMinor, rootOfMount);
            String mountDirName = mountDir.getName();
            boolean primary = mountPoint.equals(primaryMountPoint);

            if (primary && mountDevs.contains(device)) {

                for (Iterator<StorageInfo> iterator = storageInfos.iterator(); iterator.hasNext();) {

                    StorageInfo info = iterator.next();

                    if (info.device.equals(device)) {
                        iterator.remove();
                        storageInfos.add(new StorageInfo(device, mountPoint, mountDirName, true, readonly));
                        break;
                    }
                }
            }

            if (mountDevs.contains(device)) {
                continue;
            }

            mountDevs.add(Pair.create(majorMinor, rootOfMount));
            storageInfos.add(new StorageInfo(device, mountPoint, mountDirName, primary, readonly));
        }

    } catch (IOException ex) {
        Log.d(TAG, ex.getLocalizedMessage());

    } finally {
        if (bufReader != null) {
            try {
                bufReader.close();
            } catch (IOException ignored) {
            }
        }
    }

    mountDevs.clear();
    mountDevs = null;

    Collections.sort(storageInfos, new Comparator<StorageInfo>() {
        public int compare(StorageInfo info1, StorageInfo info2) {
            if (info1 == null && info2 == null) {
                return 0;
            }
            if (info1 == null) {
                return -1;
            }
            if (info2 == null) {
                return 1;
            }
            if (info1.mountDirName == null && info2.mountDirName == null) {
                return 0;
            }
            if (info1.mountDirName == null) {
                return -1;
            }
            if (info2.mountDirName == null) {
                return 1;
            }
            return info1.mountDirName.compareTo(info2.mountDirName);
        }
    });

    return storageInfos;
}
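
A small style note on the helper above: calling mountDevs.clear() and then setting mountDevs = null just before returning has no practical effect, since the local reference goes out of scope anyway and the garbage collector reclaims the set either way. clear() pays off when a long-lived set is reused, as in the earlier examples, not when the set is about to be discarded.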

From source file:com.streamsets.pipeline.stage.processor.fieldhasher.TestFieldHasherProcessor.java

@Test
public void testUnsupportedFieldTypes() throws StageException {
    //valid Fields
    final String STRING_FIELD = "stringField";
    final String INT_FIELD = "intField";
    final String BOOLEAN_FIELD = "booleanField";

    //Invalid Fields
    final String NULL_FIELD = "nullField";
    final String LIST_FIELD = "listField";
    final String MAP_FIELD = "mapField";
    final String LIST_MAP_FIELD = "listMapField";

    final String ROOT_PATH = "/";
    final String TARGET_FIELD = "targetField";

    Field stringField = Field.create("string1");
    Field intField = Field.create(1);
    Field booleanField = Field.create(true);
    Field nullField = Field.create(Field.Type.FLOAT, null);

    List<Field> list = new ArrayList<>();
    list.add(Field.create(1));
    list.add(Field.create(2));
    list.add(Field.create(3));
    Field listField = Field.create(list);

    Map<String, Field> map = new HashMap<>();
    map.put("k1", Field.create("v1"));
    map.put("k2", Field.create("v2"));
    map.put("k3", Field.create("v3"));

    Field mapField = Field.create(map);

    LinkedHashMap<String, Field> listMap = new LinkedHashMap<>();
    listMap.put("lk1", Field.create("v1"));
    listMap.put("lk2", Field.create("v2"));
    listMap.put("lk3", Field.create("v3"));
    Field listMapField = Field.createListMap(listMap);

    Map<String, Field> fieldMap = new LinkedHashMap<>();
    fieldMap.put(STRING_FIELD, stringField);
    fieldMap.put(INT_FIELD, intField);
    fieldMap.put(BOOLEAN_FIELD, booleanField);
    fieldMap.put(NULL_FIELD, nullField);
    fieldMap.put(MAP_FIELD, mapField);
    fieldMap.put(LIST_FIELD, listField);
    fieldMap.put(LIST_MAP_FIELD, listMapField);

    Record record = RecordCreator.create("s", "s:1");
    record.set(Field.create(fieldMap));

    final List<String> fieldsToHash = ImmutableList.of(ROOT_PATH + STRING_FIELD, ROOT_PATH + INT_FIELD,
            ROOT_PATH + BOOLEAN_FIELD, ROOT_PATH + NULL_FIELD, ROOT_PATH + LIST_FIELD, ROOT_PATH + MAP_FIELD,
            ROOT_PATH + LIST_MAP_FIELD);

    Set<String> expectedValidFields = new HashSet<String>();
    expectedValidFields.addAll(
            FieldRegexUtil.getMatchingFieldPaths(ROOT_PATH + STRING_FIELD, record.getEscapedFieldPaths()));
    expectedValidFields
            .addAll(FieldRegexUtil.getMatchingFieldPaths(ROOT_PATH + INT_FIELD, record.getEscapedFieldPaths()));
    expectedValidFields.addAll(
            FieldRegexUtil.getMatchingFieldPaths(ROOT_PATH + BOOLEAN_FIELD, record.getEscapedFieldPaths()));

    //Test HashInPlace
    HasherConfig hasherConfig = createInPlaceHasherProcessor(fieldsToHash, HashType.SHA2);
    hasherConfig.useSeparator = true;

    Map<String, Field> expectedVals = new HashMap<String, Field>();
    expectedVals.put(STRING_FIELD, Field.create(computeHashForRecordUsingFields(record,
            ImmutableList.of(ROOT_PATH + STRING_FIELD), HashType.SHA2, OLD_WAY)));
    expectedVals.put(INT_FIELD, Field.create(computeHashForRecordUsingFields(record,
            ImmutableList.of(ROOT_PATH + INT_FIELD), HashType.SHA2, OLD_WAY)));
    expectedVals.put(BOOLEAN_FIELD, Field.create(computeHashForRecordUsingFields(record,
            ImmutableList.of(ROOT_PATH + BOOLEAN_FIELD), HashType.SHA2, OLD_WAY)));
    expectedVals.put(NULL_FIELD, nullField);
    expectedVals.put(LIST_FIELD, listField);
    expectedVals.put(MAP_FIELD, mapField);
    expectedVals.put(LIST_MAP_FIELD, listMapField);

    checkFieldIssueToError(record, hasherConfig, expectedValidFields);

    record = RecordCreator.create("s", "s:1");
    record.set(Field.create(fieldMap));

    checkFieldIssueContinue(record, hasherConfig, expectedValidFields, expectedVals);

    //Test HashToTarget
    hasherConfig = createTargetFieldHasherProcessor(fieldsToHash, HashType.SHA2, ROOT_PATH + TARGET_FIELD, "");
    hasherConfig.useSeparator = true;

    record = RecordCreator.create("s", "s:1");
    record.set(Field.create(fieldMap));
    checkFieldIssueToError(record, hasherConfig, expectedValidFields);

    record = RecordCreator.create("s", "s:1");
    record.set(Field.create(fieldMap));

    expectedVals.clear();
    expectedVals.put(STRING_FIELD, stringField);
    expectedVals.put(INT_FIELD, intField);
    expectedVals.put(BOOLEAN_FIELD, booleanField);
    expectedVals.put(NULL_FIELD, nullField);
    expectedVals.put(LIST_FIELD, listField);
    expectedVals.put(MAP_FIELD, mapField);
    expectedVals.put(LIST_MAP_FIELD, listMapField);
    expectedVals.put(TARGET_FIELD,
            Field.create(computeHashForRecordUsingFields(record, fieldsToHash, HashType.SHA2, OLD_WAY)));

    checkFieldIssueContinue(record, hasherConfig, expectedValidFields, expectedVals);

    //Test RecordHasherConfig
    hasherConfig = createRecordHasherConfig(HashType.SHA2, false, ROOT_PATH + TARGET_FIELD, "");
    hasherConfig.useSeparator = true;

    record = RecordCreator.create("s", "s:1");
    record.set(Field.create(fieldMap));

    expectedValidFields.clear();
    expectedValidFields.addAll(record.getEscapedFieldPaths());
    expectedValidFields.remove("");
    expectedValidFields.remove(ROOT_PATH + LIST_FIELD);
    expectedValidFields.remove(ROOT_PATH + MAP_FIELD);
    expectedValidFields.remove(ROOT_PATH + LIST_MAP_FIELD);
    expectedValidFields.remove(ROOT_PATH + NULL_FIELD);

    //Check On Field Error, Even specifying error should not throw error for record
    // as we just skip unsupported data types and null fields

    FieldHasherProcessor processor = PowerMockito
            .spy(new FieldHasherProcessor(hasherConfig, OnStagePreConditionFailure.TO_ERROR));

    ProcessorRunner runner = new ProcessorRunner.Builder(FieldHasherDProcessor.class, processor)
            .addOutputLane("a").setOnRecordError(OnRecordError.TO_ERROR).build();
    runner.runInit();
    try {
        StageRunner.Output output = runner.runProcess(Arrays.asList(record));
        Assert.assertEquals(1, output.getRecords().get("a").size());
        Assert.assertEquals(0, runner.getErrorRecords().size());
    } finally {
        runner.runDestroy();
    }

    record = RecordCreator.create("s", "s:1");
    record.set(Field.create(fieldMap));

    expectedVals.clear();
    expectedVals.put(STRING_FIELD, stringField);
    expectedVals.put(INT_FIELD, intField);
    expectedVals.put(BOOLEAN_FIELD, booleanField);
    expectedVals.put(NULL_FIELD, nullField);
    expectedVals.put(LIST_FIELD, listField);
    expectedVals.put(MAP_FIELD, mapField);
    expectedVals.put(LIST_MAP_FIELD, listMapField);
    expectedVals.put(TARGET_FIELD, Field.create(
            computeHashForRecordUsingFields(record, record.getEscapedFieldPaths(), HashType.SHA2, OLD_WAY)));

    checkFieldIssueContinue(record, hasherConfig, expectedValidFields, expectedVals);
}
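
Note how the test reuses expectedVals and expectedValidFields across its three scenarios by calling clear() and repopulating them, rather than constructing fresh collections; the same map and set objects carry through all the checkFieldIssueContinue assertions.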