Example usage for java.util LinkedHashMap entrySet

Introduction

On this page you can find example usage of java.util.LinkedHashMap.entrySet(), collected from open-source projects.

Prototype

public Set<Map.Entry<K, V>> entrySet() 

Document

Returns a Set view of the mappings contained in this map.
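
For orientation, a minimal self-contained sketch (not taken from the projects below) showing that entrySet() on a LinkedHashMap iterates in insertion order:

import java.util.LinkedHashMap;
import java.util.Map;

public class EntrySetOrderDemo {
    public static void main(String[] args) {
        Map<String, Integer> map = new LinkedHashMap<>();
        map.put("first", 1);
        map.put("second", 2);
        map.put("third", 3);

        // Unlike HashMap, a LinkedHashMap's entry set iterates in insertion order.
        for (Map.Entry<String, Integer> entry : map.entrySet()) {
            System.out.println(entry.getKey() + " = " + entry.getValue());
        }
        // Prints: first = 1, second = 2, third = 3
    }
}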

Usage

From source file:com.ultramegasoft.flavordex2.util.EntryFormHelper.java

/**
 * Set up the extra fields in the form.
 *
 * @param extras The list of extras
 */
public void setExtras(@NonNull LinkedHashMap<String, ExtraFieldHolder> extras) {
    final LayoutInflater inflater = LayoutInflater.from(mFragment.getContext());
    for (Map.Entry<String, ExtraFieldHolder> extra : extras.entrySet()) {
        mExtras.put(extra.getKey(), extra.getValue());
        if (!extra.getValue().preset && !extra.getValue().deleted) {
            final View root = inflater.inflate(R.layout.edit_info_extra, mInfoTable, false);
            final TextView label = root.findViewById(R.id.label);
            final EditText value = root.findViewById(R.id.value);
            label.setText(mFragment.getString(R.string.label_field, extra.getValue().name));
            initEditText(value, extra.getValue());
            mInfoTable.addView(root);

            getExtraViews().put(extra.getValue(), value);
        }
    }
}

From source file:eu.geopaparazzi.mapsforge.sourcesview.SourcesExpandableListAdapter.java

/**
 * @param activity         activity to use.
 * @param folder2TablesMap the folder and table map.
 */
public SourcesExpandableListAdapter(Activity activity, LinkedHashMap<String, List<BaseMap>> folder2TablesMap) {
    this.activity = activity;

    selectedBaseMap = BaseMapSourcesManager.INSTANCE.getSelectedBaseMap();
    selectionColorColor = Compat.getColor(activity, R.color.main_selection);

    folderList = new ArrayList<>();
    tablesList = new ArrayList<>();
    for (Entry<String, List<BaseMap>> entry : folder2TablesMap.entrySet()) {
        folderList.add(entry.getKey());
        tablesList.add(entry.getValue());
    }

}
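
Because a LinkedHashMap returns its keys and values in the same insertion order, the loop above could equivalently copy the two collection views directly; a sketch under that assumption:

    folderList = new ArrayList<>(folder2TablesMap.keySet());
    tablesList = new ArrayList<>(folder2TablesMap.values());

Iterating entrySet() as in the original keeps each key visibly paired with its value, which matters if the loop later grows filtering logic.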

From source file:com.genentech.chemistry.openEye.apps.SDFStructureTagger.java

/**
 *
 * @return true if at least one match occurred.
 */
private boolean addOccurrenceToOutput(OEMolBase mol) {
    LinkedHashMap<String, Integer> map = structureTagger.countOccurrence(mol);
    Iterator<Entry<String, Integer>> iterator = map.entrySet().iterator();
    while (iterator.hasNext()) {
        Entry<String, Integer> entry = iterator.next();
        oechem.OESetSDData(mol, entry.getKey(), String.valueOf(entry.getValue()));
    }
    return map.size() > 0;
}

From source file:com.genentech.chemistry.openEye.apps.SDFStructureTagger.java

/**
 *
 * @return true if at least one match occurred.
 */
private boolean addExistsToOutput(OEMolBase mol) {
    LinkedHashMap<String, Boolean> map = structureTagger.checkOccurrence(mol);
    Iterator<Entry<String, Boolean>> iterator = map.entrySet().iterator();
    while (iterator.hasNext()) {
        Entry<String, Boolean> entry = iterator.next();
        oechem.OESetSDData(mol, entry.getKey(), entry.getValue().booleanValue() ? "1" : "0");
    }
    return map.size() > 0;
}
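
Neither method above removes entries during iteration, so the explicit Iterator is not required; an enhanced for loop over entrySet() behaves identically. A sketch of the second loop in that style:

    for (Entry<String, Boolean> entry : map.entrySet()) {
        oechem.OESetSDData(mol, entry.getKey(), entry.getValue() ? "1" : "0");
    }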

From source file:com.nextdoor.bender.ipc.s3.S3Transport.java

@Override
public void sendBatch(TransportBuffer buffer, LinkedHashMap<String, String> partitions, Context context)
        throws TransportException {
    S3TransportBuffer buf = (S3TransportBuffer) buffer;

    /*
     * Create s3 key (filepath + filename)
     */
    LinkedHashMap<String, String> parts = new LinkedHashMap<String, String>(partitions);

    String filename = parts.remove(FILENAME_KEY);

    if (filename == null) {
        filename = context.getAwsRequestId();
    }

    String key = parts.entrySet().stream().map(s -> s.getKey() + "=" + s.getValue())
            .collect(Collectors.joining("/"));

    key = (key.equals("") ? filename : key + '/' + filename);

    if (this.basePath.endsWith("/")) {
        key = this.basePath + key;
    } else if (!this.basePath.equals("")) {
        key = this.basePath + '/' + key;
    }

    // TODO: make this dynamic
    if (key.endsWith(".gz")) {
        key = key.substring(0, key.length() - 3);
    }

    /*
     * Add or strip out compression format extension
     *
     * TODO: get this based on the compression codec
     */
    if (this.compress || buf.isCompressed()) {
        key += ".bz2";
    }

    ByteArrayOutputStream os = buf.getInternalBuffer();

    /*
     * Compress stream if needed. Don't compress a compressed stream.
     */
    ByteArrayOutputStream payload;
    if (this.compress && !buf.isCompressed()) {
        payload = compress(os);
    } else {
        payload = os;
    }

    /*
     * For memory efficiency convert the output stream into an InputStream. This is done using the
     * easystream library but under the hood it uses piped streams to facilitate this process. This
     * avoids copying the entire contents of the OutputStream to populate the InputStream. Note that
     * this process creates another thread to consume from the InputStream.
     */
    final String s3Key = key;

    /*
     * Write to OutputStream
     */
    final InputStreamFromOutputStream<String> isos = new InputStreamFromOutputStream<String>() {
        public String produce(final OutputStream dataSink) throws Exception {
            /*
             * Note this is executed in a different thread
             */
            payload.writeTo(dataSink);
            return null;
        }
    };

    /*
     * Consume InputStream
     */
    try {
        sendStream(isos, s3Key, payload.size());
    } finally {
        try {
            isos.close();
        } catch (IOException e) {
            throw new TransportException(e);
        } finally {
            buf.close();
        }
    }
}
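
The entrySet() call above is the core of the method: each partition entry becomes a key=value path segment, relying on insertion order to keep the segments stable. A self-contained sketch of just that step (partition names are illustrative):

import java.util.LinkedHashMap;
import java.util.stream.Collectors;

public class PartitionPathDemo {
    public static void main(String[] args) {
        LinkedHashMap<String, String> parts = new LinkedHashMap<>();
        parts.put("year", "2024");
        parts.put("month", "06");

        // Insertion order is preserved, so the segments always come out
        // in the order the partitions were added.
        String key = parts.entrySet().stream()
                .map(s -> s.getKey() + "=" + s.getValue())
                .collect(Collectors.joining("/"));

        System.out.println(key); // year=2024/month=06
    }
}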

From source file:AndroidUninstallStock.java

private static LinkedHashMap<String, String> _getListFromPattern(LinkedHashMap<String, String> apkorliblist,
        HashMap<String, String> pattern, AusInfo info, String status, boolean library) {
    LinkedHashMap<String, String> res = new LinkedHashMap<String, String>();
    if (library && !pattern.get("in").equalsIgnoreCase("library")) {
        return res;
    }
    int flags = getBoolean(pattern.get("case-insensitive")) ? Pattern.CASE_INSENSITIVE | Pattern.UNICODE_CASE
            : 0;
    try {
        Pattern pat = Pattern.compile(getBoolean(pattern.get("regexp")) ? pattern.get("pattern")
                : Pattern.quote(pattern.get("pattern")), flags);
        for (Map.Entry<String, String> apk : apkorliblist.entrySet()) {
            String need = "";
            switch (pattern.get("in")) {
            case "library": // TODO well as to specify the pattern package...
            case "path":
                need = apk.getKey();
                break;
            case "path+package":
                need = apk.getKey() + apk.getValue();
                break;
            case "apk":
                need = apk.getKey().substring(apk.getKey().lastIndexOf('/') + 1);
                break;
            case "package":
            default:
                need = apk.getValue();
                break;
            }
            if (pat.matcher(need).find()) {
                res.put(apk.getKey(), apk.getValue());
                System.out.println(status + need + " - " + pat.pattern());
            }
        }
    } catch (PatternSyntaxException e) {
        System.out.println("Warring in: " + info + " pattern: " + e);
    }
    return res;
}

From source file:org.splevo.jamopp.extraction.cache.ReferenceCache.java

/**
 * Resets the cache for the given resource and saves the cache afterwards to prevent old entries
 * from appearing after loading the resource again.
 *
 * @param resource
 *            The resource for which the cache shall be reset.
 */
public void reset(Resource resource) {
    if (!isCached(resource)) {
        return;
    }
    final String uriToRemovePrefix = resource.getURI().toString() + "#";
    cacheData.getResourceToTargetURIListMap().remove(resource.getURI().toString());
    for (LinkedHashMap<String, String> map : cacheData.getResourceToTargetURIListMap().values()) {
        List<String> toRemove = Lists.newArrayList();
        for (Entry<String, String> entry : map.entrySet()) {
            if (entry.getValue().startsWith(uriToRemovePrefix)) {
                toRemove.add(entry.getKey());
            }
        }
        map.keySet().removeAll(toRemove);
    }
    save();
}
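
The collect-then-remove pattern above sidesteps a ConcurrentModificationException. On Java 8 and later, Collection.removeIf on the entry set does the same cleanup in one pass; a sketch assuming the same map and prefix:

    map.entrySet().removeIf(entry -> entry.getValue().startsWith(uriToRemovePrefix));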

From source file:com.tacitknowledge.util.migration.DistributedMigrationProcess.java

/**
 * Execute a dry run of the patch process and return a count of the number
 * of patches we would have executed.
 *
 * @param currentPatchInfoStore   The current patch info store
 * @param migrationsWithLaunchers a map of migration task to launcher
 * @return count of the number of patches
 */
protected final int patchDryRun(final PatchInfoStore currentPatchInfoStore,
        final LinkedHashMap migrationsWithLaunchers) throws MigrationException {
    int taskCount = 0;

    for (Iterator i = migrationsWithLaunchers.entrySet().iterator(); i.hasNext();) {
        Entry entry = (Entry) i.next();
        MigrationTask task = (MigrationTask) entry.getKey();
        JdbcMigrationLauncher launcher = (JdbcMigrationLauncher) entry.getValue();

        if (getMigrationRunnerStrategy().shouldMigrationRun(task.getLevel(), currentPatchInfoStore)) {
            log.info("Will execute patch task '" + getTaskLabel(task) + "'");
            if (log.isDebugEnabled()) {
                // Get all the contexts the task will execute in
                for (Iterator j = launcher.getContexts().keySet().iterator(); j.hasNext();) {
                    MigrationContext launcherContext = (MigrationContext) j.next();
                    log.debug("Task will execute in context '" + launcherContext + "'");
                }
            }
            taskCount++;
        }

    }

    return taskCount;
}
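
The raw LinkedHashMap in the signature forces the casts inside the loop. If the map can be declared with its type parameters, the same iteration reads more directly; a sketch assuming LinkedHashMap<MigrationTask, JdbcMigrationLauncher>:

    for (Entry<MigrationTask, JdbcMigrationLauncher> entry : migrationsWithLaunchers.entrySet()) {
        MigrationTask task = entry.getKey();
        JdbcMigrationLauncher launcher = entry.getValue();
        // ... same dry-run logic as above
    }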

From source file:org.easyrec.plugin.aggregator.impl.AggregatorServiceImpl.java

private void pruneProfile(LinkedHashMap<String, Object> profileField, Integer threshold) {
    for (Iterator<Entry<String, Object>> it = profileField.entrySet().iterator(); it.hasNext();) {
        Entry<String, Object> entry = it.next();
        Integer val = (Integer) entry.getValue();
        if (val < threshold) {
            it.remove(); // using iterator to avoid ConcurrentModificationExceptions!!
        }
    }
}
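
On Java 8 and later the same pruning can be written with removeIf, which drives the iterator internally and is equally safe against ConcurrentModificationException; a sketch with the same parameters:

private void pruneProfile(LinkedHashMap<String, Object> profileField, Integer threshold) {
    profileField.entrySet().removeIf(entry -> (Integer) entry.getValue() < threshold);
}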

From source file:org.apache.hadoop.hive.ql.optimizer.MapJoinProcessor.java

/**
 * Generate the MapRed Local Work for the given map-join operator
 *
 * @param newWork
 *          the MapredWork to attach the generated local work to.
 * @param mapJoinOp
 *          map-join operator for which local work needs to be generated.
 * @param bigTablePos
 *          position of the big table among the join operator's parents.
 * @throws SemanticException
 */
private static void genMapJoinLocalWork(MapredWork newWork, MapJoinOperator mapJoinOp, int bigTablePos)
        throws SemanticException {
    // keep the small table alias to avoid concurrent modification exception
    ArrayList<String> smallTableAliasList = new ArrayList<String>();

    // create a new  MapredLocalWork
    MapredLocalWork newLocalWork = new MapredLocalWork(
            new LinkedHashMap<String, Operator<? extends OperatorDesc>>(),
            new LinkedHashMap<String, FetchWork>());

    for (Map.Entry<String, Operator<? extends OperatorDesc>> entry : newWork.getMapWork().getAliasToWork()
            .entrySet()) {
        String alias = entry.getKey();
        Operator<? extends OperatorDesc> op = entry.getValue();

        // if the table scan is for big table; then skip it
        // tracing down the operator tree from the table scan operator
        Operator<? extends OperatorDesc> parentOp = op;
        Operator<? extends OperatorDesc> childOp = op.getChildOperators().get(0);
        while ((childOp != null) && (!childOp.equals(mapJoinOp))) {
            parentOp = childOp;
            assert parentOp.getChildOperators().size() == 1;
            childOp = parentOp.getChildOperators().get(0);
        }
        if (childOp == null) {
            throw new SemanticException("Cannot find join op by tracing down the table scan operator tree");
        }
        // skip the big table pos
        int i = childOp.getParentOperators().indexOf(parentOp);
        if (i == bigTablePos) {
            continue;
        }
        // set alias to work and put into smallTableAliasList
        newLocalWork.getAliasToWork().put(alias, op);
        smallTableAliasList.add(alias);
        // get input path and remove this alias from pathToAlias
        // because this file will be fetched by fetch operator
        LinkedHashMap<String, ArrayList<String>> pathToAliases = newWork.getMapWork().getPathToAliases();

        // keep record all the input path for this alias
        HashSet<String> pathSet = new HashSet<String>();
        HashSet<String> emptyPath = new HashSet<String>();
        for (Map.Entry<String, ArrayList<String>> entry2 : pathToAliases.entrySet()) {
            String path = entry2.getKey();
            ArrayList<String> list = entry2.getValue();
            if (list.contains(alias)) {
                // add to path set
                pathSet.add(path);
                // remove this alias from the alias list
                list.remove(alias);
                if (list.size() == 0) {
                    emptyPath.add(path);
                }
            }
        }
        // remove paths that no longer have any associated alias
        for (String path : emptyPath) {
            pathToAliases.remove(path);
        }

        // create fetch work
        FetchWork fetchWork = null;
        List<Path> partDir = new ArrayList<Path>();
        List<PartitionDesc> partDesc = new ArrayList<PartitionDesc>();

        for (String tablePath : pathSet) {
            PartitionDesc partitionDesc = newWork.getMapWork().getPathToPartitionInfo().get(tablePath);
            // create fetchwork for non partitioned table
            if (partitionDesc.getPartSpec() == null || partitionDesc.getPartSpec().size() == 0) {
                fetchWork = new FetchWork(new Path(tablePath), partitionDesc.getTableDesc());
                break;
            }
            // if the table is partitioned, add partDir and partitionDesc
            partDir.add(new Path(tablePath));
            partDesc.add(partitionDesc);
        }
        // create fetchwork for partitioned table
        if (fetchWork == null) {
            TableDesc table = newWork.getMapWork().getAliasToPartnInfo().get(alias).getTableDesc();
            fetchWork = new FetchWork(partDir, partDesc, table);
        }
        // set alias to fetch work
        newLocalWork.getAliasToFetchWork().put(alias, fetchWork);
    }
    // remove small table aliases from aliasToWork to avoid concurrent modification
    for (String alias : smallTableAliasList) {
        newWork.getMapWork().getAliasToWork().remove(alias);
    }

    // set up local work
    newWork.getMapWork().setMapRedLocalWork(newLocalWork);
    // remove reducer
    newWork.setReduceWork(null);
}
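
The pathToAliases loop above illustrates the rule the earlier examples also follow: mutating an entry's value in place (here, removing an alias from its list) during entrySet() iteration is safe, but removing keys must wait until the iteration is done. A minimal sketch of that two-phase cleanup (names are illustrative):

    Set<String> emptyPaths = new HashSet<>();
    for (Map.Entry<String, ArrayList<String>> e : pathToAliases.entrySet()) {
        e.getValue().remove(alias);     // mutating the value is safe mid-iteration
        if (e.getValue().isEmpty()) {
            emptyPaths.add(e.getKey()); // defer structural removal
        }
    }
    pathToAliases.keySet().removeAll(emptyPaths);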