Example usage for java.util LinkedList pop

List of usage examples for java.util LinkedList pop

Introduction

On this page you can find usage examples for java.util.LinkedList.pop().

Prototype

public E pop() 

Document

Pops an element from the stack represented by this list.
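
In other words, pop() removes and returns the first element of the list, and throws NoSuchElementException when the list is empty. Below is a minimal, self-contained sketch of that behavior (the class name is hypothetical and the code is not taken from any of the projects listed further down):

import java.util.LinkedList;
import java.util.NoSuchElementException;

public class LinkedListPopDemo {
    public static void main(String[] args) {
        LinkedList<String> stack = new LinkedList<>();
        stack.push("first");             // push() adds to the head of the list
        stack.push("second");

        // pop() removes and returns the head, so elements come back in LIFO order
        System.out.println(stack.pop()); // prints "second"
        System.out.println(stack.pop()); // prints "first"

        try {
            stack.pop();                 // popping an empty list throws
        } catch (NoSuchElementException e) {
            System.out.println("The list is empty");
        }
    }
}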

Usage

From source file:com.adobe.acs.commons.mcp.impl.processes.asset.HierarchicalElement.java

@SuppressWarnings("squid:S00112")
default void visitAllFiles(CheckedConsumer<HierarchicalElement> visitor,
        CheckedFunction<HierarchicalElement, Stream<HierarchicalElement>> childFolderFunction,
        CheckedFunction<HierarchicalElement, Stream<HierarchicalElement>> childFileFunction) throws Exception {
    LinkedList<HierarchicalElement> nodes = new LinkedList<>();
    nodes.add(this);
    while (!nodes.isEmpty()) {
        HierarchicalElement node = nodes.pop();
        childFolderFunction.apply(node).forEach(nodes::add);
        for (HierarchicalElement child : childFileFunction.apply(node).collect(Collectors.toList())) {
            visitor.accept(child);
        }
    }
}
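
Note that in the example above pop() is paired with add(), which appends to the tail of the list, so the worklist is consumed in FIFO order and folders are visited breadth-first even though pop() is nominally a stack operation. A minimal sketch of that effect (hypothetical class name, not from the project above):

import java.util.LinkedList;

public class FifoWithPopDemo {
    public static void main(String[] args) {
        LinkedList<Integer> worklist = new LinkedList<>();
        worklist.add(1); // add() appends to the tail
        worklist.add(2);
        worklist.add(3);

        // pop() removes from the head, so this prints 1, 2, 3 (FIFO order)
        while (!worklist.isEmpty()) {
            System.out.println(worklist.pop());
        }
    }
}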

From source file:com.mongodb.hadoop.splitter.ShardChunkMongoSplitter.java

@Override
public List<InputSplit> calculateSplits() throws SplitFailedException {
    this.init();
    boolean targetShards = MongoConfigUtil.canReadSplitsFromShards(conf);
    DB configDB = this.mongo.getDB("config");
    DBCollection chunksCollection = configDB.getCollection("chunks");

    MongoClientURI inputURI = MongoConfigUtil.getInputURI(conf);
    String inputNS = inputURI.getDatabase() + "." + inputURI.getCollection();

    DBCursor cur = chunksCollection.find(new BasicDBObject("ns", inputNS));

    int numChunks = 0;

    Map<String, String> shardsMap = null;
    if (targetShards) {
        try {
            shardsMap = this.getShardsMap();
        } catch (Exception e) {
            //Something went wrong when trying to
            //read the shards data from the config server,
            //so abort the splitting
            throw new SplitFailedException("Couldn't get shards information from config server", e);
        }
    }

    List<String> mongosHostNames = MongoConfigUtil.getInputMongosHosts(this.conf);
    if (targetShards && mongosHostNames.size() > 0) {
        throw new SplitFailedException(
                "Setting both mongo.input.split.read_from_shards and mongo.input.mongos_hosts"
                        + " does not make sense. ");
    }

    if (mongosHostNames.size() > 0) {
        LOG.info("Using multiple mongos instances (round robin) for reading input.");
    }

    Map<String, LinkedList<InputSplit>> shardToSplits = new HashMap<String, LinkedList<InputSplit>>();

    while (cur.hasNext()) {
        final BasicDBObject row = (BasicDBObject) cur.next();
        BasicDBObject chunkLowerBound = (BasicDBObject) row.get("min");
        BasicDBObject chunkUpperBound = (BasicDBObject) row.get("max");
        MongoInputSplit chunkSplit = createSplitFromBounds(chunkLowerBound, chunkUpperBound);
        chunkSplit.setInputURI(inputURI);
        String shard = (String) row.get("shard");
        if (targetShards) {
            //The job is configured to target shards, so replace the
            //mongos hostname with the host of the shard's servers
            String shardHosts = shardsMap.get(shard);
            if (shardHosts == null) {
                throw new SplitFailedException("Couldn't find shard ID: " + shard + " in config.shards.");
            }

            MongoClientURI newURI = rewriteURI(inputURI, shardHosts);
            chunkSplit.setInputURI(newURI);
        } else if (mongosHostNames.size() > 0) {
            //Multiple mongos hosts are specified, so
            //choose a host name in round-robin fashion
            //and rewrite the URI using that hostname.
            //This evenly distributes the load to avoid
            //pegging a single mongos instance.
            String roundRobinHost = mongosHostNames.get(numChunks % mongosHostNames.size());
            MongoClientURI newURI = rewriteURI(inputURI, roundRobinHost);
            chunkSplit.setInputURI(newURI);
        }
        LinkedList<InputSplit> shardList = shardToSplits.get(shard);
        if (shardList == null) {
            shardList = new LinkedList<InputSplit>();
            shardToSplits.put(shard, shardList);
        }
        shardList.add(chunkSplit);
        numChunks++;
    }

    final List<InputSplit> splits = new ArrayList<InputSplit>(numChunks);
    int splitIndex = 0;
    while (splitIndex < numChunks) {
        Set<String> shardSplitsToRemove = new HashSet<String>();
        for (Map.Entry<String, LinkedList<InputSplit>> shardSplits : shardToSplits.entrySet()) {
            LinkedList<InputSplit> shardSplitsList = shardSplits.getValue();
            InputSplit split = shardSplitsList.pop();
            splits.add(splitIndex, split);
            splitIndex++;
            if (shardSplitsList.isEmpty()) {
                shardSplitsToRemove.add(shardSplits.getKey());
            }
        }
        for (String shardName : shardSplitsToRemove) {
            shardToSplits.remove(shardName);
        }
    }

    return splits;
}

From source file:com.zjy.mongo.splitter.ShardChunkMongoSplitter.java

@Override
public List<InputSplit> calculateSplits() throws SplitFailedException {
    boolean targetShards = MongoConfigUtil.canReadSplitsFromShards(getConfiguration());
    DB configDB = getConfigDB();
    DBCollection chunksCollection = configDB.getCollection("chunks");

    MongoClientURI inputURI = MongoConfigUtil.getInputURI(getConfiguration());
    String inputNS = inputURI.getDatabase() + "." + inputURI.getCollection();

    DBCursor cur = chunksCollection.find(new BasicDBObject("ns", inputNS));

    int numChunks = 0;

    Map<String, String> shardsMap = null;
    if (targetShards) {
        try {
            shardsMap = getShardsMap();
        } catch (Exception e) {
            //Something went wrong when trying to
            //read the shards data from the config server,
            //so abort the splitting
            throw new SplitFailedException("Couldn't get shards information from config server", e);
        }
    }

    List<String> mongosHostNames = MongoConfigUtil.getInputMongosHosts(getConfiguration());
    if (targetShards && mongosHostNames.size() > 0) {
        throw new SplitFailedException(
                "Setting both mongo.input.split.read_from_shards and mongo.input.mongos_hosts"
                        + " does not make sense. ");
    }

    if (mongosHostNames.size() > 0) {
        LOG.info("Using multiple mongos instances (round robin) for reading input.");
    }

    Map<String, LinkedList<InputSplit>> shardToSplits = new HashMap<String, LinkedList<InputSplit>>();

    try {
        while (cur.hasNext()) {
            final BasicDBObject row = (BasicDBObject) cur.next();
            BasicDBObject chunkLowerBound = (BasicDBObject) row.get("min");
            BasicDBObject chunkUpperBound = (BasicDBObject) row.get("max");
            MongoInputSplit chunkSplit = createSplitFromBounds(chunkLowerBound, chunkUpperBound);
            chunkSplit.setInputURI(inputURI);
            String shard = (String) row.get("shard");
            if (targetShards) {
                //The job is configured to target shards, so replace the
                //mongos hostname with the host of the shard's servers
                String shardHosts = shardsMap.get(shard);
                if (shardHosts == null) {
                    throw new SplitFailedException("Couldn't find shard ID: " + shard + " in config.shards.");
                }

                MongoClientURI newURI = rewriteURI(inputURI, shardHosts);
                chunkSplit.setInputURI(newURI);
            } else if (mongosHostNames.size() > 0) {
                //Multiple mongos hosts are specified, so
                //choose a host name in round-robin fashion
                //and rewrite the URI using that hostname.
                //This evenly distributes the load to avoid
                //pegging a single mongos instance.
                String roundRobinHost = mongosHostNames.get(numChunks % mongosHostNames.size());
                MongoClientURI newURI = rewriteURI(inputURI, roundRobinHost);
                chunkSplit.setInputURI(newURI);
            }
            LinkedList<InputSplit> shardList = shardToSplits.get(shard);
            if (shardList == null) {
                shardList = new LinkedList<InputSplit>();
                shardToSplits.put(shard, shardList);
            }
            chunkSplit.setKeyField(MongoConfigUtil.getInputKey(getConfiguration()));
            shardList.add(chunkSplit);
            numChunks++;
        }
    } finally {
        MongoConfigUtil.close(configDB.getMongo());
    }

    final List<InputSplit> splits = new ArrayList<InputSplit>(numChunks);
    int splitIndex = 0;
    while (splitIndex < numChunks) {
        Set<String> shardSplitsToRemove = new HashSet<String>();
        for (Entry<String, LinkedList<InputSplit>> shardSplits : shardToSplits.entrySet()) {
            LinkedList<InputSplit> shardSplitsList = shardSplits.getValue();
            InputSplit split = shardSplitsList.pop();
            splits.add(splitIndex, split);
            splitIndex++;
            if (shardSplitsList.isEmpty()) {
                shardSplitsToRemove.add(shardSplits.getKey());
            }
        }
        for (String shardName : shardSplitsToRemove) {
            shardToSplits.remove(shardName);
        }
    }

    return splits;
}

From source file:com.heliosdecompiler.helios.tasks.AddFilesTask.java

private void handleDirectory(File file) throws IOException {
    LinkedList<File> filesToProcess = new LinkedList<>();
    filesToProcess.add(file);
    Set<String> filesProcessed = new HashSet<>();

    while (!filesToProcess.isEmpty()) {
        File current = filesToProcess.pop();
        if (current.isFile() && filesProcessed.add(current.getCanonicalPath())) {
            handle(current);
        } else {
            File[] listFiles = current.listFiles();
            if (listFiles != null) {
                filesToProcess.addAll(Arrays.asList(listFiles));
            }
        }
    }
}

From source file:com.zh.snmp.snmpcore.services.impl.DeviceServiceImpl.java

@Override
public Device setDeviceConfig(String nodeId, List<String> path, List<DinamicValue> dinamicValues, int mode) {
    Device device = findDeviceByDeviceId(nodeId);
    if (device == null) {
        return null;
    }
    LinkedList<String> pathl = new LinkedList<String>(path);
    DeviceNode dconfig = device.getConfigMap();
    if (!pathl.isEmpty()) {
        String rootc = pathl.pop();
        if (!dconfig.getCode().equals(rootc) || pathl.isEmpty()) {
            return null;
        }
    }
    DeviceNode node = dconfig.findChainChild(pathl);
    if (node != null) {
        node.setSelected(mode == 1);
        if (dinamicValues != null) {
            for (DinamicValue dv : dinamicValues) {
                node.setDinamicValue(dv.getCode(), dv.getValue());
            }
        }
        device.setConfigMap(dconfig);
        return save(device);
    } else {
        return null;
    }
}

From source file:org.metaservice.core.deb.util.GitCache.java

public void runDiscovery() {
    HashSet<String> parsed = new HashSet<>();
    LinkedList<String> toParse = new LinkedList<>();
    HashSet<String> dists = new HashSet<>();
    toParse.add(startString);
    while (toParse.size() > 0) {
        String uri = toParse.pop();
        try {
            String s = clientMetaservice.get(uri);
            if (s == null) {
                LOGGER.error("Couldn't load " + uri + " skipping.");
                continue;
            }
            Document document = Jsoup.parse(s, uri);
            parsed.add(uri);
            for (Element e : document.select("a:contains(next change)")) {
                String href = e.attr("abs:href");
                if (!parsed.contains(href) && !toParse.contains(href)) {
                    LOGGER.info("adding (next) ", href);
                    toParse.push(href);
                }
            }

            for (Element e : document.select("a[href$=/]")) {
                String absHref = e.attr("abs:href");
                String href = e.attr("href");
                if (!dists.contains(href) && !href.startsWith("/")
                        && !href.startsWith(".") /* &&!toParse.contains (href) */) {
                    if (uri.endsWith("dists/") /*&& !href.contains("sid") && !href.contains("experimental")*/) {
                        dists.add(href);
                        LOGGER.info(href);
                        for (String license : licenses) {
                            String url = absHref + license + "/";
                            LOGGER.info("adding (lic) {}", url);
                            toParse.add(url);
                        }
                    }
                    for (String license : licenses) {
                        if (uri.endsWith(license + "/")) {
                            if (href.startsWith("binary-")) {
                                for (String arch : archs) {
                                    if (href.contains(arch)) {
                                        LOGGER.info("adding (archdir) {}", absHref);
                                        toParse.add(absHref);
                                    }
                                }
                            }
                            if (href.startsWith("source")) {
                                LOGGER.info("adding (archdir) {}", absHref);
                                toParse.add(absHref);
                            }
                        }
                    }

                }

            }

            for (Element e : document.select("a[abs:href$=Packages.gz] , a[abs:href$=Sources.gz]")) {
                String href = e.attr("abs:href");
                //only if this seems to be a non duplicate
                if (document.select("a:contains(prev change)").size() == 0
                        || document.select("a:contains(prev change)").get(0).attr("abs:href").equals(document
                                .select("a:contains(prev):not(:contains(change))").get(0).attr("abs:href"))) {
                    LOGGER.info("RESULT processing ... {} {} ", i++, href);
                    processFileToParse(href);
                }
            }
        } catch (RuntimeException exception) {
            LOGGER.error("RUNTIME EXCEPTION ", exception);
            throw exception;
        }
    }
}

From source file:com.act.reachables.CladeTraversal.java

/**
 * This function traverses the reachables tree from the given start point using BFS, writes each of the chemical's
 * derivatives to a file if it passes the mechanistic validator, and also logs the derivative's reaction pathway from
 * the target. Finally, all reactions that did not pass the mechanistic validator are rendered into a directory for
 * further analysis.
 *
 * @param startPointId            - The start point node id to traverse from
 * @param validatedInchisFileName - The file containing all the derivative inchis that pass the validator.
 * @param reactionPathwayFileName - The file containing the reaction pathway information from source to target.
 * @param renderedReactionDirName - The directory containing all the rendered chemical reactions that failed the
 *                                mechanistic validator.
 * @throws IOException
 */
private void traverseTreeFromStartPoint(Long startPointId, String validatedInchisFileName,
        String reactionPathwayFileName, String renderedReactionDirName) throws IOException {
    ReactionRenderer render = new ReactionRenderer();
    PrintWriter validatedInchisWriter = new PrintWriter(validatedInchisFileName, "UTF-8");
    PrintWriter reactionPathwayWriter = new PrintWriter(reactionPathwayFileName, "UTF-8");

    LinkedList<Long> queue = new LinkedList<>();
    queue.addAll(this.parentToChildren.get(startPointId));

    while (!queue.isEmpty()) {
        Long candidateId = queue.pop();
        validatedInchisWriter.println(db.readChemicalFromInKnowledgeGraph(candidateId).getInChI());
        reactionPathwayWriter.println(formatPathFromSrcToDerivativeOfSrc(startPointId, candidateId));

        Set<Long> children = this.parentToChildren.get(candidateId);
        if (children != null) {
            for (Long child : children) {
                for (Long rxnId : rxnIdsForEdge(candidateId, child)) {
                    // In the case of a negative rxn id, this signifies the reaction is happening in reverse to what is
                    // referenced in the DB. In order to get the correct db index, one has to transform this negative reaction
                    // into its actual id.
                    if (rxnId < 0) {
                        rxnId = Reaction.reverseNegativeId(rxnId);
                    }

                    // Validate the reaction and only add its children to the queue if the reaction makes sense to our internal
                    // ros and the child is not in the queue already.
                    Map<Integer, List<Ero>> validatorResults = this.validator.validateOneReaction(rxnId);
                    if (validatorResults != null && validatorResults.size() > 0 && !queue.contains(child)) {
                        queue.add(child);
                    } else {
                        try {
                            render.drawReaction(db.getReadDB(), rxnId, renderedReactionDirName, true);
                        } catch (Exception e) {
                            LOGGER.error(
                                    "Error caught when trying to draw and save reaction %d with error message: %s",
                                    rxnId, e.getMessage());
                        }
                    }
                }
            }
        }
    }

    reactionPathwayWriter.close();
    validatedInchisWriter.close();
}

From source file:com.shopzilla.hadoop.mapreduce.MiniMRClusterContextMRTest.java

@Test
public void testWordCount() throws Exception {
    Path input = new Path("/user/test/keywords_data");
    Path output = new Path("/user/test/word_count");

    Job job = new Job(configuration);

    job.setJobName("Word Count Test");

    job.setMapperClass(WordCountMapper.class);
    job.setReducerClass(SumReducer.class);

    job.setInputFormatClass(TextInputFormat.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(LongWritable.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(LongWritable.class);
    job.setNumReduceTasks(1);
    FileInputFormat.setInputPaths(job, input);
    FileOutputFormat.setOutputPath(job, output);

    assertTrue("All files from /data classpath directory should have been copied into HDFS",
            miniMRClusterContext.getFileSystem().exists(input));

    job.waitForCompletion(true);

    assertTrue("Output file should have been created", miniMRClusterContext.getFileSystem().exists(output));

    final LinkedList<String> expectedLines = new LinkedList<String>();
    expectedLines.add("goodbye\t1");
    expectedLines.add("hello\t1");
    expectedLines.add("world\t2");

    miniMRClusterContext.processData(output, new Function<String, Void>() {
        @Override
        public Void apply(String line) {
            assertEquals(expectedLines.pop(), line);
            return null;
        }
    });
    assertEquals(0, expectedLines.size());
}

From source file:org.squashtest.tm.domain.library.structures.LibraryGraph.java

/**
 * <p> Will merge the structure of a graph into this graph. This means that nodes will be created if no equivalent exists already,
 * and the same goes for inbound/outbound edges. You must provide an implementation of {@link NodeTransformer}
 * in order to allow the conversion of a node from the other graph into a node acceptable for this graph.</p>
 * <p> Nodes and edges inserted by the merge will not erase existing data if nodes with the same keys are already present.</p>
 *
 * <p>The generics are the following :
 *    <ul>
 *       <li>OIDENT : the class of the key of the other graph</li>
 *       <li>ON : the type definition of a node from the other graph</li>
 *       <li>OG : the type of the other graph </li>
 *    </ul>
 * </p>
 *
 * @param othergraph
 */
public <OIDENT, ON extends GraphNode<OIDENT, ON>, OG extends LibraryGraph<OIDENT, ON>> void mergeGraph(
        OG othergraph, NodeTransformer<ON, T> transformer) {

    LinkedList<ON> processing = new LinkedList<>(othergraph.getOrphans());

    Set<ON> processed = new HashSet<>();

    while (!processing.isEmpty()) {

        ON current = processing.pop();
        T newParent = transformer.createFrom(current);

        for (ON child : current.getOutbounds()) {

            addEdge(newParent, transformer.createFrom(child));

            if (!processed.contains(child)) {
                processing.add(child);
                processed.add(child);
            }
        }

        // in case the node had no children it might be useful to add itself again
        addNode(newParent);
    }
}

From source file:org.squashtest.tm.domain.library.structures.LibraryGraph.java

/**
 * Will remove from this graph any edge that exists in othergraph. If removeAll is
 * set to true, every connection between the source and destination nodes of such edges
 * will be removed; if false, only their cardinalities will be subtracted.
 *
 *
 * @param othergraph
 * @param transformer
 * @param removeAll
 */
public <OIDENT, ON extends GraphNode<OIDENT, ON>, OG extends LibraryGraph<OIDENT, ON>> void substractGraph(
        OG othergraph, NodeTransformer<ON, T> transformer, boolean removeAll) {

    LinkedList<ON> processing = new LinkedList<>(othergraph.getOrphans());

    Set<ON> processed = new HashSet<>();

    while (!processing.isEmpty()) {
        ON otherCurrent = processing.pop();
        IDENT thisCurrent = (IDENT) transformer.createKey(otherCurrent);

        for (ON otherChild : otherCurrent.getOutbounds()) {
            IDENT thisChild = (IDENT) transformer.createKey(otherChild);

            if (hasEdge(thisCurrent, thisChild)) {
                if (removeAll) {
                    removeAllEdges(thisCurrent, thisChild);
                } else {
                    removeEdge(thisCurrent, thisChild);
                }
            }

            if (!processed.contains(otherChild)) {
                processing.add(otherChild);
                processed.add(otherChild);
            }
        }
    }

}