Example usage for java.util LinkedList isEmpty

Introduction

This page shows example usage of java.util LinkedList isEmpty, drawn from open-source projects.

Prototype

boolean isEmpty();

Document

Returns true if this list contains no elements.
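
For orientation before the project examples below, here is a minimal, self-contained sketch (the class and variable names are illustrative, not taken from any of the projects below) showing isEmpty() as the loop guard while draining a LinkedList used as a FIFO queue, which is the pattern most of the following examples share.

import java.util.LinkedList;

public class LinkedListIsEmptyExample {
    public static void main(String[] args) {
        // A LinkedList used as a simple FIFO work queue.
        LinkedList<String> queue = new LinkedList<>();
        queue.add("first");
        queue.add("second");

        // isEmpty() guards the drain loop; poll() removes and returns the head.
        while (!queue.isEmpty()) {
            System.out.println(queue.poll());
        }

        // Prints "true" once every element has been polled.
        System.out.println(queue.isEmpty());
    }
}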

Usage

From source file:io.druid.firehose.s3.StaticS3FirehoseFactory.java

@Override
public Firehose connect(StringInputRowParser firehoseParser) throws IOException {
    Preconditions.checkNotNull(s3Client, "null s3Client");

    final LinkedList<URI> objectQueue = Lists.newLinkedList(uris);

    return new FileIteratingFirehose(new Iterator<LineIterator>() {
        @Override
        public boolean hasNext() {
            return !objectQueue.isEmpty();
        }

        @Override
        public LineIterator next() {
            final URI nextURI = objectQueue.poll();

            final String s3Bucket = nextURI.getAuthority();
            final S3Object s3Object = new S3Object(
                    nextURI.getPath().startsWith("/") ? nextURI.getPath().substring(1) : nextURI.getPath());

            log.info("Reading from bucket[%s] object[%s] (%s)", s3Bucket, s3Object.getKey(), nextURI);

            try {
                final InputStream innerInputStream = s3Client
                        .getObject(new S3Bucket(s3Bucket), s3Object.getKey()).getDataInputStream();

                final InputStream outerInputStream = s3Object.getKey().endsWith(".gz")
                        ? CompressionUtils.gzipInputStream(innerInputStream)
                        : innerInputStream;

                return IOUtils.lineIterator(
                        new BufferedReader(new InputStreamReader(outerInputStream, Charsets.UTF_8)));
            } catch (Exception e) {
                log.error(e, "Exception reading from bucket[%s] object[%s]", s3Bucket, s3Object.getKey());

                throw Throwables.propagate(e);
            }
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    }, firehoseParser);
}

From source file:io.druid.firehose.oss.StaticOSSFirehoseFactory.java

@Override
public Firehose connect(StringInputRowParser firehoseParser) throws IOException {

    Preconditions.checkNotNull(ossClient, "null ossClient");

    final LinkedList<URI> objectQueue = Lists.newLinkedList(uris);

    return new FileIteratingFirehose(new Iterator<LineIterator>() {
        @Override
        public boolean hasNext() {
            return !objectQueue.isEmpty();
        }

        @Override
        public LineIterator next() {
            final URI nextURI = objectQueue.poll();

            final String bucket = nextURI.getAuthority();
            final String key = nextURI.getPath().startsWith("/") ? nextURI.getPath().substring(1)
                    : nextURI.getPath();

            log.info("reading from bucket[%s] object[%s] (%s)", bucket, key, nextURI);

            try {
                final InputStream innerInputStream = ossClient.getObject(bucket, key).getObjectContent();

                final InputStream outerInputStream = key.endsWith(".gz")
                        ? CompressionUtils.gzipInputStream(innerInputStream)
                        : innerInputStream;

                return IOUtils.lineIterator(
                        new BufferedReader(new InputStreamReader(outerInputStream, Charsets.UTF_8)));
            } catch (Exception e) {
                log.error(e, "exception reading from bucket[%s] object[%s]", bucket, key);

                throw Throwables.propagate(e);
            }
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    }, firehoseParser);
}

From source file:fr.aliasource.webmail.pool.impl.KeepAliveTask.java

@Override
public void run() {
    LinkedList<T> dead = new LinkedList<T>();

    for (T t : availableObjects) {
        boolean isAlive = t.keepAlive();
        if (!isAlive) {
            logger.warn("Dead poolable item (" + t + "). will recycle.");
            dead.add(t);
        }
    }

    if (!dead.isEmpty()) {
        logger.warn("pool usage report: " + pool.getUsageReport());
    }

    for (T t : dead) {
        availableObjects.remove(t);
        recycle();
    }

    if (availableObjects.remainingCapacity() - pool.getUsageCount() > 0) {
        logger.error("Pool refilling failed this time: " + pool.getUsageReport());
    }
}

From source file:org.apache.hadoop.fs.shell.Count.java

@Override
protected void processOptions(LinkedList<String> args) {
    CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE, OPTION_QUOTA, OPTION_HUMAN, OPTION_HEADER);
    cf.addOptionWithValue(OPTION_TYPE);
    cf.parse(args);
    if (args.isEmpty()) { // default path is the current working directory
        args.add(".");
    }
    showQuotas = cf.getOpt(OPTION_QUOTA);
    humanReadable = cf.getOpt(OPTION_HUMAN);

    if (showQuotas) {
        String types = cf.getOptValue(OPTION_TYPE);

        if (null != types) {
            showQuotabyType = true;
            storageTypes = getAndCheckStorageTypes(types);
        } else {
            showQuotabyType = false;
        }
    }

    if (cf.getOpt(OPTION_HEADER)) {
        if (showQuotabyType) {
            out.println(ContentSummary.getStorageTypeHeader(storageTypes) + "PATHNAME");
        } else {
            out.println(ContentSummary.getHeader(showQuotas) + "PATHNAME");
        }
    }
}

From source file:org.apache.impala.infra.tableflattener.SchemaFlattener.java

private void createChildDataset(String name, Schema srcSchema, LinkedList<Field> parentFields,
        FlattenedSchema parentDataset) {
    // Ensure that the parent schema has an id field so the child can reference the
    // parent. A single id field is sufficient.
    if (parentFields.isEmpty() || !parentFields.getFirst().name().equals(parentDataset.getIdFieldName())) {
        parentFields.addFirst(SchemaUtil.createField(parentDataset.getIdFieldName(), Type.LONG));
    }
    FlattenedSchema childDataset = new FlattenedSchema(name, parentDataset);
    LinkedList<Field> fields = Lists.newLinkedList();
    String parentIdFieldName = parentDataset.getName() + childDataset.getNameSeparator()
            + childDataset.getIdFieldName();
    Field parentIdField = SchemaUtil.createField(parentIdFieldName, Type.LONG);
    childDataset.setParentIdField(parentIdField);
    fields.add(parentIdField);
    Schema valueSchema;
    if (srcSchema.getType() == Type.ARRAY) {
        fields.add(SchemaUtil.createField(childDataset.getArrayIdxFieldName(), Type.LONG));
        valueSchema = srcSchema.getElementType();
    } else {
        Preconditions.checkState(srcSchema.getType() == Type.MAP);
        fields.add(SchemaUtil.createField(childDataset.getMapKeyFieldName(), Type.STRING));
        valueSchema = srcSchema.getValueType();
    }

    if (SchemaUtil.isSimpleType(valueSchema)) {
        fields.add(SchemaUtil.createField(childDataset.getCollectionValueFieldName(), valueSchema));
    } else {
        if (SchemaUtil.isNullable(valueSchema)) {
            fields.add(SchemaUtil.createField(
                    childDataset.getIsNullFieldName(childDataset.getCollectionValueFieldName()), Type.BOOLEAN));
            valueSchema = SchemaUtil.reduceUnionToNonNull(valueSchema);
        }
        if (SchemaUtil.requiresChildDataset(valueSchema)) {
            createChildDataset(childDataset.getChildOfCollectionName(), valueSchema, fields, childDataset);
        } else {
            addRecordFields(valueSchema, childDataset, fields,
                    childDataset.getCollectionValueFieldName() + childDataset.getNameSeparator());
        }
    }

    finishCreatingDataset(fields, childDataset);
}

From source file:com.act.reachables.CladeTraversal.java

/**
 * This function traverses the reachables tree from the given start point using BFS, writes each derivative of the
 * chemical that passes the mechanistic validator to a file, and also logs that derivative's reaction pathway from
 * the target. Finally, all reactions that did not pass the mechanistic validator are rendered into a directory for
 * further analysis.
 *
 * @param startPointId            - The start point node id to traverse from
 * @param validatedInchisFileName - The file containing all the derivative inchis that pass the validator.
 * @param reactionPathwayFileName - The file containing the reaction pathway information from source to target.
 * @param renderedReactionDirName - The directory containing all the rendered chemical reactions that failed the
 *                                mechanistic validator.
 * @throws IOException
 */
private void traverseTreeFromStartPoint(Long startPointId, String validatedInchisFileName,
        String reactionPathwayFileName, String renderedReactionDirName) throws IOException {
    ReactionRenderer render = new ReactionRenderer();
    PrintWriter validatedInchisWriter = new PrintWriter(validatedInchisFileName, "UTF-8");
    PrintWriter reactionPathwayWriter = new PrintWriter(reactionPathwayFileName, "UTF-8");

    LinkedList<Long> queue = new LinkedList<>();
    queue.addAll(this.parentToChildren.get(startPointId));

    while (!queue.isEmpty()) {
        Long candidateId = queue.pop();
        validatedInchisWriter.println(db.readChemicalFromInKnowledgeGraph(candidateId).getInChI());
        reactionPathwayWriter.println(formatPathFromSrcToDerivativeOfSrc(startPointId, candidateId));

        Set<Long> children = this.parentToChildren.get(candidateId);
        if (children != null) {
            for (Long child : children) {
                for (Long rxnId : rxnIdsForEdge(candidateId, child)) {
                    // In the case of a negative rxn id, this signifies the reaction is happening in reverse to what is
                    // referenced in the DB. In order to get the correct db index, one has to transform this negative reaction
                    // into its actual id.
                    if (rxnId < 0) {
                        rxnId = Reaction.reverseNegativeId(rxnId);
                    }

                    // Validate the reaction and only add its children to the queue if the reaction makes sense to our internal
                    // ros and the child is not in the queue already.
                    Map<Integer, List<Ero>> validatorResults = this.validator.validateOneReaction(rxnId);
                    if (validatorResults != null && validatorResults.size() > 0 && !queue.contains(child)) {
                        queue.add(child);
                    } else {
                        try {
                            render.drawReaction(db.getReadDB(), rxnId, renderedReactionDirName, true);
                        } catch (Exception e) {
                            LOGGER.error(
                                    "Error caught when trying to draw and save reaction %d with error message: %s",
                                    rxnId, e.getMessage());
                        }
                    }
                }
            }
        }
    }

    reactionPathwayWriter.close();
    validatedInchisWriter.close();
}

From source file:org.apache.camel.component.pdf.text.DefaultLineBuilderStrategy.java

/**
 * Builds lines from words. Uses the same behaviour as office software:
 * <ul>
 *     <li>If a word does not fit in the current line and the current line already contains other words,
 *     the word is moved to a new line.</li>
 *     <li>If a word does not fit in the line and the line contains no other words, the word is split,
 *     with the split index at the maximum number of characters that fit in the line.</li>
 * </ul>
 */
@Override
public Collection<String> buildLines(Collection<String> splittedText) throws IOException {
    LinkedList<String> wordsList = new LinkedList<String>(splittedText);
    Collection<String> lines = new ArrayList<String>();
    LineBuilder currentLine = new LineBuilder();
    float allowedLineWidth = getAllowedLineWidth();
    while (!wordsList.isEmpty()) {
        String word = wordsList.removeFirst();
        if (isWordFitInCurrentLine(currentLine, word, allowedLineWidth)) {
            currentLine.appendWord(word);
            if (wordsList.isEmpty()) {
                lines.add(currentLine.buildLine());
            }
        } else if (currentLine.getWordsCount() != 0) {
            lines.add(currentLine.buildLine());
            wordsList.addFirst(word);
        } else {
            int splitIndex = findSplitIndex(word, allowedLineWidth);
            currentLine.appendWord(word.substring(0, splitIndex));
            lines.add(currentLine.buildLine());
            wordsList.addFirst(word.substring(splitIndex));
        }
    }
    return lines;
}

From source file:com.icantrap.collections.dawg.DawgBuilder.java

private void compress() {
    LinkedList<Node> stack = new LinkedList<Node>();
    int index = 0;

    stack.addLast(root);
    while (!stack.isEmpty()) {
        Node ptr = stack.removeFirst();

        ptr.index = index++;
        if (root != ptr)
            ptr.siblings = ptr.parent.nextChildren.size() - 1 + (null == ptr.parent.child ? 0 : 1);
        nodeList.add(ptr);

        for (Node nextChild : ptr.nextChildren)
            stack.add(nextChild);
        if (null != ptr.child)
            stack.add(ptr.child);
    }

    // assign child depths to all nodes
    for (Node node : nodeList)
        if (node.terminal) {
            node.childDepth = 0;

            Node ptr = node;
            int depth = 0;
            while (root != ptr) {
                ptr = ptr.parent;
                ++depth;
                if (depth > ptr.childDepth)
                    ptr.childDepth = depth;
                else
                    break;
            }
        }

    // bin nodes by child depth
    for (Node node : nodeList) {
        LinkedList<Node> nodes = childDepths.get(node.childDepth);
        if (null == nodes) {
            nodes = new LinkedList<Node>();
            nodes.add(node);
            childDepths.put(node.childDepth, nodes);
        } else
            nodes.add(node);
    }

    int maxDepth = -1;
    for (int depth : childDepths.keySet())
        if (depth > maxDepth)
            maxDepth = depth;

    for (int depth = 0; depth <= maxDepth; ++depth) {
        LinkedList<Node> nodes = childDepths.get(depth);
        if (null == nodes)
            continue;

        for (ListIterator<Node> pickNodeIter = nodes.listIterator(); pickNodeIter.hasNext();) {
            Node pickNode = pickNodeIter.next();

            if ((null == pickNode.replaceMeWith) && pickNode.isChild && (0 == pickNode.siblings))
                for (ListIterator<Node> searchNodeIter = nodes
                        .listIterator(pickNodeIter.nextIndex()); searchNodeIter.hasNext();) {
                    Node searchNode = searchNodeIter.next();
                    if ((null == searchNode.replaceMeWith) && searchNode.isChild && (0 == searchNode.siblings)
                            && pickNode.equals(searchNode)) {
                        searchNode.parent.child = pickNode;
                        searchNode.replaceMeWith = pickNode;
                    }
                }
        }
    }
}

From source file:com.mongodb.hadoop.splitter.ShardChunkMongoSplitter.java

@Override
public List<InputSplit> calculateSplits() throws SplitFailedException {
    this.init();
    boolean targetShards = MongoConfigUtil.canReadSplitsFromShards(conf);
    DB configDB = this.mongo.getDB("config");
    DBCollection chunksCollection = configDB.getCollection("chunks");

    MongoClientURI inputURI = MongoConfigUtil.getInputURI(conf);
    String inputNS = inputURI.getDatabase() + "." + inputURI.getCollection();

    DBCursor cur = chunksCollection.find(new BasicDBObject("ns", inputNS));

    int numChunks = 0;

    Map<String, String> shardsMap = null;
    if (targetShards) {
        try {
            shardsMap = this.getShardsMap();
        } catch (Exception e) {
            //Something went wrong when trying to
            //read the shards data from the config server,
            //so abort the splitting
            throw new SplitFailedException("Couldn't get shards information from config server", e);
        }
    }

    List<String> mongosHostNames = MongoConfigUtil.getInputMongosHosts(this.conf);
    if (targetShards && mongosHostNames.size() > 0) {
        throw new SplitFailedException(
                "Setting both mongo.input.split.read_from_shards and mongo.input.mongos_hosts"
                        + " does not make sense. ");
    }

    if (mongosHostNames.size() > 0) {
        LOG.info("Using multiple mongos instances (round robin) for reading input.");
    }

    Map<String, LinkedList<InputSplit>> shardToSplits = new HashMap<String, LinkedList<InputSplit>>();

    while (cur.hasNext()) {
        final BasicDBObject row = (BasicDBObject) cur.next();
        BasicDBObject chunkLowerBound = (BasicDBObject) row.get("min");
        BasicDBObject chunkUpperBound = (BasicDBObject) row.get("max");
        MongoInputSplit chunkSplit = createSplitFromBounds(chunkLowerBound, chunkUpperBound);
        chunkSplit.setInputURI(inputURI);
        String shard = (String) row.get("shard");
        if (targetShards) {
            //The job is configured to target shards, so replace the
            //mongos hostname with the host of the shard's servers
            String shardHosts = shardsMap.get(shard);
            if (shardHosts == null) {
                throw new SplitFailedException("Couldn't find shard ID: " + shard + " in config.shards.");
            }

            MongoClientURI newURI = rewriteURI(inputURI, shardHosts);
            chunkSplit.setInputURI(newURI);
        } else if (mongosHostNames.size() > 0) {
            //Multiple mongos hosts are specified, so
            //choose a host name in round-robin fashion
            //and rewrite the URI using that hostname.
            //This evenly distributes the load to avoid
            //pegging a single mongos instance.
            String roundRobinHost = mongosHostNames.get(numChunks % mongosHostNames.size());
            MongoClientURI newURI = rewriteURI(inputURI, roundRobinHost);
            chunkSplit.setInputURI(newURI);
        }
        LinkedList<InputSplit> shardList = shardToSplits.get(shard);
        if (shardList == null) {
            shardList = new LinkedList<InputSplit>();
            shardToSplits.put(shard, shardList);
        }
        shardList.add(chunkSplit);
        numChunks++;
    }

    final List<InputSplit> splits = new ArrayList<InputSplit>(numChunks);
    int splitIndex = 0;
    while (splitIndex < numChunks) {
        Set<String> shardSplitsToRemove = new HashSet<String>();
        for (Map.Entry<String, LinkedList<InputSplit>> shardSplits : shardToSplits.entrySet()) {
            LinkedList<InputSplit> shardSplitsList = shardSplits.getValue();
            InputSplit split = shardSplitsList.pop();
            splits.add(splitIndex, split);
            splitIndex++;
            if (shardSplitsList.isEmpty()) {
                shardSplitsToRemove.add(shardSplits.getKey());
            }
        }
        for (String shardName : shardSplitsToRemove) {
            shardToSplits.remove(shardName);
        }
    }

    return splits;
}

From source file:com.asakusafw.runtime.directio.DirectDataSourceRepository.java

/**
 * Returns the container paths of all {@link DirectDataSource}s registered in this repository.
 * @return all container paths
 * @throws IOException if failed to initialize data sources
 * @throws InterruptedException if interrupted
 */
public Collection<String> getContainerPaths() throws IOException, InterruptedException {
    Collection<String> results = new ArrayList<>();
    LinkedList<Node> work = new LinkedList<>();
    work.add(root);
    while (work.isEmpty() == false) {
        Node node = work.removeFirst();
        if (node.hasContent()) {
            results.add(node.path.getPathString());
        }
        work.addAll(node.children.values());
    }
    return results;
}