Example usage for java.util Iterator Iterator

List of usage examples for java.util Iterator Iterator

Introduction

On this page you can find example usage for java.util.Iterator. Each example below implements the interface as an anonymous class, supplying hasNext(), next(), and remove().

Prototype

Iterator
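
Before the project examples, here is a minimal, self-contained sketch of the pattern they all share: an anonymous class implementing java.util.Iterator over some backing data. The class name, array, and element type are illustrative only.

import java.util.Iterator;
import java.util.NoSuchElementException;

public class MinimalIteratorExample {
    public static void main(String[] args) {
        final String[] data = { "a", "b", "c" };

        Iterator<String> it = new Iterator<String>() {
            private int cursor; // index of the next element to return

            @Override
            public boolean hasNext() {
                return cursor < data.length;
            }

            @Override
            public String next() {
                if (!hasNext()) {
                    throw new NoSuchElementException();
                }
                return data[cursor++];
            }

            @Override
            public void remove() {
                // read-only view over the array
                throw new UnsupportedOperationException();
            }
        };

        while (it.hasNext()) {
            System.out.println(it.next()); // prints a, b, c
        }
    }
}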

Usage

From source file:com.vaushell.shaarlijavaapi.ShaarliClient.java

private Iterator<ShaarliLink> iterator(final String query) {
    return new Iterator<ShaarliLink>() {
        // PUBLIC
        @Override
        public boolean hasNext() {
            if (bufferCursor < buffer.size()) {
                return true;
            } else {
                buffer.clear();
                bufferCursor = 0;

                final List<ShaarliLink> links;
                if (query != null && query.length() > 0) {
                    links = parseLinks(endpoint + "/?page=" + (page++) + "&" + query);
                } else {
                    links = parseLinks(endpoint + "/?page=" + (page++));
                }

                if (links.isEmpty()) {
                    return false;
                } else {
                    final String linksLastID = links.get(links.size() - 1).getID();
                    if (lastID != null && lastID.equals(linksLastID)) {
                        return false;
                    } else {
                        lastID = linksLastID;

                        buffer.addAll(links);

                        return true;
                    }
                }
            }
        }

        @Override
        public ShaarliLink next() {
            return buffer.get(bufferCursor++);
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }

        // PRIVATE
        private final List<ShaarliLink> buffer = new ArrayList<>();
        private int bufferCursor;
        private int page = 1;
        private String lastID;
    };
}
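
A note on the design above: all fetching happens in hasNext(), which refills the page buffer and compares the last link ID against the previous page to detect when the server starts repeating results; next() only reads from the buffer. Callers must therefore follow the usual hasNext-then-next discipline, since calling next() on an exhausted iterator fails inside the buffer lookup rather than with NoSuchElementException. A hedged consumption sketch (the helper class below is hypothetical, not part of ShaarliClient):

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public final class IteratorDrainSketch {
    // Hypothetical helper: drains any Iterator into a List using the
    // hasNext-then-next protocol the paging iterator above relies on.
    public static <T> List<T> drain(Iterator<T> it) {
        final List<T> out = new ArrayList<>();
        while (it.hasNext()) {
            out.add(it.next());
        }
        return out;
    }
}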

From source file:com.linkedin.pinot.core.startree.OffHeapStarTreeBuilder.java

/**
 * Assumes the file is already sorted; returns the unique combinations after removing a specified
 * dimension. Aggregates the metrics for each unique combination; currently only sum is supported by default.
 * @param startDocId start document id
 * @param endDocId end document id
 * @param file sorted input file
 * @param splitDimensionId id of the dimension to remove (its value is written as ALL)
 * @return iterator over the unique dimension/metric combinations
 * @throws Exception
 */
private Iterator<Pair<DimensionBuffer, MetricBuffer>> uniqueCombinations(int startDocId, int endDocId,
        File file, int splitDimensionId) throws Exception {
    StarTreeDataTable dataSorter = new StarTreeDataTable(file, dimensionSizeBytes, metricSizeBytes,
            getSortOrder());
    Iterator<Pair<byte[], byte[]>> iterator1 = dataSorter.iterator(startDocId, endDocId);
    File tempFile = new File(outDir, file.getName() + "_" + startDocId + "_" + endDocId + ".unique.tmp");
    DataOutputStream dos = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(tempFile)));
    while (iterator1.hasNext()) {
        Pair<byte[], byte[]> next = iterator1.next();
        byte[] dimensionBuffer = next.getLeft();
        byte[] metricBuffer = next.getRight();
        DimensionBuffer dimensions = DimensionBuffer.fromBytes(dimensionBuffer);
        for (int i = 0; i < numDimensions; i++) {
            String dimensionName = dimensionNameToIndexMap.inverse().get(i);
            if (i == splitDimensionId || (skipMaterializationForDimensions != null
                    && skipMaterializationForDimensions.contains(dimensionName))) {
                dos.writeInt(StarTreeIndexNodeInterf.ALL);
            } else {
                dos.writeInt(dimensions.getDimension(i));
            }
        }
        dos.write(metricBuffer);
    }
    dos.close();
    dataSorter = new StarTreeDataTable(tempFile, dimensionSizeBytes, metricSizeBytes, getSortOrder());
    dataSorter.sort(0, endDocId - startDocId);
    if (debugMode) {
        printFile(tempFile, 0, endDocId - startDocId);
    }
    final Iterator<Pair<byte[], byte[]>> iterator = dataSorter.iterator(0, endDocId - startDocId);
    return new Iterator<Pair<DimensionBuffer, MetricBuffer>>() {

        Pair<DimensionBuffer, MetricBuffer> prev = null;
        boolean done = false;

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }

        @Override
        public boolean hasNext() {
            return !done;
        }

        @Override
        public Pair<DimensionBuffer, MetricBuffer> next() {
            while (iterator.hasNext()) {
                Pair<byte[], byte[]> next = iterator.next();
                byte[] dimBuffer = next.getLeft();
                byte[] metricBuffer = next.getRight();
                if (prev == null) {
                    prev = Pair.of(DimensionBuffer.fromBytes(dimBuffer),
                            MetricBuffer.fromBytes(metricBuffer, schema.getMetricFieldSpecs()));
                } else {
                    Pair<DimensionBuffer, MetricBuffer> current = Pair.of(DimensionBuffer.fromBytes(dimBuffer),
                            MetricBuffer.fromBytes(metricBuffer, schema.getMetricFieldSpecs()));
                    if (!current.getLeft().equals(prev.getLeft())) {
                        Pair<DimensionBuffer, MetricBuffer> ret = prev;
                        prev = current;
                        LOG.debug("Returning unique {}", prev.getLeft());
                        return ret;
                    } else {
                        prev.getRight().aggregate(current.getRight());
                    }
                }
            }
            done = true;
            LOG.debug("Returning unique {}", prev.getLeft());
            return prev;
        }
    };
}
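
The wrapper above applies a general technique: over a key-sorted stream, collapse each run of equal keys into one record by aggregating the values. Note that its hasNext() stays true until next() discovers exhaustion, so the final record is returned by the same call that sets done. A generic sketch of the same idea under illustrative names (the key/value types and merge function are mine, not Pinot API):

import java.util.AbstractMap.SimpleEntry;
import java.util.Iterator;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.function.BinaryOperator;

public final class RunAggregatingIterator<K, V> implements Iterator<Map.Entry<K, V>> {
    private final Iterator<Map.Entry<K, V>> sorted; // backing key-sorted stream
    private final BinaryOperator<V> merge;          // value aggregation, e.g. sum
    private Map.Entry<K, V> pending;                // run currently being accumulated

    public RunAggregatingIterator(Iterator<Map.Entry<K, V>> sorted, BinaryOperator<V> merge) {
        this.sorted = sorted;
        this.merge = merge;
    }

    @Override
    public boolean hasNext() {
        return pending != null || sorted.hasNext();
    }

    @Override
    public Map.Entry<K, V> next() {
        if (!hasNext()) {
            throw new NoSuchElementException();
        }
        if (pending == null) {
            pending = sorted.next();
        }
        // Extend the current run while keys match, merging values as we go.
        while (sorted.hasNext()) {
            Map.Entry<K, V> candidate = sorted.next();
            if (candidate.getKey().equals(pending.getKey())) {
                pending = new SimpleEntry<>(pending.getKey(),
                        merge.apply(pending.getValue(), candidate.getValue()));
            } else {
                Map.Entry<K, V> done = pending; // run ended: emit it,
                pending = candidate;            // start the next run
                return done;
            }
        }
        Map.Entry<K, V> done = pending;         // last run of the stream
        pending = null;
        return done;
    }

    @Override
    public void remove() {
        throw new UnsupportedOperationException();
    }
}

With merge = Integer::sum over the sorted input [(a,1), (a,2), (b,3)], this yields (a,3) and then (b,3).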

From source file:HashMapComponentGraph.java

@Override
public Iterator<T> getConnectedNodes(final T node) {
    // create a wrapping iterator
    return new Iterator<T>() {
        Iterator<Node> i = edges.get(new Node(node)).iterator();

        @Override
        public boolean hasNext() {
            return i.hasNext();
        }

        @Override
        public T next() {
            return i.next().element;
        }

        @Override
        public void remove() {
            // removal is not supported on this read-only view; throw,
            // matching the other iterators in this class
            throw new UnsupportedOperationException();
        }
    };
}
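
This "wrap iterator" is an adapter: hasNext() delegates to the backing iterator and next() transforms each element. The same shape recurs in several examples below; here is a generic sketch (a hypothetical class, not part of HashMapComponentGraph):

import java.util.Iterator;
import java.util.function.Function;

public final class MappingIterator<A, B> implements Iterator<B> {
    private final Iterator<A> backing;
    private final Function<A, B> transform;

    public MappingIterator(Iterator<A> backing, Function<A, B> transform) {
        this.backing = backing;
        this.transform = transform;
    }

    @Override
    public boolean hasNext() {
        return backing.hasNext(); // exhaustion is tracked by the backing iterator
    }

    @Override
    public B next() {
        return transform.apply(backing.next()); // propagates NoSuchElementException
    }

    @Override
    public void remove() {
        backing.remove(); // or throw UnsupportedOperationException for a read-only view
    }
}

getConnectedNodes above is exactly this shape with transform = node -> node.element.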

From source file:com.vaushell.shaarlijavaapi.ShaarliClient.java

private Iterator<ShaarliLink> iteratorReverse(final String query) {
    final int maxPage;

    if (query != null && query.length() > 0) {
        maxPage = getMaxPages(endpoint + "&" + query);
    } else {
        maxPage = getMaxPages(endpoint);
    }

    return new Iterator<ShaarliLink>() {
        // PUBLIC
        @Override
        public boolean hasNext() {
            if (bufferCursor < buffer.size()) {
                return true;
            } else {
                if (page < 1) {
                    return false;
                } else {
                    buffer.clear();
                    bufferCursor = 0;

                    final List<ShaarliLink> links;
                    if (query != null && query.length() > 0) {
                        links = parseLinks(endpoint + "/?page=" + (page--) + "&" + query);
                    } else {
                        links = parseLinks(endpoint + "/?page=" + (page--));
                    }

                    if (links.isEmpty()) {
                        return false;
                    } else {
                        Collections.reverse(links);

                        buffer.addAll(links);

                        return true;
                    }
                }
            }
        }

        @Override
        public ShaarliLink next() {
            return buffer.get(bufferCursor++);
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }

        // PRIVATE
        private final List<ShaarliLink> buffer = new ArrayList<>();
        private int bufferCursor;
        private int page = maxPage;
    };
}

From source file:org.apache.accumulo.pig.Bytes.java

/**
 * Iterate over keys within the passed inclusive range.
 */
public static Iterable<byte[]> iterateOnSplits(final byte[] a, final byte[] b, final int num) {
    byte[] aPadded;
    byte[] bPadded;
    if (a.length < b.length) {
        aPadded = padTail(a, b.length - a.length);
        bPadded = b;
    } else if (b.length < a.length) {
        aPadded = a;
        bPadded = padTail(b, a.length - b.length);
    } else {
        aPadded = a;
        bPadded = b;
    }
    if (compareTo(aPadded, bPadded) >= 0) {
        throw new IllegalArgumentException("b <= a");
    }
    if (num <= 0) {
        throw new IllegalArgumentException("num cannot be < 0");
    }
    byte[] prependHeader = { 1, 0 };
    final BigInteger startBI = new BigInteger(add(prependHeader, aPadded));
    final BigInteger stopBI = new BigInteger(add(prependHeader, bPadded));
    final BigInteger diffBI = stopBI.subtract(startBI);
    final BigInteger splitsBI = BigInteger.valueOf(num + 1);
    if (diffBI.compareTo(splitsBI) < 0) {
        return null;
    }
    final BigInteger intervalBI;
    try {
        intervalBI = diffBI.divide(splitsBI);
    } catch (Exception e) {
        LOG.error("Exception caught during division", e);
        return null;
    }

    final Iterator<byte[]> iterator = new Iterator<byte[]>() {
        private int i = -1;

        @Override
        public boolean hasNext() {
            return i < num + 1;
        }

        @Override
        public byte[] next() {
            i++;
            if (i == 0) {
                return a;
            }
            if (i == num + 1) {
                return b;
            }

            BigInteger curBI = startBI.add(intervalBI.multiply(BigInteger.valueOf(i)));
            byte[] padded = curBI.toByteArray();
            if (padded[1] == 0) {
                padded = tail(padded, padded.length - 2);
            } else {
                padded = tail(padded, padded.length - 1);
            }
            return padded;
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    };

    return new Iterable<byte[]>() {
        @Override
        public Iterator<byte[]> iterator() {
            return iterator;
        }
    };
}
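
A hedged usage sketch for the method above; the key values are illustrative, and the caller must handle the null return that signals a range too narrow to split num ways:

import java.util.Arrays;

import org.apache.accumulo.pig.Bytes;

public class SplitsDemo {
    public static void main(String[] args) {
        byte[] start = { 0x00 }; // illustrative keys
        byte[] stop = { 0x40 };
        Iterable<byte[]> splits = Bytes.iterateOnSplits(start, stop, 3);
        if (splits != null) { // null: range too narrow to split
            for (byte[] key : splits) {
                System.out.println(Arrays.toString(key));
            }
        }
    }
}

Note also that the returned Iterable wraps a single pre-built Iterator, so it can only be traversed once.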

From source file:HashMapComponentGraph.java

@Override
public Iterator<T> getFreeNodes() {
    // wrapping iterator
    return new Iterator<T>() {
        Iterator<Node> i = freenodes.iterator();

        @Override
        public boolean hasNext() {
            return i.hasNext();
        }

        @Override
        public T next() {
            return i.next().element;
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    };
}

From source file:com.linkedin.pinot.core.startree.OffHeapStarTreeBuilder.java

/**
 * Iterator to iterate over the records from startDocId to endDocId
 */
@Override
public Iterator<GenericRow> iterator(final int startDocId, final int endDocId) throws Exception {
    StarTreeDataTable dataSorter = new StarTreeDataTable(dataFile, dimensionSizeBytes, metricSizeBytes,
            getSortOrder());
    final Iterator<Pair<byte[], byte[]>> iterator = dataSorter.iterator(startDocId, endDocId);
    return new Iterator<GenericRow>() {
        @Override
        public boolean hasNext() {
            return iterator.hasNext();
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }

        @Override
        public GenericRow next() {
            Pair<byte[], byte[]> pair = iterator.next();
            DimensionBuffer dimensionKey = DimensionBuffer.fromBytes(pair.getLeft());
            MetricBuffer metricsHolder = MetricBuffer.fromBytes(pair.getRight(), schema.getMetricFieldSpecs());
            return toGenericRow(dimensionKey, metricsHolder);
        }
    };
}

From source file:org.orekit.models.earth.tessellation.EllipsoidTessellator.java

/** Get an iterator over mesh nodes indices.
 * @param minIndex minimum node index
 * @param maxIndex maximum node index
 * @param truncateLast true if we can reduce last tile
 * @return iterator over mesh nodes indices
 */
private Iterable<Range> nodesIndices(final int minIndex, final int maxIndex, final boolean truncateLast) {

    final int first;
    if (truncateLast) {

        // truncate last tile rather than balance tiles around the zone of interest
        first = minIndex;

    } else {

        // balance tiles around the zone of interest rather than truncate last tile

        // number of tiles needed to cover the full indices range
        final int range = maxIndex - minIndex;
        final int nbTiles = (range + quantization - 1) / quantization;

        // extra nodes that must be added to complete the tiles
        final int extraNodes = nbTiles * quantization - range;

        // balance the extra nodes before min index and after maxIndex
        final int extraBefore = (extraNodes + 1) / 2;

        first = minIndex - extraBefore;

    }

    return new Iterable<Range>() {

        /** {@inheritDoc} */
        @Override
        public Iterator<Range> iterator() {
            return new Iterator<Range>() {

                private int nextLower = first;

                /** {@inheritDoc} */
                @Override
                public boolean hasNext() {
                    return nextLower < maxIndex;
                }

                /** {@inheritDoc} */
                @Override
                public Range next() {

                    if (nextLower >= maxIndex) {
                        throw new NoSuchElementException();
                    }
                    final int lower = nextLower;

                    nextLower += quantization;
                    if (truncateLast && nextLower > maxIndex && lower < maxIndex) {
                        // truncate last tile
                        nextLower = maxIndex;
                    }

                    return new Range(lower, nextLower);

                }

                /** {@inheritDoc} */
                @Override
                public void remove() {
                    throw new UnsupportedOperationException();
                }

            };
        }
    };

}
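
To make the balancing branch concrete, a standalone sketch with illustrative values: minIndex = 0, maxIndex = 10, quantization = 4 needs three tiles, leaving two extra nodes, one of which is placed before minIndex:

public class TilingDemo {
    public static void main(String[] args) {
        final int minIndex = 0, maxIndex = 10, quantization = 4; // illustrative values

        final int range = maxIndex - minIndex;                         // 10
        final int nbTiles = (range + quantization - 1) / quantization; // 3
        final int extraNodes = nbTiles * quantization - range;         // 2
        final int extraBefore = (extraNodes + 1) / 2;                  // 1
        int lower = minIndex - extraBefore;                            // -1

        // Same intervals the iterator above would produce as Range objects.
        while (lower < maxIndex) {
            int upper = lower + quantization;
            System.out.println("[" + lower + ", " + upper + "]"); // [-1,3] [3,7] [7,11]
            lower = upper;
        }
    }
}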

From source file:HashMapComponentGraph.java

@Override
public Iterator<U> getConnectedEdges(final T node) {
    if (edges.containsKey(new Node(node))) {
        // create a wrapping iterator
        return new Iterator<U>() {
            Set<Node> outEdges = edges.get(new Node(node));
            Iterator<Node> i = outEdges.iterator();

            @Override
            public boolean hasNext() {
                return i.hasNext();
            }

            @Override
            public U next() {
                // create an edge pair from the node types and return the
                // edge data
                return edgeData.get(new Pair<T>(node, i.next().element));
            }

            @Override
            public void remove() {
                throw new UnsupportedOperationException();
            }
        };
    } else {
        // create a dummy empty iterator
        return new Iterator<U>() {
            @Override
            public boolean hasNext() {
                return false;
            }

            @Override
            public U next() {
                // the empty view has no elements; honor the Iterator contract
                throw new NoSuchElementException();
            }

            @Override
            public void remove() {
                throw new UnsupportedOperationException();
            }

        };
    }
}

From source file:org.apache.nifi.controller.repository.StandardProcessSession.java

private void updateProvenanceRepo(final Checkpoint checkpoint) {
    // Update Provenance Repository
    final ProvenanceEventRepository provenanceRepo = context.getProvenanceRepository();

    // We need to de-dupe the events that we've created against those reported to the provenance reporter,
    // in case the Processor developer submitted the same events to the reporter. We use a LinkedHashSet
    // for this so that we can also ensure the events are submitted in the proper order.
    final Set<ProvenanceEventRecord> recordsToSubmit = new LinkedHashSet<>();
    final Map<String, Set<ProvenanceEventType>> eventTypesPerFlowFileId = new HashMap<>();

    final Set<ProvenanceEventRecord> processorGenerated = checkpoint.reportedEvents;

    // We first want to submit FORK events, because if the Processor is going to create events against
    // a FlowFile, that FlowFile needs to be shown to be created first.
    // However, if the Processor has generated a FORK event, we don't want to use the Framework-created one --
    // we prefer to use the event generated by the Processor. We can determine this by checking whether the Set
    // of events generated by the Processor contains any of the FORK events that we generated.
    for (final Map.Entry<FlowFile, ProvenanceEventBuilder> entry : checkpoint.forkEventBuilders.entrySet()) {
        final ProvenanceEventBuilder builder = entry.getValue();
        final FlowFile flowFile = entry.getKey();

        updateEventContentClaims(builder, flowFile, checkpoint.records.get(flowFile));
        final ProvenanceEventRecord event = builder.build();

        if (!event.getChildUuids().isEmpty() && !isSpuriousForkEvent(event, checkpoint.removedFlowFiles)) {
            // If framework generated the event, add it to the 'recordsToSubmit' Set.
            if (!processorGenerated.contains(event)) {
                recordsToSubmit.add(event);
            }

            // Register the FORK event for each child and each parent.
            for (final String childUuid : event.getChildUuids()) {
                addEventType(eventTypesPerFlowFileId, childUuid, event.getEventType());
            }
            for (final String parentUuid : event.getParentUuids()) {
                addEventType(eventTypesPerFlowFileId, parentUuid, event.getEventType());
            }
        }
    }

    // Now add any Processor-reported events.
    for (final ProvenanceEventRecord event : processorGenerated) {
        if (isSpuriousForkEvent(event, checkpoint.removedFlowFiles)) {
            continue;
        }

        // Check if the event indicates that the FlowFile was routed to the same
        // connection from which it was pulled (and only this connection). If so, discard the event.
        if (isSpuriousRouteEvent(event, checkpoint.records)) {
            continue;
        }

        recordsToSubmit.add(event);
        addEventType(eventTypesPerFlowFileId, event.getFlowFileUuid(), event.getEventType());
    }

    // Finally, add any other events that we may have generated.
    for (final List<ProvenanceEventRecord> eventList : checkpoint.generatedProvenanceEvents.values()) {
        for (final ProvenanceEventRecord event : eventList) {
            if (isSpuriousForkEvent(event, checkpoint.removedFlowFiles)) {
                continue;
            }

            recordsToSubmit.add(event);
            addEventType(eventTypesPerFlowFileId, event.getFlowFileUuid(), event.getEventType());
        }
    }

    // Check if content or attributes changed. If so, register the appropriate events.
    for (final StandardRepositoryRecord repoRecord : checkpoint.records.values()) {
        final ContentClaim original = repoRecord.getOriginalClaim();
        final ContentClaim current = repoRecord.getCurrentClaim();

        boolean contentChanged = false;
        if (original == null && current != null) {
            contentChanged = true;
        }
        if (original != null && current == null) {
            contentChanged = true;
        }
        if (original != null && current != null && !original.equals(current)) {
            contentChanged = true;
        }

        final FlowFileRecord curFlowFile = repoRecord.getCurrent();
        final String flowFileId = curFlowFile.getAttribute(CoreAttributes.UUID.key());
        boolean eventAdded = false;

        if (checkpoint.removedFlowFiles.contains(flowFileId)) {
            continue;
        }

        final boolean newFlowFile = repoRecord.getOriginal() == null;
        if (contentChanged && !newFlowFile) {
            recordsToSubmit
                    .add(provenanceReporter.build(curFlowFile, ProvenanceEventType.CONTENT_MODIFIED).build());
            addEventType(eventTypesPerFlowFileId, flowFileId, ProvenanceEventType.CONTENT_MODIFIED);
            eventAdded = true;
        }

        if (checkpoint.createdFlowFiles.contains(flowFileId)) {
            final Set<ProvenanceEventType> registeredTypes = eventTypesPerFlowFileId.get(flowFileId);
            boolean creationEventRegistered = false;
            if (registeredTypes != null) {
                if (registeredTypes.contains(ProvenanceEventType.CREATE)
                        || registeredTypes.contains(ProvenanceEventType.FORK)
                        || registeredTypes.contains(ProvenanceEventType.JOIN)
                        || registeredTypes.contains(ProvenanceEventType.RECEIVE)
                        || registeredTypes.contains(ProvenanceEventType.FETCH)) {
                    creationEventRegistered = true;
                }
            }

            if (!creationEventRegistered) {
                recordsToSubmit.add(provenanceReporter.build(curFlowFile, ProvenanceEventType.CREATE).build());
                eventAdded = true;
            }
        }

        if (!eventAdded && !repoRecord.getUpdatedAttributes().isEmpty()) {
            // We generate an ATTRIBUTES_MODIFIED event only if no other event has been
            // created for the FlowFile. We do this because all events contain both the
            // newest and the original attributes, so generating an ATTRIBUTES_MODIFIED
            // event is redundant if another already exists.
            if (!eventTypesPerFlowFileId.containsKey(flowFileId)) {
                recordsToSubmit.add(
                        provenanceReporter.build(curFlowFile, ProvenanceEventType.ATTRIBUTES_MODIFIED).build());
                addEventType(eventTypesPerFlowFileId, flowFileId, ProvenanceEventType.ATTRIBUTES_MODIFIED);
            }
        }
    }

    // We want to submit the 'recordsToSubmit' collection, followed by the auto-terminated events, to the Provenance Repository.
    // We want to do this with a single call to ProvenanceEventRepository#registerEvents because it may be much more efficient
    // to do so.
    // However, we want to modify the events in 'recordsToSubmit' to obtain the data from the most recent version of the FlowFiles
    // (except for SEND events); see the note below for why.
    // Therefore, we create an Iterable that can iterate over each of these events, modifying them as needed, and returning them
    // in the appropriate order. This avoids the unnecessary step of creating an intermediate List and adding all of those values
    // to the List.
    // This is done in a similar vein to how Java 8's streams work: iterating over the events and returning a processed version
    // one at a time, as opposed to iterating over the entire Collection and putting the results in another Collection. However,
    // we don't want to change the Framework to require Java 8 at this time, because it's not yet as prevalent as we would desire.
    final Map<String, FlowFileRecord> flowFileRecordMap = new HashMap<>();
    for (final StandardRepositoryRecord repoRecord : checkpoint.records.values()) {
        final FlowFileRecord flowFile = repoRecord.getCurrent();
        flowFileRecordMap.put(flowFile.getAttribute(CoreAttributes.UUID.key()), flowFile);
    }

    final List<ProvenanceEventRecord> autoTermEvents = checkpoint.autoTerminatedEvents;
    final Iterable<ProvenanceEventRecord> iterable = new Iterable<ProvenanceEventRecord>() {
        final Iterator<ProvenanceEventRecord> recordsToSubmitIterator = recordsToSubmit.iterator();
        final Iterator<ProvenanceEventRecord> autoTermIterator = autoTermEvents == null ? null
                : autoTermEvents.iterator();

        @Override
        public Iterator<ProvenanceEventRecord> iterator() {
            return new Iterator<ProvenanceEventRecord>() {
                @Override
                public boolean hasNext() {
                    return recordsToSubmitIterator.hasNext()
                            || autoTermIterator != null && autoTermIterator.hasNext();
                }

                @Override
                public ProvenanceEventRecord next() {
                    if (recordsToSubmitIterator.hasNext()) {
                        final ProvenanceEventRecord rawEvent = recordsToSubmitIterator.next();

                        // Update the Provenance Event Record with all of the info that we know about the event.
                        // For SEND events, we do not want to update the FlowFile info on the Event, because the event should
                        // reflect the FlowFile as it was sent to the remote system. However, for other events, we want to use
                        // the representation of the FlowFile as it is committed, as this is the only way in which it really
                        // exists in our system -- all other representations are volatile representations that have not been
                        // exposed.
                        return enrich(rawEvent, flowFileRecordMap, checkpoint.records,
                                rawEvent.getEventType() != ProvenanceEventType.SEND);
                    } else if (autoTermIterator != null && autoTermIterator.hasNext()) {
                        return enrich(autoTermIterator.next(), flowFileRecordMap, checkpoint.records, true);
                    }

                    throw new NoSuchElementException();
                }

                @Override
                public void remove() {
                    throw new UnsupportedOperationException();
                }
            };
        }
    };

    provenanceRepo.registerEvents(iterable);
}