Example usage for java.util Iterator forEachRemaining

List of usage examples for java.util Iterator forEachRemaining

Introduction

On this page you can find example usages of java.util Iterator forEachRemaining.

Prototype

default void forEachRemaining(Consumer<? super E> action) 

Document

Performs the given action for each remaining element until all elements have been processed or the action throws an exception. Actions are performed in the order of iteration, if that order is specified. Exceptions thrown by the action are relayed to the caller.
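
Before the real-world examples, here is a minimal, self-contained sketch (assuming Java 9+ for List.of) that shows the method's key property: only elements the iterator has not yet returned are visited.

import java.util.Iterator;
import java.util.List;

public class ForEachRemainingDemo {
    public static void main(String[] args) {
        final Iterator<String> it = List.of("a", "b", "c").iterator();
        it.next(); // advance past "a" manually
        // only the remaining elements are visited: prints "b" then "c"
        it.forEachRemaining(System.out::println);
    }
}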

Usage

From source file: org.apache.tinkerpop.gremlin.groovy.engine.GremlinExecutorOverGraphTest.java

@Test
@LoadGraphWith(LoadGraphWith.GraphData.MODERN)
public void shouldAllowTraversalToIterateInDifferentThreadThanOriginallyEvaluatedWithoutAutoCommit()
        throws Exception {
    // this test simulates Gremlin Server interaction where a Traversal is eval'd in one Thread but
    // then iterated in another. it exercises the state of the Gremlin Server GremlinExecutor when
    // being used in session mode
    final ExecutorService evalExecutor = Executors.newSingleThreadExecutor(testingThreadFactory);
    final GremlinExecutor gremlinExecutor = GremlinExecutor.build().executorService(evalExecutor).create();

    final Map<String, Object> bindings = new HashMap<>();
    bindings.put("g", g);

    final AtomicInteger vertexCount = new AtomicInteger(0);

    final ExecutorService iterationExecutor = Executors.newSingleThreadExecutor(testingThreadFactory);
    gremlinExecutor.eval("g.V().out()", bindings).thenAcceptAsync(o -> {
        final Iterator itty = (Iterator) o;
        itty.forEachRemaining(v -> vertexCount.incrementAndGet());
    }, iterationExecutor).join();

    assertEquals(6, vertexCount.get());

    gremlinExecutor.close();
    evalExecutor.shutdown();
    evalExecutor.awaitTermination(30000, TimeUnit.MILLISECONDS);
    iterationExecutor.shutdown();
    iterationExecutor.awaitTermination(30000, TimeUnit.MILLISECONDS);
}

From source file: org.apache.tinkerpop.gremlin.process.computer.bulkloading.BulkLoaderVertexProgram.java

private void executeInternal(final Vertex sourceVertex, final Messenger<Tuple> messenger, final Memory memory) {
    if (memory.isInitialIteration()) {
        this.listener.resetStats();
        // get or create the vertex
        final Vertex targetVertex = bulkLoader.getOrCreateVertex(sourceVertex, graph, g);
        // write all the properties of the vertex to the newly created vertex
        final Iterator<VertexProperty<Object>> vpi = sourceVertex.properties();
        if (this.listener.isNewVertex()) {
            vpi.forEachRemaining(vp -> bulkLoader.createVertexProperty(vp, targetVertex, graph, g));
        } else {
            vpi.forEachRemaining(vp -> bulkLoader.getOrCreateVertexProperty(vp, targetVertex, graph, g));
        }
        this.commit(false);
        if (!bulkLoader.useUserSuppliedIds()) {
            // create an id pair and send it to all the vertex's incoming adjacent vertices
            sourceVertex.property(DEFAULT_BULK_LOADER_VERTEX_ID, targetVertex.id());
            messenger.sendMessage(messageScope, Pair.with(sourceVertex.id(), targetVertex.id()));
        }
    } else if (memory.getIteration() == 1) {
        if (bulkLoader.useUserSuppliedIds()) {
            final Vertex outV = bulkLoader.getVertex(sourceVertex, graph, g);
            final boolean incremental = outV.edges(Direction.OUT).hasNext();
            sourceVertex.edges(Direction.OUT).forEachRemaining(edge -> {
                final Vertex inV = bulkLoader.getVertex(edge.inVertex(), graph, g);
                if (incremental) {
                    bulkLoader.getOrCreateEdge(edge, outV, inV, graph, g);
                } else {
                    bulkLoader.createEdge(edge, outV, inV, graph, g);
                }
                this.commit(false);
            });
        } else {
            // create an id map and populate it with all the incoming messages
            final Map<Object, Object> idPairs = new HashMap<>();
            final Iterator<Tuple> idi = messenger.receiveMessages();
            while (idi.hasNext()) {
                final Tuple idPair = idi.next();
                idPairs.put(idPair.getValue(0), idPair.getValue(1));
            }
            // get the vertex by the dummy id property
            final Object outVId = sourceVertex.value(DEFAULT_BULK_LOADER_VERTEX_ID);
            final Vertex outV = bulkLoader.getVertexById(outVId, graph, g);
            // for all the incoming edges of the vertex, get the incoming adjacent vertex and write the edge and its properties
            sourceVertex.edges(Direction.OUT).forEachRemaining(edge -> {
                final Object inVId = idPairs.get(edge.inVertex().id());
                final Vertex inV = bulkLoader.getVertexById(inVId, graph, g);
                bulkLoader.getOrCreateEdge(edge, outV, inV, graph, g);
                this.commit(false);
            });
        }
    } else if (memory.getIteration() == 2) {
        final Object vertexId = sourceVertex.value(DEFAULT_BULK_LOADER_VERTEX_ID);
        bulkLoader.getVertexById(vertexId, graph, g).property(bulkLoader.getVertexIdProperty()).remove();
        this.commit(false);
    }
}

From source file: org.apache.tinkerpop.gremlin.process.computer.clustering.peerpressure.ClusterCountMapReduce.java

@Override
public void reduce(final NullObject key, final Iterator<Serializable> values,
        final ReduceEmitter<NullObject, Integer> emitter) {
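    // collect the distinct values into a Set, then emit how many clusters were found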
    final Set<Serializable> set = new HashSet<>();
    values.forEachRemaining(set::add);
    emitter.emit(NullObject.instance(), set.size());
}

From source file: org.apache.tinkerpop.gremlin.process.computer.clustering.peerpressure.ClusterPopulationMapReduce.java

@Override
public Map<Serializable, Long> generateFinalResult(final Iterator<KeyValue<Serializable, Long>> keyValues) {
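    // drain every key/value pair from the iterator straight into the result map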
    final Map<Serializable, Long> clusterPopulation = new HashMap<>();
    keyValues.forEachRemaining(pair -> clusterPopulation.put(pair.getKey(), pair.getValue()));
    return clusterPopulation;
}

From source file: org.apache.tinkerpop.gremlin.process.traversal.util.DefaultTraversalMetrics.java

public static DefaultTraversalMetrics merge(final Iterator<DefaultTraversalMetrics> toMerge) {
    final DefaultTraversalMetrics newTraversalMetrics = new DefaultTraversalMetrics();

    // iterate the incoming TraversalMetrics
    toMerge.forEachRemaining(inTraversalMetrics -> {
        // aggregate the internal Metrics
        inTraversalMetrics.metrics.forEach((metricsId, toAggregate) -> {

            MutableMetrics aggregateMetrics = newTraversalMetrics.metrics.get(metricsId);
            if (null == aggregateMetrics) {
                // need to create a Metrics to aggregate into
                aggregateMetrics = new MutableMetrics(toAggregate.getId(), toAggregate.getName());

                newTraversalMetrics.metrics.put(metricsId, aggregateMetrics);
                // Set the index of the Metrics
                for (final Map.Entry<Integer, String> entry : inTraversalMetrics.indexToLabelMap.entrySet()) {
                    if (metricsId.equals(entry.getValue())) {
                        newTraversalMetrics.indexToLabelMap.put(entry.getKey(), metricsId);
                        break;
                    }
                }
            }
            aggregateMetrics.aggregate(toAggregate);
        });
    });
    return newTraversalMetrics;
}

From source file: org.apache.tinkerpop.gremlin.process.traversal.util.StandardTraversalMetrics.java

public static StandardTraversalMetrics merge(final Iterator<StandardTraversalMetrics> toMerge) {
    final StandardTraversalMetrics newTraversalMetrics = new StandardTraversalMetrics();

    // iterate the incoming TraversalMetrics
    toMerge.forEachRemaining(inTraversalMetrics -> {
        // aggregate the internal Metrics
        inTraversalMetrics.metrics.forEach((metricsId, toAggregate) -> {

            MutableMetrics aggregateMetrics = newTraversalMetrics.metrics.get(metricsId);
            if (null == aggregateMetrics) {
                // need to create a Metrics to aggregate into
                aggregateMetrics = new MutableMetrics(toAggregate.getId(), toAggregate.getName());

                newTraversalMetrics.metrics.put(metricsId, aggregateMetrics);
                // Set the index of the Metrics
                for (Map.Entry<Integer, String> entry : inTraversalMetrics.indexToLabelMap.entrySet()) {
                    if (metricsId.equals(entry.getValue())) {
                        newTraversalMetrics.indexToLabelMap.put(entry.getKey(), metricsId);
                        break;
                    }
                }
            }
            aggregateMetrics.aggregate(toAggregate);
        });
    });
    return newTraversalMetrics;
}

From source file: org.apache.usergrid.persistence.qakka.serialization.sharding.ShardIteratorTest.java

@Test
public void getActiveShards() {

    CassandraClient cassandraClient = getInjector().getInstance(CassandraClientImpl.class);
    CassandraConfig cassandraConfig = getInjector().getInstance(CassandraConfig.class);
    ShardSerialization shardSerialization = new ShardSerializationImpl(cassandraConfig, cassandraClient);

    String queueName = "queue_sit_" + RandomStringUtils.randomAlphanumeric(10);

    Shard shard1 = new Shard(queueName, "region1", Shard.Type.DEFAULT, 100L, null);
    Shard shard2 = new Shard(queueName, "region1", Shard.Type.DEFAULT, 200L, null);

    shardSerialization.createShard(shard1);
    shardSerialization.createShard(shard2);

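    // no starting shard id is supplied, so the iterator should cover every shard in the queue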
    Iterator<Shard> shardIterator = new ShardIterator(cassandraClient, queueName, "region1", Shard.Type.DEFAULT,
            Optional.empty());

    List<Shard> shards = new ArrayList<>(1);

    shardIterator.forEachRemaining(shard -> {
        logger.info("Shard ID: {}", shard.getShardId());
        shards.add(shard);
    });

    assertTrue(shards.size() == 2 && shards.get(0).equals(shard1) && shards.get(1).equals(shard2));

}

From source file: org.apache.usergrid.persistence.qakka.serialization.sharding.ShardIteratorTest.java

@Test
public void seekActiveShards() {

    CassandraClient cassandraClient = getInjector().getInstance(CassandraClientImpl.class);
    CassandraConfig cassandraConfig = getInjector().getInstance(CassandraConfig.class);
    ShardSerialization shardSerialization = new ShardSerializationImpl(cassandraConfig, cassandraClient);

    String queueName = "queue_sit_" + RandomStringUtils.randomAlphanumeric(10);

    Shard shard1 = new Shard(queueName, "region1", Shard.Type.DEFAULT, 100L, null);
    Shard shard2 = new Shard(queueName, "region1", Shard.Type.DEFAULT, 200L, null);
    Shard shard3 = new Shard(queueName, "region1", Shard.Type.DEFAULT, 300L, null);

    shardSerialization.createShard(shard1);
    shardSerialization.createShard(shard2);
    shardSerialization.createShard(shard3);

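    // seek past shard id 200L, so only shard3 (300L) is expected from the iterator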
    Iterator<Shard> shardIterator = new ShardIterator(cassandraClient, queueName, "region1", Shard.Type.DEFAULT,
            Optional.of(200L));

    List<Shard> shards = new ArrayList<>(1);

    shardIterator.forEachRemaining(shard -> {
        logger.info("Shard ID: {}", shard.getShardId());
        shards.add(shard);
    });

    assertTrue(shards.size() == 1 && shards.get(0).equals(shard3));
}

From source file: org.apache.usergrid.tools.UniqueValueScanner.java

@Override
public void runTool(CommandLine line) throws Exception {

    startSpring();

    UUID appToFilter = null;
    if (!line.getOptionValue(APPLICATION_ARG).isEmpty()) {
        appToFilter = UUID.fromString(line.getOptionValue(APPLICATION_ARG));
    }

    logger.info("Staring Tool: UniqueValueScanner");
    logger.info("Using Cassandra consistency level: {}",
            System.getProperty("usergrid.read.cl", "CL_LOCAL_QUORUM"));

    keyspace = injector.getInstance(com.netflix.astyanax.Keyspace.class);
    mvccEntitySerializationStrategy = injector.getInstance(MvccEntitySerializationStrategy.class);
    uniqueValueSerializationStrategy = injector.getInstance(UniqueValueSerializationStrategy.class);

    String fieldType = line.getOptionValue(ENTITY_FIELD_TYPE_ARG) != null
            ? line.getOptionValue(ENTITY_FIELD_TYPE_ARG)
            : "name";
    String entityType = line.getOptionValue(ENTITY_TYPE_ARG);
    String entityName = line.getOptionValue(ENTITY_NAME_ARG);

    AtomicInteger count = new AtomicInteger(0);

    if (entityName != null && !entityName.isEmpty()) {

        if (appToFilter == null) {
            throw new RuntimeException("Cannot execute UniqueValueScanner with specific entity without the "
                    + "application UUID for which the entity should exist.");
        }

        if (entityType == null) {
            throw new RuntimeException("Cannot execute UniqueValueScanner without the entity type (singular "
                    + "collection name).");
        }

        logger.info("Running entity unique load only");

        //do stuff w/o read repair
        UniqueValueSet uniqueValueSet = uniqueValueSerializationStrategy.load(
                new ApplicationScopeImpl(new SimpleId(appToFilter, "application")),
                ConsistencyLevel.valueOf(System.getProperty("usergrid.read.cl", "LOCAL_QUORUM")), entityType,
                Collections.singletonList(new StringField(fieldType, entityName)), false);

        StringBuilder stringBuilder = new StringBuilder();

        stringBuilder.append("[");

        uniqueValueSet.forEach(uniqueValue -> {

            String entry = "fieldName=" + uniqueValue.getField().getName() + ", fieldValue="
                    + uniqueValue.getField().getValue() + ", uuid=" + uniqueValue.getEntityId().getUuid()
                    + ", type=" + uniqueValue.getEntityId().getType() + ", version="
                    + uniqueValue.getEntityVersion();
            stringBuilder.append("{").append(entry).append("},");
        });

        stringBuilder.deleteCharAt(stringBuilder.length() - 1);
        stringBuilder.append("]");

        logger.info("Returned unique value set from serialization load = {}", stringBuilder.toString());

    } else {

        logger.info("Running entity unique scanner only");

        // scan through all unique values and log some info

        Iterator<com.netflix.astyanax.model.Row<ScopedRowKey<TypeField>, EntityVersion>> rows = null;
        try {

            rows = keyspace.prepareQuery(CF_UNIQUE_VALUES)
                    .setConsistencyLevel(com.netflix.astyanax.model.ConsistencyLevel
                            .valueOf(System.getProperty("usergrid.read.cl", "CL_LOCAL_QUORUM")))
                    .getAllRows().withColumnRange(new RangeBuilder().setLimit(1000).build()).execute()
                    .getResult().iterator();

        } catch (ConnectionException e) {

            logger.error("Error connecting to cassandra", e);
        }

        UUID finalAppToFilter = appToFilter;

        if (rows != null) {
            rows.forEachRemaining(row -> {

                count.incrementAndGet();

                if (count.get() % 1000 == 0) {
                    logger.info("Scanned {} rows in {}", count.get(), CF_UNIQUE_VALUES.getName());
                }

                final String fieldName = row.getKey().getKey().getField().getName();
                final String fieldValue = row.getKey().getKey().getField().getValue().toString();
                final String scopeType = row.getKey().getScope().getType();
                final UUID scopeUUID = row.getKey().getScope().getUuid();

                if (!fieldName.equalsIgnoreCase(fieldType)
                        || (finalAppToFilter != null && !finalAppToFilter.equals(scopeUUID))) {
                    // do nothing

                } else {

                    // if we have more than 1 column, let's check for a duplicate
                    if (row.getColumns() != null && row.getColumns().size() > 1) {

                        final List<EntityVersion> values = new ArrayList<>(row.getColumns().size());

                        Iterator<Column<EntityVersion>> columns = row.getColumns().iterator();
                        columns.forEachRemaining(column -> {

                            final EntityVersion entityVersion = column.getName();

                            logger.trace(scopeType + ": " + scopeUUID + ", " + fieldName + ": " + fieldValue
                                    + ", " + "entity type: " + entityVersion.getEntityId().getType() + ", "
                                    + "entity uuid: " + entityVersion.getEntityId().getUuid());

                            if (entityType != null
                                    && entityVersion.getEntityId().getType().equalsIgnoreCase(entityType)) {

                                // add the first value into the list
                                if (values.size() == 0) {

                                    values.add(entityVersion);

                                } else {

                                    if (!values.get(0).getEntityId().getUuid()
                                            .equals(entityVersion.getEntityId().getUuid())) {

                                        values.add(entityVersion);

                                        logger.error(
                                                "Duplicate found for field [{}={}].  Entry 1: [{}], Entry 2: [{}]",
                                                fieldName, fieldValue, values.get(0).getEntityId(),
                                                entityVersion.getEntityId());

                                    }

                                }

                            }

                        });
                    }
                }

            });
        } else {

            logger.warn("No rows returned from table: {}", CF_UNIQUE_VALUES.getName());

        }

    }
}

From source file: org.codice.ddf.configuration.migration.MigrationReportImpl.java

@Override
public void verifyCompletion() {
    runCodes();
    if (numErrors == 0) {
        return;
    }
    final Iterator<MigrationException> i = errors().iterator(); // preserve order
    final MigrationException e = i.next(); // will always be there since numErrors is not 0

    i.forEachRemaining(e::addSuppressed);
    throw e;
}