Example usage for java.util Collections emptyIterator

List of usage examples for java.util Collections emptyIterator

Introduction

On this page you can find example usage for java.util Collections emptyIterator.

Prototype

@SuppressWarnings("unchecked")
public static <T> Iterator<T> emptyIterator() 

Document

Returns an iterator that has no elements.
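
A minimal, self-contained sketch (not taken from the sources on this page) illustrating the documented behavior: the returned iterator reports no elements, and calling next() throws NoSuchElementException.

import java.util.Collections;
import java.util.Iterator;
import java.util.NoSuchElementException;

public class EmptyIteratorDemo {
    public static void main(String[] args) {
        // Type-safe empty iterator; avoids building an empty collection just to iterate.
        Iterator<String> it = Collections.emptyIterator();

        // hasNext() always returns false.
        System.out.println(it.hasNext()); // false

        // next() always throws NoSuchElementException.
        try {
            it.next();
        } catch (NoSuchElementException expected) {
            System.out.println("next() threw NoSuchElementException, as documented");
        }
    }
}

This is why the usage examples below return Collections.emptyIterator() as a cheap "no results" value instead of constructing an empty collection first.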

Usage

From source file:fr.landel.utils.commons.StringUtilsTest.java

/**
 * Test method for {@link StringUtils#joinComma} .
 */
@Test
public void testJoinComma() {
    assertNull(StringUtils.joinComma((Object[]) null));
    assertEquals("", StringUtils.joinComma(new Object[0]));
    assertEquals("test", StringUtils.joinComma("test"));
    assertEquals("t1, t2", StringUtils.joinComma("t1", "t2"));
    assertEquals("t1, ", StringUtils.joinComma("t1", null));

    assertNull(StringUtils.joinComma((Iterable<?>) null));
    assertEquals("", StringUtils.joinComma(Collections.emptyList()));
    assertEquals("test", StringUtils.joinComma(Arrays.asList("test")));
    assertEquals("t1, t2", StringUtils.joinComma(Arrays.asList("t1", "t2")));
    assertEquals("t1, ", StringUtils.joinComma(Arrays.asList("t1", null)));

    assertNull(StringUtils.joinComma((Iterator<?>) null));
    assertEquals("", StringUtils.joinComma(Collections.emptyIterator()));
    assertEquals("test", StringUtils.joinComma(Arrays.asList("test").iterator()));
    assertEquals("t1, t2", StringUtils.joinComma(Arrays.asList("t1", "t2").iterator()));
    assertEquals("t1, ", StringUtils.joinComma(Arrays.asList("t1", null).iterator()));
}

From source file:enumj.Enumerator.java

/**
 * Returns an enumerator with no elements.
 *
 * @param <E> type of the enumerated elements.
 * @return the empty enumerator.
 */
public static <E> Enumerator<E> empty() {
    return of(Collections.emptyIterator());
}

From source file:net.sourceforge.pmd.docs.RuleDocGenerator.java

private Iterator<RuleSet> resolveAdditionalRulesets(List<String> additionalRulesets)
        throws RuleSetNotFoundException {
    if (additionalRulesets == null) {
        return Collections.emptyIterator();
    }

    List<RuleSet> rulesets = new ArrayList<>();
    RuleSetFactory ruleSetFactory = new RuleSetFactory();
    for (String filename : additionalRulesets) {
        try {
            // do not take rulesets from pmd-test or pmd-core
            if (!filename.contains("pmd-test") && !filename.contains("pmd-core")) {
                rulesets.add(ruleSetFactory.createRuleSet(filename));
            } else {
                LOG.fine("Ignoring ruleset " + filename);
            }
        } catch (IllegalArgumentException e) {
            // ignore rulesets that we can't read
            LOG.log(Level.WARNING, "ruleset file " + filename + " ignored (" + e.getMessage() + ")", e);
        }
    }
    return rulesets.iterator();
}

From source file:org.apache.drill.exec.store.sys.local.FilePStore.java

@Override
public Iterator<Entry<String, V>> iterator() {
    try {
        List<FileStatus> f = fs.list(false, basePath);
        if (f == null || f.isEmpty()) {
            return Collections.emptyIterator();
        }
        List<String> files = Lists.newArrayList();

        for (FileStatus stat : f) {
            String s = stat.getPath().getName();
            if (s.endsWith(DRILL_SYS_FILE_SUFFIX)) {
                files.add(s.substring(0, s.length() - DRILL_SYS_FILE_SUFFIX.length()));
            }
        }

        Collections.sort(files);
        files = files.subList(0, Math.min(files.size(), config.getMaxIteratorSize()));
        return new Iter(files.iterator());

    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}

From source file:org.apache.drill.exec.store.sys.local.LocalPStore.java

@Override
public Iterator<Entry<String, V>> iterator() {
    String[] f = basePath.list();
    if (f == null)
        return Collections.emptyIterator();
    List<String> files = Lists.newArrayList();
    for (String s : f) {
        if (s.endsWith(SUFFIX)) {
            files.add(s.substring(0, s.length() - SUFFIX.length()));
        }
    }

    return new Iter(files.iterator());
}

From source file:org.apache.drill.exec.store.sys.store.LocalPersistentStore.java

@Override
public Iterator<Map.Entry<String, V>> getRange(int skip, int take) {
    try {
        List<FileStatus> f = fs.list(false, basePath);
        if (f == null || f.isEmpty()) {
            return Collections.emptyIterator();
        }
        List<String> files = Lists.newArrayList();

        for (FileStatus stat : f) {
            String s = stat.getPath().getName();
            if (s.endsWith(DRILL_SYS_FILE_SUFFIX)) {
                files.add(s.substring(0, s.length() - DRILL_SYS_FILE_SUFFIX.length()));
            }
        }

        Collections.sort(files);
        return Iterables.transform(Iterables.limit(Iterables.skip(files, skip), take),
                new Function<String, Entry<String, V>>() {
                    @Nullable
                    @Override
                    public Entry<String, V> apply(String key) {
                        return new ImmutableEntry<>(key, get(key));
                    }
                }).iterator();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}

From source file:org.apache.druid.server.coordinator.CostBalancerStrategy.java

@Override
public Iterator<ServerHolder> pickServersToDrop(DataSegment toDrop, NavigableSet<ServerHolder> serverHolders) {
    List<ListenableFuture<Pair<Double, ServerHolder>>> futures = Lists.newArrayList();

    for (final ServerHolder server : serverHolders) {
        futures.add(exec.submit(() -> Pair.of(computeCost(toDrop, server, true), server)));
    }

    final ListenableFuture<List<Pair<Double, ServerHolder>>> resultsFuture = Futures.allAsList(futures);

    try {
        // results is an un-ordered list of a pair consisting of the 'cost' of a segment being on a server and the server
        List<Pair<Double, ServerHolder>> results = resultsFuture.get();
        return results.stream()
                // Comparator.comparingDouble will order by lowest cost...
                // reverse it because we want to drop from the highest cost servers first
                .sorted(Comparator.comparingDouble((Pair<Double, ServerHolder> o) -> o.lhs).reversed())
                .map(x -> x.rhs).collect(Collectors.toList()).iterator();
    } catch (Exception e) {
        log.makeAlert(e, "Cost Balancer Multithread strategy wasn't able to complete cost computation.").emit();
    }
    return Collections.emptyIterator();
}

From source file:org.apache.eagle.alert.engine.spark.function.AlertBoltFunction.java

@Override
public Iterator<Tuple2<PublishPartition, AlertStreamEvent>> call(
        Iterator<Tuple2<Integer, Iterable<PartitionedEvent>>> tuple2Iterator) throws Exception {
    if (!tuple2Iterator.hasNext()) {
        return Collections.emptyIterator();
    }

    PolicyGroupEvaluatorImpl policyGroupEvaluator = null;
    AlertBoltOutputCollectorWrapper alertOutputCollector = null;
    AlertBoltSpec spec;
    StreamContext streamContext;
    Map<String, StreamDefinition> sds;
    Tuple2<Integer, Iterable<PartitionedEvent>> tuple2 = tuple2Iterator.next();
    Iterator<PartitionedEvent> events = tuple2._2.iterator();
    int partitionNum = tuple2._1;
    String boltId = Constants.ALERTBOLTNAME_PREFIX + partitionNum;
    while (events.hasNext()) {
        if (policyGroupEvaluator == null) {
            spec = alertBoltSpecRef.get();
            sds = sdsRef.get();
            boltId = Constants.ALERTBOLTNAME_PREFIX + partitionNum;
            policyGroupEvaluator = new PolicyGroupEvaluatorImpl(boltId + "-evaluator_stage1");
            Set<PublishPartition> cachedPublishPartitions = publishState
                    .getCachedPublishPartitionsByBoltId(boltId);
            streamContext = new StreamSparkContextImpl(null);
            alertOutputCollector = new AlertBoltOutputCollectorWrapper(new SparkOutputCollector(),
                    streamContext, cachedPublishPartitions);
            Map<String, PolicyDefinition> policyDefinitionMap = policyState.getPolicyDefinitionByBoltId(boltId);
            Map<String, CompositePolicyHandler> policyStreamHandlerMap = policyState
                    .getPolicyStreamHandlerByBoltId(boltId);
            byte[] siddhiSnapShot = siddhiState.getSiddhiSnapShotByBoltIdAndPartitionNum(boltId, partitionNum);
            policyGroupEvaluator.init(streamContext, alertOutputCollector, policyDefinitionMap,
                    policyStreamHandlerMap, siddhiSnapShot);
            onAlertBoltSpecChange(boltId, spec, sds, policyGroupEvaluator, alertOutputCollector, policyState,
                    publishState);
        }
        PartitionedEvent event = events.next();
        policyGroupEvaluator.nextEvent(event);
    }

    cleanUpAndStoreSiddhiState(policyGroupEvaluator, alertOutputCollector, boltId, partitionNum);

    return alertOutputCollector.emitAll().iterator();
}

From source file:org.apache.eagle.alert.engine.spark.function.CorrelationSpoutSparkFunction.java

@Override
public Iterator<Tuple2<Integer, PartitionedEvent>> call(Tuple2<String, String> message) {

    ObjectMapper mapper = new ObjectMapper();
    TypeReference<HashMap<String, Object>> typeRef = new TypeReference<HashMap<String, Object>>() {
    };
    Map<String, Object> value;
    try {
        value = mapper.readValue(message._2, typeRef);
    } catch (IOException e) {
        LOG.info("failed to convert tuple value to map");
        return Collections.emptyIterator();
    }
    List<Object> tuple = new ArrayList<>(2);
    String topic = message._1;
    tuple.add(0, topic);
    tuple.add(1, value);
    SpoutSpec spoutSpec = spoutSpecRef.get();
    Tuple2StreamMetadata metadata = spoutSpec.getTuple2StreamMetadataMap().get(topic);
    if (metadata == null) {
        LOG.info(
                "tuple2StreamMetadata is null, spout collector for topic {} sees the monitored metadata as invalid; has this data source been removed?",
                topic);
        return Collections.emptyIterator();
    }
    Tuple2StreamConverter converter = new Tuple2StreamConverter(metadata);
    List<Object> tupleContent = converter.convert(tuple);

    List<StreamRepartitionMetadata> streamRepartitionMetadataList = spoutSpec.getStreamRepartitionMetadataMap()
            .get(topic);
    if (streamRepartitionMetadataList == null) {
        LOG.info(
                "streamRepartitionMetadataList is null, spout collector for topic {} sees the monitored metadata as invalid; has this data source been removed?",
                topic);
        return Collections.emptyIterator();
    }
    Map<String, Object> messageContent = (Map<String, Object>) tupleContent.get(3);
    Object streamId = tupleContent.get(1);
    Map<String, StreamDefinition> sds = sdsRef.get();
    StreamDefinition sd = sds.get(streamId);
    if (sd == null) {
        LOG.info("StreamDefinition {} is not found within {}, ignore this message", streamId, sds);
        return Collections.emptyIterator();
    }
    List<Tuple2<Integer, PartitionedEvent>> outputTuple2s = new ArrayList<>(5);

    Long timestamp = (Long) tupleContent.get(2);
    StreamEvent event = convertToStreamEventByStreamDefinition(timestamp, messageContent, sds.get(streamId));

    for (StreamRepartitionMetadata md : streamRepartitionMetadataList) {
        if (!event.getStreamId().equals(md.getStreamId())) {
            continue;
        }
        // one stream may have multiple group-by strategies, each strategy is for a specific group-by
        for (StreamRepartitionStrategy groupingStrategy : md.groupingStrategies) {
            int hash = 0;
            if (groupingStrategy.getPartition().getType().equals(StreamPartition.Type.GROUPBY)) {
                hash = getRoutingHashByGroupingStrategy(messageContent, groupingStrategy);
            } else if (groupingStrategy.getPartition().getType().equals(StreamPartition.Type.SHUFFLE)) {
                hash = Math.abs((int) System.currentTimeMillis());
            }
            int mod = hash % groupingStrategy.numTotalParticipatingRouterBolts;
            // filter out message
            if (mod >= groupingStrategy.startSequence
                    && mod < groupingStrategy.startSequence + numOfRouterBolts) {
                PartitionedEvent pEvent = new PartitionedEvent(event, groupingStrategy.partition, hash);
                outputTuple2s.add(new Tuple2<>(mod, pEvent));
            } else {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Message filtered with mod {} not within range {} and {} for message {}", mod,
                            groupingStrategy.startSequence, groupingStrategy.startSequence + numOfRouterBolts,
                            tuple);
                }
            }
        }
    }
    if (CollectionUtils.isEmpty(outputTuple2s)) {
        return Collections.emptyIterator();
    }
    return outputTuple2s.iterator();
}

From source file:org.apache.eagle.alert.engine.spark.function.StreamRouteBoltFunction.java

@Override
public Iterator<Tuple2<Integer, PartitionedEvent>> call(
        Iterator<Tuple2<Integer, Iterable<PartitionedEvent>>> tuple2Iterator) throws Exception {

    if (!tuple2Iterator.hasNext()) {
        return Collections.emptyIterator();
    }

    Map<String, StreamDefinition> sdf;
    RouterSpec spec;
    StreamRouterBoltOutputCollector routeCollector = null;
    StreamRouterImpl router = null;
    StreamContext streamContext;
    Tuple2<Integer, Iterable<PartitionedEvent>> tuple2 = tuple2Iterator.next();
    Iterator<PartitionedEvent> events = tuple2._2.iterator();
    int partitionNum = tuple2._1;
    while (events.hasNext()) {
        if (router == null) {
            sdf = sdsRef.get();
            spec = routerSpecRef.get();

            router = new StreamRouterImpl(routeName);
            streamContext = new StreamSparkContextImpl(null);
            Map<StreamPartition, List<StreamRouterSpec>> routeSpecMap = routeState
                    .getRouteSpecMapByPartition(partitionNum);
            Map<StreamPartition, List<StreamRoutePartitioner>> routePartitionerMap = routeState
                    .getRoutePartitionerByPartition(partitionNum);
            cachedSSS = routeState.getCachedSSSMapByPartition(partitionNum);
            cachedSRS = routeState.getCachedSRSMapByPartition(partitionNum);
            routeCollector = new StreamRouterBoltOutputCollector(routeName, new SparkOutputCollector(),
                    streamContext, routeSpecMap, routePartitionerMap);
            Map<String, StreamTimeClock> streamTimeClockMap = winstate
                    .getStreamTimeClockByPartition(partitionNum);
            Map<StreamTimeClockListener, String> streamWindowMap = winstate
                    .getStreamWindowsByPartition(partitionNum);
            Map<StreamPartition, StreamSortHandler> streamSortHandlerMap = winstate
                    .getStreamSortHandlerByPartition(partitionNum);

            router.prepare(streamContext, routeCollector, streamWindowMap, streamTimeClockMap,
                    streamSortHandlerMap);
            onStreamRouteBoltSpecChange(spec, sdf, router, routeCollector, cachedSSS, cachedSRS, partitionNum);

        }

        PartitionedEvent partitionedEvent = events.next();
        router.nextEvent(partitionedEvent);
    }
    cleanUpAndStoreWinstate(router, partitionNum);

    return routeCollector.flush().iterator();
}