Example usage for java.util Set forEach

List of usage examples for java.util Set forEach

Introduction

On this page you can find example usage for java.util Set forEach.

Prototype

default void forEach(Consumer<? super T> action) 

Document

Performs the given action for each element of the Iterable until all elements have been processed or the action throws an exception.
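
Since Set extends Iterable, forEach can be called directly on any Set implementation. A minimal sketch (the class name and set contents are illustrative):

import java.util.Set;

public class SetForEachExample {
    public static void main(String[] args) {
        // Note: iteration order of an unordered Set is unspecified
        Set<String> names = Set.of("alpha", "beta", "gamma");
        names.forEach(name -> System.out.println("name = " + name));
    }
}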

Usage

From source file:org.apache.samza.system.kafka.KafkaSystemAdmin.java

/**
 * Fetch SystemStreamMetadata for each topic with the consumer
 * @param topics set of topics to get metadata info for
 * @return map of topic to SystemStreamMetadata
 */
private Map<String, SystemStreamMetadata> fetchSystemStreamMetadata(Set<String> topics) {
    Map<SystemStreamPartition, String> allOldestOffsets = new HashMap<>();
    Map<SystemStreamPartition, String> allNewestOffsets = new HashMap<>();
    Map<SystemStreamPartition, String> allUpcomingOffsets = new HashMap<>();

    LOG.info("Fetching SystemStreamMetadata for topics {} on system {}", topics, systemName);

    topics.forEach(topic -> {
        OffsetsMaps offsetsForTopic = threadSafeKafkaConsumer.execute(consumer -> {
            List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);
            if (partitionInfos == null) {
                String msg = String.format("Partition info not(yet?) available for system %s topic %s",
                        systemName, topic);
                throw new SamzaException(msg);
            }
            List<TopicPartition> topicPartitions = partitionInfos.stream()
                    .map(partitionInfo -> new TopicPartition(partitionInfo.topic(), partitionInfo.partition()))
                    .collect(Collectors.toList());
            return fetchTopicPartitionsMetadata(topicPartitions);
        });
        allOldestOffsets.putAll(offsetsForTopic.getOldestOffsets());
        allNewestOffsets.putAll(offsetsForTopic.getNewestOffsets());
        allUpcomingOffsets.putAll(offsetsForTopic.getUpcomingOffsets());
    });

    return assembleMetadata(allOldestOffsets, allNewestOffsets, allUpcomingOffsets);
}
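
The Kafka example above relies on a common forEach idiom: the lambda mutates maps that are declared outside it. Only the map references need to be effectively final; their contents may change freely. A simplified, self-contained sketch of that idiom (the topic names are illustrative, and the length call stands in for a real metadata lookup):

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class AccumulateExample {
    public static void main(String[] args) {
        Set<String> topics = Set.of("orders", "payments");
        Map<String, Integer> partitionCounts = new HashMap<>();
        // The map reference is effectively final, so the lambda may fill it in;
        // topic.length() stands in for a real partition-count lookup
        topics.forEach(topic -> partitionCounts.put(topic, topic.length()));
        System.out.println(partitionCounts);
    }
}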

From source file:com.abixen.platform.service.businessintelligence.multivisualization.service.impl.AbstractDatabaseService.java

private List<Map<String, DataValueWeb>> getData(Connection connection, DatabaseDataSource databaseDataSource,
        Set<String> chartColumnsSet, ChartConfigurationForm chartConfigurationForm) {
    ResultSet rs;
    List<Map<String, DataValueWeb>> data = new ArrayList<>();
    try {
        Statement statement = connection.createStatement();
        ResultSetMetaData resultSetMetaData = getDatabaseMetaData(connection, databaseDataSource.getTable());
        if (chartConfigurationForm != null) {
            rs = statement.executeQuery(buildQueryForChartData(databaseDataSource, chartColumnsSet,
                    resultSetMetaData, chartConfigurationForm));
        } else {
            rs = statement.executeQuery(
                    buildQueryForDataSourceData(databaseDataSource, chartColumnsSet, resultSetMetaData)
                            .toString());
        }
        while (rs.next()) {
            final ResultSet row = rs;
            Map<String, DataValueWeb> rowMap = new HashMap<>();
            chartColumnsSet.forEach(chartColumnsSetElement -> {
                rowMap.put(chartColumnsSetElement, getDataFromColumn(row, chartColumnsSetElement));
            });
            data.add(rowMap);
        }
    } catch (SQLException e) {
        e.printStackTrace();
        throw new DataParsingException("Error when parsing data from db. " + e.getMessage());
    }
    return data;
}

From source file:com.autonomy.aci.client.transport.impl.AciHttpClientImpl.java

/**
 * Creates an {@code HttpPost} and adds the ACI parameters to the request body.
 * @param serverDetails The details of the ACI server the request will be sent to
 * @param parameters    The parameters to send with the ACI action.
 * @return An {@code HttpPost} that is ready to execute the ACI action.
 * @throws UnsupportedEncodingException Will be thrown if <tt>serverDetails.getCharsetName()</tt> returns a
 *                                      charset that is not supported by the JVM
 * @throws URISyntaxException           If there was a problem constructing the request URI from the
 *                                      <tt>serverDetails</tt> and <tt>parameters</tt>
 */
private HttpUriRequest createPostMethod(final AciServerDetails serverDetails,
        final Set<? extends ActionParameter<?>> parameters)
        throws URISyntaxException, UnsupportedEncodingException {
    LOGGER.trace("createPostMethod() called...");

    // Create the URI to use...
    final URI uri = new URIBuilder()
            .setScheme(serverDetails.getProtocol().toString().toLowerCase(Locale.ENGLISH))
            .setHost(serverDetails.getHost()).setPort(serverDetails.getPort()).setPath("/").build();

    // Create the method...
    final HttpPost method = new HttpPost(uri);

    final Charset charset = Charset.forName(serverDetails.getCharsetName());

    final boolean requiresMultipart = parameters.stream().anyMatch(ActionParameter::requiresPostRequest);

    if (requiresMultipart) {
        final MultipartEntityBuilder multipartEntityBuilder = MultipartEntityBuilder.create();
        multipartEntityBuilder.setCharset(charset);

        parameters.forEach(parameter -> parameter.addToEntity(multipartEntityBuilder, charset));

        // Convert the parameters into an entity...
        method.setEntity(multipartEntityBuilder.build());
    } else {
        method.setEntity(new StringEntity(convertParameters(parameters, serverDetails.getCharsetName()),
                serverDetails.getCharsetName()));
    }

    // Return the method...
    return method;
}

From source file:org.apache.cassandra.index.SecondaryIndexManager.java

/**
 * Does a full, blocking rebuild of the indexes specified by columns from the sstables.
 * Caller must acquire and release references to the sstables used here.
 * Note also that only this method of (re)building indexes:
 *   a) takes a set of index *names* rather than Indexers
 *   b) marks existing indexes as removed prior to rebuilding
 *
 * @param sstables the data to build from
 * @param indexNames the names of the indexes to be rebuilt
 */
public void rebuildIndexesBlocking(Collection<SSTableReader> sstables, Set<String> indexNames) {
    Set<Index> toRebuild = indexes.values().stream()
            .filter(index -> indexNames.contains(index.getIndexMetadata().name))
            .filter(Index::shouldBuildBlocking).collect(Collectors.toSet());
    if (toRebuild.isEmpty()) {
        logger.info("No defined indexes with the supplied names: {}", Joiner.on(',').join(indexNames));
        return;
    }

    toRebuild.forEach(indexer -> markIndexRemoved(indexer.getIndexMetadata().name));

    buildIndexesBlocking(sstables, toRebuild);

    toRebuild.forEach(indexer -> markIndexBuilt(indexer.getIndexMetadata().name));
}

From source file:org.apache.zookeeper.MockZooKeeper.java

@Override
public String create(String path, byte[] data, List<ACL> acl, CreateMode createMode)
        throws KeeperException, InterruptedException {
    mutex.lock();
    try {
        checkProgrammedFail();

        if (stopped)
            throw new KeeperException.ConnectionLossException();

        if (tree.containsKey(path)) {
            throw new KeeperException.NodeExistsException(path);
        }

        final String parent = path.substring(0, path.lastIndexOf("/"));
        if (!parent.isEmpty() && !tree.containsKey(parent)) {
            throw new KeeperException.NoNodeException();
        }

        if (createMode == CreateMode.EPHEMERAL_SEQUENTIAL || createMode == CreateMode.PERSISTENT_SEQUENTIAL) {
            byte[] parentData = tree.get(parent).getLeft();
            int parentVersion = tree.get(parent).getRight();
            path = path + parentVersion;

            // Update parent version
            tree.put(parent, Pair.of(parentData, parentVersion + 1));
        }

        tree.put(path, Pair.of(data, 0));

        final Set<Watcher> toNotifyCreate = Sets.newHashSet();
        toNotifyCreate.addAll(watchers.get(path));

        final Set<Watcher> toNotifyParent = Sets.newHashSet();
        if (!parent.isEmpty()) {
            toNotifyParent.addAll(watchers.get(parent));
        }
        watchers.removeAll(path);
        final String finalPath = path;
        executor.execute(() -> {
            toNotifyCreate.forEach(watcher -> watcher
                    .process(new WatchedEvent(EventType.NodeCreated, KeeperState.SyncConnected, finalPath)));
            toNotifyParent.forEach(watcher -> watcher.process(
                    new WatchedEvent(EventType.NodeChildrenChanged, KeeperState.SyncConnected, parent)));
        });

        return path;
    } finally {
        mutex.unlock();
    }
}
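
The MockZooKeeper example snapshots the registered watchers into Sets, then notifies them off the calling thread, with forEach doing the fan-out inside the executor task. A stripped-down sketch of that pattern, using Consumer<String> as a stand-in for ZooKeeper's Watcher interface:

import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Consumer;

public class WatcherNotifyExample {
    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        // Consumer<String> stands in for ZooKeeper's Watcher; the event is a plain String
        Set<Consumer<String>> watchers = new CopyOnWriteArraySet<>();
        watchers.add(event -> System.out.println("watcher received: " + event));

        // Fan the event out to every watcher off the caller's thread
        String event = "NodeCreated:/some/path";
        executor.execute(() -> watchers.forEach(watcher -> watcher.accept(event)));
        executor.shutdown();
    }
}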

From source file:nl.knaw.huc.di.tag.tagml.importer.TAGMLListener.java

@Override
public void enterStartTag(StartTagContext ctx) {
    checkEOF(ctx);
    if (tagNameIsValid(ctx)) {
        MarkupNameContext markupNameContext = ctx.markupName();
        String markupName = markupNameContext.name().getText();
        //      LOG.debug("startTag.markupName=<{}>", markupName);
        checkNameSpace(ctx, markupName);
        ctx.annotation().forEach(annotation -> LOG.debug("  startTag.annotation={{}}", annotation.getText()));

        PrefixContext prefix = markupNameContext.prefix();
        boolean optional = prefix != null && prefix.getText().equals(OPTIONAL_PREFIX);
        boolean resume = prefix != null && prefix.getText().equals(RESUME_PREFIX);

        TAGMarkup markup = resume ? resumeMarkup(ctx)
                : addMarkup(markupName, ctx.annotation(), ctx).setOptional(optional);

        Set<String> layerIds = extractLayerInfo(ctx.markupName().layerInfo());
        Set<String> layers = new HashSet<>();
        state.allOpenMarkup.push(markup);
        boolean firstTag = !document.getLayerNames().contains(TAGML.DEFAULT_LAYER);
        if (firstTag) {
            addDefaultLayer(markup, layers);
            state.rootMarkupId = markup.getDbId();
        }
        layerIds.forEach(layerId -> {
            if (layerId.contains("+")) {
                String[] parts = layerId.split("\\+");
                String parentLayer = parts[0];
                String newLayerId = parts[1];
                document.addLayer(newLayerId, markup, parentLayer);
                //          layers.add(parentLayer);
                layers.add(newLayerId);

            } else if (!(firstTag && DEFAULT_LAYER.equals(layerId))) {
                checkLayerWasAdded(ctx, layerId);
                checkLayerIsOpen(ctx, layerId);
                document.openMarkupInLayer(markup, layerId);
                layers.add(layerId);
            }
        });
        markup.addAllLayers(layers);

        addSuffix(markupNameContext, markup);
        markup.getLayers().forEach(l -> {
            state.openMarkup.putIfAbsent(l, new ArrayDeque<>());
            state.openMarkup.get(l).push(markup);
        });

        currentTextVariationState().addOpenMarkup(markup);
        store.persist(markup.getDTO());
    }
}

From source file:com.abixen.platform.service.businessintelligence.multivisualisation.application.service.database.AbstractDatabaseService.java

private List<Map<String, DataValueDto>> getData(Connection connection, DatabaseDataSource databaseDataSource,
        Set<String> chartColumnsSet, ChartConfigurationForm chartConfigurationForm, Integer limit) {
    ResultSet rs;
    List<Map<String, DataValueDto>> data = new ArrayList<>();
    try {
        Statement statement = connection.createStatement();
        ResultSetMetaData resultSetMetaData = getDatabaseMetaData(connection, databaseDataSource.getTable());
        if (chartConfigurationForm != null) {
            rs = statement.executeQuery(buildQueryForChartData(databaseDataSource, chartColumnsSet,
                    resultSetMetaData, chartConfigurationForm, limit));
        } else {
            rs = statement.executeQuery(
                    buildQueryForDataSourceData(databaseDataSource, chartColumnsSet, resultSetMetaData, limit)
                            .toString());
        }
        while (rs.next()) {
            final ResultSet row = rs;
            Map<String, DataValueDto> rowMap = new HashMap<>();
            chartColumnsSet.forEach(chartColumnsSetElement -> rowMap.put(chartColumnsSetElement,
                    getDataFromColumn(row, chartColumnsSetElement)));
            data.add(rowMap);
        }
    } catch (SQLException e) {
        throw new DataParsingException("Error when parsing data from db. " + e.getMessage());
    }
    return data;
}

From source file:org.apache.cassandra.index.SecondaryIndexManager.java

/**
 * Perform a blocking flush of selected indexes
 */
public void flushIndexesBlocking(Set<Index> indexes) {
    if (indexes.isEmpty())
        return;

    List<Future<?>> wait = new ArrayList<>();
    List<Index> nonCfsIndexes = new ArrayList<>();

    // for each CFS backed index, submit a flush task which we'll wait on for completion
    // for the non-CFS backed indexes, we'll flush those while we wait.
    synchronized (baseCfs.getTracker()) {
        indexes.forEach(index -> index.getBackingTable().map(cfs -> wait.add(cfs.forceFlush()))
                .orElseGet(() -> nonCfsIndexes.add(index)));
    }

    executeAllBlocking(nonCfsIndexes.stream(), Index::getBlockingFlushTask);
    FBUtilities.waitOnFutures(wait);
}
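
The forEach call here partitions the indexes in a single pass: Optional.map routes elements with a backing table into the wait list, and orElseGet routes the rest into nonCfsIndexes. A reduced sketch of that Optional-based routing, where an even-length test stands in for Index.getBackingTable():

import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.Set;

public class PartitionExample {
    public static void main(String[] args) {
        Set<String> items = Set.of("a", "bb", "ccc");
        List<Integer> evenLengths = new ArrayList<>();
        List<String> oddLengthItems = new ArrayList<>();

        // Route each element into one of two lists in a single pass
        items.forEach(item -> evenLength(item).map(evenLengths::add)
                .orElseGet(() -> oddLengthItems.add(item)));

        System.out.println(evenLengths + " / " + oddLengthItems);
    }

    // Stand-in for Index.getBackingTable(): present only for even-length items
    private static Optional<Integer> evenLength(String s) {
        return s.length() % 2 == 0 ? Optional.of(s.length()) : Optional.empty();
    }
}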

From source file:com.facebook.presto.accumulo.tools.RewriteIndex.java

private void setRowIdStatuses(Connector connector, AccumuloTable table, long timestamp,
        Multimap<ByteBuffer, Mutation> queryIndexEntries, Map<ByteBuffer, RowStatus> rowIdStatuses)
        throws TableNotFoundException {
    // Set ranges to all row IDs that we have no status for
    List<Range> queryRanges = queryIndexEntries.keySet().stream().filter(x -> !rowIdStatuses.containsKey(x))
            .map(x -> new Range(new Text(x.array()))).collect(Collectors.toList());

    if (queryRanges.size() == 0) {
        return;
    }

    BatchScanner scanner = connector.createBatchScanner(table.getFullTableName(), auths, 10);
    scanner.setRanges(queryRanges);

    IteratorSetting iteratorSetting = new IteratorSetting(Integer.MAX_VALUE, TimestampFilter.class);
    TimestampFilter.setEnd(iteratorSetting, timestamp, true);
    scanner.addScanIterator(iteratorSetting);

    scanner.addScanIterator(new IteratorSetting(1, FirstEntryInRowIterator.class));

    // Make a copy of all the row IDs we are querying on to back-fill collection
    Set<ByteBuffer> allRowIDs = new HashSet<>(queryIndexEntries.keySet());

    // Scan the data table, removing all known row IDs and setting their status to present
    Text text = new Text();
    for (Entry<Key, Value> entry : scanner) {
        ByteBuffer rowID = ByteBuffer.wrap(entry.getKey().getRow(text).copyBytes());
        allRowIDs.remove(rowID);

        // Assert that this entry is new
        if (rowIdStatuses.put(rowID, RowStatus.PRESENT) != null) {
            throw new RuntimeException(
                    format("Internal error, row %s already has status", new String(rowID.array(), UTF_8)));
        }
    }
    scanner.close();

    AtomicLong newlyAbsent = new AtomicLong(0);
    // Back-fill the absent map -- rows may already be flagged as absent
    allRowIDs.forEach(rowID -> {
        RowStatus existingStatus = rowIdStatuses.get(rowID);
        if (existingStatus == null) {
            newlyAbsent.incrementAndGet();
            rowIdStatuses.put(rowID, RowStatus.ABSENT);
        } else if (existingStatus == RowStatus.PRESENT) {
            throw new RuntimeException(format("Internal error, row %s already has PRESENT status",
                    new String(rowID.array(), UTF_8)));
        }
    });
}

From source file:org.apache.pulsar.broker.service.BrokerService.java

/**
 * Unloads all owned namespace bundles gracefully.
 * <ul>
 * <li>First it makes the current broker unavailable and isolates it from the cluster, so it will not serve
 * any new requests.</li>
 * <li>Second it unloads the namespace bundles one by one without closing the connection, in order to avoid
 * disruption to other namespace bundles which share the same connection from the same client.</li>
 * </ul>
 */
public void unloadNamespaceBundlesGracefully() {
    try {
        // make broker-node unavailable from the cluster
        if (pulsar.getLoadManager() != null) {
            pulsar.getLoadManager().get().disableBroker();
        }

        // unload all namespace-bundles gracefully
        long closeTopicsStartTime = System.nanoTime();
        Set<NamespaceBundle> serviceUnits = pulsar.getNamespaceService().getOwnedServiceUnits();
        serviceUnits.forEach(su -> {
            if (su instanceof NamespaceBundle) {
                try {
                    pulsar.getNamespaceService().unloadNamespaceBundle((NamespaceBundle) su);
                } catch (Exception e) {
                    log.warn("Failed to unload namespace bundle {}", su, e);
                }
            }
        });

        double closeTopicsTimeSeconds = TimeUnit.NANOSECONDS
                .toMillis((System.nanoTime() - closeTopicsStartTime)) / 1000.0;
        log.info("Unloading {} namespace-bundles completed in {} seconds", serviceUnits.size(),
                closeTopicsTimeSeconds);
    } catch (Exception e) {
        log.error("Failed to disable broker from loadbalancer list {}", e.getMessage(), e);
    }
}
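
Note how the lambda catches and logs per-bundle failures instead of letting them propagate: as the Document section at the top states, forEach stops as soon as the action throws, so swallowing the exception keeps the remaining bundles processing. A contrived sketch of the same pattern (the bundle names and failure condition are illustrative):

import java.util.Set;

public class ForEachErrorHandlingExample {
    public static void main(String[] args) {
        Set<String> bundles = Set.of("ns/bundle-1", "bad", "ns/bundle-2");
        bundles.forEach(bundle -> {
            try {
                // Contrived failure: names without a slash are treated as malformed
                if (!bundle.contains("/")) {
                    throw new IllegalArgumentException("malformed bundle name: " + bundle);
                }
                System.out.println("unloaded " + bundle);
            } catch (Exception e) {
                // Swallow per-element failures so the remaining bundles are still processed
                System.err.println("failed to unload " + bundle + ": " + e.getMessage());
            }
        });
    }
}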