Example usage for java.util Set forEach

List of usage examples for java.util Set forEach

Introduction

On this page you can find example usages of java.util Set forEach.

Prototype

default void forEach(Consumer<? super T> action) 

Document

Performs the given action for each element of the Iterable until all elements have been processed or the action throws an exception.
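
Before the real-world examples below, here is a minimal self-contained sketch of the call; the set contents and the printing action are made up for illustration.

import java.util.HashSet;
import java.util.Set;

public class SetForEachDemo {
    public static void main(String[] args) {
        Set<String> names = new HashSet<>();
        names.add("alpha");
        names.add("beta");
        names.add("gamma");

        // Performs the given action once per element; for a HashSet the
        // encounter order is unspecified.
        names.forEach(name -> System.out.println("Hello, " + name));
    }
}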

Usage

From source file:org.apache.druid.indexing.common.task.AppenderatorDriverRealtimeIndexTaskTest.java

private void makeToolboxFactory(final File directory) {
    taskStorage = new HeapMemoryTaskStorage(new TaskStorageConfig(null));
    taskLockbox = new TaskLockbox(taskStorage);

    publishedSegments = new CopyOnWriteArrayList<>();

    ObjectMapper mapper = new DefaultObjectMapper();
    mapper.registerSubtypes(LinearShardSpec.class);
    mapper.registerSubtypes(NumberedShardSpec.class);
    IndexerSQLMetadataStorageCoordinator mdc = new IndexerSQLMetadataStorageCoordinator(mapper,
            derbyConnectorRule.metadataTablesConfigSupplier().get(), derbyConnectorRule.getConnector()) {
        @Override
        public Set<DataSegment> announceHistoricalSegments(Set<DataSegment> segments) throws IOException {
            Set<DataSegment> result = super.announceHistoricalSegments(segments);

            Assert.assertFalse("Segment latch not initialized, did you forget to call expectPublishSegments?",
                    segmentLatch == null);

            publishedSegments.addAll(result);
            segments.forEach(s -> segmentLatch.countDown());

            return result;
        }

        @Override
        public SegmentPublishResult announceHistoricalSegments(Set<DataSegment> segments,
                DataSourceMetadata startMetadata, DataSourceMetadata endMetadata) throws IOException {
            SegmentPublishResult result = super.announceHistoricalSegments(segments, startMetadata,
                    endMetadata);

            Assert.assertFalse("Segment latch not initialized, did you forget to call expectPublishSegments?",
                    segmentLatch == null);

            publishedSegments.addAll(result.getSegments());
            result.getSegments().forEach(s -> segmentLatch.countDown());

            return result;
        }
    };
    final TaskConfig taskConfig = new TaskConfig(directory.getPath(), null, null, 50000, null, false, null,
            null);

    final TaskActionToolbox taskActionToolbox = new TaskActionToolbox(taskLockbox, taskStorage, mdc, emitter,
            EasyMock.createMock(SupervisorManager.class), new Counters());
    final TaskActionClientFactory taskActionClientFactory = new LocalTaskActionClientFactory(taskStorage,
            taskActionToolbox, new TaskAuditLogConfig(false));
    IntervalChunkingQueryRunnerDecorator queryRunnerDecorator = new IntervalChunkingQueryRunnerDecorator(null,
            null, null) {
        @Override
        public <T> QueryRunner<T> decorate(QueryRunner<T> delegate,
                QueryToolChest<T, ? extends Query<T>> toolChest) {
            return delegate;
        }
    };
    final QueryRunnerFactoryConglomerate conglomerate = new DefaultQueryRunnerFactoryConglomerate(
            ImmutableMap.of(TimeseriesQuery.class,
                    new TimeseriesQueryRunnerFactory(new TimeseriesQueryQueryToolChest(queryRunnerDecorator),
                            new TimeseriesQueryEngine(), (query, future) -> {
                                // do nothing
                            })));
    handOffCallbacks = new ConcurrentHashMap<>();
    final SegmentHandoffNotifierFactory handoffNotifierFactory = dataSource -> new SegmentHandoffNotifier() {
        @Override
        public boolean registerSegmentHandoffCallback(SegmentDescriptor descriptor, Executor exec,
                Runnable handOffRunnable) {
            handOffCallbacks.put(descriptor, new Pair<>(exec, handOffRunnable));
            handoffLatch.countDown();
            return true;
        }

        @Override
        public void start() {
            //Noop
        }

        @Override
        public void close() {
            //Noop
        }

    };
    final TestUtils testUtils = new TestUtils();
    rowIngestionMetersFactory = testUtils.getRowIngestionMetersFactory();
    SegmentLoaderConfig segmentLoaderConfig = new SegmentLoaderConfig() {
        @Override
        public List<StorageLocationConfig> getLocations() {
            return Lists.newArrayList();
        }
    };

    taskToolboxFactory = new TaskToolboxFactory(taskConfig, taskActionClientFactory, emitter,
            new TestDataSegmentPusher(), new TestDataSegmentKiller(), null, // DataSegmentMover
            null, // DataSegmentArchiver
            new TestDataSegmentAnnouncer(), EasyMock.createNiceMock(DataSegmentServerAnnouncer.class),
            handoffNotifierFactory, () -> conglomerate, MoreExecutors.sameThreadExecutor(), // queryExecutorService
            EasyMock.createMock(MonitorScheduler.class),
            new SegmentLoaderFactory(new SegmentLoaderLocalCacheManager(null, segmentLoaderConfig,
                    testUtils.getTestObjectMapper())),
            testUtils.getTestObjectMapper(), testUtils.getTestIndexIO(), MapCache.create(1024),
            new CacheConfig(), new CachePopulatorStats(), testUtils.getTestIndexMergerV9(),
            EasyMock.createNiceMock(DruidNodeAnnouncer.class), EasyMock.createNiceMock(DruidNode.class),
            new LookupNodeService("tier"), new DataNodeService("tier", 1000, ServerType.INDEXER_EXECUTOR, 0),
            new TaskReportFileWriter(reportsFile));
}
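
The overrides above use Set.forEach purely for its side effect: every published segment counts a CountDownLatch down by one, so the test thread can block until the expected number of segments has been announced. A stripped-down sketch of that synchronization pattern, with a placeholder segment-id type and a hypothetical latch size rather than Druid's actual types:

import java.util.Set;
import java.util.concurrent.CountDownLatch;

public class PublishLatchSketch {
    // Latch sized to the number of publications the test expects (assumed 3 here).
    private final CountDownLatch segmentLatch = new CountDownLatch(3);

    // Called by the code under test whenever a batch of segments is announced.
    public void onSegmentsPublished(Set<String> segmentIds) {
        // One countDown() per element, mirroring segments.forEach(s -> segmentLatch.countDown()).
        segmentIds.forEach(id -> segmentLatch.countDown());
    }

    // Called from the test thread to wait for all expected publications.
    public void awaitPublications() throws InterruptedException {
        segmentLatch.await();
    }
}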

From source file:org.onosproject.segmentrouting.pwaas.L2TunnelHandler.java

/**
 * Processes PWaaS Config updated event.
 *
 * @param event network config updated event
 */
public void processPwaasConfigUpdated(NetworkConfigEvent event) {
    log.info("Processing Pwaas CONFIG_UPDATED");
    // We retrieve the old pseudo wires.
    PwaasConfig prevConfig = (PwaasConfig) event.prevConfig().get();
    Set<Long> prevPws = prevConfig.getPwIds();
    // We retrieve the new pseudo wires.
    PwaasConfig config = (PwaasConfig) event.config().get();
    Set<Long> newPws = config.getPwIds();
    // We compute the pseudo wires to update.
    Set<Long> updPws = newPws.stream()
            .filter(tunnelId -> prevPws.contains(tunnelId)
                    && !config.getPwDescription(tunnelId).equals(prevConfig.getPwDescription(tunnelId)))
            .collect(Collectors.toSet());
    // The pseudo wires to remove.
    Set<DefaultL2TunnelDescription> pwToRemove = prevPws.stream().filter(tunnelId -> !newPws.contains(tunnelId))
            .map(prevConfig::getPwDescription).collect(Collectors.toSet());
    tearDown(pwToRemove);
    // The pseudo wires to add.
    Set<DefaultL2TunnelDescription> pwToAdd = newPws.stream().filter(tunnelId -> !prevPws.contains(tunnelId))
            .map(config::getPwDescription).collect(Collectors.toSet());
    deploy(pwToAdd);
    // The pseudo wires to update.
    updPws.forEach(
            tunnelId -> updatePw(prevConfig.getPwDescription(tunnelId), config.getPwDescription(tunnelId)));
}
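
The handler splits the old and new pseudo wire ids three ways (update, remove, add) and applies forEach only to the update set. The same diff shape, reduced to its essentials with printlns standing in for tearDown, deploy, and updatePw:

import java.util.Set;
import java.util.stream.Collectors;

public class ConfigDiffSketch {
    public static void applyDiff(Set<Long> prevIds, Set<Long> newIds) {
        // Ids present in both generations are candidates for an in-place update.
        Set<Long> toUpdate = newIds.stream()
                .filter(prevIds::contains)
                .collect(Collectors.toSet());

        // Ids only in the old generation must be torn down.
        Set<Long> toRemove = prevIds.stream()
                .filter(id -> !newIds.contains(id))
                .collect(Collectors.toSet());

        // Ids only in the new generation must be deployed.
        Set<Long> toAdd = newIds.stream()
                .filter(id -> !prevIds.contains(id))
                .collect(Collectors.toSet());

        toRemove.forEach(id -> System.out.println("tear down " + id));
        toAdd.forEach(id -> System.out.println("deploy " + id));
        toUpdate.forEach(id -> System.out.println("update " + id));
    }
}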

From source file:org.apache.zookeeper.MockZooKeeper.java

@Override
public Stat setData(final String path, byte[] data, int version) throws KeeperException, InterruptedException {
    mutex.lock();

    final Set<Watcher> toNotify = Sets.newHashSet();
    int newVersion;

    try {
        checkProgrammedFail();

        if (stopped) {
            throw new KeeperException.ConnectionLossException();
        }

        if (!tree.containsKey(path)) {
            throw new KeeperException.NoNodeException();
        }

        int currentVersion = tree.get(path).getRight();

        // Check version
        if (version != -1 && version != currentVersion) {
            throw new KeeperException.BadVersionException(path);
        }

        newVersion = currentVersion + 1;
        log.debug("[{}] Updating -- current version: {}", path, currentVersion);
        tree.put(path, Pair.of(data, newVersion));

        toNotify.addAll(watchers.get(path));
        watchers.removeAll(path);
    } finally {
        mutex.unlock();
    }

    executor.execute(() -> {
        toNotify.forEach(watcher -> watcher
                .process(new WatchedEvent(EventType.NodeDataChanged, KeeperState.SyncConnected, path)));
    });

    Stat stat = new Stat();
    stat.setVersion(newVersion);
    return stat;
}
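
Note the shape of the method: the watchers to notify are copied into a local set while the mutex is held, and the forEach runs on an executor only after the lock is released, so a slow watcher callback cannot block other operations. That idiom in miniature, with Watcher reduced to a Runnable:

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.locks.ReentrantLock;

public class NotifyOutsideLockSketch {
    private final ReentrantLock mutex = new ReentrantLock();
    private final Set<Runnable> watchers = new HashSet<>();
    private final ExecutorService executor = Executors.newSingleThreadExecutor();

    public void update() {
        // Snapshot and clear the watchers while holding the lock...
        final Set<Runnable> toNotify;
        mutex.lock();
        try {
            toNotify = new HashSet<>(watchers);
            watchers.clear();
        } finally {
            mutex.unlock();
        }
        // ...then fire them asynchronously, outside the critical section.
        executor.execute(() -> toNotify.forEach(Runnable::run));
    }
}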

From source file:org.hawkular.inventory.impl.tinkerpop.test.BasicTest.java

@Test
public void testRelationshipServiceNamed1() throws Exception {
    Set<Relationship> contains = inventory.tenants().getAll().relationships().named("contains").entities();
    assert contains.stream().anyMatch(rel -> "com.acme.tenant".equals(rel.getSource().getId()) && "URL"
            .equals(rel.getTarget().getId())) : "Tenant 'com.acme.tenant' must contain ResourceType 'URL'.";
    assert contains.stream()
            .anyMatch(rel -> "com.acme.tenant".equals(rel.getSource().getId()) && "production".equals(rel
                    .getTarget().getId())) : "Tenant 'com.acme.tenant' must contain Environment 'production'.";
    assert contains.stream().anyMatch(rel -> "com.example.tenant".equals(rel.getSource().getId()) && "Size"
            .equals(rel.getTarget().getId())) : "Tenant 'com.example.tenant' must contain MetricType 'Size'.";

    contains.forEach((r) -> {
        assert r.getId() != null;
    });
}
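
When every element must satisfy an invariant, a forEach whose body asserts, as in the final lines above, is a compact alternative to an explicit loop. A minimal sketch of the same check, assuming JUnit 4 and a stand-in entity type:

import java.util.Set;

import org.junit.Assert;

public class ForEachAssertionSketch {
    public static void assertAllHaveIds(Set<MyEntity> entities) {
        // Fails on the first element whose id is missing.
        entities.forEach(e -> Assert.assertNotNull("id must be set", e.getId()));
    }

    // Stand-in for the domain entity under test.
    public static class MyEntity {
        private final String id;
        public MyEntity(String id) { this.id = id; }
        public String getId() { return id; }
    }
}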

From source file:com.intuit.wasabi.assignment.impl.AssignmentsImpl.java

/**
 * Populate assignments metadata; use metadata cache if it is enabled or use repository to populate from DB
 * <p>
 * experimentIds is NULL when called from AssignmentsResource.getBatchAssignments() => api:/v1/assignments/applications/{applicationName}/users/{userID}
 * experimentBatch.labels are NULL when called from AssignmentsImpl.doPageAssignments() => api:/v1/assignments/applications/{applicationName}/pages/{pageName}/users/{userID}
 *
 * @param userID                    Input: Given user id
 * @param appName                   Input: Given application name
 * @param context                   Input: Given context
 * @param experimentBatch           Input/Output: Given experiment batch. This object will be modified and become one of the output; in the case of AssignmentsImpl.doPageAssignments()
 * @param allowAssignments          Input: Given batch experiment ids with allow assignment flag.
 * @param prioritizedExperimentList Output: prioritized experiment list of ALL the experiments for the given application.
 * @param experimentMap             Output: Map of 'experiment id TO experiment' of ALL the experiments for the given application.
 * @param bucketMap                 Output: Map of 'experiment id TO BucketList' of ONLY experiments which are associated to the given application and page.
 * @param exclusionMap              Output: Map of 'experiment id TO to its mutual experiment ids' of ONLY experiments which are associated to the given application and page.
 */
private void populateAssignmentsMetadata(User.ID userID, Application.Name appName, Context context,
        ExperimentBatch experimentBatch, Optional<Map<Experiment.ID, Boolean>> allowAssignments,
        PrioritizedExperimentList prioritizedExperimentList, Map<Experiment.ID, Experiment> experimentMap,
        Map<Experiment.ID, BucketList> bucketMap, Map<Experiment.ID, List<Experiment.ID>> exclusionMap) {
    LOGGER.debug(
            "populateAssignmentsMetadata - STARTED: userID={}, appName={}, context={}, experimentBatch={}, experimentIds={}",
            userID, appName, context, experimentBatch, allowAssignments);
    if (isNull(experimentBatch.getLabels()) && !allowAssignments.isPresent()) {
        LOGGER.error(
                "Invalid input to AssignmentsImpl.populateAssignmentsMetadata(): Given input: userID={}, appName={}, context={}, experimentBatch={}, allowAssignments={}",
                userID, appName, context, experimentBatch, allowAssignments);
        return;
    }

    //IF metadata cache is enabled, THEN use metadata cache to populate assignments metadata ELSE use assignments repository to populate assignments metadata
    if (metadataCacheEnabled) {
        //Populate experiments map of all the experiments of given application
        metadataCache.getExperimentsByAppName(appName).forEach(exp -> experimentMap.put(exp.getID(), exp));
        LOGGER.debug("[cache] experimentMap = {}", experimentMap);

        //Populate prioritized experiments list of given application
        Optional<PrioritizedExperimentList> prioritizedExperimentListOptional = metadataCache
                .getPrioritizedExperimentListMap(appName);
        if (prioritizedExperimentListOptional.isPresent()) {
            prioritizedExperimentListOptional.get().getPrioritizedExperiments()
                    .forEach(exp -> prioritizedExperimentList.addPrioritizedExperiment(exp));
        } else {
            //TODO: 1/30/17  What to do if there are no experiments for given application
        }
        LOGGER.debug("[cache] prioritizedExperimentList = {}",
                prioritizedExperimentList.getPrioritizedExperiments());

        //Populate experiments ids of given batch
        Set<Experiment.ID> experimentIds = allowAssignments.isPresent() ? allowAssignments.get().keySet()
                : new HashSet<>();
        populateExperimentIdsAndExperimentBatch(allowAssignments, experimentMap, experimentBatch,
                experimentIds);

        //Based on given experiment ids, populate experiment buckets and exclusions..
        experimentIds.forEach(expId -> {
            bucketMap.put(expId, metadataCache.getBucketList(expId));
            exclusionMap.put(expId, metadataCache.getExclusionList(expId));
        });
        LOGGER.debug("[cache] bucketMap = {}", bucketMap);
        LOGGER.debug("[cache] exclusionMap = {}", exclusionMap);

    } else {
        assignmentsRepository.populateAssignmentsMetadata(userID, appName, context, experimentBatch,
                allowAssignments, prioritizedExperimentList, experimentMap, bucketMap, exclusionMap);
    }

    LOGGER.debug("populateAssignmentsMetadata - FINISHED...");
}
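
In the cache-enabled branch, forEach is what moves data into the caller-supplied output collections: one pass fills experimentMap, and a second pass over the batch's experiment ids fills bucketMap and exclusionMap together. That output-parameter pattern in isolation, with plain collections simulating the metadata cache:

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class PopulateOutputsSketch {
    // Simulated cache lookups; real code would consult a metadata cache.
    static List<String> bucketsFor(String expId) { return List.of(expId + "-a", expId + "-b"); }
    static List<String> exclusionsFor(String expId) { return List.of(); }

    public static void populate(Set<String> experimentIds,
                                Map<String, List<String>> bucketMap,
                                Map<String, List<String>> exclusionMap) {
        // A single forEach populates both output maps in one pass.
        experimentIds.forEach(expId -> {
            bucketMap.put(expId, bucketsFor(expId));
            exclusionMap.put(expId, exclusionsFor(expId));
        });
    }

    public static void main(String[] args) {
        Map<String, List<String>> buckets = new HashMap<>();
        Map<String, List<String>> exclusions = new HashMap<>();
        populate(Set.of("exp1", "exp2"), buckets, exclusions);
        System.out.println(buckets);
        System.out.println(exclusions);
    }
}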

From source file:com.netflix.metacat.usermetadata.mysql.MysqlUserMetadataService.java

@Override
public List<DefinitionMetadataDto> searchDefinitionMetadatas(final Set<String> propertyNames, final String type,
        final String name, final String sortBy, final String sortOrder, final Integer offset,
        final Integer limit) {
    final List<DefinitionMetadataDto> result = Lists.newArrayList();
    final StringBuilder query = new StringBuilder(SQL.SEARCH_DEFINITION_METADATAS);
    final List<Object> paramList = Lists.newArrayList();
    if (type != null) {
        String typeRegex = null;
        switch (type) {
        case "database":
            typeRegex = "^[^/]*/[^/]*$";
            break;
        case "table":
            typeRegex = "^[^/]*/[^/]*/[^/]*$";
            break;
        case "partition":
            typeRegex = "^[^/]*/[^/]*/[^/]*/.*$";
            break;
        default:
        }
        if (typeRegex != null) {
            query.append(" and name rlike ?");
            paramList.add(typeRegex);
        }
    }
    if (propertyNames != null && !propertyNames.isEmpty()) {
        propertyNames.forEach(propertyName -> {
            query.append(" and data like ?");
            paramList.add("%\"" + propertyName + "\":%");
        });
    }
    if (!Strings.isNullOrEmpty(name)) {
        query.append(" and name like ?");
        paramList.add(name);
    }
    if (!Strings.isNullOrEmpty(sortBy)) {
        query.append(" order by ").append(sortBy);
        if (!Strings.isNullOrEmpty(sortOrder)) {
            query.append(" ").append(sortOrder);
        }
    }
    if (limit != null) {
        query.append(" limit ");
        if (offset != null) {
            query.append(offset).append(",");
        }
        query.append(limit);
    }
    final Object[] params = new Object[paramList.size()];
    final Connection connection = DBUtil.getReadConnection(poolingDataSource);
    try {
        // Handler for reading the result set
        final ResultSetHandler<Void> handler = rs -> {
            while (rs.next()) {
                final String definitionName = rs.getString("name");
                final String data = rs.getString("data");
                final DefinitionMetadataDto definitionMetadataDto = new DefinitionMetadataDto();
                definitionMetadataDto.setName(QualifiedName.fromString(definitionName));
                definitionMetadataDto.setDefinitionMetadata(metacatJson.parseJsonObject(data));
                result.add(definitionMetadataDto);
            }
            return null;
        };
        new QueryRunner().query(connection, query.toString(), handler, paramList.toArray(params));
    } catch (SQLException e) {
        log.error("Sql exception", e);
        throw new UserMetadataServiceException("Failed to get definition data", e);
    } finally {
        DBUtil.closeReadConnection(connection);
    }
    return result;
}
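
Here forEach grows the SQL text and the bound-parameter list in lock step: each property name contributes one "and data like ?" clause and one matching value. The core of that pattern in isolation (the base query and property names are illustrative):

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

public class DynamicQuerySketch {
    public static void main(String[] args) {
        Set<String> propertyNames = Set.of("owner", "lifetime");

        StringBuilder query = new StringBuilder("select name, data from metadata where 1=1");
        List<Object> params = new ArrayList<>();

        // One placeholder and one bound parameter per property, kept in sync.
        propertyNames.forEach(propertyName -> {
            query.append(" and data like ?");
            params.add("%\"" + propertyName + "\":%");
        });

        System.out.println(query);
        System.out.println(params);
    }
}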

From source file:com.caricah.iotracah.core.worker.DumbWorker.java

/**
 * Provides the Observer with a new item to observe.
 * <p>
 * The {@link com.caricah.iotracah.core.modules.Server} may call this method 0 or more times.
 * <p>
 * The {@code Observable} will not call this method again after it calls either {@link #onCompleted} or
 * {@link #onError}.
 *
 * @param iotMessage the item emitted by the Observable
 */
@Override
public void onNext(IOTMessage iotMessage) {

    getExecutorService().submit(() -> {
        log.info(" onNext : received {}", iotMessage);
        try {

            IOTMessage response = null;

            switch (iotMessage.getMessageType()) {
            case ConnectMessage.MESSAGE_TYPE:
                ConnectMessage connectMessage = (ConnectMessage) iotMessage;
                response = ConnectAcknowledgeMessage.from(connectMessage.isDup(), connectMessage.getQos(),
                        connectMessage.isRetain(), connectMessage.getKeepAliveTime(),
                        MqttConnectReturnCode.CONNECTION_ACCEPTED);

                break;
            case SubscribeMessage.MESSAGE_TYPE:

                SubscribeMessage subscribeMessage = (SubscribeMessage) iotMessage;

                List<Integer> grantedQos = new ArrayList<>();
                subscribeMessage.getTopicFilterList().forEach(topic -> {

                    String topicKey = quickCheckIdKey("",
                            Arrays.asList(topic.getKey().split(Constant.PATH_SEPARATOR)));

                    Set<String> channelIds = subscriptions.get(topicKey);

                    if (Objects.isNull(channelIds)) {
                        channelIds = new HashSet<>();
                    }

                    channelIds.add(subscribeMessage.getConnectionId());
                    subscriptions.put(topicKey, channelIds);

                    grantedQos.add(topic.getValue());

                });

                response = SubscribeAcknowledgeMessage.from(subscribeMessage.getMessageId(), grantedQos);

                break;
            case UnSubscribeMessage.MESSAGE_TYPE:
                UnSubscribeMessage unSubscribeMessage = (UnSubscribeMessage) iotMessage;
                response = UnSubscribeAcknowledgeMessage.from(unSubscribeMessage.getMessageId());

                break;
            case Ping.MESSAGE_TYPE:
                response = iotMessage;
                break;
            case PublishMessage.MESSAGE_TYPE:

                PublishMessage publishMessage = (PublishMessage) iotMessage;

                Set<String> matchingTopics = getMatchingSubscriptions("", publishMessage.getTopic());

                for (String match : matchingTopics) {
                    Set<String> channelIds = subscriptions.get(match);

                    if (Objects.nonNull(channelIds)) {

                        channelIds.forEach(id -> {

                            PublishMessage clonePublishMessage = publishMessage.cloneMessage();
                            clonePublishMessage.copyTransmissionData(iotMessage);
                            clonePublishMessage.setConnectionId(id);
                            pushToServer(clonePublishMessage);
                        });

                    }

                }

                if (MqttQoS.AT_MOST_ONCE.value() == publishMessage.getQos()) {

                    break;

                } else if (MqttQoS.AT_LEAST_ONCE.value() == publishMessage.getQos()) {

                    response = AcknowledgeMessage.from(publishMessage.getMessageId());
                    break;

                }

            case PublishReceivedMessage.MESSAGE_TYPE:
            case ReleaseMessage.MESSAGE_TYPE:
            case CompleteMessage.MESSAGE_TYPE:
            case DisconnectMessage.MESSAGE_TYPE:
            case AcknowledgeMessage.MESSAGE_TYPE:
            default:
                DisconnectMessage disconnectMessage = DisconnectMessage.from(true);
                disconnectMessage.copyTransmissionData(iotMessage);

                throw new ShutdownException(disconnectMessage);

            }

            if (Objects.nonNull(response)) {

                response.copyTransmissionData(iotMessage);
                pushToServer(response);
            }

        } catch (ShutdownException e) {

            IOTMessage response = e.getResponse();
            if (Objects.nonNull(response)) {
                pushToServer(response);
            }

        } catch (Exception e) {
            log.error(" onNext : Serious error that requires attention ", e);
        }

    });
}
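
In the PublishMessage branch, forEach fans one inbound publish out to every channel id subscribed to a matching topic, cloning the message for each recipient. A compact sketch of that fan-out, with the message reduced to a string payload and a hypothetical pushToServer target:

import java.util.Map;
import java.util.Set;

public class FanOutSketch {
    // topic -> subscribed channel ids
    private final Map<String, Set<String>> subscriptions;

    public FanOutSketch(Map<String, Set<String>> subscriptions) {
        this.subscriptions = subscriptions;
    }

    public void publish(String topic, String payload) {
        Set<String> channelIds = subscriptions.get(topic);
        if (channelIds != null) {
            // One delivery per subscriber; the real code clones the message per id.
            channelIds.forEach(id -> pushToServer(id, payload));
        }
    }

    private void pushToServer(String channelId, String payload) {
        System.out.println("deliver to " + channelId + ": " + payload);
    }
}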

From source file:org.opencb.opencga.storage.core.variant.adaptors.VariantDBAdaptorTest.java

@Test
public void testGetVariantsByType() {
    Set<Variant> snv = new HashSet<>(dbAdaptor
            .get(new Query(VariantDBAdaptor.VariantQueryParams.TYPE.key(), VariantType.SNV), new QueryOptions())
            .getResult());
    System.out.println("SNV = " + snv.size());
    snv.forEach(
            variant -> assertThat(EnumSet.of(VariantType.SNV, VariantType.SNP), hasItem(variant.getType())));

    Set<Variant> not_snv = new HashSet<>(
            dbAdaptor.get(new Query(VariantDBAdaptor.VariantQueryParams.TYPE.key(), "!" + VariantType.SNV),
                    new QueryOptions()).getResult());
    System.out.println("!SNV = " + not_snv.size());
    not_snv.forEach(
            variant -> assertFalse(EnumSet.of(VariantType.SNV, VariantType.SNP).contains(variant.getType())));

    Set<Variant> snv_snp = new HashSet<>(dbAdaptor.get(new Query(VariantDBAdaptor.VariantQueryParams.TYPE.key(),
            VariantType.SNV + "," + VariantContext.Type.SNP), new QueryOptions()).getResult());
    System.out.println("SNV_SNP = " + snv_snp.size());
    assertEquals(snv_snp, snv);

    Set<Variant> snp = new HashSet<>(dbAdaptor
            .get(new Query(VariantDBAdaptor.VariantQueryParams.TYPE.key(), VariantType.SNP), new QueryOptions())
            .getResult());
    snp.forEach(variant -> assertEquals(VariantType.SNP, variant.getType()));
    snp.forEach(variant -> assertThat(snv, hasItem(variant)));
    System.out.println("SNP = " + snp.size());

    Set<Variant> indels = new HashSet<>(
            dbAdaptor.get(new Query(VariantDBAdaptor.VariantQueryParams.TYPE.key(), VariantType.INDEL),
                    new QueryOptions()).getResult());
    indels.forEach(variant -> assertEquals(VariantType.INDEL, variant.getType()));
    System.out.println("INDEL = " + indels.size());

    Set<Variant> indels_snp = new HashSet<>(
            dbAdaptor.get(new Query(VariantDBAdaptor.VariantQueryParams.TYPE.key(),
                    VariantType.INDEL + "," + VariantType.SNP), new QueryOptions()).getResult());
    indels_snp.forEach(
            variant -> assertThat(EnumSet.of(VariantType.INDEL, VariantType.SNP), hasItem(variant.getType())));
    indels_snp.forEach(variant -> assertTrue(indels.contains(variant) || snp.contains(variant)));
    System.out.println("INDEL_SNP = " + indels_snp.size());

    Set<Variant> indels_snv = new HashSet<>(
            dbAdaptor.get(new Query(VariantDBAdaptor.VariantQueryParams.TYPE.key(),
                    VariantType.INDEL + "," + VariantType.SNV), new QueryOptions()).getResult());
    indels_snv.forEach(variant -> assertThat(EnumSet.of(VariantType.INDEL, VariantType.SNP, VariantType.SNV),
            hasItem(variant.getType())));
    indels_snv.forEach(variant -> assertTrue(indels.contains(variant) || snv.contains(variant)));
    System.out.println("INDEL_SNV = " + indels_snv.size());
}

From source file:ai.grakn.engine.backgroundtasks.taskstorage.GraknStateStorage.java

public Boolean updateState(String id, TaskStatus status, String statusChangeBy, String engineID,
        Throwable failure, String checkpoint, JSONObject configuration) {
    if (id == null)
        return false;

    if (status == null && statusChangeBy == null && engineID == null && failure == null && checkpoint == null
            && configuration == null)
        return false;

    // Existing resource relations to remove
    final Set<String> resourcesToDettach = new HashSet<String>();

    // New resources to add
    Var resources = var(TASK_VAR).id(id);

    if (status != null) {
        resourcesToDettach.add(STATUS);
        resourcesToDettach.add(STATUS_CHANGE_TIME);
        resources.has(STATUS, status.toString()).has(STATUS_CHANGE_TIME, new Date().getTime());
    }
    if (statusChangeBy != null) {
        resourcesToDettach.add(STATUS_CHANGE_BY);
        resources.has(STATUS_CHANGE_BY, statusChangeBy);
    }
    if (engineID != null) {
        resourcesToDettach.add(ENGINE_ID);
        resources.has(ENGINE_ID, engineID);
    }
    if (failure != null) {
        resourcesToDettach.add(TASK_EXCEPTION);
        resourcesToDettach.add(STACK_TRACE);
        resources.has(TASK_EXCEPTION, failure.toString());
        if (failure.getStackTrace().length > 0)
            resources.has(STACK_TRACE, Arrays.toString(failure.getStackTrace()));
    }
    if (checkpoint != null) {
        resourcesToDettach.add(TASK_CHECKPOINT);
        resources.has(TASK_CHECKPOINT, checkpoint);
    }
    if (configuration != null) {
        resourcesToDettach.add(TASK_CONFIGURATION);
        resources.has(TASK_CONFIGURATION, configuration.toString());
    }

    Optional<Boolean> result = attemptCommitToSystemGraph((graph) -> {
        LOG.debug("dettaching: " + resourcesToDettach);
        LOG.debug("inserting " + resources);
        final Entity task = (Entity) graph.getConcept(id);
        // Remove relations to any resources we want to currently update 
        resourcesToDettach.forEach(typeName -> {
            RoleType roleType = graph.getRoleType(Schema.Resource.HAS_RESOURCE_OWNER.getName(typeName));
            if (roleType == null)
                System.err.println("NO ROLE TYPE FOR RESOURCE " + typeName);
            task.relations(roleType).forEach(Concept::delete);
        });
        // Insert new resources with new values
        graph.graql().insert(resources).execute();
        return true;
    }, true);

    return result.isPresent();
}
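
The forEach inside the commit closure is the destructive half of the update: for each resource type about to be rewritten, it deletes the existing relations before the new values are inserted. That detach-then-insert step, reduced to its shape with the graph call abstracted behind a BiConsumer:

import java.util.Set;
import java.util.function.BiConsumer;

public class DetachThenInsertSketch {
    public static void update(Set<String> resourcesToDetach,
                              BiConsumer<String, String> deleteRelation,
                              String taskId) {
        // Remove the old relation for every resource type we are about to rewrite...
        resourcesToDetach.forEach(typeName -> deleteRelation.accept(taskId, typeName));
        // ...then insert the new resource values (not shown).
    }

    public static void main(String[] args) {
        update(Set.of("status", "engineID"),
                (task, type) -> System.out.println("delete " + type + " of " + task),
                "task-42");
    }
}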

From source file:org.apache.nifi.web.api.VersionsResource.java

@POST
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@Path("update-requests/process-groups/{id}")
@ApiOperation(value = "Initiate the Update Request of a Process Group with the given ID", response = VersionedFlowUpdateRequestEntity.class, notes = "For a Process Group that is already under Version Control, this will initiate the action of changing "
        + "from a specific version of the flow in the Flow Registry to a different version of the flow. This can be a lengthy "
        + "process, as it will stop any Processors and disable any Controller Services necessary to perform the action and then restart them. As a result, "
        + "the endpoint will immediately return a VersionedFlowUpdateRequestEntity, and the process of updating the flow will occur "
        + "asynchronously in the background. The client may then periodically poll the status of the request by issuing a GET request to "
        + "/versions/update-requests/{requestId}. Once the request is completed, the client is expected to issue a DELETE request to "
        + "/versions/update-requests/{requestId}. " + NON_GUARANTEED_ENDPOINT, authorizations = {
                @Authorization(value = "Read - /process-groups/{uuid}"),
                @Authorization(value = "Write - /process-groups/{uuid}"),
                @Authorization(value = "Read - /{component-type}/{uuid} - For all encapsulated components"),
                @Authorization(value = "Write - /{component-type}/{uuid} - For all encapsulated components"),
                @Authorization(value = "Write - if the template contains any restricted components - /restricted-components") })
@ApiResponses(value = {
        @ApiResponse(code = 400, message = "NiFi was unable to complete the request because it was invalid. The request should not be retried without modification."),
        @ApiResponse(code = 401, message = "Client could not be authenticated."),
        @ApiResponse(code = 403, message = "Client is not authorized to make this request."),
        @ApiResponse(code = 404, message = "The specified resource could not be found."),
        @ApiResponse(code = 409, message = "The request was valid but NiFi was not in the appropriate state to process it. Retrying the same request later may be successful.") })
public Response initiateVersionControlUpdate(
        @ApiParam("The process group id.") @PathParam("id") final String groupId,
        @ApiParam(value = "The controller service configuration details.", required = true) final VersionControlInformationEntity requestEntity) {

    // Verify the request
    final RevisionDTO revisionDto = requestEntity.getProcessGroupRevision();
    if (revisionDto == null) {
        throw new IllegalArgumentException("Process Group Revision must be specified");
    }

    final VersionControlInformationDTO requestVersionControlInfoDto = requestEntity
            .getVersionControlInformation();
    if (requestVersionControlInfoDto == null) {
        throw new IllegalArgumentException("Version Control Information must be supplied.");
    }
    if (requestVersionControlInfoDto.getGroupId() == null) {
        throw new IllegalArgumentException("The Process Group ID must be supplied.");
    }
    if (!requestVersionControlInfoDto.getGroupId().equals(groupId)) {
        throw new IllegalArgumentException(
                "The Process Group ID in the request body does not match the Process Group ID of the requested resource.");
    }
    if (requestVersionControlInfoDto.getBucketId() == null) {
        throw new IllegalArgumentException("The Bucket ID must be supplied.");
    }
    if (requestVersionControlInfoDto.getFlowId() == null) {
        throw new IllegalArgumentException("The Flow ID must be supplied.");
    }
    if (requestVersionControlInfoDto.getRegistryId() == null) {
        throw new IllegalArgumentException("The Registry ID must be supplied.");
    }
    if (requestVersionControlInfoDto.getVersion() == null) {
        throw new IllegalArgumentException("The Version of the flow must be supplied.");
    }

    if (isDisconnectedFromCluster()) {
        verifyDisconnectedNodeModification(requestEntity.isDisconnectedNodeAcknowledged());
    }

    // We will perform the updating of the Versioned Flow in a background thread because it can be a long-running process.
    // In order to do this, we will need some parameters that are only available as Thread-Local variables to the current
    // thread, so we will gather the values for these parameters up front.
    final boolean replicateRequest = isReplicateRequest();
    final ComponentLifecycle componentLifecycle = replicateRequest ? clusterComponentLifecycle
            : localComponentLifecycle;
    final NiFiUser user = NiFiUserUtils.getNiFiUser();

    // Workflow for this process:
    // 0. Obtain the versioned flow snapshot to use for the update
    //    a. Contact registry to download the desired version.
    //    b. Get Variable Registry of this Process Group and all ancestor groups
    //    c. Perform diff to find any new variables
    //    d. Get Variable Registry of any child Process Group in the versioned flow
    //    e. Perform diff to find any new variables
    //    f. Prompt user to fill in values for all new variables
    // 1. Determine which components would be affected (and are enabled/running)
    //    a. Component itself is modified in some way, other than position changing.
    //    b. Source and Destination of any Connection that is modified.
    //    c. Any Processor or Controller Service that references a Controller Service that is modified.
    // 2. Verify READ and WRITE permissions for user, for every component.
    // 3. Verify that all components in the snapshot exist on all nodes (i.e., the NAR exists)?
    // 4. Verify that Process Group is already under version control. If not, must start Version Control instead of updateFlow
    // 5. Verify that Process Group is not 'dirty'.
    // 6. Stop all Processors, Funnels, Ports that are affected.
    // 7. Wait for all of the components to finish stopping.
    // 8. Disable all Controller Services that are affected.
    // 9. Wait for all Controller Services to finish disabling.
    // 10. Ensure that if any connection was deleted, that it has no data in it. Ensure that no Input Port
    //    was removed, unless it currently has no incoming connections. Ensure that no Output Port was removed,
    //    unless it currently has no outgoing connections. Checking ports & connections could be done before
    //    stopping everything, but removal of Connections cannot.
    // 11. Update variable registry to include new variables
    //    (only new variables so don't have to worry about affected components? Or do we need to in case a processor
    //    is already referencing the variable? In which case we need to include the affected components above in the
    //    Set of affected components before stopping/disabling.).
    // 12. Update components in the Process Group; update Version Control Information.
    // 13. Re-Enable all affected Controller Services that were not removed.
    // 14. Re-Start all Processors, Funnels, Ports that are affected and not removed.

    // Step 0: Get the Versioned Flow Snapshot from the Flow Registry
    final VersionedFlowSnapshot flowSnapshot = serviceFacade
            .getVersionedFlowSnapshot(requestEntity.getVersionControlInformation(), true);

    // The flow in the registry may not contain the same versions of components that we have in our flow. As a result, we need to update
    // the flow snapshot to contain compatible bundles.
    serviceFacade.discoverCompatibleBundles(flowSnapshot.getFlowContents());

    // Step 1: Determine which components will be affected by updating the version
    final Set<AffectedComponentEntity> affectedComponents = serviceFacade
            .getComponentsAffectedByVersionChange(groupId, flowSnapshot);

    // build a request wrapper
    final InitiateChangeFlowVersionRequestWrapper requestWrapper = new InitiateChangeFlowVersionRequestWrapper(
            requestEntity, componentLifecycle, getAbsolutePath(), affectedComponents, replicateRequest,
            flowSnapshot);

    final Revision requestRevision = getRevision(requestEntity.getProcessGroupRevision(), groupId);
    return withWriteLock(serviceFacade, requestWrapper, requestRevision, lookup -> {
        // Step 2: Verify READ and WRITE permissions for user, for every component.
        final ProcessGroupAuthorizable groupAuthorizable = lookup.getProcessGroup(groupId);
        authorizeProcessGroup(groupAuthorizable, authorizer, lookup, RequestAction.READ, true, false, true,
                true);
        authorizeProcessGroup(groupAuthorizable, authorizer, lookup, RequestAction.WRITE, true, false, true,
                true);

        final VersionedProcessGroup groupContents = flowSnapshot.getFlowContents();
        final Set<ConfigurableComponent> restrictedComponents = FlowRegistryUtils
                .getRestrictedComponents(groupContents, serviceFacade);
        restrictedComponents.forEach(restrictedComponent -> {
            final ComponentAuthorizable restrictedComponentAuthorizable = lookup
                    .getConfigurableComponent(restrictedComponent);
            authorizeRestrictions(authorizer, restrictedComponentAuthorizable);
        });
    }, () -> {
        // Step 3: Verify that all components in the snapshot exist on all nodes
        // Step 4: Verify that Process Group is already under version control. If not, must start Version Control instead of updating flow
        // Step 5: Verify that Process Group is not 'dirty'
        serviceFacade.verifyCanUpdate(groupId, flowSnapshot, false, true);
    }, (revision, wrapper) -> {
        final String idGenerationSeed = getIdGenerationSeed().orElse(null);

        // Create an asynchronous request that will occur in the background, because this request may
        // result in stopping components, which can take an indeterminate amount of time.
        final String requestId = UUID.randomUUID().toString();
        final AsynchronousWebRequest<VersionControlInformationEntity> request = new StandardAsynchronousWebRequest<>(
                requestId, groupId, user, "Stopping Affected Processors");

        // Submit the request to be performed in the background
        final Consumer<AsynchronousWebRequest<VersionControlInformationEntity>> updateTask = vcur -> {
            try {
                final VersionControlInformationEntity updatedVersionControlEntity = updateFlowVersion(groupId,
                        wrapper.getComponentLifecycle(), wrapper.getExampleUri(),
                        wrapper.getAffectedComponents(), wrapper.isReplicateRequest(), revision,
                        wrapper.getVersionControlInformationEntity(), wrapper.getFlowSnapshot(), request,
                        idGenerationSeed, true, true);

                vcur.markComplete(updatedVersionControlEntity);
            } catch (final ResumeFlowException rfe) {
                // Treat ResumeFlowException differently because we don't want to include a message that we couldn't update the flow
                // since in this case the flow was successfully updated - we just couldn't re-enable the components.
                logger.error(rfe.getMessage(), rfe);
                vcur.setFailureReason(rfe.getMessage());
            } catch (final Exception e) {
                logger.error("Failed to update flow to new version", e);
                vcur.setFailureReason("Failed to update flow to new version due to " + e);
            }
        };

        requestManager.submitRequest("update-requests", requestId, request, updateTask);

        // Generate the response.
        final VersionedFlowUpdateRequestDTO updateRequestDto = new VersionedFlowUpdateRequestDTO();
        updateRequestDto.setComplete(request.isComplete());
        updateRequestDto.setFailureReason(request.getFailureReason());
        updateRequestDto.setLastUpdated(request.getLastUpdated());
        updateRequestDto.setProcessGroupId(groupId);
        updateRequestDto.setRequestId(requestId);
        updateRequestDto.setUri(generateResourceUri("versions", "update-requests", requestId));
        updateRequestDto.setPercentCompleted(request.getPercentComplete());
        updateRequestDto.setState(request.getState());

        final VersionedFlowUpdateRequestEntity updateRequestEntity = new VersionedFlowUpdateRequestEntity();
        final RevisionDTO groupRevision = serviceFacade.getProcessGroup(groupId).getRevision();
        updateRequestEntity.setProcessGroupRevision(groupRevision);
        updateRequestEntity.setRequest(updateRequestDto);

        return generateOkResponse(updateRequestEntity).build();
    });
}
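
Inside the withWriteLock callback, forEach applies an authorization check to every restricted component discovered in the snapshot, so a single denial aborts the request before anything is mutated. The same check-each-element gate in miniature, with a stand-in authorizer that throws on denial:

import java.util.Set;
import java.util.function.Consumer;

public class AuthorizeEachSketch {
    public static void authorizeAll(Set<String> restrictedComponentIds,
                                    Consumer<String> authorize) {
        // authorize is expected to throw on denial, aborting the whole request.
        restrictedComponentIds.forEach(authorize);
    }

    public static void main(String[] args) {
        try {
            authorizeAll(Set.of("proc-1", "svc-2"), id -> {
                if (id.startsWith("svc")) {
                    throw new SecurityException("not authorized for " + id);
                }
                System.out.println("authorized " + id);
            });
        } catch (SecurityException denied) {
            System.out.println("aborted: " + denied.getMessage());
        }
    }
}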