Example usage for java.util Set forEach

List of usage examples for java.util Set forEach

Introduction

On this page you can find usage examples for java.util Set forEach.

Prototype

default void forEach(Consumer<? super T> action) 

Document

Performs the given action for each element of the Iterable until all elements have been processed or the action throws an exception.
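
A minimal sketch of the basic pattern (hypothetical names and values): Set inherits this default method from java.lang.Iterable, so the given Consumer runs once per element; for a HashSet the iteration order is unspecified.

Set<String> names = new HashSet<>(Arrays.asList("alice", "bob"));
// The lambda below is the Consumer<? super String> action.
names.forEach(name -> System.out.println("Hello, " + name));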

Usage

From source file:com.redhat.red.offliner.Main.java

/**
 * Sort through the given paths from the {@link ArtifactList} and match up each non-checksum path with <b>BOTH</b>
 * of its associated checksum paths (sha and md5). If one or both are missing, add them to the paths list.
 *
 * @param paths the list of artifact paths to patch in place
 */
protected void patchPaths(List<String> paths) {
    Logger logger = LoggerFactory.getLogger(getClass());

    Set<String> nonChecksum = new HashSet<>();
    Map<String, String> pathToSha = new HashMap<>();
    Map<String, String> pathToMd5 = new HashMap<>();
    paths.forEach((path) -> {
        if (path.endsWith(SHA_SUFFIX)) {
            String base = path.substring(0, path.length() - SHA_SUFFIX.length());
            logger.trace("For base path: '{}', found SHA checksum: '{}'", base, path);
            pathToSha.put(base, path);
        } else if (path.endsWith(MD5_SUFFIX)) {
            String base = path.substring(0, path.length() - MD5_SUFFIX.length());
            logger.trace("For base path: '{}', found MD-5 checksum: '{}'", base, path);
            pathToMd5.put(base, path);
        } else {
            logger.trace("Found base path: '{}'", path);
            nonChecksum.add(path);
        }
    });

    nonChecksum.forEach((path) -> {
        logger.trace("Checking for checksum paths associated with: '{}'", path);

        logger.trace("In SHA1 mappings:\n\n{}", pathToSha);
        if (!pathToSha.containsKey(path)) {
            String sha = path + SHA_SUFFIX;
            logger.trace("PATCH: Adding sha file: '{}'", sha);
            paths.add(sha);
        }

        logger.trace("In MD5 mappings:\n\n{}", pathToMd5);
        if (!pathToMd5.containsKey(path)) {
            String md5 = path + MD5_SUFFIX;
            logger.trace("PATCH: Adding md5 file: '{}'", md5);
            paths.add(md5);
        }
    });
}
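
Note that the second forEach above appends to paths while iterating nonChecksum, a different collection; mutating the very collection being iterated from inside forEach typically fails fast. A minimal sketch of the distinction (hypothetical values):

Set<String> bases = new HashSet<>(Arrays.asList("a.jar", "b.jar"));
List<String> patched = new ArrayList<>();
// Safe: the action mutates a different collection than the one being iterated.
bases.forEach(p -> patched.add(p + ".sha1"));
// By contrast, bases.forEach(bases::remove) would typically throw
// ConcurrentModificationException on a HashSet.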

From source file:org.socraticgrid.hl7.ucs.nifi.processor.UCSValidateMessage.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    final List<FlowFile> flowFiles = session.get(1);
    if (flowFiles.isEmpty()) {
        return;
    }

    final ProcessorLog logger = getLogger();

    for (FlowFile flowFile : flowFiles) {
        final ObjectHolder<Throwable> errorHolder = new ObjectHolder<>(null);
        final ObjectHolder<MessageWrapper> messageWrapperHolder = new ObjectHolder<>(null);

        session.read(flowFile, (final InputStream rawIn) -> {
            try {
                messageWrapperHolder.set(MessageSerializer.deserializeMessageWrapper(rawIn));
            } catch (MessageSerializationException ex) {
                errorHolder.set(new RuntimeException(
                        "Error deserializing FlowFile content into a MessageWrapper instance. Routing to FAILURE",
                        ex));
            }
        });

        if (errorHolder.get() != null) {
            logger.error(errorHolder.get().getMessage(), errorHolder.get().getCause());
            UCSCreateException.routeFlowFileToException(context, session, logger, flowFile, REL_FAILURE, null,
                    "Error deserializing FlowFile: " + errorHolder.get().getCause(),
                    ExceptionType.InvalidMessage, null, null);
            continue;
        }

        final Message message = messageWrapperHolder.get().getMessage();

        //Check that all the recipients have an id. If one doesn't, create a new one.
        for (Recipient r : message.getHeader().getRecipientsList()) {
            //if the recipient doesn't have an id, then create one
            if (r.getRecipientId() == null) {
                r.setRecipientId(UUID.randomUUID().toString());
            }

        }

        UCSController ucsService = context.getProperty(UCS_CONTROLLER_SERVICE)
                .asControllerService(UCSController.class);

        Set<Message> messages = UCSValidateMessage.collectNestedMessages(message);
        messages.add(message);

        //Assert that the conversation Id of the message is known in UCS.
        if (!StringUtils.isEmpty(message.getHeader().getRelatedConversationId()) && !ucsService
                .getConversationById(message.getHeader().getRelatedConversationId()).isPresent()) {
            logger.debug("Failing because of unknown conversation id:{} ",
                    new Object[] { message.getHeader().getRelatedConversationId() });
            UCSCreateException.routeFlowFileToException(context, session, logger, flowFile, REL_FAILURE, null,
                    "Unknown Conversation Id: " + message.getHeader().getRelatedConversationId(),
                    ExceptionType.InvalidConversation, null, null);
            continue;
        }

        //Check for duplicated message ids.
        //First, check for duplicated ids inside the same message.
        //This is not so easy to implement because messages could have a
        //null id.
        Set<Message> duplicatedMessages = messages.stream().filter(m -> m.getHeader().getMessageId() != null)
                .collect(Collectors.groupingBy(m -> m.getHeader().getMessageId())).entrySet().stream()
                .filter(e -> e.getValue().size() > 1).flatMap(e -> e.getValue().stream())
                .collect(Collectors.toSet());

        //Also include messages whose ids already exist in UCS.
        messages.stream().filter(m -> m.getHeader().getMessageId() != null)
                .filter(m -> ucsService.getMessageById(m.getHeader().getMessageId()).isPresent())
                .forEach(duplicatedMessages::add);

        //process duplicated ids.
        if (!duplicatedMessages.isEmpty()) {
            logger.debug("Duplicated Message Ids found: {}", new Object[] { duplicatedMessages.stream()
                    .map(m -> m.getHeader().getMessageId()).collect(Collectors.joining(",")) });

            if (context.getProperty(ON_DUPLICATED_MESSAGE_ID).getValue().equalsIgnoreCase(UPDATE_ID)) {
                logger.debug("Updating Messages Ids.");
                duplicatedMessages.forEach(m -> m.getHeader().setMessageId(UUID.randomUUID().toString()));
            } else {
                logger.debug("Failing because of duplicated messages ids");
                UCSCreateException.routeFlowFileToException(context, session, logger, flowFile, REL_FAILURE,
                        null,
                        "Duplicated Message Ids found: " + duplicatedMessages.stream()
                                .map(m -> m.getHeader().getMessageId()).collect(Collectors.joining(",")),
                        ExceptionType.InvalidMessage, null, null);
                continue;
            }
        }

        //If the Message is an AlertMessage, then change its alertStatus
        //to 'Pending'
        if (message instanceof AlertMessage) {
            ((AlertMessage) message).getHeader().setAlertStatus(AlertStatus.Pending);
        }

        //Write the message back to the FlowFile (in case it has been modified)
        flowFile = session.putAttribute(flowFile, VALID_ATTRIBUTE_KEY, "true");
        session.getProvenanceReporter().modifyAttributes(flowFile);

        flowFile = session.write(flowFile, new OutputStreamCallback() {
            @Override
            public void process(OutputStream out) throws IOException {
                try {
                    out.write(
                            MessageSerializer.serializeMessageWrapper(new MessageWrapper(message)).getBytes());
                } catch (MessageSerializationException ex) {
                    errorHolder.set(ex);
                }
                out.flush();
            }
        });

        if (errorHolder.get() != null) {
            logger.error(errorHolder.get().getMessage(), errorHolder.get().getCause());
            UCSCreateException.routeFlowFileToException(context, session, logger, flowFile, REL_FAILURE, null,
                    "Error deserializing FlowFile: " + errorHolder.get().getCause(),
                    ExceptionType.InvalidMessage, null, null);
            continue;
        }

        logger.debug("Message validated. Routing message {} to {}.",
                new Object[] { flowFile, REL_SUCCESS.getName() });
        session.transfer(flowFile, REL_SUCCESS);
        session.getProvenanceReporter().route(flowFile, REL_SUCCESS);
    }
}
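
The duplicate-id detection above pairs a stream groupingBy with forEach-based consumption. A self-contained sketch of the same idiom on plain strings (hypothetical data):

List<String> ids = Arrays.asList("a", "b", "a", null, "b");
Set<String> duplicated = ids.stream()
        .filter(Objects::nonNull)
        .collect(Collectors.groupingBy(id -> id))
        .entrySet().stream()
        .filter(e -> e.getValue().size() > 1)
        .flatMap(e -> e.getValue().stream())
        .collect(Collectors.toSet());
// duplicated is now {"a", "b"}
duplicated.forEach(System.out::println);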

From source file:org.openecomp.sdc.translator.services.heattotosca.HeatToToscaUtil.java

static FileDataCollection getFileCollectionsByFilter(List<FileData> fileDataList, Set<FileData.Type> typeFilter,
        TranslationContext translationContext) {
    FileDataCollection fileDataCollection = new FileDataCollection();
    Map<String, FileData> filteredFiles = filterFileDataListByType(fileDataList, typeFilter);
    Set<String> referenced = new HashSet<>();
    List<String> filenames = extractFilenamesFromFileDataList(filteredFiles.values());

    for (FileData fileData : filteredFiles.values()) {
        String fileName = fileData.getFile();

        if (FileData.isHeatFile(fileData.getType())) {
            if (fileData.getBase() != null && fileData.getBase().equals(true)) {
                fileDataCollection.addBaseFiles(fileData);
            }
            HeatOrchestrationTemplate heatOrchestrationTemplate = new YamlUtil()
                    .yamlToObject(translationContext.getFileContent(fileName), HeatOrchestrationTemplate.class);
            for (Resource resource : heatOrchestrationTemplate.getResources().values()) {
                if (filenames.contains(resource.getType())) {
                    handleNestedFile(translationContext, fileDataCollection, filteredFiles, referenced,
                            resource.getType());
                } else if (resource.getType()
                        .equals(HeatResourcesTypes.RESOURCE_GROUP_RESOURCE_TYPE.getHeatResource())) {
                    Object resourceDef = resource.getProperties().get(HeatConstants.RESOURCE_DEF_PROPERTY_NAME);
                    Object innerTypeDef = ((Map) resourceDef).get("type");
                    if (innerTypeDef instanceof String) {
                        String internalResourceType = (String) innerTypeDef;
                        if (filenames.contains(internalResourceType)) {
                            handleNestedFile(translationContext, fileDataCollection, filteredFiles, referenced,
                                    internalResourceType);
                        }
                    }
                }
            }

        } else {
            fileDataCollection.addArtifactFiles(fileData);
            filteredFiles.remove(fileData.getFile());
        }
    }

    referenced.forEach(filteredFiles::remove);
    if (!CollectionUtils.isEmpty(fileDataCollection.getBaseFile())) {
        for (FileData fileData : fileDataCollection.getBaseFile()) {
            filteredFiles.remove(fileData.getFile());
        }
    }
    fileDataCollection.setAddOnFiles(filteredFiles.values());
    return fileDataCollection;
}
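
The line referenced.forEach(filteredFiles::remove) binds Map.remove as the Consumer: each referenced name is removed from the map by key. A sketch of the equivalent without forEach (hypothetical values):

Map<String, String> filtered = new HashMap<>();
filtered.put("a.yaml", "contents-a");
filtered.put("b.yaml", "contents-b");
Set<String> referenced = new HashSet<>(Collections.singleton("a.yaml"));
// Equivalent to referenced.forEach(filtered::remove):
filtered.keySet().removeAll(referenced);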

From source file:org.hyperledger.fabric.sdk.ServiceDiscovery.java

SDNetwork fullNetworkDiscovery(boolean force) {
    if (channel.isShutdown()) {
        return null;
    }
    logger.trace(format("Full network discovery force: %b", force));
    try {
        SDNetwork osdNetwork = sdNetwork;
        SDNetwork lsdNetwork = networkDiscovery(transactionContext.retryTransactionSameContext(), force);
        if (channel.isShutdown() || null == lsdNetwork) {
            return null;
        }

        if (osdNetwork != lsdNetwork) { // means it changed.
            final Set<String> chaincodesNames = lsdNetwork.getChaincodesNames();
            List<List<ServiceDiscoveryChaincodeCalls>> lcc = new LinkedList<>();
            chaincodesNames.forEach(s -> {
                List<ServiceDiscoveryChaincodeCalls> lc = new LinkedList<>();
                lc.add(new ServiceDiscoveryChaincodeCalls(s));
                lcc.add(lc);
            });
            chaindcodeMap = discoverEndorserEndpoints(transactionContext.retryTransactionSameContext(), lcc);
            if (channel.isShutdown()) {
                return null;
            }

            channel.sdUpdate(lsdNetwork);
        }

        return lsdNetwork;

    } catch (Exception e) {
        logger.warn("Service discovery got error:" + e.getMessage(), e);
    } finally {
        logger.trace("Full network rediscovery completed.");
    }
    return null;
}
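
The forEach over chaincodesNames populates lcc by side effect on a captured list. An equivalent stream-based sketch using the same types the example already uses:

List<List<ServiceDiscoveryChaincodeCalls>> lcc = chaincodesNames.stream()
        .map(s -> {
            List<ServiceDiscoveryChaincodeCalls> lc = new LinkedList<>();
            lc.add(new ServiceDiscoveryChaincodeCalls(s));
            return lc;
        })
        .collect(Collectors.toList());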

From source file:org.onosproject.t3.impl.TroubleshootManager.java

/**
 * If the initial packet comes tagged with a VLAN, we output it with that VLAN to ONOS.
 * If ONOS applied a VLAN, we remove it.
 *
 * @param outputPath the output
 * @param trace      the trace we are building
 */
private void handleVlanToController(GroupsInDevice outputPath, StaticPacketTrace trace) {

    VlanIdCriterion initialVid = (VlanIdCriterion) trace.getInitialPacket()
            .getCriterion(Criterion.Type.VLAN_VID);
    VlanIdCriterion finalVid = (VlanIdCriterion) outputPath.getFinalPacket()
            .getCriterion(Criterion.Type.VLAN_VID);

    if (initialVid != null && !initialVid.equals(finalVid) && initialVid.vlanId().equals(VlanId.NONE)) {

        Set<Criterion> finalCriteria = new HashSet<>(outputPath.getFinalPacket().criteria());
        //removing the final vlanId
        finalCriteria.remove(finalVid);
        Builder packetUpdated = DefaultTrafficSelector.builder();
        finalCriteria.forEach(packetUpdated::add);
        //Initial was none so we set it to that
        packetUpdated.add(Criteria.matchVlanId(VlanId.NONE));
        //Update final packet
        outputPath.setFinalPacket(packetUpdated.build());
    }
}
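
Here finalCriteria.forEach(packetUpdated::add) copies every remaining criterion into the selector builder, the bound method reference serving as the Consumer. A minimal JDK-only sketch of the same copy-into-accumulator shape (hypothetical values):

Set<String> parts = new HashSet<>(Arrays.asList("a", "b"));
StringBuilder sb = new StringBuilder();
// Each element is handed to sb.append; order is unspecified for a HashSet.
parts.forEach(sb::append);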

From source file:org.commonjava.indy.pkg.maven.content.MavenMetadataGenerator.java

private MetadataIncrementalResult mergeMissing(final Group group,
        final MetadataIncrementalResult incrementalResult, final String toMergePath, String description,
        BiFunction<ArtifactStore, String, Callable<MetadataResult>> func) throws IndyWorkflowException {
    Set<ArtifactStore> missing = incrementalResult.missing;
    Metadata master = incrementalResult.result;

    // TODO: This should be the outer wrapper for download- or generate-specific behavior.
    logger.debug("Download missing member metadata for {}, missing: {}, size: {}", group.getKey(), missing,
            missing.size());

    DrainingExecutorCompletionService<MetadataResult> svc = new DrainingExecutorCompletionService<>(
            mavenMDGeneratorService);

    detectOverloadVoid(() -> missing.forEach(store -> svc.submit(func.apply(store, toMergePath))));

    Set<ArtifactStore> resultingMissing = new HashSet<>(); // stores whose download failed, returned to the caller
    Set<StoreKey> included = new HashSet<>();
    try {
        svc.drain(mr -> {
            if (mr != null) {
                if (mr.missing) {
                    resultingMissing.add(mr.store);
                } else {
                    included.add(mr.store.getKey());
                    merger.merge(master, mr.metadata, group, toMergePath);
                    putToMetadataCache(mr.store.getKey(), toMergePath, new MetadataInfo(mr.metadata));
                }
            }
        });
    } catch (InterruptedException e) {
        logger.debug("Interrupted while merging " + description + " member metadata.");
    } catch (ExecutionException e) {
        throw new IndyWorkflowException("Failed to merge downloaded " + description + " member metadata.", e);
    }

    return new MetadataIncrementalResult(resultingMissing, included, master);
}
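
The call missing.forEach(store -> svc.submit(func.apply(store, toMergePath))) treats the Set as a work list: one submitted task per element. A minimal sketch of the same shape with a plain ExecutorCompletionService (the task body is hypothetical):

Set<String> stores = new HashSet<>(Arrays.asList("s1", "s2"));
ExecutorService pool = Executors.newFixedThreadPool(2);
CompletionService<String> svc = new ExecutorCompletionService<>(pool);
// One Callable per set element.
stores.forEach(store -> svc.submit(() -> "metadata for " + store));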

From source file:org.apache.usergrid.corepersistence.asyncevents.AsyncEventServiceImpl.java

/**
 *     This method will call initialize for each message; since we are caching the entity indexes,
 *     we don't worry about aggregating by app id.
 * @param indexOperationMessage
 */
private void initializeEntityIndexes(final IndexOperationMessage indexOperationMessage) {

    // create a set so we can have a unique list of appIds for which we call createEntityIndex
    Set<UUID> appIds = new HashSet<>();

    // loop through all indexRequests and add the appIds to the set
    indexOperationMessage.getIndexRequests().forEach(req -> {
        UUID appId = IndexingUtils.getApplicationIdFromIndexDocId(req.documentId);
        appIds.add(appId);
    });

    // loop through all deindexRequests and add the appIds to the set
    indexOperationMessage.getDeIndexRequests().forEach(req -> {
        UUID appId = IndexingUtils.getApplicationIdFromIndexDocId(req.documentId);
        appIds.add(appId);
    });

    // for each of the appIds in the unique set, call create entity index to ensure the aliases are created
    appIds.forEach(appId -> {
        ApplicationScope appScope = CpNamingUtils.getApplicationScope(appId);
        entityIndexFactory.createEntityIndex(indexLocationStrategyFactory.getIndexLocationStrategy(appScope));
    });
}
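
The two forEach loops feeding appIds, followed by the forEach over the set itself, form a "dedupe via Set, then act once per unique id" pattern. A self-contained sketch of the same shape (all names and data hypothetical):

List<String> indexDocIds = Arrays.asList("app1/doc1", "app1/doc2", "app2/doc3");
Set<String> appIds = new HashSet<>();
// Adding to a Set silently drops duplicates.
indexDocIds.forEach(docId -> appIds.add(docId.substring(0, docId.indexOf('/'))));
// Each unique app id is visited exactly once.
appIds.forEach(appId -> System.out.println("initializing index for " + appId));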

From source file:io.druid.indexing.common.task.AppenderatorDriverRealtimeIndexTaskTest.java

private void makeToolboxFactory(final File directory) {
    taskStorage = new HeapMemoryTaskStorage(new TaskStorageConfig(null));
    taskLockbox = new TaskLockbox(taskStorage);

    publishedSegments = new CopyOnWriteArrayList<>();

    ObjectMapper mapper = new DefaultObjectMapper();
    mapper.registerSubtypes(LinearShardSpec.class);
    mapper.registerSubtypes(NumberedShardSpec.class);
    IndexerSQLMetadataStorageCoordinator mdc = new IndexerSQLMetadataStorageCoordinator(mapper,
            derbyConnectorRule.metadataTablesConfigSupplier().get(), derbyConnectorRule.getConnector()) {
        @Override
        public Set<DataSegment> announceHistoricalSegments(Set<DataSegment> segments) throws IOException {
            Set<DataSegment> result = super.announceHistoricalSegments(segments);

            Assert.assertFalse("Segment latch not initialized, did you forget to call expectPublishSegments?",
                    segmentLatch == null);

            publishedSegments.addAll(result);
            segments.forEach(s -> segmentLatch.countDown());

            return result;
        }

        @Override
        public SegmentPublishResult announceHistoricalSegments(Set<DataSegment> segments,
                DataSourceMetadata startMetadata, DataSourceMetadata endMetadata) throws IOException {
            SegmentPublishResult result = super.announceHistoricalSegments(segments, startMetadata,
                    endMetadata);

            Assert.assertFalse("Segment latch not initialized, did you forget to call expectPublishSegments?",
                    segmentLatch == null);

            publishedSegments.addAll(result.getSegments());
            result.getSegments().forEach(s -> segmentLatch.countDown());

            return result;
        }
    };
    final TaskConfig taskConfig = new TaskConfig(directory.getPath(), null, null, 50000, null, false, null,
            null);

    final TaskActionToolbox taskActionToolbox = new TaskActionToolbox(taskLockbox, mdc, emitter,
            EasyMock.createMock(SupervisorManager.class));
    final TaskActionClientFactory taskActionClientFactory = new LocalTaskActionClientFactory(taskStorage,
            taskActionToolbox);
    IntervalChunkingQueryRunnerDecorator queryRunnerDecorator = new IntervalChunkingQueryRunnerDecorator(null,
            null, null) {
        @Override
        public <T> QueryRunner<T> decorate(QueryRunner<T> delegate,
                QueryToolChest<T, ? extends Query<T>> toolChest) {
            return delegate;
        }
    };
    final QueryRunnerFactoryConglomerate conglomerate = new DefaultQueryRunnerFactoryConglomerate(
            ImmutableMap.<Class<? extends Query>, QueryRunnerFactory>of(TimeseriesQuery.class,
                    new TimeseriesQueryRunnerFactory(new TimeseriesQueryQueryToolChest(queryRunnerDecorator),
                            new TimeseriesQueryEngine(), new QueryWatcher() {
                                @Override
                                public void registerQuery(Query query, ListenableFuture future) {
                                    // do nothing
                                }
                            })));
    handOffCallbacks = new ConcurrentHashMap<>();
    final SegmentHandoffNotifierFactory handoffNotifierFactory = new SegmentHandoffNotifierFactory() {
        @Override
        public SegmentHandoffNotifier createSegmentHandoffNotifier(String dataSource) {
            return new SegmentHandoffNotifier() {
                @Override
                public boolean registerSegmentHandoffCallback(SegmentDescriptor descriptor, Executor exec,
                        Runnable handOffRunnable) {
                    handOffCallbacks.put(descriptor, new Pair<>(exec, handOffRunnable));
                    handoffLatch.countDown();
                    return true;
                }

                @Override
                public void start() {
                    //Noop
                }

                @Override
                public void close() {
                    //Noop
                }

            };
        }
    };
    final TestUtils testUtils = new TestUtils();
    rowIngestionMetersFactory = testUtils.getRowIngestionMetersFactory();
    SegmentLoaderConfig segmentLoaderConfig = new SegmentLoaderConfig() {
        @Override
        public List<StorageLocationConfig> getLocations() {
            return Lists.newArrayList();
        }
    };

    taskToolboxFactory = new TaskToolboxFactory(taskConfig, taskActionClientFactory, emitter,
            new TestDataSegmentPusher(), new TestDataSegmentKiller(), null, // DataSegmentMover
            null, // DataSegmentArchiver
            new TestDataSegmentAnnouncer(), EasyMock.createNiceMock(DataSegmentServerAnnouncer.class),
            handoffNotifierFactory, () -> conglomerate, MoreExecutors.sameThreadExecutor(), // queryExecutorService
            EasyMock.createMock(MonitorScheduler.class),
            new SegmentLoaderFactory(new SegmentLoaderLocalCacheManager(null, segmentLoaderConfig,
                    testUtils.getTestObjectMapper())),
            testUtils.getTestObjectMapper(), testUtils.getTestIndexIO(), MapCache.create(1024),
            new CacheConfig(), testUtils.getTestIndexMergerV9(),
            EasyMock.createNiceMock(DruidNodeAnnouncer.class), EasyMock.createNiceMock(DruidNode.class),
            new LookupNodeService("tier"), new DataNodeService("tier", 1000, ServerType.INDEXER_EXECUTOR, 0),
            new TaskReportFileWriter(reportsFile));
}
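
In both overrides above, segments.forEach(s -> segmentLatch.countDown()) ignores the element itself and simply counts the latch down once per published segment. A minimal sketch of the same latch pattern (hypothetical values):

Set<String> segments = new HashSet<>(Arrays.asList("seg1", "seg2"));
CountDownLatch latch = new CountDownLatch(segments.size());
// The element is unused; forEach only drives one countDown per element.
segments.forEach(s -> latch.countDown());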

From source file:com.haulmont.cuba.core.app.dynamicattributes.DynamicAttributesManager.java

@Override
public <E extends BaseGenericIdEntity> void fetchDynamicAttributes(List<E> entities,
        @Nonnull Set<Class> dependentClasses) {
    Set<BaseGenericIdEntity> toProcess = new HashSet<>();
    entities.forEach(entity -> {
        toProcess.add(entity);
        if (!dependentClasses.isEmpty()) {
            metadata.getTools().traverseAttributes(entity, new EntityAttributeVisitor() {
                @Override
                public void visit(Entity dependentEntity, MetaProperty property) {
                    if (dependentEntity instanceof BaseGenericIdEntity) {
                        toProcess.add((BaseGenericIdEntity) dependentEntity);
                    }
                }

                @Override
                public boolean skip(MetaProperty property) {
                    return metadata.getTools().isPersistent(property) && property.getRange().isClass()
                            && dependentClasses.contains(property.getJavaType());
                }
            });
        }
    });
    if (toProcess.isEmpty())
        return;

    try (Transaction tx = persistence.getTransaction()) {
        Multimap<String, BaseGenericIdEntity> entitiesByType = HashMultimap.create();
        toProcess.forEach(e -> entitiesByType.put(e.getMetaClass().getName(), e));
        entitiesByType.keySet().forEach(entityType -> {
            MetaClass metaClass = metadata.getExtendedEntities()
                    .getOriginalOrThisMetaClass(metadata.getClass(entityType));
            doFetchDynamicAttributes(metaClass, entitiesByType.get(entityType));
        });
        tx.commit();
    }
}
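
The Guava Multimap above groups entities by meta class name before the keySet().forEach pass. A JDK-only sketch of the same grouping shape (hypothetical data):

Map<String, List<String>> byType = Stream.of("Customer:1", "Order:7", "Customer:2")
        .collect(Collectors.groupingBy(s -> s.substring(0, s.indexOf(':'))));
// Map.forEach hands each type and its group to a BiConsumer,
// mirroring the entitiesByType.keySet().forEach(...) pass above.
byType.forEach((type, group) -> System.out.println(type + " -> " + group));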

From source file:org.hyperledger.fabric.sdk.NetworkConfig.java

/**
 * Returns a channel configured using the details in the Network Configuration file
 *
 * @param client      The associated client
 * @param channelName The name of the channel
 * @return A configured Channel instance
 */
Channel loadChannel(HFClient client, String channelName) throws NetworkConfigurationException {

    if (logger.isTraceEnabled()) {
        logger.trace(format("NetworkConfig.loadChannel: %s", channelName));
    }

    Channel channel = null;

    JsonObject channels = getJsonObject(jsonConfig, "channels");

    if (channels != null) {
        JsonObject jsonChannel = getJsonObject(channels, channelName);
        if (jsonChannel != null) {
            channel = client.getChannel(channelName);
            if (channel != null) {
                // The channel already exists in the client!
                // Note that by rights this should never happen as HFClient.loadChannelFromConfig should have already checked for this!
                throw new NetworkConfigurationException(
                        format("Channel %s is already configured in the client!", channelName));
            }
            channel = reconstructChannel(client, channelName, jsonChannel);
        } else {

            final Set<String> channelNames = getChannelNames();
            if (channelNames.isEmpty()) {
                throw new NetworkConfigurationException("Channel configuration has no channels defined.");
            }
            final StringBuilder sb = new StringBuilder(1000);

            channelNames.forEach(s -> {
                if (sb.length() != 0) {
                    sb.append(", ");
                }
                sb.append(s);
            });
            throw new NetworkConfigurationException(
                    format("Channel %s not found in configuration file. Found channel names: %s ", channelName,
                            sb.toString()));

        }

    } else {
        throw new NetworkConfigurationException("Channel configuration has no channels defined.");
    }

    return channel;
}
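
The StringBuilder-plus-forEach loop that assembles the channel-name list predates the simpler alternative already in the JDK: String.join accepts any Iterable of CharSequence and produces the same comma-separated output in one call.

// Equivalent to the forEach/StringBuilder loop above:
String found = String.join(", ", channelNames);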