List of usage examples for com.google.common.collect Maps newConcurrentMap
public static <K, V> ConcurrentMap<K, V> newConcurrentMap()
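Maps.newConcurrentMap() returns an empty, general-purpose ConcurrentMap (a ConcurrentHashMap in current Guava versions); its main convenience is type-argument inference on older Java source levels. A minimal self-contained sketch of typical use — the counter and thread setup here are illustrative, not taken from the examples below:

import com.google.common.collect.Maps;

import java.util.concurrent.ConcurrentMap;

public class NewConcurrentMapDemo {
    public static void main(String[] args) throws InterruptedException {
        // Equivalent to new ConcurrentHashMap<String, Integer>(), with inferred type arguments.
        final ConcurrentMap<String, Integer> counts = Maps.newConcurrentMap();

        // Safe to mutate from several threads without external locking.
        Runnable worker = () -> counts.merge("hits", 1, Integer::sum);
        Thread t1 = new Thread(worker);
        Thread t2 = new Thread(worker);
        t1.start();
        t2.start();
        t1.join();
        t2.join();

        System.out.println(counts.get("hits")); // prints 2
    }
}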
From source file:org.n52.lod.csw.CSWLoDEnabler.java
protected Map<String, GetRecordByIdResponseDocument> retrieveRecordsThreaded(int startPos, int maxRecords,
        long recordsInTotal) {
    log.info("Retrieve {} records, starting from {} of {}", maxRecords, startPos, recordsInTotal);

    // one thread for getting ids
    List<String> recordIdList = getRecordIds(startPos, maxRecords);

    // many threads getting record descriptions
    final Map<String, GetRecordByIdResponseDocument> recordDescriptions = Maps.newConcurrentMap();
    ListeningExecutorService executorService = MoreExecutors
            .listeningDecorator(Executors.newFixedThreadPool(maxRecords));

    for (String id : recordIdList) {
        final String recordId = id;
        log.debug("Adding {} to the model", recordId);

        CallableRecordDescription c = new CallableRecordDescription(id, csw);
        ListenableFuture<GetRecordByIdResponseDocument> responseFuture = executorService.submit(c);

        Futures.addCallback(responseFuture, new FutureCallback<GetRecordByIdResponseDocument>() {

            private final Logger logger = LoggerFactory.getLogger("Record Downloader");

            @Override
            public void onFailure(Throwable t) {
                logger.error("Error retrieving and parsing record {}", recordId, t);
                report.retrievalIssues.put(recordId, t);
            }

            @Override
            public void onSuccess(GetRecordByIdResponseDocument result) {
                logger.trace("SUCCESS with {}", result);
                recordDescriptions.put(recordId, result);
                report.added++;
                report.addedIds.add(recordId);
            }
        });
    }

    executorService.shutdown();
    while (!executorService.isTerminated()) {
        try {
            executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e) {
            log.error("Could not await termination", e);
        }
    }

    log.info("Done with requests and parsing, have {} GetRecordById documents.", recordDescriptions.size());
    return recordDescriptions;
}
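The example above fans work out on a listening executor and lets each callback write its result into a map from Maps.newConcurrentMap(), which matters because callbacks may fire on arbitrary threads. Below is a stripped-down sketch of the same pattern; fetch(id) is a hypothetical stand-in for the CSW record lookup, and the Executor argument to Futures.addCallback is required by recent Guava versions (older versions had a two-argument overload):

import com.google.common.collect.Maps;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.Arrays;
import java.util.Map;
import java.util.concurrent.Executors;

public class AsyncCollectSketch {
    // Hypothetical stand-in for the CSW record lookup in the example above.
    static String fetch(String id) {
        return "record-" + id;
    }

    public static void main(String[] args) {
        final Map<String, String> results = Maps.newConcurrentMap();
        ListeningExecutorService pool = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));

        for (final String id : Arrays.asList("a", "b", "c")) {
            ListenableFuture<String> f = pool.submit(() -> fetch(id));
            Futures.addCallback(f, new FutureCallback<String>() {
                @Override
                public void onSuccess(String value) {
                    results.put(id, value); // concurrent map: callbacks may run on any thread
                }

                @Override
                public void onFailure(Throwable t) {
                    // record the failure keyed by id, mirroring report.retrievalIssues above
                }
            }, MoreExecutors.directExecutor());
        }

        pool.shutdown();
    }
}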
From source file:org.apache.twill.yarn.YarnTwillRunnerService.java
private Cancellable watchLiveApps() {
    final Map<String, Cancellable> watched = Maps.newConcurrentMap();

    final AtomicBoolean cancelled = new AtomicBoolean(false);
    // Watch child changes in the root, which gives all application names.
    final Cancellable cancellable = ZKOperations.watchChildren(zkClientService, "/",
            new ZKOperations.ChildrenCallback() {
                @Override
                public void updated(NodeChildren nodeChildren) {
                    if (cancelled.get()) {
                        return;
                    }

                    Set<String> apps = ImmutableSet.copyOf(nodeChildren.getChildren());

                    // For each of the application names, watch for ephemeral nodes under /instances.
                    for (final String appName : apps) {
                        if (watched.containsKey(appName)) {
                            continue;
                        }

                        final String instancePath = String.format("/%s/instances", appName);
                        watched.put(appName,
                                ZKOperations.watchChildren(zkClientService, instancePath,
                                        new ZKOperations.ChildrenCallback() {
                                            @Override
                                            public void updated(NodeChildren nodeChildren) {
                                                if (cancelled.get()) {
                                                    return;
                                                }
                                                if (nodeChildren.getChildren().isEmpty()) {
                                                    // No more children means no live instances
                                                    Cancellable removed = watched.remove(appName);
                                                    if (removed != null) {
                                                        removed.cancel();
                                                    }
                                                    return;
                                                }
                                                synchronized (YarnTwillRunnerService.this) {
                                                    // For each of the children, whose node name is the runId,
                                                    // fetch the application Id and construct TwillController.
                                                    for (final RunId runId : Iterables.transform(
                                                            nodeChildren.getChildren(), STRING_TO_RUN_ID)) {
                                                        if (controllers.contains(appName, runId)) {
                                                            continue;
                                                        }
                                                        updateController(appName, runId, cancelled);
                                                    }
                                                }
                                            }
                                        }));
                    }

                    // Remove app watches for apps that are gone. Removal of controller from controllers table
                    // is done in the state listener attached to the twill controller.
                    for (String removeApp : Sets.difference(watched.keySet(), apps)) {
                        watched.remove(removeApp).cancel();
                    }
                }
            });

    return new Cancellable() {
        @Override
        public void cancel() {
            cancelled.set(true);
            cancellable.cancel();
            for (Cancellable c : watched.values()) {
                c.cancel();
            }
        }
    };
}
From source file:org.apache.tez.runtime.library.common.shuffle.orderedgrouped.ShuffleScheduler.java
public ShuffleScheduler(InputContext inputContext, Configuration conf, int numberOfInputs,
        ExceptionReporter exceptionReporter, MergeManager mergeManager,
        FetchedInputAllocatorOrderedGrouped allocator, long startTime, CompressionCodec codec,
        boolean ifileReadAhead, int ifileReadAheadLength, String srcNameTrimmed) throws IOException {
    this.inputContext = inputContext;
    this.conf = conf;
    this.exceptionReporter = exceptionReporter;
    this.allocator = allocator;
    this.mergeManager = mergeManager;
    this.numInputs = numberOfInputs;

    int abortFailureLimitConf = conf.getInt(
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_SOURCE_ATTEMPT_ABORT_LIMIT,
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_SOURCE_ATTEMPT_ABORT_LIMIT_DEFAULT);
    if (abortFailureLimitConf <= -1) {
        abortFailureLimit = Math.max(15, numberOfInputs / 10);
    } else {
        // No upper cap, as the user is setting this intentionally
        abortFailureLimit = abortFailureLimitConf;
    }
    remainingMaps = new AtomicInteger(numberOfInputs);
    finishedMaps = new BitSet(numberOfInputs);
    this.ifileReadAhead = ifileReadAhead;
    this.ifileReadAheadLength = ifileReadAheadLength;
    this.srcNameTrimmed = srcNameTrimmed;
    this.codec = codec;
    int configuredNumFetchers = conf.getInt(TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_PARALLEL_COPIES,
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_PARALLEL_COPIES_DEFAULT);
    numFetchers = Math.min(configuredNumFetchers, numInputs);

    localDiskFetchEnabled = conf.getBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_OPTIMIZE_LOCAL_FETCH,
            TezRuntimeConfiguration.TEZ_RUNTIME_OPTIMIZE_LOCAL_FETCH_DEFAULT);

    this.minFailurePerHost = conf.getInt(TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_MIN_FAILURES_PER_HOST,
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_MIN_FAILURES_PER_HOST_DEFAULT);
    Preconditions.checkArgument(minFailurePerHost >= 0,
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_MIN_FAILURES_PER_HOST + "=" + minFailurePerHost
                    + " should not be negative");

    this.hostFailureFraction = conf.getFloat(
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_ACCEPTABLE_HOST_FETCH_FAILURE_FRACTION,
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_ACCEPTABLE_HOST_FETCH_FAILURE_FRACTION_DEFAULT);

    this.maxStallTimeFraction = conf.getFloat(
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_MAX_STALL_TIME_FRACTION,
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_MAX_STALL_TIME_FRACTION_DEFAULT);
    Preconditions.checkArgument(maxStallTimeFraction >= 0,
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_MAX_STALL_TIME_FRACTION + "=" + maxStallTimeFraction
                    + " should not be negative");

    this.minReqProgressFraction = conf.getFloat(
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_MIN_REQUIRED_PROGRESS_FRACTION,
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_MIN_REQUIRED_PROGRESS_FRACTION_DEFAULT);
    Preconditions.checkArgument(minReqProgressFraction >= 0,
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_MIN_REQUIRED_PROGRESS_FRACTION + "="
                    + minReqProgressFraction + " should not be negative");

    this.maxAllowedFailedFetchFraction = conf.getFloat(
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_MAX_ALLOWED_FAILED_FETCH_ATTEMPT_FRACTION,
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_MAX_ALLOWED_FAILED_FETCH_ATTEMPT_FRACTION_DEFAULT);
    Preconditions.checkArgument(maxAllowedFailedFetchFraction >= 0,
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_MAX_ALLOWED_FAILED_FETCH_ATTEMPT_FRACTION + "="
                    + maxAllowedFailedFetchFraction + " should not be negative");

    this.checkFailedFetchSinceLastCompletion = conf.getBoolean(
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_FAILED_CHECK_SINCE_LAST_COMPLETION,
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_FAILED_CHECK_SINCE_LAST_COMPLETION_DEFAULT);

    this.applicationId = inputContext.getApplicationId().toString();
    this.dagId = inputContext.getDagIdentifier();
    this.localHostname = inputContext.getExecutionContext().getHostName();
    String auxiliaryService = conf.get(TezConfiguration.TEZ_AM_SHUFFLE_AUXILIARY_SERVICE_ID,
            TezConfiguration.TEZ_AM_SHUFFLE_AUXILIARY_SERVICE_ID_DEFAULT);
    final ByteBuffer shuffleMetadata = inputContext.getServiceProviderMetaData(auxiliaryService);
    this.shufflePort = ShuffleUtils.deserializeShuffleProviderMetaData(shuffleMetadata);

    this.referee = new Referee();

    // Counters used by the ShuffleScheduler
    this.shuffledInputsCounter = inputContext.getCounters().findCounter(TaskCounter.NUM_SHUFFLED_INPUTS);
    this.reduceShuffleBytes = inputContext.getCounters().findCounter(TaskCounter.SHUFFLE_BYTES);
    this.reduceBytesDecompressed = inputContext.getCounters()
            .findCounter(TaskCounter.SHUFFLE_BYTES_DECOMPRESSED);
    this.failedShuffleCounter = inputContext.getCounters().findCounter(TaskCounter.NUM_FAILED_SHUFFLE_INPUTS);
    this.bytesShuffledToDisk = inputContext.getCounters().findCounter(TaskCounter.SHUFFLE_BYTES_TO_DISK);
    this.bytesShuffledToDiskDirect = inputContext.getCounters()
            .findCounter(TaskCounter.SHUFFLE_BYTES_DISK_DIRECT);
    this.bytesShuffledToMem = inputContext.getCounters().findCounter(TaskCounter.SHUFFLE_BYTES_TO_MEM);

    // Counters used by Fetchers
    ioErrsCounter = inputContext.getCounters().findCounter(SHUFFLE_ERR_GRP_NAME,
            ShuffleErrors.IO_ERROR.toString());
    wrongLengthErrsCounter = inputContext.getCounters().findCounter(SHUFFLE_ERR_GRP_NAME,
            ShuffleErrors.WRONG_LENGTH.toString());
    badIdErrsCounter = inputContext.getCounters().findCounter(SHUFFLE_ERR_GRP_NAME,
            ShuffleErrors.BAD_ID.toString());
    wrongMapErrsCounter = inputContext.getCounters().findCounter(SHUFFLE_ERR_GRP_NAME,
            ShuffleErrors.WRONG_MAP.toString());
    connectionErrsCounter = inputContext.getCounters().findCounter(SHUFFLE_ERR_GRP_NAME,
            ShuffleErrors.CONNECTION.toString());
    wrongReduceErrsCounter = inputContext.getCounters().findCounter(SHUFFLE_ERR_GRP_NAME,
            ShuffleErrors.WRONG_REDUCE.toString());

    this.startTime = startTime;
    this.lastProgressTime = startTime;

    this.sslShuffle = conf.getBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_ENABLE_SSL,
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_ENABLE_SSL_DEFAULT);
    this.asyncHttp = conf.getBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_USE_ASYNC_HTTP, false);
    this.httpConnectionParams = ShuffleUtils.getHttpConnectionParams(conf);
    SecretKey jobTokenSecret = ShuffleUtils
            .getJobTokenSecretFromTokenBytes(inputContext.getServiceConsumerMetaData(auxiliaryService));
    this.jobTokenSecretManager = new JobTokenSecretManager(jobTokenSecret);

    final ExecutorService fetcherRawExecutor;
    if (conf.getBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_FETCHER_USE_SHARED_POOL,
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_FETCHER_USE_SHARED_POOL_DEFAULT)) {
        fetcherRawExecutor = inputContext.createTezFrameworkExecutorService(numFetchers,
                "Fetcher_O {" + srcNameTrimmed + "} #%d");
    } else {
        fetcherRawExecutor = Executors.newFixedThreadPool(numFetchers, new ThreadFactoryBuilder()
                .setDaemon(true).setNameFormat("Fetcher_O {" + srcNameTrimmed + "} #%d").build());
    }
    this.fetcherExecutor = MoreExecutors.listeningDecorator(fetcherRawExecutor);

    this.maxFailedUniqueFetches = Math.min(numberOfInputs, 5);
    referee.start();
    this.maxFetchFailuresBeforeReporting = conf.getInt(
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_FETCH_FAILURES_LIMIT,
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_FETCH_FAILURES_LIMIT_DEFAULT);
    this.reportReadErrorImmediately = conf.getBoolean(
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_NOTIFY_READERROR,
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_NOTIFY_READERROR_DEFAULT);
    this.verifyDiskChecksum = conf.getBoolean(
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_FETCH_VERIFY_DISK_CHECKSUM,
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_FETCH_VERIFY_DISK_CHECKSUM_DEFAULT);

    /*
     * Setting this to a very high value can lead to HTTP 400 errors. Cap it to 75; every attempt id is
     * approximately 48 bytes; 48 * 75 = 3600, which leaves some room for other info in the URL.
     */
    this.maxTaskOutputAtOnce = Math.max(1,
            Math.min(75, conf.getInt(TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_FETCH_MAX_TASK_OUTPUT_AT_ONCE,
                    TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_FETCH_MAX_TASK_OUTPUT_AT_ONCE_DEFAULT)));

    this.skippedInputCounter = inputContext.getCounters().findCounter(TaskCounter.NUM_SKIPPED_INPUTS);
    this.firstEventReceived = inputContext.getCounters().findCounter(TaskCounter.FIRST_EVENT_RECEIVED);
    this.lastEventReceived = inputContext.getCounters().findCounter(TaskCounter.LAST_EVENT_RECEIVED);
    this.compositeFetch = ShuffleUtils.isTezShuffleHandler(conf);

    this.maxPenaltyTime = conf.getInt(TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_HOST_PENALTY_TIME_LIMIT_MS,
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_HOST_PENALTY_TIME_LIMIT_MS_DEFAULT);

    pipelinedShuffleInfoEventsMap = Maps.newConcurrentMap();

    LOG.info("ShuffleScheduler running for sourceVertex: " + inputContext.getSourceVertexName()
            + " with configuration: " + "maxFetchFailuresBeforeReporting=" + maxFetchFailuresBeforeReporting
            + ", reportReadErrorImmediately=" + reportReadErrorImmediately
            + ", maxFailedUniqueFetches=" + maxFailedUniqueFetches
            + ", abortFailureLimit=" + abortFailureLimit
            + ", maxTaskOutputAtOnce=" + maxTaskOutputAtOnce
            + ", numFetchers=" + numFetchers
            + ", hostFailureFraction=" + hostFailureFraction
            + ", minFailurePerHost=" + minFailurePerHost
            + ", maxAllowedFailedFetchFraction=" + maxAllowedFailedFetchFraction
            + ", maxStallTimeFraction=" + maxStallTimeFraction
            + ", minReqProgressFraction=" + minReqProgressFraction
            + ", checkFailedFetchSinceLastCompletion=" + checkFailedFetchSinceLastCompletion);
}
From source file:org.onosproject.incubator.store.virtual.impl.DistributedVirtualFlowRuleStore.java
@Override
public FlowRuleEvent updateTableStatistics(NetworkId networkId, DeviceId deviceId,
        List<TableStatisticsEntry> tableStats) {
    if (deviceTableStats.get(networkId) == null) {
        deviceTableStats.put(networkId, Maps.newConcurrentMap());
    }
    deviceTableStats.get(networkId).put(deviceId, tableStats);
    return null;
}
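Note that the get-null-check-then-put guard above (also used in getTableStatistics below) is not atomic: two threads that miss on the same networkId can both install a fresh map, and one set of statistics can be lost. On Java 8+, computeIfAbsent expresses the same intent atomically; a minimal sketch, assuming deviceTableStats is itself a ConcurrentMap (e.g. also built with Maps.newConcurrentMap()):

// Hypothetical atomic variant of the check-then-put above.
deviceTableStats
        .computeIfAbsent(networkId, id -> Maps.newConcurrentMap())
        .put(deviceId, tableStats);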
From source file:com.eucalyptus.cluster.callback.CloudWatchHelper.java
public List<PutMetricDataType> collectMetricData(DescribeSensorsResponse msg) throws Exception {
    ArrayList<PutMetricDataType> putMetricDataList = new ArrayList<PutMetricDataType>();
    final Iterable<String> uuidList = instanceInfoProvider.getRunningInstanceUUIDList();

    // cloudwatch metric caches
    final ConcurrentMap<String, DiskReadWriteMetricTypeCache> metricCacheMap = Maps.newConcurrentMap();

    final EC2DiskMetricCache ec2DiskMetricCache = new EC2DiskMetricCache();

    for (final SensorsResourceType sensorData : msg.getSensorsResources()) {
        if (!RESOURCE_TYPE_INSTANCE.equals(sensorData.getResourceType())
                || !Iterables.contains(uuidList, sensorData.getResourceUuid()))
            continue;

        for (final MetricsResourceType metricType : sensorData.getMetrics()) {
            for (final MetricCounterType counterType : metricType.getCounters()) {
                for (final MetricDimensionsType dimensionType : counterType.getDimensions()) {
                    // find and fire most recent value for metric/dimension
                    final List<MetricDimensionsValuesType> values = Lists
                            .newArrayList(stripMilliseconds(dimensionType.getValues()));

                    // CloudWatch use case of metric data
                    // best to enter older data first...
                    Collections.sort(values, Ordering.natural().onResultOf(GetTimestamp.INSTANCE));

                    if (!values.isEmpty()) {
                        for (MetricDimensionsValuesType value : values) {
                            LOG.trace("ResourceUUID: " + sensorData.getResourceUuid());
                            LOG.trace("ResourceName: " + sensorData.getResourceName());
                            LOG.trace("Metric: " + metricType.getMetricName());
                            LOG.trace("Dimension: " + dimensionType.getDimensionName());
                            LOG.trace("Timestamp: " + value.getTimestamp());
                            LOG.trace("Value: " + value.getValue());
                            final Long currentTimeStamp = value.getTimestamp().getTime();
                            final Double currentValue = value.getValue();
                            if (currentValue == null) {
                                LOG.debug("Event received with null 'value', skipping for cloudwatch");
                                continue;
                            }
                            boolean hasEc2DiskMetricName = EC2_DISK_METRICS
                                    .contains(metricType.getMetricName().replace("Volume", "Disk"));
                            // Let's try only creating "zero" points for timestamps from disks
                            if (hasEc2DiskMetricName) {
                                // Put a placeholder in, in case we don't have any non-EBS volumes
                                ec2DiskMetricCache.initializeMetrics(sensorData.getResourceUuid(),
                                        sensorData.getResourceName(), currentTimeStamp);
                            }
                            boolean isEbsMetric = dimensionType.getDimensionName().startsWith("vol-");
                            boolean isEc2DiskMetric = !isEbsMetric && hasEc2DiskMetricName;

                            if (isEbsMetric || !isEc2DiskMetric) {
                                addToPutMetricDataList(putMetricDataList, new Supplier<InstanceUsageEvent>() {
                                    @Override
                                    public InstanceUsageEvent get() {
                                        return new InstanceUsageEvent(sensorData.getResourceUuid(),
                                                sensorData.getResourceName(), metricType.getMetricName(),
                                                dimensionType.getSequenceNum(),
                                                dimensionType.getDimensionName(), currentValue,
                                                currentTimeStamp);
                                    }
                                });

                                if (isEbsMetric) {
                                    // special case to calculate VolumeConsumedReadWriteOps
                                    // As it is (VolumeThroughputPercentage / 100) * (VolumeReadOps + VolumeWriteOps),
                                    // and we are hard coding VolumeThroughputPercentage as 100%, we will just use
                                    // VolumeReadOps + VolumeWriteOps. And just in case VolumeReadOps is called
                                    // DiskReadOps we do both cases...
                                    addToPutMetricDataList(putMetricDataList,
                                            combineReadWriteDiskMetric("DiskReadOps", "DiskWriteOps",
                                                    metricCacheMap, "DiskConsumedReadWriteOps", metricType,
                                                    sensorData, dimensionType, value));
                                    addToPutMetricDataList(putMetricDataList,
                                            combineReadWriteDiskMetric("VolumeReadOps", "VolumeWriteOps",
                                                    metricCacheMap, "VolumeConsumedReadWriteOps", metricType,
                                                    sensorData, dimensionType, value));
                                    // Also need VolumeTotalReadWriteTime to compute VolumeIdleTime
                                    addToPutMetricDataList(putMetricDataList,
                                            combineReadWriteDiskMetric("VolumeTotalReadTime",
                                                    "VolumeTotalWriteTime", metricCacheMap,
                                                    "VolumeTotalReadWriteTime", metricType, sensorData,
                                                    dimensionType, value));
                                }
                            } else {
                                // see if it is a volume metric
                                String metricName = metricType.getMetricName().replace("Volume", "Disk");
                                ec2DiskMetricCache.addToMetric(sensorData.getResourceUuid(),
                                        sensorData.getResourceName(), metricName, currentValue,
                                        currentTimeStamp);
                            }
                        }
                    }
                }
            }
        }
    }
    Collection<Supplier<InstanceUsageEvent>> ec2DiskMetrics = ec2DiskMetricCache.getMetrics();
    List<Supplier<InstanceUsageEvent>> ec2DiskMetricsSorted = Lists.newArrayList(ec2DiskMetrics);
    Collections.sort(ec2DiskMetricsSorted,
            Ordering.natural().onResultOf(new Function<Supplier<InstanceUsageEvent>, Long>() {
                @Override
                @Nullable
                public Long apply(@Nullable Supplier<InstanceUsageEvent> supplier) {
                    return supplier.get().getValueTimestamp();
                }
            }));
    for (Supplier<InstanceUsageEvent> ec2DiskMetric : ec2DiskMetricsSorted) {
        try {
            addToPutMetricDataList(putMetricDataList, ec2DiskMetric);
        } catch (Exception ex) {
            LOG.debug("Unable to add system metric " + ec2DiskMetric, ex);
        }
    }
    return putMetricDataList;
}
From source file:uk.co.drnaylor.quickstart.ModuleContainer.java
/**
 * Starts the module construction and enabling phase. This is the final phase for loading the modules.
 *
 * <p>
 * Once this method is called, modules can no longer be removed.
 * </p>
 *
 * @param failOnOneError If set to <code>true</code>, one module failure will mark the whole loading sequence
 *                       as failed. Otherwise, no modules being constructed will cause a failure.
 *
 * @throws QuickStartModuleLoaderException.Construction if the modules cannot be constructed.
 * @throws QuickStartModuleLoaderException.Enabling if the modules cannot be enabled.
 */
public void loadModules(boolean failOnOneError)
        throws QuickStartModuleLoaderException.Construction, QuickStartModuleLoaderException.Enabling {
    Preconditions.checkArgument(currentPhase == ConstructionPhase.DISCOVERED);
    currentPhase = ConstructionPhase.ENABLING;

    // Get the modules that are being disabled and mark them as such.
    Set<String> disabledModules = getModules(ModuleStatusTristate.DISABLE);
    while (!disabledModules.isEmpty()) {
        // Find any modules that have dependencies on disabled modules, and disable them.
        List<ModuleSpec> toDisable = getModules(ModuleStatusTristate.ENABLE).stream()
                .map(discoveredModules::get)
                .filter(x -> !Collections.disjoint(disabledModules, x.getDependencies()))
                .collect(Collectors.toList());
        if (toDisable.isEmpty()) {
            break;
        }

        if (toDisable.stream().anyMatch(ModuleSpec::isMandatory)) {
            String s = toDisable.stream().filter(ModuleSpec::isMandatory).map(ModuleSpec::getId)
                    .collect(Collectors.joining(", "));
            Class<? extends Module> m = toDisable.stream().filter(ModuleSpec::isMandatory).findFirst().get()
                    .getModuleClass();
            throw new QuickStartModuleLoaderException.Construction(m, "Tried to disable mandatory module",
                    new IllegalStateException(
                            "Dependency failure, tried to disable a mandatory module (" + s + ")"));
        }

        toDisable.forEach(k -> {
            k.setStatus(LoadingStatus.DISABLED);
            disabledModules.add(k.getId());
        });
    }

    // Make sure we get a clean slate here.
    getModules(ModuleStatusTristate.DISABLE)
            .forEach(k -> discoveredModules.get(k).setPhase(ModulePhase.DISABLED));

    // Modules to enable.
    Map<String, Module> modules = Maps.newConcurrentMap();

    // Construct them
    for (String s : getModules(ModuleStatusTristate.ENABLE)) {
        ModuleSpec ms = discoveredModules.get(s);
        try {
            modules.put(s, getModule(ms));
            ms.setPhase(ModulePhase.CONSTRUCTED);
        } catch (Exception construction) {
            construction.printStackTrace();
            ms.setPhase(ModulePhase.ERRORED);
            loggerProxy.error("The module " + ms.getModuleClass().getName() + " failed to construct.");

            if (failOnOneError) {
                currentPhase = ConstructionPhase.ERRORED;
                throw new QuickStartModuleLoaderException.Construction(ms.getModuleClass(),
                        "The module " + ms.getModuleClass().getName() + " failed to construct.", construction);
            }
        }
    }

    if (modules.isEmpty()) {
        currentPhase = ConstructionPhase.ERRORED;
        throw new QuickStartModuleLoaderException.Construction(null, "No modules were constructed.", null);
    }

    int size = modules.size();

    {
        Iterator<Map.Entry<String, Module>> im = modules.entrySet().iterator();
        while (im.hasNext()) {
            Map.Entry<String, Module> module = im.next();
            try {
                module.getValue().checkExternalDependencies();
            } catch (MissingDependencyException ex) {
                this.discoveredModules.get(module.getKey()).setStatus(LoadingStatus.DISABLED);
                this.discoveredModules.get(module.getKey()).setPhase(ModulePhase.DISABLED);
                this.loggerProxy.warn("Module " + module.getKey()
                        + " can not be enabled because an external dependency could not be satisfied.");
                this.loggerProxy.warn("Message was: " + ex.getMessage());
                im.remove();
            }
        }
    }

    while (size != modules.size()) {
        // We might need to disable modules.
        size = modules.size();
        Iterator<Map.Entry<String, Module>> im = modules.entrySet().iterator();
        while (im.hasNext()) {
            Map.Entry<String, Module> module = im.next();
            if (!dependenciesSatisfied(this.discoveredModules.get(module.getKey()),
                    getModules(ModuleStatusTristate.ENABLE))) {
                im.remove();
                this.loggerProxy.warn("Module " + module.getKey()
                        + " can not be enabled because an external dependency on a module it "
                        + "depends on could not be satisfied.");
                this.discoveredModules.get(module.getKey()).setStatus(LoadingStatus.DISABLED);
                this.discoveredModules.get(module.getKey()).setPhase(ModulePhase.DISABLED);
            }
        }
    }

    // Enter Config Adapter phase - attaching before enabling so that enable methods can get any
    // associated configurations.
    for (String s : modules.keySet()) {
        Module m = modules.get(s);
        try {
            attachConfig(s, m);
        } catch (Exception e) {
            e.printStackTrace();
            if (failOnOneError) {
                throw new QuickStartModuleLoaderException.Enabling(m.getClass(), "Failed to attach config.", e);
            }
        }
    }

    // Enter Enable phase.
    Map<String, Module> c = new HashMap<>(modules);

    for (EnablePhase v : EnablePhase.values()) {
        loggerProxy.info(String.format("Starting phase: %s", v.name()));
        v.onStart(this);
        for (String s : c.keySet()) {
            ModuleSpec ms = discoveredModules.get(s);

            // If the module is errored, then we do not continue.
            if (ms.getPhase() == ModulePhase.ERRORED) {
                continue;
            }

            try {
                Module m = modules.get(s);
                v.onModuleAction(this, enabler, m, ms);
            } catch (Exception construction) {
                construction.printStackTrace();
                modules.remove(s);

                if (v != EnablePhase.POSTENABLE) {
                    ms.setPhase(ModulePhase.ERRORED);
                    loggerProxy.error("The module " + ms.getModuleClass().getName() + " failed to enable.");

                    if (failOnOneError) {
                        currentPhase = ConstructionPhase.ERRORED;
                        throw new QuickStartModuleLoaderException.Enabling(ms.getModuleClass(),
                                "The module " + ms.getModuleClass().getName() + " failed to enable.",
                                construction);
                    }
                } else {
                    loggerProxy
                            .error("The module " + ms.getModuleClass().getName() + " failed to post-enable.");
                }
            }
        }
    }

    if (c.isEmpty()) {
        currentPhase = ConstructionPhase.ERRORED;
        throw new QuickStartModuleLoaderException.Enabling(null, "No modules were enabled.", null);
    }

    try {
        config.saveAdapterDefaults(this.processDoNotMerge);
    } catch (IOException e) {
        e.printStackTrace();
    }

    currentPhase = ConstructionPhase.ENABLED;
}
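A plausible reason this single-threaded loader still reaches for Maps.newConcurrentMap(): a concurrent map's iterators are weakly consistent, so entries can be removed mid-scan (with or without the iterator's own remove()) and no ConcurrentModificationException is ever thrown. A small illustration of that property:

// Removing during iteration is safe on a concurrent map; a plain
// java.util.HashMap would throw ConcurrentModificationException here.
ConcurrentMap<String, String> m = Maps.newConcurrentMap();
m.put("a", "1");
m.put("b", "2");
for (String key : m.keySet()) {
    if (key.equals("a")) {
        m.remove(key); // direct removal, not via the iterator: still safe
    }
}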
From source file:org.onosproject.incubator.store.virtual.impl.DistributedVirtualFlowRuleStore.java
@Override
public Iterable<TableStatisticsEntry> getTableStatistics(NetworkId networkId, DeviceId deviceId) {
    MastershipService mastershipService = vnaService.get(networkId, MastershipService.class);
    NodeId master = mastershipService.getMasterFor(deviceId);

    if (master == null) {
        log.debug("Failed to getTableStats: No master for {}", deviceId);
        return Collections.emptyList();
    }

    if (deviceTableStats.get(networkId) == null) {
        deviceTableStats.put(networkId, Maps.newConcurrentMap());
    }

    List<TableStatisticsEntry> tableStats = deviceTableStats.get(networkId).get(deviceId);
    if (tableStats == null) {
        return Collections.emptyList();
    }
    return ImmutableList.copyOf(tableStats);
}
From source file:com.threerings.nexus.server.ObjectManager.java
protected <E extends Keyed> ConcurrentMap<Comparable<?>, Binding<E>> getKeyedMap(Class<? super E> kclass) {
    ConcurrentMap<Comparable<?>, Binding<?>> emap = _keyeds.get(kclass);
    if (emap == null) {
        ConcurrentMap<Comparable<?>, Binding<?>> cmap = _keyeds.putIfAbsent(kclass,
                emap = Maps.newConcurrentMap());
        // if someone beat us to the punch, we need to use their map, not ours
        if (cmap != null) {
            emap = cmap;
        }
    }
    Object noreally = emap;
    @SuppressWarnings("unchecked")
    ConcurrentMap<Comparable<?>, Binding<E>> casted = (ConcurrentMap<Comparable<?>, Binding<E>>) noreally;
    return casted;
}
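getKeyedMap shows the classic pre-Java-8 race-safe idiom: optimistic get, then putIfAbsent, then adopt the winner's map if another thread got there first. On Java 8+ the same dance collapses into a single call; a sketch under the assumption that _keyeds is a ConcurrentMap:

// Hypothetical Java 8+ equivalent of the get/putIfAbsent dance above:
// computeIfAbsent installs a new map only on a miss and always returns the winner's map.
ConcurrentMap<Comparable<?>, Binding<?>> emap =
        _keyeds.computeIfAbsent(kclass, k -> Maps.newConcurrentMap());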
From source file:com.jivesoftware.os.miru.reader.deployable.MiruReaderMain.java
void run(String[] args) throws Exception {
    ServiceStartupHealthCheck serviceStartupHealthCheck = new ServiceStartupHealthCheck();
    try {
        final Deployable deployable = new Deployable(args);

        InstanceConfig instanceConfig = deployable.config(InstanceConfig.class);

        HealthFactory.initialize(deployable::config, new DeployableHealthCheckRegistry(deployable));
        deployable.addManageInjectables(HasUI.class,
                new HasUI(Arrays.asList(new UI("Miru-Reader", "main", "/ui"))));
        deployable.addHealthCheck(new GCPauseHealthChecker(
                deployable.config(GCPauseHealthChecker.GCPauseHealthCheckerConfig.class)));
        deployable.addHealthCheck(new GCLoadHealthChecker(
                deployable.config(GCLoadHealthChecker.GCLoadHealthCheckerConfig.class)));
        deployable.addHealthCheck(new SystemCpuHealthChecker(
                deployable.config(SystemCpuHealthChecker.SystemCpuHealthCheckerConfig.class)));
        deployable.addHealthCheck(new LoadAverageHealthChecker(
                deployable.config(LoadAverageHealthChecker.LoadAverageHealthCheckerConfig.class)));
        deployable.addHealthCheck(new FileDescriptorCountHealthChecker(deployable
                .config(FileDescriptorCountHealthChecker.FileDescriptorCountHealthCheckerConfig.class)));
        deployable.addHealthCheck(new DirectBufferHealthChecker(
                deployable.config(DirectBufferHealthChecker.DirectBufferHealthCheckerConfig.class)));
        deployable.addHealthCheck(serviceStartupHealthCheck);
        deployable.addErrorHealthChecks(deployable.config(ErrorHealthCheckConfig.class));
        deployable.addManageInjectables(FullyOnlineVersion.class, (FullyOnlineVersion) () -> {
            if (serviceStartupHealthCheck.startupHasSucceeded()) {
                return instanceConfig.getVersion();
            } else {
                return null;
            }
        });
        deployable.buildManageServer().start();

        ObjectMapper mapper = new ObjectMapper();
        mapper.configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false);
        mapper.registerModule(new GuavaModule());

        TenantRoutingProvider tenantRoutingProvider = deployable.getTenantRoutingProvider();
        TenantRoutingHttpClientInitializer<String> tenantRoutingHttpClientInitializer = deployable
                .getTenantRoutingHttpClientInitializer();

        HttpDeliveryClientHealthProvider clientHealthProvider = new HttpDeliveryClientHealthProvider(
                instanceConfig.getInstanceKey(),
                HttpRequestHelperUtils.buildRequestHelper(false, false, null, instanceConfig.getRoutesHost(),
                        instanceConfig.getRoutesPort()),
                instanceConfig.getConnectionsHealth(), 5_000, 100);

        MiruLogAppenderConfig miruLogAppenderConfig = deployable.config(MiruLogAppenderConfig.class);
        @SuppressWarnings("unchecked")
        TenantAwareHttpClient<String> miruStumptownClient = tenantRoutingHttpClientInitializer
                .builder(tenantRoutingProvider.getConnections("miru-stumptown", "main", 10_000),
                        clientHealthProvider)
                .deadAfterNErrors(10).checkDeadEveryNMillis(10_000).build();
        new MiruLogAppenderInitializer().initialize(instanceConfig.getDatacenter(),
                instanceConfig.getClusterName(), instanceConfig.getHost(), instanceConfig.getServiceName(),
                String.valueOf(instanceConfig.getInstanceName()), instanceConfig.getVersion(),
                miruLogAppenderConfig, miruStumptownClient).install();

        MiruMetricSamplerConfig metricSamplerConfig = deployable.config(MiruMetricSamplerConfig.class);
        @SuppressWarnings("unchecked")
        TenantAwareHttpClient<String> miruAnomalyClient = tenantRoutingHttpClientInitializer
                .builder(tenantRoutingProvider.getConnections("miru-anomaly", "main", 10_000),
                        clientHealthProvider)
                .deadAfterNErrors(10).checkDeadEveryNMillis(10_000).build();
        new MiruMetricSamplerInitializer().initialize(instanceConfig.getDatacenter(),
                instanceConfig.getClusterName(), instanceConfig.getHost(), instanceConfig.getServiceName(),
                String.valueOf(instanceConfig.getInstanceName()), instanceConfig.getVersion(),
                metricSamplerConfig, miruAnomalyClient).start();

        MiruServiceConfig miruServiceConfig = deployable.config(MiruServiceConfig.class);
        MiruWALConfig walConfig = deployable.config(MiruWALConfig.class);

        MiruHost miruHost = MiruHostProvider.fromInstance(instanceConfig.getInstanceName(),
                instanceConfig.getInstanceKey());

        MiruResourceLocator diskResourceLocator = new MiruResourceLocatorInitializer()
                .initialize(miruServiceConfig);

        MiruInterner<MiruTermId> termInterner = new MiruInterner<MiruTermId>(
                miruServiceConfig.getEnableTermInterning()) {
            @Override
            public MiruTermId create(byte[] bytes) {
                return new MiruTermId(bytes);
            }
        };
        MiruInterner<MiruIBA> ibaInterner = new MiruInterner<MiruIBA>(true) {
            @Override
            public MiruIBA create(byte[] bytes) {
                return new MiruIBA(bytes);
            }
        };
        MiruInterner<MiruTenantId> tenantInterner = new MiruInterner<MiruTenantId>(true) {
            @Override
            public MiruTenantId create(byte[] bytes) {
                return new MiruTenantId(bytes);
            }
        };

        final MiruTermComposer termComposer = new MiruTermComposer(Charsets.UTF_8, termInterner);
        final MiruActivityInternExtern internExtern = new MiruActivityInternExtern(ibaInterner, tenantInterner,
                // makes sense to share string internment as this is authz in both cases
                Interners.<String>newWeakInterner(), termComposer);

        MiruBitmaps<?, ?> bitmaps = miruServiceConfig.getBitmapsClass().newInstance();

        @SuppressWarnings("unchecked")
        TenantAwareHttpClient<String> walHttpClient = tenantRoutingHttpClientInitializer
                .builder(tenantRoutingProvider.getConnections("miru-wal", "main", 10_000), // TODO config
                        clientHealthProvider)
                .deadAfterNErrors(10).checkDeadEveryNMillis(10_000).build(); // TODO expose to conf

        @SuppressWarnings("unchecked")
        TenantAwareHttpClient<String> manageHttpClient = tenantRoutingHttpClientInitializer
                .builder(tenantRoutingProvider.getConnections("miru-manage", "main", 10_000), // TODO config
                        clientHealthProvider)
                .deadAfterNErrors(10).checkDeadEveryNMillis(10_000).build(); // TODO expose to conf

        @SuppressWarnings("unchecked")
        TenantAwareHttpClient<String> readerHttpClient = tenantRoutingHttpClientInitializer
                .builder(tenantRoutingProvider.getConnections("miru-reader", "main", 10_000), // TODO config
                        clientHealthProvider)
                .deadAfterNErrors(10).checkDeadEveryNMillis(10_000).build(); // TODO expose to conf

        @SuppressWarnings("unchecked")
        TenantAwareHttpClient<String> catwalkHttpClient = tenantRoutingHttpClientInitializer
                .builder(tenantRoutingProvider.getConnections("miru-catwalk", "main", 10_000), // TODO config
                        clientHealthProvider)
                .deadAfterNErrors(10).checkDeadEveryNMillis(10_000).build(); // TODO expose to conf

        // TODO add fall back to config
        final MiruStats miruStats = new MiruStats();

        MiruClusterClient clusterClient = new MiruClusterClientInitializer().initialize(miruStats, "",
                manageHttpClient, mapper);
        MiruSchemaProvider miruSchemaProvider = new ClusterSchemaProvider(clusterClient, 10000); // TODO config

        TimestampedOrderIdProvider timestampedOrderIdProvider = new OrderIdProviderImpl(
                new ConstantWriterIdProvider(0), new SnowflakeIdPacker(), new JiveEpochTimestampProvider());

        MiruRealtimeDelivery realtimeDelivery;
        String realtimeDeliveryService = miruServiceConfig.getRealtimeDeliveryService().trim();
        String realtimeDeliveryEndpoint = miruServiceConfig.getRealtimeDeliveryEndpoint().trim();
        if (realtimeDeliveryService.isEmpty() || realtimeDeliveryEndpoint.isEmpty()) {
            realtimeDelivery = new NoOpRealtimeDelivery(miruStats);
        } else {
            @SuppressWarnings("unchecked")
            TenantAwareHttpClient<String> realtimeDeliveryHttpClient = tenantRoutingHttpClientInitializer
                    .builder(tenantRoutingProvider.getConnections(realtimeDeliveryService, "main", 10_000), // TODO config
                            clientHealthProvider)
                    .deadAfterNErrors(10).checkDeadEveryNMillis(10_000).build(); // TODO expose to conf

            realtimeDelivery = new RoutingBirdRealtimeDelivery(miruHost, realtimeDeliveryHttpClient,
                    new RoundRobinStrategy(), realtimeDeliveryEndpoint, mapper, miruStats,
                    timestampedOrderIdProvider,
                    miruServiceConfig.getDropRealtimeDeliveryOlderThanNMillis());
        }

        PartitionErrorTracker.PartitionErrorTrackerConfig partitionErrorTrackerConfig = deployable
                .config(PartitionErrorTracker.PartitionErrorTrackerConfig.class);
        PartitionErrorTracker partitionErrorTracker = new PartitionErrorTracker(partitionErrorTrackerConfig);

        deployable.addHealthCheck(partitionErrorTracker);

        final ThreadGroup threadGroup = Thread.currentThread().getThreadGroup();
        final ScheduledExecutorService scheduledBootstrapExecutor = Executors.newScheduledThreadPool(
                miruServiceConfig.getPartitionScheduledBootstrapThreads(),
                new NamedThreadFactory(threadGroup, "scheduled_bootstrap"));
        final ScheduledExecutorService scheduledRebuildExecutor = Executors.newScheduledThreadPool(
                miruServiceConfig.getPartitionScheduledRebuildThreads(),
                new NamedThreadFactory(threadGroup, "scheduled_rebuild"));
        final ScheduledExecutorService scheduledSipMigrateExecutor = Executors.newScheduledThreadPool(
                miruServiceConfig.getPartitionScheduledSipMigrateThreads(),
                new NamedThreadFactory(threadGroup, "scheduled_sip_migrate"));

        SickThreads walClientSickThreads = new SickThreads();
        deployable.addHealthCheck(new SickThreadsHealthCheck(
                deployable.config(WALClientSickThreadsHealthCheckConfig.class), walClientSickThreads));

        MiruInboxReadTracker inboxReadTracker;
        MiruLifecyle<MiruService> miruServiceLifecyle;

        LABStats rebuildLABStats = new LABStats();
        LABStats globalLABStats = new LABStats();

        if (walConfig.getActivityWALType().equals("rcvs")
                || walConfig.getActivityWALType().equals("rcvs_amza")) {
            MiruWALClient<RCVSCursor, RCVSSipCursor> rcvsWALClient = new MiruWALClientInitializer().initialize(
                    "", walHttpClient, mapper, walClientSickThreads, 10_000, "/miru/wal/rcvs",
                    RCVSCursor.class, RCVSSipCursor.class);
            inboxReadTracker = new RCVSInboxReadTracker(rcvsWALClient);
            miruServiceLifecyle = new MiruServiceInitializer().initialize(miruServiceConfig, miruStats,
                    rebuildLABStats, globalLABStats, scheduledBootstrapExecutor, scheduledRebuildExecutor,
                    scheduledSipMigrateExecutor, clusterClient, miruHost, miruSchemaProvider, rcvsWALClient,
                    realtimeDelivery, new RCVSSipTrackerFactory(), new RCVSSipIndexMarshaller(),
                    diskResourceLocator, termComposer, internExtern, new SingleBitmapsProvider(bitmaps),
                    partitionErrorTracker, termInterner);
        } else if (walConfig.getActivityWALType().equals("amza")
                || walConfig.getActivityWALType().equals("amza_rcvs")) {
            MiruWALClient<AmzaCursor, AmzaSipCursor> amzaWALClient = new MiruWALClientInitializer().initialize(
                    "", walHttpClient, mapper, walClientSickThreads, 10_000, "/miru/wal/amza",
                    AmzaCursor.class, AmzaSipCursor.class);
            inboxReadTracker = new AmzaInboxReadTracker(amzaWALClient);
            miruServiceLifecyle = new MiruServiceInitializer().initialize(miruServiceConfig, miruStats,
                    rebuildLABStats, globalLABStats, scheduledBootstrapExecutor, scheduledRebuildExecutor,
                    scheduledSipMigrateExecutor, clusterClient, miruHost, miruSchemaProvider, amzaWALClient,
                    realtimeDelivery, new AmzaSipTrackerFactory(), new AmzaSipIndexMarshaller(),
                    diskResourceLocator, termComposer, internExtern, new SingleBitmapsProvider(bitmaps),
                    partitionErrorTracker, termInterner);
        } else {
            throw new IllegalStateException("Invalid activity WAL type: " + walConfig.getActivityWALType());
        }

        MiruLifecyle<MiruJustInTimeBackfillerizer> backfillerizerLifecycle = new MiruBackfillerizerInitializer()
                .initialize(miruServiceConfig.getReadStreamIdsPropName(), miruHost, inboxReadTracker);

        backfillerizerLifecycle.start();
        MiruJustInTimeBackfillerizer backfillerizer = backfillerizerLifecycle.getService();

        miruServiceLifecyle.start();
        MiruService miruService = miruServiceLifecyle.getService();

        MiruSoyRendererConfig rendererConfig = deployable.config(MiruSoyRendererConfig.class);

        File staticResourceDir = new File(System.getProperty("user.dir"));
        System.out.println("Static resources rooted at " + staticResourceDir.getAbsolutePath());
        Resource sourceTree = new Resource(staticResourceDir)
                .addResourcePath(rendererConfig.getPathToStaticResources()).setDirectoryListingAllowed(false)
                .setContext("/ui/static");

        MiruSoyRenderer renderer = new MiruSoyRendererInitializer().initialize(rendererConfig);
        MiruReaderUIService uiService = new MiruReaderUIInitializer().initialize(
                instanceConfig.getClusterName(), instanceConfig.getInstanceName(), renderer, miruStats,
                miruService, partitionErrorTracker, tenantRoutingProvider, rebuildLABStats, globalLABStats);

        if (instanceConfig.getMainServiceAuthEnabled()) {
            deployable.addRouteOAuth("/miru/*", "/plugin/*");
            deployable.addSessionAuth("/ui/*", "/miru/*", "/plugin/*");
        } else {
            deployable.addNoAuth("/miru/*", "/plugin/*");
            deployable.addSessionAuth("/ui/*");
        }

        deployable.addEndpoints(MiruReaderUIEndpoints.class);
        deployable.addInjectables(MiruReaderUIService.class, uiService);
        deployable.addInjectables(MiruStats.class, miruStats);

        deployable.addEndpoints(MiruWriterEndpoints.class);
        deployable.addEndpoints(MiruReaderEndpoints.class);
        deployable.addInjectables(MiruService.class, miruService);
        deployable.addInjectables(MiruHost.class, miruHost);

        deployable.addInjectables(ObjectMapper.class, mapper);

        Map<Class<?>, MiruRemotePartition<?, ?, ?>> pluginRemotesMap = Maps.newConcurrentMap();

        Map<MiruHost, MiruHostSelectiveStrategy> readerStrategyCache = Maps.newConcurrentMap();

        MiruProvider<Miru> miruProvider = new MiruProvider<Miru>() {
            @Override
            public Miru getMiru(MiruTenantId tenantId) {
                return miruService;
            }

            @Override
            public MiruHost getHost() {
                return miruHost;
            }

            @Override
            public MiruActivityInternExtern getActivityInternExtern(MiruTenantId tenantId) {
                return internExtern;
            }

            @Override
            public MiruJustInTimeBackfillerizer getBackfillerizer(MiruTenantId tenantId) {
                return backfillerizer;
            }

            @Override
            public MiruTermComposer getTermComposer() {
                return termComposer;
            }

            @Override
            public MiruQueryParser getQueryParser(String defaultField) {
                return new LuceneBackedQueryParser(defaultField);
            }

            @Override
            public MiruStats getStats() {
                return miruStats;
            }

            @Override
            public <R extends MiruRemotePartition<?, ?, ?>> R getRemotePartition(
                    Class<R> remotePartitionClass) {
                return (R) pluginRemotesMap.get(remotePartitionClass);
            }

            @Override
            public TenantAwareHttpClient<String> getReaderHttpClient() {
                return readerHttpClient;
            }

            @Override
            public TenantAwareHttpClient<String> getCatwalkHttpClient() {
                return catwalkHttpClient;
            }

            @Override
            public Map<MiruHost, MiruHostSelectiveStrategy> getReaderStrategyCache() {
                return readerStrategyCache;
            }

            @Override
            public <C extends Config> C getConfig(Class<C> configClass) {
                return deployable.config(configClass);
            }

            @Override
            public void addHealthCheck(HealthCheck healthCheck) {
                deployable.addHealthCheck(healthCheck);
            }
        };

        for (String pluginPackage : miruServiceConfig.getPluginPackages().split(",")) {
            Reflections reflections = new Reflections(
                    new ConfigurationBuilder().setUrls(ClasspathHelper.forPackage(pluginPackage.trim()))
                            .setScanners(new SubTypesScanner(), new TypesScanner()));
            Set<Class<? extends MiruPlugin>> pluginTypes = reflections.getSubTypesOf(MiruPlugin.class);
            for (Class<? extends MiruPlugin> pluginType : pluginTypes) {
                LOG.info("Loading plugin {}", pluginType.getSimpleName());
                MiruPlugin<?, ?> plugin = pluginType.newInstance();
                add(miruProvider, deployable, plugin, pluginRemotesMap); // TODO give plugin a start/stop lifecycle
            }
        }

        deployable.addEndpoints(MiruReaderConfigEndpoints.class);
        deployable.addInjectables(TimestampedOrderIdProvider.class, timestampedOrderIdProvider);
        deployable.addResource(sourceTree);
        deployable.addEndpoints(LoadBalancerHealthCheckEndpoints.class);
        deployable.buildServer().start();
        clientHealthProvider.start();
        serviceStartupHealthCheck.success();
    } catch (Throwable t) {
        serviceStartupHealthCheck.info("Encountered the following failure during startup.", t);
    }
}
From source file:net.floodlightcontroller.devicemanager.internal.DeviceManagerImpl.java
@Override
public void init(FloodlightModuleContext fmc) {
    Device.deviceManager = this;
    secondaryIndexMap = null; //ColumnTable_.getTable(new ColumnProxy((int) Thread.currentThread().getId()), "SECONDARY", Serializer.LONG, AnnotatedColumnObject.newAnnotatedColumnObject(Device.class));
    //XXX - clean up request.

    PropertiesConfiguration config = null;

    // create and load default properties
    try {
        config = new PropertiesConfiguration("datastore.config");
    } catch (ConfigurationException e) {
        System.err.println("Could not read configuration file");
        System.exit(-1);
    }

    if (config.getBoolean("benchmark")) {
        RequestLogger.startRequestLogger(config.getString("benchmark.output"));
    }

    deviceMap = new ColumnWorkloadLogger<Long, Device>("DEVICES", RequestLogger.getRequestLogger(),
            Serializer.LONG, AnnotatedColumnObject.newAnnotatedColumnObject(Device.class));
    // deviceMap = ColumnTable_.getTable(new ColumnProxy((int) Thread.currentThread().getId()), "DEVICES", Serializer.LONG, AnnotatedColumnObject.newAnnotatedColumnObject(Device.class));

    classStateMap = new ConcurrentHashMap<String, ClassState>();
    apComparator = new AttachmentPointComparator();
    perClassIndices = Maps.newConcurrentMap();
    addIndex(true, EnumSet.of(DeviceField.IPV4));

    this.deviceListeners = new ListenerDispatcher<String, IDeviceListener>();
    this.suppressAPs = Collections.newSetFromMap(new ConcurrentHashMap<SwitchPort, Boolean>());

    this.floodlightProvider = fmc.getServiceImpl(IFloodlightProviderService.class);
    this.storageSource = fmc.getServiceImpl(IStorageSourceService.class);
    this.topology = fmc.getServiceImpl(ITopologyService.class);
    this.restApi = fmc.getServiceImpl(IRestApiService.class);
    this.threadPool = fmc.getServiceImpl(IThreadPoolService.class);
    this.flowReconcileMgr = fmc.getServiceImpl(IFlowReconcileService.class);
    this.entityClassifier = fmc.getServiceImpl(IEntityClassifierService.class);
}