Example usage for java.util.concurrent ConcurrentSkipListSet ConcurrentSkipListSet

Introduction

On this page you can find example usage of the java.util.concurrent ConcurrentSkipListSet no-argument constructor, ConcurrentSkipListSet().

Prototype

public ConcurrentSkipListSet() 

Document

Constructs a new, empty set that orders its elements according to their Comparable natural ordering.
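
As a minimal, self-contained sketch (not taken from any of the projects listed below), the no-argument constructor yields an empty, thread-safe sorted set whose elements must be mutually Comparable:

import java.util.concurrent.ConcurrentSkipListSet;

public class ConcurrentSkipListSetExample {
    public static void main(String[] args) {
        // Elements are kept sorted by their natural (Comparable) ordering.
        ConcurrentSkipListSet<String> names = new ConcurrentSkipListSet<>();
        names.add("charlie");
        names.add("alice");
        names.add("bob");
        names.add("alice"); // duplicates are ignored

        System.out.println(names);         // [alice, bob, charlie]
        System.out.println(names.first()); // alice
    }
}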

Usage

From source file:org.springframework.kafka.listener.ConcurrentMessageListenerContainerTests.java

@Test
public void testAfterListenCommit() throws Exception {
    this.logger.info("Start manual");
    Map<String, Object> props = KafkaTestUtils.consumerProps("test2", "false", embeddedKafka);
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic2);

    final CountDownLatch latch = new CountDownLatch(4);
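    // Sorted, thread-safe set for collecting the names of the concurrent listener threads.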
    final Set<String> listenerThreadNames = new ConcurrentSkipListSet<>();
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        ConcurrentMessageListenerContainerTests.this.logger.info("manual: " + message);
        listenerThreadNames.add(Thread.currentThread().getName());
        latch.countDown();
    });

    ConcurrentMessageListenerContainer<Integer, String> container = new ConcurrentMessageListenerContainer<>(cf,
            containerProps);
    container.setConcurrency(2);
    container.setBeanName("testBatch");
    container.start();

    ContainerTestUtils.waitForAssignment(container, embeddedKafka.getPartitionsPerTopic());

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic2);
    template.sendDefault(0, "foo");
    template.sendDefault(2, "bar");
    template.sendDefault(0, "baz");
    template.sendDefault(2, "qux");
    template.flush();
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(listenerThreadNames).allMatch(threadName -> threadName.contains("-listener-"));
    container.stop();
    this.logger.info("Stop manual");
}

From source file:com.alibaba.jstorm.daemon.worker.WorkerData.java

@SuppressWarnings({ "rawtypes", "unchecked" })
public WorkerData(Map conf, IContext context, String topology_id, String supervisor_id, int port,
        String worker_id, String jar_path) throws Exception {

    this.conf = conf;
    this.context = context;
    this.topologyId = topology_id;
    this.supervisorId = supervisor_id;
    this.port = port;
    this.workerId = worker_id;

    this.shutdown = new AtomicBoolean(false);

    this.monitorEnable = new AtomicBoolean(true);
    this.topologyStatus = StatusType.active;

    if (StormConfig.cluster_mode(conf).equals("distributed")) {
        String pidDir = StormConfig.worker_pids_root(conf, worker_id);
        JStormServerUtils.createPid(pidDir);
    }

    // create zk interface
    this.zkClusterstate = ZkTool.mk_distributed_cluster_state(conf);
    this.zkCluster = Cluster.mk_storm_cluster_state(zkClusterstate);

    Map rawConf = StormConfig.read_supervisor_topology_conf(conf, topology_id);
    this.stormConf = new HashMap<Object, Object>();
    this.stormConf.putAll(conf);
    this.stormConf.putAll(rawConf);

    JStormMetrics.setTopologyId(topology_id);
    JStormMetrics.setPort(port);
    JStormMetrics.setDebug(ConfigExtension.isEnableMetricDebug(stormConf));
    JStormMetrics.setEnabled(ConfigExtension.isEnableMetrics(stormConf));
    JStormMetrics.addDebugMetrics(ConfigExtension.getDebugMetricNames(stormConf));
    AsmMetric.setSampleRate(ConfigExtension.getMetricSampleRate(stormConf));

    ConfigExtension.setLocalSupervisorId(stormConf, supervisorId);
    ConfigExtension.setLocalWorkerId(stormConf, workerId);
    ConfigExtension.setLocalWorkerPort(stormConf, port);
    ControlMessage.setPort(port);

    JStormMetrics.registerWorkerTopologyMetric(
            JStormMetrics.workerMetricName(MetricDef.CPU_USED_RATIO, MetricType.GAUGE),
            new AsmGauge(new Gauge<Double>() {
                @Override
                public Double getValue() {
                    return JStormUtils.getCpuUsage();
                }
            }));

    JStormMetrics.registerWorkerTopologyMetric(
            JStormMetrics.workerMetricName(MetricDef.MEMORY_USED, MetricType.GAUGE),
            new AsmGauge(new Gauge<Double>() {
                @Override
                public Double getValue() {
                    return JStormUtils.getMemUsage();
                }
            }));

    JStormMetrics.registerWorkerMetric(JStormMetrics.workerMetricName(MetricDef.DISK_USAGE, MetricType.GAUGE),
            new AsmGauge(new Gauge<Double>() {
                @Override
                public Double getValue() {
                    return JStormUtils.getDiskUsage();
                }
            }));

    LOG.info("Worker Configuration " + stormConf);

    try {
        boolean enableClassloader = ConfigExtension.isEnableTopologyClassLoader(stormConf);
        boolean enableDebugClassloader = ConfigExtension.isEnableClassloaderDebug(stormConf);

        if (jar_path == null && enableClassloader == true
                && !conf.get(Config.STORM_CLUSTER_MODE).equals("local")) {
            LOG.error("enable classloader, but not app jar");
            throw new InvalidParameterException();
        }

        URL[] urlArray = new URL[0];
        if (jar_path != null) {
            String[] paths = jar_path.split(":");
            Set<URL> urls = new HashSet<URL>();
            for (String path : paths) {
                if (StringUtils.isBlank(path))
                    continue;
                URL url = new URL("File:" + path);
                urls.add(url);
            }
            urlArray = urls.toArray(new URL[0]);
        }

        WorkerClassLoader.mkInstance(urlArray, ClassLoader.getSystemClassLoader(),
                ClassLoader.getSystemClassLoader().getParent(), enableClassloader, enableDebugClassloader);
    } catch (Exception e) {
        LOG.error("init jarClassLoader error!", e);
        throw new InvalidParameterException();
    }

    if (this.context == null) {
        this.context = TransportFactory.makeContext(stormConf);
    }

    boolean disruptorUseSleep = ConfigExtension.isDisruptorUseSleep(stormConf);
    DisruptorQueue.setUseSleep(disruptorUseSleep);
    boolean isLimited = ConfigExtension.getTopologyBufferSizeLimited(stormConf);
    DisruptorQueue.setLimited(isLimited);
    LOG.info("Disruptor use sleep:" + disruptorUseSleep + ", limited size:" + isLimited);

    // this.transferQueue = new LinkedBlockingQueue<TransferData>();
    int buffer_size = Utils.getInt(stormConf.get(Config.TOPOLOGY_TRANSFER_BUFFER_SIZE));
    WaitStrategy waitStrategy = (WaitStrategy) JStormUtils.createDisruptorWaitStrategy(stormConf);
    this.transferQueue = DisruptorQueue.mkInstance("TotalTransfer", ProducerType.MULTI, buffer_size,
            waitStrategy);
    this.transferQueue.consumerStarted();
    this.sendingQueue = DisruptorQueue.mkInstance("TotalSending", ProducerType.MULTI, buffer_size,
            waitStrategy);
    this.sendingQueue.consumerStarted();

    this.nodeportSocket = new ConcurrentHashMap<WorkerSlot, IConnection>();
    this.taskNodeport = new ConcurrentHashMap<Integer, WorkerSlot>();
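    // Thread-safe sorted set of the worker slots assigned to this topology (filled from the ZK assignment below).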
    this.workerToResource = new ConcurrentSkipListSet<ResourceWorkerSlot>();
    this.innerTaskTransfer = new ConcurrentHashMap<Integer, DisruptorQueue>();
    this.deserializeQueues = new ConcurrentHashMap<Integer, DisruptorQueue>();
    this.tasksToComponent = new ConcurrentHashMap<Integer, String>();
    this.componentToSortedTasks = new ConcurrentHashMap<String, List<Integer>>();

    Assignment assignment = zkCluster.assignment_info(topologyId, null);
    if (assignment == null) {
        String errMsg = "Failed to get Assignment of " + topologyId;
        LOG.error(errMsg);
        throw new RuntimeException(errMsg);
    }
    workerToResource.addAll(assignment.getWorkers());

    // get current worker's task list

    this.taskids = assignment.getCurrentWorkerTasks(supervisorId, port);
    if (taskids.size() == 0) {
        throw new RuntimeException("No tasks running current workers");
    }
    LOG.info("Current worker taskList:" + taskids);

    // deserialize topology code from local dir
    rawTopology = StormConfig.read_supervisor_topology_code(conf, topology_id);
    sysTopology = Common.system_topology(stormConf, rawTopology);

    generateMaps();

    contextMaker = new ContextMaker(this);

    outTaskStatus = new ConcurrentHashMap<Integer, Boolean>();

    threadPool = Executors.newScheduledThreadPool(THREAD_POOL_NUM);
    TimerTrigger.setScheduledExecutorService(threadPool);

    if (!StormConfig.local_mode(stormConf)) {
        healthReporterThread = new AsyncLoopThread(new JStormHealthReporter(this));
    }

    try {
        Long tmp = StormConfig.read_supervisor_topology_timestamp(conf, topology_id);
        assignmentTS = (tmp == null ? System.currentTimeMillis() : tmp);
    } catch (FileNotFoundException e) {
        assignmentTS = System.currentTimeMillis();
    }

    outboundTasks = new HashSet<Integer>();

    LOG.info("Successfully create WorkerData");

}

From source file:com.threadswarm.imagefeedarchiver.driver.CommandLineDriver.java

@Override
public void run() {
    //setup filters
    List<RssItemFilter> filterList = new LinkedList<RssItemFilter>();
    filterList.add(new PreviouslyDownloadedItemFilter(processedRssItemDAO));
    RssItemFilter chainedItemFilter = new ChainedRssItemFilter(filterList);

    RssChannel rssChannel = null;
    try {
        rssChannel = fetchRssChannel(rssFeedUri);
    } catch (IOException | FeedParserException e) {
        LOGGER.error(
                "An Exception was thrown while attempting to download and parse the target RSS feed.. exiting",
                e);
        System.exit(1);
    }

    List<RssItem> filteredItemList = new LinkedList<RssItem>();
    if (rssChannel != null && rssChannel.getItems() != null) {
        for (RssItem rssItem : rssChannel.getItems()) {
            rssItem = chainedItemFilter.filter(rssItem);
            if (rssItem != null)
                filteredItemList.add(rssItem);
        }
    }

    if (!filteredItemList.isEmpty()) {
        //create list of headers to be used when downloading images
        List<Header> headerList = new ArrayList<Header>(2);
        if (doNotTrackRequested) {
            LOGGER.debug("Adding 'DNT' header to worker requests");
            headerList.add(DNT_HEADER);
        }
        headerList.add(new BasicHeader(HttpHeaders.REFERER, rssFeedUri.toString()));
        headerList = Collections.unmodifiableList(headerList);

        ExecutorService executorService = null;
        try {
            executorService = Executors.newFixedThreadPool(threadCount);
            CompletionService<ProcessedRssItem> completionService = new ExecutorCompletionService<ProcessedRssItem>(
                    executorService);
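            // Sorted, thread-safe set of processed URIs shared by the worker tasks.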
            Set<URI> processedURISet = new ConcurrentSkipListSet<URI>();
            int itemCount = 0;
            for (RssItem rssItem : filteredItemList) {
                completionService.submit(new RssItemProcessor(httpClient, rssItem, processedRssItemDAO,
                        outputDirectory, headerList, processedURISet, downloadDelay, forceHttps));
                itemCount++;
            }

            LOGGER.info("{} jobs submitted for execution", itemCount);

            for (int x = 0; x < itemCount; x++) {
                ProcessedRssItem processedItem = completionService.take().get();
                LOGGER.info("Item status: {} --> [{}]", processedItem.getRssItem().getTitle(),
                        processedItem.getDownloadStatus());
            }
        } catch (InterruptedException e) {
            LOGGER.warn("Thread interrupted while blocking", e);
            Thread.currentThread().interrupt(); // restore interrupt
        } catch (ExecutionException e) {
            LOGGER.error("An Exception was thrown during worker execution and subsequently propagated", e);
            e.printStackTrace();
        } finally {
            executorService.shutdown();
            try {
                executorService.awaitTermination(10, TimeUnit.SECONDS);
            } catch (InterruptedException e) {
                LOGGER.warn("Thread interrupted while blocking", e);
                Thread.currentThread().interrupt(); // restore interrupt
            }
            httpClient.getConnectionManager().shutdown();
        }
    }
}

From source file:oculus.aperture.graph.aggregation.impl.ModularityAggregator.java

@Override
public void run() {

    logger.debug("Running kSnap clustering algorithm on " + nodeMap.size() + " nodes and " + linkMap.size()
            + " links...");

    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    HashMap<String, ModularityNode> linklookup = new HashMap<String, ModularityAggregator.ModularityNode>();

    for (Node n : nodeMap.values()) {
        ModularityNode mn = new ModularityNode(n);
        linklookup.put(n.getId(), mn);
        groups.add(mn);

    }
    links = new ArrayList<ModularityLink>();

    for (Link l : linkMap.values()) {
        if (linklookup.containsKey(l.getSourceId()) && linklookup.containsKey(l.getTargetId())) {
            //if this is not true we have links pointing to an invalid node...
            ModularityLink ml = new ModularityLink(linklookup.get(l.getSourceId()),
                    linklookup.get(l.getTargetId()));
            links.add(ml);

            ModularityNode start = linklookup.get(l.getSourceId());
            ModularityNode end = linklookup.get(l.getTargetId());
            start.addLink(ml);
            end.addLink(ml);
        }

    }

    boolean notterminate = true;

    int linksize;

    while (notterminate) {
        final List<Future<?>> futures = new ArrayList<Future<?>>();
        notterminate = false;
        final PriorityBlockingQueue<ModularityLink> linksort = new PriorityBlockingQueue<ModularityLink>();
        linksize = links.size();
        final int itrsize = linksize / nThreads;
        for (int i = 0; i < nThreads; i++) {

            final int passval = i;

            Future<?> foo = executor.submit(new Callable<Boolean>() {
                @Override
                public Boolean call() throws Exception {
                    boolean nt = false;
                    for (int lnknum = 0; lnknum < itrsize; lnknum++) {
                        ModularityLink ln = links.get(passval * itrsize + lnknum);
                        long nc = 0;
                        if (ln.source.neighbourcounts.containsKey(ln.target)) {
                            nc = ln.source.neighbourcounts.get(ln.target).intValue();
                        } else {
                            System.out.println("Oooops");
                        }

                        long q = nc - (ln.source.totalvolume * ln.target.totalvolume) / 2;

                        if (q > 0)
                            nt = true;
                        ln.q.set(q);
                        linksort.add(ln);
                    }
                    return nt;
                }
            });

            futures.add(foo);

        }

        for (Future<?> foo : futures) {
            try {
                notterminate = (Boolean) foo.get();
            } catch (InterruptedException interruptedCancellingAndSignalling) {
                Thread.currentThread().interrupt();
            } catch (ExecutionException wtf) {
                wtf.printStackTrace();
            }
        }

        if (!notterminate)
            break;
        //Now we take each link in the queue and add it to maximal matching 
        ConcurrentLinkedQueue<ModularityLink> maximalmatching = new ConcurrentLinkedQueue<ModularityAggregator.ModularityLink>();
        ConcurrentSkipListSet<ModularityNode> vertexcheck = new ConcurrentSkipListSet<ModularityAggregator.ModularityNode>();
        ModularityLink top = linksort.poll();
        maximalmatching.add(top);
        vertexcheck.add(top.source);
        vertexcheck.add(top.target);
        while (!linksort.isEmpty()) {
            ModularityLink nlnk = linksort.poll();
            if (nlnk.q.intValue() < 0)
                continue;

            if (vertexcheck.contains(nlnk.source) || vertexcheck.contains(nlnk.target))
                continue;
            maximalmatching.add(nlnk);
            vertexcheck.add(nlnk.source);
            vertexcheck.add(nlnk.target);
        }

        //Now we take all the pairs in maximal matching and fuse them
        for (ModularityLink ln : maximalmatching) {
            ModularityNode so = ln.source;
            ModularityNode tr = ln.target;
            so.assimilate(tr);
            groups.remove(tr);

            links.remove(ln);
        }
        linksize = links.size();
        if (linksize == 1)
            notterminate = false;
    }

    /*
    final List<Future<?>> futures = new ArrayList<Future<?>>();
            
    Future<?> foo = executor.submit(new Runnable(){
            
       @Override
       public void run() {
            
       }});
            
    futures.add(foo);
    */
    clusterSet = new ArrayList<Set<Node>>();

    for (ModularityNode g : groups) {

        if (cancel) {
            setStatusWaiting();
            return;
        }
        Set<Node> set = new HashSet<Node>();
        clusterSet.add(set);

        for (Node n : g.nodes) {

            if (cancel) {
                setStatusWaiting();
                return;
            }

            set.add(n);

        }

    }
    if (clusterer != null) {
        graphResult = clusterer.convertClusterSet(clusterSet);
    }
    stopWatch.stop();
    System.out.println("Finished Modularity clustering algorithm.");
    System.out.println("Algorithm took " + stopWatch.toString());//30 = 33.487
    stopWatch.reset();
    this.result = result;
}

From source file:org.exoplatform.social.core.storage.impl.ActivityStreamStorageImpl.java

@Override
public void update(ProcessContext ctx) {
    final ReentrantLock lock = new ReentrantLock();
    ThreadLocal<Set<String>> idLocal = new ThreadLocal<Set<String>>();
    try {

        StreamProcessContext streamCtx = ObjectHelper.cast(StreamProcessContext.class, ctx);
        ExoSocialActivity activity = streamCtx.getActivity();
        ActivityEntity activityEntity = _findById(ActivityEntity.class, activity.getId());

        lock.lock(); // block until condition holds

        Collection<ActivityRef> references = activityEntity.getActivityRefs();
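        // Collect the activity reference ids in a sorted, thread-safe set.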
        Set<String> ids = new ConcurrentSkipListSet<String>();

        for (ActivityRef ref : references) {
            ids.add(ref.getId());
        }

        idLocal.set(ids);

        Set<String> idList = idLocal.get();
        if (idList.size() > 0) {
            for (String id : idList) {
                ActivityRef old = _findById(ActivityRef.class, id);
                LOG.debug("ActivityRef will be deleted: " + old.toString());
                ActivityRefListEntity refList = old.getDay().getMonth().getYear().getList();
                //
                if (refList.isOnlyUpdate(old, activity.getUpdated().getTime())) {
                    old.setName("" + activity.getUpdated().getTime());
                    old.setLastUpdated(activity.getUpdated().getTime());
                } else {
                    ActivityRef newRef = refList.getOrCreated(activity.getUpdated().getTime());
                    newRef.setLastUpdated(activity.getUpdated().getTime());
                    newRef.setActivityEntity(activityEntity);
                    getSession().remove(old);
                }

            }
        }

        // mentioners
        addMentioner(streamCtx.getMentioners(), activityEntity);
        //turnOffLock to get increase perf
        //turnOnUpdateLock = false;
    } catch (NodeNotFoundException ex) {
        LOG.warn("Probably was updated activity reference by another session");
        LOG.debug(ex.getMessage(), ex);
        //turnOnLock to avoid next exception
    } catch (ChromatticException ex) {
        Throwable throwable = ex.getCause();
        if (throwable instanceof ItemExistsException || throwable instanceof InvalidItemStateException
                || throwable instanceof PathNotFoundException) {
            LOG.warn("Probably was updated activity reference by another session");
            LOG.debug(ex.getMessage(), ex);
            //turnOnLock to avoid next exception
        } else {
            LOG.warn("Probably was updated activity reference by another session", ex);
            LOG.debug(ex.getMessage(), ex);
        }

    } finally {
        getSession().save();
        lock.unlock();
    }
}

From source file:org.apache.accumulo.tserver.tablet.Tablet.java

public Tablet(final TabletServer tabletServer, final KeyExtent extent, final TabletResourceManager trm,
        TabletData data) throws IOException {

    this.tabletServer = tabletServer;
    this.extent = extent;
    this.tabletResources = trm;
    this.lastLocation = data.getLastLocation();
    this.lastFlushID = data.getFlushID();
    this.lastCompactID = data.getCompactID();
    this.splitCreationTime = data.getSplitTime();
    this.tabletTime = TabletTime.getInstance(data.getTime());
    this.persistedTime = tabletTime.getTime();
    this.logId = tabletServer.createLogId(extent);

    TableConfiguration tblConf = tabletServer.getTableConfiguration(extent);
    if (null == tblConf) {
        Tables.clearCache(tabletServer.getInstance());
        tblConf = tabletServer.getTableConfiguration(extent);
        requireNonNull(tblConf, "Could not get table configuration for " + extent.getTableId());
    }

    this.tableConfiguration = tblConf;

    // translate any volume changes
    VolumeManager fs = tabletServer.getFileSystem();
    boolean replicationEnabled = ReplicationConfigurationUtil.isEnabled(extent, this.tableConfiguration);
    TabletFiles tabletPaths = new TabletFiles(data.getDirectory(), data.getLogEntris(), data.getDataFiles());
    tabletPaths = VolumeUtil.updateTabletVolumes(tabletServer, tabletServer.getLock(), fs, extent, tabletPaths,
            replicationEnabled);

    // deal with relative path for the directory
    Path locationPath;
    if (tabletPaths.dir.contains(":")) {
        locationPath = new Path(tabletPaths.dir);
    } else {
        locationPath = tabletServer.getFileSystem().getFullPath(FileType.TABLE,
                extent.getTableId() + tabletPaths.dir);
    }
    this.location = locationPath;
    this.tabletDirectory = tabletPaths.dir;
    for (Entry<Long, List<FileRef>> entry : data.getBulkImported().entrySet()) {
        this.bulkImported.put(entry.getKey(), new CopyOnWriteArrayList<FileRef>(entry.getValue()));
    }
    setupDefaultSecurityLabels(extent);

    final List<LogEntry> logEntries = tabletPaths.logEntries;
    final SortedMap<FileRef, DataFileValue> datafiles = tabletPaths.datafiles;

    tableConfiguration.addObserver(configObserver = new ConfigurationObserver() {

        private void reloadConstraints() {
            log.debug("Reloading constraints for extent: " + extent);
            constraintChecker.set(new ConstraintChecker(tableConfiguration));
        }

        @Override
        public void propertiesChanged() {
            reloadConstraints();

            try {
                setupDefaultSecurityLabels(extent);
            } catch (Exception e) {
                log.error("Failed to reload default security labels for extent: " + extent.toString());
            }
        }

        @Override
        public void propertyChanged(String prop) {
            if (prop.startsWith(Property.TABLE_CONSTRAINT_PREFIX.getKey()))
                reloadConstraints();
            else if (prop.equals(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey())) {
                try {
                    log.info("Default security labels changed for extent: " + extent.toString());
                    setupDefaultSecurityLabels(extent);
                } catch (Exception e) {
                    log.error("Failed to reload default security labels for extent: " + extent.toString());
                }
            }

        }

        @Override
        public void sessionExpired() {
            log.debug("Session expired, no longer updating per table props...");
        }

    });

    tableConfiguration.getNamespaceConfiguration().addObserver(configObserver);
    tabletMemory = new TabletMemory(this);

    // Force a load of any per-table properties
    configObserver.propertiesChanged();
    if (!logEntries.isEmpty()) {
        log.info("Starting Write-Ahead Log recovery for " + this.extent);
        final AtomicLong entriesUsedOnTablet = new AtomicLong(0);
        // track max time from walog entries without timestamps
        final AtomicLong maxTime = new AtomicLong(Long.MIN_VALUE);
        final CommitSession commitSession = getTabletMemory().getCommitSession();
        try {
            Set<String> absPaths = new HashSet<String>();
            for (FileRef ref : datafiles.keySet())
                absPaths.add(ref.path().toString());

            tabletServer.recover(this.getTabletServer().getFileSystem(), extent, tableConfiguration, logEntries,
                    absPaths, new MutationReceiver() {
                        @Override
                        public void receive(Mutation m) {
                            // LogReader.printMutation(m);
                            Collection<ColumnUpdate> muts = m.getUpdates();
                            for (ColumnUpdate columnUpdate : muts) {
                                if (!columnUpdate.hasTimestamp()) {
                                    // if it is not a user set timestamp, it must have been set
                                    // by the system
                                    maxTime.set(Math.max(maxTime.get(), columnUpdate.getTimestamp()));
                                }
                            }
                            getTabletMemory().mutate(commitSession, Collections.singletonList(m));
                            entriesUsedOnTablet.incrementAndGet();
                        }
                    });

            if (maxTime.get() != Long.MIN_VALUE) {
                tabletTime.useMaxTimeFromWALog(maxTime.get());
            }
            commitSession.updateMaxCommittedTime(tabletTime.getTime());

            if (entriesUsedOnTablet.get() == 0) {
                log.debug("No replayed mutations applied, removing unused entries for " + extent);
                MetadataTableUtil.removeUnusedWALEntries(getTabletServer(), extent, logEntries,
                        tabletServer.getLock());

                // No replication update to be made because the fact that this tablet didn't use any mutations
                // from the WAL implies nothing about use of this WAL by other tablets. Do nothing.

                logEntries.clear();
            } else if (ReplicationConfigurationUtil.isEnabled(extent,
                    tabletServer.getTableConfiguration(extent))) {
                // The logs are about to be re-used by this tablet, we need to record that they have data for this extent,
                // but that they may get more data. logEntries is not cleared which will cause the elements
                // in logEntries to be added to the currentLogs for this Tablet below.
                //
                // This update serves the same purpose as an update during a MinC. We know that the WAL was defined
                // (written when the WAL was opened) but this lets us know there are mutations written to this WAL
                // that could potentially be replicated. Because the Tablet is using this WAL, we can be sure that
                // the WAL isn't closed (WRT replication Status) and thus we're safe to update its progress.
                Status status = StatusUtil.openWithUnknownLength();
                for (LogEntry logEntry : logEntries) {
                    log.debug("Writing updated status to metadata table for " + logEntry.filename + " "
                            + ProtobufUtil.toString(status));
                    ReplicationTableUtil.updateFiles(tabletServer, extent, logEntry.filename, status);
                }
            }

        } catch (Throwable t) {
            if (tableConfiguration.getBoolean(Property.TABLE_FAILURES_IGNORE)) {
                log.warn("Error recovering from log files: ", t);
            } else {
                throw new RuntimeException(t);
            }
        }
        // make some closed references that represent the recovered logs
        currentLogs = new ConcurrentSkipListSet<DfsLogger>();
        for (LogEntry logEntry : logEntries) {
            currentLogs.add(new DfsLogger(tabletServer.getServerConfig(), logEntry.filename,
                    logEntry.getColumnQualifier().toString()));
        }

        log.info("Write-Ahead Log recovery complete for " + this.extent + " (" + entriesUsedOnTablet.get()
                + " mutations applied, " + getTabletMemory().getNumEntries() + " entries created)");
    }

    String contextName = tableConfiguration.get(Property.TABLE_CLASSPATH);
    if (contextName != null && !contextName.equals("")) {
        // initialize context classloader, instead of possibly waiting for it to initialize for a scan
        // TODO this could hang, causing other tablets to fail to load - ACCUMULO-1292
        AccumuloVFSClassLoader.getContextManager().getClassLoader(contextName);
    }

    // do this last after tablet is completely setup because it
    // could cause major compaction to start
    datafileManager = new DatafileManager(this, datafiles);

    computeNumEntries();

    getDatafileManager().removeFilesAfterScan(data.getScanFiles());

    // look for hints of a failure on the previous tablet server
    if (!logEntries.isEmpty() || needsMajorCompaction(MajorCompactionReason.NORMAL)) {
        // look for any temp files hanging around
        removeOldTemporaryFiles();
    }

    log.log(TLevel.TABLET_HIST, extent + " opened");
}

From source file:org.apache.accumulo.tserver.tablet.Tablet.java

private synchronized MinorCompactionTask prepareForMinC(long flushId, MinorCompactionReason mincReason) {
    CommitSession oldCommitSession = getTabletMemory().prepareForMinC();
    otherLogs = currentLogs;
    currentLogs = new ConcurrentSkipListSet<DfsLogger>();

    FileRef mergeFile = null;
    if (mincReason != MinorCompactionReason.RECOVERY) {
        mergeFile = getDatafileManager().reserveMergingMinorCompactionFile();
    }

    double tracePercent = tabletServer.getConfiguration().getFraction(Property.TSERV_MINC_TRACE_PERCENT);

    return new MinorCompactionTask(this, mergeFile, oldCommitSession, flushId, mincReason, tracePercent);

}

From source file:org.epics.archiverappliance.config.DefaultConfigService.java

@Override
public void postStartup() throws ConfigException {
    if (this.startupState != STARTUP_SEQUENCE.READY_TO_JOIN_APPLIANCE) {
        configlogger.info("Webapp is not in correct state for postStartup " + this.getWarFile().toString()
                + ". It is in " + this.startupState.toString());
        return;
    }

    this.startupState = STARTUP_SEQUENCE.POST_STARTUP_RUNNING;
    configlogger.info("Post startup for " + this.getWarFile().toString());

    // Inherit logging from log4j configuration.
    try {
        PlatformLoggingMXBean logging = ManagementFactory.getPlatformMXBean(PlatformLoggingMXBean.class);
        if (logging != null) {
            java.util.logging.Logger.getLogger("com.hazelcast");
            if (clusterLogger.isDebugEnabled()) {
                logging.setLoggerLevel("com.hazelcast", java.util.logging.Level.FINE.toString());
            } else if (clusterLogger.isInfoEnabled()) {
                logging.setLoggerLevel("com.hazelcast", java.util.logging.Level.INFO.toString());
            } else {
                logger.info(
                        "Setting clustering logging based on log levels for cluster." + getClass().getName());
                logging.setLoggerLevel("com.hazelcast", java.util.logging.Level.SEVERE.toString());
            }
        }

        Logger hzMain = Logger.getLogger("com.hazelcast");
        if (clusterLogger.isDebugEnabled()) {
            hzMain.setLevel(Level.DEBUG);
        } else if (clusterLogger.isInfoEnabled()) {
            hzMain.setLevel(Level.INFO);
        } else {
            logger.info("Setting clustering logging based on log levels for cluster." + getClass().getName());
            hzMain.setLevel(Level.FATAL);
        }
    } catch (Exception ex) {
        logger.error("Exception setting logging JVM levels ", ex);
    }

    // Add this to the system props before doing anything with Hz
    System.getProperties().put("hazelcast.logging.type", "log4j");

    HazelcastInstance hzinstance = null;

    // Set the thread count to control how may threads this library spawns.
    Properties hzThreadCounts = new Properties();
    if (System.getenv().containsKey("ARCHAPPL_ALL_APPS_ON_ONE_JVM")) {
        logger.info("Reducing the generic clustering thread counts.");
        hzThreadCounts.put("hazelcast.clientengine.thread.count", "2");
        hzThreadCounts.put("hazelcast.operation.generic.thread.count", "2");
        hzThreadCounts.put("hazelcast.operation.thread.count", "2");
    }

    if (this.warFile == WAR_FILE.MGMT) {
        // The management webapps are the head honchos in the cluster. We set them up differently

        configlogger.debug("Initializing the MGMT webapp's clustering");
        // If we have a hazelcast.xml in the servlet classpath, the XmlConfigBuilder picks that up.
        // If not we use the default config found in hazelcast.jar
        // We then alter this config to suit our purposes.
        Config config = new XmlConfigBuilder().build();
        try {
            if (this.getClass().getResource("hazelcast.xml") == null) {
                logger.info("We override the default cluster config by disabling multicast discovery etc.");
                // We do not use multicast as it is not supported on all networks.
                config.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
                // We use TCPIP to discover the members in the cluster.
                // This is part of the config that comes from appliance.xml
                config.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true);
                // Clear any tcpip config that comes from the default config
                // This gets rid of the localhost in the default that prevents clusters from forming..
                // If we need localhost, we'll add it back later.
                config.getNetworkConfig().getJoin().getTcpIpConfig().clear();
                // Enable interfaces; we seem to need this after 2.4 for clients to work correctly in a multi-homed environment.
                // We'll add the actual interface later below
                config.getNetworkConfig().getInterfaces().setEnabled(true);
                config.getNetworkConfig().getInterfaces().clear();

                // We don't really use the authentication provided by the tool; however, we set it to some default
                config.getGroupConfig().setName("archappl");
                config.getGroupConfig().setPassword("archappl");

                // Backup count is 1 by default; we set it explicitly however...
                config.getMapConfig("default").setBackupCount(1);

                config.setProperty("hazelcast.logging.type", "log4j");
            } else {
                logger.debug(
                        "There is a hazelcast.xml in the classpath; skipping default configuration in the code.");
            }
        } catch (Exception ex) {
            throw new ConfigException("Exception configuring cluster", ex);
        }

        config.setInstanceName(myIdentity);

        if (!hzThreadCounts.isEmpty()) {
            logger.info("Reducing the generic clustering thread counts.");
            config.getProperties().putAll(hzThreadCounts);
        }

        try {
            String[] myAddrParts = myApplianceInfo.getClusterInetPort().split(":");
            String myHostName = myAddrParts[0];
            InetAddress myInetAddr = InetAddress.getByName(myHostName);
            if (!myHostName.equals("localhost") && myInetAddr.isLoopbackAddress()) {
                logger.info("Address for this appliance -- " + myInetAddr.toString()
                        + " is a loopback address. Changing this to 127.0.0.1 to clustering happy");
                myInetAddr = InetAddress.getByName("127.0.0.1");
            }
            int myClusterPort = Integer.parseInt(myAddrParts[1]);

            logger.debug("We do not let the port auto increment for the MGMT webap");
            config.getNetworkConfig().setPortAutoIncrement(false);

            config.getNetworkConfig().setPort(myClusterPort);
            config.getNetworkConfig().getInterfaces().addInterface(myInetAddr.getHostAddress());
            configlogger.info("Setting my cluster port base to " + myClusterPort + " and using interface "
                    + myInetAddr.getHostAddress());

            for (ApplianceInfo applInfo : appliances.values()) {
                if (applInfo.getIdentity().equals(myIdentity) && this.warFile == WAR_FILE.MGMT) {
                    logger.debug("Not adding myself to the discovery process when I am the mgmt webapp");
                } else {
                    String[] addressparts = applInfo.getClusterInetPort().split(":");
                    String inetaddrpart = addressparts[0];
                    try {
                        InetAddress inetaddr = InetAddress.getByName(inetaddrpart);
                        if (!inetaddrpart.equals("localhost") && inetaddr.isLoopbackAddress()) {
                            logger.info("Address for appliance " + applInfo.getIdentity() + " -  "
                                    + inetaddr.toString()
                                    + " is a loopback address. Changing this to 127.0.0.1 to clustering happy");
                            inetaddr = InetAddress.getByName("127.0.0.1");
                        }
                        int clusterPort = Integer.parseInt(addressparts[1]);
                        logger.info("Adding " + applInfo.getIdentity()
                                + " from appliances.xml to the cluster discovery using cluster inetport "
                                + inetaddr.toString() + ":" + clusterPort);
                        config.getNetworkConfig().getJoin().getTcpIpConfig()
                                .addMember(inetaddr.getHostAddress() + ":" + clusterPort);
                    } catch (UnknownHostException ex) {
                        configlogger.info("Cannnot resolve the IP address for appliance " + inetaddrpart
                                + ". Skipping adding this appliance to the cliuster.");
                    }
                }
            }
            hzinstance = Hazelcast.newHazelcastInstance(config);
        } catch (Exception ex) {
            throw new ConfigException("Exception adding member to cluster", ex);
        }
    } else {
        // All other webapps are "native" clients.
        try {
            configlogger.debug("Initializing a non-mgmt webapp's clustering");
            ClientConfig clientConfig = new ClientConfig();
            clientConfig.getGroupConfig().setName("archappl");
            clientConfig.getGroupConfig().setPassword("archappl");
            clientConfig.setExecutorPoolSize(4);
            // Non mgmt client can only connect to their MGMT webapp.
            String[] myAddrParts = myApplianceInfo.getClusterInetPort().split(":");
            String myHostName = myAddrParts[0];
            InetAddress myInetAddr = InetAddress.getByName(myHostName);
            if (!myHostName.equals("localhost") && myInetAddr.isLoopbackAddress()) {
                logger.info("Address for this appliance -- " + myInetAddr.toString()
                        + " is a loopback address. Changing this to 127.0.0.1 to clustering happy");
                myInetAddr = InetAddress.getByName("127.0.0.1");
            }
            int myClusterPort = Integer.parseInt(myAddrParts[1]);

            configlogger.debug(this.warFile + " connecting as a native client to " + myInetAddr.getHostAddress()
                    + ":" + myClusterPort);
            clientConfig.getNetworkConfig().addAddress(myInetAddr.getHostAddress() + ":" + myClusterPort);
            clientConfig.setProperty("hazelcast.logging.type", "log4j");

            if (!hzThreadCounts.isEmpty()) {
                logger.info("Reducing the generic clustering thread counts.");
                clientConfig.getProperties().putAll(hzThreadCounts);
            }

            if (!clusterLogger.isDebugEnabled()) {
                // The client code logs some SEVERE exceptions on shutdown when deploying on the same Tomcat container.
                // These exceptions are confusing; ideally, we would not have to set the log levels like so.
                Logger.getLogger("com.hazelcast.client.spi.impl.ClusterListenerThread").setLevel(Level.OFF);
                Logger.getLogger("com.hazelcast.client.spi.ClientPartitionService").setLevel(Level.OFF);
            }
            hzinstance = HazelcastClient.newHazelcastClient(clientConfig);
        } catch (Exception ex) {
            throw new ConfigException("Exception adding client to cluster", ex);
        }
    }

    pv2appliancemapping = hzinstance.getMap("pv2appliancemapping");
    namedFlags = hzinstance.getMap("namedflags");
    typeInfos = hzinstance.getMap("typeinfo");
    archivePVRequests = hzinstance.getMap("archivePVRequests");
    channelArchiverDataServers = hzinstance.getMap("channelArchiverDataServers");
    clusterInet2ApplianceIdentity = hzinstance.getMap("clusterInet2ApplianceIdentity");
    aliasNamesToRealNames = hzinstance.getMap("aliasNamesToRealNames");
    pv2ChannelArchiverDataServer = hzinstance.getMap("pv2ChannelArchiverDataServer");
    pubSub = hzinstance.getTopic("pubSub");

    final HazelcastInstance shutdownHzInstance = hzinstance;
    shutdownHooks.add(0, new Runnable() {
        @Override
        public void run() {
            logger.debug("Shutting down clustering instance in webapp " + warFile.toString());
            shutdownHzInstance.shutdown();
        }
    });

    if (this.warFile == WAR_FILE.MGMT) {
        Cluster cluster = hzinstance.getCluster();
        String localInetPort = getMemberKey(cluster.getLocalMember());
        clusterInet2ApplianceIdentity.put(localInetPort, myIdentity);
        logger.debug("Adding myself " + myIdentity + " as having inetport " + localInetPort);
        hzinstance.getMap("clusterInet2ApplianceIdentity")
                .addEntryListener(new EntryAddedListener<Object, Object>() {
                    @Override
                    public void entryAdded(EntryEvent<Object, Object> event) {
                        String appliden = (String) event.getValue();
                        appliancesInCluster.add(appliden);
                        logger.info("Adding appliance " + appliden
                                + " to the list of active appliances as inetport " + ((String) event.getKey()));
                    }
                }, true);
        hzinstance.getMap("clusterInet2ApplianceIdentity")
                .addEntryListener(new EntryRemovedListener<Object, Object>() {
                    @Override
                    public void entryRemoved(EntryEvent<Object, Object> event) {
                        String appliden = (String) event.getValue();
                        appliancesInCluster.remove(appliden);
                        logger.info("Removing appliance " + appliden
                                + " from the list of active appliancesas inetport "
                                + ((String) event.getKey()));
                    }
                }, true);

        logger.debug(
                "Establishing a cluster membership listener to detect when appliances drop off the cluster");
        cluster.addMembershipListener(new MembershipListener() {
            public void memberAdded(MembershipEvent membersipEvent) {
                Member member = membersipEvent.getMember();
                String inetPort = getMemberKey(member);
                if (clusterInet2ApplianceIdentity.containsKey(inetPort)) {
                    String appliden = clusterInet2ApplianceIdentity.get(inetPort);
                    appliancesInCluster.add(appliden);
                    configlogger.info("Adding newly started appliance " + appliden
                            + " to the list of active appliances for inetport " + inetPort);
                } else {
                    logger.debug("Skipping adding appliance using inetport " + inetPort
                            + " to the list of active instances as we do not have a mapping to its identity");
                }
            }

            public void memberRemoved(MembershipEvent membersipEvent) {
                Member member = membersipEvent.getMember();
                String inetPort = getMemberKey(member);
                if (clusterInet2ApplianceIdentity.containsKey(inetPort)) {
                    String appliden = clusterInet2ApplianceIdentity.get(inetPort);
                    appliancesInCluster.remove(appliden);
                    configlogger.info("Removing appliance " + appliden + " from the list of active appliances");
                } else {
                    configlogger.debug("Received member removed event for " + inetPort);
                }
            }

            @Override
            public void memberAttributeChanged(MemberAttributeEvent membersipEvent) {
                Member member = membersipEvent.getMember();
                String inetPort = getMemberKey(member);
                configlogger.debug("Received membership attribute changed event for " + inetPort);
            }
        });

        logger.debug(
                "Adding the current members in the cluster after establishing the cluster membership listener");
        for (Member member : cluster.getMembers()) {
            String mbrInetPort = getMemberKey(member);
            logger.debug("Found member " + mbrInetPort);
            if (clusterInet2ApplianceIdentity.containsKey(mbrInetPort)) {
                String appliden = clusterInet2ApplianceIdentity.get(mbrInetPort);
                appliancesInCluster.add(appliden);
                logger.info("Adding appliance " + appliden + " to the list of active appliances for inetport "
                        + mbrInetPort);
            } else {
                logger.debug("Skipping adding appliance using inetport " + mbrInetPort
                        + " to the list of active instances as we do not have a mapping to its identity");
            }
        }
        logger.info("Established subscription(s) for appliance availability");

        if (this.getInstallationProperties().containsKey(ARCHAPPL_NAMEDFLAGS_PROPERTIES_FILE_PROPERTY)) {
            String namedFlagsFileName = (String) this.getInstallationProperties()
                    .get(ARCHAPPL_NAMEDFLAGS_PROPERTIES_FILE_PROPERTY);
            configlogger.info("Loading named flags from file " + namedFlagsFileName);
            File namedFlagsFile = new File(namedFlagsFileName);
            if (!namedFlagsFile.exists()) {
                configlogger.error(
                        "File containing named flags " + namedFlagsFileName + " specified but not present");
            } else {
                Properties namedFlagsFromFile = new Properties();
                try (FileInputStream is = new FileInputStream(namedFlagsFile)) {
                    namedFlagsFromFile.load(is);
                    for (Object namedFlagFromFile : namedFlagsFromFile.keySet()) {
                        try {
                            String namedFlagFromFileStr = (String) namedFlagFromFile;
                            Boolean namedFlagFromFileValue = Boolean
                                    .parseBoolean((String) namedFlagsFromFile.get(namedFlagFromFileStr));
                            logger.debug("Setting named flag " + namedFlagFromFileStr + " to "
                                    + namedFlagFromFileValue);
                            this.namedFlags.put(namedFlagFromFileStr, namedFlagFromFileValue);
                        } catch (Exception ex) {
                            logger.error("Exception loading named flag from file" + namedFlagsFileName, ex);
                        }
                    }
                } catch (Exception ex) {
                    configlogger.error("Exception loading named flags from " + namedFlagsFileName, ex);
                }
            }
        }
    }

    if (this.warFile == WAR_FILE.ENGINE) {
        // It can take a while for the engine to start up.
        // We probably want to do this in the background so that the appliance as a whole starts up quickly and we get retrieval up and running quickly.
        this.startupExecutor.schedule(new Runnable() {
            @Override
            public void run() {
                try {
                    logger.debug("Starting up the engine's channels on startup.");
                    archivePVSonStartup();
                    logger.debug("Done starting up the engine's channels in startup.");
                } catch (Throwable t) {
                    configlogger.fatal("Exception starting up the engine channels on startup", t);
                }
            }
        }, 1, TimeUnit.SECONDS);
    } else if (this.warFile == WAR_FILE.ETL) {
        this.etlPVLookup.postStartup();
    } else if (this.warFile == WAR_FILE.MGMT) {
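        // Sorted, thread-safe caches of the PV names (and paused PV names) registered to this appliance.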
        pvsForThisAppliance = new ConcurrentSkipListSet<String>();
        pausedPVsForThisAppliance = new ConcurrentSkipListSet<String>();

        initializePersistenceLayer();

        loadTypeInfosFromPersistence();

        loadAliasesFromPersistence();

        loadArchiveRequestsFromPersistence();

        loadExternalServersFromPersistence();

        registerForNewExternalServers(hzinstance.getMap("channelArchiverDataServers"));

        // Cache the aggregate of all the PVs that are registered to this appliance.
        logger.debug("Building a local aggregate of PV infos that are registered to this appliance");
        for (String pvName : getPVsForThisAppliance()) {
            if (!pvsForThisAppliance.contains(pvName)) {
                applianceAggregateInfo.addInfoForPV(pvName, this.getTypeInfoForPV(pvName), this);
            }
        }
    }

    // Register for changes to the typeinfo map.
    logger.info("Registering for changes to typeinfos");
    hzinstance.getMap("typeinfo").addEntryListener(new EntryAddedListener<Object, Object>() {
        @Override
        public void entryAdded(EntryEvent<Object, Object> entryEvent) {
            logger.debug("Received entryAdded for pvTypeInfo");
            PVTypeInfo typeInfo = (PVTypeInfo) entryEvent.getValue();
            String pvName = typeInfo.getPvName();
            eventBus.post(new PVTypeInfoEvent(pvName, typeInfo, ChangeType.TYPEINFO_ADDED));
            if (persistanceLayer != null) {
                try {
                    persistanceLayer.putTypeInfo(pvName, typeInfo);
                } catch (Exception ex) {
                    logger.error("Exception persisting pvTypeInfo for pv " + pvName, ex);
                }
            }
        }
    }, true);
    hzinstance.getMap("typeinfo").addEntryListener(new EntryRemovedListener<Object, Object>() {
        @Override
        public void entryRemoved(EntryEvent<Object, Object> entryEvent) {
            PVTypeInfo typeInfo = (PVTypeInfo) entryEvent.getOldValue();
            String pvName = typeInfo.getPvName();
            logger.info("Received entryRemoved for pvTypeInfo " + pvName);
            eventBus.post(new PVTypeInfoEvent(pvName, typeInfo, ChangeType.TYPEINFO_DELETED));
            if (persistanceLayer != null) {
                try {
                    persistanceLayer.deleteTypeInfo(pvName);
                } catch (Exception ex) {
                    logger.error("Exception deleting pvTypeInfo for pv " + pvName, ex);
                }
            }
        }
    }, true);
    hzinstance.getMap("typeinfo").addEntryListener(new EntryUpdatedListener<Object, Object>() {
        @Override
        public void entryUpdated(EntryEvent<Object, Object> entryEvent) {
            PVTypeInfo typeInfo = (PVTypeInfo) entryEvent.getValue();
            String pvName = typeInfo.getPvName();
            eventBus.post(new PVTypeInfoEvent(pvName, typeInfo, ChangeType.TYPEINFO_MODIFIED));
            logger.debug("Received entryUpdated for pvTypeInfo");
            if (persistanceLayer != null) {
                try {
                    persistanceLayer.putTypeInfo(pvName, typeInfo);
                } catch (Exception ex) {
                    logger.error("Exception persisting pvTypeInfo for pv " + pvName, ex);
                }
            }
        }
    }, true);

    eventBus.register(this);

    pubSub.addMessageListener(new MessageListener<PubSubEvent>() {
        @Override
        public void onMessage(Message<PubSubEvent> pubSubEventMsg) {
            PubSubEvent pubSubEvent = pubSubEventMsg.getMessageObject();
            if (pubSubEvent.getDestination() != null) {
                if (pubSubEvent.getDestination().equals("ALL")
                        || (pubSubEvent.getDestination().startsWith(myIdentity) && pubSubEvent.getDestination()
                                .endsWith(DefaultConfigService.this.warFile.toString()))) {
                    // We publish messages from hazelcast into this VM only if the intened WAR file is us.
                    logger.debug("Publishing event into this JVM " + pubSubEvent.generateEventDescription());
                    // In this case, we set the source as being the cluster to prevent republishing back into the cluster.
                    pubSubEvent.markSourceAsCluster();
                    eventBus.post(pubSubEvent);
                } else {
                    logger.debug("Skipping publishing event into this JVM "
                            + pubSubEvent.generateEventDescription() + " as destination is not me "
                            + DefaultConfigService.this.warFile.toString());
                }
            } else {
                logger.debug("Skipping publishing event with null destination");
            }
        }
    });

    logger.info("Done registering for changes to typeinfos");

    this.startupState = STARTUP_SEQUENCE.STARTUP_COMPLETE;
    configlogger.info("Start complete for webapp " + this.warFile);
}

From source file:org.epics.archiverappliance.config.DefaultConfigService.java

@Subscribe
public void updatePVSForThisAppliance(PVTypeInfoEvent event) {
    if (logger.isDebugEnabled())
        logger.debug("Received pvTypeInfo change event for pv " + event.getPvName());
    PVTypeInfo typeInfo = event.getTypeInfo();
    String pvName = typeInfo.getPvName();
    if (typeInfo.getApplianceIdentity().equals(myApplianceInfo.getIdentity())) {
        if (event.getChangeType() == ChangeType.TYPEINFO_DELETED) {
            if (pvsForThisAppliance != null) {
                if (pvsForThisAppliance.contains(pvName)) {
                    logger.debug("Removing pv " + pvName
                            + " from the locally cached copy of pvs for this appliance");
                    pvsForThisAppliance.remove(pvName);
                    pausedPVsForThisAppliance.remove(pvName);
                    // For now, we do not anticipate many PVs being deleted from the cache to worry about keeping applianceAggregateInfo upto date...
                    // This may change later... 
                    String[] parts = this.pvName2KeyConverter.breakIntoParts(pvName);
                    for (String part : parts) {
                        parts2PVNamesForThisAppliance.get(part).remove(pvName);
                    }
                }
            }
        } else {
            if (pvsForThisAppliance != null) {
                if (!pvsForThisAppliance.contains(pvName)) {
                    logger.debug(
                            "Adding pv " + pvName + " to the locally cached copy of pvs for this appliance");
                    pvsForThisAppliance.add(pvName);
                    if (typeInfo.isPaused()) {
                        pausedPVsForThisAppliance.add(typeInfo.getPvName());
                    }
                    String[] parts = this.pvName2KeyConverter.breakIntoParts(pvName);
                    for (String part : parts) {
                        if (!parts2PVNamesForThisAppliance.containsKey(part)) {
                            parts2PVNamesForThisAppliance.put(part, new ConcurrentSkipListSet<String>());
                        }
                        parts2PVNamesForThisAppliance.get(part).add(pvName);
                    }
                    applianceAggregateInfo.addInfoForPV(pvName, typeInfo, this);
                } else {
                    if (typeInfo.isPaused()) {
                        pausedPVsForThisAppliance.add(typeInfo.getPvName());
                    } else {
                        pausedPVsForThisAppliance.remove(typeInfo.getPvName());
                    }
                }
            }
        }
    }
}

From source file:io.hops.metadata.util.RMUtilities.java

/**
 * Retrieves pending events from NDB. If numberOfEvents is zero, it
 * retrieves all the events.
 *
 * @param numberOfEvents
 * @param status
 * @return
 * @throws IOException
 */
public static Map<String, ConcurrentSkipListSet<PendingEvent>> getPendingEvents(final int numberOfEvents,
        final byte status) throws IOException {
    LightWeightRequestHandler getPendingEventsHandler = new LightWeightRequestHandler(YARNOperationType.TEST) {
        @Override
        public Object performTask() throws IOException {
            LOG.debug("HOP :: getPendingEvents - START");
            connector.beginTransaction();
            connector.writeLock();
            List<PendingEvent> pendingEvents;
            //Map groups events by RMNode to assign them to ThreadPool
            Map<String, ConcurrentSkipListSet<PendingEvent>> pendingEventsByRMNode = new HashMap<String, ConcurrentSkipListSet<PendingEvent>>();
            PendingEventDataAccess DA = (PendingEventDataAccess) RMStorageFactory
                    .getDataAccess(PendingEventDataAccess.class);
            //Check the number of events to retrieve
            if (numberOfEvents == 0) {
                pendingEvents = DA.getAll(status);
                if (pendingEvents != null && !pendingEvents.isEmpty()) {
                    for (PendingEvent hop : pendingEvents) {
                        if (!pendingEventsByRMNode.containsKey(hop.getRmnodeId())) {
                            pendingEventsByRMNode.put(hop.getRmnodeId(),
                                    new ConcurrentSkipListSet<PendingEvent>());
                        }
                        pendingEventsByRMNode.get(hop.getRmnodeId()).add(hop);
                    }
                }
            }
            connector.commit();
            LOG.debug("HOP :: getPendingEvents - FINISH");
            return pendingEventsByRMNode;
        }
    };
    return (Map<String, ConcurrentSkipListSet<PendingEvent>>) getPendingEventsHandler.handle();
}