Example usage for java.util.concurrent ConcurrentHashMap remove

List of usage examples for java.util.concurrent ConcurrentHashMap remove

Introduction

On this page you can find example usage of java.util.concurrent ConcurrentHashMap.remove.

Prototype

public V remove(Object key) 

Source Link

Document

Removes the key (and its corresponding value) from this map.

Usage

From source file:org.wso2.carbon.event.output.adapter.ui.UIEventAdapter.java

@Override
public void destroy() {

    int tenantId = CarbonContext.getThreadLocalCarbonContext().getTenantId();

    // Drop this stream's adapter registration for the current tenant, if any.
    ConcurrentHashMap<String, String> adapterMapForTenant = UIEventAdaptorServiceInternalValueHolder
            .getTenantSpecificOutputEventStreamAdapterMap().get(tenantId);
    if (streamId != null && adapterMapForTenant != null) {
        adapterMapForTenant.remove(streamId);
    }

    // Also discard any events still queued for this stream.
    ConcurrentHashMap<String, LinkedBlockingDeque<Object>> eventMapForTenant = UIEventAdaptorServiceInternalValueHolder
            .getTenantSpecificStreamEventMap().get(tenantId);
    if (streamId != null && eventMapForTenant != null) {
        eventMapForTenant.remove(streamId);
    }
}

From source file:org.wso2.carbon.event.output.adaptor.cassandra.CassandraEventAdaptorType.java

@Override
public void removeConnectionInfo(OutputEventAdaptorMessageConfiguration outputEventAdaptorMessageConfiguration,
        OutputEventAdaptorConfiguration outputEventAdaptorConfiguration, int tenantId) {
    // Evict the entry for this adaptor configuration from the tenant's
    // Cassandra cluster cache; a missing tenant entry means nothing to clean up.
    ConcurrentHashMap<OutputEventAdaptorConfiguration, EventAdaptorInfo> clusterCache = tenantedCassandraClusterCache
            .get(tenantId);
    if (clusterCache == null) {
        return;
    }
    clusterCache.remove(outputEventAdaptorConfiguration);
}

From source file:org.wso2.carbon.event.output.adaptor.wso2event.WSO2EventAdaptorType.java

@Override
public void removeConnectionInfo(OutputEventAdaptorMessageConfiguration outputEventAdaptorMessageConfiguration,
        OutputEventAdaptorConfiguration outputEventAdaptorConfiguration, int tenantId) {
    // Detach the load-balancing publisher registered under this adaptor
    // configuration for the tenant, and stop it if one was present.
    ConcurrentHashMap<OutputEventAdaptorConfiguration, LoadBalancingDataPublisher> publishersForTenant = dataPublisherMap
            .get(tenantId);
    if (publishersForTenant == null) {
        return;
    }
    LoadBalancingDataPublisher removedPublisher = publishersForTenant.remove(outputEventAdaptorConfiguration);
    if (removedPublisher != null) {
        removedPublisher.stop();
    }
}

From source file:org.wso2.carbon.event.output.adaptor.jms.JMSEventAdaptorType.java

@Override
public void removeConnectionInfo(OutputEventAdaptorMessageConfiguration outputEventAdaptorMessageConfiguration,
        OutputEventAdaptorConfiguration outputEventAdaptorConfiguration, int tenantId) {
    // Publishers are keyed by adaptor name; within that, senders are keyed by
    // the JMS destination (topic) taken from the message configuration.
    ConcurrentHashMap<String, PublisherDetails> sendersByTopic = publisherMap
            .get(outputEventAdaptorConfiguration.getName());
    if (sendersByTopic == null) {
        return;
    }
    String destination = outputEventAdaptorMessageConfiguration.getOutputMessageProperties()
            .get(JMSEventAdaptorConstants.ADAPTOR_JMS_DESTINATION);
    sendersByTopic.remove(destination);
}

From source file:org.wso2.carbon.device.mgt.iot.output.adapter.ui.UIEventAdapter.java

@Override
public void destroy() {
    int tenantId = CarbonContext.getThreadLocalCarbonContext().getTenantId();

    // Drop this stream's adapter registration for the current tenant, if any.
    ConcurrentHashMap<String, String> adapterMapForTenant = UIEventAdaptorServiceDataHolder
            .getTenantSpecificOutputEventStreamAdapterMap().get(tenantId);
    if (streamId != null && adapterMapForTenant != null) {
        adapterMapForTenant.remove(streamId);
    }

    // Also discard any events still queued for this stream.
    ConcurrentHashMap<String, LinkedBlockingDeque<Object>> eventMapForTenant = UIEventAdaptorServiceDataHolder
            .getTenantSpecificStreamEventMap().get(tenantId);
    if (streamId != null && eventMapForTenant != null) {
        eventMapForTenant.remove(streamId);
    }
}

From source file:com.kylinolap.job.flow.JobFlowListener.java

/**
 * Quartz listener callback invoked after a job step runs. Reads the step's
 * status from the persisted job instance and advances the Kylin job flow:
 * on success it updates segment info and schedules the next step; when the
 * whole job is complete it deletes the Quartz job and removes the flow from
 * the scheduler context's runtime-flow map.
 */
@Override
public void jobWasExecuted(JobExecutionContext context, JobExecutionException jobException) {
    log.info(context.getJobDetail().getKey() + " was executed.");
    JobDataMap jobDataMap = context.getJobDetail().getJobDataMap();
    JobFlow jobFlow = (JobFlow) jobDataMap.get(JobConstants.PROP_JOB_FLOW);
    JobEngineConfig engineConfig = jobFlow.getJobengineConfig();
    String jobUuid = jobDataMap.getString(JobConstants.PROP_JOBINSTANCE_UUID);
    int stepSeqID = jobDataMap.getInt(JobConstants.PROP_JOBSTEP_SEQ_ID);
    KylinConfig config = engineConfig.getConfig();

    // Resolved inside the try; the finally block checks jobInstance for null
    // so cleanup only runs when the lookup succeeded.
    JobInstance jobInstance = null;
    JobStep jobStep = null;
    try {
        jobInstance = JobDAO.getInstance(config).getJob(jobUuid);
        jobStep = jobInstance.getSteps().get(stepSeqID);
        CubeInstance cube = CubeManager.getInstance(config).getCube(jobInstance.getRelatedCube());

        log.info(context.getJobDetail().getKey() + " status: " + jobStep.getStatus());
        switch (jobStep.getStatus()) {
        case FINISHED:
            // Ensure we are using the latest metadata
            CubeManager.getInstance(config).loadCubeCache(cube);
            updateKylinJobOnSuccess(jobInstance, stepSeqID, engineConfig);
            updateCubeSegmentInfoOnSucceed(jobInstance, engineConfig);
            notifyUsers(jobInstance, engineConfig);
            // Kick off the next step in the flow, if any.
            scheduleNextJob(context, jobInstance);
            break;
        case ERROR:
            updateKylinJobStatus(jobInstance, stepSeqID, engineConfig);
            notifyUsers(jobInstance, engineConfig);
            break;
        case DISCARDED:
            // Ensure we are using the latest metadata
            CubeManager.getInstance(config).loadCubeCache(cube);
            updateCubeSegmentInfoOnDiscard(jobInstance, engineConfig);
            notifyUsers(jobInstance, engineConfig);
            break;
        default:
            // Other statuses (e.g. still running) require no action here.
            break;
        }
    } catch (Exception e) {
        log.error(e.getMessage(), e);
        handleException(jobUuid, stepSeqID, config, e);
    } finally {
        // Once the job instance is terminal, unregister it from Quartz and
        // drop its flow from the scheduler context's shared runtime map.
        if (null != jobInstance && jobInstance.getStatus().isComplete()) {
            try {
                context.getScheduler().deleteJob(context.getJobDetail().getKey());
                @SuppressWarnings("unchecked")
                ConcurrentHashMap<String, JobFlow> jobFlows = (ConcurrentHashMap<String, JobFlow>) context
                        .getScheduler().getContext().get(JobConstants.PROP_JOB_RUNTIME_FLOWS);
                jobFlows.remove(JobInstance.getJobIdentity(jobInstance));
            } catch (SchedulerException e) {
                log.error(e.getMessage(), e);
            }
        }
    }
}

From source file:org.wso2.carbon.event.input.adaptor.kafka.KafkaEventAdaptorType.java

@Override
public void unsubscribe(InputEventAdaptorMessageConfiguration inputEventAdaptorMessageConfiguration,
        InputEventAdaptorConfiguration inputEventAdaptorConfiguration, AxisConfiguration axisConfiguration,
        String subscriptionId) {
    // Nothing to do unless the adaptor registry has been initialized.
    if (consumerAdaptorMap == null) {
        return;
    }
    int tenantId = PrivilegedCarbonContext.getThreadLocalCarbonContext().getTenantId(true);
    ConcurrentHashMap<String, ConsumerKafkaAdaptor> adaptorsForTenant = consumerAdaptorMap.get(tenantId);
    if (adaptorsForTenant == null) {
        return;
    }
    // Stop the Kafka consumer before discarding its registration.
    ConsumerKafkaAdaptor adaptor = adaptorsForTenant.get(subscriptionId);
    if (adaptor != null) {
        adaptor.shutdown();
        adaptorsForTenant.remove(subscriptionId);
    }
}

From source file:uk.co.tfd.symplectic.harvester.SymplecticFetch.java

/**
 * Removes finished futures from the worklist, logging each result, and
 * periodically logs the backlog size.
 *
 * <p>ConcurrentHashMap's entry-set iterator is weakly consistent, so removing
 * entries from {@code worklist} while iterating over it is safe.
 */
private void consumeTasks(ConcurrentHashMap<String, FutureTask<String>> worklist, ProgressTracker tracker) {
    for (Entry<String, FutureTask<String>> e : worklist.entrySet()) {
        if (e.getValue().isDone()) {
            try {
                // Fixed typo in the log message ("Recieved" -> "Received").
                LOGGER.info("Received " + e.getValue().get());
            } catch (Exception e1) {
                // A failed task is logged but still removed below, so it is
                // not retried.
                LOGGER.info("Failed {} ", e.getKey(), e1);
            }
            worklist.remove(e.getKey());
        }
    }
    // Throttle progress reporting to at most one status line every 5 seconds.
    if (System.currentTimeMillis() > lastLog + 5000) {
        LOGGER.info("Current Worklist Backlog {} In Pending or Loading state {} ", worklist.size(),
                tracker.pending());
        lastLog = System.currentTimeMillis();
    }
}

From source file:org.wso2.carbon.device.mgt.output.adapter.websocket.WebsocketEventAdapter.java

@Override
public void destroy() {
    int tenantId = CarbonContext.getThreadLocalCarbonContext().getTenantId();

    // Drop this stream's adapter registration for the current tenant, if any.
    ConcurrentHashMap<String, String> adapterMapForTenant = WebsocketEventAdaptorServiceDataHolder
            .getTenantSpecificOutputEventStreamAdapterMap().get(tenantId);
    if (streamId != null && adapterMapForTenant != null) {
        adapterMapForTenant.remove(streamId);
    }

    // Also discard any events still queued for this stream.
    ConcurrentHashMap<String, LinkedBlockingDeque<Object>> eventMapForTenant = WebsocketEventAdaptorServiceDataHolder
            .getTenantSpecificStreamEventMap().get(tenantId);
    if (streamId != null && eventMapForTenant != null) {
        eventMapForTenant.remove(streamId);
    }
}

From source file:org.openhab.io.caldav.internal.job.EventReloaderJob.java

private synchronized void removeDeletedEvents(String calendarKey, List<String> oldMap) {
    final CalendarRuntime eventRuntime = EventStorage.getInstance().getEventCache().get(calendarKey);

    for (String filename : oldMap) {
        EventContainer eventContainer = eventRuntime.getEventContainerByFilename(filename);
        if (eventContainer == null) {
            log.warn("Cannot find event container for filename: {}", filename);
            continue;
        }/*from  w ww . ja  v a  2s  .co m*/

        // cancel old jobs
        for (String jobId : eventContainer.getTimerMap()) {
            try {
                String group;
                if (jobId.startsWith(CalDavLoaderImpl.JOB_NAME_EVENT_START)) {
                    group = CalDavLoaderImpl.JOB_NAME_EVENT_START;
                } else if (jobId.startsWith(CalDavLoaderImpl.JOB_NAME_EVENT_END)) {
                    group = CalDavLoaderImpl.JOB_NAME_EVENT_END;
                } else {
                    throw new SchedulerException("unknown job id: " + jobId);
                }
                boolean deleteJob = CalDavLoaderImpl.instance.getScheduler()
                        .deleteJob(JobKey.jobKey(jobId, group));
                log.debug("old job ({}) deleted? {}", jobId, deleteJob);
            } catch (SchedulerException e) {
                log.warn("Cannot delete job '{}'", jobId, e);
            }
        }
        eventContainer.getTimerMap().clear();

        for (EventNotifier notifier : CalDavLoaderImpl.instance.getEventListenerList()) {
            for (CalDavEvent event : eventContainer.getEventList()) {
                try {
                    notifier.eventRemoved(event);
                } catch (Exception e) {
                    log.warn("Error while invoking listener", e);
                }
            }
        }

        ConcurrentHashMap<String, EventContainer> eventContainerMap = eventRuntime.getEventMap();
        this.removeFromDisk(eventContainer);

        log.debug("remove deleted event: {}", eventContainer.getEventId());
        eventContainerMap.remove(eventContainer.getEventId());
    }
}