Usage examples for java.util.concurrent.ConcurrentHashMap.put
public V put(K key, V value)
From source file:org.wso2.carbon.governance.metadata.Util.java
private static Map<String, VersionBaseProvider> getVersionBaseProviderMap() throws MetadataException {
    if (versionBaseProviderMap != null) {
        return versionBaseProviderMap;
    }
    ConcurrentHashMap<String, VersionBaseProvider> providerMap = new ConcurrentHashMap<String, VersionBaseProvider>();
    try {
        FileInputStream fileInputStream = new FileInputStream(getConfigFile());
        StAXOMBuilder builder = new StAXOMBuilder(fileInputStream);
        OMElement configElement = builder.getDocumentElement();
        OMElement metadataProviders = configElement.getFirstChildWithName(new QName("metadataProviders"))
                .getFirstChildWithName(new QName("versionBaseProviders"));
        Iterator<OMElement> itr = metadataProviders.getChildrenWithLocalName("provider");
        while (itr.hasNext()) {
            OMElement metadataProvider = itr.next();
            String providerClass = metadataProvider.getAttributeValue(new QName("class")).trim();
            String mediaType = metadataProvider.getAttributeValue(new QName(Constants.ATTRIBUTE_MEDIA_TYPE));
            ClassLoader loader = Thread.currentThread().getContextClassLoader();
            Class<VersionBaseProvider> classObj =
                    (Class<VersionBaseProvider>) Class.forName(providerClass, true, loader);
            if (!providerMap.containsKey(mediaType)) {
                providerMap.put(mediaType,
                        (VersionBaseProvider) classObj.getConstructors()[0].newInstance(mediaType));
            } else {
                // log.error("Classification URI already exists")
            }
        }
    } catch (Exception e) {
        throw new MetadataException(e.getMessage(), e);
    }
    return Util.versionBaseProviderMap = providerMap;
}
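Note that the containsKey()/put() pair above is a check-then-act sequence. It is harmless here only because providerMap is still local to the method while it is populated; on a map shared between threads, another thread could insert the key between the check and the put. putIfAbsent() performs both steps atomically. A minimal sketch, using a hypothetical shared registry (the names below are illustrative, not from the WSO2 source):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class PutIfAbsentSketch {
    // Hypothetical shared registry; names are illustrative only.
    private static final ConcurrentMap<String, String> REGISTRY = new ConcurrentHashMap<>();

    static String register(String mediaType, String provider) {
        // putIfAbsent folds the containsKey check and the put into one atomic
        // operation; it returns the existing value if another thread won the
        // race, and null if our value was stored.
        String previous = REGISTRY.putIfAbsent(mediaType, provider);
        return previous != null ? previous : provider;
    }
}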
From source file:org.apache.niolex.config.event.ConfigEventDispatcher.java
/**
 * Add an event listener that cares about this event.
 *
 * @param groupName the group the listener subscribes to
 * @param listener  the listener to add
 */
public void addListener(String groupName, IPacketWriter listener) {
    ConcurrentHashMap<IPacketWriter, String> queue = clients.get(groupName);
    if (queue == null) {
        queue = new ConcurrentHashMap<IPacketWriter, String>();
        ConcurrentHashMap<IPacketWriter, String> tmp = clients.putIfAbsent(groupName, queue);
        if (tmp != null) {
            queue = tmp;
        }
    }
    queue.put(listener, "");
}
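This is the classic pre-Java-8 idiom for lazily creating a nested map: create optimistically, publish with putIfAbsent(), and keep whichever instance won the race. Since Java 8 the same thing can be said in one call with computeIfAbsent(), which also guarantees the factory runs at most once per key. A minimal sketch with simplified types (String stands in for IPacketWriter):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class ComputeIfAbsentSketch {
    private final ConcurrentMap<String, ConcurrentMap<String, String>> clients = new ConcurrentHashMap<>();

    void addListener(String groupName, String listener) {
        // The inner map is created at most once per group, even when many
        // threads register the first listener for the same group concurrently.
        clients.computeIfAbsent(groupName, k -> new ConcurrentHashMap<>())
               .put(listener, "");
    }
}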
From source file:com.vmware.identity.idm.server.RsaAuthSessionCache.java
public void addSession(String tenantName, String sessionId, AuthenticationSession session) {
    String normalizedTenant = tenantName.toLowerCase();
    ConcurrentHashMap<String, AuthenticationSession> sessionCache = _rsaSessionCacheLookup
            .get(normalizedTenant);
    if (sessionCache == null) {
        sessionCache = new ConcurrentHashMap<String, AuthenticationSession>();
        _rsaSessionCacheLookup.put(normalizedTenant, sessionCache);
        logger.debug("Added RSA session cache for : " + tenantName);
    }
    sessionCache.put(sessionId, session);
    logger.debug("Added RSA session to cache. sessionID: " + sessionId);
}
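Unlike the previous example, this one lazily creates the per-tenant cache with a plain get()/put() pair. Two threads adding the first session for the same tenant can both observe null, each create a cache, and the later put() silently replaces the earlier one, dropping its sessions. A sketch of the race-free variant; the field shape is an assumption inferred from the snippet, and the session type is a placeholder:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class SessionCacheSketch<S> {
    // Assumed shape: tenant name -> (session id -> session).
    private final ConcurrentMap<String, ConcurrentMap<String, S>> cacheLookup = new ConcurrentHashMap<>();

    void addSession(String tenantName, String sessionId, S session) {
        // computeIfAbsent closes the check-then-act window: the per-tenant
        // cache is created exactly once, however many threads race here.
        cacheLookup.computeIfAbsent(tenantName.toLowerCase(), k -> new ConcurrentHashMap<>())
                   .put(sessionId, session);
    }
}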
From source file:ca.uhn.fhir.jaxrs.server.util.JaxRsMethodBindings.java
private void addMethodBinding(String key, BaseMethodBinding<?> binding) {
    ConcurrentHashMap<String, BaseMethodBinding<?>> mapByOperation = getMapForOperation(
            binding.getRestOperationType());
    if (mapByOperation.containsKey(key)) {
        throw new IllegalArgumentException("Multiple Search Method Bindings Found : " + mapByOperation.get(key)
                + " -- " + binding.getMethod());
    }
    mapByOperation.put(key, binding);
}
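The containsKey()/throw/put() sequence rejects duplicate keys, but not atomically: a concurrent caller could slip a binding in between the check and the put. Because putIfAbsent() returns the previous value, one call can do both jobs. A minimal sketch with a simplified value type standing in for BaseMethodBinding:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class DuplicateCheckSketch {
    private final ConcurrentMap<String, Object> bindings = new ConcurrentHashMap<>();

    void addBinding(String key, Object binding) {
        Object existing = bindings.putIfAbsent(key, binding);
        if (existing != null) {
            // Exactly one caller wins the slot; all others land here.
            throw new IllegalArgumentException(
                    "Multiple bindings found: " + existing + " -- " + binding);
        }
    }
}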
From source file:org.wso2.carbon.event.input.adaptor.wso2event.WSO2EventEventAdaptorType.java
public String subscribe(InputEventAdaptorMessageConfiguration inputEventAdaptorMessageConfiguration,
        InputEventAdaptorListener inputEventAdaptorListener,
        InputEventAdaptorConfiguration inputEventAdaptorConfiguration, AxisConfiguration axisConfiguration) {
    String subscriptionId = UUID.randomUUID().toString();
    int tenantId = PrivilegedCarbonContext.getThreadLocalCarbonContext().getTenantId(true);
    EventAdaptorConf eventAdaptorConf = new EventAdaptorConf(inputEventAdaptorListener, tenantId,
            PrivilegedCarbonContext.getThreadLocalCarbonContext().getTenantDomain());
    ConcurrentHashMap<InputEventAdaptorMessageConfiguration, ConcurrentHashMap<String, EventAdaptorConf>> tenantSpecificAdaptorListenerMap = inputEventAdaptorListenerMap
            .get(tenantId);
    if (tenantSpecificAdaptorListenerMap == null) {
        tenantSpecificAdaptorListenerMap = new ConcurrentHashMap<InputEventAdaptorMessageConfiguration, ConcurrentHashMap<String, EventAdaptorConf>>();
        inputEventAdaptorListenerMap.put(tenantId, tenantSpecificAdaptorListenerMap);
    }
    if (!tenantSpecificAdaptorListenerMap.containsKey(inputEventAdaptorMessageConfiguration)) {
        ConcurrentHashMap<String, EventAdaptorConf> map = new ConcurrentHashMap<String, WSO2EventEventAdaptorType.EventAdaptorConf>();
        map.put(subscriptionId, eventAdaptorConf);
        tenantSpecificAdaptorListenerMap.put(inputEventAdaptorMessageConfiguration, map);
    } else {
        tenantSpecificAdaptorListenerMap.get(inputEventAdaptorMessageConfiguration).put(subscriptionId,
                eventAdaptorConf);
        ConcurrentHashMap<InputEventAdaptorMessageConfiguration, StreamDefinition> tenantSpecificInputStreamDefinitionMap = inputStreamDefinitionMap
                .get(tenantId);
        if (tenantSpecificInputStreamDefinitionMap != null) {
            StreamDefinition streamDefinition = tenantSpecificInputStreamDefinitionMap
                    .get(inputEventAdaptorMessageConfiguration);
            if (streamDefinition != null) {
                inputEventAdaptorListener.addEventDefinitionCall(streamDefinition);
            }
        }
    }
    return subscriptionId;
}
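Two details are worth noting. First, the original's keySet().contains(k) is just containsKey(k), rewritten as such above. Second, the two branches together are another check-then-act sequence: between the containsKey() and the put() that publishes the new inner map, another thread can publish its own map, and the later put() overwrites it, losing that subscription. merge() is one atomic way to say "publish this single-entry map, or fold my entry into whatever is already there". A sketch with simplified types (String stands in for the configuration and conf classes):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class MergeSketch {
    private final ConcurrentMap<String, ConcurrentMap<String, String>> listeners = new ConcurrentHashMap<>();

    void subscribe(String configuration, String subscriptionId, String conf) {
        ConcurrentMap<String, String> fresh = new ConcurrentHashMap<>();
        fresh.put(subscriptionId, conf);
        // Atomically install the fresh map, or add our entry to the existing one.
        listeners.merge(configuration, fresh, (existing, ignored) -> {
            existing.put(subscriptionId, conf);
            return existing;
        });
    }
}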
From source file:com.kixeye.chassis.support.logging.FlumeLoggerLoader.java
@PostConstruct
public void initialize() {
    multicaster.addApplicationListener(loggingListener);

    ConcurrentHashMap<String, String> flumeConfig = new ConcurrentHashMap<>();

    List<String> sinks = new ArrayList<>();
    for (String server : servers) {
        String sinkName = server.replace(":", "-").replace(".", "_");
        String[] servers = server.split(":", 2);
        if (servers.length == 2) {
            flumeConfig.put(sinkName + ".type", "avro");
            flumeConfig.put(sinkName + ".channels", "channel-" + name);
            flumeConfig.put(sinkName + ".hostname", servers[0]);
            flumeConfig.put(sinkName + ".port", servers[1]);
        } else {
            logger.error("Invalid server format [{}], should be [hostname:port]", server);
        }
        sinks.add(sinkName);
    }

    // force some properties
    flumeConfig.put("channel.type", "file");
    flumeConfig.put("sinks", StringUtils.collectionToDelimitedString(sinks, " "));
    flumeConfig.putIfAbsent("processor.type", serversUsage);
    flumeConfig.put("channel.checkpointDir",
            SystemPropertyUtils.resolvePlaceholders("${user.dir}/flume-data/checkpoint"));
    flumeConfig.put("channel.dataDirs", SystemPropertyUtils.resolvePlaceholders("${user.dir}/flume-data/data"));

    agent = new EmbeddedAgent(name);
    agent.configure(flumeConfig);
    agent.start();

    appender = new FlumeLogAppender(agent, serviceName);
    installFlumeAppender();
}
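The mix of put() and putIfAbsent() here reflects the "force some properties" comment: put() overwrites whatever is there, while putIfAbsent() writes only when the key is missing, so it never clobbers an earlier setting. A minimal demonstration of the difference (key names are illustrative only):

import java.util.concurrent.ConcurrentHashMap;

public class PutVsPutIfAbsent {
    public static void main(String[] args) {
        ConcurrentHashMap<String, String> cfg = new ConcurrentHashMap<>();
        cfg.put("channel.type", "memory");
        cfg.put("channel.type", "file");               // put overwrites: now "file"
        cfg.putIfAbsent("processor.type", "failover"); // key absent, so it is stored
        cfg.putIfAbsent("processor.type", "default");  // key present, so this is a no-op
        System.out.println(cfg.get("channel.type"));   // file
        System.out.println(cfg.get("processor.type")); // failover
    }
}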
From source file:org.apache.oodt.cas.workflow.gui.perspective.view.impl.DefaultTreeView.java
private void addMetadataNodes(DefaultMutableTreeNode metadataNode, Metadata staticMetadata) {
    for (String group : staticMetadata.getGroups()) {
        Object userObj;
        if (staticMetadata.getMetadata(group) != null) {
            ConcurrentHashMap<String, String> map = new ConcurrentHashMap<String, String>();
            map.put(group, StringUtils.join(staticMetadata.getAllMetadata(group), ","));
            userObj = map;
        } else {
            ConcurrentHashMap<String, String> map = new ConcurrentHashMap<String, String>();
            // ConcurrentHashMap.put(group, null) would throw NullPointerException;
            // use an empty-string sentinel for "no value" instead.
            map.put(group, "");
            userObj = map;
        }
        DefaultMutableTreeNode groupNode = new DefaultMutableTreeNode(userObj);
        metadataNode.add(groupNode);
        this.addMetadataNodes(groupNode, staticMetadata.getSubMetadata(group));
    }
}
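The original else branch called map.put(group, null), which can never succeed: unlike HashMap, ConcurrentHashMap forbids null keys and null values, and put() throws NullPointerException on either. The version above substitutes an empty string as a sentinel. A short demonstration of the restriction:

import java.util.concurrent.ConcurrentHashMap;

public class NullValueSketch {
    public static void main(String[] args) {
        ConcurrentHashMap<String, String> map = new ConcurrentHashMap<>();
        try {
            map.put("group", null); // null values are rejected outright
        } catch (NullPointerException expected) {
            System.out.println("null value rejected");
        }
        map.put("group", ""); // a sentinel such as "" works instead
    }
}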
From source file:lockstep.LockstepServer.java
private void clientReceiveSetup(int clientID, DatagramSocket clientUDPSocket, int initialFrameNumber,
        ConcurrentMap<Integer, TransmissionQueue> transmissionFrameQueues) {
    ServerReceivingQueue receivingQueue = new ServerReceivingQueue(initialFrameNumber, clientID,
            executionSemaphore);
    this.receivingQueues.put(clientID, receivingQueue);
    ConcurrentHashMap<Integer, ReceivingQueue> receivingQueueWrapper = new ConcurrentHashMap<>();
    receivingQueueWrapper.put(clientID, receivingQueue);

    LOG.info("Receiver AckQueue(" + clientID + "): " + ackQueues.get(clientID));

    LockstepReceiver receiver = LockstepReceiver.builder().dgramSocket(clientUDPSocket).coreThread(this)
            .receiverID(clientID).receivingQueues(receivingQueueWrapper)
            .transmissionQueues(transmissionFrameQueues).name("Receiver-from-" + clientID)
            .ackSet(ackQueues.get(clientID)).connectionTimeout(connectionTimeout).build();
    receivers.put(clientID, receiver);
    receiver.start();
}
From source file:com.willkara.zeteo.explorers.Explorer.java
public void namer(final File f, ConcurrentHashMap<String, List<BaseFileType>> mapper) {
    String extension = FilenameUtils.getExtension(f.getName());
    BaseFileType bft = new BaseFileType(f);
    if (extension.equals("")) {
        extension = "N/A";
    }
    List<BaseFileType> nameList = mapper.get(extension);
    // if the list does not exist, create it
    if (nameList == null) {
        nameList = new ArrayList<>();
        nameList.add(bft);
        mapper.put(extension, nameList);
    } else {
        // add only if the item is not already in the list
        if (!nameList.contains(bft)) {
            nameList.add(bft);
        }
    }
}
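The original declared mapper as a raw ConcurrentHashMap (parameterized above), and the lookup is not thread-safe as a whole: get(), the null check, and put() can interleave so that two threads each build a list and one list is lost, and ArrayList is not safe for concurrent mutation anyway. If namer() may be called from several threads, computeIfAbsent() plus CopyOnWriteArrayList.addIfAbsent() makes both steps atomic. A sketch under that assumption, with File standing in for BaseFileType:

import java.io.File;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;

class ExtensionIndexSketch {
    private final ConcurrentMap<String, CopyOnWriteArrayList<File>> byExtension = new ConcurrentHashMap<>();

    void add(String extension, File file) {
        // The list is created at most once per extension, and addIfAbsent()
        // performs the contains-then-add check as a single atomic operation.
        byExtension.computeIfAbsent(extension, k -> new CopyOnWriteArrayList<>())
                   .addIfAbsent(file);
    }
}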
From source file:org.apache.hadoop.hdfs.TestBlockRecovery.java
public void testReadBlockRecovered() throws Exception {
    System.out.println("testReadBlockRecovered start");
    final int DATANODE_NUM = 3;
    Configuration conf = new Configuration();
    conf.setInt("heartbeat.recheck.interval", 1000);
    conf.setInt("dfs.heartbeat.interval", 1);

    // create cluster
    DistributedFileSystem dfs = null;
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
        cluster.waitActive();
        dfs = (DistributedFileSystem) cluster.getFileSystem();

        // create a new file
        final String f = "/testReadBlockRecovered";
        final Path fpath = new Path(f);
        FSDataOutputStream out = TestFileCreation.createFile(dfs, fpath, DATANODE_NUM);
        out.write(new byte[512 * 2]);
        out.sync();

        FSDataInputStream in = dfs.open(fpath);
        in.read(new byte[512]);

        // By closing the pipeline connection, force a block recovery
        InjectionHandler.set(new InjectionHandler() {
            int thrownCount = 0;

            @Override
            protected void _processEventIO(InjectionEventI event, Object... args) throws IOException {
                if (event == InjectionEvent.DFSCLIENT_DATASTREAM_AFTER_WAIT && thrownCount < 1) {
                    thrownCount++;
                    MultiDataOutputStream blockStream = (MultiDataOutputStream) args[0];
                    blockStream.close();
                }
            }
        });
        out.write(new byte[512 * 2]);
        out.sync();

        InjectionHandler.set(new InjectionHandler() {
            int thrownCount = 0;

            @SuppressWarnings("unchecked")
            @Override
            protected void _processEventIO(InjectionEventI event, Object... args) throws IOException {
                if (event == InjectionEvent.DFSCLIENT_READBUFFER_BEFORE && thrownCount < 1) {
                    // Fail one readBuffer() and put all nodes in the dead node
                    // list to trigger a refetch of the metadata.
                    thrownCount++;
                    ConcurrentHashMap<DatanodeInfo, DatanodeInfo> deadNodes = (ConcurrentHashMap<DatanodeInfo, DatanodeInfo>) args[0];
                    DFSLocatedBlocks locatedBlocks = (DFSLocatedBlocks) args[1];
                    for (DatanodeInfo dinfo : locatedBlocks.get(0).getLocations()) {
                        deadNodes.put(dinfo, dinfo);
                    }
                    throw new IOException("injected exception");
                } else if (event == InjectionEvent.DFSCLIENT_READBUFFER_AFTER) {
                    // Make sure the correct replica, not an out-of-date one, is
                    // used, by verifying that the right metadata is returned.
                    BlockReader br = (BlockReader) args[0];
                    if (br.blkLenInfoUpdated) {
                        TestCase.assertTrue(br.isBlockFinalized);
                        TestCase.assertEquals(2048, br.getUpdatedBlockLength());
                    }
                }
            }
        });
        out.close();
        in.read(new byte[512]);
        in.close();
    } finally {
        IOUtils.closeStream(dfs);
        if (cluster != null) {
            cluster.shutdown();
        }
    }
    System.out.println("testReadBlockRecovered successful");
}
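The deadNodes map above stores each node as both key and value with put(dinfo, dinfo), using a ConcurrentHashMap to emulate a concurrent set, which was the standard trick before dedicated factories existed. A minimal sketch of the modern equivalent:

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class ConcurrentSetSketch {
    public static void main(String[] args) {
        // Since Java 8: a concurrent Set view backed by a ConcurrentHashMap.
        Set<String> deadNodes = ConcurrentHashMap.newKeySet();
        deadNodes.add("datanode-1");
        System.out.println(deadNodes.contains("datanode-1")); // true
    }
}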