List of usage examples for java.util.concurrent ConcurrentHashMap remove
public V remove(Object key)
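Removes the key (and its corresponding value) from this map, returning the previous value or null if the key was absent. The related ConcurrentMap form remove(key, value) removes the entry only when the key is currently mapped to the given value. A minimal self-contained sketch of both variants (names are illustrative):

import java.util.concurrent.ConcurrentHashMap;

public class RemoveDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        map.put("a", 1);
        map.put("b", 2);

        // remove(key) returns the previous value, or null if the key was absent
        Integer removed = map.remove("a");
        System.out.println(removed);             // 1
        System.out.println(map.remove("a"));     // null: removing an absent key is a harmless no-op

        // remove(key, value) only removes if the current mapping matches
        boolean gone = map.remove("b", 99);      // false: "b" is mapped to 2, not 99
        System.out.println(gone);
        System.out.println(map.remove("b", 2));  // true: mapping matched and was removed
    }
}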
From source file:edu.berkeley.compbio.phyloutils.HugenholtzTaxonomyService.java
private synchronized void reloadNameToProkMSAidMap(HashMultimap<String, Integer> nameToIdsMap) {
    if (nameToProkMSAidFilename != null) {
        Map<String, Set<Integer>> nameToProkMSAidMap;
        try {
            ConcurrentHashMap<String, Integer> nameToUniqueIdMap = new ConcurrentHashMap<String, Integer>();
            nameToProkMSAidMap = StringSetIntMapReader.read(nameToProkMSAidFilename);
            for (Map.Entry<String, Set<Integer>> entry : nameToProkMSAidMap.entrySet()) {
                String key = entry.getKey();
                Set<Integer> valueSet = entry.getValue();
                logger.info("Loaded mapping: " + key + " -> " + DSStringUtils.join(valueSet, ", "));
                // Drop any stale entries for this name before repopulating
                nameToIdsMap.removeAll(key);
                nameToUniqueIdMap.remove(key);
                nameToIdsMap.putAll(key, valueSet);
                // Note: each put overwrites the previous one, so only the last
                // id iterated from valueSet ends up in the unique-id map
                for (Integer id : valueSet) {
                    nameToUniqueIdMap.put(key, id);
                }
            }
            nameToUniqueIdMapStub.put(nameToUniqueIdMap);
        } catch (IOException e) {
            throw new Error(e);
        }
    }
}
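A side note on the example above, as a hedged sketch with hypothetical names: for a single-valued ConcurrentHashMap, put already replaces the old value atomically, so the remove(key)-then-put(key, id) sequence is only needed when valueSet may be empty; calling remove first otherwise opens a brief window where concurrent readers see no mapping at all.

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class RefreshSketch {
    static void refresh(ConcurrentHashMap<String, Integer> nameToUniqueIdMap,
                        String key, Set<Integer> valueSet) {
        if (valueSet.isEmpty()) {
            nameToUniqueIdMap.remove(key);      // no ids left: drop the stale entry
        } else {
            // put replaces atomically; no prior remove(key) is needed, so
            // readers never observe the key as temporarily absent
            for (Integer id : valueSet) {
                nameToUniqueIdMap.put(key, id); // last id wins, as in the original
            }
        }
    }

    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> m = new ConcurrentHashMap<>();
        m.put("E. coli", 42);
        refresh(m, "E. coli", Set.of(7));
        System.out.println(m); // {E. coli=7}
    }
}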
From source file:org.wso2.carbon.event.output.adaptor.jms.JMSEventAdaptorType.java
/**
 * @param outputEventAdaptorMessageConfiguration - topic name to publish messages
 * @param message - an Object[]{Event, EventDefinition}
 * @param outputEventAdaptorConfiguration the {@link OutputEventAdaptorConfiguration} object that will be
 *                                        used to get configuration information
 * @param tenantId tenant id of the calling thread.
 */
public void publish(OutputEventAdaptorMessageConfiguration outputEventAdaptorMessageConfiguration,
        Object message, OutputEventAdaptorConfiguration outputEventAdaptorConfiguration, int tenantId) {
    ConcurrentHashMap<String, PublisherDetails> topicEventSender = publisherMap
            .get(outputEventAdaptorConfiguration.getName());
    if (null == topicEventSender) {
        topicEventSender = new ConcurrentHashMap<String, PublisherDetails>();
        // putIfAbsent returns the existing map if another thread won the race
        if (null != publisherMap.putIfAbsent(outputEventAdaptorConfiguration.getName(), topicEventSender)) {
            topicEventSender = publisherMap.get(outputEventAdaptorConfiguration.getName());
        }
    }
    String topicName = outputEventAdaptorMessageConfiguration.getOutputMessageProperties()
            .get(JMSEventAdaptorConstants.ADAPTOR_JMS_DESTINATION);
    PublisherDetails publisherDetails = topicEventSender.get(topicName);
    Map<String, String> messageConfig = new HashMap<String, String>();
    messageConfig.put(JMSConstants.PARAM_DESTINATION, topicName);
    try {
        if (null == publisherDetails) {
            publisherDetails = initPublisher(outputEventAdaptorConfiguration, topicEventSender, topicName,
                    messageConfig);
        }
        Message jmsMessage = publisherDetails.getJmsMessageSender().convertToJMSMessage(message, messageConfig);
        setJMSTransportHeaders(jmsMessage, outputEventAdaptorMessageConfiguration.getOutputMessageProperties()
                .get(JMSEventAdaptorConstants.ADAPTOR_JMS_HEADER));
        publisherDetails.getJmsMessageSender().send(jmsMessage, messageConfig);
    } catch (RuntimeException e) {
        log.warn("Caught exception: " + e.getMessage() + ". Reinitializing connection and sending...");
        // Evict the broken publisher; remove returns it so it can be closed
        publisherDetails = topicEventSender.remove(topicName);
        if (publisherDetails != null) {
            publisherDetails.getJmsMessageSender().close();
            publisherDetails.getJmsConnectionFactory().stop();
        }
        // TODO If this send also fails, the exception will be thrown up. Will that break the flow?
        // Retry sending after reinitializing connection
        publisherDetails = initPublisher(outputEventAdaptorConfiguration, topicEventSender, topicName,
                messageConfig);
        Message jmsMessage = publisherDetails.getJmsMessageSender().convertToJMSMessage(message, messageConfig);
        setJMSTransportHeaders(jmsMessage, outputEventAdaptorMessageConfiguration.getOutputMessageProperties()
                .get(JMSEventAdaptorConstants.ADAPTOR_JMS_HEADER));
        publisherDetails.getJmsMessageSender().send(jmsMessage, messageConfig);
    }
}
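The catch block is the remove idiom of interest here: evicting the broken publisher hands it back to exactly one caller for cleanup before the connection is rebuilt. A minimal hedged sketch of the same evict-and-rebuild pattern with hypothetical names; on Java 8+, computeIfAbsent also replaces the get/putIfAbsent dance used above:

import java.util.concurrent.ConcurrentHashMap;

public class EvictAndRebuild {
    // Hypothetical stand-in for a cached connection-like resource
    static class Publisher {
        void send(String msg) { /* may throw on a broken connection */ }
        void close() { System.out.println("closed"); }
    }

    static final ConcurrentHashMap<String, Publisher> cache = new ConcurrentHashMap<>();

    static void sendWithRetry(String topic, String msg) {
        Publisher p = cache.computeIfAbsent(topic, t -> new Publisher());
        try {
            p.send(msg);
        } catch (RuntimeException e) {
            // remove returns the evicted publisher, or null if another thread
            // already evicted it, so it is closed at most once here
            Publisher stale = cache.remove(topic);
            if (stale != null) {
                stale.close();
            }
            // rebuild and retry once; a second failure propagates
            cache.computeIfAbsent(topic, t -> new Publisher()).send(msg);
        }
    }

    public static void main(String[] args) {
        sendWithRetry("events", "hello");
        System.out.println(cache.keySet()); // [events]
    }
}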
From source file:com.app.server.EJBDeployer.java
public void undeploy(URL url) {
    try {
        Vector<Object> objs = staticObjsEjbMap.remove(url.toString());
        if (objs != null) {
            objs.clear();
        }
        Vector<EJBContext> ejbContexts = jarEJBMap.get(url.toString());
        if (ejbContexts != null && ejbContexts.size() > 0) {
            for (EJBContext ejbContext : ejbContexts) {
                if (ejbContext != null) {
                    HashMap<String, Class> bindings = ejbContext.getRemoteBindings();
                    Set<String> remoteBindings = bindings.keySet();
                    Iterator<String> remoteBinding = remoteBindings.iterator();
                    while (remoteBinding.hasNext()) {
                        String binding = remoteBinding.next();
                        Object unbindstatefulObjs = ic.lookup("java:/" + binding);
                        if (unbindstatefulObjs instanceof StatefulBeanObject) {
                            Vector<Object> statefulBeanObjects = StatefulBeanObject.statefulSessionBeanObjectMap
                                    .get(url.toString());
                            StatefulBeanObject.statefulSessionBeanObjectMap.remove(url.toString());
                            statefulBeanObjects.clear();
                        }
                        ic.unbind("java:/" + binding);
                        System.out.println("unregistering the class " + bindings.get(binding));
                    }
                }
            }
            // Remove once, after all contexts have been unbound
            jarEJBMap.remove(url.toString());
        }
        ConcurrentHashMap<String, MDBContext> mdbContexts = jarMDBMap.get(url.toString());
        MDBContext mdbContext;
        if (mdbContexts != null) {
            Iterator<String> mdbnames = mdbContexts.keySet().iterator();
            while (mdbnames.hasNext()) {
                String mdbname = mdbnames.next();
                mdbContext = mdbContexts.get(mdbname);
                if (mdbContext.getResourceAdapter() != null) {
                    mdbContext.getResourceAdapter().endpointDeactivation(mdbContext.getMessageEndPointFactory(),
                            mdbContext.getActivationSpec());
                    mdbContext.getResourceAdapter().stop();
                }
                if (mdbContext.getConsumer() != null) {
                    mdbContext.getConsumer().setMessageListener(null);
                    mdbContext.getConsumer().close();
                }
                if (mdbContext.getSession() != null)
                    mdbContext.getSession().close();
                if (mdbContext.getConnection() != null)
                    mdbContext.getConnection().close();
                // Safe while iterating: ConcurrentHashMap iterators are weakly consistent
                mdbContexts.remove(mdbname);
            }
            jarMDBMap.remove(url.toString());
        }
        log.info(url.toString() + " UnDeployed");
    } catch (Exception ex) {
        log.error("Error in undeploying the package " + url, ex);
    }
}
From source file:com.chen.emailsync.SyncManager.java
/**
 * Sent by services indicating that their thread is finished; action depends on the exitStatus
 * of the service.
 *
 * @param svc the service that is finished
 */
static public void done(AbstractSyncService svc) {
    SyncManager ssm = INSTANCE;
    if (ssm == null)
        return;

    synchronized (sSyncLock) {
        long mailboxId = svc.mMailboxId;
        // If we're no longer the syncing thread for the mailbox, just return
        if (!ssm.isRunningInServiceThread(mailboxId)) {
            return;
        }
        ssm.releaseMailbox(mailboxId);
        ssm.setMailboxSyncStatus(mailboxId, EmailContent.SYNC_STATUS_NONE);

        ConcurrentHashMap<Long, SyncError> errorMap = ssm.mSyncErrorMap;
        SyncError syncError = errorMap.get(mailboxId);
        int exitStatus = svc.mExitStatus;
        Mailbox m = Mailbox.restoreMailboxWithId(ssm, mailboxId);
        if (m == null)
            return;

        if (exitStatus != AbstractSyncService.EXIT_LOGIN_FAILURE) {
            long accountId = m.mAccountKey;
            Account account = Account.restoreAccountWithId(ssm, accountId);
            if (account == null)
                return;
            if (ssm.releaseSyncHolds(ssm, AbstractSyncService.EXIT_LOGIN_FAILURE, account)) {
                new AccountServiceProxy(ssm).notifyLoginSucceeded(accountId);
            }
        }

        int lastResult = EmailContent.LAST_SYNC_RESULT_SUCCESS;
        // For error states, whether the error is fatal (won't automatically be retried)
        boolean errorIsFatal = true;
        try {
            switch (exitStatus) {
            case AbstractSyncService.EXIT_DONE:
                if (svc.hasPendingRequests()) {
                    // TODO Handle this case
                }
                errorMap.remove(mailboxId);
                // If we've had a successful sync, clear the shutdown count
                synchronized (SyncManager.class) {
                    sClientConnectionManagerShutdownCount = 0;
                }
                // Leave now; other statuses are errors
                return;
            // I/O errors get retried at increasing intervals
            case AbstractSyncService.EXIT_IO_ERROR:
                if (syncError != null) {
                    syncError.escalate();
                    log(m.mDisplayName + " held for " + (syncError.holdDelay / 1000) + "s");
                    return;
                } else {
                    log(m.mDisplayName + " added to syncErrorMap, hold for 15s");
                }
                lastResult = EmailContent.LAST_SYNC_RESULT_CONNECTION_ERROR;
                errorIsFatal = false;
                break;
            // These errors are not retried automatically
            case AbstractSyncService.EXIT_LOGIN_FAILURE:
                new AccountServiceProxy(ssm).notifyLoginFailed(m.mAccountKey, svc.mExitReason);
                lastResult = EmailContent.LAST_SYNC_RESULT_AUTH_ERROR;
                break;
            case AbstractSyncService.EXIT_SECURITY_FAILURE:
            case AbstractSyncService.EXIT_ACCESS_DENIED:
                lastResult = EmailContent.LAST_SYNC_RESULT_SECURITY_ERROR;
                break;
            case AbstractSyncService.EXIT_EXCEPTION:
                lastResult = EmailContent.LAST_SYNC_RESULT_INTERNAL_ERROR;
                break;
            }
            // Add this box to the error map
            errorMap.put(mailboxId, ssm.new SyncError(exitStatus, errorIsFatal));
        } finally {
            // Always set the last result
            ssm.setMailboxLastSyncResult(mailboxId, lastResult);
            kick("sync completed");
        }
    }
}
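This method and the nearly identical ExchangeService.done below share one bookkeeping idiom: remove(mailboxId) clears the per-mailbox error record on a clean exit, while failures put or escalate it. A minimal hedged sketch of that lifecycle with hypothetical types:

import java.util.concurrent.ConcurrentHashMap;

public class SyncErrorBookkeeping {
    // Hypothetical per-mailbox error record with an escalating hold delay
    static class SyncError {
        long holdDelayMs = 15_000;
        void escalate() { holdDelayMs *= 2; }
    }

    static final ConcurrentHashMap<Long, SyncError> errorMap = new ConcurrentHashMap<>();

    static void onSyncDone(long mailboxId, boolean success) {
        if (success) {
            // a clean sync clears any previous hold; no-op if none was recorded
            errorMap.remove(mailboxId);
            return;
        }
        SyncError err = errorMap.get(mailboxId);
        if (err != null) {
            err.escalate();                       // repeated failures back off further
        } else {
            errorMap.put(mailboxId, new SyncError());
        }
    }

    public static void main(String[] args) {
        onSyncDone(1L, false);
        onSyncDone(1L, false);
        System.out.println(errorMap.get(1L).holdDelayMs); // 30000
        onSyncDone(1L, true);
        System.out.println(errorMap.containsKey(1L));     // false
    }
}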
From source file:com.android.exchange.ExchangeService.java
/**
 * Sent by services indicating that their thread is finished; action depends on the exitStatus
 * of the service.
 *
 * @param svc the service that is finished
 */
static public void done(AbstractSyncService svc) {
    ExchangeService exchangeService = INSTANCE;
    if (exchangeService == null)
        return;

    synchronized (sSyncLock) {
        long mailboxId = svc.mMailboxId;
        // If we're no longer the syncing thread for the mailbox, just return
        if (!exchangeService.isRunningInServiceThread(mailboxId)) {
            return;
        }
        exchangeService.releaseMailbox(mailboxId);

        ConcurrentHashMap<Long, SyncError> errorMap = exchangeService.mSyncErrorMap;
        SyncError syncError = errorMap.get(mailboxId);
        int exitStatus = svc.mExitStatus;
        Mailbox m = Mailbox.restoreMailboxWithId(exchangeService, mailboxId);
        if (m == null)
            return;

        if (exitStatus != AbstractSyncService.EXIT_LOGIN_FAILURE) {
            long accountId = m.mAccountKey;
            Account account = Account.restoreAccountWithId(exchangeService, accountId);
            if (account == null)
                return;
            if (exchangeService.releaseSyncHolds(exchangeService, AbstractSyncService.EXIT_LOGIN_FAILURE,
                    account)) {
                new AccountServiceProxy(exchangeService).notifyLoginSucceeded(accountId);
            }
        }

        switch (exitStatus) {
        case AbstractSyncService.EXIT_DONE:
            if (svc.hasPendingRequests()) {
                // TODO Handle this case
            }
            errorMap.remove(mailboxId);
            // If we've had a successful sync, clear the shutdown count
            synchronized (ExchangeService.class) {
                sClientConnectionManagerShutdownCount = 0;
            }
            break;
        // I/O errors get retried at increasing intervals
        case AbstractSyncService.EXIT_IO_ERROR:
            if (syncError != null) {
                syncError.escalate();
                log(m.mDisplayName + " held for " + syncError.holdDelay + "ms");
            } else {
                errorMap.put(mailboxId, exchangeService.new SyncError(exitStatus, false));
                log(m.mDisplayName + " added to syncErrorMap, hold for 15s");
            }
            break;
        // These errors are not retried automatically
        case AbstractSyncService.EXIT_LOGIN_FAILURE:
            new AccountServiceProxy(exchangeService).notifyLoginFailed(m.mAccountKey);
            // Fall through
        case AbstractSyncService.EXIT_SECURITY_FAILURE:
        case AbstractSyncService.EXIT_ACCESS_DENIED:
        case AbstractSyncService.EXIT_EXCEPTION:
            errorMap.put(mailboxId, exchangeService.new SyncError(exitStatus, true));
            break;
        }
        kick("sync completed");
    }
}
From source file:spade.utility.BitcoinTools.java
public void writeBlocksToCSV(int startIndex, int endIndex) {
    int lastBlockId = -1;
    final BitcoinTools bitcoinTools = new BitcoinTools();

    String pattern = "#.##";
    DecimalFormat decimalFormat = new DecimalFormat(pattern);

    final ConcurrentHashMap<Integer, Block> blockMap = new ConcurrentHashMap<Integer, Block>();
    final AtomicInteger currentBlock = new AtomicInteger(startIndex);
    final int stopIndex = endIndex;
    final int totalThreads = Runtime.getRuntime().availableProcessors();

    class BlockFetcher implements Runnable {
        public void run() {
            while (true) {
                // Cap the number of blocks held in memory (roughly 1 MB each)
                if (blockMap.size() > totalThreads * 5) {
                    try {
                        Thread.sleep(100);
                        continue;
                    } catch (Exception exception) {
                    }
                }
                int blockToFetch = currentBlock.getAndIncrement();
                try {
                    blockMap.put(blockToFetch, bitcoinTools.getBlock(blockToFetch));
                } catch (JSONException exception) {
                    Bitcoin.log(Level.SEVERE, "Block " + blockToFetch + " has invalid json. Redownloading.",
                            exception);
                    try {
                        blockMap.put(blockToFetch, bitcoinTools.getBlock(blockToFetch));
                    } catch (JSONException ex) {
                        Bitcoin.log(Level.SEVERE, "Block " + blockToFetch + " couldn't be included in CSV.", ex);
                    }
                }
                if (blockToFetch >= stopIndex) {
                    break;
                }
            }
        }
    }

    ArrayList<Thread> workers = new ArrayList<Thread>();
    for (int i = 0; i < totalThreads; i++) {
        Thread th = new Thread(new BlockFetcher());
        workers.add(th);
        th.start();
    }

    int percentageCompleted = 0;
    for (int i = startIndex; i < endIndex; i++) {
        try {
            Block block;
            // Busy-waits until the fetcher threads deliver block i
            while (!blockMap.containsKey(i)) {
            }
            block = blockMap.get(i);
            // Consuming the block frees memory and unblocks the fetchers
            blockMap.remove(i);
            lastBlockId = writeBlockToCSV(block, lastBlockId);

            if ((((i - startIndex + 1) * 100) / (endIndex - startIndex)) > percentageCompleted) {
                Runtime rt = Runtime.getRuntime();
                long totalMemory = rt.totalMemory() / 1024 / 1024;
                long freeMemory = rt.freeMemory() / 1024 / 1024;
                long usedMemory = totalMemory - freeMemory;
                System.out.print("| Cores: " + rt.availableProcessors() + " | Threads: " + totalThreads
                        + " | Heap (MB) - total: " + totalMemory + ", %age free: "
                        + (freeMemory * 100) / totalMemory + " | At Block: " + (i - startIndex + 1) + " / "
                        + (endIndex - startIndex) + " | Percentage Completed: " + percentageCompleted + " |\n");
            }
            percentageCompleted = ((i - startIndex + 1) * 100) / (endIndex - startIndex);
        } catch (IOException ex) {
            Bitcoin.log(Level.SEVERE, "Unexpected IOException. Stopping CSV creation.", ex);
            break;
        }
    }

    for (int i = 0; i < totalThreads; i++) {
        try {
            workers.get(i).interrupt();
            workers.get(i).join();
        } catch (InterruptedException exception) {
        }
    }
    System.out.println("\n\ndone with creating CSVs!");
}
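Here the map serves as a reorder buffer between the fetcher threads and the sequential CSV writer: the consumer spins until block i appears, then remove(i) both consumes the entry and releases the memory that the size check in BlockFetcher bounds. A minimal hedged sketch of that producer/consumer shape; note that remove returns the removed value, so the separate get in the original could be folded into it, and sleeping (or a BlockingQueue) is gentler on the CPU than a hot spin:

import java.util.concurrent.ConcurrentHashMap;

public class ReorderBuffer {
    public static void main(String[] args) throws InterruptedException {
        ConcurrentHashMap<Integer, String> buffer = new ConcurrentHashMap<>();

        // Producer thread fills the buffer out of order
        Thread producer = new Thread(() -> {
            for (int i = 4; i >= 0; i--) {
                buffer.put(i, "block-" + i);
            }
        });
        producer.start();

        // Consumer drains in order; remove(i) both consumes the entry and
        // frees its memory, which is what bounds the map in the original
        for (int i = 0; i < 5; i++) {
            while (!buffer.containsKey(i)) {
                Thread.sleep(1); // sleep instead of the original's hot spin
            }
            String block = buffer.remove(i);
            System.out.println(block);
        }
        producer.join();
    }
}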
From source file:be.solidx.hot.test.TestScriptExecutors.java
@SuppressWarnings("rawtypes") private Collection<Long> multiThreadedTest(final Script script, final int max, final ScriptExecutor scriptExecutor) throws InterruptedException { final int iterations = 100; ExecutorService executor = Executors.newFixedThreadPool(8); final ConcurrentHashMap<String, Long> results = new ConcurrentHashMap<String, Long>(); final ConcurrentHashMap<String, Long> avgs = new ConcurrentHashMap<String, Long>(); long benchStart = System.currentTimeMillis(); for (int i = 0; i < iterations; i++) { Runnable runnable = new Runnable() { @SuppressWarnings("unchecked") @Override//from ww w. ja v a 2 s . c o m public void run() { try { long res = 0; Map<String, Object> parameters = new HashMap<String, Object>(); parameters.put("i", new Integer(max)); parameters.put("n", new Integer(0)); //long starting = System.currentTimeMillis(); Object object = scriptExecutor.execute(script, parameters); if (object instanceof Bindings) { Bindings bindings = (Bindings) object; res = (Integer) bindings.get("result"); bindings.clear(); } else if (object instanceof Double) { res = Math.round((Double) object); } else if (object instanceof Long) { res = (long) object; } else res = new Long((Integer) object); long end = System.currentTimeMillis() - avgs.get(this.toString()); results.put(UUID.randomUUID().getLeastSignificantBits() + "", res); avgs.put(this.toString(), end); } catch (Exception e) { e.printStackTrace(); } } }; avgs.put(runnable.toString(), System.currentTimeMillis()); executor.submit(runnable); } while (results.size() < iterations) { Thread.sleep(50); } //Thread.sleep(20000); double sum = 0; for (Long value : avgs.values()) { sum += value; } System.out.println((sum / (double) iterations) + ""); System.out.println("==== Time needed for all requests: " + (System.currentTimeMillis() - benchStart)); results.remove("avg"); executor = null; return results.values(); }
From source file:org.wso2.carbon.event.output.adaptor.mqtt.MQTTEventAdaptorType.java
/**
 * @param outputEventAdaptorMessageConfiguration - topic name to publish messages
 * @param message - an Object[]{Event, EventDefinition}
 * @param outputEventAdaptorConfiguration the {@link OutputEventAdaptorConfiguration} object that will be
 *                                        used to get configuration information
 * @param tenantId tenant id of the calling thread.
 */
public void publish(OutputEventAdaptorMessageConfiguration outputEventAdaptorMessageConfiguration,
        Object message, OutputEventAdaptorConfiguration outputEventAdaptorConfiguration, int tenantId) {
    ConcurrentHashMap<String, ConcurrentHashMap<String, MQTTAdaptorPublisher>> clientIdSpecificEventSenderMap = publisherMap
            .get(outputEventAdaptorConfiguration.getName());
    if (null == clientIdSpecificEventSenderMap) {
        clientIdSpecificEventSenderMap = new ConcurrentHashMap<String, ConcurrentHashMap<String, MQTTAdaptorPublisher>>();
        // putIfAbsent returns the existing map if another thread won the race
        if (null != publisherMap.putIfAbsent(outputEventAdaptorConfiguration.getName(),
                clientIdSpecificEventSenderMap)) {
            clientIdSpecificEventSenderMap = publisherMap.get(outputEventAdaptorConfiguration.getName());
        }
    }

    String clientId = outputEventAdaptorMessageConfiguration.getOutputMessageProperties()
            .get(MQTTEventAdaptorConstants.ADAPTOR_MESSAGE_CLIENTID);
    ConcurrentHashMap<String, MQTTAdaptorPublisher> topicSpecificEventPublisherMap = clientIdSpecificEventSenderMap
            .get(clientId);
    if (null == topicSpecificEventPublisherMap) {
        topicSpecificEventPublisherMap = new ConcurrentHashMap<String, MQTTAdaptorPublisher>();
        if (null != clientIdSpecificEventSenderMap.putIfAbsent(clientId, topicSpecificEventPublisherMap)) {
            topicSpecificEventPublisherMap = clientIdSpecificEventSenderMap.get(clientId);
        }
    }

    String topic = outputEventAdaptorMessageConfiguration.getOutputMessageProperties()
            .get(MQTTEventAdaptorConstants.ADAPTOR_MESSAGE_TOPIC);
    MQTTAdaptorPublisher mqttAdaptorPublisher = topicSpecificEventPublisherMap.get(topic);
    if (mqttAdaptorPublisher == null) {
        MQTTBrokerConnectionConfiguration mqttBrokerConnectionConfiguration = new MQTTBrokerConnectionConfiguration(
                outputEventAdaptorConfiguration.getOutputProperties()
                        .get(MQTTEventAdaptorConstants.ADAPTOR_CONF_URL),
                outputEventAdaptorConfiguration.getOutputProperties()
                        .get(MQTTEventAdaptorConstants.ADAPTOR_CONF_USERNAME),
                outputEventAdaptorConfiguration.getOutputProperties()
                        .get(MQTTEventAdaptorConstants.ADAPTOR_CONF_PASSWORD),
                outputEventAdaptorConfiguration.getOutputProperties()
                        .get(MQTTEventAdaptorConstants.ADAPTOR_CONF_CLEAN_SESSION),
                outputEventAdaptorConfiguration.getOutputProperties()
                        .get(MQTTEventAdaptorConstants.ADAPTOR_CONF_KEEP_ALIVE));
        mqttAdaptorPublisher = new MQTTAdaptorPublisher(mqttBrokerConnectionConfiguration,
                outputEventAdaptorMessageConfiguration.getOutputMessageProperties()
                        .get(MQTTEventAdaptorConstants.ADAPTOR_MESSAGE_TOPIC),
                outputEventAdaptorMessageConfiguration.getOutputMessageProperties()
                        .get(MQTTEventAdaptorConstants.ADAPTOR_MESSAGE_CLIENTID));
        topicSpecificEventPublisherMap.put(topic, mqttAdaptorPublisher);
    }

    String qos = outputEventAdaptorMessageConfiguration.getOutputMessageProperties()
            .get(MQTTEventAdaptorConstants.ADAPTOR_MESSAGE_QOS);
    try {
        if (qos == null) {
            mqttAdaptorPublisher.publish(message.toString());
        } else {
            mqttAdaptorPublisher.publish(Integer.parseInt(qos), message.toString());
        }
    } catch (OutputEventAdaptorEventProcessingException ex) {
        log.error(ex);
        // Evict the failed publisher so the next publish builds a fresh one
        topicSpecificEventPublisherMap.remove(topic);
        throw new OutputEventAdaptorEventProcessingException(ex);
    }
}
From source file:org.apache.geode.internal.cache.DiskInitFile.java
public void cmnClearRegion(long drId,
        ConcurrentHashMap<DiskStoreID, RegionVersionHolder<DiskStoreID>> memberToVersion) {
    DiskRegionView drv = getDiskRegionById(drId);
    if (drv.getClearRVV() == null) {
        this.ifLiveRecordCount++;
    }
    // otherwise the previous clear is cancelled, so don't change liveRecordCount
    this.ifTotalRecordCount++;

    DiskStoreID ownerId = parent.getDiskStoreID();
    // Create a fake RVV for clear purposes; we only need the memberToVersion information.
    // remove both returns the owner's holder and excludes it from the member map.
    RegionVersionHolder<DiskStoreID> ownerExceptions = memberToVersion.remove(ownerId);
    long ownerVersion = ownerExceptions == null ? 0 : ownerExceptions.getVersion();
    RegionVersionVector rvv = new DiskRegionVersionVector(ownerId, memberToVersion, ownerVersion,
            new ConcurrentHashMap(), 0L, false, ownerExceptions);
    drv.setClearRVV(rvv);
}
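remove does double duty in this example: it returns the owner's RegionVersionHolder so ownerVersion can be read from it, and it excludes the owner from the memberToVersion map handed to the new vector. A minimal hedged sketch of that extract-and-exclude step with simplified stand-in types:

import java.util.concurrent.ConcurrentHashMap;

public class ExtractAndExclude {
    public static void main(String[] args) {
        // Simplified stand-in: member id -> version number
        ConcurrentHashMap<String, Long> memberToVersion = new ConcurrentHashMap<>();
        memberToVersion.put("owner", 12L);
        memberToVersion.put("peer-1", 7L);

        // remove both extracts the owner's entry and excludes it from the map
        Long ownerVersion = memberToVersion.remove("owner");
        long version = (ownerVersion == null) ? 0 : ownerVersion;

        System.out.println(version);          // 12
        System.out.println(memberToVersion);  // {peer-1=7}
    }
}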
From source file:diffhunter.Indexer.java
public void Make_Index(Database hashdb, String file_name, String read_gene_location)
        throws FileNotFoundException, IOException {
    Set_Parameters();
    ConcurrentHashMap<String, Map<Integer, Integer>> dic_gene_loc_count = new ConcurrentHashMap<>();
    ArrayList<String> lines_from_bed_file = new ArrayList<>();
    BufferedReader br = new BufferedReader(new FileReader(file_name));

    String line = br.readLine();
    List<String> toks = Arrays.asList(line.split("\t"));
    lines_from_bed_file.add(line);
    String last_Seen_chromosome = toks.get(0).replace("chr", "");
    line = br.readLine();
    lines_from_bed_file.add(line);
    toks = Arrays.asList(line.split("\t"));
    String new_chromosome = toks.get(0).replace("chr", "");

    while (((line = br.readLine()) != null) || lines_from_bed_file.size() > 0) {
        if (line != null) {
            toks = Arrays.asList(line.split("\t"));
            new_chromosome = toks.get(0).replace("chr", "");
        }
        if (line == null || !new_chromosome.equals(last_Seen_chromosome)) {
            // A chromosome boundary (or EOF) was reached: count the buffered reads
            System.out.println("Processing chromosome" + "\t" + last_Seen_chromosome);
            last_Seen_chromosome = new_chromosome;
            lines_from_bed_file.parallelStream().forEach(content -> {
                List<String> inner_toks = Arrays.asList(content.split("\t"));
                // WARNING: the STRAND column index may need to be changed for other BED layouts
                String strand = inner_toks.get(5);
                String chromosome_ = inner_toks.get(0).replace("chr", "");
                if (!dic_Loc_gene.get(strand).containsKey(chromosome_)) {
                    return;
                }
                Integer start_loc = Integer.parseInt(inner_toks.get(1));
                Integer end_loc = Integer.parseInt(inner_toks.get(2));
                List<Interval<String>> res__ = dic_Loc_gene.get(strand).get(chromosome_)
                        .getIntervals(start_loc, end_loc);
                for (int z = 0; z < res__.size(); z++) {
                    dic_gene_loc_count.putIfAbsent(res__.get(z).getData(), new HashMap<>());
                    String gene_symbol = res__.get(z).getData();
                    Integer temp_gene_start_loc = dic_genes.get(gene_symbol).start_loc;
                    Integer temp_gene_end_loc = dic_genes.get(gene_symbol).end_loc;
                    if (start_loc < temp_gene_start_loc) {
                        start_loc = temp_gene_start_loc;
                    }
                    if (end_loc > temp_gene_end_loc) {
                        end_loc = temp_gene_end_loc;
                    }
                    // The per-gene inner map is a plain HashMap, so updates are
                    // guarded by a per-gene lock object
                    synchronized (dic_synchrinzer_genes.get(gene_symbol)) {
                        for (int k = start_loc; k <= end_loc; k++) {
                            Integer value_inside = dic_gene_loc_count.get(gene_symbol).get(k);
                            dic_gene_loc_count.get(gene_symbol).put(k,
                                    value_inside == null ? 1 : (value_inside + 1));
                        }
                    }
                }
            });

            // Flush each gene's counts to BerkeleyDB, then remove it from the
            // in-memory map so the heap holds at most one chromosome at a time
            ArrayList<String> keys_ = new ArrayList<>(dic_gene_loc_count.keySet());
            ArrayList<String> alt_keys = new ArrayList<>();
            for (int i = 0; i < keys_.size(); i++) {
                LinkedHashMap<Integer, Integer> tmep_map = new LinkedHashMap<>();
                dic_gene_loc_count.get(keys_.get(i)).entrySet().stream().filter(p -> p.getValue() >= 2)
                        .sorted(Comparator.comparing(E -> E.getKey()))
                        .forEach((entry) -> tmep_map.put(entry.getKey(), entry.getValue()));
                if (tmep_map.isEmpty()) {
                    // No location was covered at least twice: drop the gene entirely
                    dic_gene_loc_count.remove(keys_.get(i));
                    continue;
                }
                hashdb.put(null, BerkeleyDB_Box.Get_BDB(keys_.get(i)),
                        BerkeleyDB_Box.Get_BDB_Dictionary(tmep_map));
                alt_keys.add(keys_.get(i));
                dic_gene_loc_count.remove(keys_.get(i));
            }
            hashdb.sync();

            lines_from_bed_file.clear();
            if (line != null) {
                lines_from_bed_file.add(line);
            }
            Path p = Paths.get(file_name);
            file_name = p.getFileName().toString();
            BufferedWriter output = new BufferedWriter(new FileWriter(
                    (Paths.get(read_gene_location, FilenameUtils.removeExtension(file_name) + ".txt")
                            .toString()),
                    true));
            for (String alt_key : alt_keys) {
                output.append(alt_key);
                output.newLine();
            }
            output.close();
        } else if (new_chromosome.equals(last_Seen_chromosome)) {
            lines_from_bed_file.add(line);
        }
    }
    br.close();
    hashdb.sync();
    hashdb.close();
}
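The flush loop persists each gene's filtered counts to BerkeleyDB and immediately calls remove, so the in-memory map never holds more than roughly one chromosome's worth of data. A minimal hedged sketch of that flush-then-remove step with the storage call stubbed out; iterating keySet() while removing is safe because ConcurrentHashMap iterators are weakly consistent:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class FlushThenRemove {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Map<Integer, Integer>> counts = new ConcurrentHashMap<>();
        counts.put("geneA", new HashMap<>(Map.of(100, 3, 101, 1)));
        counts.put("geneB", new HashMap<>(Map.of(200, 5)));

        for (String gene : counts.keySet()) {
            persist(gene, counts.get(gene)); // stub for the BerkeleyDB put
            // removing right after the write keeps at most one batch of
            // counts on the heap, as in the original loop
            counts.remove(gene);
        }
        System.out.println(counts.isEmpty()); // true
    }

    static void persist(String gene, Map<Integer, Integer> locCounts) {
        System.out.println("persisted " + gene + " -> " + locCounts);
    }
}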