List of usage examples for java.util TreeMap putAll
public void putAll(Map<? extends K, ? extends V> map)
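Before the collected examples, a minimal self-contained sketch of the call itself (the map contents are made up for illustration): putAll copies every mapping from the source map, and the TreeMap keeps the entries sorted by key.

import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;

public class TreeMapPutAllDemo {
    public static void main(String[] args) {
        Map<String, Integer> unsorted = new HashMap<String, Integer>();
        unsorted.put("banana", 2);
        unsorted.put("cherry", 3);
        unsorted.put("apple", 1);

        // putAll copies every mapping; the TreeMap re-sorts them by key.
        TreeMap<String, Integer> sorted = new TreeMap<String, Integer>();
        sorted.putAll(unsorted);

        System.out.println(sorted); // prints {apple=1, banana=2, cherry=3}
    }
}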
From source file:com.insightml.utils.Collections.java
public static <T, N extends Number> Map<T, N> sort(final Map<T, N> map, final SortOrder order) {
    final TreeMap<T, N> sorted = new TreeMap<>((o1, o2) -> {
        final double o1Value = map.get(o1).doubleValue();
        final double o2Value = map.get(o2).doubleValue();
        if (o1Value == o2Value) {
            if (o1 instanceof Comparable) {
                return ((Comparable<T>) o1).compareTo(o2);
            }
            // TODO: This can be very unexpected!!!
            return Integer.compare(o1.hashCode(), o2.hashCode());
        }
        if (order == SortOrder.DESCENDING) {
            return o1Value < o2Value ? 1 : -1;
        }
        return o1Value > o2Value ? 1 : -1;
    });
    sorted.putAll(map);
    return sorted;
}
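A hypothetical caller of the helper above (the scores are invented, and SortOrder is assumed here to be javax.swing.SortOrder; the project may define its own enum with the same constants):

import java.util.HashMap;
import java.util.Map;
import javax.swing.SortOrder;

public class SortByValueDemo {
    public static void main(String[] args) {
        Map<String, Integer> scores = new HashMap<String, Integer>();
        scores.put("a", 10);
        scores.put("b", 30);
        scores.put("c", 20);

        // The helper builds a TreeMap whose comparator orders keys by their
        // values in the source map, then putAll copies the entries across.
        Map<String, Integer> byScore = com.insightml.utils.Collections.sort(scores, SortOrder.DESCENDING);

        System.out.println(byScore); // prints {b=30, c=20, a=10}
    }
}

Note that the comparator consults the source map on every comparison, so the resulting TreeMap is only safe to use while that source map is unchanged; mutating it afterwards would leave the ordering stale.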
From source file:net.spfbl.whois.SubnetIPv6.java
private static synchronized TreeMap<String, SubnetIPv6> getMap() {
    TreeMap<String, SubnetIPv6> map = new TreeMap<String, SubnetIPv6>();
    map.putAll(MAP);
    return map;
}
From source file:com.p2p.peercds.common.Torrent.java
/**
 * Helper method to create a {@link Torrent} object for a set of files.
 *
 * <p>
 * Hash the given files to create the multi-file {@link Torrent} object
 * representing the Torrent meta-info about them, needed for announcing
 * and/or sharing these files. Since we created the torrent, we're
 * considering we'll be a full initial seeder for it.
 * </p>
 *
 * @param parent The parent directory or location of the torrent files,
 * also used as the torrent's name.
 * @param files The files to add into this torrent.
 * @param announce The announce URI that will be used for this torrent.
 * @param announceList The announce URIs organized as tiers that will
 * be used for this torrent.
 * @param createdBy The creator's name, or any string identifying the
 * torrent's creator.
 */
private static Torrent create(File parent, List<File> files, URI announce, List<List<URI>> announceList,
        String createdBy) throws InterruptedException, IOException {
    MessageDigest md = DigestUtils.getSha1Digest();
    if (files == null || files.isEmpty()) {
        logger.info("Creating single-file torrent for {}...", parent.getName());
    } else {
        logger.info("Creating {}-file torrent {}...", files.size(), parent.getName());
    }

    Map<String, BEValue> torrent = new HashMap<String, BEValue>();

    if (announce != null) {
        torrent.put("announce", new BEValue(announce.toString()));
    }

    if (announceList != null) {
        List<BEValue> tiers = new LinkedList<BEValue>();
        for (List<URI> trackers : announceList) {
            List<BEValue> tierInfo = new LinkedList<BEValue>();
            for (URI trackerURI : trackers) {
                tierInfo.add(new BEValue(trackerURI.toString()));
            }
            tiers.add(new BEValue(tierInfo));
        }
        torrent.put("announce-list", new BEValue(tiers));
    }

    torrent.put("creation date", new BEValue(new Date().getTime() / 1000));
    torrent.put("created by", new BEValue(createdBy));

    Map<String, BEValue> info = new HashMap<String, BEValue>();
    info.put("name", new BEValue(parent.getName()));
    info.put("piece length", new BEValue(PIECE_LENGTH));

    long size = 0;
    int numFiles = 0;
    if (files == null || files.isEmpty()) {
        info.put("length", new BEValue(parent.length()));
        size = parent.length();
        numFiles++;
        info.put("pieces", new BEValue(Torrent.hashFile(parent), BYTE_ENCODING));
    } else {
        List<BEValue> fileInfo = new LinkedList<BEValue>();
        List<File> updatedFilesList = new ArrayList<File>();
        updateFileInfo(files, parent, parent, fileInfo, updatedFilesList);
        logger.info("Number of files in this multi-file torrent: " + updatedFilesList.size());
        for (File file : updatedFilesList)
            size = size + file.length();
        logger.info("Number of bytes in this multi-file torrent: " + size);
        numFiles = updatedFilesList.size();
        info.put("files", new BEValue(fileInfo));
        BEValue piecesValue = new BEValue(Torrent.hashFiles(updatedFilesList), BYTE_ENCODING);
        info.put("pieces", piecesValue);
    }

    // Copy through a TreeMap, presumably to get the sorted key order that
    // bencoded dictionaries require; note that the copy back into a HashMap
    // does not actually preserve that ordering.
    TreeMap<String, BEValue> sortInfoMap = new TreeMap<String, BEValue>();
    sortInfoMap.putAll(info);
    info = new HashMap<String, BEValue>();
    info.putAll(sortInfoMap);
    sortInfoMap = null;

    torrent.put("info", new BEValue(info));

    md.update(torrent.get("info").getMap().get("pieces").getBytes());
    byte[] digest = md.digest();

    // Shift every digest byte into the printable ASCII range.
    byte[] transformedDigest = new byte[digest.length];
    int i = 0;
    for (byte bt : digest) {
        short s = (short) (bt & 0xFF);
        if (s >= 127) {
            s = (short) (s - 127);
            if (s < 32)
                s = (short) (s + 32);
        } else if (s < 32)
            s = (short) (s + 32);
        transformedDigest[i++] = (byte) s;
    }

    for (byte b : transformedDigest)
        logger.debug(Byte.toString(b));

    logger.info("digest str " + new String(digest));
    logger.info("transformed digest str " + new String(transformedDigest));
    logger.info("Replacing special characters in the cloud key with regular characters");

    String cloudKeyForFile = new String(transformedDigest, "UTF-8");
    for (String nsChar : SPECIAL_TO_NSP_MAP.keySet()) {
        if (cloudKeyForFile.contains(nsChar))
            cloudKeyForFile = cloudKeyForFile.replaceAll(java.util.regex.Pattern.quote(nsChar),
                    SPECIAL_TO_NSP_MAP.get(nsChar));
    }
    logger.info("Sanitized cloud Key: " + cloudKeyForFile);

    CloudUploadProgressListener listener = new CloudUploadProgressListener(size, numFiles);
    boolean success = false;
    try {
        success = CloudHelper.uploadTorrent(BUCKET_NAME, cloudKeyForFile.trim(), parent, listener);
    } catch (S3FetchException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

    if (!success)
        logger.info("File won't be uploaded to cloud as file is present in swarm and uploaded to cloud");
    else
        logger.info("New file has been introduced in the swarm and has been uploaded to the cloud");

    torrent.put(CLOUD_KEY, new BEValue(cloudKeyForFile));

    // Same TreeMap round trip for the top-level torrent dictionary.
    TreeMap<String, BEValue> sortTorrentMap = new TreeMap<String, BEValue>();
    sortTorrentMap.putAll(torrent);
    torrent = new HashMap<String, BEValue>();
    torrent.putAll(sortTorrentMap);
    sortTorrentMap = null;

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    BEncoder.bencode(new BEValue(torrent), baos);
    return new Torrent(baos.toByteArray(), true);
}
From source file:main.java.workload.WorkloadExecutor.java
public static Transaction streamOneTransaction(Database db, Cluster cluster, Workload wrl, WorkloadBatch wb) {
    Set<Integer> trTupleSet = null;
    Set<Integer> trDataSet = null;

    int min = 0, i = 0, n = 0, tr_id = 0;
    int type = trDistribution.sample();

    Transaction tr = null;
    if (!wb.getTrMap().containsKey(type))
        wb.getTrMap().put(type, new TreeMap<Integer, Transaction>());

    // new
    double rand_val = Global.rand.nextDouble();
    int toBeRemovedKey = -1;

    /**
     * Implementing the new Workload Generation model
     * (Finalised as per November 20, 2014 and later improved on February 13-14, 2015)
     */
    ++Global.global_trCount;

    // Transaction birth
    if (wb.getTrMap().get(type).isEmpty() || rand_val <= Global.percentageChangeInWorkload) {
        trTupleSet = wrl.getTrTupleSet(db, type);
        trDataSet = Workload.getTrDataSet(db, cluster, wb, trTupleSet);

        ++Global.global_trSeq;
        tr = new Transaction(Global.global_trSeq, type, trDataSet, Sim.time());

        // Add the incident transaction id
        wb.addIncidentTrId(cluster, trDataSet, Global.global_trSeq);

        // Add the newly created Transaction in the Workload Transaction map
        wb.getTrMap().get(type).put(tr.getTr_id(), tr);

        // New improvements------------------------------------------------------------------------------
        double initial_period = (double) WorkloadExecutor.uNmax; // initialisation
        tr.setTr_period(initial_period);

        perfm.Period.put(tr.getTr_id(), initial_period);
        Time.put(tr.getTr_id(), Sim.time());

    // Transaction repetition and retention of old transaction
    } else {
        ArrayList<Integer> idx2_id = new ArrayList<Integer>();
        ArrayList<Integer> idx_value = new ArrayList<Integer>();
        ArrayList<Integer> uT = new ArrayList<Integer>();

        // Copy idx into a TreeMap ordered by value via the custom comparator.
        TreeMap<Integer, Integer> idx2 = new TreeMap<Integer, Integer>(new ValueComparator<Integer>(idx));
        idx2.putAll(idx);

        min = Math.min(idx.size(), uNmax); // uNmax or uNmaxT

        i = 0;
        Iterator<Entry<Integer, Integer>> itr = idx2.entrySet().iterator();
        while (i < min) {
            idx2_id.add(itr.next().getKey());
            ++i;
        }

        // Deleting old Transactions
        if (idx2.size() > min) {
            toBeRemovedKey = idx2.lastKey();

            Transaction tr_old = wb.getTransaction(toBeRemovedKey);
            tr_old.calculateSpans(cluster);

            wb.removeTransaction(cluster, tr_old);
            idx.remove(toBeRemovedKey);
        }

        i = 0;
        while (i < idx2_id.size()) {
            idx_value.add(idx.get(idx2_id.get(i)));
            ++i;
        }

        i = 0;
        while (i < idx_value.size()) {
            uT.add(T.get(idx_value.get(i) - 1));
            ++i;
        }

        if (uT.size() == 1)
            n = 0;
        else
            n = Global.rand.nextInt(uT.size());

        tr_id = uT.get(n);
        tr = wb.getTransaction(tr_id);
        tr.setProcessed(false);

        // New improvements------------------------------------------------------------------------------
        double prev_period = perfm.Period.get(tr.getTr_id());
        double prev_time = Time.get(tr.getTr_id());

        double new_period = Global.expAvgWt * prev_period + (1 - Global.expAvgWt) * (Sim.time() - prev_time);

        tr.setTr_period(new_period);

        perfm.Period.remove(tr.getTr_id());
        perfm.Period.put(tr.getTr_id(), new_period);

        Time.remove(tr.getTr_id());
        Time.put(tr.getTr_id(), Sim.time());
    } // end-if-else()

    // Calculate latest Span
    tr.calculateSpans(cluster);

    // Update Idt
    tr.calculateIdt();

    if (perfm.Span.containsKey(tr.getTr_id()))
        perfm.Span.remove(tr.getTr_id());

    perfm.Span.put(tr.getTr_id(), tr.getTr_serverSpanCost());

    // Create an index entry for each newly created Transaction
    idx.put(tr.getTr_id(), Global.global_trCount);
    T.add(tr.getTr_id());

    // New improvements------------------------------------------------------------------------------
    if (Global.global_trCount > Global.observationWindow) {
        _i = Global.global_trCount; // _i ~ Sim.time()
        _W = Global.observationWindow; // _W ~ time

        HashSet<Integer> unq = new HashSet<Integer>(T);
        for (int _n = (_i - _W); _n < _i; _n++) {
            unq.add(T.get(_n));
        }

        // Captures the number of total unique transaction for this observation window
        perfm.Unqlen.put((_i - _W), unq.size());

        // Calculate the impact of distributed transaction per transaction basis
        double sum_of_span_by_period = 0.0;
        sum_of_one_by_period = 0.0;

        Iterator<Integer> unq_itr = unq.iterator();
        while (unq_itr.hasNext()) {
            int unq_T = unq_itr.next();

            int span = perfm.Span.get(unq_T);
            double period = perfm.Period.get(unq_T);

            double span_by_period = span / period; // Span weighted by frequency = 1/Period (f=1/t) per unit time
            double one_by_period = 1 / period; // Frequency = 1/Period (f=1/t) per unit time (i.e. 1 second)

            sum_of_span_by_period += span_by_period;
            sum_of_one_by_period += one_by_period;
        }

        double i_dt = (sum_of_span_by_period) / (Global.servers * sum_of_one_by_period);
        perfm.I_Dt.put((_i - _W), i_dt);

        if (Double.isNaN(i_dt))
            currentIDt = 0;
        else
            currentIDt = i_dt;

        // Reset repartitioning cooling off period
        if (WorkloadExecutor.repartitioningCoolingOff
                && Sim.time() >= WorkloadExecutor.RepartitioningCoolingOffPeriod) {

            WorkloadExecutor.repartitioningCoolingOff = false;

            Global.LOGGER.info("-----------------------------------------------------------------------------");
            Global.LOGGER.info("Simulation time: " + Sim.time() / (double) Global.observationWindow + " hrs");
            Global.LOGGER.info("Repartitioning cooling off period ends.");
            Global.LOGGER.info("System will now check whether another repartitioning is required at this moment.");
            Global.LOGGER.info("Current IDt: " + currentIDt);
            Global.LOGGER.info("User defined IDt threshold: " + Global.userDefinedIDtThreshold);

            if (currentIDt < Global.userDefinedIDtThreshold) {
                Global.LOGGER.info("Repartitioning is not required at this moment.");

                // This is to disable on-demand atomic repartitioning for A-ARHC only
                if (Global.adaptive) {
                    Global.LOGGER.info("Disabling on-demand atomic repartitioning for A-ARHC ...");
                    WorkloadExecutor.isAdaptive = false;
                }

                Global.LOGGER.info("Continuing transaction processing ...");
            }
        }

        perfm.time.put((_i - _W), Sim.time());
    }

    // Add a hyperedge to workload hypergraph
    wb.addHGraphEdge(cluster, tr);

    // Collect transactional streams if data stream mining is enabled
    if (Global.streamCollection)
        Global.dsm.collectStream(cluster, tr);

    return tr;
}
From source file:net.spfbl.whois.SubnetIPv4.java
private static synchronized TreeMap<Long, SubnetIPv4> getMap() {
    TreeMap<Long, SubnetIPv4> map = new TreeMap<Long, SubnetIPv4>();
    map.putAll(MAP);
    return map;
}
From source file:com.kegare.caveworld.util.CaveConfiguration.java
private void setNewCategoriesMap() {
    try {
        Field field = Configuration.class.getDeclaredField("categories");
        field.setAccessible(true);

        TreeMap<String, ConfigCategory> treeMap = (TreeMap) field.get(this);
        TreeMap<String, ConfigCategory> newMap = Maps.newTreeMap(this);
        newMap.putAll(treeMap);

        field.set(this, newMap);
    } catch (Throwable e) {
    }
}
From source file:com.alibaba.rocketmq.tools.command.broker.BrokerStatsSubCommand.java
@Override
public void execute(CommandLine commandLine, Options options) {
    DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt();
    defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis()));

    try {
        defaultMQAdminExt.start();

        String brokerAddr = commandLine.getOptionValue('b').trim();
        KVTable kvTable = defaultMQAdminExt.fetchBrokerRuntimeStats(brokerAddr);

        // Sort the runtime stats by key before printing.
        TreeMap<String, String> tmp = new TreeMap<String, String>();
        tmp.putAll(kvTable.getTable());

        Iterator<Entry<String, String>> it = tmp.entrySet().iterator();
        while (it.hasNext()) {
            Entry<String, String> next = it.next();
            System.out.printf("%-32s: %s\n", next.getKey(), next.getValue());
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        defaultMQAdminExt.shutdown();
    }
}
From source file:com.alibaba.rocketmq.tools.command.broker.BrokerStatusSubCommand.java
@Override
public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) {
    DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt(rpcHook);
    defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis()));

    try {
        defaultMQAdminExt.start();

        String brokerAddr = commandLine.getOptionValue('b').trim();
        KVTable kvTable = defaultMQAdminExt.fetchBrokerRuntimeStats(brokerAddr);

        // Sort the runtime stats by key before printing.
        TreeMap<String, String> tmp = new TreeMap<String, String>();
        tmp.putAll(kvTable.getTable());

        Iterator<Entry<String, String>> it = tmp.entrySet().iterator();
        while (it.hasNext()) {
            Entry<String, String> next = it.next();
            System.out.printf("%-32s: %s\n", next.getKey(), next.getValue());
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        defaultMQAdminExt.shutdown();
    }
}
From source file:org.apache.rocketmq.tools.command.broker.BrokerStatusSubCommand.java
public void printBrokerRuntimeStats(final DefaultMQAdminExt defaultMQAdminExt, final String brokerAddr,
        final boolean printBroker) throws InterruptedException, MQBrokerException, RemotingTimeoutException,
        RemotingSendRequestException, RemotingConnectException {
    KVTable kvTable = defaultMQAdminExt.fetchBrokerRuntimeStats(brokerAddr);

    TreeMap<String, String> tmp = new TreeMap<String, String>();
    tmp.putAll(kvTable.getTable());

    Iterator<Entry<String, String>> it = tmp.entrySet().iterator();
    while (it.hasNext()) {
        Entry<String, String> next = it.next();
        if (printBroker) {
            System.out.printf("%-24s %-32s: %s%n", brokerAddr, next.getKey(), next.getValue());
        } else {
            System.out.printf("%-32s: %s%n", next.getKey(), next.getValue());
        }
    }
}
From source file:eu.freme.broker.tools.internationalization.BodySwappingServletRequest.java
@Override
public Map<String, String[]> getParameterMap() {
    TreeMap<String, String[]> map = new TreeMap<String, String[]>();
    map.putAll(super.getParameterMap());

    map.put("informat", new String[] { "turtle" });
    map.remove("input");

    if (changeResponse) {
        map.put("outformat", new String[] { "turtle" });
    }

    return Collections.unmodifiableMap(map);
}