Usage examples for java.util.TreeMap.size()

Signature: public int size()

size() returns the number of key-value mappings in the map. The examples below come from open-source projects; the originating source file is named above each snippet.
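Before the project examples, here is a minimal, self-contained sketch of size() behavior (the class name and values are illustrative only, not taken from any project below):

import java.util.TreeMap;

public class TreeMapSizeDemo {
    public static void main(String[] args) {
        TreeMap<Integer, String> map = new TreeMap<Integer, String>();
        System.out.println(map.size());    // 0 -- a new TreeMap is empty

        map.put(1, "one");
        map.put(2, "two");
        map.put(2, "TWO");                 // re-putting key 2 replaces the value
        System.out.println(map.size());    // 2 -- duplicate keys do not grow the map

        map.remove(1);
        System.out.println(map.size());    // 1

        // When only emptiness matters, isEmpty() reads better than size() == 0.
        System.out.println(map.isEmpty()); // false
    }
}

Note that several of the snippets below use the size() == 0 pattern instead of isEmpty(); both are equivalent for TreeMap.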
From source file: org.apache.zeppelin.interpreter.InterpreterResult.java

private Type getType(String msg) {
    if (msg == null) {
        return Type.TEXT;
    }
    Type[] types = type.values();
    TreeMap<Integer, Type> typesLastIndexInMsg = buildIndexMap(msg);
    if (typesLastIndexInMsg.size() == 0) {
        return Type.TEXT;
    } else {
        Map.Entry<Integer, Type> lastType = typesLastIndexInMsg.firstEntry();
        return lastType.getValue();
    }
}
From source file: org.apache.zeppelin.interpreter.InterpreterResult.java

/**
 * Magic is like %html %text.
 *
 * @param msg
 * @return
 */
private String getData(String msg) {
    if (msg == null) {
        return null;
    }
    Type[] types = type.values();
    TreeMap<Integer, Type> typesLastIndexInMsg = buildIndexMap(msg);
    if (typesLastIndexInMsg.size() == 0) {
        return msg;
    } else {
        Map.Entry<Integer, Type> lastType = typesLastIndexInMsg.firstEntry();
        // add 1 for the % char
        int magicLength = lastType.getValue().name().length() + 1;
        // 1 for the last \n or space after magic
        int subStringPos = magicLength + lastType.getKey() + 1;
        return msg.substring(subStringPos);
    }
}
From source file: org.commoncrawl.util.NodeAffinityMaskBuilder.java

public static String buildNodeAffinityMask(FileSystem fileSystem, Path partFileDirectory,
        Map<Integer, String> optionalRootMapHint, Set<String> excludedNodeList, int maxReducersPerNode,
        boolean skipBalance) throws IOException {

    TreeMap<Integer, String> partitionToNodeMap = new TreeMap<Integer, String>();
    FileStatus paths[] = fileSystem.globStatus(new Path(partFileDirectory, "part-*"));

    if (paths.length == 0) {
        throw new IOException("Invalid source Path:" + partFileDirectory);
    }

    Multimap<String, Integer> inverseMap = TreeMultimap.create();
    Map<Integer, List<String>> paritionToDesiredCandidateList = new TreeMap<Integer, List<String>>();

    // iterate paths
    for (FileStatus path : paths) {

        String currentFile = path.getPath().getName();

        int partitionNumber;
        try {
            if (currentFile.startsWith("part-r")) {
                partitionNumber = NUMBER_FORMAT.parse(currentFile.substring("part-r-".length())).intValue();
            } else {
                partitionNumber = NUMBER_FORMAT.parse(currentFile.substring("part-".length())).intValue();
            }
        } catch (ParseException e) {
            throw new IOException("Invalid Part Name Encountered:" + currentFile);
        }

        // get block locations
        BlockLocation locations[] = fileSystem.getFileBlockLocations(path, 0, path.getLen());

        // if the passed-in root map is not null, validate that all blocks for the
        // current file reside on the desired node
        if (optionalRootMapHint != null) {
            // the host all blocks should reside on
            String desiredHost = optionalRootMapHint.get(partitionNumber);

            ArrayList<String> misplacedBlocks = new ArrayList<String>();
            // ok walk all blocks
            for (BlockLocation location : locations) {
                boolean found = false;
                for (String host : location.getHosts()) {
                    if (host.compareTo(desiredHost) == 0) {
                        found = true;
                        break;
                    }
                }
                if (!found) {
                    misplacedBlocks.add("Block At:" + location.getOffset() + " for File:" + path.getPath()
                            + " did not contain desired location:" + desiredHost);
                }
            }
            // ok pass test at a certain threshold
            if (misplacedBlocks.size() != 0
                    && ((float) misplacedBlocks.size() / (float) locations.length) > .50f) {
                LOG.error("Misplaced Blocks Exceed Threshold");
                for (String misplacedBlock : misplacedBlocks) {
                    LOG.error(misplacedBlock);
                }
                // TODO: SKIP THIS STEP FOR NOW ???
                //throw new IOException("Misplaced Blocks Exceed Threshold!");
            }
            partitionToNodeMap.put(partitionNumber, desiredHost);
        } else {
            if (excludedNodeList != null) {
                // LOG.info("Excluded Node List is:" + Lists.newArrayList(excludedNodeList).toString());
            }
            // ok ask file system for block locations
            TreeMap<String, Integer> nodeToBlockCount = new TreeMap<String, Integer>();

            for (BlockLocation location : locations) {
                for (String host : location.getHosts()) {
                    if (excludedNodeList == null || !excludedNodeList.contains(host)) {
                        Integer nodeHitCount = nodeToBlockCount.get(host);
                        if (nodeHitCount == null) {
                            nodeToBlockCount.put(host, 1);
                        } else {
                            nodeToBlockCount.put(host, nodeHitCount.intValue() + 1);
                        }
                    }
                }
            }

            if (nodeToBlockCount.size() == 0) {
                throw new IOException("No valid nodes found for partition number:" + path);
            }

            Map.Entry<String, Integer> entries[] = nodeToBlockCount.entrySet().toArray(new Map.Entry[0]);
            Arrays.sort(entries, new Comparator<Map.Entry<String, Integer>>() {
                @Override
                public int compare(Entry<String, Integer> o1, Entry<String, Integer> o2) {
                    return o1.getValue().intValue() < o2.getValue().intValue() ? 1
                            : o1.getValue().intValue() == o2.getValue().intValue() ? 0 : -1;
                }
            });

            // build a list of nodes by priority ...
            List<String> nodesByPriority = Lists.transform(Lists.newArrayList(entries),
                    new Function<Map.Entry<String, Integer>, String>() {
                        @Override
                        public String apply(Entry<String, Integer> entry) {
                            return entry.getKey();
                        }
                    });

            // stash it away ...
            paritionToDesiredCandidateList.put(partitionNumber, nodesByPriority);
            //LOG.info("Mapping Partition:" + partitionNumber + " To Node:" + entries[0].getKey()
            //    + " BlockCount" + entries[0].getValue().intValue());
            partitionToNodeMap.put(partitionNumber, entries[0].getKey());
            // store the inverse mapping ...
            inverseMap.put(entries[0].getKey(), partitionNumber);
        }
    }

    if (skipBalance) {
        // walk partition map to make sure everything is assigned ...
        /*
        for (String node : inverseMap.keys()) {
            if (inverseMap.get(node).size() > maxReducersPerNode) {
                throw new IOException("Node:" + node + " has too many partitions! ("
                    + inverseMap.get(node).size());
            }
        }
        */
    }

    // now if optional root map hint is null
    if (optionalRootMapHint == null && !skipBalance) {
        // figure out if there is an imbalance
        int avgRegionsPerNode = (int) Math.floor((float) paths.length / (float) inverseMap.keySet().size());
        int maxRegionsPerNode = (int) Math.ceil((float) paths.length / (float) inverseMap.keySet().size());

        LOG.info("Attempting to ideally balance nodes. Avg paritions per node:" + avgRegionsPerNode);

        // two passes ..
        for (int pass = 0; pass < 2; ++pass) {
            LOG.info("Pass:" + pass);
            // iterate nodes ...
            for (String node : ImmutableSet.copyOf(inverseMap.keySet())) {
                // get partitions in map
                Collection<Integer> paritions = ImmutableList.copyOf(inverseMap.get(node));
                // if partition count exceeds desired average ...
                if (paritions.size() > maxRegionsPerNode) {
                    // first pass, assign based on preference
                    if (pass == 0) {
                        LOG.info("Node:" + node + " parition count:" + paritions.size() + " exceeds avg:"
                                + avgRegionsPerNode);
                        // walk partitions trying to find a node to discard the partition to
                        for (int partition : paritions) {
                            for (String candidate : paritionToDesiredCandidateList.get(partition)) {
                                if (!candidate.equals(node)) {
                                    // see if this candidate has room ..
                                    if (inverseMap.get(candidate).size() < avgRegionsPerNode) {
                                        LOG.info("REASSIGNING parition:" + partition + " from Node:" + node
                                                + " to Node:" + candidate);
                                        // found match reassign it ...
                                        inverseMap.remove(node, partition);
                                        inverseMap.put(candidate, partition);
                                        break;
                                    }
                                }
                            }
                            // break out if we reach our desired number of partitions for this node
                            if (inverseMap.get(node).size() == avgRegionsPerNode)
                                break;
                        }
                    }
                    // second pass ... assign based on least loaded node ...
                    else {
                        int desiredRelocations = paritions.size() - maxRegionsPerNode;
                        LOG.info("Desired Relocation for node:" + node + ":" + desiredRelocations
                                + " partitions:" + paritions.size());
                        for (int i = 0; i < desiredRelocations; ++i) {
                            String leastLoadedNode = null;
                            int leastLoadedNodePartitionCount = 0;

                            for (String candidateNode : inverseMap.keySet()) {
                                if (leastLoadedNode == null || inverseMap.get(candidateNode)
                                        .size() < leastLoadedNodePartitionCount) {
                                    leastLoadedNode = candidateNode;
                                    leastLoadedNodePartitionCount = inverseMap.get(candidateNode).size();
                                }
                            }

                            int bestPartition = -1;
                            int bestParitionOffset = -1;

                            for (int candidateParition : inverseMap.get(node)) {
                                int offset = 0;
                                for (String nodeCandidate : paritionToDesiredCandidateList
                                        .get(candidateParition)) {
                                    if (nodeCandidate.equals(leastLoadedNode)) {
                                        if (bestPartition == -1 || bestParitionOffset > offset) {
                                            bestPartition = candidateParition;
                                            bestParitionOffset = offset;
                                        }
                                        break;
                                    }
                                    offset++;
                                }
                            }

                            if (bestPartition == -1) {
                                bestPartition = Iterables.get(inverseMap.get(node), 0);
                            }

                            LOG.info("REASSIGNING parition:" + bestPartition + " from Node:" + node
                                    + " to Node:" + leastLoadedNode);
                            // found match reassign it ...
                            inverseMap.remove(node, bestPartition);
                            inverseMap.put(leastLoadedNode, bestPartition);
                        }
                    }
                }
            }
        }

        LOG.info("Rebuilding parition to node map based on ideal balance");
        for (String node : inverseMap.keySet()) {
            LOG.info("Node:" + node + " has:" + inverseMap.get(node).size() + " partitions:"
                    + inverseMap.get(node).toString());
        }

        partitionToNodeMap.clear();
        for (Map.Entry<String, Integer> entry : inverseMap.entries()) {
            partitionToNodeMap.put(entry.getValue(), entry.getKey());
        }
    }

    StringBuilder builder = new StringBuilder();
    int itemCount = 0;
    for (Map.Entry<Integer, String> entry : partitionToNodeMap.entrySet()) {
        if (itemCount++ != 0)
            builder.append("\t");
        builder.append(entry.getKey().intValue() + "," + entry.getValue());
    }

    return builder.toString();
}
From source file: org.starfishrespect.myconsumption.server.business.sensors.SensorsDataRetriever.java

/**
 * Retrieves and stores the data for one user.
 *
 * @param onlyThisSensorId retrieve only data for one sensor with this id
 * @return false if something goes wrong; true otherwise
 */
public boolean retrieve(List<Sensor> sensors, String onlyThisSensorId) {
    boolean allSuccessful = true;
    for (Sensor sensor : sensors) {
        System.out.println("Retrieve data for sensor " + sensor.getId());
        try {
            valuesRepository.setSensor(sensor.getId());
            valuesRepository.init();
            if (onlyThisSensorId != null) {
                if (!sensor.getId().equals(onlyThisSensorId)) {
                    continue;
                }
            }
            HashMap<Integer, HashMap<Integer, Integer>> sortedValues = new HashMap<Integer, HashMap<Integer, Integer>>();
            Date lastValue = sensor.getLastValue();
            SensorRetriever retriever = null;
            if (sensor instanceof FluksoSensor) {
                retriever = new FluksoRetriever((FluksoSensor) sensor);
            }
            if (retriever == null) {
                System.out.println("This sensor type has not been found!");
                continue;
            }
            TreeMap<Integer, Integer> data = retriever.getDataSince(lastValue).getData();
            if (data.size() != 0) {
                for (int key : data.keySet()) {
                    int hour = key - key % 3600;
                    HashMap<Integer, Integer> hourData = sortedValues.get(hour);
                    if (hourData == null) {
                        hourData = new HashMap<Integer, Integer>();
                        sortedValues.put(hour, hourData);
                    }
                    hourData.put(key % 3600, data.get(key));
                }
                for (int key : sortedValues.keySet()) {
                    Date dateKey = new Date(key * 1000L);
                    SensorDataset newValue = new SensorDataset(dateKey);
                    newValue.addAllValues(sortedValues.get(key));
                    valuesRepository.insertOrUpdate(newValue);
                }
                if (sensor.getLastValue().before(new Date(data.lastKey() * 1000L))) {
                    sensor.setLastValue(new Date(data.lastKey() * 1000L));
                }
                if (sensor.getFirstValue().after(new Date(data.firstKey() * 1000L))
                        || sensor.getFirstValue().getTime() == 0) {
                    sensor.setFirstValue(new Date(data.firstKey() * 1000L));
                }
                // sync operation: this avoids inserting a sensor that was deleted
                // while its data was being retrieved
                int currentUsageCount = sensorRepository.getUsageCount(sensor.getId());
                if (currentUsageCount > -1) {
                    // update, the field may have been incremented during retrieving
                    sensor.setUsageCount(currentUsageCount);
                    sensor.setDead(false);
                    sensorRepository.updateSensor(sensor);
                }
                System.out.println("Retrieve successful");
            } else {
                System.out.println("No values retrieved for this sensor");
                if (!sensor.isDead()) {
                    // test whether the sensor is dead: no data in the last 6 hours
                    Calendar cal = new GregorianCalendar();
                    cal.add(Calendar.HOUR, -6);
                    if (sensor.getLastValue().before(new Date(cal.getTimeInMillis()))) {
                        System.out.println(
                                "Sensor has not sent anything in the last 6 hours! Set its status as dead.");
                        sensor.setDead(true);
                        sensorRepository.updateSensor(sensor);
                    }
                } else {
                    System.out.println("Sensor is still dead");
                }
            }
        } catch (RetrieveException | DaoException e) {
            System.err.println(e.getMessage());
            allSuccessful = false;
        }
    }
    return allSuccessful;
}
From source file: com.rackspacecloud.blueflood.io.serializers.HistogramSerializationTest.java

private boolean areHistogramsEqual(HistogramRollup first, HistogramRollup second) {
    final TreeMap<Double, Double> firstBinsAsOrderedMap = getNonZeroBinsAsMap(first);
    final TreeMap<Double, Double> secondBinsAsOrderedMap = getNonZeroBinsAsMap(second);
    if (firstBinsAsOrderedMap.size() != secondBinsAsOrderedMap.size()) {
        return false;
    }
    for (Map.Entry<Double, Double> firstBin : firstBinsAsOrderedMap.entrySet()) {
        Double val = secondBinsAsOrderedMap.get(firstBin.getKey());
        if (val == null || !firstBin.getValue().equals(val)) {
            return false;
        }
    }
    return true;
}
From source file: io.openmessaging.rocketmq.consumer.LocalMessageCache.java

private void cleanExpireMsg() {
    for (final Map.Entry<MessageQueue, ProcessQueue> next : rocketmqPullConsumer.getDefaultMQPullConsumerImpl()
            .getRebalanceImpl().getProcessQueueTable().entrySet()) {
        ProcessQueue pq = next.getValue();
        MessageQueue mq = next.getKey();
        ReadWriteLock lockTreeMap = getLockInProcessQueue(pq);
        if (lockTreeMap == null) {
            log.error("Gets tree map lock in process queue error, may be has compatibility issue");
            return;
        }

        TreeMap<Long, MessageExt> msgTreeMap = pq.getMsgTreeMap();

        int loop = msgTreeMap.size();
        for (int i = 0; i < loop; i++) {
            MessageExt msg = null;
            try {
                lockTreeMap.readLock().lockInterruptibly();
                try {
                    if (!msgTreeMap.isEmpty()) {
                        msg = msgTreeMap.firstEntry().getValue();
                        if (System.currentTimeMillis()
                                - Long.parseLong(MessageAccessor.getConsumeStartTimeStamp(msg)) > clientConfig
                                        .getRmqMessageConsumeTimeout() * 60 * 1000) {
                            // Expired, ack and remove it.
                        } else {
                            break;
                        }
                    } else {
                        break;
                    }
                } finally {
                    lockTreeMap.readLock().unlock();
                }
            } catch (InterruptedException e) {
                log.error("Gets expired message exception", e);
            }

            try {
                rocketmqPullConsumer.sendMessageBack(msg, 3);
                log.info("Send expired msg back. topic={}, msgId={}, storeHost={}, queueId={}, queueOffset={}",
                        msg.getTopic(), msg.getMsgId(), msg.getStoreHost(), msg.getQueueId(),
                        msg.getQueueOffset());
                ack(mq, pq, msg);
            } catch (Exception e) {
                log.error("Send back expired msg exception", e);
            }
        }
    }
}
From source file: org.apache.hadoop.hbase.TestRegionServerExit.java

private Thread startVerificationThread(final String tableName, final Text row) {
    Runnable runnable = new Runnable() {
        public void run() {
            HScannerInterface scanner = null;
            try {
                // Verify that the client can find the data after the region has moved
                // to a different server
                scanner = table.obtainScanner(HConstants.COLUMN_FAMILY_ARRAY, new Text());
                LOG.info("Obtained scanner " + scanner);
                HStoreKey key = new HStoreKey();
                TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
                while (scanner.next(key, results)) {
                    assertTrue(key.getRow().equals(row));
                    assertEquals(1, results.size());
                    byte[] bytes = results.get(HConstants.COLUMN_FAMILY);
                    assertNotNull(bytes);
                    assertTrue(tableName.equals(new String(bytes, HConstants.UTF8_ENCODING)));
                }
                LOG.info("Success!");
            } catch (IOException e) {
                e.printStackTrace();
            } finally {
                if (scanner != null) {
                    LOG.info("Closing scanner " + scanner);
                    try {
                        scanner.close();
                    } catch (IOException e) {
                        e.printStackTrace();
                    }
                }
            }
        }
    };
    return new Thread(runnable);
}
From source file: com.itude.mobile.mobbl.server.http.HttpDelegate.java

private synchronized Header[] transformToHeader(TreeMap<String, String[]> headers) {
    if (headers == null)
        return null;

    // one slot per header name; note that if a name maps to several values,
    // each assignment overwrites the previous one, so only the last value survives
    Header[] result = new Header[headers.size()];
    Iterator<Entry<String, String[]>> iterator = headers.entrySet().iterator();
    int i = 0;
    while (iterator.hasNext()) {
        Entry<String, String[]> entry = iterator.next();
        for (String s : entry.getValue())
            result[i] = new Header(entry.getKey(), s);
        i++;
    }
    return result;
}
From source file: org.apache.ambari.server.api.services.serializers.CsvSerializerTest.java

@Test
public void testSerializeResources_NoColumnInfo() throws Exception {
    Result result = new ResultImpl(true);
    result.setResultStatus(new ResultStatus(ResultStatus.STATUS.OK));
    TreeNode<Resource> tree = result.getResultTree();

    List<TreeMap<String, Object>> data = new ArrayList<TreeMap<String, Object>>() {
        {
            add(new TreeMap<String, Object>() {
                {
                    put("property1", "value1a");
                    put("property2", "value2a");
                    put("property3", "value3a");
                    put("property4", "value4a");
                }
            });
            add(new TreeMap<String, Object>() {
                {
                    put("property1", "value1'b");
                    put("property2", "value2'b");
                    put("property3", "value3'b");
                    put("property4", "value4'b");
                }
            });
            add(new TreeMap<String, Object>() {
                {
                    put("property1", "value1,c");
                    put("property2", "value2,c");
                    put("property3", "value3,c");
                    put("property4", "value4,c");
                }
            });
        }
    };

    tree.setName("items");
    tree.setProperty("isCollection", "true");

    addChildResource(tree, "resource", 0, data.get(0));
    addChildResource(tree, "resource", 1, data.get(1));
    addChildResource(tree, "resource", 2, data.get(2));

    replayAll();

    // execute test
    Object o = new CsvSerializer().serialize(result).toString().replace("\r", "");

    verifyAll();

    assertNotNull(o);

    StringReader reader = new StringReader(o.toString());
    CSVParser csvParser = new CSVParser(reader, CSVFormat.DEFAULT);
    List<CSVRecord> records = csvParser.getRecords();

    assertNotNull(records);
    assertEquals(3, records.size());

    int i = 0;
    for (CSVRecord record : records) {
        TreeMap<String, Object> actualData = data.get(i++);
        assertEquals(actualData.size(), record.size());
        for (String item : record) {
            assertTrue(actualData.containsValue(item));
        }
    }

    csvParser.close();
}
From source file: sadl.models.pdta.PDTA.java

@Override
public int getTransitionCount() {
    int result = 0;
    for (final PDTAState state : states.valueCollection()) {
        for (final TreeMap<Double, PDTATransition> transitions : state.getTransitions().values()) {
            result += transitions.size();
        }
    }
    return result;
}