List of usage examples for java.util.TreeMap.get
public V get(Object key)
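TreeMap.get returns the value to which the given key is mapped, using the map's ordering (natural ordering or a supplied Comparator) to locate it, or null if the key is absent. Before the real-world examples below, here is a minimal, self-contained sketch of the basic call; the class and variable names are illustrative only.

import java.util.TreeMap;

public class TreeMapGetExample {
    public static void main(String[] args) {
        TreeMap<String, Integer> scores = new TreeMap<String, Integer>();
        scores.put("alice", 10);
        scores.put("bob", 7);

        // get returns the mapped value, or null when the key is absent
        Integer alice = scores.get("alice"); // 10
        Integer carol = scores.get("carol"); // null

        System.out.println(alice + ", " + carol);
    }
}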
From source file:org.ecocean.MarkedIndividual.java
public String getDynamicPropertyValue(String name) {
    if (dynamicProperties != null) {
        name = name.replaceAll("%20", " ");
        // let's create a TreeMap of the properties
        TreeMap<String, String> tm = new TreeMap<String, String>();
        StringTokenizer st = new StringTokenizer(dynamicProperties, ";");
        while (st.hasMoreTokens()) {
            String token = st.nextToken();
            int equalPlace = token.indexOf("=");
            try {
                tm.put(token.substring(0, equalPlace), token.substring(equalPlace + 1));
            } catch (IndexOutOfBoundsException ioob) {
            }
        }
        if (tm.containsKey(name)) {
            return tm.get(name);
        }
    }
    return null;
}
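The method above parses a ";"-delimited property string into a TreeMap and then looks the requested name up with containsKey/get. A stripped-down sketch of that parse-then-lookup pattern, using only standard JDK classes and a hypothetical property string:

import java.util.StringTokenizer;
import java.util.TreeMap;

public class DynamicPropertyLookup {
    public static void main(String[] args) {
        String dynamicProperties = "color=blue;size=large;weight=2kg"; // hypothetical sample data

        TreeMap<String, String> tm = new TreeMap<String, String>();
        StringTokenizer st = new StringTokenizer(dynamicProperties, ";");
        while (st.hasMoreTokens()) {
            String token = st.nextToken();
            int equalPlace = token.indexOf('=');
            if (equalPlace > 0) {
                tm.put(token.substring(0, equalPlace), token.substring(equalPlace + 1));
            }
        }

        System.out.println(tm.get("size"));    // large
        System.out.println(tm.get("missing")); // null
    }
}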
From source file:com.mfizz.observer.core.ServiceObserver.java
private void doSnapshotAll(SnapshotAllResult result) throws Exception {
    //
    // create list of snapshots that will be executed
    //
    ArrayList<SnapshotTask> snapshotTasks = new ArrayList<SnapshotTask>();
    for (Observer<D> observer : observers.values()) {
        snapshotTasks.add(new SnapshotTask(observer, result.beginTimestamp));
    }
    result.snapshotsAttempted = snapshotTasks.size();

    // this will run all the update tasks and wait for them all to finish
    executor.invokeAll(snapshotTasks);

    // create an aggregate for each group
    TreeMap<String, ObserveAggregateSnapshot<A>> aggs = new TreeMap<String, ObserveAggregateSnapshot<A>>();

    // process deltas from each observer
    for (Observer<D> observer : observers.values()) {
        // determine if last snapshot completed or failed
        if (observer.getConsecutiveSnapshotCompletedCount() > 0) {
            result.snapshotsCompleted++;
        } else {
            result.snapshotsFailed++;
        }

        // was this the first snapshot attempt for this observer?
        long snapshotAttempts = observer.getSnapshotAttemptCounter();

        // each group will aggregate the same delta snapshot from each observer
        ObserveDeltaSnapshot<D> ods = observer.getDeltaSnapshot();

        if (ods == null) {
            //logger.debug("delta snapshot for observer {} was null", observer.getName());
            SnapshotException e = observer.getException();
            if (e == null) {
                if (snapshotAttempts <= 1) {
                    // first runs we don't expect any deltas
                } else {
                    logger.error(
                        "observer [{}] for service [{}] had null delta AND exception values (previous snapshot maybe failed?)",
                        observer.getName(), getServiceName());
                }
            } else {
                // this is now logged in SnapshotTask below
                //logger.warn("exception during snapshot for observer " + observer.getName(), e);
            }
        } else {
            // period should be the same across all deltas
            TimePeriod period = ods.getPeriod();
            // TODO: verify periods match each other as safety check?

            // create or get aggregate for each group this observer belongs to
            for (String group : observer.configuration.getGroups()) {
                ObserveAggregateSnapshot<A> oas = aggs.get(group);
                if (oas == null) {
                    oas = new ObserveAggregateSnapshot<A>(period, aggregateClass.newInstance());
                    aggs.put(group, oas);
                }
                oas.add(observer.getName(), ods.getData());
            }
        }
    }

    if (snapshotAllAttemptedCounter.get() > 1 && aggs.isEmpty()) {
        logger.warn("snapshotAll() for service [{}] generated no aggregated snapshots!", this.getServiceName());
    }

    // at this point, the new snapshots from each observer have generated
    // new aggregates for this point-in-time -- add this to our rolling time series
    for (String group : aggs.keySet()) {
        // last aggregate snapshot
        ObserveAggregateSnapshot<A> oas = aggs.get(group);

        // get or create new series of aggregate snapshots for each group
        TimeSeries<ObserveAggregateSnapshot<A>> aggseries = snapshots.get(group);
        if (aggseries == null) {
            // figure out capacity of time series (retentionTime / step + fudgeFactor)
            long retentionMillis = getRetentionMillis();
            int initialCapacity = (int) (retentionMillis / this.serviceConfig.getStepMillis()) + 2;
            logger.info(
                "Creating new TimeSeries for service [{}] group [{}] with retentionMillis=" + retentionMillis
                    + "; initialCapacity=" + initialCapacity,
                getServiceName(), group);
            aggseries = new TimeSeries<ObserveAggregateSnapshot<A>>(retentionMillis, initialCapacity);
            snapshots.put(group, aggseries);
        }

        // add aggregate snapshot to the time series for each group
        // this will also prune old snapshots that are older than the retention period
        // the timestamp of the aggregate becomes the relative "now" timestamp for calculating retentions
        // this is how we'll always at least keep "current" times
        aggseries.add(oas, oas.getTimestamp());

        // create an updated summary for each interval for this group
        SummaryGroupFactory<S, A> sfg = new SummaryGroupFactory<S, A>(oas.getTimestamp(), this.summaryClass,
            this.serviceConfig.getPeriods());
        sfg.beginAll();

        Iterator<ObserveAggregateSnapshot<A>> it = aggseries.getSeries().iterator();
        while (it.hasNext()) {
            ObserveAggregateSnapshot<A> tempoas = it.next();
            sfg.summarize(tempoas.getPeriod(), tempoas.getAggregate());
        }

        sfg.completeAll();

        SummaryGroup<S> sg = sfg.createSummaryGroup();
        summary.put(group, sg);
    }
}
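The aggregation step above relies on a get-or-create pattern: aggs.get(group) is checked for null, and a fresh aggregate is inserted the first time a group is seen. A minimal sketch of that pattern, with hypothetical group names and a plain list standing in for the per-group aggregate:

import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;

public class GetOrCreateExample {
    public static void main(String[] args) {
        TreeMap<String, List<String>> aggs = new TreeMap<String, List<String>>();
        String[][] observations = { { "web", "host-1" }, { "db", "host-2" }, { "web", "host-3" } };

        for (String[] obs : observations) {
            String group = obs[0];
            List<String> members = aggs.get(group);
            if (members == null) {
                // create the aggregate the first time this group is seen
                members = new ArrayList<String>();
                aggs.put(group, members);
            }
            members.add(obs[1]);
        }

        System.out.println(aggs); // {db=[host-2], web=[host-1, host-3]}
    }
}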
From source file:com.sfs.whichdoctor.dao.IsbEntityDAOImpl.java
/**
 * Check mandatory group membership.
 *
 * @param entityGUID the entity guid
 * @param delete the entity reference if present (otherwise create if not)
 * @param privileges the privileges
 */
private void checkMandatoryGroups(final int entityGUID, final boolean delete, final PrivilegesBean privileges) {
    if (this.mandatoryGroups != null) {
        UserBean systemUser = getSystemUser("ISB", "System");
        dataLogger.debug("Mandatory groups exist");
        for (Integer groupGUID : this.mandatoryGroups.keySet()) {
            TreeMap<String, ItemBean> items = new TreeMap<String, ItemBean>();
            String isbMapping = "";
            try {
                final GroupBean group = this.groupDAO.loadGUID(groupGUID);
                isbMapping = group.getGroupDN();
            } catch (WhichDoctorDaoException wde) {
                dataLogger.error("Error loading the parent group: " + wde.getMessage());
            }
            try {
                items = this.itemDAO.load(groupGUID, false, "Group", "Member", entityGUID, null, null);
            } catch (WhichDoctorDaoException wde) {
                dataLogger.error("Error performing search for items: " + wde.getMessage());
            }
            if (items != null && items.size() == 0 && !delete) {
                dataLogger.debug("Items do not exist and create selected");
                // No items exist and create is requested.
                final ItemBean item = new ItemBean();
                item.setObject1GUID(groupGUID);
                item.setObject2GUID(entityGUID);
                item.setWeighting(WEIGHTING);
                item.setPermission(PERMISSION);
                item.setItemType(ITEMTYPE);
                try {
                    this.itemDAO.create(item, systemUser, privileges, isbMapping);
                } catch (WhichDoctorDaoException wde) {
                    dataLogger.error("Error creating the new item: " + wde.getMessage());
                }
            }
            if (items != null && items.size() > 0 && delete) {
                // Items exist and delete is requested.
                dataLogger.debug("Items exist and delete selected");
                for (String key : items.keySet()) {
                    final ItemBean item = items.get(key);
                    try {
                        this.itemDAO.delete(item, systemUser, privileges, isbMapping);
                    } catch (WhichDoctorDaoException wde) {
                        dataLogger.error("Error deleting the existing item: " + wde.getMessage());
                    }
                }
            }
        }
    }
}
From source file:org.commoncrawl.service.listcrawler.CrawlList.java
private static void validateListCode(final File dataDirectory, long listId) throws IOException {
    final String urlList[] = new String[] { "http://www.yahoo.com/1", "http://www.google.com/1",
            "http://www.cnn.com/1", "http://www.yahoo.com/2", "http://www.google.com/2", "http://www.cnn.com/2" };

    File tempFile = File.createTempFile("CrawlList", "validateListInit");
    File localTempFile = new File(dataDirectory, tempFile.getName());

    generateTestURLFile(localTempFile, urlList);

    final TreeMap<String, URLFP> urlToFPMap = new TreeMap<String, URLFP>();
    final TreeMap<URLFP, String> urlFPToString = new TreeMap<URLFP, String>();

    for (String url : urlList) {
        URLFP fp = URLUtils.getURLFPFromURL(url, true);
        urlToFPMap.put(url, fp);
        urlFPToString.put(fp, url);
    }

    final TreeMap<URLFP, ProxyCrawlHistoryItem> itemsToMarkComplete = new TreeMap<URLFP, ProxyCrawlHistoryItem>();

    ProxyCrawlHistoryItem item1 = new ProxyCrawlHistoryItem();
    item1.setCrawlStatus(CrawlURL.FailureReason.RobotsExcluded);
    item1.setOriginalURL(urlList[1]);

    ProxyCrawlHistoryItem item2 = new ProxyCrawlHistoryItem();
    item2.setCrawlStatus(0);
    item2.setOriginalURL(urlList[3]);
    item2.setHttpResultCode(301);
    item2.setRedirectURL("http://www.yahoo.com/3");
    item2.setRedirectStatus(0);
    item2.setRedirectHttpResult(200);

    ProxyCrawlHistoryItem item3 = new ProxyCrawlHistoryItem();
    item3.setCrawlStatus(0);
    item3.setOriginalURL(urlList[4]);
    item3.setHttpResultCode(301);
    item3.setRedirectURL("http://www.google.com/3");
    item3.setRedirectStatus(CrawlURL.FailureReason.IOException);

    itemsToMarkComplete.put(urlToFPMap.get(item1.getOriginalURL()), item1);
    itemsToMarkComplete.put(urlToFPMap.get(item2.getOriginalURL()), item2);
    itemsToMarkComplete.put(urlToFPMap.get(item3.getOriginalURL()), item3);

    final Set<URLFP> itemsToMarkCompleteFPSet = itemsToMarkComplete.keySet();
    final Set<URLFP> itemsNotMarked = new TreeSet<URLFP>(urlToFPMap.values());
    itemsNotMarked.removeAll(itemsToMarkCompleteFPSet);

    CrawlHistoryStorage storage = new CrawlHistoryStorage() {

        @Override
        public void syncList(long listId, TreeSet<URLFP> matchCriteria, ItemUpdater targetList)
                throws IOException {
            for (URLFP matchItem : matchCriteria) {
                if (itemsToMarkCompleteFPSet.contains(matchItem)) {
                    targetList.updateItemState(matchItem, itemsToMarkComplete.get(matchItem));
                }
            }
        }

        @Override
        public File getLocalDataDir() {
            return dataDirectory;
        }
    };

    CrawlList list1 = new CrawlList(storage, listId, localTempFile, 0);

    for (int pass = 0; pass < 2; ++pass) {
        CrawlList list = null;
        if (pass == 0) {
            System.out.println("Pass 0 - Initialize from URLList");
            list = list1;
        } else {
            System.out.println("Pass 1 - Initialize from OnDisk Data");
            list = new CrawlList(storage, listId);
        }

        // iterate fingerprints
        for (URLFP fingerprint : urlToFPMap.values()) {
            ProxyCrawlHistoryItem itemRetrieved = list.getHistoryItemFromURLFP(fingerprint);
            if (itemsToMarkCompleteFPSet.contains(fingerprint)) {
                ProxyCrawlHistoryItem itemExpected = itemsToMarkComplete.get(fingerprint);
                Assert.assertTrue(itemExpected.equals(itemRetrieved));
            } else {
                Assert.assertTrue(itemRetrieved.getOriginalURL().equals(urlFPToString.get(fingerprint))
                        && !itemRetrieved.isFieldDirty(ProxyCrawlHistoryItem.Field_CRAWLSTATUS)
                        && !itemRetrieved.isFieldDirty(ProxyCrawlHistoryItem.Field_HTTPRESULTCODE)
                        && !itemRetrieved.isFieldDirty(ProxyCrawlHistoryItem.Field_REDIRECTHTTPRESULT)
                        && !itemRetrieved.isFieldDirty(ProxyCrawlHistoryItem.Field_REDIRECTSTATUS)
                        && !itemRetrieved.isFieldDirty(ProxyCrawlHistoryItem.Field_REDIRECTURL));
            }
        }
    }

    // validate string code does not update when strings have not changed
    item3.setRedirectStatus(0);
    item3.setRedirectHttpResult(200);

    long variableDataLength = list1._variableDataFile.length();
    long fixedDataLength = list1._fixedDataFile.length();

    list1.updateItemState(urlToFPMap.get(item3.getOriginalURL()), item3);

    Assert.assertTrue(fixedDataLength == list1._fixedDataFile.length());
    Assert.assertTrue(variableDataLength == list1._variableDataFile.length());

    list1.queueUnCrawledItems(new CrawlQueueLoader() {

        @Override
        public void queueURL(URLFP urlfp, String url) {
            Assert.assertTrue(itemsNotMarked.contains(urlfp));
            Assert.assertTrue(urlFPToString.get(urlfp).equals(url));
        }

        @Override
        public void flush() {
            // TODO Auto-generated method stub
        }
    });
}
From source file:org.apache.hadoop.hbase.index.mapreduce.IndexLoadIncrementalHFile.java
private void createTable(String tableName, String dirPath) throws Exception {
    Path hfofDir = new Path(dirPath);
    FileSystem fs = hfofDir.getFileSystem(getConf());

    if (!fs.exists(hfofDir)) {
        throw new FileNotFoundException("HFileOutputFormat dir " + hfofDir + " not found");
    }

    FileStatus[] familyDirStatuses = fs.listStatus(hfofDir);
    if (familyDirStatuses == null) {
        throw new FileNotFoundException("No families found in " + hfofDir);
    }

    HTableDescriptor htd = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = null;

    // Add column families
    // Build a set of keys
    byte[][] keys = null;
    TreeMap<byte[], Integer> map = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);

    for (FileStatus stat : familyDirStatuses) {
        if (!stat.isDir()) {
            LOG.warn("Skipping non-directory " + stat.getPath());
            continue;
        }
        Path familyDir = stat.getPath();
        // Skip _logs & .index etc
        if (familyDir.getName().startsWith("_"))
            continue;
        if (familyDir.getName().startsWith(IndexMapReduceUtil.INDEX_DATA_DIR)) {
            LOG.warn("Ignoring all the HFile specific to " + tableName + " indexed data.");
            continue;
        }
        byte[] family = Bytes.toBytes(familyDir.getName());
        hcd = new HColumnDescriptor(family);
        htd.addFamily(hcd);

        Path[] hfiles = FileUtil.stat2Paths(fs.listStatus(familyDir));
        for (Path hfile : hfiles) {
            if (hfile.getName().startsWith("_"))
                continue;
            HFile.Reader reader = HFile.createReader(fs, hfile, new CacheConfig(getConf()));
            final byte[] first, last;
            try {
                if (hcd.getCompressionType() != reader.getCompressionAlgorithm()) {
                    hcd.setCompressionType(reader.getCompressionAlgorithm());
                    LOG.info("Setting compression " + hcd.getCompressionType().name() + " for family "
                            + hcd.toString());
                }
                reader.loadFileInfo();
                first = reader.getFirstRowKey();
                last = reader.getLastRowKey();

                LOG.info("Trying to figure out region boundaries hfile=" + hfile + " first="
                        + Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last));

                // To eventually infer start key-end key boundaries
                Integer value = map.containsKey(first) ? (Integer) map.get(first) : 0;
                map.put(first, value + 1);

                value = map.containsKey(last) ? (Integer) map.get(last) : 0;
                map.put(last, value - 1);
            } finally {
                reader.close();
            }
        }
    }

    keys = IndexLoadIncrementalHFile.inferBoundaries(map);
    this.hbAdmin.createTable(htd, keys);

    LOG.info("Table " + tableName + " is available!!");
}
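The boundary-inference loop above (and the near-identical one in the org.apache.hadoop.hbase.mapreduce loader further down) reads the current tally with map.containsKey(key) ? map.get(key) : 0 before incrementing or decrementing it. A minimal, HBase-free sketch of that read-then-update idiom with plain String keys and hypothetical data:

import java.util.TreeMap;

public class TallyExample {
    public static void main(String[] args) {
        TreeMap<String, Integer> map = new TreeMap<String, Integer>();
        String[] firstKeys = { "row-a", "row-c", "row-a" }; // hypothetical region start keys

        for (String first : firstKeys) {
            // read the current count (0 if absent), then store the updated count
            Integer value = map.containsKey(first) ? map.get(first) : 0;
            map.put(first, value + 1);
        }

        System.out.println(map); // {row-a=2, row-c=1}
    }
}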
From source file:com.projity.server.data.Serializer.java
public ProjectData serializeProject(Project project, Collection flatAssignments, Collection flatLinks,
        boolean incremental, SerializeOptions options) throws Exception {
    if (TMP_FILES)
        initTmpDir();
    if (project.isForceNonIncremental())
        incremental = false;
    boolean incrementalDistributions = incremental && !project.isForceNonIncrementalDistributions();

    // calendars.clear();

    Count projectCount = new Count("Project");
    //if (globalIdsOnly) makeGLobal(project);
    ProjectData projectData = (ProjectData) serialize(project, ProjectData.FACTORY, projectCount);
    if (project.isForceNonIncremental())
        projectData.setVersion(0);
    projectData.setMaster(project.isMaster());
    // projectData.setExternalId(project.getExternalId());

    //exposed attributes
    // projectData.setAttributes(SpreadSheetFieldArray.convertFields(project, "projectExposed", new Transformer(){
    //     public Object transform(Object value) {
    //         if (value instanceof Money) return ((Money)value).doubleValue();
    //         return null;
    //     }
    // }));

    projectCount.dump();

    //resources
    Map resourceMap = saveResources(project, projectData);

    //tasks
    saveTasks(project, projectData, resourceMap, flatAssignments, flatLinks, incremental, options);

    //distribution
    long t = System.currentTimeMillis();
    Collection<DistributionData> dist = (Collection<DistributionData>) (new DistributionConverter())
            .createDistributionData(project, incrementalDistributions);
    if (dist == null) {
        dist = new ArrayList<DistributionData>();
    }
    projectData.setDistributions(dist);
    projectData.setIncrementalDistributions(incrementalDistributions);

    TreeMap<DistributionData, DistributionData> distMap = project.getDistributionMap();
    if (distMap == null) {
        distMap = new TreeMap<DistributionData, DistributionData>(new DistributionComparator());
        project.setDistributionMap(distMap);
    }
    TreeMap<DistributionData, DistributionData> newDistMap = new TreeMap<DistributionData, DistributionData>(
            new DistributionComparator());
    //ArrayList<DistributionData> toInsertInOld=new ArrayList<DistributionData>();

    //insert, update dist
    for (Iterator<DistributionData> i = dist.iterator(); i.hasNext();) {
        DistributionData d = i.next();
        if (incrementalDistributions) {
            DistributionData oldD = distMap.get(d);
            if (oldD == null) {
                d.setStatus(DistributionData.INSERT);
            } else {
                if (oldD.getWork() == d.getWork() && oldD.getCost() == d.getCost()) {
                    //System.out.println(d+" did not change");
                    d.setStatus(0);
                    i.remove();
                } else
                    d.setStatus(DistributionData.UPDATE);
            }
        } else {
            d.setStatus(DistributionData.INSERT);
        }
        newDistMap.put(d, d);
    }

    //remove dist
    if (incrementalDistributions && distMap.size() > 0) {
        Set<Long> noChangeTaskIds = new HashSet<Long>();
        Task task;
        for (Iterator i = project.getTaskOutlineIterator(); i.hasNext();) {
            task = (Task) i.next();
            if (incremental && !task.isDirty())
                noChangeTaskIds.add(task.getUniqueId());
        }
        // for (Iterator i=projectData.getTasks().iterator();i.hasNext();){
        //     TaskData task=(TaskData)i.next();
        //     if (!task.isDirty()) noChangeTaskIds.add(task.getUniqueId());
        // }
        for (Iterator<DistributionData> i = distMap.values().iterator(); i.hasNext();) {
            DistributionData d = i.next();
            if (newDistMap.containsKey(d))
                continue;
            if (noChangeTaskIds.contains(d.getTaskId())) {
                d.setStatus(0);
                newDistMap.put(d, d);
            } else {
                d.setStatus(DistributionData.REMOVE);
                dist.add(d);
            }
        }
    }
    project.setNewDistributionMap(newDistMap);

    System.out.println("Distributions generated in " + (System.currentTimeMillis() - t) + " ms");

    // send project field values to server too
    HashMap fieldValues = FieldValues.getValues(FieldDictionary.getInstance().getProjectFields(), project);
    if (project.getContainingSubprojectTask() != null) {
        // special case in which we want to use the duration from subproject task
        Object durationFieldValue = Configuration.getFieldFromId("Field.duration")
                .getValue(project.getContainingSubprojectTask(), null);
        fieldValues.put("Field.duration", durationFieldValue);
    }
    projectData.setFieldValues(fieldValues);

    projectData.setGroup(project.getGroup());
    projectData.setDivision(project.getDivision());
    projectData.setExpenseType(project.getExpenseType());
    projectData.setProjectType(project.getProjectType());
    projectData.setProjectStatus(project.getProjectStatus());
    projectData.setExtraFields(project.getExtraFields());
    projectData.setAccessControlPolicy(project.getAccessControlPolicy());
    projectData.setCreationDate(project.getCreationDate());
    projectData.setLastModificationDate(project.getLastModificationDate());

    // System.out.println("done serialize project " + project);
    // Collection<DistributionData> dis=(Collection<DistributionData>)projectData.getDistributions();
    // for (DistributionData d: dis) System.out.println("Dist: "+d.getTimeId()+", "+d.getType()+", "+d.getStatus());

    // project.setNewTaskIds(null);
    // if (projectData.getTasks()!=null){
    //     Set<Long> ids=new HashSet<Long>();
    //     project.setNewTaskIds(ids);
    //     for (TaskData task:(Collection<TaskData>)projectData.getTasks()){
    //         ids.add(task.getUniqueId());
    //     }
    // }
    // long[] unchangedTasks=projectData.getUnchangedTasks();
    // if (unchangedTasks!=null){
    //     Set<Long> ids=project.getNewTaskIds();
    //     if (ids==null){
    //         ids=new HashSet<Long>();
    //         project.setNewTaskIds(ids);
    //     }
    //     for (int i=0;i<unchangedTasks.length;i++) ids.add(unchangedTasks[i]);
    // }
    //
    // project.setNewLinkIds(null);
    // if (flatLinks!=null){
    //     Set<DependencyKey> ids=new HashSet<DependencyKey>();
    //     project.setNewLinkIds(ids);
    //     for (LinkData link:(Collection<LinkData>)flatLinks){
    //         ids.add(new DependencyKey(link.getPredecessorId(),link.getSuccessorId()/*,link.getExternalId()*/));
    //     }
    // }
    // long[] unchangedLinks=projectData.getUnchangedLinks();
    // if (unchangedLinks!=null){
    //     Set<DependencyKey> ids=project.getNewLinkIds();
    //     if (ids==null){
    //         ids=new HashSet<DependencyKey>();
    //         project.setNewLinkIds(ids);
    //     }
    //     for (int i=0;i<unchangedLinks.length;i+=2) ids.add(new DependencyKey(unchangedLinks[i],unchangedLinks[i+1]));
    // }

    //project.setNewIds(); //claur - useful ?

    return projectData;
}
From source file:com.att.aro.core.packetanalysis.impl.VideoUsageAnalysisImpl.java
private void updateSegments() {
    log.info("updateSegments()");
    TreeMap<String, Integer> segmentList;
    Integer segment;
    if (videoUsage != null) {
        for (AROManifest manifest : videoUsage.getManifests()) {
            if (manifest.getDuration() > 0 || !manifest.getSegmentEventList().isEmpty()) {
                segmentList = manifest.getSegmentList();
                if (segmentList != null && !segmentList.isEmpty()) {
                    for (VideoEvent videoEvent : manifest.getVideoEventList().values()) {
                        // key = generateVideoEventKey(segment, timestamp, videoEvent.getQuality());
                        if (videoEvent.getSegment() < 0) {
                            String key = "";
                            if (videoEvent.getVed().getDateTime() != null) {
                                key = String.format("%s.%s", videoEvent.getVed().getDateTime(),
                                        videoEvent.getVed().getExtension());
                            } else if (videoEvent.getVed().getSegmentReference() != null) {
                                key = videoEvent.getVed().getSegmentReference();
                            }
                            segment = segmentList.get(key);
                            if (segment != null) {
                                videoEvent.setSegment(segment);
                            }
                        }
                        if (videoEvent.getDuration() <= 0) {
                            videoEvent.setDuration(manifest.getDuration());
                        }
                    }
                }
            }
        }
    }
}
From source file:org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.java
private void createTable(TableName tableName, String dirPath) throws Exception {
    Path hfofDir = new Path(dirPath);
    FileSystem fs = hfofDir.getFileSystem(getConf());

    if (!fs.exists(hfofDir)) {
        throw new FileNotFoundException("HFileOutputFormat dir " + hfofDir + " not found");
    }

    FileStatus[] familyDirStatuses = fs.listStatus(hfofDir);
    if (familyDirStatuses == null) {
        throw new FileNotFoundException("No families found in " + hfofDir);
    }

    HTableDescriptor htd = new HTableDescriptor(tableName);
    HColumnDescriptor hcd;

    // Add column families
    // Build a set of keys
    byte[][] keys;
    TreeMap<byte[], Integer> map = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);

    for (FileStatus stat : familyDirStatuses) {
        if (!stat.isDirectory()) {
            LOG.warn("Skipping non-directory " + stat.getPath());
            continue;
        }
        Path familyDir = stat.getPath();
        // Skip _logs, etc
        if (familyDir.getName().startsWith("_"))
            continue;
        byte[] family = familyDir.getName().getBytes();
        hcd = new HColumnDescriptor(family);
        htd.addFamily(hcd);

        Path[] hfiles = FileUtil.stat2Paths(fs.listStatus(familyDir));
        for (Path hfile : hfiles) {
            String fileName = hfile.getName();
            if (fileName.startsWith("_") || StoreFileInfo.isReference(fileName)
                    || HFileLink.isHFileLink(fileName))
                continue;
            HFile.Reader reader = HFile.createReader(fs, hfile, new CacheConfig(getConf()), getConf());
            final byte[] first, last;
            try {
                if (hcd.getCompressionType() != reader.getFileContext().getCompression()) {
                    hcd.setCompressionType(reader.getFileContext().getCompression());
                    LOG.info("Setting compression " + hcd.getCompressionType().name() + " for family "
                            + hcd.toString());
                }
                reader.loadFileInfo();
                first = reader.getFirstRowKey();
                last = reader.getLastRowKey();

                LOG.info("Trying to figure out region boundaries hfile=" + hfile + " first="
                        + Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last));

                // To eventually infer start key-end key boundaries
                Integer value = map.containsKey(first) ? map.get(first) : 0;
                map.put(first, value + 1);

                value = map.containsKey(last) ? map.get(last) : 0;
                map.put(last, value - 1);
            } finally {
                reader.close();
            }
        }
    }

    keys = LoadIncrementalHFiles.inferBoundaries(map);
    this.hbAdmin.createTable(htd, keys);

    LOG.info("Table " + tableName + " is available!!");
}
From source file:org.cloudfoundry.client.lib.rest.CloudControllerClientImpl.java
@Override
public Map<String, String> getCrashLogs(String appName) {
    String urlPath = getFileUrlPath();
    CrashesInfo crashes = getCrashes(appName);
    if (crashes.getCrashes().isEmpty()) {
        return Collections.emptyMap();
    }
    TreeMap<Date, String> crashInstances = new TreeMap<Date, String>();
    for (CrashInfo crash : crashes.getCrashes()) {
        crashInstances.put(crash.getSince(), crash.getInstance());
    }
    String instance = crashInstances.get(crashInstances.lastKey());
    return doGetLogs(urlPath, appName, instance);
}
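This last example leans on TreeMap's sorted ordering: crashes are keyed by timestamp, so lastKey() identifies the most recent crash and get(...) retrieves its instance id. A minimal sketch of that "latest entry by key" pattern, with hypothetical timestamps and instance names:

import java.util.Date;
import java.util.TreeMap;

public class LatestEntryLookup {
    public static void main(String[] args) {
        TreeMap<Date, String> crashInstances = new TreeMap<Date, String>();
        crashInstances.put(new Date(1000L), "instance-a"); // hypothetical entries
        crashInstances.put(new Date(5000L), "instance-b");
        crashInstances.put(new Date(3000L), "instance-c");

        // keys are kept sorted, so lastKey() is the most recent timestamp
        String latest = crashInstances.get(crashInstances.lastKey());
        System.out.println(latest); // instance-b
    }
}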