List of usage examples for java.util.ArrayList.subList(int fromIndex, int toIndex)
public List<E> subList(int fromIndex, int toIndex)
From source file:org.apache.hadoop.hbase.regionserver.compactions.SortedCompactionPolicy.java
/**
 * Trims the candidate list in place so it holds at most the configured
 * maximum number of files to compact. A user-requested major compaction is
 * exempt: it is allowed to exceed the limit (with a debug warning).
 *
 * @param candidates pre-filtered candidate files, trimmed in place
 * @param isUserCompaction whether a user requested this compaction
 * @param isMajorCompaction whether this is a major compaction
 */
protected void removeExcessFiles(ArrayList<StoreFile> candidates, boolean isUserCompaction,
        boolean isMajorCompaction) {
    final int limit = comConf.getMaxFilesToCompact();
    final int excess = candidates.size() - limit;
    if (excess <= 0) {
        return;
    }
    if (isMajorCompaction && isUserCompaction) {
        LOG.debug("Warning, compacting more than " + limit
                + " files because of a user-requested major compaction");
    } else {
        LOG.debug("Too many admissible files. Excluding " + excess
                + " files from compaction candidates");
        // Clearing the subList view removes the trailing files from candidates.
        candidates.subList(limit, candidates.size()).clear();
    }
}
From source file:pathwaynet.PathwayCalculator.java
private <T> ArrayList<HashSet<T>> generatePermutatedGroupsWithFixedNode(T thisComponent, Collection<T> allComponents, int groupSize) { ArrayList<HashSet<T>> componentsInGroupPermutations = new ArrayList<>(); for (int i = 0; i < numPermutations; i++) { HashSet<T> componentsThisPermut = new HashSet<>(); componentsThisPermut.add(thisComponent); ArrayList<T> componentsInPathway = new ArrayList<>(); componentsInPathway.addAll(allComponents); componentsInPathway.remove(thisComponent); Collections.shuffle(componentsInPathway); componentsThisPermut.addAll(componentsInPathway.subList(0, groupSize - 1)); componentsInGroupPermutations.add(componentsThisPermut); }// w ww . j a v a 2 s . c o m return componentsInGroupPermutations; }
From source file:com.redhat.rhn.common.hibernate.HibernateFactory.java
/** * Using a named query, find all the objects matching the criteria within. * Warning: This can be very expensive if the returned list is large. Use * only for small tables with static data * @param qryName Named query to use to find a list of objects. * @param qryParams Map of named bind parameters whose keys are Strings. The * map can also be null./*from w w w . j a v a2s .c o m*/ * @param col the collection to use as an inclause * @param colLabel the label the collection will have * @return List of objects returned by named query, or null if nothing * found. */ protected List listObjectsByNamedQuery(String qryName, Map qryParams, Collection col, String colLabel) { if (col.isEmpty()) { return Collections.EMPTY_LIST; } ArrayList<Long> tmpList = new ArrayList<Long>(); List<Long> toRet = new ArrayList<Long>(); tmpList.addAll(col); for (int i = 0; i < col.size();) { int initial = i; int fin = i + 500 < col.size() ? i + 500 : col.size(); List<Long> sublist = tmpList.subList(i, fin); qryParams.put(colLabel, sublist); toRet.addAll(listObjectsByNamedQuery(qryName, qryParams, false)); i = fin; } return toRet; }
From source file:org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy.java
private ArrayList<StoreFile> getCurrentEligibleFiles(ArrayList<StoreFile> candidateFiles, final List<StoreFile> filesCompacting) { // candidates = all storefiles not already in compaction queue if (!filesCompacting.isEmpty()) { // exclude all files older than the newest file we're currently // compacting. this allows us to preserve contiguity (HBASE-2856) StoreFile last = filesCompacting.get(filesCompacting.size() - 1); int idx = candidateFiles.indexOf(last); Preconditions.checkArgument(idx != -1); candidateFiles.subList(0, idx + 1).clear(); }/* w w w. j a v a 2 s . c o m*/ return candidateFiles; }
From source file:io.hops.metadata.ndb.dalimpl.hdfs.EncodingStatusClusterj.java
/**
 * Returns up to {@code limit} pending encoding requests (normal and copy
 * encodings combined), ordered by status-modification time ascending.
 *
 * @param limit maximum number of requests to return
 * @return at most limit requests, oldest status-modification first
 * @throws StorageException on storage-layer failure
 */
@Override
public Collection<EncodingStatus> findRequestedEncodings(int limit) throws StorageException {
    Collection<EncodingStatus> normalEncodings =
            findWithStatus(EncodingStatus.Status.ENCODING_REQUESTED.ordinal(), limit);
    Collection<EncodingStatus> copyEncodings =
            findWithStatus(EncodingStatus.Status.COPY_ENCODING_REQUESTED.ordinal(), limit);
    ArrayList<EncodingStatus> requests = new ArrayList<EncodingStatus>(limit);
    requests.addAll(normalEncodings);
    requests.addAll(copyEncodings);
    // Oldest modification time first, so the longest-waiting requests win the cut.
    Collections.sort(requests, new Comparator<EncodingStatus>() {
        @Override
        public int compare(EncodingStatus o1, EncodingStatus o2) {
            return o1.getStatusModificationTime().compareTo(o2.getStatusModificationTime());
        }
    });
    // Copy instead of returning the subList view: the view would pin the whole
    // backing list and fail if that list were ever structurally modified.
    return new ArrayList<EncodingStatus>(requests.subList(0, Math.min(limit, requests.size())));
}
From source file:org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy.java
/** * @param candidates pre-filtrate/* www . jav a2 s .c o m*/ * @return filtered subset * take upto maxFilesToCompact from the start */ private ArrayList<StoreFile> removeExcessFiles(ArrayList<StoreFile> candidates, boolean isUserCompaction, boolean isMajorCompaction) { int excess = candidates.size() - comConf.getMaxFilesToCompact(); if (excess > 0) { if (isMajorCompaction && isUserCompaction) { LOG.debug("Warning, compacting more than " + comConf.getMaxFilesToCompact() + " files because of a user-requested major compaction"); } else { LOG.debug("Too many admissible files. Excluding " + excess + " files from compaction candidates"); candidates.subList(comConf.getMaxFilesToCompact(), candidates.size()).clear(); } } return candidates; }
From source file:com.seer.datacruncher.utils.generic.CommonUtils.java
/** * Splits <tt>str</tt> around matches of the delimiter character. * * @param str// w w w . jav a 2 s . co m * the string to split * * @param delimiter * the delimiting field string * * @param chrDelim * the delimiting character * * @return the array of strings computed by splitting string * around matches of the delimiter character. * **/ public static String[] fieldSplit(String str, String delimiter, char chrDelim) { if ((str.indexOf(chrDelim, 0)) < 0) { return StringUtils.splitPreserveAllTokens(str, delimiter); } else { ArrayList<String> list = new ArrayList<String>(); String record; List<String> streamsList = Arrays.asList(StringUtils.splitPreserveAllTokens(str, delimiter)); int fEnd; for (int i = 0; i < streamsList.size(); i++) { record = streamsList.get(i); if ((record.indexOf(chrDelim, 0)) < 0) { list.add(record); } else { if (record.startsWith(chrDelim + "")) { // check in start field fEnd = record.indexOf(chrDelim, 1); // find end if (fEnd < 0) { //not found if ((i + 1) < streamsList.size()) { streamsList.set(i + 1, record + delimiter + streamsList.get(i + 1)); } else { list.add(record); } } else { list.add(record); } } } } int resultSize = list.size(); String[] result = new String[resultSize]; return list.subList(0, resultSize).toArray(result); } }
From source file:com.android.calendar.alerts.AlertService.java
/**
 * Redistributes events in the priority lists based on the max # of notifications we
 * can show.
 *
 * Lists are sorted by descending start time and that ordering must be maintained
 * so posted notifications appear in the expected order. Overflow events are moved
 * to the front of the low-priority ("digest") list.
 *
 * @param highPriorityEvents events shown individually first; may be trimmed here
 * @param mediumPriorityEvents events shown if space remains; may be trimmed here
 * @param lowPriorityEvents the digest bucket; receives all bumped events
 * @param maxNotifications maximum number of individual notifications to show
 */
static void redistributeBuckets(ArrayList<NotificationInfo> highPriorityEvents,
        ArrayList<NotificationInfo> mediumPriorityEvents,
        ArrayList<NotificationInfo> lowPriorityEvents, int maxNotifications) {
    // If too many high priority alerts, shift the remaining high priority and all the
    // medium priority ones to the low priority bucket. Note that order is important
    // here; these lists are sorted by descending start time. Maintain that ordering
    // so posted notifications are in the expected order.
    if (highPriorityEvents.size() > maxNotifications) {
        // Move mid-priority to the digest.
        lowPriorityEvents.addAll(0, mediumPriorityEvents);
        // Move the rest of the high priority ones (latest ones) to the digest.
        List<NotificationInfo> itemsToMoveSublist = highPriorityEvents.subList(0,
                highPriorityEvents.size() - maxNotifications);
        // TODO: What order for high priority in the digest?
        lowPriorityEvents.addAll(0, itemsToMoveSublist);
        if (DEBUG) {
            logEventIdsBumped(mediumPriorityEvents, itemsToMoveSublist);
        }
        mediumPriorityEvents.clear();
        // Clearing the sublist view removes the items from the highPriorityEvents list.
        itemsToMoveSublist.clear();
    }
    // Bump the medium priority events if necessary.
    if (mediumPriorityEvents.size() + highPriorityEvents.size() > maxNotifications) {
        int spaceRemaining = maxNotifications - highPriorityEvents.size();
        // Reached our max, move the rest to the digest. Since these are concurrent
        // events, we move the ones with the earlier start time first since they are
        // further in the past and less important.
        List<NotificationInfo> itemsToMoveSublist = mediumPriorityEvents.subList(spaceRemaining,
                mediumPriorityEvents.size());
        lowPriorityEvents.addAll(0, itemsToMoveSublist);
        if (DEBUG) {
            logEventIdsBumped(itemsToMoveSublist, null);
        }
        // Clearing the sublist view removes the items from the mediumPriorityEvents list.
        itemsToMoveSublist.clear();
    }
}
From source file:org.broadinstitute.sting.utils.MathUtils.java
/** * Returns n random indices drawn without replacement from the range 0..(k-1) * * @param n the total number of indices sampled from * @param k the number of random indices to draw (without replacement) * @return a list of k random indices ranging from 0 to (n-1) without duplicates *///from w w w .j ava 2 s . co m static public ArrayList<Integer> sampleIndicesWithoutReplacement(final int n, final int k) { ArrayList<Integer> chosen_balls = new ArrayList<Integer>(k); for (int i = 0; i < n; i++) { chosen_balls.add(i); } Collections.shuffle(chosen_balls, GenomeAnalysisEngine.getRandomGenerator()); //return (ArrayList<Integer>) chosen_balls.subList(0, k); return new ArrayList<Integer>(chosen_balls.subList(0, k)); }
From source file:edu.jhuapl.dorset.agents.StockAgent.java
protected JsonObject processData(String json, String keyWordCompanyName) { Gson gson = new Gson(); JsonObject returnObj = new JsonObject(); JsonObject jsonObj = gson.fromJson(json, JsonObject.class); if (jsonObj != null) { if ((jsonObj.get("dataset")) != null) { JsonArray jsonDataArray = (JsonArray) (((JsonObject) jsonObj.get("dataset")).get("data")); ArrayList<JsonElement> responseDataArrayList = new ArrayList<>(); ArrayList<JsonElement> responseLabelsArrayList = new ArrayList<>(); for (int i = 0; i < jsonDataArray.size(); i++) { JsonArray jsonDataArrayNested = (JsonArray) (jsonDataArray.get(i)); responseDataArrayList.add(jsonDataArrayNested.get(4)); responseLabelsArrayList.add(jsonDataArrayNested.get(0)); }//from www . jav a 2 s . c o m Collections.reverse(responseDataArrayList); Collections.reverse(responseLabelsArrayList); List<JsonElement> returnDataJsonList = responseDataArrayList .subList(responseDataArrayList.size() - DAYS_IN_A_MONTH, responseDataArrayList.size()); JsonArray returnDataJsonListStr = new JsonArray(); for (int i = 0; i < returnDataJsonList.size(); i++) { returnDataJsonListStr.add(returnDataJsonList.get(i)); } JsonObject jsonData = new JsonObject(); jsonData.add(keyWordCompanyName, returnDataJsonListStr); returnObj.addProperty("data", jsonData.toString()); List<JsonElement> returnLabelsJsonList = responseLabelsArrayList .subList(responseLabelsArrayList.size() - DAYS_IN_A_MONTH, responseLabelsArrayList.size()); returnObj.addProperty("labels", returnLabelsJsonList.toString()); returnObj.addProperty("title", keyWordCompanyName + " Stock Ticker"); returnObj.addProperty("xaxis", "Day"); returnObj.addProperty("yaxis", "Close of day market price ($)"); returnObj.addProperty("plotType", "lineplot"); } } return returnObj; }