Usage examples for java.util.ArrayList.remove
public boolean remove(Object o)
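Removes the first occurrence of the specified element from this list, if it is present, and returns true if the list contained the element; matching uses equals(), not identity. A minimal sketch of the contract (class and variable names are illustrative), including the classic List<Integer> pitfall where the remove(int index) overload wins over remove(Object):

import java.util.ArrayList;
import java.util.List;

public class RemoveDemo {
    public static void main(String[] args) {
        List<String> names = new ArrayList<>(List.of("ann", "bob", "ann"));
        System.out.println(names.remove("ann")); // true: first "ann" removed
        System.out.println(names);               // [bob, ann]
        System.out.println(names.remove("zoe")); // false: list unchanged

        List<Integer> nums = new ArrayList<>(List.of(10, 20, 30));
        nums.remove(1);                           // remove(int index): removes 20, not the value 1
        nums.remove(Integer.valueOf(30));         // remove(Object): removes the value 30
        System.out.println(nums);                 // [10]
    }
}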
From source file:circdesigna.DesignSequenceConstraints.java
/**
 * Returns true if an old entry was removed to add this constraint.
 */
private boolean addConstraint(ArrayList<Constraint> toSet, int maxVal, int[] base) {
    if (base.length == 0) {
        throw new RuntimeException("Invalid constraint, no bases");
    }
    Constraint made = new Constraint();
    for (int i = 0; i < base.length; i++) {
        base[i] = Std.monomer.noFlags(base[i]);
        made.regulates[base[i]] = true;
    }
    made.constraintValue = maxVal;
    boolean removed = toSet.remove(made);
    toSet.add(made);
    // Check consistency. If constraints have no solution, an exception is thrown.
    solveSimplex();
    return removed;
}
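The replace-by-equality idiom above (remove the old entry, then add the new one) only works because remove(Object) matches via equals(). A hedged sketch of the same idiom with a hypothetical Key class; the equals/hashCode override is the assumption that lets remove find the old entry:

import java.util.ArrayList;
import java.util.Objects;

// Hypothetical value type, equal whenever the names match.
class Key {
    final String name;
    int payload;
    Key(String name, int payload) { this.name = name; this.payload = payload; }
    @Override public boolean equals(Object o) {
        return o instanceof Key && ((Key) o).name.equals(name);
    }
    @Override public int hashCode() { return Objects.hash(name); }
}

class ReplaceByEquality {
    /** Returns true if an equal old entry was displaced, mirroring addConstraint above. */
    static boolean put(ArrayList<Key> entries, Key k) {
        boolean removed = entries.remove(k); // removes any entry equal to k
        entries.add(k);
        return removed;
    }
}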
From source file:ch.elexis.data.Kontakt.java
@SuppressWarnings("unchecked")
private void statForItem(String typ, String storeToString) {
    @SuppressWarnings("rawtypes")
    Map exi = getMap(FLD_EXTINFO);
    // Read the ranking list for this object type, creating it if absent.
    ArrayList<statL> l = (ArrayList<statL>) exi.get(typ);
    if (l == null) {
        l = new ArrayList<statL>();
    }
    // Cap the size of the ranking list; drop least frequently used entries if necessary.
    while (l.size() > 40) {
        l.remove(l.size() - 1);
    }
    // Check whether the given object is already in the list.
    boolean found = false;
    for (statL c : l) {
        if (c.v.equals(storeToString)) {
            c.c++; // found: increment its counter
            found = true;
            break;
        }
    }
    if (!found) {
        l.add(new statL(storeToString)); // not found: add a new entry
    }
    Collections.sort(l); // sort the list
    exi.put(typ, l);
    setMap(FLD_EXTINFO, exi);
}
From source file:com.chinamobile.bcbsp.comm.MessageQueuesNew.java
@Override
public void incomeAMessage(int srcPartitionDstBucket, WritableBSPMessages msg, int superstep) {
    ArrayList<IMessage> msgList = ((BSPMessagesPack) msg).getPack();
    int count = msgList.size();
    // if (MetaDataOfMessage.RMBLength[srcPartitionDstBucket] <
    //         MetaDataOfMessage.MESSAGE_RECEIVED_BUFFER_THRESHOLD)
    // if (MetaDataOfMessage.RMBLength[srcPartitionDstBucket] < 3000)
    // LOG.info("TAGTAG1");
    int remain = MetaDataOfMessage.MESSAGE_RECEIVED_BUFFER_THRESHOLD
            - MetaDataOfMessage.RMBLength[srcPartitionDstBucket];
    int counter = 0;
    while (count-- > 0 && remain-- > 0) {
        counter++;
        incomeAMessage(srcPartitionDstBucket, msgList.remove(0), superstep);
    }
    // The loop condition decremented count one extra time on exit; restore it.
    count++;
    MetaDataOfMessage.RMBLength[srcPartitionDstBucket] += counter;
    if (count == 0) {
        return;
    }
    // Write the remaining messages to disk (the "SpilledData" procedure).
    // LOG.info("######### " + count + "$$$$$$$$ " + msgList.size());
    try {
        this.diskManager.processMessagesSave(msgList, superstep, srcPartitionDstBucket);
    } catch (IOException e) {
        throw new RuntimeException("[MessageQueuesNew] incomeAMessage exception:", e);
    }
    MetaDataOfMessage.RMBLength[srcPartitionDstBucket] += count;
}
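A side note on the drain loop above: each msgList.remove(0) on an ArrayList shifts every remaining element left by one, so draining n messages this way costs O(n^2). A sketch, with hypothetical names, of an equivalent front-drain that clears the consumed prefix in one bulk operation:

import java.util.ArrayList;
import java.util.List;

class DrainSketch {
    // Consume up to 'limit' items from the front of 'list' in O(n) total.
    static <T> List<T> drainFront(ArrayList<T> list, int limit) {
        int n = Math.min(limit, list.size());
        List<T> head = new ArrayList<>(list.subList(0, n)); // copy the prefix
        list.subList(0, n).clear(); // one bulk shift instead of n single-element shifts
        return head;
    }
}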
From source file:com.wildplot.android.ankistats.ReviewCount.java
public boolean calculateDone(int type, boolean reps) {
    mType = type;
    mBackwards = true;
    if (reps) {
        mTitle = R.string.stats_review_count;
        mAxisTitles = new int[] { type, R.string.stats_answers, R.string.stats_cumulative_answers };
    } else {
        mTitle = R.string.stats_review_time;
    }
    mValueLabels = new int[] { R.string.statistics_learn, R.string.statistics_relearn,
            R.string.statistics_young, R.string.statistics_mature, R.string.statistics_cram };
    mColors = new int[] { R.color.stats_learn, R.color.stats_relearn, R.color.stats_young,
            R.color.stats_mature, R.color.stats_cram };
    int num = 0;
    int chunk = 0;
    switch (type) {
    case Utils.TYPE_MONTH:
        num = 31;
        chunk = 1;
        break;
    case Utils.TYPE_YEAR:
        num = 52;
        chunk = 7;
        break;
    case Utils.TYPE_LIFE:
        num = -1;
        chunk = 30;
        break;
    }
    ArrayList<String> lims = new ArrayList<String>();
    if (num != -1) {
        lims.add("id > " + ((mCollectionData.getDayCutoff() - ((num + 1) * chunk * 86400)) * 1000));
    }
    String lim = _revlogLimitWholeOnly().replaceAll("[\\[\\]]", "");
    if (lim.length() > 0) {
        lims.add(lim);
    }
    if (lims.size() > 0) {
        lim = "WHERE ";
        while (lims.size() > 1) {
            lim += lims.remove(0) + " AND ";
        }
        lim += lims.remove(0);
    } else {
        lim = "";
    }
    String ti;
    String tf;
    if (!reps) {
        ti = "time/1000";
        if (mType == 0) {
            tf = "/60.0"; // minutes
            mAxisTitles = new int[] { type, R.string.stats_minutes, R.string.stats_cumulative_time_minutes };
        } else {
            tf = "/3600.0"; // hours
            mAxisTitles = new int[] { type, R.string.stats_hours, R.string.stats_cumulative_time_hours };
        }
    } else {
        ti = "1";
        tf = "";
    }
    ArrayList<double[]> list = new ArrayList<double[]>();
    Cursor cur = null;
    String query = "SELECT (cast((id/1000 - " + mCollectionData.getDayCutoff() + ") / 86400.0 AS INT))/"
            + chunk + " AS day, "
            + "sum(CASE WHEN type = 0 THEN " + ti + " ELSE 0 END)" + tf + ", " // lrn
            + "sum(CASE WHEN type = 1 AND lastIvl < 21 THEN " + ti + " ELSE 0 END)" + tf + ", " // yng
            + "sum(CASE WHEN type = 1 AND lastIvl >= 21 THEN " + ti + " ELSE 0 END)" + tf + ", " // mtr
            + "sum(CASE WHEN type = 2 THEN " + ti + " ELSE 0 END)" + tf + ", " // lapse
            + "sum(CASE WHEN type = 3 THEN " + ti + " ELSE 0 END)" + tf // cram
            + " FROM revlog " + lim + " GROUP BY day ORDER BY day";
    Log.d(AnkiStatsApplication.TAG, "ReviewCount query: " + query);
    try {
        cur = mAnkiDb.getDatabase().rawQuery(query, null);
        while (cur.moveToNext()) {
            list.add(new double[] { cur.getDouble(0), cur.getDouble(1), cur.getDouble(4),
                    cur.getDouble(2), cur.getDouble(3), cur.getDouble(5) });
        }
    } finally {
        if (cur != null && !cur.isClosed()) {
            cur.close();
        }
    }
    // Small adjustment so achartengine builds the chart properly.
    if (type != Utils.TYPE_LIFE && (list.size() == 0 || list.get(0)[0] > -num)) {
        list.add(0, new double[] { -num, 0, 0, 0, 0, 0 });
    } else if (type == Utils.TYPE_LIFE && list.size() == 0) {
        list.add(0, new double[] { -12, 0, 0, 0, 0, 0 });
    }
    if (list.get(list.size() - 1)[0] < 0) {
        list.add(new double[] { 0, 0, 0, 0, 0, 0 });
    }
    mSeriesList = new double[6][list.size()];
    for (int i = 0; i < list.size(); i++) {
        double[] data = list.get(i);
        mSeriesList[0][i] = data[0]; // day
        mSeriesList[1][i] = data[1] + data[2] + data[3] + data[4] + data[5]; // lrn
        mSeriesList[2][i] = data[2] + data[3] + data[4] + data[5]; // relearn
        mSeriesList[3][i] = data[3] + data[4] + data[5]; // young
        mSeriesList[4][i] = data[4] + data[5]; // mature
        mSeriesList[5][i] = data[5]; // cram
        if (mSeriesList[1][i] > mMaxCards)
            mMaxCards = (int) Math.round(data[1] + data[2] + data[3] + data[4] + data[5]);
        if (data[5] >= 0.999)
            mFoundCramCards = true;
        if (data[1] >= 0.999)
            mFoundLearnCards = true;
        if (data[2] >= 0.999)
            mFoundRelearnCards = true;
        if (data[0] > mLastElement)
            mLastElement = data[0];
        if (data[0] < mFirstElement)
            mFirstElement = data[0];
        if (data[0] == 0) {
            mZeroIndex = i;
        }
    }
    mMaxElements = list.size() - 1;
    return list.size() > 0;
}
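The WHERE-clause assembly above drains lims with remove(0) purely to interleave " AND " separators; String.join produces the same string without mutating the list. A sketch, assuming the same lims contents:

import java.util.ArrayList;
import java.util.List;

class WhereClause {
    static String build(List<String> lims) {
        return lims.isEmpty() ? "" : "WHERE " + String.join(" AND ", lims);
    }

    public static void main(String[] args) {
        List<String> lims = new ArrayList<>(List.of("id > 100", "cid = 7"));
        System.out.println(build(lims)); // WHERE id > 100 AND cid = 7
    }
}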
From source file:beast.structuredCoalescent.distribution.ExactStructuredCoalescent.java
private double coalesce(int currTreeInterval) {
    List<Node> coalLines = treeIntervalsInput.get().getLineagesRemoved(currTreeInterval);
    if (coalLines.size() > 2) {
        System.err.println("Unsupported coalescent at non-binary node");
        System.exit(0);
    }
    if (coalLines.size() < 2) {
        System.out.println();
        System.out.println("WARNING: Less than two lineages found at coalescent event!");
        System.out.println();
        return Double.NaN;
    }
    // get the indices of the two daughter lineages
    final int daughterIndex1 = activeLineages.indexOf(coalLines.get(0).getNr());
    final int daughterIndex2 = activeLineages.indexOf(coalLines.get(1).getNr());
    if (daughterIndex1 == -1 || daughterIndex2 == -1) {
        System.out.println("daughter lineages at coalescent event not found");
        return Double.NaN;
    }
    // check which index is larger so that removal starts with the larger one
    // (removing the smaller index first would shift the larger one)
    if (daughterIndex1 > daughterIndex2) {
        activeLineages.remove(daughterIndex1);
        activeLineages.remove(daughterIndex2);
    } else {
        activeLineages.remove(daughterIndex2);
        activeLineages.remove(daughterIndex1);
    }
    // add the new parent lineage as an active lineage
    activeLineages.add(coalLines.get(0).getParent().getNr());
    // calculate the number of combinations after the coalescent event
    int nrs = combination.size() / states;
    // newly initialize the number of lineages per configuration
    Integer[][] newSums = new Integer[nrs][states];
    Integer[] newSumsTot = new Integer[nrs];
    // find all joint probabilities where the two lineages are in the same deme
    ArrayList<Double> newProbability = new ArrayList<Double>();
    ArrayList<ArrayList<Integer>> newCombination = new ArrayList<ArrayList<Integer>>();
    double[] pairwiseCoalRate = new double[states];
    int futureState = 0;
    for (int i = 0; i < jointStateProbabilities.size(); i++) {
        // check if this is a configuration where both daughter lineages are in the same state
        if (combination.get(i).get(daughterIndex1) == combination.get(i).get(daughterIndex2)) {
            ArrayList<Integer> coalLoc = new ArrayList<Integer>(combination.get(i));
            newSums[futureState] = sums[i];
            newSums[futureState][combination.get(i).get(daughterIndex1)]--;
            futureState++;
            if (daughterIndex1 > daughterIndex2) {
                coalLoc.remove(daughterIndex1);
                coalLoc.remove(daughterIndex2);
            } else {
                coalLoc.remove(daughterIndex2);
                coalLoc.remove(daughterIndex1);
            }
            coalLoc.add(combination.get(i).get(daughterIndex1));
            newCombination.add(coalLoc);
            newProbability.add(coalescent_rates[combination.get(i).get(daughterIndex1)]
                    * jointStateProbabilities.get(i));
            pairwiseCoalRate[combination.get(i).get(daughterIndex1)] +=
                    2 * coalescent_rates[combination.get(i).get(daughterIndex1)]
                            * jointStateProbabilities.get(i);
        }
    }
    combination = newCombination;
    jointStateProbabilities = newProbability;
    connectivity = new Integer[combination.size()][combination.size()];
    // build the connectivity matrix
    for (int a = 0; a < combination.size(); a++) {
        for (int b = 0; b < combination.size(); b++) {
            int diff = 0;
            int[] directs = new int[2];
            ArrayList<Integer> comb1 = combination.get(a);
            ArrayList<Integer> comb2 = combination.get(b);
            for (int i = 0; i < comb1.size(); i++) {
                int d = comb1.get(i) - comb2.get(i);
                if (d != 0) {
                    diff++;
                    directs[0] = comb1.get(i);
                    directs[1] = comb2.get(i);
                }
            }
            if (diff == 1) {
                connectivity[a][b] = migration_map[directs[0]][directs[1]];
            }
        }
    }
    for (int i = 0; i < nrs; i++) {
        int news = 0;
        for (int j = 0; j < states; j++) {
            int add = newSums[i][j] - 1;
            if (add > 0)
                news += add;
        }
        newSumsTot[i] = news;
    }
    // do normalization
    double prob = 0.0;
    for (int i = 0; i < pairwiseCoalRate.length; i++)
        prob += pairwiseCoalRate[i];
    for (int i = 0; i < jointStateProbabilities.size(); i++)
        jointStateProbabilities.set(i, jointStateProbabilities.get(i) / prob);
    DoubleMatrix pVec = new DoubleMatrix(states);
    for (int i = 0; i < pairwiseCoalRate.length; i++)
        pVec.put(i, pairwiseCoalRate[i] / prob);
    nodeStateProbabilities[coalLines.get(0).getParent().getNr() - nrSamples] = pVec;
    sums = newSums;
    sumsTot = newSumsTot;
    // return the normalization constant as a probability (in log space)
    return Math.log(prob);
}
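The descending-index removals above (on both activeLineages and coalLoc) matter because remove(int) shifts every later element left by one; deleting the smaller index first would invalidate the larger one. A tiny self-contained illustration of the rule, assuming the two indices are distinct:

import java.util.ArrayList;
import java.util.List;

class RemoveTwoIndices {
    // Remove positions i and j (i != j): always delete the larger index first.
    static <T> void removeBoth(List<T> list, int i, int j) {
        list.remove(Math.max(i, j));
        list.remove(Math.min(i, j));
    }

    public static void main(String[] args) {
        List<String> l = new ArrayList<>(List.of("a", "b", "c", "d"));
        removeBoth(l, 1, 3);
        System.out.println(l); // [a, c]
    }
}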
From source file:com.l2jfree.gameserver.geodata.pathfinding.PathFinding.java
public final Node[] searchByClosest2(Node start, Node end, int instanceId) {
    // Always continues checking from the closest-to-target non-blocked
    // node from the to_visit list. There's extra length in the path if it needs
    // to go backwards/sideways, but when moving generally forwards this is extra fast
    // and accurate. And it can reach insane distances (try it with 800 nodes..).
    // Minimum required node count would be around 300-400.
    // Generally returns a bit (only a bit) more intelligent-looking routes than
    // the basic version. Not a true distance image (which would increase CPU
    // load) level of intelligence though.

    // list of visited nodes
    L2FastSet<Node> visited = L2Collections.newL2FastSet();
    // list of nodes to visit
    ArrayList<Node> to_visit = L2Collections.newArrayList();
    to_visit.add(start);
    try {
        int targetx = end.getNodeX();
        int targety = end.getNodeY();
        int dx, dy;
        boolean added;
        int i = 0;
        while (i < 550) {
            if (to_visit.isEmpty()) {
                // no path found
                return null;
            }
            Node node = to_visit.remove(0);
            if (node.equals(end)) {
                // path found!
                return constructPath2(node);
            } else {
                i++;
                visited.add(node);
                node.attachNeighbors(instanceId);
                Node[] neighbors = node.getNeighbors();
                if (neighbors == null)
                    continue;
                for (Node n : neighbors) {
                    if (!visited.contains(n) && !to_visit.contains(n)) {
                        added = false;
                        n.setParent(node);
                        dx = targetx - n.getNodeX();
                        dy = targety - n.getNodeY();
                        n.setCost(dx * dx + dy * dy);
                        for (int index = 0; index < to_visit.size(); index++) {
                            // supposed to find it quite early..
                            if (to_visit.get(index).getCost() > n.getCost()) {
                                to_visit.add(index, n);
                                added = true;
                                break;
                            }
                        }
                        if (!added)
                            to_visit.add(n);
                    }
                }
            }
        }
        // no path found
        return null;
    } finally {
        L2Collections.recycle(visited);
        L2Collections.recycle(to_visit);
    }
}
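The cost-ordered insertion plus to_visit.remove(0) above is a hand-rolled priority queue; java.util.PriorityQueue gives the same best-first pop in O(log n) per operation instead of O(n). A sketch under the assumption that Node exposes the getCost() accessor used above:

import java.util.Comparator;
import java.util.PriorityQueue;

class BestFirstSketch {
    // Minimal stand-in for the example's Node type.
    interface Node { int getCost(); }

    static PriorityQueue<Node> newOpenList() {
        return new PriorityQueue<>(Comparator.comparingInt(Node::getCost));
    }

    static Node popClosest(PriorityQueue<Node> openList) {
        return openList.poll(); // returns the lowest-cost node, or null when empty
    }
}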
From source file:com.morphoss.jumble.models.Category.java
/**
 * @param context required to work
 * @return the next word of the available ones
 */
public Word getNextWord(Context context) {
    ContentValues cv = new ContentValues();
    ArrayList<String> tempsolved = CategoryWords.getSolvedWordsFromCategory(context, this);
    double ratioSolved = (double) tempsolved.size() / (double) words.size();
    Log.d(TAG, "size of solved list : " + tempsolved.size());
    Log.d(TAG, "size of words list : " + words.size());
    Log.d(TAG, "ratio solved : " + ratioSolved);
    Log.d(TAG, "category selected : " + this.getLocalisedName());
    int ratio = (int) (ratioSolved * 100);
    Log.d(TAG, "ratio :" + ratio);
    if (this.getLocalisedName() == null) {
        Log.e(TAG, "error localised name is null :");
    }
    cv.put(JumbleCategoryTable.UNLOCK, "0");
    cv.put(JumbleCategoryTable.CATEGORY, this.getLocalisedName());
    cv.put(JumbleCategoryTable.RATIO, ratio);
    cv.put(JumbleCategoryTable.CC, SettingsActivity.getLanguageToLoad());
    if (ratio == 0) {
        context.getContentResolver().insert(JumbleProvider.CONTENT_URI_CATEGORIES, cv);
    } else {
        cv = new ContentValues();
        cv.put(JumbleCategoryTable.RATIO, ratio);
        String selection = JumbleCategoryTable.CATEGORY + "= ? AND " + JumbleCategoryTable.CC + "= ?";
        String[] selectionArgs = { this.getLocalisedName(), SettingsActivity.getLanguageToLoad() };
        context.getContentResolver().update(
                Uri.withAppendedPath(JumbleProvider.CONTENT_URI_CATEGORIES, "addratio"), cv, selection,
                selectionArgs);
    }
    Category nextCategory = CategoryGridAdapter.getCategory(getId());
    Log.d(TAG, "next category name : " + nextCategory.getLocalisedName());
    if (nextCategory.getLocalisedName() != null && ratio == 0) {
        cv = new ContentValues();
        cv.put(JumbleCategoryTable.UNLOCK, "0");
        cv.put(JumbleCategoryTable.CATEGORY, nextCategory.getLocalisedName());
        cv.put(JumbleCategoryTable.RATIO, ratio);
        cv.put(JumbleCategoryTable.CC, SettingsActivity.getLanguageToLoad());
        context.getContentResolver().insert(JumbleProvider.CONTENT_URI_CATEGORIES, cv);
    }
    if (!nextCategory.unlocked() && ratio > 20 && nextCategory.getLocalisedName() != null) {
        Log.d(TAG, "unlocking a new category");
        unlockedCategories.add(nextCategory.getLocalisedName());
        cv = new ContentValues();
        //cv.put(JumbleCategoryTable.CATEGORY, nextCategory.getLocalisedName());
        //cv.put(JumbleCategoryTable.CC, SettingsActivity.getLanguageToLoad());
        cv.put(JumbleCategoryTable.UNLOCK, "1");
        String selection = JumbleCategoryTable.CATEGORY + "= ? AND " + JumbleCategoryTable.CC + "= ?";
        String[] selectionArgs = { nextCategory.getLocalisedName(), SettingsActivity.getLanguageToLoad() };
        context.getContentResolver().update(
                Uri.withAppendedPath(JumbleProvider.CONTENT_URI_CATEGORIES, "unlockCategory"), cv,
                selection, selectionArgs);
        //context.getContentResolver().insert(JumbleProvider.CONTENT_URI_CATEGORIES, cv);
        nextCategory.setUnlocked(true);
    }
    int countEasyWords = wordsEasy.size();
    Log.d(TAG, "count of easy words :" + countEasyWords);
    int countMediumWords = wordsMedium.size();
    Log.d(TAG, "count of medium words :" + countMediumWords);
    int countAdvancedWords = wordsAdvanced.size();
    Log.d(TAG, "count of advanced words :" + countAdvancedWords);
    ArrayList<Word> filteredwords = new ArrayList<Word>();
    filteredwords.addAll(wordsEasy);
    if (filteredwords.size() < 3)
        filteredwords.addAll(wordsMedium);
    if (filteredwords.size() < 3)
        filteredwords.addAll(wordsAdvanced);
    if (filteredwords.size() == 0)
        return null;
    Word word = CategoryWords.getRandomItem(filteredwords);
    ArrayList<Word> wordList;
    Log.d(TAG, "the random word is : " + word.getLocalisedWord() + " with level :" + word.getLevel());
    switch (word.getLevel()) {
    case EASY:
        wordList = wordsEasy;
        break;
    case MEDIUM:
        wordList = wordsMedium;
        break;
    case ADVANCED:
        wordList = wordsAdvanced;
        break;
    default:
        wordList = wordsAdvanced;
        break;
    }
    // remove the chosen word from its difficulty list
    for (int i = 0; i < wordList.size(); i++) {
        if (wordList.get(i).equals(word)) {
            wordList.remove(i);
            break;
        }
    }
    return word;
}
From source file:com.krawler.spring.hrms.payroll.employercontribution.hrmsPayrollEmployerContributionDAOImpl.java
@Override
public KwlReturnObject getEmployerContribTemplateDetails(HashMap<String, Object> requestParams) {
    boolean success = true;
    List tabledata = null;
    try {
        ArrayList orderby = null;
        ArrayList ordertype = null;
        ArrayList name = null;
        ArrayList value = null;
        String hql = "from TemplateMapEmployerContribution ";
        if (requestParams.get("filter_names") != null && requestParams.get("filter_values") != null) {
            name = new ArrayList((List<String>) requestParams.get("filter_names"));
            value = new ArrayList((List<Object>) requestParams.get("filter_values"));
            hql += com.krawler.common.util.StringUtil.filterQuery(name, "where");
            int ind = hql.indexOf("(");
            if (ind > -1) {
                int index = Integer.valueOf(hql.substring(ind + 1, ind + 2));
                // use replace() for the literal "(index)" token;
                // replaceAll() would treat the parentheses as a regex group
                hql = hql.replace("(" + index + ")", value.get(index).toString());
                value.remove(index);
            }
        }
        if (requestParams.get("order_by") != null && requestParams.get("order_type") != null) {
            orderby = new ArrayList((List<String>) requestParams.get("order_by"));
            ordertype = new ArrayList((List<Object>) requestParams.get("order_type"));
            hql += StringUtil.orderQuery(orderby, ordertype);
        }
        // value is null when no filters were supplied; pass an empty parameter array then
        tabledata = HibernateUtil.executeQuery(hibernateTemplate, hql,
                value != null ? value.toArray() : new Object[0]);
        success = true;
    } catch (Exception e) {
        logger.warn(
                "Exception occurred in hrmsPayrollEmployerContributionDAOImpl.getEmployerContribTemplateDetails",
                e);
        success = false;
    } finally {
        return new KwlReturnObject(success, "", "-1", tabledata,
                tabledata != null ? tabledata.size() : 0);
    }
}
From source file:Log.java
/**
 * Removes a log target from this facility.
 *
 * @param target the target to remove.
 */
public synchronized void removeTarget(final LogTarget target) {
    if (target == null) {
        throw new NullPointerException();
    }
    final ArrayList l = new ArrayList();
    l.addAll(Arrays.asList(this.logTargets));
    l.remove(target);
    final LogTarget[] targets = new LogTarget[l.size()];
    this.logTargets = (LogTarget[]) l.toArray(targets);
}
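The copy, remove, re-snapshot sequence above is the copy-on-write pattern; java.util.concurrent.CopyOnWriteArrayList packages the same idea, including the equals()-based remove(Object). A sketch with a hypothetical registry, where Object stands in for LogTarget:

import java.util.concurrent.CopyOnWriteArrayList;

class TargetRegistry {
    private final CopyOnWriteArrayList<Object> targets = new CopyOnWriteArrayList<>();

    void removeTarget(Object target) {
        if (target == null) {
            throw new NullPointerException();
        }
        targets.remove(target); // the array copy happens inside; readers never block
    }
}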
From source file:edu.umass.cs.reconfiguration.reconfigurationutils.ConsistentReconfigurableNodeConfig.java
/**
 * This method maps a set of addresses, newAddresses, to a set of nodes such
 * that there is maximal overlap with the specified set of nodes, oldNodes.
 * It is somewhat nontrivial only because there is a many-to-one mapping
 * from nodes to addresses, so a simple reverse lookup is not meaningful.
 *
 * @param newAddresses
 * @param oldNodes
 * @return Set of active replica IPs corresponding to {@code newAddresses}
 *         that have high overlap with the set of old active replica nodes
 *         {@code oldNodes}.
 */
public Set<NodeIDType> getIPToActiveReplicaIDs(ArrayList<InetAddress> newAddresses, Set<NodeIDType> oldNodes) {
    Set<NodeIDType> newNodes = new HashSet<NodeIDType>(); // return value
    ArrayList<InetAddress> unassigned = new ArrayList<InetAddress>();
    for (InetAddress address : newAddresses)
        unassigned.add(address);
    // assign old nodes first if they match any new address
    for (NodeIDType oldNode : oldNodes) {
        InetAddress oldAddress = this.nodeConfig.getNodeAddress(oldNode);
        if (unassigned.contains(oldAddress)) {
            newNodes.add(oldNode);
            unassigned.remove(oldAddress);
        }
    }
    // assign any node to unassigned addresses
    for (NodeIDType node : this.nodeConfig.getActiveReplicas()) {
        if (this.activesSlatedForRemoval.contains(node))
            continue;
        InetAddress address = this.nodeConfig.getNodeAddress(node);
        if (unassigned.contains(address)) {
            newNodes.add(node);
            unassigned.remove(address);
        }
    }
    return newNodes;
}
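Worth noting in the example above: remove(Object) deletes exactly one occurrence, so unassigned behaves as a multiset of addresses, which is what the many-to-one node-to-address mapping requires. A two-line demonstration:

import java.util.ArrayList;
import java.util.List;

class OneOccurrence {
    public static void main(String[] args) {
        List<String> addrs = new ArrayList<>(List.of("10.0.0.1", "10.0.0.1", "10.0.0.2"));
        addrs.remove("10.0.0.1"); // removes a single occurrence only
        System.out.println(addrs); // [10.0.0.1, 10.0.0.2]
    }
}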