List of usage examples for com.google.common.collect Multimap size
int size();
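Before the project examples below, a minimal self-contained sketch of what size() reports: Multimap.size() counts key-value pairs (entries), not distinct keys, so it differs from keySet().size() whenever a key maps to several values. The class name and sample values here are illustrative only, not taken from any of the projects listed.

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

public class MultimapSizeExample {
    public static void main(String[] args) {
        Multimap<String, String> tags = HashMultimap.create();
        tags.put("post-1", "java");
        tags.put("post-1", "guava");
        tags.put("post-2", "collections");

        // size() counts every key-value pair, not distinct keys
        System.out.println(tags.size());          // 3
        System.out.println(tags.keySet().size()); // 2
        System.out.println(tags.isEmpty());       // false
    }
}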
From source file:org.mule.util.journal.queue.LocalTxQueueTransactionRecoverer.java
/**
 * Recover all the pending transactions.
 *
 * Will undo all operations done over queues that were not committed or rolled back.
 *
 * Clears the transaction log after processing all the log entries, since those entries are no longer
 * required.
 */
public void recover() {
    if (logger.isDebugEnabled()) {
        logger.debug("Executing transaction recovery");
    }
    Multimap<Integer, LocalQueueTxJournalEntry> allEntries = this.localTxQueueTransactionJournal
            .getAllLogEntries();
    if (logger.isDebugEnabled()) {
        logger.debug("Found " + allEntries.size() + " txs to recover");
    }
    int txRecovered = 0;
    for (Integer txId : allEntries.keySet()) {
        Collection<LocalQueueTxJournalEntry> entries = allEntries.get(txId);
        Object commitOrRollback = CollectionUtils.find(entries, new Predicate() {
            @Override
            public boolean evaluate(Object object) {
                LocalQueueTxJournalEntry logEntry = (LocalQueueTxJournalEntry) object;
                return logEntry.isCommit() || logEntry.isRollback();
            }
        });
        if (commitOrRollback != null) {
            continue;
        }
        txRecovered++;
        for (LocalQueueTxJournalEntry logEntry : entries) {
            if (logEntry.isRemove()) {
                String queueName = logEntry.getQueueName();
                RecoverableQueueStore queue = queueProvider.getRecoveryQueue(queueName);
                Serializable polledValue = logEntry.getValue();
                if (!queue.contains(polledValue)) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("re-adding polled element that was not commited to queue " + queue.getName());
                    }
                    try {
                        queue.putNow(polledValue);
                    } catch (InterruptedException e) {
                        throw new MuleRuntimeException(e);
                    }
                }
            } else if (logEntry.isAdd() || logEntry.isAddFirst()) {
                Serializable offeredValue = logEntry.getValue();
                String queueName = logEntry.getQueueName();
                RecoverableQueueStore queue = queueProvider.getRecoveryQueue(queueName);
                if (queue.contains(offeredValue)) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("removing offer element that was not commited to queue " + queue.getName());
                    }
                    queue.remove(offeredValue);
                }
            }
        }
    }
    if (logger.isDebugEnabled()) {
        logger.debug("Recovered " + txRecovered + " txs to recover");
    }
    this.localTxQueueTransactionJournal.clear();
}
From source file:sadl.models.pdrta.StateStatistic.java
/**
 * Calculates the {@link LikelihoodValue} of the symbol distributions for splitting a transition. This is done by
 * splitting the set of {@link TimedTail}s in a {@link PDRTAState}.
 *
 * @param s
 *            The {@link PDRTAState} for splitting
 * @param mSym
 *            The set of {@link TimedTail}s to be split apart, clustered by symbol index
 * @return The {@link LikelihoodValue} of the symbol distributions for splitting a transition
 */
public static LikelihoodValue getLikelihoodRatioSym(PDRTAState s, Multimap<Integer, TimedTail> mSym,
        boolean advancedPooling, CalcRatio cr) {
    final StateStatistic st = s.getStat();
    final PDRTA a = s.getPDRTA();
    final int minData = PDRTA.getMinData();
    if (!st.trainMode) {
        throw new UnsupportedOperationException();
    }
    // LRT_FIX : Operator for calculation interruption (thesis: AND, impl: OR, own: AND)
    if (SimplePDRTALearner.bOp[2].eval((st.totalOutCount - mSym.size()) < minData, mSym.size() < minData)) {
        return new LikelihoodValue();
    }
    final int[] part1SymCount = Arrays.copyOf(st.symbolCount, st.symbolCount.length);
    final int[] part2SymCount = new int[st.symbolCount.length];
    for (final Entry<Integer, Collection<TimedTail>> eCol : mSym.asMap().entrySet()) {
        part1SymCount[eCol.getKey().intValue()] -= eCol.getValue().size();
        part2SymCount[eCol.getKey().intValue()] += eCol.getValue().size();
    }
    return calcInterimLRT(a, part1SymCount, part2SymCount, advancedPooling, cr);
}
From source file:uk.ac.ebi.mdk.service.loader.data.ChEBIDataLoader.java
public String getFormula(String accession, Collection<DataValue> values) {
    Set<DataValue> formulae = new HashSet<DataValue>();
    for (DataValue value : values) {
        if (value.type.equals("FORMULA") && !value.value.equals(".")) {
            formulae.add(value);
        }
    }
    if (formulae.isEmpty())
        return "";
    // single entry
    if (formulae.size() == 1) {
        return formulae.iterator().next().value;
    }
    // resolve duplicates
    Multimap<String, DataValue> sourceMap = HashMultimap.create();
    for (DataValue value : formulae) {
        sourceMap.put(value.source, value);
    }
    if (sourceMap.size() != 1 && sourceMap.containsKey("ChEBI")) {
        Collection<DataValue> chebiFormulae = sourceMap.get("ChEBI");
        if (chebiFormulae.size() == 1) {
            return chebiFormulae.iterator().next().value;
        } else if (chebiFormulae.size() == 2) {
            // if there are two from ChEBI, favour the one with the R group
            Iterator<DataValue> it = chebiFormulae.iterator();
            DataValue first = it.next();
            DataValue second = it.next();
            if (first.value.contains("R") && !second.value.contains("R")) {
                return first.value;
            } else if (second.value.contains("R") && !first.value.contains("R")) {
                return second.value;
            }
        }
    }
    LOGGER.warn("Could not resolve single formula for: " + accession + " : " + formulae);
    // just use the first one
    return formulae.iterator().next().value;
}
From source file:sadl.models.pdrta.StateStatistic.java
/**
 * Calculates the {@link LikelihoodValue} of the histogram bin distributions for splitting a transition. This is
 * done by splitting the set of {@link TimedTail}s in a {@link PDRTAState}.
 *
 * @param s
 *            The {@link PDRTAState} for splitting
 * @param mHist
 *            The set of {@link TimedTail}s to be split apart, clustered by histogram index
 * @return The {@link LikelihoodValue} of the histogram bin distributions for splitting a transition
 */
public static LikelihoodValue getLikelihoodRatioTime(PDRTAState s, Multimap<Integer, TimedTail> mHist,
        boolean advancedPooling, CalcRatio cr) {
    final StateStatistic st = s.getStat();
    final PDRTA a = s.getPDRTA();
    final int minData = PDRTA.getMinData();
    if (!st.trainMode) {
        throw new UnsupportedOperationException();
    }
    // LRT_FIX : Operator for calculation interruption (thesis: AND, impl: OR, own: AND)
    if (SimplePDRTALearner.bOp[2].eval((st.totalOutCount - mHist.size()) < minData, mHist.size() < minData)) {
        return new LikelihoodValue();
    }
    final int[] part1TimeCount = Arrays.copyOf(st.timeCount, st.timeCount.length);
    final int[] part2TimeCount = new int[st.timeCount.length];
    for (final Entry<Integer, Collection<TimedTail>> eCol : mHist.asMap().entrySet()) {
        part1TimeCount[eCol.getKey().intValue()] -= eCol.getValue().size();
        part2TimeCount[eCol.getKey().intValue()] += eCol.getValue().size();
    }
    return calcInterimLRT(a, part1TimeCount, part2TimeCount, advancedPooling, cr);
}
From source file:org.jclouds.ec2.util.IpPermissions.java
protected IpPermissions(IpProtocol ipProtocol, int fromPort, int toPort,
        Multimap<String, String> userIdGroupPairs, Iterable<String> groupIds, Iterable<String> ipRanges) {
    super(ipProtocol, fromPort, toPort, userIdGroupPairs, groupIds,
            userIdGroupPairs.size() == 0 ? ipRanges : ImmutableSet.<String>of());
}
From source file:org.mule.util.xa.XaTransactionRecoverer.java
public synchronized Xid[] recover(int flag) throws XAException {
    // No need to do anything for XAResource.TMENDRSCAN
    if (flag == XAResource.TMENDRSCAN) {
        return new Xid[0];
    }
    // For XAResource.TMSTARTRSCAN and XAResource.TMNOFLAGS (the only possible values besides
    // XAResource.TMENDRSCAN) we return the set of Xids to recover (no commit, no rollback). Bitronix will
    // commit or roll back the Xids that are dangling transactions and will do nothing for those that are
    // currently being executed.
    Multimap<Xid, XaQueueTxJournalEntry> xidXaJournalEntryMultimap = xaTxQueueTransactionJournal
            .getAllLogEntries();
    if (logger.isDebugEnabled()) {
        logger.debug("Executing XA recover");
        logger.debug("Found " + xidXaJournalEntryMultimap.size() + " in the tx log");
    }
    List<Xid> txsToRecover = new ArrayList<Xid>();
    for (Xid xid : xidXaJournalEntryMultimap.keySet()) {
        Collection<XaQueueTxJournalEntry> entries = xidXaJournalEntryMultimap.get(xid);
        Object commitOrRollback = CollectionUtils.find(entries, new Predicate() {
            @Override
            public boolean evaluate(Object object) {
                XaQueueTxJournalEntry logEntry = (XaQueueTxJournalEntry) object;
                return logEntry.isCommit() || logEntry.isRollback();
            }
        });
        if (commitOrRollback != null) {
            continue;
        }
        txsToRecover.add(xid);
    }
    if (logger.isDebugEnabled()) {
        logger.debug("found " + txsToRecover.size() + " txs to recover");
    }
    return txsToRecover.toArray(new Xid[txsToRecover.size()]);
}
From source file:com.github.haixing_hu.io.OutputUtils.java
public static <K, V> void writeMultimap(final OutputStream out, @Nullable final Multimap<K, V> map,
        final Class<K> keyClass, final Class<V> valueClass) throws IOException {
    final BinarySerializer keySerializer = BinarySerialization.getSerializer(keyClass);
    if (keySerializer == null) {
        throw new NoBinarySerializerRegisteredException(keyClass);
    }
    final BinarySerializer valueSerializer = BinarySerialization.getSerializer(valueClass);
    if (valueSerializer == null) {
        throw new NoBinarySerializerRegisteredException(valueClass);
    }
    if (!writeNullMark(out, map)) {
        writeVarInt(out, map.size());
        for (final Map.Entry<K, V> entry : map.entries()) {
            final K key = entry.getKey();
            final V value = entry.getValue();
            keySerializer.serialize(out, key);
            valueSerializer.serialize(out, value);
        }
    }
}
From source file:com.palantir.atlasdb.keyvalue.impl.SweepStatsKeyValueService.java
@Override
public void putWithTimestamps(String tableName, Multimap<Cell, Value> cellValues) {
    delegate().putWithTimestamps(tableName, cellValues);
    writesByTable.add(tableName, cellValues.size());
    recordModifications(cellValues.size());
}
From source file:org.apache.brooklyn.entity.group.zoneaware.BalancingNodePlacementStrategy.java
@Override
public List<Entity> entitiesToRemove(Multimap<Location, Entity> currentMembers, int numToRemove) {
    if (currentMembers.isEmpty()) {
        throw new IllegalArgumentException(
                "No members supplied, when requesting removal of " + numToRemove + " nodes");
    }
    if (currentMembers.size() < numToRemove) {
        LOG.warn("Request to remove " + numToRemove + " when only " + currentMembers.size()
                + " members (continuing): " + currentMembers);
        numToRemove = currentMembers.size();
    }
    Map<Location, Integer> numToRemovePerLoc = Maps.newLinkedHashMap();
    Map<Location, Integer> locSizes = toMutableLocationSizes(currentMembers, ImmutableList.<Location>of());
    for (int i = 0; i < numToRemove; i++) {
        // TODO Inefficient to loop this many times! But not called with big numbers.
        Location mostPopulatedLoc = null;
        int mostPopulatedLocSize = 0;
        for (Location loc : locSizes.keySet()) {
            int locSize = locSizes.get(loc);
            if (locSize > 0 && (mostPopulatedLoc == null || locSize > mostPopulatedLocSize)) {
                mostPopulatedLoc = loc;
                mostPopulatedLocSize = locSize;
            }
        }
        assert mostPopulatedLoc != null : "leastPopulatedLoc=null; currentMembers=" + currentMembers;
        numToRemovePerLoc.put(mostPopulatedLoc, ((numToRemovePerLoc.get(mostPopulatedLoc) == null) ? 0
                : numToRemovePerLoc.get(mostPopulatedLoc)) + 1);
        locSizes.put(mostPopulatedLoc, locSizes.get(mostPopulatedLoc) - 1);
    }
    List<Entity> result = Lists.newArrayList();
    for (Map.Entry<Location, Integer> entry : numToRemovePerLoc.entrySet()) {
        result.addAll(pickNewest(currentMembers.get(entry.getKey()), entry.getValue()));
    }
    return result;
}
From source file:au.net.iinet.plugins.build.LeaderboardBuildPostAction.java
@Override
public void execute(StageExecution arg0, Job arg1, BuildResultsSummary arg2) {
    FilteredTestResults<TestClassResult> results = arg2.getFilteredTestResults();
    Multimap<TestClassResult, TestCaseResult> failed = results.getAllFailedTests();
    Multimap<TestClassResult, TestCaseResult> new_failed = results.getNewFailedTests();
    Multimap<TestClassResult, TestCaseResult> fixed = results.getFixedTests();
    System.err.println("Failed tests: " + failed.size());
    int count_failed = failed.size();
    int count_new_failed = new_failed.size();
    int count_fixed = fixed.size();
    /**
     * Copied shamelessly from the Jenkins CI game:
     *   -10 points for breaking a build
     *     0 points for breaking a build that already was broken
     *    +1 point for doing a build with no failures (unstable builds give no points)
     *    -1 point for each new test failure
     *    +1 point for each new test that passes
     */
    /** Calculate the score for whoever just committed all that */
    int new_points = 0;
    // you broke the build.
    if (count_new_failed > 0)
        new_points -= 10;
    // You broke tests? Lose points
    new_points -= count_new_failed;
    // You fixed some tests tho? Get points for that.
    new_points += count_fixed;
    // +1 for commit with no fails
    if (count_failed == 0)
        new_points += 1;
    for (Commit c : arg2.getCommits()) {
        String author_id = c.getAuthor().getName();
        String author_name = c.getAuthor().getName();
        // TODO: Icon based on point total here
        au.net.iinet.plugins.servlet.model.db.User u = users.get(author_id);
        u.setName(author_name);
        int total_points = u.getPoints() + new_points;
        u.setPoints(total_points);
        users.save(u);
    }
}