Example usage for com.google.common.collect Multimap keySet

Introduction

On this page you can find example usages of com.google.common.collect Multimap keySet, collected from open-source projects.

Prototype

Set<K> keySet();

Document

Returns a view collection of all distinct keys contained in this multimap.
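
Because keySet() is a view backed by the multimap, changes on either side show up on the other: removing a key from the set discards all of that key's values. A minimal, self-contained sketch (class and variable names are illustrative only):

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import java.util.Set;

public class KeySetViewDemo {
    public static void main(String[] args) {
        Multimap<String, Integer> scores = HashMultimap.create();
        scores.put("alice", 1);
        scores.put("alice", 2);
        scores.put("bob", 3);

        // Each distinct key appears once, no matter how many values it maps to.
        Set<String> keys = scores.keySet();
        System.out.println(keys.size()); // 2

        // The set is a live view: removing a key drops all of its values.
        keys.remove("alice");
        System.out.println(scores.size()); // 1: only bob -> 3 remains
    }
}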

Usage

From source file:nl.sidn.pcap.PcapReader.java

/**
 * Clear expired cache entries in order to avoid memory problems 
 */
public void clearCache(int tcpFlowCacheTimeout, int fragmentedIPcacheTimeout) {
    //clear tcp flows with expired packets
    List<TCPFlow> expiredList = new ArrayList<>();
    long now = System.currentTimeMillis();
    Multimap<TCPFlow, SequencePayload> flows = tcpDecoder.getFlows();
    for (TCPFlow flow : flows.keySet()) {
        Collection<SequencePayload> payloads = flows.get(flow);
        for (SequencePayload sequencePayload : payloads) {
            if ((sequencePayload.getTime() + tcpFlowCacheTimeout) <= now) {
                expiredList.add(flow);
                break;
            }
        }
    }

    //check IP datagrams
    List<Datagram> dgExpiredList = new ArrayList<>();

    for (Datagram dg : ipDecoder.getDatagrams().keySet()) {
        if ((dg.getTime() + fragmentedIPcacheTimeout) <= now) {
            dgExpiredList.add(dg);
        }
    }

    LOG.info("------------- Cache purge stats --------------");
    LOG.info("TCP flow cache size: " + flows.size());
    LOG.info("IP datagram cache size: " + ipDecoder.getDatagrams().size());
    LOG.info("Expired (to be removed) TCP flows: " + expiredList.size());
    LOG.info("Expired (to be removed) IP datagrams: " + dgExpiredList.size());
    LOG.info("----------------------------------------------------");

    //remove flows with expired packets
    for (TCPFlow tcpFlow : expiredList) {
        flows.removeAll(tcpFlow);
    }

    for (Datagram dg : dgExpiredList) {
        ipDecoder.getDatagrams().removeAll(dg);
    }

}
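
The collect-then-remove structure above is deliberate: keySet() is a live view, so calling removeAll(flow) while still iterating the set would throw a ConcurrentModificationException. Since the key set of Guava's standard multimap implementations supports removal through the view, the expiry could also be written in place with removeIf. A sketch under that assumption, not code from the project:

    long cutoff = now - tcpFlowCacheTimeout;
    // Removing a flow from the keySet view drops the flow and all of its payloads.
    flows.keySet().removeIf(flow ->
            flows.get(flow).stream().anyMatch(p -> p.getTime() <= cutoff));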

From source file:org.kairosdb.datastore.remote.RemoteDatastore.java

private void flushMap() {
    Multimap<DataPointKey, DataPoint> flushMap = createNewMap();

    synchronized (m_dataFileLock) {
        try {
            try {
                for (DataPointKey dataPointKey : flushMap.keySet()) {
                    //We have to reset the writer every time or it gets confused
                    //because we are only writing partial json each time.
                    JSONWriter writer = new JSONWriter(m_dataWriter);

                    if (!m_firstDataPoint) {
                        m_dataWriter.write(",\n");
                    }
                    m_firstDataPoint = false;

                    writer.object();

                    writer.key("name").value(dataPointKey.getName());
                    writer.key("ttl").value(dataPointKey.getTtl());
                    writer.key("skip_validate").value(true);
                    writer.key("tags").object();
                    SortedMap<String, String> tags = dataPointKey.getTags();
                    for (String tag : tags.keySet()) {
                        writer.key(tag).value(tags.get(tag));
                    }
                    writer.endObject();

                    writer.key("datapoints").array();
                    for (DataPoint dataPoint : flushMap.get(dataPointKey)) {
                        m_dataPointCounter++;
                        writer.array();
                        writer.value(dataPoint.getTimestamp());
                        dataPoint.writeValueToJson(writer);
                        writer.value(dataPoint.getApiDataType());
                        /*if (dataPoint.isLong())
                           writer.value(dataPoint.getLongValue());
                        else
                           writer.value(dataPoint.getDoubleValue());*/
                        writer.endArray();
                    }
                    writer.endArray();

                    writer.endObject();
                }
            } catch (JSONException e) {
                logger.error("Unable to write datapoints to file", e);
            }

            m_dataWriter.flush();
        } catch (IOException e) {
            logger.error("Unable to write datapoints to file", e);
        }
    }
}
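
Iterating keySet() and calling flushMap.get(dataPointKey) inside the loop costs one lookup per key. Guava's asMap() view exposes the same grouping as a Map<DataPointKey, Collection<DataPoint>>, so the outer loop could equivalently be written as follows (a sketch, not the project's code):

    for (Map.Entry<DataPointKey, Collection<DataPoint>> entry : flushMap.asMap().entrySet()) {
        DataPointKey dataPointKey = entry.getKey();
        // Same values that flushMap.get(dataPointKey) would return.
        Collection<DataPoint> points = entry.getValue();
        // ... write the key's metadata, then serialize each point ...
    }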

From source file:eu.numberfour.n4js.ui.workingsets.ProjectLocationAwareWorkingSetManager.java

private Multimap<String, IProject> initProjectLocation() {
    final IWorkspaceRoot root = ResourcesPlugin.getWorkspace().getRoot();
    final IProject[] projects = root.getProjects();
    final Multimap<String, IProject> locations = HashMultimap.create();
    for (final IProject project : projects) {
        final String pair = getWorkingSetId(project);
        locations.put(pair, project);
    }

    if (!deferredInitializerSucceeded) // only once ever.
    {
        // assume not properly initialized if only "other projects" is available as key.
        deferredInitializerSucceeded = locations.keySet().size() > 1;
    }

    return locations;
}
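
Note that locations.keySet().size() counts distinct working-set IDs, not projects: HashMultimap stores each key once in its key set, so the check succeeds only when the projects were grouped under more than one ID.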

From source file:org.lealone.cluster.dht.RangeStreamer.java

/**
 * Get a map of all ranges and their respective sources that are candidates for streaming the given ranges
 * to us. For each range, the list of sources is sorted by proximity relative to the given destAddress.
 *
 * @throws java.lang.IllegalStateException when there is no source to get data streamed
 */
// Maps each desired range to its candidate source addresses.
private Multimap<Range<Token>, InetAddress> getAllRangesWithSourcesFor(Database db,
        Collection<Range<Token>> desiredRanges) {
    AbstractReplicationStrategy strat = ClusterMetaData.getReplicationStrategy(db);
    Multimap<Range<Token>, InetAddress> rangeAddresses = strat.getRangeAddresses(metadata.cloneOnlyTokenMap());

    Multimap<Range<Token>, InetAddress> rangeSources = ArrayListMultimap.create();
    for (Range<Token> desiredRange : desiredRanges) {
        for (Range<Token> range : rangeAddresses.keySet()) {
            if (range.contains(desiredRange)) {
                List<InetAddress> preferred = snitch.getSortedListByProximity(address,
                        rangeAddresses.get(range));
                rangeSources.putAll(desiredRange, preferred);
                break;
            }
        }

        if (!rangeSources.keySet().contains(desiredRange))
            throw new IllegalStateException("No sources found for " + desiredRange);
    }

    return rangeSources;
}
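
Aside: rangeSources.keySet().contains(desiredRange) is equivalent to the more direct rangeSources.containsKey(desiredRange); both test whether at least one mapping exists under the key.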

From source file:com.facebook.buck.android.SmartDexingStep.java

/**
 * Once the {@code .class} files have been split into separate zip files, each must be converted
 * to a {@code .dex} file.
 */
private List<Step> generateDxCommands(ProjectFilesystem filesystem, Multimap<Path, Path> outputToInputs)
        throws IOException {
    ImmutableList.Builder<DxPseudoRule> pseudoRules = ImmutableList.builder();

    ImmutableMap<Path, Sha1HashCode> dexInputHashes = dexInputHashesProvider.getDexInputHashes();

    for (Path outputFile : outputToInputs.keySet()) {
        pseudoRules.add(new DxPseudoRule(filesystem, dexInputHashes,
                FluentIterable.from(outputToInputs.get(outputFile)).toSet(), outputFile,
                successDir.resolve(outputFile.getFileName()), dxOptions));
    }

    ImmutableList.Builder<Step> steps = ImmutableList.builder();
    for (DxPseudoRule pseudoRule : pseudoRules.build()) {
        if (!pseudoRule.checkIsCached()) {
            steps.addAll(pseudoRule.buildInternal());
        }
    }

    return steps.build();
}

From source file:org.apache.shindig.gadgets.rewrite.ConcatVisitor.java

/**
 * Split the given batch of elements (assumed to be sibling nodes that can be concatenated)
 * into batches with the same media type.
 *
 * @param elements
 * @param output
 */
private void splitBatchOnMedia(List<Element> elements, List<List<Element>> output) {
    // Multimap to hold the ordered list of elements encountered for a given media type.
    Multimap<String, Element> mediaBatchMap = LinkedHashMultimap.create();
    for (Element element : elements) {
        String mediaType = element.getAttribute("media");
        mediaBatchMap.put(StringUtils.isEmpty(mediaType) ? "screen" : mediaType, element);
    }
    Set<String> mediaTypes = mediaBatchMap.keySet();
    for (String mediaType : mediaTypes) {
        Collection<Element> elems = mediaBatchMap.get(mediaType);
        output.add(new LinkedList<Element>(elems));
    }
}
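
The choice of LinkedHashMultimap matters here: its keySet() iterates keys in the order they were first inserted, so the output batches preserve the order in which each media type was first encountered. A small, self-contained illustration with plain strings:

import com.google.common.collect.LinkedHashMultimap;
import com.google.common.collect.Multimap;

public class MediaOrderDemo {
    public static void main(String[] args) {
        Multimap<String, String> batches = LinkedHashMultimap.create();
        batches.put("print", "styleA");
        batches.put("screen", "styleB");
        batches.put("print", "styleC");
        // keySet() follows first-insertion order: [print, screen]
        System.out.println(batches.keySet());
    }
}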

From source file:model.utilities.dummies.GeographicalCustomer.java

/**
 * Chooses which of this firms the customer wants to choose, if any.
 * @param firmsToChooseFrom The list of available firms
 * @return The firm chosen, or null if none is chosen
 */

public GeographicalFirm chooseSupplier(final Multimap<GeographicalFirm, Quote> firmsToChooseFrom) {
    Preconditions.checkArgument(!firmsToChooseFrom.isEmpty());
    handleNewEvent(
            new LogEvent(this, LogLevel.TRACE, "was given these firms to choose from: {}", firmsToChooseFrom));

    //basically we want to find the minimum price+distance
    GeographicalFirm best = Collections.min(firmsToChooseFrom.keySet(), new Comparator<GeographicalFirm>() {
        @Override
        public int compare(GeographicalFirm o1, GeographicalFirm o2) {
            //price + distance
            double pricePlusDistance1 = firmsToChooseFrom.get(o1).iterator().next().getPriceQuoted()
                    + distance(GeographicalCustomer.this, o1);
            assert pricePlusDistance1 >= 0;

            double pricePlusDistance2 = firmsToChooseFrom.get(o2).iterator().next().getPriceQuoted()
                    + distance(GeographicalCustomer.this, o2);
            assert pricePlusDistance2 >= 0;

            return Double.compare(pricePlusDistance1, pricePlusDistance2);
        }
    });

    assert best != null;
    //is the minimum price distance okay?

    final long bestPriceAtSource = firmsToChooseFrom.get(best).iterator().next().getPriceQuoted();
    double bestPricePlusDistance = bestPriceAtSource + distance(GeographicalCustomer.this, best);
    //log it!
    handleNewEvent(
            new LogEvent(this, LogLevel.TRACE, "the best firm found was {}, pricing {}, total personal cost {}",
                    best, bestPriceAtSource, bestPricePlusDistance));

    if (bestPricePlusDistance <= getMaxPrice()) {
        handleNewEvent(new LogEvent(this, LogLevel.TRACE, "decided to buy from chosen best"));
        return best;
    } else
        return null;

}
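
On Java 8 and later, the anonymous Comparator over keySet() can be written more compactly with Comparator.comparingDouble. A sketch assuming the same surrounding fields and helper methods:

    GeographicalFirm best = Collections.min(firmsToChooseFrom.keySet(),
            Comparator.comparingDouble((GeographicalFirm firm) ->
                    firmsToChooseFrom.get(firm).iterator().next().getPriceQuoted()
                            + distance(this, firm)));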

From source file:org.apache.cassandra.dht.RangeStreamer.java

/**
 * Get a map of all ranges and their respective sources that are candidates for streaming the given ranges
 * to us. For each range, the list of sources is sorted by proximity relative to the given destAddress.
 *
 * @throws java.lang.IllegalStateException when there is no source to get data streamed
 */
private Multimap<Range<Token>, InetAddress> getAllRangesWithSourcesFor(String keyspaceName,
        Collection<Range<Token>> desiredRanges) {
    AbstractReplicationStrategy strat = Keyspace.open(keyspaceName).getReplicationStrategy();
    Multimap<Range<Token>, InetAddress> rangeAddresses = strat.getRangeAddresses(metadata.cloneOnlyTokenMap());

    Multimap<Range<Token>, InetAddress> rangeSources = ArrayListMultimap.create();
    for (Range<Token> desiredRange : desiredRanges) {
        for (Range<Token> range : rangeAddresses.keySet()) {
            if (range.contains(desiredRange)) {
                List<InetAddress> preferred = snitch.getSortedListByProximity(address,
                        rangeAddresses.get(range));
                rangeSources.putAll(desiredRange, preferred);
                break;
            }
        }

        if (!rangeSources.keySet().contains(desiredRange))
            throw new IllegalStateException("No sources found for " + desiredRange);
    }

    return rangeSources;
}

From source file:com.android.tools.idea.gradle.service.ProjectImportEventMessageDataService.java

@Override
public void importData(@NotNull Collection<DataNode<ProjectImportEventMessage>> toImport,
        @NotNull final Project project, boolean synchronous) {
    final ExternalSystemIdeNotificationManager notificationManager = ServiceManager
            .getService(ExternalSystemIdeNotificationManager.class);
    if (notificationManager == null) {
        return;
    }

    Multimap<String, String> messagesByCategory = ArrayListMultimap.create();
    for (DataNode<ProjectImportEventMessage> node : toImport) {
        ProjectImportEventMessage message = node.getData();
        String category = message.getCategory();
        messagesByCategory.put(category, message.getText());
        LOG.info(message.toString());
    }
    final StringBuilder builder = new StringBuilder();
    builder.append("<html>");
    for (String category : messagesByCategory.keySet()) {
        Collection<String> messages = messagesByCategory.get(category);
        if (category.isEmpty()) {
            builder.append(Joiner.on("<br>").join(messages));
        } else {
            // If the category is not an empty String, we show the category and each message as a list.
            builder.append(category).append("<ul>");
            for (String message : messages) {
                builder.append("<li>").append(message).append("</li>");
            }
            builder.append("</ul>");
        }
    }
    builder.append("</html>");

    ApplicationManager.getApplication().invokeLater(new Runnable() {
        @Override
        public void run() {
            String title = "Unexpected events:";
            String messageToShow = builder.toString();
            notificationManager.showNotification(title, messageToShow, NotificationType.ERROR, project,
                    GradleConstants.SYSTEM_ID, null);
        }
    });
}

From source file:com.streamsets.pipeline.stage.cloudstorage.destination.GoogleCloudStorageTarget.java

@Override
public void write(Batch batch) throws StageException {
    String pathExpression = GcsUtil.normalizePrefix(gcsTargetConfig.commonPrefix)
            + gcsTargetConfig.partitionTemplate;
    if (gcsTargetConfig.dataFormat == DataFormat.WHOLE_FILE) {
        handleWholeFileFormat(batch, elVars);
    } else {
        Multimap<String, Record> pathToRecordMap = ELUtils.partitionBatchByExpression(partitionEval, elVars,
                pathExpression, timeDriverElEval, elVars, gcsTargetConfig.timeDriverTemplate,
                Calendar.getInstance(TimeZone.getTimeZone(ZoneId.of(gcsTargetConfig.timeZoneID))), batch);

        pathToRecordMap.keySet().forEach(path -> {
            Collection<Record> records = pathToRecordMap.get(path);
            String fileName = GcsUtil.normalizePrefix(path) + gcsTargetConfig.fileNamePrefix + '_'
                    + UUID.randomUUID();
            if (StringUtils.isNotEmpty(gcsTargetConfig.fileNameSuffix)) {
                fileName = fileName + "." + gcsTargetConfig.fileNameSuffix;
            }
            try {
                ByteArrayOutputStream bOut = new ByteArrayOutputStream();
                OutputStream os = bOut;
                if (gcsTargetConfig.compress) {
                    fileName = fileName + ".gz";
                    os = new GZIPOutputStream(bOut);
                }
                BlobId blobId = BlobId.of(gcsTargetConfig.bucketTemplate, fileName);
                BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType(getContentType()).build();
                final AtomicInteger recordsWithoutErrors = new AtomicInteger(0);
                try (DataGenerator dg = gcsTargetConfig.dataGeneratorFormatConfig.getDataGeneratorFactory()
                        .getGenerator(os)) {
                    records.forEach(record -> {
                        try {
                            dg.write(record);
                            recordsWithoutErrors.incrementAndGet();
                        } catch (DataGeneratorException | IOException e) {
                            LOG.error("Error writing record {}. Reason {}", record.getHeader().getSourceId(),
                                    e);
                            getContext().toError(record, Errors.GCS_02, record.getHeader().getSourceId(), e);
                        }
                    });
                } catch (IOException e) {
                    LOG.error("Error happened when creating Output stream. Reason {}", e);
                    records.forEach(record -> getContext().toError(record, e));
                }

                try {
                    if (recordsWithoutErrors.get() > 0) {
                        Blob blob = storage.create(blobInfo, bOut.toByteArray());
                        GCSEvents.GCS_OBJECT_WRITTEN.create(getContext())
                                .with(GCSEvents.BUCKET, blob.getBucket())
                                .with(GCSEvents.OBJECT_KEY, blob.getName())
                                .with(GCSEvents.RECORD_COUNT, recordsWithoutErrors.longValue()).createAndSend();
                    }
                } catch (StorageException e) {
                    LOG.error("Error happened when writing to Output stream. Reason {}", e);
                    records.forEach(record -> getContext().toError(record, e));
                }
            } catch (IOException e) {
                LOG.error("Error happened when creating Output stream. Reason {}", e);
                records.forEach(record -> getContext().toError(record, e));
            }
        });
    }
}