Example usage for com.google.common.collect Maps newTreeMap

Introduction

This page shows example usages of com.google.common.collect.Maps.newTreeMap.

Prototype

public static <K extends Comparable, V> TreeMap<K, V> newTreeMap() 

Document

Creates a mutable, empty TreeMap instance using the natural ordering of its elements.
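
A minimal, self-contained sketch of the factory itself (the class name here is illustrative): the raw Comparable bound means the keys must be mutually comparable, and iteration follows their natural ordering. Since Java 7, new TreeMap<>() is an equivalent spelling.

import com.google.common.collect.Maps;

import java.util.TreeMap;

public class NewTreeMapDemo {
    public static void main(String[] args) {
        TreeMap<String, Integer> counts = Maps.newTreeMap();
        // Keys come back in natural (alphabetical) order regardless of insertion order.
        counts.put("banana", 2);
        counts.put("apple", 5);
        counts.put("cherry", 1);
        System.out.println(counts); // prints {apple=5, banana=2, cherry=1}
    }
}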

Usage

From source file:com.zimbra.soap.util.WsdlGenerator.java

public static Document makeWsdlDoc(List<WsdlInfoForNamespace> nsInfos, String serviceName,
        String targetNamespace) {
    Namespace nsSvc = new Namespace(svcPrefix, targetNamespace);

    final QName svcTypes = QName.get("types", nsWsdl);

    Document document = DocumentHelper.createDocument();
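    // sorted maps give the generated WSDL a deterministic element order (newTreeMap requires WsdlServiceInfo to be Comparable)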
    Map<WsdlServiceInfo, Element> bindElems = Maps.newTreeMap();
    Map<WsdlServiceInfo, Element> portTypeElems = Maps.newTreeMap();
    Element root = document.addElement(QName.get("definitions", nsWsdl));
    root.add(nsSvc);
    for (WsdlInfoForNamespace wsdlNsInfo : nsInfos) {
        root.add(wsdlNsInfo.getXsdNamespace());
    }
    root.add(nsZimbra);
    root.add(nsSoap);
    root.add(nsXsd);
    root.add(nsWsdl);
    root.addAttribute("targetNamespace", targetNamespace);
    root.addAttribute("name", serviceName);
    addWsdlTypesElement(root, svcTypes, nsInfos);

    for (WsdlInfoForNamespace wsdlNsInfo : nsInfos) {
        WsdlServiceInfo svcInfo = wsdlNsInfo.getSvcInfo();
        if (!portTypeElems.containsKey(svcInfo)) {
            // wsdl:definitions/wsdl:portType
            Element portTypeElem = DocumentHelper.createElement(portType);
            portTypeElem.addAttribute("name", svcInfo.getPortTypeName());
            portTypeElems.put(svcInfo, portTypeElem);
        }
        if (!bindElems.containsKey(svcInfo)) {
            // wsdl:definitions/wsdl:binding
            Element bindingElem = DocumentHelper.createElement(wsdlBinding);
            bindingElem.addAttribute("name", svcInfo.getBindingName());
            bindingElem.addAttribute("type", svcPrefix + ":" + svcInfo.getPortTypeName());
            // wsdl:definitions/wsdl:binding/soap:binding
            Element soapBindElem = bindingElem.addElement(soapBinding);
            soapBindElem.addAttribute("transport", "http://schemas.xmlsoap.org/soap/http");
            soapBindElem.addAttribute("style", "document");

            bindElems.put(svcInfo, bindingElem);
        }
    }

    for (WsdlInfoForNamespace wsdlNsInfo : nsInfos) {
        WsdlServiceInfo svcInfo = wsdlNsInfo.getSvcInfo();
        for (String requestName : wsdlNsInfo.getRequests()) {
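            // request names end with "Request"; strip the 7-character suffix to get the root element name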
            String rootName = requestName.substring(0, requestName.length() - 7);
            String responseName = rootName + "Response";
            String reqOpName = requestName.substring(0, 1).toLowerCase() + requestName.substring(1);
            String reqMsgName = wsdlNsInfo.getTag() + requestName + "Message";
            String respMsgName = wsdlNsInfo.getTag() + responseName + "Message";

            addWsdlRequestAndResponseMessageElements(root, wsdlNsInfo, reqMsgName, respMsgName, requestName,
                    responseName);

            addWsdlPortTypeOperationElements(portTypeElems.get(svcInfo), reqMsgName, respMsgName, reqOpName);

            addWsdlBindingOperationElements(bindElems.get(svcInfo), wsdlNsInfo, reqOpName, rootName);
        }
    }
    addWsdlSoapHdrContextMessageElement(root);

    for (Entry<WsdlServiceInfo, Element> entry : portTypeElems.entrySet()) {
        root.add(entry.getValue());
    }

    for (Entry<WsdlServiceInfo, Element> entry : bindElems.entrySet()) {
        root.add(entry.getValue());
    }

    Set<WsdlServiceInfo> svcSet = Sets.newHashSet();
    for (WsdlInfoForNamespace wsdlNsInfo : nsInfos) {
        WsdlServiceInfo svcInfo = wsdlNsInfo.getSvcInfo();
        if (!svcSet.contains(svcInfo)) {
            svcSet.add(svcInfo);
            addWsdlServiceElement(root, svcInfo);
        }
    }
    return document;
}

From source file:de.monticore.generating.templateengine.reporting.reporter.NodeTypesReporter.java

/**
 * @param nodeTypeCount2 map counting occurrences of all AST node types
 * @param nodetypeCountPos2 map counting AST node types that have a source position
 * @return map counting AST node types without a source position
 */
private Map<String, Integer> getMapDiff(Map<String, Integer> nodeTypeCount2,
        Map<String, Integer> nodetypeCountPos2) {
    Map<String, Integer> dif = Maps.newTreeMap();
    // merging keys of objects and visits
    Set<String> allKeys = new TreeSet<String>();
    allKeys.addAll(nodetypeCountPos2.keySet());
    allKeys.addAll(nodeTypeCount2.keySet());
    for (String key : allKeys) {
        int val1, val2;
        if (nodeTypeCount2.containsKey(key)) {
            val1 = nodeTypeCount2.get(key);
        } else {
            val1 = 0;
        }
        if (nodetypeCountPos2.containsKey(key)) {
            val2 = nodetypeCountPos2.get(key);
        } else {
            val2 = 0;
        }
        dif.put(key, val1 - val2);
    }
    return dif;
}

From source file:co.cask.cdap.gateway.collector.FlumeAdapter.java

@Override
public void append(AvroFlumeEvent event, org.apache.avro.ipc.Callback<Status> callback) throws IOException {
    try {
        // Discover the stream endpoint
        Discoverable endpoint = endpointStrategy.pick();
        if (endpoint == null) {
            callback.handleError(
                    new IllegalStateException("No stream endpoint available. Unable to write to stream."));
            return;
        }

        // Figure out the stream name
        Map<String, String> headers = Maps.newTreeMap();
        String streamName = createHeaders(event, headers);

        // Forward the request
        URL url = new URL(String.format("http://%s:%d/v2/streams/%s", endpoint.getSocketAddress().getHostName(),
                endpoint.getSocketAddress().getPort(), streamName));
        HttpURLConnection urlConn = (HttpURLConnection) url.openConnection();
        try {
            urlConn.setDoOutput(true);

            // Set headers
            for (Map.Entry<String, String> entry : headers.entrySet()) {
                String key = entry.getKey();
                if (!Constants.Gateway.API_KEY.equals(key)
                        && !Constants.Gateway.HEADER_DESTINATION_STREAM.equals(key)) {
                    key = streamName + "." + key;
                }
                urlConn.setRequestProperty(key, entry.getValue());
            }

            // Write body
            WritableByteChannel output = Channels.newChannel(urlConn.getOutputStream());
            try {
                ByteBuffer body = event.getBody().duplicate();
                while (body.hasRemaining()) {
                    output.write(body);
                }
            } finally {
                output.close();
            }

            // Verify response
            int responseCode = urlConn.getResponseCode();
            Preconditions.checkState(responseCode == HttpURLConnection.HTTP_OK, "Status != 200 OK (%s)",
                    responseCode);

            callback.handleResult(Status.OK);
        } finally {
            urlConn.disconnect();
        }

    } catch (Exception e) {
        LOG.error("Error consuming single event", e);
        callback.handleError(e);
    }
}

From source file:nl.knaw.huygens.timbuctoo.model.ckcc.CKCCPerson.java

@Override
public Map<String, String> createRelSearchRep(Map<String, String> mappedIndexInformation) {
    Map<String, String> filteredMap = Maps.newTreeMap();
    addValueToMap(mappedIndexInformation, filteredMap, "urn");
    addValueToMap(mappedIndexInformation, filteredMap, "cen");
    addValueToMap(mappedIndexInformation, filteredMap, "notes");

    return filteredMap;
}

From source file:de.blizzy.documentr.web.system.SystemController.java

private SortedMap<String, SortedMap<String, String>> getMacroSettingsFromSystemSettings() {
    Set<IMacroDescriptor> descriptors = macroFactory.getDescriptors();
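    // sorted maps keep macro names and their setting keys in alphabetical order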
    SortedMap<String, SortedMap<String, String>> allMacroSettings = Maps.newTreeMap();
    for (IMacroDescriptor descriptor : descriptors) {
        Set<MacroSetting> settingDescriptors = descriptor.getSettings();
        if (!settingDescriptors.isEmpty()) {
            SortedMap<String, String> macroSettings = Maps.newTreeMap();
            String macroName = descriptor.getMacroName();
            for (MacroSetting settingDescriptor : settingDescriptors) {
                String key = settingDescriptor.value();
                String value = StringUtils.defaultString(systemSettingsStore.getMacroSetting(macroName, key));
                macroSettings.put(key, value);
            }
            allMacroSettings.put(macroName, macroSettings);
        }
    }
    return allMacroSettings;
}

From source file:org.sonar.duplications.algorithm.AbstractAdvancedCloneReporter.java

protected List<ClonePair> reportClonePairs(FileBlockGroup fileBlockGroup) {
    SortedSet<Block> resourceBlocks = fileBlockGroup.getBlockList();
    List<List<Block>> sameHashBlockGroups = getIndexedBlockGroups(fileBlockGroup);
    // an empty list is needed at the end to report clones at the end of the file
    sameHashBlockGroups.add(new ArrayList<Block>());
    Map<String, Map<CloneKey, ClonePair>> prevActiveChains = Maps.newLinkedHashMap();
    List<ClonePair> reportedPairs = Lists.newArrayList();

    Iterator<Block> blockIterator = resourceBlocks.iterator();
    for (List<Block> blockGroup : sameHashBlockGroups) {
        Map<String, Map<CloneKey, ClonePair>> nextActiveChains = Maps.newLinkedHashMap();

        Block origBlock = null;
        if (blockIterator.hasNext()) {
            origBlock = blockIterator.next();
        }
        for (Block block : blockGroup) {
            String otherResourceId = block.getResourceId();
            Map<CloneKey, ClonePair> nextActiveMap = nextActiveChains.get(otherResourceId);
            if (nextActiveMap == null) {
                nextActiveMap = Maps.newTreeMap();
                nextActiveChains.put(otherResourceId, nextActiveMap);
            }
            Map<CloneKey, ClonePair> prevActiveMap = prevActiveChains.get(otherResourceId);
            if (prevActiveMap == null) {
                prevActiveMap = Maps.newTreeMap();
                prevActiveChains.put(otherResourceId, prevActiveMap);
            }

            processBlock(prevActiveMap, nextActiveMap, origBlock, block);
        }
        for (Map<CloneKey, ClonePair> prevActiveMap : prevActiveChains.values()) {
            reportedPairs.addAll(prevActiveMap.values());
        }
        prevActiveChains = nextActiveChains;
    }

    return reportedPairs;
}

From source file:org.jclouds.s3.blobstore.strategy.internal.SequentialMultipartUploadStrategy.java

@Override
public String execute(String container, Blob blob) {
    String key = blob.getMetadata().getName();
    ContentMetadata metadata = blob.getMetadata().getContentMetadata();
    Payload payload = blob.getPayload();
    Long length = payload.getContentMetadata().getContentLength();
    checkNotNull(length,
            "please invoke payload.getContentMetadata().setContentLength(length) prior to multipart upload");
    long chunkSize = algorithm.calculateChunkSize(length);
    int partCount = algorithm.getParts();
    if (partCount > 0) {
        ObjectMetadataBuilder builder = ObjectMetadataBuilder.create().key(key)
                .contentType(metadata.getContentType()).contentDisposition(metadata.getContentDisposition())
                .contentEncoding(metadata.getContentEncoding()).contentLanguage(metadata.getContentLanguage())
                .userMetadata(blob.getMetadata().getUserMetadata());
        String uploadId = client.initiateMultipartUpload(container, builder.build());
        try {
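            // the sorted map keeps part ETags in ascending part-number order for the completion request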
            SortedMap<Integer, String> etags = Maps.newTreeMap();
            for (Payload part : slicer.slice(payload, chunkSize)) {
                int partNum = algorithm.getNextPart();
                prepareUploadPart(container, key, uploadId, partNum, part, algorithm.getNextChunkOffset(),
                        etags);
            }
            return client.completeMultipartUpload(container, key, uploadId, etags);
        } catch (RuntimeException ex) {
            client.abortMultipartUpload(container, key, uploadId);
            throw ex;
        }
    } else {
        // TODO: find a way to disable multipart. if we pass the original
        // options, it goes into a stack overflow
        return client.putObject(container, blobToObject.apply(blob));
    }
}

From source file:cuchaz.m3l.util.transformation.BytecodeTools.java

public static Bytecode copyBytecodeToConstPool(ConstPool dest, Bytecode bytecode) throws BadBytecode {

    // get the entries this bytecode needs from the const pool
    Set<Integer> indices = Sets.newTreeSet();
    ConstPoolEditor editor = new ConstPoolEditor(bytecode.getConstPool());
    BytecodeIndexIterator iterator = new BytecodeIndexIterator(bytecode);
    for (BytecodeIndexIterator.Index index : iterator.indices()) {
        assert (index.isValid(bytecode));
        InfoType.gatherIndexTree(indices, editor, index.getIndex());
    }

    // maps old const-pool indices to their new indices in the destination pool
    Map<Integer, Integer> indexMap = Maps.newTreeMap();

    ConstPool src = bytecode.getConstPool();
    ConstPoolEditor editorSrc = new ConstPoolEditor(src);
    ConstPoolEditor editorDest = new ConstPoolEditor(dest);

    // copy entries over in order of level so the index mapping is easier
    for (InfoType type : InfoType.getSortedByLevel()) {
        for (int index : indices) {
            ConstInfoAccessor entry = editorSrc.getItem(index);

            // skip entries that aren't this type
            if (entry.getType() != type) {
                continue;
            }

            // make sure the source entry is valid before we copy it
            assert (type.subIndicesAreValid(entry, editorSrc));
            assert (type.selfIndexIsValid(entry, editorSrc));

            // make a copy of the entry so we can modify it safely
            ConstInfoAccessor entryCopy = editorSrc.getItem(index).copy();
            assert (type.subIndicesAreValid(entryCopy, editorSrc));
            assert (type.selfIndexIsValid(entryCopy, editorSrc));

            // remap the indices
            type.remapIndices(indexMap, entryCopy);
            assert (type.subIndicesAreValid(entryCopy, editorDest));

            // put the copy in the destination pool
            int newIndex = editorDest.addItem(entryCopy.getItem());
            entryCopy.setIndex(newIndex);
            assert (type.selfIndexIsValid(entryCopy, editorDest)) : type + ", self: " + entryCopy + " dest: "
                    + editorDest.getItem(entryCopy.getIndex());

            // make sure the source entry is unchanged
            assert (type.subIndicesAreValid(entry, editorSrc));
            assert (type.selfIndexIsValid(entry, editorSrc));

            // add the index mapping so we can update the bytecode later
            if (indexMap.containsKey(index)) {
                throw new Error("Entry at index " + index + " already copied!");
            }
            indexMap.put(index, newIndex);
        }
    }

    // make a new bytecode
    Bytecode newBytecode = new Bytecode(dest, bytecode.getMaxStack(), bytecode.getMaxLocals());
    newBytecode.setStackDepth(bytecode.getStackDepth());
    setBytecode(newBytecode, bytecode.get());
    setExceptionTable(newBytecode, bytecode.getExceptionTable());

    // apply the mappings to the bytecode
    BytecodeIndexIterator iter = new BytecodeIndexIterator(newBytecode);
    for (BytecodeIndexIterator.Index index : iter.indices()) {
        int oldIndex = index.getIndex();
        Integer newIndex = indexMap.get(oldIndex);
        if (newIndex != null) {
            // make sure this mapping makes sense
            InfoType typeSrc = editorSrc.getItem(oldIndex).getType();
            InfoType typeDest = editorDest.getItem(newIndex).getType();
            assert (typeSrc == typeDest);

            // apply the mapping
            index.setIndex(newIndex);
        }
    }
    iter.saveChangesToBytecode();

    // make sure all the indices are valid
    iter = new BytecodeIndexIterator(newBytecode);
    for (BytecodeIndexIterator.Index index : iter.indices()) {
        assert (index.isValid(newBytecode));
    }

    return newBytecode;
}

From source file:com.cloudera.hadoop.hdfs.nfs.metrics.MetricsAccumulator.java

private synchronized void publish() {
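    // collect values in a sorted map so metric names publish in alphabetical order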
    Map<String, Long> values = Maps.newTreeMap();
    for (String key : mAdhocMetrics.keySet()) {
        AtomicLong counter = mAdhocMetrics.get(key);
        values.put(key, counter.getAndSet(0L));
    }
    Set<Metric> metricsWithDivisors = Sets.newHashSet();
    for (Metric key : mMetrics.keySet()) {
        AtomicLong counter = mMetrics.get(key);
        String name = key.name();
        values.put(name, counter.getAndSet(0L));
        if (key.hasDivisor()) {
            metricsWithDivisors.add(key);
        }
    }
    for (Metric key : metricsWithDivisors) {
        Long counter = values.get(key.name());
        Long divisor = values.get(key.getDivisor());
        if ((divisor != null) && (divisor > 0L)) {
            values.put(key.name() + "_AVG", (counter / divisor));
        }
    }
    mMetricPublisher.publish(values);
}

From source file:io.druid.query.metadata.SegmentAnalyzer.java

public Map<String, ColumnAnalysis> analyze(StorageAdapter adapter,
        EnumSet<SegmentMetadataQuery.AnalysisType> analysisTypes) {
    Preconditions.checkNotNull(adapter, "Adapter cannot be null");
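    // a sorted map reports the column analyses in column-name order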
    Map<String, ColumnAnalysis> columns = Maps.newTreeMap();
    List<String> columnNames = getStorageAdapterColumnNames(adapter);

    int numRows = adapter.getNumRows();
    for (String columnName : columnNames) {
        final ColumnCapabilities capabilities = adapter.getColumnCapabilities(columnName);
        final ColumnAnalysis analysis;

        /**
         * StorageAdapter doesn't provide a way to get column values, so size is
         * not calculated for STRING and COMPLEX columns.
         */
        ValueType capType = capabilities.getType();
        switch (capType) {
        case LONG:
            analysis = lengthBasedAnalysisForAdapter(analysisTypes, capType.name(), capabilities, numRows,
                    Longs.BYTES);
            break;
        case FLOAT:
            analysis = lengthBasedAnalysisForAdapter(analysisTypes, capType.name(), capabilities, numRows,
                    NUM_BYTES_IN_TEXT_FLOAT);
            break;
        case STRING:
            analysis = new ColumnAnalysis(capType.name(), 0,
                    analysisHasCardinality(analysisTypes) ? adapter.getDimensionCardinality(columnName) : 0,
                    null);
            break;
        case COMPLEX:
            analysis = new ColumnAnalysis(capType.name(), 0, null, null);
            break;
        default:
            log.warn("Unknown column type[%s].", capType);
            analysis = ColumnAnalysis.error(String.format("unknown_type_%s", capType));
        }

        columns.put(columnName, analysis);
    }

    columns.put(Column.TIME_COLUMN_NAME, lengthBasedAnalysisForAdapter(analysisTypes, ValueType.LONG.name(),
            null, numRows, NUM_BYTES_IN_TIMESTAMP));

    return columns;
}