Example usage for com.google.common.collect Maps newTreeMap

Introduction

This page collects example usages of com.google.common.collect.Maps#newTreeMap taken from open-source projects.

Prototype

public static <K extends Comparable, V> TreeMap<K, V> newTreeMap() 

Document

Creates a mutable, empty TreeMap instance using the natural ordering of its keys.
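
A minimal, self-contained sketch (class name and sample data are illustrative only, not taken from the projects below) of how the returned map behaves: entries are kept sorted by the natural ordering of their keys.

import com.google.common.collect.Maps;

import java.util.TreeMap;

public class NewTreeMapDemo {
    public static void main(String[] args) {
        TreeMap<String, Integer> counts = Maps.newTreeMap();
        counts.put("banana", 2);
        counts.put("apple", 5);
        counts.put("cherry", 1);
        // Iteration follows the natural ordering of the keys: apple, banana, cherry.
        counts.forEach((key, count) -> System.out.println(key + " = " + count));
    }
}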

Usage

From source file:org.eclipse.viatra.query.runtime.registry.impl.AbstractRegistryView.java

/**
 * Creates a new view instance for the given registry. Note that views are created by the registry and the view
 * update mechanisms are also set up by the registry.
 *
 * @param registry
 */
public AbstractRegistryView(IQuerySpecificationRegistry registry) {
    this.registry = registry;
    this.fqnToEntryMap = Maps.newTreeMap();
    this.listeners = Sets.newHashSet();
}

From source file:org.gradle.api.reporting.components.internal.AbstractBinaryRenderer.java

protected void renderVariants(T binary, TextReportBuilder builder) {
    ModelSchema<?> schema = schemaStore.getSchema(((BinarySpecInternal) binary).getPublicType());
    if (!(schema instanceof ModelStructSchema)) {
        return;
    }
    Map<String, Object> variants = Maps.newTreeMap();
    VariantAspect variantAspect = ((ModelStructSchema<?>) schema).getAspect(VariantAspect.class);
    if (variantAspect != null) {
        for (ModelProperty<?> property : variantAspect.getDimensions()) {
            variants.put(property.getName(), property.getPropertyValue(binary));
        }
    }

    for (Map.Entry<String, Object> variant : variants.entrySet()) {
        String variantName = variant.getKey();
        builder.item(variantName, RendererUtils.displayValueOf(variant.getValue()));
    }
}

From source file:z.tool.util.CollectionUtil.java

public static <E> void checkDeadLock(List<? extends IParent<E>> sourceList, IParent<E> tobeCheck) {
    if (tobeCheck.getParentId() < 1 || tobeCheck.getId() < 1) {
        // nothing to check if either id has not been assigned yet
        return;
    }

    Checker.checkLogic(tobeCheck.getId() != tobeCheck.getParentId(),
            "a node cannot be its own parent");

    Map<Long, IParent<E>> tMap = Maps.newTreeMap();
    for (IParent<E> a : sourceList) {
        tMap.put(a.getId(), a);
    }

    IParent<E> me = tMap.get(tobeCheck.getId());
    Checker.checkLogic(null != me, "node to check was not found in the source list");

    for (IParent<E> r : tMap.values()) {
        @SuppressWarnings("unchecked")
        E e = (E) r;
        if (r.getParentId() > 0) {
            tMap.get(r.getParentId()).addChild(e);
        }
    }

    if (ZUtils.isEmpty(me.getChildren())) {
        // no children, so no cycle is possible
        return;
    }

    Set<Long> allSubIds = Sets.newHashSet();
    Queue<IParent<E>> queue = new LinkedList<IParent<E>>();
    queue.add(me);
    do {
        IParent<E> elem = queue.poll();
        if (!ZUtils.isEmpty(elem.getChildren())) {
            queue.addAll(elem.getChildren());
            for (IParent<E> e : elem.getChildren()) {
                allSubIds.add(e.getId());
            }
        }
    } while (!queue.isEmpty());

    Checker.checkLogic(!allSubIds.contains(tobeCheck.getParentId()),
            "parent must not be a descendant of the node being checked");
}

From source file:com.google.template.soy.msgs.restricted.RenderOnlySoyMsgBundleImpl.java

/**
 * Constructs a map of render-only soy messages. This implementation saves memory but doesn't
 * store all fields necessary during extraction.
 *
 * @param localeString The language/locale string of this bundle of messages, or null if unknown.
 *     Should only be null for bundles newly extracted from source files. Should always be set
 *     for bundles parsed from message files/resources.
 * @param msgs The list of messages. List order will become the iteration order. Duplicate
 *     message ID's are not permitted.
 */
public RenderOnlySoyMsgBundleImpl(@Nullable String localeString, Iterable<SoyMsg> msgs) {

    this.localeString = localeString;

    // First, build a sorted map from message ID to the message representation.
    SortedMap<Long, Object> partsMap = Maps.newTreeMap();
    for (SoyMsg msg : msgs) {
        checkArgument(Objects.equals(msg.getLocaleString(), localeString));
        checkArgument(msg.getAltId() < 0, "RenderOnlySoyMsgBundleImpl doesn't support alternate ID's.");
        long msgId = msg.getId();
        checkArgument(!partsMap.containsKey(msgId),
                "Duplicate messages are not permitted in the render-only impl.");

        List<SoyMsgPart> parts = msg.getParts();
        checkArgument(MsgPartUtils.hasPlrselPart(parts) == msg.isPlrselMsg(),
                "Message's plural/select status is inconsistent -- internal compiler bug.");
        // Save memory: don't store the list if there's only one item.
        if (parts.size() == 1) {
            partsMap.put(msgId, parts.get(0));
        } else {
            partsMap.put(msgId, ImmutableList.copyOf(parts));
        }
    }

    // Using parallel long[] and Object[] arrays saves memory versus using a Map, because it avoids
    // having to wrap the longs in a new Long(), and avoids wrapping the key/value pair in an
    // Entry. Also, using a sorted array utilizes memory better, since unlike a hash table, you
    // need neither a linked list nor empty spaces in the hash table.
    idArray = new long[partsMap.size()];
    valueArray = new Object[partsMap.size()];

    // Build the arrays in the same order as the sorted map. Note we can't use toArray() since it
    // won't create a primitive long[] (only Long wrappers).
    int index = 0;
    for (Map.Entry<Long, Object> entry : partsMap.entrySet()) {
        idArray[index] = entry.getKey();
        valueArray[index] = entry.getValue();
        index++;
    }
    checkState(index == partsMap.size());
}

From source file:com.github.fge.jsonschema.syntax.SyntaxProcessor.java

private void validate(final ProcessingReport report, final SchemaTree tree) throws ProcessingException {
    final JsonNode node = tree.getNode();
    final NodeType type = NodeType.getNodeType(node);

    /*
     * Barf if not an object, and don't even try to go any further
     */
    if (type != NodeType.OBJECT) {
        report.error(newMsg(tree, "core.notASchema").putArgument("found", type));
        return;
    }

    /*
     * Grab all checkers and object member names. Retain in checkers only
     * existing keywords, and remove from the member names set what is in
     * the checkers' key set: if non empty, some keywords are missing,
     * report them.
     */
    final Map<String, SyntaxChecker> map = Maps.newTreeMap();
    map.putAll(checkers);

    final Set<String> fields = Sets.newHashSet(node.fieldNames());
    map.keySet().retainAll(fields);
    fields.removeAll(map.keySet());

    if (!fields.isEmpty())
        report.warn(newMsg(tree, "core.unknownKeywords").putArgument("ignored",
                Ordering.natural().sortedCopy(fields)));

    /*
     * Now, check syntax of each keyword, and collect pointers for further
     * analysis.
     */
    final List<JsonPointer> pointers = Lists.newArrayList();
    for (final SyntaxChecker checker : map.values())
        checker.checkSyntax(pointers, bundle, report, tree);

    /*
     * Operate on these pointers.
     */
    for (final JsonPointer pointer : pointers)
        validate(report, tree.append(pointer));
}

From source file:org.eclipse.wb.internal.ercp.wizards.project.rcp.NewProjectCreationOperation.java

private void createPDEProject(final IProgressMonitor monitor) throws CoreException, InvocationTargetException {
    addNature("org.eclipse.pde.PluginNature", monitor);
    // add PDE container
    addClassPathEntry(JavaCore.newContainerEntry(new Path("org.eclipse.pde.core.requiredPlugins")), monitor);
    // add template files
    ExecutionUtils.runRethrow(new RunnableEx() {
        public void run() throws Exception {
            // fill values map
            String packageName;
            Map<String, String> valueMap = Maps.newTreeMap();
            {
                valueMap.put("projectName", m_projectName);
                {
                    String bundleName = m_projectName.replace(' ', '_');
                    valueMap.put("bundleName", bundleName + " Plug-in");
                    valueMap.put("bundleSymbolicName", bundleName);
                }
                {
                    packageName = m_projectName.toLowerCase().replace(' ', '.');
                    valueMap.put("packageName", packageName);
                }
            }
            // create MANIFEST.MF file
            {
                IFile manifestFile = createTemplateFile("empty/MANIFEST.MF", valueMap, "META-INF",
                        "MANIFEST.MF", monitor);
                scheduleOpen(manifestFile);
            }
            // create plugin.xml
            createTemplateFile("empty/plugin.xml", valueMap, null, "plugin.xml", monitor);
            // create Activator.java
            createTemplateUnit("empty/Activator.jav", valueMap, packageName, "Activator.java", monitor);
            // generate sample elements
            if (m_generateSample) {
                createTemplateFile("sample/plugin.xml", valueMap, null, "plugin.xml", monitor);
                createTemplateUnit("sample/MyViewPart.jav", valueMap, packageName + ".views", "MyViewPart.java",
                        monitor);
                createTemplateUnit("sample/PreferencePage_1.jav", valueMap, packageName + ".preferences",
                        "PreferencePage_1.java", monitor);
                createTemplateUnit("sample/PreferencePage_2.jav", valueMap, packageName + ".preferences",
                        "PreferencePage_2.java", monitor);
            }
        }
    });
}

From source file:com.netflix.aegisthus.mapred.reduce.CassReducer.java

public void reduce(Text key, Iterable<Text> values, Context ctx) throws IOException, InterruptedException {
    Map<String, Map<String, Object>> mergedData = Maps.newHashMap();
    Set<String> masterSetColumnNames = Sets.newHashSet();
    String referenceToken = null;

    Long deletedAt = Long.MIN_VALUE;
    // If we only have one distinct value we don't need to process
    // differences, which is slow and should be avoided.
    valuesSet.clear();
    for (Text value : values) {
        valuesSet.add(new Text(value));
    }
    if (valuesSet.size() == 1) {
        ctx.write(key, new Text("0"));
        return;
    }

    for (Text val : valuesSet) {
        String[] rowVals = val.toString().split(AegisthusSerializer.VAL_DELIMITER);
        String token = rowVals[1];
        System.out.println("Token : " + token);

        if (referenceToken == null) {
            referenceToken = token;
        }

        Map<String, Object> map = as.deserialize(rowVals[0]);
        // The json has one key value pair, the data is always under the
        // first key so do it once to save lookup
        map.remove(AegisthusSerializer.KEY);
        Long curDeletedAt = (Long) map.remove(AegisthusSerializer.DELETEDAT);

        Map<String, Object> columns = mergedData.get(token);
        if (columns == null) {
            columns = Maps.newTreeMap();
            mergedData.put(token, columns);
            columns.put(AegisthusSerializer.DELETEDAT, Long.MIN_VALUE);
            columns.putAll(map);
        }

        deletedAt = (Long) columns.get(AegisthusSerializer.DELETEDAT);

        if (curDeletedAt > deletedAt) {
            deletedAt = curDeletedAt;
            columns.put(AegisthusSerializer.DELETEDAT, deletedAt);
        }

        Set<String> columnNames = Sets.newHashSet();
        columnNames.addAll(map.keySet());
        columnNames.addAll(columns.keySet());
        masterSetColumnNames.addAll(map.keySet());

        for (String columnName : columnNames) {
            boolean oldKey = columns.containsKey(columnName);
            boolean newKey = map.containsKey(columnName);
            if (oldKey && newKey) {
                if (getTimestamp(map, columnName) > getTimestamp(columns, columnName)) {
                    columns.put(columnName, map.get(columnName));
                }
            } else if (newKey) {
                columns.put(columnName, map.get(columnName));
            }
        }

        // When cassandra compacts it removes columns that are in deleted rows
        // that are older than the deleted timestamp.
        // we will duplicate this behavior. If the etl needs this data at some
        // point we can change, but it is only available assuming
        // cassandra hasn't discarded it.
        List<String> delete = Lists.newArrayList();
        for (Map.Entry<String, Object> e : columns.entrySet()) {
            if (e.getValue() instanceof Long) {
                continue;
            }

            @SuppressWarnings("unchecked")
            Long ts = (Long) ((List<Object>) e.getValue()).get(2);
            if (ts < deletedAt) {
                delete.add(e.getKey());
            }
        }

        for (String k : delete) {
            columns.remove(k);
        }

    }

    //checking on data on all nodes
    Iterator<String> columnNameIter = masterSetColumnNames.iterator();
    Map<String, Object> referenceRow = mergedData.get(referenceToken);
    while (columnNameIter.hasNext()) {
        String colName = columnNameIter.next();

        //reference row does not have such column name
        if (!referenceRow.containsKey(colName)) {
            ctx.write(key, new Text("1"));
            return;
        }

        Object referenceCol = referenceRow.get(colName);

        //for each node or token
        for (Map.Entry<String, Map<String, Object>> e : mergedData.entrySet()) {
            Map<String, Object> targetRow = e.getValue();

            //target row does not have such column name
            if (!targetRow.containsKey(colName)) {
                ctx.write(key, new Text("1"));
                return;
            }

            //reference col value is not equal to target col val
            if (!referenceCol.equals(targetRow.get(colName))) {
                ctx.write(key, new Text("1"));
                return;
            }
        }
    }

    ctx.write(key, new Text("0"));

}

From source file:siftscience.kafka.tools.KafkaAssignmentStrategy.java

private static SortedMap<Integer, Node> createNodeMap(Map<Integer, String> nodeRackAssignment,
        Set<Integer> nodes, int maxReplicas) {
    // Create empty nodes with the specified capacity. Also create rack objects so that node
    // assignment can also look at the rack.
    Map<String, Rack> rackMap = Maps.newTreeMap();
    SortedMap<Integer, Node> nodeMap = Maps.newTreeMap();
    for (Integer nodeId : nodes) {
        Preconditions.checkState(!nodeMap.containsKey(nodeId));
        String rackId = nodeRackAssignment.get(nodeId);
        if (rackId == null) {
            // Use the node id as the rack id if there isn't a rack for the node. This allows
            // this algorithm to work correctly even if we don't care about rack awareness.
            rackId = nodeId.toString();
        }

        // Reuse the rack object if we've seen a node on this rack before so that we can track
        // assignments to a rack together.
        Rack rack = rackMap.get(rackId);
        if (rack == null) {
            rack = new Rack(rackId);
            rackMap.put(rackId, rack);
        }
        Node node = new Node(nodeId, maxReplicas, rack);
        nodeMap.put(nodeId, node);
    }
    return nodeMap;
}

From source file:de.tuberlin.uebb.jdae.llmsl.Block.java

public Block(double[][] data, DataLayout layout, Set<GlobalVariable> variables, Set<DerivedEquation> equations,
        SimulationOptions options) {

    this.options = options;
    this.variables = variables.toArray(new GlobalVariable[variables.size()]);
    Arrays.sort(this.variables, Ordering.natural());

    this.view = new ExecutionContext(0, this.variables, data);

    final Map<Integer, Integer> gvIndex = Maps.newTreeMap();

    for (int i = 0; i < this.variables.length; i++) {
        if (!gvIndex.containsKey(this.variables[i].index))
            gvIndex.put(this.variables[i].index, i);
    }
    final Map<GlobalVariable, BlockVariable> blockVars = makeBlockVars(layout, equations, gvIndex);

    this.views = new ExecutionContext[equations.size()];
    this.equations = new Residual[equations.size()];
    this.residuals = new TDNumber[equations.size()];

    int index = 0;
    for (DerivedEquation e : equations) {
        this.equations[index] = new Residual(e.eqn.bind(blockVars), e.minOrder, e.maxOrder);
        views[index++] = view.derived(e.maxOrder);
    }

    jacobianMatrix = new DenseMatrix64F(this.variables.length, this.variables.length);
    residual = new DenseMatrix64F(this.variables.length, 1);
    x = new DenseMatrix64F(this.variables.length);
    solver = LinearSolverFactory.linear(this.variables.length);
    this.point = new double[this.variables.length];
}

From source file:org.eclipse.wb.internal.ercp.wizards.project.swt.NewProjectCreationOperation.java

@Override
protected void execute(final IProgressMonitor monitor)
        throws CoreException, InvocationTargetException, InterruptedException {
    // create Java project
    super.execute(monitor);
    // add template files
    ExecutionUtils.runRethrow(new RunnableEx() {
        public void run() throws Exception {
            // add jar's
            for (String absoluteJar : getAbsoluteJars(m_ercpLocation)) {
                addClassPathEntry(JavaCore.newLibraryEntry(new Path(absoluteJar), null, null), monitor);
            }
            // fill values map
            String packageName;
            String typeName;
            Map<String, String> valueMap = Maps.newTreeMap();
            {
                valueMap.put("projectName", m_projectName);
                valueMap.put("packageName", packageName = "com.test");
                valueMap.put("typeName", typeName = "SampleApplication");
            }
            // generate sample elements
            if (m_generateSample) {
                {
                    String unitName = typeName + ".java";
                    ICompilationUnit unit = createTemplateUnit("sample/Application.jav", valueMap, packageName,
                            unitName, monitor);
                    scheduleOpen((IFile) unit.getUnderlyingResource());
                }
            }
        }
    });
}