Example usage for java.util LinkedHashSet contains

List of usage examples for java.util LinkedHashSet contains

Introduction

On this page you can find example usages of the java.util.LinkedHashSet contains method.

Prototype

boolean contains(Object o);

Source Link

Document

Returns true if this set contains the specified element.

Usage

From source file:com.redhat.rhn.taskomatic.task.DailySummary.java

/**
 * DO NOT CALL FROM OUTSIDE THIS CLASS. Renders the actions email message.
 * @param actions list of recent actions
 * @return the actions email message
 */
public String renderActionsMessage(List<ActionMessage> actions) {

    int longestActionLength = HEADER_SPACER;
    int longestStatusLength = 0;
    StringBuilder hdr = new StringBuilder();
    StringBuilder body = new StringBuilder();
    StringBuilder legend = new StringBuilder();
    StringBuilder msg = new StringBuilder();
    // Parameterized types instead of the former raw types. LinkedHashSet keeps
    // the statuses in first-seen order so the header columns line up with the
    // per-status counts rendered by renderActionTree below.
    LinkedHashSet<String> statusSet = new LinkedHashSet<String>();
    TreeMap<String, Map<String, Integer>> nonErrataActions = new TreeMap<String, Map<String, Integer>>();
    TreeMap<String, Map<String, Integer>> errataActions = new TreeMap<String, Map<String, Integer>>();
    TreeMap<String, String> errataSynopsis = new TreeMap<String, String>();

    legend.append(LocalizationService.getInstance().getMessage("taskomatic.daily.errata"));
    legend.append("\n\n");

    for (ActionMessage am : actions) {

        // Set.add() reports whether the element was new, so the former
        // contains()/add() pair collapses into a single call.
        if (statusSet.add(am.getStatus())) {
            if (am.getStatus().length() > longestStatusLength) {
                longestStatusLength = am.getStatus().length();
            }
        }

        if (am.getType().equals(ERRATA_UPDATE)) {
            // Errata actions are keyed by their (indented) advisory name.
            String advisoryKey = ERRATA_INDENTION + am.getAdvisory();

            if (!errataActions.containsKey(advisoryKey)) {
                errataActions.put(advisoryKey, new HashMap<String, Integer>());
                if (advisoryKey.length() + HEADER_SPACER > longestActionLength) {
                    longestActionLength = advisoryKey.length() + HEADER_SPACER;
                }
            }
            Map<String, Integer> counts = errataActions.get(advisoryKey);
            counts.put(am.getStatus(), am.getCount());

            // Remember one synopsis per advisory for the legend at the bottom.
            if (am.getAdvisory() != null && !errataSynopsis.containsKey(am.getAdvisory())) {
                errataSynopsis.put(am.getAdvisory(), am.getSynopsis());
            }
        } else {
            // Non-errata actions are keyed by their action type.
            if (!nonErrataActions.containsKey(am.getType())) {
                nonErrataActions.put(am.getType(), new HashMap<String, Integer>());
                if (am.getType().length() + HEADER_SPACER > longestActionLength) {
                    longestActionLength = am.getType().length() + HEADER_SPACER;
                }
            }
            Map<String, Integer> counts = nonErrataActions.get(am.getType());
            counts.put(am.getStatus(), am.getCount());
        }

    }

    // Header row: padding up to the widest action name, then one padded column
    // per status in first-seen order.
    hdr.append(StringUtils.repeat(" ", longestActionLength));
    for (String status : statusSet) {
        hdr.append(status + StringUtils.repeat(" ", (longestStatusLength + ERRATA_SPACER) - status.length()));
    }

    if (!errataActions.isEmpty()) {
        body.append(ERRATA_UPDATE + ":" + "\n");
    }
    StringBuffer formattedErrataActions = renderActionTree(longestActionLength, longestStatusLength, statusSet,
            errataActions);
    body.append(formattedErrataActions);

    // Legend: advisory name followed by its synopsis, one per line.
    for (String advisory : errataSynopsis.keySet()) {
        legend.append(ERRATA_INDENTION + advisory + ERRATA_INDENTION + errataSynopsis.get(advisory) + "\n");
    }

    StringBuffer formattedNonErrataActions = renderActionTree(longestActionLength, longestStatusLength,
            statusSet, nonErrataActions);
    body.append(formattedNonErrataActions);

    // Finally put all this together: header, body, then (only when there are
    // errata) the advisory legend.
    msg.append(hdr.toString());
    msg.append("\n");
    msg.append(body.toString());
    msg.append("\n\n");
    if (!errataSynopsis.isEmpty()) {
        msg.append(legend.toString());
    }
    return msg.toString();
}

From source file:com.alibaba.wasp.plan.parser.druid.DruidDDLParser.java

/**
 * Process Create Index Statement and generate Execute Plan
 * /*from   w  w w . j a  v  a  2 s .co  m*/
 */
private void getCreateIndexPlan(ParseContext context, WaspSqlCreateIndexStatement sqlCreateIndexStatement,
        MetaEventOperation metaEventOperation) throws IOException {

    // Index Name
    SQLName name = sqlCreateIndexStatement.getName();
    String indexName = parseName(name);
    metaEventOperation.isLegalIndexName(indexName);
    LOG.debug("Create Index SQL IndexName " + name);

    // Table Name
    SQLName table = sqlCreateIndexStatement.getTable();
    String tableName = parseName(table);
    LOG.debug("Create Index SQL TableName " + table);

    // check if table exists and get Table info
    FTable fTable = metaEventOperation.checkAndGetTable(tableName, true);

    // check the index not exists
    metaEventOperation.checkIndexNotExists(fTable, indexName);

    // Field
    List<SQLSelectOrderByItem> items = sqlCreateIndexStatement.getItems();
    LinkedHashSet<String> columns = new LinkedHashSet<String>(items.size());
    List<String> desc = new ArrayList<String>();
    for (SQLSelectOrderByItem item : items) {
        String columnName = parseName(item.getExpr());
        if (columns.contains(columnName)) {
            throw new UnsupportedException("Index have two same field '" + columnName + "'");
        } else {
            columns.add(columnName);
        }
        if (item.getType() == SQLOrderingSpecification.DESC) {
            desc.add(columnName);
        }
    }

    if (!metaEventOperation.isLegalDescFields(fTable, desc)) {
        throw new UnsupportedException("Currently we only support the ascending and descending time field.");
    }

    List<String> colList = new ArrayList<String>();
    colList.addAll(columns);
    if (metaEventOperation.arePrimaryKeys(fTable, colList)) {
        throw new UnsupportedException("Index keys is Primary Keys.");
    }
    if (metaEventOperation.containPrimaryKeys(fTable, colList)) {
        throw new UnsupportedException("Index keys contain all Primary Keys.");
    }

    LinkedHashMap<String, Field> indexKeys = metaEventOperation.checkAndGetFields(fTable, columns);
    // Check the indexKeys whether have Duplicate column name
    metaEventOperation.areLegalTableColumns(null, indexKeys.values());

    Index index = new Index(indexName, tableName, indexKeys);
    // Check if two index have the same columns and the same columns order
    metaEventOperation.checkTwoIndexWithSameColumn(fTable, index);

    index.setDesc(desc);
    index.setStoring(parse(sqlCreateIndexStatement.getStoringCols(), fTable.getColumns()));
    CreateIndexPlan createIndexPlan = new CreateIndexPlan(index);

    context.setPlan(createIndexPlan);
    LOG.debug("CreateIndexPlan " + createIndexPlan.toString());
}

From source file:com.espertech.esper.epl.spec.PatternStreamSpecRaw.java

private PatternStreamSpecCompiled compileInternal(StatementContext context, Set<String> eventTypeReferences,
        boolean isInsertInto, Collection<Integer> assignedTypeNumberStack, MatchEventSpec tags,
        Set<String> priorAllTags) throws ExprValidationException {
    // Lazily create the tag spec so callers may pass null.
    if (tags == null) {
        tags = new MatchEventSpec();
    }
    Deque<Integer> subexpressionIdStack = new ArrayDeque<Integer>(assignedTypeNumberStack);
    ExprEvaluatorContext evaluatorContextStmt = new ExprEvaluatorContextStatement(context);
    Stack<EvalFactoryNode> nodeStack = new Stack<EvalFactoryNode>();

    // Determine ordered tags: collect every filter node in the pattern tree and
    // give each distinct "event as" tag a stable, insertion-ordered number.
    // Tags inherited via priorAllTags keep their existing positions.
    Set<EvalFactoryNode> filterFactoryNodes = EvalNodeUtil.recursiveGetChildNodes(evalFactoryNode,
            FilterForFilterFactoryNodes.INSTANCE);
    LinkedHashSet<String> allTagNamesOrdered = new LinkedHashSet<String>();
    if (priorAllTags != null) {
        allTagNamesOrdered.addAll(priorAllTags);
    }
    for (EvalFactoryNode filterNode : filterFactoryNodes) {
        EvalFilterFactoryNode factory = (EvalFilterFactoryNode) filterNode;
        int tagNumber;
        if (factory.getEventAsName() != null) {
            // A newly seen tag is appended, so its number is the last index;
            // an already-known tag keeps the number of its first occurrence.
            if (!allTagNamesOrdered.contains(factory.getEventAsName())) {
                allTagNamesOrdered.add(factory.getEventAsName());
                tagNumber = allTagNamesOrdered.size() - 1;
            } else {
                tagNumber = findTagNumber(factory.getEventAsName(), allTagNamesOrdered);
            }
            factory.setEventAsTagNumber(tagNumber);
        }
    }

    // Recursively compile the pattern tree, filling the tag spec and the set
    // of referenced event types as a side effect.
    recursiveCompile(evalFactoryNode, context, evaluatorContextStmt, eventTypeReferences, isInsertInto, tags,
            subexpressionIdStack, nodeStack, allTagNamesOrdered);

    // When pattern auditing is enabled via annotations, wrap the compiled tree
    // with audit nodes; otherwise the tree is used as-is.
    Audit auditPattern = AuditEnum.PATTERN.getAudit(context.getAnnotations());
    Audit auditPatternInstance = AuditEnum.PATTERNINSTANCES.getAudit(context.getAnnotations());
    EvalFactoryNode compiledEvalFactoryNode = evalFactoryNode;
    if (auditPattern != null || auditPatternInstance != null) {
        EvalAuditInstanceCount instanceCount = new EvalAuditInstanceCount();
        compiledEvalFactoryNode = recursiveAddAuditNode(null, auditPattern != null,
                auditPatternInstance != null, evalFactoryNode, evalNodeExpressions, instanceCount);
    }

    return new PatternStreamSpecCompiled(compiledEvalFactoryNode, tags.getTaggedEventTypes(),
            tags.getArrayEventTypes(), allTagNamesOrdered, this.getViewSpecs(), this.getOptionalStreamName(),
            this.getOptions());
}

From source file:gaffer.accumulostore.operation.spark.handler.AccumuloStoreRelation.java

/**
 * Builds the Spark SQL {@code StructType} schema covering all configured groups.
 *
 * Each group contributes either a vertex column (entity groups) or source and
 * destination columns (edge groups), plus one column per property with a
 * recognised type. Field types must be consistent across groups; the merged
 * schema lists fields in the order the groups were provided.
 *
 * @throws RuntimeException if a vertex/source/destination type is not recognised
 * @throws IllegalArgumentException if the same field has conflicting definitions
 */
private void buildSchema() {
    LOGGER.info("Building Spark SQL schema for groups {}", StringUtils.join(groups, ','));
    for (final String group : groups) {
        final SchemaElementDefinition elementDefn = store.getSchema().getElement(group);
        final List<StructField> structFieldList = new ArrayList<>();
        if (elementDefn instanceof SchemaEntityDefinition) {
            entityOrEdgeByGroup.put(group, EntityOrEdge.ENTITY);
            final SchemaEntityDefinition entityDefinition = (SchemaEntityDefinition) elementDefn;
            final String vertexClass = store.getSchema().getType(entityDefinition.getVertex()).getClassString();
            final DataType vertexType = getType(vertexClass);
            if (vertexType == null) {
                throw new RuntimeException("Vertex must be a recognised type: found " + vertexClass);
            }
            LOGGER.info("Group {} is an entity group - {} is of type {}", group, VERTEX_COL_NAME, vertexType);
            structFieldList.add(new StructField(VERTEX_COL_NAME, vertexType, true, Metadata.empty()));
        } else {
            entityOrEdgeByGroup.put(group, EntityOrEdge.EDGE);
            final SchemaEdgeDefinition edgeDefinition = (SchemaEdgeDefinition) elementDefn;
            final String srcClass = store.getSchema().getType(edgeDefinition.getSource()).getClassString();
            final String dstClass = store.getSchema().getType(edgeDefinition.getDestination()).getClassString();
            final DataType srcType = getType(srcClass);
            final DataType dstType = getType(dstClass);
            if (srcType == null || dstType == null) {
                throw new RuntimeException("Both source and destination must be recognised types: source was "
                        + srcClass + " destination was " + dstClass);
            }
            LOGGER.info("Group {} is an edge group - {} is of type {}, {} is of type {}", group, SRC_COL_NAME,
                    srcType, DST_COL_NAME, dstType);
            structFieldList.add(new StructField(SRC_COL_NAME, srcType, true, Metadata.empty()));
            structFieldList.add(new StructField(DST_COL_NAME, dstType, true, Metadata.empty()));
        }
        // Properties with unrecognised types are skipped (with a warning)
        // rather than failing the whole schema build.
        final Set<String> properties = elementDefn.getProperties();
        for (final String property : properties) {
            final String propertyClass = elementDefn.getPropertyClass(property).getCanonicalName();
            final DataType propertyType = getType(propertyClass);
            if (propertyType == null) {
                LOGGER.warn("Ignoring property {} as it is not a recognised type", property);
            } else {
                LOGGER.info("Property {} is of type {}", property, propertyType);
                structFieldList.add(new StructField(property, propertyType, true, Metadata.empty()));
            }
        }
        structTypeByGroup.put(group,
                new StructType(structFieldList.toArray(new StructField[structFieldList.size()])));
    }
    // Create reverse map of field name to StructField
    final Map<String, Set<StructField>> fieldToStructs = new HashMap<>();
    for (final String group : groups) {
        final StructType groupSchema = structTypeByGroup.get(group);
        for (final String field : groupSchema.fieldNames()) {
            if (fieldToStructs.get(field) == null) {
                fieldToStructs.put(field, new HashSet<StructField>());
            }
            fieldToStructs.get(field).add(groupSchema.apply(field));
        }
    }
    // Check consistency, i.e. if the same field appears in multiple groups then the types are consistent
    for (final Map.Entry<String, Set<StructField>> entry : fieldToStructs.entrySet()) {
        final Set<StructField> schemas = entry.getValue();
        if (schemas.size() > 1) {
            throw new IllegalArgumentException("Inconsistent fields: the field " + entry.getKey()
                    + " has more than one definition: " + StringUtils.join(schemas, ','));
        }
    }
    // Merge schemas for groups together - fields should appear in the order the groups were provided
    final LinkedHashSet<StructField> fields = new LinkedHashSet<>();
    fields.add(new StructField(GROUP, DataTypes.StringType, false, Metadata.empty()));
    usedProperties.add(GROUP);
    for (final String group : groups) {
        final StructType groupSchema = structTypeByGroup.get(group);
        for (final String field : groupSchema.fieldNames()) {
            final StructField struct = groupSchema.apply(field);
            // add() returns false for an already-present struct, so a separate
            // contains() check is unnecessary; usedProperties is only extended
            // for structs seen for the first time.
            if (fields.add(struct)) {
                usedProperties.add(field);
            }
        }
    }
    structType = new StructType(fields.toArray(new StructField[fields.size()]));
    LOGGER.info("Schema is {}", structType);
}

From source file:de._13ducks.cor.game.server.movement.SectorPathfinder.java

/**
 * Finds a path from start to target across the sector graph using an A*-style
 * search over polygon nodes, then post-processes the path so that consecutive
 * nodes always share a common polygon.
 *
 * @param start the start position; may be null (logged and rejected)
 * @param target the target position; may be null (logged and rejected)
 * @param startSector the polygon containing the start position
 * @param moveMap the movement map used to resolve the target's polygon
 * @return the path from start to target, or null if none was found
 */
public static synchronized List<Node> findPath(SimplePosition start, SimplePosition target,
        FreePolygon startSector, MovementMap moveMap) {

    if (start == null || target == null) {
        System.out.println("FixMe: SPathfinder, irregular call: " + start + "-->" + target);
        return null;
    }

    FreePolygon targetSector = moveMap.containingPoly(target.x(), target.y());

    if (targetSector == null) {
        // Target is not inside any polygon of the movement map - abort.
        System.out.println("Irregular target. Aborting");
        return null;
    }
    FakeNode startNode = new FakeNode(start.x(), start.y(), startSector);
    Node targetNode = new FakeNode(target.x(), target.y(), targetSector);
    targetNode.addPolygon(targetSector);

    // The start node must know the members of its polygon.
    startNode.setReachableNodes(computeDirectReachable(startNode, startSector));
    // The target node must be known to the members of its polygon. The movement
    // map itself must not be modified, though, so some calls are intercepted
    // internally and the information is patched in there.
    List<Node> preTargetNodes = Arrays.asList(computeDirectReachable(targetNode, targetSector));

    PriorityBuffer open = new PriorityBuffer(); // Discovered nodes, kept ordered by priority (F value)
    LinkedHashSet<Node> containopen = new LinkedHashSet<Node>(); // Also discovered nodes; allows much faster membership tests than the buffer
    LinkedHashSet<Node> closed = new LinkedHashSet<Node>(); // Completely processed nodes

    double cost_t = 0; // Movement cost to a neighbor, computed per edge below

    startNode.setCost(0); // Cost of the start node (the search origin) is of course 0
    open.add(startNode); // Put the start node into the open list
    containopen.add(startNode);
    targetNode.setParent(null); // Predecessor of the target node is not known yet

    for (int j = 0; j < 40000; j++) { // Maximum number of iterations before the search gives up

        if (open.isEmpty()) { // Abort when the open list is empty => there is no path
            return null;
        }

        // No sorting needed: PriorityBuffer keeps the nodes ordered by their priority, i.e. the F value
        Node current = (Node) open.remove(); // Take (and remove) the entry with the lowest F value from the open list
        containopen.remove(current);
        if (current.equals(targetNode)) { // Done: a path from start to target was found
            targetNode.setParent(current.getParent()); // Predecessor of the target is now known
            break;
        }

        // current was already removed from the open list; now move it into closed
        closed.add(current);

        List<Node> neighbors = computeNeighbors(current, targetNode, preTargetNodes);

        for (Node node : neighbors) {

            if (closed.contains(node)) {
                continue;
            }

            // Compute the cost of moving there
            cost_t = current.movementCostTo(node);

            if (containopen.contains(node)) { // Node already in the open list: check whether this route is shorter

                if (current.getCost() + cost_t < node.getCost()) { // Shorter path found?

                    node.setCost(current.getCost() + cost_t); // -> recompute the path cost
                    node.setValF(node.getCost() + node.getHeuristic()); // F value = path cost from start + straight-line distance to target
                    node.setParent(current); // The current node becomes the predecessor
                }
            } else {
                node.setCost(current.getCost() + cost_t);
                node.setHeuristic(Math.sqrt(Math.pow(Math.abs((targetNode.getX() - node.getX())), 2)
                        + Math.pow(Math.abs((targetNode.getY() - node.getY())), 2))); // Estimated (Euclidean) distance to the target

                node.setParent(current); // Parent is the node from which the current one was discovered
                node.setValF(node.getCost() + node.getHeuristic()); // F value = path cost from start + straight-line distance to target
                open.add(node); // Add to the open list
                containopen.add(node);
            }

        }
    }

    if (targetNode.getParent() == null) { // No path found
        return null;
    }

    ArrayList<Node> pathrev = new ArrayList<Node>(); // Build the path from the parent links, from target back to start
    while (!targetNode.equals(startNode)) {
        pathrev.add(targetNode);
        targetNode = targetNode.getParent();
    }
    pathrev.add(startNode);

    ArrayList<Node> path = new ArrayList<Node>(); // Reverse the path so it runs from start to target
    for (int k = pathrev.size() - 1; k >= 0; k--) {
        path.add(pathrev.get(k));
    }

    // The following algorithm needs polygon info, so attach it here
    startNode.addPolygon(startSector);
    targetNode.addPolygon(targetSector);

    /**
     * At this point the path has to be post-processed once more: due to newer
     * tweaks, consecutive nodes may not be directly connected (i.e. they share
     * no common polygon). This happens e.g. with the start and target entry
     * point variation.
     */
    for (int i = 0; i < path.size() - 1; i++) {
        Node n1 = path.get(i);
        Node n2 = path.get(i + 1);
        FreePolygon commonSector = commonSector(n1, n2);
        if (commonSector == null) {
            // The interesting case: the two nodes are not directly connected,
            // so an intermediate node has to be inserted. Find its position:
            Edge direct = new Edge(n1, n2);
            Node newNode = null;
            // Try the polygons of n1 one after another
            for (FreePolygon currentPoly : n1.getPolygons()) {
                List<Edge> edges = currentPoly.calcEdges();
                for (Edge testedge : edges) {
                    // Is there an intersection?
                    SimplePosition intersection = direct.intersectionWithEndsNotAllowed(testedge);
                    if (intersection != null) {
                        // Candidate for the next polygon
                        FreePolygon nextPoly = null;
                        // Edge found - look up the polygon on the other side of it
                        nextPoly = getOtherPoly(testedge.getStart(), testedge.getEnd(), currentPoly);

                        newNode = intersection.toNode();
                        newNode.addPolygon(currentPoly);
                        newNode.addPolygon(nextPoly);
                        break;
                    }
                }
                if (newNode != null) {
                    break;
                }
            }

            if (newNode == null) {
                // This should not happen: the path was found legally, so it must exist
                System.out.println("[Pathfinder][ERROR]: Cannot insert Nodes into route, aborting!");
                return null;
            } else {
                path.add(i + 1, newNode);
            }
        }
    }

    return path; // Return the path
}

From source file:pt.webdetails.cda.utils.mondrian.CompactBandedMDXTableModel.java

/**
 * Builds a compact banded table model from a Mondrian result set.
 *
 * The row count is the product of the sizes of all non-column axes; the column
 * count is the size of the column axis (axis 0) plus one column per hierarchy
 * member on every other axis. Column names and the column-to-dimension and
 * column-to-axis mappings are derived here as well.
 *
 * @param resultSet the Mondrian result to wrap; must not be null
 * @param rowLimit if positive, caps the reported row count
 * @throws NullPointerException if {@code resultSet} is null
 */
public CompactBandedMDXTableModel(final Result resultSet, final int rowLimit) {
    if (resultSet == null) {
        throw new NullPointerException("ResultSet returned was null");
    }
    this.resultSet = resultSet;

    // rowcount is the product of all axis-sizes. If an axis contains more than one member, then
    // Mondrian already performs the crossjoin for us.

    // column count is the count of all hierarchies of all axes.

    final Axis[] axes = this.resultSet.getAxes();
    this.rowCount = 0;
    this.axesSize = new int[axes.length];
    final int[] axesMembers = new int[axes.length];
    @SuppressWarnings("unchecked")
    final List<Dimension>[] dimensionsForMembersPerAxis = new List[axes.length];
    @SuppressWarnings("unchecked")
    final List<Integer>[] membersPerAxis = new List[axes.length];

    // process the column axis first ..
    if (axesSize.length > 0) {
        final Axis axis = axes[0];
        final List<Position> positions = axis.getPositions();

        axesSize[0] = positions.size();
        if (positions.isEmpty()) {
            noMeasures = true;
        }
    }

    // Axis contains (zero or more) positions, which contains (zero or more) members
    for (int axesIndex = axes.length - 1; axesIndex >= 1; axesIndex -= 1) {
        final Axis axis = axes[axesIndex];
        final List<Position> positions = axis.getPositions();

        axesSize[axesIndex] = positions.size();
        if (positions.isEmpty()) {
            noMeasures = true;
        }

        // For each position index, track the deepest hierarchy level seen and
        // the dimension that produced it.
        final ArrayList<Integer> memberList = new ArrayList<Integer>();
        final ArrayList<Dimension> dimensionsForMembers = new ArrayList<Dimension>();
        for (int positionsIndex = 0; positionsIndex < positions.size(); positionsIndex++) {
            final Position position = positions.get(positionsIndex);
            for (int positionIndex = 0; positionIndex < position.size(); positionIndex++) {
                Member m = position.get(positionIndex);
                final Dimension dimension = m.getDimension();
                // Change compared to BandedMDXTableModel: we deliberately do
                // not walk up the parent members, so each member contributes a
                // single level (the original counted all hierarchy levels).
                int hierarchyLevelCount = 1; // Originally was 0

                if (memberList.size() <= positionIndex) {
                    memberList.add(hierarchyLevelCount);
                    dimensionsForMembers.add(dimension);
                } else {
                    final Integer existingLevel = memberList.get(positionIndex);
                    if (existingLevel.intValue() < hierarchyLevelCount) {
                        memberList.set(positionIndex, hierarchyLevelCount);
                        dimensionsForMembers.set(positionIndex, dimension);
                    }
                }
            }
        }

        int memberCount = 0;
        for (int i = 0; i < memberList.size(); i++) {
            memberCount += memberList.get(i);
        }
        axesMembers[axesIndex] = memberCount;
        dimensionsForMembersPerAxis[axesIndex] = dimensionsForMembers;
        membersPerAxis[axesIndex] = memberList;
    }

    // Row count: product of the sizes of all non-column axes.
    if (axesSize.length > 1) {
        rowCount = axesSize[1];
        for (int i = 2; i < axesSize.length; i++) {
            final int size = axesSize[i];
            rowCount *= size;
        }
    }
    if (!noMeasures) {
        rowCount = Math.max(1, rowCount);
    }
    // Column count: the column axis contributes its positions (or a single
    // column when there are no axes), every other axis its member columns.
    if (axesSize.length == 0) {
        columnCount = 1;
    } else {
        columnCount = axesSize[0];
    }
    for (int i = 1; i < axesMembers.length; i++) {
        columnCount += axesMembers[i];
    }

    columnNames = new String[columnCount];
    columnToDimensionMapping = new Dimension[columnCount];
    columnToAxisPosition = new int[columnCount];

    int columnIndex = 0;
    int dimColIndex = 0;

    // Name the member columns of all non-column axes, highest axis first.
    for (int axesIndex = axes.length - 1; axesIndex >= 1; axesIndex -= 1) {
        final Axis axis = axes[axesIndex];
        final List<Position> positions = axis.getPositions();
        final LinkedHashSet<String> columnNamesSet = new LinkedHashSet<String>();
        for (int positionsIndex = 0; positionsIndex < positions.size(); positionsIndex++) {
            final Position position = positions.get(positionsIndex);
            for (int positionIndex = 0; positionIndex < position.size(); positionIndex++) {
                Member m = position.get(positionIndex);
                // Use the member's hierarchy name; the set silently ignores
                // duplicates, so no contains() check is required.
                final String name = m.getHierarchy().getName();
                columnNamesSet.add(name);
            }
        }

        if (columnNamesSet.size() != axesMembers[axesIndex]) {
            logger.error("ERROR: Number of names is not equal the pre-counted number.");
        }

        // Record, per column, which dimension and axis it belongs to.
        final List<Dimension> dimForMemberPerAxis = dimensionsForMembersPerAxis[axesIndex];
        final List<Integer> memberCntPerAxis = membersPerAxis[axesIndex];
        for (int i = 0; i < memberCntPerAxis.size(); i++) {
            final Integer count = memberCntPerAxis.get(i);
            final Dimension dim = dimForMemberPerAxis.get(i);
            for (int x = 0; x < count.intValue(); x += 1) {
                this.columnToDimensionMapping[dimColIndex + x] = dim;
                this.columnToAxisPosition[dimColIndex + x] = axesIndex;
            }
            dimColIndex = count.intValue() + dimColIndex;
        }

        final String[] names = columnNamesSet.toArray(new String[columnNamesSet.size()]);
        System.arraycopy(names, 0, this.columnNames, columnIndex, names.length);
        columnIndex += names.length;
    }

    if (axesSize.length > 0) {
        // now create the column names for the column-axis: member names of a
        // position joined with '/'
        final Axis axis = axes[0];
        final List<Position> positions = axis.getPositions();
        for (int i = 0; i < positions.size(); i++) {
            final Position position = positions.get(i);
            final StringBuffer positionName = new StringBuffer(100);
            for (int j = 0; j < position.size(); j++) {
                if (j != 0) {
                    positionName.append('/');
                }
                final Member member = position.get(j);
                //positionName.append(MondrianUtil.getUniqueMemberName(member));
                positionName.append(member.getName());

            }
            columnNames[columnIndex] = positionName.toString();
            columnIndex += 1;
        }
    }
    if (axesSize.length == 0) {
        columnNames[0] = "Measure";
    }
    if (rowLimit > 0) {
        rowCount = Math.min(rowLimit, rowCount);
    }
}

From source file:org.codehaus.mojo.jsimport.AbstractGenerateHtmlMojo.java

/**
 * Given a set of file paths, builds a new set of any dependencies each of these
 * paths may have, and any dependencies that these dependencies have etc.
 *
 * @param visitedNodes a set of nodes already visited so as to avoid overflow.
 * @param filePaths the set of file paths to iterate over.
 * @param allImports the set to build.
 * @return if not null then this represents a file path that revealed a cyclical dependency issue.
 */
private String buildImportsRecursively(Set<String> visitedNodes, LinkedHashSet<String> filePaths,
        LinkedHashSet<String> allImports) {
    String cyclicFilePath = null;
    for (String filePath : filePaths) {
        // add() returns false for an already-visited node, replacing the
        // former contains()/add() pair with a single call.
        if (visitedNodes.add(filePath)) {
            // Main dependency map takes precedence; fall back to the
            // compile-scope dependencies when the path is not found there.
            LinkedHashSet<String> filePathDependencies = fileDependencies.get(filePath);
            if (filePathDependencies == null && compileFileDependencies != null) {
                filePathDependencies = compileFileDependencies.get(filePath);
            }

            if (filePathDependencies != null) {
                // Recurse so transitive dependencies are added before the path itself.
                cyclicFilePath = buildImportsRecursively(visitedNodes, filePathDependencies, allImports);
            } else if (allImports.contains(filePath)) {
                // A leaf path that is already in the import set means we have
                // come around in a circle.
                cyclicFilePath = filePath;
            }

            if (cyclicFilePath != null) {
                break;
            }

            allImports.add(filePath);
        }
    }
    return cyclicFilePath;
}

From source file:ch.unibas.fittingwizard.presentation.fitting.FittingParameterPage.java

/**
 * Builds the initial charges file, prompting the user to edit charges for any
 * atom types that are missing values or are defined inconsistently across
 * molecules.
 *
 * @param queryService query access to the loaded molecules
 * @return the generated initial charges file, or {@code null} if the user
 *         cancelled the edit dialog
 */
private File getInitalCharges(MoleculeQueryService queryService) {
    LinkedHashSet<ChargeValue> userCharges = new LinkedHashSet<>();
    LinkedHashSet<AtomTypeId> atomTypesRequiringUserInput = new LinkedHashSet<>();

    List<Molecule> moleculesWithMissingUserCharges = queryService.findMoleculesWithMissingUserCharges();
    atomTypesRequiringUserInput.addAll(getAllAtomTypeIds(moleculesWithMissingUserCharges));

    // With more than one molecule, the same atom type may carry conflicting
    // charges; those types also require explicit user input.
    boolean multipleMoleculesDefined = queryService.getNumberOfMolecules() > 1;
    if (multipleMoleculesDefined) {
        List<AtomTypeId> duplicates = queryService.findUnequalAndDuplicateAtomTypes();
        atomTypesRequiringUserInput.addAll(duplicates);
    }

    if (!atomTypesRequiringUserInput.isEmpty()) {
        LinkedHashSet<ChargeValue> editedValues = editAtomTypeChargesDialog
                .editAtomTypes(atomTypesRequiringUserInput);
        if (editedValues == null) {
            // User cancelled the dialog: abort without generating a file.
            return null;
        }
        userCharges.addAll(editedValues);
    }

    // Fill up with all other values in order to generate a correct charges file.
    // Since userCharges is a Set, addAll() skips elements that are already
    // present, so the values edited above are not replaced - this is exactly
    // what the former contains()-guarded copy loop did.
    LinkedHashSet<ChargeValue> allCharges = queryService.getUserChargesFromMoleculesWithCharges();
    userCharges.addAll(allCharges);

    return generateInitialChargesFileFromUserCharges(userCharges);
}

From source file:org.apache.lens.driver.jdbc.ColumnarSQLRewriter.java

/**
 * Collects all columns used from dimension tables.
 *
 * Walks the AST looking for {@code table.column} references (DOT nodes) and
 * records each referenced column against its table in the accessed-columns map.
 *
 * @param node the AST node to scan; may be null, in which case nothing is done
 */
public void getAllDimColumns(ASTNode node) {

    if (node == null) {
        log.debug("Input AST is null ");
        return;
    }
    // Assuming column is specified with table.column format
    if (node.getToken().getType() == HiveParser.DOT) {
        String table = HQLParser.findNodeByPath(node, TOK_TABLE_OR_COL, Identifier).toString();
        String column = node.getChild(1).toString();

        for (String tab : tableToAliasMap.keySet()) {
            String alias = tableToAliasMap.get(tab);

            // The reference may use either the real table name or its alias.
            if ((table.equals(tab) || table.equals(alias)) && column != null) {
                // Single lookup instead of the former containsKey()/put()/get()
                // triple; the set creation only happens on first access.
                LinkedHashSet<String> cols = tableToAccessedColMap.get(tab);
                if (cols == null) {
                    cols = new LinkedHashSet<String>();
                    tableToAccessedColMap.put(tab, cols);
                }
                // Set.add() ignores duplicates, so no contains() check is needed.
                cols.add(column);
            }
        }
    }
    // Recurse into all children to cover the whole subtree.
    for (int i = 0; i < node.getChildCount(); i++) {
        ASTNode child = (ASTNode) node.getChild(i);
        getAllDimColumns(child);
    }
}

From source file:com.sonicle.webtop.calendar.Service.java

/**
 * Handles the GetPlanning request: for each requested attendee, builds a row
 * marking every generated time span as "busy", "free" or (when the attendee
 * cannot be resolved to a user) "unknown", then writes the rows plus the
 * dynamically generated grid metadata to {@code out} via {@link JsonResult}.
 *
 * @param request  expects parameters: startDate, endDate, timezone, attendees
 * @param response servlet response (output is written through {@code out})
 * @param out      writer receiving the JSON result
 */
public void processGetPlanning(HttpServletRequest request, HttpServletResponse response, PrintWriter out) {
    CoreUserSettings cus = getEnv().getCoreUserSettings();
    CoreManager core = WT.getCoreManager();
    ArrayList<MapItem> items = new ArrayList<>();
    Connection con = null; // never opened here; closed quietly in finally

    try {
        String eventStartDate = ServletUtils.getStringParameter(request, "startDate", true);
        String eventEndDate = ServletUtils.getStringParameter(request, "endDate", true);
        String timezone = ServletUtils.getStringParameter(request, "timezone", true);
        JsEvent.Attendee.List attendees = ServletUtils.getObjectParameter(request, "attendees",
                new JsEvent.Attendee.List(), JsEvent.Attendee.List.class);
        //JsAttendeeList attendees = ServletUtils.getObjectParameter(request, "attendees", new JsAttendeeList(), JsAttendeeList.class);

        // Parses string parameters in the event's own time zone
        DateTimeZone eventTz = DateTimeZone.forID(timezone);
        DateTime eventStartDt = DateTimeUtils.parseYmdHmsWithZone(eventStartDate, eventTz);
        DateTime eventEndDt = DateTimeUtils.parseYmdHmsWithZone(eventEndDate, eventTz);

        UserProfile up = getEnv().getProfile();
        DateTimeZone profileTz = up.getTimeZone();

        // Widen the displayed window to cover both the event hours and the
        // configured workday (min of the starts, max of the ends).
        LocalTime localStartTime = eventStartDt.toLocalTime();
        LocalTime localEndTime = eventEndDt.toLocalTime();
        LocalTime fromTime = DateTimeUtils.min(localStartTime, us.getWorkdayStart());
        LocalTime toTime = DateTimeUtils.max(localEndTime, us.getWorkdayEnd());

        // Defines useful date/time formatters
        DateTimeFormatter ymdhmFmt = DateTimeUtils.createYmdHmFormatter();
        DateTimeFormatter tFmt = DateTimeUtils.createFormatter(cus.getShortTimeFormat());
        DateTimeFormatter dFmt = DateTimeUtils.createFormatter(cus.getShortDateFormat());

        // Spans across the event's date range (the 60 presumably means
        // 60-minute granularity — matches the availability call below)
        ArrayList<String> spans = manager.generateTimeSpans(60, eventStartDt.toLocalDate(),
                eventEndDt.toLocalDate(), us.getWorkdayStart(), us.getWorkdayEnd(), profileTz);

        // Generates fields and columnsInfo dynamically: one fixed "recipient"
        // column plus one column per time span
        ArrayList<FieldMeta> fields = new ArrayList<>();
        ArrayList<GridColumnMeta> colsInfo = new ArrayList<>();

        GridColumnMeta col = null;
        fields.add(new FieldMeta("recipient"));
        colsInfo.add(new GridColumnMeta("recipient"));
        for (String spanKey : spans) {
            LocalDateTime ldt = ymdhmFmt.parseLocalDateTime(spanKey);
            fields.add(new FieldMeta(spanKey));
            col = new GridColumnMeta(spanKey, tFmt.print(ldt));
            col.put("date", dFmt.print(ldt));
            // "overlaps" flags spans falling inside [eventStart, eventEnd)
            col.put("overlaps", (ldt.compareTo(eventStartDt.toLocalDateTime()) >= 0)
                    && (ldt.compareTo(eventEndDt.toLocalDateTime()) < 0));
            colsInfo.add(col);
        }

        // Collects attendees availability: one grid row per attendee
        OUser user = null;
        UserProfileId profileId = null;
        LinkedHashSet<String> busyHours = null;
        MapItem item = null;
        for (JsEvent.Attendee attendee : attendees) {
            item = new MapItem();
            item.put("recipient", attendee.recipient);

            user = guessUserByAttendee(core, attendee.recipient);
            if (user != null) {
                profileId = new UserProfileId(user.getDomainId(), user.getUserId());
                busyHours = manager.calculateAvailabilitySpans(60, profileId, eventStartDt.withTime(fromTime),
                        eventEndDt.withTime(toTime), eventTz, true);
                for (String hourKey : spans) {
                    item.put(hourKey, busyHours.contains(hourKey) ? "busy" : "free");
                }
            } else {
                // Attendee could not be matched to a local user: availability unknown
                for (String spanKey : spans) {
                    item.put(spanKey, "unknown");
                }
            }
            items.add(item);
        }

        GridMetadata meta = new GridMetadata(true);
        meta.setFields(fields);
        meta.setColumnsInfo(colsInfo);
        new JsonResult(items, meta, items.size()).printTo(out);

    } catch (Exception ex) {
        logger.error("Error in GetPlanning", ex);
        new JsonResult(false, "Error").printTo(out);

    } finally {
        DbUtils.closeQuietly(con);
    }
}