Example usage for java.util LinkedHashMap entrySet

Introduction

On this page you can find example usages of java.util.LinkedHashMap.entrySet().

Prototype

public Set<Map.Entry<K, V>> entrySet() 

Document

Returns a Set view of the mappings contained in this map.
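
Before the project excerpts below, here is a minimal self-contained sketch of the method's contract (the class name EntrySetDemo is invented for illustration): iterating entrySet() on a LinkedHashMap visits entries in the order the keys were first inserted, and the returned Set is a live view backed by the map.

import java.util.LinkedHashMap;
import java.util.Map;

public class EntrySetDemo {
    public static void main(String[] args) {
        Map<String, Integer> map = new LinkedHashMap<>();
        map.put("one", 1);
        map.put("two", 2);
        map.put("three", 3);

        // LinkedHashMap preserves insertion order, so this loop prints
        // one = 1, two = 2, three = 3 regardless of the keys' hash codes.
        for (Map.Entry<String, Integer> entry : map.entrySet()) {
            System.out.println(entry.getKey() + " = " + entry.getValue());
        }
    }
}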

Usage

From source file:juicebox.tools.utils.original.Preprocessor.java

/**
 * Note -- compressed
 *
 * @param zd          Matrix zoom data
 * @param block       Block to write
 * @param sampledData Array to hold a sample of the data (to compute statistics)
 * @throws IOException
 */
private void writeBlock(MatrixZoomDataPP zd, BlockPP block, DownsampledDoubleArrayList sampledData)
        throws IOException {

    final Map<Point, ContactCount> records = block.getContactRecordMap();//   getContactRecords();

    // System.out.println("Write contact records : records count = " + records.size());

    // Count records first
    int nRecords;
    if (countThreshold > 0) {
        nRecords = 0;
        for (ContactCount rec : records.values()) {
            if (rec.getCounts() >= countThreshold) {
                nRecords++;
            }
        }
    } else {
        nRecords = records.size();
    }
    BufferedByteWriter buffer = new BufferedByteWriter(nRecords * 12);
    buffer.putInt(nRecords);
    zd.cellCount += nRecords;

    // Find extents of occupied cells
    int binXOffset = Integer.MAX_VALUE;
    int binYOffset = Integer.MAX_VALUE;
    int binXMax = 0;
    int binYMax = 0;
    for (Map.Entry<Point, ContactCount> entry : records.entrySet()) {
        Point point = entry.getKey();
        binXOffset = Math.min(binXOffset, point.x);
        binYOffset = Math.min(binYOffset, point.y);
        binXMax = Math.max(binXMax, point.x);
        binYMax = Math.max(binYMax, point.y);
    }

    buffer.putInt(binXOffset);
    buffer.putInt(binYOffset);

    // Sort keys in row-major order
    List<Point> keys = new ArrayList<Point>(records.keySet());
    Collections.sort(keys, new Comparator<Point>() {
        @Override
        public int compare(Point o1, Point o2) {
            if (o1.y != o2.y) {
                return o1.y - o2.y;
            } else {
                return o1.x - o2.x;
            }
        }
    });
    Point lastPoint = keys.get(keys.size() - 1);
    final short w = (short) (binXMax - binXOffset + 1);

    boolean isInteger = true;
    float maxCounts = 0;

    LinkedHashMap<Integer, List<ContactRecord>> rows = new LinkedHashMap<Integer, List<ContactRecord>>();
    for (Point point : keys) {
        final ContactCount contactCount = records.get(point);
        float counts = contactCount.getCounts();
        if (counts >= countThreshold) {

            isInteger = isInteger && (Math.floor(counts) == counts);
            maxCounts = Math.max(counts, maxCounts);

            final int px = point.x - binXOffset;
            final int py = point.y - binYOffset;
            List<ContactRecord> row = rows.get(py);
            if (row == null) {
                row = new ArrayList<ContactRecord>(10);
                rows.put(py, row);
            }
            row.add(new ContactRecord(px, py, counts));
        }
    }

    // Compute size for each representation and choose smallest
    boolean useShort = isInteger && (maxCounts < Short.MAX_VALUE);
    int valueSize = useShort ? 2 : 4;

    int lorSize = 0;
    int nDensePts = (lastPoint.y - binYOffset) * w + (lastPoint.x - binXOffset) + 1;

    int denseSize = nDensePts * valueSize;
    for (List<ContactRecord> row : rows.values()) {
        lorSize += 4 + row.size() * valueSize;
    }

    buffer.put((byte) (useShort ? 0 : 1));

    if (lorSize < denseSize) {

        buffer.put((byte) 1); // List of rows representation

        buffer.putShort((short) rows.size()); // # of rows

        for (Map.Entry<Integer, List<ContactRecord>> entry : rows.entrySet()) {

            int py = entry.getKey();
            List<ContactRecord> row = entry.getValue();
            buffer.putShort((short) py); // Row number
            buffer.putShort((short) row.size()); // size of row

            for (ContactRecord contactRecord : row) {
                buffer.putShort((short) (contactRecord.getBinX()));
                final float counts = contactRecord.getCounts();

                if (useShort) {
                    buffer.putShort((short) counts);
                } else {
                    buffer.putFloat(counts);
                }

                sampledData.add(counts);
                zd.sum += counts;
            }
        }

    } else {
        buffer.put((byte) 2); // Dense matrix

        buffer.putInt(nDensePts);
        buffer.putShort(w);

        int lastIdx = 0;
        for (Point p : keys) {

            int idx = (p.y - binYOffset) * w + (p.x - binXOffset);
            for (int i = lastIdx; i < idx; i++) {
                // Filler value
                if (useShort) {
                    buffer.putShort(Short.MIN_VALUE);
                } else {
                    buffer.putFloat(Float.NaN);
                }
            }
            float counts = records.get(p).getCounts();
            if (useShort) {
                buffer.putShort((short) counts);
            } else {
                buffer.putFloat(counts);
            }
            lastIdx = idx + 1;

            sampledData.add(counts);
            zd.sum += counts;
        }
    }

    byte[] bytes = buffer.getBytes();
    byte[] compressedBytes = compress(bytes);
    los.write(compressedBytes);

}
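
As a side note on the grouping step above: writeBlock builds its rows map with the pre-Java-8 get/null-check/put pattern. On Java 8 and later the same grouping is usually written with Map.computeIfAbsent, which preserves the LinkedHashMap's first-seen row order just the same. A minimal standalone sketch, using illustrative int coordinates rather than Juicebox's Point/ContactRecord types:

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class GroupByRowDemo {
    public static void main(String[] args) {
        // Group x coordinates by y (row) while preserving the order in
        // which rows are first seen -- the same effect as the manual
        // get/null-check/put pattern in writeBlock above.
        Map<Integer, List<Integer>> rows = new LinkedHashMap<>();
        int[][] points = { { 3, 1 }, { 5, 1 }, { 2, 0 }, { 7, 1 } };
        for (int[] p : points) {
            rows.computeIfAbsent(p[1], k -> new ArrayList<>()).add(p[0]);
        }
        for (Map.Entry<Integer, List<Integer>> e : rows.entrySet()) {
            System.out.println("row " + e.getKey() + " -> " + e.getValue());
        }
        // Prints rows in first-seen order: row 1 -> [3, 5, 7], row 0 -> [2]
    }
}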

From source file:org.jumpmind.db.platform.AbstractDdlBuilder.java

public void writeCopyDataStatement(Table sourceTable, Table targetTable,
        LinkedHashMap<Column, Column> columnMap, StringBuilder ddl) {
    ddl.append("INSERT INTO ");
    ddl.append(getFullyQualifiedTableNameShorten(targetTable));
    ddl.append(" (");
    for (Iterator<Column> columnIt = columnMap.values().iterator(); columnIt.hasNext();) {
        printIdentifier(getColumnName((Column) columnIt.next()), ddl);
        if (columnIt.hasNext()) {
            ddl.append(",");
        }
    }
    ddl.append(") SELECT ");
    for (Iterator<Map.Entry<Column, Column>> columnsIt = columnMap.entrySet().iterator(); columnsIt
            .hasNext();) {
        Map.Entry<Column, Column> entry = columnsIt.next();

        writeCastExpression((Column) entry.getKey(), (Column) entry.getValue(), ddl);
        if (columnsIt.hasNext()) {
            ddl.append(",");
        }
    }
    ddl.append(" FROM ");
    ddl.append(getFullyQualifiedTableNameShorten(sourceTable));
    printEndOfStatement(ddl);
}
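
This statement builder quietly depends on LinkedHashMap's ordering guarantee: the INSERT column list comes from columnMap.values() and the SELECT cast expressions from columnMap.entrySet(), and the two line up positionally only because both views iterate in the same insertion order. A plain HashMap could pair a column with the wrong cast expression.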

From source file:org.apache.lens.cube.metadata.CubeMetastoreClient.java

private List<Partition> getAllLatestPartsEquivalentTo(String factOrDimtableName, String storageTableName,
        List<Partition> partitions) throws HiveException, LensException {
    if (isFactTable(factOrDimtableName)) {
        return Lists.newArrayList();
    }
    Table storageTable = getTable(storageTableName);
    List<String> timePartCols = getTimePartColNamesOfTable(storageTable);
    List<Partition> latestParts = Lists.newArrayList();
    for (Partition partition : partitions) {
        LinkedHashMap<String, String> partSpec = partition.getSpec();
        LinkedHashMap<String, String> timePartSpec = Maps.newLinkedHashMap();
        LinkedHashMap<String, String> nonTimePartSpec = Maps.newLinkedHashMap();
        for (Map.Entry<String, String> entry : partSpec.entrySet()) {
            if (timePartCols.contains(entry.getKey())) {
                timePartSpec.put(entry.getKey(), entry.getValue());
            } else {
                nonTimePartSpec.put(entry.getKey(), entry.getValue());
            }
        }
        for (String timePartCol : timePartCols) {
            Partition latestPart = getLatestPart(storageTableName, timePartCol, nonTimePartSpec);
            if (latestPart != null) {
                LinkedHashMap<String, String> latestPartSpec = latestPart.getSpec();
                latestPartSpec.put(timePartCol, partSpec.get(timePartCol));
                if (partSpec.equals(latestPartSpec)) {
                    latestPart.getParameters().putAll(partition.getParameters());
                    latestPart.getParameters().put(getLatestPartTimestampKey(timePartCol),
                            partSpec.get(timePartCol));
                    latestPart.getTPartition().getSd().getSerdeInfo().getParameters()
                            .putAll(partition.getTPartition().getSd().getSerdeInfo().getParameters());
                    latestPart.setLocation(partition.getLocation());
                    latestPart.setInputFormatClass(partition.getInputFormatClass());
                    latestPart.setOutputFormatClass(
                            partition.getOutputFormatClass().asSubclass(HiveOutputFormat.class));
                    latestPart.getTPartition().getSd().getSerdeInfo().setSerializationLib(
                            partition.getTPartition().getSd().getSerdeInfo().getSerializationLib());
                    latestParts.add(latestPart);
                }
            }
        }
    }
    return latestParts;
}

From source file:net.sf.jabref.sql.importer.DatabaseImporter.java

private void importGroupsTree(MetaData metaData, Map<String, BibEntry> entries, Connection conn,
        final String database_id) throws SQLException {
    Map<String, GroupTreeNode> groups = new HashMap<>();
    LinkedHashMap<GroupTreeNode, String> parentIds = new LinkedHashMap<>();
    GroupTreeNode rootNode = GroupTreeNode.fromGroup(new AllEntriesGroup());

    String query = SQLUtil
            .queryAllFromTable("groups WHERE database_id='" + database_id + "' ORDER BY groups_id");
    try (Statement statement = conn.createStatement(); ResultSet rsGroups = statement.executeQuery(query)) {
        while (rsGroups.next()) {
            AbstractGroup group = null;
            String typeId = findGroupTypeName(rsGroups.getString("group_types_id"), conn);
            try {
                switch (typeId) {
                case AllEntriesGroup.ID:
                    // register the id of the root node:
                    groups.put(rsGroups.getString("groups_id"), rootNode);
                    break;
                case ExplicitGroup.ID:
                    group = new ExplicitGroup(rsGroups.getString("label"),
                            GroupHierarchyType.getByNumber(rsGroups.getInt("hierarchical_context")),
                            Globals.prefs);
                    break;
                case KeywordGroup.ID:
                    LOGGER.debug("Keyw: " + rsGroups.getBoolean("case_sensitive"));
                    group = new KeywordGroup(rsGroups.getString("label"),
                            StringUtil.unquote(rsGroups.getString("search_field"), '\\'),
                            StringUtil.unquote(rsGroups.getString("search_expression"), '\\'),
                            rsGroups.getBoolean("case_sensitive"), rsGroups.getBoolean("reg_exp"),
                            GroupHierarchyType.getByNumber(rsGroups.getInt("hierarchical_context")),
                            Globals.prefs);
                    break;
                case SearchGroup.ID:
                    LOGGER.debug("Search: " + rsGroups.getBoolean("case_sensitive"));
                    group = new SearchGroup(rsGroups.getString("label"),
                            StringUtil.unquote(rsGroups.getString("search_expression"), '\\'),
                            rsGroups.getBoolean("case_sensitive"), rsGroups.getBoolean("reg_exp"),
                            GroupHierarchyType.getByNumber(rsGroups.getInt("hierarchical_context")));
                    break;
                default:
                    break;
                }
            } catch (ParseException e) {
                LOGGER.error(e);
            }

            if (group != null) {
                GroupTreeNode node = GroupTreeNode.fromGroup(group);
                parentIds.put(node, rsGroups.getString("parent_id"));
                groups.put(rsGroups.getString("groups_id"), node);
            }

            // Ok, we have collected a map of all groups and their parent IDs,
            // and another map of all group IDs and their group nodes.
            // Now we need to build the groups tree:
            for (Map.Entry<GroupTreeNode, String> groupTreeNodeStringEntry : parentIds.entrySet()) {
                String parentId = groupTreeNodeStringEntry.getValue();
                GroupTreeNode parent = groups.get(parentId);
                if (parent == null) {
                    // TODO: missing parent
                } else {
                    groupTreeNodeStringEntry.getKey().moveTo(parent);
                }
            }

            try (Statement entryGroup = conn.createStatement();
                    ResultSet rsEntryGroup = entryGroup
                            .executeQuery(SQLUtil.queryAllFromTable("entry_group"))) {
                while (rsEntryGroup.next()) {
                    String entryId = rsEntryGroup.getString("entries_id");
                    String groupId = rsEntryGroup.getString("groups_id");
                    GroupTreeNode node = groups.get(groupId);
                    if ((node != null) && (node.getGroup() instanceof ExplicitGroup)) {
                        ExplicitGroup expGroup = (ExplicitGroup) node.getGroup();
                        expGroup.add(entries.get(entryId));
                    }
                }
            }
            metaData.setGroups(rootNode);
        }
    }
}

From source file:es.uvigo.ei.sing.adops.datatypes.ProjectExperiment.java

private String checkFastaFile() throws IllegalArgumentException {
    final Set<Character> aminos = new HashSet<Character>(
            Arrays.asList('a', 'c', 't', 'g', 'A', 'C', 'T', 'G', '-'));
    BufferedReader br = null;

    try {
        final StringBuilder sb = new StringBuilder();
        final LinkedHashMap<String, StringBuilder> replacements = new LinkedHashMap<String, StringBuilder>();

        br = new BufferedReader(new FileReader(this.fastaFile));

        String line = null;
        while ((line = br.readLine()) != null && !line.startsWith(">")) {
            sb.append(line).append('\n');
        }

        String seqId = null;
        String seq = null;
        while (line != null) {
            seqId = line;
            seq = "";
            while ((line = br.readLine()) != null && !line.startsWith(">")) {
                seq += line;
            }

            // Non ACTG characters replacement
            char[] charSequence = seq.toCharArray();
            String data = "";
            for (int i = 0; i < charSequence.length; i++) {
                if (aminos.contains(charSequence[i])) {
                    data += charSequence[i];
                } else {
                    if (replacements.containsKey(seqId)) {
                        replacements.get(seqId).append(String.format(", [%d,%c]", i + 1, charSequence[i]));
                    } else {
                        replacements.put(seqId,
                                new StringBuilder(String.format("[%d,%c]", i + 1, charSequence[i])));
                    }

                    data += '-';
                }
            }

            // Incomplete codons replacement
            charSequence = data.toCharArray();
            data = "";
            String codon = "";
            for (int i = 0; i < charSequence.length; i++) {
                codon += Character.toString(charSequence[i]);
                if ((i + 1) % 3 == 0) {
                    if (codon.contains("-")) {
                        data += "---";
                        if (replacements.containsKey(seqId)) {
                            replacements.get(seqId).append(String.format(", [%s,---]", codon));
                        } else {
                            replacements.put(seqId, new StringBuilder(String.format("[%s,---]", codon)));
                        }
                    } else {
                        data += codon;

                    }

                    codon = "";
                }
            }

            sb.append(seqId).append('\n');
            sb.append(data).append('\n');
        }

        FileUtils.write(this.fastaFile, sb.toString());

        if (replacements.isEmpty()) {
            return "";
        } else {
            final StringBuilder summary = new StringBuilder("Replacements done on input file\n");
            for (Map.Entry<String, StringBuilder> replacement : replacements.entrySet()) {
                summary.append(replacement.getKey()).append('\n');
                summary.append(replacement.getValue()).append('\n');
            }

            summary.append("\n-----\n");

            return summary.toString();
        }
    } catch (Exception e) {
        throw new IllegalArgumentException("Input file is not a valida Fasta file");
    } finally {
        if (br != null) {
            try {
                br.close();
            } catch (IOException ioe) {
                // Ignore failures while closing the reader
            }
        }
    }
}

From source file:annis.visualizers.component.grid.EventExtractor.java

/**
* Converts Salt document graph to rows.
*
* @param graph
* @param annotationNames
* @param startTokenIndex token index of the first token in the match
* @param endTokenIndex token index of the last token in the match
* @return
*/
public static LinkedHashMap<String, ArrayList<Row>> parseSalt(VisualizerInput input,
        List<String> annotationNames, long startTokenIndex, long endTokenIndex) {

    SDocumentGraph graph = input.getDocument().getSDocumentGraph();

    // only look at annotations which were defined by the user
    LinkedHashMap<String, ArrayList<Row>> rowsByAnnotation = new LinkedHashMap<String, ArrayList<Row>>();

    for (String anno : annotationNames) {
        rowsByAnnotation.put(anno, new ArrayList<Row>());
    }

    int eventCounter = 0;

    PDFPageHelper pageNumberHelper = new PDFPageHelper(input);

    for (SSpan span : graph.getSSpans()) {
        // calculate the left and right values of a span
        // TODO: howto get these numbers with Salt?
        long leftLong = span.getSFeature(ANNIS_NS, FEAT_LEFTTOKEN).getSValueSNUMERIC();
        long rightLong = span.getSFeature(ANNIS_NS, FEAT_RIGHTTOKEN).getSValueSNUMERIC();

        leftLong = clip(leftLong, startTokenIndex, endTokenIndex);
        rightLong = clip(rightLong, startTokenIndex, endTokenIndex);

        int left = (int) (leftLong - startTokenIndex);
        int right = (int) (rightLong - startTokenIndex);

        for (SAnnotation anno : span.getSAnnotations()) {
            ArrayList<Row> rows = rowsByAnnotation.get(anno.getQName());
            if (rows == null) {
                // try again with only the name
                rows = rowsByAnnotation.get(anno.getSName());
            }
            if (rows != null) {
                // only do something if the annotation was defined before

                // 1. give each annotation of each span an own row
                Row r = new Row();

                String id = "event_" + eventCounter++;
                GridEvent event = new GridEvent(id, left, right, anno.getSValueSTEXT());

                // check if the span is a matched node
                SFeature featMatched = span.getSFeature(ANNIS_NS, FEAT_MATCHEDNODE);
                Long match = featMatched == null ? null : featMatched.getSValueSNUMERIC();
                event.setMatch(match);

                // calculate overlapped SToken
                EList<Edge> outEdges = graph.getOutEdges(span.getSId());
                if (outEdges != null) {
                    for (Edge e : outEdges) {
                        if (e instanceof SSpanningRelation) {
                            SSpanningRelation spanRel = (SSpanningRelation) e;

                            SToken tok = spanRel.getSToken();
                            event.getCoveredIDs().add(tok.getSId());

                            // get the STextualDS of this token and add it to the event
                            EList<Edge> tokenOutEdges = graph.getOutEdges(tok.getSId());
                            if (tokenOutEdges != null) {
                                for (Edge tokEdge : tokenOutEdges) {
                                    if (tokEdge instanceof STextualRelation) {
                                        event.setTextID(((STextualRelation) tokEdge).getSTextualDS().getSId());
                                        break;
                                    }
                                }
                            }
                        }
                    }
                }

                // try to get time annotations
                double[] startEndTime = TimeHelper.getOverlappedTime(span);
                if (startEndTime.length == 1) {
                    event.setStartTime(startEndTime[0]);
                } else if (startEndTime.length == 2) {
                    event.setStartTime(startEndTime[0]);
                    event.setEndTime(startEndTime[1]);
                }

                r.addEvent(event);
                rows.add(r);

                String page = pageNumberHelper.getPageFromAnnotation(span);
                if (page != null) {
                    event.setPage(page);
                }
            }
        } // end for each annotation of span
    } // end for each span

    // 2. merge rows when possible
    for (Map.Entry<String, ArrayList<Row>> e : rowsByAnnotation.entrySet()) {
        mergeAllRowsIfPossible(e.getValue());
    }

    // 3. sort events on one row by left token index
    for (Map.Entry<String, ArrayList<Row>> e : rowsByAnnotation.entrySet()) {
        for (Row r : e.getValue()) {
            sortEventsByTokenIndex(r);
        }
    }

    // 4. split up events if they have gaps
    for (Map.Entry<String, ArrayList<Row>> e : rowsByAnnotation.entrySet()) {
        for (Row r : e.getValue()) {
            splitRowsOnGaps(r, graph, startTokenIndex, endTokenIndex);
        }
    }
    return rowsByAnnotation;
}
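
Because rowsByAnnotation was filled from annotationNames into a LinkedHashMap, the three entrySet() passes above (merging, sorting, and splitting rows) all visit the annotation layers in the order the user configured them.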

From source file:org.dcm4che3.tool.unvscp.UnvSCP.java

public Attributes calculateStorageCommitmentResult(String calledAET, Attributes actionInfo)
        throws DicomServiceException {
    Sequence requestSeq = actionInfo.getSequence(Tag.ReferencedSOPSequence);
    int size = requestSeq.size();
    String[] sopIUIDs = new String[size];
    Attributes eventInfo = new Attributes(6);
    eventInfo.setString(Tag.RetrieveAETitle, VR.AE, calledAET);
    eventInfo.setString(Tag.StorageMediaFileSetID, VR.SH, ddReader.getFileSetID());
    eventInfo.setString(Tag.StorageMediaFileSetUID, VR.SH, ddReader.getFileSetUID());
    eventInfo.setString(Tag.TransactionUID, VR.UI, actionInfo.getString(Tag.TransactionUID));
    Sequence successSeq = eventInfo.newSequence(Tag.ReferencedSOPSequence, size);
    Sequence failedSeq = eventInfo.newSequence(Tag.FailedSOPSequence, size);
    LinkedHashMap<String, String> map = new LinkedHashMap<String, String>(size * 4 / 3);
    for (int i = 0; i < sopIUIDs.length; i++) {
        Attributes item = requestSeq.get(i);
        map.put(sopIUIDs[i] = item.getString(Tag.ReferencedSOPInstanceUID),
                item.getString(Tag.ReferencedSOPClassUID));
    }
    IDicomReader ddr = ddReader;
    try {
        Attributes patRec = ddr.findPatientRecord();
        while (patRec != null) {
            Attributes studyRec = ddr.findStudyRecord(patRec);
            while (studyRec != null) {
                Attributes seriesRec = ddr.findSeriesRecord(studyRec);
                while (seriesRec != null) {
                    Attributes instRec = ddr.findLowerInstanceRecord(seriesRec, true, sopIUIDs);
                    while (instRec != null) {
                        String iuid = instRec.getString(Tag.ReferencedSOPInstanceUIDInFile);
                        String cuid = map.remove(iuid);
                        if (cuid.equals(instRec.getString(Tag.ReferencedSOPClassUIDInFile)))
                            successSeq.add(refSOP(iuid, cuid, Status.Success));
                        else
                            failedSeq.add(refSOP(iuid, cuid, Status.ClassInstanceConflict));
                        instRec = ddr.findNextInstanceRecord(instRec, true, sopIUIDs);
                    }
                    seriesRec = ddr.findNextSeriesRecord(seriesRec);
                }
                studyRec = ddr.findNextStudyRecord(studyRec);
            }
            patRec = ddr.findNextPatientRecord(patRec);
        }
    } catch (IOException e) {
        LOG.info("Failed to M-READ " + dicomDir, e);
        throw new DicomServiceException(Status.ProcessingFailure, e);
    }
    for (Map.Entry<String, String> entry : map.entrySet()) {
        failedSeq.add(refSOP(entry.getKey(), entry.getValue(), Status.NoSuchObjectInstance));
    }
    if (failedSeq.isEmpty())
        eventInfo.remove(Tag.FailedSOPSequence);
    return eventInfo;
}
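
Two details here are easy to miss: the LinkedHashMap is sized size * 4 / 3 so that, at HashMap's default 0.75 load factor, inserting size entries does not trigger a rehash; and because the DICOMDIR scan removes each instance it finds via map.remove(iuid), the entries remaining in the final entrySet() loop are precisely the requested SOP instances that were never found, reported as NoSuchObjectInstance.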

From source file:com.espertech.esper.filter.FilterSpecCompiler.java

private static void handleSubselectSelectClauses(int subselectStreamNumber, StatementContext statementContext,
        ExprSubselectNode subselect, EventType outerEventType, String outerEventTypeName,
        String outerStreamName, LinkedHashMap<String, Pair<EventType, String>> taggedEventTypes,
        LinkedHashMap<String, Pair<EventType, String>> arrayEventTypes) throws ExprValidationException {

    StatementSpecCompiled statementSpec = subselect.getStatementSpecCompiled();
    StreamSpecCompiled filterStreamSpec = statementSpec.getStreamSpecs()[0];

    ViewFactoryChain viewFactoryChain;
    String subselecteventTypeName = null;

    // construct view factory chain
    try {
        if (statementSpec.getStreamSpecs()[0] instanceof FilterStreamSpecCompiled) {
            FilterStreamSpecCompiled filterStreamSpecCompiled = (FilterStreamSpecCompiled) statementSpec
                    .getStreamSpecs()[0];
            subselecteventTypeName = filterStreamSpecCompiled.getFilterSpec().getFilterForEventTypeName();

            // A child view is required to limit the stream
            if (filterStreamSpec.getViewSpecs().length == 0) {
                throw new ExprValidationException(
                        "Subqueries require one or more views to limit the stream, consider declaring a length or time window");
            }

            // Register filter, create view factories
            viewFactoryChain = statementContext.getViewService().createFactories(subselectStreamNumber,
                    filterStreamSpecCompiled.getFilterSpec().getResultEventType(),
                    filterStreamSpec.getViewSpecs(), filterStreamSpec.getOptions(), statementContext);
            subselect.setRawEventType(viewFactoryChain.getEventType());
        } else {
            NamedWindowConsumerStreamSpec namedSpec = (NamedWindowConsumerStreamSpec) statementSpec
                    .getStreamSpecs()[0];
            NamedWindowProcessor processor = statementContext.getNamedWindowService()
                    .getProcessor(namedSpec.getWindowName());
            viewFactoryChain = statementContext.getViewService().createFactories(0,
                    processor.getNamedWindowType(), namedSpec.getViewSpecs(), namedSpec.getOptions(),
                    statementContext);
            subselecteventTypeName = namedSpec.getWindowName();
        }
    } catch (ViewProcessingException ex) {
        throw new ExprValidationException("Error validating subexpression: " + ex.getMessage(), ex);
    }

    // the final event type
    EventType eventType = viewFactoryChain.getEventType();

    // determine a stream name unless one was supplied
    String subexpressionStreamName = filterStreamSpec.getOptionalStreamName();
    if (subexpressionStreamName == null) {
        subexpressionStreamName = "$subselect_" + subselectStreamNumber;
    }

    // Named windows don't allow data views
    if (filterStreamSpec instanceof NamedWindowConsumerStreamSpec) {
        EPStatementStartMethodHelperValidate
                .validateNoDataWindowOnNamedWindow(viewFactoryChain.getViewFactoryChain());
    }

    // Streams event types are the original stream types with the stream zero the subselect stream
    LinkedHashMap<String, Pair<EventType, String>> namesAndTypes = new LinkedHashMap<String, Pair<EventType, String>>();
    namesAndTypes.put(subexpressionStreamName, new Pair<EventType, String>(eventType, subselecteventTypeName));
    namesAndTypes.put(outerStreamName, new Pair<EventType, String>(outerEventType, outerEventTypeName));
    if (taggedEventTypes != null) {
        for (Map.Entry<String, Pair<EventType, String>> entry : taggedEventTypes.entrySet()) {
            namesAndTypes.put(entry.getKey(),
                    new Pair<EventType, String>(entry.getValue().getFirst(), entry.getValue().getSecond()));
        }
    }
    if (arrayEventTypes != null) {
        for (Map.Entry<String, Pair<EventType, String>> entry : arrayEventTypes.entrySet()) {
            namesAndTypes.put(entry.getKey(),
                    new Pair<EventType, String>(entry.getValue().getFirst(), entry.getValue().getSecond()));
        }
    }
    StreamTypeService subselectTypeService = new StreamTypeServiceImpl(namesAndTypes,
            statementContext.getEngineURI(), true, true);
    ViewResourceDelegateUnverified viewResourceDelegateSubselect = new ViewResourceDelegateUnverified();
    subselect.setFilterSubqueryStreamTypes(subselectTypeService);

    // Validate select expression
    SelectClauseSpecCompiled selectClauseSpec = subselect.getStatementSpecCompiled().getSelectClauseSpec();
    if (selectClauseSpec.getSelectExprList().length > 0) {
        if (selectClauseSpec.getSelectExprList().length > 1) {
            throw new ExprValidationException("Subquery multi-column select is not allowed in this context.");
        }

        SelectClauseElementCompiled element = selectClauseSpec.getSelectExprList()[0];
        if (element instanceof SelectClauseExprCompiledSpec) {
            // validate
            SelectClauseExprCompiledSpec compiled = (SelectClauseExprCompiledSpec) element;
            ExprNode selectExpression = compiled.getSelectExpression();
            ExprEvaluatorContextStatement evaluatorContextStmt = new ExprEvaluatorContextStatement(
                    statementContext);
            ExprValidationContext validationContext = new ExprValidationContext(subselectTypeService,
                    statementContext.getMethodResolutionService(), viewResourceDelegateSubselect,
                    statementContext.getSchedulingService(), statementContext.getVariableService(),
                    evaluatorContextStmt, statementContext.getEventAdapterService(),
                    statementContext.getStatementName(), statementContext.getStatementId(),
                    statementContext.getAnnotations(), statementContext.getContextDescriptor());
            selectExpression = ExprNodeUtility.getValidatedSubtree(selectExpression, validationContext);
            subselect.setSelectClause(new ExprNode[] { selectExpression });
            subselect.setSelectAsNames(new String[] { compiled.getAssignedName() });

            // handle aggregation
            List<ExprAggregateNode> aggExprNodes = new LinkedList<ExprAggregateNode>();
            ExprAggregateNodeUtil.getAggregatesBottomUp(selectExpression, aggExprNodes);
            if (aggExprNodes.size() > 0) {
                // Other stream properties, if there is aggregation, cannot be under aggregation.
                for (ExprAggregateNode aggNode : aggExprNodes) {
                    List<Pair<Integer, String>> propertiesNodesAggregated = ExprNodeUtility
                            .getExpressionProperties(aggNode, true);
                    for (Pair<Integer, String> pair : propertiesNodesAggregated) {
                        if (pair.getFirst() != 0) {
                            throw new ExprValidationException(
                                    "Subselect aggregation function cannot aggregate across correlated properties");
                        }
                    }
                }

                // This stream (stream 0) properties must either all be under aggregation, or all not be.
                List<Pair<Integer, String>> propertiesNotAggregated = ExprNodeUtility
                        .getExpressionProperties(selectExpression, false);
                for (Pair<Integer, String> pair : propertiesNotAggregated) {
                    if (pair.getFirst() == 0) {
                        throw new ExprValidationException(
                                "Subselect properties must all be within aggregation functions");
                    }
                }
            }
        }
    }
}

From source file:org.apache.asterix.translator.LangExpressionToPlanTranslator.java

/**
 * Eliminate shared operator references in a query plan rooted at <code>currentOpRef.getValue()</code>.
 * Deep copy a new query plan subtree whenever there is a shared operator reference.
 *
 * @param currentOpRef,
 *            the operator reference to consider
 * @param opRefSet,
 *            the set storing seen operator references so far.
 * @return a mapping that maps old variables to new variables, for the ancestors of
 *         <code>currentOpRef</code> to replace variables properly.
 * @throws AsterixException
 */
private LinkedHashMap<LogicalVariable, LogicalVariable> eliminateSharedOperatorReference(
        Mutable<ILogicalOperator> currentOpRef, Set<Mutable<ILogicalOperator>> opRefSet)
        throws AsterixException {
    try {
        opRefSet.add(currentOpRef);
        AbstractLogicalOperator currentOperator = (AbstractLogicalOperator) currentOpRef.getValue();

        // Recursively eliminates shared references in nested plans.
        if (currentOperator.hasNestedPlans()) {
            // Since a nested plan tree itself can never be shared with another nested plan tree in
            // another operator, the operation called in the if block does not need to replace
            // any variables further for <code>currentOpRef.getValue()</code> nor its ancestor.
            AbstractOperatorWithNestedPlans opWithNestedPlan = (AbstractOperatorWithNestedPlans) currentOperator;
            for (ILogicalPlan plan : opWithNestedPlan.getNestedPlans()) {
                for (Mutable<ILogicalOperator> rootRef : plan.getRoots()) {
                    Set<Mutable<ILogicalOperator>> nestedOpRefSet = new HashSet<>();
                    eliminateSharedOperatorReference(rootRef, nestedOpRefSet);
                }
            }
        }

        int childIndex = 0;
        LinkedHashMap<LogicalVariable, LogicalVariable> varMap = new LinkedHashMap<>();
        for (Mutable<ILogicalOperator> childRef : currentOperator.getInputs()) {
            if (opRefSet.contains(childRef)) {
                // There is a shared operator reference in the query plan.
                // Deep copies the child plan.
                LogicalOperatorDeepCopyWithNewVariablesVisitor visitor = new LogicalOperatorDeepCopyWithNewVariablesVisitor(
                        context, null);
                ILogicalOperator newChild = childRef.getValue().accept(visitor, null);
                LinkedHashMap<LogicalVariable, LogicalVariable> cloneVarMap = visitor
                        .getInputToOutputVariableMapping();

                // Substitute variables according to the deep copy which generates new variables.
                VariableUtilities.substituteVariables(currentOperator, cloneVarMap, null);
                varMap.putAll(cloneVarMap);

                // Sets the new child.
                childRef = new MutableObject<>(newChild);
                currentOperator.getInputs().set(childIndex, childRef);
            }

            // Recursively eliminate shared operator reference for the operator subtree,
            // even if it is a deep copy of some other one.
            LinkedHashMap<LogicalVariable, LogicalVariable> childVarMap = eliminateSharedOperatorReference(
                    childRef, opRefSet);
            // Substitute variables according to the new subtree.
            VariableUtilities.substituteVariables(currentOperator, childVarMap, null);

            // Updates mapping like <$a, $b> in varMap to <$a, $c>, where there is a mapping <$b, $c>
            // in childVarMap.
            for (Map.Entry<LogicalVariable, LogicalVariable> entry : varMap.entrySet()) {
                LogicalVariable newVar = childVarMap.get(entry.getValue());
                if (newVar != null) {
                    entry.setValue(newVar);
                }
            }
            varMap.putAll(childVarMap);
            ++childIndex;
        }

        // Only retain live variables for parent operators to substitute variables.
        Set<LogicalVariable> liveVars = new HashSet<>();
        VariableUtilities.getLiveVariables(currentOperator, liveVars);
        varMap.values().retainAll(liveVars);
        return varMap;
    } catch (AlgebricksException e) {
        throw new AsterixException(e);
    }
}
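
This traversal also illustrates a property of entrySet() beyond iteration order: entry.setValue(newVar) writes through to the backing LinkedHashMap, so varMap can be updated in place mid-iteration (a non-structural modification, which the Map contract permits).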

From source file:org.pentaho.di.repository.pur.PurRepository.java

/**
 * Read shared objects of the types provided from the repository. Every {@link SharedObjectInterface} that is read
 * will be fully loaded as if it has been loaded through {@link #loadDatabaseMeta(ObjectId, String)},
 * {@link #loadClusterSchema(ObjectId, List, String)}, etc.
 * <p>/*from  w  w w  .  ja v a  2s .com*/
 * This method was introduced to reduce the number of server calls for loading shared objects to a constant number:
 * {@code 2 + n, where n is the number of types requested}.
 * </p>
 *
 * @param sharedObjectsByType
 *          Map of type to shared objects. Each map entry will contain a non-null {@link List} of
 *          {@link RepositoryObjectType}s for every type provided. Only entries for types provided will be altered.
 * @param types
 *          Types of repository objects to read from the repository
 * @throws KettleException
 */
protected void readSharedObjects(
        Map<RepositoryObjectType, List<? extends SharedObjectInterface>> sharedObjectsByType,
        RepositoryObjectType... types) throws KettleException {
    // Overview:
    // 1) We will fetch RepositoryFile, NodeRepositoryFileData, and VersionSummary for all types provided.
    // 2) We assume that unless an exception is thrown every RepositoryFile returned by getFilesByType(..) have a
    // matching NodeRepositoryFileData and VersionSummary.
    // 3) With all files, node data, and versions in hand we will iterate over them, merging them back into usable
    // shared objects
    List<RepositoryFile> allFiles = new ArrayList<RepositoryFile>();
    // Since type is not preserved in the RepositoryFile we fetch files by type so we don't rely on parsing the name to
    // determine type afterward
    // Map must be ordered or we can't match up files with data and version summary
    LinkedHashMap<RepositoryObjectType, List<RepositoryFile>> filesByType = getFilesByType(allFiles, types);
    try {
        List<NodeRepositoryFileData> data = pur.getDataForReadInBatch(allFiles, NodeRepositoryFileData.class);
        List<VersionSummary> versions = pur.getVersionSummaryInBatch(allFiles);
        // Only need one iterator for all data and versions. We will work through them as we process the files by type, in
        // order.
        Iterator<NodeRepositoryFileData> dataIter = data.iterator();
        Iterator<VersionSummary> versionsIter = versions.iterator();

        // Assemble into completely loaded SharedObjectInterfaces by type
        for (Entry<RepositoryObjectType, List<RepositoryFile>> entry : filesByType.entrySet()) {
            SharedObjectAssembler<?> assembler = sharedObjectAssemblerMap.get(entry.getKey());
            if (assembler == null) {
                throw new UnsupportedOperationException(
                        String.format("Cannot assemble shared object of type [%s]", entry.getKey())); //$NON-NLS-1$
            }
            // For all files of this type, assemble them from the pieces of data pulled from the repository
            Iterator<RepositoryFile> filesIter = entry.getValue().iterator();
            List<SharedObjectInterface> sharedObjects = new ArrayList<SharedObjectInterface>(
                    entry.getValue().size());
            // Exceptions are thrown during lookup if data or versions aren't found so all the lists should be the same size
            // (no need to check for next on all iterators)
            while (filesIter.hasNext()) {
                RepositoryFile file = filesIter.next();
                NodeRepositoryFileData repoData = dataIter.next();
                VersionSummary version = versionsIter.next();

                // TODO: inexistent db types can cause exceptions assembling; prevent total failure
                try {
                    sharedObjects.add(assembler.assemble(file, repoData, version));
                } catch (Exception ex) {
                    // TODO i18n
                    getLog().logError("Unable to load shared objects", ex);
                }
            }
            sharedObjectsByType.put(entry.getKey(), sharedObjects);
        }
    } catch (Exception ex) {
        // TODO i18n
        throw new KettleException("Unable to load shared objects", ex); //$NON-NLS-1$
    }
}