Example usage for java.util Set equals

Introduction

This page presents example usages of java.util Set.equals, collected from real-world open source projects.

Prototype

boolean equals(Object o);

Document

Compares the specified object with this set for equality. Returns true if the specified object is also a set, the two sets have the same size, and every member of the specified set is contained in this set; this ensures that the equals method works properly across different implementations of the Set interface.
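
As a quick illustration of that contract, here is a minimal standalone sketch (assuming Java 9+ for the Set.of factory; the class name SetEqualsDemo is ours): equality is defined purely by contents, so a HashSet and a TreeSet holding the same elements compare equal regardless of implementation class or iteration order.

import java.util.HashSet;
import java.util.Set;
import java.util.TreeSet;

public class SetEqualsDemo {
    public static void main(String[] args) {
        // Two sets are equal when they have the same size and contain the
        // same elements; implementation class and iteration order do not
        // matter. (Set.of requires Java 9 or later.)
        Set<String> hashSet = new HashSet<>(Set.of("a", "b"));
        Set<String> treeSet = new TreeSet<>(Set.of("b", "a"));

        System.out.println(hashSet.equals(treeSet));          // true
        System.out.println(hashSet.equals(Set.of("a", "c"))); // false
    }
}

This contents-only contract is what the examples below rely on when they compare taxon sets, product sets, and URL sets across different Set implementations.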

Usage

From source file:beast.evolution.tree.SimpleRandomTree.java

public void doTheWork() {
    // find taxon sets we are dealing with
    taxonSets = new ArrayList<>();
    m_bounds = new ArrayList<>();
    distributions = new ArrayList<>();
    taxonSetIDs = new ArrayList<>();
    List<Boolean> onParent = new ArrayList<>();
    lastMonophyletic = 0;

    if (taxaInput.get() != null) {
        sTaxa.addAll(taxaInput.get().getTaxaNames());
    } else {
        sTaxa.addAll(m_taxonset.get().asStringList());
    }

    // pick up constraints from outputs, m_initial input tree and output tree, if any
    List<MRCAPrior> calibrations = new ArrayList<MRCAPrior>();
    calibrations.addAll(calibrationsInput.get());

    // pick up constraints in m_initial tree
    for (final Object plugin : getOutputs()) {
        if (plugin instanceof MRCAPrior && !calibrations.contains(plugin)) {
            calibrations.add((MRCAPrior) plugin);
        }
    }

    if (m_initial.get() != null) {
        for (final Object plugin : m_initial.get().getOutputs()) {
            if (plugin instanceof MRCAPrior && !calibrations.contains(plugin)) {
                calibrations.add((MRCAPrior) plugin);
            }
        }
    }

    for (final MRCAPrior prior : calibrations) {
        final TaxonSet taxonSet = prior.taxonsetInput.get();
        if (taxonSet != null && !prior.onlyUseTipsInput.get()) {
            final Set<String> bTaxa = new LinkedHashSet<>();
            if (taxonSet.asStringList() == null) {
                taxonSet.initAndValidate();
            }
            for (final String sTaxonID : taxonSet.asStringList()) {

                if (!sTaxa.contains(sTaxonID)) {
                    throw new IllegalArgumentException(
                            "Taxon <" + sTaxonID + "> could not be found in list of taxa. Choose one of "
                                    + Arrays.toString(sTaxa.toArray(new String[sTaxa.size()])));
                }
                bTaxa.add(sTaxonID);
            }
            final ParametricDistribution distr = prior.distInput.get();
            final Bound bounds = new Bound();
            if (distr != null) {
                List<BEASTInterface> plugins = new ArrayList<>();
                distr.getPredecessors(plugins);
                for (int i = plugins.size() - 1; i >= 0; i--) {
                    plugins.get(i).initAndValidate();
                }
                try {
                    final double offset = distr.offsetInput.get();
                    bounds.lower = Math.max(distr.inverseCumulativeProbability(0.0) + offset, 0.0);
                    bounds.upper = distr.inverseCumulativeProbability(1.0) + offset;
                    assert bounds.lower <= bounds.upper;
                } catch (MathException e) {
                    Log.warning
                            .println("Could not set bounds in SimpleRandomTree::doTheWork : " + e.getMessage());
                }
            }

            if (prior.isMonophyleticInput.get() || bTaxa.size() == 1) {
                // add any monophyletic constraint
                boolean isDuplicate = false;
                for (int k = 0; k < lastMonophyletic; ++k) {
                    // assert prior.useOriginateInput.get().equals(onParent.get(k)) == (prior.useOriginateInput.get() == onParent.get(k));
                    if (bTaxa.size() == taxonSets.get(k).size() && bTaxa.equals(taxonSets.get(k))
                            && prior.useOriginateInput.get().equals(onParent.get(k))) {
                        if (distr != null) {
                            if (distributions.get(k) == null) {
                                distributions.set(k, distr);
                                m_bounds.set(k, bounds);
                                taxonSetIDs.set(k, prior.getID());
                            }
                        }
                        isDuplicate = true;
                    }
                }
                if (!isDuplicate) {
                    taxonSets.add(lastMonophyletic, bTaxa);
                    distributions.add(lastMonophyletic, distr);
                    onParent.add(lastMonophyletic, prior.useOriginateInput.get());
                    m_bounds.add(lastMonophyletic, bounds);
                    taxonSetIDs.add(lastMonophyletic, prior.getID());
                    lastMonophyletic++;
                }
            } else {
                // only calibrations with finite bounds are added
                if (!Double.isInfinite(bounds.lower) || !Double.isInfinite(bounds.upper)) {
                    taxonSets.add(bTaxa);
                    distributions.add(distr);
                    m_bounds.add(bounds);
                    taxonSetIDs.add(prior.getID());
                    onParent.add(prior.useOriginateInput.get());
                }
            }
        }
    }

    if (ICC) {
        for (int i = 0; i < lastMonophyletic; i++) {
            final Set<String> ti = taxonSets.get(i);
            for (int j = i + 1; j < lastMonophyletic; j++) {
                final Set<String> tj = taxonSets.get(j);
                boolean i_in_j = tj.containsAll(ti);
                boolean j_in_i = ti.containsAll(tj);
                if (i_in_j || j_in_i) {
                    boolean ok = true;
                    if (i_in_j && j_in_i) {
                        ok = (boolean) (onParent.get(i)) != (boolean) onParent.get(j);
                    }
                    assert ok : "" + i + ' ' + j + ' ' + ' ' + taxonSetIDs.get(i) + ' ' + taxonSetIDs.get(j);
                } else {
                    Set<String> tmp = new HashSet<>(tj);
                    tmp.retainAll(ti);
                    assert tmp.isEmpty();
                }
            }
        }
    }

    // assume all calibration constraints are Monophyletic
    // TODO: verify that this is a reasonable assumption
    lastMonophyletic = taxonSets.size();

    // sort constraints in increasing set inclusion order, i.e. such that if taxon set i is subset of taxon set j, then i < j
    for (int i = 0; i < lastMonophyletic; i++) {
        for (int j = i + 1; j < lastMonophyletic; j++) {

            final Set<String> taxai = taxonSets.get(i);
            final Set<String> taxaj = taxonSets.get(j);
            Set<String> intersection = new LinkedHashSet<>(taxai);
            intersection.retainAll(taxaj);

            if (intersection.size() > 0) {
                final boolean bIsSubset = taxai.containsAll(taxaj);
                final boolean bIsSubset2 = taxaj.containsAll(taxai);
                // sanity check: make sure either
                // o taxonset1 is subset of taxonset2 OR
                // o taxonset1 is superset of taxonset2 OR
                // o taxonset1 does not intersect taxonset2
                if (!(bIsSubset || bIsSubset2)) {
                    throw new IllegalArgumentException(
                            "333: Don't know how to generate a Random Tree for taxon sets that intersect, "
                                    + "but are not inclusive. Taxonset "
                                    + (taxonSetIDs.get(i) == null ? taxai : taxonSetIDs.get(i)) + " and "
                                    + (taxonSetIDs.get(j) == null ? taxaj : taxonSetIDs.get(j)));
                }
                // swap i & j if b1 subset of b2. If equal sub-sort on 'useOriginate'
                if (bIsSubset && (!bIsSubset2 || (onParent.get(i) && !onParent.get(j)))) {
                    swap(taxonSets, i, j);
                    swap(distributions, i, j);
                    swap(m_bounds, i, j);
                    swap(taxonSetIDs, i, j);
                    swap(onParent, i, j);
                }
            }
        }
    }

    if (ICC) {
        for (int i = 0; i < lastMonophyletic; i++) {
            final Set<String> ti = taxonSets.get(i);
            for (int j = i + 1; j < lastMonophyletic; j++) {
                final Set<String> tj = taxonSets.get(j);
                boolean ok = tj.containsAll(ti);
                if (ok) {
                    ok = !tj.equals(ti) || (!onParent.get(i) && onParent.get(j));
                    assert ok : "" + i + ' ' + j + ' ' + tj.equals(ti) + ' ' + taxonSetIDs.get(i) + ' '
                            + taxonSetIDs.get(j);
                } else {
                    Set<String> tmp = new HashSet<>(tj);
                    tmp.retainAll(ti);
                    assert tmp.isEmpty();
                }
            }
        }
    }

    for (int i = 0; i < lastMonophyletic; i++) {
        if (onParent.get(i)) {
            // make sure it is after constraint on node itself, if such exists
            assert (!(i + 1 < lastMonophyletic && taxonSets.get(i).equals(taxonSets.get(i + 1))
                    && onParent.get(i) && !onParent.get(i + 1)));
            // find something to attach to ....
            // find enclosing clade, if any. pick a non-intersecting clade in the enclosed without an onParent constraint, or one whose
            // onParent constraint is overlapping.
            final Set<String> iTaxa = taxonSets.get(i);
            int j = i + 1;
            Set<String> enclosingTaxa = sTaxa;
            {
                String someTaxon = iTaxa.iterator().next();
                for (/**/; j < lastMonophyletic; j++) {
                    if (taxonSets.get(j).contains(someTaxon)) {
                        enclosingTaxa = taxonSets.get(j);
                        break;
                    }
                }
            }
            final int enclosingIndex = (j == lastMonophyletic) ? j : j;
            Set<String> candidates = new HashSet<>(enclosingTaxa);
            candidates.removeAll(iTaxa);
            Set<Integer> candidateClades = new HashSet<>(5);
            List<String> canTaxa = new ArrayList<>();
            for (String c : candidates) {
                for (int k = enclosingIndex - 1; k >= 0; --k) {
                    if (taxonSets.get(k).contains(c)) {
                        if (!candidateClades.contains(k)) {
                            if (onParent.get(k)) {
                                if (!intersecting(m_bounds.get(k), m_bounds.get(i))) {
                                    break;
                                }
                            } else {
                                if (!(m_bounds.get(k).lower <= m_bounds.get(i).lower)) {
                                    break;
                                }
                            }
                            candidateClades.add(k);
                        }
                        break;
                    }
                    if (k == 0) {
                        canTaxa.add(c);
                    }
                }
            }

            final int sz1 = canTaxa.size();
            final int sz2 = candidateClades.size();

            if (sz1 + sz2 == 0 && i + 1 == enclosingIndex) {
                final Bound ebound = m_bounds.get(enclosingIndex);
                ebound.restrict(m_bounds.get(i));
            } else {
                assert sz1 + sz2 > 0;
                // prefer taxa over clades (less chance of clades useOriginate clashing)
                final int k = Randomizer.nextInt(sz1 > 0 ? sz1 : sz2);
                Set<String> connectTo;
                int insertPoint;
                if (k < sz1) {
                    // from taxa
                    connectTo = new HashSet<>(1);
                    connectTo.add(canTaxa.get(k));
                    insertPoint = i + 1;
                } else {
                    // from clade
                    final Iterator<Integer> it = candidateClades.iterator();
                    for (j = 0; j < k - sz1 - 1; ++j) {
                        it.next();
                    }
                    insertPoint = it.next();
                    connectTo = new HashSet<>(taxonSets.get(insertPoint));
                    insertPoint = Math.max(insertPoint, i) + 1;
                }

                final HashSet<String> cc = new HashSet<String>(connectTo);

                connectTo.addAll(taxonSets.get(i));
                if (!connectTo.equals(enclosingTaxa) || enclosingTaxa == sTaxa) { // equal when clade already exists

                    taxonSets.add(insertPoint, connectTo);
                    distributions.add(insertPoint, distributions.get(i));
                    onParent.add(insertPoint, false);
                    m_bounds.add(insertPoint, m_bounds.get(i));
                    final String tid = taxonSetIDs.get(i);
                    taxonSetIDs.add(insertPoint, tid);
                    lastMonophyletic += 1;
                } else {
                    // we lose distribution i :(
                    final Bound ebound = m_bounds.get(enclosingIndex);
                    ebound.restrict(m_bounds.get(i));
                }
            }
            if (true) {
                taxonSets.set(i, new HashSet<>());
                distributions.set(i, null);
                m_bounds.set(i, new Bound());
                final String tid = taxonSetIDs.get(i);
                if (tid != null) {
                    taxonSetIDs.set(i, "was-" + tid);
                }
            }
        }
    }

    {
        int icur = 0;
        for (int i = 0; i < lastMonophyletic; ++i, ++icur) {
            final Set<String> ti = taxonSets.get(i);
            if (ti.isEmpty()) {
                icur -= 1;
            } else {
                if (icur < i) {
                    taxonSets.set(icur, taxonSets.get(i));
                    distributions.set(icur, distributions.get(i));
                    m_bounds.set(icur, m_bounds.get(i));
                    taxonSetIDs.set(icur, taxonSetIDs.get(i));
                    onParent.set(icur, onParent.get(i));
                }
            }
        }
        taxonSets.subList(icur, lastMonophyletic).clear();
        distributions.subList(icur, lastMonophyletic).clear();
        m_bounds.subList(icur, lastMonophyletic).clear();
        taxonSetIDs.subList(icur, lastMonophyletic).clear();
        onParent.subList(icur, lastMonophyletic).clear();

        lastMonophyletic = icur;
    }

    if (ICC) {
        for (int i = 0; i < lastMonophyletic; i++) {
            final Set<String> ti = taxonSets.get(i);
            for (int j = i + 1; j < lastMonophyletic; j++) {
                final Set<String> tj = taxonSets.get(j);
                boolean ok = tj.containsAll(ti);
                if (ok) {
                    ok = !tj.equals(ti) || (!onParent.get(i) && onParent.get(j));
                    assert ok : "" + i + ' ' + j + ' ' + taxonSetIDs.get(i) + ' ' + taxonSetIDs.get(j);
                } else {
                    Set<String> tmp = new HashSet<>(tj);
                    tmp.retainAll(ti);
                    assert tmp.isEmpty();
                }
            }
        }
    }

    // map parent child relationships between mono clades. nParent[i] is the immediate parent clade of i, if any. An immediate parent is the
    // smallest superset of i, children[i] is a list of all clades which have i as a parent.
    // The last one, standing for the virtual "root" of all monophyletic clades is not associated with any actual clade
    final int[] nParent = new int[lastMonophyletic];
    children = new List[lastMonophyletic + 1];
    for (int i = 0; i < lastMonophyletic + 1; i++) {
        children[i] = new ArrayList<>();
    }
    for (int i = 0; i < lastMonophyletic; i++) {
        int j = i + 1;
        while (j < lastMonophyletic && !taxonSets.get(j).containsAll(taxonSets.get(i))) {
            j++;
        }
        nParent[i] = j;
        children[j].add(i);
    }

    // make sure upper bounds of a child does not exceed the upper bound of its parent
    for (int i = lastMonophyletic - 1; i >= 0; --i) {
        if (nParent[i] < lastMonophyletic) {
            if (m_bounds.get(i).upper > m_bounds.get(nParent[i]).upper) {
                m_bounds.get(i).upper = m_bounds.get(nParent[i]).upper - 1e-100;
                assert m_bounds.get(i).lower <= m_bounds.get(i).upper : i;
            }
        }
    }

    nodeCount = 2 * sTaxa.size() - 1;
    boundPerNode = new Bound[nodeCount];
    distPerNode = new ParametricDistribution[nodeCount];

    buildTree(sTaxa);
    assert nextNodeNr == nodeCount : "" + nextNodeNr + ' ' + nodeCount;

    double bm = branchMeanInput.get();

    if (bm < 0) {
        double maxMean = 0;

        for (ParametricDistribution distr : distPerNode) {
            if (distr != null) {
                double m = distr.getMean();
                if (maxMean < m)
                    maxMean = m;
            }
        }
        if (maxMean > 0) {
            double s = 0;
            for (int i = 2; i <= nodeCount; ++i) {
                s += 1.0 / i;
            }
            bm = s / maxMean;
        }
    }

    double rate = 1 / (bm < 0 ? 1 : bm);
    boolean succ = false;
    int ntries = 6;
    final double epsi = 0.01 / rate;
    double clamp = 1 - clampInput.get();
    while (!succ && ntries > 0) {
        try {
            succ = setHeights(rate, false, epsi, clamp);
        } catch (ConstraintViolatedException e) {
            throw new RuntimeException("Constraint failed: " + e.getMessage());
        }
        --ntries;
        rate *= 2;
        clamp /= 2;
    }
    if (!succ) {
        try {
            succ = setHeights(rate, true, 0, 0);
        } catch (ConstraintViolatedException e) {
            throw new RuntimeException("Constraint failed: " + e.getMessage());
        }
    }
    assert succ;

    internalNodeCount = sTaxa.size() - 1;
    leafNodeCount = sTaxa.size();

    HashMap<String, Integer> taxonToNR = null;
    // preserve node numbers where possible
    if (m_initial.get() != null) {
        taxonToNR = new HashMap<>();
        for (Node n : m_initial.get().getExternalNodes()) {
            taxonToNR.put(n.getID(), n.getNr());
        }
    }
    // re-assign node numbers
    setNodesNrs(root, 0, new int[1], taxonToNR);

    initArrays();
}

From source file:de.schildbach.pte.VrsProvider.java

@Override
public QueryTripsResult queryTrips(final Location from, final @Nullable Location via, final Location to,
        Date date, boolean dep, final @Nullable Set<Product> products, final @Nullable Optimize optimize,
        final @Nullable WalkSpeed walkSpeed, final @Nullable Accessibility accessibility,
        @Nullable Set<Option> options) throws IOException {
    // The EXACT_POINTS feature generates a roughly 50% bigger API response, which is probably highly compressible.
    final boolean EXACT_POINTS = true;
    final List<Location> ambiguousFrom = new ArrayList<>();
    String fromString = generateLocation(from, ambiguousFrom);

    final List<Location> ambiguousVia = new ArrayList<>();
    String viaString = generateLocation(via, ambiguousVia);

    final List<Location> ambiguousTo = new ArrayList<>();
    String toString = generateLocation(to, ambiguousTo);

    if (!ambiguousFrom.isEmpty() || !ambiguousVia.isEmpty() || !ambiguousTo.isEmpty()) {
        return new QueryTripsResult(new ResultHeader(NetworkId.VRS, SERVER_PRODUCT),
                ambiguousFrom.isEmpty() ? null : ambiguousFrom, ambiguousVia.isEmpty() ? null : ambiguousVia,
                ambiguousTo.isEmpty() ? null : ambiguousTo);
    }

    if (fromString == null) {
        return new QueryTripsResult(new ResultHeader(NetworkId.VRS, SERVER_PRODUCT),
                QueryTripsResult.Status.UNKNOWN_FROM);
    }
    if (via != null && viaString == null) {
        return new QueryTripsResult(new ResultHeader(NetworkId.VRS, SERVER_PRODUCT),
                QueryTripsResult.Status.UNKNOWN_VIA);
    }
    if (toString == null) {
        return new QueryTripsResult(new ResultHeader(NetworkId.VRS, SERVER_PRODUCT),
                QueryTripsResult.Status.UNKNOWN_TO);
    }

    final HttpUrl.Builder url = API_BASE.newBuilder();
    url.addQueryParameter("eID", "tx_vrsinfo_ass2_router");
    url.addQueryParameter("f", fromString);
    url.addQueryParameter("t", toString);
    if (via != null) {
        url.addQueryParameter("v", via.id);
    }
    url.addQueryParameter(dep ? "d" : "a", formatDate(date));
    url.addQueryParameter("s", "t");
    if (!products.equals(Product.ALL))
        url.addQueryParameter("p", generateProducts(products));
    url.addQueryParameter("o", "v" + (EXACT_POINTS ? "p" : ""));

    final CharSequence page = httpClient.get(url.build());

    try {
        final List<Trip> trips = new ArrayList<>();
        final JSONObject head = new JSONObject(page.toString());
        final String error = Strings.emptyToNull(head.optString("error", "").trim());
        if (error != null) {
            if (error.equals("ASS2-Server lieferte leere Antwort."))
                return new QueryTripsResult(new ResultHeader(NetworkId.VRS, SERVER_PRODUCT),
                        QueryTripsResult.Status.SERVICE_DOWN);
            else if (error.equals("Zeitberschreitung bei der Verbindung zum ASS2-Server"))
                return new QueryTripsResult(new ResultHeader(NetworkId.VRS, SERVER_PRODUCT),
                        QueryTripsResult.Status.SERVICE_DOWN);
            else if (error.equals("Server Error"))
                return new QueryTripsResult(new ResultHeader(NetworkId.VRS, SERVER_PRODUCT),
                        QueryTripsResult.Status.SERVICE_DOWN);
            else if (error.equals("Keine Verbindungen gefunden."))
                return new QueryTripsResult(new ResultHeader(NetworkId.VRS, SERVER_PRODUCT),
                        QueryTripsResult.Status.NO_TRIPS);
            else if (error.startsWith("Keine Verbindung gefunden."))
                return new QueryTripsResult(new ResultHeader(NetworkId.VRS, SERVER_PRODUCT),
                        QueryTripsResult.Status.NO_TRIPS);
            else if (error.equals("Origin invalid."))
                return new QueryTripsResult(new ResultHeader(NetworkId.VRS, SERVER_PRODUCT),
                        QueryTripsResult.Status.UNKNOWN_FROM);
            else if (error.equals("Via invalid."))
                return new QueryTripsResult(new ResultHeader(NetworkId.VRS, SERVER_PRODUCT),
                        QueryTripsResult.Status.UNKNOWN_VIA);
            else if (error.equals("Destination invalid."))
                return new QueryTripsResult(new ResultHeader(NetworkId.VRS, SERVER_PRODUCT),
                        QueryTripsResult.Status.UNKNOWN_TO);
            else if (error.equals("Fehlerhaftes Ziel"))
                return new QueryTripsResult(new ResultHeader(NetworkId.VRS, SERVER_PRODUCT),
                        QueryTripsResult.Status.UNKNOWN_TO);
            else if (error.equals("Produkt ungltig."))
                return new QueryTripsResult(new ResultHeader(NetworkId.VRS, SERVER_PRODUCT),
                        QueryTripsResult.Status.NO_TRIPS);
            else if (error.equals("Keine Route."))
                return new QueryTripsResult(new ResultHeader(NetworkId.VRS, SERVER_PRODUCT),
                        QueryTripsResult.Status.NO_TRIPS);
            else
                throw new IllegalStateException("unknown error: " + error);
        }
        final JSONArray routes = head.getJSONArray("routes");
        final Context context = new Context();
        // for all routes
        for (int iRoute = 0; iRoute < routes.length(); iRoute++) {
            final JSONObject route = routes.getJSONObject(iRoute);
            final JSONArray segments = route.getJSONArray("segments");
            List<Leg> legs = new ArrayList<>();
            Location tripOrigin = null;
            Location tripDestination = null;
            // for all segments
            for (int iSegment = 0; iSegment < segments.length(); iSegment++) {
                final JSONObject segment = segments.getJSONObject(iSegment);
                final String type = segment.getString("type");
                final JSONObject origin = segment.getJSONObject("origin");
                final LocationWithPosition segmentOriginLocationWithPosition = parseLocationAndPosition(origin);
                Location segmentOrigin = segmentOriginLocationWithPosition.location;
                final Position segmentOriginPosition = segmentOriginLocationWithPosition.position;
                if (iSegment == 0) {
                    // special case: first origin is an address
                    if (from.type == LocationType.ADDRESS) {
                        segmentOrigin = from;
                    }
                    tripOrigin = segmentOrigin;
                }
                final JSONObject destination = segment.getJSONObject("destination");
                final LocationWithPosition segmentDestinationLocationWithPosition = parseLocationAndPosition(
                        destination);
                Location segmentDestination = segmentDestinationLocationWithPosition.location;
                final Position segmentDestinationPosition = segmentDestinationLocationWithPosition.position;
                if (iSegment == segments.length() - 1) {
                    // special case: last destination is an address
                    if (to.type == LocationType.ADDRESS) {
                        segmentDestination = to;
                    }
                    tripDestination = segmentDestination;
                }
                List<Stop> intermediateStops = new ArrayList<>();
                final JSONArray vias = segment.optJSONArray("vias");
                if (vias != null) {
                    for (int iVia = 0; iVia < vias.length(); iVia++) {
                        final JSONObject viaJsonObject = vias.getJSONObject(iVia);
                        final LocationWithPosition viaLocationWithPosition = parseLocationAndPosition(
                                viaJsonObject);
                        final Location viaLocation = viaLocationWithPosition.location;
                        final Position viaPosition = viaLocationWithPosition.position;
                        Date arrivalPlanned = null;
                        Date arrivalPredicted = null;
                        if (viaJsonObject.has("arrivalScheduled")) {
                            arrivalPlanned = parseDateTime(viaJsonObject.getString("arrivalScheduled"));
                            arrivalPredicted = (viaJsonObject.has("arrival"))
                                    ? parseDateTime(viaJsonObject.getString("arrival"))
                                    : null;
                        } else if (segment.has("arrival")) {
                            arrivalPlanned = parseDateTime(viaJsonObject.getString("arrival"));
                        }
                        final Stop intermediateStop = new Stop(viaLocation, false /* arrival */, arrivalPlanned,
                                arrivalPredicted, viaPosition, viaPosition);
                        intermediateStops.add(intermediateStop);
                    }
                }
                Date departurePlanned = null;
                Date departurePredicted = null;
                if (segment.has("departureScheduled")) {
                    departurePlanned = parseDateTime(segment.getString("departureScheduled"));
                    departurePredicted = (segment.has("departure"))
                            ? parseDateTime(segment.getString("departure"))
                            : null;
                    if (iSegment == 0) {
                        context.departure(departurePredicted);
                    }
                } else if (segment.has("departure")) {
                    departurePlanned = parseDateTime(segment.getString("departure"));
                    if (iSegment == 0) {
                        context.departure(departurePlanned);
                    }
                }
                Date arrivalPlanned = null;
                Date arrivalPredicted = null;
                if (segment.has("arrivalScheduled")) {
                    arrivalPlanned = parseDateTime(segment.getString("arrivalScheduled"));
                    arrivalPredicted = (segment.has("arrival")) ? parseDateTime(segment.getString("arrival"))
                            : null;
                    if (iSegment == segments.length() - 1) {
                        context.arrival(arrivalPredicted);
                    }
                } else if (segment.has("arrival")) {
                    arrivalPlanned = parseDateTime(segment.getString("arrival"));
                    if (iSegment == segments.length() - 1) {
                        context.arrival(arrivalPlanned);
                    }
                }
                long traveltime = segment.getLong("traveltime");
                long distance = segment.optLong("distance", 0);
                Line line = null;
                String direction = null;
                JSONObject lineObject = segment.optJSONObject("line");
                if (lineObject != null) {
                    line = parseLine(lineObject);
                    direction = lineObject.optString("direction", null);
                }
                StringBuilder message = new StringBuilder();
                JSONArray infos = segment.optJSONArray("infos");
                if (infos != null) {
                    for (int k = 0; k < infos.length(); k++) {
                        // TODO there can also be a "header" string
                        if (k > 0) {
                            message.append(", ");
                        }
                        message.append(infos.getJSONObject(k).getString("text"));
                    }
                }

                List<Point> points = new ArrayList<>();
                points.add(new Point(segmentOrigin.lat, segmentOrigin.lon));
                if (EXACT_POINTS && segment.has("polygon")) {
                    parsePolygon(segment.getString("polygon"), points);
                } else {
                    for (Stop intermediateStop : intermediateStops) {
                        points.add(new Point(intermediateStop.location.lat, intermediateStop.location.lon));
                    }
                }
                points.add(new Point(segmentDestination.lat, segmentDestination.lon));
                if (type.equals("walk")) {
                    if (departurePlanned == null)
                        departurePlanned = legs.get(legs.size() - 1).getArrivalTime();
                    if (arrivalPlanned == null)
                        arrivalPlanned = new Date(departurePlanned.getTime() + traveltime * 1000);

                    legs.add(new Trip.Individual(Trip.Individual.Type.WALK, segmentOrigin, departurePlanned,
                            segmentDestination, arrivalPlanned, points, (int) distance));
                } else if (type.equals("publicTransport")) {
                    legs.add(new Trip.Public(line,
                            direction != null
                                    ? new Location(LocationType.STATION, null /* id */, null /* place */,
                                            direction)
                                    : null,
                            new Stop(segmentOrigin, true /* departure */, departurePlanned, departurePredicted,
                                    segmentOriginPosition, segmentOriginPosition),
                            new Stop(segmentDestination, false /* departure */, arrivalPlanned,
                                    arrivalPredicted, segmentDestinationPosition, segmentDestinationPosition),
                            intermediateStops, points, Strings.emptyToNull(message.toString())));
                } else {
                    throw new IllegalStateException("unhandled type: " + type);
                }
            }
            int changes = route.getInt("changes");
            List<Fare> fares = parseFare(route.optJSONObject("costs"));

            trips.add(new Trip(null /* id */, tripOrigin, tripDestination, legs, fares, null /* capacity */,
                    changes));
        }
        long serverTime = parseDateTime(head.getString("generated")).getTime();
        final ResultHeader header = new ResultHeader(NetworkId.VRS, SERVER_PRODUCT, null, null, serverTime,
                null);
        context.from = from;
        context.to = to;
        context.via = via;
        context.products = products;
        if (trips.size() == 1) {
            if (dep)
                context.disableLater();
            else
                context.disableEarlier();
        }
        return new QueryTripsResult(header, url.build().toString(), from, via, to, context, trips);
    } catch (final JSONException x) {
        throw new RuntimeException("cannot parse: '" + page + "' on " + url, x);
    } catch (final ParseException e) {
        throw new RuntimeException("cannot parse: '" + page + "' on " + url, e);
    }
}

From source file:org.commoncrawl.service.listcrawler.CrawlHistoryManager.java

private static void launchInTestMode() {

    File baseTestDir = new File("/tmp/logManagerTest");
    FileUtils.recursivelyDeleteFile(baseTestDir);
    baseTestDir.mkdir();
    File remoteDir = new File(baseTestDir, "remote");
    File localDir = new File(baseTestDir, "local");
    remoteDir.mkdir();
    localDir.mkdir();

    final TreeMap<String, URLFP> urlToFPMap = new TreeMap<String, URLFP>();
    final TreeMap<URLFP, String> urlFPToString = new TreeMap<URLFP, String>();

    Set<String> list1 = Sets.newHashSet(urlList1);
    Set<String> list2 = Sets.newHashSet(urlList2);
    final Set<String> combined = Sets.union(list1, list2);
    Set<String> difference = Sets.difference(list1, list2);
    final Set<String> completedURLS = new HashSet<String>();
    for (String url : combined) {
        URLFP fingerprint = URLUtils.getURLFPFromURL(url, true);
        urlToFPMap.put(url, fingerprint);
        urlFPToString.put(fingerprint, url);
    }

    File testInputFile1 = new File(localDir, "INPUT_LIST-" + System.currentTimeMillis());
    File testInputFile2 = new File(localDir, "INPUT_LIST-" + (System.currentTimeMillis() + 1));

    try {

        generateTestURLFile(testInputFile1, urlList1);
        generateTestURLFile(testInputFile2, urlList2);

        FileSystem localFileSystem = FileSystem.getLocal(CrawlEnvironment.getHadoopConfig());

        EventLoop eventLoop = new EventLoop();
        eventLoop.start();

        final CrawlHistoryManager logManager = new CrawlHistoryManager(localFileSystem,
                new Path(remoteDir.getAbsolutePath()), localDir, eventLoop, 0);

        final LinkedBlockingQueue<ProxyCrawlHistoryItem> queue = new LinkedBlockingQueue<ProxyCrawlHistoryItem>();

        final Semaphore initialListComplete = new Semaphore(0);

        logManager.startQueueLoaderThread(new CrawlQueueLoader() {

            @Override
            public void queueURL(URLFP urlfp, String url) {
                ProxyCrawlHistoryItem item = new ProxyCrawlHistoryItem();
                item.setOriginalURL(url);
                queue.add(item);
            }

            @Override
            public void flush() {
                // TODO Auto-generated method stub

            }
        });

        Thread queueTestThread = new Thread(new Runnable() {

            @Override
            public void run() {
                while (true) {
                    try {
                        ProxyCrawlHistoryItem item = queue.take();

                        if (item.getOriginalURL().length() == 0) {
                            break;
                        } else {

                            System.out.println("Got:" + item.getOriginalURL());

                            CrawlURL urlObject = new CrawlURL();

                            Assert.assertTrue(!completedURLS.contains(item.getOriginalURL()));
                            completedURLS.add(item.getOriginalURL());

                            urlObject.setLastAttemptResult((byte) CrawlURL.CrawlResult.SUCCESS);
                            urlObject.setUrl(item.getOriginalURL());
                            urlObject.setResultCode(200);

                            logManager.crawlComplete(urlObject);

                            if (completedURLS.equals(combined)) {
                                System.out.println("Hit Trigger URL. Releasing InitialListComplete Sempahore");
                                initialListComplete.release(1);
                            }
                        }

                    } catch (InterruptedException e) {
                    }
                }
            }

        });

        queueTestThread.start();

        logManager.loadList(testInputFile1, 0);
        logManager.loadList(testInputFile2, 0);
        System.out.println("Waiting for Initial List to Complete");
        initialListComplete.acquireUninterruptibly();
        System.out.println("Woke Up");

        try {
            eventLoop.getEventThread().join();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }

    } catch (IOException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
}

From source file:com.tesora.dve.tools.aitemplatebuilder.AiTemplateBuilder.java

/**
 * Handles collocation special cases.
 * 
 * 1. A table is being referenced on a single column group.
 * 
 * a) The table is referenced by two or more unique column groups from a
 * single table.
 * The solution which preserves collocation is to make the
 * table and all the tables it points at Broadcast.
 * 
 * b) The table has unique foreign and target column groups. In other words,
 * the table is being pointed at and points on two or more unique column
 * groups.
 * The only solution is making the pointed tables
 * and all their descendants Broadcast.
 * 
 * 2. A table is being referenced on two or more unique column groups. The
 * only solution in this case is making the table and all the tables it
 * points at Broadcast.
 * 
 * 
 * NOTE: Basic one-to-one collocation cases:
 * 
 * a) Range -> Broadcast: always collocated
 * b) Range -> Range: collocated only if in the same range.
 * c) Broadcast -> Range: make the referenced table Broadcast (a), or
 * collocate the two tables on the same range (b).
 * 
 * NOTE: Same rules hold for self-referencing relationships (table with a
 * foreign key into itself).
 */
private void resolveForeignCollocationConflicts(final Collection<TableStats> tables,
        final boolean isRowWidthWeightingEnabled) throws PEException {
    log("Resolving FK collocation...");

    /*
     * Make sure there are no Broadcast -> Range relationships (c) by making
     * both tables Range or Broadcast if cannot range (user defined).
     */
    for (final TableStats table : tables) {
        if (table.getTableDistributionModel() instanceof Range) {
            for (final TableStats childTable : table.getReferencingForeignTables()) {
                if (childTable.getTableDistributionModel() instanceof Broadcast) {
                    if (!childTable.hasDistributionModelFreezed()) {
                        final Set<TableStats> affectedTables = makeBackwardTableTreeRange(childTable);
                        log("FK forced range: range table '" + table.getFullTableName()
                                + "' is referenced by a broadcast table '" + childTable.getFullTableName()
                                + "'. Had to range '" + affectedTables.size() + "' table(s).",
                                MessageSeverity.ALERT);
                    } else {
                        final Set<TableStats> forcedBroadcastTables = makeForwardTableTreeBroadcast(childTable);
                        log("FK forced broadcast: Could not range table '" + childTable.getFullTableName()
                                + "' (user defined). Had to broadcast '" + forcedBroadcastTables.size()
                                + "' table(s) with total size of '"
                                + CorpusStats.computeTotalSizeKb(forcedBroadcastTables) + "KB'",
                                MessageSeverity.WARNING);
                        forcedBroadcastTables.addAll(forcedBroadcastTables);
                    }
                }
            }
        }
    }

    /*
     * Now, we should have only Range -> Broadcast (a) and Range -> Range
     * (b) relationships.
     */

    final SortedSet<TableStats> forcedBroadcastTables = new TreeSet<TableStats>(
            Collections.reverseOrder(new TableSizeComparator(isRowWidthWeightingEnabled)));

    /* Resolve the special cases. */
    for (final TableStats table : tables) {

        /*
         * Here we handle only the Range -> Range (b) case.
         * Range -> Broadcast (a) is always collocated and we never change
         * the parent table's model to anything other than Broadcast.
         */
        if (!table.hasDistributionModelFreezed() && (table.getTableDistributionModel() instanceof Range)) {
            final Set<Set<TableColumn>> uniqueTargetColumnGroups = table.getUniqueTargetColumnGroups();

            if (uniqueTargetColumnGroups.size() == 1) { // Case (1)

                /* Case (1a) */
                final Set<ForeignRelationship> backwardRelationships = table.getBackwardRelationships();
                for (@SuppressWarnings("unused")
                final Set<TableColumn> targetColumnGroup : uniqueTargetColumnGroups) {
                    final Set<TableStats> visitedReferencingTables = new HashSet<TableStats>();
                    for (final ForeignRelationship relationship : backwardRelationships) {
                        final TableStats targetTable = relationship.getRHS();
                        if (!visitedReferencingTables.add(targetTable)) {
                            final Set<TableStats> affectedTables = makeForwardTableTreeBroadcast(table);
                            log("FK forced broadcast: table '" + table.getFullTableName() + "' referenced by '"
                                    + targetTable.getFullTableName()
                                    + "' on two or more unique column groups. Had to broadcast '"
                                    + affectedTables.size() + "' table(s) with total size of '"
                                    + CorpusStats.computeTotalSizeKb(affectedTables) + "KB'",
                                    MessageSeverity.WARNING);
                            forcedBroadcastTables.addAll(affectedTables);
                            break;
                        }
                    }
                }

                if (!(table.getTableDistributionModel() instanceof Range)) {
                    continue; // The case already resolved from above.
                }

                /* Case (1b) */
                final Set<ForeignRelationship> forwardRelationships = table.getForwardRelationships();
                final Set<TableStats> affectedTables = new LinkedHashSet<TableStats>();
                for (final Set<TableColumn> targetColumnGroup : uniqueTargetColumnGroups) {
                    for (final ForeignRelationship relationship : forwardRelationships) {
                        if (!targetColumnGroup.equals(relationship.getForeignColumns())) {
                            final TableStats targetTable = relationship.getRHS();
                            affectedTables.addAll(makeForwardTableTreeBroadcast(targetTable));
                        }
                    }
                }

                if (!affectedTables.isEmpty()) {
                    log("FK forced broadcast: table '" + table.getFullTableName()
                            + "' has unique foreign and target column groups. Had to broadcast '"
                            + affectedTables.size() + "' table(s) with total size of '"
                            + CorpusStats.computeTotalSizeKb(affectedTables) + "KB'", MessageSeverity.WARNING);
                    forcedBroadcastTables.addAll(affectedTables);
                }

            } else if (uniqueTargetColumnGroups.size() > 1) { // Case (2)
                final Set<TableStats> affectedTables = makeForwardTableTreeBroadcast(table);
                log("FK forced broadcast: table '" + table.getFullTableName()
                        + "' referenced on two or more unique column groups. Had to broadcast '"
                        + affectedTables.size() + "' table(s) with total size of '"
                        + CorpusStats.computeTotalSizeKb(affectedTables) + "KB'", MessageSeverity.WARNING);
                forcedBroadcastTables.addAll(affectedTables);
            }
        }
    }

    /* Print out broadcasted tables. */
    log("The following tables were forced broadcast:", MessageSeverity.WARNING);
    for (final TableStats table : forcedBroadcastTables) {
        log(table.toString(), MessageSeverity.WARNING, 1);
    }
}

From source file:ddf.catalog.impl.CatalogFrameworkImplTest.java

@Test
public void testGetAllSiteNames() {
    String frameworkName = "DDF";
    CatalogProvider provider = new MockMemoryProvider("Provider", "Provider", "v1.0", "DDF",
            new HashSet<ContentType>(), true, new Date());
    List<FederatedSource> federatedSources = createDefaultFederatedSourceList(true);

    // Expected Set of Names
    Set<String> expectedNameSet = new HashSet<String>();
    expectedNameSet.add(frameworkName);
    for (FederatedSource curSite : federatedSources) {
        expectedNameSet.add(curSite.getId());
    }

    // Mock register the provider in the container
    // Mock the source poller
    SourcePoller mockPoller = mock(SourcePoller.class);
    when(mockPoller.getCachedSource(isA(Source.class))).thenReturn(null);

    FrameworkProperties frameworkProperties = new FrameworkProperties();
    frameworkProperties.setSourcePoller(mockPoller);
    Map<String, FederatedSource> sources = new HashMap<>();
    for (FederatedSource federatedSource : federatedSources) {
        sources.put(federatedSource.getId(), federatedSource);
    }
    frameworkProperties.setFederatedSources(sources);
    frameworkProperties.setCatalogProviders(Collections.singletonList(provider));

    CatalogFrameworkImpl framework = new CatalogFrameworkImpl(frameworkProperties);
    framework.bind(provider);
    framework.setId(frameworkName);

    // Returned Set of Names
    // Returned Sites
    SourceInfoRequest request = new SourceInfoRequestEnterprise(true);
    SourceInfoResponse response = null;
    try {
        response = framework.getSourceInfo(request);
    } catch (SourceUnavailableException e) {
        LOGGER.debug("SourceUnavilable", e);
        fail();
    }
    assert (response != null);
    Set<SourceDescriptor> sourceDescriptors = response.getSourceInfo();
    // should contain ONLY the original federated sites
    assertEquals(expectedNameSet.size(), sourceDescriptors.size());
    Set<String> returnedSourceIds = new HashSet<String>();

    for (SourceDescriptor sd : sourceDescriptors) {
        returnedSourceIds.add(sd.getSourceId());
    }

    for (String id : returnedSourceIds) {
        LOGGER.debug("returned sourceId: {}", id);
    }
    assertTrue(expectedNameSet.equals(returnedSourceIds));

}

From source file:com.comcast.cqs.persistence.RedisSortedSetPersistence.java

@Override
public List<CQSMessage> receiveMessage(CQSQueue queue, Map<String, String> receiveAttributes)
        throws PersistenceException, IOException, NoSuchAlgorithmException, InterruptedException,
        JSONException {

    int shard = rand.nextInt(queue.getNumberOfShards());
    int maxNumberOfMessages = 1;
    int visibilityTO = queue.getVisibilityTO();

    if (receiveAttributes != null && receiveAttributes.size() > 0) {
        if (receiveAttributes.containsKey(CQSConstants.MAX_NUMBER_OF_MESSAGES)) {
            maxNumberOfMessages = Integer.parseInt(receiveAttributes.get(CQSConstants.MAX_NUMBER_OF_MESSAGES));
        }
        if (receiveAttributes.containsKey(CQSConstants.VISIBILITY_TIMEOUT)) {
            visibilityTO = Integer.parseInt(receiveAttributes.get(CQSConstants.VISIBILITY_TIMEOUT));
        }
    }

    boolean cacheAvailable = checkCacheConsistency(queue.getRelativeUrl(), shard, false);
    List<CQSMessage> ret = new LinkedList<CQSMessage>();

    if (cacheAvailable) {

        // get the messageIds from the redis list

        ShardedJedis jedis = null;
        boolean brokenJedis = false;

        try {

            jedis = getResource();
            String key = queue.getRelativeUrl() + "-" + shard + "-Q";
            Jedis j = jedis.getShard(key);
            boolean done = false;
            Set<String> memIds;
            while (!done) {

                boolean emptyQueue = false;
                HashMap<String, String> messageIdToMemId = new HashMap<String, String>();
                List<String> messageIds = new LinkedList<String>();

                //use lua script if visibilityTO > 0
                if (visibilityTO > 0) {
                    List<String> keys = new LinkedList<String>();
                    keys.add(key);
                    List<String> args = new LinkedList<String>();
                    args.add(String.valueOf(System.currentTimeMillis() - queue.getMsgRetentionPeriod() * 1000));//min
                    args.add(String.valueOf(System.currentTimeMillis()));//max
                    args.add(String.valueOf(maxNumberOfMessages));//number of message
                    args.add(String.valueOf(System.currentTimeMillis() + visibilityTO * 1000)); //new score
                    long ts1 = System.currentTimeMillis();
                    try {
                        memIds = new HashSet<String>(
                                (List<String>) j.evalsha(luaChangeScoreToHigherSHA, keys, args));
                    } catch (JedisDataException e) {
                        if (e.getMessage().startsWith("NOSCRIPT")) {
                            luaChangeScoreToHigherSHA = new String(
                                    j.scriptLoad(luaChangeScoreToHigher.getBytes()));
                            memIds = new HashSet<String>(
                                    (List<String>) j.evalsha(luaChangeScoreToHigherSHA, keys, args));
                        } else {
                            throw e;
                        }
                    }
                    long ts2 = System.currentTimeMillis();
                    CQSControllerServlet.valueAccumulator.addToCounter(AccumulatorName.RedisTime, (ts2 - ts1));
                } else {
                    long ts1 = System.currentTimeMillis();
                    memIds = jedis.zrangeByScore(key,
                            System.currentTimeMillis() - queue.getMsgRetentionPeriod() * 1000,
                            System.currentTimeMillis(), 0, maxNumberOfMessages);
                    long ts2 = System.currentTimeMillis();
                    CQSControllerServlet.valueAccumulator.addToCounter(AccumulatorName.RedisTime, (ts2 - ts1));
                    if (memIds == null || memIds.equals("nil")) { //done
                        emptyQueue = true;
                    }

                }
                for (String memId : memIds) {
                    String messageId = getMemQueueMessageMessageId(queue.getRelativeUrlHash(), memId);
                    messageIds.add(messageId);
                    messageIdToMemId.put(messageId, memId);
                }
                if (messageIds.size() == 0) {
                    CQSMonitor.getInstance().registerEmptyResp(queue.getRelativeUrl(), 1);
                    return Collections.emptyList();
                }

                // By here messageIds holds the underlying layer's message ids.
                // Get messages from the underlying layer. The total returned may not match what was
                // in the mem cache, since master-master replication to Cassandra could mean another
                // colo deleted the message from underlying storage.

                logger.debug("event=found_msg_ids_in_redis num_mem_ids=" + messageIds.size());

                try {

                    Map<String, CQSMessage> persisMap = persistenceStorage.getMessages(queue.getRelativeUrl(),
                            messageIds);

                    for (Entry<String, CQSMessage> messageIdToMessage : persisMap.entrySet()) {

                        String memId = messageIdToMemId.get(messageIdToMessage.getKey());

                        if (memId == null) {
                            throw new IllegalStateException(
                                    "Underlying storage layer returned a message that was not requested");
                        }

                        CQSMessage message = messageIdToMessage.getValue();

                        if (message == null) {
                            logger.warn("event=message_is_null msg_id=" + messageIdToMessage.getKey());
                            //underlying layer does not have this message, remove it from Redis layer
                            jedis.zrem(key, memId);
                            continue;
                        }

                        message.setMessageId(memId);
                        message.setReceiptHandle(memId);

                        //get message-attributes and populate in message
                        Map<String, String> msgAttrs = (message.getAttributes() != null)
                                ? message.getAttributes()
                                : new HashMap<String, String>();
                        List<String> attrs = jedis.hmget(queue.getRelativeUrl() + "-" + shard + "-A-" + memId,
                                CQSConstants.REDIS_APPROXIMATE_FIRST_RECEIVE_TIMESTAMP,
                                CQSConstants.REDIS_APPROXIMATE_RECEIVE_COUNT);

                        if (attrs.get(0) == null) {
                            String firstRecvTS = Long.toString(System.currentTimeMillis());
                            jedis.hset(queue.getRelativeUrl() + "-" + shard + "-A-" + memId,
                                    CQSConstants.REDIS_APPROXIMATE_FIRST_RECEIVE_TIMESTAMP, firstRecvTS);
                            msgAttrs.put(CQSConstants.APPROXIMATE_FIRST_RECEIVE_TIMESTAMP, firstRecvTS);
                        } else {
                            msgAttrs.put(CQSConstants.APPROXIMATE_FIRST_RECEIVE_TIMESTAMP, attrs.get(0));
                        }

                        int recvCount = 1;

                        if (attrs.get(1) != null) {
                            recvCount = Integer.parseInt(attrs.get(1)) + 1;
                        }

                        jedis.hset(queue.getRelativeUrl() + "-" + shard + "-A-" + memId,
                                CQSConstants.REDIS_APPROXIMATE_RECEIVE_COUNT, Integer.toString(recvCount));
                        jedis.expire(queue.getRelativeUrl() + "-" + shard + "-A-" + memId, 3600 * 24 * 14); //14 days expiration if not deleted
                        msgAttrs.put(CQSConstants.APPROXIMATE_RECEIVE_COUNT, Integer.toString(recvCount));
                        message.setAttributes(msgAttrs);
                        ret.add(message);
                    }

                    if (ret.size() > 0) { //There may be cases where the underlying persistent message has two memIds while
                        //the cache is filling. In such cases trying to retrieve the message for the second memId may return
                        //no message. We should skip over those memIds and continue till we find at least one valid memId
                        done = true;
                    } else {
                        for (String messageId : messageIds) {
                            logger.debug("event=bad_mem_id_found msg_id=" + messageId + " action=skip_message");
                        }
                    }

                } catch (PersistenceException e1) { //If cassandra exception, push messages back
                    logger.error("event=persistence_exception num_messages=" + messageIds.size()
                            + " action=pushing_messages_back_to_redis");
                    if (visibilityTO > 0) {
                        for (String messageId : messageIds) {
                            String memId = messageIdToMemId.get(messageId);
                            jedis.zadd(queue.getRelativeUrl() + "-" + shard + "-Q", System.currentTimeMillis(),
                                    memId);
                        }
                    }
                    throw e1;
                }
            }
        } catch (JedisConnectionException e) {
            logger.warn("event=receive_message error_code=redis_unavailable num_connections="
                    + numRedisConnections.get());
            brokenJedis = true;
            trySettingCacheState(queue.getRelativeUrl(), shard, QCacheState.Unavailable);
            cacheAvailable = false;
        } finally {
            if (jedis != null) {
                returnResource(jedis, brokenJedis);
            }
        }

        CQSMonitor.getInstance().registerCacheHit(queue.getRelativeUrl(), ret.size(), ret.size(),
                CacheType.QCache); //all ids from cache
        logger.debug("event=messages_found cache=available num_messages=" + ret.size());

    } else { //get from underlying layer

        List<CQSMessage> messages = persistenceStorage.peekQueueRandom(queue.getRelativeUrl(), shard,
                maxNumberOfMessages);

        for (CQSMessage msg : messages) {
            String memId = getMemQueueMessage(msg.getMessageId()); //TODO: initialDelay is 0
            msg.setMessageId(memId);
            msg.setReceiptHandle(memId);
            Map<String, String> msgAttrs = (msg.getAttributes() != null) ? msg.getAttributes()
                    : new HashMap<String, String>();
            msgAttrs.put(CQSConstants.APPROXIMATE_RECEIVE_COUNT, "1");
            msgAttrs.put(CQSConstants.APPROXIMATE_FIRST_RECEIVE_TIMESTAMP,
                    Long.toString(System.currentTimeMillis()));
            msg.setAttributes(msgAttrs);
            ret.add(msg);
        }
        // in this case there is no message hiding          
        CQSMonitor.getInstance().registerCacheHit(queue.getRelativeUrl(), 0, ret.size(), CacheType.QCache); //all ids missed cache
        logger.debug("event=messages_found cache=unavailable num_messages=" + ret.size());
    }

    if (ret.size() == 0) {
        CQSMonitor.getInstance().registerEmptyResp(queue.getRelativeUrl(), 1);
    }

    return ret;
}
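
A minimal, self-contained sketch of the receive-attribute bookkeeping used above, assuming a reachable local Redis instance; the key layout and field names here are illustrative stand-ins for the CQS constants, not the actual implementation:

import java.util.List;

import redis.clients.jedis.Jedis;

public class ReceiveAttributeSketch {
    public static void main(String[] args) {
        try (Jedis jedis = new Jedis("localhost")) {
            // hypothetical "<queueUrl>-<shard>-A-<memId>" attribute key
            String attrKey = "/myQueue-0-A-someMemId";

            // the first-receive timestamp is written only once...
            List<String> attrs = jedis.hmget(attrKey, "firstReceiveTS", "receiveCount");
            if (attrs.get(0) == null) {
                jedis.hset(attrKey, "firstReceiveTS", Long.toString(System.currentTimeMillis()));
            }

            // ...while the receive count is incremented on every read
            int recvCount = (attrs.get(1) == null) ? 1 : Integer.parseInt(attrs.get(1)) + 1;
            jedis.hset(attrKey, "receiveCount", Integer.toString(recvCount));

            // expire the attribute hash so undeleted messages do not leak memory (14 days)
            jedis.expire(attrKey, 3600 * 24 * 14);
        }
    }
}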

From source file:org.apache.accumulo.tserver.Tablet.java

private CompactionStats _majorCompact(MajorCompactionReason reason)
        throws IOException, CompactionCanceledException {

    long t1, t2, t3;

    // acquire file info outside of tablet lock
    CompactionStrategy strategy = Property.createInstanceFromPropertyName(acuTableConf,
            Property.TABLE_COMPACTION_STRATEGY, CompactionStrategy.class, new DefaultCompactionStrategy());
    strategy.init(Property.getCompactionStrategyOptions(acuTableConf));

    Map<FileRef, Pair<Key, Key>> firstAndLastKeys = null;
    if (reason == MajorCompactionReason.CHOP) {
        firstAndLastKeys = getFirstAndLastKeys(datafileManager.getDatafileSizes());
    } else if (reason != MajorCompactionReason.USER) {
        MajorCompactionRequest request = new MajorCompactionRequest(extent, reason, fs, acuTableConf);
        request.setFiles(datafileManager.getDatafileSizes());
        strategy.gatherInformation(request);
    }

    Map<FileRef, DataFileValue> filesToCompact;

    int maxFilesToCompact = acuTableConf.getCount(Property.TSERV_MAJC_THREAD_MAXOPEN);

    CompactionStats majCStats = new CompactionStats();
    CompactionPlan plan = null;

    boolean propogateDeletes = false;

    synchronized (this) {
        // plan all the work that needs to be done in the sync block... then do the actual work
        // outside the sync block

        t1 = System.currentTimeMillis();

        majorCompactionWaitingToStart = true;

        tabletMemory.waitForMinC();

        t2 = System.currentTimeMillis();

        majorCompactionWaitingToStart = false;
        notifyAll();

        if (extent.isRootTablet()) {
            // very important that we call this before doing major compaction,
            // otherwise deleted compacted files could possibly be brought back
            // at some point if the file they were compacted to was legitimately
            // removed by a major compaction
            cleanUpFiles(fs, fs.listStatus(this.location), false);
        }
        SortedMap<FileRef, DataFileValue> allFiles = datafileManager.getDatafileSizes();
        List<FileRef> inputFiles = new ArrayList<FileRef>();
        if (reason == MajorCompactionReason.CHOP) {
            // enforce rules: files with keys outside our range need to be compacted
            inputFiles.addAll(findChopFiles(extent, firstAndLastKeys, allFiles.keySet()));
        } else if (reason == MajorCompactionReason.USER) {
            inputFiles.addAll(allFiles.keySet());
        } else {
            MajorCompactionRequest request = new MajorCompactionRequest(extent, reason, fs, acuTableConf);
            request.setFiles(allFiles);
            plan = strategy.getCompactionPlan(request);
            if (plan != null)
                inputFiles.addAll(plan.inputFiles);
        }

        if (inputFiles.isEmpty()) {
            return majCStats;
        }
        // If no original files will exist at the end of the compaction, we do not have to propagate deletes
        Set<FileRef> droppedFiles = new HashSet<FileRef>();
        droppedFiles.addAll(inputFiles);
        if (plan != null)
            droppedFiles.addAll(plan.deleteFiles);
        propogateDeletes = !(droppedFiles.equals(allFiles.keySet()));
        log.debug("Major compaction plan: " + plan + " propogate deletes : " + propogateDeletes);
        filesToCompact = new HashMap<FileRef, DataFileValue>(allFiles);
        filesToCompact.keySet().retainAll(inputFiles);

        t3 = System.currentTimeMillis();

        datafileManager.reserveMajorCompactingFiles(filesToCompact.keySet());
    }

    try {

        log.debug(String.format("MajC initiate lock %.2f secs, wait %.2f secs", (t3 - t2) / 1000.0,
                (t2 - t1) / 1000.0));

        Pair<Long, List<IteratorSetting>> compactionId = null;
        if (!propogateDeletes) {
            // compacting everything, so update the compaction id in metadata
            try {
                compactionId = getCompactionID();
            } catch (NoNodeException e) {
                throw new RuntimeException(e);
            }
        }

        List<IteratorSetting> compactionIterators = new ArrayList<IteratorSetting>();
        if (compactionId != null) {
            if (reason == MajorCompactionReason.USER) {
                if (getCompactionCancelID() >= compactionId.getFirst()) {
                    // compaction was canceled
                    return majCStats;
                }

                synchronized (this) {
                    if (lastCompactID >= compactionId.getFirst())
                        // already compacted
                        return majCStats;
                }
            }

            compactionIterators = compactionId.getSecond();
        }

        // need to handle case where only one file is being major compacted
        while (filesToCompact.size() > 0) {

            int numToCompact = maxFilesToCompact;

            if (filesToCompact.size() > maxFilesToCompact && filesToCompact.size() < 2 * maxFilesToCompact) {
                // on the second-to-last compaction pass, compact the minimum number of files possible
                numToCompact = filesToCompact.size() - maxFilesToCompact + 1;
            }

            Set<FileRef> smallestFiles = removeSmallest(filesToCompact, numToCompact);

            FileRef fileName = getNextMapFilename(
                    (filesToCompact.size() == 0 && !propogateDeletes) ? "A" : "C");
            FileRef compactTmpName = new FileRef(fileName.path().toString() + "_tmp");

            AccumuloConfiguration tableConf = createTableConfiguration(acuTableConf, plan);

            Span span = Trace.start("compactFiles");
            try {

                CompactionEnv cenv = new CompactionEnv() {
                    @Override
                    public boolean isCompactionEnabled() {
                        return Tablet.this.isCompactionEnabled();
                    }

                    @Override
                    public IteratorScope getIteratorScope() {
                        return IteratorScope.majc;
                    }
                };

                HashMap<FileRef, DataFileValue> copy = new HashMap<FileRef, DataFileValue>(
                        datafileManager.getDatafileSizes());
                if (!copy.keySet().containsAll(smallestFiles))
                    throw new IllegalStateException("Cannot find data file values for " + smallestFiles);

                copy.keySet().retainAll(smallestFiles);

                log.debug("Starting MajC " + extent + " (" + reason + ") " + copy.keySet() + " --> "
                        + compactTmpName + "  " + compactionIterators);

                // always propagate deletes, unless last batch
                boolean lastBatch = filesToCompact.isEmpty();
                Compactor compactor = new Compactor(conf, fs, copy, null, compactTmpName,
                        lastBatch ? propogateDeletes : true, tableConf, extent, cenv, compactionIterators,
                        reason);

                CompactionStats mcs = compactor.call();

                span.data("files", "" + smallestFiles.size());
                span.data("read", "" + mcs.getEntriesRead());
                span.data("written", "" + mcs.getEntriesWritten());
                majCStats.add(mcs);

                if (lastBatch && plan != null && plan.deleteFiles != null) {
                    smallestFiles.addAll(plan.deleteFiles);
                }
                datafileManager.bringMajorCompactionOnline(smallestFiles, compactTmpName, fileName,
                        filesToCompact.size() == 0 && compactionId != null ? compactionId.getFirst() : null,
                        new DataFileValue(mcs.getFileSize(), mcs.getEntriesWritten()));

                // when major compaction produces a file w/ zero entries, it will be deleted... do not want
                // to add the deleted file
                if (filesToCompact.size() > 0 && mcs.getEntriesWritten() > 0) {
                    filesToCompact.put(fileName, new DataFileValue(mcs.getFileSize(), mcs.getEntriesWritten()));
                }
            } finally {
                span.stop();
            }

        }
        return majCStats;
    } finally {
        synchronized (Tablet.this) {
            datafileManager.clearMajorCompactingFile();
        }
    }
}
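
The Set.equals call above, droppedFiles.equals(allFiles.keySet()), decides whether deletes must be propagated: only when some file survives the compaction could an older file still hold delete markers that matter. A stripped-down sketch of just that check, with file-name strings standing in for FileRef:

import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

public class PropagateDeletesSketch {
    public static void main(String[] args) {
        Map<String, Long> allFiles = new TreeMap<>();
        allFiles.put("F0001.rf", 100L);
        allFiles.put("F0002.rf", 200L);
        allFiles.put("F0003.rf", 300L);

        Set<String> droppedFiles = new HashSet<>();
        droppedFiles.add("F0001.rf");
        droppedFiles.add("F0002.rf");

        // Set.equals is content-based and implementation-agnostic, so a HashSet
        // can be compared directly against the TreeMap's keySet view.
        boolean propagateDeletes = !droppedFiles.equals(allFiles.keySet());
        System.out.println("propagateDeletes = " + propagateDeletes); // true: F0003.rf survives
    }
}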

From source file:com.mirth.connect.client.ui.ChannelPanel.java

public Channel importChannel(Channel importChannel, boolean showAlerts, boolean refreshStatuses) {
    boolean overwrite = false;

    try {
        String channelName = importChannel.getName();
        String tempId = parent.mirthClient.getGuid();

        // Check to see that the channel name doesn't already exist.
        if (!parent.checkChannelName(channelName, tempId)) {
            if (!parent.alertOption(parent,
                    "Would you like to overwrite the existing channel?  Choose 'No' to create a new channel.")) {
                importChannel.setRevision(0);

                do {
                    channelName = JOptionPane.showInputDialog(this, "Please enter a new name for the channel.",
                            channelName);
                    if (channelName == null) {
                        return null;
                    }
                } while (!parent.checkChannelName(channelName, tempId));

                importChannel.setName(channelName);
                setIdAndUpdateLibraries(importChannel, tempId);
            } else {
                overwrite = true;

                for (ChannelStatus channelStatus : channelStatuses.values()) {
                    Channel channel = channelStatus.getChannel();
                    if (channel.getName().equalsIgnoreCase(channelName)) {
                        // If overwriting, use the old revision number and id
                        importChannel.setRevision(channel.getRevision());
                        setIdAndUpdateLibraries(importChannel, channel.getId());
                    }
                }
            }
        } else {
            // Start the revision number over for a new channel
            importChannel.setRevision(0);

            // If the channel name didn't already exist, make sure
            // the id doesn't exist either.
            if (!checkChannelId(importChannel.getId())) {
                setIdAndUpdateLibraries(importChannel, tempId);
            }

        }

        channelStatuses.put(importChannel.getId(), new ChannelStatus(importChannel));
        parent.updateChannelTags(false);
    } catch (ClientException e) {
        parent.alertThrowable(parent, e);
    }

    // Import code templates / libraries if applicable
    parent.removeInvalidItems(importChannel.getCodeTemplateLibraries(), CodeTemplateLibrary.class);
    if (!(importChannel instanceof InvalidChannel) && !importChannel.getCodeTemplateLibraries().isEmpty()) {
        boolean importLibraries;
        String importChannelCodeTemplateLibraries = Preferences.userNodeForPackage(Mirth.class)
                .get("importChannelCodeTemplateLibraries", null);

        if (importChannelCodeTemplateLibraries == null) {
            JCheckBox alwaysChooseCheckBox = new JCheckBox(
                    "Always choose this option by default in the future (may be changed in the Administrator settings)");
            Object[] params = new Object[] {
                    "Channel \"" + importChannel.getName()
                            + "\" has code template libraries included with it. Would you like to import them?",
                    alwaysChooseCheckBox };
            int result = JOptionPane.showConfirmDialog(this, params, "Select an Option",
                    JOptionPane.YES_NO_CANCEL_OPTION, JOptionPane.QUESTION_MESSAGE);

            if (result == JOptionPane.YES_OPTION || result == JOptionPane.NO_OPTION) {
                importLibraries = result == JOptionPane.YES_OPTION;
                if (alwaysChooseCheckBox.isSelected()) {
                    Preferences.userNodeForPackage(Mirth.class).putBoolean("importChannelCodeTemplateLibraries",
                            importLibraries);
                }
            } else {
                return null;
            }
        } else {
            importLibraries = Boolean.parseBoolean(importChannelCodeTemplateLibraries);
        }

        if (importLibraries) {
            CodeTemplateImportDialog dialog = new CodeTemplateImportDialog(parent,
                    importChannel.getCodeTemplateLibraries(), false, true);

            if (dialog.wasSaved()) {
                CodeTemplateLibrarySaveResult updateSummary = parent.codeTemplatePanel.attemptUpdate(
                        dialog.getUpdatedLibraries(), new HashMap<String, CodeTemplateLibrary>(),
                        dialog.getUpdatedCodeTemplates(), new HashMap<String, CodeTemplate>(), true, null,
                        null);

                if (updateSummary == null || updateSummary.isOverrideNeeded()
                        || !updateSummary.isLibrariesSuccess()) {
                    return null;
                } else {
                    for (CodeTemplateUpdateResult result : updateSummary.getCodeTemplateResults().values()) {
                        if (!result.isSuccess()) {
                            return null;
                        }
                    }
                }

                parent.codeTemplatePanel.doRefreshCodeTemplates();
            }
        }

        importChannel.getCodeTemplateLibraries().clear();
    }

    if (CollectionUtils.isNotEmpty(importChannel.getDependentIds())
            || CollectionUtils.isNotEmpty(importChannel.getDependencyIds())) {
        Set<ChannelDependency> channelDependencies = new HashSet<ChannelDependency>(
                getCachedChannelDependencies());

        if (CollectionUtils.isNotEmpty(importChannel.getDependentIds())) {
            for (String dependentId : importChannel.getDependentIds()) {
                if (StringUtils.isNotBlank(dependentId)
                        && !StringUtils.equals(dependentId, importChannel.getId())) {
                    channelDependencies.add(new ChannelDependency(dependentId, importChannel.getId()));
                }
            }
        }

        if (CollectionUtils.isNotEmpty(importChannel.getDependencyIds())) {
            for (String dependencyId : importChannel.getDependencyIds()) {
                if (StringUtils.isNotBlank(dependencyId)
                        && !StringUtils.equals(dependencyId, importChannel.getId())) {
                    channelDependencies.add(new ChannelDependency(importChannel.getId(), dependencyId));
                }
            }
        }

        if (!channelDependencies.equals(getCachedChannelDependencies())) {
            try {
                parent.mirthClient.setChannelDependencies(channelDependencies);
            } catch (ClientException e) {
                parent.alertThrowable(parent, e, "Unable to save channel dependencies.");
            }
        }

        importChannel.clearDependencies();
    }

    // Update resource names
    parent.updateResourceNames(importChannel);

    /*
     * Update the channel if we're overwriting an imported channel, if we're not showing alerts
     * (dragging/dropping multiple channels), or if we're working with an invalid channel.
     */
    if (overwrite || !showAlerts || importChannel instanceof InvalidChannel) {
        try {
            parent.updateChannel(importChannel, overwrite);

            if (importChannel instanceof InvalidChannel && showAlerts) {
                InvalidChannel invalidChannel = (InvalidChannel) importChannel;
                Throwable cause = invalidChannel.getCause();
                parent.alertThrowable(parent, cause, "Channel \"" + importChannel.getName() + "\" is invalid. "
                        + getMissingExtensions(invalidChannel) + " Original cause:\n" + cause.getMessage());
            }
        } catch (Exception e) {
            channelStatuses.remove(importChannel.getId());
            parent.updateChannelTags(false);
            parent.alertThrowable(parent, e);
            return null;
        } finally {
            if (refreshStatuses) {
                doRefreshChannels();
            }
        }
    }

    if (showAlerts) {
        final Channel importChannelFinal = importChannel;
        final boolean overwriteFinal = overwrite;

        /*
         * MIRTH-2048 - This is a hack to fix the memory access error that only occurs on OS X.
         * The block of code that edits the channel needs to be invoked later so that the screen
         * does not change before the drag/drop action of a channel finishes.
         */
        SwingUtilities.invokeLater(new Runnable() {

            @Override
            public void run() {
                try {
                    parent.editChannel(importChannelFinal);
                    parent.setSaveEnabled(!overwriteFinal);
                } catch (Exception e) {
                    channelStatuses.remove(importChannelFinal.getId());
                    parent.updateChannelTags(false);
                    parent.alertError(parent, "Channel had an unknown problem. Channel import aborted.");
                    parent.channelEditPanel = new ChannelSetup();
                    parent.doShowChannel();
                }
            }

        });
    }

    return importChannel;
}
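
The dependency handling above uses Set.equals to avoid a needless server round-trip: the rebuilt set is only pushed when it differs from the cached one. A minimal sketch of that pattern, with plain strings standing in for ChannelDependency (whose equals/hashCode is what makes the comparison meaningful) and hypothetical names throughout:

import java.util.HashSet;
import java.util.Set;

public class DependencySaveSketch {
    public static void main(String[] args) {
        // hypothetical cached state, e.g. what getCachedChannelDependencies() would return
        Set<String> cachedDependencies = new HashSet<>();
        cachedDependencies.add("channelA->channelB");

        // rebuild the set from the imported channel's dependent/dependency ids
        Set<String> channelDependencies = new HashSet<>(cachedDependencies);
        channelDependencies.add("channelA->channelC");

        // Set.equals compares contents, so the save is skipped when nothing changed
        if (!channelDependencies.equals(cachedDependencies)) {
            System.out.println("dependencies changed, saving: " + channelDependencies);
        } else {
            System.out.println("no change, skipping save");
        }
    }
}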

From source file:org.apache.calcite.rel.rules.AbstractMaterializedViewRule.java

/**
 * Rewriting logic is based on "Optimizing Queries Using Materialized Views:
 * A Practical, Scalable Solution" by Goldstein and Larson.
 *
 * <p>On the query side, the rule matches a Project-node chain or node, where node
 * is either an Aggregate or a Join. The subplan rooted at the node operator must
 * be composed of one or more of the following operators: TableScan, Project,
 * Filter, and Join.
 *
 * <p>For each join MV, we need to check the following:
 * <ol>
 * <li> The plan rooted at the Join operator in the view produces all rows
 * needed by the plan rooted at the Join operator in the query.</li>
 * <li> All columns required by compensating predicates, i.e., predicates that
 * need to be enforced over the view, are available at the view output.</li>
 * <li> All output expressions can be computed from the output of the view.</li>
 * <li> All output rows occur with the correct duplication factor. We might
 * rely on existing Unique-Key - Foreign-Key relationships to extract that
 * information.</li>
 * </ol>
 *
 * <p>In turn, for each aggregate MV, we need to check the following:
 * <ol>
 * <li> The plan rooted at the Aggregate operator in the view produces all rows
 * needed by the plan rooted at the Aggregate operator in the query.</li>
 * <li> All columns required by compensating predicates, i.e., predicates that
 * need to be enforced over the view, are available at the view output.</li>
 * <li> The grouping columns in the query are a subset of the grouping columns
 * in the view.</li>
 * <li> All columns required to perform further grouping are available in the
 * view output.</li>
 * <li> All columns required to compute output expressions are available in the
 * view output.</li>
 * </ol>
 */
protected void perform(RelOptRuleCall call, Project topProject, RelNode node) {
    final RexBuilder rexBuilder = node.getCluster().getRexBuilder();
    final RelMetadataQuery mq = RelMetadataQuery.instance();
    final RelOptPlanner planner = call.getPlanner();
    final RexSimplify simplify = new RexSimplify(rexBuilder, true,
            planner.getExecutor() != null ? planner.getExecutor() : RexUtil.EXECUTOR);

    final List<RelOptMaterialization> materializations = (planner instanceof VolcanoPlanner)
            ? ((VolcanoPlanner) planner).getMaterializations()
            : ImmutableList.<RelOptMaterialization>of();

    if (!materializations.isEmpty()) {
        // 1. Explore the query plan to check whether the preconditions for
        // generating a rewriting are met
        if (!isValidPlan(topProject, node, mq)) {
            return;
        }

        // Obtain applicable (filtered) materializations
        // TODO: Filtering of relevant materializations needs to be
        // improved so we gather only materializations that might
        // actually generate a valid rewriting.
        final List<RelOptMaterialization> applicableMaterializations = RelOptMaterializations
                .getApplicableMaterializations(node, materializations);

        if (!applicableMaterializations.isEmpty()) {
            // 2. Initialize all query-related auxiliary data structures
            // that will be used throughout the query rewriting process
            // Generate query table references
            final Set<RelTableRef> queryTableRefs = mq.getTableReferences(node);
            if (queryTableRefs == null) {
                // Bail out
                return;
            }

            // Extract query predicates
            final RelOptPredicateList queryPredicateList = mq.getAllPredicates(node);
            if (queryPredicateList == null) {
                // Bail out
                return;
            }
            final RexNode pred = simplify.simplify(
                    RexUtil.composeConjunction(rexBuilder, queryPredicateList.pulledUpPredicates, false));
            final Triple<RexNode, RexNode, RexNode> queryPreds = splitPredicates(rexBuilder, pred);

            // Extract query equivalence classes. An equivalence class is a set
            // of columns in the query output that are known to be equal.
            final EquivalenceClasses qEC = new EquivalenceClasses();
            for (RexNode conj : RelOptUtil.conjunctions(queryPreds.getLeft())) {
                assert conj.isA(SqlKind.EQUALS);
                RexCall equiCond = (RexCall) conj;
                qEC.addEquivalenceClass((RexTableInputRef) equiCond.getOperands().get(0),
                        (RexTableInputRef) equiCond.getOperands().get(1));
            }

            // 3. We iterate through all applicable materializations trying to
            // rewrite the given query
            for (RelOptMaterialization materialization : applicableMaterializations) {
                final Project topViewProject;
                final RelNode viewNode;
                if (materialization.queryRel instanceof Project) {
                    topViewProject = (Project) materialization.queryRel;
                    viewNode = topViewProject.getInput();
                } else {
                    topViewProject = null;
                    viewNode = materialization.queryRel;
                }

                // 3.1. View checks before proceeding
                if (!isValidPlan(topViewProject, viewNode, mq)) {
                    // Skip it
                    continue;
                }

                // 3.2. Initialize all view-related auxiliary data structures
                // that will be used throughout the query rewriting process
                // Extract view predicates
                final RelOptPredicateList viewPredicateList = mq.getAllPredicates(viewNode);
                if (viewPredicateList == null) {
                    // Skip it
                    continue;
                }
                final RexNode viewPred = simplify.simplify(
                        RexUtil.composeConjunction(rexBuilder, viewPredicateList.pulledUpPredicates, false));
                final Triple<RexNode, RexNode, RexNode> viewPreds = splitPredicates(rexBuilder, viewPred);

                // Extract view table references
                final Set<RelTableRef> viewTableRefs = mq.getTableReferences(viewNode);
                if (viewTableRefs == null) {
                    // Bail out
                    return;
                }

                // Extract view tables
                MatchModality matchModality;
                Multimap<RexTableInputRef, RexTableInputRef> compensationEquiColumns = ArrayListMultimap
                        .create();
                if (!queryTableRefs.equals(viewTableRefs)) {
                    // We try to compensate, e.g., for join queries it might be
                    // possible to join missing tables with the view to compute the result.
                    // Two supported cases: the query tables are a subset of the view tables
                    // (we need to check whether the joins are cardinality-preserving), or the
                    // view tables are a subset of the query tables (add the missing tables
                    // through joins if possible)
                    if (viewTableRefs.containsAll(queryTableRefs)) {
                        matchModality = MatchModality.QUERY_PARTIAL;
                        final EquivalenceClasses vEC = new EquivalenceClasses();
                        for (RexNode conj : RelOptUtil.conjunctions(viewPreds.getLeft())) {
                            assert conj.isA(SqlKind.EQUALS);
                            RexCall equiCond = (RexCall) conj;
                            vEC.addEquivalenceClass((RexTableInputRef) equiCond.getOperands().get(0),
                                    (RexTableInputRef) equiCond.getOperands().get(1));
                        }
                        if (!compensateQueryPartial(compensationEquiColumns, viewTableRefs, vEC,
                                queryTableRefs)) {
                            // Cannot rewrite, skip it
                            continue;
                        }
                    } else if (queryTableRefs.containsAll(viewTableRefs)) {
                        // TODO: implement this latter case
                        matchModality = MatchModality.VIEW_PARTIAL;
                        continue;
                    } else {
                        // Skip it
                        continue;
                    }
                } else {
                    matchModality = MatchModality.COMPLETE;
                }

                // 4. We map every table in the query to a view table with the same qualified
                // name.
                final Multimap<RelTableRef, RelTableRef> multiMapTables = ArrayListMultimap.create();
                for (RelTableRef queryTableRef : queryTableRefs) {
                    for (RelTableRef viewTableRef : viewTableRefs) {
                        if (queryTableRef.getQualifiedName().equals(viewTableRef.getQualifiedName())) {
                            multiMapTables.put(queryTableRef, viewTableRef);
                        }
                    }
                }

                // If a table is used multiple times, we will create multiple mappings,
                // and we will try to rewrite the query using each of the mappings.
                // Then, we will try to map every source table (query) to a target
                // table (view), and if we are successful, we will try to create
                // compensation predicates to filter the view results further
                // (if needed).
                final List<BiMap<RelTableRef, RelTableRef>> flatListMappings = generateTableMappings(
                        multiMapTables);
                for (BiMap<RelTableRef, RelTableRef> tableMapping : flatListMappings) {
                    // 4.0. If compensation equivalence classes exist, we need to add
                    // the mapping to the query mapping
                    final EquivalenceClasses currQEC = EquivalenceClasses.copy(qEC);
                    if (matchModality == MatchModality.QUERY_PARTIAL) {
                        for (Entry<RexTableInputRef, RexTableInputRef> e : compensationEquiColumns.entries()) {
                            // Copy origin
                            RelTableRef queryTableRef = tableMapping.inverse().get(e.getKey().getTableRef());
                            RexTableInputRef queryColumnRef = RexTableInputRef.of(queryTableRef,
                                    e.getKey().getIndex(), e.getKey().getType());
                            // Add to query equivalence classes and table mapping
                            currQEC.addEquivalenceClass(queryColumnRef, e.getValue());
                            tableMapping.put(e.getValue().getTableRef(), e.getValue().getTableRef()); //identity
                        }
                    }

                    final RexNode compensationColumnsEquiPred;
                    final RexNode compensationRangePred;
                    final RexNode compensationResidualPred;

                    // 4.1. Establish relationship between view and query equivalence classes.
                    // Every view equivalence class must be a subset of some query
                    // equivalence class; if any is not, we bail out.
                    // To establish relationship, we swap column references of the view predicates
                    // to point to query tables. Then, we create the equivalence classes for the
                    // view predicates and check that every view equivalence class is a subset of a
                    // query equivalence class: if it is not, we bail out.
                    final RexNode viewColumnsEquiPred = RexUtil.swapTableReferences(rexBuilder,
                            viewPreds.getLeft(), tableMapping.inverse());
                    final EquivalenceClasses queryBasedVEC = new EquivalenceClasses();
                    for (RexNode conj : RelOptUtil.conjunctions(viewColumnsEquiPred)) {
                        assert conj.isA(SqlKind.EQUALS);
                        RexCall equiCond = (RexCall) conj;
                        queryBasedVEC.addEquivalenceClass((RexTableInputRef) equiCond.getOperands().get(0),
                                (RexTableInputRef) equiCond.getOperands().get(1));
                    }
                    compensationColumnsEquiPred = generateEquivalenceClasses(rexBuilder, currQEC,
                            queryBasedVEC);
                    if (compensationColumnsEquiPred == null) {
                        // Skip it
                        continue;
                    }

                    // 4.2. We check that range intervals for the query are contained in the view.
                    // Compute compensating predicates.
                    final RexNode queryRangePred = RexUtil.swapColumnReferences(rexBuilder,
                            queryPreds.getMiddle(), currQEC.getEquivalenceClassesMap());
                    final RexNode viewRangePred = RexUtil.swapTableColumnReferences(rexBuilder,
                            viewPreds.getMiddle(), tableMapping.inverse(), currQEC.getEquivalenceClassesMap());
                    compensationRangePred = SubstitutionVisitor.splitFilter(simplify, queryRangePred,
                            viewRangePred);
                    if (compensationRangePred == null) {
                        // Skip it
                        continue;
                    }

                    // 4.3. Finally, we check that residual predicates of the query are satisfied
                    // within the view.
                    // Compute compensating predicates.
                    final RexNode queryResidualPred = RexUtil.swapColumnReferences(rexBuilder,
                            queryPreds.getRight(), currQEC.getEquivalenceClassesMap());
                    final RexNode viewResidualPred = RexUtil.swapTableColumnReferences(rexBuilder,
                            viewPreds.getRight(), tableMapping.inverse(), currQEC.getEquivalenceClassesMap());
                    compensationResidualPred = SubstitutionVisitor.splitFilter(simplify, queryResidualPred,
                            viewResidualPred);
                    if (compensationResidualPred == null) {
                        // Skip it
                        continue;
                    }

                    // 4.4. Final compensation predicate.
                    RexNode compensationPred = RexUtil.composeConjunction(rexBuilder, ImmutableList
                            .of(compensationColumnsEquiPred, compensationRangePred, compensationResidualPred),
                            false);
                    if (!compensationPred.isAlwaysTrue()) {
                        // All columns required by compensating predicates must be contained
                        // in the view output (condition 2).
                        List<RexNode> viewExprs = extractExpressions(topViewProject, viewNode, rexBuilder);
                        compensationPred = rewriteExpression(rexBuilder, viewNode, viewExprs, compensationPred,
                                tableMapping, currQEC.getEquivalenceClassesMap(), mq);
                        if (compensationPred == null) {
                            // Skip it
                            continue;
                        }
                    }

                    // 4.5. Generate final rewriting if possible.
                    // First, we add the compensation predicate (if any) on top of the view.
                    // Then, we trigger the Aggregate unifying method. This method will either create
                    // a Project or an Aggregate operator on top of the view. It will also compute the
                    // output expressions for the query.
                    RelBuilder builder = call.builder();
                    builder.push(materialization.tableRel);
                    if (!compensationPred.isAlwaysTrue()) {
                        builder.filter(simplify.simplify(compensationPred));
                    }
                    RelNode result = unify(rexBuilder, builder, builder.build(), topProject, node,
                            topViewProject, viewNode, tableMapping, currQEC.getEquivalenceClassesMap(), mq);
                    if (result == null) {
                        // Skip it
                        continue;
                    }
                    call.transformTo(result);
                }
            }
        }
    }
}
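
The queryTableRefs.equals(viewTableRefs) check above, together with the two containsAll probes, classifies how the view's tables relate to the query's. A compact sketch of that three-way classification, with table names standing in for RelTableRef and a NONE sentinel added here for the case the rule skips:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class MatchModalitySketch {
    enum MatchModality { COMPLETE, QUERY_PARTIAL, VIEW_PARTIAL, NONE }

    static MatchModality classify(Set<String> queryTables, Set<String> viewTables) {
        if (queryTables.equals(viewTables)) {
            return MatchModality.COMPLETE;      // same tables on both sides
        } else if (viewTables.containsAll(queryTables)) {
            return MatchModality.QUERY_PARTIAL; // view joins extra tables
        } else if (queryTables.containsAll(viewTables)) {
            return MatchModality.VIEW_PARTIAL;  // query joins extra tables
        }
        return MatchModality.NONE;              // neither contains the other; no rewrite
    }

    public static void main(String[] args) {
        Set<String> query = new HashSet<>(Arrays.asList("emps", "depts"));
        Set<String> view = new HashSet<>(Arrays.asList("emps", "depts"));
        System.out.println(classify(query, view)); // COMPLETE
        view.add("locations");
        System.out.println(classify(query, view)); // QUERY_PARTIAL
    }
}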