Example usage for java.util BitSet BitSet

List of usage examples for java.util BitSet BitSet

Introduction

On this page you can find example usages of the java.util.BitSet constructor.

Prototype

public BitSet(int nbits)

Source Link

Document

Creates a bit set whose initial size is large enough to explicitly represent bits with indices in the range 0 through nbits-1. All of the bits are initially false.
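
For orientation before the project excerpts below, here is a minimal, self-contained sketch of this constructor in action (class and variable names here are illustrative only, not taken from any of the projects below):

import java.util.BitSet;

public class BitSetConstructorDemo {
    public static void main(String[] args) {
        // Size the set for 64 bits up front; a BitSet still grows on demand if needed.
        BitSet bits = new BitSet(64);
        bits.set(3);
        bits.set(10, 20); // sets the half-open range [10, 20)
        System.out.println(bits.cardinality()); // prints 11
        System.out.println(bits.get(3));        // prints true
    }
}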

Usage

From source file:org.asoem.greyfish.utils.collect.BitString.java

/**
 * Create a new bit string of given {@code length} with bits set to one at the given {@code indices}.
 *
 * @param indices the indices of the bits to set
 * @param length  the length of the bit string to create
 * @return a new bit string of given {@code length}
 */
public static BitString forIndices(final Set<Integer> indices, final int length) {
    if ((double) indices.size() / length < 1.0 / 32) {
        return new IndexSetString(indices, length);
    } else {
        final BitSet bitSet = new BitSet(length);
        for (Integer index : indices) {
            bitSet.set(index);
        }
        return forBitSet(bitSet, length);
    }
}
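
The method above picks a representation by density: when fewer than one bit in 32 is set it keeps the raw indices, otherwise it materializes a BitSet sized to the full length. Below is a stripped-down sketch of the dense branch using only java.util types (the helper name is hypothetical):

import java.util.BitSet;
import java.util.Set;

final class BitSetFromIndices {
    // Hypothetical helper mirroring the dense branch of forIndices above.
    static BitSet fromIndices(Set<Integer> indices, int length) {
        BitSet bitSet = new BitSet(length); // sized for the full bit string
        for (int index : indices) {
            bitSet.set(index);              // one bit per requested index
        }
        return bitSet;
    }
}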

From source file:de.hpi.petrinet.PetriNet.java

protected Map<Node, Set<Node>> deriveDominators(boolean reverse) {

    int initIndex = reverse ? this.getNodes().indexOf(this.getFinalPlace())
            : this.getNodes().indexOf(this.getInitialPlace());

    int size = this.getNodes().size();
    final BitSet[] dom = new BitSet[size];
    final BitSet ALL = new BitSet(size);

    for (Node n : this.getNodes())
        ALL.set(this.getNodes().indexOf(n));

    for (Node n : this.getNodes()) {
        int index = this.getNodes().indexOf(n);
        BitSet curDoms = new BitSet(size);
        dom[index] = curDoms;

        if (index != initIndex)
            curDoms.or(ALL);
        else
            curDoms.set(initIndex);
    }

    boolean changed = true;

    /*
     * While we change the dom relation for a node
     */
    while (changed) {
        changed = false;
        for (Node n : this.getNodes()) {
            int index = this.getNodes().indexOf(n);
            if (index == initIndex)
                continue;

            final BitSet old = dom[index];
            final BitSet curDoms = new BitSet(size);
            curDoms.or(old);

            Collection<Node> predecessors = reverse ? n.getSucceedingNodes() : n.getPrecedingNodes();
            for (Node p : predecessors) {
                int index2 = this.getNodes().indexOf(p);
                curDoms.and(dom[index2]);
            }

            curDoms.set(index);

            if (!curDoms.equals(old)) {
                changed = true;
                dom[index] = curDoms;
            }
        }
    }

    Map<Node, Set<Node>> dominators = new HashMap<Node, Set<Node>>();

    for (Node n : this.getNodes()) {
        int index = this.getNodes().indexOf(n);
        dominators.put(n, new HashSet<Node>());
        for (int i = 0; i < size; i++)
            if (dom[index].get(i))
                dominators.get(n).add(this.getNodes().get(i));
    }

    return dominators;
}
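
The fixed-point loop above relies on BitSet intersection: each node's dominator set is the AND of its predecessors' sets, plus the node itself. Here is a toy, self-contained version over a four-node graph (the graph shape and names are invented for illustration):

import java.util.BitSet;

public class DominatorSketch {
    public static void main(String[] args) {
        // Tiny control-flow graph: 0 -> 1, 0 -> 2, 1 -> 3, 2 -> 3 (entry node is 0).
        int[][] preds = { {}, { 0 }, { 0 }, { 1, 2 } };
        int n = preds.length;
        BitSet[] dom = new BitSet[n];
        for (int i = 0; i < n; i++) {
            dom[i] = new BitSet(n);
            if (i == 0) {
                dom[i].set(0);    // the entry node dominates only itself
            } else {
                dom[i].set(0, n); // every other node starts with ALL
            }
        }
        boolean changed = true;
        while (changed) {
            changed = false;
            for (int i = 1; i < n; i++) {
                BitSet cur = new BitSet(n);
                cur.set(0, n);
                for (int p : preds[i]) {
                    cur.and(dom[p]); // intersect the predecessors' dominator sets
                }
                cur.set(i);          // a node always dominates itself
                if (!cur.equals(dom[i])) {
                    dom[i] = cur;
                    changed = true;
                }
            }
        }
        System.out.println(dom[3]); // prints {0, 3}
    }
}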

From source file:android.support.v7.widget.StaggeredGridLayoutManager.java

/**
 * Checks for gaps if we've reached the top of the list.
 * <p>
 * Intermediate gaps created by full span items are tracked via the mLaidOutInvalidFullSpan field.
 */
View hasGapsToFix() {
    int startChildIndex = 0;
    int endChildIndex = getChildCount() - 1;
    BitSet mSpansToCheck = new BitSet(mSpanCount);
    mSpansToCheck.set(0, mSpanCount, true);

    final int firstChildIndex, childLimit;
    final int preferredSpanDir = mOrientation == VERTICAL && isLayoutRTL() ? 1 : -1;

    if (mShouldReverseLayout) {
        firstChildIndex = endChildIndex;
        childLimit = startChildIndex - 1;
    } else {
        firstChildIndex = startChildIndex;
        childLimit = endChildIndex + 1;
    }
    final int nextChildDiff = firstChildIndex < childLimit ? 1 : -1;
    for (int i = firstChildIndex; i != childLimit; i += nextChildDiff) {
        View child = getChildAt(i);
        LayoutParams lp = (LayoutParams) child.getLayoutParams();
        if (mSpansToCheck.get(lp.mSpan.mIndex)) {
            if (checkSpanForGap(lp.mSpan)) {
                return child;
            }
            mSpansToCheck.clear(lp.mSpan.mIndex);
        }
        if (lp.mFullSpan) {
            continue; // quick reject
        }

        if (i + nextChildDiff != childLimit) {
            View nextChild = getChildAt(i + nextChildDiff);
            boolean compareSpans = false;
            if (mShouldReverseLayout) {
                // ensure child's end is below nextChild's end
                int myEnd = mPrimaryOrientation.getDecoratedEnd(child);
                int nextEnd = mPrimaryOrientation.getDecoratedEnd(nextChild);
                if (myEnd < nextEnd) {
                    return child; // I should have a better position
                } else if (myEnd == nextEnd) {
                    compareSpans = true;
                }
            } else {
                int myStart = mPrimaryOrientation.getDecoratedStart(child);
                int nextStart = mPrimaryOrientation.getDecoratedStart(nextChild);
                if (myStart > nextStart) {
                    return child; // I should have a better position
                } else if (myStart == nextStart) {
                    compareSpans = true;
                }
            }
            if (compareSpans) {
                // equal, check span indices.
                LayoutParams nextLp = (LayoutParams) nextChild.getLayoutParams();
                if (lp.mSpan.mIndex - nextLp.mSpan.mIndex < 0 != preferredSpanDir < 0) {
                    return child;
                }
            }
        }
    }
    // everything looks good
    return null;
}
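
Here the BitSet serves as a shrinking checklist: every span starts marked, and a span is cleared once its first child has been inspected so it is never checked twice. The pattern in isolation (the data is invented):

import java.util.BitSet;

final class SpanChecklistSketch {
    public static void main(String[] args) {
        int spanCount = 4;
        BitSet spansToCheck = new BitSet(spanCount);
        spansToCheck.set(0, spanCount, true);  // mark every span as not yet checked
        int[] spanOfChild = { 2, 0, 2, 1, 3 }; // span index per child, invented
        for (int span : spanOfChild) {
            if (spansToCheck.get(span)) {
                // ... inspect only the first child seen for this span ...
                spansToCheck.clear(span); // done with this span
            }
        }
        System.out.println(spansToCheck.isEmpty()); // prints true
    }
}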

From source file:edu.uci.ics.asterix.optimizer.rules.am.BTreeAccessMethod.java

private ILogicalOperator createSecondaryToPrimaryPlan(Mutable<ILogicalOperator> topOpRef,
        Mutable<ILogicalExpression> conditionRef, OptimizableOperatorSubTree indexSubTree,
        OptimizableOperatorSubTree probeSubTree, Index chosenIndex, AccessMethodAnalysisContext analysisCtx,
        boolean retainInput, boolean retainNull, boolean requiresBroadcast, IOptimizationContext context)
        throws AlgebricksException {
    Dataset dataset = indexSubTree.dataset;
    ARecordType recordType = indexSubTree.recordType;
    // we made sure indexSubTree has a datasource scan
    AbstractDataSourceOperator dataSourceOp = (AbstractDataSourceOperator) indexSubTree.dataSourceRef
            .getValue();
    List<Pair<Integer, Integer>> exprAndVarList = analysisCtx.indexExprsAndVars.get(chosenIndex);
    List<IOptimizableFuncExpr> matchedFuncExprs = analysisCtx.matchedFuncExprs;
    int numSecondaryKeys = analysisCtx.indexNumMatchedKeys.get(chosenIndex);
    // List of function expressions that will be replaced by the secondary-index search.
    // These func exprs will be removed from the select condition at the very end of this method.
    Set<ILogicalExpression> replacedFuncExprs = new HashSet<ILogicalExpression>();

    // Info on high and low keys for the BTree search predicate.
    ILogicalExpression[] lowKeyExprs = new ILogicalExpression[numSecondaryKeys];
    ILogicalExpression[] highKeyExprs = new ILogicalExpression[numSecondaryKeys];
    LimitType[] lowKeyLimits = new LimitType[numSecondaryKeys];
    LimitType[] highKeyLimits = new LimitType[numSecondaryKeys];
    boolean[] lowKeyInclusive = new boolean[numSecondaryKeys];
    boolean[] highKeyInclusive = new boolean[numSecondaryKeys];

    // TODO: For now we don't do any sophisticated analysis of the func exprs to come up with "the best" range predicate.
    // If we can't figure out how to integrate a certain funcExpr into the current predicate, we just bail by setting this flag.
    boolean couldntFigureOut = false;
    boolean doneWithExprs = false;
    boolean isEqCondition = false;
    // TODO: For now don't consider prefix searches.
    BitSet setLowKeys = new BitSet(numSecondaryKeys);
    BitSet setHighKeys = new BitSet(numSecondaryKeys);
    // Go through the func exprs listed as optimizable by the chosen index,
    // and formulate a range predicate on the secondary-index keys.

    // Checks whether a type cast happened from a real (FLOAT, DOUBLE) value to an INT value,
    // since we have rounding issues when dealing with the LT(<) or GT(>) operators.
    boolean realTypeConvertedToIntegerType = false;

    for (Pair<Integer, Integer> exprIndex : exprAndVarList) {
        // Position of the field of matchedFuncExprs.get(exprIndex) in the chosen index's indexed exprs.
        IOptimizableFuncExpr optFuncExpr = matchedFuncExprs.get(exprIndex.first);
        int keyPos = indexOf(optFuncExpr.getFieldName(0), chosenIndex.getKeyFieldNames());
        if (keyPos < 0) {
            if (optFuncExpr.getNumLogicalVars() > 1) {
                // If we are optimizing a join, the matching field may be the second field name.
                keyPos = indexOf(optFuncExpr.getFieldName(1), chosenIndex.getKeyFieldNames());
            }
        }
        if (keyPos < 0) {
            throw new AlgebricksException(
                    "Could not match optimizable function expression to any index field name.");
        }
        Pair<ILogicalExpression, Boolean> returnedSearchKeyExpr = AccessMethodUtils
                .createSearchKeyExpr(optFuncExpr, indexSubTree, probeSubTree);
        ILogicalExpression searchKeyExpr = returnedSearchKeyExpr.first;
        realTypeConvertedToIntegerType = returnedSearchKeyExpr.second;

        LimitType limit = getLimitType(optFuncExpr, probeSubTree);

        // If a DOUBLE or FLOAT constant is converted to an INT type value,
        // we need to check a corner case where an INT value lies between the two real values.
        // For example, for the following query,
        //
        // for $emp in dataset empDataset
        // where $emp.age > double("2.3") and $emp.age < double("3.3")
        // return $emp.id;
        //
        // the query should return tuples whose age is 3, since 3 satisfies the condition;
        // however, candidate search fails after the fractional parts are truncated
        // (there is no INT strictly greater than 2 and less than 3).
        //
        // Therefore, we convert LT(<) to LE(<=) and GT(>) to GE(>=) to find candidates.
        // This does not change the result of an actual comparison since this conversion is only applied
        // for finding candidates from an index.
        //
        if (realTypeConvertedToIntegerType) {
            if (limit == LimitType.HIGH_EXCLUSIVE) {
                limit = LimitType.HIGH_INCLUSIVE;
            } else if (limit == LimitType.LOW_EXCLUSIVE) {
                limit = LimitType.LOW_INCLUSIVE;
            }
        }

        switch (limit) {
        case EQUAL: {
            if (lowKeyLimits[keyPos] == null && highKeyLimits[keyPos] == null) {
                lowKeyLimits[keyPos] = highKeyLimits[keyPos] = limit;
                lowKeyInclusive[keyPos] = highKeyInclusive[keyPos] = true;
                lowKeyExprs[keyPos] = highKeyExprs[keyPos] = searchKeyExpr;
                setLowKeys.set(keyPos);
                setHighKeys.set(keyPos);
                isEqCondition = true;
            } else {
                // Has already been set to the identical values. When optimizing join we may encounter the same optimizable expression twice
                // (once from analyzing each side of the join)
                if (lowKeyLimits[keyPos] == limit && lowKeyInclusive[keyPos] == true
                        && lowKeyExprs[keyPos].equals(searchKeyExpr) && highKeyLimits[keyPos] == limit
                        && highKeyInclusive[keyPos] == true && highKeyExprs[keyPos].equals(searchKeyExpr)) {
                    isEqCondition = true;
                    break;
                }
                couldntFigureOut = true;
            }
            // TODO: For now don't consider prefix searches.
            // If high and low keys are set, we exit for now.
            if (setLowKeys.cardinality() == numSecondaryKeys && setHighKeys.cardinality() == numSecondaryKeys) {
                doneWithExprs = true;
            }
            break;
        }
        case HIGH_EXCLUSIVE: {
            if (highKeyLimits[keyPos] == null || (highKeyLimits[keyPos] != null && highKeyInclusive[keyPos])) {
                highKeyLimits[keyPos] = limit;
                highKeyExprs[keyPos] = searchKeyExpr;
                highKeyInclusive[keyPos] = false;
            } else {
                // Has already been set to the identical values. When optimizing join we may encounter the same optimizable expression twice
                // (once from analyzing each side of the join)
                if (highKeyLimits[keyPos] == limit && highKeyInclusive[keyPos] == false
                        && highKeyExprs[keyPos].equals(searchKeyExpr)) {
                    break;
                }
                couldntFigureOut = true;
                doneWithExprs = true;
            }
            break;
        }
        case HIGH_INCLUSIVE: {
            if (highKeyLimits[keyPos] == null) {
                highKeyLimits[keyPos] = limit;
                highKeyExprs[keyPos] = searchKeyExpr;
                highKeyInclusive[keyPos] = true;
            } else {
                // Has already been set to the identical values. When optimizing join we may encounter the same optimizable expression twice
                // (once from analyzing each side of the join)
                if (highKeyLimits[keyPos] == limit && highKeyInclusive[keyPos] == true
                        && highKeyExprs[keyPos].equals(searchKeyExpr)) {
                    break;
                }
                couldntFigureOut = true;
                doneWithExprs = true;
            }
            break;
        }
        case LOW_EXCLUSIVE: {
            if (lowKeyLimits[keyPos] == null || (lowKeyLimits[keyPos] != null && lowKeyInclusive[keyPos])) {
                lowKeyLimits[keyPos] = limit;
                lowKeyExprs[keyPos] = searchKeyExpr;
                lowKeyInclusive[keyPos] = false;
            } else {
                // Has already been set to the identical values. When optimizing join we may encounter the same optimizable expression twice
                // (once from analyzing each side of the join)
                if (lowKeyLimits[keyPos] == limit && lowKeyInclusive[keyPos] == false
                        && lowKeyExprs[keyPos].equals(searchKeyExpr)) {
                    break;
                }
                couldntFigureOut = true;
                doneWithExprs = true;
            }
            break;
        }
        case LOW_INCLUSIVE: {
            if (lowKeyLimits[keyPos] == null) {
                lowKeyLimits[keyPos] = limit;
                lowKeyExprs[keyPos] = searchKeyExpr;
                lowKeyInclusive[keyPos] = true;
            } else {
                // Has already been set to the identical values. When optimizing join we may encounter the same optimizable expression twice
                // (once from analyzing each side of the join)
                if (lowKeyLimits[keyPos] == limit && lowKeyInclusive[keyPos] == true
                        && lowKeyExprs[keyPos].equals(searchKeyExpr)) {
                    break;
                }
                couldntFigureOut = true;
                doneWithExprs = true;
            }
            break;
        }
        default: {
            throw new IllegalStateException();
        }
        }
        if (!couldntFigureOut) {
            // Remember to remove this funcExpr later.
            replacedFuncExprs.add(matchedFuncExprs.get(exprIndex.first).getFuncExpr());
        }
        if (doneWithExprs) {
            break;
        }
    }
    if (couldntFigureOut) {
        return null;
    }

    // If the select condition contains mixed open/closed intervals on multiple keys, then we make all intervals
    // closed to obtain a superset of answers and leave the original selection in place.
    boolean primaryIndexPostProccessingIsNeeded = false;
    for (int i = 1; i < numSecondaryKeys; ++i) {
        if (lowKeyInclusive[i] != lowKeyInclusive[0]) {
            Arrays.fill(lowKeyInclusive, true);
            primaryIndexPostProccessingIsNeeded = true;
            break;
        }
    }
    for (int i = 1; i < numSecondaryKeys; ++i) {
        if (highKeyInclusive[i] != highKeyInclusive[0]) {
            Arrays.fill(highKeyInclusive, true);
            primaryIndexPostProccessingIsNeeded = true;
            break;
        }
    }

    // determine cases when prefix search could be applied
    for (int i = 1; i < lowKeyExprs.length; i++) {
        if (lowKeyLimits[0] == null && lowKeyLimits[i] != null
                || lowKeyLimits[0] != null && lowKeyLimits[i] == null
                || highKeyLimits[0] == null && highKeyLimits[i] != null
                || highKeyLimits[0] != null && highKeyLimits[i] == null) {
            numSecondaryKeys--;
            primaryIndexPostProccessingIsNeeded = true;
        }
    }
    if (lowKeyLimits[0] == null) {
        lowKeyInclusive[0] = true;
    }
    if (highKeyLimits[0] == null) {
        highKeyInclusive[0] = true;
    }

    // Here we generate vars and funcs for assigning the secondary-index keys to be fed into the secondary-index search.
    // List of variables for the assign.
    ArrayList<LogicalVariable> keyVarList = new ArrayList<LogicalVariable>();
    // List of variables and expressions for the assign.
    ArrayList<LogicalVariable> assignKeyVarList = new ArrayList<LogicalVariable>();
    ArrayList<Mutable<ILogicalExpression>> assignKeyExprList = new ArrayList<Mutable<ILogicalExpression>>();
    int numLowKeys = createKeyVarsAndExprs(numSecondaryKeys, lowKeyLimits, lowKeyExprs, assignKeyVarList,
            assignKeyExprList, keyVarList, context);
    int numHighKeys = createKeyVarsAndExprs(numSecondaryKeys, highKeyLimits, highKeyExprs, assignKeyVarList,
            assignKeyExprList, keyVarList, context);

    BTreeJobGenParams jobGenParams = new BTreeJobGenParams(chosenIndex.getIndexName(), IndexType.BTREE,
            dataset.getDataverseName(), dataset.getDatasetName(), retainInput, retainNull, requiresBroadcast);
    jobGenParams.setLowKeyInclusive(lowKeyInclusive[0]);
    jobGenParams.setHighKeyInclusive(highKeyInclusive[0]);
    jobGenParams.setIsEqCondition(isEqCondition);
    jobGenParams.setLowKeyVarList(keyVarList, 0, numLowKeys);
    jobGenParams.setHighKeyVarList(keyVarList, numLowKeys, numHighKeys);

    ILogicalOperator inputOp = null;
    if (!assignKeyVarList.isEmpty()) {
        // Assign operator that sets the constant secondary-index search-key fields if necessary.
        AssignOperator assignConstantSearchKeys = new AssignOperator(assignKeyVarList, assignKeyExprList);
        // Input to this assign is the EmptyTupleSource (which the dataSourceScan also must have had as input).
        assignConstantSearchKeys.getInputs().add(dataSourceOp.getInputs().get(0));
        assignConstantSearchKeys.setExecutionMode(dataSourceOp.getExecutionMode());
        inputOp = assignConstantSearchKeys;
    } else {
        // All index search keys are variables.
        inputOp = probeSubTree.root;
    }

    UnnestMapOperator secondaryIndexUnnestOp = AccessMethodUtils.createSecondaryIndexUnnestMap(dataset,
            recordType, chosenIndex, inputOp, jobGenParams, context, false, retainInput);

    // Generate the rest of the upstream plan which feeds the search results into the primary index.
    UnnestMapOperator primaryIndexUnnestOp = null;
    boolean isPrimaryIndex = chosenIndex.isPrimaryIndex();
    if (dataset.getDatasetType() == DatasetType.EXTERNAL) {
        // External dataset
        ExternalDataLookupOperator externalDataAccessOp = AccessMethodUtils.createExternalDataLookupUnnestMap(
                dataSourceOp, dataset, recordType, secondaryIndexUnnestOp, context, chosenIndex, retainInput,
                retainNull);
        indexSubTree.dataSourceRef.setValue(externalDataAccessOp);
        return externalDataAccessOp;
    } else if (!isPrimaryIndex) {
        primaryIndexUnnestOp = AccessMethodUtils.createPrimaryIndexUnnestMap(dataSourceOp, dataset, recordType,
                secondaryIndexUnnestOp, context, true, retainInput, retainNull, false);

        // Replace the datasource scan with the new plan rooted at
        // primaryIndexUnnestMap.
        indexSubTree.dataSourceRef.setValue(primaryIndexUnnestOp);
    } else {
        List<Object> primaryIndexOutputTypes = new ArrayList<Object>();
        try {
            AccessMethodUtils.appendPrimaryIndexTypes(dataset, recordType, primaryIndexOutputTypes);
        } catch (IOException e) {
            throw new AlgebricksException(e);
        }
        List<LogicalVariable> scanVariables = dataSourceOp.getVariables();
        primaryIndexUnnestOp = new UnnestMapOperator(scanVariables, secondaryIndexUnnestOp.getExpressionRef(),
                primaryIndexOutputTypes, retainInput);
        primaryIndexUnnestOp.getInputs().add(new MutableObject<ILogicalOperator>(inputOp));

        if (!primaryIndexPostProccessingIsNeeded) {
            List<Mutable<ILogicalExpression>> remainingFuncExprs = new ArrayList<Mutable<ILogicalExpression>>();
            getNewConditionExprs(conditionRef, replacedFuncExprs, remainingFuncExprs);
            // Generate new condition.
            if (!remainingFuncExprs.isEmpty()) {
                ILogicalExpression pulledCond = createSelectCondition(remainingFuncExprs);
                conditionRef.setValue(pulledCond);
            } else {
                conditionRef.setValue(null);
            }
        }

        // Adds equivalence classes --- one equivalent class between a primary key
        // variable and a record field-access expression.
        EquivalenceClassUtils.addEquivalenceClassesForPrimaryIndexAccess(primaryIndexUnnestOp, scanVariables,
                recordType, dataset, context);
    }

    return primaryIndexUnnestOp;
}
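
The two BitSets above (setLowKeys and setHighKeys) only record which key positions have received a bound; comparing cardinality() against the number of keys answers "is every position covered?" in a single call. The idiom in isolation (names mirror the excerpt but the data is invented):

import java.util.BitSet;

final class KeyCoverageSketch {
    public static void main(String[] args) {
        int numSecondaryKeys = 3;
        BitSet setLowKeys = new BitSet(numSecondaryKeys);
        setLowKeys.set(0);
        setLowKeys.set(2);
        // Position 1 has no bound yet, so coverage is incomplete.
        System.out.println(setLowKeys.cardinality() == numSecondaryKeys); // prints false
        setLowKeys.set(1);
        System.out.println(setLowKeys.cardinality() == numSecondaryKeys); // prints true
    }
}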

From source file:com.microsoft.azure.management.datalake.store.uploader.UploadMetadata.java

/**
 * Verifies the given metadata for consistency. Checks include:
 *  Completeness
 *  Existence and consistency with local file
 *  Segment data consistency
 *
 * @throws InvalidMetadataException Thrown if the metadata is invalid.
 */
public void validateConsistency() throws InvalidMetadataException {
    if (this.segments == null || this.segments.length != this.segmentCount) {
        throw new InvalidMetadataException("Inconsistent number of segments");
    }

    long sum = 0;
    int lastSegmentNumber = -1;
    BitSet segments = new BitSet(this.segmentCount);

    for (UploadSegmentMetadata segment : this.segments) {
        if (segment.getSegmentNumber() < 0 || segment.getSegmentNumber() >= this.segmentCount) {
            throw new InvalidMetadataException(MessageFormat.format(
                    "Segment numbers must be at least 0 and less than {0}. Found segment number {1}.",
                    this.segmentCount, segment.getSegmentNumber()));
        }

        if (segment.getSegmentNumber() <= lastSegmentNumber) {
            throw new InvalidMetadataException(MessageFormat.format("Segment number {0} appears out of order.",
                    segment.getSegmentNumber()));
        }

        if (segments.get(segment.getSegmentNumber())) {
            throw new InvalidMetadataException(
                    MessageFormat.format("Segment number {0} appears twice", segment.getSegmentNumber()));
        }

        if (segment.getOffset() != sum) {
            throw new InvalidMetadataException(MessageFormat.format(
                    "Segment number {0} has an invalid starting offset ({1}). Expected {2}.",
                    segment.getSegmentNumber(), segment.getOffset(), sum));
        }

        segments.set(segment.getSegmentNumber());
        sum += segment.getLength();
        lastSegmentNumber = segment.getSegmentNumber();
    }

    if (sum != this.fileLength) {
        throw new InvalidMetadataException(
                "The individual segment lengths do not add up to the input file length");
    }
}
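
validateConsistency uses the BitSet for constant-time duplicate detection: a get() before the set() flags any segment number seen twice. The same idiom in isolation (the segment numbers are invented):

import java.util.BitSet;

final class DuplicateSegmentSketch {
    public static void main(String[] args) {
        int[] segmentNumbers = { 0, 1, 2, 1 }; // invented; segment 1 repeats
        BitSet seen = new BitSet(segmentNumbers.length);
        for (int s : segmentNumbers) {
            if (seen.get(s)) {
                System.out.println("Segment " + s + " appears twice"); // fires for 1
            }
            seen.set(s);
        }
    }
}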

From source file:com.turn.ttorrent.client.TorrentHandler.java

/**
 * Build this torrent's pieces array.
 *
 * <p>
 * Hash and verify any potentially present local data and create this
 * torrent's pieces array from their respective hash provided in the
 * torrent meta-info.
 * </p>
 *
 * <p>
 * This function should be called soon after the constructor to initialize
 * the pieces array.
 * </p>
 */
@VisibleForTesting
/* pp */ void init() throws InterruptedException, IOException {
    {
        State s = getState();
        if (s != State.WAITING) {
            LOG.info("Restarting torrent from state " + s);
            return;
        }
    }
    setState(State.VALIDATING);

    try {
        int npieces = torrent.getPieceCount();

        long size = getSize();
        // Store in a local so we can update with minimal synchronization.
        BitSet completedPieces = new BitSet(npieces);
        long completedSize = 0;

        ThreadPoolExecutor executor = client.getEnvironment().getExecutorService();
        // TorrentCreator.newExecutor("TorrentHandlerInit");
        try {
            LOG.info("{}: Analyzing local data for {} ({} pieces)...",
                    new Object[] { getLocalPeerName(), getName(), npieces });

            int step = 10;
            CountDownLatch latch = new CountDownLatch(npieces);
            for (int index = 0; index < npieces; index++) {
                // TODO: Read the file sequentially and pass it to the validator.
                // Otherwise we thrash the disk on validation.
                ByteBuffer buffer = ByteBuffer.allocate(getPieceLength(index));
                bucket.read(buffer, getPieceOffset(index));
                buffer.flip();
                executor.execute(new PieceValidator(torrent, index, buffer, completedPieces, latch));

                if (index / (float) npieces * 100f > step) {
                    LOG.info("{}:  ... {}% complete", getLocalPeerName(), step);
                    step += 10;
                }
            }
            latch.await();

            for (int i = completedPieces.nextSetBit(0); i >= 0; i = completedPieces.nextSetBit(i + 1)) {
                completedSize += getPieceLength(i);
            }
        } finally {
            // Request orderly executor shutdown and wait for hashing tasks to
            // complete.
            // executor.shutdown();
            // executor.awaitTermination(1, TimeUnit.SECONDS);
        }

        LOG.debug("{}: {}: we have {}/{} bytes ({}%) [{}/{} pieces].",
                new Object[] { getLocalPeerName(), getName(), completedSize, size,
                        String.format("%.1f", (100f * (completedSize / (float) size))),
                        completedPieces.cardinality(), getPieceCount() });

        synchronized (lock) {
            this.completedPieces = completedPieces;
        }

        if (isComplete())
            setState(State.SEEDING);
        else
            setState(State.SHARING);
    } catch (Exception e) {
        setState(State.ERROR);
        Throwables.propagateIfPossible(e, InterruptedException.class, IOException.class);
        throw Throwables.propagate(e);
    }
}
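
init() totals the completed bytes by walking set bits with nextSetBit, the standard way to iterate a BitSet without touching every index. In isolation (piece sizes invented):

import java.util.BitSet;

final class SetBitIterationSketch {
    public static void main(String[] args) {
        int[] pieceLength = { 10, 10, 10, 10, 10, 10, 10, 5 }; // invented sizes
        BitSet completedPieces = new BitSet(pieceLength.length);
        completedPieces.set(0);
        completedPieces.set(3);
        completedPieces.set(7);
        long completedSize = 0;
        for (int i = completedPieces.nextSetBit(0); i >= 0; i = completedPieces.nextSetBit(i + 1)) {
            completedSize += pieceLength[i];
        }
        System.out.println(completedSize); // prints 25
    }
}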

From source file:org.apache.hyracks.control.cc.executor.ActivityClusterPlanner.java

private TaskCluster[] buildConnectorPolicyAwareTaskClusters(ActivityCluster ac,
        Map<ActivityId, ActivityPlan> activityPlanMap,
        Map<TaskId, List<Pair<TaskId, ConnectorDescriptorId>>> taskConnectivity) {
    Map<TaskId, Set<TaskId>> taskClusterMap = new HashMap<>();
    for (ActivityId anId : ac.getActivityMap().keySet()) {
        ActivityPlan ap = activityPlanMap.get(anId);
        Task[] tasks = ap.getTasks();
        for (Task t : tasks) {
            Set<TaskId> cluster = new HashSet<>();
            TaskId tid = t.getTaskId();
            cluster.add(tid);
            taskClusterMap.put(tid, cluster);
        }
    }

    JobRun jobRun = executor.getJobRun();
    Map<ConnectorDescriptorId, IConnectorPolicy> connectorPolicies = jobRun.getConnectorPolicyMap();
    for (Map.Entry<TaskId, List<Pair<TaskId, ConnectorDescriptorId>>> e : taskConnectivity.entrySet()) {
        Set<TaskId> cluster = taskClusterMap.get(e.getKey());
        for (Pair<TaskId, ConnectorDescriptorId> p : e.getValue()) {
            IConnectorPolicy cPolicy = connectorPolicies.get(p.getRight());
            if (cPolicy.requiresProducerConsumerCoscheduling()) {
                cluster.add(p.getLeft());
            }
        }
    }

    /*
     * We compute the transitive closure of this (producer-consumer) relation to find the largest set of
     * tasks that need to be co-scheduled.
     */
    int counter = 0;
    TaskId[] ordinalList = new TaskId[taskClusterMap.size()];
    Map<TaskId, Integer> ordinalMap = new HashMap<>();
    for (TaskId tid : taskClusterMap.keySet()) {
        ordinalList[counter] = tid;
        ordinalMap.put(tid, counter);
        ++counter;
    }

    int n = ordinalList.length;
    BitSet[] paths = new BitSet[n];
    for (Map.Entry<TaskId, Set<TaskId>> e : taskClusterMap.entrySet()) {
        int i = ordinalMap.get(e.getKey());
        BitSet bsi = paths[i];
        if (bsi == null) {
            bsi = new BitSet(n);
            paths[i] = bsi;
        }
        for (TaskId ttid : e.getValue()) {
            int j = ordinalMap.get(ttid);
            paths[i].set(j);
            BitSet bsj = paths[j];
            if (bsj == null) {
                bsj = new BitSet(n);
                paths[j] = bsj;
            }
            bsj.set(i);
        }
    }
    for (int k = 0; k < n; ++k) {
        for (int i = paths[k].nextSetBit(0); i >= 0; i = paths[k].nextSetBit(i + 1)) {
            for (int j = paths[i].nextClearBit(0); j < n && j >= 0; j = paths[i].nextClearBit(j + 1)) {
                paths[i].set(j, paths[k].get(j));
                paths[j].set(i, paths[i].get(j));
            }
        }
    }
    BitSet pending = new BitSet(n);
    pending.set(0, n);
    List<List<TaskId>> clusters = new ArrayList<>();
    for (int i = pending.nextSetBit(0); i >= 0; i = pending.nextSetBit(i)) {
        List<TaskId> cluster = new ArrayList<>();
        for (int j = paths[i].nextSetBit(0); j >= 0; j = paths[i].nextSetBit(j + 1)) {
            cluster.add(ordinalList[j]);
            pending.clear(j);
        }
        clusters.add(cluster);
    }

    List<TaskCluster> tcSet = new ArrayList<>();
    counter = 0;
    for (List<TaskId> cluster : clusters) {
        List<Task> taskStates = new ArrayList<>();
        for (TaskId tid : cluster) {
            taskStates.add(activityPlanMap.get(tid.getActivityId()).getTasks()[tid.getPartition()]);
        }
        TaskCluster tc = new TaskCluster(new TaskClusterId(ac.getId(), counter++), ac,
                taskStates.toArray(new Task[taskStates.size()]));
        tcSet.add(tc);
        for (TaskId tid : cluster) {
            activityPlanMap.get(tid.getActivityId()).getTasks()[tid.getPartition()].setTaskCluster(tc);
        }
    }
    return tcSet.toArray(new TaskCluster[tcSet.size()]);
}
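
The planner above treats BitSet rows as an adjacency matrix, closes the relation transitively, then peels connected components off a pending set. A compact sketch of that flow (edges are invented; the closure here uses the classic Warshall bit-row OR rather than the exact loop above):

import java.util.BitSet;

public class CoScheduleClosureSketch {
    public static void main(String[] args) {
        int n = 5;
        BitSet[] paths = new BitSet[n];
        for (int i = 0; i < n; i++) {
            paths[i] = new BitSet(n);
            paths[i].set(i); // every task reaches itself
        }
        link(paths, 0, 1); // invented co-scheduling edges: component {0,1,2}
        link(paths, 1, 2);
        link(paths, 3, 4); // and component {3,4}
        // Warshall-style transitive closure over the bit matrix.
        for (int k = 0; k < n; k++) {
            for (int i = 0; i < n; i++) {
                if (paths[i].get(k)) {
                    paths[i].or(paths[k]);
                }
            }
        }
        // Peel components off a pending set, as the planner above does.
        BitSet pending = new BitSet(n);
        pending.set(0, n);
        for (int i = pending.nextSetBit(0); i >= 0; i = pending.nextSetBit(i)) {
            System.out.println(paths[i]); // prints {0, 1, 2} then {3, 4}
            pending.andNot(paths[i]);     // clear the whole component at once
        }
    }

    private static void link(BitSet[] paths, int a, int b) {
        paths[a].set(b);
        paths[b].set(a);
    }
}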

From source file:org.apache.asterix.optimizer.rules.am.BTreeAccessMethod.java

@Override
public ILogicalOperator createSecondaryToPrimaryPlan(Mutable<ILogicalExpression> conditionRef,
        OptimizableOperatorSubTree indexSubTree, OptimizableOperatorSubTree probeSubTree, Index chosenIndex,
        AccessMethodAnalysisContext analysisCtx, boolean retainInput, boolean retainNull,
        boolean requiresBroadcast, IOptimizationContext context) throws AlgebricksException {
    Dataset dataset = indexSubTree.getDataset();
    ARecordType recordType = indexSubTree.getRecordType();
    ARecordType metaRecordType = indexSubTree.getMetaRecordType();
    // we made sure indexSubTree has a datasource scan
    AbstractDataSourceOperator dataSourceOp = (AbstractDataSourceOperator) indexSubTree.getDataSourceRef()
            .getValue();
    List<Pair<Integer, Integer>> exprAndVarList = analysisCtx.indexExprsAndVars.get(chosenIndex);
    List<IOptimizableFuncExpr> matchedFuncExprs = analysisCtx.matchedFuncExprs;
    int numSecondaryKeys = analysisCtx.indexNumMatchedKeys.get(chosenIndex);
    // List of function expressions that will be replaced by the secondary-index search.
    // These func exprs will be removed from the select condition at the very end of this method.
    Set<ILogicalExpression> replacedFuncExprs = new HashSet<>();

    // Info on high and low keys for the BTree search predicate.
    ILogicalExpression[] lowKeyExprs = new ILogicalExpression[numSecondaryKeys];
    ILogicalExpression[] highKeyExprs = new ILogicalExpression[numSecondaryKeys];
    LimitType[] lowKeyLimits = new LimitType[numSecondaryKeys];
    LimitType[] highKeyLimits = new LimitType[numSecondaryKeys];
    boolean[] lowKeyInclusive = new boolean[numSecondaryKeys];
    boolean[] highKeyInclusive = new boolean[numSecondaryKeys];
    ILogicalExpression[] constantAtRuntimeExpressions = new ILogicalExpression[numSecondaryKeys];
    LogicalVariable[] constAtRuntimeExprVars = new LogicalVariable[numSecondaryKeys];

    /* TODO: For now we don't do any sophisticated analysis of the func exprs to come up with "the best" range
     * predicate. If we can't figure out how to integrate a certain funcExpr into the current predicate,
     * we just bail by setting this flag.*/
    boolean couldntFigureOut = false;
    boolean doneWithExprs = false;
    boolean isEqCondition = false;
    BitSet setLowKeys = new BitSet(numSecondaryKeys);
    BitSet setHighKeys = new BitSet(numSecondaryKeys);
    // Go through the func exprs listed as optimizable by the chosen index,
    // and formulate a range predicate on the secondary-index keys.

    // Checks whether a type cast happened from a real (FLOAT, DOUBLE) value to an INT value,
    // since we have rounding issues when dealing with the LT(<) or GT(>) operators.
    boolean realTypeConvertedToIntegerType;

    for (Pair<Integer, Integer> exprIndex : exprAndVarList) {
        // Position of the field of matchedFuncExprs.get(exprIndex) in the chosen index's indexed exprs.
        IOptimizableFuncExpr optFuncExpr = matchedFuncExprs.get(exprIndex.first);
        int keyPos = indexOf(optFuncExpr.getFieldName(0), chosenIndex.getKeyFieldNames());
        if (keyPos < 0 && optFuncExpr.getNumLogicalVars() > 1) {
            // If we are optimizing a join, the matching field may be the second field name.
            keyPos = indexOf(optFuncExpr.getFieldName(1), chosenIndex.getKeyFieldNames());
        }
        if (keyPos < 0) {
            throw new AlgebricksException(
                    "Could not match optimizable function expression to any index field name.");
        }
        Pair<ILogicalExpression, Boolean> returnedSearchKeyExpr = AccessMethodUtils
                .createSearchKeyExpr(optFuncExpr, indexSubTree, probeSubTree);
        ILogicalExpression searchKeyExpr = returnedSearchKeyExpr.first;
        if (searchKeyExpr.getExpressionTag() == LogicalExpressionTag.FUNCTION_CALL) {
            constantAtRuntimeExpressions[keyPos] = searchKeyExpr;
            constAtRuntimeExprVars[keyPos] = context.newVar();
            searchKeyExpr = new VariableReferenceExpression(constAtRuntimeExprVars[keyPos]);

        }
        realTypeConvertedToIntegerType = returnedSearchKeyExpr.second;

        LimitType limit = getLimitType(optFuncExpr, probeSubTree);

        // If a DOUBLE or FLOAT constant is converted to an INT type value,
        // we need to check a corner case where an INT value lies between the two real values.
        // For example, for the following query,
        //
        // for $emp in dataset empDataset
        // where $emp.age > double("2.3") and $emp.age < double("3.3")
        // return $emp.id
        //
        // the query should return tuples whose age is 3, since 3 satisfies the condition;
        // however, candidate search fails after the fractional parts are truncated
        // (there is no INT strictly greater than 2 and less than 3).
        //
        // Therefore, we convert LT(<) to LE(<=) and GT(>) to GE(>=) to find candidates.
        // This does not change the result of an actual comparison since this conversion is only applied
        // for finding candidates from an index.
        //
        if (realTypeConvertedToIntegerType) {
            if (limit == LimitType.HIGH_EXCLUSIVE) {
                limit = LimitType.HIGH_INCLUSIVE;
            } else if (limit == LimitType.LOW_EXCLUSIVE) {
                limit = LimitType.LOW_INCLUSIVE;
            }
        }

        switch (limit) {
        case EQUAL: {
            if (lowKeyLimits[keyPos] == null && highKeyLimits[keyPos] == null) {
                lowKeyLimits[keyPos] = highKeyLimits[keyPos] = limit;
                lowKeyInclusive[keyPos] = highKeyInclusive[keyPos] = true;
                lowKeyExprs[keyPos] = highKeyExprs[keyPos] = searchKeyExpr;
                setLowKeys.set(keyPos);
                setHighKeys.set(keyPos);
                isEqCondition = true;
            } else {
                // Has already been set to the identical values.
                // When optimizing join we may encounter the same optimizable expression twice
                // (once from analyzing each side of the join)
                if (lowKeyLimits[keyPos] == limit && lowKeyInclusive[keyPos] == true
                        && lowKeyExprs[keyPos].equals(searchKeyExpr) && highKeyLimits[keyPos] == limit
                        && highKeyInclusive[keyPos] == true && highKeyExprs[keyPos].equals(searchKeyExpr)) {
                    isEqCondition = true;
                    break;
                }
                couldntFigureOut = true;
            }
            // TODO: For now don't consider prefix searches.
            // If high and low keys are set, we exit for now.
            if (setLowKeys.cardinality() == numSecondaryKeys && setHighKeys.cardinality() == numSecondaryKeys) {
                doneWithExprs = true;
            }
            break;
        }
        case HIGH_EXCLUSIVE: {
            if (highKeyLimits[keyPos] == null || (highKeyLimits[keyPos] != null && highKeyInclusive[keyPos])) {
                highKeyLimits[keyPos] = limit;
                highKeyExprs[keyPos] = searchKeyExpr;
                highKeyInclusive[keyPos] = false;
            } else {
                // Has already been set to the identical values. When optimizing join we may encounter the
                // same optimizable expression twice
                // (once from analyzing each side of the join)
                if (highKeyLimits[keyPos] == limit && highKeyInclusive[keyPos] == false
                        && highKeyExprs[keyPos].equals(searchKeyExpr)) {
                    break;
                }
                couldntFigureOut = true;
                doneWithExprs = true;
            }
            break;
        }
        case HIGH_INCLUSIVE: {
            if (highKeyLimits[keyPos] == null) {
                highKeyLimits[keyPos] = limit;
                highKeyExprs[keyPos] = searchKeyExpr;
                highKeyInclusive[keyPos] = true;
            } else {
                // Has already been set to the identical values. When optimizing join we may encounter the
                // same optimizable expression twice
                // (once from analyzing each side of the join)
                if (highKeyLimits[keyPos] == limit && highKeyInclusive[keyPos] == true
                        && highKeyExprs[keyPos].equals(searchKeyExpr)) {
                    break;
                }
                couldntFigureOut = true;
                doneWithExprs = true;
            }
            break;
        }
        case LOW_EXCLUSIVE: {
            if (lowKeyLimits[keyPos] == null || (lowKeyLimits[keyPos] != null && lowKeyInclusive[keyPos])) {
                lowKeyLimits[keyPos] = limit;
                lowKeyExprs[keyPos] = searchKeyExpr;
                lowKeyInclusive[keyPos] = false;
            } else {
                // Has already been set to the identical values. When optimizing join we may encounter the
                // same optimizable expression twice
                // (once from analyzing each side of the join)
                if (lowKeyLimits[keyPos] == limit && lowKeyInclusive[keyPos] == false
                        && lowKeyExprs[keyPos].equals(searchKeyExpr)) {
                    break;
                }
                couldntFigureOut = true;
                doneWithExprs = true;
            }
            break;
        }
        case LOW_INCLUSIVE: {
            if (lowKeyLimits[keyPos] == null) {
                lowKeyLimits[keyPos] = limit;
                lowKeyExprs[keyPos] = searchKeyExpr;
                lowKeyInclusive[keyPos] = true;
            } else {
                // Has already been set to the identical values. When optimizing join we may encounter the
                // same optimizable expression twice
                // (once from analyzing each side of the join)
                if (lowKeyLimits[keyPos] == limit && lowKeyInclusive[keyPos] == true
                        && lowKeyExprs[keyPos].equals(searchKeyExpr)) {
                    break;
                }
                couldntFigureOut = true;
                doneWithExprs = true;
            }
            break;
        }
        default: {
            throw new IllegalStateException();
        }
        }
        if (!couldntFigureOut) {
            // Remember to remove this funcExpr later.
            replacedFuncExprs.add(matchedFuncExprs.get(exprIndex.first).getFuncExpr());
        }
        if (doneWithExprs) {
            break;
        }
    }
    if (couldntFigureOut) {
        return null;
    }

    // If the select condition contains mixed open/closed intervals on multiple keys, then we make all intervals
    // closed to obtain a superset of answers and leave the original selection in place.
    boolean primaryIndexPostProccessingIsNeeded = false;
    for (int i = 1; i < numSecondaryKeys; ++i) {
        if (lowKeyInclusive[i] != lowKeyInclusive[0]) {
            Arrays.fill(lowKeyInclusive, true);
            primaryIndexPostProccessingIsNeeded = true;
            break;
        }
    }
    for (int i = 1; i < numSecondaryKeys; ++i) {
        if (highKeyInclusive[i] != highKeyInclusive[0]) {
            Arrays.fill(highKeyInclusive, true);
            primaryIndexPostProccessingIsNeeded = true;
            break;
        }
    }

    // determine cases when prefix search could be applied
    for (int i = 1; i < lowKeyExprs.length; i++) {
        if (lowKeyLimits[0] == null && lowKeyLimits[i] != null
                || lowKeyLimits[0] != null && lowKeyLimits[i] == null
                || highKeyLimits[0] == null && highKeyLimits[i] != null
                || highKeyLimits[0] != null && highKeyLimits[i] == null) {
            numSecondaryKeys--;
            primaryIndexPostProccessingIsNeeded = true;
        }
    }
    if (lowKeyLimits[0] == null) {
        lowKeyInclusive[0] = true;
    }
    if (highKeyLimits[0] == null) {
        highKeyInclusive[0] = true;
    }

    // Here we generate vars and funcs for assigning the secondary-index keys to be fed into the secondary-index
    // search.
    // List of variables for the assign.
    ArrayList<LogicalVariable> keyVarList = new ArrayList<LogicalVariable>();
    // List of variables and expressions for the assign.
    ArrayList<LogicalVariable> assignKeyVarList = new ArrayList<LogicalVariable>();
    ArrayList<Mutable<ILogicalExpression>> assignKeyExprList = new ArrayList<Mutable<ILogicalExpression>>();
    int numLowKeys = createKeyVarsAndExprs(numSecondaryKeys, lowKeyLimits, lowKeyExprs, assignKeyVarList,
            assignKeyExprList, keyVarList, context, constantAtRuntimeExpressions, constAtRuntimeExprVars);
    int numHighKeys = createKeyVarsAndExprs(numSecondaryKeys, highKeyLimits, highKeyExprs, assignKeyVarList,
            assignKeyExprList, keyVarList, context, constantAtRuntimeExpressions, constAtRuntimeExprVars);

    BTreeJobGenParams jobGenParams = new BTreeJobGenParams(chosenIndex.getIndexName(), IndexType.BTREE,
            dataset.getDataverseName(), dataset.getDatasetName(), retainInput, requiresBroadcast);
    jobGenParams.setLowKeyInclusive(lowKeyInclusive[0]);
    jobGenParams.setHighKeyInclusive(highKeyInclusive[0]);
    jobGenParams.setIsEqCondition(isEqCondition);
    jobGenParams.setLowKeyVarList(keyVarList, 0, numLowKeys);
    jobGenParams.setHighKeyVarList(keyVarList, numLowKeys, numHighKeys);

    ILogicalOperator inputOp = null;
    if (!assignKeyVarList.isEmpty()) {
        // Assign operator that sets the constant secondary-index search-key fields if necessary.
        AssignOperator assignConstantSearchKeys = new AssignOperator(assignKeyVarList, assignKeyExprList);
        // Input to this assign is the EmptyTupleSource (which the dataSourceScan also must have had as input).
        assignConstantSearchKeys.getInputs().add(new MutableObject<ILogicalOperator>(
                OperatorManipulationUtil.deepCopy(dataSourceOp.getInputs().get(0).getValue())));
        assignConstantSearchKeys.setExecutionMode(dataSourceOp.getExecutionMode());
        inputOp = assignConstantSearchKeys;
    } else {
        // All index search keys are variables.
        inputOp = probeSubTree.getRoot();
    }

    ILogicalOperator secondaryIndexUnnestOp = AccessMethodUtils.createSecondaryIndexUnnestMap(dataset,
            recordType, metaRecordType, chosenIndex, inputOp, jobGenParams, context, false, retainInput,
            retainNull);

    // Generate the rest of the upstream plan which feeds the search results into the primary index.
    AbstractUnnestMapOperator primaryIndexUnnestOp = null;

    boolean isPrimaryIndex = chosenIndex.isPrimaryIndex();
    if (dataset.getDatasetType() == DatasetType.EXTERNAL) {
        // External dataset
        UnnestMapOperator externalDataAccessOp = AccessMethodUtils.createExternalDataLookupUnnestMap(
                dataSourceOp, dataset, recordType, secondaryIndexUnnestOp, context, chosenIndex, retainInput,
                retainNull);
        indexSubTree.getDataSourceRef().setValue(externalDataAccessOp);
        return externalDataAccessOp;
    } else if (!isPrimaryIndex) {
        primaryIndexUnnestOp = AccessMethodUtils.createPrimaryIndexUnnestMap(dataSourceOp, dataset, recordType,
                metaRecordType, secondaryIndexUnnestOp, context, true, retainInput, retainNull, false);

        // Adds equivalence classes --- one equivalent class between a primary key
        // variable and a record field-access expression.
        EquivalenceClassUtils.addEquivalenceClassesForPrimaryIndexAccess(primaryIndexUnnestOp,
                dataSourceOp.getVariables(), recordType, metaRecordType, dataset, context);
    } else {
        List<Object> primaryIndexOutputTypes = new ArrayList<Object>();
        AccessMethodUtils.appendPrimaryIndexTypes(dataset, recordType, metaRecordType, primaryIndexOutputTypes);
        List<LogicalVariable> scanVariables = dataSourceOp.getVariables();

        // Checks whether the primary index search can replace the given
        // SELECT condition.
        // If so, condition will be set to null and eventually the SELECT
        // operator will be removed.
        // If not, we create a new condition based on remaining ones.
        if (!primaryIndexPostProccessingIsNeeded) {
            List<Mutable<ILogicalExpression>> remainingFuncExprs = new ArrayList<Mutable<ILogicalExpression>>();
            getNewConditionExprs(conditionRef, replacedFuncExprs, remainingFuncExprs);
            // Generate new condition.
            if (!remainingFuncExprs.isEmpty()) {
                ILogicalExpression pulledCond = createSelectCondition(remainingFuncExprs);
                conditionRef.setValue(pulledCond);
            } else {
                conditionRef.setValue(null);
            }
        }

        // Checks whether a LEFT_OUTER_UNNESTMAP operator is required.
        boolean leftOuterUnnestMapRequired = retainNull && retainInput;

        if (conditionRef.getValue() != null) {
            // The job gen parameters are transferred to the actual job gen
            // via the UnnestMapOperator's function arguments.
            List<Mutable<ILogicalExpression>> primaryIndexFuncArgs = new ArrayList<Mutable<ILogicalExpression>>();
            jobGenParams.writeToFuncArgs(primaryIndexFuncArgs);
            // An index search is expressed as an unnest-map over an
            // index-search function.
            IFunctionInfo primaryIndexSearch = FunctionUtil
                    .getFunctionInfo(AsterixBuiltinFunctions.INDEX_SEARCH);
            UnnestingFunctionCallExpression primaryIndexSearchFunc = new UnnestingFunctionCallExpression(
                    primaryIndexSearch, primaryIndexFuncArgs);
            primaryIndexSearchFunc.setReturnsUniqueValues(true);
            if (!leftOuterUnnestMapRequired) {
                primaryIndexUnnestOp = new UnnestMapOperator(scanVariables,
                        new MutableObject<ILogicalExpression>(primaryIndexSearchFunc), primaryIndexOutputTypes,
                        retainInput);
            } else {
                primaryIndexUnnestOp = new LeftOuterUnnestMapOperator(scanVariables,
                        new MutableObject<ILogicalExpression>(primaryIndexSearchFunc), primaryIndexOutputTypes,
                        true);
            }
        } else {
            if (!leftOuterUnnestMapRequired) {
                primaryIndexUnnestOp = new UnnestMapOperator(scanVariables,
                        ((UnnestMapOperator) secondaryIndexUnnestOp).getExpressionRef(),
                        primaryIndexOutputTypes, retainInput);
            } else {
                primaryIndexUnnestOp = new LeftOuterUnnestMapOperator(scanVariables,
                        ((LeftOuterUnnestMapOperator) secondaryIndexUnnestOp).getExpressionRef(),
                        primaryIndexOutputTypes, true);
            }
        }

        primaryIndexUnnestOp.getInputs().add(new MutableObject<ILogicalOperator>(inputOp));

        // Adds equivalence classes --- one equivalent class between a primary key
        // variable and a record field-access expression.
        EquivalenceClassUtils.addEquivalenceClassesForPrimaryIndexAccess(primaryIndexUnnestOp, scanVariables,
                recordType, metaRecordType, dataset, context);
    }

    return primaryIndexUnnestOp;
}

From source file:edu.uci.ics.hyracks.control.cc.scheduler.ActivityClusterPlanner.java

private TaskCluster[] buildConnectorPolicyAwareTaskClusters(ActivityCluster ac,
        Map<ActivityId, ActivityPlan> activityPlanMap,
        Map<TaskId, List<Pair<TaskId, ConnectorDescriptorId>>> taskConnectivity) {
    Map<TaskId, Set<TaskId>> taskClusterMap = new HashMap<TaskId, Set<TaskId>>();
    for (ActivityId anId : ac.getActivityMap().keySet()) {
        ActivityPlan ap = activityPlanMap.get(anId);
        Task[] tasks = ap.getTasks();
        for (Task t : tasks) {
            Set<TaskId> cluster = new HashSet<TaskId>();
            TaskId tid = t.getTaskId();
            cluster.add(tid);
            taskClusterMap.put(tid, cluster);
        }
    }

    JobRun jobRun = scheduler.getJobRun();
    Map<ConnectorDescriptorId, IConnectorPolicy> connectorPolicies = jobRun.getConnectorPolicyMap();
    for (Map.Entry<TaskId, List<Pair<TaskId, ConnectorDescriptorId>>> e : taskConnectivity.entrySet()) {
        Set<TaskId> cluster = taskClusterMap.get(e.getKey());
        for (Pair<TaskId, ConnectorDescriptorId> p : e.getValue()) {
            IConnectorPolicy cPolicy = connectorPolicies.get(p.getRight());
            if (cPolicy.requiresProducerConsumerCoscheduling()) {
                cluster.add(p.getLeft());
            }
        }
    }

    /*
     * taskClusterMap contains for every TID x, x -> { coscheduled consumer TIDs U x }
     * We compute the transitive closure of this relation to find the largest set of
     * tasks that need to be co-scheduled
     */
    int counter = 0;
    TaskId[] ordinalList = new TaskId[taskClusterMap.size()];
    Map<TaskId, Integer> ordinalMap = new HashMap<TaskId, Integer>();
    for (TaskId tid : taskClusterMap.keySet()) {
        ordinalList[counter] = tid;
        ordinalMap.put(tid, counter);
        ++counter;
    }

    int n = ordinalList.length;
    BitSet[] paths = new BitSet[n];
    for (Map.Entry<TaskId, Set<TaskId>> e : taskClusterMap.entrySet()) {
        int i = ordinalMap.get(e.getKey());
        BitSet bsi = paths[i];
        if (bsi == null) {
            bsi = new BitSet(n);
            paths[i] = bsi;
        }
        for (TaskId ttid : e.getValue()) {
            int j = ordinalMap.get(ttid);
            paths[i].set(j);
            BitSet bsj = paths[j];
            if (bsj == null) {
                bsj = new BitSet(n);
                paths[j] = bsj;
            }
            bsj.set(i);
        }
    }
    for (int k = 0; k < n; ++k) {
        for (int i = paths[k].nextSetBit(0); i >= 0; i = paths[k].nextSetBit(i + 1)) {
            for (int j = paths[i].nextClearBit(0); j < n && j >= 0; j = paths[i].nextClearBit(j + 1)) {
                paths[i].set(j, paths[k].get(j));
                paths[j].set(i, paths[i].get(j));
            }
        }
    }
    BitSet pending = new BitSet(n);
    pending.set(0, n);
    List<List<TaskId>> clusters = new ArrayList<List<TaskId>>();
    for (int i = pending.nextSetBit(0); i >= 0; i = pending.nextSetBit(i)) {
        List<TaskId> cluster = new ArrayList<TaskId>();
        for (int j = paths[i].nextSetBit(0); j >= 0; j = paths[i].nextSetBit(j + 1)) {
            cluster.add(ordinalList[j]);
            pending.clear(j);
        }
        clusters.add(cluster);
    }

    List<TaskCluster> tcSet = new ArrayList<TaskCluster>();
    counter = 0;
    for (List<TaskId> cluster : clusters) {
        List<Task> taskStates = new ArrayList<Task>();
        for (TaskId tid : cluster) {
            taskStates.add(activityPlanMap.get(tid.getActivityId()).getTasks()[tid.getPartition()]);
        }
        TaskCluster tc = new TaskCluster(new TaskClusterId(ac.getId(), counter++), ac,
                taskStates.toArray(new Task[taskStates.size()]));
        tcSet.add(tc);
        for (TaskId tid : cluster) {
            activityPlanMap.get(tid.getActivityId()).getTasks()[tid.getPartition()].setTaskCluster(tc);
        }
    }
    TaskCluster[] taskClusters = tcSet.toArray(new TaskCluster[tcSet.size()]);
    return taskClusters;
}

From source file:org.apache.tez.runtime.library.common.sort.impl.dflt.TestDefaultSorter.java

public void testEmptyCaseFileLengthsHelper(int numPartitions, String[] keys, String[] values)
        throws IOException {
    OutputContext context = createTezOutputContext();

    MemoryUpdateCallbackHandler handler = new MemoryUpdateCallbackHandler();
    conf.setInt(TezRuntimeConfiguration.TEZ_RUNTIME_IO_SORT_MB, 1);
    context.requestInitialMemory(
            ExternalSorter.getInitialMemoryRequirement(conf, context.getTotalMemoryAvailableToTask()), handler);
    String auxService = conf.get(TezConfiguration.TEZ_AM_SHUFFLE_AUXILIARY_SERVICE_ID,
            TezConfiguration.TEZ_AM_SHUFFLE_AUXILIARY_SERVICE_ID_DEFAULT);
    SorterWrapper sorterWrapper = new SorterWrapper(context, conf, numPartitions, handler.getMemoryAssigned());
    DefaultSorter sorter = sorterWrapper.getSorter();
    assertEquals("Key and Values must have the same number of elements", keys.length, values.length);
    BitSet keyRLEs = new BitSet(keys.length);
    for (int i = 0; i < keys.length; i++) {
        boolean isRLE = sorterWrapper.writeKeyValue(new Text(keys[i]), new Text(values[i]));
        keyRLEs.set(i, isRLE);
    }
    sorterWrapper.close();

    List<Event> events = new ArrayList<>();
    String pathComponent = (context.getUniqueIdentifier() + "_" + 0);
    ShuffleUtils.generateEventOnSpill(events, true, true, context, 0, sorter.indexCacheList.get(0), 0, true,
            pathComponent, sorter.getPartitionStats(), sorter.reportDetailedPartitionStats(), auxService,
            TezCommonUtils.newBestCompressionDeflater());

    CompositeDataMovementEvent compositeDataMovementEvent = (CompositeDataMovementEvent) events.get(1);
    ByteBuffer bb = compositeDataMovementEvent.getUserPayload();
    ShuffleUserPayloads.DataMovementEventPayloadProto shufflePayload = ShuffleUserPayloads.DataMovementEventPayloadProto
            .parseFrom(ByteString.copyFrom(bb));

    if (shufflePayload.hasEmptyPartitions()) {
        byte[] emptyPartitionsBytesString = TezCommonUtils
                .decompressByteStringToByteArray(shufflePayload.getEmptyPartitions());
        BitSet emptyPartitionBitSet = TezUtilsInternal.fromByteArray(emptyPartitionsBytesString);
        Assert.assertEquals("Number of empty partitions did not match!", emptyPartitionBitSet.cardinality(),
                sorterWrapper.getEmptyPartitionsCount());
    } else {
        Assert.assertEquals(sorterWrapper.getEmptyPartitionsCount(), 0);
    }
    // Each non-empty partition adds 4 bytes for header, 2 bytes for EOF_MARKER, 4 bytes for checksum
    int expectedFileOutLength = sorterWrapper.getNonEmptyPartitionsCount() * 10;
    for (int i = 0; i < keys.length; i++) {
        // Each Record adds 1 byte for key length, 1 byte Text overhead (length), key.length bytes for key
        expectedFileOutLength += keys[i].length() + 2;
        // Each Record adds 1 byte for value length, 1 byte Text overhead (length), value.length bytes for value
        expectedFileOutLength += values[i].length() + 2;
    }
    assertEquals("Unexpected Output File Size!", localFs.getFileStatus(sorter.getFinalOutputFile()).getLen(),
            expectedFileOutLength);
    assertEquals(sorter.getNumSpills(), 1);
    verifyCounters(sorter, context);
}
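
The test round-trips its empty-partition BitSet through a compressed byte string via Tez helpers (TezUtilsInternal.fromByteArray). With plain JDK calls the uncompressed round trip looks like this (a sketch only; Tez uses its own helpers and layers compression on top):

import java.util.BitSet;

final class BitSetBytesSketch {
    public static void main(String[] args) {
        BitSet emptyPartitions = new BitSet(8);
        emptyPartitions.set(2);
        emptyPartitions.set(5);
        byte[] wire = emptyPartitions.toByteArray();         // little-endian bit packing
        BitSet decoded = BitSet.valueOf(wire);               // inverse of toByteArray
        System.out.println(decoded.cardinality());           // prints 2
        System.out.println(decoded.equals(emptyPartitions)); // prints true
    }
}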