Example usage for java.util BitSet nextSetBit

List of usage examples for java.util BitSet nextSetBit

Introduction

On this page you can find usage examples for java.util.BitSet nextSetBit.

Prototype

public int nextSetBit(int fromIndex) 

Document

Returns the index of the first bit that is set to true that occurs on or after the specified starting index. If no such bit exists, -1 is returned.

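Every example below relies on the same iteration idiom: ask for the first set bit at index 0, then repeatedly ask for the next set bit after the previous hit until -1 comes back. Here is a minimal, self-contained sketch of that idiom; the class name and bit indices are chosen purely for illustration:

import java.util.BitSet;

public class NextSetBitDemo {
    public static void main(String[] args) {
        BitSet bits = new BitSet();
        bits.set(3);
        bits.set(7);
        bits.set(42);

        // Canonical loop: nextSetBit(i + 1) returns -1 once no set bit remains,
        // so the loop ends when i becomes negative.
        for (int i = bits.nextSetBit(0); i >= 0; i = bits.nextSetBit(i + 1)) {
            System.out.println("bit " + i + " is set"); // prints lines for bits 3, 7, and 42
        }
    }
}
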
Usage

From source file: org.apache.hadoop.hive.ql.optimizer.optiq.stats.HiveRelMdRowCount.java

private static Pair<Integer, Integer> canHandleJoin(JoinRelBase joinRel, List<RexNode> leftFilters,
        List<RexNode> rightFilters, List<RexNode> joinFilters) {

    /*
     * If after classifying filters there is more than 1 joining predicate, we
     * don't handle this. Return null.
     */
    if (joinFilters.size() != 1) {
        return null;
    }

    RexNode joinCond = joinFilters.get(0);

    int leftColIdx;
    int rightColIdx;

    if (!(joinCond instanceof RexCall)) {
        return null;
    }

    if (((RexCall) joinCond).getOperator() != SqlStdOperatorTable.EQUALS) {
        return null;
    }

    BitSet leftCols = RelOptUtil.InputFinder.bits(((RexCall) joinCond).getOperands().get(0));
    BitSet rightCols = RelOptUtil.InputFinder.bits(((RexCall) joinCond).getOperands().get(1));

    if (leftCols.cardinality() != 1 || rightCols.cardinality() != 1) {
        return null;
    }

    int nFieldsLeft = joinRel.getLeft().getRowType().getFieldList().size();
    int nFieldsRight = joinRel.getRight().getRowType().getFieldList().size();
    int nSysFields = joinRel.getSystemFieldList().size();
    BitSet rightFieldsBitSet = BitSets.range(nSysFields + nFieldsLeft, nSysFields + nFieldsLeft + nFieldsRight);
    /*
     * flip column references if join condition specified in reverse order to
     * join sources.
     */
    if (BitSets.contains(rightFieldsBitSet, leftCols)) {
        BitSet t = leftCols;
        leftCols = rightCols;
        rightCols = t;
    }

    leftColIdx = leftCols.nextSetBit(0) - nSysFields;
    rightColIdx = rightCols.nextSetBit(0) - (nSysFields + nFieldsLeft);

    return new Pair<Integer, Integer>(leftColIdx, rightColIdx);
}

From source file: org.apache.ctakes.ytex.kernel.evaluator.SemanticTypeKernel.java

public Set<Integer> tuiToMainSui(BitSet tuis) {
    Set<Integer> mainSui = new HashSet<Integer>(tuis.size());
    for (int i = tuis.nextSetBit(0); i >= 0; i = tuis.nextSetBit(i + 1)) {
        String tui = this.tuiList.get(i);
        mainSui.add(getMainSem(Integer.parseInt(tui.substring(1))));
    }
    return mainSui;
}

From source file: edu.uci.ics.hyracks.algebricks.rewriter.rules.ExtractCommonOperatorsRule.java

private void getAllBlockingClusterIds(int clusterId, BitSet blockingClusters) {
    BitSet waitFor = clusterWaitForMap.get(clusterId);
    if (waitFor != null) {
        for (int i = waitFor.nextSetBit(0); i >= 0; i = waitFor.nextSetBit(i + 1)) {
            getAllBlockingClusterIds(i, blockingClusters);
        }
        blockingClusters.or(waitFor);
    }
}

From source file: org.caleydo.data.importer.tcga.FirehoseProvider.java

private TCGAFileInfo filterColumns(TCGAFileInfo full, Pair<TCGAFileInfo, Boolean> sampled) {
    File in = full.getFile();
    File out = new File(in.getParentFile(), "F" + in.getName());
    TCGAFileInfo r = new TCGAFileInfo(out, full.getArchiveURL(), full.getSourceFileName());
    if (out.exists() && !settings.isCleanCache())
        return r;
    assert full != null;
    if (sampled == null || sampled.getFirst() == null) {
        log.severe("can't filter the full gene file: " + in + " - sampled not found");
        return full;
    }
    // full: 1row, 2col
    // sampled: 3row, 3col
    Set<String> good = readGoodSamples(sampled.getFirst().getFile());
    if (good == null)
        return full;
    try (BufferedReader fin = new BufferedReader(new FileReader(in)); PrintWriter w = new PrintWriter(out)) {
        String[] header = fin.readLine().split("\t");
        BitSet bad = filterCols(header, good);
        {
            StringBuilder b = new StringBuilder();
            for (int i = bad.nextSetBit(0); i >= 0; i = bad.nextSetBit(i + 1))
                b.append(' ').append(header[i]);
            log.warning("remove bad samples of " + in + ":" + b);
        }
        w.append(header[0]);
        for (int i = 1; i < header.length; ++i) {
            if (bad.get(i))
                continue;
            w.append('\t').append(header[i]);
        }
        String line;
        while ((line = fin.readLine()) != null) {
            w.println();
            int t = line.indexOf('\t');
            w.append(line.subSequence(0, t));
            int prev = t;
            int i = 1;
            for (t = line.indexOf('\t', t + 1); t >= 0; t = line.indexOf('\t', t + 1), ++i) {
                if (!bad.get(i))
                    w.append(line.subSequence(prev, t));
                prev = t;
            }
            if (!bad.get(i))
                w.append(line.subSequence(prev, line.length()));

        }
    } catch (IOException e) {
        log.log(Level.SEVERE, "can't filter full file: " + in, e);
    }

    return r;
}

From source file: org.apache.hyracks.control.cc.executor.ActivityClusterPlanner.java

private TaskCluster[] buildConnectorPolicyAwareTaskClusters(ActivityCluster ac,
        Map<ActivityId, ActivityPlan> activityPlanMap,
        Map<TaskId, List<Pair<TaskId, ConnectorDescriptorId>>> taskConnectivity) {
    Map<TaskId, Set<TaskId>> taskClusterMap = new HashMap<>();
    for (ActivityId anId : ac.getActivityMap().keySet()) {
        ActivityPlan ap = activityPlanMap.get(anId);
        Task[] tasks = ap.getTasks();
        for (Task t : tasks) {
            Set<TaskId> cluster = new HashSet<>();
            TaskId tid = t.getTaskId();
            cluster.add(tid);
            taskClusterMap.put(tid, cluster);
        }
    }

    JobRun jobRun = executor.getJobRun();
    Map<ConnectorDescriptorId, IConnectorPolicy> connectorPolicies = jobRun.getConnectorPolicyMap();
    for (Map.Entry<TaskId, List<Pair<TaskId, ConnectorDescriptorId>>> e : taskConnectivity.entrySet()) {
        Set<TaskId> cluster = taskClusterMap.get(e.getKey());
        for (Pair<TaskId, ConnectorDescriptorId> p : e.getValue()) {
            IConnectorPolicy cPolicy = connectorPolicies.get(p.getRight());
            if (cPolicy.requiresProducerConsumerCoscheduling()) {
                cluster.add(p.getLeft());
            }
        }
    }

    /*
     * We compute the transitive closure of this (producer-consumer) relation to find the largest set of
     * tasks that need to be co-scheduled.
     */
    int counter = 0;
    TaskId[] ordinalList = new TaskId[taskClusterMap.size()];
    Map<TaskId, Integer> ordinalMap = new HashMap<>();
    for (TaskId tid : taskClusterMap.keySet()) {
        ordinalList[counter] = tid;
        ordinalMap.put(tid, counter);
        ++counter;
    }

    int n = ordinalList.length;
    BitSet[] paths = new BitSet[n];
    for (Map.Entry<TaskId, Set<TaskId>> e : taskClusterMap.entrySet()) {
        int i = ordinalMap.get(e.getKey());
        BitSet bsi = paths[i];
        if (bsi == null) {
            bsi = new BitSet(n);
            paths[i] = bsi;
        }
        for (TaskId ttid : e.getValue()) {
            int j = ordinalMap.get(ttid);
            paths[i].set(j);
            BitSet bsj = paths[j];
            if (bsj == null) {
                bsj = new BitSet(n);
                paths[j] = bsj;
            }
            bsj.set(i);
        }
    }
    for (int k = 0; k < n; ++k) {
        for (int i = paths[k].nextSetBit(0); i >= 0; i = paths[k].nextSetBit(i + 1)) {
            for (int j = paths[i].nextClearBit(0); j < n && j >= 0; j = paths[i].nextClearBit(j + 1)) {
                paths[i].set(j, paths[k].get(j));
                paths[j].set(i, paths[i].get(j));
            }
        }
    }
    BitSet pending = new BitSet(n);
    pending.set(0, n);
    List<List<TaskId>> clusters = new ArrayList<>();
    for (int i = pending.nextSetBit(0); i >= 0; i = pending.nextSetBit(i)) {
        List<TaskId> cluster = new ArrayList<>();
        for (int j = paths[i].nextSetBit(0); j >= 0; j = paths[i].nextSetBit(j + 1)) {
            cluster.add(ordinalList[j]);
            pending.clear(j);
        }
        clusters.add(cluster);
    }

    List<TaskCluster> tcSet = new ArrayList<>();
    counter = 0;
    for (List<TaskId> cluster : clusters) {
        List<Task> taskStates = new ArrayList<>();
        for (TaskId tid : cluster) {
            taskStates.add(activityPlanMap.get(tid.getActivityId()).getTasks()[tid.getPartition()]);
        }
        TaskCluster tc = new TaskCluster(new TaskClusterId(ac.getId(), counter++), ac,
                taskStates.toArray(new Task[taskStates.size()]));
        tcSet.add(tc);
        for (TaskId tid : cluster) {
            activityPlanMap.get(tid.getActivityId()).getTasks()[tid.getPartition()].setTaskCluster(tc);
        }
    }
    return tcSet.toArray(new TaskCluster[tcSet.size()]);
}

From source file: edu.uci.ics.hyracks.control.cc.scheduler.ActivityClusterPlanner.java

private TaskCluster[] buildConnectorPolicyAwareTaskClusters(ActivityCluster ac,
        Map<ActivityId, ActivityPlan> activityPlanMap,
        Map<TaskId, List<Pair<TaskId, ConnectorDescriptorId>>> taskConnectivity) {
    Map<TaskId, Set<TaskId>> taskClusterMap = new HashMap<TaskId, Set<TaskId>>();
    for (ActivityId anId : ac.getActivityMap().keySet()) {
        ActivityPlan ap = activityPlanMap.get(anId);
        Task[] tasks = ap.getTasks();
        for (Task t : tasks) {
            Set<TaskId> cluster = new HashSet<TaskId>();
            TaskId tid = t.getTaskId();
            cluster.add(tid);
            taskClusterMap.put(tid, cluster);
        }
    }

    JobRun jobRun = scheduler.getJobRun();
    Map<ConnectorDescriptorId, IConnectorPolicy> connectorPolicies = jobRun.getConnectorPolicyMap();
    for (Map.Entry<TaskId, List<Pair<TaskId, ConnectorDescriptorId>>> e : taskConnectivity.entrySet()) {
        Set<TaskId> cluster = taskClusterMap.get(e.getKey());
        for (Pair<TaskId, ConnectorDescriptorId> p : e.getValue()) {
            IConnectorPolicy cPolicy = connectorPolicies.get(p.getRight());
            if (cPolicy.requiresProducerConsumerCoscheduling()) {
                cluster.add(p.getLeft());
            }
        }
    }

    /*
     * taskClusterMap contains for every TID x, x -> { coscheduled consumer TIDs U x }
     * We compute the transitive closure of this relation to find the largest set of
     * tasks that need to be co-scheduled
     */
    int counter = 0;
    TaskId[] ordinalList = new TaskId[taskClusterMap.size()];
    Map<TaskId, Integer> ordinalMap = new HashMap<TaskId, Integer>();
    for (TaskId tid : taskClusterMap.keySet()) {
        ordinalList[counter] = tid;
        ordinalMap.put(tid, counter);
        ++counter;
    }

    int n = ordinalList.length;
    BitSet[] paths = new BitSet[n];
    for (Map.Entry<TaskId, Set<TaskId>> e : taskClusterMap.entrySet()) {
        int i = ordinalMap.get(e.getKey());
        BitSet bsi = paths[i];
        if (bsi == null) {
            bsi = new BitSet(n);
            paths[i] = bsi;
        }
        for (TaskId ttid : e.getValue()) {
            int j = ordinalMap.get(ttid);
            paths[i].set(j);
            BitSet bsj = paths[j];
            if (bsj == null) {
                bsj = new BitSet(n);
                paths[j] = bsj;
            }
            bsj.set(i);
        }
    }
    for (int k = 0; k < n; ++k) {
        for (int i = paths[k].nextSetBit(0); i >= 0; i = paths[k].nextSetBit(i + 1)) {
            for (int j = paths[i].nextClearBit(0); j < n && j >= 0; j = paths[i].nextClearBit(j + 1)) {
                paths[i].set(j, paths[k].get(j));
                paths[j].set(i, paths[i].get(j));
            }
        }
    }
    BitSet pending = new BitSet(n);
    pending.set(0, n);
    List<List<TaskId>> clusters = new ArrayList<List<TaskId>>();
    for (int i = pending.nextSetBit(0); i >= 0; i = pending.nextSetBit(i)) {
        List<TaskId> cluster = new ArrayList<TaskId>();
        for (int j = paths[i].nextSetBit(0); j >= 0; j = paths[i].nextSetBit(j + 1)) {
            cluster.add(ordinalList[j]);
            pending.clear(j);
        }
        clusters.add(cluster);
    }

    List<TaskCluster> tcSet = new ArrayList<TaskCluster>();
    counter = 0;
    for (List<TaskId> cluster : clusters) {
        List<Task> taskStates = new ArrayList<Task>();
        for (TaskId tid : cluster) {
            taskStates.add(activityPlanMap.get(tid.getActivityId()).getTasks()[tid.getPartition()]);
        }
        TaskCluster tc = new TaskCluster(new TaskClusterId(ac.getId(), counter++), ac,
                taskStates.toArray(new Task[taskStates.size()]));
        tcSet.add(tc);
        for (TaskId tid : cluster) {
            activityPlanMap.get(tid.getActivityId()).getTasks()[tid.getPartition()].setTaskCluster(tc);
        }
    }
    TaskCluster[] taskClusters = tcSet.toArray(new TaskCluster[tcSet.size()]);
    return taskClusters;
}

From source file: com.google.uzaygezen.core.LongBitVector.java

@Override
public void copyFrom(BitSet from) {
    int localSize = size;
    long value = 0;
    for (int i = from.nextSetBit(0); i != -1; i = from.nextSetBit(i + 1)) {
        Preconditions.checkArgument(i < localSize, "bit set too large");
        value |= 1L << i;
    }
    data = value;
}

From source file: org.apache.pig.tools.pigstats.ScriptState.java

protected long bitSetToLong(BitSet bs) {
    long ret = 0;
    for (int i = bs.nextSetBit(0); i >= 0; i = bs.nextSetBit(i + 1)) {
        ret |= (1L << i);
    }
    return ret;
}

From source file: fr.inria.lille.repair.nopol.NoPol.java

private void logResultInfo(List<Patch> patches) {
    long durationTime = System.currentTimeMillis() - this.startTime;
    this.logger.info("----INFORMATION----");
    List<CtType<?>> allClasses = this.getSpooner().spoonFactory().Class().getAll();
    int nbMethod = 0;
    for (int i = 0; i < allClasses.size(); i++) {
        CtType<?> ctSimpleType = allClasses.get(i);
        if (ctSimpleType instanceof CtClass) {
            Set methods = ((CtClass) ctSimpleType).getMethods();
            nbMethod += methods.size();
        }
    }

    this.logger.info("Nb classes : " + allClasses.size());
    this.logger.info("Nb methods : " + nbMethod);
    if (NoPol.currentStatement != null) {
        BitSet coverage = NoPol.currentStatement.getCoverage();
        int countStatementSuccess = 0;
        int countStatementFailed = 0;
        int nextTest = coverage.nextSetBit(0);
        /*while (nextTest != -1) {
           TestResultImpl testResult = nopol.getgZoltar().getGzoltar().getTestResults().get(nextTest);
           if (testResult.wasSuccessful()) {
              countStatementSuccess += testResult.getCoveredComponents().size();
           } else {
              countStatementFailed += testResult.getCoveredComponents().size();
           }
           nextTest = coverage.nextSetBit(nextTest + 1);
        }*/

        this.logger.info(
                "Nb statement executed by the passing tests of the patched line: " + countStatementSuccess);
        this.logger.info(
                "Nb statement executed by the failing tests of the patched line: " + countStatementFailed);
    }

    this.logger.info("Nb Statements Analyzed : " + SynthesizerFactory.getNbStatementsAnalysed());
    this.logger.info("Nb Statements with Angelic Value Found : "
            + SMTNopolSynthesizer.getNbStatementsWithAngelicValue());
    if (nopolContext.getSynthesis() == NopolContext.NopolSynthesis.SMT) {
        this.logger.info("Nb inputs in SMT : " + SMTNopolSynthesizer.getDataSize());
        this.logger.info("Nb SMT level: " + ConstraintBasedSynthesis.level);
        if (ConstraintBasedSynthesis.operators != null) {
            this.logger.info("Nb SMT components: [" + ConstraintBasedSynthesis.operators.size() + "] "
                    + ConstraintBasedSynthesis.operators);
            Iterator<Operator<?>> iterator = ConstraintBasedSynthesis.operators.iterator();
            Map<Class, Integer> mapType = new HashMap<>();
            while (iterator.hasNext()) {
                Operator<?> next = iterator.next();
                if (!mapType.containsKey(next.type())) {
                    mapType.put(next.type(), 1);
                } else {
                    mapType.put(next.type(), mapType.get(next.type()) + 1);
                }
            }
            for (Iterator<Class> patchIterator = mapType.keySet().iterator(); patchIterator.hasNext();) {
                Class next = patchIterator.next();
                this.logger.info("                  " + next + ": " + mapType.get(next));
            }
        }

        this.logger.info("Nb variables in SMT : " + SMTNopolSynthesizer.getNbVariables());
    }
    //this.logger.info("Nb run failing test  : " + nbFailingTestExecution);
    //this.logger.info("Nb run passing test : " + nbPassedTestExecution);

    this.logger.info("NoPol Execution time : " + durationTime + "ms");
    this.logger.info("".equals(nopolContext.getIdentifier()) ? "" : "  for " + nopolContext.getIdentifier());

    if (patches != null && !patches.isEmpty()) {
        this.logger.info("----PATCH FOUND----");
        for (int i = 0; i < patches.size(); i++) {
            Patch patch = patches.get(i);
            this.logger.info(patch.asString());
            this.logger.info("Nb test that executes the patch: "
                    + this.getLocalizer().getTestListPerStatement().get(patch.getSourceLocation()).size());
            this.logger.info(String.format("%s:%d: %s", patch.getSourceLocation().getContainingClassName(),
                    patch.getLineNumber(), patch.getType()));
            String diffPatch = patch.toDiff(this.getSpooner().spoonFactory(), nopolContext);
            this.logger.info(diffPatch);

            if (nopolContext.getOutputFolder() != null) {
                File patchLocation = new File(nopolContext.getOutputFolder() + "/patch_" + (i + 1) + ".diff");
                try {
                    PrintWriter writer = new PrintWriter(patchLocation, "UTF-8");
                    writer.print(diffPatch);
                    writer.close();
                } catch (IOException e) {
                    System.err.println("Unable to write the patch: " + e.getMessage());
                }
            }
        }
    }
    if (nopolContext.isJson()) {
        JSONObject output = new JSONObject();

        output.put("nb_classes", allClasses.size());
        output.put("nb_methods", nbMethod);
        output.put("nbStatement", SynthesizerFactory.getNbStatementsAnalysed());
        output.put("nbAngelicValue", SMTNopolSynthesizer.getNbStatementsWithAngelicValue());
        //output.put("nb_failing_test", nbFailingTestExecution);
        //output.put("nb_passing_test", nbPassedTestExecution);
        output.put("executionTime", durationTime);
        output.put("date", new Date());
        if (patches != null) {
            for (int i = 0; i < patches.size(); i++) {
                Patch patch = patches.get(i);

                JSONObject patchOutput = new JSONObject();

                JSONObject locationOutput = new JSONObject();
                locationOutput.put("class", patch.getSourceLocation().getContainingClassName());
                locationOutput.put("line", patch.getLineNumber());
                patchOutput.put("patchLocation", locationOutput);
                patchOutput.put("patchType", patch.getType());
                patchOutput.put("nb_test_that_execute_statement",
                        this.getLocalizer().getTestListPerStatement().get(patch.getSourceLocation()).size());
                patchOutput.put("patch", patch.toDiff(this.getSpooner().spoonFactory(), nopolContext));

                output.append("patch", patchOutput);
            }
        }

        try (FileWriter writer = new FileWriter(nopolContext.getOutputFolder() + "/output.json")) {
            output.write(writer);
            writer.close();
        } catch (IOException ignore) {
        }
    }
}

From source file: com.bittorrent.mpetazzoni.client.SharedTorrent.java

/**
 * Peer disconnection handler.
 *
 * <p>
 * When a peer disconnects, we need to mark in all of the pieces it had
 * available that they can't be reached through this peer anymore.
 * </p>
 *
 * @param peer The peer we got this piece from.
 */
@Override
public synchronized void handlePeerDisconnected(SharingPeer peer) {
    BitSet availablePieces = peer.getAvailablePieces();

    for (int i = availablePieces.nextSetBit(0); i >= 0; i = availablePieces.nextSetBit(i + 1)) {
        this.rarest.remove(this.pieces[i]);
        this.pieces[i].noLongerAt(peer);
        this.rarest.add(this.pieces[i]);
    }

    Piece requested = peer.getRequestedPiece();
    if (requested != null) {
        this.requestedPieces.set(requested.getIndex(), false);
    }

    logger.debug("Peer {} went away with {} piece(s) [completed={}; available={}/{}]",
            new Object[] { peer, availablePieces.cardinality(), this.completedPieces.cardinality(),
                    this.getAvailablePieces().cardinality(), this.pieces.length });
    logger.trace("We now have {} piece(s) and {} outstanding request(s): {}", new Object[] {
            this.completedPieces.cardinality(), this.requestedPieces.cardinality(), this.requestedPieces });
}