Example usage for java.util BitSet clear

Introduction

This page collects example usages of java.util.BitSet.clear(int bitIndex) from open source projects.

Prototype

public void clear(int bitIndex) 

Document

Sets the bit specified by the index to false.
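
Before the project examples, here is a minimal self-contained sketch of this behavior (not taken from the examples below): clear(int) sets a single bit back to false, clearing an index beyond the current logical length is a harmless no-op, and a negative index throws IndexOutOfBoundsException.

import java.util.BitSet;

public class ClearDemo {
    public static void main(String[] args) {
        BitSet bits = new BitSet();
        bits.set(3);
        bits.set(5);
        bits.clear(3);            // bit 3 back to false
        bits.clear(100);          // no-op: bit 100 was already false
        // bits.clear(-1);        // would throw IndexOutOfBoundsException
        System.out.println(bits); // prints {5}
    }
}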

Usage

From source file:Sieve.java

public static void main(String[] s) {
    int n = 2000000;
    long start = System.currentTimeMillis();
    BitSet b = new BitSet(n + 1);
    int count = 0;
    int i;
    for (i = 2; i <= n; i++)
        b.set(i);
    i = 2;
    while (i * i <= n) {
        if (b.get(i)) {
            count++;
            int k = 2 * i;
            while (k <= n) {
                b.clear(k);
                k += i;
            }
        }
        i++;
    }
    while (i <= n) {
        if (b.get(i))
            count++;
        i++;
    }
    long end = System.currentTimeMillis();
    System.out.println(count + " primes");
    System.out.println((end - start) + " milliseconds");
}
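
A compact variant of the sieve above, offered as a sketch rather than a drop-in replacement: the range overload set(int, int) seeds the candidates, the inner loop starts at i * i because smaller multiples were already cleared by smaller primes, and cardinality() replaces both manual counting loops.

import java.util.BitSet;

public class SieveCardinality {
    public static void main(String[] args) {
        int n = 2000000;
        BitSet b = new BitSet(n + 1);
        b.set(2, n + 1);                         // mark 2..n as candidates
        for (int i = 2; (long) i * i <= n; i++) {
            if (b.get(i)) {                      // i is prime
                for (int k = i * i; k <= n; k += i) {
                    b.clear(k);                  // k is composite
                }
            }
        }
        System.out.println(b.cardinality() + " primes");
    }
}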

From source file:Main.java

public static void main(String[] args) {

    BitSet bitset1 = new BitSet(8);
    BitSet bitset2 = new BitSet(8);

    // assign values to bitset1
    bitset1.set(0);
    bitset1.set(1);
    bitset1.set(2);

    // assign values to bitset2
    bitset2.set(2);
    bitset2.set(4);

    // print the sets
    System.out.println("Bitset1:" + bitset1);
    System.out.println("Bitset2:" + bitset2);

    // clear index 2 in bitset1
    bitset1.clear(2);

    // clear index 4 in bitset2
    bitset2.clear(4);

    // print new bitsets
    System.out.println(bitset1);
    System.out.println(bitset2);

}
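
For reference, a run of this example should print the following; BitSet.toString() lists the indexes of the set bits, so clearing index 2 of bitset1 and index 4 of bitset2 is visible directly in the output.

Bitset1:{0, 1, 2}
Bitset2:{2, 4}
{0, 1}
{2}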

From source file:edu.uci.ics.hyracks.algebricks.rewriter.rules.ExtractCommonOperatorsRule.java

private void computeClusters(Mutable<ILogicalOperator> parentRef, Mutable<ILogicalOperator> opRef,
        MutableInt currentClusterId) {
    // only replicate operator has multiple outputs
    int outputIndex = 0;
    if (opRef.getValue().getOperatorTag() == LogicalOperatorTag.REPLICATE) {
        ReplicateOperator rop = (ReplicateOperator) opRef.getValue();
        List<Mutable<ILogicalOperator>> outputs = rop.getOutputs();
        for (outputIndex = 0; outputIndex < outputs.size(); outputIndex++) {
            if (outputs.get(outputIndex).equals(parentRef)) {
                break;
            }
        }
    }
    AbstractLogicalOperator aop = (AbstractLogicalOperator) opRef.getValue();
    Pair<int[], int[]> labels = aop.getPhysicalOperator().getInputOutputDependencyLabels(opRef.getValue());
    List<Mutable<ILogicalOperator>> inputs = opRef.getValue().getInputs();
    for (int i = 0; i < inputs.size(); i++) {
        Mutable<ILogicalOperator> inputRef = inputs.get(i);
        if (labels.second[outputIndex] == 1 && labels.first[i] == 0) { // 1 -> 0
            if (labels.second.length == 1) {
                clusterMap.put(opRef, currentClusterId);
                // start a new cluster
                MutableInt newClusterId = new MutableInt(++lastUsedClusterId);
                computeClusters(opRef, inputRef, newClusterId);
                BitSet waitForList = clusterWaitForMap.get(currentClusterId.getValue());
                if (waitForList == null) {
                    waitForList = new BitSet();
                    clusterWaitForMap.put(currentClusterId.getValue(), waitForList);
                }
                waitForList.set(newClusterId.getValue());
            }
        } else { // 0 -> 0 and 1 -> 1
            MutableInt prevClusterId = clusterMap.get(opRef);
            if (prevClusterId == null || prevClusterId.getValue().equals(currentClusterId.getValue())) {
                clusterMap.put(opRef, currentClusterId);
                computeClusters(opRef, inputRef, currentClusterId);
            } else {
                // merge prevClusterId and currentClusterId: update all the map entries that have currentClusterId to prevClusterId
                for (BitSet bs : clusterWaitForMap.values()) {
                    if (bs.get(currentClusterId.getValue())) {
                        bs.clear(currentClusterId.getValue());
                        bs.set(prevClusterId.getValue());
                    }
                }
                currentClusterId.setValue(prevClusterId.getValue());
            }
        }
    }
}

From source file:org.apache.hyracks.algebricks.rewriter.rules.ExtractCommonOperatorsRule.java

private void computeClusters(Mutable<ILogicalOperator> parentRef, Mutable<ILogicalOperator> opRef,
        MutableInt currentClusterId) {
    // only replicate operator has multiple outputs
    int outputIndex = 0;
    if (opRef.getValue().getOperatorTag() == LogicalOperatorTag.REPLICATE) {
        ReplicateOperator rop = (ReplicateOperator) opRef.getValue();
        List<Mutable<ILogicalOperator>> outputs = rop.getOutputs();
        for (outputIndex = 0; outputIndex < outputs.size(); outputIndex++) {
            if (outputs.get(outputIndex).equals(parentRef)) {
                break;
            }
        }
    }
    AbstractLogicalOperator aop = (AbstractLogicalOperator) opRef.getValue();
    Pair<int[], int[]> labels = aop.getPhysicalOperator().getInputOutputDependencyLabels(opRef.getValue());
    List<Mutable<ILogicalOperator>> inputs = opRef.getValue().getInputs();
    for (int i = 0; i < inputs.size(); i++) {
        Mutable<ILogicalOperator> inputRef = inputs.get(i);
        if (labels.second[outputIndex] == 1 && labels.first[i] == 0) { // 1 -> 0
            if (labels.second.length == 1) {
                clusterMap.put(opRef, currentClusterId);
                // start a new cluster
                MutableInt newClusterId = new MutableInt(++lastUsedClusterId);
                computeClusters(opRef, inputRef, newClusterId);
                BitSet waitForList = clusterWaitForMap.get(currentClusterId.getValue());
                if (waitForList == null) {
                    waitForList = new BitSet();
                    clusterWaitForMap.put(currentClusterId.getValue(), waitForList);
                }
                waitForList.set(newClusterId.getValue());
            }
        } else { // 0 -> 0 and 1 -> 1
            MutableInt prevClusterId = clusterMap.get(opRef);
            if (prevClusterId == null || prevClusterId.getValue().equals(currentClusterId.getValue())) {
                clusterMap.put(opRef, currentClusterId);
                computeClusters(opRef, inputRef, currentClusterId);
            } else {
                // merge prevClusterId and currentClusterId: update all the map entries that have currentClusterId to prevClusterId
                for (BitSet bs : clusterWaitForMap.values()) {
                    if (bs.get(currentClusterId.getValue())) {
                        bs.clear(currentClusterId.getValue());
                        bs.set(prevClusterId.getValue());
                    }
                }
                clusterWaitForMap.remove(currentClusterId.getValue());
                currentClusterId.setValue(prevClusterId.getValue());
            }
        }
    }
}
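
Both computeClusters variants above rely on the same clear-then-set idiom in their merge branch: when currentClusterId is folded into prevClusterId, every wait-for set that mentions the merged id has that bit cleared and the survivor's bit set. A standalone sketch of just that idiom (the names here are illustrative, not Hyracks APIs):

import java.util.BitSet;

public class RenumberBitDemo {
    static void renumber(BitSet waitFor, int mergedId, int survivorId) {
        if (waitFor.get(mergedId)) {
            waitFor.clear(mergedId); // drop the cluster id being merged away
            waitFor.set(survivorId); // point at the surviving cluster instead
        }
    }

    public static void main(String[] args) {
        BitSet waitFor = new BitSet();
        waitFor.set(3);
        renumber(waitFor, 3, 1);
        System.out.println(waitFor); // prints {1}
    }
}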

From source file:org.apache.hyracks.control.cc.executor.ActivityClusterPlanner.java

private TaskCluster[] buildConnectorPolicyAwareTaskClusters(ActivityCluster ac,
        Map<ActivityId, ActivityPlan> activityPlanMap,
        Map<TaskId, List<Pair<TaskId, ConnectorDescriptorId>>> taskConnectivity) {
    Map<TaskId, Set<TaskId>> taskClusterMap = new HashMap<>();
    for (ActivityId anId : ac.getActivityMap().keySet()) {
        ActivityPlan ap = activityPlanMap.get(anId);
        Task[] tasks = ap.getTasks();
        for (Task t : tasks) {
            Set<TaskId> cluster = new HashSet<>();
            TaskId tid = t.getTaskId();
            cluster.add(tid);
            taskClusterMap.put(tid, cluster);
        }
    }

    JobRun jobRun = executor.getJobRun();
    Map<ConnectorDescriptorId, IConnectorPolicy> connectorPolicies = jobRun.getConnectorPolicyMap();
    for (Map.Entry<TaskId, List<Pair<TaskId, ConnectorDescriptorId>>> e : taskConnectivity.entrySet()) {
        Set<TaskId> cluster = taskClusterMap.get(e.getKey());
        for (Pair<TaskId, ConnectorDescriptorId> p : e.getValue()) {
            IConnectorPolicy cPolicy = connectorPolicies.get(p.getRight());
            if (cPolicy.requiresProducerConsumerCoscheduling()) {
                cluster.add(p.getLeft());
            }
        }
    }

    /*
     * We compute the transitive closure of this (producer-consumer) relation to find the largest set of
     * tasks that need to be co-scheduled.
     */
    int counter = 0;
    TaskId[] ordinalList = new TaskId[taskClusterMap.size()];
    Map<TaskId, Integer> ordinalMap = new HashMap<>();
    for (TaskId tid : taskClusterMap.keySet()) {
        ordinalList[counter] = tid;
        ordinalMap.put(tid, counter);
        ++counter;
    }

    int n = ordinalList.length;
    BitSet[] paths = new BitSet[n];
    for (Map.Entry<TaskId, Set<TaskId>> e : taskClusterMap.entrySet()) {
        int i = ordinalMap.get(e.getKey());
        BitSet bsi = paths[i];
        if (bsi == null) {
            bsi = new BitSet(n);
            paths[i] = bsi;
        }
        for (TaskId ttid : e.getValue()) {
            int j = ordinalMap.get(ttid);
            paths[i].set(j);
            BitSet bsj = paths[j];
            if (bsj == null) {
                bsj = new BitSet(n);
                paths[j] = bsj;
            }
            bsj.set(i);
        }
    }
    for (int k = 0; k < n; ++k) {
        for (int i = paths[k].nextSetBit(0); i >= 0; i = paths[k].nextSetBit(i + 1)) {
            for (int j = paths[i].nextClearBit(0); j < n && j >= 0; j = paths[i].nextClearBit(j + 1)) {
                paths[i].set(j, paths[k].get(j));
                paths[j].set(i, paths[i].get(j));
            }
        }
    }
    BitSet pending = new BitSet(n);
    pending.set(0, n);
    List<List<TaskId>> clusters = new ArrayList<>();
    for (int i = pending.nextSetBit(0); i >= 0; i = pending.nextSetBit(i)) {
        List<TaskId> cluster = new ArrayList<>();
        for (int j = paths[i].nextSetBit(0); j >= 0; j = paths[i].nextSetBit(j + 1)) {
            cluster.add(ordinalList[j]);
            pending.clear(j);
        }
        clusters.add(cluster);
    }

    List<TaskCluster> tcSet = new ArrayList<>();
    counter = 0;
    for (List<TaskId> cluster : clusters) {
        List<Task> taskStates = new ArrayList<>();
        for (TaskId tid : cluster) {
            taskStates.add(activityPlanMap.get(tid.getActivityId()).getTasks()[tid.getPartition()]);
        }
        TaskCluster tc = new TaskCluster(new TaskClusterId(ac.getId(), counter++), ac,
                taskStates.toArray(new Task[taskStates.size()]));
        tcSet.add(tc);
        for (TaskId tid : cluster) {
            activityPlanMap.get(tid.getActivityId()).getTasks()[tid.getPartition()].setTaskCluster(tc);
        }
    }
    return tcSet.toArray(new TaskCluster[tcSet.size()]);
}

From source file:edu.uci.ics.hyracks.control.cc.scheduler.ActivityClusterPlanner.java

private TaskCluster[] buildConnectorPolicyAwareTaskClusters(ActivityCluster ac,
        Map<ActivityId, ActivityPlan> activityPlanMap,
        Map<TaskId, List<Pair<TaskId, ConnectorDescriptorId>>> taskConnectivity) {
    Map<TaskId, Set<TaskId>> taskClusterMap = new HashMap<TaskId, Set<TaskId>>();
    for (ActivityId anId : ac.getActivityMap().keySet()) {
        ActivityPlan ap = activityPlanMap.get(anId);
        Task[] tasks = ap.getTasks();
        for (Task t : tasks) {
            Set<TaskId> cluster = new HashSet<TaskId>();
            TaskId tid = t.getTaskId();
            cluster.add(tid);
            taskClusterMap.put(tid, cluster);
        }
    }

    JobRun jobRun = scheduler.getJobRun();
    Map<ConnectorDescriptorId, IConnectorPolicy> connectorPolicies = jobRun.getConnectorPolicyMap();
    for (Map.Entry<TaskId, List<Pair<TaskId, ConnectorDescriptorId>>> e : taskConnectivity.entrySet()) {
        Set<TaskId> cluster = taskClusterMap.get(e.getKey());
        for (Pair<TaskId, ConnectorDescriptorId> p : e.getValue()) {
            IConnectorPolicy cPolicy = connectorPolicies.get(p.getRight());
            if (cPolicy.requiresProducerConsumerCoscheduling()) {
                cluster.add(p.getLeft());
            }
        }
    }

    /*
     * taskClusterMap contains for every TID x, x -> { coscheduled consumer TIDs U x }
     * We compute the transitive closure of this relation to find the largest set of
     * tasks that need to be co-scheduled
     */
    int counter = 0;
    TaskId[] ordinalList = new TaskId[taskClusterMap.size()];
    Map<TaskId, Integer> ordinalMap = new HashMap<TaskId, Integer>();
    for (TaskId tid : taskClusterMap.keySet()) {
        ordinalList[counter] = tid;
        ordinalMap.put(tid, counter);
        ++counter;
    }

    int n = ordinalList.length;
    BitSet[] paths = new BitSet[n];
    for (Map.Entry<TaskId, Set<TaskId>> e : taskClusterMap.entrySet()) {
        int i = ordinalMap.get(e.getKey());
        BitSet bsi = paths[i];
        if (bsi == null) {
            bsi = new BitSet(n);
            paths[i] = bsi;
        }
        for (TaskId ttid : e.getValue()) {
            int j = ordinalMap.get(ttid);
            paths[i].set(j);
            BitSet bsj = paths[j];
            if (bsj == null) {
                bsj = new BitSet(n);
                paths[j] = bsj;
            }
            bsj.set(i);
        }
    }
    for (int k = 0; k < n; ++k) {
        for (int i = paths[k].nextSetBit(0); i >= 0; i = paths[k].nextSetBit(i + 1)) {
            for (int j = paths[i].nextClearBit(0); j < n && j >= 0; j = paths[i].nextClearBit(j + 1)) {
                paths[i].set(j, paths[k].get(j));
                paths[j].set(i, paths[i].get(j));
            }
        }
    }
    BitSet pending = new BitSet(n);
    pending.set(0, n);
    List<List<TaskId>> clusters = new ArrayList<List<TaskId>>();
    for (int i = pending.nextSetBit(0); i >= 0; i = pending.nextSetBit(i)) {
        List<TaskId> cluster = new ArrayList<TaskId>();
        for (int j = paths[i].nextSetBit(0); j >= 0; j = paths[i].nextSetBit(j + 1)) {
            cluster.add(ordinalList[j]);
            pending.clear(j);
        }
        clusters.add(cluster);
    }

    List<TaskCluster> tcSet = new ArrayList<TaskCluster>();
    counter = 0;
    for (List<TaskId> cluster : clusters) {
        List<Task> taskStates = new ArrayList<Task>();
        for (TaskId tid : cluster) {
            taskStates.add(activityPlanMap.get(tid.getActivityId()).getTasks()[tid.getPartition()]);
        }
        TaskCluster tc = new TaskCluster(new TaskClusterId(ac.getId(), counter++), ac,
                taskStates.toArray(new Task[taskStates.size()]));
        tcSet.add(tc);
        for (TaskId tid : cluster) {
            activityPlanMap.get(tid.getActivityId()).getTasks()[tid.getPartition()].setTaskCluster(tc);
        }
    }
    TaskCluster[] taskClusters = tcSet.toArray(new TaskCluster[tcSet.size()]);
    return taskClusters;
}
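
The two planners above (they differ only in package and minor details) end with an identical BitSet pattern: after computing a symmetric transitive closure in paths, they peel connected components off a pending set, clearing each task's bit as soon as it is assigned to a cluster. A toy standalone sketch of that extraction step, with four tasks of which 0 and 2 must be co-scheduled:

import java.util.BitSet;

public class ComponentDemo {
    public static void main(String[] args) {
        int n = 4;
        BitSet[] paths = new BitSet[n];
        for (int i = 0; i < n; i++) {
            paths[i] = new BitSet(n);
            paths[i].set(i);      // every task reaches itself
        }
        paths[0].set(2);          // tasks 0 and 2 are co-scheduled
        paths[2].set(0);

        BitSet pending = new BitSet(n);
        pending.set(0, n);
        // the outer loop can advance via nextSetBit(i) rather than i + 1
        // because bit i itself is always cleared by the inner pass
        for (int i = pending.nextSetBit(0); i >= 0; i = pending.nextSetBit(i)) {
            StringBuilder cluster = new StringBuilder("cluster:");
            for (int j = paths[i].nextSetBit(0); j >= 0; j = paths[i].nextSetBit(j + 1)) {
                cluster.append(' ').append(j);
                pending.clear(j); // j is assigned, drop it from pending
            }
            System.out.println(cluster);
        }
        // prints: cluster: 0 2 / cluster: 1 / cluster: 3
    }
}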

From source file:org.wso2.carbon.user.core.authorization.PermissionTree.java

/**
 * Clear all role authorization for the given permission, starting from the
 * given node and descending through all of its children.
 *
 * @param roleName   the name of the role whose authorization is cleared
 * @param node       the start node to begin the search
 * @param permission the permission to clear
 */
private void clearRoleAuthorization(String roleName, TreeNode node, TreeNode.Permission permission) {
    write.lock();
    try {
        Map<String, BitSet> allowRoles = node.getRoleAllowPermissions();
        Map<String, BitSet> denyRoles = node.getRoleDenyPermissions();

        BitSet bs = allowRoles.get(roleName);
        if (bs != null) {
            bs.clear(permission.ordinal());
        }

        bs = denyRoles.get(roleName);
        if (bs != null) {
            bs.clear(permission.ordinal());
        }

        Map<String, TreeNode> childMap = node.getChildren();
        if (childMap != null && childMap.size() > 0) {
            for (TreeNode treeNode : childMap.values()) {
                clearRoleAuthorization(roleName, treeNode, permission);
            }
        }
        //     invalidateCache(root);
    } finally {
        write.unlock();
    }
}

From source file:org.wso2.carbon.user.core.authorization.PermissionTree.java

void clearRoleAuthorization(String roleName, String resourceId, String action) throws UserStoreException {
    SearchResult sr = getNode(root, PermissionTreeUtil.toComponenets(resourceId));
    write.lock();
    try {
        if (sr.getUnprocessedPaths() == null) {
            TreeNode.Permission permission = PermissionTreeUtil.actionToPermission(action);

            Map<String, BitSet> allowRoles = sr.getLastNode().getRoleAllowPermissions();
            BitSet bs = allowRoles.get(roleName);
            if (bs != null) {
                bs.clear(permission.ordinal());
            }

            Map<String, BitSet> denyRoles = sr.getLastNode().getRoleDenyPermissions();
            bs = denyRoles.get(roleName);
            if (bs != null) {
                bs.clear(permission.ordinal());
            }
        }
        invalidateCache(root);
    } finally {
        write.unlock();
    }
}

From source file:org.wso2.carbon.user.core.authorization.PermissionTree.java

void clearUserAuthorization(String userName, String resourceId, String action) throws UserStoreException {
    if (!isUsernameCaseSensitive(userName, tenantId)) {
        userName = userName.toLowerCase();
    }
    write.lock();
    try {
        SearchResult sr = getNode(root, PermissionTreeUtil.toComponenets(resourceId));
        if (sr.getUnprocessedPaths() == null || sr.getUnprocessedPaths().isEmpty()) {
            TreeNode.Permission permission = PermissionTreeUtil.actionToPermission(action);

            Map<String, BitSet> allowUsers = sr.getLastNode().getUserAllowPermissions();
            BitSet bs = allowUsers.get(userName);
            if (bs != null) {
                bs.clear(permission.ordinal());
            }

            Map<String, BitSet> denyUsers = sr.getLastNode().getUserDenyPermissions();
            bs = denyUsers.get(userName);
            if (bs != null) {
                bs.clear(permission.ordinal());
            }
        }
        invalidateCache(root);
    } finally {
        write.unlock();
    }
}
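
The three PermissionTree methods above share one pattern: every role or user name maps to a BitSet indexed by TreeNode.Permission.ordinal(), so revoking a single permission is one clear call on the matching bit. A hypothetical standalone sketch of that pattern (the enum constants here are made up for illustration, not WSO2's actual Permission values):

import java.util.BitSet;

public class PermissionBitsDemo {
    enum Permission { GET, ADD, DELETE, EDIT }

    public static void main(String[] args) {
        BitSet granted = new BitSet(Permission.values().length);
        granted.set(Permission.GET.ordinal());
        granted.set(Permission.EDIT.ordinal());

        granted.clear(Permission.EDIT.ordinal()); // revoke EDIT only
        System.out.println(granted.get(Permission.GET.ordinal()));  // true
        System.out.println(granted.get(Permission.EDIT.ordinal())); // false
    }
}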

From source file:org.apache.openjpa.kernel.DetachManager.java

/**
 * Ready the object for detachment, including loading the fields to be
 * detached and updating version information.
 *
 * @param idxs the indexes of fields to detach will be set as a side
 * effect of this method
 */
private static void preDetach(Broker broker, StateManagerImpl sm, BitSet idxs, boolean full,
        boolean reloadOnDetach) {
    // make sure the existing object has the right fields fetched; call
    // even if using currently-loaded fields for detach to make sure
    // version is set
    int detachMode = broker.getDetachState();
    int loadMode = StateManagerImpl.LOAD_FGS;
    BitSet exclude = null;
    if (detachMode == DETACH_LOADED)
        exclude = StoreContext.EXCLUDE_ALL;
    else if (detachMode == DETACH_ALL)
        loadMode = StateManagerImpl.LOAD_ALL;
    try {
        if (detachMode != DETACH_LOADED || reloadOnDetach || (!reloadOnDetach && !full)) {
            sm.load(broker.getFetchConfiguration(), loadMode, exclude, null, false);
        }
    } catch (ObjectNotFoundException onfe) {
        // consume the exception
    }

    // create bitset of fields to detach; if mode is all we can use
    // currently loaded bitset clone, since we know all fields are loaded
    if (idxs != null) {
        if (detachMode == DETACH_FETCH_GROUPS)
            setFetchGroupFields(broker, sm, idxs);
        else
            idxs.or(sm.getLoaded());

        // clear lrs fields
        FieldMetaData[] fmds = sm.getMetaData().getFields();
        for (int i = 0; i < fmds.length; i++)
            if (fmds[i].isLRS())
                idxs.clear(i);
    }
}
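
A small standalone sketch of the masking step at the end of preDetach (the field numbers are made up for illustration): the loaded-field mask is copied in with or, then the indexes of large-result-set fields are cleared so those fields are never detached.

import java.util.BitSet;

public class FieldMaskDemo {
    public static void main(String[] args) {
        BitSet loaded = new BitSet();
        loaded.set(1);             // hypothetical: fields 1 and 3 are loaded
        loaded.set(3);

        BitSet idxs = new BitSet();
        idxs.or(loaded);           // copy the loaded-field mask
        idxs.clear(3);             // field 3 is an LRS field: never detach it
        System.out.println(idxs);  // prints {1}
    }
}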