List of usage examples for java.util BitSet set
public void set(int bitIndex)
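set(bitIndex) sets the bit at the specified index to true, growing the bit set as needed. Before the project-level examples below, a minimal self-contained sketch of the call itself (class and variable names are illustrative):

import java.util.BitSet;

public class BitSetSetExample {
    public static void main(String[] args) {
        BitSet bits = new BitSet(); // starts empty; all bits are false
        bits.set(3);                // turn on bit 3; storage grows as needed
        bits.set(7);
        System.out.println(bits.get(3));        // true
        System.out.println(bits.get(4));        // false
        System.out.println(bits.cardinality()); // 2 bits set
        System.out.println(bits);               // {3, 7}
    }
}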
From source file:mastodon.algorithms.MHBisectionAlgorithm.java
protected void tryPruning() {
    // choose the number of species in the list to perturb based on a Poisson
    // distribution with rate equal to the variable "mean" above
    int numberToSet = 0;
    int numberToClear = 0;

    while (numberToSet < 1 || numberToSet > currPrunedSpeciesCount) {
        numberToSet = pd.sample() + 1;
    }

    if (numberToSet > (bts.getTaxaCount() - currPrunedSpeciesCount)) {
        numberToSet = bts.getTaxaCount() - currPrunedSpeciesCount;
    }
    numberToClear = numberToSet;

    BitSet bitsToSet = new BitSet();
    BitSet bitsToClear = new BitSet();

    for (int e = 0; e < numberToSet; e++) {
        int choice = 0;
        while (true) {
            choice = (int) (Random.nextDouble() * bts.getTaxaCount());
            if (!currPruning.get(choice) && !bitsToSet.get(choice)) {
                break;
            }
        }
        bitsToSet.set(choice);
    }

    for (int e = 0; e < numberToClear; e++) {
        int choice = 0;
        while (true) {
            choice = (int) (Random.nextDouble() * bts.getTaxaCount());
            if (currPruning.get(choice) && !bitsToClear.get(choice)) {
                break;
            }
        }
        bitsToClear.set(choice);
    }

    currPruning.or(bitsToSet);
    currPruning.xor(bitsToClear);

    currScore = bts.pruneFast(currPruning);
    bts.unPrune();
}
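The pattern above rejection-samples: draw a random index, retry until it is both clear in the current pruning and not already chosen, then mark it in a scratch BitSet before merging with or(). That "pick k distinct bits" idiom can be isolated as follows — a minimal standalone sketch using java.util.Random in place of the project's Random helper (all names here are illustrative, not from mastodon):

import java.util.BitSet;
import java.util.Random;

public class DistinctBitPicker {
    /** Picks k distinct indices in [0, n) that are clear in 'exclude', marking them in the result. */
    static BitSet pickDistinctClearBits(BitSet exclude, int n, int k, Random rng) {
        BitSet picked = new BitSet(n);
        for (int e = 0; e < k; e++) {
            int choice;
            do {
                choice = rng.nextInt(n);
            } while (exclude.get(choice) || picked.get(choice)); // retry on collisions
            picked.set(choice);
        }
        return picked;
    }

    public static void main(String[] args) {
        BitSet current = new BitSet();
        current.set(0);
        current.set(2);
        BitSet extra = pickDistinctClearBits(current, 10, 3, new Random(42));
        current.or(extra); // merge the new picks, as tryPruning() does
        System.out.println(current);
    }
}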
From source file:mastodon.algorithms.SABisectionAlgorithm.java
protected void tryPruning() {
    // choose the number of species in the list to perturb based on a Poisson
    // distribution with rate equal to the variable "mean" above
    int numberToSet = 0;
    int numberToClear = 0;

    while (numberToSet < 1 || numberToSet > currPrunedSpeciesCount) {
        numberToSet = pd.sample() + 1;
    }

    if (numberToSet > (bts.getTaxaCount() - currPrunedSpeciesCount)) {
        numberToSet = bts.getTaxaCount() - currPrunedSpeciesCount;
    }
    numberToClear = numberToSet;

    BitSet bitsToSet = new BitSet();
    BitSet bitsToClear = new BitSet();

    for (int e = 0; e < numberToSet; e++) {
        int choice = 0;
        while (true) {
            choice = (int) (Random.nextDouble() * bts.getTaxaCount());
            if (!currPruning.get(choice) && !bitsToSet.get(choice)) {
                break;
            }
        }
        bitsToSet.set(choice);
    }

    for (int e = 0; e < numberToClear; e++) {
        int choice = 0;
        while (true) {
            choice = (int) (Random.nextDouble() * bts.getTaxaCount());
            if (currPruning.get(choice) && !bitsToClear.get(choice)) {
                break;
            }
        }
        bitsToClear.set(choice);
    }

    currPruning.or(bitsToSet);
    currPruning.xor(bitsToClear);

    currScore = bts.pruneFast(currPruning);
    bts.unPrune();
}
From source file:de.hpi.petrinet.PetriNet.java
protected Map<Node, Set<Node>> deriveDominators(boolean reverse) {
    int initIndex = reverse ? this.getNodes().indexOf(this.getFinalPlace())
            : this.getNodes().indexOf(this.getInitialPlace());
    int size = this.getNodes().size();

    final BitSet[] dom = new BitSet[size];
    final BitSet ALL = new BitSet(size);
    for (Node n : this.getNodes())
        ALL.set(this.getNodes().indexOf(n));

    for (Node n : this.getNodes()) {
        int index = this.getNodes().indexOf(n);
        BitSet curDoms = new BitSet(size);
        dom[index] = curDoms;
        if (index != initIndex)
            curDoms.or(ALL);
        else
            curDoms.set(initIndex);
    }

    boolean changed = true;

    /*
     * While we change the dom relation for a node
     */
    while (changed) {
        changed = false;
        for (Node n : this.getNodes()) {
            int index = this.getNodes().indexOf(n);
            if (index == initIndex)
                continue;

            final BitSet old = dom[index];
            final BitSet curDoms = new BitSet(size);
            curDoms.or(old);

            Collection<Node> predecessors = reverse ? n.getSucceedingNodes() : n.getPrecedingNodes();
            for (Node p : predecessors) {
                int index2 = this.getNodes().indexOf(p);
                curDoms.and(dom[index2]);
            }

            curDoms.set(index);

            if (!curDoms.equals(old)) {
                changed = true;
                dom[index] = curDoms;
            }
        }
    }

    Map<Node, Set<Node>> dominators = new HashMap<Node, Set<Node>>();
    for (Node n : this.getNodes()) {
        int index = this.getNodes().indexOf(n);
        dominators.put(n, new HashSet<Node>());
        for (int i = 0; i < size; i++)
            if (dom[index].get(i))
                dominators.get(n).add(this.getNodes().get(i));
    }

    return dominators;
}
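The fixed-point step above copies a node's old dominator set with new BitSet(size) followed by or(old), then intersects it with each predecessor's set via and(). A minimal sketch of that copy-then-intersect idiom, using clone() as the equivalent one-call copy (names are illustrative, not from the PetriNet class):

import java.util.BitSet;

public class IntersectDemo {
    public static void main(String[] args) {
        BitSet old = new BitSet();
        old.set(0, 5); // range overload: bits 0..4 (toIndex is exclusive)

        BitSet pred = new BitSet();
        pred.set(2);
        pred.set(3);

        BitSet cur = (BitSet) old.clone(); // same effect as new BitSet() then cur.or(old)
        cur.and(pred);                     // intersect with a predecessor's dominator set
        System.out.println(cur);           // {2, 3}
    }
}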
From source file:sf.net.experimaestro.scheduler.SchedulerTest.java
@Test(description = "Test of the token resource - one job at a time")
public void test_token_resource()
        throws ExperimaestroCannotOverwrite, InterruptedException, IOException {
    File jobDirectory = mkTestDir();

    ThreadCount counter = new ThreadCount();
    TokenResource token = new TokenResource("scheduler_test/test_token_resource", 1);
    Transaction.run((em, t) -> token.save(t));

    // Sets 5 jobs
    WaitingJob[] jobs = new WaitingJob[5];
    BitSet failure = new BitSet();
    failure.set(3);
    for (int i = 0; i < jobs.length; i++) {
        jobs[i] = new WaitingJob(counter, jobDirectory, "job" + i,
                new Action(250, failure.get(i) ? 1 : 0, 0));
        final WaitingJob job = jobs[i];
        Transaction.run((em, t) -> {
            job.addDependency(token.createDependency(null));
            job.save(t);
        });
    }

    waitToFinish(0, counter, jobs, 1500, 5);
    waitBeforeCheck();

    // Check that one started after the other (since only one must have been
    // active at a time)
    LOGGER.info("Checking the token test output");

    // Retrieve all the jobs
    int errors = 0;
    errors += checkSequence(true, false, jobs);
    for (int i = 0; i < jobs.length; i++) {
        errors += checkState(jobs[i].finalCode() != 0 ? EnumSet.of(ResourceState.ERROR)
                : EnumSet.of(ResourceState.DONE), jobs[i]);
    }
    Assert.assertTrue(errors == 0, "Detected " + errors + " errors after running jobs");
}
From source file:mastodon.algorithms.SALinearAlgorithm.java
protected void tryPruning() {
    // choose the number of species in the list to perturb based on a Poisson
    // distribution with rate equal to the variable "mean" above
    int numberToSet = 0;
    int numberToClear = 0;

    while (numberToSet < 1 || numberToSet > currPrunedSpeciesCount) {
        numberToSet = pd.sample() + 1;
    }

    if (numberToSet > (bts.getTaxaCount() - currPrunedSpeciesCount)) {
        numberToSet = bts.getTaxaCount() - currPrunedSpeciesCount;
    }

    // if we are pruning by one more species now, clear one species less from
    // the pruning list this time
    if (currPruning.cardinality() < currPrunedSpeciesCount) {
        numberToClear = numberToSet - 1;
    } else {
        numberToClear = numberToSet;
    }

    BitSet bitsToSet = new BitSet();
    BitSet bitsToClear = new BitSet();

    for (int e = 0; e < numberToSet; e++) {
        int choice = 0;
        while (true) {
            choice = (int) (Random.nextDouble() * bts.getTaxaCount());
            if (!currPruning.get(choice) && !bitsToSet.get(choice)) {
                break;
            }
        }
        bitsToSet.set(choice);
    }

    for (int e = 0; e < numberToClear; e++) {
        int choice = 0;
        while (true) {
            choice = (int) (Random.nextDouble() * bts.getTaxaCount());
            if (currPruning.get(choice) && !bitsToClear.get(choice)) {
                break;
            }
        }
        bitsToClear.set(choice);
    }

    currPruning.or(bitsToSet);
    currPruning.xor(bitsToClear);

    currScore = bts.pruneFast(currPruning);
    bts.unPrune();
}
From source file:org.apache.hyracks.control.cc.executor.ActivityClusterPlanner.java
private TaskCluster[] buildConnectorPolicyAwareTaskClusters(ActivityCluster ac,
        Map<ActivityId, ActivityPlan> activityPlanMap,
        Map<TaskId, List<Pair<TaskId, ConnectorDescriptorId>>> taskConnectivity) {
    Map<TaskId, Set<TaskId>> taskClusterMap = new HashMap<>();
    for (ActivityId anId : ac.getActivityMap().keySet()) {
        ActivityPlan ap = activityPlanMap.get(anId);
        Task[] tasks = ap.getTasks();
        for (Task t : tasks) {
            Set<TaskId> cluster = new HashSet<>();
            TaskId tid = t.getTaskId();
            cluster.add(tid);
            taskClusterMap.put(tid, cluster);
        }
    }

    JobRun jobRun = executor.getJobRun();
    Map<ConnectorDescriptorId, IConnectorPolicy> connectorPolicies = jobRun.getConnectorPolicyMap();
    for (Map.Entry<TaskId, List<Pair<TaskId, ConnectorDescriptorId>>> e : taskConnectivity.entrySet()) {
        Set<TaskId> cluster = taskClusterMap.get(e.getKey());
        for (Pair<TaskId, ConnectorDescriptorId> p : e.getValue()) {
            IConnectorPolicy cPolicy = connectorPolicies.get(p.getRight());
            if (cPolicy.requiresProducerConsumerCoscheduling()) {
                cluster.add(p.getLeft());
            }
        }
    }

    /*
     * We compute the transitive closure of this (producer-consumer) relation to find the largest set of
     * tasks that need to be co-scheduled.
     */
    int counter = 0;
    TaskId[] ordinalList = new TaskId[taskClusterMap.size()];
    Map<TaskId, Integer> ordinalMap = new HashMap<>();
    for (TaskId tid : taskClusterMap.keySet()) {
        ordinalList[counter] = tid;
        ordinalMap.put(tid, counter);
        ++counter;
    }

    int n = ordinalList.length;
    BitSet[] paths = new BitSet[n];
    for (Map.Entry<TaskId, Set<TaskId>> e : taskClusterMap.entrySet()) {
        int i = ordinalMap.get(e.getKey());
        BitSet bsi = paths[i];
        if (bsi == null) {
            bsi = new BitSet(n);
            paths[i] = bsi;
        }
        for (TaskId ttid : e.getValue()) {
            int j = ordinalMap.get(ttid);
            paths[i].set(j);
            BitSet bsj = paths[j];
            if (bsj == null) {
                bsj = new BitSet(n);
                paths[j] = bsj;
            }
            bsj.set(i);
        }
    }
    for (int k = 0; k < n; ++k) {
        for (int i = paths[k].nextSetBit(0); i >= 0; i = paths[k].nextSetBit(i + 1)) {
            for (int j = paths[i].nextClearBit(0); j < n && j >= 0; j = paths[i].nextClearBit(j + 1)) {
                paths[i].set(j, paths[k].get(j));
                paths[j].set(i, paths[i].get(j));
            }
        }
    }

    BitSet pending = new BitSet(n);
    pending.set(0, n);
    List<List<TaskId>> clusters = new ArrayList<>();
    for (int i = pending.nextSetBit(0); i >= 0; i = pending.nextSetBit(i)) {
        List<TaskId> cluster = new ArrayList<>();
        for (int j = paths[i].nextSetBit(0); j >= 0; j = paths[i].nextSetBit(j + 1)) {
            cluster.add(ordinalList[j]);
            pending.clear(j);
        }
        clusters.add(cluster);
    }

    List<TaskCluster> tcSet = new ArrayList<>();
    counter = 0;
    for (List<TaskId> cluster : clusters) {
        List<Task> taskStates = new ArrayList<>();
        for (TaskId tid : cluster) {
            taskStates.add(activityPlanMap.get(tid.getActivityId()).getTasks()[tid.getPartition()]);
        }
        TaskCluster tc = new TaskCluster(new TaskClusterId(ac.getId(), counter++), ac,
                taskStates.toArray(new Task[taskStates.size()]));
        tcSet.add(tc);
        for (TaskId tid : cluster) {
            activityPlanMap.get(tid.getActivityId()).getTasks()[tid.getPartition()].setTaskCluster(tc);
        }
    }
    return tcSet.toArray(new TaskCluster[tcSet.size()]);
}
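The closure loop above (which also appears in the older edu.uci.ics variant below) writes bits one at a time through the two-argument set(index, value). Because the relation is stored symmetrically as BitSet rows, the same transitive closure can be expressed row-wise with or(). A minimal Warshall-style sketch, assuming a symmetric, reflexive relation over indices 0..n-1 (names illustrative, not the Hyracks code):

import java.util.BitSet;

public class BitSetClosure {
    /** In-place Warshall transitive closure over BitSet adjacency rows. */
    static void transitiveClosure(BitSet[] adj) {
        int n = adj.length;
        for (int k = 0; k < n; k++) {
            for (int i = 0; i < n; i++) {
                if (i != k && adj[i].get(k)) {
                    adj[i].or(adj[k]); // everything reachable from k is reachable from i
                }
            }
        }
    }

    public static void main(String[] args) {
        int n = 4;
        BitSet[] adj = new BitSet[n];
        for (int i = 0; i < n; i++) {
            adj[i] = new BitSet(n);
            adj[i].set(i); // reflexive, as in the planner's taskClusterMap
        }
        // edges 0-1 and 1-2, stored symmetrically
        adj[0].set(1); adj[1].set(0);
        adj[1].set(2); adj[2].set(1);
        transitiveClosure(adj);
        System.out.println(adj[0]); // {0, 1, 2}
    }
}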
From source file:edu.uci.ics.hyracks.control.cc.scheduler.ActivityClusterPlanner.java
private TaskCluster[] buildConnectorPolicyAwareTaskClusters(ActivityCluster ac,
        Map<ActivityId, ActivityPlan> activityPlanMap,
        Map<TaskId, List<Pair<TaskId, ConnectorDescriptorId>>> taskConnectivity) {
    Map<TaskId, Set<TaskId>> taskClusterMap = new HashMap<TaskId, Set<TaskId>>();
    for (ActivityId anId : ac.getActivityMap().keySet()) {
        ActivityPlan ap = activityPlanMap.get(anId);
        Task[] tasks = ap.getTasks();
        for (Task t : tasks) {
            Set<TaskId> cluster = new HashSet<TaskId>();
            TaskId tid = t.getTaskId();
            cluster.add(tid);
            taskClusterMap.put(tid, cluster);
        }
    }

    JobRun jobRun = scheduler.getJobRun();
    Map<ConnectorDescriptorId, IConnectorPolicy> connectorPolicies = jobRun.getConnectorPolicyMap();
    for (Map.Entry<TaskId, List<Pair<TaskId, ConnectorDescriptorId>>> e : taskConnectivity.entrySet()) {
        Set<TaskId> cluster = taskClusterMap.get(e.getKey());
        for (Pair<TaskId, ConnectorDescriptorId> p : e.getValue()) {
            IConnectorPolicy cPolicy = connectorPolicies.get(p.getRight());
            if (cPolicy.requiresProducerConsumerCoscheduling()) {
                cluster.add(p.getLeft());
            }
        }
    }

    /*
     * taskClusterMap contains for every TID x, x -> { coscheduled consumer TIDs U x }
     * We compute the transitive closure of this relation to find the largest set of
     * tasks that need to be co-scheduled
     */
    int counter = 0;
    TaskId[] ordinalList = new TaskId[taskClusterMap.size()];
    Map<TaskId, Integer> ordinalMap = new HashMap<TaskId, Integer>();
    for (TaskId tid : taskClusterMap.keySet()) {
        ordinalList[counter] = tid;
        ordinalMap.put(tid, counter);
        ++counter;
    }

    int n = ordinalList.length;
    BitSet[] paths = new BitSet[n];
    for (Map.Entry<TaskId, Set<TaskId>> e : taskClusterMap.entrySet()) {
        int i = ordinalMap.get(e.getKey());
        BitSet bsi = paths[i];
        if (bsi == null) {
            bsi = new BitSet(n);
            paths[i] = bsi;
        }
        for (TaskId ttid : e.getValue()) {
            int j = ordinalMap.get(ttid);
            paths[i].set(j);
            BitSet bsj = paths[j];
            if (bsj == null) {
                bsj = new BitSet(n);
                paths[j] = bsj;
            }
            bsj.set(i);
        }
    }
    for (int k = 0; k < n; ++k) {
        for (int i = paths[k].nextSetBit(0); i >= 0; i = paths[k].nextSetBit(i + 1)) {
            for (int j = paths[i].nextClearBit(0); j < n && j >= 0; j = paths[i].nextClearBit(j + 1)) {
                paths[i].set(j, paths[k].get(j));
                paths[j].set(i, paths[i].get(j));
            }
        }
    }

    BitSet pending = new BitSet(n);
    pending.set(0, n);
    List<List<TaskId>> clusters = new ArrayList<List<TaskId>>();
    for (int i = pending.nextSetBit(0); i >= 0; i = pending.nextSetBit(i)) {
        List<TaskId> cluster = new ArrayList<TaskId>();
        for (int j = paths[i].nextSetBit(0); j >= 0; j = paths[i].nextSetBit(j + 1)) {
            cluster.add(ordinalList[j]);
            pending.clear(j);
        }
        clusters.add(cluster);
    }

    List<TaskCluster> tcSet = new ArrayList<TaskCluster>();
    counter = 0;
    for (List<TaskId> cluster : clusters) {
        List<Task> taskStates = new ArrayList<Task>();
        for (TaskId tid : cluster) {
            taskStates.add(activityPlanMap.get(tid.getActivityId()).getTasks()[tid.getPartition()]);
        }
        TaskCluster tc = new TaskCluster(new TaskClusterId(ac.getId(), counter++), ac,
                taskStates.toArray(new Task[taskStates.size()]));
        tcSet.add(tc);
        for (TaskId tid : cluster) {
            activityPlanMap.get(tid.getActivityId()).getTasks()[tid.getPartition()].setTaskCluster(tc);
        }
    }
    TaskCluster[] taskClusters = tcSet.toArray(new TaskCluster[tcSet.size()]);
    return taskClusters;
}
From source file:com.google.uzaygezen.core.LongBitVector.java
@Override
public BitSet toBitSet() {
    BitSet b = new BitSet(size);
    for (int i = 0; i < size; i++) {
        if (unsafeGet(i)) {
            b.set(i);
        }
    }
    return b;
}
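When the backing storage is already a long (as the class name LongBitVector suggests), the per-bit copy can be replaced on Java 7+ by BitSet.valueOf(long[]), which uses the same little-endian bit numbering. A minimal sketch of the equivalence, not the library's actual code:

import java.util.BitSet;

public class LongToBitSet {
    public static void main(String[] args) {
        long word = 0b1010L; // bits 1 and 3 set
        BitSet viaValueOf = BitSet.valueOf(new long[] { word });

        // Equivalent per-bit loop, mirroring toBitSet() above:
        BitSet viaLoop = new BitSet(64);
        for (int i = 0; i < 64; i++) {
            if (((word >>> i) & 1L) != 0) {
                viaLoop.set(i);
            }
        }
        System.out.println(viaValueOf.equals(viaLoop)); // true
    }
}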
From source file:com.opengamma.financial.analytics.model.volatility.cube.SABRNonLinearSwaptionVolatilityCubeFittingFunctionNew.java
private BitSet getFixedParameters(final ValueRequirement desiredSurface) {
    final BitSet fixed = new BitSet(4);
    final String useFixedAlpha = desiredSurface.getConstraint(PROPERTY_USE_FIXED_ALPHA);
    if (Boolean.parseBoolean(useFixedAlpha)) {
        fixed.set(0);
    }
    final String useFixedBeta = desiredSurface.getConstraint(PROPERTY_USE_FIXED_BETA);
    if (Boolean.parseBoolean(useFixedBeta)) {
        fixed.set(1);
    }
    final String useFixedRho = desiredSurface.getConstraint(PROPERTY_USE_FIXED_RHO);
    if (Boolean.parseBoolean(useFixedRho)) {
        fixed.set(2);
    }
    final String useFixedNu = desiredSurface.getConstraint(PROPERTY_USE_FIXED_NU);
    if (Boolean.parseBoolean(useFixedNu)) {
        fixed.set(3);
    }
    return fixed;
}
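A flag set built this way is typically read back with nextSetBit. A brief sketch of enumerating which of the four SABR parameters (alpha, beta, rho, nu at indices 0-3, per the method above) were fixed; the reader class and sample values are illustrative:

import java.util.BitSet;

public class FixedFlagsReader {
    public static void main(String[] args) {
        String[] names = { "alpha", "beta", "rho", "nu" };
        BitSet fixed = new BitSet(4);
        fixed.set(1); // beta fixed
        fixed.set(3); // nu fixed
        for (int i = fixed.nextSetBit(0); i >= 0; i = fixed.nextSetBit(i + 1)) {
            System.out.println(names[i] + " is fixed");
        }
    }
}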
From source file:com.netspective.commons.acl.AccessControlListTest.java
/**
 * This test makes sure that an ACL is read properly from a file containing just a single
 * <access-control-list> tag with the default name of "acl"
 */
public void testSingleACLDataModelSchemaImportFromXmlValid()
        throws PermissionNotFoundException, DataModelException, InvocationTargetException,
        NoSuchMethodException, InstantiationException, IllegalAccessException, IOException,
        RoleNotFoundException {
    AccessControlListsComponent aclc = (AccessControlListsComponent) XdmComponentFactory.get(
            AccessControlListsComponent.class,
            new Resource(AccessControlListTest.class, RESOURCE_NAME_ONE),
            XdmComponentFactory.XDMCOMPFLAGS_DEFAULT);

    // Verify _something_ was loaded...
    assertNotNull(aclc);

    // Verify exactly _one_ ACL was loaded...
    AccessControlListsManager aclm = aclc.getItems();
    assertNotNull("Expected: AccessControlListsManager object, Found: null", aclm);
    AccessControlLists acls = aclm.getAccessControlLists();
    Integer expectedNumACLs = new Integer(1);
    assertNotNull("Expected: AccessControlLists object, Found: null", acls);
    assertEquals("Expected: " + expectedNumACLs + " ACL, Found: " + acls.size(),
            expectedNumACLs.intValue(), acls.size());

    // Verify the defaultAcl and the acl named "acl" are the same
    AccessControlList defaultAcl = aclm.getDefaultAccessControlList();
    AccessControlList aclAcl = aclm.getAccessControlList("acl");
    assertNotNull("Expected: Non-Null Default ACL, Found: null", defaultAcl);
    assertNotNull("Expected: Non-Null Default ACL, Found: null", aclAcl);
    assertEquals("Expected: ACL with name 'acl', Found: ACL with name " + defaultAcl.getName(),
            aclAcl, defaultAcl);

    // Verify number of permissions
    Map aclPermissions = defaultAcl.getPermissionsByName();
    assertEquals("Expected: Total permissions = 7, Found: Total permissions = " + aclPermissions.size(),
            7, aclPermissions.size());

    // Verify number of roles
    Map aclRoles = defaultAcl.getRolesByName();
    assertEquals("Expected: Total roles = 3, Found: Total roles = " + aclRoles.size(), 3, aclRoles.size());

    // Verify the index of the /acl/role/normal-user permission is 2
    Role normalUser = defaultAcl.getRole("/acl/role/normal-user");
    assertEquals("Expected: Id for /acl/role/normal-user = 2, Found: " + normalUser.getId(), 2,
            normalUser.getId());

    // Verify the set of permissions for /acl/role/normal-user are exactly what we expect
    BitSet normalUserPermissionSet = normalUser.getPermissions();
    BitSet expectedPermissionSet = new BitSet(11);
    expectedPermissionSet.set(1);
    expectedPermissionSet.set(2);
    expectedPermissionSet.set(3);
    expectedPermissionSet.set(4);
    expectedPermissionSet.set(5);
    assertEquals("Expected: Permissions for /acl/role/normal-user = " + expectedPermissionSet
            + ", Found: " + normalUserPermissionSet, expectedPermissionSet, normalUserPermissionSet);

    aclc.printErrorsAndWarnings();
}
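Since bits 1 through 5 are consecutive, the five individual set calls building expectedPermissionSet could be collapsed with the two-argument range overload, where toIndex is exclusive. A small sketch of the equivalence (variable names are illustrative):

import java.util.BitSet;

public class RangeSetExample {
    public static void main(String[] args) {
        BitSet expected = new BitSet(11);
        expected.set(1, 6); // sets bits 1, 2, 3, 4, 5 in one call

        BitSet oneByOne = new BitSet(11);
        for (int i = 1; i <= 5; i++) {
            oneByOne.set(i);
        }
        System.out.println(expected.equals(oneByOne)); // true
    }
}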