List of usage examples for java.util.BitSet: BitSet(long[] words)
private BitSet(long[] words) -- a private constructor that wraps an existing long[] word array; application code reaches it indirectly through the public static factory BitSet.valueOf(long[]) (Java 7+).
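Since the constructor itself is private, here is a minimal sketch of the supported route to it: BitSet.valueOf(long[]), which in the OpenJDK sources trims trailing zero words and then delegates to this constructor. The word values below are arbitrary illustration data.

import java.util.BitSet;

public class ValueOfExample {
    public static void main(String[] args) {
        // Each long supplies 64 bits, little-endian within the word:
        // bit i of the BitSet is (words[i / 64] >> (i % 64)) & 1.
        long[] words = {0b1011L};            // bits 0, 1 and 3 set
        BitSet bits = BitSet.valueOf(words); // delegates to the private BitSet(long[]) constructor

        System.out.println(bits);               // {0, 1, 3}
        System.out.println(bits.cardinality()); // 3

        // toLongArray() is the inverse operation.
        long[] roundTrip = bits.toLongArray();
        System.out.println(roundTrip[0] == words[0]); // true
    }
}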
From source file: com.tamingtext.util.SplitInput.java
/**
 * Perform a split on the specified input file. Results will be written to files of the same name
 * in the specified training and test output directories. The {@link #validate()} method is called
 * prior to executing the split.
 */
public void splitFile(Path inputFile) throws IOException {
    if (fs.getFileStatus(inputFile) == null) {
        throw new IOException(inputFile + " does not exist");
    } else if (fs.getFileStatus(inputFile).isDir()) {
        throw new IOException(inputFile + " is a directory");
    }

    validate();

    Path testOutputFile = new Path(testOutputDirectory, inputFile.getName());
    Path trainingOutputFile = new Path(trainingOutputDirectory, inputFile.getName());

    int lineCount = countLines(fs, inputFile, charset);
    log.info("{} has {} lines", inputFile.getName(), lineCount);

    int testSplitStart = 0;
    int testSplitSize = this.testSplitSize; // don't modify state
    BitSet randomSel = null;

    if (testRandomSelectionPct > 0 || testRandomSelectionSize > 0) {
        testSplitSize = this.testRandomSelectionSize;
        if (testRandomSelectionPct > 0) {
            testSplitSize = Math.round(lineCount * (testRandomSelectionPct / 100.0f));
        }
        log.info("{} test split size is {} based on random selection percentage {}",
                new Object[] { inputFile.getName(), testSplitSize, testRandomSelectionPct });
        long[] ridx = new long[testSplitSize];
        RandomSampler.sample(testSplitSize, lineCount - 1, testSplitSize, 0, ridx, 0, RandomUtils.getRandom());
        randomSel = new BitSet(lineCount);
        for (long idx : ridx) {
            randomSel.set((int) idx + 1);
        }
    } else {
        if (testSplitPct > 0) { // calculate split size based on percentage
            testSplitSize = Math.round(lineCount * (testSplitPct / 100.0f));
            log.info("{} test split size is {} based on percentage {}",
                    new Object[] { inputFile.getName(), testSplitSize, testSplitPct });
        } else {
            log.info("{} test split size is {}", inputFile.getName(), testSplitSize);
        }

        if (splitLocation > 0) { // calculate start of split based on percentage
            testSplitStart = Math.round(lineCount * (splitLocation / 100.0f));
            if (lineCount - testSplitStart < testSplitSize) {
                // adjust split start downwards based on split size.
                testSplitStart = lineCount - testSplitSize;
            }
            log.info("{} test split start is {} based on split location {}",
                    new Object[] { inputFile.getName(), testSplitStart, splitLocation });
        }

        if (testSplitStart < 0) {
            throw new IllegalArgumentException("test split size for " + inputFile
                    + " is too large, it would produce an "
                    + "empty training set from the initial set of " + lineCount + " examples");
        } else if ((lineCount - testSplitSize) < testSplitSize) {
            log.warn("Test set size for {} may be too large, {} is larger than the number of "
                    + "lines remaining in the training set: {}",
                    new Object[] { inputFile, testSplitSize, lineCount - testSplitSize });
        }
    }

    BufferedReader reader = new BufferedReader(new InputStreamReader(fs.open(inputFile), charset));
    Writer trainingWriter = new OutputStreamWriter(fs.create(trainingOutputFile), charset);
    Writer testWriter = new OutputStreamWriter(fs.create(testOutputFile), charset);

    int pos = 0;
    int trainCount = 0;
    int testCount = 0;

    String line;
    while ((line = reader.readLine()) != null) {
        pos++;

        Writer writer;
        if (testRandomSelectionPct > 0) { // Randomly choose
            writer = randomSel.get(pos) ? testWriter : trainingWriter;
        } else { // Choose based on location
            writer = pos > testSplitStart ? testWriter : trainingWriter;
        }

        if (writer == testWriter) {
            if (testCount >= testSplitSize) {
                writer = trainingWriter;
            } else {
                testCount++;
            }
        }
        if (writer == trainingWriter) {
            trainCount++;
        }
        writer.write(line);
        writer.write('\n');
    }

    IOUtils.close(Collections.singleton(trainingWriter));
    IOUtils.close(Collections.singleton(testWriter));

    log.info("file: {}, input: {} train: {}, test: {} starting at {}",
            new Object[] { inputFile.getName(), lineCount, trainCount, testCount, testSplitStart });

    if (callback != null) {
        callback.splitComplete(inputFile, lineCount, trainCount, testCount, testSplitStart);
    }
}
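In this method the BitSet is an O(1) membership test: sampled line numbers from ridx are marked once, and the copy loop simply asks randomSel.get(pos) for every line. A minimal self-contained sketch of the same selection pattern without the Hadoop and Mahout dependencies; the sizes are illustrative, and plain shuffling stands in for RandomSampler.

import java.util.ArrayList;
import java.util.BitSet;
import java.util.Collections;
import java.util.List;

public class RandomSplitSketch {
    public static void main(String[] args) {
        int lineCount = 100; // hypothetical input size
        int testSize = 20;   // hypothetical test-set size

        // Pick testSize distinct line numbers (1-based, as in the code above).
        List<Integer> lines = new ArrayList<>();
        for (int i = 1; i <= lineCount; i++) {
            lines.add(i);
        }
        Collections.shuffle(lines);

        BitSet testLines = new BitSet(lineCount + 1);
        for (int i = 0; i < testSize; i++) {
            testLines.set(lines.get(i));
        }

        // Routing a line is then a single O(1) bit lookup.
        int testCount = 0;
        for (int pos = 1; pos <= lineCount; pos++) {
            if (testLines.get(pos)) {
                testCount++; // would write this line to the test file
            }                // else: would write it to the training file
        }
        System.out.println("test lines: " + testCount + ", train lines: " + (lineCount - testCount));
    }
}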
From source file: org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.PORollupSampling.java
protected Result processPlan() throws ExecException {
    if (schema != null && tupleMaker == null) {
        // Note here that if SchemaTuple is currently turned on, then any UDFs in the chain
        // must follow good practices. Namely, they should not append to the Tuple that comes
        // out of an iterator (a practice which is fairly common, but is not recommended).
        tupleMaker = SchemaTupleFactory.getInstance(schema, false, GenContext.FOREACH);
        if (tupleMaker != null) {
            knownSize = true;
        }
    }
    if (tupleMaker == null) {
        tupleMaker = TupleFactory.getInstance();
    }

    Result res = new Result();

    // We check whether all the databags have exhausted their tuples. If so, we force the
    // reading of new data by setting data and its to null.
    if (its != null) {
        boolean restartIts = true;
        for (int i = 0; i < noItems; ++i) {
            if (its[i] != null && isToBeFlattenedArray[i]) {
                restartIts &= !its[i].hasNext();
            }
        }
        // This means that all the databags have reached their last elements,
        // so we need to force reading of fresh databags.
        if (restartIts) {
            its = null;
            data = null;
        }
    }

    if (its == null) {
        // getNext being called for the first time OR starting with a set of new data from inputs
        its = new Iterator[noItems];
        bags = new Object[noItems];
        earlyTermination = new BitSet(noItems);

        for (int i = 0; i < noItems; ++i) {
            // Getting the iterators; populate the input data
            Result inputData = null;
            switch (resultTypes[i]) {
            case DataType.BAG:
            case DataType.TUPLE:
            case DataType.BYTEARRAY:
            case DataType.MAP:
            case DataType.BOOLEAN:
            case DataType.INTEGER:
            case DataType.DOUBLE:
            case DataType.LONG:
            case DataType.FLOAT:
            case DataType.BIGINTEGER:
            case DataType.BIGDECIMAL:
            case DataType.DATETIME:
            case DataType.CHARARRAY:
                inputData = planLeafOps[i].getNext(resultTypes[i]);
                break;
            default: {
                int errCode = 2080;
                String msg = "Foreach currently does not handle type " + DataType.findTypeName(resultTypes[i]);
                throw new ExecException(msg, errCode, PigException.BUG);
            }
            }

            // We accrue information about which accumulators have terminated early;
            // in the case that they all do, we can finish.
            if (inputData.returnStatus == POStatus.STATUS_EARLY_TERMINATION) {
                if (!earlyTermination.get(i)) {
                    earlyTermination.set(i);
                }
                continue;
            }

            if (inputData.returnStatus == POStatus.STATUS_BATCH_OK) {
                continue;
            }

            if (inputData.returnStatus == POStatus.STATUS_EOP) {
                // We are done with all the elements. Time to return.
                its = null;
                bags = null;
                return inputData;
            }

            // If we see an error, just return it.
            if (inputData.returnStatus == POStatus.STATUS_ERR) {
                return inputData;
            }

            bags[i] = inputData.result;

            if (inputData.result instanceof DataBag && isToBeFlattenedArray[i]) {
                its[i] = ((DataBag) bags[i]).iterator();
            } else {
                its[i] = null;
            }
        }
    }

    // If accumulating and we haven't got data yet for some fields, just return.
    if (isAccumulative() && isAccumStarted()) {
        if (earlyTermination.cardinality() < noItems) {
            res.returnStatus = POStatus.STATUS_BATCH_OK;
        } else {
            res.returnStatus = POStatus.STATUS_EARLY_TERMINATION;
        }
        return res;
    }

    while (true) {
        if (data == null) {
            // getNext being called for the first time or starting on new input data:
            // we instantiate the template array and start populating it with data.
            data = new Object[noItems];
            for (int i = 0; i < noItems; ++i) {
                if (isToBeFlattenedArray[i] && bags[i] instanceof DataBag) {
                    if (its[i].hasNext()) {
                        data[i] = its[i].next();
                    } else {
                        // The input set is null, so we return. This is caught above
                        // and this function is recalled with new inputs.
                        its = null;
                        data = null;
                        res.returnStatus = POStatus.STATUS_NULL;
                        return res;
                    }
                } else {
                    data[i] = bags[i];
                }
            }

            if (getReporter() != null) {
                getReporter().progress();
            }

            res.result = createTuple(data);
            res.returnStatus = POStatus.STATUS_OK;
            return res;
        } else {
            // We try to find the last expression which needs flattening and start iterating
            // over it; we also try to update the template array.
            for (int index = noItems - 1; index >= 0; --index) {
                if (its[index] != null && isToBeFlattenedArray[index]) {
                    if (its[index].hasNext()) {
                        data[index] = its[index].next();
                        res.result = createTuple(data);
                        res.returnStatus = POStatus.STATUS_OK;
                        return res;
                    } else {
                        // Reset this index's iterator so the cross product can be achieved.
                        // We reset this way only for the indexes from the end: when the first
                        // index which needs to be flattened has reached the last element of
                        // its iterator we won't come here; instead, we reset all iterators
                        // at the beginning of this method.
                        its[index] = ((DataBag) bags[index]).iterator();
                        data[index] = its[index].next();
                    }
                }
            }
        }
    }
}
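The earlyTermination BitSet is effectively a set of input indexes: set(i) is idempotent, so cardinality() counts how many distinct inputs have terminated rather than how many termination events occurred. A minimal sketch of that completion-tracking idiom; the input count and indexes are illustrative.

import java.util.BitSet;

public class CompletionTracking {
    public static void main(String[] args) {
        int noItems = 4; // hypothetical number of parallel inputs
        BitSet done = new BitSet(noItems);

        // Mark inputs 0 and 2 as terminated. Setting a bit twice is harmless,
        // so cardinality() still counts each input exactly once.
        done.set(0);
        done.set(2);
        done.set(2);

        System.out.println(done.cardinality());           // 2
        System.out.println(done.cardinality() < noItems); // true: keep processing

        done.set(1);
        done.set(3);
        System.out.println(done.cardinality() == noItems); // true: everything terminated
    }
}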
From source file: com.bittorrent.mpetazzoni.client.SharedTorrent.java
/**
 * Return a copy of the bit field of available pieces for this torrent.
 *
 * <p>
 * Available pieces are pieces available in the swarm; they do not
 * include our own pieces.
 * </p>
 */
public BitSet getAvailablePieces() {
    if (!this.isInitialized()) {
        throw new IllegalStateException("Torrent not yet initialized!");
    }

    BitSet availablePieces = new BitSet(this.pieces.length);
    synchronized (this.pieces) {
        for (Piece piece : this.pieces) {
            if (piece.available()) {
                availablePieces.set(piece.getIndex());
            }
        }
    }
    return availablePieces;
}
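Because getAvailablePieces() returns a freshly built copy, callers can combine it with other bitfields using BitSet's in-place set algebra (and, or, andNot) without disturbing the torrent's state. A minimal sketch of a typical caller-side computation; the piece count and indexes are illustrative.

import java.util.BitSet;

public class PieceSelectionSketch {
    public static void main(String[] args) {
        int pieceCount = 8; // hypothetical torrent with 8 pieces

        BitSet availableInSwarm = new BitSet(pieceCount);
        availableInSwarm.set(1);
        availableInSwarm.set(3);
        availableInSwarm.set(5);

        BitSet alreadyHave = new BitSet(pieceCount);
        alreadyHave.set(3);

        // Pieces worth requesting: available in the swarm but not held locally.
        // andNot() mutates in place, which is safe on a defensive copy.
        BitSet wanted = (BitSet) availableInSwarm.clone();
        wanted.andNot(alreadyHave);

        System.out.println(wanted); // {1, 5}

        // Iterate set bits without scanning every index.
        for (int i = wanted.nextSetBit(0); i >= 0; i = wanted.nextSetBit(i + 1)) {
            System.out.println("request piece " + i);
        }
    }
}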
From source file: com.netease.news.utils.SplitInput.java
/**
 * Perform a split on the specified input file. Results will be written to files of the same name
 * in the specified training and test output directories. The {@link #validate()} method is called
 * prior to executing the split.
 */
public void splitFile(Path inputFile) throws IOException {
    Configuration conf = getConf();
    FileSystem fs = inputFile.getFileSystem(conf);
    if (fs.getFileStatus(inputFile) == null) {
        throw new IOException(inputFile + " does not exist");
    }
    if (fs.getFileStatus(inputFile).isDir()) {
        throw new IOException(inputFile + " is a directory");
    }

    validate();

    Path testOutputFile = new Path(testOutputDirectory, inputFile.getName());
    Path trainingOutputFile = new Path(trainingOutputDirectory, inputFile.getName());

    int lineCount = countLines(fs, inputFile, charset);
    log.info("{} has {} lines", inputFile.getName(), lineCount);

    int testSplitStart = 0;
    int testSplitSize = this.testSplitSize; // don't modify state
    BitSet randomSel = null;

    if (testRandomSelectionPct > 0 || testRandomSelectionSize > 0) {
        testSplitSize = this.testRandomSelectionSize;
        if (testRandomSelectionPct > 0) {
            testSplitSize = Math.round(lineCount * testRandomSelectionPct / 100.0f);
        }
        log.info("{} test split size is {} based on random selection percentage {}",
                inputFile.getName(), testSplitSize, testRandomSelectionPct);
        long[] ridx = new long[testSplitSize];
        RandomSampler.sample(testSplitSize, lineCount - 1, testSplitSize, 0, ridx, 0, RandomUtils.getRandom());
        randomSel = new BitSet(lineCount);
        for (long idx : ridx) {
            randomSel.set((int) idx + 1);
        }
    } else {
        if (testSplitPct > 0) { // calculate split size based on percentage
            testSplitSize = Math.round(lineCount * testSplitPct / 100.0f);
            log.info("{} test split size is {} based on percentage {}",
                    inputFile.getName(), testSplitSize, testSplitPct);
        } else {
            log.info("{} test split size is {}", inputFile.getName(), testSplitSize);
        }

        if (splitLocation > 0) { // calculate start of split based on percentage
            testSplitStart = Math.round(lineCount * splitLocation / 100.0f);
            if (lineCount - testSplitStart < testSplitSize) {
                // adjust split start downwards based on split size.
                testSplitStart = lineCount - testSplitSize;
            }
            log.info("{} test split start is {} based on split location {}",
                    inputFile.getName(), testSplitStart, splitLocation);
        }

        if (testSplitStart < 0) {
            throw new IllegalArgumentException("test split size for " + inputFile
                    + " is too large, it would produce an "
                    + "empty training set from the initial set of " + lineCount + " examples");
        } else if (lineCount - testSplitSize < testSplitSize) {
            log.warn("Test set size for {} may be too large, {} is larger than the number of "
                    + "lines remaining in the training set: {}",
                    inputFile, testSplitSize, lineCount - testSplitSize);
        }
    }

    int trainCount = 0;
    int testCount = 0;
    if (!useSequence) {
        BufferedReader reader = new BufferedReader(new InputStreamReader(fs.open(inputFile), charset));
        Writer trainingWriter = new OutputStreamWriter(fs.create(trainingOutputFile), charset);
        Writer testWriter = new OutputStreamWriter(fs.create(testOutputFile), charset);

        try {
            String line;
            int pos = 0;
            while ((line = reader.readLine()) != null) {
                pos++;

                Writer writer;
                if (testRandomSelectionPct > 0) { // Randomly choose
                    writer = randomSel.get(pos) ? testWriter : trainingWriter;
                } else { // Choose based on location
                    writer = pos > testSplitStart ? testWriter : trainingWriter;
                }

                if (writer == testWriter) {
                    if (testCount >= testSplitSize) {
                        writer = trainingWriter;
                    } else {
                        testCount++;
                    }
                }
                if (writer == trainingWriter) {
                    trainCount++;
                }
                writer.write(line);
                writer.write('\n');
            }
        } finally {
            Closeables.close(reader, true);
            Closeables.close(trainingWriter, false);
            Closeables.close(testWriter, false);
        }
    } else {
        SequenceFileIterator<Writable, Writable> iterator =
                new SequenceFileIterator<Writable, Writable>(inputFile, false, fs.getConf());
        SequenceFile.Writer trainingWriter = SequenceFile.createWriter(fs, fs.getConf(), trainingOutputFile,
                iterator.getKeyClass(), iterator.getValueClass());
        SequenceFile.Writer testWriter = SequenceFile.createWriter(fs, fs.getConf(), testOutputFile,
                iterator.getKeyClass(), iterator.getValueClass());
        try {
            int pos = 0;
            while (iterator.hasNext()) {
                pos++;

                SequenceFile.Writer writer;
                if (testRandomSelectionPct > 0) { // Randomly choose
                    writer = randomSel.get(pos) ? testWriter : trainingWriter;
                } else { // Choose based on location
                    writer = pos > testSplitStart ? testWriter : trainingWriter;
                }

                if (writer == testWriter) {
                    if (testCount >= testSplitSize) {
                        writer = trainingWriter;
                    } else {
                        testCount++;
                    }
                }
                if (writer == trainingWriter) {
                    trainCount++;
                }
                Pair<Writable, Writable> pair = iterator.next();
                writer.append(pair.getFirst(), pair.getSecond());
            }
        } finally {
            Closeables.close(iterator, true);
            Closeables.close(trainingWriter, false);
            Closeables.close(testWriter, false);
        }
    }

    log.info("file: {}, input: {} train: {}, test: {} starting at {}",
            inputFile.getName(), lineCount, trainCount, testCount, testSplitStart);

    if (callback != null) {
        callback.splitComplete(inputFile, lineCount, trainCount, testCount, testSplitStart);
    }
}
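Note that both branches only ever call randomSel.get(pos). If you need the selected line numbers themselves (for logging or auditing, say), BitSet exposes them directly; a minimal sketch using stream() (Java 8+) and the classic nextSetBit() loop, with illustrative indexes:

import java.util.BitSet;

public class InspectSelection {
    public static void main(String[] args) {
        BitSet randomSel = new BitSet(100);
        randomSel.set(7);
        randomSel.set(42);
        randomSel.set(99);

        // Java 8+: an IntStream of the set bit indexes.
        randomSel.stream().forEach(pos -> System.out.println("test line " + pos));

        // Equivalent pre-Java-8 idiom.
        for (int pos = randomSel.nextSetBit(0); pos >= 0; pos = randomSel.nextSetBit(pos + 1)) {
            System.out.println("test line " + pos);
        }
    }
}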
From source file: org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java
@Test
public void testSlowConsumerWithException() throws Exception {
    logger.info("Start " + this.testName.getMethodName());
    Map<String, Object> props = KafkaTestUtils.consumerProps("slow3", "false", embeddedKafka);
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<Integer, String>(props);
    ContainerProperties containerProps = new ContainerProperties(topic3);
    final CountDownLatch latch = new CountDownLatch(18);
    final BitSet bitSet = new BitSet(6);
    final Map<String, AtomicInteger> faults = new HashMap<>();
    RetryingMessageListenerAdapter<Integer, String> adapter = new RetryingMessageListenerAdapter<>(
            new MessageListener<Integer, String>() {

                @Override
                public void onMessage(ConsumerRecord<Integer, String> message) {
                    logger.info("slow3: " + message);
                    bitSet.set((int) (message.partition() * 3 + message.offset()));
                    String key = message.topic() + message.partition() + message.offset();
                    if (faults.get(key) == null) {
                        faults.put(key, new AtomicInteger(1));
                    } else {
                        faults.get(key).incrementAndGet();
                    }
                    latch.countDown(); // 3 per = 18
                    if (faults.get(key).get() < 3) { // succeed on the third attempt
                        throw new FooEx();
                    }
                }

            }, buildRetry(), null);
    containerProps.setMessageListener(adapter);
    containerProps.setPauseAfter(100);
    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testSlow3");
    container.start();
    Consumer<?, ?> consumer = spyOnConsumer(container);
    ContainerTestUtils.waitForAssignment(container, embeddedKafka.getPartitionsPerTopic());
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic3);
    template.sendDefault(0, "foo");
    template.sendDefault(2, "bar");
    template.sendDefault(0, "baz");
    template.sendDefault(2, "qux");
    template.flush();
    Thread.sleep(300);
    template.sendDefault(0, "fiz");
    template.sendDefault(2, "buz");
    template.flush();
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(bitSet.cardinality()).isEqualTo(6);
    verify(consumer, atLeastOnce()).pause(anyObject());
    verify(consumer, atLeastOnce()).resume(anyObject());
    container.stop();
    logger.info("Stop " + this.testName.getMethodName());
}
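The test folds each record's (partition, offset) pair into a single bit index (partition * 3 + offset, i.e. up to three offsets per partition), so the final bitSet.cardinality() == 6 assertion verifies that six distinct records were processed even though retries deliver some of them several times. A minimal sketch of that deduplicating-assertion idiom; the partition and offset values are hypothetical.

import java.util.BitSet;

public class SeenRecordsSketch {
    // Hypothetical layout: 2 partitions, at most 3 offsets each; index = partition * 3 + offset.
    static final int OFFSETS_PER_PARTITION = 3;

    public static void main(String[] args) {
        BitSet seen = new BitSet(6);

        // Simulated deliveries as {partition, offset}, including redeliveries.
        int[][] deliveries = { {0, 0}, {0, 0}, {0, 1}, {1, 0}, {0, 2}, {1, 1}, {1, 1}, {1, 2} };
        for (int[] d : deliveries) {
            seen.set(d[0] * OFFSETS_PER_PARTITION + d[1]);
        }

        // Redeliveries set the same bit again, so cardinality counts unique records only.
        System.out.println(seen.cardinality()); // 6
    }
}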
From source file: android.support.v7.widget.StaggeredGridLayoutManager.java
/**
 * Sets the number of spans for the layout. This will invalidate all of the span assignments
 * for Views.
 * <p>
 * Calling this method will automatically result in a new layout request unless the spanCount
 * parameter is equal to the current span count.
 *
 * @param spanCount Number of spans to layout
 */
public void setSpanCount(int spanCount) {
    assertNotInLayoutOrScroll(null);
    if (spanCount != mSpanCount) {
        invalidateSpanAssignments();
        mSpanCount = spanCount;
        mRemainingSpans = new BitSet(mSpanCount);
        mSpans = new Span[mSpanCount];
        for (int i = 0; i < mSpanCount; i++) {
            mSpans[i] = new Span(i);
        }
        requestLayout();
    }
}
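Elsewhere in this class, mRemainingSpans appears to act as a per-pass worklist: every span's bit is set before filling, and a span's bit is cleared once it can take no more views. A minimal self-contained sketch of that worklist idiom, independent of the RecyclerView internals; the span count and retired indexes are illustrative.

import java.util.BitSet;

public class SpanWorklistSketch {
    public static void main(String[] args) {
        int spanCount = 4; // hypothetical number of columns
        BitSet remainingSpans = new BitSet(spanCount);

        // Start each layout pass with every span still eligible.
        remainingSpans.set(0, spanCount); // set(from, to): 'to' is exclusive

        // As layout proceeds, retire spans that ran out of room.
        remainingSpans.clear(1);
        remainingSpans.clear(3);

        System.out.println(remainingSpans);           // {0, 2}
        System.out.println(remainingSpans.isEmpty()); // false: keep laying out
    }
}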
From source file: org.wso2.andes.kernel.router.TopicRoutingMatcher.java
/**
 * This method adds a constituent table with only null and other constituents.
 * This is required when a message comes with more than the available number of constituents.
 * If wildcard queues are available for those, they should match; hence we need to create
 * these empty constituent tables.
 */
private void addEmptyConstituentTable() {
    int noOfqueues = storageQueueList.size();
    Map<String, BitSet> constituentTable = new HashMap<>();

    BitSet nullBitSet = new BitSet(noOfqueues);
    BitSet otherBitSet = new BitSet(noOfqueues);

    if (noOfqueues > 0) {
        // Null constituent will always be true for empty constituents, hence need to flip.
        // Note: BitSet.flip(from, to) excludes 'to', so this call leaves the bit for the
        // last queue unset; flip(0, noOfqueues) would cover every queue index.
        nullBitSet.flip(0, noOfqueues - 1);

        for (int queueIndex = 0; queueIndex < noOfqueues; queueIndex++) {
            // For 'other', match if the subscriber's last constituent is a multi-level wildcard.
            String[] allConstituent = queueConstituents.get(queueIndex);
            String lastConstituent = allConstituent[allConstituent.length - 1];

            if (multiLevelWildCard.equals(lastConstituent)) {
                otherBitSet.set(queueIndex);
            } else {
                otherBitSet.set(queueIndex, false);
            }
        }
    }

    constituentTable.put(NULL_CONSTITUENT, nullBitSet);
    constituentTable.put(OTHER_CONSTITUENT, otherBitSet);

    constituentTables.add(constituentTable);
}
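A matcher built on such tables routes by intersection: one BitSet per constituent position, and a queue receives the message only if its bit survives an and() across every table. A minimal sketch of that table-intersection pattern; the topics, queue definitions, and table contents below are illustrative, not taken from the Andes sources.

import java.util.BitSet;

public class ConstituentIntersectionSketch {
    public static void main(String[] args) {
        int noOfQueues = 3;

        // Which queues match "sports" in constituent position 0
        // (queue 0: sports/*, queue 1: sports/cricket, queue 2: news/#).
        BitSet position0 = new BitSet(noOfQueues);
        position0.set(0);
        position0.set(1);

        // Which queues match "cricket" in constituent position 1.
        BitSet position1 = new BitSet(noOfQueues);
        position1.set(0); // single-level wildcard '*'
        position1.set(1); // exact match

        // Intersect on a copy so the tables themselves stay untouched.
        BitSet matches = (BitSet) position0.clone();
        matches.and(position1);

        System.out.println(matches); // {0, 1}: queues 0 and 1 receive "sports/cricket"
    }
}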
From source file: com.netspective.commons.acl.AccessControlListTest.java
/**
 * This test makes sure that an ACL is read properly from a file containing multiple
 * <access-control-list> tags, with one having the default name of "acl" and the rest
 * having other names.
 */
public void testMultipleACLWithDefaultDataModelSchemaImportFromXmlValid()
        throws RoleNotFoundException, PermissionNotFoundException, DataModelException,
        InvocationTargetException, NoSuchMethodException, InstantiationException,
        IllegalAccessException, IOException {
    AccessControlListsComponent aclc = (AccessControlListsComponent) XdmComponentFactory.get(
            AccessControlListsComponent.class,
            new Resource(AccessControlListTest.class, RESOURCE_NAME_THREE),
            XdmComponentFactory.XDMCOMPFLAGS_DEFAULT);

    // Verify _something_ was loaded...
    assertNotNull(aclc);

    // Verify the expected number of ACLs was loaded...
    AccessControlListsManager aclm = aclc.getItems();
    assertNotNull("Expected: AccessControlListsManager object, Found: null", aclm);

    AccessControlLists acls = aclm.getAccessControlLists();
    Integer expectedNumACLs = new Integer(3);
    assertNotNull("Expected: AccessControlLists object, Found: null", acls);
    assertEquals("Expected: " + expectedNumACLs + " ACLs, Found: " + acls.size(),
            expectedNumACLs.intValue(), acls.size());

    // Verify basic statistics about the ACLS object
    assertEquals(17, acls.getHighestPermissionId());
    assertEquals(8, acls.getHighestRoleId());
    assertEquals(17, acls.getNextPermissionId());
    assertEquals(8, acls.getNextRoleId());
    assertEquals(17, acls.permissionsCount());
    assertEquals(8, acls.rolesCount());

    // Verify the defaultAcl and the acl named "acl" are the same
    AccessControlList defaultAcl = aclm.getDefaultAccessControlList();
    AccessControlList aclAcl = aclm.getAccessControlList(AccessControlList.ACLNAME_DEFAULT);
    AccessControlList aclTwoAcl = aclm.getAccessControlList(AccessControlList.ACLNAME_DEFAULT + "_two");
    AccessControlList aclThreeAcl = aclm.getAccessControlList(AccessControlList.ACLNAME_DEFAULT + "_three");
    assertNotNull("Expected: Non-Null ACL named " + AccessControlList.ACLNAME_DEFAULT + ", Found: null",
            defaultAcl);
    assertNotNull("Expected: Non-Null ACL named " + AccessControlList.ACLNAME_DEFAULT + ", Found: null",
            aclAcl);
    assertNotNull("Expected: Non-Null ACL named " + AccessControlList.ACLNAME_DEFAULT + "_two, Found: null",
            aclTwoAcl);
    assertNotNull("Expected: Non-Null ACL named " + AccessControlList.ACLNAME_DEFAULT + "_three, Found: null",
            aclThreeAcl);
    assertEquals("Expected: ACL with name 'acl', Found: ACL with name " + defaultAcl.getName(), aclAcl,
            defaultAcl);
    assertEquals("Expected: ACL with name 'acl', Found: ACL with name " + aclAcl.getName(),
            AccessControlList.ACLNAME_DEFAULT, aclAcl.getName());
    assertEquals("Expected: ACL with name 'acl_two', Found: ACL with name " + aclTwoAcl.getName(),
            AccessControlList.ACLNAME_DEFAULT + "_two", aclTwoAcl.getName());
    assertEquals("Expected: ACL with name 'acl_three', Found: ACL with name " + aclThreeAcl.getName(),
            AccessControlList.ACLNAME_DEFAULT + "_three", aclThreeAcl.getName());

    // Verify the number of permissions loaded for each ACL
    Map aclAclPermissions = aclAcl.getPermissionsByName();
    Map aclTwoAclPermissions = aclTwoAcl.getPermissionsByName();
    Map aclThreeAclPermissions = aclThreeAcl.getPermissionsByName();
    assertEquals("Expected: Total permissions = 7, Found: Total permissions = " + aclAclPermissions.size(),
            7, aclAclPermissions.size());
    assertEquals("Expected: Total permissions = 5, Found: Total permissions = " + aclTwoAclPermissions.size(),
            5, aclTwoAclPermissions.size());
    assertEquals("Expected: Total permissions = 5, Found: Total permissions = " + aclThreeAclPermissions.size(),
            5, aclThreeAclPermissions.size());

    // Verify the number of roles loaded for each ACL
    Map aclAclRoles = aclAcl.getRolesByName();
    Map aclTwoAclRoles = aclTwoAcl.getRolesByName();
    Map aclThreeAclRoles = aclThreeAcl.getRolesByName();
    assertEquals(3, aclAclRoles.size());
    assertEquals(3, aclTwoAclRoles.size());
    assertEquals(2, aclThreeAclRoles.size());

    // Verify the ids of the roles we care about
    Role aclNormalUser = aclAcl.getRole("/acl/role/normal-user");
    Role aclTwoNormalUser = aclTwoAcl.getRole("/acl_two/role/normal-user");
    Role aclThreeReadOnlyUser = aclThreeAcl.getRole("/acl_three/role/read-only-user");
    assertEquals("Expected: Id for /acl/role/normal-user = 2, Found: " + aclNormalUser.getId(), 2,
            aclNormalUser.getId());
    assertEquals("Expected: Id for /acl_two/role/normal-user = 5, Found: " + aclTwoNormalUser.getId(), 5,
            aclTwoNormalUser.getId());
    assertEquals("Expected: Id for /acl_three/role/read-only-user = 7, Found: " + aclThreeReadOnlyUser.getId(),
            7, aclThreeReadOnlyUser.getId());

    //TODO: Fix Role so that the bit corresponding to the role's Id is not set in the role's permissions BitSet

    // Verify the set of permissions for /acl/role/normal-user are exactly what we expect
    BitSet aclNormalUserPermissionSet = aclNormalUser.getPermissions();
    BitSet aclExpectedPermissionSet = new BitSet(aclAcl.getHighestPermissionId());
    aclExpectedPermissionSet.set(1);
    aclExpectedPermissionSet.set(2);
    aclExpectedPermissionSet.set(3);
    aclExpectedPermissionSet.set(4);
    aclExpectedPermissionSet.set(5);
    //TODO: Fix this after Roles have been fixed
    // assertEquals("Expected: Permissions for /acl/role/normal-user = " + aclExpectedPermissionSet + ", Found: " + aclNormalUserPermissionSet, aclExpectedPermissionSet, aclNormalUserPermissionSet);

    // Verify the set of permissions for /acl_two/role/normal-user are exactly what we expect
    BitSet aclTwoNormalUserPermissionSet = aclTwoNormalUser.getPermissions();
    BitSet aclTwoExpectedPermissionSet = new BitSet(aclTwoAcl.getHighestPermissionId());
    aclTwoExpectedPermissionSet.set(8);
    aclTwoExpectedPermissionSet.set(9);
    aclTwoExpectedPermissionSet.set(10);
    aclTwoExpectedPermissionSet.set(11);
    //TODO: Fix this after Roles have been fixed
    // System.out.println("\n/acl_two/role/normal-user(" + aclTwoNormalUser.getId() + "): " + aclTwoNormalUserPermissionSet + "\n");
    // assertEquals("Expected: Permissions for /acl_two/role/normal-user = " + aclTwoExpectedPermissionSet + ", Found: " + aclTwoNormalUserPermissionSet, aclTwoExpectedPermissionSet, aclTwoNormalUserPermissionSet);

    // Verify the set of permissions for /acl_three/role/read-only-user are exactly what we expect
    BitSet aclThreeReadOnlyUserPermissionSet = aclThreeReadOnlyUser.getPermissions();
    BitSet aclThreeExpectedPermissionSet = new BitSet(aclThreeAcl.getHighestPermissionId());
    aclThreeExpectedPermissionSet.set(12);
    aclThreeExpectedPermissionSet.set(13);
    aclThreeExpectedPermissionSet.set(15);
    //TODO: Fix this after Roles have been fixed
    // assertEquals("Expected: Permissions for /acl_three/role/read-only-user = " + aclThreeExpectedPermissionSet + ", Found: " + aclThreeReadOnlyUserPermissionSet, aclThreeExpectedPermissionSet, aclThreeReadOnlyUserPermissionSet);

    aclc.printErrorsAndWarnings();
}
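Roles here store their grants as a BitSet keyed by permission id, so an exact-permissions check reduces to a single equals() between an expected and an actual bit set. A minimal sketch of that comparison, plus a containment check via and(); the permission ids are hypothetical.

import java.util.BitSet;

public class PermissionSetSketch {
    public static void main(String[] args) {
        // Expected grants for a hypothetical "normal-user" role: permission ids 1..5.
        BitSet expected = new BitSet(18);
        expected.set(1, 6); // set(from, to): 'to' is exclusive, so this sets 1,2,3,4,5

        // Actual grants as a Role object might report them.
        BitSet actual = new BitSet(18);
        for (int id : new int[] {1, 2, 3, 4, 5}) {
            actual.set(id);
        }

        // BitSet.equals() compares logical contents, ignoring the initial size hint.
        System.out.println(expected.equals(actual)); // true

        // For mere containment instead of equality: (actual AND expected) == expected.
        BitSet overlap = (BitSet) actual.clone();
        overlap.and(expected);
        System.out.println(overlap.equals(expected)); // true: every expected bit is granted
    }
}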
From source file: org.springframework.kafka.listener.ConcurrentMessageListenerContainerTests.java
@Test
public void testManualCommitSyncExisting() throws Exception {
    this.logger.info("Start MANUAL_IMMEDIATE with Existing");
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<Integer, String>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic8);
    template.sendDefault(0, "foo");
    template.sendDefault(2, "bar");
    template.sendDefault(0, "baz");
    template.sendDefault(2, "qux");
    template.flush();
    Map<String, Object> props = KafkaTestUtils.consumerProps("testManualExistingSync", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<Integer, String>(props);
    ContainerProperties containerProps = new ContainerProperties(topic8);
    containerProps.setSyncCommits(true);
    final CountDownLatch latch = new CountDownLatch(8);
    final BitSet bitSet = new BitSet(8);
    containerProps.setMessageListener((AcknowledgingMessageListener<Integer, String>) (message, ack) -> {
        ConcurrentMessageListenerContainerTests.this.logger.info("manualExisting: " + message);
        ack.acknowledge();
        bitSet.set((int) (message.partition() * 4 + message.offset()));
        latch.countDown();
    });
    containerProps.setAckMode(AckMode.MANUAL_IMMEDIATE);
    ConcurrentMessageListenerContainer<Integer, String> container =
            new ConcurrentMessageListenerContainer<>(cf, containerProps);
    container.setConcurrency(1);
    container.setBeanName("testManualExisting");
    container.start();
    ContainerTestUtils.waitForAssignment(container, embeddedKafka.getPartitionsPerTopic());
    template.sendDefault(0, "fooo");
    template.sendDefault(2, "barr");
    template.sendDefault(0, "bazz");
    template.sendDefault(2, "quxx");
    template.flush();
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(bitSet.cardinality()).isEqualTo(8);
    container.stop();
    this.logger.info("Stop MANUAL_IMMEDIATE with Existing");
}
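Compared to the latch, which releases after any 8 deliveries, the cardinality assertion additionally proves those 8 deliveries were 8 distinct (partition, offset) pairs rather than redeliveries of the same record. A minimal sketch contrasting the two counters, with simulated offsets:

import java.util.BitSet;
import java.util.concurrent.CountDownLatch;

public class LatchVersusCardinality {
    public static void main(String[] args) {
        CountDownLatch latch = new CountDownLatch(4);
        BitSet seen = new BitSet(4);

        // Four deliveries, but one is a duplicate of record 2.
        int[] offsets = {0, 1, 2, 2};
        for (int offset : offsets) {
            seen.set(offset);
            latch.countDown();
        }

        System.out.println(latch.getCount());   // 0: the latch is satisfied by raw delivery count
        System.out.println(seen.cardinality()); // 3: only three distinct records actually arrived
    }
}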
From source file: bobs.is.compress.sevenzip.SevenZOutputFile.java
private void writeFileEmptyStreams(final DataOutput header) throws IOException {
    boolean hasEmptyStreams = false;
    for (final SevenZArchiveEntry entry : files) {
        if (!entry.hasStream()) {
            hasEmptyStreams = true;
            break;
        }
    }
    if (hasEmptyStreams) {
        header.write(NID.kEmptyStream);
        final BitSet emptyStreams = new BitSet(files.size());
        for (int i = 0; i < files.size(); i++) {
            emptyStreams.set(i, !files.get(i).hasStream());
        }
        final ByteArrayOutputStream baos = new ByteArrayOutputStream();
        final DataOutputStream out = new DataOutputStream(baos);
        writeBits(out, emptyStreams, files.size());
        out.flush();
        final byte[] contents = baos.toByteArray();
        writeUint64(header, contents.length);
        header.write(contents);
    }
}
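writeBits and writeUint64 are private helpers of this class; the 7z format stores such bit vectors MSB-first, padded to a whole byte. A minimal sketch of what an MSB-first bit packer along those lines could look like; this is an assumption about the helper's shape, not the Commons Compress source itself.

import java.io.ByteArrayOutputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.BitSet;

public class BitPackingSketch {
    // Pack 'length' bits of 'bits' MSB-first: bit 0 of the BitSet lands in the
    // highest-order bit of the first output byte, and the final byte is zero-padded.
    static void writeBits(DataOutput out, BitSet bits, int length) throws IOException {
        int cache = 0;
        int shift = 7;
        for (int i = 0; i < length; i++) {
            cache |= (bits.get(i) ? 1 : 0) << shift;
            if (--shift < 0) {
                out.writeByte(cache);
                shift = 7;
                cache = 0;
            }
        }
        if (shift != 7) {
            out.writeByte(cache); // flush the partially filled final byte
        }
    }

    public static void main(String[] args) throws IOException {
        BitSet emptyStreams = new BitSet();
        emptyStreams.set(0);
        emptyStreams.set(3); // files 0 and 3 carry no stream

        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        writeBits(new DataOutputStream(baos), emptyStreams, 5);
        // 5 bits 10010 padded to one byte: 1001_0000 = 0x90.
        System.out.printf("%02X%n", baos.toByteArray()[0] & 0xFF); // 90
    }
}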