List of usage examples for java.util.BitSet.get

public boolean get(int bitIndex)

Returns the value of the bit with the specified index: true if the bit at bitIndex is currently set in this BitSet, false otherwise. Throws IndexOutOfBoundsException if bitIndex is negative.
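Before the real-world examples below, a minimal self-contained sketch of the call itself (class and variable names here are illustrative, not from any of the source files):

    import java.util.BitSet;

    public class BitSetGetDemo {
        public static void main(String[] args) {
            BitSet days = new BitSet(7);       // bits 0..6, all initially false
            days.set(0);                       // e.g. Monday
            days.set(4);                       // e.g. Friday
            System.out.println(days.get(0));   // true
            System.out.println(days.get(1));   // false
            System.out.println(days.get(100)); // false: get() past the logical size returns false
            // days.get(-1);                   // would throw IndexOutOfBoundsException
        }
    }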
From source file:JavaTron.AudioTron.java
public void setAlarm(int index, boolean enabled, boolean softEnabled, int hour, int min,
        boolean am, BitSet days, int volume) {
    if (index < 0 || index > 1) {
        return;
    }
    Vector commandArgs = new Vector();
    commandArgs.add("alarmenable");
    commandArgs.add(enabled ? "ON" : "OFF");
    commandArgs.add("alarmindex");
    commandArgs.add(new Integer(index).toString());
    if (enabled) {
        commandArgs.add("softenable");
        commandArgs.add(softEnabled ? "ON" : "OFF");
        commandArgs.add("alarmhour");
        commandArgs.add(new Integer(hour).toString());
        commandArgs.add("alarmmin");
        commandArgs.add(new Integer(min).toString());
        commandArgs.add("ampmctrl");
        commandArgs.add(am ? "1" : "2");
        if (days.get(0)) { commandArgs.add("alarmmon"); commandArgs.add("ON"); }
        if (days.get(1)) { commandArgs.add("alarmtue"); commandArgs.add("ON"); }
        if (days.get(2)) { commandArgs.add("alarmwed"); commandArgs.add("ON"); }
        if (days.get(3)) { commandArgs.add("alarmthu"); commandArgs.add("ON"); }
        if (days.get(4)) { commandArgs.add("alarmfri"); commandArgs.add("ON"); }
        if (days.get(5)) { commandArgs.add("alarmsat"); commandArgs.add("ON"); }
        if (days.get(6)) { commandArgs.add("alarmsun"); commandArgs.add("ON"); }
        commandArgs.add("volumectrl");
        commandArgs.add(new Integer(volume).toString());
    }
    GetCommand command = new GetCommand("/goform/webSetAlarmForm", commandArgs, null, ALARM);
    addCommand(command, 0);
}
From source file:com.bittorrent.mpetazzoni.client.SharedTorrent.java
/**
 * Peer ready handler.
 *
 * <p>
 * When a peer becomes ready to accept piece block requests, select a piece
 * to download and go for it.
 * </p>
 *
 * @param peer The peer that became ready.
 */
@Override
public synchronized void handlePeerReady(SharingPeer peer) {
    BitSet interesting = peer.getAvailablePieces();
    interesting.andNot(this.completedPieces);
    interesting.andNot(this.requestedPieces);
    logger.trace("Peer {} is ready and has {} interesting piece(s).", peer, interesting.cardinality());

    // If we didn't find interesting pieces, we need to check if we're in
    // an end-game situation. If yes, we request an already requested piece
    // to try to speed up the end.
    if (interesting.cardinality() == 0) {
        interesting = peer.getAvailablePieces();
        interesting.andNot(this.completedPieces);
        if (interesting.cardinality() == 0) {
            logger.trace("No interesting piece from {}!", peer);
            return;
        }
        if (this.completedPieces.cardinality() < ENG_GAME_COMPLETION_RATIO * this.pieces.length) {
            logger.trace("Not far along enough to warrant end-game mode.");
            return;
        }
        logger.trace("Possible end-game, we're about to request a piece "
                + "that was already requested from another peer.");
    }

    // Extract the RAREST_PIECE_JITTER rarest pieces from the interesting
    // pieces of this peer.
    ArrayList<Piece> choice = new ArrayList<Piece>(RAREST_PIECE_JITTER);
    synchronized (this.rarest) {
        for (Piece piece : this.rarest) {
            if (interesting.get(piece.getIndex())) {
                choice.add(piece);
                if (choice.size() >= RAREST_PIECE_JITTER) {
                    break;
                }
            }
        }
    }

    Piece chosen = choice.get(this.random.nextInt(Math.min(choice.size(), RAREST_PIECE_JITTER)));
    this.requestedPieces.set(chosen.getIndex());
    logger.trace("Requesting {} from {}, we now have {} outstanding request(s): {}",
            new Object[] { chosen, peer, this.requestedPieces.cardinality(), this.requestedPieces });
    peer.downloadPiece(chosen);
}
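The example above builds the interesting set with andNot(), an in-place set difference, before probing individual pieces with get(). A tiny sketch of that filtering step in isolation (the piece indices are illustrative):

    import java.util.BitSet;

    public class AndNotDemo {
        public static void main(String[] args) {
            BitSet available = new BitSet();
            available.set(0, 8);                     // peer advertises pieces 0..7
            BitSet completed = new BitSet();
            completed.set(0);
            completed.set(3);

            BitSet interesting = (BitSet) available.clone(); // andNot mutates, so work on a copy
            interesting.andNot(completed);           // drop pieces we already have
            System.out.println(interesting);         // {1, 2, 4, 5, 6, 7}
            System.out.println(interesting.get(3));  // false: piece 3 is no longer interesting
        }
    }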
From source file:org.apache.carbondata.core.scan.filter.FilterUtil.java
/**
 * Returns the encoded filter values for range filter values when a local
 * dictionary is present in the blocklet for the columns. If the number of
 * include filter values exceeds 60% of the total dictionary size, the
 * include filter is converted to an exclude filter.
 *
 * @param includeDictValues include filter values
 * @param carbonDictionary dictionary
 * @param useExclude whether using exclude is the more optimized choice
 * @return encoded filter values
 */
private static byte[][] getEncodedFilterValuesForRange(BitSet includeDictValues,
        CarbonDictionary carbonDictionary, boolean useExclude) {
    KeyGenerator keyGenerator = KeyGeneratorFactory
            .getKeyGenerator(new int[] { CarbonCommonConstants.LOCAL_DICTIONARY_MAX });
    List<byte[]> encodedFilterValues = new ArrayList<>();
    int[] dummy = new int[1];
    if (!useExclude) {
        try {
            // Encode every set bit, i.e. every included dictionary value.
            for (int i = includeDictValues.nextSetBit(0); i >= 0; i = includeDictValues.nextSetBit(i + 1)) {
                dummy[0] = i;
                encodedFilterValues.add(keyGenerator.generateKey(dummy));
            }
        } catch (KeyGenException e) {
            LOGGER.error(e.getMessage(), e);
        }
        return encodedFilterValues.toArray(new byte[encodedFilterValues.size()][]);
    } else {
        try {
            // Encode every dictionary value that is NOT in the include set.
            for (int i = 1; i < carbonDictionary.getDictionarySize(); i++) {
                if (!includeDictValues.get(i) && null != carbonDictionary.getDictionaryValue(i)) {
                    dummy[0] = i;
                    encodedFilterValues.add(keyGenerator.generateKey(dummy));
                }
            }
        } catch (KeyGenException e) {
            LOGGER.error(e.getMessage(), e);
        }
    }
    return getSortedEncodedFilters(encodedFilterValues);
}
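The include branch above relies on the canonical BitSet traversal idiom: nextSetBit(0) finds the first set bit, then nextSetBit(i + 1) jumps to the next one. A standalone sketch of the pattern (the bit positions are illustrative):

    import java.util.BitSet;

    public class NextSetBitDemo {
        public static void main(String[] args) {
            BitSet include = new BitSet();
            include.set(3);
            include.set(7);
            include.set(42);
            // Jump from set bit to set bit; nextSetBit returns -1 when none remain.
            for (int i = include.nextSetBit(0); i >= 0; i = include.nextSetBit(i + 1)) {
                System.out.println("set bit at " + i);
            }
        }
    }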
From source file:jetbrains.buildServer.clouds.azure.asm.connector.AzureApiConnector.java
private int getPortNumber(final String serviceName,
        final HostedServiceGetDetailedResponse.Deployment deployment) {
    // Despite the name, a set bit in busyPorts means the port is still available;
    // ports found to be in use are cleared below.
    final BitSet busyPorts = new BitSet();
    busyPorts.set(MIN_PORT_NUMBER, MAX_PORT_NUMBER);
    for (RoleInstance instance : deployment.getRoleInstances()) {
        for (InstanceEndpoint endpoint : instance.getInstanceEndpoints()) {
            final int port = endpoint.getPort();
            if (port >= MIN_PORT_NUMBER && port <= MAX_PORT_NUMBER) {
                busyPorts.set(port, false);
            }
        }
    }
    for (Role role : deployment.getRoles()) {
        for (ConfigurationSet conf : role.getConfigurationSets()) {
            for (InputEndpoint endpoint : conf.getInputEndpoints()) {
                final int port = endpoint.getPort();
                if (port >= MIN_PORT_NUMBER && port <= MAX_PORT_NUMBER) {
                    busyPorts.set(port, false);
                }
            }
        }
    }
    final Map<String, Integer> map = DEPLOYMENT_OPS.get(serviceName);
    if (map != null) {
        final Iterator<String> iter = map.keySet().iterator();
        while (iter.hasNext()) {
            final String operationId = iter.next();
            if (isActionFinished(operationId)) {
                iter.remove();
            } else {
                busyPorts.set(map.get(operationId), false);
            }
        }
    }
    int portNumber = MIN_PORT_NUMBER;
    for (int i = MIN_PORT_NUMBER; i <= MAX_PORT_NUMBER; i++) {
        if (busyPorts.get(i)) {
            portNumber = i;
            break;
        }
    }
    return portNumber;
}
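The final loop above scans bit by bit for the first set bit; the same lookup can also be written with nextSetBit(). A sketch of the equivalent free-slot scan (the port range and cleared ports are illustrative, not the connector's actual constants):

    import java.util.BitSet;

    public class FreePortDemo {
        private static final int MIN_PORT = 9090; // illustrative bounds
        private static final int MAX_PORT = 9099;

        public static void main(String[] args) {
            BitSet freePorts = new BitSet();
            freePorts.set(MIN_PORT, MAX_PORT + 1); // mark every candidate port as free
            freePorts.clear(9090);                 // simulate ports already in use
            freePorts.clear(9091);

            int port = freePorts.nextSetBit(MIN_PORT); // first port still marked free
            System.out.println(port >= 0 ? "free port: " + port : "no free port"); // free port: 9092
        }
    }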
From source file:org.apache.hadoop.mapreduce.lib.input.TestMRKeyValueTextInputFormat.java
@Test
public void testFormat() throws Exception {
    Job job = Job.getInstance(new Configuration(defaultConf));
    Path file = new Path(workDir, "test.txt");

    int seed = new Random().nextInt();
    LOG.info("seed = " + seed);
    Random random = new Random(seed);

    localFs.delete(workDir, true);
    FileInputFormat.setInputPaths(job, workDir);

    final int MAX_LENGTH = 10000;
    // for a variety of lengths
    for (int length = 0; length < MAX_LENGTH; length += random.nextInt(MAX_LENGTH / 10) + 1) {
        LOG.debug("creating; entries = " + length);

        // create a file with length entries
        Writer writer = new OutputStreamWriter(localFs.create(file));
        try {
            for (int i = 0; i < length; i++) {
                writer.write(Integer.toString(i * 2));
                writer.write("\t");
                writer.write(Integer.toString(i));
                writer.write("\n");
            }
        } finally {
            writer.close();
        }

        // try splitting the file in a variety of sizes
        KeyValueTextInputFormat format = new KeyValueTextInputFormat();
        for (int i = 0; i < 3; i++) {
            int numSplits = random.nextInt(MAX_LENGTH / 20) + 1;
            LOG.debug("splitting: requesting = " + numSplits);
            List<InputSplit> splits = format.getSplits(job);
            LOG.debug("splitting: got = " + splits.size());

            // check each split
            BitSet bits = new BitSet(length);
            for (int j = 0; j < splits.size(); j++) {
                LOG.debug("split[" + j + "]= " + splits.get(j));
                TaskAttemptContext context = MapReduceTestUtil
                        .createDummyMapTaskAttemptContext(job.getConfiguration());
                RecordReader<Text, Text> reader = format.createRecordReader(splits.get(j), context);
                Class<?> clazz = reader.getClass();
                assertEquals("reader class is KeyValueLineRecordReader.", KeyValueLineRecordReader.class,
                        clazz);
                MapContext<Text, Text, Text, Text> mcontext = new MapContextImpl<Text, Text, Text, Text>(
                        job.getConfiguration(), context.getTaskAttemptID(), reader, null, null,
                        MapReduceTestUtil.createDummyReporter(), splits.get(j));
                reader.initialize(splits.get(j), mcontext);

                Text key = null;
                Text value = null;
                try {
                    int count = 0;
                    while (reader.nextKeyValue()) {
                        key = reader.getCurrentKey();
                        clazz = key.getClass();
                        assertEquals("Key class is Text.", Text.class, clazz);
                        value = reader.getCurrentValue();
                        clazz = value.getClass();
                        assertEquals("Value class is Text.", Text.class, clazz);
                        final int k = Integer.parseInt(key.toString());
                        final int v = Integer.parseInt(value.toString());
                        assertEquals("Bad key", 0, k % 2);
                        assertEquals("Mismatched key/value", k / 2, v);
                        LOG.debug("read " + v);
                        assertFalse("Key in multiple partitions.", bits.get(v));
                        bits.set(v);
                        count++;
                    }
                    LOG.debug("splits[" + j + "]=" + splits.get(j) + " count=" + count);
                } finally {
                    reader.close();
                }
            }
            assertEquals("Some keys in no partition.", length, bits.cardinality());
        }
    }
}
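The test above uses the BitSet as an exactly-once tally: asserting get(v) is false before calling set(v) catches a key read from two splits, and the final cardinality() check catches keys never read at all. The same pattern in isolation (the observed values are illustrative):

    import java.util.BitSet;

    public class ExactlyOnceDemo {
        public static void main(String[] args) {
            int length = 5;
            int[] observed = { 0, 1, 3, 1, 4 };  // illustrative reads: 1 repeats, 2 is missing
            BitSet bits = new BitSet(length);
            for (int v : observed) {
                if (bits.get(v)) {
                    System.out.println("value seen in multiple partitions: " + v);
                }
                bits.set(v);
            }
            if (bits.cardinality() != length) {
                System.out.println("some values were never read"); // 2 was never read
            }
        }
    }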
From source file:org.apache.kylin.cube.model.CubeDesc.java
private void initMeasureReferenceToColumnFamily() {
    if (measures == null || measures.size() == 0)
        return;

    Map<String, MeasureDesc> measureLookup = new HashMap<String, MeasureDesc>();
    for (MeasureDesc m : measures)
        measureLookup.put(m.getName(), m);
    Map<String, Integer> measureIndexLookup = new HashMap<String, Integer>();
    for (int i = 0; i < measures.size(); i++)
        measureIndexLookup.put(measures.get(i).getName(), i);

    BitSet checkEachMeasureExist = new BitSet();
    for (HBaseColumnFamilyDesc cf : getHbaseMapping().getColumnFamily()) {
        for (HBaseColumnDesc c : cf.getColumns()) {
            String[] colMeasureRefs = c.getMeasureRefs();
            MeasureDesc[] measureDescs = new MeasureDesc[colMeasureRefs.length];
            int[] measureIndex = new int[colMeasureRefs.length];
            for (int i = 0; i < colMeasureRefs.length; i++) {
                measureDescs[i] = measureLookup.get(colMeasureRefs[i]);
                checkState(measureDescs[i] != null, "measure desc at (%s) is null", i);
                measureIndex[i] = measureIndexLookup.get(colMeasureRefs[i]);
                checkState(measureIndex[i] >= 0, "measure index at (%s) is negative", i);
                checkEachMeasureExist.set(measureIndex[i]);
            }
            c.setMeasures(measureDescs);
            c.setMeasureIndex(measureIndex);
            c.setColumnFamilyName(cf.getName());
        }
    }

    // Verify every measure was referenced by some column family.
    for (int i = 0; i < measures.size(); i++) {
        checkState(checkEachMeasureExist.get(i),
                "measure (%s) does not exist in column family, or measure duplicates", measures.get(i));
    }
}
From source file:org.apache.tez.dag.app.rm.DagAwareYarnTaskScheduler.java
@GuardedBy("this") @Nullable// w ww. j a v a 2s. c om private TaskRequest tryAssignReuseContainerForPriority(HeldContainer hc, String matchLocation, Priority priority, BitSet allowedVertices) { List<? extends Collection<TaskRequest>> results = client.getMatchingRequests(priority, matchLocation, hc.getCapability()); if (results.isEmpty()) { return null; } for (Collection<TaskRequest> requests : results) { for (TaskRequest request : requests) { final int vertexIndex = request.getVertexIndex(); if (!allowedVertices.get(vertexIndex)) { LOG.debug("Not assigning task {} since it is a descendant of a pending vertex", request.getTask()); continue; } Object signature = hc.getSignature(); if (signature == null || signatureMatcher.isSuperSet(signature, request.getContainerSignature())) { if (!maybeChangeNode(request, hc.getContainer().getNodeId())) { assignContainer(request, hc, matchLocation); return request; } } } } return null; }
From source file:org.apache.hadoop.hbase.security.visibility.DefaultVisibilityLabelServiceImpl.java
@Override
public VisibilityExpEvaluator getVisibilityExpEvaluator(Authorizations authorizations) throws IOException {
    // If a super user issues a get/scan, he should be able to scan the cells
    // irrespective of the Visibility labels
    if (isReadFromSystemAuthUser()) {
        return new VisibilityExpEvaluator() {
            @Override
            public boolean evaluate(Cell cell) throws IOException {
                return true;
            }
        };
    }
    List<String> authLabels = null;
    for (ScanLabelGenerator scanLabelGenerator : scanLabelGenerators) {
        try {
            // null authorizations to be handled inside SLG impl.
            authLabels = scanLabelGenerator.getLabels(VisibilityUtils.getActiveUser(), authorizations);
            authLabels = (authLabels == null) ? new ArrayList<String>() : authLabels;
            authorizations = new Authorizations(authLabels);
        } catch (Throwable t) {
            LOG.error(t);
            throw new IOException(t);
        }
    }
    int labelsCount = this.labelsCache.getLabelsCount();
    final BitSet bs = new BitSet(labelsCount + 1); // ordinal is index 1 based
    if (authLabels != null) {
        for (String authLabel : authLabels) {
            int labelOrdinal = this.labelsCache.getLabelOrdinal(authLabel);
            if (labelOrdinal != 0) {
                bs.set(labelOrdinal);
            }
        }
    }

    return new VisibilityExpEvaluator() {
        @Override
        public boolean evaluate(Cell cell) throws IOException {
            boolean visibilityTagPresent = false;
            // Save an object allocation where we can
            if (cell.getTagsLength() > 0) {
                Iterator<Tag> tagsItr = CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(),
                        cell.getTagsLength());
                while (tagsItr.hasNext()) {
                    boolean includeKV = true;
                    Tag tag = tagsItr.next();
                    if (tag.getType() == VISIBILITY_TAG_TYPE) {
                        visibilityTagPresent = true;
                        int offset = tag.getTagOffset();
                        int endOffset = offset + tag.getTagLength();
                        while (offset < endOffset) {
                            Pair<Integer, Integer> result = StreamUtils.readRawVarint32(tag.getBuffer(),
                                    offset);
                            int currLabelOrdinal = result.getFirst();
                            if (currLabelOrdinal < 0) {
                                // check for the absence of this label in the Scan Auth labels
                                // ie. to check BitSet corresponding bit is 0
                                int temp = -currLabelOrdinal;
                                if (bs.get(temp)) {
                                    includeKV = false;
                                    break;
                                }
                            } else {
                                if (!bs.get(currLabelOrdinal)) {
                                    includeKV = false;
                                    break;
                                }
                            }
                            offset += result.getSecond();
                        }
                        if (includeKV) {
                            // We got one visibility expression getting evaluated to true. Good to include
                            // this KV in the result then.
                            return true;
                        }
                    }
                }
            }
            return !(visibilityTagPresent);
        }
    };
}
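In the evaluator above, a negative ordinal encodes a NOT-label: the cell is excluded when bs.get(-ordinal) is true, while a positive ordinal requires bs.get(ordinal). A simplified sketch of that check against an authorization BitSet (the int-array expression encoding is illustrative, not HBase's actual varint tag format):

    import java.util.BitSet;

    public class VisibilityCheckDemo {
        public static void main(String[] args) {
            BitSet auths = new BitSet();
            auths.set(2);                         // the scanning user holds the label with ordinal 2

            int[] expression = { 2, -5 };         // illustrative encoding: label 2 AND NOT label 5
            boolean includeKV = true;
            for (int ordinal : expression) {
                if (ordinal < 0) {
                    if (auths.get(-ordinal)) {    // negated label present in auths: exclude
                        includeKV = false;
                        break;
                    }
                } else if (!auths.get(ordinal)) { // required label missing: exclude
                    includeKV = false;
                    break;
                }
            }
            System.out.println(includeKV);        // true
        }
    }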
From source file:org.apache.tez.runtime.library.common.writers.TestUnorderedPartitionedKVWriter.java
public void textTest(int numRegularRecords, int numPartitions, long availableMemory, int numLargeKeys,
        int numLargevalues, int numLargeKvPairs) throws IOException, InterruptedException {
    Partitioner partitioner = new HashPartitioner();
    ApplicationId appId = ApplicationId.newInstance(10000, 1);
    TezCounters counters = new TezCounters();
    String uniqueId = UUID.randomUUID().toString();
    OutputContext outputContext = createMockOutputContext(counters, appId, uniqueId);
    Random random = new Random();

    Configuration conf = createConfiguration(outputContext, Text.class, Text.class, shouldCompress, -1,
            HashPartitioner.class);
    CompressionCodec codec = null;
    if (shouldCompress) {
        codec = new DefaultCodec();
        ((Configurable) codec).setConf(conf);
    }

    int numRecordsWritten = 0;

    Map<Integer, Multimap<String, String>> expectedValues = new HashMap<Integer, Multimap<String, String>>();
    for (int i = 0; i < numPartitions; i++) {
        expectedValues.put(i, LinkedListMultimap.<String, String>create());
    }

    UnorderedPartitionedKVWriter kvWriter = new UnorderedPartitionedKVWriterForTest(outputContext, conf,
            numPartitions, availableMemory);

    int sizePerBuffer = kvWriter.sizePerBuffer;

    BitSet partitionsWithData = new BitSet(numPartitions);
    Text keyText = new Text();
    Text valText = new Text();
    for (int i = 0; i < numRegularRecords; i++) {
        String key = createRandomString(Math.abs(random.nextInt(10)));
        String val = createRandomString(Math.abs(random.nextInt(20)));
        keyText.set(key);
        valText.set(val);
        int partition = partitioner.getPartition(keyText, valText, numPartitions);
        partitionsWithData.set(partition);
        expectedValues.get(partition).put(key, val);
        kvWriter.write(keyText, valText);
        numRecordsWritten++;
    }

    // Write Large key records
    for (int i = 0; i < numLargeKeys; i++) {
        String key = createRandomString(sizePerBuffer + Math.abs(random.nextInt(100)));
        String val = createRandomString(Math.abs(random.nextInt(20)));
        keyText.set(key);
        valText.set(val);
        int partition = partitioner.getPartition(keyText, valText, numPartitions);
        partitionsWithData.set(partition);
        expectedValues.get(partition).put(key, val);
        kvWriter.write(keyText, valText);
        numRecordsWritten++;
    }

    // Write Large val records
    for (int i = 0; i < numLargevalues; i++) {
        String key = createRandomString(Math.abs(random.nextInt(10)));
        String val = createRandomString(sizePerBuffer + Math.abs(random.nextInt(100)));
        keyText.set(key);
        valText.set(val);
        int partition = partitioner.getPartition(keyText, valText, numPartitions);
        partitionsWithData.set(partition);
        expectedValues.get(partition).put(key, val);
        kvWriter.write(keyText, valText);
        numRecordsWritten++;
    }

    // Write records where key + val are large (but both can fit in the buffer individually)
    for (int i = 0; i < numLargeKvPairs; i++) {
        String key = createRandomString(sizePerBuffer / 2 + Math.abs(random.nextInt(100)));
        String val = createRandomString(sizePerBuffer / 2 + Math.abs(random.nextInt(100)));
        keyText.set(key);
        valText.set(val);
        int partition = partitioner.getPartition(keyText, valText, numPartitions);
        partitionsWithData.set(partition);
        expectedValues.get(partition).put(key, val);
        kvWriter.write(keyText, valText);
        numRecordsWritten++;
    }

    List<Event> events = kvWriter.close();
    verify(outputContext, never()).fatalError(any(Throwable.class), any(String.class));

    TezCounter outputLargeRecordsCounter = counters.findCounter(TaskCounter.OUTPUT_LARGE_RECORDS);
    assertEquals(numLargeKeys + numLargevalues + numLargeKvPairs, outputLargeRecordsCounter.getValue());

    // Validate the event
    assertEquals(1, events.size());
    assertTrue(events.get(0) instanceof CompositeDataMovementEvent);
    CompositeDataMovementEvent cdme = (CompositeDataMovementEvent) events.get(0);
    assertEquals(0, cdme.getSourceIndexStart());
    assertEquals(numPartitions, cdme.getCount());
    DataMovementEventPayloadProto eventProto = DataMovementEventPayloadProto
            .parseFrom(ByteString.copyFrom(cdme.getUserPayload()));
    assertFalse(eventProto.hasData());
    BitSet emptyPartitionBits = null;
    if (partitionsWithData.cardinality() != numPartitions) {
        assertTrue(eventProto.hasEmptyPartitions());
        byte[] emptyPartitions = TezCommonUtils
                .decompressByteStringToByteArray(eventProto.getEmptyPartitions());
        emptyPartitionBits = TezUtilsInternal.fromByteArray(emptyPartitions);
        assertEquals(numPartitions - partitionsWithData.cardinality(), emptyPartitionBits.cardinality());
    } else {
        assertFalse(eventProto.hasEmptyPartitions());
        emptyPartitionBits = new BitSet(numPartitions);
    }
    assertEquals(HOST_STRING, eventProto.getHost());
    assertEquals(SHUFFLE_PORT, eventProto.getPort());
    assertEquals(uniqueId, eventProto.getPathComponent());

    // Verify the actual data
    TezTaskOutput taskOutput = new TezTaskOutputFiles(conf, uniqueId);
    Path outputFilePath = null;
    Path spillFilePath = null;
    try {
        outputFilePath = taskOutput.getOutputFile();
    } catch (DiskErrorException e) {
        if (numRecordsWritten > 0) {
            fail();
        } else {
            // Record checking not required.
            return;
        }
    }
    try {
        spillFilePath = taskOutput.getOutputIndexFile();
    } catch (DiskErrorException e) {
        if (numRecordsWritten > 0) {
            fail();
        } else {
            // Record checking not required.
            return;
        }
    }

    // Special case for 0 records.
    TezSpillRecord spillRecord = new TezSpillRecord(spillFilePath, conf);
    DataInputBuffer keyBuffer = new DataInputBuffer();
    DataInputBuffer valBuffer = new DataInputBuffer();
    Text keyDeser = new Text();
    Text valDeser = new Text();
    for (int i = 0; i < numPartitions; i++) {
        if (emptyPartitionBits.get(i)) {
            continue;
        }
        TezIndexRecord indexRecord = spillRecord.getIndex(i);
        FSDataInputStream inStream = FileSystem.getLocal(conf).open(outputFilePath);
        inStream.seek(indexRecord.getStartOffset());
        IFile.Reader reader = new IFile.Reader(inStream, indexRecord.getPartLength(), codec, null, null,
                false, 0, -1);
        while (reader.nextRawKey(keyBuffer)) {
            reader.nextRawValue(valBuffer);
            keyDeser.readFields(keyBuffer);
            valDeser.readFields(valBuffer);
            int partition = partitioner.getPartition(keyDeser, valDeser, numPartitions);
            assertTrue(expectedValues.get(partition).remove(keyDeser.toString(), valDeser.toString()));
        }
        inStream.close();
    }
    for (int i = 0; i < numPartitions; i++) {
        assertEquals(0, expectedValues.get(i).size());
        expectedValues.remove(i);
    }
    assertEquals(0, expectedValues.size());
}
From source file:juicebox.data.MatrixZoomData.java
/**
 * Dump the O/E or Pearsons matrix to standard out in ascii format.
 *
 * @param df   Density function (expected values)
 * @param type will be "oe", "pearsons", or "expected"
 * @param no   Normalization type
 * @param les  Binary output stream
 * @param pw   Text output stream
 * @throws java.io.IOException If fails to write
 */
public void dumpOE(ExpectedValueFunction df, String type, NormalizationType no,
        LittleEndianOutputStream les, PrintWriter pw) throws IOException {
    if (les == null && pw == null) {
        pw = new PrintWriter(System.out);
    }

    if (type.equals("oe")) {
        int nBins;
        if (zoom.getUnit() == HiC.Unit.BP) {
            nBins = chr1.getLength() / zoom.getBinSize() + 1;
        } else {
            nBins = ((DatasetReaderV2) reader).getFragCount(chr1) / zoom.getBinSize() + 1;
        }

        BasicMatrix matrix = new InMemoryMatrix(nBins);
        BitSet bitSet = new BitSet(nBins);

        List<Integer> blockNumbers = reader.getBlockNumbers(this);
        for (int blockNumber : blockNumbers) {
            Block b = null;
            try {
                b = reader.readNormalizedBlock(blockNumber, this, df.getNormalizationType());
                if (b != null) {
                    for (ContactRecord rec : b.getContactRecords()) {
                        int x = rec.getBinX();
                        int y = rec.getBinY();
                        int dist = Math.abs(x - y);
                        double expected = 0;
                        try {
                            expected = df.getExpectedValue(chr1.getIndex(), dist);
                        } catch (Exception e) {
                            e.printStackTrace();
                        }
                        double observed = rec.getCounts(); // Observed is already normalized
                        double normCounts = observed / expected;
                        // The apache library doesn't seem to play nice with NaNs
                        if (!Double.isNaN(normCounts)) {
                            matrix.setEntry(x, y, (float) normCounts);
                            if (x != y) {
                                matrix.setEntry(y, x, (float) normCounts);
                            }
                            bitSet.set(x);
                            bitSet.set(y);
                        }
                    }
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }

        if (les != null)
            les.writeInt(nBins);

        for (int i = 0; i < nBins; i++) {
            for (int j = 0; j < nBins; j++) {
                float output;
                // Entries where neither bin ever received data are emitted as NaN.
                if (!bitSet.get(i) && !bitSet.get(j)) {
                    output = Float.NaN;
                } else
                    output = matrix.getEntry(i, j);
                if (les != null)
                    les.writeFloat(output);
                else
                    pw.print(output + " ");
            }
            if (les == null)
                pw.println();
        }
        if (les == null) {
            pw.println();
            pw.flush();
        }
    } else {
        BasicMatrix pearsons = getPearsons(df);
        if (pearsons != null) {
            int dim = pearsons.getRowDimension();
            for (int i = 0; i < dim; i++) {
                for (int j = 0; j < dim; j++) {
                    float output = pearsons.getEntry(i, j);
                    if (les != null)
                        les.writeFloat(output);
                    else
                        pw.print(output + " ");
                }
                if (les == null)
                    pw.println();
            }
            pw.flush();
        } else {
            log.error("Pearson's not available at zoom " + zoom);
        }
    }
}