List of usage examples for java.util.BitSet (constructor BitSet)

private BitSet(long[] words)
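Note that this long[] constructor is private to the JDK; application code (including every example below) goes through the public BitSet() or BitSet(int nbits) constructors. Before the real-world examples, here is a minimal sketch of the BitSet calls they all rely on; the sizes and values are arbitrary:

import java.util.BitSet;

public class BitSetBasicsDemo {
    public static void main(String[] args) {
        // the examples below all allocate with the public int constructor
        BitSet bits = new BitSet(16);

        bits.set(3);          // turn a single bit on
        bits.set(5, 9);       // turn a range [5, 9) on
        bits.clear(6);        // turn one bit back off
        bits.flip(0);         // toggle a bit

        System.out.println(bits);                 // {0, 3, 5, 7, 8}
        System.out.println(bits.get(3));          // true
        System.out.println(bits.cardinality());   // 5
        System.out.println(bits.nextClearBit(5)); // 6

        // iterate over the set bits
        for (int i = bits.nextSetBit(0); i >= 0; i = bits.nextSetBit(i + 1)) {
            System.out.println("bit " + i + " is set");
        }
    }
}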
From source file:com.joliciel.jochre.graphics.ShapeImpl.java
/**
 * Find the outline of the shape as a BitSet.
 * @param threshold the blackness threshold
 * @return a BitSet with one bit per pixel, set for outline pixels
 */
public BitSet getOutline(int threshold) {
    BitSet outline = this.outlines.get((Integer) threshold);
    if (outline == null) {
        outline = new BitSet(this.getHeight() * this.getWidth());
        int counter = 0;
        for (int y = 0; y < this.getHeight(); y++) {
            for (int x = 0; x < this.getWidth(); x++) {
                boolean black = this.isPixelBlack(x, y, threshold);
                if (!black)
                    outline.set(counter++, false);
                else {
                    boolean innerPixel = this.isPixelBlack(x - 1, y, threshold)
                            && this.isPixelBlack(x + 1, y, threshold)
                            && this.isPixelBlack(x, y - 1, threshold)
                            && this.isPixelBlack(x, y + 1, threshold);
                    outline.set(counter++, !innerPixel);
                } // is it black?
            } // next x
        } // next y
        this.outlines.put((Integer) threshold, outline);
    }
    return outline;
}
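The counter above advances in row-major order, so each pixel (x, y) lands at bit y * width + x. A standalone sketch of that mapping, with a made-up image size and pixel array (not part of the Jochre code):

import java.util.BitSet;

public class OutlineIndexDemo {
    public static void main(String[] args) {
        int width = 4, height = 3;                       // hypothetical image size
        boolean[][] black = new boolean[height][width];  // placeholder pixel data
        black[1][2] = true;

        BitSet bits = new BitSet(width * height);
        for (int y = 0; y < height; y++) {
            for (int x = 0; x < width; x++) {
                // row-major mapping: pixel (x, y) -> bit y * width + x
                bits.set(y * width + x, black[y][x]);
            }
        }
        System.out.println(bits); // prints {6}
    }
}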
From source file:org.apache.flink.streaming.connectors.kafka.KafkaITCase.java
@Test
public void customPartitioningTestTopology() throws Exception {
    LOG.info("Starting KafkaITCase.customPartitioningTestTopology()");

    String topic = "customPartitioningTestTopic";
    createTestTopic(topic, 3, 1);

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment(1);

    // add consuming topology:
    DataStreamSource<Tuple2<Long, String>> consuming = env
            .addSource(new PersistentKafkaSource<Tuple2<Long, String>>(topic,
                    new Utils.TypeInformationSerializationSchema<Tuple2<Long, String>>(
                            new Tuple2<Long, String>(1L, ""), env.getConfig()),
                    standardCC));
    consuming.addSink(new SinkFunction<Tuple2<Long, String>>() {
        private static final long serialVersionUID = 1L;

        int start = -1;
        BitSet validator = new BitSet(101);

        boolean gotPartition1 = false;
        boolean gotPartition2 = false;
        boolean gotPartition3 = false;

        @Override
        public void invoke(Tuple2<Long, String> value) throws Exception {
            LOG.debug("Got " + value);
            String[] sp = value.f1.split("-");
            int v = Integer.parseInt(sp[1]);

            assertEquals(value.f0 - 1000, (long) v);

            switch (v) {
            case 9:
                gotPartition1 = true;
                break;
            case 19:
                gotPartition2 = true;
                break;
            case 99:
                gotPartition3 = true;
                break;
            }

            if (start == -1) {
                start = v;
            }
            Assert.assertFalse("Received tuple twice", validator.get(v - start));
            validator.set(v - start);

            if (gotPartition1 && gotPartition2 && gotPartition3) {
                // check if everything in the bitset is set to true
                int nc;
                if ((nc = validator.nextClearBit(0)) != 100) {
                    throw new RuntimeException("The bitset was not set to 1 on all elements. Next clear:"
                            + nc + " Set: " + validator);
                }
                throw new SuccessException();
            }
        }
    });

    // add producing topology
    DataStream<Tuple2<Long, String>> stream = env.addSource(new SourceFunction<Tuple2<Long, String>>() {
        private static final long serialVersionUID = 1L;
        boolean running = true;

        @Override
        public void run(SourceContext<Tuple2<Long, String>> ctx) throws Exception {
            LOG.info("Starting source.");
            int cnt = 0;
            while (running) {
                ctx.collect(new Tuple2<Long, String>(1000L + cnt, "kafka-" + cnt++));
                try {
                    Thread.sleep(100);
                } catch (InterruptedException ignored) {
                }
            }
        }

        @Override
        public void cancel() {
            LOG.info("Source got cancel()");
            running = false;
        }
    });
    stream.addSink(new KafkaSink<Tuple2<Long, String>>(brokerConnectionStrings, topic,
            new Utils.TypeInformationSerializationSchema<Tuple2<Long, String>>(new Tuple2<Long, String>(1L, ""),
                    env.getConfig()),
            new CustomPartitioner()));

    tryExecute(env, "custom partitioning test");

    LOG.info("Finished KafkaITCase.customPartitioningTestTopology()");
}
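This test and the Flink tests below share the same validation idiom: each received element sets one bit in a "validator" BitSet, duplicates are caught with get(), and completeness is checked with nextClearBit(). A self-contained sketch of that idiom, with an invented element count:

import java.util.BitSet;

public class ValidatorDemo {
    public static void main(String[] args) {
        int totalElements = 100;                  // hypothetical number of expected elements
        BitSet validator = new BitSet(totalElements);

        for (int v = 0; v < totalElements; v++) {
            if (validator.get(v)) {
                throw new IllegalStateException("Received element twice: " + v);
            }
            validator.set(v);
        }

        // all bits 0..totalElements-1 are set, so the first clear bit is totalElements
        int nc = validator.nextClearBit(0);
        if (nc != totalElements) {
            throw new IllegalStateException("Missing element, next clear bit: " + nc);
        }
        System.out.println("All " + totalElements + " elements accounted for");
    }
}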
From source file:org.dataconservancy.packaging.tool.integration.PackageGenerationTest.java
/**
 * Reads in any BagIt file that uses a ':' to delimit a keyword and value pair.
 *
 * @param bagItFile the file to read
 * @return a Map keyed by the keywords, with the List of values as they appear in the file
 * @throws IOException
 */
private Map<String, List<String>> parseBagItKeyValuesFile(File bagItFile) throws IOException {
    Map<String, List<String>> result = new HashMap<>();

    // Used to track state; a streams no-no. Probably should do this the old-fashioned way.
    BitSet bitSet = new BitSet(1);
    bitSet.set(0);
    StringBuilder key = new StringBuilder();

    Files.lines(bagItFile.toPath(), Charset.forName("UTF-8")).flatMap(line -> Stream
            .of(line.substring(0, line.indexOf(":")), line.substring(line.indexOf(":") + 1).trim()))
            .forEach(token -> {
                if (bitSet.get(0)) {
                    // key
                    key.delete(0, key.length());
                    result.putIfAbsent(token, new ArrayList<>());
                    key.append(token);
                    bitSet.clear(0);
                } else {
                    // value
                    result.get(key.toString()).add(token);
                    bitSet.set(0);
                }
            });

    return result;
}
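The single-bit BitSet here is just a mutable boolean that the lambda is allowed to touch, since locals captured by a lambda must be effectively final. A minimal sketch of the same trick, with an invented token stream:

import java.util.BitSet;
import java.util.stream.Stream;

public class LambdaFlagDemo {
    public static void main(String[] args) {
        // a lambda cannot reassign a captured local, but it can mutate a BitSet
        BitSet expectingKey = new BitSet(1);
        expectingKey.set(0);

        Stream.of("Bag-Size", "1 KB", "Payload-Oxum", "1024.1").forEach(token -> {
            if (expectingKey.get(0)) {
                System.out.println("key:   " + token);
                expectingKey.clear(0);
            } else {
                System.out.println("value: " + token);
                expectingKey.set(0);
            }
        });
    }
}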
From source file:org.apache.flink.streaming.connectors.kafka.KafkaITCase.java
@Test
public void simpleTestTopology() throws Exception {
    String topic = "simpleTestTopic";
    createTestTopic(topic, 1, 1);

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment(1);

    // add consuming topology:
    DataStreamSource<String> consuming = env
            .addSource(new PersistentKafkaSource<String>(topic, new JavaDefaultStringSchema(), standardCC));
    consuming.addSink(new SinkFunction<String>() {
        private static final long serialVersionUID = 1L;

        int elCnt = 0;
        int start = -1;
        BitSet validator = new BitSet(101);

        @Override
        public void invoke(String value) throws Exception {
            LOG.debug("Got " + value);
            String[] sp = value.split("-");
            int v = Integer.parseInt(sp[1]);
            if (start == -1) {
                start = v;
            }
            Assert.assertFalse("Received tuple twice", validator.get(v - start));
            validator.set(v - start);
            elCnt++;
            if (elCnt == 100) {
                // check if everything in the bitset is set to true
                int nc;
                if ((nc = validator.nextClearBit(0)) != 100) {
                    throw new RuntimeException("The bitset was not set to 1 on all elements. Next clear:"
                            + nc + " Set: " + validator);
                }
                throw new SuccessException();
            }
        }
    });

    // add producing topology
    DataStream<String> stream = env.addSource(new SourceFunction<String>() {
        private static final long serialVersionUID = 1L;
        boolean running = true;

        @Override
        public void run(SourceContext<String> ctx) throws Exception {
            LOG.info("Starting source.");
            int cnt = 0;
            while (running) {
                ctx.collect("kafka-" + cnt++);
                try {
                    Thread.sleep(100);
                } catch (InterruptedException ignored) {
                }
            }
        }

        @Override
        public void cancel() {
            LOG.info("Source got cancel()");
            running = false;
        }
    });
    stream.addSink(new KafkaSink<String>(brokerConnectionStrings, topic, new JavaDefaultStringSchema()));

    tryExecute(env, "simpletest");
}
From source file:org.onosproject.tetopology.management.impl.DistributedTeTopologyStore.java
@Override
public void updateNetwork(Network network) {
    InternalNetwork curNetwork = networkMap.get(network.networkId());
    TeTopologyKey topoKey = null;
    if (network.teTopologyId() != null) {
        topoKey = newTeTopologyKey(network.teTopologyId());
    }
    // Update TE nodes
    List<TeNodeKey> teNodeKeys = null;
    if (MapUtils.isNotEmpty(network.nodes())) {
        teNodeKeys = Lists.newArrayList();
        for (Map.Entry<KeyId, NetworkNode> entry : network.nodes().entrySet()) {
            NetworkNodeKey nodeKey = new NetworkNodeKey(network.networkId(), entry.getKey());
            TeNodeKey teNodeKey = null;
            if (topoKey != null && entry.getValue().teNode() != null) {
                teNodeKey = new TeNodeKey(topoKey, entry.getValue().teNode().teNodeId());
            }
            updateNetworkNode(nodeKey, entry.getValue(), true, false, teNodeKey);
            teNodeKeys.add(teNodeKey);
        }
    }
    // Update TE links
    List<TeLinkTpGlobalKey> teLinkKeys = null;
    if (MapUtils.isNotEmpty(network.links())) {
        teLinkKeys = Lists.newArrayList();
        for (Map.Entry<KeyId, NetworkLink> entry : network.links().entrySet()) {
            NetworkLinkKey linkKey = new NetworkLinkKey(network.networkId(), entry.getKey());
            TeLinkTpGlobalKey teLinkKey = null;
            if (topoKey != null && entry.getValue().teLink() != null) {
                teLinkKey = new TeLinkTpGlobalKey(topoKey, entry.getValue().teLink().teLinkKey());
            }
            updateNetworkLink(linkKey, entry.getValue(), true, false, teLinkKey);
            teLinkKeys.add(teLinkKey);
        }
    }
    // New network, update TE Topology first
    if (curNetwork == null) {
        InternalTeTopology intTopo = new InternalTeTopology(network.teTopologyId().topologyId());
        intTopo.setTeNodeKeys(teNodeKeys);
        intTopo.setTeLinkKeys(teLinkKeys);
        BitSet flags = new BitSet(TeConstants.FLAG_MAX_BITS);
        flags.set(TeTopology.BIT_LEARNT);
        if (network.teTopologyId().clientId() == TeTopologyManager.DEFAULT_PROVIDER_ID) {
            // Hard rule for now
            flags.set(TeTopology.BIT_CUSTOMIZED);
        }
        CommonTopologyData common = new CommonTopologyData(network.networkId(),
                OptimizationType.NOT_OPTIMIZED, flags, network.ownerId());
        intTopo.setTopologydata(common);
        teTopologyMap.put(topoKey, intTopo);
    }
    // Finally Update networkMap
    InternalNetwork newNetwork = new InternalNetwork(network);
    newNetwork.setTeTopologyKey(topoKey);
    networkMap.put(network.networkId(), newNetwork);
}
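In this example BitSet acts as a compact flag word: each bit position is a named constant and individual flags are switched on with set(). A small sketch of that pattern with placeholder constants (not the actual ONOS TeTopology bit positions):

import java.util.BitSet;

public class TopologyFlagsDemo {
    // hypothetical bit positions, analogous to TeTopology.BIT_LEARNT etc.
    static final int BIT_LEARNT = 0;
    static final int BIT_CUSTOMIZED = 1;
    static final int FLAG_MAX_BITS = 8;

    public static void main(String[] args) {
        BitSet flags = new BitSet(FLAG_MAX_BITS);
        flags.set(BIT_LEARNT);
        flags.set(BIT_CUSTOMIZED);

        System.out.println("learnt?     " + flags.get(BIT_LEARNT));     // true
        System.out.println("customized? " + flags.get(BIT_CUSTOMIZED)); // true
        System.out.println("flags set:  " + flags.cardinality());       // 2
    }
}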
From source file:org.alfresco.repo.security.permissions.impl.acegi.ACLEntryAfterInvocationProvider.java
@SuppressWarnings("rawtypes") private Object[] decide(Authentication authentication, Object object, ConfigAttributeDefinition config, Object[] returnedObject) throws AccessDeniedException { // Assumption: value is not null BitSet incudedSet = new BitSet(returnedObject.length); List<ConfigAttributeDefintion> supportedDefinitions = extractSupportedDefinitions(config); if (supportedDefinitions.size() == 0) { return returnedObject; }// www. j av a 2 s . c o m for (int i = 0, l = returnedObject.length; i < l; i++) { Object current = returnedObject[i]; for (ConfigAttributeDefintion cad : supportedDefinitions) { incudedSet.set(i, true); NodeRef testNodeRef = null; if (cad.typeString.equals(AFTER_ACL_NODE)) { if (StoreRef.class.isAssignableFrom(current.getClass())) { testNodeRef = nodeService.getRootNode((StoreRef) current); } else if (NodeRef.class.isAssignableFrom(current.getClass())) { testNodeRef = (NodeRef) current; } else if (ChildAssociationRef.class.isAssignableFrom(current.getClass())) { testNodeRef = ((ChildAssociationRef) current).getChildRef(); } else if (Pair.class.isAssignableFrom(current.getClass())) { testNodeRef = (NodeRef) ((Pair) current).getSecond(); } else if (PermissionCheckValue.class.isAssignableFrom(current.getClass())) { testNodeRef = ((PermissionCheckValue) current).getNodeRef(); } else { throw new ACLEntryVoterException( "The specified parameter is recognized: " + current.getClass()); } } else if (cad.typeString.equals(AFTER_ACL_PARENT)) { if (StoreRef.class.isAssignableFrom(current.getClass())) { testNodeRef = null; } else if (NodeRef.class.isAssignableFrom(current.getClass())) { testNodeRef = nodeService.getPrimaryParent((NodeRef) current).getParentRef(); } else if (ChildAssociationRef.class.isAssignableFrom(current.getClass())) { testNodeRef = ((ChildAssociationRef) current).getParentRef(); } else if (Pair.class.isAssignableFrom(current.getClass())) { testNodeRef = (NodeRef) ((Pair) current).getSecond(); } else if (PermissionCheckValue.class.isAssignableFrom(current.getClass())) { NodeRef nodeRef = ((PermissionCheckValue) current).getNodeRef(); testNodeRef = nodeService.getPrimaryParent(nodeRef).getParentRef(); } else { throw new ACLEntryVoterException( "The specified parameter is recognized: " + current.getClass()); } } if (log.isDebugEnabled()) { log.debug("\t" + cad.typeString + " test on " + testNodeRef + " from " + current.getClass().getName()); } if (isUnfiltered(testNodeRef)) { continue; } if (incudedSet.get(i) && (testNodeRef != null) && (permissionService.hasPermission(testNodeRef, cad.required.toString()) == AccessStatus.DENIED)) { incudedSet.set(i, false); } } } if (incudedSet.cardinality() == returnedObject.length) { return returnedObject; } else { Object[] answer = new Object[incudedSet.cardinality()]; for (int i = incudedSet.nextSetBit(0), p = 0; i >= 0; i = incudedSet.nextSetBit(++i), p++) { answer[p] = returnedObject[i]; } return answer; } }
From source file:edu.brown.benchmark.seats.SEATSClient.java
protected BitSet getSeatsBitSet(long flight_id) {
    BitSet seats = CACHE_BOOKED_SEATS.get(flight_id);
    if (seats == null) {
        // synchronized (CACHE_BOOKED_SEATS) {
        seats = CACHE_BOOKED_SEATS.get(flight_id);
        if (seats == null) {
            seats = new BitSet(SEATSConstants.FLIGHTS_NUM_SEATS);
            CACHE_BOOKED_SEATS.put(flight_id, seats);
        }
        // } // SYNCH
    }
    return (seats);
}
From source file:org.apache.ctakes.ytex.kernel.metric.ConceptSimilarityServiceImpl.java
/**
 * convert the list of tuis into a bitset
 *
 * @param tuis
 * @param mapTuiIndex
 * @return
 */
private BitSet tuiListToBitset(Set<String> tuis, SortedMap<String, Integer> mapTuiIndex) {
    BitSet bs = new BitSet(mapTuiIndex.size());
    for (String tui : tuis) {
        bs.set(mapTuiIndex.get(tui));
    }
    return bs;
}
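The helper relies on a stable string-to-index map; once two TUI sets are encoded this way they can be compared as bit vectors, e.g. intersected with and(). A quick usage sketch with invented TUIs and mapping (not from the cTAKES code):

import java.util.BitSet;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;

public class TuiBitsetDemo {
    public static void main(String[] args) {
        // hypothetical TUI -> bit index mapping
        SortedMap<String, Integer> mapTuiIndex = new TreeMap<>();
        mapTuiIndex.put("T047", 0);
        mapTuiIndex.put("T121", 1);
        mapTuiIndex.put("T184", 2);

        BitSet a = toBitset(Set.of("T047", "T184"), mapTuiIndex);
        BitSet b = toBitset(Set.of("T121", "T184"), mapTuiIndex);

        BitSet common = (BitSet) a.clone();
        common.and(b);                // intersection of the two TUI sets
        System.out.println(common);   // {2}
    }

    static BitSet toBitset(Set<String> tuis, SortedMap<String, Integer> mapTuiIndex) {
        BitSet bs = new BitSet(mapTuiIndex.size());
        for (String tui : tuis) {
            bs.set(mapTuiIndex.get(tui));
        }
        return bs;
    }
}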
From source file:org.apache.flink.streaming.connectors.kafka.KafkaConsumerTestBase.java
/**
 * Ensure Kafka is working on both producer and consumer side.
 * This executes a job that contains two Flink pipelines.
 *
 * <pre>
 * (generator source) --> (kafka sink)-[KAFKA-TOPIC]-(kafka source) --> (validating sink)
 * </pre>
 *
 * We need to externally retry this test. We cannot let Flink's retry mechanism do it, because the Kafka producer
 * does not guarantee exactly-once output. Hence a recovery would introduce duplicates that
 * cause the test to fail.
 *
 * This test also ensures that FLINK-3156 doesn't happen again:
 *
 * The following situation caused a NPE in the FlinkKafkaConsumer
 *
 * topic-1 <-- elements are only produced into topic1.
 * topic-2
 *
 * Therefore, this test is consuming as well from an empty topic.
 */
@RetryOnException(times = 2, exception = kafka.common.NotLeaderForPartitionException.class)
public void runSimpleConcurrentProducerConsumerTopology() throws Exception {
    final String topic = "concurrentProducerConsumerTopic_" + UUID.randomUUID().toString();
    final String additionalEmptyTopic = "additionalEmptyTopic_" + UUID.randomUUID().toString();

    final int parallelism = 3;
    final int elementsPerPartition = 100;
    final int totalElements = parallelism * elementsPerPartition;

    createTestTopic(topic, parallelism, 2);
    createTestTopic(additionalEmptyTopic, parallelism, 1); // create an empty topic which will remain empty all the time

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);
    env.setParallelism(parallelism);
    env.enableCheckpointing(500);
    env.setRestartStrategy(RestartStrategies.noRestart()); // fail immediately
    env.getConfig().disableSysoutLogging();

    TypeInformation<Tuple2<Long, String>> longStringType = TypeInfoParser.parse("Tuple2<Long, String>");

    TypeInformationSerializationSchema<Tuple2<Long, String>> sourceSchema = new TypeInformationSerializationSchema<>(
            longStringType, env.getConfig());

    TypeInformationSerializationSchema<Tuple2<Long, String>> sinkSchema = new TypeInformationSerializationSchema<>(
            longStringType, env.getConfig());

    // ----------- add producer dataflow ----------

    DataStream<Tuple2<Long, String>> stream = env
            .addSource(new RichParallelSourceFunction<Tuple2<Long, String>>() {

                private boolean running = true;

                @Override
                public void run(SourceContext<Tuple2<Long, String>> ctx) throws InterruptedException {
                    int cnt = getRuntimeContext().getIndexOfThisSubtask() * elementsPerPartition;
                    int limit = cnt + elementsPerPartition;

                    while (running && cnt < limit) {
                        ctx.collect(new Tuple2<>(1000L + cnt, "kafka-" + cnt));
                        cnt++;
                        // we delay data generation a bit so that we are sure that some checkpoints are
                        // triggered (for FLINK-3156)
                        Thread.sleep(50);
                    }
                }

                @Override
                public void cancel() {
                    running = false;
                }
            });

    Properties producerProperties = FlinkKafkaProducerBase.getPropertiesFromBrokerList(brokerConnectionStrings);
    producerProperties.setProperty("retries", "3");
    producerProperties.putAll(secureProps);
    kafkaServer.produceIntoKafka(stream, topic, new KeyedSerializationSchemaWrapper<>(sinkSchema),
            producerProperties, null);

    // ----------- add consumer dataflow ----------

    List<String> topics = new ArrayList<>();
    topics.add(topic);
    topics.add(additionalEmptyTopic);

    Properties props = new Properties();
    props.putAll(standardProps);
    props.putAll(secureProps);
    FlinkKafkaConsumerBase<Tuple2<Long, String>> source = kafkaServer.getConsumer(topics, sourceSchema, props);

    DataStreamSource<Tuple2<Long, String>> consuming = env.addSource(source).setParallelism(parallelism);

    consuming.addSink(new RichSinkFunction<Tuple2<Long, String>>() {

        private int elCnt = 0;
        private BitSet validator = new BitSet(totalElements);

        @Override
        public void invoke(Tuple2<Long, String> value) throws Exception {
            String[] sp = value.f1.split("-");
            int v = Integer.parseInt(sp[1]);

            assertEquals(value.f0 - 1000, (long) v);

            assertFalse("Received tuple twice", validator.get(v));
            validator.set(v);
            elCnt++;

            if (elCnt == totalElements) {
                // check if everything in the bitset is set to true
                int nc;
                if ((nc = validator.nextClearBit(0)) != totalElements) {
                    fail("The bitset was not set to 1 on all elements. Next clear:" + nc + " Set: " + validator);
                }
                throw new SuccessException();
            }
        }

        @Override
        public void close() throws Exception {
            super.close();
        }
    }).setParallelism(1);

    try {
        tryExecutePropagateExceptions(env, "runSimpleConcurrentProducerConsumerTopology");
    } catch (ProgramInvocationException | JobExecutionException e) {
        // look for NotLeaderForPartitionException
        Throwable cause = e.getCause();

        // search for nested SuccessExceptions
        int depth = 0;
        while (cause != null && depth++ < 20) {
            if (cause instanceof kafka.common.NotLeaderForPartitionException) {
                throw (Exception) cause;
            }
            cause = cause.getCause();
        }
        throw e;
    }

    deleteTestTopic(topic);
}
From source file:skewtune.mapreduce.STJobTracker.java
/**
 * SkewTune heartbeat protocol
 *
 * REQUEST (Heartbeat)
 *
 * HOST TaskAttemptID -- status report (initialization|mapoutput|completed)
 * progress [splitted] TaskAttemptID (initialization|mapoutput|completed)
 * progress [splitted] ...
 *
 * RESPONSE
 *
 * TaskAttemptID (keep going | new map output [] | cancel )
 *
 * .skewtune/m-0000?/part-m-XXXXX ...
 *
 * The protocol is softstate. Jobtracker responds to each heartbeat with the
 * task to cancel and list of unknown jobs in the heart beat message. The
 * task tracker is supposed to reclaim space occupied by the unknown jobs.
 */
@Override
public synchronized HeartbeatResponse heartbeat(TaskTrackerStatus status, boolean justStarted,
        boolean justInited, short responseId) throws IOException, InterruptedException {
    if (LOG.isDebugEnabled() && dumpHeartbeat) {
        LOG.debug("Got heartbeat from: " + status.getTrackerName() + " with responseId: " + responseId);
    }

    String trackerName = status.getTrackerName();
    long now = System.currentTimeMillis();
    short newResponseId = (short) (responseId + 1);

    status.setLastSeen(now);
    trackerToLastHeartbeat.put(trackerName, status);
    trackerToHttpPort.put(trackerName, status.getHttpPort());

    HashSet<JobID> unknownJobs = new HashSet<JobID>();
    ArrayList<ReactiveMapOutput> newMapOutput = new ArrayList<ReactiveMapOutput>();
    // ArrayList<TaskAttemptID> cancelledTasks = new ArrayList<TaskAttemptID>();
    ArrayList<TaskAction> taskActions = new ArrayList<TaskAction>();
    ArrayList<TaskStatusEvent> newTakeOver = new ArrayList<TaskStatusEvent>();

    // per job -- processing
    // FIXME retrieve task tracker
    // FIXME for each job, update task status, build host-task map
    for (JobOnTaskTracker jobReport : status.getJobReports()) {
        JobID jobid = jobReport.getJobID();
        JobInProgress jip = null;
        boolean pendingReactive = false;
        synchronized (jobs) {
            jip = jobs.get(jobid);
        }
        if (jip == null) {
            synchronized (pendingCompletedReactiveJob) {
                jip = pendingCompletedReactiveJob.get(jobid);
            }
            pendingReactive = jip != null;
        }

        if (jip == null) {
            // FIXME check the pending completion list
            unknownJobs.add(jobid); // this job must be cleared
        } else {
            int from = jobReport.getFromIndex();
            int fromTakeOver = jobReport.getFromIndexOfTakeOver();
            final JobType jobType = jip.getJobType();
            BitSet completed = new BitSet(jip.getNumMapTasks());

            synchronized (jip) {
                // load job token into this node
                if (jobType == JobType.ORIGINAL || jobType == JobType.REDUCE_REACTIVE) {
                    scheduleJobTokenLoading(jip); // we only need to load it for original job
                    // FIXME we need to load it for other job if we support recursive split
                }

                // update statistics of this task
                for (STTaskStatus taskStatus : jobReport.getTaskReports()) {
                    int action = jip.handleTaskHeartbeat(taskStatus, status.getHostName(), completed);
                    if (action != 0) {
                        taskActions.add(new TaskAction(taskStatus.getTaskID(), action));
                    }
                    // if ( jip.handleTaskHeartbeat(taskStatus,status.getHostName(),completed) != 0) {
                    //     cancelledTasks.add(taskStatus.getTaskID()); // FIXME create task action
                    // }
                }

                // fetch all available new map output from FROM
                if (from >= 0) {
                    jip.retrieveNewMapOutput(newMapOutput, from);
                }
                if (fromTakeOver >= 0) {
                    jip.retrieveNewTakeOver(newTakeOver, fromTakeOver);
                }

                if (jobType == JobType.MAP_REACTIVE && pendingReactive) {
                    if (jip.isAllMapOutputIndexAvailable()) {
                        synchronized (pendingCompletedReactiveJob) {
                            pendingCompletedReactiveJob.remove(jobid);
                        }
                        cleanupPendingReactiveMap(jip);
                    }
                }
            }

            // if ( jobType == JobType.ORIGINAL ) {
            //     jip.notifyMapCompletion(completed);
            // }
        }
    }

    int nextInterval = getNextHeartbeatInterval();

    return new HeartbeatResponse(newResponseId, nextInterval,
            newMapOutput.toArray(new ReactiveMapOutput[newMapOutput.size()]),
            // cancelledTasks.toArray(new TaskAttemptID[cancelledTasks.size()]),
            taskActions.toArray(new TaskAction[taskActions.size()]),
            unknownJobs.toArray(new JobID[unknownJobs.size()]),
            newTakeOver.toArray(new TaskStatusEvent[newTakeOver.size()]));
}