Example usage for java.util BitSet nextClearBit

Introduction

On this page you can find example usage for java.util.BitSet.nextClearBit.

Prototype

public int nextClearBit(int fromIndex) 

Document

Returns the index of the first bit that is set to false that occurs on or after the specified starting index.
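A minimal sketch of the method's behavior, using hypothetical values. Note that, unlike nextSetBit, nextClearBit never returns -1, because a BitSet is conceptually followed by an infinite run of clear bits:

import java.util.BitSet;

public class NextClearBitDemo {
    public static void main(String[] args) {
        BitSet bits = new BitSet();
        bits.set(0);
        bits.set(1);
        bits.set(3);

        System.out.println(bits.nextClearBit(0));   // 2: first false bit at or after index 0
        System.out.println(bits.nextClearBit(3));   // 4: index 3 is set, so the search moves past it
        System.out.println(bits.nextClearBit(100)); // 100: never -1, the clear run is unbounded
    }
}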

Usage

From source file:org.apache.flink.streaming.connectors.kafka.KafkaConsumerTestBase.java

/**
 * Runs a job using the provided environment to read a sequence of records from a single Kafka topic.
 * The method allows the expected starting offset and total read value count of each partition to be
 * specified individually. The job is considered successful only if all partition read results match
 * the start offset and value count criteria.
 */
protected void readSequence(final StreamExecutionEnvironment env, final StartupMode startupMode,
        final Map<KafkaTopicPartition, Long> specificStartupOffsets, final Properties cc,
        final String topicName,
        final Map<Integer, Tuple2<Integer, Integer>> partitionsToValuesCountAndStartOffset) throws Exception {
    final int sourceParallelism = partitionsToValuesCountAndStartOffset.keySet().size();

    int finalCountTmp = 0;
    for (Map.Entry<Integer, Tuple2<Integer, Integer>> valuesCountAndStartOffset : partitionsToValuesCountAndStartOffset
            .entrySet()) {
        finalCountTmp += valuesCountAndStartOffset.getValue().f0;
    }
    final int finalCount = finalCountTmp;

    final TypeInformation<Tuple2<Integer, Integer>> intIntTupleType = TypeInfoParser
            .parse("Tuple2<Integer, Integer>");

    final TypeInformationSerializationSchema<Tuple2<Integer, Integer>> deser = new TypeInformationSerializationSchema<>(
            intIntTupleType, env.getConfig());

    // create the consumer
    cc.putAll(secureProps);
    FlinkKafkaConsumerBase<Tuple2<Integer, Integer>> consumer = kafkaServer.getConsumer(topicName, deser, cc);
    switch (startupMode) {
    case EARLIEST:
        consumer.setStartFromEarliest();
        break;
    case LATEST:
        consumer.setStartFromLatest();
        break;
    case SPECIFIC_OFFSETS:
        consumer.setStartFromSpecificOffsets(specificStartupOffsets);
        break;
    case GROUP_OFFSETS:
        consumer.setStartFromGroupOffsets();
        break;
    }

    DataStream<Tuple2<Integer, Integer>> source = env.addSource(consumer).setParallelism(sourceParallelism)
            .map(new ThrottledMapper<Tuple2<Integer, Integer>>(20)).setParallelism(sourceParallelism);

    // verify data
    source.flatMap(new RichFlatMapFunction<Tuple2<Integer, Integer>, Integer>() {

        private HashMap<Integer, BitSet> partitionsToValueCheck;
        private int count = 0;

        @Override
        public void open(Configuration parameters) throws Exception {
            partitionsToValueCheck = new HashMap<>();
            for (Integer partition : partitionsToValuesCountAndStartOffset.keySet()) {
                partitionsToValueCheck.put(partition, new BitSet());
            }
        }

        @Override
        public void flatMap(Tuple2<Integer, Integer> value, Collector<Integer> out) throws Exception {
            int partition = value.f0;
            int val = value.f1;

            BitSet bitSet = partitionsToValueCheck.get(partition);
            if (bitSet == null) {
                throw new RuntimeException("Got a record from an unknown partition");
            } else {
                bitSet.set(val - partitionsToValuesCountAndStartOffset.get(partition).f1);
            }

            count++;

            LOG.info("Received message {}, total {} messages", value, count);

            // verify if we've seen everything
            if (count == finalCount) {
                for (Map.Entry<Integer, BitSet> partitionCheck : this.partitionsToValueCheck.entrySet()) {
                    BitSet check = partitionCheck.getValue();
                    int expectedValueCount = partitionsToValuesCountAndStartOffset
                            .get(partitionCheck.getKey()).f0;

                    if (check.cardinality() != expectedValueCount) {
                        throw new RuntimeException("Expected cardinality to be " + expectedValueCount
                                + ", but was " + check.cardinality());
                    } else if (check.nextClearBit(0) != expectedValueCount) {
                        throw new RuntimeException("Expected next clear bit to be " + expectedValueCount
                                + ", but was " + check.nextClearBit(0));
                    }
                }

                // test has passed
                throw new SuccessException();
            }
        }

    }).setParallelism(1);

    tryExecute(env, "Read data from Kafka");

    LOG.info("Successfully read sequence for verification");
}
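The completeness check at the end of the flatMap relies on a common BitSet idiom: the values received from each partition are offset-shifted so they start at bit 0, and the sequence 0..n-1 is complete exactly when cardinality() == n and nextClearBit(0) == n. A minimal standalone sketch of that idiom (the method name and values are hypothetical):

import java.util.BitSet;

// True when exactly the values 0..expected-1 have been recorded in seen.
static boolean isCompleteSequence(BitSet seen, int expected) {
    // nextClearBit(0) == expected means bits 0..expected-1 are all set (no gaps);
    // cardinality() == expected means no stray bits are set beyond that range.
    return seen.cardinality() == expected && seen.nextClearBit(0) == expected;
}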

From source file:org.apache.hawq.pxf.service.utilities.AnalyzeUtils.java

/**
 * Returns the index of the next clear (false) bit, starting from and
 * including index. If all bits from index to the end are set (true), the
 * search restarts from the beginning. Returns -1 if all bits are set (true).
 *
 * @param index starting point
 * @param poolSize the bit set size
 * @param bitSet bitset to search
 * @return index of the next clear bit, or -1 if all bits are set
 */
static private int nextClearBitModulo(int index, int poolSize, BitSet bitSet) {

    int indexToSet = bitSet.nextClearBit(index);
    if (indexToSet == poolSize && index != 0) {
        indexToSet = bitSet.nextClearBit(0);
    }
    /* means that all bits are already set, so we return -1 */
    if (indexToSet == poolSize) {
        return -1;
    }

    return indexToSet;
}
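A short driver (hypothetical pool size and slot assignments) illustrating the wrap-around behavior of this helper:

BitSet pool = new BitSet(4);
pool.set(2);
pool.set(3);

// Slots 2 and 3 are taken; searching from index 2 runs off the end of the
// pool, so the helper wraps around and returns the first free slot, 0.
int slot = nextClearBitModulo(2, 4, pool); // 0

pool.set(0);
pool.set(1);
// All four slots are now taken: the helper reports -1.
slot = nextClearBitModulo(0, 4, pool); // -1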

From source file:org.apache.pig.backend.local.executionengine.LocalPigLauncher.java

private int runPipeline(POStore[] leaves, PigContext pc) throws IOException, ExecException {
    BitSet bs = new BitSet(leaves.length);
    int failed = 0;
    while (true) {
        if (bs.cardinality() == leaves.length) {
            break;
        }
        for (int i = bs.nextClearBit(0); i < leaves.length; i = bs.nextClearBit(i + 1)) {
            Result res = leaves[i].getNext(DUMMYTUPLE);
            switch (res.returnStatus) {
            case POStatus.STATUS_NULL:
                // good null from store means keep at it.
                continue;
            case POStatus.STATUS_OK:
                // ok shouldn't happen store should have consumed it.
                // fallthrough
            case POStatus.STATUS_ERR:
                leaves[i].cleanUp();
                leaves[i].tearDown();
                failed++;
                failedStores.add(leaves[i].getSFile());
                if ("true".equalsIgnoreCase(pc.getProperties().getProperty("stop.on.failure", "false"))) {
                    int errCode = 6017;
                    String msg = "Execution failed, while processing " + leaves[i].getSFile().getFileName();

                    throw new ExecException(msg, errCode, PigException.REMOTE_ENVIRONMENT);
                }
                bs.set(i);
                break;
            case POStatus.STATUS_EOP:
                leaves[i].tearDown();
                succeededStores.add(leaves[i].getSFile());
                // fallthrough
            default:
                bs.set(i);
                break;
            }
        }
    }
    return failed;
}
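The loop header above is the standard idiom for iterating over the clear bits of a BitSet, the counterpart of the nextSetBit loop shown in the BitSet javadoc. Because nextClearBit never returns -1, the loop must carry its own upper bound. A minimal sketch with hypothetical data:

import java.util.BitSet;

BitSet done = new BitSet(5);
done.set(1);
done.set(3);

// Visits indices 0, 2 and 4: every position not yet marked done.
for (int i = done.nextClearBit(0); i < 5; i = done.nextClearBit(i + 1)) {
    System.out.println("still pending: " + i);
}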

From source file:org.intermine.bio.postprocess.CreateIntronFeaturesProcess.java

/**
 * Creates Intron objects for the regions between the Locations in the
 * locationSet argument and returns the number of introns created. The caller
 * must call ObjectStoreWriter.store() on each Intron, its chromosomeLocation
 * and the synonyms in its synonyms collection.
 * @param locationSet a set of Locations for the exons on a particular transcript
 * @param transcript Transcript that the Locations refer to
 * @param tranLoc the Location of the Transcript
 * @param gene gene for the transcript
 * @return the number of introns created
 * @throws ObjectStoreException if there is an ObjectStore problem
 */
protected int createIntronFeatures(Set<Location> locationSet, SequenceFeature transcript, Location tranLoc,
        Gene gene) throws ObjectStoreException {
    if (locationSet.size() == 1 || tranLoc == null || transcript == null || transcript.getLength() == null) {
        return 0;
    }

    final BitSet bs = new BitSet(transcript.getLength().intValue());
    Chromosome chr = transcript.getChromosome();

    int tranStart = tranLoc.getStart().intValue();

    for (Location location : locationSet) {
        bs.set(location.getStart().intValue() - tranStart, (location.getEnd().intValue() - tranStart) + 1);
    }

    int prevEndPos = 0;
    int intronCount = 0;
    while (prevEndPos != -1) {
        intronCount++;
        int nextIntronStart = bs.nextClearBit(prevEndPos + 1);
        int intronEnd;
        int nextSetBit = bs.nextSetBit(nextIntronStart);

        if (nextSetBit == -1) {
            intronEnd = transcript.getLength().intValue();
        } else {
            intronEnd = nextSetBit - 1;
        }

        if (nextSetBit == -1 || intronCount == (locationSet.size() - 1)) {
            prevEndPos = -1;
        } else {
            prevEndPos = intronEnd;
        }

        int newLocStart = nextIntronStart + tranStart;
        int newLocEnd = intronEnd + tranStart;

        String identifier = "intron_chr" + chr.getPrimaryIdentifier() + "_" + Integer.toString(newLocStart)
                + ".." + Integer.toString(newLocEnd);

        if (intronMap.get(identifier) == null) {
            Class<?> intronCls = model.getClassDescriptorByName("Intron").getType();
            Intron intron = (Intron) DynamicUtil.createObject(Collections.singleton(intronCls));
            Location location = (Location) DynamicUtil.createObject(Collections.singleton(Location.class));

            intron.setChromosome(chr);
            intron.setOrganism(chr.getOrganism());
            intron.addDataSets(dataSet);
            intron.setPrimaryIdentifier(identifier);
            intron.setGenes(Collections.singleton(gene));

            location.setStart(new Integer(newLocStart));
            location.setEnd(new Integer(newLocEnd));
            location.setStrand(tranLoc.getStrand());
            location.setFeature(intron);
            location.setLocatedOn(transcript);
            location.addDataSets(dataSet);

            intron.setChromosomeLocation(location);
            osw.store(location);

            int length = location.getEnd().intValue() - location.getStart().intValue() + 1;
            intron.setLength(new Integer(length));
            addToIntronTranscripts(intron, transcript);
            intronMap.put(identifier, intron);
        } else {
            SequenceFeature intron = intronMap.get(identifier);
            addToIntronTranscripts(intron, transcript);
            intronMap.put(identifier, intron);
        }
    }
    return intronCount;
}
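The intron computation above is an instance of a general pattern: mark the covered ranges in a BitSet, then alternate nextClearBit and nextSetBit to walk the uncovered gaps. A minimal standalone sketch of that pattern, with hypothetical coordinates:

import java.util.BitSet;

int length = 20;
BitSet covered = new BitSet(length);
covered.set(0, 5);   // first covered range, e.g. an exon at [0, 5)
covered.set(9, 12);  // second covered range at [9, 12)

int pos = 0;
while (pos < length) {
    int gapStart = covered.nextClearBit(pos);
    if (gapStart >= length) {
        break;
    }
    int gapEnd = covered.nextSetBit(gapStart);
    if (gapEnd == -1 || gapEnd > length) {
        gapEnd = length; // gap runs to the end of the region
    }
    System.out.println("gap: [" + gapStart + ", " + gapEnd + ")");
    pos = gapEnd;
}
// Prints gap: [5, 9) and gap: [12, 20)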