Example usage for java.util BitSet set

List of usage examples for java.util BitSet set

Introduction

On this page you can find example usage for java.util.BitSet.set.

Prototype

public void set(int bitIndex) 

Document

Sets the bit at the specified index to true.
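
As a quick, self-contained sketch (not taken from the examples below), each call to set(int bitIndex) turns a single bit on, and the bit set grows automatically to cover the index:

import java.util.BitSet;

public class BitSetSetDemo {
    public static void main(String[] args) {
        BitSet bits = new BitSet();   // all bits start out false
        bits.set(3);                  // turn on bit 3
        bits.set(64);                 // the set grows as needed
        System.out.println(bits);               // {3, 64}
        System.out.println(bits.get(3));        // true
        System.out.println(bits.cardinality()); // 2
    }
}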

Usage

From source file:model.DecomposableModel.java

public Double entropyDiffIfAdding(int a, int b, BitSet Sab, EntropyComputer computer) {
    BitSet Sabua = (BitSet) Sab.clone();
    BitSet Sabub = (BitSet) Sab.clone();
    BitSet Sabuaub = (BitSet) Sab.clone();
    Sabua.set(a);
    Sabub.set(b);
    Sabuaub.set(a);
    Sabuaub.set(b);

    Double entropy = 0.0;
    Double tmp;

    // Sab
    tmp = computer.computeEntropy(Sab);
    if (tmp == null) {
        entropy = null;
        return entropy;
    } else {
        entropy -= tmp;
    }

    // Sab + a
    tmp = computer.computeEntropy(Sabua);
    if (tmp == null) {
        entropy = null;
        return entropy;
    } else {
        entropy += tmp;
    }

    // Sab + b
    tmp = computer.computeEntropy(Sabub);
    if (tmp == null) {
        entropy = null;
        return entropy;
    } else {
        entropy += tmp;
    }

    // Sab + a + b
    tmp = computer.computeEntropy(Sabuaub);
    if (tmp == null) {
        entropy = null;
        return entropy;
    } else {
        entropy -= tmp;
    }

    return entropy;
}
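
This first example clones the separator Sab three times and uses set to add vertex a, vertex b, or both, so the value returned is H(Sab+a) + H(Sab+b) - H(Sab) - H(Sab+a+b); a null from the entropy computer short-circuits the whole computation.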

From source file:model.DecomposableModel.java

/**
 * Compute the difference in entropy between this model and one that
 * would add an edge between vertex a and vertex b.
 * 
 * @param a        the first vertex of the candidate edge
 * @param b        the second vertex of the candidate edge
 * @param computer the entropy computer used to evaluate each variable subset
 * @param verbose  if true, print each entropy term as it is computed
 * @return the entropy difference, or null if any term could not be computed
 */
public Double entropyDiffIfAdding(Integer a, Integer b, EntropyComputer computer, boolean verbose) {
    // System.out.println("computing actual entropy");
    BitSet Sab = graph.getSeparator(a, b);
    BitSet Sabua = (BitSet) Sab.clone();
    BitSet Sabub = (BitSet) Sab.clone();
    BitSet Sabuaub = (BitSet) Sab.clone();
    Sabua.set(a);
    Sabub.set(b);
    Sabuaub.set(a);
    Sabuaub.set(b);

    Double entropy = 0.0;
    Double tmp;

    // Sab
    tmp = computer.computeEntropy(Sab);
    if (tmp == null) {
        entropy = null;
        return entropy;
    } else {
        entropy -= tmp;
    }
    if (verbose)
        System.out.println("-" + Sab + ":" + tmp);

    // Sab + a
    tmp = computer.computeEntropy(Sabua);
    if (tmp == null) {
        entropy = null;
        return entropy;
    } else {
        entropy += tmp;
    }
    if (verbose)
        System.out.println("+" + Sabua + ":" + tmp);

    // Sab + b
    tmp = computer.computeEntropy(Sabub);
    if (tmp == null) {
        entropy = null;
        return entropy;
    } else {
        entropy += tmp;
    }
    if (verbose)
        System.out.println("+" + Sabub + ":" + tmp);

    // Sab + a + b
    tmp = computer.computeEntropy(Sabuaub);
    if (tmp == null) {
        entropy = null;
        return entropy;
    } else {
        entropy -= tmp;
    }
    if (verbose)
        System.out.println("-" + Sabuaub + ":" + tmp);

    return entropy;
}

From source file:org.apache.hadoop.mapreduce.lib.input.TestCombineSequenceFileInputFormat.java

@Test(timeout = 10000)
public void testFormat() throws IOException, InterruptedException {
    Job job = Job.getInstance(conf);

    Random random = new Random();
    long seed = random.nextLong();
    random.setSeed(seed);

    localFs.delete(workDir, true);
    FileInputFormat.setInputPaths(job, workDir);

    final int length = 10000;
    final int numFiles = 10;

    // create files with a variety of lengths
    createFiles(length, numFiles, random, job);

    TaskAttemptContext context = MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
    // create a combine split for the files
    InputFormat<IntWritable, BytesWritable> format = new CombineSequenceFileInputFormat<IntWritable, BytesWritable>();
    for (int i = 0; i < 3; i++) {
        int numSplits = random.nextInt(length / (SequenceFile.SYNC_INTERVAL / 20)) + 1;
        LOG.info("splitting: requesting = " + numSplits);
        List<InputSplit> splits = format.getSplits(job);
        LOG.info("splitting: got =        " + splits.size());

        // we should have a single split as the length is comfortably smaller than
        // the block size
        assertEquals("We got more than one splits!", 1, splits.size());
        InputSplit split = splits.get(0);
        assertEquals("It should be CombineFileSplit", CombineFileSplit.class, split.getClass());

        // check the split
        BitSet bits = new BitSet(length);
        RecordReader<IntWritable, BytesWritable> reader = format.createRecordReader(split, context);
        MapContext<IntWritable, BytesWritable, IntWritable, BytesWritable> mcontext = new MapContextImpl<IntWritable, BytesWritable, IntWritable, BytesWritable>(
                job.getConfiguration(), context.getTaskAttemptID(), reader, null, null,
                MapReduceTestUtil.createDummyReporter(), split);
        reader.initialize(split, mcontext);
        assertEquals("reader class is CombineFileRecordReader.", CombineFileRecordReader.class,
                reader.getClass());

        try {
            while (reader.nextKeyValue()) {
                IntWritable key = reader.getCurrentKey();
                BytesWritable value = reader.getCurrentValue();
                assertNotNull("Value should not be null.", value);
                final int k = key.get();
                LOG.debug("read " + k);
                assertFalse("Key in multiple partitions.", bits.get(k));
                bits.set(k);
            }
        } finally {
            reader.close();
        }
        assertEquals("Some keys in no partition.", length, bits.cardinality());
    }
}
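
Here the BitSet acts as a coverage tracker: the test asserts that bits.get(k) is still false before calling bits.set(k), which would catch a key read from more than one split, and the final cardinality() check confirms that all of the length keys were read exactly once.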

From source file:org.wso2.andes.subscription.ClusterSubscriptionBitMapHandler.java

/**
 * Add a new constituent row for the given constituent index table and fill values for already available
 * subscriptions.
 *
 * @param constituent      The constituent to add
 * @param constituentIndex The index of the constituent
 */
private void addConstituentRow(String constituent, int constituentIndex) {
    Map<String, BitSet> constituentTable = constituentTables.get(constituentIndex);
    BitSet bitSet = new BitSet();

    for (int i = 0; i < subscriptionConstituents.size(); i++) {
        String[] constituentsOfSubscription = subscriptionConstituents.get(i);

        if (constituentIndex < constituentsOfSubscription.length) {
            // Get the i'th subscription's [constituentIndex]'th constituent
            String subscriptionConstituent = constituentsOfSubscription[constituentIndex];
            if (subscriptionConstituent.equals(constituent)
                    || multiLevelWildCard.equals(subscriptionConstituent)
                    || singleLevelWildCard.equals(subscriptionConstituent)) {
                // The new constituent matches the subscription's i'th constituent
                bitSet.set(i);
            } else {
                // The new constituent does not match the i'th subscription's [constituentIndex]'th constituent
                bitSet.set(i, false);
            }
        } else {
            // The subscription does not have a constituent for this index
            // If the last constituent of the subscription is multiLevelWildCard we match else false
            if (multiLevelWildCard.equals(constituentsOfSubscription[constituentsOfSubscription.length - 1])) {
                bitSet.set(i);
            } else {
                bitSet.set(i, false);
            }
        }
    }

    constituentTable.put(constituent, bitSet);
}
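
Besides the single-argument form this page documents, the example above also uses the two-argument overload set(int bitIndex, boolean value), which assigns an explicit value instead of always setting the bit to true:

BitSet bitSet = new BitSet();
bitSet.set(0);        // one-argument form: always sets the bit to true
bitSet.set(1, false); // two-argument overload: assigns the given value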

From source file:ArrayUtils.java

/**
 * Removes all contents of <code>array2</code> from <code>array1</code>. If
 * <code>array2</code> is not itself an array, it is treated as a single
 * element to remove.
 * 
 * @param <T>
 *            The type of the array
 * @param array1
 *            The array to remove elements from
 * @param array2
 *            The array containing the elements to remove; or the element to
 *            remove itself
 * @return <code>array1</code> missing all the contents of
 *         <code>array2</code>
 */
public static <T> T[] removeAll(T[] array1, Object array2) {
    if (array1 == null || array2 == null)
        return array1;
    if (!array1.getClass().isArray())
        return null;
    if (!array2.getClass().isArray())
        array2 = new Object[] { array2 };
    java.util.BitSet remove = new java.util.BitSet();
    int len1 = array1.length;
    int len2 = Array.getLength(array2);
    int i, j;
    for (i = 0; i < len1; i++) {
        for (j = 0; j < len2; j++) {
            if (equals(array1[i], Array.get(array2, j))) {
                remove.set(i);
                break;
            }
        }
    }
    T[] ret = (T[]) Array.newInstance(array1.getClass().getComponentType(), len1 - remove.cardinality());
    // This copying section might be replaced by a more efficient version
    // using System.arraycopy()--this would be much faster than reflection,
    // especially for large arrays needing only a few elements removed
    for (i = 0, j = 0; i < len1; i++) {
        if (!remove.get(i)) {
            ret[j] = array1[i];
            j++;
        }
    }
    return ret;
}
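
The BitSet here serves as a removal mask: set(i) marks each index to drop, cardinality() sizes the result array, and the final loop copies only the unmarked elements. The removeAllP example further down applies the same pattern to primitive arrays via java.lang.reflect.Array.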

From source file:org.apache.tez.dag.utils.TaskSpecificLaunchCmdOption.java

/**
 * Get the set of tasks that need additional launch command options within a vertex
 *
 * @param tasksInVertex comma-separated task indexes and ranges within the vertex (e.g. "1,3,5:8")
 * @return BitSet with a bit set for each requested task index, or null if an invalid range is given
 */
private BitSet parseTasks(String tasksInVertex) {
    BitSet taskSet = new BitSet();
    if (Strings.isNullOrEmpty(tasksInVertex)) {
        return taskSet;
    }
    Iterable<String> tasks = Splitter.on(",").omitEmptyStrings().trimResults().split(tasksInVertex);
    for (String task : tasks) {
        /**
         * TODO: this is horrible way to check the ranges.
         * Should use RangeSet when guava is upgraded.  Also, need to support partial
         * ranges like "1:", ":50".  With current implementation partial ranges are not
         * allowed.
         */
        if (task.endsWith(":") || task.startsWith(":")) {
            //invalid range. e.g :20, 6: are not supported.
            LOG.warn("Partial range is considered as an invalid option");
            return null;
        }
        Matcher taskMatcher = RANGE_REGEX.matcher(task);
        if (taskMatcher.find()) {
            int start = Integer.parseInt((taskMatcher.group(1).trim()));
            int end = Integer.parseInt((taskMatcher.group(2).trim()));
            for (int i = Math.min(start, end); i <= Math.max(start, end); i++) {
                taskSet.set(i);
            }
        } else {
            taskSet.set(Integer.parseInt(task.trim()));
        }
    }
    return taskSet;
}
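
The loop fills an inclusive range one bit at a time with set(int bitIndex). For contiguous ranges the JDK also provides set(int fromIndex, int toIndex), where toIndex is exclusive, so the loop above could be replaced with a single call:

taskSet.set(Math.min(start, end), Math.max(start, end) + 1); // sets bits min..max inclusive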

From source file:ArrayUtils.java

/**
 * Removes all contents of <code>array2</code> from <code>array1</code>. If
 * <code>array2</code> is not itself an array, it is treated as a single
 * element to remove. This variant works on primitive arrays.
 *
 * @param array1
 *            The array to remove elements from
 * @param array2
 *            The array containing the elements to remove; or the element to
 *            remove itself
 * @return <code>array1</code> missing all the contents of
 *         <code>array2</code>
 */
public static Object removeAllP(Object array1, Object array2) {
    if (array1 == null || array2 == null)
        return array1;
    if (!array1.getClass().isArray())
        return null;
    if (!array2.getClass().isArray())
        array2 = new Object[] { array2 };
    else
        array2 = addP(array2, array2);
    java.util.BitSet remove = new java.util.BitSet();
    int len1 = Array.getLength(array1);
    int len2 = Array.getLength(array2);
    int i, j;
    for (i = 0; i < len1; i++) {
        for (j = 0; j < len2; j++) {
            if (equals(Array.get(array1, i), Array.get(array2, j))) {
                remove.set(i);
                break;
            }
        }
    }
    Object ret = Array.newInstance(array1.getClass().getComponentType(), len1 - remove.cardinality());
    // This copying section might be replaced by a more efficient version
    // using System.arraycopy()--this would be much faster than reflection,
    // especially for large arrays needing only a few elements removed
    for (i = 0, j = 0; i < len1; i++) {
        if (!remove.get(i)) {
            put(ret, Array.get(array1, i), j);
            j++;
        }
    }
    return ret;
}

From source file:com.tamingtext.util.SplitInput.java

/** Perform a split on the specified input file. Results will be written to files of the same name in the specified 
 *  training and test output directories. The {@link #validate()} method is called prior to executing the split.
 */
public void splitFile(Path inputFile) throws IOException {
    if (fs.getFileStatus(inputFile) == null) {
        throw new IOException(inputFile + " does not exist");
    } else if (fs.getFileStatus(inputFile).isDir()) {
        throw new IOException(inputFile + " is a directory");
    }

    validate();

    Path testOutputFile = new Path(testOutputDirectory, inputFile.getName());
    Path trainingOutputFile = new Path(trainingOutputDirectory, inputFile.getName());

    int lineCount = countLines(fs, inputFile, charset);

    log.info("{} has {} lines", inputFile.getName(), lineCount);

    int testSplitStart = 0;
    int testSplitSize = this.testSplitSize; // don't modify state
    BitSet randomSel = null;

    if (testRandomSelectionPct > 0 || testRandomSelectionSize > 0) {
        testSplitSize = this.testRandomSelectionSize;

        if (testRandomSelectionPct > 0) {
            testSplitSize = Math.round(lineCount * (testRandomSelectionPct / 100.0f));
        }
        log.info("{} test split size is {} based on random selection percentage {}",
                new Object[] { inputFile.getName(), testSplitSize, testRandomSelectionPct });
        long[] ridx = new long[testSplitSize];
        RandomSampler.sample(testSplitSize, lineCount - 1, testSplitSize, 0, ridx, 0, RandomUtils.getRandom());
        randomSel = new BitSet(lineCount);
        for (long idx : ridx) {
            randomSel.set((int) idx + 1);
        }
    } else {
        if (testSplitPct > 0) { // calculate split size based on percentage
            testSplitSize = Math.round(lineCount * (testSplitPct / 100.0f));
            log.info("{} test split size is {} based on percentage {}",
                    new Object[] { inputFile.getName(), testSplitSize, testSplitPct });
        } else {
            log.info("{} test split size is {}", inputFile.getName(), testSplitSize);
        }

        if (splitLocation > 0) { // calculate start of split based on percentage
            testSplitStart = Math.round(lineCount * (splitLocation / 100.0f));
            if (lineCount - testSplitStart < testSplitSize) {
                // adjust split start downwards based on split size.
                testSplitStart = lineCount - testSplitSize;
            }
            log.info("{} test split start is {} based on split location {}",
                    new Object[] { inputFile.getName(), testSplitStart, splitLocation });
        }

        if (testSplitStart < 0) {
            throw new IllegalArgumentException(
                    "test split size for " + inputFile + " is too large, it would produce an "
                            + "empty training set from the initial set of " + lineCount + " examples");
        } else if ((lineCount - testSplitSize) < testSplitSize) {
            log.warn(
                    "Test set size for {} may be too large, {} is larger than the number of "
                            + "lines remaining in the training set: {}",
                    new Object[] { inputFile, testSplitSize, lineCount - testSplitSize });
        }
    }

    BufferedReader reader = new BufferedReader(new InputStreamReader(fs.open(inputFile), charset));
    Writer trainingWriter = new OutputStreamWriter(fs.create(trainingOutputFile), charset);
    Writer testWriter = new OutputStreamWriter(fs.create(testOutputFile), charset);

    int pos = 0;
    int trainCount = 0;
    int testCount = 0;

    String line;
    while ((line = reader.readLine()) != null) {
        pos++;

        Writer writer;
        if (testRandomSelectionPct > 0) { // Randomly choose
            writer = randomSel.get(pos) ? testWriter : trainingWriter;
        } else { // Choose based on location
            writer = pos > testSplitStart ? testWriter : trainingWriter;
        }

        if (writer == testWriter) {
            if (testCount >= testSplitSize) {
                writer = trainingWriter;
            } else {
                testCount++;
            }
        }

        if (writer == trainingWriter) {
            trainCount++;
        }

        writer.write(line);
        writer.write('\n');
    }

    IOUtils.close(Collections.singleton(trainingWriter));
    IOUtils.close(Collections.singleton(testWriter));

    log.info("file: {}, input: {} train: {}, test: {} starting at {}",
            new Object[] { inputFile.getName(), lineCount, trainCount, testCount, testSplitStart });

    // testing;
    if (callback != null) {
        callback.splitComplete(inputFile, lineCount, trainCount, testCount, testSplitStart);
    }
}
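
In the random-selection branch, randomSel.set((int) idx + 1) records each sampled line number (1-based, since pos is incremented before the lookup), and randomSel.get(pos) later routes each line to the test or training writer. The SplitBayesInput example below is a near-identical variant of the same splitter.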

From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java

@Test
public void testSlowListener() throws Exception {
    logger.info("Start " + this.testName.getMethodName());
    Map<String, Object> props = KafkaTestUtils.consumerProps("slow1", "false", embeddedKafka);
    //      props.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, 6); // 2 per poll
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<Integer, String>(props);
    ContainerProperties containerProps = new ContainerProperties(topic1);

    final CountDownLatch latch = new CountDownLatch(6);
    final BitSet bitSet = new BitSet(6);
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("slow1: " + message);
        bitSet.set((int) (message.partition() * 3 + message.offset()));
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
        latch.countDown();
    });
    containerProps.setPauseAfter(100);
    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testSlow1");

    container.start();
    Consumer<?, ?> consumer = spyOnConsumer(container);
    ContainerTestUtils.waitForAssignment(container, embeddedKafka.getPartitionsPerTopic());

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<Integer, String>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic1);
    template.sendDefault(0, "foo");
    template.sendDefault(2, "bar");
    template.sendDefault(0, "baz");
    template.sendDefault(2, "qux");
    template.flush();
    Thread.sleep(300);
    template.sendDefault(0, "fiz");
    template.sendDefault(2, "buz");
    template.flush();
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(bitSet.cardinality()).isEqualTo(6);
    verify(consumer, atLeastOnce()).pause(anyObject());
    verify(consumer, atLeastOnce()).resume(anyObject());
    container.stop();
    logger.info("Stop " + this.testName.getMethodName());
}
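
The expression message.partition() * 3 + message.offset() maps each (partition, offset) pair to a distinct bit index; assuming the six messages split three per partition across two partitions, they land on bits 0 through 5, so the bitSet.cardinality() assertion of 6 verifies that every message was seen exactly once.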

From source file:org.apache.mahout.classifier.bayes.SplitBayesInput.java

/** Perform a split on the specified input file. Results will be written to files of the same name in the specified 
 *  training and test output directories. The {@link #validate()} method is called prior to executing the split.
 */
public void splitFile(Path inputFile) throws IOException {
    if (fs.getFileStatus(inputFile) == null) {
        throw new IOException(inputFile + " does not exist");
    } else if (fs.getFileStatus(inputFile).isDir()) {
        throw new IOException(inputFile + " is a directory");
    }

    validate();

    Path testOutputFile = new Path(testOutputDirectory, inputFile.getName());
    Path trainingOutputFile = new Path(trainingOutputDirectory, inputFile.getName());

    int lineCount = countLines(fs, inputFile, charset);

    log.info("{} has {} lines", inputFile.getName(), lineCount);

    int testSplitStart = 0;
    int testSplitSize = this.testSplitSize; // don't modify state
    BitSet randomSel = null;

    if (testRandomSelectionPct > 0 || testRandomSelectionSize > 0) {
        testSplitSize = this.testRandomSelectionSize;

        if (testRandomSelectionPct > 0) {
            testSplitSize = Math.round(lineCount * testRandomSelectionPct / 100.0f);
        }
        log.info("{} test split size is {} based on random selection percentage {}",
                new Object[] { inputFile.getName(), testSplitSize, testRandomSelectionPct });
        long[] ridx = new long[testSplitSize];
        RandomSampler.sample(testSplitSize, lineCount - 1, testSplitSize, 0, ridx, 0, RandomUtils.getRandom());
        randomSel = new BitSet(lineCount);
        for (long idx : ridx) {
            randomSel.set((int) idx + 1);
        }
    } else {
        if (testSplitPct > 0) { // calculate split size based on percentage
            testSplitSize = Math.round(lineCount * testSplitPct / 100.0f);
            log.info("{} test split size is {} based on percentage {}",
                    new Object[] { inputFile.getName(), testSplitSize, testSplitPct });
        } else {
            log.info("{} test split size is {}", inputFile.getName(), testSplitSize);
        }

        if (splitLocation > 0) { // calculate start of split based on percentage
            testSplitStart = Math.round(lineCount * splitLocation / 100.0f);
            if (lineCount - testSplitStart < testSplitSize) {
                // adjust split start downwards based on split size.
                testSplitStart = lineCount - testSplitSize;
            }
            log.info("{} test split start is {} based on split location {}",
                    new Object[] { inputFile.getName(), testSplitStart, splitLocation });
        }

        if (testSplitStart < 0) {
            throw new IllegalArgumentException(
                    "test split size for " + inputFile + " is too large, it would produce an "
                            + "empty training set from the initial set of " + lineCount + " examples");
        } else if (lineCount - testSplitSize < testSplitSize) {
            log.warn(
                    "Test set size for {} may be too large, {} is larger than the number of "
                            + "lines remaining in the training set: {}",
                    new Object[] { inputFile, testSplitSize, lineCount - testSplitSize });
        }
    }

    BufferedReader reader = new BufferedReader(new InputStreamReader(fs.open(inputFile), charset));
    Writer trainingWriter = new OutputStreamWriter(fs.create(trainingOutputFile), charset);
    Writer testWriter = new OutputStreamWriter(fs.create(testOutputFile), charset);

    int trainCount = 0;
    int testCount = 0;

    try {

        String line;
        int pos = 0;
        while ((line = reader.readLine()) != null) {
            pos++;

            Writer writer;
            if (testRandomSelectionPct > 0) { // Randomly choose
                writer = randomSel.get(pos) ? testWriter : trainingWriter;
            } else { // Choose based on location
                writer = pos > testSplitStart ? testWriter : trainingWriter;
            }

            if (writer == testWriter) {
                if (testCount >= testSplitSize) {
                    writer = trainingWriter;
                } else {
                    testCount++;
                }
            }

            if (writer == trainingWriter) {
                trainCount++;
            }

            writer.write(line);
            writer.write('\n');
        }

    } finally {
        IOUtils.quietClose(reader);
        IOUtils.quietClose(trainingWriter);
        IOUtils.quietClose(testWriter);
    }

    log.info("file: {}, input: {} train: {}, test: {} starting at {}",
            new Object[] { inputFile.getName(), lineCount, trainCount, testCount, testSplitStart });

    // testing;
    if (callback != null) {
        callback.splitComplete(inputFile, lineCount, trainCount, testCount, testSplitStart);
    }
}