Example usage for java.util BitSet BitSet

List of usage examples for java.util BitSet BitSet

Introduction

On this page you can find usage examples for the java.util.BitSet constructor BitSet.

Prototype

private BitSet(long[] words) 

Source Link

Document

Creates a bit set using words as the internal representation.
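
This private constructor cannot be called directly; in the JDK it is reached through the public factory BitSet.valueOf(long[]), while everyday code simply sizes a new set with the public BitSet(int nbits) constructor, which is what every example below does. A minimal sketch of both paths:

import java.util.BitSet;

public class BitSetConstructionDemo {
    public static void main(String[] args) {
        // The private BitSet(long[]) constructor is reached indirectly through
        // BitSet.valueOf(long[]): bit k of the result is bit k % 64 of longs[k / 64].
        BitSet fromWords = BitSet.valueOf(new long[] { 0b1011L });
        System.out.println(fromWords);              // {0, 1, 3}

        // The examples on this page use the public BitSet(int nbits) constructor,
        // which only sizes the initial backing array; all bits start out clear and
        // the set still grows on demand.
        BitSet sized = new BitSet(64);
        sized.set(3);
        sized.set(200);                             // beyond the initial hint is fine
        System.out.println(sized.cardinality());    // 2
    }
}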

Usage

From source file:fingerprints.helper.BloomFilter.java

public BitSet toBitSet() {
    BitSet result = new BitSet(bitSetSize);
    result.or(bitSet);
    return result;
}
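
The method above returns a defensive copy: a fresh BitSet sized like the filter with the internal bits OR-ed in, so callers cannot mutate the filter's state through the returned reference. A small standalone sketch of the same idiom (the variable names here are illustrative, not taken from the BloomFilter source):

import java.util.BitSet;

public class DefensiveCopyDemo {
    public static void main(String[] args) {
        BitSet internal = new BitSet(16);
        internal.set(2);
        internal.set(5);

        // Size a fresh set and OR the internal state into it.
        BitSet copy = new BitSet(internal.size());
        copy.or(internal);

        copy.set(9);
        System.out.println(internal.get(9)); // false - original is untouched
        System.out.println(copy);            // {2, 5, 9}
    }
}

An equivalent one-liner is (BitSet) internal.clone().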

From source file:de.uni_potsdam.hpi.asg.logictool.helper.BDDHelper.java

public static BDD mergeBDDs(BDD bdd, NetlistVariable replaceVar, BDD replaceBdd, Netlist netlist) {

    Set<NetlistVariable> bddvars = BDDHelper.getVars(bdd, netlist);
    if (!bddvars.contains(replaceVar)) {
        logger.error("ReplaceVar not in Vars");
        return null;
    }

    if (bddvars.size() == 1) {
        //         logger.debug("Shortcut");
        //         logger.debug("BDD: " + getFunctionString(bdd, netlist));
        //         logger.debug("ReplBDD: " + getFunctionString(replaceBdd, netlist));
        //         logger.debug("ReplVar: " + replaceVar.getName());
        if (isPos(bdd, replaceVar)) {
            return replaceBdd;
        } else {
            return replaceBdd.not();
        }
        //         return replaceBdd;//.and(netlist.getFac().one());
    }

    SortedSet<NetlistVariable> newinputs = new TreeSet<>();
    newinputs.addAll(bddvars);
    newinputs.addAll(BDDHelper.getVars(replaceBdd, netlist));
    newinputs.remove(replaceVar);
    //      System.out.println("New Inp: " + newinputs.toString());

    BDD retVal = netlist.getFac().zero();
    BitSet b = new BitSet(newinputs.size());
    for (int i = 0; i < Math.pow(2, newinputs.size()); i++) {
        //         System.out.println(i + ": " + BitSetHelper.formatBitset(b, newinputs.size()));
        int index = 0;
        BDD bdd_new = bdd;
        BDD replacBdd_new = replaceBdd;
        BDD minterm = netlist.getFac().one();
        //TODO: xWITH
        for (NetlistVariable var : newinputs) {
            if (b.get(index)) {
                bdd_new = bdd_new.restrict(var.toBDD());
                replacBdd_new = replacBdd_new.restrict(var.toBDD());
                minterm = minterm.and(var.toBDD());
            } else {
                bdd_new = bdd_new.restrict(var.toNotBDD());
                replacBdd_new = replacBdd_new.restrict(var.toNotBDD());
                minterm = minterm.and(var.toNotBDD());
            }
            index++;
        }
        if (replacBdd_new.isZero()) {
            bdd_new = bdd_new.restrict(replaceVar.toNotBDD());
        } else if (replacBdd_new.isOne()) {
            bdd_new = bdd_new.restrict(replaceVar.toBDD());
        } else {
            logger.error("Repl BDD should be one or zero");
        }

        if (bdd_new.isZero()) {
            // merged function is 0 for this assignment: nothing to add to retVal
        } else if (bdd_new.isOne()) {
            retVal.orWith(minterm);
        } else {
            logger.error("BDD should be one or zero");
        }

        BitSetHelper.dualNext(b);
    }

    //      if(bddvars.size() == 1) {
    //         logger.debug("RetVal: " + getFunctionString(retVal, netlist));
    //      }

    return retVal;
}
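
mergeBDDs enumerates all 2^n assignments of the remaining variables by driving a BitSet as a little-endian binary counter via BitSetHelper.dualNext(b), whose source is not shown here. A minimal sketch of that counting pattern, with a hypothetical increment helper standing in for dualNext:

import java.util.BitSet;

public class BitSetCounterDemo {

    // Increment the BitSet as a little-endian binary counter (a stand-in for
    // the BitSetHelper.dualNext(b) call used above).
    static void increment(BitSet b) {
        int i = 0;
        while (b.get(i)) {   // flip trailing 1s to 0
            b.clear(i);
            i++;
        }
        b.set(i);            // then set the first 0
    }

    public static void main(String[] args) {
        int n = 3;
        BitSet b = new BitSet(n);
        // Enumerate all 2^n assignments, as the minterm loop above does.
        for (int i = 0; i < (1 << n); i++) {
            StringBuilder assignment = new StringBuilder();
            for (int k = 0; k < n; k++) {
                assignment.append(b.get(k) ? '1' : '0');
            }
            System.out.println(i + ": " + assignment);
            increment(b);
        }
    }
}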

From source file:org.caleydo.data.importer.tcga.FirehoseProvider.java

/**
 * @param header
 * @param good
 * @return
 */
private static BitSet filterCols(String[] header, Set<String> good) {
    BitSet r = new BitSet(header.length);
    for (int i = 0; i < header.length; ++i)
        if (!good.contains(header[i]))
            r.set(i);
    return r;
}
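
The returned BitSet is a drop mask: bit i is set when header[i] is not in the good set. A self-contained sketch of building and consuming such a mask (the sample header and good set are made up for illustration):

import java.util.BitSet;
import java.util.Set;

public class ColumnFilterDemo {
    public static void main(String[] args) {
        String[] header = { "id", "name", "debug", "value", "tmp" };
        Set<String> good = Set.of("id", "name", "value");

        // Same marking idiom as filterCols(): set the bit for every column to drop.
        BitSet drop = new BitSet(header.length);
        for (int i = 0; i < header.length; i++) {
            if (!good.contains(header[i])) {
                drop.set(i);
            }
        }

        // Consuming the mask: keep only the columns whose bit is clear.
        for (int i = drop.nextClearBit(0); i < header.length; i = drop.nextClearBit(i + 1)) {
            System.out.println("keep column " + i + ": " + header[i]);
        }
    }
}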

From source file:com.oltpbenchmark.benchmarks.seats.SEATSWorker.java

protected BitSet getSeatsBitSet(FlightId flight_id) {
    BitSet seats = CACHE_BOOKED_SEATS.get(flight_id);
    if (seats == null) {
        //            synchronized (CACHE_BOOKED_SEATS) {
        seats = CACHE_BOOKED_SEATS.get(flight_id);
        if (seats == null) {
            seats = new BitSet(SEATSConstants.FLIGHTS_NUM_SEATS);
            CACHE_BOOKED_SEATS.put(flight_id, seats);
        }
        //            } // SYNCH
    }
    return (seats);
}
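
The lookup above is a double-checked cache with the synchronization commented out, so it only stays correct if CACHE_BOOKED_SEATS is a thread-safe map. One possible alternative, sketched here with hypothetical field names and a plain long in place of FlightId, is ConcurrentHashMap.computeIfAbsent:

import java.util.BitSet;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class SeatCacheDemo {
    // Hypothetical stand-ins for the SEATSWorker fields referenced above.
    private static final int FLIGHTS_NUM_SEATS = 150;
    private final ConcurrentMap<Long, BitSet> bookedSeats = new ConcurrentHashMap<>();

    // computeIfAbsent creates the per-flight BitSet at most once, even under
    // concurrent callers, which is what the commented-out synchronized block
    // above was trying to ensure.
    BitSet getSeatsBitSet(long flightId) {
        return bookedSeats.computeIfAbsent(flightId, id -> new BitSet(FLIGHTS_NUM_SEATS));
    }

    public static void main(String[] args) {
        SeatCacheDemo cache = new SeatCacheDemo();
        cache.getSeatsBitSet(42L).set(17);                      // book seat 17 on flight 42
        System.out.println(cache.getSeatsBitSet(42L).get(17));  // true - same cached instance
    }
}

The returned BitSet itself is still not thread-safe, so concurrent seat updates need their own guard.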

From source file:org.apache.hadoop.hdfs.TestRecoverStripedFile.java

/**
 * Test the file blocks recovery.
 * 1. Check the replica is recovered in the target datanode, 
 *    and verify the block replica length, generationStamp and content.
 * 2. Read the file and verify content. 
 */
private void assertFileBlocksRecovery(String fileName, int fileLen, int recovery, int toRecoverBlockNum)
        throws Exception {
    if (recovery != 0 && recovery != 1 && recovery != 2) {
        Assert.fail("Invalid recovery: 0 is to recover parity blocks,"
                + " 1 is to recover data blocks, 2 is any.");
    }
    if (toRecoverBlockNum < 1 || toRecoverBlockNum > parityBlkNum) {
        Assert.fail("toRecoverBlockNum should be between 1 ~ " + parityBlkNum);
    }

    Path file = new Path(fileName);

    final byte[] data = new byte[fileLen];
    ThreadLocalRandom.current().nextBytes(data);
    DFSTestUtil.writeFile(fs, file, data);
    StripedFileTestUtil.waitBlockGroupsReported(fs, fileName);

    LocatedBlocks locatedBlocks = getLocatedBlocks(file);
    assertEquals(locatedBlocks.getFileLength(), fileLen);

    LocatedStripedBlock lastBlock = (LocatedStripedBlock) locatedBlocks.getLastLocatedBlock();

    DatanodeInfo[] storageInfos = lastBlock.getLocations();
    byte[] indices = lastBlock.getBlockIndices();

    BitSet bitset = new BitSet(dnNum);
    for (DatanodeInfo storageInfo : storageInfos) {
        bitset.set(dnMap.get(storageInfo));
    }

    int[] toDead = new int[toRecoverBlockNum];
    int n = 0;
    for (int i = 0; i < indices.length; i++) {
        if (n < toRecoverBlockNum) {
            if (recovery == 0) {
                if (indices[i] >= dataBlkNum) {
                    toDead[n++] = i;
                }
            } else if (recovery == 1) {
                if (indices[i] < dataBlkNum) {
                    toDead[n++] = i;
                }
            } else {
                toDead[n++] = i;
            }
        } else {
            break;
        }
    }

    DatanodeInfo[] dataDNs = new DatanodeInfo[toRecoverBlockNum];
    int[] deadDnIndices = new int[toRecoverBlockNum];
    ExtendedBlock[] blocks = new ExtendedBlock[toRecoverBlockNum];
    File[] replicas = new File[toRecoverBlockNum];
    File[] metadatas = new File[toRecoverBlockNum];
    byte[][] replicaContents = new byte[toRecoverBlockNum][];
    for (int i = 0; i < toRecoverBlockNum; i++) {
        dataDNs[i] = storageInfos[toDead[i]];
        deadDnIndices[i] = dnMap.get(dataDNs[i]);

        // Check the block replica file on deadDn before it dead.
        blocks[i] = StripedBlockUtil.constructInternalBlock(lastBlock.getBlock(), cellSize, dataBlkNum,
                indices[toDead[i]]);
        replicas[i] = cluster.getBlockFile(deadDnIndices[i], blocks[i]);
        metadatas[i] = cluster.getBlockMetadataFile(deadDnIndices[i], blocks[i]);
        // the block replica on the datanode should be the same as expected
        assertEquals(replicas[i].length(), StripedBlockUtil.getInternalBlockLength(lastBlock.getBlockSize(),
                cellSize, dataBlkNum, indices[toDead[i]]));
        assertTrue(metadatas[i].getName().endsWith(blocks[i].getGenerationStamp() + ".meta"));
        replicaContents[i] = DFSTestUtil.readFileAsBytes(replicas[i]);
    }

    int cellsNum = (fileLen - 1) / cellSize + 1;
    int groupSize = Math.min(cellsNum, dataBlkNum) + parityBlkNum;

    for (int i = 0; i < toRecoverBlockNum; i++) {
        /*
         * Kill the datanode which contains one replica.
         * We need to make sure it is dead in the namenode: clear its update
         * time and trigger the NN to check heartbeats.
         */
        DataNode dn = cluster.getDataNodes().get(deadDnIndices[i]);
        dn.shutdown();
        cluster.setDataNodeDead(dn.getDatanodeId());
    }

    // Check the locatedBlocks of the file again
    locatedBlocks = getLocatedBlocks(file);
    lastBlock = (LocatedStripedBlock) locatedBlocks.getLastLocatedBlock();
    storageInfos = lastBlock.getLocations();
    assertEquals(storageInfos.length, groupSize - toRecoverBlockNum);

    int[] targetDNs = new int[dnNum - groupSize];
    n = 0;
    for (int i = 0; i < dnNum; i++) {
        if (!bitset.get(i)) { // not contain replica of the block.
            targetDNs[n++] = i;
        }
    }

    waitForRecoveryFinished(file, groupSize);

    targetDNs = sortTargetsByReplicas(blocks, targetDNs);

    // Check the replica on the new target node.
    for (int i = 0; i < toRecoverBlockNum; i++) {
        File replicaAfterRecovery = cluster.getBlockFile(targetDNs[i], blocks[i]);
        File metadataAfterRecovery = cluster.getBlockMetadataFile(targetDNs[i], blocks[i]);
        assertEquals(replicaAfterRecovery.length(), replicas[i].length());
        assertTrue(metadataAfterRecovery.getName().endsWith(blocks[i].getGenerationStamp() + ".meta"));
        byte[] replicaContentAfterRecovery = DFSTestUtil.readFileAsBytes(replicaAfterRecovery);

        Assert.assertArrayEquals(replicaContents[i], replicaContentAfterRecovery);
    }
}
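
Within the test, new BitSet(dnNum) marks which datanodes already hold a replica of the block group; the clear bits are then collected as candidate recovery targets. A small sketch of that clear-bit scan (cluster size and occupied nodes are made-up values):

import java.util.BitSet;

public class FreeNodeScanDemo {
    public static void main(String[] args) {
        int dnNum = 8;                      // illustrative cluster size
        BitSet occupied = new BitSet(dnNum);
        // Pretend datanodes 0, 2 and 5 already hold a replica of the block group.
        occupied.set(0);
        occupied.set(2);
        occupied.set(5);

        // Same idea as the targetDNs loop above: every clear bit below dnNum
        // is a datanode that may receive a recovered block.
        for (int i = occupied.nextClearBit(0); i < dnNum; i = occupied.nextClearBit(i + 1)) {
            System.out.println("candidate target datanode: " + i);
        }
    }
}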

From source file:com.google.uzaygezen.core.LongBitVector.java

@Override
public BitSet toBitSet() {
    BitSet b = new BitSet(size);
    for (int i = 0; i < size; i++) {
        if (unsafeGet(i)) {
            b.set(i);
        }
    }
    return b;
}
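
The loop copies the vector bit by bit into a BitSet. When the bits already live in longs, BitSet.valueOf(long[]) builds the same set in one call, and it is the path that reaches the private BitSet(long[]) constructor documented at the top of this page. A sketch comparing both (plain long arithmetic stands in for size and unsafeGet):

import java.util.BitSet;

public class LongToBitSetDemo {
    public static void main(String[] args) {
        long value = 0b101101L;

        // Bit-by-bit copy, as in toBitSet() above.
        BitSet manual = new BitSet(Long.SIZE);
        for (int i = 0; i < Long.SIZE; i++) {
            if (((value >>> i) & 1L) != 0) {
                manual.set(i);
            }
        }

        // Equivalent one-liner via the valueOf factory.
        BitSet viaValueOf = BitSet.valueOf(new long[] { value });

        System.out.println(manual.equals(viaValueOf)); // true
    }
}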

From source file:Exec.java

/**
 * Description of the Method
 * 
 * @param command
 *            Description of the Parameter
 * @param input
 *            Description of the Parameter
 * @param successCode
 *            Description of the Parameter
 * @param timeout
 *            Description of the Parameter
 * @param lazy
 *            Description of the Parameter
 * @return Description of the Return Value
 */
public static ExecResults execOptions(String command, String input, int successCode, int timeout,
        boolean lazy) {
    Process child = null;
    ByteArrayOutputStream output = new ByteArrayOutputStream();
    ByteArrayOutputStream errors = new ByteArrayOutputStream();
    ExecResults results = new ExecResults(command, input, successCode, timeout);
    BitSet interrupted = new BitSet(1);
    boolean lazyQuit = false;
    ThreadWatcher watcher;

    try {
        // start the command
        child = Runtime.getRuntime().exec(command);

        // get the streams in and out of the command
        InputStream processIn = child.getInputStream();
        InputStream processError = child.getErrorStream();
        OutputStream processOut = child.getOutputStream();

        // start the clock running
        if (timeout > 0) {
            watcher = new ThreadWatcher(child, interrupted, timeout);
            new Thread(watcher).start();
        }

        // Write to the child process' input stream
        if ((input != null) && !input.equals("")) {
            try {
                processOut.write(input.getBytes());
                processOut.flush();
                processOut.close();
            } catch (IOException e1) {
                results.setThrowable(e1);
            }
        }

        // Read from the child process' output stream
        // The process may get killed by the watcher at any time
        int c = 0;

        try {
            while (true) {
                if (interrupted.get(0) || lazyQuit) {
                    break; // interrupted
                }

                c = processIn.read();
                if (c == -1) {
                    break; // end of stream
                }

                output.write(c);

                // if lazy and nothing more is available, quit (after at least one read)
                if (lazy && (processIn.available() < 1)) {
                    lazyQuit = true;
                }
            }

            processIn.close();
        } catch (IOException e2) {
            results.setThrowable(e2);
        } finally {
            if (interrupted.get(0)) {
                results.setInterrupted();
            }

            results.setOutput(output.toString());
        }

        // Read from the child process' error stream
        // The process may get killed by the watcher at any time
        try {
            while (true) {
                if (interrupted.get(0) || lazyQuit) {
                    break; // interrupted
                }

                c = processError.read();
                if (c == -1) {
                    break; // end of stream
                }

                errors.write(c);

                // if lazy and nothing more is available, quit (after at least one read)
                if (lazy && (processError.available() < 1)) {
                    lazyQuit = true;
                }
            }

            processError.close();
        } catch (IOException e3) {
            results.setThrowable(e3);
        } finally {
            if (interrupted.get(0)) {
                results.setInterrupted();
            }

            results.setErrors(errors.toString());
        }

        // wait for the return value of the child process.
        if (!interrupted.get(0) && !lazyQuit) {
            int returnCode = child.waitFor();
            results.setReturnCode(returnCode);

            if (returnCode != successCode) {
                results.setError(ExecResults.BADRETURNCODE);
            }
        }
    } catch (InterruptedException i) {
        results.setInterrupted();
    } catch (Throwable t) {
        results.setThrowable(t);
    } finally {
        if (child != null) {
            child.destroy();
        }
    }

    return (results);
}
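
execOptions shares BitSet interrupted = new BitSet(1) between this method and the ThreadWatcher as a one-bit stop flag. BitSet is not synchronized, so this only works where a stale read is tolerable; a sketch of the same flag using AtomicBoolean instead, which guarantees cross-thread visibility (the sleeps stand in for the real timeout and stream reads):

import java.util.concurrent.atomic.AtomicBoolean;

public class InterruptFlagDemo {
    public static void main(String[] args) throws InterruptedException {
        // One-bit flag shared between the watcher and the reader loop.
        AtomicBoolean interrupted = new AtomicBoolean(false);

        Thread watcher = new Thread(() -> {
            try {
                Thread.sleep(100);          // stand-in for the timeout
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            interrupted.set(true);          // signal the reader loop to stop
        });
        watcher.start();

        while (!interrupted.get()) {
            Thread.sleep(10);               // stand-in for reading process output
        }
        System.out.println("reader stopped after the watcher fired");
    }
}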

From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java

private void testSlowListenerManualGuts(AckMode ackMode, String topic) throws Exception {
    logger.info("Start " + this.testName.getMethodName() + ackMode);
    Map<String, Object> props = KafkaTestUtils.consumerProps("slow2", "false", embeddedKafka);
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<Integer, String>(props);
    ContainerProperties containerProps = new ContainerProperties(topic);
    containerProps.setSyncCommits(true);

    final CountDownLatch latch = new CountDownLatch(6);
    final BitSet bitSet = new BitSet(4);
    containerProps.setMessageListener((AcknowledgingMessageListener<Integer, String>) (message, ack) -> {
        logger.info("slow2: " + message);
        bitSet.set((int) (message.partition() * 3 + message.offset()));
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
        ack.acknowledge();
        latch.countDown();
    });
    containerProps.setPauseAfter(100);
    containerProps.setAckMode(ackMode);

    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testSlow2");
    container.start();

    Consumer<?, ?> consumer = spyOnConsumer(container);

    final CountDownLatch commitLatch = new CountDownLatch(7);

    willAnswer(invocation -> {

        try {
            return invocation.callRealMethod();
        } finally {
            commitLatch.countDown();
        }

    }).given(consumer).commitSync(any());

    ContainerTestUtils.waitForAssignment(container, embeddedKafka.getPartitionsPerTopic());

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<Integer, String>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic);
    template.sendDefault(0, "foo");
    template.sendDefault(2, "bar");
    template.sendDefault(0, "baz");
    template.sendDefault(2, "qux");
    template.flush();
    Thread.sleep(300);
    template.sendDefault(0, "fiz");
    template.sendDefault(2, "buz");
    template.flush();
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(commitLatch.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(bitSet.cardinality()).isEqualTo(6);
    verify(consumer, atLeastOnce()).pause(anyObject());
    verify(consumer, atLeastOnce()).resume(anyObject());
    container.stop();
    logger.info("Stop " + this.testName.getMethodName() + ackMode);
}
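
The test encodes each (partition, offset) pair as the bit index partition * 3 + offset and then checks bitSet.cardinality() to confirm that six distinct records were processed. A tiny sketch of that count-distinct-by-bit-index trick with made-up record data:

import java.util.BitSet;

public class SeenRecordsDemo {
    public static void main(String[] args) {
        int offsetsPerPartition = 3;        // matches the "* 3" encoding above
        BitSet seen = new BitSet(2 * offsetsPerPartition);

        int[][] records = { {0, 0}, {1, 0}, {0, 1}, {1, 1}, {0, 0} }; // {partition, offset}
        for (int[] r : records) {
            // Each (partition, offset) pair maps to one bit; duplicates land on
            // the same bit, so cardinality() counts distinct records.
            seen.set(r[0] * offsetsPerPartition + r[1]);
        }
        System.out.println(seen.cardinality()); // 4 distinct records
    }
}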

From source file:org.apache.openjpa.datacache.AbstractDataCache.java

public BitSet removeAll(Collection<Object> keys) {
    if (keys.isEmpty())
        return EMPTY_BITSET;

    BitSet set = new BitSet(keys.size());
    int i = 0;
    for (Iterator<Object> iter = keys.iterator(); iter.hasNext(); i++)
        if (remove(iter.next()) != null)
            set.set(i);
    return set;
}
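
removeAll reports, by position in the iteration order of keys, which entries were actually evicted. A short sketch of consuming such a result BitSet (the removed bits are set by hand here rather than by a real cache):

import java.util.BitSet;
import java.util.List;

public class RemovalResultDemo {
    public static void main(String[] args) {
        List<String> keys = List.of("a", "b", "c", "d");

        // Pretend result of removeAll(keys): bit i is set when keys.get(i)
        // was present and has been removed.
        BitSet removed = new BitSet(keys.size());
        removed.set(1);
        removed.set(3);

        for (int i = removed.nextSetBit(0); i >= 0; i = removed.nextSetBit(i + 1)) {
            System.out.println("evicted key: " + keys.get(i));
        }
    }
}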

From source file:org.caleydo.core.util.impute.KNNImpute.java

/**
 * split the neighbor hood in two groups based on 2 k-means
 *
 * @param neighborhood
 * @return
 */
private Pair<List<Gene>, List<Gene>> twoMeanClusterSplit(List<Gene> neighborhood) {
    final int n = neighborhood.size();

    final int maxit = desc.getMaxit();
    final double eps = desc.getEps();

    int a_start = r.nextInt(n);
    int b_start = r.nextInt(n);
    Gene a_center = new Gene(1, -1, Arrays.copyOf(neighborhood.get(a_start).data, samples));
    Gene b_center = new Gene(1, -1, Arrays.copyOf(neighborhood.get(b_start).data, samples));
    float[] a_center_pong = new float[samples];
    Arrays.fill(a_center_pong, Float.NaN);
    float[] b_center_pong = new float[samples];
    Arrays.fill(b_center_pong, Float.NaN);

    float[] tmp;
    BitSet partOf_a = new BitSet(n);

    double d_old = 0;
    for (int i = 0; i < maxit; ++i) {
        int j = 0;
        int changed = 0;
        double d_new = 0;
        for (Gene gene : neighborhood) {
            final double a_distance = distance(a_center, gene);
            final double b_distance = distance(b_center, gene);
            final boolean in_a = a_distance < b_distance;
            if (partOf_a.get(j) != in_a) {
                changed++;
                partOf_a.set(j, in_a);
            }
            d_new += in_a ? a_distance : b_distance;
            tmp = in_a ? a_center_pong : b_center_pong;
            // shift new center
            for (int k = 0; k < samples; ++k) {
                if (!gene.isNaN(k)) {
                    if (Float.isNaN(tmp[k]))
                        tmp[k] = gene.get(k);
                    else
                        tmp[k] += gene.get(k);
                }
            }
            j++;
        }
        if (changed == 0 || d_new == 0)
            break;
        final double ratio = Math.abs(d_new - d_old) / d_old;
        if (i > 0 && ratio < eps)
            break;
        d_old = d_new;
        int a_n = partOf_a.cardinality();
        int b_n = n - a_n;
        if (a_n == 0 || b_n == 0) {
            // FIXME
        }
        updateCenter(a_center, a_center_pong, a_n);
        updateCenter(b_center, b_center_pong, b_n);
    }

    return split(neighborhood, partOf_a);
}
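
partOf_a records, per position in the neighborhood list, whether a gene ended up in the first cluster; split(neighborhood, partOf_a), whose source is not shown on this page, presumably partitions the list by that mask. A generic sketch of splitting a list with a BitSet membership mask:

import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;

public class BitSetSplitDemo {
    // Partition items into two groups by a membership mask.
    static <T> List<List<T>> splitByMask(List<T> items, BitSet inFirst) {
        List<T> a = new ArrayList<>();
        List<T> b = new ArrayList<>();
        for (int i = 0; i < items.size(); i++) {
            (inFirst.get(i) ? a : b).add(items.get(i));
        }
        return List.of(a, b);
    }

    public static void main(String[] args) {
        List<String> genes = List.of("g0", "g1", "g2", "g3", "g4");
        BitSet partOfA = new BitSet(genes.size());
        partOfA.set(0);
        partOfA.set(3);
        System.out.println(splitByMask(genes, partOfA)); // [[g0, g3], [g1, g2, g4]]
    }
}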