Example usage for java.util.BitSet.set(int)

List of usage examples for java.util.BitSet.set(int)

Introduction

On this page you can find example usage of java.util.BitSet.set(int).

Prototype

public void set(int bitIndex) 

Document

Sets the bit at the specified index to true.
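
Before looking at the larger examples below, here is a minimal, self-contained sketch of the method on its own (the class name BitSetSetExample is only for illustration):

import java.util.BitSet;

public class BitSetSetExample {
    public static void main(String[] args) {
        BitSet bits = new BitSet();   // all bits start out false
        bits.set(3);                  // set bit 3 to true
        bits.set(7);

        System.out.println(bits);                // {3, 7}
        System.out.println(bits.get(3));         // true
        System.out.println(bits.cardinality());  // 2
    }
}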

Usage

From source file:org.apache.tez.runtime.library.output.OrderedPartitionedKVOutput.java

protected List<Event> generateEventsOnClose() throws IOException {
    DataMovementEventPayloadProto.Builder payloadBuilder = DataMovementEventPayloadProto.newBuilder();

    boolean outputGenerated = true;
    if (sendEmptyPartitionDetails) {
        Path indexFile = sorter.getMapOutput().getOutputIndexFile();
        TezSpillRecord spillRecord = new TezSpillRecord(indexFile, conf);
        BitSet emptyPartitionDetails = new BitSet();
        int emptyPartitions = 0;
        for (int i = 0; i < spillRecord.size(); i++) {
            TezIndexRecord indexRecord = spillRecord.getIndex(i);
            if (!indexRecord.hasData()) {
                emptyPartitionDetails.set(i);
                emptyPartitions++;
            }
        }
        outputGenerated = (spillRecord.size() != emptyPartitions);
        if (emptyPartitions > 0) {
            ByteString emptyPartitionsBytesString = TezCommonUtils
                    .compressByteArrayToByteString(TezUtilsInternal.toByteArray(emptyPartitionDetails));
            payloadBuilder.setEmptyPartitions(emptyPartitionsBytesString);
            LOG.info("EmptyPartition bitsetSize=" + emptyPartitionDetails.cardinality() + ", numOutputs="
                    + getNumPhysicalOutputs() + ", emptyPartitions=" + emptyPartitions + ", compressedSize="
                    + emptyPartitionsBytesString.size());
        }
    }
    if (!sendEmptyPartitionDetails || outputGenerated) {
        String host = System.getenv(ApplicationConstants.Environment.NM_HOST.toString());
        ByteBuffer shuffleMetadata = getContext()
                .getServiceProviderMetaData(ShuffleUtils.SHUFFLE_HANDLER_SERVICE_ID);
        int shufflePort = ShuffleUtils.deserializeShuffleProviderMetaData(shuffleMetadata);
        payloadBuilder.setHost(host);
        payloadBuilder.setPort(shufflePort);
        payloadBuilder.setPathComponent(getContext().getUniqueIdentifier());
    }

    payloadBuilder.setRunDuration((int) ((endTime - startTime) / 1000));
    DataMovementEventPayloadProto payloadProto = payloadBuilder.build();
    ByteBuffer payload = payloadProto.toByteString().asReadOnlyByteBuffer();

    long outputSize = getContext().getCounters().findCounter(TaskCounter.OUTPUT_BYTES).getValue();
    VertexManagerEventPayloadProto.Builder vmBuilder = VertexManagerEventPayloadProto.newBuilder();
    vmBuilder.setOutputSize(outputSize);
    VertexManagerEvent vmEvent = VertexManagerEvent.create(getContext().getDestinationVertexName(),
            vmBuilder.build().toByteString().asReadOnlyByteBuffer());

    List<Event> events = Lists.newArrayListWithCapacity(getNumPhysicalOutputs() + 1);
    events.add(vmEvent);

    CompositeDataMovementEvent csdme = CompositeDataMovementEvent.create(0, getNumPhysicalOutputs(), payload);
    events.add(csdme);

    return events;
}
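
The method above uses a BitSet to record which partitions produced no output and then serializes it into the event payload through Tez helpers (TezUtilsInternal.toByteArray plus compression). As a simplified, JDK-only sketch of the same idea, assuming a hypothetical partitionSizes array in place of the spill record and using the standard BitSet.toByteArray()/BitSet.valueOf(byte[]) pair for the round trip:

import java.util.BitSet;

public class EmptyPartitionTracker {

    // Returns a BitSet with bit i set when partition i produced no data.
    static BitSet findEmptyPartitions(long[] partitionSizes) {
        BitSet empty = new BitSet(partitionSizes.length);
        for (int i = 0; i < partitionSizes.length; i++) {
            if (partitionSizes[i] == 0) {
                empty.set(i);
            }
        }
        return empty;
    }

    public static void main(String[] args) {
        BitSet empty = findEmptyPartitions(new long[] { 10, 0, 42, 0 });
        System.out.println(empty);                 // {1, 3}

        byte[] wire = empty.toByteArray();         // compact form suitable for a payload
        System.out.println(BitSet.valueOf(wire));  // {1, 3} -- round trip
    }
}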

From source file:org.apache.hadoop.hdfs.TestRecoverStripedFile.java

/**
 * Test the file blocks recovery.
 * 1. Check the replica is recovered in the target datanode, 
 *    and verify the block replica length, generationStamp and content.
 * 2. Read the file and verify content. 
 */
private void assertFileBlocksRecovery(String fileName, int fileLen, int recovery, int toRecoverBlockNum)
        throws Exception {
    if (recovery != 0 && recovery != 1 && recovery != 2) {
        Assert.fail("Invalid recovery: 0 is to recover parity blocks, "
                + "1 is to recover data blocks, 2 is any.");
    }
    if (toRecoverBlockNum < 1 || toRecoverBlockNum > parityBlkNum) {
        Assert.fail("toRecoverBlockNum should be between 1 and " + parityBlkNum);
    }

    Path file = new Path(fileName);

    final byte[] data = new byte[fileLen];
    ThreadLocalRandom.current().nextBytes(data);
    DFSTestUtil.writeFile(fs, file, data);
    StripedFileTestUtil.waitBlockGroupsReported(fs, fileName);

    LocatedBlocks locatedBlocks = getLocatedBlocks(file);
    assertEquals(locatedBlocks.getFileLength(), fileLen);

    LocatedStripedBlock lastBlock = (LocatedStripedBlock) locatedBlocks.getLastLocatedBlock();

    DatanodeInfo[] storageInfos = lastBlock.getLocations();
    byte[] indices = lastBlock.getBlockIndices();

    BitSet bitset = new BitSet(dnNum);
    for (DatanodeInfo storageInfo : storageInfos) {
        bitset.set(dnMap.get(storageInfo));
    }

    int[] toDead = new int[toRecoverBlockNum];
    int n = 0;
    for (int i = 0; i < indices.length; i++) {
        if (n < toRecoverBlockNum) {
            if (recovery == 0) {
                if (indices[i] >= dataBlkNum) {
                    toDead[n++] = i;
                }
            } else if (recovery == 1) {
                if (indices[i] < dataBlkNum) {
                    toDead[n++] = i;
                }
            } else {
                toDead[n++] = i;
            }
        } else {
            break;
        }
    }

    DatanodeInfo[] dataDNs = new DatanodeInfo[toRecoverBlockNum];
    int[] deadDnIndices = new int[toRecoverBlockNum];
    ExtendedBlock[] blocks = new ExtendedBlock[toRecoverBlockNum];
    File[] replicas = new File[toRecoverBlockNum];
    File[] metadatas = new File[toRecoverBlockNum];
    byte[][] replicaContents = new byte[toRecoverBlockNum][];
    for (int i = 0; i < toRecoverBlockNum; i++) {
        dataDNs[i] = storageInfos[toDead[i]];
        deadDnIndices[i] = dnMap.get(dataDNs[i]);

        // Check the block replica file on deadDn before it dies.
        blocks[i] = StripedBlockUtil.constructInternalBlock(lastBlock.getBlock(), cellSize, dataBlkNum,
                indices[toDead[i]]);
        replicas[i] = cluster.getBlockFile(deadDnIndices[i], blocks[i]);
        metadatas[i] = cluster.getBlockMetadataFile(deadDnIndices[i], blocks[i]);
        // the block replica on the datanode should be the same as expected
        assertEquals(replicas[i].length(), StripedBlockUtil.getInternalBlockLength(lastBlock.getBlockSize(),
                cellSize, dataBlkNum, indices[toDead[i]]));
        assertTrue(metadatas[i].getName().endsWith(blocks[i].getGenerationStamp() + ".meta"));
        replicaContents[i] = DFSTestUtil.readFileAsBytes(replicas[i]);
    }

    int cellsNum = (fileLen - 1) / cellSize + 1;
    int groupSize = Math.min(cellsNum, dataBlkNum) + parityBlkNum;

    for (int i = 0; i < toRecoverBlockNum; i++) {
        /*
         * Kill the datanode which contains one replica.
         * We need to make sure it is marked dead in the namenode: clear its update
         * time and trigger the NN to check heartbeats.
         */
        DataNode dn = cluster.getDataNodes().get(deadDnIndices[i]);
        dn.shutdown();
        cluster.setDataNodeDead(dn.getDatanodeId());
    }

    // Check the locatedBlocks of the file again
    locatedBlocks = getLocatedBlocks(file);
    lastBlock = (LocatedStripedBlock) locatedBlocks.getLastLocatedBlock();
    storageInfos = lastBlock.getLocations();
    assertEquals(storageInfos.length, groupSize - toRecoverBlockNum);

    int[] targetDNs = new int[dnNum - groupSize];
    n = 0;
    for (int i = 0; i < dnNum; i++) {
        if (!bitset.get(i)) { // does not contain a replica of the block.
            targetDNs[n++] = i;
        }
    }

    waitForRecoveryFinished(file, groupSize);

    targetDNs = sortTargetsByReplicas(blocks, targetDNs);

    // Check the replica on the new target node.
    for (int i = 0; i < toRecoverBlockNum; i++) {
        File replicaAfterRecovery = cluster.getBlockFile(targetDNs[i], blocks[i]);
        File metadataAfterRecovery = cluster.getBlockMetadataFile(targetDNs[i], blocks[i]);
        assertEquals(replicaAfterRecovery.length(), replicas[i].length());
        assertTrue(metadataAfterRecovery.getName().endsWith(blocks[i].getGenerationStamp() + ".meta"));
        byte[] replicaContentAfterRecovery = DFSTestUtil.readFileAsBytes(replicaAfterRecovery);

        Assert.assertArrayEquals(replicaContents[i], replicaContentAfterRecovery);
    }
}
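
The test above marks every datanode that holds a replica with bitset.set(...) and later loops over all datanodes testing !bitset.get(i) to collect reconstruction targets. If only the clear indices are needed, BitSet.nextClearBit gives a more direct loop; a stand-alone sketch under that assumption (the names FreeSlotFinder, occupied and total are illustrative):

import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;

public class FreeSlotFinder {

    // Collect the indices in [0, total) whose bit is NOT set,
    // mirroring how the test picks target datanodes.
    static List<Integer> freeSlots(BitSet occupied, int total) {
        List<Integer> free = new ArrayList<>();
        for (int i = occupied.nextClearBit(0); i < total; i = occupied.nextClearBit(i + 1)) {
            free.add(i);
        }
        return free;
    }

    public static void main(String[] args) {
        BitSet occupied = new BitSet(8);
        occupied.set(0);
        occupied.set(3);
        occupied.set(4);
        System.out.println(freeSlots(occupied, 8)); // [1, 2, 5, 6, 7]
    }
}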

From source file:au.org.ala.delta.intkey.WriteOnceIntkeyItemsFile.java

public void writeAttributeBits(int charNumber, List<BitSet> attributes, int numBits) {
    int record = updateCharacterIndex(charNumber);

    // Merge the list into a single BitSet.
    BitSet master = new BitSet();
    int offset = 0;
    for (BitSet set : attributes) {
        for (int i = 0; i < numBits; i++) {
            if (set.get(i)) {
                master.set(i + offset);
            }
        }
        offset += numBits;
    }

    List<Integer> values = bitSetToInts(master, numBits * attributes.size());
    writeToRecord(record, values);
}
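
writeAttributeBits merges a list of fixed-width BitSets into one master BitSet by copying each set bit to its slot offset. A stand-alone sketch of just that packing step, iterating set bits with nextSetBit instead of testing every position (the class name and sample data are illustrative):

import java.util.BitSet;
import java.util.List;

public class BitSetPacking {

    // Pack fixed-width BitSets into one BitSet, each part shifted by its slot offset.
    static BitSet pack(List<BitSet> parts, int width) {
        BitSet packed = new BitSet(parts.size() * width);
        int offset = 0;
        for (BitSet part : parts) {
            for (int i = part.nextSetBit(0); i >= 0 && i < width; i = part.nextSetBit(i + 1)) {
                packed.set(offset + i);
            }
            offset += width;
        }
        return packed;
    }

    public static void main(String[] args) {
        BitSet a = new BitSet();
        a.set(0);
        a.set(2);
        BitSet b = new BitSet();
        b.set(1);
        System.out.println(pack(List.of(a, b), 3)); // {0, 2, 4}
    }
}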

From source file:org.apache.hadoop.hdfs.TestReconstructStripedFile.java

/**
 * Test the file blocks reconstruction.
 * 1. Check the replica is reconstructed in the target datanode,
 *    and verify the block replica length, generationStamp and content.
 * 2. Read the file and verify content.
 */
private void assertFileBlocksReconstruction(String fileName, int fileLen, ReconstructionType type,
        int toRecoverBlockNum) throws Exception {
    if (toRecoverBlockNum < 1 || toRecoverBlockNum > parityBlkNum) {
        Assert.fail("toRecoverBlockNum should be between 1 and " + parityBlkNum);
    }
    assertTrue("File length must be positive.", fileLen > 0);

    Path file = new Path(fileName);

    final byte[] data = new byte[fileLen];
    Arrays.fill(data, (byte) 1);
    DFSTestUtil.writeFile(fs, file, data);
    StripedFileTestUtil.waitBlockGroupsReported(fs, fileName);

    LocatedBlocks locatedBlocks = StripedFileTestUtil.getLocatedBlocks(file, fs);
    assertEquals(locatedBlocks.getFileLength(), fileLen);

    LocatedStripedBlock lastBlock = (LocatedStripedBlock) locatedBlocks.getLastLocatedBlock();

    DatanodeInfo[] storageInfos = lastBlock.getLocations();
    byte[] indices = lastBlock.getBlockIndices();

    BitSet bitset = new BitSet(dnNum);
    for (DatanodeInfo storageInfo : storageInfos) {
        bitset.set(dnMap.get(storageInfo));
    }

    int[] dead = generateDeadDnIndices(type, toRecoverBlockNum, indices);
    LOG.info("Note: indices == " + Arrays.toString(indices) + ". Generate errors on datanodes: "
            + Arrays.toString(dead));

    DatanodeInfo[] dataDNs = new DatanodeInfo[toRecoverBlockNum];
    int[] deadDnIndices = new int[toRecoverBlockNum];
    ExtendedBlock[] blocks = new ExtendedBlock[toRecoverBlockNum];
    File[] replicas = new File[toRecoverBlockNum];
    long[] replicaLengths = new long[toRecoverBlockNum];
    File[] metadatas = new File[toRecoverBlockNum];
    byte[][] replicaContents = new byte[toRecoverBlockNum][];
    Map<ExtendedBlock, DataNode> errorMap = new HashMap<>(dead.length);
    for (int i = 0; i < toRecoverBlockNum; i++) {
        dataDNs[i] = storageInfos[dead[i]];
        deadDnIndices[i] = dnMap.get(dataDNs[i]);

        // Check the block replica file on deadDn before it dies.
        blocks[i] = StripedBlockUtil.constructInternalBlock(lastBlock.getBlock(), cellSize, dataBlkNum,
                indices[dead[i]]);
        errorMap.put(blocks[i], cluster.getDataNodes().get(deadDnIndices[i]));
        replicas[i] = cluster.getBlockFile(deadDnIndices[i], blocks[i]);
        replicaLengths[i] = replicas[i].length();
        metadatas[i] = cluster.getBlockMetadataFile(deadDnIndices[i], blocks[i]);
        // the block replica on the datanode should be the same as expected
        assertEquals(replicaLengths[i], StripedBlockUtil.getInternalBlockLength(lastBlock.getBlockSize(),
                cellSize, dataBlkNum, indices[dead[i]]));
        assertTrue(metadatas[i].getName().endsWith(blocks[i].getGenerationStamp() + ".meta"));
        LOG.info("replica " + i + " locates in file: " + replicas[i]);
        replicaContents[i] = DFSTestUtil.readFileAsBytes(replicas[i]);
    }

    int lastGroupDataLen = fileLen % (dataBlkNum * blockSize);
    int lastGroupNumBlk = lastGroupDataLen == 0 ? dataBlkNum
            : Math.min(dataBlkNum, ((lastGroupDataLen - 1) / cellSize + 1));
    int groupSize = lastGroupNumBlk + parityBlkNum;

    // shutdown datanodes or generate corruption
    int stoppedDN = generateErrors(errorMap, type);

    // Check the locatedBlocks of the file again
    locatedBlocks = StripedFileTestUtil.getLocatedBlocks(file, fs);
    lastBlock = (LocatedStripedBlock) locatedBlocks.getLastLocatedBlock();
    storageInfos = lastBlock.getLocations();
    assertEquals(storageInfos.length, groupSize - stoppedDN);

    int[] targetDNs = new int[dnNum - groupSize];
    int n = 0;
    for (int i = 0; i < dnNum; i++) {
        if (!bitset.get(i)) { // does not contain a replica of the block.
            targetDNs[n++] = i;
        }
    }

    StripedFileTestUtil.waitForReconstructionFinished(file, fs, groupSize);

    targetDNs = sortTargetsByReplicas(blocks, targetDNs);

    // Check the replica on the new target node.
    for (int i = 0; i < toRecoverBlockNum; i++) {
        File replicaAfterReconstruction = cluster.getBlockFile(targetDNs[i], blocks[i]);
        LOG.info("replica after reconstruction " + replicaAfterReconstruction);
        File metadataAfterReconstruction = cluster.getBlockMetadataFile(targetDNs[i], blocks[i]);
        assertEquals(replicaLengths[i], replicaAfterReconstruction.length());
        LOG.info("replica before " + replicas[i]);
        assertTrue(metadataAfterReconstruction.getName().endsWith(blocks[i].getGenerationStamp() + ".meta"));
        byte[] replicaContentAfterReconstruction = DFSTestUtil.readFileAsBytes(replicaAfterReconstruction);

        Assert.assertArrayEquals(replicaContents[i], replicaContentAfterReconstruction);
    }
}

From source file:org.apache.hadoop.hive.ql.optimizer.SharedWorkOptimizer.java

private static boolean areMergeable(ParseContext pctx, SharedWorkOptimizerCache optimizerCache,
        TableScanOperator tsOp1, TableScanOperator tsOp2) throws SemanticException {
    // First we check if the two table scan operators can actually be merged
    // If schemas do not match, we currently do not merge
    List<String> prevTsOpNeededColumns = tsOp1.getNeededColumns();
    List<String> tsOpNeededColumns = tsOp2.getNeededColumns();
    if (prevTsOpNeededColumns.size() != tsOpNeededColumns.size()) {
        return false;
    }
    boolean notEqual = false;
    for (int i = 0; i < prevTsOpNeededColumns.size(); i++) {
        if (!prevTsOpNeededColumns.get(i).equals(tsOpNeededColumns.get(i))) {
            notEqual = true;
            break;
        }
    }
    if (notEqual) {
        return false;
    }
    // If row limit does not match, we currently do not merge
    if (tsOp1.getConf().getRowLimit() != tsOp2.getConf().getRowLimit()) {
        return false;
    }
    // If partitions do not match, we currently do not merge
    PrunedPartitionList prevTsOpPPList = pctx.getPrunedPartitions(tsOp1);
    PrunedPartitionList tsOpPPList = pctx.getPrunedPartitions(tsOp2);
    if (!prevTsOpPPList.getPartitions().equals(tsOpPPList.getPartitions())) {
        return false;
    }
    // If there is a DPP source, check whether it actually refers to the same target, column, etc.
    // Further, the DPP value needs to be generated from the same subtree.
    List<Operator<?>> dppsOp1 = new ArrayList<>(optimizerCache.tableScanToDPPSource.get(tsOp1));
    List<Operator<?>> dppsOp2 = new ArrayList<>(optimizerCache.tableScanToDPPSource.get(tsOp2));
    if (dppsOp1.isEmpty() && dppsOp2.isEmpty()) {
        return true;
    }
    for (int i = 0; i < dppsOp1.size(); i++) {
        Operator<?> op = dppsOp1.get(i);
        if (op instanceof ReduceSinkOperator) {
            Set<Operator<?>> ascendants = findAscendantWorkOperators(pctx, optimizerCache, op);
            if (ascendants.contains(tsOp2)) {
                dppsOp1.remove(i);
                i--;
            }
        }
    }
    for (int i = 0; i < dppsOp2.size(); i++) {
        Operator<?> op = dppsOp2.get(i);
        if (op instanceof ReduceSinkOperator) {
            Set<Operator<?>> ascendants = findAscendantWorkOperators(pctx, optimizerCache, op);
            if (ascendants.contains(tsOp1)) {
                dppsOp2.remove(i);
                i--;
            }
        }
    }
    if (dppsOp1.size() != dppsOp2.size()) {
        // Only first or second operator contains DPP pruning
        return false;
    }
    // Check if DPP branches are equal
    for (int i = 0; i < dppsOp1.size(); i++) {
        Operator<?> dppOp1 = dppsOp1.get(i);
        BitSet bs = new BitSet();
        for (int j = 0; j < dppsOp2.size(); j++) {
            if (!bs.get(j)) {
                // If not visited yet
                Operator<?> dppOp2 = dppsOp2.get(j);
                if (compareAndGatherOps(pctx, dppOp1, dppOp2) != null) {
                    // The DPP operator/branch are equal
                    bs.set(j);
                    break;
                }
            }
        }
        if (bs.cardinality() == i) {
            return false;
        }
    }
    return true;
}

From source file:SwitchTest.java

protected BranchGroup createSceneBranchGroup() {
    BranchGroup objRoot = super.createSceneBranchGroup();

    double labelScale = 20;

    // flip this boolean to either display all the child nodes
    // or to display only the 3rd, 6th and 7th.
    final boolean bDisplayAll = false;

    // create the Switch Node
    int nMode = Switch.CHILD_ALL;

    if (bDisplayAll == false)
        nMode = Switch.CHILD_MASK;

    Switch switchGroup = new Switch(nMode);
    switchGroup.setCapability(Switch.ALLOW_SWITCH_WRITE);

    switchGroup.addChild(createLabel("Child Node 1", labelScale));
    switchGroup.addChild(createLabel("Child Node 2", labelScale));
    switchGroup.addChild(createLabel("Child Node 3", labelScale));
    switchGroup.addChild(createLabel("Child Node 4", labelScale));
    switchGroup.addChild(createLabel("Child Node 5", labelScale));
    switchGroup.addChild(createLabel("Child Node 6", labelScale));
    switchGroup.addChild(createLabel("Child Node 7", labelScale));

    if (bDisplayAll == false) {
        java.util.BitSet visibleNodes = new java.util.BitSet(switchGroup.numChildren());

        // make the third, sixth and seventh nodes visible
        visibleNodes.set(2);
        visibleNodes.set(5);
        visibleNodes.set(6);

        switchGroup.setChildMask(visibleNodes);
    }

    // finally add the Switch Node
    objRoot.addChild(switchGroup);

    return objRoot;
}

From source file:dendroscope.autumn.hybridnumber.ComputeHybridNumber.java

/**
 * Run the algorithm. This can be re-entered for re-rootings of the same two trees.
 *
 * @param tree1
 * @param tree2
 * @return reduced trees
 */
int run(PhyloTree tree1, PhyloTree tree2, Taxa allTaxa) throws IOException, CanceledException {
    if (!initialized) {
        initialized = true;
        progressListener.setMaximum(20);
        progressListener.setProgress(0);
        startTime = System.currentTimeMillis();
        nextTime = this.startTime + waitTime;
    }

    if (bestScore.get() == LARGE) { // no upper bound given, use cluster network
        System.err.print("Computing upper bound using cluster network: ");
        int upperBound = Utilities.getNumberOfReticulationsInClusterNetwork(tree1, tree2, progressListener);
        System.err.println(upperBound);
        bestScore.set(upperBound);
    }

    Pair<Root, Root> roots = PreProcess.apply(tree1, tree2, allTaxa);
    Root root1 = roots.getFirst();
    Root root2 = roots.getSecond();

    BitSet onlyTree1 = Cluster.setminus(root1.getTaxa(), root2.getTaxa());
    BitSet onlyTree2 = Cluster.setminus(root2.getTaxa(), root1.getTaxa());

    if (root1.getTaxa().cardinality() == onlyTree1.cardinality())
        throw new IOException("None of the taxa in tree2 are contained in tree1");
    if (root2.getTaxa().cardinality() == onlyTree2.cardinality())
        throw new IOException("None of the taxa in tree1 are contained in tree2");

    if (onlyTree1.cardinality() > 0) {
        if (!silent)
            System.err.println("Killing all taxa only present in tree1: " + onlyTree1.cardinality());
        for (int t = onlyTree1.nextSetBit(0); t != -1; t = onlyTree1.nextSetBit(t + 1)) {
            BitSet one = new BitSet();
            one.set(t);
            root1 = CopyWithTaxaRemoved.apply(root1, one);
        }
    }

    if (onlyTree2.cardinality() > 0) {
        if (!silent)
            System.err.println("Killing all taxa only present in tree2: " + onlyTree2.cardinality());
        for (int t = onlyTree2.nextSetBit(0); t != -1; t = onlyTree2.nextSetBit(t + 1)) {
            BitSet one = new BitSet();
            one.set(t);
            root2 = CopyWithTaxaRemoved.apply(root2, one);
        }
    }

    if (!root1.getTaxa().equals(root2.getTaxa()))
        throw new IOException("Trees have unequal taxon sets (even after killing)");

    // run the refine algorithm
    if (!silent)
        System.err.println("Computing common refinement of both trees");
    Refine.apply(root1, root2);

    if (true) {
        System.err.println(root1.toStringTree());
        System.err.println(root2.toStringTree());
    }

    if (tree1.getRoot() == null || tree2.getRoot() == null) {
        throw new IOException("Can't compute hybrid number, at least one of the trees is empty or unrooted");
    }

    // we maintain both trees in lexicographic order for ease of comparison
    root1.reorderSubTree();
    root2.reorderSubTree();

    if (!silent)
        System.err.println("Computing hybridization number using Autumn algorithm...");
    if (!silent)
        System.err.println("(Number of worker threads: " + (additionalThreads + 1) + ")");

    int result = computeHybridNumberRec(root1, root2, false, null, null, true, 0, new ValuesList());
    if (!silent)
        System.err.println("(Result: " + result + ")");
    if (!silent)
        System.err.println("Hybridization number: " + bestScore.get());
    if (bestScore.get() > result)
        throw new IOException("bestScore > result: " + bestScore.get() + " " + result);

    return bestScore.get();
}
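
Here each taxon set is a BitSet, and Cluster.setminus computes which taxa occur in only one tree before single bits are set and removed one by one. The set difference itself needs nothing beyond the JDK; a small sketch, with TaxonSetOps and the sample sets as illustrative stand-ins:

import java.util.BitSet;

public class TaxonSetOps {

    // Set difference a \ b using clone() and andNot(), similar in spirit to Cluster.setminus.
    static BitSet setminus(BitSet a, BitSet b) {
        BitSet result = (BitSet) a.clone();
        result.andNot(b);
        return result;
    }

    public static void main(String[] args) {
        BitSet tree1 = new BitSet();
        tree1.set(1);
        tree1.set(2);
        tree1.set(3);
        BitSet tree2 = new BitSet();
        tree2.set(2);
        tree2.set(3);
        tree2.set(5);

        BitSet onlyTree1 = setminus(tree1, tree2);
        // Iterate set bits the same way the code above does.
        for (int t = onlyTree1.nextSetBit(0); t != -1; t = onlyTree1.nextSetBit(t + 1)) {
            System.out.println("taxon only in tree1: " + t);
        }
    }
}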

From source file:org.apache.jackrabbit.core.query.lucene.TermDocsCache.java

/**
 * Returns the {@link TermDocs} for the given term.
 *
 * @param t the term.
 * @return the term docs for the given term.
 * @throws IOException if an error occurs while reading from the index.
 */
public TermDocs termDocs(final Term t) throws IOException {
    if (t.field() != field) {
        return reader.termDocs(t);
    }

    String text = t.text();
    if (unknownValues.get(text) != null) {
        log.debug("EmptyTermDocs({},{})", field, text);
        return EmptyTermDocs.INSTANCE;
    }

    // maintain cache
    CacheEntry entry;
    synchronized (cache) {
        entry = (CacheEntry) cache.get(text);
        if (entry == null) {
            // check space
            if (cache.size() >= CACHE_SIZE) {
                // prune half of them and adjust the rest
                CacheEntry[] entries = (CacheEntry[]) cache.values().toArray(new CacheEntry[cache.size()]);
                Arrays.sort(entries);
                int threshold = entries[CACHE_SIZE / 2].numAccessed;
                for (Iterator it = cache.entrySet().iterator(); it.hasNext();) {
                    Map.Entry e = (Map.Entry) it.next();
                    if (((CacheEntry) e.getValue()).numAccessed <= threshold) {
                        // prune
                        it.remove();
                    } else {
                        // adjust
                        CacheEntry ce = (CacheEntry) e.getValue();
                        ce.numAccessed = (int) Math.sqrt(ce.numAccessed);
                    }
                }
            }
            entry = new CacheEntry();
            cache.put(text, entry);
        } else {
            entry.numAccessed++;
        }
    }

    // this is a threshold to prevent caching of TermDocs
    // that are read only irregularly.
    if (entry.numAccessed < 10) {
        if (log.isDebugEnabled()) {
            log.debug("#{} TermDocs({},{})", new Object[] { new Integer(entry.numAccessed), field, text });
        }
        return reader.termDocs(t);
    }

    if (entry.bits == null) {
        // collect bits
        BitSet bits = null;
        TermDocs tDocs = reader.termDocs(t);
        try {
            while (tDocs.next()) {
                if (bits == null) {
                    bits = new BitSet(reader.maxDoc());
                }
                bits.set(tDocs.doc());
            }
        } finally {
            tDocs.close();
        }
        if (bits != null) {
            entry.bits = bits;
        }
    }

    if (entry.bits == null) {
        // none collected
        unknownValues.put(text, text);
        return EmptyTermDocs.INSTANCE;
    } else {
        if (log.isDebugEnabled()) {
            log.debug("CachedTermDocs({},{},{}/{})", new Object[] { field, text,
                    new Integer(entry.bits.cardinality()), new Integer(reader.maxDoc()) });
        }
        return new CachedTermDocs(entry.bits);
    }
}
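
The cache entry above stores the matching document ids as a BitSet sized to reader.maxDoc(), which makes membership tests and counting cheap. A JDK-only sketch of that bitmap idea, where maxDoc and matchingDocs are illustrative stand-ins for the Lucene reader and the ids returned by TermDocs:

import java.util.BitSet;

public class DocIdBitmap {
    public static void main(String[] args) {
        int maxDoc = 1000;                       // stand-in for reader.maxDoc()
        int[] matchingDocs = { 3, 17, 512 };     // stand-in for the ids read from TermDocs

        BitSet bits = new BitSet(maxDoc);
        for (int doc : matchingDocs) {
            bits.set(doc);                       // mark each matching document id
        }

        System.out.println(bits.cardinality());  // 3 -- number of matching documents
        System.out.println(bits.get(17));        // true -- constant-time membership test
    }
}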

From source file:org.exoplatform.services.jcr.impl.core.query.lucene.TermDocsCache.java

/**
 * Returns the {@link TermDocs} for the given term.
 *
 * @param t the term.
 * @return the term docs for the given term.
 * @throws IOException if an error occurs while reading from the index.
 */
public TermDocs termDocs(final Term t) throws IOException {
    if (t == null || t.field() != field) {
        return reader.termDocs(t);
    }

    String text = t.text();
    if (unknownValues.get(text) != null) {
        log.debug("EmptyTermDocs({},{})", field, text);
        return EmptyTermDocs.INSTANCE;
    }

    // maintain cache
    CacheEntry entry;
    synchronized (cache) {
        entry = (CacheEntry) cache.get(text);
        if (entry == null) {
            // check space
            if (cache.size() >= CACHE_SIZE) {
                // prune half of them and adjust the rest
                CacheEntry[] entries = (CacheEntry[]) cache.values().toArray(new CacheEntry[cache.size()]);
                Arrays.sort(entries);
                int threshold = entries[CACHE_SIZE / 2].numAccessed;
                for (Iterator it = cache.entrySet().iterator(); it.hasNext();) {
                    Map.Entry e = (Map.Entry) it.next();
                    if (((CacheEntry) e.getValue()).numAccessed <= threshold) {
                        // prune
                        it.remove();
                    } else {
                        // adjust
                        CacheEntry ce = (CacheEntry) e.getValue();
                        ce.numAccessed = (int) Math.sqrt(ce.numAccessed);
                    }
                }
            }
            entry = new CacheEntry();
            cache.put(text, entry);
        } else {
            entry.numAccessed++;
        }
    }

    // this is a threshold to prevent caching of TermDocs
    // that are read only irregularly.
    if (entry.numAccessed < 10) {
        if (log.isDebugEnabled()) {
            log.debug("#{} TermDocs({},{})", new Object[] { new Integer(entry.numAccessed), field, text });
        }
        return reader.termDocs(t);
    }

    if (entry.bits == null) {
        // collect bits
        BitSet bits = null;
        TermDocs tDocs = reader.termDocs(t);
        try {
            while (tDocs.next()) {
                if (bits == null) {
                    bits = new BitSet(reader.maxDoc());
                }
                bits.set(tDocs.doc());
            }
        } finally {
            tDocs.close();
        }
        if (bits != null) {
            entry.bits = bits;
        }
    }

    if (entry.bits == null) {
        // none collected
        unknownValues.put(text, text);
        return EmptyTermDocs.INSTANCE;
    } else {
        if (log.isDebugEnabled()) {
            log.debug("CachedTermDocs({},{},{}/{})", new Object[] { field, text,
                    new Integer(entry.bits.cardinality()), new Integer(reader.maxDoc()) });
        }
        return new CachedTermDocs(entry.bits);
    }
}

From source file:org.apache.kylin.cube.cuboid.algorithm.generic.GeneticAlgorithm.java

protected Population initRandomPopulation(BitsChromosomeHelper helper) {
    List<Chromosome> chromosomeList = Lists.newArrayListWithCapacity(populationSize);

    while (chromosomeList.size() < populationSize) {
        BitSet bitSetForSelection = new BitSet(helper.getLength());

        // Initialize selection genes
        double totalSpace = 0;
        while (totalSpace < helper.spaceLimit) {
            int j = org.apache.commons.math3.genetics.GeneticAlgorithm.getRandomGenerator()
                    .nextInt(helper.getLength());
            if (!bitSetForSelection.get(j)) {
                totalSpace += helper.getCuboidSizeByBitIndex(j);
                bitSetForSelection.set(j);
            }
        }

        Chromosome chromosome = new BitsChromosome(bitSetForSelection, benefitPolicy.getInstance(), helper);
        chromosomeList.add(chromosome);
    }
    return new ElitisticListPopulation(chromosomeList, maxPopulationSize, 0.8);
}
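
initRandomPopulation keeps drawing random bit indices, skipping indices that are already selected, until the chosen cuboids exceed a space budget. A simplified sketch of that selection loop using java.util.Random and a plain size budget (sizes, budget and the class name are illustrative; the real code uses commons-math's random generator and BitsChromosomeHelper):

import java.util.BitSet;
import java.util.Random;

public class RandomBitSelection {
    public static void main(String[] args) {
        double[] sizes = { 1.5, 2.0, 0.5, 3.0, 1.0 };  // size of each candidate
        double budget = 4.0;                           // stop once the selection exceeds this
        Random random = new Random(42);

        BitSet selected = new BitSet(sizes.length);
        double total = 0;
        while (total < budget) {
            int j = random.nextInt(sizes.length);
            if (!selected.get(j)) {   // count each index at most once
                total += sizes[j];
                selected.set(j);
            }
        }
        System.out.println(selected + " total=" + total);
    }
}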