Example usage for java.util.BitSet: the BitSet(int nbits) constructor

Introduction

On this page you can find example usages of the java.util.BitSet constructor, collected from open-source projects.

Prototype

public BitSet(int nbits)

Document

Creates a bit set whose initial size is large enough to explicitly represent bits with indices in the range 0 through nbits-1. All of the bits are initially false.
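
For orientation, here is a minimal, self-contained sketch of the constructor (illustrative only, not taken from the projects below):

import java.util.BitSet;

public class BitSetDemo {
    public static void main(String[] args) {
        // Sized to hold bit indices 0..63; all bits start out false.
        BitSet bits = new BitSet(64);
        bits.set(3);       // set a single bit
        bits.set(10, 20);  // set the half-open range [10, 20)
        System.out.println(bits.cardinality()); // prints 11
        System.out.println(bits.get(3));        // prints true
    }
}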

Usage

From source file:hivemall.ftvec.ranking.BprSamplingUDTF.java

/**
 * Samples pairs uniformly for each user, without replacement: sample a user, then sample a pair.
 *
 * Caution: this is not a perfect without-replacement scheme, but it does sample without
 * replacement for positive feedbacks.
 */
private void uniformUserSamplingWithoutReplacement(@Nonnull final PositiveOnlyFeedback feedback,
        final int numSamples) throws HiveException {
    int numUsers = feedback.getNumUsers();
    if (numUsers == 0) {
        return;
    }
    final int maxItemId = feedback.getMaxItemId();
    if (maxItemId <= 0) {
        throw new HiveException("Invalid maxItemId: " + maxItemId);
    }
    final int numItems = maxItemId + 1;
    final BitSet userBits = new BitSet(numUsers);
    feedback.getUsers(userBits);

    final Random rand = new Random(31L);
    for (int i = 0; i < numSamples && numUsers > 0; i++) {
        int nthUser = rand.nextInt(numUsers);
        int user = BitUtils.indexOfSetBit(userBits, nthUser);
        if (user == -1) {
            throw new HiveException("Cannot find " + nthUser + "-th user among " + numUsers + " users");
        }

        IntArrayList posItems = feedback.getItems(user, true);
        assert (posItems != null) : user;
        int size = posItems.size();
        assert (size > 0) : size;
        if (size == numItems) { // cannot draw a negative item
            --i;
            continue;
        }

        int posItemIndex = rand.nextInt(size);
        int posItem = posItems.fastGet(posItemIndex);
        int negItem;
        do {
            negItem = rand.nextInt(numItems); // draw from the full item range [0, maxItemId]
        } while (posItems.contains(negItem));

        posItems.remove(posItemIndex);
        if (posItems.isEmpty()) {
            feedback.removeFeedback(user);
            userBits.clear(user);
            --numUsers;
        }

        forward(user, posItem, negItem);
    }
}
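
The helper BitUtils.indexOfSetBit above locates the nthUser-th set bit in the user BitSet. A minimal sketch of such a helper, using BitSet.nextSetBit (an assumption for illustration, not hivemall's actual implementation):

// Sketch only: returns the index of the n-th (0-based) set bit, or -1 if
// fewer than n + 1 bits are set.
static int indexOfSetBit(final BitSet bits, final int n) {
    int pos = bits.nextSetBit(0);
    for (int i = 0; i < n && pos >= 0; i++) {
        pos = bits.nextSetBit(pos + 1);
    }
    return pos;
}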

From source file:org.asoem.greyfish.utils.collect.BitString.java

/**
 * Create a random bit string of given {@code length} where each bit is set with probability {@code p}, and not set
 * with probability {@code 1-p}.
 *
 * @param length the length of the bit string
 * @param rng    the random number generator to use
 * @param p      the probability for each bit in the new bit string to hold the value 1
 * @return a new bit string
 */
public static BitString random(final int length, final RandomGenerator rng, final double p) {
    checkNotNull(rng);
    checkArgument(p >= 0 && p <= 1);
    checkArgument(length >= 0);

    if (length == 0) {
        return emptyBitSequence();
    }

    if (p == 0.5) {
        return random(length, rng); // faster
    }

    final int n;
    if (p == 0) {
        n = 0;
    } else if (p == 1) {
        n = length;
    } else {
        final BinomialDistribution binomialDistribution = new BinomialDistribution(rng, length, p);
        n = binomialDistribution.sample();
    }
    assert n >= 0 && n <= length : n;

    if (n == 0) {
        return zeros(length);
    } else if (n == length) {
        return ones(length);
    }

    final ContiguousSet<Integer> indexRange = ContiguousSet.create(Range.closedOpen(0, length),
            DiscreteDomain.integers());
    final Iterable<Integer> uniqueIndexSample = Samplings.random(rng).withoutReplacement().sample(indexRange,
            n);

    if ((double) n / length < 1.0 / 32) { // < 1 bit per word?
        return new IndexSetString(ImmutableSortedSet.copyOf(uniqueIndexSample), length);
    } else {
        final BitSet bs = new BitSet(length);
        for (Integer index : uniqueIndexSample) {
            bs.set(index, true);
        }
        return new BitSetString(bs, length);
    }
}

From source file:com.bittorrent.mpetazzoni.client.SharedTorrent.java

/**
 * Build this torrent's pieces array.
 *
 * <p>
 * Hash and verify any potentially present local data and create this
 * torrent's pieces array from their respective hash provided in the
 * torrent meta-info.
 * </p>
 *
 * <p>
 * This function should be called soon after the constructor to initialize
 * the pieces array.
 * </p>
 */
public synchronized void init() throws InterruptedException, IOException {
    if (this.isInitialized()) {
        throw new IllegalStateException("Torrent was already initialized!");
    }

    int threads = getHashingThreadsCount();
    int nPieces = (int) (Math.ceil((double) this.getSize() / this.pieceLength));
    int step = 10;

    this.pieces = new Piece[nPieces];
    this.completedPieces = new BitSet(nPieces);
    this.piecesHashes.clear();

    ExecutorService executor = Executors.newFixedThreadPool(threads);
    List<Future<Piece>> results = new LinkedList<Future<Piece>>();

    try {
        logger.info("Analyzing local data for {} with {} threads ({} pieces)...",
                new Object[] { this.getName(), threads, nPieces });
        for (int idx = 0; idx < nPieces; idx++) {
            byte[] hash = new byte[Torrent.PIECE_HASH_SIZE];
            this.piecesHashes.get(hash);

            // The last piece may be shorter than the torrent's global piece
            // length. Let's make sure we get the right piece length in any
            // situation.
            long off = ((long) idx) * this.pieceLength;
            long len = Math.min(this.bucket.size() - off, this.pieceLength);

            this.pieces[idx] = new Piece(this.bucket, idx, off, len, hash, this.isSeeder());

            Callable<Piece> hasher = new Piece.CallableHasher(this.pieces[idx]);
            results.add(executor.submit(hasher));

            if (results.size() >= threads) {
                this.validatePieces(results);
            }

            if (idx / (float) nPieces * 100f > step) {
                logger.info("  ... {}% complete", step);
                step += 10;
            }
        }

        this.validatePieces(results);
    } finally {
        // Request orderly executor shutdown and wait for hashing tasks to
        // complete.
        executor.shutdown();
        while (!executor.isTerminated()) {
            if (this.stop) {
                throw new InterruptedException("Torrent data analysis " + "interrupted.");
            }

            Thread.sleep(10);
        }
    }

    logger.debug("{}: we have {}/{} bytes ({}%) [{}/{} pieces].",
            new Object[] { this.getName(), (this.getSize() - this.left), this.getSize(),
                    String.format("%.1f", (100f * (1f - this.left / (float) this.getSize()))),
                    this.completedPieces.cardinality(), this.pieces.length });
    this.initialized = true;
}

From source file:org.docx4j.fonts.fop.fonts.truetype.TTFFile.java

private boolean readUnicodeCmap(FontFileReader in, long cmapUniOffset, int encodingID) throws IOException {
    //Read CMAP table and correct mtxTab.index
    int mtxPtr = 0;

    // Read unicode cmap
    seekTab(in, "cmap", cmapUniOffset);
    int cmapFormat = in.readTTFUShort();
    /*int cmap_length =*/ in.readTTFUShort(); //skip cmap length

    if (log.isDebugEnabled()) {
        log.debug("CMAP format: " + cmapFormat);
    }

    if (cmapFormat == 4) {
        in.skip(2); // Skip version number
        int cmapSegCountX2 = in.readTTFUShort();
        int cmapSearchRange = in.readTTFUShort();
        int cmapEntrySelector = in.readTTFUShort();
        int cmapRangeShift = in.readTTFUShort();

        if (log.isDebugEnabled()) {
            log.debug("segCountX2   : " + cmapSegCountX2);
            log.debug("searchRange  : " + cmapSearchRange);
            log.debug("entrySelector: " + cmapEntrySelector);
            log.debug("rangeShift   : " + cmapRangeShift);
        }

        int[] cmapEndCounts = new int[cmapSegCountX2 / 2];
        int[] cmapStartCounts = new int[cmapSegCountX2 / 2];
        int[] cmapDeltas = new int[cmapSegCountX2 / 2];
        int[] cmapRangeOffsets = new int[cmapSegCountX2 / 2];

        for (int i = 0; i < (cmapSegCountX2 / 2); i++) {
            cmapEndCounts[i] = in.readTTFUShort();
        }

        in.skip(2); // Skip reservedPad

        for (int i = 0; i < (cmapSegCountX2 / 2); i++) {
            cmapStartCounts[i] = in.readTTFUShort();
        }

        for (int i = 0; i < (cmapSegCountX2 / 2); i++) {
            cmapDeltas[i] = in.readTTFShort();
        }

        //int startRangeOffset = in.getCurrentPos();

        for (int i = 0; i < (cmapSegCountX2 / 2); i++) {
            cmapRangeOffsets[i] = in.readTTFUShort();
        }

        int glyphIdArrayOffset = in.getCurrentPos();

        BitSet eightBitGlyphs = new BitSet(256);

        // Insert the unicode id for the glyphs in mtxTab
        // and fill in the cmaps ArrayList

        for (int i = 0; i < cmapStartCounts.length; i++) {

            if (log.isTraceEnabled()) {
                log.trace(i + ": " + cmapStartCounts[i] + " - " + cmapEndCounts[i]);
            }
            if (log.isDebugEnabled()) {
                if (isInPrivateUseArea(cmapStartCounts[i], cmapEndCounts[i])) {
                    log.debug("Font contains glyphs in the Unicode private use area:"
                            + Integer.toHexString(cmapStartCounts[i]) + " - "
                            + Integer.toHexString(cmapEndCounts[i]));
                }
            }

            for (int j = cmapStartCounts[i]; j <= cmapEndCounts[i]; j++) {

                // Update lastChar
                if (j < 256 && j > lastChar) {
                    lastChar = (short) j;
                }

                if (j < 256) {
                    eightBitGlyphs.set(j);
                }

                if (mtxPtr < mtxTab.length) {
                    int glyphIdx;
                    // the last character 65535 = .notdef
                    // may have a range offset
                    if (cmapRangeOffsets[i] != 0 && j != 65535) {
                        int glyphOffset = glyphIdArrayOffset + ((cmapRangeOffsets[i] / 2)
                                + (j - cmapStartCounts[i]) + (i) - cmapSegCountX2 / 2) * 2;
                        in.seekSet(glyphOffset);
                        glyphIdx = (in.readTTFUShort() + cmapDeltas[i]) & 0xffff;

                        unicodeMapping.add(new UnicodeMapping(glyphIdx, j));
                        mtxTab[glyphIdx].getUnicodeIndex().add(new Integer(j));

                        if (encodingID == 0 && j >= 0xF020 && j <= 0xF0FF) {
                            //Experimental: Mapping 0xF020-0xF0FF to 0x0020-0x00FF
                            //Tested with Wingdings and Symbol TTF fonts which map their
                            //glyphs in the region 0xF020-0xF0FF.
                            int mapped = j - 0xF000;
                            if (!eightBitGlyphs.get(mapped)) {
                                //Only map if Unicode code point hasn't been mapped before
                                unicodeMapping.add(new UnicodeMapping(glyphIdx, mapped));
                                mtxTab[glyphIdx].getUnicodeIndex().add(new Integer(mapped));
                            }
                        }

                        // Also add winAnsiWidth
                        List v = (List) ansiIndex.get(new Integer(j));
                        if (v != null) {
                            Iterator e = v.listIterator();
                            while (e.hasNext()) {
                                Integer aIdx = (Integer) e.next();
                                ansiWidth[aIdx.intValue()] = mtxTab[glyphIdx].getWx();

                                if (log.isTraceEnabled()) {
                                    log.trace("Added width " + mtxTab[glyphIdx].getWx() + " uni: " + j
                                            + " ansi: " + aIdx.intValue());
                                }
                            }
                        }

                        if (log.isTraceEnabled()) {
                            log.trace("Idx: " + glyphIdx + " Delta: " + cmapDeltas[i] + " Unicode: " + j
                                    + " name: " + mtxTab[glyphIdx].getName());
                        }
                    } else {
                        glyphIdx = (j + cmapDeltas[i]) & 0xffff;

                        if (glyphIdx < mtxTab.length) {
                            mtxTab[glyphIdx].getUnicodeIndex().add(new Integer(j));
                        } else {
                            log.debug("Glyph " + glyphIdx + " out of range: " + mtxTab.length);
                        }

                        unicodeMapping.add(new UnicodeMapping(glyphIdx, j));
                        if (glyphIdx < mtxTab.length) {
                            mtxTab[glyphIdx].getUnicodeIndex().add(new Integer(j));
                        } else {
                            log.debug("Glyph " + glyphIdx + " out of range: " + mtxTab.length);
                        }

                        // Also add winAnsiWidth
                        List v = (List) ansiIndex.get(new Integer(j));
                        if (v != null) {
                            Iterator e = v.listIterator();
                            while (e.hasNext()) {
                                Integer aIdx = (Integer) e.next();
                                ansiWidth[aIdx.intValue()] = mtxTab[glyphIdx].getWx();
                            }
                        }

                        //getLogger().debug("IIdx: " +
                        //    mtxPtr +
                        //    " Delta: " + cmap_deltas[i] +
                        //    " Unicode: " + j +
                        //    " name: " +
                        //    mtxTab[(j+cmap_deltas[i]) & 0xffff].name);

                    }
                    if (glyphIdx < mtxTab.length) {
                        if (mtxTab[glyphIdx].getUnicodeIndex().size() < 2) {
                            mtxPtr++;
                        }
                    }
                }
            }
        }
    } else {
        log.error("Cmap format not supported: " + cmapFormat);
        return false;
    }
    return true;
}

From source file:com.p2p.peercds.client.SharedTorrent.java

/**
 * Build this torrent's pieces array.
 *
 * <p>
 * Hash and verify any potentially present local data and create this
 * torrent's pieces array from their respective hash provided in the
 * torrent meta-info.
 * </p>
 *
 * <p>
 * This function should be called soon after the constructor to initialize
 * the pieces array.
 * </p>
 */
public synchronized void init() throws InterruptedException, IOException {
    //      if (this.isInitialized()) {
    //         throw new IllegalStateException("Torrent was already initialized!");
    //      }

    int threads = getHashingThreadsCount();
    int nPieces = (int) (Math.ceil((double) this.getSize() / this.pieceLength));
    int step = 10;

    this.pieces = new Piece[nPieces];
    this.completedPieces = new BitSet(nPieces);
    this.piecesHashes.clear();
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    List<Future<Piece>> results = new LinkedList<Future<Piece>>();

    try {
        logger.info("Analyzing local data for {} with {} threads ({} pieces)...",
                new Object[] { this.getName(), threads, nPieces });
        for (int idx = 0; idx < nPieces; idx++) {
            byte[] hash = new byte[PIECE_HASH_SIZE];
            this.piecesHashes.get(hash);

            // The last piece may be shorter than the torrent's global piece
            // length. Let's make sure we get the right piece length in any
            // situation.
            long off = ((long) idx) * this.pieceLength;
            long len = Math.min(this.bucket.size() - off, this.pieceLength);

            this.pieces[idx] = new Piece(this.bucket, idx, off, len, hash, this.isSeeder());

            Callable<Piece> hasher = new Piece.CallableHasher(this.pieces[idx]);
            results.add(executor.submit(hasher));

            if (results.size() >= threads) {
                this.validatePieces(results);
            }

            if (idx / (float) nPieces * 100f > step) {
                logger.info("  ... {}% complete", step);
                step += 10;
            }
        }

        this.validatePieces(results);
    } finally {
        // Request orderly executor shutdown and wait for hashing tasks to
        // complete.
        executor.shutdown();
        while (!executor.isTerminated()) {
            if (this.stop) {
                throw new InterruptedException("Torrent data analysis " + "interrupted.");
            }

            Thread.sleep(10);
        }
    }

    logger.debug("{}: we have {}/{} bytes ({}%) [{}/{} pieces].",
            new Object[] { this.getName(), (this.getSize() - this.left), this.getSize(),
                    String.format("%.1f", (100f * (1f - this.left / (float) this.getSize()))),
                    this.completedPieces.cardinality(), this.pieces.length });
    this.initialized = true;
}

From source file:org.apache.hadoop.hdfs.TestReconstructStripedFile.java

/**
 * Test the file blocks reconstruction.
 * 1. Check the replica is reconstructed in the target datanode,
 *    and verify the block replica length, generationStamp and content.
 * 2. Read the file and verify content.
 */
private void assertFileBlocksReconstruction(String fileName, int fileLen, ReconstructionType type,
        int toRecoverBlockNum) throws Exception {
    if (toRecoverBlockNum < 1 || toRecoverBlockNum > parityBlkNum) {
        Assert.fail("toRecoverBlockNum should be between 1 ~ " + parityBlkNum);
    }
    assertTrue("File length must be positive.", fileLen > 0);

    Path file = new Path(fileName);

    final byte[] data = new byte[fileLen];
    Arrays.fill(data, (byte) 1);
    DFSTestUtil.writeFile(fs, file, data);
    StripedFileTestUtil.waitBlockGroupsReported(fs, fileName);

    LocatedBlocks locatedBlocks = StripedFileTestUtil.getLocatedBlocks(file, fs);
    assertEquals(locatedBlocks.getFileLength(), fileLen);

    LocatedStripedBlock lastBlock = (LocatedStripedBlock) locatedBlocks.getLastLocatedBlock();

    DatanodeInfo[] storageInfos = lastBlock.getLocations();
    byte[] indices = lastBlock.getBlockIndices();

    BitSet bitset = new BitSet(dnNum);
    for (DatanodeInfo storageInfo : storageInfos) {
        bitset.set(dnMap.get(storageInfo));
    }

    int[] dead = generateDeadDnIndices(type, toRecoverBlockNum, indices);
    LOG.info("Note: indices == " + Arrays.toString(indices) + ". Generate errors on datanodes: "
            + Arrays.toString(dead));

    DatanodeInfo[] dataDNs = new DatanodeInfo[toRecoverBlockNum];
    int[] deadDnIndices = new int[toRecoverBlockNum];
    ExtendedBlock[] blocks = new ExtendedBlock[toRecoverBlockNum];
    File[] replicas = new File[toRecoverBlockNum];
    long[] replicaLengths = new long[toRecoverBlockNum];
    File[] metadatas = new File[toRecoverBlockNum];
    byte[][] replicaContents = new byte[toRecoverBlockNum][];
    Map<ExtendedBlock, DataNode> errorMap = new HashMap<>(dead.length);
    for (int i = 0; i < toRecoverBlockNum; i++) {
        dataDNs[i] = storageInfos[dead[i]];
        deadDnIndices[i] = dnMap.get(dataDNs[i]);

        // Check the block replica file on deadDn before it dead.
        blocks[i] = StripedBlockUtil.constructInternalBlock(lastBlock.getBlock(), cellSize, dataBlkNum,
                indices[dead[i]]);
        errorMap.put(blocks[i], cluster.getDataNodes().get(deadDnIndices[i]));
        replicas[i] = cluster.getBlockFile(deadDnIndices[i], blocks[i]);
        replicaLengths[i] = replicas[i].length();
        metadatas[i] = cluster.getBlockMetadataFile(deadDnIndices[i], blocks[i]);
        // the block replica on the datanode should be the same as expected
        assertEquals(replicaLengths[i], StripedBlockUtil.getInternalBlockLength(lastBlock.getBlockSize(),
                cellSize, dataBlkNum, indices[dead[i]]));
        assertTrue(metadatas[i].getName().endsWith(blocks[i].getGenerationStamp() + ".meta"));
        LOG.info("replica " + i + " locates in file: " + replicas[i]);
        replicaContents[i] = DFSTestUtil.readFileAsBytes(replicas[i]);
    }

    int lastGroupDataLen = fileLen % (dataBlkNum * blockSize);
    int lastGroupNumBlk = lastGroupDataLen == 0 ? dataBlkNum
            : Math.min(dataBlkNum, ((lastGroupDataLen - 1) / cellSize + 1));
    int groupSize = lastGroupNumBlk + parityBlkNum;

    // shutdown datanodes or generate corruption
    int stoppedDN = generateErrors(errorMap, type);

    // Check the locatedBlocks of the file again
    locatedBlocks = StripedFileTestUtil.getLocatedBlocks(file, fs);
    lastBlock = (LocatedStripedBlock) locatedBlocks.getLastLocatedBlock();
    storageInfos = lastBlock.getLocations();
    assertEquals(storageInfos.length, groupSize - stoppedDN);

    int[] targetDNs = new int[dnNum - groupSize];
    int n = 0;
    for (int i = 0; i < dnNum; i++) {
        if (!bitset.get(i)) { // not contain replica of the block.
            targetDNs[n++] = i;
        }
    }

    StripedFileTestUtil.waitForReconstructionFinished(file, fs, groupSize);

    targetDNs = sortTargetsByReplicas(blocks, targetDNs);

    // Check the replica on the new target node.
    for (int i = 0; i < toRecoverBlockNum; i++) {
        File replicaAfterReconstruction = cluster.getBlockFile(targetDNs[i], blocks[i]);
        LOG.info("replica after reconstruction " + replicaAfterReconstruction);
        File metadataAfterReconstruction = cluster.getBlockMetadataFile(targetDNs[i], blocks[i]);
        assertEquals(replicaLengths[i], replicaAfterReconstruction.length());
        LOG.info("replica before " + replicas[i]);
        assertTrue(metadataAfterReconstruction.getName().endsWith(blocks[i].getGenerationStamp() + ".meta"));
        byte[] replicaContentAfterReconstruction = DFSTestUtil.readFileAsBytes(replicaAfterReconstruction);

        Assert.assertArrayEquals(replicaContents[i], replicaContentAfterReconstruction);
    }
}
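
In this test the BitSet marks datanodes that already hold a replica of the block, so the clear bits identify the reconstruction targets. The same scan over clear bits can also be written with BitSet.nextClearBit (a sketch, not the test's code):

// nextClearBit never returns -1, so bound the scan by dnNum.
int[] targets = new int[dnNum - groupSize];
int n = 0;
for (int i = bitset.nextClearBit(0); i < dnNum && n < targets.length; i = bitset.nextClearBit(i + 1)) {
    targets[n++] = i;
}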

From source file:org.lockss.util.NumberUtil.java

/**
 * Construct an alphabetical (base-26) sequence by incrementing the first
 * string alphabetically until it reaches the second string. The start string
 * is incremented by the given delta; if the delta does not divide evenly into
 * the base-26 distance between the start and end strings, an exception is
 * thrown. The strings must also be the same length.
 * <p>
 * The string is lower cased before the increment is applied, and then each
 * character position that was upper case in the original string is upper
 * cased in the resulting string. It is assumed that the two strings are
 * capitalised in the same pattern. An exception will be thrown if any
 * character is outside of a-z after lower casing.
 *
 * @param start an alphabetical string (case-insensitive)
 * @param end an alphabetical string (case-insensitive)
 * @param delta the increment between strings in the sequence; can be negative
 * @return a list of strings representing a sequence from <tt>start</tt> to <tt>end</tt>
 * @throws IllegalArgumentException if the delta does not divide into the gap or the strings are different lengths
 */
public static List<String> constructAlphabeticSequence(final String start, final String end, int delta)
        throws IllegalArgumentException {

    // Ensure the delta is non-zero
    if (delta == 0)
        throw new IllegalArgumentException("Delta cannot be 0.");

    // If the strings are equal, the sequence will be the single string
    if (start.equals(end))
        return new ArrayList<String>() {
            {
                add(start);
            }
        };

    // Check the string lengths are the same
    if (start.length() != end.length())
        throw new IllegalArgumentException(
                String.format("Start and end strings are different lengths: %s %s.", start, end));

    // Find the integer distance
    int distance = Math.abs(fromBase26(start) - fromBase26(end));
    //int distance = StringUtils.getLevenshteinDistance(start, end);
    // Check the delta divides into the gap
    if (distance % delta != 0) {
        throw new IllegalArgumentException(String.format(
                "The distance %s between start and end must be " + "divisible by delta %s.", distance, delta));
    }

    // Track the case of each character, so we can reset them before returning
    BitSet cases = new BitSet(start.length());
    for (int i = 0; i < start.length(); i++) {
        cases.set(i, Character.isUpperCase(start.charAt(i)));
    }

    // Increment alphabetically
    List<String> seq = new ArrayList<String>();
    int[] nums = constructSequence(fromBase26(start), fromBase26(end), delta);
    for (int i = 0; i < nums.length; i++) {
        String s = toBase26(nums[i]);
        // Pad the string to the correct length with 'a'
        s = StringUtils.leftPad(s, start.length(), 'a');
        // Re-case the chars
        char[] carr = s.toCharArray();
        for (int pos = 0; pos < cases.length(); pos++) {
            if (cases.get(pos))
                carr[pos] = Character.toUpperCase(carr[pos]);
        }
        seq.add(new String(carr));
    }
    return seq;
}
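
A hypothetical call, assuming fromBase26 maps "a" to 0 so that "aa", "ac" and "ae" correspond to 0, 2 and 4:

List<String> seq = NumberUtil.constructAlphabeticSequence("aa", "ae", 2);
// seq is ["aa", "ac", "ae"]; the BitSet of upper-case positions is empty here,
// so no characters are re-capitalised.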

From source file:android.support.v7.widget.StaggeredGridLayoutManager2.java

/**
 * Checks for gaps if we've reached to the top of the list.
 * <p>
 * Intermediate gaps created by full span items are tracked via mLaidOutInvalidFullSpan field.
 */
View hasGapsToFix() {
    int startChildIndex = 0;
    int endChildIndex = getChildCount() - 1;
    BitSet mSpansToCheck = new BitSet(mSpanCount);
    mSpansToCheck.set(0, mSpanCount, true);

    final int firstChildIndex, childLimit;
    final int preferredSpanDir = mOrientation == VERTICAL && isLayoutRTL() ? 1 : -1;

    if (mShouldReverseLayout) {
        firstChildIndex = endChildIndex - 1;
        childLimit = startChildIndex - 1;
    } else {
        firstChildIndex = startChildIndex;
        childLimit = endChildIndex;
    }
    final int nextChildDiff = firstChildIndex < childLimit ? 1 : -1;
    for (int i = firstChildIndex; i != childLimit; i += nextChildDiff) {
        View child = getChildAt(i);
        LayoutParams lp = (LayoutParams) child.getLayoutParams();
        if (mSpansToCheck.get(lp.mSpan.mIndex)) {
            if (checkSpanForGap(lp.mSpan)) {
                return child;
            }
            mSpansToCheck.clear(lp.mSpan.mIndex);
        }
        if (lp.mFullSpan) {
            continue; // quick reject
        }

        if (i + nextChildDiff != childLimit) {
            View nextChild = getChildAt(i + nextChildDiff);
            boolean compareSpans = false;
            if (mShouldReverseLayout) {
                // ensure child's end is below nextChild's end
                int myEnd = mPrimaryOrientation.getDecoratedEnd(child);
                int nextEnd = mPrimaryOrientation.getDecoratedEnd(nextChild);
                if (myEnd < nextEnd) {
                    return child;//i should have a better position
                } else if (myEnd == nextEnd) {
                    compareSpans = true;
                }
            } else {
                int myStart = mPrimaryOrientation.getDecoratedStart(child);
                int nextStart = mPrimaryOrientation.getDecoratedStart(nextChild);
                if (myStart > nextStart) {
                    return child;//i should have a better position
                } else if (myStart == nextStart) {
                    compareSpans = true;
                }
            }
            if (compareSpans) {
                // equal, check span indices.
                LayoutParams nextLp = (LayoutParams) nextChild.getLayoutParams();
                if (lp.mSpan.mIndex - nextLp.mSpan.mIndex < 0 != preferredSpanDir < 0) {
                    return child;
                }
            }
        }
    }
    // everything looks good
    return null;
}
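
The BitSet here guarantees that each span is gap-checked at most once: every span index starts flagged, and a span's bit is cleared after its first child has been examined. The idiom in isolation, with hypothetical spanOf/checkSpan helpers:

BitSet pending = new BitSet(spanCount);
pending.set(0, spanCount); // flag every span as unchecked
for (int i = 0; i < childCount && !pending.isEmpty(); i++) {
    int span = spanOf(i);    // hypothetical: span index of child i
    if (pending.get(span)) {
        checkSpan(span);     // hypothetical: per-span work, done once
        pending.clear(span);
    }
}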

From source file:de.unijena.bioinf.FragmentationTreeConstruction.computation.FragmentationPatternAnalysis.java

/**
 * Step 3. Normalizing
 * Merge all peaks within a single spectrum
 * Return a list of peaks (from all spectra) with relative intensities
 */
public ProcessedInput performNormalization(ProcessedInput input) {
    final Ms2Experiment experiment = input.getExperimentInformation();
    final double parentMass = experiment.getIonMass();
    final ArrayList<ProcessedPeak> peaklist = new ArrayList<ProcessedPeak>(100);
    final Deviation mergeWindow = getDefaultProfile().getAllowedMassDeviation().divide(2d);
    final Ionization ion = experiment.getPrecursorIonType().getIonization();
    double globalMaxIntensity = 0d;
    for (Ms2Spectrum s : experiment.getMs2Spectra()) {
        // merge peaks: iterate them from highest to lowest intensity and remove peaks which
        // are in the mass range of a high intensive peak
        final MutableSpectrum<Peak> sortedByIntensity = new SimpleMutableSpectrum(s);
        Spectrums.sortSpectrumByDescendingIntensity(sortedByIntensity);
        // simple spectra are always ordered by mass
        final SimpleSpectrum sortedByMass = new SimpleSpectrum(s);
        final BitSet deletedPeaks = new BitSet(s.size());
        for (int i = 0; i < s.size(); ++i) {
            // get index of peak in mass-ordered spectrum
            final double mz = sortedByIntensity.getMzAt(i);
            final int index = Spectrums.binarySearch(sortedByMass, mz);
            assert index >= 0;
            if (deletedPeaks.get(index))
                continue; // peak is already deleted
            // delete all peaks within the mass range
            for (int j = index - 1; j >= 0 && mergeWindow.inErrorWindow(mz, sortedByMass.getMzAt(j)); --j)
                deletedPeaks.set(j, true);
            for (int j = index + 1; j < s.size() && mergeWindow.inErrorWindow(mz, sortedByMass.getMzAt(j)); ++j)
                deletedPeaks.set(j, true);
        }
        final int offset = peaklist.size();
        // add all remaining peaks to the peaklist
        for (int i = 0; i < s.size(); ++i) {
            if (!deletedPeaks.get(i)) {
                final ProcessedPeak propeak = new ProcessedPeak(
                        new MS2Peak(s, sortedByMass.getMzAt(i), sortedByMass.getIntensityAt(i)));
                propeak.setIon(ion);
                peaklist.add(propeak);

            }
        }
        // now performNormalization spectrum. Ignore peaks near to the parent peak
        final double lowerbound = parentMass - 0.1d;
        double scale = 0d;
        for (int i = offset; i < peaklist.size() && peaklist.get(i).getMz() < lowerbound; ++i) {
            scale = Math.max(scale, peaklist.get(i).getIntensity());
        }
        if (scale == 0)
            scale = peaklist.get(0).getIntensity(); // happens for spectra with only one peak
        // now set local relative intensities
        for (int i = offset; i < peaklist.size(); ++i) {
            final ProcessedPeak peak = peaklist.get(i);
            peak.setLocalRelativeIntensity(peak.getIntensity() / scale);
        }
        // and adjust global relative intensity
        globalMaxIntensity = Math.max(globalMaxIntensity, scale);
    }
    // now calculate global normalized intensities
    for (ProcessedPeak peak : peaklist) {
        peak.setGlobalRelativeIntensity(peak.getIntensity() / globalMaxIntensity);
        peak.setRelativeIntensity(
                normalizationType == NormalizationType.GLOBAL ? peak.getGlobalRelativeIntensity()
                        : peak.getLocalRelativeIntensity());
    }
    // finished!
    input.setMergedPeaks(peaklist);

    // postprocess
    postProcess(PostProcessor.Stage.AFTER_NORMALIZING, input);
    return input;
}
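
The deletedPeaks BitSet implements mark-then-sweep deletion: peaks are flagged during the intensity-ordered pass and skipped when the survivors are copied, which avoids repeated removals from a list. The idiom in isolation (a sketch, not part of the project above):

static <T> List<T> sweep(List<T> items, BitSet deleted) {
    List<T> kept = new ArrayList<T>(items.size() - deleted.cardinality());
    for (int i = 0; i < items.size(); i++) {
        if (!deleted.get(i)) {
            kept.add(items.get(i));
        }
    }
    return kept;
}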

From source file:org.intermine.bio.postprocess.CreateIntronFeaturesProcess.java

/**
 * Create a set of Intron objects that don't overlap the Locations
 * in the locationSet argument.  The caller must call ObjectStoreWriter.store() on the
 * Intron, its chromosomeLocation and the synonym in the synonyms collection.
 * @param locationSet a set of Locations for the exons on a particular transcript
 * @param transcript Transcript that the Locations refer to
 * @param tranLoc The Location of the Transcript
 * @param gene gene for the transcript
 * @return the number of Intron objects created
 * @throws ObjectStoreException if there is an ObjectStore problem
 */
protected int createIntronFeatures(Set<Location> locationSet, SequenceFeature transcript, Location tranLoc,
        Gene gene) throws ObjectStoreException {
    if (locationSet.size() == 1 || tranLoc == null || transcript == null || transcript.getLength() == null) {
        return 0;
    }

    final BitSet bs = new BitSet(transcript.getLength().intValue());
    Chromosome chr = transcript.getChromosome();

    int tranStart = tranLoc.getStart().intValue();

    for (Location location : locationSet) {
        bs.set(location.getStart().intValue() - tranStart, (location.getEnd().intValue() - tranStart) + 1);
    }

    int prevEndPos = 0;
    int intronCount = 0;
    while (prevEndPos != -1) {
        intronCount++;
        int nextIntronStart = bs.nextClearBit(prevEndPos + 1);
        int intronEnd;
        int nextSetBit = bs.nextSetBit(nextIntronStart);

        if (nextSetBit == -1) {
            intronEnd = transcript.getLength().intValue();
        } else {
            intronEnd = nextSetBit - 1;
        }

        if (nextSetBit == -1 || intronCount == (locationSet.size() - 1)) {
            prevEndPos = -1;
        } else {
            prevEndPos = intronEnd;
        }

        int newLocStart = nextIntronStart + tranStart;
        int newLocEnd = intronEnd + tranStart;

        String identifier = "intron_chr" + chr.getPrimaryIdentifier() + "_" + Integer.toString(newLocStart)
                + ".." + Integer.toString(newLocEnd);

        if (intronMap.get(identifier) == null) {
            Class<?> intronCls = model.getClassDescriptorByName("Intron").getType();
            Intron intron = (Intron) DynamicUtil.createObject(Collections.singleton(intronCls));
            Location location = (Location) DynamicUtil.createObject(Collections.singleton(Location.class));

            intron.setChromosome(chr);
            intron.setOrganism(chr.getOrganism());
            intron.addDataSets(dataSet);
            intron.setPrimaryIdentifier(identifier);
            intron.setGenes(Collections.singleton(gene));

            location.setStart(new Integer(newLocStart));
            location.setEnd(new Integer(newLocEnd));
            location.setStrand(tranLoc.getStrand());
            location.setFeature(intron);
            location.setLocatedOn(transcript);
            location.addDataSets(dataSet);

            intron.setChromosomeLocation(location);
            osw.store(location);

            int length = location.getEnd().intValue() - location.getStart().intValue() + 1;
            intron.setLength(new Integer(length));
            addToIntronTranscripts(intron, transcript);
            intronMap.put(identifier, intron);
        } else {
            SequenceFeature intron = intronMap.get(identifier);
            addToIntronTranscripts(intron, transcript);
            intronMap.put(identifier, intron);
        }
    }
    return intronCount;
}
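
The method above uses the BitSet as an occupancy mask over the transcript: exon ranges are set, and nextClearBit/nextSetBit walk the clear runs (the introns) between them. The core idiom in minimal form:

// Set bits mark exon-covered positions; clear runs between them are introns.
BitSet exons = new BitSet(20);
exons.set(0, 5);    // exon covering positions 0..4
exons.set(10, 15);  // exon covering positions 10..14
int intronStart = exons.nextClearBit(5);           // 5
int intronEnd = exons.nextSetBit(intronStart) - 1; // 9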