Example usage for java.lang.Long.numberOfLeadingZeros

List of usage examples for java.lang.Long.numberOfLeadingZeros

Introduction

On this page you can find example usages of java.lang.Long.numberOfLeadingZeros.

Prototype

@HotSpotIntrinsicCandidate
public static int numberOfLeadingZeros(long i) 

Document

Returns the number of zero bits preceding the highest-order ("leftmost") one-bit in the two's complement binary representation of the specified long value.
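
As a quick illustration of those semantics (a minimal, self-contained demo; the printed values follow directly from the documented behavior, including the two corner cases: 0 has no one-bits, and a negative value has its sign bit set):

public class NumberOfLeadingZerosDemo {
    public static void main(String[] args) {
        System.out.println(Long.numberOfLeadingZeros(0L));             // 64: no one-bits at all
        System.out.println(Long.numberOfLeadingZeros(1L));             // 63: only the lowest bit is set
        System.out.println(Long.numberOfLeadingZeros(256L));           // 55: highest one-bit at position 8
        System.out.println(Long.numberOfLeadingZeros(Long.MAX_VALUE)); // 1: all bits but the sign bit set
        System.out.println(Long.numberOfLeadingZeros(-1L));            // 0: the sign bit itself is set
    }
}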

Usage

From source file:com.google.uzaygezen.core.LongArrayBitVector.java

@Override
public int length() {
    int i;
    // Skip trailing zero words to find the highest non-zero word.
    for (i = data.length; --i >= 0 && data[i] == 0L;)
        ;
    if (i < 0) {
        return 0;
    }
    // All bits in the full words below, plus the bit length of the top word.
    return WORD * (i + 1) - Long.numberOfLeadingZeros(data[i]);
}
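
The return expression is the standard bit-length idiom: 64 - Long.numberOfLeadingZeros(x) is the position of the highest set bit plus one (WORD here is presumably the 64-bit word width). A standalone sketch of the idiom (an illustrative helper, not part of LongArrayBitVector):

// Bit length of a long: 0 for x == 0, otherwise floor(log2(x)) + 1.
static int bitLength(long x) {
    return 64 - Long.numberOfLeadingZeros(x);
}
// bitLength(0L) == 0, bitLength(5L) == 3, bitLength(256L) == 9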

From source file:com.dal.vv.type.AbstractValue.java

@SuppressWarnings("fallthrough")
private Value loadNumber(long data) {
    clear();
    // Significant magnitude bits: drop the leading zeros, and for
    // negative values drop the sign bit as well.
    int actualSize = 64 - Long.numberOfLeadingZeros(Math.abs(data)) - (data < 0 ? 1 : 0);
    int availableBits = size.getBits();
    if (actualSize > availableBits) {
        if (Size.variable == size) {
            value = new byte[(1 + actualSize) / 8];
        } else {
            throw new OverflowException("Unable to load[" + data + "] " + actualSize + " bits only "
                    + availableBits + " available");
        }
    }

    int avail = size.getBytes();

    if (Size.variable == size) {
        avail = value.length;
    }

    // Intentional fall-through: each case writes the bytes for its width,
    // then falls into the narrower cases below it.
    switch (avail) {
    case 8:
        value[7] = (byte) ((data >> 56) & 0xFF);
        value[6] = (byte) ((data >> 48) & 0xFF);
        value[5] = (byte) ((data >> 40) & 0xFF);
        value[4] = (byte) ((data >> 32) & 0xFF);
    case 7:
    case 6:
    case 5:
    case 4:
        value[3] = (byte) ((data >> 24) & 0xFF);
        value[2] = (byte) ((data >> 16) & 0xFF);
    case 3:
    case 2:
        value[1] = (byte) ((data >> 8) & 0xFF);
    case 1:
        value[0] = (byte) (data & 0xFF);
    default:
    }

    return this;
}
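
The actualSize expression counts magnitude bits: leading zeros are dropped, and for negative inputs the sign bit is dropped as well. A sketch of the same expression in isolation (note that Math.abs overflows for Long.MIN_VALUE, so that extreme deserves care):

// Significant magnitude bits, as computed in loadNumber above.
static int magnitudeBits(long data) {
    return 64 - Long.numberOfLeadingZeros(Math.abs(data)) - (data < 0 ? 1 : 0);
}
// magnitudeBits(127L) == 7, magnitudeBits(128L) == 8, magnitudeBits(-128L) == 7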

From source file:com.google.uzaygezen.core.LongBitVector.java

public void copyFrom(long value) {
    // The value fits iff its bit length does not exceed this vector's size.
    Preconditions.checkArgument(64 - Long.numberOfLeadingZeros(value) <= size, "value doesn't fit");
    data = value;
}
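
For non-negative values and size < 64, this guard is equivalent to checking value < 2^size. A small sketch of the equivalence (illustrative helpers, not part of LongBitVector):

// Both accept exactly the values representable in 'size' bits,
// assuming 0 <= value and 0 < size < 64.
static boolean fitsViaNlz(long value, int size) {
    return 64 - Long.numberOfLeadingZeros(value) <= size;
}
static boolean fitsViaShift(long value, int size) {
    return value < (1L << size);
}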

From source file:com.google.uzaygezen.core.BitSetBackedBitVector.java

@Override
public void copyFrom(long[] array) {
    // Number of 64-bit words needed to hold 'size' bits.
    int len = (size + 63) >>> 6;
    Preconditions.checkArgument(array.length == len, "Length must be %s.", len);
    if (size == 0) {
        return;
    }
    // The top word may only use its low (size - 64 * (len - 1)) bits,
    // i.e. it must have at least (len << 6) - size leading zeros.
    Preconditions.checkArgument(Long.numberOfLeadingZeros(array[len - 1]) >= (len << 6) - size,
            "Some bit positions are too high.");
    BitSet bs = BitSet.valueOf(array);
    bitset.clear();
    bitset.or(bs);
}
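
The leading-zeros check rejects any set bit at position size or above in the top word. A worked example (values chosen for illustration):

// With size == 70 and len == 2, the top word may only use its low 6
// bits, so at least (2 << 6) - 70 == 58 leading zeros are required.
long[] ok  = { -1L, 0x3FL }; // bits 0..69 set: nlz(0x3F) == 58 >= 58, accepted
long[] bad = { -1L, 0x40L }; // bit 70 set:     nlz(0x40) == 57 <  58, rejected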

From source file:com.google.uzaygezen.core.LongArrayBitVector.java

@Override
public void copyFrom(long d) {
    // d must fit in this vector's 'size' bits (WORD is the word width).
    Preconditions.checkArgument(WORD - Long.numberOfLeadingZeros(d) <= size);
    clear();
    if (size > 0) {
        data[0] = d;
    }
}

From source file:org.apache.flink.graph.generator.RMatGraph.java

@Override
public Graph<LongValue, NullValue, NullValue> generate() {
    // Smallest scale such that 2^scale >= vertexCount.
    int scale = Long.SIZE - Long.numberOfLeadingZeros(vertexCount - 1);

    // Edges
    int cyclesPerEdge = noiseEnabled ? 5 * scale : scale;

    List<BlockInfo<T>> generatorBlocks = randomGenerableFactory.getRandomGenerables(edgeCount, cyclesPerEdge);

    DataSet<Edge<LongValue, NullValue>> edges = env.fromCollection(generatorBlocks).name("Random generators")
            .rebalance().setParallelism(parallelism).name("Rebalance")
            .flatMap(new GenerateEdges<T>(vertexCount, scale, A, B, C, noiseEnabled, noise))
            .setParallelism(parallelism).name("RMat graph edges");

    // Vertices
    DataSet<Vertex<LongValue, NullValue>> vertices = GraphGeneratorUtils.vertexSet(edges, parallelism);

    // Graph
    return Graph.fromDataSet(vertices, edges, env);
}
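
The scale computation is the usual ceiling-of-log2 idiom for a positive count. A standalone sketch (an illustrative helper, not Flink API):

// Smallest s such that 2^s >= n, for n >= 1; returns 0 for n == 1.
static int ceilLog2(long n) {
    return Long.SIZE - Long.numberOfLeadingZeros(n - 1);
}
// ceilLog2(8) == 3, ceilLog2(9) == 4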

From source file:org.apache.geode.internal.cache.Oplog.java

/**
 * Return the number of bytes needed to encode the given long. Value
 * returned will be >= 1 and <= 8.
 */
static int bytesNeeded(long v) {
    if (v < 0) {
        // One's complement: a negative value needs as many magnitude
        // bits as its complement.
        v = ~v;
    }
    // One byte per full group of 8 magnitude bits, plus one more byte
    // that leaves room for the remaining bits and the sign bit.
    return ((64 - Long.numberOfLeadingZeros(v)) / 8) + 1;
}
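
A few spot checks, each easy to verify from the leading-zero count:

// bytesNeeded(0L)   == 1  (no magnitude bits; one byte still needed)
// bytesNeeded(127L) == 1  (7 magnitude bits fit alongside the sign bit)
// bytesNeeded(128L) == 2  (8 magnitude bits push past one byte)
// bytesNeeded(-1L)  == 1  (~(-1L) == 0L)
// bytesNeeded(Long.MAX_VALUE) == 8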

From source file:org.apache.hadoop.hive.llap.cache.BuddyAllocator.java

@VisibleForTesting
public BuddyAllocator(boolean isDirectVal, boolean isMappedVal, int minAllocVal, int maxAllocVal,
        int arenaCount, long maxSizeVal, long defragHeadroom, String mapPath, MemoryManager memoryManager,
        LlapDaemonCacheMetrics metrics, String discardMethod) {
    isDirect = isDirectVal;
    isMapped = isMappedVal;
    minAllocation = minAllocVal;
    maxAllocation = maxAllocVal;
    if (isMapped) {
        try {
            cacheDir = Files.createTempDirectory(FileSystems.getDefault().getPath(mapPath), "llap-", RWX);
        } catch (IOException ioe) {
            // conf validator already checks this, so it will never trigger usually
            throw new AssertionError("Configured mmap directory should be writable", ioe);
        }
    } else {
        cacheDir = null;
    }

    arenaSize = validateAndDetermineArenaSize(arenaCount, maxSizeVal);
    maxSize = validateAndDetermineMaxSize(maxSizeVal);
    memoryManager.updateMaxSize(determineMaxMmSize(defragHeadroom, maxSize));

    // Floor of log2, via the leading-zero count; minAllocation,
    // maxAllocation and arenaSize are expected to be powers of two here.
    minAllocLog2 = 31 - Integer.numberOfLeadingZeros(minAllocation);
    maxAllocLog2 = 31 - Integer.numberOfLeadingZeros(maxAllocation);
    arenaSizeLog2 = 63 - Long.numberOfLeadingZeros(arenaSize);
    maxArenas = (int) (maxSize / arenaSize);
    arenas = new Arena[maxArenas];
    for (int i = 0; i < maxArenas; ++i) {
        arenas[i] = new Arena();
    }
    Arena firstArena = arenas[0];
    firstArena.init(0);
    allocatedArenas.set(1);
    this.memoryManager = memoryManager;
    defragCounters = new AtomicLong[maxAllocLog2 - minAllocLog2 + 1];
    for (int i = 0; i < defragCounters.length; ++i) {
        defragCounters[i] = new AtomicLong(0);
    }
    this.metrics = metrics;
    metrics.incrAllocatedArena();
    boolean isBoth = null == discardMethod || "both".equalsIgnoreCase(discardMethod);
    doUseFreeListDiscard = isBoth || "freelist".equalsIgnoreCase(discardMethod);
    doUseBruteDiscard = isBoth || "brute".equalsIgnoreCase(discardMethod);
    ctxPool = new FixedSizedObjectPool<DiscardContext>(32,
            new FixedSizedObjectPool.PoolObjectHelper<DiscardContext>() {
                @Override
                public DiscardContext create() {
                    return new DiscardContext();
                }

                @Override
                public void resetBeforeOffer(DiscardContext t) {
                }
            });
}
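
The three log2 fields use the floor-of-log2 idiom: 31 - Integer.numberOfLeadingZeros(x) for ints and 63 - Long.numberOfLeadingZeros(x) for longs. A standalone sketch (an illustrative helper, not part of BuddyAllocator):

// Floor of log2 for positive inputs; exact when x is a power of two.
static int floorLog2(long x) {
    return 63 - Long.numberOfLeadingZeros(x);
}
// floorLog2(1L) == 0, floorLog2(8L) == 3, floorLog2(9L) == 3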

From source file:org.apache.hadoop.hive.ql.exec.persistence.BytesBytesMultiHashMap.java

public BytesBytesMultiHashMap(int initialCapacity, float loadFactor, int wbSize, long maxProbeSize) {
    if (loadFactor < 0 || loadFactor > 1) {
        throw new AssertionError("Load factor must be between (0, 1].");
    }
    assert initialCapacity > 0;
    initialCapacity = (Long.bitCount(initialCapacity) == 1) ? initialCapacity
            : nextHighestPowerOfTwo(initialCapacity);
    // 8 bytes per long in the refs, assume data will be empty. This is just a sanity check.
    int maxCapacity = (maxProbeSize <= 0) ? DEFAULT_MAX_CAPACITY
            : (int) Math.min((long) DEFAULT_MAX_CAPACITY, maxProbeSize / 8);
    if (maxCapacity < DEFAULT_MIN_MAX_CAPACITY) {
        maxCapacity = DEFAULT_MIN_MAX_CAPACITY;
    }
    if (maxCapacity < initialCapacity || initialCapacity <= 0) {
        // Either initialCapacity is too large, or nextHighestPowerOfTwo overflows
        initialCapacity = (Long.bitCount(maxCapacity) == 1) ? maxCapacity : nextLowestPowerOfTwo(maxCapacity);
    }

    validateCapacity(initialCapacity);
    // initialCapacity is a power of two at this point, so this is exactly log2.
    startingHashBitCount = 63 - Long.numberOfLeadingZeros(initialCapacity);
    this.loadFactor = loadFactor;
    refs = new long[initialCapacity];
    writeBuffers = new WriteBuffers(wbSize, MAX_WB_SIZE);
    resizeThreshold = (int) (initialCapacity * this.loadFactor);
}
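
The constructor calls a nextHighestPowerOfTwo helper that is not shown here; one common way to write it also uses the leading-zero count (a hedged sketch, not necessarily Hive's implementation):

// Round a positive int up to the next power of two; assumes
// 1 <= v <= 2^30 so the shift does not overflow.
static int nextHighestPowerOfTwo(int v) {
    return 1 << (32 - Integer.numberOfLeadingZeros(v - 1));
}
// nextHighestPowerOfTwo(1) == 1, nextHighestPowerOfTwo(5) == 8, nextHighestPowerOfTwo(8) == 8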

From source file:org.apache.kylin.engine.mr.common.CubeStatsReader.java

/**
 * Estimate the cuboid's size
 *
 * @return the cuboid size in M bytes
 */
private static double estimateCuboidStorageSize(CubeSegment cubeSegment, long cuboidId, long rowCount,
        long baseCuboidId, List<Integer> rowKeyColumnLength) {

    int rowkeyLength = cubeSegment.getRowKeyPreambleSize();
    KylinConfig kylinConf = cubeSegment.getConfig();

    // Walk the significant bits of the base cuboid id, highest first;
    // each bit set in cuboidId adds that row key column's length.
    long mask = Long.highestOneBit(baseCuboidId);
    long parentCuboidIdActualLength = (long) Long.SIZE - Long.numberOfLeadingZeros(baseCuboidId);
    for (int i = 0; i < parentCuboidIdActualLength; i++) {
        if ((mask & cuboidId) > 0) {
            rowkeyLength += rowKeyColumnLength.get(i); //colIO.getColumnLength(columnList.get(i));
        }
        mask = mask >> 1;
    }

    // add the measure length
    int normalSpace = rowkeyLength;
    int countDistinctSpace = 0;
    for (MeasureDesc measureDesc : cubeSegment.getCubeDesc().getMeasures()) {
        DataType returnType = measureDesc.getFunction().getReturnDataType();
        if (measureDesc.getFunction().getExpression().equals(FunctionDesc.FUNC_COUNT_DISTINCT)) {
            countDistinctSpace += returnType.getStorageBytesEstimate();
        } else {
            normalSpace += returnType.getStorageBytesEstimate();
        }
    }

    double cuboidSizeRatio = kylinConf.getJobCuboidSizeRatio();
    double cuboidSizeMemHungryRatio = kylinConf.getJobCuboidSizeCountDistinctRatio();
    double ret = (1.0 * normalSpace * rowCount * cuboidSizeRatio
            + 1.0 * countDistinctSpace * rowCount * cuboidSizeMemHungryRatio) / (1024L * 1024L);
    return ret;
}
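
Here Long.SIZE - Long.numberOfLeadingZeros(baseCuboidId) is again the bit-length idiom: the loop visits exactly the significant bits of the base cuboid id, highest first. A stripped-down illustration with made-up values:

// baseCuboidId 0b1011 has bit length 4, so the loop runs 4 times,
// with mask sweeping 0b1000, 0b0100, 0b0010, 0b0001.
long baseCuboidId = 0b1011L;
long bitLen = Long.SIZE - Long.numberOfLeadingZeros(baseCuboidId); // 4
long mask = Long.highestOneBit(baseCuboidId);                      // 0b1000
for (int i = 0; i < bitLen; i++) {
    // (mask & cuboidId) > 0 selects dimension i when its bit is set
    mask >>= 1;
}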