Example usage for java.lang Long SIZE

List of usage examples for java.lang Long SIZE

Introduction

On this page you can find example usages of java.lang.Long.SIZE.

Prototype

public static final int SIZE

Document

The number of bits used to represent a long value in two's complement binary form.
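
For reference, a minimal sketch of the constant's value (assuming Java 8+ for Long.BYTES, which is defined as Long.SIZE / Byte.SIZE):

public class LongSizeDemo {
    public static void main(String[] args) {
        System.out.println(Long.SIZE);  // 64 (bits in a long)
        System.out.println(Long.BYTES); // 8 (Java 8+), same as Long.SIZE / Byte.SIZE
    }
}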

Usage

From source file:es.udc.gii.common.eaf.stoptest.BitwiseConvergence.java

/**
 * Calculates the convergence rate between two individuals.
 */
private double convergence(Individual i1, Individual i2) {

    double convergence = 0.0;

    /* Assume both individuals have the same number of genes! */
    int genes = i1.getChromosomeAt(0).length;

    /* For each pair of genes */
    for (int i = 0; i < genes; i++) {

        /* Get the value of the genes. Note that only individuals which have
         * a double as an internal value are considered. */
        double d1 = i1.getChromosomeAt(0)[i];
        double d2 = i2.getChromosomeAt(0)[i];

        /* Get the binary representation of the values. */
        long lg1 = Double.doubleToRawLongBits(d1);
        long lg2 = Double.doubleToRawLongBits(d2);

        /* Perform a bitwise XOR: bit positions that are identical yield 0
         * and bit positions that differ yield 1, so we can count the bits
         * in which the two individuals *differ*. */
        long lg = lg1 ^ lg2;

        /* Count the number of bits in which the two individuals differ. */
        convergence += Long.bitCount(lg);
    }

    /* Get the average bitwise difference. */
    convergence /= Long.SIZE * genes;

    /* Get the average convergence. */
    convergence = 1 - convergence;

    return convergence;
}
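
The core idiom above can be isolated into a minimal sketch (hypothetical input values):

long bits1 = Double.doubleToRawLongBits(1.5);
long bits2 = Double.doubleToRawLongBits(1.25);
int differingBits = Long.bitCount(bits1 ^ bits2);             // bits that differ
double similarity = 1.0 - (double) differingBits / Long.SIZE; // fraction of identical bits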

From source file:org.apache.maven.classpath.munger.validation.JarValidationUtilsTest.java

@Test
public void testSignatureOnShuffledContents() throws Exception {
    byte[] TEST_DATA = (getClass().getName() + "#" + getCurrentTestName()).getBytes("UTF-8");
    NamedPropertySource expected = JarValidationUtils.createJarSignature(createTestJar(TEST_DATA));
    Random rnd = new Random(System.nanoTime());
    for (int index = 0; index < Long.SIZE; index++) {
        shuffle(rnd, TEST_DATA);

        NamedPropertySource actual = JarValidationUtils.createJarSignature(createTestJar(TEST_DATA));
        try {
            JarValidationUtils.validateJarSignature(expected, actual);
            fail("Unexpected success for " + new String(TEST_DATA));
        } catch (SecurityException e) {
            if (logger.isDebugEnabled()) {
                logger.debug(e.getMessage());
            }
        }
    }
}

From source file:net.community.chest.gitcloud.facade.backend.git.BackendRepositoryResolverTest.java

@Test
public void testDeepDownRepositoryResolution() throws Exception {
    final String REPO_NAME = "testDeepDownRepositoryResolution", GIT_NAME = REPO_NAME + Constants.DOT_GIT_EXT;
    final int MAX_DEPTH = Byte.SIZE;
    StringBuilder sb = new StringBuilder(MAX_DEPTH + Long.SIZE);
    File parentDir = reposDir;
    for (int depth = 0; depth < MAX_DEPTH; depth++) {
        String subName = String.valueOf(depth);
        parentDir = new File(parentDir, subName);
        sb.append(subName).append('/');

        File gitDir = new File(parentDir, GIT_NAME);
        if (!gitDir.exists()) {
            Repository repo = new FileRepository(gitDir);
            try {
                repo.create(true);
            } finally {
                repo.close();
            }
        } else {
            assertTrue("Child repo not a folder: " + gitDir, gitDir.isDirectory());
        }

        int curLen = sb.length();
        try {
            sb.append(REPO_NAME);

            int baseLen = sb.length();
            for (String ext : TEST_EXTS) {
                try {
                    Repository repo = resolver.open(null, sb.append(ext).toString());
                    assertNotNull("No resolution result for ext=" + ext, repo);

                    try {
                        File actual = repo.getDirectory();
                        assertEquals("Mismatched resolved location for ext=" + ext, gitDir, actual);
                    } finally {
                        repo.close();
                    }
                } finally {
                    sb.setLength(baseLen);
                }
            }
        } finally {
            sb.setLength(curLen);
        }
    }
}

From source file:org.apache.hadoop.hive.serde2.compression.SnappyCompDe.java

/**
 * Compress a set of columns.
 *
 * The header contains a compressed array of data types.
 * The body contains compressed columns and their metadata.
 * The footer contains a compressed array of chunk sizes. The final four bytes of the footer encode the byte size of that compressed array.
 *
 * @param colSet
 *
 * @return ByteBuffer representing the compressed set.
 */
@Override
public ByteBuffer compress(ColumnBuffer[] colSet) {

    // Many compression libraries allow you to avoid allocation of intermediate arrays.
    // To use these APIs, we need to preallocate the output container.

    // Reserve space for the header.
    int[] dataType = new int[colSet.length];
    int maxCompressedSize = Snappy.maxCompressedLength(4 * dataType.length);

    // Reserve space for the compressed nulls BitSet for each column.
    maxCompressedSize += colSet.length * Snappy.maxCompressedLength((colSet.length / 8) + 1);

    // Track the length of `List<Integer> compressedSize` which will be declared later.
    int uncompressedFooterLength = 1 + 2 * colSet.length;

    for (int colNum = 0; colNum < colSet.length; ++colNum) {
        // Reserve space for the compressed columns.
        dataType[colNum] = colSet[colNum].getType().toTType().getValue();
        switch (TTypeId.findByValue(dataType[colNum])) {
        case BOOLEAN_TYPE:
            maxCompressedSize += Integer.SIZE / Byte.SIZE; // This is for the encoded length.
            maxCompressedSize += Snappy.maxCompressedLength((colSet.length / 8) + 1);
            break;
        case TINYINT_TYPE:
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length);
            break;
        case SMALLINT_TYPE:
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Short.SIZE / Byte.SIZE);
            break;
        case INT_TYPE:
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Integer.SIZE / Byte.SIZE);
            break;
        case BIGINT_TYPE:
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Long.SIZE / Byte.SIZE);
            break;
        case DOUBLE_TYPE:
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Double.SIZE / Byte.SIZE);
            break;
        case BINARY_TYPE:
            // Reserve space for the size of the compressed array of row sizes.
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Integer.SIZE / Byte.SIZE);

            // Reserve space for the size of the compressed flattened bytes.
            for (ByteBuffer nextBuffer : colSet[colNum].toTColumn().getBinaryVal().getValues()) {
                maxCompressedSize += Snappy.maxCompressedLength(nextBuffer.limit());
            }

            // Add an additional value to the list of compressed chunk sizes (length of `rowSize` array).
            uncompressedFooterLength++;

            break;
        case STRING_TYPE:
            // Reserve space for the size of the compressed array of row sizes.
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Integer.SIZE / Byte.SIZE);

            // Reserve space for the size of the compressed flattened bytes.
            for (String nextString : colSet[colNum].toTColumn().getStringVal().getValues()) {
                maxCompressedSize += Snappy
                        .maxCompressedLength(nextString.getBytes(StandardCharsets.UTF_8).length);
            }

            // Add an additional value to the list of compressed chunk sizes (length of `rowSize` array).
            uncompressedFooterLength++;

            break;
        default:
            throw new IllegalStateException("Unrecognized column type");
        }
    }
    // Reserve space for the footer.
    maxCompressedSize += Snappy.maxCompressedLength(uncompressedFooterLength * Integer.SIZE / Byte.SIZE);

    // Allocate the output container.
    ByteBuffer output = ByteBuffer.allocate(maxCompressedSize);

    // Allocate the footer. Chunk sizes go at the end because we don't know them
    // until after the columns have been compressed and written.
    ArrayList<Integer> compressedSize = new ArrayList<Integer>(uncompressedFooterLength);

    // Write to the output buffer.
    try {
        // Write the header.
        compressedSize.add(writePrimitives(dataType, output));

        // Write the compressed columns and metadata.
        for (int colNum = 0; colNum < colSet.length; colNum++) {
            switch (TTypeId.findByValue(dataType[colNum])) {
            case BOOLEAN_TYPE: {
                TBoolColumn column = colSet[colNum].toTColumn().getBoolVal();

                List<Boolean> bools = column.getValues();
                BitSet bsBools = new BitSet(bools.size());
                for (int rowNum = 0; rowNum < bools.size(); rowNum++) {
                    bsBools.set(rowNum, bools.get(rowNum));
                }

                compressedSize.add(writePrimitives(column.getNulls(), output));

                // BitSet won't write trailing zeroes so we encode the length
                output.putInt(column.getValuesSize());

                compressedSize.add(writePrimitives(bsBools.toByteArray(), output));

                break;
            }
            case TINYINT_TYPE: {
                TByteColumn column = colSet[colNum].toTColumn().getByteVal();
                compressedSize.add(writePrimitives(column.getNulls(), output));
                compressedSize.add(writeBoxedBytes(column.getValues(), output));
                break;
            }
            case SMALLINT_TYPE: {
                TI16Column column = colSet[colNum].toTColumn().getI16Val();
                compressedSize.add(writePrimitives(column.getNulls(), output));
                compressedSize.add(writeBoxedShorts(column.getValues(), output));
                break;
            }
            case INT_TYPE: {
                TI32Column column = colSet[colNum].toTColumn().getI32Val();
                compressedSize.add(writePrimitives(column.getNulls(), output));
                compressedSize.add(writeBoxedIntegers(column.getValues(), output));
                break;
            }
            case BIGINT_TYPE: {
                TI64Column column = colSet[colNum].toTColumn().getI64Val();
                compressedSize.add(writePrimitives(column.getNulls(), output));
                compressedSize.add(writeBoxedLongs(column.getValues(), output));
                break;
            }
            case DOUBLE_TYPE: {
                TDoubleColumn column = colSet[colNum].toTColumn().getDoubleVal();
                compressedSize.add(writePrimitives(column.getNulls(), output));
                compressedSize.add(writeBoxedDoubles(column.getValues(), output));
                break;
            }
            case BINARY_TYPE: {
                TBinaryColumn column = colSet[colNum].toTColumn().getBinaryVal();

                // Initialize the array of row sizes.
                int[] rowSizes = new int[column.getValuesSize()];
                int totalSize = 0;
                for (int rowNum = 0; rowNum < column.getValuesSize(); rowNum++) {
                    rowSizes[rowNum] = column.getValues().get(rowNum).limit();
                    totalSize += column.getValues().get(rowNum).limit();
                }

                // Flatten the data for Snappy for a better compression ratio.
                ByteBuffer flattenedData = ByteBuffer.allocate(totalSize);
                for (int rowNum = 0; rowNum < column.getValuesSize(); rowNum++) {
                    flattenedData.put(column.getValues().get(rowNum));
                }

                // Write nulls bitmap.
                compressedSize.add(writePrimitives(column.getNulls(), output));

                // Write the list of row sizes.
                compressedSize.add(writePrimitives(rowSizes, output));

                // Write the compressed, flattened data.
                compressedSize.add(writePrimitives(flattenedData.array(), output));

                break;
            }
            case STRING_TYPE: {
                TStringColumn column = colSet[colNum].toTColumn().getStringVal();

                // Initialize the array of row sizes.
                int[] rowSizes = new int[column.getValuesSize()];
                int totalSize = 0;
                for (int rowNum = 0; rowNum < column.getValuesSize(); rowNum++) {
                    rowSizes[rowNum] = column.getValues().get(rowNum).length();
                    totalSize += column.getValues().get(rowNum).length();
                }

                // Flatten the data for Snappy for a better compression ratio.
                StringBuilder flattenedData = new StringBuilder(totalSize);
                for (int rowNum = 0; rowNum < column.getValuesSize(); rowNum++) {
                    flattenedData.append(column.getValues().get(rowNum));
                }

                // Write nulls bitmap.
                compressedSize.add(writePrimitives(column.getNulls(), output));

                // Write the list of row sizes.
                compressedSize.add(writePrimitives(rowSizes, output));

                // Write the flattened data.
                compressedSize.add(
                        writePrimitives(flattenedData.toString().getBytes(StandardCharsets.UTF_8), output));

                break;
            }
            default:
                throw new IllegalStateException("Unrecognized column type");
            }
        }

        // Write the footer.
        output.putInt(writeBoxedIntegers(compressedSize, output));

    } catch (IOException e) {
        e.printStackTrace();
    }
    output.flip();
    return output;
}
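
Throughout this method, the pattern X.SIZE / Byte.SIZE converts a type's bit width into a byte count; a minimal sketch (hypothetical row count):

int rows = 1000;
int longColumnBytes = rows * Long.SIZE / Byte.SIZE; // 8000 bytes for a long[1000]
// On Java 8+, the equivalent constants Integer.BYTES, Long.BYTES, etc. are available.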

From source file:org.cloudata.core.commitlog.pipe.CommitLogFileChannel.java

public static long readLastIndex(FileChannel ch) throws IOException {
    ByteBuffer buf = ByteBuffer.allocate(Long.SIZE / 8);
    long chSize = ch.size();

    if (chSize > 0) {
        ch.position(chSize > (Long.SIZE / 8) ? chSize - (Long.SIZE / 8) : 0);
        ch.read(buf);
        buf.flip();
        return buf.getLong();
    } else {
        return 0;
    }
}
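
On Java 8 and later the same buffer can be sized with Long.BYTES, which equals Long.SIZE / Byte.SIZE; a minimal equivalent sketch (hypothetical method name, returning 0 for files shorter than one long):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

public static long readTrailingLong(FileChannel ch) throws IOException {
    long size = ch.size();
    if (size < Long.BYTES) {
        return 0;
    }
    ByteBuffer buf = ByteBuffer.allocate(Long.BYTES); // 8 bytes
    ch.position(size - Long.BYTES);
    ch.read(buf);
    buf.flip();
    return buf.getLong();
}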

From source file:org.apache.flink.graph.generator.RMatGraph.java

@Override
public Graph<LongValue, NullValue, NullValue> generate() {
    int scale = Long.SIZE - Long.numberOfLeadingZeros(vertexCount - 1);

    // Edges
    int cyclesPerEdge = noiseEnabled ? 5 * scale : scale;

    List<BlockInfo<T>> generatorBlocks = randomGenerableFactory.getRandomGenerables(edgeCount, cyclesPerEdge);

    DataSet<Edge<LongValue, NullValue>> edges = env.fromCollection(generatorBlocks).name("Random generators")
            .rebalance().setParallelism(parallelism).name("Rebalance")
            .flatMap(new GenerateEdges<T>(vertexCount, scale, A, B, C, noiseEnabled, noise))
            .setParallelism(parallelism).name("RMat graph edges");

    // Vertices
    DataSet<Vertex<LongValue, NullValue>> vertices = GraphGeneratorUtils.vertexSet(edges, parallelism);

    // Graph
    return Graph.fromDataSet(vertices, edges, env);
}
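
The scale computation is a standard bit trick: Long.SIZE - Long.numberOfLeadingZeros(n - 1) yields ceil(log2(n)) for n > 1, i.e. the number of bits needed to address n vertex IDs. A minimal sketch with a hypothetical vertex count:

long vertexCount = 100;
int scale = Long.SIZE - Long.numberOfLeadingZeros(vertexCount - 1);
System.out.println(scale); // 7, since 2^6 = 64 < 100 <= 128 = 2^7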

From source file:com.monitor.baseservice.utils.XCodeUtil.java

/**
 * 1. Strip the xcode prefix.
 * 2. Base64-decode the remainder.
 * 3. Split the result into payload and CRC (the CRC is the trailing 8 bytes).
 * 4. Verify the CRC against the payload.
 * 5. XOR-decode the payload and parse it as JSON.
 * 
 * @param xCode
 * @return the decoded key/value map
 * @throws LogicalException if CRC verification fails
 */
@SuppressWarnings("unchecked")
public static Map<String, Object> xDecode(String xCode) throws LogicalException {
    // 1
    String real = xCode.substring(PREFIX_LENGTH);
    byte[] rst = Base64.decodeBase64(real);

    byte[] data = Arrays.copyOf(rst, rst.length - (Long.SIZE / Byte.SIZE));
    byte[] crc = Arrays.copyOfRange(rst, data.length, rst.length);
    // 4
    long value = byteArrayToLong(crc);
    byte[] realCrc = crcUnsigned(data, CRC_KEY.getBytes());
    long realValue = byteArrayToLong(realCrc);
    if (value != realValue) {
        System.out.println("License verification failed.");
        throw new LogicalException(RetStat.ERR_BAD_PARAMS, null);
    }

    xorCode(data, XOR_KEY);

    String info = new String(data);

    return JSONObject.parseObject(info, Map.class);
}

From source file:ie.peternagy.jcrypto.algo.EllipticCurveWrapper.java

/**
 * Create raw header - includes version, keyId, crc
 *
 * @param dataCrc
 * @return 
 */
public byte[] createRawHeader(long dataCrc) {
    try {
        byte[] keyId = getKeyId();
        ByteArrayOutputStream header = new ByteArrayOutputStream();

        header.write((byte) 100); // version
        header.write(ByteBuffer.allocate(Integer.SIZE / Byte.SIZE).putInt(keyId.length).array()); // key id length
        header.write(ByteBuffer.allocate(Long.SIZE / Byte.SIZE).putLong(dataCrc).array()); // data CRC
        header.write(keyId);

        return header.toByteArray();
    } catch (IOException ex) {
        Logger.getLogger(EllipticCurveWrapper.class.getName()).log(Level.SEVERE, null, ex);
    }

    return null;
}
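
A minimal sketch of parsing such a header back (hypothetical headerBytes, mirroring the write order above):

ByteBuffer in = ByteBuffer.wrap(headerBytes);
byte version = in.get();       // 1 byte
int keyIdLength = in.getInt(); // Integer.SIZE / Byte.SIZE = 4 bytes
long dataCrc = in.getLong();   // Long.SIZE / Byte.SIZE = 8 bytes
byte[] keyId = new byte[keyIdLength];
in.get(keyId);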

From source file:org.elasticsearch.common.geo.GeoUtils.java

/**
 * Calculate the number of levels needed for a specific precision. Quadtree
 * cells will not exceed the specified size (diagonal) of the precision.
 * @param meters Maximum size of cells in meters (must be greater than zero)
 * @return number of levels needed to achieve the precision
 */
public static int quadTreeLevelsForPrecision(double meters) {
    assert meters >= 0;
    if (meters == 0) {
        return QuadPrefixTree.MAX_LEVELS_POSSIBLE;
    } else {
        final double ratio = 1 + (EARTH_POLAR_DISTANCE / EARTH_EQUATOR); // cell ratio
        final double width = Math.sqrt((meters * meters) / (ratio * ratio)); // convert to cell width
        final long part = Math.round(Math.ceil(EARTH_EQUATOR / width));
        final int level = Long.SIZE - Long.numberOfLeadingZeros(part) - 1; // (log_2)
        return (part <= (1L << level)) ? level : (level + 1); // adjust level
    }
}
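
A worked case (not from the source): part = 20 gives Long.numberOfLeadingZeros(20) = 59, so level = 64 - 59 - 1 = 4; since 20 > (1L << 4) = 16, the method returns 5, and 2^5 = 32 >= 20 as required.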

From source file:gobblin.writer.SimpleDataWriterTest.java

/**
 * Prepend the size to each record without delimiting the record. Each record
 * should be prepended by the size of that record and the bytes written should
 * include the prepended bytes.
 */
@Test
public void testPrependSizeWithoutDelimiter() throws IOException {
    properties.setProp(ConfigurationKeys.SIMPLE_WRITER_PREPEND_SIZE, true);
    properties.setProp(ConfigurationKeys.SIMPLE_WRITER_DELIMITER, "");
    SimpleDataWriter writer = buildSimpleDataWriter();
    byte[] rec1 = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 0 };
    byte[] rec2 = { 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25 };
    byte[] rec3 = { 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45 };
    byte[][] records = { rec1, rec2, rec3 };

    writer.write(rec1);
    writer.write(rec2);
    writer.write(rec3);

    writer.close();
    writer.commit();

    Assert.assertEquals(writer.recordsWritten(), 3);
    Assert.assertEquals(writer.bytesWritten(), rec1.length + rec2.length + rec3.length + (Long.SIZE / 8 * 3));

    File outputFile = new File(writer.getOutputFilePath());
    DataInputStream dis = new DataInputStream(new FileInputStream(outputFile));
    for (int i = 0; i < 3; i++) {
        long size = dis.readLong();
        Assert.assertEquals(size, records[i].length);
        for (int j = 0; j < size; j++) {
            Assert.assertEquals(dis.readByte(), records[i][j]);
        }
    }
}