Example usage for org.apache.hadoop.conf Configuration setLong

List of usage examples for org.apache.hadoop.conf Configuration setLong

Introduction

On this page you can find example usage for org.apache.hadoop.conf Configuration setLong.

Prototype

public void setLong(String name, long value) 

Document

Set the value of the name property to a long.
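
A minimal sketch of setting and reading back a long property (the property name and values below are illustrative, not taken from the examples on this page):

Configuration conf = new Configuration();
// store a long under an illustrative property name
conf.setLong("example.block.size", 134217728L);
// read it back; the second argument is the default returned when the property is unset
long blockSize = conf.getLong("example.block.size", 0L);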

Usage

From source file:com.mellanox.r4h.TestFSOutputSummer.java

License:Apache License

private void doTestFSOutputSummer(String checksumType) throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_CHECKSUM);
    conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, checksumType);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
    fileSys = cluster.getFileSystem();
    try {
        Path file = new Path("try.dat");
        Random rand = new Random(seed);
        rand.nextBytes(expected);
        writeFile1(file);
        writeFile2(file);
        writeFile3(file);
    } finally {
        fileSys.close();
        cluster.shutdown();
    }
}

From source file:com.mellanox.r4h.TestFSOutputSummer.java

License:Apache License

@Test
public void TestDFSCheckSumType() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_CHECKSUM);
    conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "NULL");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
    fileSys = cluster.getFileSystem();
    try {
        Path file = new Path("try.dat");
        Random rand = new Random(seed);
        rand.nextBytes(expected);
        writeFile1(file);
    } finally {
        fileSys.close();
        cluster.shutdown();
    }
}

From source file:com.mellanox.r4h.TestHFlush.java

License:Apache License

/**
 * The test uses
 * {@link #doTheJob(Configuration, String, long, short, boolean, EnumSet)}
 * to write a file with a custom block size so the writes will be
 * happening across block boundaries.
 */
@Test
public void hFlush_02() throws IOException {
    Configuration conf = new HdfsConfiguration();
    int customPerChecksumSize = 512;
    int customBlockSize = customPerChecksumSize * 3;
    // Modify default filesystem settings
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);

    doTheJob(conf, fName, customBlockSize, (short) 2, false, EnumSet.noneOf(SyncFlag.class));
}

From source file:com.mellanox.r4h.TestHFlush.java

License:Apache License

/**
 * The test uses
 * {@link #doTheJob(Configuration, String, long, short, boolean, EnumSet)}
 * to write a file with a custom block size so the writes will be
 * happening across block and checksum boundaries.
 */
@Test
public void hFlush_03() throws IOException {
    Configuration conf = new HdfsConfiguration();
    int customPerChecksumSize = 400;
    int customBlockSize = customPerChecksumSize * 3;
    // Modify default filesystem settings
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);

    doTheJob(conf, fName, customBlockSize, (short) 2, false, EnumSet.noneOf(SyncFlag.class));
}

From source file:com.mellanox.r4h.TestHFlush.java

License:Apache License

/**
 * The test calls
 * {@link #doTheJob(Configuration, String, long, short, boolean, EnumSet)}
 * while requiring the semantics of {@link SyncFlag#UPDATE_LENGTH}.
 * Similar to {@link #hFlush_02()}, it writes a file with a custom block
 * size so the writes will be happening across block boundaries.
 */
@Test
public void hSyncUpdateLength_02() throws IOException {
    Configuration conf = new HdfsConfiguration();
    int customPerChecksumSize = 512;
    int customBlockSize = customPerChecksumSize * 3;
    // Modify default filesystem settings
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);

    doTheJob(conf, fName, customBlockSize, (short) 2, true, EnumSet.of(SyncFlag.UPDATE_LENGTH));
}

From source file:com.mellanox.r4h.TestHFlush.java

License:Apache License

/**
 * The test calls
 * {@link #doTheJob(Configuration, String, long, short, boolean, EnumSet)}
 * while requiring the semantics of {@link SyncFlag#UPDATE_LENGTH}.
 * Similar to {@link #hFlush_03()}, it writes a file with a custom block
 * size so the writes will be happening across block and checksum
 * boundaries.
 */
@Test
public void hSyncUpdateLength_03() throws IOException {
    Configuration conf = new HdfsConfiguration();
    int customPerChecksumSize = 400;
    int customBlockSize = customPerChecksumSize * 3;
    // Modify default filesystem settings
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);

    doTheJob(conf, fName, customBlockSize, (short) 2, true, EnumSet.of(SyncFlag.UPDATE_LENGTH));
}

From source file:com.mellanox.r4h.TestReadWhileWriting.java

License:Apache License

/** Test reading while writing. */
@Test
public void pipeline_02_03() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);

    // create cluster
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    try {
        //change the lease limits.
        cluster.setLeasePeriod(SOFT_LEASE_LIMIT, HARD_LEASE_LIMIT);

        //wait for the cluster
        cluster.waitActive();
        final FileSystem fs = cluster.getFileSystem();
        final Path p = new Path(DIR, "file1");
        final int half = BLOCK_SIZE / 2;

        //a. On Machine M1, Create file. Write half block of data.
        //   Invoke DFSOutputStream.hflush() on the dfs file handle.
        //   Do not close file yet.
        {
            final FSDataOutputStream out = fs.create(p, true,
                    fs.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), (short) 3,
                    BLOCK_SIZE);
            write(out, 0, half);

            //hflush
            ((DFSOutputStream) out.getWrappedStream()).hflush();
        }

        //b. On another machine M2, open file and verify that the half-block
        //   of data can be read successfully.
        checkFile(p, half, conf);
        MiniDFSClusterBridge.getAppendTestUtilLOG().info("leasechecker.interruptAndJoin()");
        ((DistributedFileSystem) fs).dfs.getLeaseRenewer().interruptAndJoin();

        //c. On M1, append another half block of data.  Close file on M1.
        {
            //sleep to let the lease expire.
            Thread.sleep(2 * SOFT_LEASE_LIMIT);

            final UserGroupInformation current = UserGroupInformation.getCurrentUser();
            final UserGroupInformation ugi = UserGroupInformation
                    .createUserForTesting(current.getShortUserName() + "x", new String[] { "supergroup" });
            final DistributedFileSystem dfs = ugi.doAs(new PrivilegedExceptionAction<DistributedFileSystem>() {
                @Override
                public DistributedFileSystem run() throws Exception {
                    return (DistributedFileSystem) FileSystem.newInstance(conf);
                }
            });
            final FSDataOutputStream out = append(dfs, p);
            write(out, 0, half);
            out.close();
        }

        //d. On M2, open file and read 1 block of data from it. Close file.
        checkFile(p, 2 * half, conf);
    } finally {
        cluster.shutdown();
    }
}

From source file:com.moz.fiji.hadoop.configurator.TestHadoopConfigurator.java

License:Apache License

@Test
public void testConfigure() {
    Configuration conf = new Configuration();
    conf.setBoolean("my.boolean.value", true);
    conf.setFloat("my.float.value", 3.1f);
    conf.setFloat("my.double.value", 1.9f);
    conf.setInt("my.int.value", 12);
    conf.set("my.string.value", "bar");
    conf.setStrings("my.string.collection", "apple", "banana");
    conf.setStrings("my.string.array", "red", "green", "blue");
    conf.setBoolean("your.boolean.value", true);
    conf.setFloat("your.float.value", 1.0f);
    conf.setFloat("your.double.value", 2.0f);
    conf.setInt("your.int.value", 1);
    conf.setLong("your.long.value", 2L);
    conf.set("your.string.value", "asdf");

    MyConfiguredClass instance = ReflectionUtils.newInstance(MyConfiguredClass.class, conf);
    assertEquals(true, instance.getBooleanValue());
    assertEquals(3.1f, instance.getFloatValue(), 1e-6f);
    assertEquals(1.9, instance.getDoubleValue(), 1e-6);
    assertEquals(12, instance.getIntValue());
    assertEquals(456L, instance.getLongValue());
    assertEquals("bar", instance.getStringValue());
    assertEquals(true, instance.getYourBoolean());
    assertEquals(1.0f, instance.getYourFloat(), 1e-6f);
    assertEquals(2.0, instance.getYourDouble(), 1e-6);
    assertEquals(1, instance.getYourInt());
    assertEquals(2L, instance.getYourLong());
}

From source file:com.mozilla.socorro.hadoop.CrashReportJob.java

License:LGPL

/**
 * @param args
 * @return
 * @throws IOException
 * @throws ParseException
 */
public static Job initJob(String jobName, Configuration conf, Class<?> mainClass,
        Class<? extends TableMapper> mapperClass, Class<? extends Reducer> combinerClass,
        Class<? extends Reducer> reducerClass, Map<byte[], byte[]> columns,
        Class<? extends WritableComparable> keyOut, Class<? extends Writable> valueOut, Path outputPath)
        throws IOException, ParseException {
    // Set both start/end time and start/stop row
    Calendar startCal = Calendar.getInstance();
    Calendar endCal = Calendar.getInstance();

    SimpleDateFormat sdf = new SimpleDateFormat("yyyyMMdd");

    String startDateStr = conf.get(START_DATE);
    String endDateStr = conf.get(END_DATE);
    if (!StringUtils.isBlank(startDateStr)) {
        startCal.setTime(sdf.parse(startDateStr));
    }
    if (!StringUtils.isBlank(endDateStr)) {
        endCal.setTime(sdf.parse(endDateStr));
    }

    conf.setLong(START_TIME, startCal.getTimeInMillis());
    conf.setLong(END_TIME, DateUtil.getEndTimeAtResolution(endCal.getTimeInMillis(), Calendar.DATE));

    Job job = new Job(conf);
    job.setJobName(jobName);
    job.setJarByClass(mainClass);

    // input table configuration
    Scan[] scans = MultiScanTableMapReduceUtil.generateScans(startCal, endCal, columns, 100, false);
    MultiScanTableMapReduceUtil.initMultiScanTableMapperJob(TABLE_NAME_CRASH_REPORTS, scans, mapperClass,
            keyOut, valueOut, job);

    if (combinerClass != null) {
        job.setCombinerClass(combinerClass);
    }

    if (reducerClass != null) {
        job.setReducerClass(reducerClass);
    } else {
        job.setNumReduceTasks(0);
    }

    FileOutputFormat.setOutputPath(job, outputPath);

    return job;
}

From source file:com.msd.gin.halyard.common.HalyardTableUtils.java

License:Apache License

/**
 * Helper method which locates or creates and returns an HTable
 * @param config Hadoop Configuration
 * @param tableName String table name
 * @param create boolean option to create the table if it does not exist
 * @param splitBits int number of bits used for calculation of HTable region pre-splits (applies for new tables only)
 * @param contextSplitBitsMap Map between contexts and number of bits used for calculation of HTable region contextual pre-splits (applies for new tables only)
 * @return HTable
 * @throws IOException throws IOException in case of any HBase IO problems
 */
public static HTable getTable(Configuration config, String tableName, boolean create, int splitBits,
        Map<String, Integer> contextSplitBitsMap) throws IOException {
    Configuration cfg = HBaseConfiguration.create(config);
    cfg.setLong(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 3600000L);
    if (create) {
        try (Connection con = ConnectionFactory.createConnection(config)) {
            try (Admin admin = con.getAdmin()) {
                if (!admin.tableExists(TableName.valueOf(tableName))) {
                    HTableDescriptor td = new HTableDescriptor(TableName.valueOf(tableName));
                    td.addFamily(createColumnFamily());
                    td.setValue(HALYARD_VERSION_ATTRIBUTE, HALYARD_VERSION);
                    admin.createTable(td,
                            splitBits < 0 ? null : calculateSplits(splitBits, contextSplitBitsMap));
                }
            }
        }
    }
    HTable table = new HTable(cfg, tableName);
    String version = table.getTableDescriptor().getValue(HALYARD_VERSION_ATTRIBUTE);
    if (!HALYARD_VERSION.equals(version)) {
        table.close();
        throw new IllegalArgumentException("Table " + tableName + " is not compatible, expected "
                + HALYARD_VERSION_ATTRIBUTE + "=" + HALYARD_VERSION + ", however received " + version);
    }
    table.setAutoFlushTo(false);
    return table;
}
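
A hypothetical call to this helper might look like the sketch below; the table name, split-bit count, and the empty context-split map are illustrative assumptions, not values taken from the example above:

Configuration conf = HBaseConfiguration.create();
// "halyard_test" and 3 split bits are placeholder values for illustration only
try (HTable table = HalyardTableUtils.getTable(conf, "halyard_test", true, 3,
        new HashMap<String, Integer>())) {
    // the returned table has a one-hour scanner timeout and auto-flush disabled
}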