Example usage for org.apache.hadoop.conf Configuration setLong

List of usage examples for org.apache.hadoop.conf Configuration setLong

Introduction

This page collects example usages of org.apache.hadoop.conf.Configuration.setLong from open-source projects.

Prototype

public void setLong(String name, long value) 

Document

Set the value of the name property to a long.
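
A minimal sketch of the call in isolation, before the full examples below (the property name my.example.max.bytes is an illustrative placeholder, not a real Hadoop key):

import org.apache.hadoop.conf.Configuration;

public class SetLongExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Store a long under a property name; Configuration keeps it internally as a string.
        conf.setLong("my.example.max.bytes", 64L * 1024 * 1024);
        // Read it back with getLong, supplying a default for the unset case.
        long maxBytes = conf.getLong("my.example.max.bytes", 0L);
        System.out.println(maxBytes); // prints 67108864
    }
}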

Usage

From source file: org.apache.sentry.api.service.thrift.TestSentryServiceWithInvalidMsgSize.java

License: Apache License

/**
 * Test the case when the message size is larger than the client's thrift max message size.
 */
@Test
public void testClientWithSmallMaxMsgSize() throws Exception {
    runTestAsSubject(new TestOperation() {
        @Override
        public void runTestAsSubject() throws Exception {
            SentryServiceClientFactory oldFactory = SentryServiceClientFactory.factoryReset(null);
            Configuration confWithSmallMaxMsgSize = new Configuration(SentryServiceIntegrationBase.conf);
            confWithSmallMaxMsgSize
                    .setLong(ApiConstants.ClientConfig.SENTRY_POLICY_CLIENT_THRIFT_MAX_MESSAGE_SIZE, 20);
            // create a client with a small thrift max message size
            SentryPolicyServiceClient clientWithSmallMaxMsgSize = SentryServiceClientFactory
                    .create(confWithSmallMaxMsgSize);

            setLocalGroupMapping(SentryServiceIntegrationBase.ADMIN_USER, REQUESTER_USER_GROUP_NAMES);
            writePolicyFile();

            boolean exceptionThrown = false;
            try {
                // client throws exception when message size is larger than the client's thrift max message size.
                clientWithSmallMaxMsgSize.listAllRoles(SentryServiceIntegrationBase.ADMIN_USER);
            } catch (SentryUserException e) {
                exceptionThrown = true;
                Assert.assertTrue(e.getMessage().contains("Thrift exception occurred"));
                Assert.assertTrue(e.getCause().getMessage().contains("Length exceeded max allowed"));
            } finally {
                Assert.assertEquals(true, exceptionThrown);
                clientWithSmallMaxMsgSize.close();
                SentryServiceClientFactory.factoryReset(oldFactory);
            }

            // client can still talk with sentry server when message size is smaller.
            client.dropRoleIfExists(SentryServiceIntegrationBase.ADMIN_USER, ROLE_NAME);
            client.listAllRoles(SentryServiceIntegrationBase.ADMIN_USER);
            client.createRole(SentryServiceIntegrationBase.ADMIN_USER, ROLE_NAME);
            client.listAllRoles(SentryServiceIntegrationBase.ADMIN_USER);
        }
    });
}

From source file: org.apache.sentry.api.service.thrift.TestSentryServiceWithInvalidMsgSize.java

License: Apache License

/**
 * Test the case when the message size is larger than the server's thrift max message size.
 */
@Test
public void testServerWithSmallMaxMsgSize() throws Exception {
    runTestAsSubject(new TestOperation() {
        @Override
        public void runTestAsSubject() throws Exception {
            Configuration confWithSmallMaxMsgSize = new Configuration(SentryServiceIntegrationBase.conf);
            confWithSmallMaxMsgSize
                    .setLong(ServiceConstants.ServerConfig.SENTRY_POLICY_SERVER_THRIFT_MAX_MESSAGE_SIZE, 50);
            stopSentryService();

            // create a server with a small max thrift message size
            SentryServiceIntegrationBase.server = SentryServiceFactory.create(confWithSmallMaxMsgSize);
            SentryServiceIntegrationBase.startSentryService();

            setLocalGroupMapping(SentryServiceIntegrationBase.ADMIN_USER, REQUESTER_USER_GROUP_NAMES);
            writePolicyFile();

            // client can talk with server when message size is smaller.
            client.listAllRoles(SentryServiceIntegrationBase.ADMIN_USER);
            client.createRole(SentryServiceIntegrationBase.ADMIN_USER, ROLE_NAME);

            boolean exceptionThrown = false;
            try {
                // client throws exception when message size is larger than the server's thrift max message size.
                client.grantServerPrivilege(SentryServiceIntegrationBase.ADMIN_USER, ROLE_NAME, "server",
                        false);
            } catch (SentryUserException e) {
                exceptionThrown = true;
                Assert.assertTrue(
                        e.getCause().getMessage().contains("org.apache.thrift.transport.TTransportException"));
            } finally {
                Assert.assertEquals(true, exceptionThrown);
            }

            // client can still talk with sentry server when message size is smaller.
            Set<TSentryRole> roles = client.listAllRoles(SentryServiceIntegrationBase.ADMIN_USER);
            Assert.assertTrue(roles.size() == 1);
            Assert.assertEquals(ROLE_NAME, roles.iterator().next().getRoleName());
        }
    });
}

From source file: org.apache.sentry.provider.db.service.thrift.TestSentryServiceWithInvalidMsgSize.java

License: Apache License

/**
 * Test the case when the message size is larger than the client's thrift max message size.
 */
@Test
public void testClientWithSmallMaxMsgSize() throws Exception {
    runTestAsSubject(new TestOperation() {
        @Override
        public void runTestAsSubject() throws Exception {
            SentryServiceClientFactory oldFactory = SentryServiceClientFactory.factoryReset(null);
            Configuration confWithSmallMaxMsgSize = new Configuration(conf);
            confWithSmallMaxMsgSize
                    .setLong(ServiceConstants.ClientConfig.SENTRY_POLICY_CLIENT_THRIFT_MAX_MESSAGE_SIZE, 20);
            // create a client with a small thrift max message size
            SentryPolicyServiceClient clientWithSmallMaxMsgSize = SentryServiceClientFactory
                    .create(confWithSmallMaxMsgSize);

            setLocalGroupMapping(ADMIN_USER, REQUESTER_USER_GROUP_NAMES);
            writePolicyFile();

            boolean exceptionThrown = false;
            try {
                // client throws exception when message size is larger than the client's thrift max message size.
                clientWithSmallMaxMsgSize.listAllRoles(ADMIN_USER);
            } catch (SentryUserException e) {
                exceptionThrown = true;
                Assert.assertTrue(e.getMessage().contains("Thrift exception occurred"));
                Assert.assertTrue(e.getCause().getMessage().contains("Length exceeded max allowed"));
            } finally {
                Assert.assertEquals(true, exceptionThrown);
                clientWithSmallMaxMsgSize.close();
                SentryServiceClientFactory.factoryReset(oldFactory);
            }

            // client can still talk with sentry server when message size is smaller.
            client.dropRoleIfExists(ADMIN_USER, ROLE_NAME);
            client.listAllRoles(ADMIN_USER);
            client.createRole(ADMIN_USER, ROLE_NAME);
            client.listAllRoles(ADMIN_USER);
        }
    });
}

From source file: org.apache.sentry.provider.db.service.thrift.TestSentryServiceWithInvalidMsgSize.java

License: Apache License

/**
 * Test the case when the message size is larger than the server's thrift max message size.
 */
@Test
public void testServerWithSmallMaxMsgSize() throws Exception {
    runTestAsSubject(new TestOperation() {
        @Override
        public void runTestAsSubject() throws Exception {
            Configuration confWithSmallMaxMsgSize = new Configuration(conf);
            confWithSmallMaxMsgSize
                    .setLong(ServiceConstants.ServerConfig.SENTRY_POLICY_SERVER_THRIFT_MAX_MESSAGE_SIZE, 50);
            stopSentryService();

            // create a server with a small max thrift message size
            server = SentryServiceFactory.create(confWithSmallMaxMsgSize);
            startSentryService();

            setLocalGroupMapping(ADMIN_USER, REQUESTER_USER_GROUP_NAMES);
            writePolicyFile();

            // client can talk with server when message size is smaller.
            client.listAllRoles(ADMIN_USER);
            client.createRole(ADMIN_USER, ROLE_NAME);

            boolean exceptionThrown = false;
            try {
                // client throws exception when message size is larger than the server's thrift max message size.
                client.grantServerPrivilege(ADMIN_USER, ROLE_NAME, "server", false);
            } catch (SentryUserException e) {
                exceptionThrown = true;
                Assert.assertTrue(
                        e.getCause().getMessage().contains("org.apache.thrift.transport.TTransportException"));
            } finally {
                Assert.assertEquals(true, exceptionThrown);
            }

            // client can still talk with sentry server when message size is smaller.
            Set<TSentryRole> roles = client.listAllRoles(ADMIN_USER);
            Assert.assertTrue(roles.size() == 1);
            Assert.assertEquals(ROLE_NAME, roles.iterator().next().getRoleName());
        }
    });
}

From source file: org.apache.sqoop.mapreduce.AccumuloImportJob.java

License: Apache License

@Override
protected void configureOutputFormat(Job job, String tableName, String tableClassName)
        throws ClassNotFoundException, IOException {

    // Use the DelegatingOutputFormat with the AccumuloMutationProcessor.
    job.setOutputFormatClass(getOutputFormatClass());

    Configuration conf = job.getConfiguration();
    conf.setClass("sqoop.output.delegate.field.map.processor.class", AccumuloMutationProcessor.class,
            FieldMapProcessor.class);

    // Set the Accumulo parameters (table, column family, row key):
    conf.set(AccumuloConstants.ZOOKEEPERS, options.getAccumuloZookeepers());
    conf.set(AccumuloConstants.ACCUMULO_INSTANCE, options.getAccumuloInstance());
    conf.set(AccumuloConstants.ACCUMULO_USER_NAME, options.getAccumuloUser());
    String pw = options.getAccumuloPassword();
    if (null == pw) {
        pw = "";
    }
    conf.set(AccumuloConstants.ACCUMULO_PASSWORD, pw);
    conf.set(AccumuloConstants.TABLE_NAME_KEY, options.getAccumuloTable());
    conf.set(AccumuloConstants.COL_FAMILY_KEY, options.getAccumuloColFamily());
    conf.setLong(AccumuloConstants.BATCH_SIZE, options.getAccumuloBatchSize());
    conf.setLong(AccumuloConstants.MAX_LATENCY, options.getAccumuloMaxLatency());

    // What column of the input becomes the row key?
    String rowKeyCol = options.getAccumuloRowKeyColumn();
    if (null == rowKeyCol) {
        // User didn't explicitly set one. If there's a split-by column set,
        // use that.
        rowKeyCol = options.getSplitByCol();
    }

    if (null == rowKeyCol) {
        // No split-by column is explicitly set.
        // If the table has a primary key, use that.
        ConnManager manager = getContext().getConnManager();
        rowKeyCol = manager.getPrimaryKey(tableName);
    }

    if (null == rowKeyCol) {
        // Give up here if this is still unset.
        throw new IOException("Could not determine the row-key column. "
                + "Use --accumulo-row-key to specify the input column that " + "names each row.");
    }

    conf.set(AccumuloConstants.ROW_KEY_COLUMN_KEY, rowKeyCol);
}

From source file: org.apache.sqoop.mapreduce.CombineFileRecordReader.java

License: Apache License

/**
 * Get the record reader for the next chunk in this CombineFileSplit.
 */
protected boolean initNextRecordReader() throws IOException {

    if (curReader != null) {
        curReader.close();
        curReader = null;
        if (idx > 0) {
            progress += split.getLength(idx - 1); // done processing so far
        }
    }

    // if all chunks have been processed, nothing more to do.
    if (idx == split.getNumPaths()) {
        return false;
    }

    // get a record reader for the idx-th chunk
    try {
        Configuration conf = context.getConfiguration();
        // setup some helper config variables.
        conf.set("map.input.file", split.getPath(idx).toString());
        conf.setLong("map.input.start", split.getOffset(idx));
        conf.setLong("map.input.length", split.getLength(idx));

        curReader = rrConstructor.newInstance(new Object[] { split, context, Integer.valueOf(idx) });

        if (idx > 0) {
            // initialize() for the first RecordReader will be called by MapTask;
            // we're responsible for initializing subsequent RecordReaders.
            curReader.initialize(split, context);
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    idx++;
    return true;
}

From source file: org.apache.tajo.conf.TajoConf.java

License: Apache License

public static void setLongVar(Configuration conf, ConfVars var, long val) {
    assert (var.valClass == Long.class);
    conf.setLong(var.varname, val);
}
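
This helper centralizes type checking for Tajo's typed configuration variables: the assert verifies that the ConfVars entry was declared with a Long value class before delegating to Configuration.setLong, so a variable registered with the wrong type fails fast in assertion-enabled runs instead of silently storing a mismatched value.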

From source file: org.apache.tajo.storage.raw.TestDirectRawFile.java

License: Apache License

@BeforeClass
public static void setUpClass() throws IOException, InterruptedException {
    final Configuration conf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, false);

    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(new HdfsConfiguration(conf));
    builder.numDataNodes(1);
    builder.format(true);
    builder.manageNameDfsDirs(true);
    builder.manageDataDfsDirs(true);
    builder.waitSafeMode(true);
    cluster = builder.build();

    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    localFs = FileSystem.getLocal(new TajoConf());
}

From source file: org.apache.tajo.storage.TestByteBufLineReader.java

License: Apache License

@Test
public void testReaderWithDFS() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitClusterUp();

    TajoConf tajoConf = new TajoConf(conf);
    tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");

    Path tablePath = new Path("/testReaderWithDFS");
    Path filePath = new Path(tablePath, "data.dat");
    try {
        DistributedFileSystem fs = cluster.getFileSystem();
        FSDataOutputStream out = fs.create(filePath, true);
        out.write(LINE.getBytes(Charset.defaultCharset()));
        out.write('\n');
        out.close();

        assertTrue(fs.exists(filePath));
        FSDataInputStream inputStream = fs.open(filePath);
        assertTrue(inputStream.getWrappedStream() instanceof ByteBufferReadable);

        ByteBufLineReader lineReader = new ByteBufLineReader(new FSDataInputChannel(inputStream));
        assertEquals(LINE, lineReader.readLine());
        lineReader.seek(0);
        assertEquals(LINE, lineReader.readLine());
        assertNull(lineReader.readLine());

        lineReader.close();
        fs.close();
    } finally {
        cluster.shutdown(true);
    }
}

From source file: org.apache.tajo.storage.TestFileStorageManager.java

License: Apache License

@Test
public void testGetSplit() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, false);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitClusterUp();
    TajoConf tajoConf = new TajoConf(conf);
    tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");

    int testCount = 10;
    Path tablePath = new Path("/testGetSplit");
    try {
        DistributedFileSystem fs = cluster.getFileSystem();

        // Create test partitions
        List<Path> partitions = Lists.newArrayList();
        for (int i = 0; i < testCount; i++) {
            Path tmpFile = new Path(tablePath, String.valueOf(i));
            DFSTestUtil.createFile(fs, new Path(tmpFile, "tmpfile.dat"), 10, (short) 2, 0xDEADDEADL);
            partitions.add(tmpFile);
        }

        assertTrue(fs.exists(tablePath));
        FileStorageManager sm = (FileStorageManager) StorageManager.getFileStorageManager(tajoConf);
        assertEquals(fs.getUri(), sm.getFileSystem().getUri());

        Schema schema = new Schema();
        schema.addColumn("id", Type.INT4);
        schema.addColumn("age", Type.INT4);
        schema.addColumn("name", Type.TEXT);
        TableMeta meta = CatalogUtil.newTableMeta(StoreType.CSV);

        List<Fragment> splits = Lists.newArrayList();
        // Get FileFragments in partition batch
        splits.addAll(sm.getSplits("data", meta, schema, partitions.toArray(new Path[partitions.size()])));
        assertEquals(testCount, splits.size());
        // -1 is unknown volumeId
        assertEquals(-1, ((FileFragment) splits.get(0)).getDiskIds()[0]);

        splits.clear();
        splits.addAll(sm.getSplits("data", meta, schema,
                partitions.subList(0, partitions.size() / 2).toArray(new Path[partitions.size() / 2])));
        assertEquals(testCount / 2, splits.size());
        assertEquals(1, splits.get(0).getHosts().length);
        assertEquals(-1, ((FileFragment) splits.get(0)).getDiskIds()[0]);
        fs.close();
    } finally {
        cluster.shutdown(true);
    }
}