Example usage for org.apache.hadoop.conf Configuration set

Introduction

On this page you can find example usages of org.apache.hadoop.conf.Configuration.set.

Prototype

public void set(String name, String value) 

Document

Set the value of the name property.
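
For quick reference, here is a minimal sketch pairing set with its read counterpart get (the property name and values are hypothetical):

// import org.apache.hadoop.conf.Configuration;
Configuration conf = new Configuration();
conf.set("my.example.property", "some-value"); // store the value under the given name
String value = conf.get("my.example.property"); // returns "some-value"
String fallback = conf.get("missing.property", "default"); // returns "default" when the property is unset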

Usage

From source file: co.cask.cdap.data.startup.TransactionServiceCheckWithHConfTest.java

License: Apache License

protected void run(CConfiguration cConf) throws Exception {
    Configuration hConf = new Configuration();
    for (Map.Entry<String, String> entry : cConf) {
        hConf.set(entry.getKey(), entry.getValue());
    }
    new TransactionServiceCheck(hConf).run();
}

From source file: co.cask.cdap.data.stream.AbstractStreamInputFormat.java

License: Apache License

/**
 * Sets the stream id of the stream.
 *
 * @param conf The conf to modify.
 * @param streamId {@link StreamId} id of the stream.
 */
public static void setStreamId(Configuration conf, StreamId streamId) {
    conf.set(STREAM_ID, GSON.toJson(streamId));
}

From source file: co.cask.cdap.data.stream.AbstractStreamInputFormat.java

License: Apache License

/**
 * Sets the base path to stream files.
 *
 * @param conf The conf to modify.
 * @param path The file path to stream base directory.
 */
public static void setStreamPath(Configuration conf, URI path) {
    conf.set(STREAM_PATH, path.toString());
}

From source file: co.cask.cdap.data.stream.AbstractStreamInputFormat.java

License: Apache License

/**
 * Sets the class name for the {@link StreamEventDecoder}.
 *
 * @param conf The conf to modify.
 * @param decoderClassName Class name of the decoder class
 */
public static void setDecoderClassName(Configuration conf, String decoderClassName) {
    conf.set(DECODER_TYPE, decoderClassName);
}

From source file: co.cask.cdap.data.stream.AbstractStreamInputFormat.java

License: Apache License

/**
 * Set the format specification for reading the body of stream events. Will also set the decoder class appropriately.
 *
 * @param conf The job configuration.
 * @param formatSpecification Format specification for reading the body of stream events.
 */
public static void setBodyFormatSpecification(Configuration conf, FormatSpecification formatSpecification) {
    conf.set(BODY_FORMAT, GSON.toJson(formatSpecification));
    setDecoderClassName(conf, FormatStreamEventDecoder.class.getName());
}
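
Taken together, the setters above prepare a job configuration for reading stream events. A hypothetical caller might look like the sketch below (the Job setup and the streamId, streamBaseUri, and formatSpec values are assumptions, not taken from the source file):

Job job = Job.getInstance(new Configuration());
Configuration conf = job.getConfiguration();
AbstractStreamInputFormat.setStreamId(conf, streamId); // which stream to read
AbstractStreamInputFormat.setStreamPath(conf, streamBaseUri); // where the stream files live
// also switches the decoder to FormatStreamEventDecoder, as shown above
AbstractStreamInputFormat.setBodyFormatSpecification(conf, formatSpec);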

From source file: co.cask.cdap.data.stream.DFSStreamFileJanitorTest.java

License: Apache License

@BeforeClass
public static void init() throws IOException {

    Configuration hConf = new Configuration();
    hConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tmpFolder.newFolder().getAbsolutePath());
    dfsCluster = new MiniDFSCluster.Builder(hConf).numDataNodes(1).build();
    dfsCluster.waitClusterUp();
    final LocationFactory lf = new FileContextLocationFactory(dfsCluster.getFileSystem().getConf());
    final NamespacedLocationFactory nlf = new DefaultNamespacedLocationFactory(cConf, lf);

    Injector injector = Guice.createInjector(new ConfigModule(cConf, hConf), new ZKClientModule(),
            new AbstractModule() {
                @Override
                protected void configure() {
                    bind(LocationFactory.class).toInstance(lf);
                    bind(NamespacedLocationFactory.class).toInstance(nlf);
                }
            }, new TransactionMetricsModule(), new DiscoveryRuntimeModule().getInMemoryModules(),
            new DataFabricModules().getDistributedModules(),
            Modules.override(new DataSetsModules().getDistributedModules()).with(new AbstractModule() {
                @Override
                protected void configure() {
                    bind(MetadataStore.class).to(NoOpMetadataStore.class);
                }
            }), new ExploreClientModule(), new ViewAdminModules().getInMemoryModules(),
            Modules.override(new StreamAdminModules().getDistributedModules()).with(new AbstractModule() {

                @Override
                protected void configure() {
                    // Tests run in the same process, hence there is no need for ZK to coordinate
                    bind(StreamCoordinatorClient.class).to(InMemoryStreamCoordinatorClient.class)
                            .in(Scopes.SINGLETON);
                    bind(StreamMetaStore.class).to(InMemoryStreamMetaStore.class);
                }
            }), new AbstractModule() {
                @Override
                protected void configure() {
                    // We don't need notification in this test, hence inject a no-op one
                    bind(NotificationFeedManager.class).to(NoOpNotificationFeedManager.class);
                    bind(NamespaceStore.class).to(InMemoryNamespaceStore.class);
                }
            });

    locationFactory = injector.getInstance(LocationFactory.class);
    namespacedLocationFactory = injector.getInstance(NamespacedLocationFactory.class);
    namespaceStore = injector.getInstance(NamespaceStore.class);
    streamAdmin = injector.getInstance(StreamAdmin.class);
    fileWriterFactory = injector.getInstance(StreamFileWriterFactory.class);
    streamCoordinatorClient = injector.getInstance(StreamCoordinatorClient.class);
    streamCoordinatorClient.startAndWait();
}

From source file: co.cask.cdap.data.stream.DFSTimePartitionedStreamTest.java

License: Apache License

@BeforeClass
public static void init() throws IOException {
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tmpFolder.newFolder().getAbsolutePath());
    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    dfsCluster.waitClusterUp();
    locationFactory = new FileContextLocationFactory(dfsCluster.getFileSystem().getConf());
}

From source file: co.cask.cdap.data.stream.DistributedStreamCoordinatorClientTest.java

License: Apache License

@BeforeClass
public static void init() throws IOException {
    zkServer = InMemoryZKServer.builder().setDataDir(tmpFolder.newFolder()).build();
    zkServer.startAndWait();

    Configuration hConf = new Configuration();
    hConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tmpFolder.newFolder().getAbsolutePath());
    dfsCluster = new MiniDFSCluster.Builder(hConf).numDataNodes(1).build();
    dfsCluster.waitClusterUp();
    final LocationFactory lf = new FileContextLocationFactory(dfsCluster.getFileSystem().getConf());
    final NamespacedLocationFactory nlf = new DefaultNamespacedLocationFactory(cConf, lf);

    cConf.set(Constants.Zookeeper.QUORUM, zkServer.getConnectionStr());

    Injector injector = Guice.createInjector(new ConfigModule(cConf), new ZKClientModule(),
            new DiscoveryRuntimeModule().getDistributedModules(),
            new DataFabricModules().getDistributedModules(),
            Modules.override(new DataSetsModules().getDistributedModules()).with(new AbstractModule() {
                @Override
                protected void configure() {
                    bind(MetadataStore.class).to(NoOpMetadataStore.class);
                }
            }), new TransactionMetricsModule(), new NotificationFeedServiceRuntimeModule().getInMemoryModules(),
            new AbstractModule() {
                @Override
                protected void configure() {
                    bind(LocationFactory.class).toInstance(lf);
                    bind(NamespacedLocationFactory.class).toInstance(nlf);
                }
            }, new ExploreClientModule(), new ViewAdminModules().getInMemoryModules(),
            Modules.override(new StreamAdminModules().getDistributedModules()).with(new AbstractModule() {
                @Override
                protected void configure() {
                    bind(StreamMetaStore.class).to(InMemoryStreamMetaStore.class);
                }
            }));

    zkClient = injector.getInstance(ZKClientService.class);
    zkClient.startAndWait();

    setupNamespaces(injector.getInstance(NamespacedLocationFactory.class));
    streamAdmin = injector.getInstance(StreamAdmin.class);
    coordinatorClient = injector.getInstance(StreamCoordinatorClient.class);
    coordinatorClient.startAndWait();
}

From source file: co.cask.cdap.data.stream.service.DFSConcurrentStreamWriterTest.java

License: Apache License

@BeforeClass
public static void init() throws IOException {
    Configuration hConf = new Configuration();
    hConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TMP_FOLDER.newFolder().getAbsolutePath());
    dfsCluster = new MiniDFSCluster.Builder(hConf).numDataNodes(1).build();
    dfsCluster.waitClusterUp();
    LocationFactory locationFactory = new FileContextLocationFactory(dfsCluster.getFileSystem().getConf());
    namespacedLocationFactory = new DefaultNamespacedLocationFactory(CConfiguration.create(), locationFactory);
}
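
The init methods in these tests start a MiniDFSCluster, but the listings omit the matching teardown. A typical JUnit 4 counterpart (a sketch, not taken from the source files) would be:

@AfterClass
public static void finish() {
    dfsCluster.shutdown();
}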

From source file: co.cask.cdap.data2.transaction.snapshot.SnapshotCodecCompatibilityTest.java

License: Apache License

/**
 * In-progress LONG transactions written with DefaultSnapshotCodec will not have the type serialized as part of
 * the data.  Since these transactions also contain a non-negative expiration, we need to ensure we reset the type
 * correctly when the snapshot is loaded.
 */
@Test
public void testV2ToTephraV3Compatibility() throws Exception {
    long now = System.currentTimeMillis();
    long nowWritePointer = now * TxConstants.MAX_TX_PER_MS;
    /*
     * Snapshot consisting of transactions at:
     */
    long tInvalid = nowWritePointer - 5; // t1 - invalid
    long readPtr = nowWritePointer - 4; // t2 - here and earlier committed
    long tLong = nowWritePointer - 3; // t3 - in-progress LONG
    long tCommitted = nowWritePointer - 2; // t4 - committed, changeset (r1, r2)
    long tShort = nowWritePointer - 1; // t5 - in-progress SHORT, canCommit called, changeset (r3, r4)

    TreeMap<Long, TransactionManager.InProgressTx> inProgress = Maps.newTreeMap(ImmutableSortedMap.of(tLong,
            new TransactionManager.InProgressTx(readPtr,
                    TransactionManager.getTxExpirationFromWritePointer(tLong,
                            TxConstants.Manager.DEFAULT_TX_LONG_TIMEOUT),
                    TransactionType.LONG),
            tShort, new TransactionManager.InProgressTx(readPtr, now + 1000, TransactionType.SHORT)));

    TransactionSnapshot snapshot = new TransactionSnapshot(now, readPtr, nowWritePointer,
            Lists.newArrayList(tInvalid), // invalid
            inProgress,
            ImmutableMap.<Long, Set<ChangeId>>of(tShort,
                    Sets.newHashSet(new ChangeId(new byte[] { 'r', '3' }),
                            new ChangeId(new byte[] { 'r', '4' }))),
            ImmutableMap.<Long, Set<ChangeId>>of(tCommitted, Sets
                    .newHashSet(new ChangeId(new byte[] { 'r', '1' }), new ChangeId(new byte[] { 'r', '2' }))));

    Configuration conf1 = new Configuration();
    conf1.set(TxConstants.Persist.CFG_TX_SNAPHOT_CODEC_CLASSES, SnapshotCodecV2.class.getName());
    SnapshotCodecProvider provider1 = new SnapshotCodecProvider(conf1);

    ByteArrayOutputStream out = new ByteArrayOutputStream();
    try {
        provider1.encode(out, snapshot);
    } finally {
        out.close();
    }

    TransactionSnapshot snapshot2 = provider1.decode(new ByteArrayInputStream(out.toByteArray()));
    assertEquals(snapshot.getReadPointer(), snapshot2.getReadPointer());
    assertEquals(snapshot.getWritePointer(), snapshot2.getWritePointer());
    assertEquals(snapshot.getInvalid(), snapshot2.getInvalid());
    // in-progress transactions will have missing types
    assertNotEquals(snapshot.getInProgress(), snapshot2.getInProgress());
    assertEquals(snapshot.getCommittingChangeSets(), snapshot2.getCommittingChangeSets());
    assertEquals(snapshot.getCommittedChangeSets(), snapshot2.getCommittedChangeSets());

    // after fixing in-progress, full snapshot should match
    Map<Long, TransactionManager.InProgressTx> fixedInProgress = TransactionManager.txnBackwardsCompatCheck(
            TxConstants.Manager.DEFAULT_TX_LONG_TIMEOUT, 10000L, snapshot2.getInProgress());
    assertEquals(snapshot.getInProgress(), fixedInProgress);
    assertEquals(snapshot, snapshot2);
}