Example usage for org.apache.hadoop.conf Configuration setLong

List of usage examples for org.apache.hadoop.conf Configuration setLong

Introduction

On this page you can find usage examples for org.apache.hadoop.conf.Configuration setLong.

Prototype

public void setLong(String name, long value) 

Document

Set the value of the name property to a long.
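
As a minimal, self-contained sketch (the property name below is a hypothetical example, not a real Hadoop key): setLong stores the value in its string form, and the matching getLong(name, defaultValue) reads it back.

import org.apache.hadoop.conf.Configuration;

public class SetLongExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Store a long-valued property; Configuration keeps it as its string representation.
        conf.setLong("example.block.size", 128L * 1024 * 1024);
        // Read it back; the second argument is the default returned when the property is unset.
        long blockSize = conf.getLong("example.block.size", 64L * 1024 * 1024);
        System.out.println("block size = " + blockSize);
    }
}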

Usage

From source file:org.apache.tajo.storage.TestFileStorageManager.java

License:Apache License

@Test
public void testGetSplitWithBlockStorageLocationsBatching() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitClusterUp();

    TajoConf tajoConf = new TajoConf(conf);
    tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");

    int testCount = 10;
    Path tablePath = new Path("/testGetSplitWithBlockStorageLocationsBatching");
    try {
        DistributedFileSystem fs = cluster.getFileSystem();

        // Create test files
        for (int i = 0; i < testCount; i++) {
            Path tmpFile = new Path(tablePath, "tmpfile" + i + ".dat");
            DFSTestUtil.createFile(fs, tmpFile, 10, (short) 2, 0xDEADDEADL);
        }
        assertTrue(fs.exists(tablePath));
        FileStorageManager sm = (FileStorageManager) StorageManager.getFileStorageManager(tajoConf);
        assertEquals(fs.getUri(), sm.getFileSystem().getUri());

        Schema schema = new Schema();
        schema.addColumn("id", Type.INT4);
        schema.addColumn("age", Type.INT4);
        schema.addColumn("name", Type.TEXT);
        TableMeta meta = CatalogUtil.newTableMeta(StoreType.CSV);

        List<Fragment> splits = Lists.newArrayList();
        splits.addAll(sm.getSplits("data", meta, schema, tablePath));

        assertEquals(testCount, splits.size());
        assertEquals(2, splits.get(0).getHosts().length);
        assertEquals(2, ((FileFragment) splits.get(0)).getDiskIds().length);
        assertNotEquals(-1, ((FileFragment) splits.get(0)).getDiskIds()[0]);
        fs.close();
    } finally {
        cluster.shutdown(true);
    }
}

From source file:org.apache.tajo.storage.TestFileStorageManager.java

License:Apache License

@Test
public void testStoreType() throws Exception {
    final Configuration hdfsConf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    hdfsConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    hdfsConf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    hdfsConf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(hdfsConf).numDataNodes(2).build();
    cluster.waitClusterUp();

    TajoConf tajoConf = new TajoConf(hdfsConf);
    tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");

    try {
        /* Local FileSystem */
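        // Note: 'conf' and 'fs' below are class-level fields of the test
        // (presumably a local TajoConf and FileSystem); they are not shown in this snippet.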
        FileStorageManager sm = (FileStorageManager) StorageManager.getStorageManager(conf, StoreType.CSV);
        assertEquals(fs.getUri(), sm.getFileSystem().getUri());

        /* Distributed FileSystem */
        sm = (FileStorageManager) StorageManager.getStorageManager(tajoConf, StoreType.CSV);
        assertNotEquals(fs.getUri(), sm.getFileSystem().getUri());
        assertEquals(cluster.getFileSystem().getUri(), sm.getFileSystem().getUri());
    } finally {
        cluster.shutdown(true);
    }
}

From source file:org.apache.tajo.storage.TestFileTablespace.java

License:Apache License

@Test
public void testGetSplit() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, false);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitClusterUp();
    TajoConf tajoConf = new TajoConf(conf);
    tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");

    int testCount = 10;
    Path tablePath = new Path("/testGetSplit");
    try {
        DistributedFileSystem fs = cluster.getFileSystem();

        // Create test partitions
        List<Path> partitions = Lists.newArrayList();
        for (int i = 0; i < testCount; i++) {
            Path tmpFile = new Path(tablePath, String.valueOf(i));
            DFSTestUtil.createFile(fs, new Path(tmpFile, "tmpfile.dat"), 10, (short) 2, 0xDEADDEADL);
            partitions.add(tmpFile);
        }

        assertTrue(fs.exists(tablePath));
        FileTablespace space = new FileTablespace("testGetSplit", fs.getUri());
        space.init(new TajoConf(conf));
        assertEquals(fs.getUri(), space.getUri());

        Schema schema = new Schema();
        schema.addColumn("id", Type.INT4);
        schema.addColumn("age", Type.INT4);
        schema.addColumn("name", Type.TEXT);
        TableMeta meta = CatalogUtil.newTableMeta("TEXT");

        List<Fragment> splits = Lists.newArrayList();
        // Get FileFragments in partition batch
        splits.addAll(space.getSplits("data", meta, schema, partitions.toArray(new Path[partitions.size()])));
        assertEquals(testCount, splits.size());
        // -1 is unknown volumeId
        assertEquals(-1, ((FileFragment) splits.get(0)).getDiskIds()[0]);

        splits.clear();
        splits.addAll(space.getSplits("data", meta, schema,
                partitions.subList(0, partitions.size() / 2).toArray(new Path[partitions.size() / 2])));
        assertEquals(testCount / 2, splits.size());
        assertEquals(1, splits.get(0).getHosts().length);
        assertEquals(-1, ((FileFragment) splits.get(0)).getDiskIds()[0]);
        fs.close();
    } finally {
        cluster.shutdown(true);
    }
}

From source file:org.apache.tajo.storage.TestFileTablespace.java

License:Apache License

@Test
public void testGetSplitWithBlockStorageLocationsBatching() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitClusterUp();

    TajoConf tajoConf = new TajoConf(conf);
    tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");

    int testCount = 10;
    Path tablePath = new Path("/testGetSplitWithBlockStorageLocationsBatching");
    try {
        DistributedFileSystem fs = cluster.getFileSystem();

        // Create test files
        for (int i = 0; i < testCount; i++) {
            Path tmpFile = new Path(tablePath, "tmpfile" + i + ".dat");
            DFSTestUtil.createFile(fs, tmpFile, 10, (short) 2, 0xDEADDEADL);
        }
        assertTrue(fs.exists(tablePath));

        FileTablespace sm = new FileTablespace("testGetSplitWithBlockStorageLocationsBatching", fs.getUri());
        sm.init(new TajoConf(conf));

        assertEquals(fs.getUri(), sm.getUri());

        Schema schema = new Schema();
        schema.addColumn("id", Type.INT4);
        schema.addColumn("age", Type.INT4);
        schema.addColumn("name", Type.TEXT);
        TableMeta meta = CatalogUtil.newTableMeta("TEXT");

        List<Fragment> splits = Lists.newArrayList();
        splits.addAll(sm.getSplits("data", meta, schema, tablePath));

        assertEquals(testCount, splits.size());
        assertEquals(2, splits.get(0).getHosts().length);
        assertEquals(2, ((FileFragment) splits.get(0)).getDiskIds().length);
        assertNotEquals(-1, ((FileFragment) splits.get(0)).getDiskIds()[0]);
        fs.close();
    } finally {
        cluster.shutdown(true);
    }
}

From source file:org.apache.tajo.storage.TestFileTablespace.java

License:Apache License

@Test
public void testGetFileTablespace() throws Exception {
    final Configuration hdfsConf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    hdfsConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    hdfsConf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    hdfsConf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(hdfsConf).numDataNodes(2).build();
    cluster.waitClusterUp();
    URI uri = URI.create(cluster.getFileSystem().getUri() + "/tajo");

    Optional<Tablespace> existingTs = Optional.absent();
    try {
        /* Local FileSystem */
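        // Note: 'localFs' and 'conf' are class-level fields of the test
        // (presumably a local FileSystem and TajoConf); they are not shown in this snippet.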
        FileTablespace space = TablespaceManager.getLocalFs();
        assertEquals(localFs.getUri(), space.getFileSystem().getUri());

        FileTablespace distTablespace = new FileTablespace("testGetFileTablespace", uri);
        distTablespace.init(conf);
        existingTs = TablespaceManager.addTableSpaceForTest(distTablespace);

        /* Distributed FileSystem */
        space = (FileTablespace) TablespaceManager.get(uri).get();
        assertEquals(cluster.getFileSystem().getUri(), space.getFileSystem().getUri());

        space = (FileTablespace) TablespaceManager.getByName("testGetFileTablespace").get();
        assertEquals(cluster.getFileSystem().getUri(), space.getFileSystem().getUri());

    } finally {

        if (existingTs.isPresent()) {
            TablespaceManager.addTableSpaceForTest(existingTs.get());
        }

        cluster.shutdown(true);
    }
}

From source file:org.apache.tajo.storage.TestStorageManager.java

License:Apache License

@Test
public void testGetSplit() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, false);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();

    int testCount = 10;
    Path tablePath = new Path("/testGetSplit");
    try {
        DistributedFileSystem fs = cluster.getFileSystem();

        // Create test partitions
        List<Path> partitions = Lists.newArrayList();
        for (int i = 0; i < testCount; i++) {
            Path tmpFile = new Path(tablePath, String.valueOf(i));
            DFSTestUtil.createFile(fs, new Path(tmpFile, "tmpfile.dat"), 10, (short) 2, 0xDEADDEADL);
            partitions.add(tmpFile);
        }

        assertTrue(fs.exists(tablePath));
        StorageManager sm = StorageManager.getStorageManager(new TajoConf(conf), tablePath);

        Schema schema = new Schema();
        schema.addColumn("id", Type.INT4);
        schema.addColumn("age", Type.INT4);
        schema.addColumn("name", Type.TEXT);
        TableMeta meta = CatalogUtil.newTableMeta(StoreType.CSV);

        List<FileFragment> splits = Lists.newArrayList();
        // Get FileFragments in partition batch
        splits.addAll(sm.getSplits("data", meta, schema, partitions.toArray(new Path[partitions.size()])));
        assertEquals(testCount, splits.size());
        // -1 is unknown volumeId
        assertEquals(-1, splits.get(0).getDiskIds()[0]);

        splits.clear();
        splits.addAll(sm.getSplits("data", meta, schema,
                partitions.subList(0, partitions.size() / 2).toArray(new Path[partitions.size() / 2])));
        assertEquals(testCount / 2, splits.size());
        assertEquals(1, splits.get(0).getHosts().length);
        assertEquals(-1, splits.get(0).getDiskIds()[0]);
        fs.close();
    } finally {
        cluster.shutdown();

        File dir = new File(testDataPath);
        dir.delete();
    }
}

From source file:org.apache.tajo.storage.TestStorageManager.java

License:Apache License

@Test
public void testGetSplitWithBlockStorageLocationsBatching() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();

    int testCount = 10;
    Path tablePath = new Path("/testGetSplitWithBlockStorageLocationsBatching");
    try {
        DistributedFileSystem fs = cluster.getFileSystem();

        // Create test files
        for (int i = 0; i < testCount; i++) {
            Path tmpFile = new Path(tablePath, "tmpfile" + i + ".dat");
            DFSTestUtil.createFile(fs, tmpFile, 10, (short) 2, 0xDEADDEADL);
        }
        assertTrue(fs.exists(tablePath));
        StorageManager sm = StorageManager.getStorageManager(new TajoConf(conf), tablePath);

        Schema schema = new Schema();
        schema.addColumn("id", Type.INT4);
        schema.addColumn("age", Type.INT4);
        schema.addColumn("name", Type.TEXT);
        TableMeta meta = CatalogUtil.newTableMeta(StoreType.CSV);

        List<FileFragment> splits = Lists.newArrayList();
        splits.addAll(sm.getSplits("data", meta, schema, tablePath));

        assertEquals(testCount, splits.size());
        assertEquals(2, splits.get(0).getHosts().length);
        assertEquals(2, splits.get(0).getDiskIds().length);
        assertNotEquals(-1, splits.get(0).getDiskIds()[0]);
        fs.close();
    } finally {
        cluster.shutdown();

        File dir = new File(testDataPath);
        dir.delete();
    }
}

From source file:org.apache.tephra.distributed.ThriftTransactionServerTest.java

License:Apache License

@Before
public void start() throws Exception {
    zkServer = InMemoryZKServer.builder().setDataDir(tmpFolder.newFolder()).build();
    zkServer.startAndWait();

    Configuration conf = new Configuration();
    conf.setBoolean(TxConstants.Manager.CFG_DO_PERSIST, false);
    conf.set(TxConstants.Service.CFG_DATA_TX_ZOOKEEPER_QUORUM, zkServer.getConnectionStr());
    conf.set(TxConstants.Service.CFG_DATA_TX_CLIENT_RETRY_STRATEGY, "n-times");
    conf.setInt(TxConstants.Service.CFG_DATA_TX_CLIENT_ATTEMPTS, 1);
    conf.setInt(TxConstants.Service.CFG_DATA_TX_CLIENT_COUNT, NUM_CLIENTS);
    conf.setLong(TxConstants.Service.CFG_DATA_TX_CLIENT_TIMEOUT, TimeUnit.HOURS.toMillis(1));
    conf.setInt(TxConstants.Service.CFG_DATA_TX_SERVER_IO_THREADS, 2);
    conf.setInt(TxConstants.Service.CFG_DATA_TX_SERVER_THREADS, 4);
    conf.setInt(TxConstants.HBase.ZK_SESSION_TIMEOUT, 10000);

    injector = Guice.createInjector(new ConfigModule(conf), new ZKModule(),
            new DiscoveryModules().getDistributedModules(),
            Modules.override(new TransactionModules().getDistributedModules()).with(new AbstractModule() {
                @Override
                protected void configure() {
                    bind(TransactionStateStorage.class).to(SlowTransactionStorage.class).in(Scopes.SINGLETON);
                    // overriding this to make it non-singleton
                    bind(TransactionSystemClient.class).to(TransactionServiceClient.class);
                }
            }), new TransactionClientModule());

    zkClientService = injector.getInstance(ZKClientService.class);
    zkClientService.startAndWait();

    // start a tx server
    txService = injector.getInstance(TransactionService.class);
    storage = injector.getInstance(TransactionStateStorage.class);
    try {
        LOG.info("Starting transaction service");
        txService.startAndWait();
    } catch (Exception e) {
        LOG.error("Failed to start service: ", e);
        throw e;
    }

    Tests.waitForTxReady(injector.getInstance(TransactionSystemClient.class));

    getClient().resetState();

    storageWaitLatch = new CountDownLatch(1);
    clientsDoneLatch = new CountDownLatch(NUM_CLIENTS);
}

From source file:org.apache.tephra.txprune.TransactionPruningServiceTest.java

License:Apache License

@Test
public void testTransactionPruningService() throws Exception {
    // Setup plugins
    Configuration conf = new Configuration();
    conf.set(TxConstants.TransactionPruning.PLUGINS,
            "data.tx.txprune.plugin.mockPlugin1, data.tx.txprune.plugin.mockPlugin2");
    conf.set("data.tx.txprune.plugin.mockPlugin1.class",
            "org.apache.tephra.txprune.TransactionPruningServiceTest$MockPlugin1");
    conf.set("data.tx.txprune.plugin.mockPlugin2.class",
            "org.apache.tephra.txprune.TransactionPruningServiceTest$MockPlugin2");
    // Setup schedule to run every second
    conf.setBoolean(TxConstants.TransactionPruning.PRUNE_ENABLE, true);
    conf.setInt(TxConstants.Manager.CFG_TX_MAX_LIFETIME, 10);
    conf.setLong(TxConstants.TransactionPruning.PRUNE_GRACE_PERIOD, 0);

    // Setup mock data
    long m = 1000;
    long n = m * TxConstants.MAX_TX_PER_MS;
    // Current time to be returned
    Iterator<Long> currentTime = Iterators.cycle(120L * m, 220L * m);
    // Transaction objects to be returned by mock tx manager
    Iterator<Transaction> txns = Iterators.cycle(
            new Transaction(100 * n, 110 * n, new long[] { 40 * n, 50 * n, 60 * n, 70 * n },
                    new long[] { 80 * n, 90 * n }, 80 * n),
            new Transaction(200 * n, 210 * n, new long[] { 60 * n, 75 * n, 78 * n, 100 * n, 110 * n, 120 * n },
                    new long[] { 80 * n, 90 * n }, 80 * n));
    // Prune upper bounds to be returned by the mock plugins
    Iterator<Long> pruneUpperBoundsPlugin1 = Iterators.cycle(60L * n, 80L * n);
    Iterator<Long> pruneUpperBoundsPlugin2 = Iterators.cycle(70L * n, 77L * n);

    TestTransactionPruningRunnable.setCurrentTime(currentTime);
    MockTxManager.setTxIter(txns);
    MockPlugin1.setPruneUpperBoundIter(pruneUpperBoundsPlugin1);
    MockPlugin2.setPruneUpperBoundIter(pruneUpperBoundsPlugin2);

    MockTxManager mockTxManager = new MockTxManager(conf);
    TransactionPruningService pruningService = new TestTransactionPruningService(conf, mockTxManager);
    pruningService.startAndWait();
    // This will cause the pruning run to happen three times,
    // but we are interested in only first two runs for the assertions later
    int pruneRuns = TestTransactionPruningRunnable.getRuns();
    pruningService.pruneNow();
    pruningService.pruneNow();
    pruningService.pruneNow();
    TestTransactionPruningRunnable.waitForRuns(pruneRuns + 3, 5, TimeUnit.MILLISECONDS);
    pruningService.stopAndWait();

    // Assert inactive transaction bound that the plugins receive.
    // Both the plugins should get the same inactive transaction bound since it is
    // computed and passed by the transaction service
    Assert.assertEquals(ImmutableList.of(110L * n - 1, 210L * n - 1),
            limitTwo(MockPlugin1.getInactiveTransactionBoundList()));
    Assert.assertEquals(ImmutableList.of(110L * n - 1, 210L * n - 1),
            limitTwo(MockPlugin2.getInactiveTransactionBoundList()));

    // Assert invalid list entries that got pruned
    // The min prune upper bound for the first run should be 60, and for the second run 77
    Assert.assertEquals(
            ImmutableList.of(ImmutableSet.of(40L * n, 50L * n, 60L * n), ImmutableSet.of(60L * n, 75L * n)),
            limitTwo(MockTxManager.getPrunedInvalidsList()));

    // Assert max invalid tx pruned that the plugins receive for the prune complete call
    // Both the plugins should get the same max invalid tx pruned value since it is
    // computed and passed by the transaction service
    Assert.assertEquals(ImmutableList.of(60L * n, 75L * n), limitTwo(MockPlugin1.getMaxPrunedInvalidList()));
    Assert.assertEquals(ImmutableList.of(60L * n, 75L * n), limitTwo(MockPlugin2.getMaxPrunedInvalidList()));
}

From source file:org.apache.tephra.txprune.TransactionPruningServiceTest.java

License:Apache License

@Test
public void testNoPruning() throws Exception {
    // Setup plugins
    Configuration conf = new Configuration();
    conf.set(TxConstants.TransactionPruning.PLUGINS,
            "data.tx.txprune.plugin.mockPlugin1, data.tx.txprune.plugin.mockPlugin2");
    conf.set("data.tx.txprune.plugin.mockPlugin1.class",
            "org.apache.tephra.txprune.TransactionPruningServiceTest$MockPlugin1");
    conf.set("data.tx.txprune.plugin.mockPlugin2.class",
            "org.apache.tephra.txprune.TransactionPruningServiceTest$MockPlugin2");
    // Setup schedule to run every second
    conf.setBoolean(TxConstants.TransactionPruning.PRUNE_ENABLE, true);
    conf.setInt(TxConstants.Manager.CFG_TX_MAX_LIFETIME, 10);
    conf.setLong(TxConstants.TransactionPruning.PRUNE_GRACE_PERIOD, 0);

    // Setup mock data
    long m = 1000;
    long n = m * TxConstants.MAX_TX_PER_MS;
    // Current time to be returned
    Iterator<Long> currentTime = Iterators.cycle(120L * m, 220L * m);
    // Transaction objects to be returned by mock tx manager
    Iterator<Transaction> txns = Iterators.cycle(
            new Transaction(100 * n, 110 * n, new long[] { 40 * n, 50 * n, 60 * n, 70 * n },
                    new long[] { 80 * n, 90 * n }, 80 * n),
            new Transaction(200 * n, 210 * n, new long[] { 60 * n, 75 * n, 78 * n, 100 * n, 110 * n, 120 * n },
                    new long[] { 80 * n, 90 * n }, 80 * n));
    // Prune upper bounds to be returned by the mock plugins
    Iterator<Long> pruneUpperBoundsPlugin1 = Iterators.cycle(35L * n, -1L);
    Iterator<Long> pruneUpperBoundsPlugin2 = Iterators.cycle(70L * n, 100L * n);

    TestTransactionPruningRunnable.setCurrentTime(currentTime);
    MockTxManager.setTxIter(txns);
    MockPlugin1.setPruneUpperBoundIter(pruneUpperBoundsPlugin1);
    MockPlugin2.setPruneUpperBoundIter(pruneUpperBoundsPlugin2);

    MockTxManager mockTxManager = new MockTxManager(conf);
    TransactionPruningService pruningService = new TestTransactionPruningService(conf, mockTxManager);
    pruningService.startAndWait();
    // This will cause the pruning run to happen three times,
    // but we are interested in only first two runs for the assertions later
    int pruneRuns = TestTransactionPruningRunnable.getRuns();
    pruningService.pruneNow();
    pruningService.pruneNow();
    pruningService.pruneNow();
    TestTransactionPruningRunnable.waitForRuns(pruneRuns + 3, 5, TimeUnit.MILLISECONDS);
    pruningService.stopAndWait();

    // Assert inactive transaction bound
    Assert.assertEquals(ImmutableList.of(110L * n - 1, 210L * n - 1),
            limitTwo(MockPlugin1.getInactiveTransactionBoundList()));
    Assert.assertEquals(ImmutableList.of(110L * n - 1, 210L * n - 1),
            limitTwo(MockPlugin2.getInactiveTransactionBoundList()));

    // Invalid entries should not be pruned in any run
    Assert.assertEquals(ImmutableList.of(), MockTxManager.getPrunedInvalidsList());

    // No max invalid tx pruned for any run
    Assert.assertEquals(ImmutableList.of(), limitTwo(MockPlugin1.getMaxPrunedInvalidList()));
    Assert.assertEquals(ImmutableList.of(), limitTwo(MockPlugin2.getMaxPrunedInvalidList()));
}