Example usage for org.apache.hadoop.fs FileSystem newInstance

Introduction

This page collects example usages of org.apache.hadoop.fs.FileSystem#newInstance from several open-source projects.

Prototype

public static FileSystem newInstance(URI uri, Configuration config) throws IOException 

Document

Returns the FileSystem for this URI's scheme and authority.
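
Before the project examples, a minimal sketch of calling the method directly (the URI and path are placeholders, not taken from any example below): unlike FileSystem.get, newInstance always bypasses the FileSystem cache and returns a fresh instance, so the caller owns it and should close it.

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class NewInstanceExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // newInstance() bypasses the FileSystem cache, so this instance is
        // private to the caller and must be closed explicitly.
        try (FileSystem fs = FileSystem.newInstance(URI.create("hdfs://namenode:8020/"), conf)) {
            System.out.println("/tmp exists: " + fs.exists(new Path("/tmp")));
        }
    }
}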

Usage

From source file:org.apache.druid.storage.hdfs.HdfsDataSegmentPusher.java

License:Apache License

@Inject
public HdfsDataSegmentPusher(HdfsDataSegmentPusherConfig config, Configuration hadoopConfig,
        ObjectMapper jsonMapper) {
    this.hadoopConfig = hadoopConfig;
    this.jsonMapper = jsonMapper;
    Path storageDir = new Path(config.getStorageDirectory());
    this.fullyQualifiedStorageDirectory = Suppliers.memoize(() -> {
        try {
            return FileSystem.newInstance(storageDir.toUri(), hadoopConfig).makeQualified(storageDir).toUri()
                    .toString();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    });

    log.info("Configured HDFS as deep storage");
}
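
Note how the pusher wraps the newInstance call in a memoized Supplier: resolving and qualifying the storage directory is deferred until first use and then performed exactly once, so constructing the pusher never touches HDFS.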

From source file:org.apache.gobblin.yarn.GobblinYarnLogSource.java

License:Apache License

/**
 * Return a new (non-cached) {@link FileSystem} instance. The {@link FileSystem} instance
 * returned by the method has automatic closing disabled. The user of the instance needs to handle closing of the
 * instance, typically as part of its shutdown sequence.
 */
private FileSystem buildFileSystem(Config config, boolean isLocal) throws IOException {
    return isLocal ? FileSystem.newInstanceLocal(AUTO_CLOSE_CONFIG)
            : config.hasPath(ConfigurationKeys.FS_URI_KEY)
                    ? FileSystem.newInstance(URI.create(config.getString(ConfigurationKeys.FS_URI_KEY)),
                            AUTO_CLOSE_CONFIG)
                    : FileSystem.newInstance(AUTO_CLOSE_CONFIG);
}
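
AUTO_CLOSE_CONFIG is defined elsewhere in the Gobblin class; a plausible sketch, assuming it is simply a Configuration with Hadoop's standard fs.automatic.close key disabled (which is what the javadoc's "automatic closing disabled" refers to):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

// Assumption: a Configuration that opts the returned instances out of the
// JVM shutdown-hook close, so the caller controls the close() lifecycle.
// Hadoop's key for this is "fs.automatic.close".
private static final Configuration AUTO_CLOSE_CONFIG = new Configuration();
static {
    AUTO_CLOSE_CONFIG.setBoolean(CommonConfigurationKeysPublic.FS_AUTOMATIC_CLOSE_KEY, false);
}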

From source file:org.apache.hive.streaming.AbstractRecordWriter.java

License:Apache License

@Override
public void init(StreamingConnection conn, long minWriteId, long maxWriteId, int statementId)
        throws StreamingException {
    if (conn == null) {
        throw new StreamingException("Streaming connection cannot be null during record writer initialization");
    }
    this.conn = conn;
    this.curBatchMinWriteId = minWriteId;
    this.curBatchMaxWriteId = maxWriteId;
    this.statementId = statementId;
    this.conf = conn.getHiveConf();
    this.defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME);
    this.table = conn.getTable();
    String location = table.getSd().getLocation();
    try {
        URI uri = new URI(location);
        this.fs = FileSystem.newInstance(uri, conf);
        if (LOG.isDebugEnabled()) {
            LOG.debug("Created new filesystem instance: {}", System.identityHashCode(this.fs));
        }
    } catch (URISyntaxException e) {
        throw new StreamingException("Unable to create URI from location: " + location, e);
    } catch (IOException e) {
        throw new StreamingException("Unable to get filesystem for location: " + location, e);
    }
    this.inputColumns = table.getSd().getCols().stream().map(FieldSchema::getName).collect(Collectors.toList());
    this.inputTypes = table.getSd().getCols().stream().map(FieldSchema::getType).collect(Collectors.toList());
    if (conn.isPartitionedTable() && conn.isDynamicPartitioning()) {
        this.partitionColumns = table.getPartitionKeys().stream().map(FieldSchema::getName)
                .collect(Collectors.toList());
        this.inputColumns.addAll(partitionColumns);
        this.inputTypes.addAll(
                table.getPartitionKeys().stream().map(FieldSchema::getType).collect(Collectors.toList()));
    }
    this.fullyQualifiedTableName = Warehouse.getQualifiedName(table.getDbName(), table.getTableName());
    String outFormatName = this.table.getSd().getOutputFormat();
    try {
        this.acidOutputFormat = (AcidOutputFormat<?, ?>) ReflectionUtils
                .newInstance(JavaUtils.loadClass(outFormatName), conf);
    } catch (Exception e) {
        String shadePrefix = conf.getVar(HiveConf.ConfVars.HIVE_CLASSLOADER_SHADE_PREFIX);
        if (shadePrefix != null && !shadePrefix.trim().isEmpty()) {
            try {
                LOG.info("Shade prefix: {} specified. Using as fallback to load {}..", shadePrefix,
                        outFormatName);
                this.acidOutputFormat = (AcidOutputFormat<?, ?>) ReflectionUtils
                        .newInstance(JavaUtils.loadClass(shadePrefix, outFormatName), conf);
            } catch (ClassNotFoundException e1) {
                throw new StreamingException(e.getMessage(), e);
            }
        } else {
            throw new StreamingException(e.getMessage(), e);
        }
    }
    setupMemoryMonitoring();
    try {
        final AbstractSerDe serDe = createSerde();
        this.inputRowObjectInspector = (StructObjectInspector) serDe.getObjectInspector();
        if (conn.isPartitionedTable() && conn.isDynamicPartitioning()) {
            preparePartitioningFields();
            int dpStartCol = inputRowObjectInspector.getAllStructFieldRefs().size()
                    - table.getPartitionKeys().size();
            this.outputRowObjectInspector = new SubStructObjectInspector(inputRowObjectInspector, 0,
                    dpStartCol);
        } else {
            this.outputRowObjectInspector = inputRowObjectInspector;
        }
        prepareBucketingFields();
    } catch (SerDeException e) {
        throw new StreamingException("Unable to create SerDe", e);
    }
}
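
The debug log above prints System.identityHashCode because every newInstance call yields a distinct object. A minimal sketch of the contrast with the cached accessor (the URI is a placeholder):

Configuration conf = new Configuration();
URI uri = URI.create("hdfs://namenode:8020/");

// get() consults the FileSystem cache: the same URI, configuration and user
// yield the same shared instance.
FileSystem cached = FileSystem.get(uri, conf);
assert cached == FileSystem.get(uri, conf);

// newInstance() always constructs a fresh instance, so closing it (or
// mutating its state) cannot interfere with other users of the cache.
FileSystem fresh = FileSystem.newInstance(uri, conf);
assert fresh != cached;
fresh.close();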

From source file:org.apache.ignite.internal.processors.hadoop.GridHadoopFileSystemsTest.java

License:Apache License

/**
 * Tests the file system with the specified URI for multi-threaded working directory support.
 *
 * @param uri Base URI of the file system (scheme and authority).
 * @throws Exception If the test fails.
 */
private void testFileSystem(final URI uri) throws Exception {
    final Configuration cfg = new Configuration();

    setupFileSystems(cfg);

    cfg.set(GridHadoopFileSystemsUtils.LOC_FS_WORK_DIR_PROP,
            new Path(new Path(uri), "user/" + System.getProperty("user.name")).toString());

    final CountDownLatch changeUserPhase = new CountDownLatch(THREAD_COUNT);
    final CountDownLatch changeDirPhase = new CountDownLatch(THREAD_COUNT);
    final CountDownLatch changeAbsDirPhase = new CountDownLatch(THREAD_COUNT);
    final CountDownLatch finishPhase = new CountDownLatch(THREAD_COUNT);

    final Path[] newUserInitWorkDir = new Path[THREAD_COUNT];
    final Path[] newWorkDir = new Path[THREAD_COUNT];
    final Path[] newAbsWorkDir = new Path[THREAD_COUNT];
    final Path[] newInstanceWorkDir = new Path[THREAD_COUNT];

    final AtomicInteger threadNum = new AtomicInteger(0);

    GridTestUtils.runMultiThreadedAsync(new Runnable() {
        @Override
        public void run() {
            try {
                int curThreadNum = threadNum.getAndIncrement();

                FileSystem fs = FileSystem.get(uri, cfg);

                GridHadoopFileSystemsUtils.setUser(fs, "user" + curThreadNum);

                if ("file".equals(uri.getScheme()))
                    FileSystem.get(uri, cfg).setWorkingDirectory(new Path("file:///user/user" + curThreadNum));

                changeUserPhase.countDown();
                changeUserPhase.await();

                newUserInitWorkDir[curThreadNum] = FileSystem.get(uri, cfg).getWorkingDirectory();

                FileSystem.get(uri, cfg).setWorkingDirectory(new Path("folder" + curThreadNum));

                changeDirPhase.countDown();
                changeDirPhase.await();

                newWorkDir[curThreadNum] = FileSystem.get(uri, cfg).getWorkingDirectory();

                FileSystem.get(uri, cfg).setWorkingDirectory(new Path("/folder" + curThreadNum));

                changeAbsDirPhase.countDown();
                changeAbsDirPhase.await();

                newAbsWorkDir[curThreadNum] = FileSystem.get(uri, cfg).getWorkingDirectory();

                newInstanceWorkDir[curThreadNum] = FileSystem.newInstance(uri, cfg).getWorkingDirectory();

                finishPhase.countDown();
            } catch (InterruptedException | IOException e) {
                error("Failed to execute test thread.", e);

                fail();
            }
        }
    }, THREAD_COUNT, "filesystems-test");

    finishPhase.await();

    for (int i = 0; i < THREAD_COUNT; i++) {
        cfg.set(MRJobConfig.USER_NAME, "user" + i);

        Path workDir = new Path(new Path(uri), "user/user" + i);

        cfg.set(GridHadoopFileSystemsUtils.LOC_FS_WORK_DIR_PROP, workDir.toString());

        assertEquals(workDir, FileSystem.newInstance(uri, cfg).getWorkingDirectory());

        assertEquals(workDir, newUserInitWorkDir[i]);

        assertEquals(new Path(new Path(uri), "user/user" + i + "/folder" + i), newWorkDir[i]);

        assertEquals(new Path("/folder" + i), newAbsWorkDir[i]);

        assertEquals(new Path(new Path(uri), "user/" + System.getProperty("user.name")), newInstanceWorkDir[i]);
    }

    System.out.println(System.getProperty("user.dir"));
}
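
The newInstanceWorkDir assertion is the point of this test: the per-thread setWorkingDirectory calls only affect the cached instances returned by FileSystem.get, while each FileSystem.newInstance call starts from the working directory configured via LOC_FS_WORK_DIR_PROP. The two near-identical variants of this test that follow exercise the same behavior.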

From source file:org.apache.ignite.internal.processors.hadoop.HadoopFileSystemsTest.java

License:Apache License

/**
 * Tests the file system with the specified URI for multi-threaded working directory support.
 *
 * @param uri Base URI of the file system (scheme and authority).
 * @throws Exception If the test fails.
 */
private void testFileSystem(final URI uri) throws Exception {
    final Configuration cfg = new Configuration();

    setupFileSystems(cfg);

    cfg.set(HadoopFileSystemsUtils.LOC_FS_WORK_DIR_PROP,
            new Path(new Path(uri), "user/" + System.getProperty("user.name")).toString());

    final CountDownLatch changeUserPhase = new CountDownLatch(THREAD_COUNT);
    final CountDownLatch changeDirPhase = new CountDownLatch(THREAD_COUNT);
    final CountDownLatch changeAbsDirPhase = new CountDownLatch(THREAD_COUNT);
    final CountDownLatch finishPhase = new CountDownLatch(THREAD_COUNT);

    final Path[] newUserInitWorkDir = new Path[THREAD_COUNT];
    final Path[] newWorkDir = new Path[THREAD_COUNT];
    final Path[] newAbsWorkDir = new Path[THREAD_COUNT];
    final Path[] newInstanceWorkDir = new Path[THREAD_COUNT];

    final AtomicInteger threadNum = new AtomicInteger(0);

    GridTestUtils.runMultiThreadedAsync(new Runnable() {
        @Override
        public void run() {
            try {
                int curThreadNum = threadNum.getAndIncrement();

                FileSystem fs = FileSystem.get(uri, cfg);

                HadoopFileSystemsUtils.setUser(fs, "user" + curThreadNum);

                if ("file".equals(uri.getScheme()))
                    FileSystem.get(uri, cfg).setWorkingDirectory(new Path("file:///user/user" + curThreadNum));

                changeUserPhase.countDown();
                changeUserPhase.await();

                newUserInitWorkDir[curThreadNum] = FileSystem.get(uri, cfg).getWorkingDirectory();

                FileSystem.get(uri, cfg).setWorkingDirectory(new Path("folder" + curThreadNum));

                changeDirPhase.countDown();
                changeDirPhase.await();

                newWorkDir[curThreadNum] = FileSystem.get(uri, cfg).getWorkingDirectory();

                FileSystem.get(uri, cfg).setWorkingDirectory(new Path("/folder" + curThreadNum));

                changeAbsDirPhase.countDown();
                changeAbsDirPhase.await();

                newAbsWorkDir[curThreadNum] = FileSystem.get(uri, cfg).getWorkingDirectory();

                newInstanceWorkDir[curThreadNum] = FileSystem.newInstance(uri, cfg).getWorkingDirectory();

                finishPhase.countDown();
            } catch (InterruptedException | IOException e) {
                error("Failed to execute test thread.", e);

                fail();
            }
        }
    }, THREAD_COUNT, "filesystems-test");

    finishPhase.await();

    for (int i = 0; i < THREAD_COUNT; i++) {
        cfg.set(MRJobConfig.USER_NAME, "user" + i);

        Path workDir = new Path(new Path(uri), "user/user" + i);

        cfg.set(HadoopFileSystemsUtils.LOC_FS_WORK_DIR_PROP, workDir.toString());

        assertEquals(workDir, FileSystem.newInstance(uri, cfg).getWorkingDirectory());

        assertEquals(workDir, newUserInitWorkDir[i]);

        assertEquals(new Path(new Path(uri), "user/user" + i + "/folder" + i), newWorkDir[i]);

        assertEquals(new Path("/folder" + i), newAbsWorkDir[i]);

        assertEquals(new Path(new Path(uri), "user/" + System.getProperty("user.name")), newInstanceWorkDir[i]);
    }

    System.out.println(System.getProperty("user.dir"));
}

From source file:org.apache.ignite.internal.processors.hadoop.impl.HadoopFileSystemsTest.java

License:Apache License

/**
 * Tests the file system with the specified URI for multi-threaded working directory support.
 *
 * @param uri Base URI of the file system (scheme and authority).
 * @throws Exception If the test fails.
 */
private void testFileSystem(final URI uri) throws Exception {
    final Configuration cfg = new Configuration();

    setupFileSystems(cfg);

    cfg.set(HadoopFileSystemsUtils.LOC_FS_WORK_DIR_PROP,
            new Path(new Path(uri), "user/" + System.getProperty("user.name")).toString());

    FileSystem fs = FileSystem.get(uri, cfg);

    assertTrue(fs instanceof HadoopLocalFileSystemV1);

    final CountDownLatch changeUserPhase = new CountDownLatch(THREAD_COUNT);
    final CountDownLatch changeDirPhase = new CountDownLatch(THREAD_COUNT);
    final CountDownLatch changeAbsDirPhase = new CountDownLatch(THREAD_COUNT);
    final CountDownLatch finishPhase = new CountDownLatch(THREAD_COUNT);

    final Path[] newUserInitWorkDir = new Path[THREAD_COUNT];
    final Path[] newWorkDir = new Path[THREAD_COUNT];
    final Path[] newAbsWorkDir = new Path[THREAD_COUNT];
    final Path[] newInstanceWorkDir = new Path[THREAD_COUNT];

    final AtomicInteger threadNum = new AtomicInteger(0);

    GridTestUtils.runMultiThreadedAsync(new Runnable() {
        @Override
        public void run() {
            try {
                int curThreadNum = threadNum.getAndIncrement();

                if ("file".equals(uri.getScheme()))
                    FileSystem.get(uri, cfg).setWorkingDirectory(new Path("file:///user/user" + curThreadNum));

                changeUserPhase.countDown();
                changeUserPhase.await();

                newUserInitWorkDir[curThreadNum] = FileSystem.get(uri, cfg).getWorkingDirectory();

                FileSystem.get(uri, cfg).setWorkingDirectory(new Path("folder" + curThreadNum));

                changeDirPhase.countDown();
                changeDirPhase.await();

                newWorkDir[curThreadNum] = FileSystem.get(uri, cfg).getWorkingDirectory();

                FileSystem.get(uri, cfg).setWorkingDirectory(new Path("/folder" + curThreadNum));

                changeAbsDirPhase.countDown();
                changeAbsDirPhase.await();

                newAbsWorkDir[curThreadNum] = FileSystem.get(uri, cfg).getWorkingDirectory();

                newInstanceWorkDir[curThreadNum] = FileSystem.newInstance(uri, cfg).getWorkingDirectory();

                finishPhase.countDown();
            } catch (InterruptedException | IOException e) {
                error("Failed to execute test thread.", e);

                fail();
            }
        }
    }, THREAD_COUNT, "filesystems-test");

    finishPhase.await();

    for (int i = 0; i < THREAD_COUNT; i++) {
        cfg.set(MRJobConfig.USER_NAME, "user" + i);

        Path workDir = new Path(new Path(uri), "user/user" + i);

        cfg.set(HadoopFileSystemsUtils.LOC_FS_WORK_DIR_PROP, workDir.toString());

        assertEquals(workDir, FileSystem.newInstance(uri, cfg).getWorkingDirectory());

        assertEquals(workDir, newUserInitWorkDir[i]);

        assertEquals(new Path(new Path(uri), "user/user" + i + "/folder" + i), newWorkDir[i]);

        assertEquals(new Path("/folder" + i), newAbsWorkDir[i]);

        assertEquals(new Path(new Path(uri), "user/" + System.getProperty("user.name")), newInstanceWorkDir[i]);
    }

    System.out.println(System.getProperty("user.dir"));
}

From source file:org.apache.lens.server.LensServices.java

License:Apache License

@SuppressWarnings("unchecked")
@Override
public synchronized void init(HiveConf hiveConf) {

    if (getServiceState() == STATE.NOTINITED) {

        initializeErrorCollection();
        conf = hiveConf;
        conf.setVar(HiveConf.ConfVars.HIVE_SESSION_IMPL_CLASSNAME, LensSessionImpl.class.getCanonicalName());
        serviceMode = conf.getEnum(SERVER_MODE, ServiceMode.valueOf(DEFAULT_SERVER_MODE));
        cliService = new CLIService(null);
        UserConfigLoaderFactory.init(conf);
        // Add default services
        addService(cliService);
        addService(new EventServiceImpl(LensEventService.NAME));
        addService(new MetricsServiceImpl(MetricsService.NAME));
        addService(new StatisticsService(StatisticsService.STATS_SVC_NAME));

        // Add configured services, these are instances of LensService which need a CLIService instance
        // for session management
        String[] serviceNames = conf.getStrings(SERVICE_NAMES);
        for (String sName : serviceNames) {
            try {

                String serviceClassName = conf.get(getServiceImplConfKey(sName));

                if (StringUtils.isBlank(serviceClassName)) {
                    log.warn("Invalid class for service {} class={}", sName, serviceClassName);
                    continue;
                }

                Class<?> cls = Class.forName(serviceClassName);

                if (BaseLensService.class.isAssignableFrom(cls)) {
                    Class<? extends BaseLensService> serviceClass = (Class<? extends BaseLensService>) cls;
                    log.info("Adding {}  service with {}", sName, serviceClass);
                    Constructor<?> constructor = serviceClass.getConstructor(CLIService.class);
                    BaseLensService service = (BaseLensService) constructor.newInstance(cliService);
                    addService(service);
                    lensServices.add(service);
                } else if (Service.class.isAssignableFrom(cls)) {
                    Class<? extends Service> serviceClass = (Class<? extends Service>) cls;
                    // Assuming default constructor
                    Service svc = serviceClass.newInstance();
                    addService(svc);
                } else {
                    log.warn("Unsupported service class {} for service {}", serviceClassName, sName);
                }
            } catch (Exception e) {
                log.warn("Could not add service:{}", sName, e);
                throw new RuntimeException("Could not add service:" + sName, e);
            }
        }

        for (Service svc : getServices()) {
            services.put(svc.getName(), svc);
        }

        // This will init all services in the order they were added
        super.init(conf);

        // setup persisted state
        isServerStatePersistenceEnabled = conf.getBoolean(SERVER_STATE_PERSISTENCE_ENABLED,
                DEFAULT_SERVER_STATE_PERSISTENCE_ENABLED);
        if (isServerStatePersistenceEnabled) {
            String persistPathStr = conf.get(SERVER_STATE_PERSIST_LOCATION,
                    DEFAULT_SERVER_STATE_PERSIST_LOCATION);
            persistDir = new Path(persistPathStr);
            try {
                Configuration configuration = new Configuration(conf);
                configuration.setBoolean(FS_AUTOMATIC_CLOSE, false);

                int outStreamBufferSize = conf.getInt(STATE_PERSIST_OUT_STREAM_BUFF_SIZE,
                        DEFAULT_STATE_PERSIST_OUT_STREAM_BUFF_SIZE);
                configuration.setInt(FS_IO_FILE_BUFFER_SIZE, outStreamBufferSize);
                log.info("STATE_PERSIST_OUT_STREAM_BUFF_SIZE IN BYTES:{}", outStreamBufferSize);
                persistenceFS = FileSystem.newInstance(persistDir.toUri(), configuration);
                setupPersistedState();
            } catch (Exception e) {
                log.error("Could not recover from persisted state", e);
                throw new RuntimeException("Could not recover from persisted state", e);
            }
            serverStatePersistenceInterval = conf.getLong(SERVER_STATE_PERSISTENCE_INTERVAL_MILLIS,
                    DEFAULT_SERVER_STATE_PERSISTENCE_INTERVAL_MILLIS);
        }
        log.info("Initialized services: {}", services.keySet().toString());
    }
}

From source file:org.apache.lens.server.session.DatabaseResourceService.java

License:Apache License

private void loadDbResourceEntries() throws LensException {
    // Read list of databases in
    FileSystem serverFs = null;

    try {
        String resTopDir = getHiveConf().get(LensConfConstants.DATABASE_RESOURCE_DIR,
                LensConfConstants.DEFAULT_DATABASE_RESOURCE_DIR);
        log.info("Database specific resources at {}", resTopDir);

        Path resTopDirPath = new Path(resTopDir);
        serverFs = FileSystem.newInstance(resTopDirPath.toUri(), getHiveConf());
        if (!serverFs.exists(resTopDirPath)) {
            incrCounter(LOAD_RESOURCES_ERRORS);
            log.warn("Database resource location does not exist - {}. Database jars will not be available",
                    resTopDir);
            return;
        }

        // Look for db dirs
        for (FileStatus dbDir : serverFs.listStatus(resTopDirPath)) {
            Path dbDirPath = dbDir.getPath();
            if (serverFs.isDirectory(dbDirPath)) {
                String dbName = dbDirPath.getName();
                // Get all resources for that db
                findResourcesInDir(serverFs, dbName, dbDirPath);
            } else {
                log.warn("DB resource DIR is not a directory: {}", dbDirPath);
            }
        }

        log.debug("Found resources {}", dbResEntryMap);
    } catch (IOException io) {
        log.error("Error getting list of dbs to load resources from", io);
        throw new LensException(io);
    } finally {
        if (serverFs != null) {
            try {
                serverFs.close();
            } catch (IOException e) {
                log.error("Error closing file system instance", e);
            }
        }
    }
}
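
Closing serverFs in the finally block is safe precisely because it came from newInstance: the instance is not shared through the FileSystem cache, so no other reader of the same URI is affected. The Solr test at the end of this page closes its instances for the same reason.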

From source file:org.apache.oozie.action.hadoop.GitMain.java

License:Apache License

/**
 * Gathers the Git authentication key from a FileSystem and copies it to a local
 * filesystem location.
 *
 * @param location where the key is located (an HDFS URI)
 * @return the location to where the key was saved
 */
@VisibleForTesting
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN", justification = "File created without user input")
File getKeyFromFS(final Path location) throws IOException, URISyntaxException {
    final String keyCopyMsg = "Copied keys to local container!";

    final Configuration conf = new Configuration();
    final FileSystem fs = FileSystem.newInstance(new URI(nameNode), conf);

    final File key = createTempDir("git");

    fs.copyToLocalFile(location, new Path("file:///" + key.getAbsolutePath() + "/privkey"));
    System.out.println(keyCopyMsg);

    return new File(key.getAbsolutePath() + "/privkey");
}

From source file:org.apache.solr.cloud.hdfs.StressHdfsTest.java

License:Apache License

private void createAndDeleteCollection() throws SolrServerException, IOException, Exception, KeeperException,
        InterruptedException, URISyntaxException {

    boolean overshard = random().nextBoolean();
    if (overshard) {
        createCollection(DELETE_DATA_DIR_COLLECTION, shardCount * 2, 1, 2);
    } else {
        int rep = shardCount / 2;
        if (rep == 0)
            rep = 1;
        createCollection(DELETE_DATA_DIR_COLLECTION, rep, 2, 1);
    }

    waitForRecoveriesToFinish(DELETE_DATA_DIR_COLLECTION, false);
    cloudClient.setDefaultCollection(DELETE_DATA_DIR_COLLECTION);
    cloudClient.getZkStateReader().updateClusterState(true);

    // collect the data dirs
    List<String> dataDirs = new ArrayList<String>();

    int i = 0;
    for (SolrServer client : clients) {
        HttpSolrServer c = new HttpSolrServer(getBaseUrl(client) + "/delete_data_dir");
        try {
            c.add(getDoc("id", i++));
            if (random().nextBoolean())
                c.add(getDoc("id", i++));
            if (random().nextBoolean())
                c.add(getDoc("id", i++));
            if (random().nextBoolean()) {
                c.commit();
            } else {
                c.commit(true, true, true);
            }

            c.query(new SolrQuery("id:" + i));
            c.setSoTimeout(60000);
            c.setConnectionTimeout(30000);
            NamedList<Object> response = c.query(new SolrQuery().setRequestHandler("/admin/system"))
                    .getResponse();
            NamedList<Object> coreInfo = (NamedList<Object>) response.get("core");
            String dataDir = (String) ((NamedList<Object>) coreInfo.get("directory")).get("data");
            dataDirs.add(dataDir);
        } finally {
            c.shutdown();
        }
    }

    if (random().nextBoolean()) {
        cloudClient.deleteByQuery("*:*");
        cloudClient.commit();

        assertEquals(0, cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound());
    }

    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("action", CollectionAction.DELETE.toString());
    params.set("name", DELETE_DATA_DIR_COLLECTION);
    QueryRequest request = new QueryRequest(params);
    request.setPath("/admin/collections");
    cloudClient.request(request);

    // check that all dirs are gone
    for (String dataDir : dataDirs) {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.newInstance(new URI(dataDir), conf);
        assertFalse("Data directory exists after collection removal : " + dataDir,
                fs.exists(new Path(dataDir)));
        fs.close();
    }
}