Example usage for org.apache.hadoop.fs FileSystem closeAll

List of usage examples for org.apache.hadoop.fs FileSystem closeAll

Introduction

This page collects example usages of org.apache.hadoop.fs.FileSystem.closeAll().

Prototype

public static void closeAll() throws IOException 

Document

Close all cached FileSystem instances.
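
As a quick illustration, here is a minimal sketch (the class name and the path checked are hypothetical) of the usual call pattern: obtain a cached instance with FileSystem.get(Configuration), use it, and close every cached instance with closeAll() during shutdown or test teardown.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CloseAllSketch {
    public static void main(String[] args) throws IOException {
        // FileSystem.get() returns an instance from Hadoop's internal cache,
        // keyed by URI scheme, authority and user.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        System.out.println("/tmp exists: " + fs.exists(new Path("/tmp")));

        // Close every cached FileSystem instance in one call; typically done
        // once during application shutdown or test teardown.
        FileSystem.closeAll();
    }
}

After closeAll() returns, previously obtained instances should be treated as closed; a later FileSystem.get() will create and cache fresh instances.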

Usage

From source file: eu.stratosphere.test.util.TestBase.java

License: Apache License

@After
public void stopCluster() throws Exception {
    cluster.stopCluster();
    ClusterProviderPool.removeInstance(clusterConfig);
    FileSystem.closeAll();
    System.gc();
}

From source file: io.divolte.server.Server.java

License: Apache License

public void shutdown() {
    try {
        logger.info("Stopping HTTP server.");
        shutdownHandler.shutdown();
        shutdownHandler.awaitShutdown(HTTP_SHUTDOWN_GRACE_PERIOD_MILLIS);
        undertow.stop();
    } catch (Exception ie) {
        Thread.currentThread().interrupt();
    }

    logger.info("Stopping thread pools.");
    processingPool.stop();

    logger.info("Closing HDFS filesystem connection.");
    try {
        FileSystem.closeAll();
    } catch (IOException ioe) {
        logger.warn("Failed to cleanly close HDFS file system.", ioe);
    }
}

From source file: org.apache.blur.HdfsMiniClusterUtil.java

License: Apache License

public static void shutdownDfs(MiniDFSCluster cluster) {
    if (cluster != null) {
        LOG.info("Shutting down Mini DFS ");
        try {
            cluster.shutdown();
        } catch (Exception e) {
            // Can get a java.lang.reflect.UndeclaredThrowableException thrown
            // here because of an InterruptedException. Don't let exceptions in
            // here be cause of test failure.
        }
        try {
            FileSystem fs = cluster.getFileSystem();
            if (fs != null) {
                LOG.info("Shutting down FileSystem");
                fs.close();
            }
            FileSystem.closeAll();
        } catch (IOException e) {
            LOG.error("error closing file system", e);
        }

        // This has got to be one of the worst hacks I have ever had to do.
        // This is needed to shutdown 2 thread pools that are not shutdown by
        // themselves.
        ThreadGroup threadGroup = Thread.currentThread().getThreadGroup();
        Thread[] threads = new Thread[100];
        int enumerate = threadGroup.enumerate(threads);
        for (int i = 0; i < enumerate; i++) {
            Thread thread = threads[i];
            if (thread.getName().startsWith("pool")) {
                if (thread.isAlive()) {
                    thread.interrupt();
                    LOG.info("Stopping ThreadPoolExecutor [" + thread.getName() + "]");
                    Object target = getField(Thread.class, thread, "target");
                    if (target != null) {
                        ThreadPoolExecutor e = (ThreadPoolExecutor) getField(ThreadPoolExecutor.class, target,
                                "this$0");
                        if (e != null) {
                            e.shutdownNow();
                        }
                    }
                    try {
                        LOG.info("Waiting for thread pool to exit [" + thread.getName() + "]");
                        thread.join();
                    } catch (InterruptedException e) {
                        throw new RuntimeException(e);
                    }
                }
            }
        }
    }
}

From source file: org.apache.blur.MiniCluster.java

License: Apache License

public void shutdownDfs() {
    if (cluster != null) {
        LOG.info("Shutting down Mini DFS ");
        try {
            cluster.shutdown();
        } catch (Exception e) {
            // Can get a java.lang.reflect.UndeclaredThrowableException thrown
            // here because of an InterruptedException. Don't let exceptions in
            // here be cause of test failure.
        }
        try {
            FileSystem fs = cluster.getFileSystem();
            if (fs != null) {
                LOG.info("Shutting down FileSystem");
                fs.close();
            }
            FileSystem.closeAll();
        } catch (IOException e) {
            LOG.error("error closing file system", e);
        }

        // This has got to be one of the worst hacks I have ever had to do.
        // This is needed to shutdown 2 thread pools that are not shutdown by
        // themselves.
        ThreadGroup threadGroup = group;
        Thread[] threads = new Thread[100];
        int enumerate = threadGroup.enumerate(threads);
        for (int i = 0; i < enumerate; i++) {
            Thread thread = threads[i];
            if (thread.getName().startsWith("pool")) {
                if (thread.isAlive()) {
                    thread.interrupt();
                    LOG.info("Stopping ThreadPoolExecutor [" + thread.getName() + "]");
                    Object target = getField(Thread.class, thread, "target");
                    if (target != null) {
                        ThreadPoolExecutor e = (ThreadPoolExecutor) getField(ThreadPoolExecutor.class, target,
                                "this$0");
                        if (e != null) {
                            e.shutdownNow();
                        }
                    }
                    try {
                        LOG.info("Waiting for thread pool to exit [" + thread.getName() + "]");
                        thread.join();
                    } catch (InterruptedException e) {
                        throw new RuntimeException(e);
                    }
                }
            }
        }
    }
}

From source file: org.apache.falcon.oozie.feed.OozieFeedWorkflowBuilderTest.java

License: Apache License

@Test(dataProvider = "uMaskOptions")
public void testRetentionCoords(String umask) throws Exception {
    FileSystem fs = srcMiniDFS.getFileSystem();
    Configuration conf = fs.getConf();
    conf.set("fs.permissions.umask-mode", umask);

    OozieEntityBuilder feedBuilder = OozieEntityBuilder.get(feed);
    Path bundlePath = new Path("/projects/falcon/");
    feedBuilder.build(trgCluster, bundlePath);

    // ClusterHelper constructs new fs Conf. Add it to cluster properties so that it gets added to FS conf
    setUmaskInFsConf(srcCluster, umask);

    org.apache.falcon.entity.v0.feed.Cluster cluster = FeedHelper.getCluster(feed, srcCluster.getName());
    Calendar startCal = Calendar.getInstance();
    Calendar endCal = Calendar.getInstance();
    endCal.add(Calendar.DATE, 1);
    cluster.getValidity().setEnd(endCal.getTime());
    RuntimeProperties.get().setProperty("falcon.retention.keep.instances.beyond.validity", "false");

    OozieCoordinatorBuilder builder = OozieCoordinatorBuilder.get(feed, Tag.RETENTION);
    List<Properties> coords = builder.buildCoords(srcCluster, new Path("/projects/falcon/" + umask));
    COORDINATORAPP coord = getCoordinator(srcMiniDFS,
            coords.get(0).getProperty(OozieEntityBuilder.ENTITY_PATH));

    Assert.assertEquals(coord.getAction().getWorkflow().getAppPath(),
            "${nameNode}/projects/falcon/" + umask + "/RETENTION");
    Assert.assertEquals(coord.getName(), "FALCON_FEED_RETENTION_" + feed.getName());
    Assert.assertEquals(coord.getFrequency(), "${coord:hours(6)}");

    Assert.assertEquals(coord.getStart(), DateUtil.getDateFormatFromTime(startCal.getTimeInMillis()));
    Date endDate = DateUtils.addSeconds(endCal.getTime(),
            FeedHelper.getRetentionLimitInSeconds(feed, srcCluster.getName()));
    Assert.assertEquals(coord.getEnd(), DateUtil.getDateFormatFromTime(endDate.getTime()));

    HashMap<String, String> props = getCoordProperties(coord);

    HashMap<String, String> wfProps = getWorkflowProperties(fs, coord);

    String feedDataPath = wfProps.get("feedDataPath");
    String storageType = wfProps.get("falconFeedStorageType");

    // verify the param that feed evictor depends on

    Assert.assertEquals(storageType, Storage.TYPE.FILESYSTEM.name());

    final Storage storage = FeedHelper.createStorage(cluster, feed);
    if (feedDataPath != null) {
        Assert.assertEquals(feedDataPath, storage.getUriTemplate().replaceAll(Storage.DOLLAR_EXPR_START_REGEX,
                Storage.QUESTION_EXPR_START_REGEX));
    }

    if (storageType != null) {
        Assert.assertEquals(storageType, storage.getType().name());
    }

    // verify the post processing params
    Assert.assertEquals(wfProps.get(WorkflowExecutionArgs.OUTPUT_FEED_NAMES.getName()), feed.getName());
    Assert.assertEquals(wfProps.get(WorkflowExecutionArgs.OUTPUT_NAMES.getName()), feed.getName());
    Assert.assertEquals(wfProps.get(WorkflowExecutionArgs.OUTPUT_FEED_PATHS.getName()), "IGNORE");

    assertWorkflowRetries(getWorkflowapp(srcMiniDFS.getFileSystem(), coord));

    try {
        verifyClusterLocationsUMask(srcCluster, fs);
        verifyWorkflowUMask(fs, coord, umask);
    } finally {
        cleanupWorkflowState(fs, coord);
        FileSystem.closeAll();
    }
}

From source file: org.apache.falcon.oozie.feed.OozieFeedWorkflowBuilderTest.java

License: Apache License

@Test(dataProvider = "secureOptions")
public void testRetentionCoordsForTable(String secureOption) throws Exception {
    StartupProperties.get().setProperty("falcon.postprocessing.enable", "true");
    StartupProperties.get().setProperty(SecurityUtil.AUTHENTICATION_TYPE, secureOption);

    final String umask = "000";

    FileSystem fs = trgMiniDFS.getFileSystem();
    Configuration conf = fs.getConf();
    conf.set("fs.permissions.umask-mode", umask);

    // ClusterHelper constructs new fs Conf. Add it to cluster properties so that it gets added to FS conf
    setUmaskInFsConf(trgCluster, umask);

    org.apache.falcon.entity.v0.feed.Cluster cluster = FeedHelper.getCluster(tableFeed, trgCluster.getName());
    final Calendar instance = Calendar.getInstance();
    instance.add(Calendar.YEAR, 1);
    cluster.getValidity().setEnd(instance.getTime());

    OozieCoordinatorBuilder builder = OozieCoordinatorBuilder.get(tableFeed, Tag.RETENTION);
    List<Properties> coords = builder.buildCoords(trgCluster, new Path("/projects/falcon/"));
    COORDINATORAPP coord = getCoordinator(trgMiniDFS,
            coords.get(0).getProperty(OozieEntityBuilder.ENTITY_PATH));

    Assert.assertEquals(coord.getAction().getWorkflow().getAppPath(), "${nameNode}/projects/falcon/RETENTION");
    Assert.assertEquals(coord.getName(), "FALCON_FEED_RETENTION_" + tableFeed.getName());
    Assert.assertEquals(coord.getFrequency(), "${coord:hours(6)}");

    HashMap<String, String> props = getCoordProperties(coord);

    HashMap<String, String> wfProps = getWorkflowProperties(fs, coord);

    String feedDataPath = wfProps.get("feedDataPath");
    String storageType = wfProps.get("falconFeedStorageType");

    // verify the param that feed evictor depends on
    Assert.assertEquals(storageType, Storage.TYPE.TABLE.name());

    final Storage storage = FeedHelper.createStorage(cluster, tableFeed);
    if (feedDataPath != null) {
        Assert.assertEquals(feedDataPath, storage.getUriTemplate().replaceAll(Storage.DOLLAR_EXPR_START_REGEX,
                Storage.QUESTION_EXPR_START_REGEX));
    }

    if (storageType != null) {
        Assert.assertEquals(storageType, storage.getType().name());
    }

    // verify the post processing params
    Assert.assertEquals(wfProps.get(WorkflowExecutionArgs.OUTPUT_FEED_NAMES.getName()), tableFeed.getName());
    Assert.assertEquals(wfProps.get(WorkflowExecutionArgs.OUTPUT_NAMES.getName()), tableFeed.getName());
    Assert.assertEquals(wfProps.get(WorkflowExecutionArgs.OUTPUT_FEED_PATHS.getName()), "IGNORE");

    assertWorkflowRetries(coord);
    verifyBrokerProperties(srcCluster, wfProps);
    verifyEntityProperties(tableFeed, trgCluster, WorkflowExecutionContext.EntityOperations.DELETE, wfProps);

    Assert.assertTrue(Storage.TYPE.TABLE == FeedHelper.getStorageType(tableFeed, trgCluster));
    assertHCatCredentials(getWorkflowapp(trgMiniDFS.getFileSystem(), coord),
            coord.getAction().getWorkflow().getAppPath().replace("${nameNode}", ""));

    try {
        verifyClusterLocationsUMask(trgCluster, fs);
        verifyWorkflowUMask(fs, coord, umask);
    } finally {
        cleanupWorkflowState(fs, coord);
        FileSystem.closeAll();
    }
}

From source file: org.apache.flink.test.util.TestBaseUtils.java

License: Apache License

public static void stopCluster(ForkableFlinkMiniCluster executor, FiniteDuration timeout) throws Exception {
    if (logDir != null) {
        FileUtils.deleteDirectory(logDir);
    }
    if (executor != null) {
        int numUnreleasedBCVars = 0;
        int numActiveConnections = 0;

        if (executor.running()) {
            List<ActorRef> tms = executor.getTaskManagersAsJava();
            List<Future<Object>> bcVariableManagerResponseFutures = new ArrayList<>();
            List<Future<Object>> numActiveConnectionsResponseFutures = new ArrayList<>();

            for (ActorRef tm : tms) {
                bcVariableManagerResponseFutures.add(Patterns.ask(tm,
                        TestingTaskManagerMessages.RequestBroadcastVariablesWithReferences$.MODULE$,
                        new Timeout(timeout)));

                numActiveConnectionsResponseFutures.add(Patterns.ask(tm,
                        TestingTaskManagerMessages.RequestNumActiveConnections$.MODULE$, new Timeout(timeout)));
            }

            Future<Iterable<Object>> bcVariableManagerFutureResponses = Futures
                    .sequence(bcVariableManagerResponseFutures, TestingUtils.defaultExecutionContext());

            Iterable<Object> responses = Await.result(bcVariableManagerFutureResponses, timeout);

            for (Object response : responses) {
                numUnreleasedBCVars += ((TestingTaskManagerMessages.ResponseBroadcastVariablesWithReferences) response)
                        .number();
            }

            Future<Iterable<Object>> numActiveConnectionsFutureResponses = Futures
                    .sequence(numActiveConnectionsResponseFutures, TestingUtils.defaultExecutionContext());

            responses = Await.result(numActiveConnectionsFutureResponses, timeout);

            for (Object response : responses) {
                numActiveConnections += ((TestingTaskManagerMessages.ResponseNumActiveConnections) response)
                        .number();
            }
        }

        executor.stop();
        FileSystem.closeAll();
        System.gc();

        Assert.assertEquals("Not all broadcast variables were released.", 0, numUnreleasedBCVars);
        Assert.assertEquals("Not all TCP connections were released.", 0, numActiveConnections);
    }

}

From source file: org.apache.hama.HamaTestCase.java

License: Apache License

/**
 * Common method to close down a MiniDFSCluster and the associated file system
 *
 * @param cluster
 */
public static void shutdownDfs(MiniDFSCluster cluster) {
    if (cluster != null) {
        LOG.info("Shutting down Mini DFS ");
        try {
            cluster.shutdown();
        } catch (Exception e) {
            // Can get a java.lang.reflect.UndeclaredThrowableException thrown
            // here because of an InterruptedException. Don't let exceptions in
            // here be cause of test failure.
        }
        try {
            FileSystem fs = cluster.getFileSystem();
            if (fs != null) {
                LOG.info("Shutting down FileSystem");
                fs.close();
            }
            FileSystem.closeAll();
        } catch (IOException e) {
            LOG.error("error closing file system", e);
        }
    }
}

From source file: org.apache.hcatalog.hbase.ManyMiniCluster.java

License: Apache License

protected synchronized void stop() {
    if (hbaseCluster != null) {
        HConnectionManager.deleteAllConnections(true);
        try {
            hbaseCluster.shutdown();
        } catch (Exception e) {
            e.printStackTrace();
        }
        hbaseCluster = null;
    }
    if (zookeeperCluster != null) {
        try {
            zookeeperCluster.shutdown();
        } catch (Exception e) {
            e.printStackTrace();
        }
        zookeeperCluster = null;
    }
    if (mrCluster != null) {
        try {
            mrCluster.shutdown();
        } catch (Exception e) {
            e.printStackTrace();
        }
        mrCluster = null;
    }
    if (dfsCluster != null) {
        try {
            dfsCluster.getFileSystem().close();
            dfsCluster.shutdown();
        } catch (Exception e) {
            e.printStackTrace();
        }
        dfsCluster = null;
    }
    try {
        FileSystem.closeAll();
    } catch (IOException e) {
        e.printStackTrace();
    }
    started = false;
}

From source file: org.apache.hdt.dfs.core.DFSLocationsRoot.java

License: Apache License

public void disconnect() {
    Thread closeThread = new Thread() {
        /* @inheritDoc */
        @Override
        public void run() {
            try {
                System.out.printf("Closing all opened File Systems...\n");
                FileSystem.closeAll();
                System.out.printf("File Systems closed\n");

            } catch (IOException ioe) {
                ioe.printStackTrace();
            }
        }
    };

    // Wait 5 seconds for the connections to be closed
    closeThread.start();
    try {
        closeThread.join(5000);

    } catch (InterruptedException ie) {
        // Ignore
    }
}