List of usage examples for org.apache.hadoop.fs FileSystem close
@Override public void close() throws IOException
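FileSystem implements java.io.Closeable, so close() releases the client's resources (RPC connections, open streams). One caveat that several examples below run into: FileSystem.get() returns a JVM-wide cached instance by default, so closing it also closes it for every other caller with the same URI and user. A minimal sketch of the common pattern, assuming a default Configuration (the path and the per-scheme cache-disabling key are illustrative, not taken from the examples):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemCloseSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // FileSystem.get() normally returns a shared, cached instance.
        // Caching is controlled per scheme via "fs.<scheme>.impl.disable.cache";
        // disabling it here means close() below only affects this instance.
        conf.setBoolean("fs.hdfs.impl.disable.cache", true);
        // FileSystem implements Closeable, so try-with-resources guarantees
        // close() runs even if an operation throws.
        try (FileSystem fs = FileSystem.get(conf)) {
            fs.mkdirs(new Path("/tmp/close-sketch")); // illustrative path
        }
    }
}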
From source file:org.apache.flink.yarn.FlinkYarnClientBase.java
License:Apache License
/**
 * This method will block until the ApplicationMaster/JobManager have been
 * deployed on YARN.
 */
protected AbstractFlinkYarnCluster deployInternal() throws Exception {
    isReadyForDeployment();

    LOG.info("Using values:");
    LOG.info("\tTaskManager count = {}", taskManagerCount);
    LOG.info("\tJobManager memory = {}", jobManagerMemoryMb);
    LOG.info("\tTaskManager memory = {}", taskManagerMemoryMb);

    // Create application via yarnClient
    yarnApplication = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = yarnApplication.getNewApplicationResponse();

    // ------------------ Add dynamic properties to local flinkConfiguration ------
    Map<String, String> dynProperties = CliFrontend.getDynamicProperties(dynamicPropertiesEncoded);
    for (Map.Entry<String, String> dynProperty : dynProperties.entrySet()) {
        flinkConfiguration.setString(dynProperty.getKey(), dynProperty.getValue());
    }

    try {
        org.apache.flink.core.fs.FileSystem.setDefaultScheme(flinkConfiguration);
    } catch (IOException e) {
        throw new IOException("Error while setting the default "
                + "filesystem scheme from configuration.", e);
    }

    // ------------------ Check if the specified queue exists --------------
    try {
        List<QueueInfo> queues = yarnClient.getAllQueues();
        if (queues.size() > 0 && this.yarnQueue != null) {
            // check only if there are queues configured in yarn and for this session.
            boolean queueFound = false;
            for (QueueInfo queue : queues) {
                if (queue.getQueueName().equals(this.yarnQueue)) {
                    queueFound = true;
                    break;
                }
            }
            if (!queueFound) {
                String queueNames = "";
                for (QueueInfo queue : queues) {
                    queueNames += queue.getQueueName() + ", ";
                }
                LOG.warn("The specified queue '" + this.yarnQueue + "' does not exist. "
                        + "Available queues: " + queueNames);
            }
        } else {
            LOG.debug("The YARN cluster does not have any queues configured");
        }
    } catch (Throwable e) {
        LOG.warn("Error while getting queue information from YARN: " + e.getMessage());
        if (LOG.isDebugEnabled()) {
            LOG.debug("Error details", e);
        }
    }

    // ------------------ Check if the YARN cluster has the requested resources --------------

    // the yarnMinAllocationMB specifies the smallest possible container allocation size.
    // all allocations below this value are automatically set to this value.
    final int yarnMinAllocationMB = conf.getInt("yarn.scheduler.minimum-allocation-mb", 0);
    if (jobManagerMemoryMb < yarnMinAllocationMB || taskManagerMemoryMb < yarnMinAllocationMB) {
        LOG.warn("The JobManager or TaskManager memory is below the smallest possible YARN Container size. "
                + "The value of 'yarn.scheduler.minimum-allocation-mb' is '" + yarnMinAllocationMB
                + "'. Please increase the memory size. "
                + "YARN will allocate the smaller containers but the scheduler will account for the minimum-allocation-mb, maybe not all instances "
                + "you requested will start.");
    }

    // set the memory to minAllocationMB to do the next checks correctly
    if (jobManagerMemoryMb < yarnMinAllocationMB) {
        jobManagerMemoryMb = yarnMinAllocationMB;
    }
    if (taskManagerMemoryMb < yarnMinAllocationMB) {
        taskManagerMemoryMb = yarnMinAllocationMB;
    }

    Resource maxRes = appResponse.getMaximumResourceCapability();
    final String NOTE = "Please check the 'yarn.scheduler.maximum-allocation-mb' and the 'yarn.nodemanager.resource.memory-mb' configuration values\n";
    if (jobManagerMemoryMb > maxRes.getMemory()) {
        failSessionDuringDeployment();
        throw new YarnDeploymentException(
                "The cluster does not have the requested resources for the JobManager available!\n"
                        + "Maximum Memory: " + maxRes.getMemory() + "MB Requested: " + jobManagerMemoryMb + "MB. " + NOTE);
    }

    if (taskManagerMemoryMb > maxRes.getMemory()) {
        failSessionDuringDeployment();
        throw new YarnDeploymentException(
                "The cluster does not have the requested resources for the TaskManagers available!\n"
                        + "Maximum Memory: " + maxRes.getMemory() + " Requested: " + taskManagerMemoryMb + "MB. " + NOTE);
    }

    final String NOTE_RSC = "\nThe Flink YARN client will try to allocate the YARN session, but maybe not all TaskManagers are "
            + "connecting from the beginning because the resources are currently not available in the cluster. "
            + "The allocation might take more time than usual because the Flink YARN client needs to wait until "
            + "the resources become available.";
    int totalMemoryRequired = jobManagerMemoryMb + taskManagerMemoryMb * taskManagerCount;
    ClusterResourceDescription freeClusterMem = getCurrentFreeClusterResources(yarnClient);
    if (freeClusterMem.totalFreeMemory < totalMemoryRequired) {
        LOG.warn("This YARN session requires " + totalMemoryRequired + "MB of memory in the cluster. "
                + "There are currently only " + freeClusterMem.totalFreeMemory + "MB available." + NOTE_RSC);
    }
    if (taskManagerMemoryMb > freeClusterMem.containerLimit) {
        LOG.warn("The requested amount of memory for the TaskManagers (" + taskManagerMemoryMb + "MB) is more than "
                + "the largest possible YARN container: " + freeClusterMem.containerLimit + NOTE_RSC);
    }
    if (jobManagerMemoryMb > freeClusterMem.containerLimit) {
        LOG.warn("The requested amount of memory for the JobManager (" + jobManagerMemoryMb + "MB) is more than "
                + "the largest possible YARN container: " + freeClusterMem.containerLimit + NOTE_RSC);
    }

    // ----------------- check if the requested containers fit into the cluster.
    int[] nmFree = Arrays.copyOf(freeClusterMem.nodeManagersFree, freeClusterMem.nodeManagersFree.length);
    // first, allocate the jobManager somewhere.
    if (!allocateResource(nmFree, jobManagerMemoryMb)) {
        LOG.warn("Unable to find a NodeManager that can fit the JobManager/Application master. "
                + "The JobManager requires " + jobManagerMemoryMb + "MB. NodeManagers available: "
                + Arrays.toString(freeClusterMem.nodeManagersFree) + NOTE_RSC);
    }
    // allocate TaskManagers
    for (int i = 0; i < taskManagerCount; i++) {
        if (!allocateResource(nmFree, taskManagerMemoryMb)) {
            LOG.warn("There is not enough memory available in the YARN cluster. "
                    + "The TaskManager(s) require " + taskManagerMemoryMb + "MB each. "
                    + "NodeManagers available: " + Arrays.toString(freeClusterMem.nodeManagersFree) + "\n"
                    + "After allocating the JobManager (" + jobManagerMemoryMb + "MB) and (" + i + "/" + taskManagerCount
                    + ") TaskManagers, the following NodeManagers are available: " + Arrays.toString(nmFree) + NOTE_RSC);
        }
    }

    // ------------------ Prepare Application Master Container ------------------------------

    // respect custom JVM options in the YAML file
    final String javaOpts = flinkConfiguration.getString(ConfigConstants.FLINK_JVM_OPTIONS, "");

    String logbackFile = configurationDirectory + File.separator + FlinkYarnSessionCli.CONFIG_FILE_LOGBACK_NAME;
    boolean hasLogback = new File(logbackFile).exists();
    String log4jFile = configurationDirectory + File.separator + FlinkYarnSessionCli.CONFIG_FILE_LOG4J_NAME;
    boolean hasLog4j = new File(log4jFile).exists();
    if (hasLogback) {
        shipFiles.add(new File(logbackFile));
    }
    if (hasLog4j) {
        shipFiles.add(new File(log4jFile));
    }

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    String amCommand = "$JAVA_HOME/bin/java" + " -Xmx"
            + Utils.calculateHeapSize(jobManagerMemoryMb, flinkConfiguration) + "M " + javaOpts;

    if (hasLogback || hasLog4j) {
        amCommand += " -Dlog.file=\"" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.log\"";
        if (hasLogback) {
            amCommand += " -Dlogback.configurationFile=file:" + FlinkYarnSessionCli.CONFIG_FILE_LOGBACK_NAME;
        }
        if (hasLog4j) {
            amCommand += " -Dlog4j.configuration=file:" + FlinkYarnSessionCli.CONFIG_FILE_LOG4J_NAME;
        }
    }

    amCommand += " " + getApplicationMasterClass().getName() + " "
            + " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.out"
            + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.err";
    amContainer.setCommands(Collections.singletonList(amCommand));

    LOG.debug("Application Master start command: " + amCommand);

    // initialize HDFS
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    final FileSystem fs = FileSystem.get(conf);

    // hard coded check for the GoogleHDFS client because it's not overriding the getScheme() method.
    if (!fs.getClass().getSimpleName().equals("GoogleHadoopFileSystem") && fs.getScheme().startsWith("file")) {
        LOG.warn("The file system scheme is '" + fs.getScheme() + "'. This indicates that the "
                + "specified Hadoop configuration path is wrong and the system is using the default Hadoop configuration values. "
                + "The Flink YARN client needs to store its files in a distributed file system.");
    }

    // Set up the ApplicationSubmissionContext for the application
    ApplicationSubmissionContext appContext = yarnApplication.getApplicationSubmissionContext();

    if (RecoveryMode.isHighAvailabilityModeActivated(flinkConfiguration)) {
        // activate re-execution of failed applications
        appContext.setMaxAppAttempts(flinkConfiguration.getInteger(ConfigConstants.YARN_APPLICATION_ATTEMPTS,
                YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS));
        activateHighAvailabilitySupport(appContext);
    } else {
        // set number of application retries to 1 in the default case
        appContext.setMaxAppAttempts(flinkConfiguration.getInteger(ConfigConstants.YARN_APPLICATION_ATTEMPTS, 1));
    }

    final ApplicationId appId = appContext.getApplicationId();

    // Setup jar for ApplicationMaster
    LocalResource appMasterJar = Records.newRecord(LocalResource.class);
    LocalResource flinkConf = Records.newRecord(LocalResource.class);
    Path remotePathJar = Utils.setupLocalResource(fs, appId.toString(), flinkJarPath, appMasterJar,
            fs.getHomeDirectory());
    Path remotePathConf = Utils.setupLocalResource(fs, appId.toString(), flinkConfigurationPath, flinkConf,
            fs.getHomeDirectory());
    Map<String, LocalResource> localResources = new HashMap<>(2);
    localResources.put("flink.jar", appMasterJar);
    localResources.put("flink-conf.yaml", flinkConf);

    // setup security tokens (code from apache storm)
    final Path[] paths = new Path[2 + shipFiles.size()];
    StringBuilder envShipFileList = new StringBuilder();

    // upload ship files
    for (int i = 0; i < shipFiles.size(); i++) {
        File shipFile = shipFiles.get(i);
        LocalResource shipResources = Records.newRecord(LocalResource.class);
        Path shipLocalPath = new Path("file://" + shipFile.getAbsolutePath());
        paths[2 + i] = Utils.setupLocalResource(fs, appId.toString(), shipLocalPath, shipResources,
                fs.getHomeDirectory());
        localResources.put(shipFile.getName(), shipResources);
        envShipFileList.append(paths[2 + i]);
        if (i + 1 < shipFiles.size()) {
            envShipFileList.append(',');
        }
    }

    paths[0] = remotePathJar;
    paths[1] = remotePathConf;
    sessionFilesDir = new Path(fs.getHomeDirectory(), ".flink/" + appId.toString() + "/");

    FsPermission permission = new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE);
    fs.setPermission(sessionFilesDir, permission); // set permission for path.

    Utils.setTokensFor(amContainer, paths, conf);

    amContainer.setLocalResources(localResources);
    fs.close();

    // Setup CLASSPATH for ApplicationMaster
    Map<String, String> appMasterEnv = new HashMap<>();
    // set user specified app master environment variables
    appMasterEnv.putAll(Utils.getEnvironmentVariables(ConfigConstants.YARN_APPLICATION_MASTER_ENV_PREFIX,
            flinkConfiguration));
    // set classpath from YARN configuration
    Utils.setupEnv(conf, appMasterEnv);
    // set Flink on YARN internal configuration values
    appMasterEnv.put(YarnConfigKeys.ENV_TM_COUNT, String.valueOf(taskManagerCount));
    appMasterEnv.put(YarnConfigKeys.ENV_TM_MEMORY, String.valueOf(taskManagerMemoryMb));
    appMasterEnv.put(YarnConfigKeys.FLINK_JAR_PATH, remotePathJar.toString());
    appMasterEnv.put(YarnConfigKeys.ENV_APP_ID, appId.toString());
    appMasterEnv.put(YarnConfigKeys.ENV_CLIENT_HOME_DIR, fs.getHomeDirectory().toString());
    appMasterEnv.put(YarnConfigKeys.ENV_CLIENT_SHIP_FILES, envShipFileList.toString());
    appMasterEnv.put(YarnConfigKeys.ENV_CLIENT_USERNAME, UserGroupInformation.getCurrentUser().getShortUserName());
    appMasterEnv.put(YarnConfigKeys.ENV_SLOTS, String.valueOf(slots));
    appMasterEnv.put(YarnConfigKeys.ENV_DETACHED, String.valueOf(detached));

    if (dynamicPropertiesEncoded != null) {
        appMasterEnv.put(YarnConfigKeys.ENV_DYNAMIC_PROPERTIES, dynamicPropertiesEncoded);
    }

    amContainer.setEnvironment(appMasterEnv);

    // Set up resource type requirements for ApplicationMaster
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(jobManagerMemoryMb);
    capability.setVirtualCores(1);

    String name;
    if (customName == null) {
        name = "Flink session with " + taskManagerCount + " TaskManagers";
        if (detached) {
            name += " (detached)";
        }
    } else {
        name = customName;
    }

    appContext.setApplicationName(name); // application name
    appContext.setApplicationType("Apache Flink");
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    if (yarnQueue != null) {
        appContext.setQueue(yarnQueue);
    }

    // add a hook to clean up in case deployment fails
    Runtime.getRuntime().addShutdownHook(deploymentFailureHook);
    LOG.info("Submitting application master " + appId);
    yarnClient.submitApplication(appContext);

    LOG.info("Waiting for the cluster to be allocated");
    int waittime = 0;
    loop: while (true) {
        ApplicationReport report;
        try {
            report = yarnClient.getApplicationReport(appId);
        } catch (IOException e) {
            throw new YarnDeploymentException("Failed to deploy the cluster: " + e.getMessage());
        }
        YarnApplicationState appState = report.getYarnApplicationState();
        switch (appState) {
            case FAILED:
            case FINISHED:
            case KILLED:
                throw new YarnDeploymentException("The YARN application unexpectedly switched to state " + appState
                        + " during deployment. \n" + "Diagnostics from YARN: " + report.getDiagnostics() + "\n"
                        + "If log aggregation is enabled on your cluster, use this command to further investigate the issue:\n"
                        + "yarn logs -applicationId " + appId);
                //break ..
            case RUNNING:
                LOG.info("YARN application has been deployed successfully.");
                break loop;
            default:
                LOG.info("Deploying cluster, current state " + appState);
                if (waittime > 60000) {
                    LOG.info("Deployment took more than 60 seconds. Please check if the requested resources are available in the YARN cluster");
                }
        }
        waittime += 1000;
        Thread.sleep(1000);
    }
    // print the application id for user to cancel themselves.
    if (isDetached()) {
        LOG.info("The Flink YARN client has been started in detached mode. In order to stop "
                + "Flink on YARN, use the following command or a YARN web interface to stop "
                + "it:\nyarn application -kill " + appId + "\nPlease also note that the "
                + "temporary files of the YARN session in the home directory will not be removed.");
    }

    // since deployment was successful, remove the hook
    try {
        Runtime.getRuntime().removeShutdownHook(deploymentFailureHook);
    } catch (IllegalStateException e) {
        // we're already in the shutdown hook.
    }
    // the Flink cluster is deployed in YARN. Represent cluster
    return new FlinkYarnCluster(yarnClient, appId, conf, flinkConfiguration, sessionFilesDir, detached);
}
From source file:org.apache.flink.yarn.FlinkYarnCluster.java
License:Apache License
/**
 * Shutdown the YARN cluster.
 *
 * @param failApplication whether we should fail the YARN application (in case of errors in Flink)
 */
@Override
public void shutdown(boolean failApplication) {
    if (!isConnected) {
        throw new IllegalStateException("The cluster has not been connected to the ApplicationMaster.");
    }

    if (hasBeenShutDown.getAndSet(true)) {
        return;
    }

    try {
        Runtime.getRuntime().removeShutdownHook(clientShutdownHook);
    } catch (IllegalStateException e) {
        // we are already in the shutdown hook
    }

    if (actorSystem != null) {
        LOG.info("Sending shutdown request to the Application Master");
        if (applicationClient != ActorRef.noSender()) {
            try {
                FinalApplicationStatus finalStatus;
                if (failApplication) {
                    finalStatus = FinalApplicationStatus.FAILED;
                } else {
                    finalStatus = FinalApplicationStatus.SUCCEEDED;
                }
                Future<Object> response = Patterns.ask(applicationClient,
                        new YarnMessages.LocalStopYarnSession(finalStatus, "Flink YARN Client requested shutdown"),
                        new Timeout(akkaDuration));
                Await.ready(response, akkaDuration);
            } catch (Exception e) {
                LOG.warn("Error while stopping YARN Application Client", e);
            }
        }

        actorSystem.shutdown();
        actorSystem.awaitTermination();

        actorSystem = null;
    }

    LOG.info("Deleting files in " + sessionFilesDir);
    try {
        FileSystem shutFS = FileSystem.get(hadoopConfig);
        shutFS.delete(sessionFilesDir, true); // delete conf and jar file.
        shutFS.close();
    } catch (IOException e) {
        LOG.error("Could not delete the Flink jar and configuration files in HDFS.", e);
    }

    try {
        actorRunner.join(1000); // wait for 1 second
    } catch (InterruptedException e) {
        LOG.warn("Shutdown of the actor runner was interrupted", e);
        Thread.currentThread().interrupt();
    }
    try {
        pollingRunner.stopRunner();
        pollingRunner.join(1000);
    } catch (InterruptedException e) {
        LOG.warn("Shutdown of the polling runner was interrupted", e);
        Thread.currentThread().interrupt();
    }

    LOG.info("YARN Client is shutting down");
    yarnClient.stop(); // actorRunner is using the yarnClient.
    yarnClient = null; // set null to clearly see if somebody wants to access it afterwards.
}
From source file:org.apache.flink.yarn.YarnClusterClient.java
License:Apache License
/**
 * Shuts down the YARN application.
 */
public void shutdownCluster() {
    if (hasBeenShutDown.getAndSet(true)) {
        return;
    }

    if (!isConnected) {
        throw new IllegalStateException("The cluster has not been connected to the ApplicationMaster.");
    }

    try {
        Runtime.getRuntime().removeShutdownHook(clientShutdownHook);
    } catch (IllegalStateException e) {
        // we are already in the shutdown hook
    }

    LOG.info("Sending shutdown request to the Application Master");
    try {
        Future<Object> response = Patterns.ask(applicationClient.get(),
                new YarnMessages.LocalStopYarnSession(getApplicationStatus(), "Flink YARN Client requested shutdown"),
                new Timeout(akkaDuration));
        Await.ready(response, akkaDuration);
    } catch (Exception e) {
        LOG.warn("Error while stopping YARN cluster.", e);
    }

    try {
        File propertiesFile = FlinkYarnSessionCli.getYarnPropertiesLocation(flinkConfig);
        if (propertiesFile.isFile()) {
            if (propertiesFile.delete()) {
                LOG.info("Deleted Yarn properties file at {}", propertiesFile.getAbsoluteFile().toString());
            } else {
                LOG.warn("Couldn't delete Yarn properties file at {}", propertiesFile.getAbsoluteFile().toString());
            }
        }
    } catch (Exception e) {
        LOG.warn("Exception while deleting the JobManager address file", e);
    }

    if (sessionFilesDir != null) {
        LOG.info("Deleting files in " + sessionFilesDir);
        try {
            FileSystem shutFS = FileSystem.get(hadoopConfig);
            shutFS.delete(sessionFilesDir, true); // delete conf and jar file.
            shutFS.close();
        } catch (IOException e) {
            LOG.error("Could not delete the Flink jar and configuration files in HDFS.", e);
        }
    } else {
        LOG.warn("Session file directory not set. Not deleting session files");
    }

    try {
        pollingRunner.stopRunner();
        pollingRunner.join(1000);
    } catch (InterruptedException e) {
        LOG.warn("Shutdown of the polling runner was interrupted", e);
        Thread.currentThread().interrupt();
    }

    try {
        ApplicationReport appReport = yarnClient.getApplicationReport(appId);

        LOG.info("Application " + appId + " finished with state " + appReport.getYarnApplicationState()
                + " and final state " + appReport.getFinalApplicationStatus() + " at " + appReport.getFinishTime());

        if (appReport.getYarnApplicationState() == YarnApplicationState.FAILED
                || appReport.getYarnApplicationState() == YarnApplicationState.KILLED) {
            LOG.warn("Application failed. Diagnostics " + appReport.getDiagnostics());
            LOG.warn("If log aggregation is activated in the Hadoop cluster, we recommend to retrieve "
                    + "the full application log using this command:" + System.lineSeparator()
                    + "\tyarn logs -applicationId " + appReport.getApplicationId() + System.lineSeparator()
                    + "(It sometimes takes a few seconds until the logs are aggregated)");
        }
    } catch (Exception e) {
        LOG.warn("Couldn't get final report", e);
    }

    LOG.info("YARN Client is shutting down");
    yarnClient.stop(); // actorRunner is using the yarnClient.
    yarnClient = null; // set null to clearly see if somebody wants to access it afterwards.
}
From source file:org.apache.flume.sink.customhdfs.TestHDFSEventSink.java
License:Apache License
@Test
public void testCloseOnIdle() throws IOException, EventDeliveryException, InterruptedException {
    String hdfsPath = testPath + "/idleClose";
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path dirPath = new Path(hdfsPath);
    fs.delete(dirPath, true);
    fs.mkdirs(dirPath);
    Context context = new Context();
    context.put("hdfs.path", hdfsPath);
    /*
     * All three rolling methods are disabled so the only
     * way a file can roll is through the idle timeout.
     */
    context.put("hdfs.rollCount", "0");
    context.put("hdfs.rollSize", "0");
    context.put("hdfs.rollInterval", "0");
    context.put("hdfs.batchSize", "2");
    context.put("hdfs.idleTimeout", "1");
    Configurables.configure(sink, context);
    Channel channel = new MemoryChannel();
    Configurables.configure(channel, context);
    sink.setChannel(channel);
    sink.start();
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (int i = 0; i < 10; i++) {
        Event event = new SimpleEvent();
        event.setBody(("test event " + i).getBytes());
        channel.put(event);
    }
    txn.commit();
    txn.close();
    sink.process();
    sink.process();
    Thread.sleep(1001); // previous file should have timed out now
    // this can throw BucketClosedException (from the bucketWriter having
    // closed), this is not an issue as the sink will retry and get a fresh
    // bucketWriter so long as the onClose handler properly removes
    // bucket writers that were closed.
    sink.process();
    sink.process();
    Thread.sleep(500); // shouldn't be enough for a timeout to occur
    sink.process();
    sink.process();
    sink.stop();
    FileStatus[] dirStat = fs.listStatus(dirPath);
    Path[] fList = FileUtil.stat2Paths(dirStat);
    Assert.assertEquals("Incorrect content of the directory " + StringUtils.join(fList, ","), 2, fList.length);
    Assert.assertTrue(!fList[0].getName().endsWith(".tmp") && !fList[1].getName().endsWith(".tmp"));
    fs.close();
}
From source file:org.apache.hama.HamaTestCase.java
License:Apache License
/**
 * Common method to close down a MiniDFSCluster and the associated file system.
 *
 * @param cluster
 */
public static void shutdownDfs(MiniDFSCluster cluster) {
    if (cluster != null) {
        LOG.info("Shutting down Mini DFS ");
        try {
            cluster.shutdown();
        } catch (Exception e) {
            // Can get a java.lang.reflect.UndeclaredThrowableException thrown
            // here because of an InterruptedException. Don't let exceptions in
            // here be cause of test failure.
        }
        try {
            FileSystem fs = cluster.getFileSystem();
            if (fs != null) {
                LOG.info("Shutting down FileSystem");
                fs.close();
            }
            FileSystem.closeAll();
        } catch (IOException e) {
            LOG.error("error closing file system", e);
        }
    }
}
From source file:org.apache.hive.hcatalog.templeton.tool.HDFSCleanup.java
License:Apache License
/**
 * Run the cleanup loop.
 */
public void run() {
    while (!stop) {
        try {
            // Put each check in a separate try/catch, so if that particular
            // cycle fails, it'll try again on the next cycle.
            FileSystem fs = null;
            try {
                fs = new Path(storage_root).getFileSystem(appConf);
                checkFiles(fs);
            } catch (Exception e) {
                LOG.error("Cleanup cycle failed: " + e.getMessage());
            } finally {
                if (fs != null) {
                    try {
                        fs.close();
                    } catch (Exception e) {
                        LOG.error("Closing file system failed: " + e.getMessage());
                    }
                }
            }

            long sleepMillis = (long) (Math.random() * interval);
            LOG.info("Next execution: " + new Date(new Date().getTime() + sleepMillis));
            Thread.sleep(sleepMillis);
        } catch (Exception e) {
            // If sleep fails, we should exit now before things get worse.
            isRunning = false;
            LOG.error("Cleanup failed: " + e.getMessage(), e);
        }
    }
    isRunning = false;
}
From source file:org.apache.ignite.igfs.HadoopFIleSystemFactorySelfTest.java
License:Apache License
/**
 * Test custom factory.
 *
 * @throws Exception If failed.
 */
@SuppressWarnings("ThrowableResultOfMethodCallIgnored")
public void testCustomFactory() throws Exception {
    assert START_CNT.get() == 1;
    assert STOP_CNT.get() == 0;

    // Use IGFS directly.
    primary.mkdirs(IGFS_PATH_DUAL);

    assert primary.exists(IGFS_PATH_DUAL);
    assert secondary.exists(IGFS_PATH_DUAL);

    GridTestUtils.assertThrows(null, new Callable<Object>() {
        @Override public Object call() throws Exception {
            primary.mkdirs(IGFS_PATH_PROXY);

            return null;
        }
    }, IgfsInvalidPathException.class, null);

    // Create remote instance.
    FileSystem fs = FileSystem.get(URI.create("igfs://primary:primary@127.0.0.1:10500/"), baseConfiguration());

    // Ensure lifecycle callback was invoked.
    assert START_CNT.get() == 2;
    assert STOP_CNT.get() == 0;

    // Check file system operations.
    assert fs.exists(PATH_DUAL);

    assert fs.delete(PATH_DUAL, true);
    assert !primary.exists(IGFS_PATH_DUAL);
    assert !secondary.exists(IGFS_PATH_DUAL);
    assert !fs.exists(PATH_DUAL);

    assert fs.mkdirs(PATH_DUAL);
    assert primary.exists(IGFS_PATH_DUAL);
    assert secondary.exists(IGFS_PATH_DUAL);
    assert fs.exists(PATH_DUAL);

    assert fs.mkdirs(PATH_PROXY);
    assert secondary.exists(IGFS_PATH_PROXY);
    assert fs.exists(PATH_PROXY);

    // Close file system and ensure that associated factory was notified.
    fs.close();

    assert START_CNT.get() == 2;
    assert STOP_CNT.get() == 1;

    // Stop primary node and ensure that base factory was notified.
    G.stop(primary.context().kernalContext().grid().name(), true);

    assert START_CNT.get() == 2;
    assert STOP_CNT.get() == 2;
}
From source file:org.apache.ignite.igfs.HadoopIgfs20FileSystemAbstractSelfTest.java
License:Apache License
/**
 * Test expected failures for 'close' operation.
 *
 * @param fs File system to test.
 * @param msg Expected exception message.
 */
public void assertCloseFails(final FileSystem fs, String msg) {
    GridTestUtils.assertThrows(log, new Callable() {
        @Override public Object call() throws Exception {
            fs.close();

            return null;
        }
    }, IOException.class, msg);
}
From source file:org.apache.ignite.igfs.IgfsHadoopFileSystemAbstractSelfTest.java
License:Apache License
/**
 * Test how IPC cache map works.
 *
 * @throws Exception If failed.
 */
public void testIpcCache() throws Exception {
    IgfsHadoopEx hadoop = GridTestUtils.getFieldValue(fs, "rmtClient", "delegateRef", "value", "hadoop");

    if (hadoop instanceof IgfsHadoopOutProc) {
        FileSystem fsOther = null;

        try {
            Field field = IgfsHadoopIpcIo.class.getDeclaredField("ipcCache");

            field.setAccessible(true);

            Map<String, IgfsHadoopIpcIo> cache = (Map<String, IgfsHadoopIpcIo>) field.get(null);

            Configuration cfg = configuration(PRIMARY_AUTHORITY, skipEmbed, skipLocShmem);

            // we disable caching in order to obtain new FileSystem instance.
            cfg.setBoolean("fs.igfs.impl.disable.cache", true);

            // Initial cache size.
            int initSize = cache.size();

            // Ensure that when IO is used by multiple file systems and one of them is closed, IO is not stopped.
            fsOther = FileSystem.get(new URI(PRIMARY_URI), cfg);

            assert fs != fsOther;

            assertEquals(initSize, cache.size());

            fsOther.close();

            assertEquals(initSize, cache.size());

            Field stopField = IgfsHadoopIpcIo.class.getDeclaredField("stopping");

            stopField.setAccessible(true);

            IgfsHadoopIpcIo io = null;

            for (Map.Entry<String, IgfsHadoopIpcIo> ioEntry : cache.entrySet()) {
                if (endpoint.contains(ioEntry.getKey())) {
                    io = ioEntry.getValue();

                    break;
                }
            }

            assert io != null;

            assert !(Boolean) stopField.get(io);

            // Ensure that IO is stopped when nobody else needs it.
            fs.close();

            assertEquals(initSize - 1, cache.size());

            assert (Boolean) stopField.get(io);
        } finally {
            U.closeQuiet(fsOther);
        }
    }
}
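The test above reaches into cache internals via reflection. Outside of tests, when one FileSystem must be closed without disturbing other users of the shared cached instance, a hedged everyday alternative is FileSystem.newInstance(Configuration), which bypasses the cache. A minimal sketch (class name is illustrative):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class PrivateFileSystemSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // newInstance() always returns a non-cached FileSystem, so closing it
        // cannot affect the shared instance other code obtained via get().
        FileSystem privateFs = FileSystem.newInstance(conf);
        try {
            System.out.println(privateFs.getHomeDirectory());
        } finally {
            privateFs.close(); // closes only this private instance
        }
    }
}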
From source file:org.apache.ignite.igfs.IgfsHadoopFileSystemAbstractSelfTest.java
License:Apache License
/** @throws Exception If failed. */
public void testCloseIfNotInitialized() throws Exception {
    final FileSystem fs = new IgfsHadoopFileSystem();

    // Check close makes nothing harmful.
    fs.close();
}