Example usage for org.apache.hadoop.fs FileSystem close

List of usage examples for org.apache.hadoop.fs FileSystem close

Introduction

On this page you can find usage examples for org.apache.hadoop.fs FileSystem close.

Prototype

@Override
public void close() throws IOException 

Document

Close this FileSystem instance.
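
Before the collected examples, here is a minimal sketch of the typical call pattern, assuming a default Configuration and an illustrative path (neither is taken from the examples below). Note that FileSystem.get() can return a cached instance shared with other callers, so close() may invalidate their handle as well; FileSystem.newInstance() side-steps the cache, which is why several examples below either use it or set fs.hdfs.impl.disable.cache.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemCloseSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // newInstance() bypasses the FileSystem cache, so closing this instance
        // does not affect instances other code obtained via FileSystem.get().
        try (FileSystem fs = FileSystem.newInstance(conf)) {
            Path path = new Path("/tmp/example.txt"); // illustrative path
            System.out.println("exists: " + fs.exists(path));
        } // FileSystem implements Closeable, so close() runs automatically here
    }
}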

Usage

From source file:org.apache.sentry.binding.hive.SentryIniPolicyFileFormatter.java

License:Apache License

/**
 * Write the sentry mapping data to ini file.
 *
 * @param resourcePath The path of the output file
 * @param conf sentry configuration
 * @param sentryMappingData
 *        The map for sentry mapping data, eg:
 *        for the following mapping data:
 *        user1=role1,role2
 *        user2=role2,role3
 *        group1=role1,role2
 *        group2=role2,role3
 *        role1=server=server1->db=db1
 *        role2=server=server1->db=db1->table=tbl1,server=server1->db=db1->table=tbl2
 *        role3=server=server1->url=hdfs://localhost/path
 *
 *        The sentryMappingData will be input as:
 *        {
 *        users={user1=[role1, role2], user2=[role2, role3]},
 *        groups={group1=[role1, role2], group2=[role2, role3]},
 *        roles={role1=[server=server1->db=db1],
 *        role2=[server=server1->db=db1->table=tbl1,server=server1->db=db1->table=tbl2],
 *        role3=[server=server1->url=hdfs://localhost/path]
 *        }
 *        }
 */
@Override
public void write(String resourcePath, Configuration conf,
        Map<String, Map<String, Set<String>>> sentryMappingData) throws Exception {
    Path path = new Path(resourcePath);
    if (Strings.isNullOrEmpty(path.toUri().getScheme())) {
        // Path provided did not have any URI scheme. Update the scheme based on configuration.
        String defaultFs = conf.get(ServiceConstants.ClientConfig.SENTRY_EXPORT_IMPORT_DEFAULT_FS,
                ServiceConstants.ClientConfig.SENTRY_EXPORT_IMPORT_DEFAULT_FS_DEFAULT);
        path = new Path(defaultFs + resourcePath);
    }

    FileSystem fileSystem = path.getFileSystem(conf);
    if (fileSystem.exists(path)) {
        fileSystem.delete(path, true);
    }
    String contents = Joiner.on(NL).join(
            generateSection(PolicyFileConstants.USER_ROLES,
                    sentryMappingData.get(PolicyFileConstants.USER_ROLES)),
            generateSection(PolicyFileConstants.GROUPS, sentryMappingData.get(PolicyFileConstants.GROUPS)),
            generateSection(PolicyFileConstants.ROLES, sentryMappingData.get(PolicyFileConstants.ROLES)), "");
    LOGGER.info("Writing policy information to file located at " + path);
    OutputStream os = fileSystem.create(path);
    BufferedWriter br = new BufferedWriter(new OutputStreamWriter(os, "UTF-8"));
    try {
        br.write(contents);
    } catch (Exception exception) {
        LOGGER.error("Failed to export policy information to file, located at " + path);
    } finally {
        br.close();
        fileSystem.close();
    }
}
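
The Javadoc above spells out the expected shape of the sentryMappingData argument. The following is a hedged sketch of how a caller might assemble and pass that map; the formatter instance, the output URI, and the use of Guava's Sets.newHashSet are illustrative assumptions, not code from the Sentry project, and the Sentry identifiers are assumed to be imported as in the surrounding file.

// Hypothetical driver; mirrors the Javadoc example above rather than real Sentry code.
static void exportExample(SentryIniPolicyFileFormatter formatter, Configuration conf) throws Exception {
    Map<String, Set<String>> users = new HashMap<String, Set<String>>();
    users.put("user1", Sets.newHashSet("role1", "role2"));
    users.put("user2", Sets.newHashSet("role2", "role3"));

    Map<String, Set<String>> roles = new HashMap<String, Set<String>>();
    roles.put("role1", Sets.newHashSet("server=server1->db=db1"));
    roles.put("role3", Sets.newHashSet("server=server1->url=hdfs://localhost/path"));

    // Groups would be added analogously under PolicyFileConstants.GROUPS.
    Map<String, Map<String, Set<String>>> mappingData =
            new HashMap<String, Map<String, Set<String>>>();
    mappingData.put(PolicyFileConstants.USER_ROLES, users);
    mappingData.put(PolicyFileConstants.ROLES, roles);

    // write() deletes any existing file at this path and closes the FileSystem when done.
    formatter.write("hdfs://localhost:8020/tmp/sentry-policy.ini", conf, mappingData);
}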

From source file:org.apache.solr.cloud.hdfs.HdfsRecoverLeaseTest.java

License:Apache License

@Test
public void testBasic() throws IOException {
    long startRecoverLeaseSuccessCount = FSHDFSUtils.RECOVER_LEASE_SUCCESS_COUNT.get();

    URI uri = dfsCluster.getURI();
    Path path = new Path(uri);
    Configuration conf = new Configuration();
    conf.setBoolean("fs.hdfs.impl.disable.cache", true);
    FileSystem fs1 = FileSystem.get(path.toUri(), conf);
    Path testFile = new Path(uri.toString() + "/testfile");
    FSDataOutputStream out = fs1.create(testFile);

    out.write(5);
    out.hflush();
    out.close();

    FSHDFSUtils.recoverFileLease(fs1, testFile, conf, new CallerInfo() {

        @Override
        public boolean isCallerClosed() {
            return false;
        }
    });
    assertEquals(0, FSHDFSUtils.RECOVER_LEASE_SUCCESS_COUNT.get() - startRecoverLeaseSuccessCount);

    fs1.close();

    FileSystem fs2 = FileSystem.get(path.toUri(), conf);
    Path testFile2 = new Path(uri.toString() + "/testfile2");
    FSDataOutputStream out2 = fs2.create(testFile2);

    if (random().nextBoolean()) {
        int cnt = random().nextInt(100);
        for (int i = 0; i < cnt; i++) {
            out2.write(random().nextInt(20000));
        }
        out2.hflush();
    }

    // closing the fs will close the file it seems
    // fs2.close();

    FileSystem fs3 = FileSystem.get(path.toUri(), conf);

    FSHDFSUtils.recoverFileLease(fs3, testFile2, conf, new CallerInfo() {

        @Override
        public boolean isCallerClosed() {
            return false;
        }
    });
    assertEquals(1, FSHDFSUtils.RECOVER_LEASE_SUCCESS_COUNT.get() - startRecoverLeaseSuccessCount);

    fs3.close();
    fs2.close();
}

From source file:org.apache.solr.cloud.hdfs.HdfsThreadLeakTest.java

License:Apache License

@Test
public void testBasic() throws IOException {
    String uri = HdfsTestUtil.getURI(dfsCluster);
    Path path = new Path(uri);
    Configuration conf = new Configuration();
    conf.setBoolean("fs.hdfs.impl.disable.cache", true);
    FileSystem fs = FileSystem.get(path.toUri(), conf);
    Path testFile = new Path(uri.toString() + "/testfile");
    FSDataOutputStream out = fs.create(testFile);

    out.write(5);
    out.hflush();
    out.close();

    ((DistributedFileSystem) fs).recoverLease(testFile);

    fs.close();
}

From source file:org.apache.solr.cloud.hdfs.StressHdfsTest.java

License:Apache License

private void createAndDeleteCollection() throws SolrServerException, IOException, Exception, KeeperException,
        InterruptedException, URISyntaxException {

    boolean overshard = random().nextBoolean();
    if (overshard) {
        createCollection(DELETE_DATA_DIR_COLLECTION, shardCount * 2, 1, 2);
    } else {
        int rep = shardCount / 2;
        if (rep == 0)
            rep = 1;
        createCollection(DELETE_DATA_DIR_COLLECTION, rep, 2, 1);
    }

    waitForRecoveriesToFinish(DELETE_DATA_DIR_COLLECTION, false);
    cloudClient.setDefaultCollection(DELETE_DATA_DIR_COLLECTION);
    cloudClient.getZkStateReader().updateClusterState(true);

    // collect the data dirs
    List<String> dataDirs = new ArrayList<String>();

    int i = 0;
    for (SolrServer client : clients) {
        HttpSolrServer c = new HttpSolrServer(getBaseUrl(client) + "/delete_data_dir");
        try {
            c.add(getDoc("id", i++));
            if (random().nextBoolean())
                c.add(getDoc("id", i++));
            if (random().nextBoolean())
                c.add(getDoc("id", i++));
            if (random().nextBoolean()) {
                c.commit();
            } else {
                c.commit(true, true, true);
            }

            c.query(new SolrQuery("id:" + i));
            c.setSoTimeout(60000);
            c.setConnectionTimeout(30000);
            NamedList<Object> response = c.query(new SolrQuery().setRequestHandler("/admin/system"))
                    .getResponse();
            NamedList<Object> coreInfo = (NamedList<Object>) response.get("core");
            String dataDir = (String) ((NamedList<Object>) coreInfo.get("directory")).get("data");
            dataDirs.add(dataDir);
        } finally {
            c.shutdown();
        }
    }

    if (random().nextBoolean()) {
        cloudClient.deleteByQuery("*:*");
        cloudClient.commit();

        assertEquals(0, cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound());
    }

    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("action", CollectionAction.DELETE.toString());
    params.set("name", DELETE_DATA_DIR_COLLECTION);
    QueryRequest request = new QueryRequest(params);
    request.setPath("/admin/collections");
    cloudClient.request(request);

    // check that all dirs are gone
    for (String dataDir : dataDirs) {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.newInstance(new URI(dataDir), conf);
        assertFalse("Data directory exists after collection removal : " + dataDir,
                fs.exists(new Path(dataDir)));
        fs.close();
    }
}

From source file:org.apache.tajo.catalog.store.TestHCatalogStore.java

License:Apache License

@Test
public void testDeleteTable() throws Exception {
    TableMeta meta = new TableMeta(CatalogProtos.StoreType.CSV, new KeyValueSet());
    org.apache.tajo.catalog.Schema schema = new org.apache.tajo.catalog.Schema();
    schema.addColumn("n_name", TajoDataTypes.Type.TEXT);
    schema.addColumn("n_regionkey", TajoDataTypes.Type.INT4);
    schema.addColumn("n_comment", TajoDataTypes.Type.TEXT);

    String tableName = "table1";
    TableDesc table = new TableDesc(DB_NAME + "." + tableName, schema, meta, warehousePath.toUri());
    store.createTable(table.getProto());
    assertTrue(store.existTable(DB_NAME, tableName));

    TableDesc table1 = new TableDesc(store.getTable(DB_NAME, tableName));
    FileSystem fs = FileSystem.getLocal(new Configuration());
    assertTrue(fs.exists(new Path(table1.getPath())));

    store.dropTable(DB_NAME, tableName);
    assertFalse(store.existTable(DB_NAME, tableName));
    fs.close();
}

From source file:org.apache.tajo.TajoTestingCluster.java

License:Apache License

public void shutdownMiniDFSCluster() throws Exception {
    if (this.dfsCluster != null) {
        try {
            FileSystem fs = this.dfsCluster.getFileSystem();
            if (fs != null)
                fs.close();
        } catch (IOException e) {
            System.err.println("error closing file system: " + e);
        }
        // The below throws an exception per dn, AsynchronousCloseException.
        this.dfsCluster.shutdown();
    }
}

From source file:org.apache.tajo.TajoTestingCluster.java

License:Apache License

public void shutdownMiniCluster() throws IOException {
    LOG.info("========================================");
    LOG.info("Minicluster is stopping");
    LOG.info("========================================");

    try {
        Thread.sleep(3000);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    shutdownMiniTajoCluster();

    if (this.catalogServer != null) {
        shutdownCatalogCluster();
        isCatalogServerRunning = false;
    }

    try {
        Thread.sleep(3000);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    if (this.dfsCluster != null) {
        try {
            FileSystem fs = this.dfsCluster.getFileSystem();
            if (fs != null)
                fs.close();
            this.dfsCluster.shutdown();
        } catch (IOException e) {
            System.err.println("error closing file system: " + e);
        }
        isDFSRunning = false;
    }

    if (this.clusterTestBuildDir != null && this.clusterTestBuildDir.exists()) {
        if (!ShutdownHookManager.get().isShutdownInProgress()) {
            //TODO clean test dir when ShutdownInProgress
            LocalFileSystem localFS = LocalFileSystem.getLocal(conf);
            localFS.delete(new Path(clusterTestBuildDir.toString()), true);
            localFS.close();
        }
        this.clusterTestBuildDir = null;
    }

    if (hbaseUtil != null) {
        hbaseUtil.stopZooKeeperCluster();
        hbaseUtil.stopHBaseCluster();
    }

    LOG.info("Minicluster is down");
    isTajoClusterRunning = false;
}

From source file:org.apache.vxquery.metadata.VXQueryCollectionOperatorDescriptor.java

License:Apache License

@Override
public IOperatorNodePushable createPushRuntime(IHyracksTaskContext ctx,
        IRecordDescriptorProvider recordDescProvider, int partition, int nPartitions)
        throws HyracksDataException {
    final FrameTupleAccessor fta = new FrameTupleAccessor(ctx.getFrameSize(),
            recordDescProvider.getInputRecordDescriptor(getActivityId(), 0));
    final int fieldOutputCount = recordDescProvider.getOutputRecordDescriptor(getActivityId(), 0)
            .getFieldCount();
    final ByteBuffer frame = ctx.allocateFrame();
    final FrameTupleAppender appender = new FrameTupleAppender(ctx.getFrameSize(), fieldOutputCount);
    final short partitionId = (short) ctx.getTaskAttemptId().getTaskId().getPartition();
    final ITreeNodeIdProvider nodeIdProvider = new TreeNodeIdProvider(partitionId, dataSourceId,
            totalDataSources);
    final String nodeId = ctx.getJobletContext().getApplicationContext().getNodeId();
    final DynamicContext dCtx = (DynamicContext) ctx.getJobletContext().getGlobalJobData();

    final String collectionName = collectionPartitions[partition % collectionPartitions.length];
    final XMLParser parser = new XMLParser(false, nodeIdProvider, nodeId, frame, appender, childSeq,
            dCtx.getStaticContext());

    return new AbstractUnaryInputUnaryOutputOperatorNodePushable() {
        @Override
        public void open() throws HyracksDataException {
            appender.reset(frame, true);
            writer.open();
            hdfs = new HDFSFunctions();
        }

        @Override
        public void nextFrame(ByteBuffer buffer) throws HyracksDataException {
            fta.reset(buffer);
            String collectionModifiedName = collectionName.replace("${nodeId}", nodeId);
            if (!collectionModifiedName.contains("hdfs:/")) {
                File collectionDirectory = new File(collectionModifiedName);
                //check if directory is in the local file system
                if (collectionDirectory.exists()) {
                    // Go through each tuple.
                    if (collectionDirectory.isDirectory()) {
                        for (int tupleIndex = 0; tupleIndex < fta.getTupleCount(); ++tupleIndex) {
                            Iterator<File> it = FileUtils.iterateFiles(collectionDirectory,
                                    new VXQueryIOFileFilter(), TrueFileFilter.INSTANCE);
                            while (it.hasNext()) {
                                File xmlDocument = it.next();
                                if (LOGGER.isLoggable(Level.FINE)) {
                                    LOGGER.fine(
                                            "Starting to read XML document: " + xmlDocument.getAbsolutePath());
                                }
                                parser.parseElements(xmlDocument, writer, fta, tupleIndex);
                            }
                        }
                    } else {
                        throw new HyracksDataException("Invalid directory parameter (" + nodeId + ":"
                                + collectionDirectory.getAbsolutePath() + ") passed to collection.");
                    }
                }
            } else {
                // Else check in HDFS file system
                // Get instance of the HDFS filesystem
                FileSystem fs = hdfs.getFileSystem();
                if (fs != null) {
                    collectionModifiedName = collectionModifiedName.replaceAll("hdfs:/", "");
                    Path directory = new Path(collectionModifiedName);
                    Path xmlDocument;
                    if (tag != null) {
                        hdfs.setJob(directory.getName(), tag);
                        tag = "<" + tag + ">";
                        Job job = hdfs.getJob();
                        InputFormat inputFormat = hdfs.getinputFormat();
                        try {
                            hdfs.scheduleSplits();
                            ArrayList<Integer> schedule = hdfs
                                    .getScheduleForNode(InetAddress.getLocalHost().getHostName());
                            List<InputSplit> splits = hdfs.getSplits();
                            List<FileSplit> fileSplits = new ArrayList<FileSplit>();
                            for (int i : schedule) {
                                fileSplits.add((FileSplit) splits.get(i));
                            }
                            FileSplitsFactory splitsFactory = new FileSplitsFactory(fileSplits);
                            List<FileSplit> inputSplits = splitsFactory.getSplits();
                            ContextFactory ctxFactory = new ContextFactory();
                            int size = inputSplits.size();
                            InputStream stream;
                            String value;
                            RecordReader reader;
                            TaskAttemptContext context;
                            for (int i = 0; i < size; i++) {
                                //read split
                                context = ctxFactory.createContext(job.getConfiguration(), i);
                                try {
                                    reader = inputFormat.createRecordReader(inputSplits.get(i), context);
                                    reader.initialize(inputSplits.get(i), context);
                                    while (reader.nextKeyValue()) {
                                        value = reader.getCurrentValue().toString();
                                        //Split value if it contains more than one item with the tag
                                        if (StringUtils.countMatches(value, tag) > 1) {
                                            String items[] = value.split(tag);
                                            for (String item : items) {
                                                if (item.length() > 0) {
                                                    item = START_TAG + tag + item;
                                                    stream = new ByteArrayInputStream(
                                                            item.getBytes(StandardCharsets.UTF_8));
                                                    parser.parseHDFSElements(stream, writer, fta, i);
                                                }
                                            }
                                        } else {
                                            value = START_TAG + value;
                                            //create an input stream to the file currently reading and send it to parser
                                            stream = new ByteArrayInputStream(
                                                    value.getBytes(StandardCharsets.UTF_8));
                                            parser.parseHDFSElements(stream, writer, fta, i);
                                        }
                                    }

                                } catch (InterruptedException e) {
                                    if (LOGGER.isLoggable(Level.SEVERE)) {
                                        LOGGER.severe(e.getMessage());
                                    }
                                }
                            }

                        } catch (IOException e) {
                            if (LOGGER.isLoggable(Level.SEVERE)) {
                                LOGGER.severe(e.getMessage());
                            }
                        } catch (ParserConfigurationException e) {
                            if (LOGGER.isLoggable(Level.SEVERE)) {
                                LOGGER.severe(e.getMessage());
                            }
                        } catch (SAXException e) {
                            if (LOGGER.isLoggable(Level.SEVERE)) {
                                LOGGER.severe(e.getMessage());
                            }
                        }
                    } else {
                        try {
                            //check if the path exists and is a directory
                            if (fs.exists(directory) && fs.isDirectory(directory)) {
                                for (int tupleIndex = 0; tupleIndex < fta.getTupleCount(); ++tupleIndex) {
                                    //read every file in the directory
                                    RemoteIterator<LocatedFileStatus> it = fs.listFiles(directory, true);
                                    while (it.hasNext()) {
                                        xmlDocument = it.next().getPath();
                                        if (fs.isFile(xmlDocument)) {
                                            if (LOGGER.isLoggable(Level.FINE)) {
                                                LOGGER.fine("Starting to read XML document: "
                                                        + xmlDocument.getName());
                                            }
                                            //create an input stream to the file currently reading and send it to parser
                                            InputStream in = fs.open(xmlDocument).getWrappedStream();
                                            parser.parseHDFSElements(in, writer, fta, tupleIndex);
                                        }
                                    }
                                }
                            } else {
                                throw new HyracksDataException("Invalid HDFS directory parameter (" + nodeId
                                        + ":" + directory + ") passed to collection.");
                            }
                        } catch (FileNotFoundException e) {
                            if (LOGGER.isLoggable(Level.SEVERE)) {
                                LOGGER.severe(e.getMessage());
                            }
                        } catch (IOException e) {
                            if (LOGGER.isLoggable(Level.SEVERE)) {
                                LOGGER.severe(e.getMessage());
                            }
                        }
                    }
                    try {
                        fs.close();
                    } catch (IOException e) {
                        if (LOGGER.isLoggable(Level.SEVERE)) {
                            LOGGER.severe(e.getMessage());
                        }
                    }
                }
            }
        }

        @Override
        public void fail() throws HyracksDataException {
            writer.fail();
        }

        @Override
        public void close() throws HyracksDataException {
            // Check if needed?
            fta.reset(frame);
            if (fta.getTupleCount() > 0) {
                FrameUtils.flushFrame(frame, writer);
            }
            writer.close();
        }
    };
}

From source file:org.avenir.tree.DataPartitioner.java

License:Apache License

/**
 * @param outPath
 * @param segmentCount
 * @param conf
 * @throws IOException
 */
private void moveOutputToSegmentDir(String outPath, int segmentCount, Configuration conf) throws IOException {
    FileSystem fileSystem = FileSystem.get(conf);
    for (int i = 0; i < segmentCount; ++i) {
        //create segment dir
        String dir = outPath + "/segment=" + i + "/data";
        Path segmentPath = new Path(dir);
        fileSystem.mkdirs(segmentPath);

        //move output to segment dir
        Path srcFile = new Path(outPath + "/part-r-0000" + i);
        Path dstFile = new Path(outPath + "/segment=" + i + "/data/partition.txt");
        fileSystem.rename(srcFile, dstFile);
    }

    fileSystem.close();
}

From source file:org.bgi.flexlab.gaea.data.mapreduce.input.header.SamHdfsFileHeader.java

License:Open Source License

public static void writeHeader(Configuration conf, SAMFileHeader header, Path output) {
    Path rankSumTestObjPath = null;
    FsAction[] v = FsAction.values();
    StringBuilder uri = new StringBuilder();
    uri.append(output);
    if (!output.getName().endsWith("/")) {
        uri.append(System.getProperty("file.separator"));
    }
    uri.append(BAM_HEADER_FILE_NAME);
    conf.set(BAM_HEADER_FILE_NAME, uri.toString());
    rankSumTestObjPath = new Path(uri.toString());
    FileSystem fs = null;
    try {
        fs = rankSumTestObjPath.getFileSystem(conf);
        FsPermission permission = new FsPermission(v[7], v[7], v[7]);
        if (!fs.exists(output)) {
            fs.mkdirs(output, permission);
        } else {
            fs.setPermission(output, permission);
        }

        SamFileHeaderCodec.writeHeader(header, fs.create(rankSumTestObjPath));
    } catch (IOException e) {
        throw new RuntimeException(e.toString());
    } finally {
        try {
            if (fs != null) {
                fs.close();
            }
        } catch (IOException ioe) {
            throw new RuntimeException(ioe.toString());
        }
    }
}