Example usage for org.apache.hadoop.fs FileSystem close

Introduction

On this page you can find example usages of org.apache.hadoop.fs.FileSystem#close().

Prototype

@Override
public void close() throws IOException 

Document

Close this FileSystem instance.
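FileSystem implements java.io.Closeable, so in addition to calling close() explicitly (as the examples below do), it can be closed with try-with-resources. The following is a minimal, self-contained sketch; the class name, URI, and path are illustrative placeholders and not taken from the examples on this page.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemCloseExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Placeholder URI; replace with your cluster's fs.defaultFS.
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf)) {
            boolean exists = fs.exists(new Path("/tmp"));
            System.out.println("/tmp exists: " + exists);
        } // fs.close() is invoked automatically when the try block exits
    }
}

Note that FileSystem.get typically returns a cached instance shared within the JVM, so closing it also closes that shared instance for other callers; closing after every operation, as some examples below do, is only safe when the instance is not shared (or when caching is disabled, e.g. via fs.hdfs.impl.disable.cache).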

Usage

From source file: org.opencloudengine.garuda.backend.hdfs.HdfsServiceImpl.java

License: Open Source License

/**
 * Deletes the given HDFS path.
 *
 * @param path the HDFS path to delete
 * @return <tt>true</tt> if the path was deleted, <tt>false</tt> otherwise
 * @throws ServiceException if the deletion fails
 */
@Override
public boolean delete(String path) throws Exception {
    this.rootCheck(path);
    this.mustExists(path);
    try {
        FileSystem fs = fileSystemFactory.getFileSystem();
        Path fsPath = new Path(path);
        boolean delete = fs.delete(fsPath, true);
        fs.close();
        return delete;
    } catch (Exception ex) {
        throw new ServiceException("Failed to delete the HDFS path.", ex);
    }
}

From source file: org.opencloudengine.garuda.backend.hdfs.HdfsServiceImpl.java

License: Open Source License

private void _appendFile(String path, InputStream is) throws Exception {
    FileSystem fs = fileSystemFactory.getFileSystem();
    Path fsPath = new Path(path);
    FSDataOutputStream out = fs.append(fsPath);
    byte[] b = new byte[1024];
    int numBytes = 0;
    while ((numBytes = is.read(b)) > 0) {
        out.write(b, 0, numBytes);
    }

    is.close();
    out.close();
    fs.close();
}

From source file: org.opencloudengine.garuda.backend.hdfs.HdfsServiceImpl.java

License: Open Source License

private Path _rename(String path, String rename) throws Exception {
    FileSystem fs = fileSystemFactory.getFileSystem();
    Path fsPath = new Path(path);
    FileStatus fileStatus = fs.getFileStatus(fsPath);
    HdfsFileInfo hdfsFileInfo = new HdfsFileInfo(fileStatus, fs.getContentSummary(fsPath));
    String parentPath = hdfsFileInfo.getPath();

    String newPath = parentPath + "/" + rename;
    Path path1 = new Path(newPath);
    if (StringUtils.isEmpty(rename)) {
        logger.warn("Failed rename HDFS file, Rename is empty : {}", newPath);
        throw new ServiceException("The new file name must not be empty.");
    }

    fs.rename(fsPath, path1);
    fs.close();
    return path1;
}

From source file: org.opencloudengine.garuda.backend.hdfs.HdfsServiceImpl.java

License: Open Source License

private void _createEmptyFile(String path) throws Exception {
    FileSystem fs = fileSystemFactory.getFileSystem();
    fs.create(new Path(path)).close();
    fs.close();
}

From source file: org.opencloudengine.garuda.backend.hdfs.HdfsServiceImpl.java

License: Open Source License

private void _setOwner(String path, String owner, String group) throws Exception {
    if (StringUtils.isEmpty(owner)) {
        owner = config.getProperty("system.hdfs.super.user");
    }
    if (StringUtils.isEmpty(group)) {
        group = owner;
    }
    FileSystem fs = fileSystemFactory.getFileSystem();
    Path fsPath = new Path(path);
    if (!fs.exists(fsPath)) {
        this.notFoundException(fsPath.toString());
    }
    fs.setOwner(fsPath, StringUtils.isEmpty(owner) ? null : owner, StringUtils.isEmpty(group) ? null : group);
    fs.close();
}

From source file: org.opencloudengine.garuda.backend.hdfs.HdfsServiceImpl.java

License: Open Source License

private void _setPermission(String path, String permission) throws Exception {
    if (StringUtils.isEmpty(permission)) {
        return;
    }
    FileSystem fs = fileSystemFactory.getFileSystem();
    Path fsPath = new Path(path);
    if (!fs.exists(fsPath)) {
        this.notFoundException(fsPath.toString());
    }
    FsPermission fsPermission = new FsPermission(permission);
    fs.setPermission(fsPath, fsPermission);
    fs.close();
}

From source file: org.opencloudengine.garuda.backend.hdfs.HdfsServiceImpl.java

License: Open Source License

@Override
public void teragen() throws Exception {
    FileSystem fs = fileSystemFactory.getFileSystem();
    for (int i = 0; i < 1000000; i++) {
        fs.create(new Path("/user/ubuntu/many/uuid_u" + i)).close();
    }
    fs.close();
}

From source file: org.schedoscope.metascope.task.metastore.MetastoreTask.java

License: Apache License

@Override
@Transactional(propagation = Propagation.REQUIRES_NEW)
public boolean run(RawJDBCSqlRepository sqlRepository, long start) {
    LOG.info("Sync repository with metastore");

    metastoreClient.init();

    FileSystem fs;
    try {
        Configuration hadoopConfig = new Configuration();
        hadoopConfig.set("fs.defaultFS", config.getHdfs());
        fs = FileSystem.get(hadoopConfig);
    } catch (IOException e) {
        LOG.info("[MetastoreSyncTask] FAILED: Could not connect to HDFS", e);
        metastoreClient.close();
        return false;
    }

    Connection connection;
    try {
        connection = dataSource.getConnection();
    } catch (SQLException e) {
        LOG.error("Could not retrieve database connection.", e);
        return false;
    }

    LOG.info("Connected to metastore (" + config.getMetastoreThriftUri() + ")");

    List<MetascopeTable> allTables = sqlRepository.findAllTables(connection);

    for (MetascopeTable table : allTables) {
        LOG.info("Get metastore information for table " + table.getFqdn());

        try {
            MetastoreTable mTable = metastoreClient.getTable(table.getDatabaseName(), table.getTableName());

            if (mTable == null) {
                LOG.error("Could not retrieve table from metastore.");
                continue;
            }

            table.setTableOwner(mTable.getOwner());
            table.setCreatedAt(mTable.getCreateTime() * 1000L);
            table.setInputFormat(mTable.getInputFormat());
            table.setOutputFormat(mTable.getOutputFormat());
            table.setDataPath(mTable.getLocation());
            try {
                table.setDataSize(getDirectorySize(fs, table.getDataPath()));
                table.setPermissions(getPermission(fs, table.getDataPath()));
            } catch (IllegalArgumentException e) {
                LOG.warn("Could not retrieve dir size: " + e.getMessage());
                LOG.debug("ERROR: Could not read HDFS metadata", e);
            }

            long maxLastTransformation = -1;

            List<String> partitionNames = metastoreClient.listPartitionNames(table.getDatabaseName(),
                    table.getTableName(), (short) -1);

            List<MetascopeView> views = sqlRepository.findViews(connection, table.getFqdn());
            List<List<String>> groupedPartitions = metastoreClient.partitionLists(partitionNames, 10000);
            for (List<String> groupedPartitionNames : groupedPartitions) {
                List<MetastorePartition> partitions = metastoreClient.listPartitions(table.getDatabaseName(),
                        table.getTableName(), groupedPartitionNames);
                List<MetascopeView> changedViews = new ArrayList<>();
                for (MetastorePartition partition : partitions) {
                    MetascopeView view = getView(views, partition);
                    if (view == null) {
                        // a view that is not registered as a partition in the Hive metastore should not exist
                        continue;
                    }

                    view.setTable(table);

                    String numRows = partition.getNumRows();
                    if (numRows != null && !numRows.toUpperCase().equals("NULL") && !numRows.isEmpty()) {
                        view.setNumRows(Long.parseLong(numRows));
                    }
                    String totalSize = partition.getTotalSize();
                    if (totalSize != null && !totalSize.toUpperCase().equals("NULL") && !totalSize.isEmpty()) {
                        view.setTotalSize(Long.parseLong(totalSize));
                    }
                    String lastTransformation = partition.getSchedoscopeTimestamp();
                    if (lastTransformation != null && !lastTransformation.toUpperCase().equals("NULL")
                            && !lastTransformation.isEmpty()) {
                        long ts = Long.parseLong(lastTransformation);
                        view.setLastTransformation(ts);
                        if (ts > maxLastTransformation) {
                            maxLastTransformation = ts;
                        }
                    }
                    solrFacade.updateViewEntity(view, false);
                    changedViews.add(view);
                }
                sqlRepository.insertOrUpdateViewMetadata(connection, changedViews);
                solrFacade.commit();
            }

            if (maxLastTransformation != -1) {
                table.setLastTransformation(maxLastTransformation);
            } else {
                String ts = mTable.getSchedoscopeTimestamp();//mTable.getParameters().get(SCHEDOSCOPE_TRANSFORMATION_TIMESTAMP);
                if (ts != null) {
                    long lastTransformationTs = Long.parseLong(ts);
                    table.setLastTransformation(lastTransformationTs);
                    MetascopeView rootView = views.get(0);
                    rootView.setTable(table);
                    rootView.setLastTransformation(lastTransformationTs);
                    solrFacade.updateViewEntity(rootView, false);
                }
            }

            sqlRepository.saveTable(connection, table);
            solrFacade.updateTableMetastoreData(table, true);
        } catch (Exception e) {
            LOG.warn("Could not retrieve table from metastore", e);
            continue;
        }

    }

    /* commit to index */
    solrFacade.commit();

    metastoreClient.close();

    try {
        fs.close();
    } catch (IOException e) {
        LOG.warn("Could not close connection to HDFS", e);
    }

    try {
        connection.close();
    } catch (SQLException e) {
        LOG.error("Could not close connection", e);
    }

    LOG.info("Sync with metastore finished");
    return true;
}

From source file: org.schedoscope.metascope.task.MetastoreTask.java

License: Apache License

@Override
@Transactional(propagation = Propagation.REQUIRES_NEW)
public boolean run(long start) {
    LOG.info("Sync repository with metastore");
    HiveConf conf = new HiveConf();
    conf.set("hive.metastore.local", "false");
    conf.setVar(HiveConf.ConfVars.METASTOREURIS, config.getMetastoreThriftUri());
    String principal = config.getKerberosPrincipal();
    if (principal != null && !principal.isEmpty()) {
        conf.setBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL, true);
        conf.setVar(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL, principal);
        conf.set("hadoop.security.authentication", "kerberos");
        UserGroupInformation.setConfiguration(conf);
    }

    HiveMetaStoreClient client = null;
    try {
        client = new HiveMetaStoreClient(conf);
    } catch (Exception e) {
        LOG.info("[MetastoreSyncTask] FAILED: Could not connect to hive metastore", e);
        return false;
    }

    FileSystem fs;
    try {
        Configuration hadoopConfig = new Configuration();
        hadoopConfig.set("fs.defaultFS", config.getHdfs());
        fs = FileSystem.get(hadoopConfig);
    } catch (IOException e) {
        LOG.info("[MetastoreSyncTask] FAILED: Could not connect to HDFS", e);
        client.close();
        return false;
    }

    LOG.info("Connected to metastore (" + config.getMetastoreThriftUri() + ")");

    List<String> allTables = metascopeTableRepository.getAllTablesNames();

    for (String fqdn : allTables) {
        //load table
        MetascopeTable table = metascopeTableRepository.findOne(fqdn);
        LOG.info("Get metastore information for table " + table.getFqdn());

        try {
            Table mTable = client.getTable(table.getDatabaseName(), table.getTableName());
            List<Partition> partitions = client.listPartitions(table.getDatabaseName(), table.getTableName(),
                    Short.MAX_VALUE);

            table.setTableOwner(mTable.getOwner());
            table.setCreatedAt(mTable.getCreateTime() * 1000L);
            table.setInputFormat(mTable.getSd().getInputFormat());
            table.setOutputFormat(mTable.getSd().getOutputFormat());
            table.setDataPath(mTable.getSd().getLocation());
            try {
                table.setDataSize(getDirectorySize(fs, table.getDataPath()));
                table.setPermissions(getPermission(fs, table.getDataPath()));
            } catch (IllegalArgumentException e) {
                LOG.warn("Could not retrieve dir size: " + e.getMessage());
                LOG.debug("ERROR: Could not read HDFS metadata", e);
            }

            long maxLastTransformation = -1;

            Hibernate.initialize(table.getViews());
            table.setViewsSize(table.getViews().size());

            for (Partition partition : partitions) {
                MetascopeView view = getView(table.getViews(), partition);
                if (view == null) {
                    // a view that is not registered as a partition in the Hive metastore should not exist
                    continue;
                }
                String numRows = partition.getParameters().get("numRows");
                if (numRows != null) {
                    view.setNumRows(Long.parseLong(numRows));
                }
                String totalSize = partition.getParameters().get("totalSize");
                if (totalSize != null) {
                    view.setTotalSize(Long.parseLong(totalSize));
                }
                String lastTransformation = partition.getParameters().get(SCHEDOSCOPE_TRANSFORMATION_TIMESTAMP);
                if (lastTransformation != null) {
                    long ts = Long.parseLong(lastTransformation);
                    view.setLastTransformation(ts);
                    if (ts > maxLastTransformation) {
                        maxLastTransformation = ts;
                    }
                }
                solrFacade.updateViewEntity(view, false);
            }

            if (maxLastTransformation != -1) {
                table.setLastTransformation(maxLastTransformation);
            } else {
                String ts = mTable.getParameters().get(SCHEDOSCOPE_TRANSFORMATION_TIMESTAMP);
                if (ts != null) {
                    long lastTransformationTs = Long.parseLong(ts);
                    table.setLastTransformation(lastTransformationTs);
                    MetascopeView rootView = table.getViews().get(0);
                    rootView.setLastTransformation(lastTransformationTs);
                    solrFacade.updateViewEntity(rootView, false);
                }
            }

            metascopeTableRepository.save(table);
            solrFacade.updateTablePartial(table, true);
        } catch (Exception e) {
            LOG.warn("Could not retrieve table from metastore", e);
            continue;
        }

    }

    /* commit to index */
    solrFacade.commit();

    client.close();
    try {
        fs.close();
    } catch (IOException e) {
        LOG.warn("Could not close connection to HDFS", e);
    }

    LOG.info("Sync with metastore finished");
    return true;
}

From source file: org.sf.xrime.algorithms.BC.BCForwardReducer.java

License: Apache License

// Indicates whether another MapReduce step is still needed.
private void recordContinue() throws IOException {
    if (changeflag) {
        return;
    }

    changeflag = true;

    String continueFile = job.get(BCForwardStep.continueFileKey);

    if (continueFile != null) {
        FileSystem fs = FileSystem.get(job);
        fs.mkdirs(new Path(continueFile));
        fs.close();
    }
}