Example usage for org.apache.hadoop.fs FileSystem close

Introduction

This page collects usage examples for org.apache.hadoop.fs.FileSystem.close().

Prototype

@Override
public void close() throws IOException 

Document

Close this FileSystem instance.
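
For orientation, here is a minimal, self-contained sketch of close() (the path "/tmp/example.txt" is illustrative, not taken from the examples below). Note that FileSystem.get(conf) returns a cached instance shared across the JVM by default, so closing it can break other callers; FileSystem.newInstance(conf), which several of the examples below use, returns a private instance that is always safe to close.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemCloseExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // newInstance() bypasses the FileSystem cache, so close() below
        // cannot affect other code holding the shared cached instance
        FileSystem fs = FileSystem.newInstance(conf);
        try {
            System.out.println(fs.exists(new Path("/tmp/example.txt")));
        } finally {
            fs.close(); // releases connections and per-instance resources
        }
    }
}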

Usage

From source file:org.apache.kylin.monitor.ApiRequestParser.java

License:Apache License

public void parseRequestLog(String filePath, String dPath) throws ParseException, IOException {

    logger.info("Start parsing kylin api request file " + filePath + " !");

    // writer config init
    FileSystem fs = this.getHdfsFileSystem();
    org.apache.hadoop.fs.Path resultStorePath = new org.apache.hadoop.fs.Path(dPath);
    OutputStreamWriter writer = new OutputStreamWriter(fs.append(resultStorePath));
    CSVWriter cwriter = new CSVWriter(writer, '|', CSVWriter.NO_QUOTE_CHARACTER);

    Pattern p_available = Pattern.compile("/kylin/api/(cubes|user)+.*");
    Pattern p_request = Pattern.compile(
            "^.*\\[.*KylinApiFilter.logRequest.*\\].*REQUEST:.*REQUESTER=(.*);REQ_TIME=(\\w+ (\\d{4}-\\d{2}-\\d{2}).*);URI=(.*);METHOD=(.*);QUERY_STRING=(.*);PAYLOAD=(.*);RESP_STATUS=(.*);$");
    Pattern p_uri = Pattern.compile("/kylin/api/(\\w+)(/.*/)*(.*)$");
    Matcher m_available = p_available.matcher("");
    Matcher m_request = p_request.matcher("");
    Matcher m_uri = p_uri.matcher("");

    Path path = Paths.get(filePath);
    // try-with-resources closes the reader even if parsing fails
    try (BufferedReader reader = Files.newBufferedReader(path, ENCODING)) {
        String line;
        while ((line = reader.readLine()) != null) {
            // reset the input
            m_available.reset(line);
            m_request.reset(line);

            // filter unnecessary info
            if (m_available.find()) {
                // filter GET info
                if (m_request.find() && !m_request.group(5).equals("GET")) {

                    List<String> groups = new ArrayList<String>();
                    for (int i = 1; i <= m_request.groupCount(); i++) {
                        groups.add(m_request.group(i));
                    }

                    String uri = m_request.group(4);
                    m_uri.reset(uri);
                    if (m_uri.find()) {

                        // add target
                        groups.add(m_uri.group(1));

                        // add action
                        if (m_uri.group(1).equals("cubes")) {
                            String method = m_request.group(5);
                            if ("DELETE".equals(method)) {
                                groups.add("drop");
                            } else if ("POST".equals(method)) {
                                groups.add("save");
                            } else {
                                // add parse action
                                groups.add(m_uri.group(3));
                            }
                        }
                    }
                    groups.add(DEPLOY_ENV);
                    String[] recordArray = groups.toArray(new String[groups.size()]);
                    // write to hdfs
                    cwriter.writeNext(recordArray);
                }
            }

        }
    } catch (IOException ex) {
        logger.error("Failed to write to hdfs:", ex);
    } finally {
        // close the CSVWriter first; it flushes and closes the underlying writer
        cwriter.close();
        fs.close();
    }

    logger.info("Finish parsing file " + filePath + " !");
}

From source file:org.apache.kylin.monitor.ApiRequestParser.java

License:Apache License

public void writeResultToHdfs(String dPath, String[] record) throws IOException {
    OutputStreamWriter writer = null;
    CSVWriter cwriter = null;
    FileSystem fs = null;
    try {

        fs = this.getHdfsFileSystem();
        org.apache.hadoop.fs.Path resultStorePath = new org.apache.hadoop.fs.Path(dPath);
        writer = new OutputStreamWriter(fs.append(resultStorePath));
        cwriter = new CSVWriter(writer, '|', CSVWriter.NO_QUOTE_CHARACTER);
        cwriter.writeNext(record);

    } catch (IOException e) {
        logger.error("Exception", e);
    } finally {
        // null-check before closing: append() may have failed before assignment,
        // and the CSVWriter must be closed before the writer it wraps
        if (cwriter != null) {
            cwriter.close();
        }
        if (writer != null) {
            writer.close();
        }
        if (fs != null) {
            fs.close();
        }
    }
}

From source file:org.apache.kylin.monitor.FileUtils.java

License:Apache License

public static boolean pathCheck(String filePath) throws IOException {
    logger.info("checking file:" + filePath);
    FileSystem fs = null;
    try {
        Configuration conf = new Configuration();
        fs = FileSystem.get(conf);
        org.apache.hadoop.fs.Path path = new org.apache.hadoop.fs.Path(filePath);
        if (!fs.exists(path)) {
            // close the stream returned by create() so the empty file is committed
            fs.create(path).close();
            fs.close();
            return false;
        }
    } catch (Exception e) {
        if (fs != null) {
            fs.close();
        }
        logger.info("Failed to init:", e);
    }
    return true;
}

From source file:org.apache.kylin.monitor.FileUtils.java

License:Apache License

public static void clearHdfsFile(String dPath) throws IOException {
    OutputStreamWriter writer = null;
    FileSystem fs = null;
    try {
        fs = getHdfsFileSystem();
        org.apache.hadoop.fs.Path resultStorePath = new org.apache.hadoop.fs.Path(dPath);
        writer = new OutputStreamWriter(fs.create(resultStorePath, true));

    } catch (Exception e) {
        logger.info("Exception", e);
    } finally {
        if (writer != null) {
            writer.close();
        }
        if (fs != null) {
            fs.close();
        }
    }
}

From source file:org.apache.kylin.monitor.FileUtils.java

License:Apache License

public static void appendResultToHdfs(String dPath, String[] record) throws IOException {
    OutputStreamWriter writer = null;
    CSVWriter cwriter = null;
    FileSystem fs = null;
    try {
        fs = getHdfsFileSystem();
        org.apache.hadoop.fs.Path resultStorePath = new org.apache.hadoop.fs.Path(dPath);
        writer = new OutputStreamWriter(fs.append(resultStorePath));
        cwriter = new CSVWriter(writer, '|', CSVWriter.NO_QUOTE_CHARACTER);

        cwriter.writeNext(record);

    } catch (Exception e) {
        logger.error("Exception", e);
    } finally {
        // close the CSVWriter first; it flushes buffered output to the writer
        if (cwriter != null) {
            cwriter.close();
        }
        if (writer != null) {
            writer.close();
        }
        if (fs != null) {
            fs.close();
        }
    }
}

From source file:org.apache.kylin.monitor.FileUtils.java

License:Apache License

public static FileSystem getHdfsFileSystem() throws IOException {
    Configuration conf = new Configuration();
    //conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
    FileSystem fs = null;
    try {
        fs = FileSystem.newInstance(conf);
    } catch (IOException e) {
        // fs is still null here, so there is nothing to close; rethrow
        // rather than silently returning null to callers
        logger.error("Failed to get hdfs FileSystem", e);
        throw e;
    }
    return fs;
}

From source file:org.apache.kylin.monitor.QueryParser.java

License:Apache License

public void parseQueryInit() throws IOException {
    logger.info("parse query initializing...");
    FileSystem fs = null;
    try {
        Configuration conf = new Configuration();
        fs = FileSystem.get(conf);
        org.apache.hadoop.fs.Path path = new org.apache.hadoop.fs.Path(QueryParser.QUERY_PARSE_RESULT_PATH);
        if (!fs.exists(path)) {
            // close the stream returned by create() so the empty file is committed
            fs.create(path).close();
            fs.close(); // need to close before getting the FileSystem again
            this.writeResultToHdfs(QueryParser.QUERY_PARSE_RESULT_PATH, QueryParser.KYLIN_QUERY_CSV_HEADER);
        }
    } catch (IOException e) {
        if (fs != null) {
            fs.close();
        }
        logger.info("Failed to init:", e);
    }
}

From source file:org.apache.kylin.monitor.QueryParser.java

License:Apache License

public void parseQueryLog(String filePath, String dPath) throws ParseException, IOException {

    logger.info("Start parsing file " + filePath + " !");

    // writer config init
    FileSystem fs = this.getHdfsFileSystem();
    org.apache.hadoop.fs.Path resultStorePath = new org.apache.hadoop.fs.Path(dPath);
    OutputStreamWriter writer = new OutputStreamWriter(fs.append(resultStorePath));
    CSVWriter cwriter = new CSVWriter(writer, '|', CSVWriter.NO_QUOTE_CHARACTER);

    SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");
    Pattern p_query_start = Pattern.compile("^\\[.*\\]:\\[(.*),.*\\]\\[.*\\]\\[.*QueryService.logQuery.*\\].*");
    Pattern p_query_end = Pattern.compile("^Message:(.*)$");
    Pattern p_query_body = Pattern.compile(
            "^\\[.*\\]:\\[((\\d{4}-\\d{2}-\\d{2}).*)\\]\\[.*\\]\\[.*\\].*\n^=+\\[QUERY\\]=+\n^SQL:(.*)\n^User:(.*)\n^Success:(.*)\n^Duration:(.*)\n^Project:(.*)\n^(Realization Names|Cube Names): \\[(.*)\\]\n^Cuboid Ids: \\[(.*)\\]\n^Total scan count:(.*)\n^Result row count:(.*)\n^Accept Partial:(.*)\n(^Is Partial Result:(.*)\n)?^Hit Cache:(.*)\n^Message:(.*)",
            Pattern.MULTILINE);
    Matcher m_query_start = p_query_start.matcher("");
    Matcher m_query_end = p_query_end.matcher("");
    Matcher m_query_body = p_query_body.matcher("");

    boolean query_start = false;
    StringBuffer query_body = new StringBuffer("");
    Path path = Paths.get(filePath);
    // try-with-resources closes the reader even if parsing fails
    try (BufferedReader reader = Files.newBufferedReader(path, ENCODING)) {
        String line;
        while ((line = reader.readLine()) != null) {
            m_query_start.reset(line); //reset the input
            m_query_end.reset(line);

            // set the start flag, clear the StringBuffer
            if (m_query_start.find()) {
                query_start = true;
                query_body = new StringBuffer("");
            }
            if (query_start) {
                query_body.append(line + "\n");
            }
            if (m_query_end.find()) {
                query_start = false;
                m_query_body.reset(query_body);
                logger.info("parsing query...");
                logger.info(query_body.toString());
                // skip group(8) and group(14)
                if (m_query_body.find()) {
                    ArrayList<String> groups = new ArrayList<String>();
                    int grp_count = m_query_body.groupCount();
                    for (int i = 1; i <= grp_count; i++) {
                        if (i != 8 && i != 14) {
                            String grp_item = m_query_body.group(i);
                            grp_item = grp_item == null ? "" : grp_item.trim();
                            groups.add(grp_item);
                        }
                    }

                    long start_time = format.parse(groups.get(0)).getTime()
                            - (int) (Double.parseDouble(groups.get(5)) * 1000);
                    groups.set(0, format.format(new Date(start_time)));
                    groups.add(DEPLOY_ENV);
                    String[] recordArray = groups.toArray(new String[groups.size()]);
                    // write to hdfs
                    cwriter.writeNext(recordArray);

                }

            }

        }
    } catch (IOException ex) {
        logger.error("Failed to write to hdfs:", ex);
    } finally {
        // close the CSVWriter first; it flushes buffered output to the writer
        if (cwriter != null) {
            cwriter.close();
        }
        if (writer != null) {
            writer.close();
        }
        if (fs != null) {
            fs.close();
        }
    }

    logger.info("Finish parsing file " + filePath + " !");

}

From source file:org.apache.kylin.monitor.QueryParser.java

License:Apache License

public void writeResultToHdfs(String dPath, String[] record) throws IOException {
    OutputStreamWriter writer = null;
    CSVWriter cwriter = null;
    FileSystem fs = null;
    try {
        fs = this.getHdfsFileSystem();
        org.apache.hadoop.fs.Path resultStorePath = new org.apache.hadoop.fs.Path(dPath);
        writer = new OutputStreamWriter(fs.append(resultStorePath));
        cwriter = new CSVWriter(writer, '|', CSVWriter.NO_QUOTE_CHARACTER);

        cwriter.writeNext(record);

    } catch (IOException e) {
        logger.error("Exception", e);
    } finally {
        // close the CSVWriter first; it flushes buffered output to the writer
        if (cwriter != null) {
            cwriter.close();
        }
        if (writer != null) {
            writer.close();
        }
        if (fs != null) {
            fs.close();
        }
    }
}

From source file:org.apache.kylin.monitor.QueryParser.java

License:Apache License

public FileSystem getHdfsFileSystem() throws IOException {
    Configuration conf = new Configuration();
    //conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
    FileSystem fs = null;
    try {
        fs = FileSystem.get(conf);
    } catch (IOException e) {
        // fs is still null here, so there is nothing to close; rethrow
        // rather than silently returning null to callers
        logger.error("Failed to get hdfs FileSystem", e);
        throw e;
    }
    return fs;
}