Example usage for org.apache.hadoop.conf Configuration addResource

Introduction

This page collects example usages of org.apache.hadoop.conf.Configuration#addResource.

Prototype

public void addResource(Configuration conf) 

Document

Add a configuration resource.
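
For orientation, here is a minimal sketch (not taken from any of the projects below; resource names are placeholders) showing this overload next to the String and Path overloads that the usages on this page rely on:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class AddResourceSketch {
    public static void main(String[] args) {
        Configuration base = new Configuration();
        base.addResource("my-site.xml");                  // resolved against the classpath
        base.addResource(new Path("/etc/app/site.xml"));  // loaded from the local filesystem

        Configuration other = new Configuration(false);   // start without Hadoop's default resources
        other.addResource(base);                          // add the resources registered on another Configuration
        System.out.println(other.get("fs.defaultFS"));    // may print null if the resources do not define it
    }
}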

Usage

From source file:gobblin.compaction.hive.HdfsIO.java

License:Apache License

private static void addHadoopConfigPropertiesToConf(Configuration conf) {
    Set<String> propertyNames = CompactionRunner.properties.stringPropertyNames();
    for (String propertyName : propertyNames) {
        if (propertyName.startsWith(HADOOP_CONFIGFILE_)) {
            String hadoopConfigFile = CompactionRunner.properties.getProperty(propertyName);
            conf.addResource(new Path(hadoopConfigFile));
            LOG.info("Added Hadoop Config File: " + hadoopConfigFile);
        }
    }
}
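
For context, a hedged sketch of the properties that would drive this loop; the excerpt does not show the value of HADOOP_CONFIGFILE_, so the prefix used here is purely illustrative:

// hypothetical properties, assuming HADOOP_CONFIGFILE_ expands to a prefix such as "hadoopconfigfile."
Properties props = new Properties();
props.setProperty("hadoopconfigfile.core", "/etc/hadoop/conf/core-site.xml");
props.setProperty("hadoopconfigfile.hdfs", "/etc/hadoop/conf/hdfs-site.xml");
// each matching value would then be passed to conf.addResource(new Path(...)) by the method above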

From source file:gov.jgi.meta.exec.BlatCommand.java

License:Open Source License

public static void main(String[] args) throws Exception {

    Configuration conf = new Configuration();

    conf.addResource("blat-test-conf.xml");
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();

    /*
     * process arguments
     */

    if (otherArgs.length != 2) {
        System.err.println("Usage: blat <seqfilepath> <ecfilepath>");
        System.exit(2);
    }

    Map<String, String> l = new HashMap<String, String>();
    Set<String> r;

    Text t = new Text();
    FileInputStream fstream = new FileInputStream(otherArgs[0]);
    FastaBlockLineReader in = new FastaBlockLineReader(fstream);
    int bytes = in.readLine(t, l);

    BlatCommand b = new BlatCommand();
    r = b.exec(l, otherArgs[1], null);

    System.out.println("matches = " + r);
}
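
Since the String overload above resolves blat-test-conf.xml against the classpath rather than the working directory, a quick check (not part of the original source, only a hedged suggestion) can make a missing resource obvious up front:

if (BlatCommand.class.getClassLoader().getResource("blat-test-conf.xml") == null) {
    System.err.println("blat-test-conf.xml is not on the classpath; Hadoop defaults will be used");
}
conf.addResource("blat-test-conf.xml");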

From source file:gov.jgi.meta.MetaUtils.java

License:Open Source License

/**
 * same as {@link #loadConfiguration(Configuration, String[]) loadConfiguration()} but loads from
 * a specified file instead of the default filename.
 *
 * @param conf the job configuration to add the defaults to
 * @param configurationFileName the cluster defaults file to load
 * @param args the command-line args
 * @return the remaining command-line arguments; conf is modified in place
 * @throws IOException
 */
public static String[] loadConfiguration(Configuration conf, String configurationFileName, String[] args)
        throws IOException {
    /*
     * first load the configuration from the build properties (typically packaged in the jar)
     */
    System.out.println("loading build.properties ...");
    try {
        Properties buildProperties = new Properties();
        buildProperties.load(MetaUtils.class.getResourceAsStream("/build.properties"));
        for (Enumeration e = buildProperties.propertyNames(); e.hasMoreElements();) {
            String k = (String) e.nextElement();
            System.out.println("setting " + k + " to " + buildProperties.getProperty(k));
            System.setProperty(k, buildProperties.getProperty(k));
            conf.set(k, buildProperties.getProperty(k));
        }
    } catch (Exception e) {
        System.out.println("unable to find build.properties ... skipping");
    }

    /*
     * override properties with the deployment descriptor
     */
    if (configurationFileName == null) {
        String appName = System.getProperty("application.name");
        String appVersion = System.getProperty("application.version");
        configurationFileName = appName + "-" + appVersion + "-conf.xml";
    }
    System.out.println("loading application configuration from " + configurationFileName);
    try {
        URL u = ClassLoader.getSystemResource(configurationFileName);
        if (u == null) {
            System.err.println("unable to find " + configurationFileName + " ... skipping");
        } else {
            conf.addResource(configurationFileName);
        }
    } catch (Exception e) {
        System.err.println("unable to find " + configurationFileName + " ... skipping");
    }

    /*
     * override properties from user's preferences defined in ~/.meta-prefs
     */

    try {
        java.io.FileInputStream fis = new java.io.FileInputStream(
                new java.io.File(System.getenv("HOME") + "/.meta-prefs"));
        Properties props = new Properties();
        props.load(fis);
        System.out.println("loading preferences from ~/.meta-prefs");
        for (Enumeration e = props.propertyNames(); e.hasMoreElements();) {
            String k = (String) e.nextElement();
            System.out.println("overriding property: " + k);
            conf.set(k, props.getProperty(k));
        }
    } catch (Exception e) {
        System.out.println("unable to find ~/.meta-prefs ... skipping");
    }

    /*
     * finally, allow user to override from commandline
     */
    return (new GenericOptionsParser(conf, args).getRemainingArgs());
}
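
A hedged usage sketch (the values behind application.name and application.version are placeholders): passing null for the file name makes the method derive appName-appVersion-conf.xml from the build properties it just loaded.

public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    String[] remaining = MetaUtils.loadConfiguration(conf, null, args);
    // conf now holds the build.properties values, the deployment descriptor (if found on the
    // classpath), any ~/.meta-prefs overrides, and -D options parsed from the command line;
    // remaining contains the arguments GenericOptionsParser did not consume
}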

From source file:gov.tva.sparky.core.HDFS.java

/**
 * Load a subset of points from the archive
 *
 * @param oIndexQueryResult
 * @param iRegistryFileID
 * @param pointerHeap
 * @param lookup_table
 * @return
 * @throws Exception
 */
private static PriorityQueue<StandardPointFile> LoadPointSubsetFromArchive(IndexQueryResult oIndexQueryResult,
        int iRegistryFileID, PriorityQueue<HDFSPointBlockPointer> pointerHeap,
        HistorianArchiveLookupTable lookup_table) throws Exception {

    PriorityQueue<StandardPointFile> points = new PriorityQueue<StandardPointFile>();
    String strHdfsTranslatedPath = lookup_table.Lookup_Path_ByFileID(iRegistryFileID);

    Configuration conf = new Configuration(false);
    conf.addResource("hadoop-default.xml");
    conf.addResource("sparky-site.xml");

    FileSystem hdfs = FileSystem.get(conf);
    HistorianArchiveFile dotD;

    if (null == pointerHeap)
        return points;

    Path path = new Path(strHdfsTranslatedPath);

    // 1. Open .d file on hdfs
    dotD = new HistorianArchiveFile(hdfs, path, conf);

    while (pointerHeap.size() > 0) {
        HDFSPointBlockPointer pointer = pointerHeap.remove();
        // go ahead and parse this block, add results to the heap
        ArrayList<StandardPointFile> pointsInBlock = dotD.ParseBlockInArchive(pointer.iBlockIndex);
        System.out.println("PointBlock > Found Points: " + pointsInBlock.size());
        // 2. Extract points from block, add to heap
        for (int j = 0; j < pointsInBlock.size(); j++) {
            StandardPointFile originalPoint = pointsInBlock.get(j);
            if (originalPoint.GetCalendar().getTimeInMillis() >= oIndexQueryResult.query.getStartTime()
                    && originalPoint.GetCalendar().getTimeInMillis() <= oIndexQueryResult.query.getEndTime()) {
                // adds to heap, this is sorted by time on add
                points.add(originalPoint);
            }
        }
    }

    return points;
}

From source file:gov.tva.sparky.core.Jetty.java

public static void main(String[] args) throws Exception {
    // Configuration
    Configuration conf = new Configuration(false);
    conf.addResource("hadoop-default.xml");
    conf.addResource("sparky-site.xml");
    startServer(conf.getInt("sparky.http.port", 8080));
}
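
Worth noting for all of the Sparky examples: new Configuration(false) skips Hadoop's default resources, so nothing is loaded until the explicit addResource calls run. A minimal sketch of the difference (the default resource names are those of recent Hadoop releases):

Configuration withDefaults = new Configuration();   // loads core-default.xml and core-site.xml from the classpath
Configuration bare = new Configuration(false);      // starts empty
bare.addResource("hadoop-default.xml");             // classpath resources, as in the Sparky code above
bare.addResource("sparky-site.xml");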

From source file:gov.tva.sparky.hbase.RestProxy.java

public static boolean DeleteHbaseRow(String strTablename, String strRowKey) throws URIException {

    Configuration conf = new Configuration(false);
    conf.addResource("hadoop-default.xml");
    conf.addResource("sparky-site.xml");

    int port = conf.getInt("sparky.hbase.restPort", 8092);
    String uri = conf.get("sparky.hbase.restURI", "http://socdvmhbase");

    boolean bResult = false;

    BufferedReader br = null;
    HttpClient client = new HttpClient();

    String strRestPath = uri + ":" + port + "/" + strTablename + "/" + strRowKey;

    DeleteMethod delete_method = new DeleteMethod(strRestPath);

    try {

        int returnCode = client.executeMethod(delete_method);

        if (returnCode == HttpStatus.SC_NOT_IMPLEMENTED) {

            System.out.println("The Post method is not implemented by this URI");

        } else {

            bResult = true;

        }
    } catch (Exception e) {
        System.out.println(e);
    } finally {
        delete_method.releaseConnection();
        if (br != null)
            try {
                br.close();
            } catch (Exception fe) {
            }
    }

    return bResult;

}

From source file:gov.tva.sparky.hbase.RestProxy.java

/**
 * This method adds data to HBase.
 * 
 * @param strTablename
 * @param strRowKey
 * @param strColumn
 * @param strQualifier
 * @param arBytesValue
 * @return
 * @throws URIException
 */
public static boolean InsertHbaseIndexBucket(String strTablename, String strRowKey, String strColumn,
        String strQualifier, byte[] arBytesValue) throws URIException {
    // Configuration
    Configuration conf = new Configuration(false);
    conf.addResource("hadoop-default.xml");
    conf.addResource("sparky-site.xml");
    int port = conf.getInt("sparky.hbase.restPort", 8092);
    String uri = conf.get("sparky.hbase.restURI", "http://socdvmhbase");

    boolean bResult = false;

    BufferedReader br = null;
    HttpClient client = new HttpClient();

    String strRestPath = uri + ":" + port + "/" + strTablename + "/" + strRowKey + "/" + strColumn + ":"
            + strQualifier;

    PostMethod post = new PostMethod(strRestPath);
    post.addRequestHeader("Content-Type", "application/octet-stream");

    RequestEntity entity = new ByteArrayRequestEntity(arBytesValue);
    post.setRequestEntity(entity);

    try {
        int returnCode = client.executeMethod(post);

        if (returnCode == HttpStatus.SC_NOT_IMPLEMENTED) {
            System.out.println("The Post method is not implemented by this URI");
            // still consume the response body
            post.getResponseBodyAsString();
        } else {
            br = new BufferedReader(new InputStreamReader(post.getResponseBodyAsStream()));
            String readLine;
            while (((readLine = br.readLine()) != null)) {
                System.out.println(readLine);
            }

            bResult = true;

        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        post.releaseConnection();
        if (br != null) {
            try {
                br.close();
            } catch (Exception fe) {
                fe.printStackTrace();
            }
        }
    }

    return bResult;
}

From source file:gov.tva.sparky.hbase.RestProxy.java

/**
 * Queries a single cell value from HBase via the REST gateway.
 * @param strTablename
 * @param strRowKey
 * @param strColumn
 * @param strQualifier
 * @return Returns the values from a data cell in HBase.
 * @throws ParserConfigurationException
 * @throws SAXException
 * @throws IOException
 */
public static byte[] QueryHBaseForCell(String strTablename, String strRowKey, String strColumn,
        String strQualifier) throws ParserConfigurationException, SAXException, IOException {
    // Configuration
    Configuration conf = new Configuration(false);
    conf.addResource("hadoop-default.xml");
    conf.addResource("sparky-site.xml");
    int port = conf.getInt("sparky.hbase.restPort", 8092);
    String uri = conf.get("sparky.hbase.restURI", "http://socdvmhbase");

    DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
    // namespace awareness is required for the XPath evaluation below
    factory.setNamespaceAware(true);
    DocumentBuilder builder = factory.newDocumentBuilder();

    String strRestPath = uri + ":" + port + "/" + strTablename + "/" + strRowKey + "/" + strColumn + ":"
            + strQualifier;

    Document doc = null;

    try {
        doc = builder.parse(strRestPath);

    } catch (FileNotFoundException e) {
        //System.out.println("RestProxy > Exception: ( " + strRestPath + " )");
    }

    if (null == doc)
        return null;

    XPathFactory xpath_factory = XPathFactory.newInstance();
    XPath xpath = xpath_factory.newXPath();

    XPathExpression expr = null;

    try {
        expr = xpath.compile("/CellSet/Row/Cell/text()");
    } catch (XPathExpressionException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

    Object result = null;
    try {
        result = expr.evaluate(doc, XPathConstants.NODESET);
    } catch (XPathExpressionException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

    NodeList nodes = (NodeList) result;
    String cell_value = nodes.item(0).getNodeValue();

    Base64 decoder = new Base64();
    byte[] decodedValue = decoder.decode(cell_value.getBytes());

    return decodedValue;
}
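
A hedged round-trip sketch combining the two RestProxy methods above (table name, row key, column family and qualifier are placeholders):

public static void roundTripExample() throws Exception {
    byte[] payload = "hello".getBytes("UTF-8");
    boolean stored = RestProxy.InsertHbaseIndexBucket("index_table", "row-42", "data", "value", payload);
    if (stored) {
        byte[] fetched = RestProxy.QueryHBaseForCell("index_table", "row-42", "data", "value");
        System.out.println(fetched == null ? "cell not found" : new String(fetched, "UTF-8"));
    }
}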

From source file:gov.tva.sparky.hdfs.HistorianArchiveFile.java

/**
 * A test to see if a particular file exists.
 *
 * @param strHdfsPath The path of the file to test.
 * @return true if the archive file exists, false otherwise.
 * @throws IOException
 */
public static boolean ArchiveExists(String strHdfsPath) throws IOException {
    Configuration conf = new Configuration(false);
    conf.addResource("hadoop-default.xml");
    conf.addResource("sparky-site.xml");

    FileSystem hdfs = FileSystem.get(conf);

    Path hdfs_path = new Path(strHdfsPath);

    return hdfs.exists(hdfs_path);
}

From source file:gov.tva.sparky.util.indexer.FileIndex.java

private HashMap<Integer, PriorityQueue<HDFSPointBlockPointer>> parseFile(String strInputFilename)
        throws IOException {
    HashMap<Integer, PriorityQueue<HDFSPointBlockPointer>> hashMap = new HashMap<Integer, PriorityQueue<HDFSPointBlockPointer>>();

    Configuration conf = new Configuration(false);
    conf.addResource("hadoop-default.xml");
    conf.addResource("sparky-site.xml");

    FileSystem hdfs = FileSystem.get(conf);
    HistorianArchiveFile dotD;

    Path path = new Path(strInputFilename);

    dotD = new HistorianArchiveFile(hdfs, path, conf);
    ArchiveFileAllocationTable fatTable = dotD.GetFatTable();
    BlockMap blockMap = fatTable._BlockMap;

    for (int i = 0; i < blockMap._arBlockPointers.size(); i++) {
        ArchiveDataBlockPointer point = blockMap._arBlockPointers.get(i);

        if (!hashMap.containsKey(point.iPointID))
            hashMap.put(point.iPointID, new PriorityQueue<HDFSPointBlockPointer>());

        PriorityQueue<HDFSPointBlockPointer> heap = hashMap.get(point.iPointID);

        // add properties to blockpointer
        HDFSPointBlockPointer blockPointer = new HDFSPointBlockPointer();
        blockPointer.iArchiveFileNameID = this.iArchiveFileName;
        blockPointer.iBlockIndex = i;
        blockPointer.iPointID = point.iPointID;

        // we probably only want to store "i" here, since the event block size can vary and the byte offset is a simple multiplication to compute at the start
        blockPointer.lBlockOffset = fatTable._EventBlockSize * i;
        blockPointer.start_ts = point.Time.GetCalendar(cBaseDateTime).getTimeInMillis(); //(long) point.Time._dTime;            // is this correct?
        blockPointer.strHDFSPath = strInputFilename;

        heap.add(blockPointer);

        // is this step necessary?
        hashMap.put(point.iPointID, heap);
    }

    dotD.close();

    return hashMap;
}