Example usage for org.apache.hadoop.fs FileSystem newInstance

Introduction

On this page you can find example usages of org.apache.hadoop.fs.FileSystem#newInstance.

Prototype

public static FileSystem newInstance(URI uri, Configuration config) throws IOException 

Document

Returns the FileSystem for this URI's scheme and authority. Unlike FileSystem.get, newInstance always returns a new, uncached FileSystem instance, so the caller is responsible for closing it.
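
Below is a minimal, self-contained sketch of the call pattern, assuming a locally reachable HDFS at hdfs://localhost:9000 (the URI and path are placeholders, not taken from the examples that follow).

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class NewInstanceSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        URI uri = new URI("hdfs://localhost:9000"); // placeholder NameNode address
        // newInstance creates a fresh FileSystem rather than returning the cached one,
        // so close it when done; try-with-resources handles that here.
        try (FileSystem fs = FileSystem.newInstance(uri, conf)) {
            System.out.println(fs.exists(new Path("/tmp"))); // placeholder path
        }
    }
}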

Usage

From source file: org.apache.solr.core.HdfsDirectoryFactory.java

License: Apache License

@Override
public boolean exists(String path) {
    Path hdfsDirPath = new Path(path);
    Configuration conf = getConf();
    FileSystem fileSystem = null;
    try {
        fileSystem = FileSystem.newInstance(hdfsDirPath.toUri(), conf);
        return fileSystem.exists(hdfsDirPath);
    } catch (IOException e) {
        LOG.error("Error checking if hdfs path exists", e);
        throw new RuntimeException("Error checking if hdfs path exists", e);
    } finally {
        IOUtils.closeQuietly(fileSystem);
    }
}

From source file: org.apache.solr.core.HdfsDirectoryFactory.java

License: Apache License

protected synchronized void removeDirectory(CacheValue cacheValue) throws IOException {
    Configuration conf = getConf();
    FileSystem fileSystem = null;
    try {
        fileSystem = FileSystem.newInstance(new URI(cacheValue.path), conf);
        boolean success = fileSystem.delete(new Path(cacheValue.path), true);
        if (!success) {
            throw new RuntimeException("Could not remove directory");
        }
    } catch (Exception e) {
        LOG.error("Could not remove directory", e);
        throw new SolrException(ErrorCode.SERVER_ERROR, "Could not remove directory", e);
    } finally {
        IOUtils.closeQuietly(fileSystem);
    }
}

From source file: org.apache.solr.search.TestRecoveryHdfs.java

License: Apache License

@BeforeClass
public static void beforeClass() throws Exception {
    dfsCluster = HdfsTestUtil.setupClass(
            new File(TEMP_DIR, HdfsBasicDistributedZk2Test.class.getName() + "_" + System.currentTimeMillis())
                    .getAbsolutePath());
    hdfsUri = dfsCluster.getFileSystem().getUri().toString();

    try {
        URI uri = new URI(hdfsUri);
        fs = FileSystem.newInstance(uri, new Configuration());
    } catch (IOException e) {
        throw new RuntimeException(e);
    } catch (URISyntaxException e) {
        throw new RuntimeException(e);
    }

    hdfsDataDir = hdfsUri + "/solr/shard1";
    System.setProperty("solr.data.dir", hdfsUri + "/solr/shard1");
    System.setProperty("solr.ulog.dir", hdfsUri + "/solr/shard1");

    initCore("solrconfig-tlog.xml", "schema15.xml");
}

From source file: org.apache.solr.store.hdfs.HdfsDirectory.java

License: Apache License

public HdfsDirectory(Path hdfsDirPath, Configuration configuration) throws IOException {
    assert hdfsDirPath.toString().startsWith("hdfs:/") : hdfsDirPath.toString();
    setLockFactory(NoLockFactory.getNoLockFactory());
    this.hdfsDirPath = hdfsDirPath;
    this.configuration = configuration;
    fileSystem = FileSystem.newInstance(hdfsDirPath.toUri(), configuration);
    try {
        if (!fileSystem.exists(hdfsDirPath)) {
            fileSystem.mkdirs(hdfsDirPath);
        }
    } catch (Exception e) {
        org.apache.solr.util.IOUtils.closeQuietly(fileSystem);
        throw new RuntimeException("Problem creating directory: " + hdfsDirPath, e);
    }
}

From source file: org.apache.solr.store.hdfs.HdfsLockFactory.java

License: Apache License

@Override
public void clearLock(String lockName) throws IOException {
    FileSystem fs = null;
    try {
        fs = FileSystem.newInstance(lockPath.toUri(), configuration);

        if (fs.exists(lockPath)) {
            if (lockPrefix != null) {
                lockName = lockPrefix + "-" + lockName;
            }

            Path lockFile = new Path(lockPath, lockName);

            if (fs.exists(lockFile) && !fs.delete(lockFile, false)) {
                throw new IOException("Cannot delete " + lockFile);
            }
        }
    } finally {
        IOUtils.closeQuietly(fs);
    }
}

From source file: org.apache.solr.update.HdfsUpdateLog.java

License: Apache License

@Override
public void init(UpdateHandler uhandler, SolrCore core) {

    // ulogDir from CoreDescriptor overrides
    String ulogDir = core.getCoreDescriptor().getUlogDir();

    if (ulogDir != null) {
        dataDir = ulogDir;
    }
    if (dataDir == null || dataDir.length() == 0) {
        dataDir = core.getDataDir();
    }

    if (!core.getDirectoryFactory().isAbsolute(dataDir)) {
        try {
            dataDir = core.getDirectoryFactory().getDataHome(core.getCoreDescriptor());
        } catch (IOException e) {
            throw new SolrException(ErrorCode.SERVER_ERROR, e);
        }
    }

    try {
        if (fs != null) {
            fs.close();
        }
    } catch (IOException e) {
        throw new SolrException(ErrorCode.SERVER_ERROR, e);
    }

    try {
        fs = FileSystem.newInstance(new Path(dataDir).toUri(), getConf());
    } catch (IOException e) {
        throw new SolrException(ErrorCode.SERVER_ERROR, e);
    }

    this.uhandler = uhandler;

    if (dataDir.equals(lastDataDir)) {
        if (debug) {
            log.debug("UpdateHandler init: tlogDir=" + tlogDir + ", next id=" + id
                    + "; this is a reopen... nothing else to do.");
        }

        versionInfo.reload();

        // on a normal reopen, we currently shouldn't have to do anything
        return;
    }
    lastDataDir = dataDir;
    tlogDir = new Path(dataDir, TLOG_NAME);

    try {
        if (!fs.exists(tlogDir)) {
            boolean success = fs.mkdirs(tlogDir);
            if (!success) {
                throw new RuntimeException("Could not create directory:" + tlogDir);
            }
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }

    tlogFiles = getLogList(fs, tlogDir);
    id = getLastLogId() + 1; // add 1 since we will create a new log for the
                             // next update

    if (debug) {
        log.debug("UpdateHandler init: tlogDir=" + tlogDir + ", existing tlogs=" + Arrays.asList(tlogFiles)
                + ", next id=" + id);
    }

    TransactionLog oldLog = null;
    for (String oldLogName : tlogFiles) {
        Path f = new Path(tlogDir, oldLogName);
        try {
            oldLog = new HdfsTransactionLog(fs, f, null, true);
            addOldLog(oldLog, false); // don't remove old logs on startup since more
                                      // than one may be uncapped.
        } catch (Exception e) {
            SolrException.log(log, "Failure to open existing log file (non fatal) " + f, e);
            try {
                fs.delete(f, false);
            } catch (IOException e1) {
                throw new RuntimeException(e1);
            }
        }
    }

    // Record first two logs (oldest first) at startup for potential tlog
    // recovery.
    // It's possible that at abnormal shutdown both "tlog" and "prevTlog" were
    // uncapped.
    for (TransactionLog ll : logs) {
        newestLogsOnStartup.addFirst(ll);
        if (newestLogsOnStartup.size() >= 2)
            break;
    }

    try {
        versionInfo = new VersionInfo(this, 256);
    } catch (SolrException e) {
        log.error("Unable to use updateLog: " + e.getMessage(), e);
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                "Unable to use updateLog: " + e.getMessage(), e);
    }

    // TODO: these startingVersions assume that we successfully recover from all
    // non-complete tlogs.
    HdfsUpdateLog.RecentUpdates startingUpdates = getRecentUpdates();
    try {
        startingVersions = startingUpdates.getVersions(numRecordsToKeep);
        startingOperation = startingUpdates.getLatestOperation();

        // populate recent deletes list (since we can't get that info from the
        // index)
        for (int i = startingUpdates.deleteList.size() - 1; i >= 0; i--) {
            DeleteUpdate du = startingUpdates.deleteList.get(i);
            oldDeletes.put(new BytesRef(du.id), new LogPtr(-1, du.version));
        }

        // populate recent deleteByQuery commands
        for (int i = startingUpdates.deleteByQueryList.size() - 1; i >= 0; i--) {
            Update update = startingUpdates.deleteByQueryList.get(i);
            List<Object> dbq = (List<Object>) update.log.lookup(update.pointer);
            long version = (Long) dbq.get(1);
            String q = (String) dbq.get(2);
            trackDeleteByQuery(q, version);
        }

    } finally {
        startingUpdates.close();
    }

}

From source file: org.apache.tephra.persist.HDFSTransactionLogTest.java

License: Apache License

private void testCaskTransactionLogSync(int totalCount, int batchSize, byte versionNumber, boolean isComplete)
        throws Exception {
    List<co.cask.tephra.persist.TransactionEdit> edits = TransactionEditUtil.createRandomCaskEdits(totalCount);
    long timestamp = System.currentTimeMillis();
    Configuration configuration = getConfiguration();
    FileSystem fs = FileSystem.newInstance(FileSystem.getDefaultUri(configuration), configuration);
    SequenceFile.Writer writer = getSequenceFileWriter(configuration, fs, timestamp, versionNumber);
    AtomicLong logSequence = new AtomicLong();
    HDFSTransactionLog transactionLog = getHDFSTransactionLog(configuration, fs, timestamp);
    AbstractTransactionLog.CaskEntry entry;

    for (int i = 0; i < totalCount - batchSize; i += batchSize) {
        if (versionNumber > 1) {
            writeNumWrites(writer, batchSize);
        }
        for (int j = 0; j < batchSize; j++) {
            entry = new AbstractTransactionLog.CaskEntry(new LongWritable(logSequence.getAndIncrement()),
                    edits.get(j));
            writer.append(entry.getKey(), entry.getEdit());
        }
        writer.syncFs();
    }

    if (versionNumber > 1) {
        writeNumWrites(writer, batchSize);
    }

    for (int i = totalCount - batchSize; i < totalCount - 1; i++) {
        entry = new AbstractTransactionLog.CaskEntry(new LongWritable(logSequence.getAndIncrement()),
                edits.get(i));
        writer.append(entry.getKey(), entry.getEdit());
    }

    entry = new AbstractTransactionLog.CaskEntry(new LongWritable(logSequence.getAndIncrement()),
            edits.get(totalCount - 1));
    if (isComplete) {
        writer.append(entry.getKey(), entry.getEdit());
    } else {
        byte[] bytes = Longs.toByteArray(entry.getKey().get());
        writer.appendRaw(bytes, 0, bytes.length, new SequenceFile.ValueBytes() {
            @Override
            public void writeUncompressedBytes(DataOutputStream outStream) throws IOException {
                byte[] test = new byte[] { 0x2 };
                outStream.write(test, 0, 1);
            }

            @Override
            public void writeCompressedBytes(DataOutputStream outStream)
                    throws IllegalArgumentException, IOException {
                // no-op
            }

            @Override
            public int getSize() {
                // mimic size longer than the actual byte array size written, so we would reach EOF
                return 12;
            }
        });
    }
    writer.syncFs();
    Closeables.closeQuietly(writer);

    // now let's try to read this log
    TransactionLogReader reader = transactionLog.getReader();
    int syncedEdits = 0;
    while (reader.next() != null) {
        // testing reading the transaction edits
        syncedEdits++;
    }
    if (isComplete) {
        Assert.assertEquals(totalCount, syncedEdits);
    } else {
        Assert.assertEquals(totalCount - batchSize, syncedEdits);
    }
}

From source file: org.apache.tephra.persist.HDFSTransactionLogTest.java

License: Apache License

private void testTransactionLogSync(int totalCount, int batchSize, byte versionNumber, boolean isComplete)
        throws Exception {
    List<TransactionEdit> edits = TransactionEditUtil.createRandomEdits(totalCount);
    long timestamp = System.currentTimeMillis();
    Configuration configuration = getConfiguration();
    configuration.set(TxConstants.TransactionLog.CFG_SLOW_APPEND_THRESHOLD, "0");
    FileSystem fs = FileSystem.newInstance(FileSystem.getDefaultUri(configuration), configuration);
    SequenceFile.Writer writer = getSequenceFileWriter(configuration, fs, timestamp, versionNumber);
    AtomicLong logSequence = new AtomicLong();
    HDFSTransactionLog transactionLog = getHDFSTransactionLog(configuration, fs, timestamp);
    AbstractTransactionLog.Entry entry;

    for (int i = 0; i < totalCount - batchSize; i += batchSize) {
        writeNumWrites(writer, batchSize);
        for (int j = 0; j < batchSize; j++) {
            entry = new AbstractTransactionLog.Entry(new LongWritable(logSequence.getAndIncrement()),
                    edits.get(j));
            writer.append(entry.getKey(), entry.getEdit());
        }
        writer.syncFs();
    }

    writeNumWrites(writer, batchSize);
    for (int i = totalCount - batchSize; i < totalCount - 1; i++) {
        entry = new AbstractTransactionLog.Entry(new LongWritable(logSequence.getAndIncrement()), edits.get(i));
        writer.append(entry.getKey(), entry.getEdit());
    }

    entry = new AbstractTransactionLog.Entry(new LongWritable(logSequence.getAndIncrement()),
            edits.get(totalCount - 1));
    if (isComplete) {
        writer.append(entry.getKey(), entry.getEdit());
    } else {
        byte[] bytes = Longs.toByteArray(entry.getKey().get());
        writer.appendRaw(bytes, 0, bytes.length, new SequenceFile.ValueBytes() {
            @Override
            public void writeUncompressedBytes(DataOutputStream outStream) throws IOException {
                byte[] test = new byte[] { 0x2 };
                outStream.write(test, 0, 1);
            }

            @Override
            public void writeCompressedBytes(DataOutputStream outStream)
                    throws IllegalArgumentException, IOException {
                // no-op
            }

            @Override
            public int getSize() {
                // mimic size longer than the actual byte array size written, so we would reach EOF
                return 12;
            }
        });
    }
    writer.syncFs();
    Closeables.closeQuietly(writer);

    // now let's try to read this log
    TransactionLogReader reader = transactionLog.getReader();
    int syncedEdits = 0;
    while (reader.next() != null) {
        // testing reading the transaction edits
        syncedEdits++;
    }
    if (isComplete) {
        Assert.assertEquals(totalCount, syncedEdits);
    } else {
        Assert.assertEquals(totalCount - batchSize, syncedEdits);
    }
}

From source file: org.deeplearning4j.utils.CommonUtils.java

License: Apache License

public static FileSystem openHdfsConnect() {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", SERVER_PATH);
    FileSystem fs = null;
    try {
        fs = FileSystem.newInstance(new URI(SERVER_PATH), conf);
    } catch (IOException e) {
        e.printStackTrace();
    } catch (URISyntaxException e) {
        e.printStackTrace();
    }
    return fs;
}
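
Note that openHdfsConnect above hands back an instance obtained from newInstance, which is not drawn from the shared FileSystem cache, so the caller should close it explicitly. A hedged usage sketch of the helper above (the path is a placeholder):

FileSystem fs = CommonUtils.openHdfsConnect();
if (fs != null) {
    try {
        System.out.println(fs.exists(new Path("/user/demo"))); // placeholder path
    } finally {
        fs.close(); // instances from newInstance are not shared, so close them explicitly
    }
}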