List of usage examples for org.apache.hadoop.fs.FileSystem.close()
@Override public void close() throws IOException
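Before the collected examples, a minimal sketch of the canonical pattern. FileSystem implements java.io.Closeable, so try-with-resources releases it automatically; the class name and path below are illustrative, not from any of the sources listed here.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemCloseExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // FileSystem implements Closeable, so try-with-resources invokes
        // close() automatically, even if an operation throws.
        try (FileSystem fs = FileSystem.newInstance(conf)) {
            fs.mkdirs(new Path("/tmp/example")); // illustrative path
        }
    }
}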
From source file:com.cloudera.hoop.client.fs.TestHoopFileSystem.java
License:Open Source License
private void testSetReplication() throws Exception {
    // Create a one-byte test file directly against HDFS
    FileSystem fs = FileSystem.get(getHadoopConf());
    Path path = new Path(getHadoopTestDir(), "foo.txt");
    OutputStream os = fs.create(path);
    os.write(1);
    os.close();
    fs.close();
    // Note: in the original source this call follows fs.close()
    fs.setReplication(path, (short) 2);

    // Change the replication factor through the Hoop HTTP FileSystem
    Configuration conf = new Configuration();
    conf.set("fs.http.impl", HoopFileSystem.class.getName());
    fs = FileSystem.get(getJettyURL().toURI(), conf);
    fs.setReplication(path, (short) 1);
    fs.close();

    // Verify the change directly against HDFS
    fs = FileSystem.get(getHadoopConf());
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();
    Assert.assertEquals(status1.getReplication(), (short) 1);
}
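A caveat several of the examples below work around: FileSystem.get() may return a JVM-wide cached instance, keyed by scheme, authority, and user, so calling close() on it closes that instance for every other holder of the same key. A hedged sketch of the cache-safe alternative (the URI and path are illustrative):

Configuration conf = new Configuration();
// newInstance() bypasses the FileSystem cache, so this close() affects
// only this instance, not other users of FileSystem.get().
try (FileSystem fs = FileSystem.newInstance(URI.create("hdfs://namenode:8020/"), conf)) {
    fs.getFileStatus(new Path("/tmp/foo.txt")); // illustrative path
}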
From source file:com.cloudera.lib.service.hadoop.HadoopService.java
License:Open Source License
protected void closeFileSystem(FileSystem fs) throws IOException { fs.close(); }
From source file:com.cloudera.MultiplexedTest.java
License:Apache License
public static void main(String[] args) throws Exception {
    options = new Options();
    final Configuration conf = new Configuration();
    final FileSystem fs = FileSystem.get(new URI(options.uri), conf);
    long nanoStart = System.nanoTime();

    BaseThread[] threads = new BaseThread[options.nThreads];
    if (options.isWriteOperation) {
        for (int i = 0; i < options.nThreads; i++) {
            threads[i] = WriterThread.create(i, fs);
        }
    } else {
        for (int i = 0; i < options.nThreads; i++) {
            threads[i] = ReaderThread.create(i, fs);
        }
    }
    for (int i = 0; i < options.nThreads; i++) {
        threads[i].start();
    }
    for (int i = 0; i < options.nThreads; i++) {
        threads[i].join();
    }
    for (int i = 0; i < options.nThreads; i++) {
        Throwable t = threads[i].getException();
        if (t != null) {
            System.err.println("there were exceptions. Aborting.");
            System.exit(1);
        }
    }
    long nanoEnd = System.nanoTime();
    fs.close();

    long totalIo = options.nThreads;
    totalIo *= options.nBytesPerThread;
    float nanoDiff = nanoEnd - nanoStart;
    float seconds = nanoDiff / 1000000000;
    float rate = totalIo / seconds;
    System.out.println(String.format("Using %d threads, average rate was %s/s\n" + "Total time was %f seconds",
            options.nThreads, prettyPrintByteSize(rate), seconds));
}
From source file:com.cotdp.hadoop.ZipFileTest.java
License:Apache License
/**
 * Prepare the FileSystem and copy test files
 */
@Override
protected void setUp() throws Exception {
    // One-off initialisation
    if (!isInitialised) {
        LOG.info("setUp() called, preparing FileSystem for tests");
        FileSystem fs = FileSystem.get(conf);

        // Delete our working directory if it already exists
        LOG.info(" ... Deleting " + workingPath.toString());
        fs.delete(workingPath, true);

        // Copy the test files
        LOG.info(" ... Copying files");
        fs.mkdirs(inputPath);
        copyFile(fs, "zip-01.zip");
        copyFile(fs, "zip-02.zip");
        copyFile(fs, "zip-03.zip");
        copyFile(fs, "zip-04.dat");
        copyFile(fs, "random.dat");
        copyFile(fs, "encrypted.zip");
        copyFile(fs, "corrupt.zip");
        fs.close();

        isInitialised = true;
    }

    // Reset ZipFileInputFormat leniency (false)
    ZipFileInputFormat.setLenient(false);
}
From source file:com.dalabs.droop.util.password.CryptoFileLoader.java
License:Apache License
@Override
public String loadPassword(String p, Configuration configuration) throws IOException {
    LOG.debug("Fetching password from specified path: " + p);
    Path path = new Path(p);
    FileSystem fs = path.getFileSystem(configuration);
    byte[] encrypted;
    try {
        verifyPath(fs, path);
        encrypted = readBytes(fs, path);
    } finally {
        fs.close();
    }

    String passPhrase = configuration.get(PROPERTY_CRYPTO_PASSPHRASE);
    if (passPhrase == null) {
        throw new IOException("Passphrase is missing in property " + PROPERTY_CRYPTO_PASSPHRASE);
    }
    String alg = configuration.get(PROPERTY_CRYPTO_ALG, DEFAULT_ALG);
    String algOnly = alg.split("/")[0];
    String salt = configuration.get(PROPERTY_CRYPTO_SALT, DEFAULT_SALT);
    int iterations = configuration.getInt(PROPERTY_CRYPTO_ITERATIONS, DEFAULT_ITERATIONS);
    int keyLen = configuration.getInt(PROPERTY_CRYPTO_KEY_LEN, DEFAULT_KEY_LEN);

    SecretKeyFactory factory;
    try {
        factory = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA1");
    } catch (NoSuchAlgorithmException e) {
        throw new IOException("Can't load SecretKeyFactory", e);
    }

    SecretKeySpec key;
    try {
        key = new SecretKeySpec(factory
                .generateSecret(new PBEKeySpec(passPhrase.toCharArray(), salt.getBytes(), iterations, keyLen))
                .getEncoded(), algOnly);
    } catch (Exception e) {
        throw new IOException("Can't generate secret key", e);
    }

    Cipher crypto;
    try {
        crypto = Cipher.getInstance(alg);
    } catch (Exception e) {
        throw new IOException("Can't initialize the decryptor", e);
    }

    byte[] decryptedBytes;
    try {
        crypto.init(Cipher.DECRYPT_MODE, key);
        decryptedBytes = crypto.doFinal(encrypted);
    } catch (Exception e) {
        throw new IOException("Can't decrypt the password", e);
    }
    return new String(decryptedBytes);
}
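Note that path.getFileSystem(configuration) returns the shared cached instance, so the fs.close() in the finally block above also closes it for any other code in the JVM using the same scheme and user; the cache-bypassing FileSystem.newInstance() pattern shown earlier avoids this side effect.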
From source file:com.datatorrent.flume.source.HdfsTestSource.java
License:Open Source License
private List<String> findFiles() throws IOException {
    List<String> files = Lists.newArrayList();
    Path directoryPath = new Path(directory);
    FileSystem lfs = FileSystem.newInstance(directoryPath.toUri(), configuration);
    try {
        logger.debug("checking for new files in {}", directoryPath);
        RemoteIterator<LocatedFileStatus> statuses = lfs.listFiles(directoryPath, true);
        while (statuses.hasNext()) {
            FileStatus status = statuses.next();
            Path path = status.getPath();
            String filePathStr = path.toString();
            if (!filePathStr.endsWith(".gz")) {
                continue;
            }
            logger.debug("new file {}", filePathStr);
            files.add(filePathStr);
        }
    } catch (FileNotFoundException e) {
        logger.warn("Failed to list directory {}", directoryPath, e);
        throw new RuntimeException(e);
    } finally {
        lfs.close();
    }
    return files;
}
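The newInstance()/try/finally pairing above is the pattern most of the remaining examples follow; on Java 7+ the same guarantee can be written more compactly with try-with-resources. A sketch under the same field names (directory, configuration) as the example; the method name findGzFiles is hypothetical:

private List<String> findGzFiles() throws IOException {
    List<String> files = Lists.newArrayList();
    Path directoryPath = new Path(directory);
    // lfs.close() is implied by try-with-resources, replacing the finally block
    try (FileSystem lfs = FileSystem.newInstance(directoryPath.toUri(), configuration)) {
        RemoteIterator<LocatedFileStatus> statuses = lfs.listFiles(directoryPath, true);
        while (statuses.hasNext()) {
            Path path = statuses.next().getPath();
            if (path.toString().endsWith(".gz")) {
                files.add(path.toString());
            }
        }
    }
    return files;
}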
From source file:com.datatorrent.lib.bucket.ExpirableHdfsBucketStore.java
License:Open Source License
@Override
public void deleteExpiredBuckets(long time) throws IOException {
    Iterator<Long> iterator = windowToBuckets.keySet().iterator();
    while (iterator.hasNext()) {
        long window = iterator.next();
        long timestamp = windowToTimestamp.get(window);
        if (timestamp < time) {
            Collection<Integer> indices = windowToBuckets.get(window);
            synchronized (indices) {
                if (indices.size() > 0) {
                    Path dataFilePath = new Path(bucketRoot + PATH_SEPARATOR + window);
                    FileSystem fs = FileSystem.newInstance(dataFilePath.toUri(), configuration);
                    try {
                        if (fs.exists(dataFilePath)) {
                            logger.debug("start delete {}", window);
                            fs.delete(dataFilePath, true);
                            logger.debug("end delete {}", window);
                        }
                        for (int bucketIdx : indices) {
                            Map<Long, Long> offsetMap = bucketPositions[bucketIdx];
                            if (offsetMap != null) {
                                synchronized (offsetMap) {
                                    offsetMap.remove(window);
                                }
                            }
                        }
                    } finally {
                        fs.close();
                    }
                }
                windowToTimestamp.remove(window);
                iterator.remove();
            }
        }
    }
}
From source file:com.datatorrent.lib.bucket.HdfsBucketStore.java
License:Open Source License
/**
 * {@inheritDoc}
 */
@Override
public void storeBucketData(long window, long timestamp, Map<Integer, Map<Object, T>> data)
        throws IOException {
    Path dataFilePath = new Path(bucketRoot + PATH_SEPARATOR + window);
    FileSystem fs = FileSystem.newInstance(dataFilePath.toUri(), configuration);
    FSDataOutputStream dataStream = fs.create(dataFilePath);
    Output output = new Output(dataStream);
    try {
        long offset = 0;
        for (int bucketIdx : data.keySet()) {
            Map<Object, T> bucketData = data.get(bucketIdx);
            if (eventKeyClass == null) {
                Map.Entry<Object, T> eventEntry = bucketData.entrySet().iterator().next();
                eventKeyClass = eventEntry.getKey().getClass();
                if (!writeEventKeysOnly) {
                    @SuppressWarnings("unchecked")
                    Class<T> lEventClass = (Class<T>) eventEntry.getValue().getClass();
                    eventClass = lEventClass;
                }
            }
            // Write the size of the data and then the data
            dataStream.writeInt(bucketData.size());
            for (Map.Entry<Object, T> entry : bucketData.entrySet()) {
                writeSerde.writeObject(output, entry.getKey());
                if (!writeEventKeysOnly) {
                    int posLength = output.position();
                    output.writeInt(0); // temporary placeholder
                    writeSerde.writeObject(output, entry.getValue());
                    int posValue = output.position();
                    int valueLength = posValue - posLength - 4;
                    output.setPosition(posLength);
                    output.writeInt(valueLength);
                    output.setPosition(posValue);
                }
            }
            output.flush();
            if (bucketPositions[bucketIdx] == null) {
                bucketPositions[bucketIdx] = Maps.newHashMap();
            }
            windowToBuckets.put(window, bucketIdx);
            windowToTimestamp.put(window, timestamp);
            synchronized (bucketPositions[bucketIdx]) {
                bucketPositions[bucketIdx].put(window, offset);
            }
            offset = dataStream.getPos();
        }
    } finally {
        output.close();
        dataStream.close();
        fs.close();
    }
}
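The finally block above closes the Kryo Output, the FSDataOutputStream, and the FileSystem in reverse order of acquisition. Since all three are Closeable, try-with-resources would give the same ordering automatically; a sketch under the same names as the example:

Path dataFilePath = new Path(bucketRoot + PATH_SEPARATOR + window);
// Resources close in reverse declaration order: output, dataStream, fs
try (FileSystem fs = FileSystem.newInstance(dataFilePath.toUri(), configuration);
        FSDataOutputStream dataStream = fs.create(dataFilePath);
        Output output = new Output(dataStream)) {
    // ... serialize the bucket data as in the example above ...
}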
From source file:com.datatorrent.lib.bucket.HdfsBucketStore.java
License:Open Source License
/**
 * {@inheritDoc}
 */
@Override
public void deleteBucket(int bucketIdx) throws IOException {
    Map<Long, Long> offsetMap = bucketPositions[bucketIdx];
    if (offsetMap != null) {
        for (Long window : offsetMap.keySet()) {
            Collection<Integer> indices = windowToBuckets.get(window);
            synchronized (indices) {
                boolean elementRemoved = indices.remove(bucketIdx);
                if (indices.isEmpty() && elementRemoved) {
                    Path dataFilePath = new Path(bucketRoot + PATH_SEPARATOR + window);
                    FileSystem fs = FileSystem.newInstance(dataFilePath.toUri(), configuration);
                    try {
                        if (fs.exists(dataFilePath)) {
                            logger.debug("start delete {}", window);
                            fs.delete(dataFilePath, true);
                            logger.debug("end delete {}", window);
                        }
                        windowToBuckets.removeAll(window);
                        windowToTimestamp.remove(window);
                    } finally {
                        fs.close();
                    }
                }
            }
        }
    }
    bucketPositions[bucketIdx] = null;
}
From source file:com.datatorrent.lib.parser.XmlParser.java
License:Apache License
@Override
public void setup(com.datatorrent.api.Context.OperatorContext context) {
    try {
        JAXBContext ctx = JAXBContext.newInstance(getClazz());
        unmarshaller = ctx.createUnmarshaller();
        if (schemaXSDFile != null) {
            Path filePath = new Path(schemaXSDFile);
            Configuration configuration = new Configuration();
            FileSystem fs = FileSystem.newInstance(filePath.toUri(), configuration);
            FSDataInputStream inputStream = fs.open(filePath);
            SchemaFactory factory = SchemaFactory.newInstance(XMLConstants.W3C_XML_SCHEMA_NS_URI);
            Schema schema = factory.newSchema(new StreamSource(inputStream));
            unmarshaller.setSchema(schema);
            validator = schema.newValidator();
            fs.close();
        }
    } catch (SAXException e) {
        DTThrowable.wrapIfChecked(e);
    } catch (JAXBException e) {
        DTThrowable.wrapIfChecked(e);
    } catch (IOException e) {
        DTThrowable.wrapIfChecked(e);
    }
}
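One detail worth noting in this last example: fs.close() sits on the success path only, so an exception thrown while building the schema would leak the FileSystem and the open stream. A variant that guarantees release, keeping the example's field names, might look like:

if (schemaXSDFile != null) {
    Path filePath = new Path(schemaXSDFile);
    Configuration configuration = new Configuration();
    // Both resources are released even if newSchema() throws
    try (FileSystem fs = FileSystem.newInstance(filePath.toUri(), configuration);
            FSDataInputStream inputStream = fs.open(filePath)) {
        SchemaFactory factory = SchemaFactory.newInstance(XMLConstants.W3C_XML_SCHEMA_NS_URI);
        Schema schema = factory.newSchema(new StreamSource(inputStream));
        unmarshaller.setSchema(schema);
        validator = schema.newValidator();
    }
}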