List of usage examples for java.nio.channels.FileChannel close()
public final void close() throws IOException
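Closing a FileChannel releases the underlying file handle; any further I/O on the channel throws ClosedChannelException, and calling close() a second time has no effect. Below is a minimal sketch of the basic pattern (the file name data.txt is only a placeholder); a try-with-resources block is the modern equivalent of the explicit close() calls used in the examples that follow.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class FileChannelCloseExample {
    public static void main(String[] args) throws IOException {
        FileChannel channel = FileChannel.open(Paths.get("data.txt"), StandardOpenOption.READ);
        try {
            // Read a little data so the channel has actually been used
            ByteBuffer buf = ByteBuffer.allocate(64);
            channel.read(buf);
        } finally {
            // Always close the channel to release the file descriptor;
            // closing an already-closed channel is a no-op
            channel.close();
        }
    }
}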
From source file:org.apache.nifi.processors.standard.TailFile.java
private void processTailFile(final ProcessContext context, final ProcessSession session, final String tailFile) {
    // If user changes the file that is being tailed, we need to consume the already-rolled-over data according
    // to the Initial Start Position property
    boolean rolloverOccurred;
    TailFileObject tfo = states.get(tailFile);

    if (tfo.isTailFileChanged()) {
        rolloverOccurred = false;
        final String recoverPosition = context.getProperty(START_POSITION).getValue();

        if (START_BEGINNING_OF_TIME.getValue().equals(recoverPosition)) {
            recoverRolledFiles(context, session, tailFile, tfo.getExpectedRecoveryChecksum(),
                    tfo.getState().getTimestamp(), tfo.getState().getPosition());
        } else if (START_CURRENT_FILE.getValue().equals(recoverPosition)) {
            cleanup();
            tfo.setState(new TailFileState(tailFile, null, null, 0L, 0L, 0L, null, tfo.getState().getBuffer()));
        } else {
            final String filename = tailFile;
            final File file = new File(filename);

            try {
                final FileChannel fileChannel = FileChannel.open(file.toPath(), StandardOpenOption.READ);
                getLogger().debug("Created FileChannel {} for {}", new Object[] { fileChannel, file });

                final Checksum checksum = new CRC32();
                final long position = file.length();
                final long timestamp = file.lastModified();

                try (final InputStream fis = new FileInputStream(file);
                        final CheckedInputStream in = new CheckedInputStream(fis, checksum)) {
                    StreamUtils.copy(in, new NullOutputStream(), position);
                }

                fileChannel.position(position);
                cleanup();
                tfo.setState(new TailFileState(filename, file, fileChannel, position, timestamp, file.length(),
                        checksum, tfo.getState().getBuffer()));
            } catch (final IOException ioe) {
                getLogger().error(
                        "Attempted to position Reader at current position in file {} but failed to do so due to {}",
                        new Object[] { file, ioe.toString() }, ioe);
                context.yield();
                return;
            }
        }

        tfo.setTailFileChanged(false);
    } else {
        // Recover any data that may have rolled over since the last time that this processor ran.
        // If expectedRecoveryChecksum != null, that indicates that this is the first iteration since processor was started, so use whatever checksum value
        // was present when the state was last persisted. In this case, we must then null out the value so that the next iteration won't keep using the "recovered"
        // value. If the value is null, then we know that either the processor has already recovered that data, or there was no state persisted. In either case,
        // use whatever checksum value is currently in the state.
        Long expectedChecksumValue = tfo.getExpectedRecoveryChecksum();
        if (expectedChecksumValue == null) {
            expectedChecksumValue = tfo.getState().getChecksum() == null ? null
                    : tfo.getState().getChecksum().getValue();
        }

        rolloverOccurred = recoverRolledFiles(context, session, tailFile, expectedChecksumValue,
                tfo.getState().getTimestamp(), tfo.getState().getPosition());
        tfo.setExpectedRecoveryChecksum(null);
    }

    // initialize local variables from state object; this is done so that we can easily change the values throughout
    // the onTrigger method and then create a new state object after we finish processing the files.
    TailFileState state = tfo.getState();
    File file = state.getFile();
    FileChannel reader = state.getReader();
    Checksum checksum = state.getChecksum();
    if (checksum == null) {
        checksum = new CRC32();
    }
    long position = state.getPosition();
    long timestamp = state.getTimestamp();
    long length = state.getLength();

    // Create a reader if necessary.
    if (file == null || reader == null) {
        file = new File(tailFile);
        reader = createReader(file, position);
        if (reader == null) {
            context.yield();
            return;
        }
    }

    final long startNanos = System.nanoTime();

    // Check if file has rotated
    if (rolloverOccurred || (timestamp <= file.lastModified() && length > file.length())
            || (timestamp < file.lastModified() && length >= file.length())) {
        // Since file has rotated, we close the reader, create a new one, and then reset our state.
        try {
            reader.close();
            getLogger().debug("Closed FileChannel {}", new Object[] { reader, reader });
        } catch (final IOException ioe) {
            getLogger().warn("Failed to close reader for {} due to {}", new Object[] { file, ioe });
        }

        reader = createReader(file, 0L);
        position = 0L;
        checksum.reset();
    }

    if (file.length() == position || !file.exists()) {
        // no data to consume so rather than continually running, yield to allow other processors to use the thread.
        getLogger().debug("No data to consume; created no FlowFiles");
        tfo.setState(new TailFileState(tailFile, file, reader, position, timestamp, length, checksum, state.getBuffer()));
        persistState(tfo, context);
        context.yield();
        return;
    }

    // If there is data to consume, read as much as we can.
    final TailFileState currentState = state;
    final Checksum chksum = checksum;

    // data has been written to file. Stream it to a new FlowFile.
    FlowFile flowFile = session.create();

    final FileChannel fileReader = reader;
    final AtomicLong positionHolder = new AtomicLong(position);
    flowFile = session.write(flowFile, new OutputStreamCallback() {
        @Override
        public void process(final OutputStream rawOut) throws IOException {
            try (final OutputStream out = new BufferedOutputStream(rawOut)) {
                positionHolder.set(readLines(fileReader, currentState.getBuffer(), out, chksum));
            }
        }
    });

    // If there ended up being no data, just remove the FlowFile
    if (flowFile.getSize() == 0) {
        session.remove(flowFile);
        getLogger().debug("No data to consume; removed created FlowFile");
    } else {
        // determine filename for FlowFile by using <base filename of log file>.<initial offset>-<final offset>.<extension>
        final String tailFilename = file.getName();
        final String baseName = StringUtils.substringBeforeLast(tailFilename, ".");
        final String flowFileName;
        if (baseName.length() < tailFilename.length()) {
            flowFileName = baseName + "." + position + "-" + positionHolder.get() + "."
                    + StringUtils.substringAfterLast(tailFilename, ".");
        } else {
            flowFileName = baseName + "." + position + "-" + positionHolder.get();
        }

        final Map<String, String> attributes = new HashMap<>(3);
        attributes.put(CoreAttributes.FILENAME.key(), flowFileName);
        attributes.put(CoreAttributes.MIME_TYPE.key(), "text/plain");
        attributes.put("tailfile.original.path", tailFile);
        flowFile = session.putAllAttributes(flowFile, attributes);

        session.getProvenanceReporter().receive(flowFile, file.toURI().toString(),
                "FlowFile contains bytes " + position + " through " + positionHolder.get() + " of source file",
                TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos));
        session.transfer(flowFile, REL_SUCCESS);
        position = positionHolder.get();

        // Set timestamp to the latest of when the file was modified and the current timestamp stored in the state.
        // We do this because when we read a file that has been rolled over, we set the state to 1 millisecond later than the last mod date
        // in order to avoid ingesting that file again. If we then read from this file during the same second (or millisecond, depending on the
        // operating system file last mod precision), then we could set the timestamp to a smaller value, which could result in reading in the
        // rotated file a second time.
        timestamp = Math.max(state.getTimestamp(), file.lastModified());
        length = file.length();
        getLogger().debug("Created {} and routed to success", new Object[] { flowFile });
    }

    // Create a new state object to represent our current position, timestamp, etc.
    tfo.setState(new TailFileState(tailFile, file, reader, position, timestamp, length, checksum, state.getBuffer()));

    // We must commit session before persisting state in order to avoid data loss on restart
    session.commit();
    persistState(tfo, context);
}
From source file:com.concursive.connect.config.ApplicationPrefs.java
/**
 * Initializes preferences
 *
 * @param context ServletContext
 */
public void initializePrefs(ServletContext context) {
    LOG.info("Initializing...");
    // Load the application node name, if any
    try {
        Properties instanceProperties = new Properties();
        instanceProperties.load(context.getResourceAsStream("/WEB-INF/instance.property"));
        node = instanceProperties.getProperty("node", DEFAULT_NODE);
        LOG.info("Node: " + node);
    } catch (Exception e) {
        LOG.info("Default Node: " + DEFAULT_NODE);
        node = DEFAULT_NODE;
    }
    // Determine the file library
    String fileLibrary = retrieveFileLibraryLocation(context);
    if (fileLibrary != null) {
        loadProperties(fileLibrary);
        this.add(FILE_LIBRARY_PATH, fileLibrary);
        configureDebug();
        verifyKey(context, fileLibrary);
        configureConnectionPool(context);
        configureFreemarker(context);
        configureWebdavManager(context);
        configureSystemSettings(context);
        configureCache(context);
        if (isConfigured()) {
            if (ApplicationVersion.isOutOfDate(this)) {
                LOG.info("Upgrade triggered... obtaining lock to continue");
                // Use a lock file to start upgrading
                File upgradeLockFile = new File(fileLibrary + "upgrade.lock");
                FileChannel fileChannel = null;
                FileLock fileLock = null;
                try {
                    // Configure the file for locking
                    fileChannel = new RandomAccessFile(upgradeLockFile, "rw").getChannel();
                    // Use fileChannel.lock which blocks until the lock is obtained
                    fileLock = fileChannel.lock();
                    // Reload the prefs to make sure the upgrade isn't already complete
                    loadProperties(fileLibrary);
                    if (ApplicationVersion.isOutOfDate(this)) {
                        // The application needs an update
                        LOG.info("Installed version " + ApplicationVersion.getInstalledVersion(this)
                                + " will be upgraded to " + ApplicationVersion.VERSION);
                        performUpgrade(context);
                    }
                } catch (Exception e) {
                    LOG.error("initializePrefs-> performUpgrade", e);
                } finally {
                    try {
                        if (fileLock != null) {
                            fileLock.release();
                        }
                        if (fileChannel != null) {
                            fileChannel.close();
                        }
                    } catch (Exception eclose) {
                        LOG.error("initializePrefs-> lock", eclose);
                    }
                }
            }
            if (!ApplicationVersion.isOutOfDate(this)) {
                // Start the services now that everything is ready
                initializeServices(context);
            }
        }
    }
    configureDefaultBehavior(context);
    loadApplicationDictionaries(context);
}
From source file:org.sakaiproject.search.index.impl.JDBCClusterIndexStore.java
/**
 * Copy a file from s to d, s will be a file, d may be a file or directory
 *
 * @param s
 * @param d
 * @throws IOException
 */
private void copyFile(File s, File d) throws IOException {
    if (log.isDebugEnabled())
        log.debug("Copying " + s.getAbsolutePath() + " to " + d.getAbsolutePath());
    if (s.exists() && s.isFile()) {
        File t = d; // target
        if (d.isDirectory()) {
            if (!d.exists()) {
                if (!d.mkdirs()) {
                    log.warn("Unable to create directory " + d.getPath());
                }
            }
            t = new File(d, s.getName());
        } else {
            File p = d.getParentFile();
            if (!p.exists()) {
                if (!p.mkdirs()) {
                    log.warn("couldn't create: " + p.getPath());
                }
            }
        }
        FileChannel srcChannel = null;
        FileChannel dstChannel = null;
        try {
            // use nio
            // Create channel on the source
            srcChannel = new FileInputStream(s).getChannel();
            // Create channel on the destination
            dstChannel = new FileOutputStream(t).getChannel();
            // Copy file contents from source to destination
            doBlockedStream(srcChannel, dstChannel);
        } finally {
            // Close the channels
            try {
                srcChannel.close();
            } catch (Exception ex) {
                log.debug(ex);
            }
            try {
                dstChannel.close();
            } catch (Exception ex) {
                log.debug(ex);
            }
        }
    }
}
From source file:com.marklogic.client.functionaltest.BasicJavaClientREST.java
/**
 * Copy Files from One location to Other
 *
 * @param Source File
 * @param target File
 * @param Boolean Value
 * @throws FileNotFoundException
 */
public void copyWithChannels(File aSourceFile, File aTargetFile, boolean aAppend) {
    //log("Copying files with channels.");
    //ensureTargetDirectoryExists(aTargetFile.getParentFile());
    FileChannel inChannel = null;
    FileChannel outChannel = null;
    FileInputStream inStream = null;
    FileOutputStream outStream = null;
    try {
        try {
            inStream = new FileInputStream(aSourceFile);
            inChannel = inStream.getChannel();
            outStream = new FileOutputStream(aTargetFile, aAppend);
            outChannel = outStream.getChannel();
            long bytesTransferred = 0;
            // defensive loop - there's usually only a single iteration
            while (bytesTransferred < inChannel.size()) {
                bytesTransferred += inChannel.transferTo(0, inChannel.size(), outChannel);
            }
        } finally {
            // being defensive about closing all channels and streams
            if (inChannel != null)
                inChannel.close();
            if (outChannel != null)
                outChannel.close();
            if (inStream != null)
                inStream.close();
            if (outStream != null)
                outStream.close();
        }
    } catch (FileNotFoundException ex) {
        System.out.println("File not found: " + ex);
    } catch (IOException ex) {
        System.out.println(ex);
    }
}
From source file:com.ezac.gliderlogs.FlightOverviewActivity.java
@SuppressLint("SimpleDateFormat") public void GliderLogToDB(String DBPath, String DB, String device) { // format date SimpleDateFormat TSD = new SimpleDateFormat("yyyyMMdd_kkss"); SimpleDateFormat DIR = new SimpleDateFormat("yyyy/MM_dd"); Date myDate = new Date(); String backupDBPath = device + "_" + TSD.format(myDate); String TS_DIR = DIR.format(myDate); // to internal sdcard File dir = new File(Environment.getExternalStorageDirectory() + "/Download/" + TS_DIR); if (!dir.exists() || !dir.isDirectory()) { dir.mkdir();/*from w w w. j av a 2 s . com*/ } File data = Environment.getDataDirectory(); // create a file channel object FileChannel src = null; FileChannel des = null; File currentDB = new File(data + "/data/" + DBPath + "/databases/", DB); File backupDB = new File(dir, backupDBPath); try { backupDB.delete(); src = new FileInputStream(currentDB).getChannel(); des = new FileOutputStream(backupDB).getChannel(); des.transferFrom(src, 0, src.size()); src.close(); des.close(); } catch (IOException e) { Log.d(TAG, e.toString()); e.printStackTrace(); } }
From source file:com.cisco.dvbu.ps.common.util.CommonUtils.java
/**
 * The copyFile method is used to copy files from a source to a destination folder.
 *
 * @param fromFilePath
 * @param toFilePath
 * @throws ValidationException
 */
public static void copyFile(String fromFilePath, String toFilePath, boolean forceCopy)
        throws ValidationException {
    FileChannel srcChannel = null;
    FileChannel dstChannel = null;

    boolean fileExists = fileExists(toFilePath);
    if (forceCopy && fileExists) {
        removeFile(toFilePath);
        fileExists = fileExists(toFilePath);
    }

    if ((!fileExists) || (forceCopy && fileExists)) {
        try {
            // Create channel on the source
            srcChannel = new FileInputStream(fromFilePath).getChannel();
            // Create channel on the destination
            dstChannel = new FileOutputStream(toFilePath).getChannel();
            // Force the copy - added to overcome copy error
            dstChannel.force(true);
            // Copy file contents from source to destination
            dstChannel.transferFrom(srcChannel, 0, srcChannel.size());
        } catch (IOException e) {
            String message = "Could not copy file " + fromFilePath + ".An error was encountered: " + e.toString();
            throw new ValidationException(message, e);
        } finally {
            try {
                // Close the channels
                if (srcChannel != null)
                    srcChannel.close();
                if (dstChannel != null)
                    dstChannel.close();
                srcChannel = null;
                dstChannel = null;
            } catch (IOException e) {
                String message = "Could not copy file " + fromFilePath
                        + ". Error encountered while closing source and destination channels: " + e.toString();
                throw new ValidationException(message, e);
            }
        }
    }
}
From source file:com.tandong.sa.aq.AbstractAQuery.java
/**
 * Create a temporary file on EXTERNAL storage (sdcard) that holds the cached content of the url.
 * Returns null if url is not cached, or the system cannot create such file (sdcard is absent, such as in emulator).
 *
 * The returned file is accessible to all apps, therefore it is ideal for sharing content (such as photo) via the intent mechanism.
 *
 * <br>
 * <br>
 * Example Usage:
 *
 * <pre>
 * Intent intent = new Intent(Intent.ACTION_SEND);
 * intent.setType("image/jpeg");
 * intent.putExtra(Intent.EXTRA_STREAM, Uri.fromFile(file));
 * startActivityForResult(Intent.createChooser(intent, "Share via:"), 0);
 * </pre>
 *
 * <br>
 * The temp file will be deleted when AQUtility.cleanCacheAsync is invoked, or the file can be explicitly deleted after use.
 *
 * @param url The url of the desired cached content.
 * @param filename The desired file name, which might be used by other apps to describe the content, such as an email attachment.
 * @return temp file
 */
public File makeSharedFile(String url, String filename) {
    File file = null;
    try {
        File cached = getCachedFile(url);
        if (cached != null) {
            File temp = AQUtility.getTempDir();
            if (temp != null) {
                file = new File(temp, filename);
                file.createNewFile();
                FileChannel ic = new FileInputStream(cached).getChannel();
                FileChannel oc = new FileOutputStream(file).getChannel();
                try {
                    ic.transferTo(0, ic.size(), oc);
                } finally {
                    if (ic != null)
                        ic.close();
                    if (oc != null)
                        oc.close();
                }
            }
        }
    } catch (Exception e) {
        AQUtility.debug(e);
    }
    return file;
}
From source file:au.org.theark.core.service.ArkCommonServiceImpl.java
/**
 * {@inheritDoc}
 */
public void copyArkLargeFileAttachments(String sourceFilePath, String destinationFilePath) throws IOException {
    FileChannel source = null;
    FileChannel destination = null;
    try {
        source = new FileInputStream(new File(sourceFilePath)).getChannel();
        destination = new FileOutputStream(new File(destinationFilePath)).getChannel();
        // This fails with Map Failed exception on large files
        // destination.transferFrom(source, 0, source.size());
        ByteBuffer buf = ByteBuffer.allocateDirect(DEFAULT_BUFFER_SIZE);
        while ((source.read(buf)) != -1) {
            buf.flip();
            destination.write(buf);
            buf.clear();
        }
    } finally {
        if (source != null) {
            source.close();
        }
        if (destination != null) {
            destination.close();
        }
    }
}
From source file:com.koda.integ.hbase.storage.LRUStorageRecycler.java
/**
 * Format of a block in a file:
 * 0..3 - total record size (-4)
 * 4..7 - size of a key in bytes (16 if use hash128)
 * 8 .. x - key data
 * x+1 .. x+1 - IN_MEMORY flag (1 - in memory, 0 - not)
 * x+2 ... block, serialized and compressed
 *
 * @param file the file
 * @throws IOException Signals that an I/O exception has occurred.
 * @throws NativeMemoryException the native memory exception
 */
private void processFile(RandomAccessFile file) throws IOException, NativeMemoryException {
    FileChannel fc = file.getChannel();
    // make sure that file size < 2G
    LOG.info("File length=" + file.length());
    MappedByteBuffer buffer = fc.map(MapMode.READ_ONLY, 0, file.length());
    long fileLength = file.length();
    long saved = 0;
    long startTime = System.currentTimeMillis();

    while (buffer.position() < fileLength) {
        int oldOffset = buffer.position();
        //LOG.info(oldOffset);
        // check IO throttle
        ioThrottle(startTime, oldOffset);
        NumericHistogram histogram = refCache.getObjectHistogram();
        int blockSize = buffer.getInt();
        int keySize = buffer.getInt();
        //LOG.info("block size="+blockSize+" key size="+keySize);
        byte[] key = new byte[keySize];
        // STATISTICS
        totalScannedBytes.addAndGet(blockSize + 4);
        // read key
        // WE HAVE TO USE byte[] keys
        long data = refCache.getEvictionData(key);
        if (data < 0) {
            // not found in in_memory cache
            buffer.position(oldOffset + blockSize + 4);
            continue;
        }
        double quantValue = histogram.quantile(evictionThreshold);
        if (data > quantValue) {
            // save block
            saved = blockSize + 4;
            buffer.position(oldOffset);
            StorageHandle handle = storage.storeData(buffer);
            refCache.put(key, handle);
        } else {
            // STATISTICS
            totalPurgedBytes.addAndGet(blockSize + 4);
        }
        if (oldOffset + blockSize + 4 < fileLength) {
            // Advance pointer
            buffer.position(oldOffset + blockSize + 4);
        } else {
            break;
        }
        // Check panic. W/o adaptive processing support - killing file entirely
        // is the only option to keep up with the load.
        if (storage.getCurrentStorageSize() >= panicLevelWatermark * storage.getMaxStorageSize()) {
            LOG.warn("[PANIC DELETE]. Storage size exceeded " + panicLevelWatermark + " mark.");
            // STATISTICS
            totalPanicEvents.incrementAndGet();
        }
    }

    // Unmap mapped ByteBuffer
    fc.close();
    FileUtils.unmapMmaped(buffer);

    LOG.info("Stats: total length=" + fileLength + "; purged data=" + (fileLength - saved)
            + " with eviction threshold=" + evictionThreshold + "; purged ratio=["
            + (((double) (fileLength - saved)) / fileLength) + "]");
}
From source file:com.shinymayhem.radiopresets.ServiceRadioPlayer.java
public void copyLog() {
    //String path = Environment.getExternalStorageDirectory().getAbsolutePath();
    String path = getExternalFilesDir(null).getAbsolutePath();
    File src = getFileStreamPath(ActivityLogger.LOG_FILENAME);
    File dst = new File(path + File.separator + ActivityLogger.LOG_FILENAME);
    try {
        if (dst.createNewFile()) {
            if (LOCAL_LOGV)
                log("sd file created", "v");
        } else {
            if (LOCAL_LOGV)
                log("sd file exists?", "v");
        }
    } catch (IOException e2) {
        log("sd file error", "e");
        Toast.makeText(this, "sd file error", Toast.LENGTH_SHORT).show();
        e2.printStackTrace();
    }
    FileChannel in = null;
    FileChannel out = null;
    try {
        in = new FileInputStream(src).getChannel();
    } catch (FileNotFoundException e1) {
        log("in file not found", "e");
        Toast.makeText(this, "in file not found", Toast.LENGTH_SHORT).show();
        e1.printStackTrace();
    }
    try {
        out = new FileOutputStream(dst).getChannel();
    } catch (FileNotFoundException e1) {
        log("out file not found", "e");
        Toast.makeText(this, "out file not found", Toast.LENGTH_SHORT).show();
        e1.printStackTrace();
    }
    try {
        in.transferTo(0, in.size(), out);
        if (LOCAL_LOGD)
            log("log file copied to " + path + File.separator + ActivityLogger.LOG_FILENAME, "d");
        if (in != null) {
            in.close();
        }
        if (out != null) {
            out.close();
        }
        clearLog();
    } catch (IOException e) {
        log("error copying log file", "e");
        Toast.makeText(this, "error copying log file", Toast.LENGTH_SHORT).show();
        if (LOCAL_LOGV)
            e.printStackTrace();
    } finally {
    }
}