List of usage examples for java.nio.channels.FileChannel.write
The examples below exercise the following FileChannel.write overloads:
public abstract int write(ByteBuffer src) throws IOException
public abstract int write(ByteBuffer src, long position) throws IOException
public final long write(ByteBuffer[] srcs) throws IOException
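Before the project-specific examples, here is a minimal, self-contained sketch of a single-buffer write and a gathering write with a ByteBuffer[]. The file name demo.txt and the literal strings are placeholders for this sketch, not part of any example below.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class FileChannelWriteSketch {
    public static void main(String[] args) throws IOException {
        // demo.txt is a placeholder path used only for this sketch
        try (FileChannel channel = FileChannel.open(Paths.get("demo.txt"),
                StandardOpenOption.CREATE, StandardOpenOption.WRITE,
                StandardOpenOption.TRUNCATE_EXISTING)) {
            // Single-buffer write: returns the number of bytes written from this buffer
            ByteBuffer single = ByteBuffer.wrap("header\n".getBytes(StandardCharsets.UTF_8));
            int written = channel.write(single);

            // Gathering write: writes the remaining bytes of each buffer, in order
            ByteBuffer[] parts = {
                ByteBuffer.wrap("body\n".getBytes(StandardCharsets.UTF_8)),
                ByteBuffer.wrap("footer\n".getBytes(StandardCharsets.UTF_8))
            };
            long total = channel.write(parts);

            System.out.println("wrote " + written + " + " + total + " bytes");
        }
    }
}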
From source file:sos.scheduler.editor.app.WebDavDialogListener.java
public void saveProfile(boolean savePassword) {
    try {
        java.util.Properties profile = getCurrProfile();
        String filename = configFile;
        String profilename = currProfileName;
        byte[] b = getBytesFromFile(new File(filename));
        String s = new String(b);
        int pos1 = s.indexOf("[" + PREFIX + profilename + "]");
        int pos2 = s.indexOf("[", pos1 + 1);
        if (pos1 == -1) {
            // profile not found
            pos1 = s.length();
            pos2 = -1;
        }
        if (pos2 == -1)
            pos2 = s.length();
        String s2 = s.substring(0, pos1);
        s2 = s2 + "[" + PREFIX + profilename + "]\n\n";
        s2 = s2 + "url=" + sosString.parseToString(profile.get("url")) + "\n";
        s2 = s2 + "user=" + sosString.parseToString(profile.get("user")) + "\n";
        try {
            if (savePassword && sosString.parseToString(profile.get("password")).length() > 0) {
                String pass = String.valueOf(SOSUniqueID.get());
                Options.setProperty("profile.timestamp." + profilename, pass);
                Options.saveProperties();
                if (pass.length() > 8) {
                    pass = pass.substring(pass.length() - 8);
                }
                String encrypt = SOSCrypt.encrypt(pass, sosString.parseToString(profile.get("password")));
                s2 = s2 + "password=" + encrypt + "\n";
                profile.put("password", encrypt);
                this.password = encrypt;
                getProfiles().put(profilename, profile);
            }
        } catch (Exception e) {
            new ErrorLog("error in " + sos.util.SOSClassUtil.getMethodName() + " ; ..could not encrypt.", e);
            throw e;
        }
        s2 = s2 + "localdirectory=" + sosString.parseToString(profile.get("localdirectory")) + "\n";
        s2 = s2 + "save_password=" + sosString.parseToString(profile.get("save_password")) + "\n";
        s2 = s2 + "protocol=" + sosString.parseToString(profile.get("protocol")) + "\n";
        s2 = s2 + "use_proxy=" + sosString.parseToString(profile.get("use_proxy")) + "\n";
        s2 = s2 + "proxy_server=" + sosString.parseToString(profile.get("proxy_server")) + "\n";
        s2 = s2 + "proxy_port=" + sosString.parseToString(profile.get("proxy_port")) + "\n";
        s2 = s2 + "\n\n";
        s2 = s2 + s.substring(pos2);
        // write the updated configuration back to the file via a FileChannel
        java.nio.ByteBuffer bbuf = java.nio.ByteBuffer.wrap(s2.getBytes());
        java.io.File file = new java.io.File(filename);
        boolean append = false;
        java.nio.channels.FileChannel wChannel = new java.io.FileOutputStream(file, append).getChannel();
        wChannel.write(bbuf);
        wChannel.close();
    } catch (Exception e) {
        try {
            new ErrorLog("error in " + sos.util.SOSClassUtil.getMethodName()
                    + " ; could not save configurations File: " + configFile, e);
        } catch (Exception ee) {
            // do nothing
        }
        hasError = true;
        MainWindow.message("could not save configurations File: " + configFile + ": cause:\n" + e.getMessage(),
                SWT.ICON_WARNING);
    } finally {
        cboConnectname.setItems(getProfileNames());
        cboConnectname.setText(currProfileName);
        txtURL.setText(currProfile.getProperty("url"));
    }
}
From source file:org.apache.htrace.impl.HTracedSpanReceiver.java
void appendToDroppedSpansLog(String text) throws IOException {
    // Is the dropped spans log disabled?
    if (conf.droppedSpansLogPath.isEmpty() || (conf.droppedSpansLogMaxSize == 0)) {
        return;
    }
    FileLock lock = null;
    String msg = ISO_DATE_FORMAT.format(new Date()) + ": " + text;
    ByteBuffer bb = ByteBuffer.wrap(msg.getBytes(StandardCharsets.UTF_8));
    // FileChannel locking corresponds to advisory locking on UNIX. It will
    // protect multiple processes from attempting to write to the same dropped
    // spans log at once. However, within a single process, we need this
    // synchronized block to ensure that multiple HTracedSpanReceiver objects
    // don't try to write to the same log at once. (It is unusual to configure
    // multiple HTracedSpanReceiver objects, but possible.)
    synchronized (HTracedSpanReceiver.class) {
        FileChannel channel = FileChannel.open(Paths.get(conf.droppedSpansLogPath), APPEND, CREATE, WRITE);
        try {
            lock = channel.lock();
            long size = channel.size();
            if (size > conf.droppedSpansLogMaxSize) {
                throw new IOException("Dropped spans log " + conf.droppedSpansLogPath + " is already " + size
                        + " bytes; will not add to it.");
            } else if ((size == 0) && (DROPPED_SPANS_FILE_PERMS != null)) {
                // Set the permissions of the dropped spans file so that other
                // processes can write to it.
                Files.setPosixFilePermissions(Paths.get(conf.droppedSpansLogPath), DROPPED_SPANS_FILE_PERMS);
            }
            channel.write(bb);
        } finally {
            try {
                if (lock != null) {
                    lock.release();
                }
            } finally {
                channel.close();
            }
        }
    }
}
From source file:org.apache.hadoop.yarn.server.nodemanager.containermanager.TestContainerManager.java
private void writeByteBufferToFile(File target, ByteBuffer data) throws IOException {
    FileChannel fileChannel = new FileOutputStream(target, false).getChannel();
    fileChannel.write(data);
    fileChannel.close();
}
From source file:com.colorchen.qbase.utils.FileUtil.java
/**
 * Merges the given files into a single output file.
 *
 * @param outFile the destination file
 * @param files   the source files to concatenate
 */
public static void mergeFiles(Context context, File outFile, List<File> files) {
    FileChannel outChannel = null;
    try {
        outChannel = new FileOutputStream(outFile).getChannel();
        for (File f : files) {
            FileChannel fc = new FileInputStream(f).getChannel();
            ByteBuffer bb = ByteBuffer.allocate(BUFSIZE);
            while (fc.read(bb) != -1) {
                bb.flip();
                outChannel.write(bb);
                bb.clear();
            }
            fc.close();
        }
        Log.d(TAG, "merge finished");
    } catch (IOException ioe) {
        ioe.printStackTrace();
    } finally {
        try {
            if (outChannel != null) {
                outChannel.close();
            }
        } catch (IOException ignore) {
        }
    }
}
From source file:com.rapidminer.tools.Tools.java
public static void copy(File srcPath, File dstPath) throws IOException {
    if (srcPath.isDirectory()) {
        if (!dstPath.exists()) {
            boolean result = dstPath.mkdir();
            if (!result) {
                throw new IOException("Unable to create directory: " + dstPath);
            }
        }
        String[] files = srcPath.list();
        for (String file : files) {
            copy(new File(srcPath, file), new File(dstPath, file));
        }
    } else {
        if (srcPath.exists()) {
            FileChannel in = null;
            FileChannel out = null;
            try (FileInputStream fis = new FileInputStream(srcPath);
                    FileOutputStream fos = new FileOutputStream(dstPath)) {
                in = fis.getChannel();
                out = fos.getChannel();
                long size = in.size();
                MappedByteBuffer buf = in.map(FileChannel.MapMode.READ_ONLY, 0, size);
                out.write(buf);
            } finally {
                if (in != null) {
                    in.close();
                }
                if (out != null) {
                    out.close();
                }
            }
        }
    }
}
From source file:com.servoy.j2db.util.Utils.java
public static boolean writeTXTFile(File f, String content, Charset charset) {
    if (f != null) {
        FileOutputStream fos = null;
        try {
            fos = new FileOutputStream(f);
            FileChannel fc = fos.getChannel();
            ByteBuffer bb = charset.encode(content);
            fc.write(bb);
            bb.rewind();
            return true;
        } catch (Exception e) {
            Debug.error("Error writing txt file: " + f, e); //$NON-NLS-1$
        } finally {
            closeOutputStream(fos);
        }
    }
    return false;
}
From source file:org.carbondata.core.util.CarbonUtil.java
public static void writeLevelCardinalityFile(String loadFolderLoc, String tableName, int[] dimCardinality)
        throws KettleException {
    String levelCardinalityFilePath = loadFolderLoc + File.separator
            + CarbonCommonConstants.LEVEL_METADATA_FILE + tableName
            + CarbonCommonConstants.CARBON_METADATA_EXTENSION;
    FileOutputStream fileOutputStream = null;
    FileChannel channel = null;
    try {
        int dimCardinalityArrLength = dimCardinality.length;
        // first four bytes for writing the length of array, remaining for array data
        ByteBuffer buffer = ByteBuffer.allocate(CarbonCommonConstants.INT_SIZE_IN_BYTE
                + dimCardinalityArrLength * CarbonCommonConstants.INT_SIZE_IN_BYTE);
        fileOutputStream = new FileOutputStream(levelCardinalityFilePath);
        channel = fileOutputStream.getChannel();
        buffer.putInt(dimCardinalityArrLength);
        for (int i = 0; i < dimCardinalityArrLength; i++) {
            buffer.putInt(dimCardinality[i]);
        }
        buffer.flip();
        channel.write(buffer);
        buffer.clear();
        LOGGER.info("Level cardinality file written to : " + levelCardinalityFilePath);
    } catch (IOException e) {
        LOGGER.error("Error while writing level cardinality file : " + levelCardinalityFilePath + e.getMessage());
        throw new KettleException("Not able to write level cardinality file", e);
    } finally {
        closeStreams(channel, fileOutputStream);
    }
}
From source file:com.healthmarketscience.jackcess.Database.java
/**
 * Copies the given InputStream to the given channel using the most
 * efficient means possible.
 */
private static void transferFrom(FileChannel channel, InputStream in) throws IOException {
    ReadableByteChannel readChannel = Channels.newChannel(in);
    if (!BROKEN_NIO) {
        // sane implementation
        channel.transferFrom(readChannel, 0, MAX_EMPTYDB_SIZE);
    } else {
        // do things the hard way for broken vms
        ByteBuffer bb = ByteBuffer.allocate(8096);
        while (readChannel.read(bb) >= 0) {
            bb.flip();
            channel.write(bb);
            bb.clear();
        }
    }
}
From source file:com.yobidrive.diskmap.buckets.BucketTableManager.java
private void commitBucketTableToDisk() throws BucketTableManagerException {
    File currentFile = null;
    FileChannel fileChannel = null;
    ByteBuffer headerBuffer = null;
    try {
        logger.warn("Start commit bucket table...");
        if (bucketTable.getRequestedCheckPoint() == null || bucketTable.getRequestedCheckPoint().isEmpty())
            throw new BucketTableManagerException("commit requested while there is no requested checkpoint");
        currentFile = getLatestCommitedFile();
        File nextFile = getNextFile(getLatestCommitedFile());
        fileChannel = (new RandomAccessFile(nextFile, "rw")).getChannel();
        // Write header with empty checkpoint
        headerBuffer = ByteBuffer.allocate(HEADERSIZE);
        fileChannel.position(0L);
        headerBuffer.putInt(MAGICSTART);
        headerBuffer.putLong(mapSize);
        // Reset checkpoint to no checkpoint done
        NeedlePointer lastCheckPoint = new NeedlePointer(); // Empty needle
        lastCheckPoint.putNeedlePointerToBuffer(headerBuffer);
        headerBuffer.putInt(MAGICEND);
        headerBuffer.flip(); // truncate buffer
        fileChannel.write(headerBuffer);
        // Now write the buckets, one buffer at a time
        for (int i = 0; i < nbBuffers; i++) {
            bucketTable.prepareBufferForWriting(i);
            int written = fileChannel.write(bucketTable.getBuffer(i));
            if (written < bucketTable.getBuffer(i).limit())
                throw new BucketTableManagerException("Incomplete write for bucket table file "
                        + nextFile.getName() + ", expected " + mapSize + HEADERSIZE);
            try {
                Thread.sleep(10);
            } catch (Throwable th) {
            }
        }
        // Write second magic number
        ByteBuffer buffer = ByteBuffer.allocate(NeedleLogInfo.INFOSIZE);
        buffer.rewind();
        buffer.limit(INTSIZE);
        buffer.putInt(MAGICSTART);
        buffer.rewind();
        fileChannel.write(buffer);
        // Write needle log info entries
        Iterator<NeedleLogInfo> it = logInfoPerLogNumber.values().iterator();
        while (it.hasNext()) {
            buffer.rewind();
            buffer.limit(NeedleLogInfo.INFOSIZE);
            NeedleLogInfo nli = it.next();
            nli.putNeedleLogInfo(buffer, true);
            int written = fileChannel.write(buffer);
            if (written < NeedleLogInfo.INFOSIZE)
                throw new BucketTableManagerException(
                        "Incomplete write for bucket table file, writing log infos " + nextFile.getName());
        }
        // Write the checkpoint at its fixed offset
        headerBuffer = ByteBuffer.allocate(NeedlePointer.POINTERSIZE);
        headerBuffer.rewind();
        headerBuffer.limit(NeedlePointer.POINTERSIZE);
        bucketTable.getRequestedCheckPoint().putNeedlePointerToBuffer(headerBuffer, true);
        headerBuffer.rewind();
        if (fileChannel.write(headerBuffer, CHECKPOINTOFFSET) < NeedlePointer.POINTERSIZE) {
            throw new BucketTableManagerException("Could not write checkpoint to " + nextFile.getName());
        }
        fileChannel.force(true);
        fileChannel.close();
        if (!nextFile.renameTo(getCommittedFile(nextFile)))
            throw new BucketTableManagerException(
                    "Could not rename " + nextFile.getName() + " to " + getCommittedFile(nextFile).getName());
        logger.warn("Committed bucket table.");
    } catch (IOException ie) {
        throw new BucketTableManagerException("Failed writing bucket table", ie);
    } finally {
        headerBuffer = null; // may ease garbage collection
        if (fileChannel != null) {
            try {
                fileChannel.close();
            } catch (Exception ex) {
                throw new BucketTableManagerException("Failed to close file channel", ex);
            }
        }
    }
    try {
        if (currentFile != null) {
            if (!currentFile.delete())
                logger.error("Failed deleting previous bucket table " + currentFile.getName());
        }
    } catch (Throwable th) {
        logger.error("Failed deleting previous bucket table " + currentFile.getName(), th);
    }
}
From source file:com.clustercontrol.agent.job.PublicKeyThread.java
/**
 * Appends the given public key to the authorized_keys file.
 *
 * @param publicKey the key to append
 * @return true if the key was appended (or the update was skipped)
 */
private synchronized boolean addKey(String publicKey) {
    m_log.debug("add key start");
    if (SKIP_KEYFILE_UPDATE) {
        m_log.info("skipped appending publicKey");
        return true;
    }
    // resolve the authorized_keys file path for the execution user
    String fileName = AgentProperties.getProperty(execUser.toLowerCase() + AUTHORIZED_KEY_PATH);
    m_log.debug("fileName" + fileName);
    if (fileName == null || fileName.length() == 0)
        return false;
    File fi = new File(fileName);
    RandomAccessFile randomAccessFile = null;
    FileChannel channel = null;
    FileLock lock = null;
    boolean add = false;
    try {
        // open the file for read/write and get its channel
        randomAccessFile = new RandomAccessFile(fi, "rw");
        channel = randomAccessFile.getChannel();
        // try to acquire the file lock, retrying until the timeout expires
        for (int i = 0; i < (FILELOCK_TIMEOUT / FILELOCK_WAIT); i++) {
            if (null != (lock = channel.tryLock())) {
                break;
            }
            m_log.info("waiting for locked file... [" + (i + 1) + "/" + (FILELOCK_TIMEOUT / FILELOCK_WAIT)
                    + " : " + fileName + "]");
            Thread.sleep(FILELOCK_WAIT);
        }
        if (null == lock) {
            m_log.warn("file locking timeout.");
            return false;
        }
        // append the key, guarded against concurrent writers in this process
        synchronized (authKeyLock) {
            // move to the end of the file
            channel.position(channel.size());
            String writeData = "\n" + publicKey;
            // m_log.debug("add key : " + writeData);
            ByteBuffer buffer = ByteBuffer.allocate(512);
            buffer.clear();
            buffer.put(writeData.getBytes());
            buffer.flip();
            channel.write(buffer);
        }
        add = true;
    } catch (Exception e) {
        m_log.error(e);
    } finally {
        try {
            if (channel != null) {
                channel.close();
            }
            if (randomAccessFile != null) {
                randomAccessFile.close();
            }
            if (lock != null) {
                lock.release();
            }
        } catch (Exception e) {
        }
    }
    return add;
}