List of usage examples for java.nio.ByteBuffer.flip()
public final Buffer flip()
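flip() prepares a buffer that has just been filled for reading: the limit is set to the current position, the position is reset to zero, and any mark is discarded. Before the project examples below, here is a minimal, self-contained sketch of the fill/flip/drain cycle (class and variable names are illustrative only, not taken from the examples):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class FlipExample {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(64);

        // Fill: each put() advances the position.
        buffer.put("hello".getBytes(StandardCharsets.UTF_8));

        // Flip: limit = position, position = 0, mark discarded.
        buffer.flip();

        // Drain: read back exactly the bytes that were written.
        byte[] out = new byte[buffer.remaining()];
        buffer.get(out);
        System.out.println(new String(out, StandardCharsets.UTF_8)); // prints "hello"
    }
}

The same pattern recurs in every example that follows: write into the buffer, call flip(), then read from it or hand it to a channel.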
From source file:org.alfresco.contentstore.patch.PatchServiceImpl.java
@Override
public void getPatch(PatchDocument patchDocument, NodeChecksums nodeChecksums, ReadableByteChannel inChannel)
        throws IOException {
    ByteBuffer buffer = ByteBuffer.allocate(1024 * 100);
    inChannel.read(buffer);
    buffer.flip();
    updatePatchDocument(patchDocument, nodeChecksums, buffer);
}
From source file:com.clustercontrol.agent.job.PublicKeyThread.java
/**
 * Appends a public key to the authorized_keys file.<BR>
 *
 * @param publicKey the public key to append
 * @return true if the key was appended (or the update was skipped)
 */
private synchronized boolean addKey(String publicKey) {
    m_log.debug("add key start");

    if (SKIP_KEYFILE_UPDATE) {
        m_log.info("skipped appending publicKey");
        return true;
    }

    // Resolve the authorized_keys file path for the execution user.
    String fileName = AgentProperties.getProperty(execUser.toLowerCase() + AUTHORIZED_KEY_PATH);
    m_log.debug("fileName=" + fileName);
    if (fileName == null || fileName.length() == 0)
        return false;

    File fi = new File(fileName);

    RandomAccessFile randomAccessFile = null;
    FileChannel channel = null;
    FileLock lock = null;
    boolean add = false;
    try {
        // Open the file for read/write access and get its channel.
        randomAccessFile = new RandomAccessFile(fi, "rw");
        channel = randomAccessFile.getChannel();

        // Retry until the file lock is acquired or the timeout expires.
        for (int i = 0; i < (FILELOCK_TIMEOUT / FILELOCK_WAIT); i++) {
            if (null != (lock = channel.tryLock())) {
                break;
            }
            m_log.info("waiting for locked file... [" + (i + 1) + "/"
                    + (FILELOCK_TIMEOUT / FILELOCK_WAIT) + " : " + fileName + "]");
            Thread.sleep(FILELOCK_WAIT);
        }
        if (null == lock) {
            m_log.warn("file locking timeout.");
            return false;
        }

        synchronized (authKeyLock) {
            // Position at the end of the file to append.
            channel.position(channel.size());

            String writeData = "\n" + publicKey;
            // m_log.debug("add key : " + writeData);

            // Fill the buffer, flip it for reading, then write it to the channel.
            ByteBuffer buffer = ByteBuffer.allocate(512);
            buffer.clear();
            buffer.put(writeData.getBytes());
            buffer.flip();
            channel.write(buffer);
        }
        add = true;
    } catch (Exception e) {
        m_log.error(e);
    } finally {
        try {
            if (channel != null) {
                channel.close();
            }
            if (randomAccessFile != null) {
                randomAccessFile.close();
            }
            if (lock != null) {
                // Release the file lock.
                lock.release();
            }
        } catch (Exception e) {
        }
    }
    return add;
}
From source file:com.openteach.diamond.network.waverider.session.DefaultSession.java
@Override
public void onRead() throws IOException, InterruptedException {
    logger.debug("onRead");
    ByteBuffer buffer = ByteBuffer.allocate(NetWorkConstants.DEFAULT_NETWORK_BUFFER_SIZE);
    int ret = 0;
    do {
        ret = channel.read(buffer);
    } while (ret > 0);
    if (ret == -1) {
        throw new IOException("EOF");
    }
    buffer.flip();
    if (buffer.hasRemaining()) {
        inputBuffer.put(buffer);
        synchronized (waitMoreDataLock) {
            waitMoreDataLock.notifyAll();
        }
    }
    //logger.info("Session is onRead, read " + buffer.remaining() + " bytes");
}
From source file:org.alfresco.contentstore.ContentStoreTest.java
private PatchDocument getPatch(Node node, String content) throws IOException {
    PatchDocument patchDocument = new PatchDocumentImpl();

    ByteBuffer data = ByteBuffer.allocate(1024 * 10);
    data.put(content.getBytes());
    data.flip();

    NodeChecksums checksums = checksumService.getChecksums(node.getNodeId(), node.getNodeVersion());
    patchService.updatePatchDocument(patchDocument, checksums, data);

    return patchDocument;
}
From source file:com.openteach.diamond.network.waverider.network.Packet.java
/**
 * Parses one packet from the queue of buffers filled by the network layer.
 *
 * @param inputBuffer queue of received network buffers
 * @return the parsed packet
 * @throws IOException, InterruptedException
 */
public static Packet parse(BlockingQueue<ByteBuffer> inputBuffer, NetWorkEndPoint endPoint,
        SocketChannel channel) throws IOException, InterruptedException {
    // Buffer for the packet header
    byte[] tmpBuf = new byte[NetWorkConstants.DEFAULT_NETWORK_BUFFER_SIZE];
    ByteBuffer header = ByteBuffer.allocate(Packet.getHeaderSize());
    ByteBuffer currentBuffer = null;
    int rest = 0;
    boolean isRemove = false;

    // Assemble the fixed-size header from the queued buffers.
    while (true) {
        while ((currentBuffer = inputBuffer.peek()) == null) {
            if (!endPoint.notifyRead(channel)) {
                throw new IOException("Socket closed by other thread");
            }
            // Wait for more data; alternatives kept from the original author:
            //endPoint.waitMoreData(5);
            // FIXME 2ms
            //Thread.sleep(1);
            Thread.yield();
        }
        isRemove = false;
        rest = header.capacity() - header.position();
        if (currentBuffer.remaining() >= rest) {
            if (currentBuffer.remaining() == rest) {
                isRemove = true;
            }
            currentBuffer.get(tmpBuf, 0, rest);
            header.put(tmpBuf, 0, rest);
            if (isRemove) {
                inputBuffer.remove();
            }
            break;
        } else {
            header.put(currentBuffer);
            inputBuffer.remove();
        }
    }
    // Flip the header so it can be read.
    header.flip();

    // Read the total packet length from the header, then allocate the packet buffer.
    Integer size = header.getInt(Packet.getLengthPosition());
    ByteBuffer buffer = ByteBuffer.allocate(size);
    buffer.put(header);
    header.clear();

    // Assemble the packet body from the queued buffers.
    while (true) {
        while ((currentBuffer = inputBuffer.peek()) == null) {
            endPoint.notifyRead(channel);
            Thread.sleep(1000);
        }
        isRemove = false;
        rest = buffer.capacity() - buffer.position();
        if (currentBuffer.remaining() >= rest) {
            if (currentBuffer.remaining() == rest) {
                isRemove = true;
            }
            currentBuffer.get(tmpBuf, 0, rest);
            buffer.put(tmpBuf, 0, rest);
            if (isRemove) {
                inputBuffer.remove();
            }
            break;
        } else {
            buffer.put(currentBuffer);
            inputBuffer.remove();
        }
    }
    // Flip the complete packet buffer before unmarshalling it.
    buffer.flip();
    Packet packet = Packet.unmarshall(buffer);
    return packet;
}
From source file:org.bytesoft.bytetcc.work.CleanupWork.java
public void forget(Xid xid, String resourceId) throws RuntimeException {
    byte[] globalTransactionId = xid.getGlobalTransactionId();
    byte[] branchQualifier = xid.getBranchQualifier();
    byte[] keyByteArray = resourceId.getBytes();

    byte sizeOfkeyByteArray = (byte) keyByteArray.length;
    if (sizeOfkeyByteArray > CONSTANTS_RES_ID_MAX_SIZE) {
        throw new IllegalStateException("The resource name is too long!");
    }

    byte[] resourceByteArray = new byte[CONSTANTS_RES_ID_MAX_SIZE];
    System.arraycopy(keyByteArray, 0, resourceByteArray, 0, keyByteArray.length);

    ByteBuffer buffer = ByteBuffer.allocate(1 + CONSTANTS_RECORD_SIZE);
    buffer.put((byte) 0x1);
    buffer.put(globalTransactionId);
    buffer.put(branchQualifier);
    buffer.put(resourceByteArray);

    buffer.flip();

    this.invokeForget(xid, resourceId, buffer);
}
From source file:it.geosolutions.opensdi2.service.impl.FileUploadServiceImpl.java
/**
 * Create a temporary file from a byte array.
 *
 * @param key of the file
 * @param bytes to write
 * @param i index for the file name
 * @return absolute path to the file
 * @throws IOException
 */
public String createTemporalFile(String key, byte[] bytes, int i) throws IOException {
    String filePath = temporaryFolder + File.separator + key;
    try {
        // write bytes
        File tmpFile = new File(filePath);
        if (LOGGER.isTraceEnabled()) {
            LOGGER.trace("Appending bytes to " + tmpFile.getAbsolutePath());
        }
        // File channel to append bytes
        @SuppressWarnings("resource")
        FileChannel channel = new FileOutputStream(tmpFile, true).getChannel();
        ByteBuffer buf = ByteBuffer.allocateDirect(bytes.length);
        // put bytes
        buf.put(bytes);
        // Flips this buffer. The limit is set to the current position and then
        // the position is set to zero. If the mark is defined then it is discarded.
        buf.flip();
        // Writes a sequence of bytes to this channel from the given buffer.
        channel.write(buf);
        // close the channel
        channel.close();
    } catch (IOException e) {
        LOGGER.error("Error writing file bytes", e);
    }
    return filePath;
}
From source file:org.apache.hama.monitor.fd.UDPSupervisor.java
@Override
public Object call() throws Exception {
    ByteBuffer packet = ByteBuffer.allocate(8);
    try {
        while (running.get()) {
            final InetSocketAddress source = (InetSocketAddress) channel.receive(packet);
            final String hostName = source.getHostName();
            // Flip to read the sequence number just received, then clear for the next datagram.
            packet.flip();
            final long seq = packet.getLong();
            packet.clear();
            if (LOG.isDebugEnabled()) {
                LOG.debug("Sequence: " + seq + " src host: " + hostName);
            }
            final Node tmpNode = new Node(hostName, WINDOW_SIZE.get());
            receiver.submit(new Hermes(tmpNode, seq, System.currentTimeMillis()));
        }
    } catch (IOException ioe) {
        LOG.error("Problem in receiving packet from channel.", ioe);
        Thread.currentThread().interrupt();
    } finally {
        if (null != this.channel)
            try {
                this.channel.socket().close();
                this.channel.close();
            } catch (IOException ioe) {
                LOG.error("Error closing supervisor channel.", ioe);
            }
    }
    return null;
}
From source file:de.tiqsolutions.hdfs.HadoopFileSystemProvider.java
@Override
public void copy(Path source, Path target, CopyOption... options) throws IOException {
    List<CopyOption> optionList = Arrays.asList(options);
    if (!optionList.contains(StandardCopyOption.REPLACE_EXISTING)) {
        if (Files.exists(target))
            throw new java.nio.file.FileAlreadyExistsException(source.toString(), target.toString(),
                    "could not copy file to destination");
    } else {
        Files.deleteIfExists(target);
    }

    FileSystem sourceFS = source.getFileSystem();
    FileSystem targetFS = target.getFileSystem();

    if (optionList.contains(HadoopCopyOption.REMOTE_COPY) && sourceFS.equals(targetFS)) {
        remoteCopy(source, target, options);
        return;
    }

    try (SeekableByteChannel sourceChannel = sourceFS.provider().newByteChannel(source,
            EnumSet.of(StandardOpenOption.READ))) {

        Set<StandardOpenOption> openOptions = EnumSet.of(StandardOpenOption.WRITE);
        if (optionList.contains(StandardCopyOption.REPLACE_EXISTING))
            openOptions.add(StandardOpenOption.CREATE);
        else
            openOptions.add(StandardOpenOption.CREATE_NEW);

        List<FileAttribute<?>> fileAttributes = new ArrayList<>();
        if (optionList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
            Set<String> sourceAttrViews = sourceFS.supportedFileAttributeViews();
            Set<String> targetAttrViews = targetFS.supportedFileAttributeViews();

            if (sourceAttrViews.contains(PosixFileAttributeViewImpl.NAME)
                    && targetAttrViews.contains(PosixFileAttributeViewImpl.NAME)) {
                PosixFileAttributes posixAttributes = sourceFS.provider().readAttributes(source,
                        PosixFileAttributes.class);
                fileAttributes.add(PosixFilePermissions.asFileAttribute(posixAttributes.permissions()));
            }

            if (sourceAttrViews.contains(HadoopFileAttributeViewImpl.NAME)
                    && targetAttrViews.contains(HadoopFileAttributeViewImpl.NAME)) {
                final HadoopFileAttributes hdfsAttributes = sourceFS.provider().readAttributes(source,
                        HadoopFileAttributes.class);
                fileAttributes.add(new FileAttribute<Long>() {
                    @Override
                    public String name() {
                        return HadoopFileAttributeViewImpl.NAME + ":blockSize";
                    }

                    @Override
                    public Long value() {
                        return hdfsAttributes.getBlockSize();
                    }
                });
                fileAttributes.add(new FileAttribute<Short>() {
                    @Override
                    public String name() {
                        return HadoopFileAttributeViewImpl.NAME + ":replication";
                    }

                    @Override
                    public Short value() {
                        return hdfsAttributes.getReplication();
                    }
                });
            }
        }

        FileAttribute<?>[] attributes = fileAttributes.toArray(new FileAttribute<?>[fileAttributes.size()]);

        try (SeekableByteChannel targetChannel = targetFS.provider().newByteChannel(target, openOptions,
                attributes)) {
            int buffSize = getConfiguration().getInt(DFSConfigKeys.DFS_STREAM_BUFFER_SIZE_KEY,
                    DFSConfigKeys.DFS_STREAM_BUFFER_SIZE_DEFAULT);
            ByteBuffer buffer = ByteBuffer.allocate(buffSize);
            buffer.clear();
            // Standard channel-to-channel copy loop: fill, flip, drain, clear.
            while (sourceChannel.read(buffer) > 0) {
                buffer.flip();
                targetChannel.write(buffer);
                buffer.clear();
            }
        }

        if (optionList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
            BasicFileAttributes attrs = sourceFS.provider().readAttributes(source, BasicFileAttributes.class);
            BasicFileAttributeView view = targetFS.provider().getFileAttributeView(target,
                    BasicFileAttributeView.class);
            view.setTimes(attrs.lastModifiedTime(), attrs.lastAccessTime(), attrs.creationTime());
        }
    }
}