List of usage examples for java.nio.ByteBuffer.allocateDirect
public static ByteBuffer allocateDirect(int capacity)
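Before the project examples below, here is a minimal, self-contained sketch of the pattern they all share (the class name and the 1024-byte capacity are illustrative only, not taken from any of the examples): allocate the direct buffer once, fill it, flip() before draining, and clear() before reuse.

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.charset.StandardCharsets;

public class AllocateDirectExample {
    public static void main(String[] args) {
        // Direct buffers live outside the Java heap and are comparatively expensive
        // to allocate, so they are normally created once and reused.
        ByteBuffer buffer = ByteBuffer.allocateDirect(1024);
        buffer.order(ByteOrder.nativeOrder()); // byte order matters when sharing with native code

        buffer.put("hello".getBytes(StandardCharsets.UTF_8)); // fill: position advances
        buffer.flip();                                        // drain mode: limit = position, position = 0
        byte[] out = new byte[buffer.remaining()];
        buffer.get(out);                                      // read back exactly what was written
        buffer.clear();                                       // reset for the next fill
        System.out.println(new String(out, StandardCharsets.UTF_8));
    }
}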
From source file:de.bluepair.sci.client.SHAUtils.java
public static <T> Map<String, String> sha512(Path path, Predicate<T> gard, T testValue,
        long blockSizePref, boolean forceBlockSize) {
    if (Files.notExists(path)) {
        return null;
    }
    MessageDigest md = getDigest();
    MessageDigest md1 = getDigest();
    if (!gard.test(testValue)) {
        return null;
    }
    long blockSize = blockSizePref;
    long size = -1;
    try {
        size = Files.size(path);
        if (!forceBlockSize) {
            // At most 10 block checksums, otherwise too many entries end up in the file.
            while (size / blockSize > 10) {
                blockSize += blockSizePref;
            }
        }
    } catch (IOException e) {
        blockSize = blockSizePref;
        return null;
    }
    Map<String, String> map = new HashMap<>();
    long lastStart = 0;
    long stepDown = blockSize;
    try (final SeekableByteChannel fileChannel = Files.newByteChannel(path, StandardOpenOption.READ)) {
        final ByteBuffer buffer = ByteBuffer.allocateDirect(8192);
        int last;
        do {
            if (!gard.test(testValue) || Files.notExists(path)) {
                return null;
            }
            buffer.clear();
            last = fileChannel.read(buffer);
            buffer.flip();
            md.update(buffer);
            // Feed the same bytes into the second digest: after md.update() the position
            // equals the limit, so flip() moves the position back to the start.
            buffer.flip();
            md1.update(buffer);
            if (last > 0) {
                stepDown -= last;
            }
            // On a 100 Mbit network roughly 5 MB are transferred per block, so an aborted
            // transfer can be resumed if the per-block sums are known.
            // Blocks are hashed in a similar overlapping fashion, e.g.:
            // 0-5  c1
            // 0-10 c2
            // 5-10 c3 ...
            if (stepDown <= 0 || (last <= 0)) {
                long len = (blockSize + Math.abs(stepDown));
                if (stepDown > 0) {
                    // Correction for the final block when last < 0 (end of file).
                    len = blockSize - stepDown;
                }
                stepDown = blockSize;
                map.put("sha512_" + lastStart + "_" + len, Hex.encodeHexString(md1.digest()));
                lastStart += len;
                md1.reset();
            }
        } while (last > 0);
    } catch (IOException ex) {
        Logger.getLogger(FileAnalysis.class.getName()).log(Level.SEVERE, null, ex);
        return null;
    }
    final byte[] sha512hash = md.digest();
    map.put("sha512", Hex.encodeHexString(sha512hash));
    return map;
}
From source file:org.apache.hadoop.io.ElasticByteBufferPool.java
@Override
public synchronized ByteBuffer getBuffer(boolean direct, int length) {
    TreeMap<Key, ByteBuffer> tree = getBufferTree(direct);
    Map.Entry<Key, ByteBuffer> entry = tree.ceilingEntry(new Key(length, 0));
    if (entry == null) {
        return direct ? ByteBuffer.allocateDirect(length) : ByteBuffer.allocate(length);
    }
    tree.remove(entry.getKey());
    return entry.getValue();
}
From source file:de.spqrinfo.cups4j.operations.IppOperation.java
/**
 * Gets the IPP header.
 *
 * @param url
 * @param map
 * @return IPP header
 * @throws UnsupportedEncodingException
 */
public ByteBuffer getIppHeader(URL url, Map<String, String> map) throws UnsupportedEncodingException {
    if (url == null) {
        logger.error("IppOperation.getIppHeader(): uri is null");
        return null;
    }
    ByteBuffer ippBuf = ByteBuffer.allocateDirect(bufferSize);
    ippBuf = IppTag.getOperation(ippBuf, operationID);
    ippBuf = IppTag.getUri(ippBuf, "printer-uri", stripPortNumber(url));
    if (map == null) {
        ippBuf = IppTag.getEnd(ippBuf);
        ippBuf.flip();
        return ippBuf;
    }
    ippBuf = IppTag.getNameWithoutLanguage(ippBuf, "requesting-user-name", map.get("requesting-user-name"));
    if (map.get("limit") != null) {
        int value = Integer.parseInt(map.get("limit"));
        ippBuf = IppTag.getInteger(ippBuf, "limit", value);
    }
    if (map.get("requested-attributes") != null) {
        String[] sta = map.get("requested-attributes").split(" ");
        if (sta != null) {
            ippBuf = IppTag.getKeyword(ippBuf, "requested-attributes", sta[0]);
            int l = sta.length;
            for (int i = 1; i < l; i++) {
                ippBuf = IppTag.getKeyword(ippBuf, null, sta[i]);
            }
        }
    }
    ippBuf = IppTag.getEnd(ippBuf);
    ippBuf.flip();
    return ippBuf;
}
From source file:org.cloudata.core.commitlog.pipe.BufferPool.java
public ByteBuffer[] getBuffer(int size) {
    ByteBuffer retBuffer = null;
    synchronized (bufferMap) {
        TreeSet<PoolEntry> entrySet = bufferMap.get(size);
        if (entrySet != null) {
            PoolEntry entry = entrySet.pollLast();
            if (entry != null) {
                retBuffer = entry.buffer;
            }
        }
    }
    if (retBuffer == null) {
        retBuffer = ByteBuffer.allocateDirect(size);
        poolMonitor.increaseAllocated(size);
    } else {
        poolMonitor.increaseUsed(size);
    }
    return new ByteBuffer[] { retBuffer };
}
From source file:org.zuinnote.hadoop.bitcoin.format.BitcoinBlockReader.java
/**
 * Create a BitcoinBlock reader that reads from the given stream and uses the given parameters for
 * configuration. Note that it is assumed that the validity of this configuration is checked by
 * BitcoinBlockRecordReader.
 *
 * @param in Input stream to read from
 * @param maxSizeBitcoinBlock Maximum size of a Bitcoin block.
 * @param bufferSize size of the memory buffer for the given InputStream
 * @param specificMagicByteArray filters by specific block magic numbers if not null.
 * @param useDirectBuffer experimental feature to use a DirectByteBuffer instead of a HeapByteBuffer
 **/
public BitcoinBlockReader(InputStream in, int maxSizeBitcoinBlock, int bufferSize,
        byte[][] specificMagicByteArray, boolean useDirectBuffer) {
    this.maxSizeBitcoinBlock = maxSizeBitcoinBlock;
    this.bufferSize = bufferSize;
    this.specificMagicByteArray = specificMagicByteArray;
    this.useDirectBuffer = useDirectBuffer;
    if (specificMagicByteArray != null) {
        this.filterSpecificMagic = true;
    }
    this.bin = new BufferedInputStream(in, bufferSize);
    if (this.useDirectBuffer) {
        // For a DirectByteBuffer we allocate only once, for the maximum size of one block;
        // otherwise repeated reallocation would be costly.
        preAllocatedDirectByteBuffer = ByteBuffer.allocateDirect(this.maxSizeBitcoinBlock);
    }
}
From source file:me.schiz.jmeter.ring.udp.sampler.UDPRingSampler.java
@Override
public SampleResult sample(Entry entry) {
    boolean idling = false;
    SampleResult newSampleResult = new SampleResult();
    newSampleResult.setSampleLabel(getName());
    ConcurrentLinkedQueue<SampleResult> queue = tlQueue.get();
    if (queue == null) {
        queue = new ConcurrentLinkedQueue<SampleResult>();
        tlQueue.set(queue);
    }
    Ring ring = UDPRingSourceElement.get(getSource());
    Token t;
    int tid = -1;
    byte[] request_in_bytes = new byte[0];
    ByteBuffer request = tlRequest.get();
    if (request == null) {
        request = tlBuffer.get();
        if (request == null) {
            request = ByteBuffer.allocateDirect(8 * 1024 * 1024);
            tlBuffer.set(request);
        }
        request.clear();
        if (isHex()) {
            try {
                request_in_bytes = Hex.decodeHex(getRequest().toCharArray());
            } catch (DecoderException e) {
                log.error("can't decode request", e);
                idling = true;
            }
        } else {
            request_in_bytes = getRequest().getBytes();
        }
        request.put(request_in_bytes);
    }
    if (!idling) {
        try {
            request.flip();
            while (tid == -1) {
                tid = ring.acquire();
            }
            t = ring.get(tid);
            t.lock.lock();
            if (isHex()) {
                t.ishex = true;
            }
            newSampleResult.sampleStart();
            try {
                //t.socketChannel.write(request);
                t.sampleResult = newSampleResult;
                t.queue = queue;
                ring.write(tid, request);
                request.clear();
                newSampleResult.setSuccessful(true);
            } catch (IOException e) {
                newSampleResult.setSuccessful(false);
                ring.reset(tid);
                log.warn("IOException", e);
            } finally {
                t.lock.unlock();
            }
        } catch (Exception e) {
            log.error("Exception", e);
            newSampleResult.setSuccessful(false);
            newSampleResult.setResponseCode(e.getClass().getName());
            while (!queue.offer(newSampleResult)) {
            }
            if (tid != -1) {
                ring.reset(tid);
            }
        } finally {
            newSampleResult.setRequestHeaders(getRequest());
        }
    }
    SampleResult sampleResult = queue.poll();
    return sampleResult;
}
From source file:com.rinke.solutions.pinball.io.UsbConnector.java
@Override
protected void send(byte[] data, ConnectionHandle handle) {
    IntBuffer transfered = IntBuffer.allocate(1);
    ByteBuffer buffer = ByteBuffer.allocateDirect(data.length);
    buffer.put(data);
    UsbHandle usb = (UsbHandle) handle;
    // Use device handle here
    int res = LibUsb.bulkTransfer(usb.getDeviceHandle(), (byte) 0x01, buffer, transfered, 4000);
    if (res != LibUsb.SUCCESS) {
        throw new LibUsbException("Bulk transfer failed", res);
    }
    // Read the transferred byte count by index so the buffer position is not consumed twice.
    if (transfered.get(0) != data.length) {
        log.error("unexpected length returned on bulk: {}", transfered.get(0));
    }
}
From source file:org.apache.hadoop.io.compress.lzma.LzmaCompressor.java
/**
 * Creates a new compressor using the specified compression level {@link CompressionLevel}.
 *
 * @param compress_level lzma compression level to use
 * @param directBufferSize size of the direct buffer to be used.
 */
public LzmaCompressor(int compress_level, int directBufferSize) {
    this.level = compress_level;
    this.directBufferSize = directBufferSize;
    uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
    compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
    // Mark the compressed buffer as fully consumed until the first compression run fills it.
    compressedDirectBuf.position(directBufferSize);
    stream = init(this.level);
}
From source file:rb.app.GLObject.java
public void allocateBuffer() {
    int SHORT_MAX = 250000;
    int FLOAT_MAX = 1000000;

    Log.d("GLRenderer", "Allocate (short):" + SHORT_MAX * 2 + " bytes");
    ByteBuffer vbb = ByteBuffer.allocateDirect(SHORT_MAX * 2);
    vbb.order(ByteOrder.nativeOrder());
    _shortBuffer = vbb.asShortBuffer();
    _shortBuffer.position(0);

    Log.d("GLRenderer", "Allocate (float):" + FLOAT_MAX * 4 + " bytes");
    ByteBuffer fbb = ByteBuffer.allocateDirect(FLOAT_MAX * 4);
    fbb.order(ByteOrder.nativeOrder());
    _floatBuffer = fbb.asFloatBuffer();
    _floatBuffer.position(0);
}
From source file:xbird.server.services.RemotePagingService.java
public RemotePagingService() {
    super(SRV_NAME);
    this._fdCacheMap = new FinalizableSoftValueReferenceMap<String, FileChannel>(
            new ReferentFinalizer<FileChannel>() {
                public void finalize(FileChannel reclaimed) {
                    IOUtils.closeQuietly(reclaimed);
                }
            });
    this._directoryCache = new ReferenceMap<String, IDescriptor>(ReferenceType.STRONG, ReferenceType.SOFT);
    if (SystemUtils.isSendfileSupported()) {
        this._sndBufSegm = null;
        this._sndBufDTM = null;
    } else {
        this._sndBufSegm = ByteBuffer.allocateDirect(SND_BUFSIZE);
        _sndBufSegm.order(ByteOrder.BIG_ENDIAN);
        this._sndBufDTM = ByteBuffer.allocateDirect(SND_BUFSIZE);
        _sndBufDTM.order(ByteOrder.LITTLE_ENDIAN);
    }
}