List of usage examples for java.nio.ByteBuffer.flip()
public final Buffer flip()
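A quick refresher before the examples: flip() sets the buffer's limit to the current position and resets the position to zero, switching a buffer that was just written (or filled from a channel) into read mode. Below is a minimal, self-contained sketch of the write-then-flip-then-read cycle; the class and variable names are illustrative, not taken from any of the projects listed here:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class FlipDemo {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(64);

        // Writing phase: position advances, limit stays at capacity.
        buffer.put("hello".getBytes(StandardCharsets.UTF_8));

        // flip() sets limit = position and position = 0:
        // the bytes just written become the readable region.
        buffer.flip();

        // Reading phase: drain exactly the bytes that were written.
        byte[] out = new byte[buffer.remaining()];
        buffer.get(out);
        System.out.println(new String(out, StandardCharsets.UTF_8)); // hello
    }
}

Every example that follows is a variation on this cycle: fill a buffer, flip it, then write it to a channel, decode it, or compare it.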
From source file:com.google.cloud.hadoop.gcsio.GoogleCloudStorageIntegrationHelper.java
/**
 * Helper that reads text from the given file at the given offset
 * and returns it. If checkOverflow is true, it will make sure that
 * no more than 'len' bytes were read.
 */
protected String readTextFile(String bucketName, String objectName, int offset, int len, boolean checkOverflow)
        throws IOException {
    String text = null;
    SeekableReadableByteChannel readChannel = null;
    try {
        int bufferSize = len;
        bufferSize += checkOverflow ? 1 : 0;
        ByteBuffer readBuffer = ByteBuffer.allocate(bufferSize);
        readChannel = open(bucketName, objectName);
        if (offset > 0) {
            readChannel.position(offset);
        }
        int numBytesRead = readChannel.read(readBuffer);
        Assert.assertEquals("readTextFile: read size mismatch", len, numBytesRead);
        readBuffer.flip();
        text = StandardCharsets.UTF_8.decode(readBuffer).toString();
    } finally {
        if (readChannel != null) {
            readChannel.close();
        }
    }
    return text;
}
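Note the overflow trick: with checkOverflow set, the buffer is allocated one byte larger than len, so a source holding more than len bytes past the offset can show up as numBytesRead == len + 1 and trip the assertion. flip() then bounds the UTF-8 decode to exactly the bytes that were read, not the full backing array.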
From source file:hornet.framework.clamav.service.ClamAVCheckService.java
/**
 * Reads the file and sends it over the socket.
 *
 * @param resultat
 *            resultat
 * @param fileForTestSize
 *            fileForTestSize
 * @param channel
 *            channel
 * @param bufFileForTestRead
 *            bufFileForTestRead
 * @throws IOException
 *             IOException
 */
protected void readAndSendFile(final StringBuilder resultat, final long fileForTestSize,
        final SocketChannel channel, final MappedByteBuffer bufFileForTestRead) throws IOException {
    // Send the command
    final ByteBuffer writeReadBuffer = ByteBuffer.allocate(BUFFER_SIZE);
    writeReadBuffer.put(ClamAVCheckService.COMMANDE.getBytes(UTF_8));
    writeReadBuffer.put(this.intToByteArray((int) fileForTestSize));
    writeReadBuffer.flip();
    channel.write(writeReadBuffer);

    // Send the file
    long size = fileForTestSize;
    while (size > 0) {
        size -= channel.write(bufFileForTestRead);
    }

    // Send the terminating four zero bytes
    final ByteBuffer writeBuffer = ByteBuffer.allocate(4);
    writeBuffer.put(new byte[] { 0, 0, 0, 0 });
    writeBuffer.flip();
    channel.write(writeBuffer);

    // Read the response
    ByteBuffer readBuffer = ByteBuffer.allocate(BUFFER_SIZE);
    readBuffer.clear();
    boolean readLine = false;
    while (!readLine) {
        final int numReaden = channel.read(readBuffer);
        if (numReaden > 0) {
            readLine = readBuffer.get(numReaden - 1) == '\n';
            resultat.append(new String(readBuffer.array(), 0, numReaden, UTF_8));
            readBuffer.clear();
        } else if (numReaden == -1) {
            readLine = true;
            readBuffer.clear();
        }
    }
}
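The trailing four zero bytes resemble the zero-length chunk that clamd's INSTREAM protocol uses as an end-of-stream marker; that reading is an inference from the command constant, not something this snippet confirms. The flip()-relevant pattern is that each small buffer is filled with put() and flipped before channel.write(), so only the bytes actually written are sent.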
From source file:org.apache.kylin.storage.hbase.cube.v2.CubeHBaseEndpointRPC.java
@SuppressWarnings("checkstyle:methodlength") @Override//from www. j av a 2 s . co m public IGTScanner getGTScanner(final GTScanRequest scanRequest) throws IOException { final String toggle = BackdoorToggles.getCoprocessorBehavior() == null ? CoprocessorBehavior.SCAN_FILTER_AGGR_CHECKMEM.toString() : BackdoorToggles.getCoprocessorBehavior(); logger.debug("New scanner for current segment {} will use {} as endpoint's behavior", cubeSeg, toggle); Pair<Short, Short> shardNumAndBaseShard = getShardNumAndBaseShard(); short shardNum = shardNumAndBaseShard.getFirst(); short cuboidBaseShard = shardNumAndBaseShard.getSecond(); int totalShards = cubeSeg.getTotalShards(); ByteString scanRequestByteString = null; ByteString rawScanByteString = null; // primary key (also the 0th column block) is always selected final ImmutableBitSet selectedColBlocks = scanRequest.getSelectedColBlocks().set(0); // globally shared connection, does not require close final HConnection conn = HBaseConnection.get(cubeSeg.getCubeInstance().getConfig().getStorageUrl()); final List<IntList> hbaseColumnsToGTIntList = Lists.newArrayList(); List<List<Integer>> hbaseColumnsToGT = getHBaseColumnsGTMapping(selectedColBlocks); for (List<Integer> list : hbaseColumnsToGT) { hbaseColumnsToGTIntList.add(IntList.newBuilder().addAllInts(list).build()); } //TODO: raw scan can be constructed at region side to reduce traffic List<RawScan> rawScans = preparedHBaseScans(scanRequest.getGTScanRanges(), selectedColBlocks); int rawScanBufferSize = BytesSerializer.SERIALIZE_BUFFER_SIZE; while (true) { try { ByteBuffer rawScanBuffer = ByteBuffer.allocate(rawScanBufferSize); BytesUtil.writeVInt(rawScans.size(), rawScanBuffer); for (RawScan rs : rawScans) { RawScan.serializer.serialize(rs, rawScanBuffer); } rawScanBuffer.flip(); rawScanByteString = HBaseZeroCopyByteString.wrap(rawScanBuffer.array(), rawScanBuffer.position(), rawScanBuffer.limit()); break; } catch (BufferOverflowException boe) { logger.info("Buffer size {} cannot hold the raw scans, resizing to 4 times", rawScanBufferSize); rawScanBufferSize *= 4; } } scanRequest.setGTScanRanges(Lists.<GTScanRange>newArrayList());//since raw scans are sent to coprocessor, we don't need to duplicate sending it int scanRequestBufferSize = BytesSerializer.SERIALIZE_BUFFER_SIZE; while (true) { try { ByteBuffer buffer = ByteBuffer.allocate(scanRequestBufferSize); GTScanRequest.serializer.serialize(scanRequest, buffer); buffer.flip(); scanRequestByteString = HBaseZeroCopyByteString.wrap(buffer.array(), buffer.position(), buffer.limit()); break; } catch (BufferOverflowException boe) { logger.info("Buffer size {} cannot hold the scan request, resizing to 4 times", scanRequestBufferSize); scanRequestBufferSize *= 4; } } logger.debug("Serialized scanRequestBytes {} bytes, rawScanBytesString {} bytes", scanRequestByteString.size(), rawScanByteString.size()); logger.info( "The scan {} for segment {} is as below with {} separate raw scans, shard part of start/end key is set to 0", Integer.toHexString(System.identityHashCode(scanRequest)), cubeSeg, rawScans.size()); for (RawScan rs : rawScans) { logScan(rs, cubeSeg.getStorageLocationIdentifier()); } logger.debug("Submitting rpc to {} shards starting from shard {}, scan range count {}", shardNum, cuboidBaseShard, rawScans.size()); final AtomicInteger totalScannedCount = new AtomicInteger(0); final ExpectedSizeIterator epResultItr = new ExpectedSizeIterator(shardNum); // KylinConfig: use env instance instead of CubeSegment, because KylinConfig will share among queries // 
for different cubes until redeployment of coprocessor jar. final KylinConfig kylinConfig = KylinConfig.getInstanceFromEnv(); final boolean compressionResult = kylinConfig.getCompressionResult(); final CubeVisitProtos.CubeVisitRequest.Builder builder = CubeVisitProtos.CubeVisitRequest.newBuilder(); builder.setGtScanRequest(scanRequestByteString).setHbaseRawScan(rawScanByteString); for (IntList intList : hbaseColumnsToGTIntList) { builder.addHbaseColumnsToGT(intList); } builder.setRowkeyPreambleSize(cubeSeg.getRowKeyPreambleSize()); builder.setBehavior(toggle); builder.setStartTime(System.currentTimeMillis()); builder.setTimeout(epResultItr.getTimeout()); builder.setKylinProperties(kylinConfig.getConfigAsString()); for (final Pair<byte[], byte[]> epRange : getEPKeyRanges(cuboidBaseShard, shardNum, totalShards)) { executorService.submit(new Runnable() { @Override public void run() { final String logHeader = "<sub-thread for GTScanRequest " + Integer.toHexString(System.identityHashCode(scanRequest)) + "> "; final boolean[] abnormalFinish = new boolean[1]; try { HTableInterface table = conn.getTable(cubeSeg.getStorageLocationIdentifier(), HBaseConnection.getCoprocessorPool()); final CubeVisitRequest request = builder.build(); final byte[] startKey = epRange.getFirst(); final byte[] endKey = epRange.getSecond(); table.coprocessorService(CubeVisitService.class, startKey, endKey, // new Batch.Call<CubeVisitService, CubeVisitResponse>() { public CubeVisitResponse call(CubeVisitService rowsService) throws IOException { ServerRpcController controller = new ServerRpcController(); BlockingRpcCallback<CubeVisitResponse> rpcCallback = new BlockingRpcCallback<>(); rowsService.visitCube(controller, request, rpcCallback); CubeVisitResponse response = rpcCallback.get(); if (controller.failedOnException()) { throw controller.getFailedOn(); } return response; } }, new Batch.Callback<CubeVisitResponse>() { @Override public void update(byte[] region, byte[] row, CubeVisitResponse result) { if (region == null) return; totalScannedCount.addAndGet(result.getStats().getScannedRowCount()); logger.info(logHeader + getStatsString(region, result)); if (result.getStats().getNormalComplete() != 1) { abnormalFinish[0] = true; return; } try { if (compressionResult) { epResultItr .append(CompressionUtils.decompress(HBaseZeroCopyByteString .zeroCopyGetBytes(result.getCompressedRows()))); } else { epResultItr.append(HBaseZeroCopyByteString .zeroCopyGetBytes(result.getCompressedRows())); } } catch (IOException | DataFormatException e) { throw new RuntimeException(logHeader + "Error when decompressing", e); } } }); } catch (Throwable ex) { logger.error(logHeader + "Error when visiting cubes by endpoint", ex); // double log coz the query thread may already timeout epResultItr.notifyCoprocException(ex); return; } if (abnormalFinish[0]) { Throwable ex = new RuntimeException(logHeader + "The coprocessor thread stopped itself due to scan timeout, failing current query..."); logger.error(logHeader + "Error when visiting cubes by endpoint", ex); // double log coz the query thread may already timeout epResultItr.notifyCoprocException(ex); return; } } }); } return new EndpointResultsAsGTScanner(fullGTInfo, epResultItr, scanRequest.getColumns(), totalScannedCount.get()); }
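The two while (true) loops show a common serialize-with-retry idiom: serialize into a fixed-size buffer and, on BufferOverflowException, quadruple the buffer and try again. After a successful flip(), position() is 0 and limit() is the number of serialized bytes, which is exactly the (offset, length) pair handed to HBaseZeroCopyByteString.wrap() to avoid copying the array.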
From source file:org.apache.bookkeeper.bookie.BookieJournalTest.java
/**
 * Generate meta entry with given master key.
 */
private ByteBuffer generateMetaEntry(long ledgerId, byte[] masterKey) {
    ByteBuffer bb = ByteBuffer.allocate(8 + 8 + 4 + masterKey.length);
    bb.putLong(ledgerId);
    bb.putLong(Bookie.METAENTRY_ID_LEDGER_KEY);
    bb.putInt(masterKey.length);
    bb.put(masterKey);
    bb.flip();
    return bb;
}
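Because the buffer is allocated at exactly 8 + 8 + 4 + masterKey.length bytes and filled completely, flip() leaves position at 0 and limit equal to capacity, so the returned buffer is immediately ready for a channel write with no wasted tail.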
From source file:com.tongbanjie.tarzan.rpc.protocol.RpcCommand.java
public ByteBuffer encode() throws RpcCommandException {
    /******* compute total length *******/
    // 1> protocol type size
    int length = Protocol.PROTOCOL_TYPE_SIZE;

    // 2> header length size
    length += Protocol.HEADER_LENGTH_SIZE;

    // 3> header data length
    byte[] headerData = this.headerEncode();
    length += headerData.length;

    // 4> body data length
    if (this.body != null) {
        length += body.length;
    }

    /******* assemble the ByteBuffer *******/
    ByteBuffer result = ByteBuffer.allocate(Protocol.TOTAL_LENGTH_SIZE + length);

    // 0) total length
    result.putInt(length);

    // 1) protocol type
    result.put(markProtocolType(serializeType));

    // 2) header length
    result.putInt(headerData.length);

    // 3) header data
    result.put(headerData);

    // 4) body data
    if (this.body != null) {
        result.put(this.body);
    }

    result.flip();
    return result;
}
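This is a typical length-prefixed wire format: the total length is computed first, the buffer is allocated to the exact size, the frame is written field by field, and flip() converts the buffer to read mode so the caller can hand it straight to a channel or encoder.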
From source file:org.alfresco.provision.ActiveMQService.java
private BrokerStats getBrokerStats() throws IOException {
    BrokerStats brokerStats = new BrokerStats();

    StringBuilder sb = new StringBuilder("http://");
    sb.append(activeMQHost);
    sb.append(":");
    sb.append(activeMQPort);
    sb.append("/api/jolokia");
    String url = sb.toString();

    CloseableHttpResponse httpResponse = null;
    HttpPost httpPost = new HttpPost(url);
    Request[] post = new Request[] {
            new Request("read", "org.apache.activemq:type=Broker,brokerName=localhost", "MemoryPercentUsage"),
            new Request("read", "org.apache.activemq:type=Broker,brokerName=localhost", "StorePercentUsage"),
            new Request("read", "org.apache.activemq:type=Broker,brokerName=localhost", "TempPercentUsage") };
    String str = mapper.writeValueAsString(post);
    HttpEntity postEntity = new StringEntity(str);
    httpPost.setEntity(postEntity);

    httpResponse = client.execute(httpPost);

    StatusLine status = httpResponse.getStatusLine();
    // Expecting "OK" status
    if (status.getStatusCode() == HttpStatus.SC_OK) {
        HttpEntity entity = httpResponse.getEntity();
        InputStream in = entity.getContent();
        try {
            ByteBuffer bb = ByteBuffer.allocate(1024 * 10);
            ReadableByteChannel inChannel = Channels.newChannel(in);
            int read = -1;
            do {
                read = inChannel.read(bb);
            } while (read != -1);
            bb.flip();

            Response[] response = mapper.readValue(bb.array(), Response[].class);
            for (Response r : response) {
                if (r.getRequest().getAttribute().equals("MemoryPercentUsage")) {
                    double memoryPercentUsage = r.getValue() != null ? r.getValue() : 0.0;
                    brokerStats.withMemoryPercentUsage(memoryPercentUsage);
                } else if (r.getRequest().getAttribute().equals("StorePercentUsage")) {
                    double storePercentUsage = r.getValue() != null ? r.getValue() : 0.0;
                    brokerStats.withStorePercentUsage(storePercentUsage);
                } else if (r.getRequest().getAttribute().equals("TempPercentUsage")) {
                    double tempPercentUsage = r.getValue() != null ? r.getValue() : 0.0;
                    brokerStats.withTempPercentUsage(tempPercentUsage);
                }
            }
        } finally {
            if (in != null) {
                in.close();
            }
        }
    } else {
        // TODO
    }

    return brokerStats;
}
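A caveat worth noticing: the read loop assumes the whole Jolokia response fits in the fixed 10 KB buffer; once the buffer has no remaining space, channel reads can return 0 and stop making progress. Also, mapper.readValue(bb.array(), ...) parses the entire backing array rather than just the bytes up to the flipped limit; a tighter call, assuming a standard Jackson ObjectMapper here, would be mapper.readValue(bb.array(), 0, bb.limit(), Response[].class).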
From source file:org.apache.hc.client5.http.impl.auth.CredSspScheme.java
private String wrapHandshake() throws AuthenticationException {
    final ByteBuffer src = allocateOutBuffer();
    src.flip();
    final SSLEngine sslEngine = getSSLEngine();
    final SSLSession sslSession = sslEngine.getSession();
    // Needs to be twice the size as there may be two wraps during handshake.
    // Primitive and inefficient solution, but it works.
    final ByteBuffer dst = ByteBuffer.allocate(sslSession.getPacketBufferSize() * 2);
    while (sslEngine.getHandshakeStatus() == HandshakeStatus.NEED_WRAP) {
        wrap(src, dst);
    }
    dst.flip();
    return encodeBase64(dst);
}
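flip() appears twice with two distinct roles: the first turns the freshly filled outgoing buffer into a readable source for SSLEngine.wrap(), and the second turns the wrap destination into a readable source for Base64 encoding. Flipping before every change of direction is the core discipline of SSLEngine buffer handling.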
From source file:net.librec.data.convertor.TextDataConvertor.java
/**
 * Read data from the data file. Note that we didn't take care of the
 * duplicated lines.
 *
 * @param dataColumnFormat
 *            the format of input data file
 * @param inputDataPath
 *            the path of input data file
 * @param binThold
 *            the threshold to binarize a rating. If a rating is greater
 *            than the threshold, the value will be 1; otherwise 0. To
 *            disable this appender, i.e., keep the original rating value,
 *            set the threshold to a negative value
 * @throws IOException
 *             if the <code>inputDataPath</code> is not valid.
 */
private void readData(String dataColumnFormat, String inputDataPath, double binThold) throws IOException {
    LOG.info(String.format("Dataset: %s", StringUtil.last(inputDataPath, 38)));

    // Table {row-id, col-id, rate}
    Table<Integer, Integer, Double> dataTable = HashBasedTable.create();
    // Table {row-id, col-id, timestamp}
    Table<Integer, Integer, Long> timeTable = null;
    // Map {col-id, multiple row-id}: used to fast build a rating matrix
    Multimap<Integer, Integer> colMap = HashMultimap.create();
    // BiMap {raw id, inner id} userIds, itemIds
    if (this.userIds == null) {
        this.userIds = HashBiMap.create();
    }
    if (this.itemIds == null) {
        this.itemIds = HashBiMap.create();
    }

    final List<File> files = new ArrayList<File>();
    final ArrayList<Long> fileSizeList = new ArrayList<Long>();
    SimpleFileVisitor<Path> finder = new SimpleFileVisitor<Path>() {
        @Override
        public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
            fileSizeList.add(file.toFile().length());
            files.add(file.toFile());
            return super.visitFile(file, attrs);
        }
    };
    Files.walkFileTree(Paths.get(inputDataPath), finder);
    LOG.info("All dataset files " + files.toString());

    long allFileSize = 0;
    for (Long everyFileSize : fileSizeList) {
        allFileSize = allFileSize + everyFileSize.longValue();
    }
    LOG.info("All dataset files size " + Long.toString(allFileSize));

    int readingFileCount = 0;
    long loadAllFileByte = 0;
    // loop over every data file collected by walkFileTree
    for (File dataFile : files) {
        LOG.info("Now loading dataset file " + dataFile.toString().substring(
                dataFile.toString().lastIndexOf(File.separator) + 1, dataFile.toString().lastIndexOf(".")));
        readingFileCount += 1;
        loadFilePathRate = readingFileCount / (float) files.size();
        long readingOneFileByte = 0;
        FileInputStream fis = new FileInputStream(dataFile);
        FileChannel fileRead = fis.getChannel();
        ByteBuffer buffer = ByteBuffer.allocate(BSIZE);
        int len;
        String bufferLine = new String();
        byte[] bytes = new byte[BSIZE];
        while ((len = fileRead.read(buffer)) != -1) {
            readingOneFileByte += len;
            loadDataFileRate = readingOneFileByte / (float) fileRead.size();
            loadAllFileByte += len;
            loadAllFileRate = loadAllFileByte / (float) allFileSize;
            buffer.flip();
            buffer.get(bytes, 0, len);
            bufferLine = bufferLine.concat(new String(bytes, 0, len));
            bufferLine = bufferLine.replaceAll("\r", "\n");
            String[] bufferData = bufferLine.split("(\n)+");
            boolean isComplete = bufferLine.endsWith("\n");
            int loopLength = isComplete ? bufferData.length : bufferData.length - 1;
            for (int i = 0; i < loopLength; i++) {
                String line = new String(bufferData[i]);
                String[] data = line.trim().split("[ \t,]+");
                String user = data[0];
                String item = data[1];
                Double rate = ((dataColumnFormat.equals("UIR") || dataColumnFormat.equals("UIRT"))
                        && data.length >= 3) ? Double.valueOf(data[2]) : 1.0;

                // binarize the rating for item recommendation task
                if (binThold >= 0) {
                    rate = rate > binThold ? 1.0 : 0.0;
                }

                // inner id starting from 0
                int row = userIds.containsKey(user) ? userIds.get(user) : userIds.size();
                userIds.put(user, row);

                int col = itemIds.containsKey(item) ? itemIds.get(item) : itemIds.size();
                itemIds.put(item, col);

                dataTable.put(row, col, rate);
                colMap.put(col, row);

                // record rating's issuing time
                if (StringUtils.equals(dataColumnFormat, "UIRT") && data.length >= 4) {
                    if (timeTable == null) {
                        timeTable = HashBasedTable.create();
                    }
                    // convert to milliseconds
                    long mms = 0L;
                    try {
                        mms = Long.parseLong(data[3]); // cannot parse values like 9.7323480e+008
                    } catch (NumberFormatException e) {
                        mms = (long) Double.parseDouble(data[3]);
                    }
                    long timestamp = timeUnit.toMillis(mms);
                    timeTable.put(row, col, timestamp);
                }
            }
            if (!isComplete) {
                bufferLine = bufferData[bufferData.length - 1];
            }
            buffer.clear();
        }
        fileRead.close();
        fis.close();
    }

    int numRows = numUsers(), numCols = numItems();
    // build rating matrix
    preferenceMatrix = new SparseMatrix(numRows, numCols, dataTable, colMap);
    if (timeTable != null)
        datetimeMatrix = new SparseMatrix(numRows, numCols, timeTable, colMap);
    // release memory of data table
    dataTable = null;
    timeTable = null;
}
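This is the canonical fill-flip-drain-clear loop on a FileChannel: read() fills the buffer, flip() exposes exactly len readable bytes, get() drains them, and clear() resets the buffer for the next read. The partial last line of each chunk is carried over in bufferLine, so records split across buffer boundaries are reassembled correctly.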
From source file:org.alfresco.contentstore.ContentStoreTest.java
private void assertFileEquals(InputStream expected, InputStream actual, State state) throws IOException {
    ByteBuffer bb1 = ByteBuffer.allocate(1024);
    ByteBuffer bb2 = ByteBuffer.allocate(1024);

    int count1 = 0;
    int count2 = 0;

    try (ReadableByteChannel channel = Channels.newChannel(expected);
            ReadableByteChannel channel1 = Channels.newChannel(actual)) {
        int i1 = channel.read(bb1);
        bb1.flip();

        int i2 = channel1.read(bb2);
        bb2.flip();

        if (i1 == i2) {
            count1 += i1;
            count2 += i2;
            assertTrue("Not equal at " + state, bb1.equals(bb2));
        } else {
            fail("Not equal at " + state);
        }
    }
}
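ByteBuffer.equals() compares only the remaining bytes (position to limit), which is why both buffers are flipped before the comparison. Note, though, that this helper performs a single read of at most 1024 bytes from each stream, so it effectively compares only the leading chunk of larger inputs.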
From source file:com.kactech.otj.Utils.java
public static ByteBuffer seal(String msg, String nymID, PublicKey nymKey, SecretKeySpec aesSecret,
        IvParameterSpec vector) throws InvalidKeyException, InvalidAlgorithmParameterException,
        IllegalBlockSizeException, BadPaddingException {
    ByteBuffer buff = ByteBuffer.allocate(msg.length() + 500); // donno?
    buff.order(ByteOrder.BIG_ENDIAN);
    buff.putShort((short) 1); // asymmetric
    buff.putInt(1); // array size
    buff.putInt(nymID.length() + 1);
    buff.put(bytes(nymID + '\0', US_ASCII));

    // create encoded key and message
    Cipher cipher;
    try {
        cipher = Cipher.getInstance("AES/CBC/PKCS5Padding");
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    cipher.init(Cipher.ENCRYPT_MODE, aesSecret, vector);
    byte[] encrypted = cipher.doFinal(bytes(msg + '\0', UTF8));

    try {
        cipher = Cipher.getInstance(WRAP_ALGO);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    cipher.init(Cipher.WRAP_MODE, nymKey);
    byte[] encKeyBytes = cipher.wrap(aesSecret);

    buff.putInt(encKeyBytes.length);
    buff.put(encKeyBytes);
    buff.putInt(vector.getIV().length);
    buff.put(vector.getIV());
    buff.put(encrypted);
    buff.flip();
    return buff;
}
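Here the final flip() is what makes the over-allocated buffer safe to return: position becomes 0 and limit marks the end of the sealed payload, so the roughly 500 bytes of headroom past the limit stay invisible to any caller that honors remaining() rather than capacity().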