List of usage examples for java.io.RandomAccessFile.getChannel()
public final FileChannel getChannel()
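Returns the unique FileChannel associated with this RandomAccessFile. The channel's position is always equal to the file's file-pointer offset, so reading, writing, or seeking through either object moves both, and closing the RandomAccessFile also closes the channel. Before the larger examples below, here is a minimal, self-contained sketch of typical usage; the file name "example.dat" and the class name GetChannelExample are placeholders for illustration only:

import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

public class GetChannelExample {
    public static void main(String[] args) throws Exception {
        // "example.dat" is a placeholder file name for this sketch
        try (RandomAccessFile raf = new RandomAccessFile("example.dat", "rw");
                FileChannel channel = raf.getChannel()) {
            // Write a few bytes through the channel
            channel.write(ByteBuffer.wrap("hello".getBytes()));
            // The channel and the RandomAccessFile share one position / file pointer
            channel.position(0);
            byte[] first = new byte[5];
            raf.readFully(first); // reads back the bytes just written
            System.out.println(new String(first));
        }
    }
}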
From source file:juicebox.tools.utils.original.Preprocessor.java
private void updateMasterIndex() throws IOException {
    RandomAccessFile raf = null;
    try {
        raf = new RandomAccessFile(outputFile, "rw");
        // Master index
        raf.getChannel().position(masterIndexPositionPosition);
        BufferedByteWriter buffer = new BufferedByteWriter();
        buffer.putLong(masterIndexPosition);
        raf.write(buffer.getBytes());
    } finally {
        if (raf != null)
            raf.close();
    }
}
From source file:com.mellanox.r4h.MiniDFSCluster.java
public static boolean corruptBlock(File blockFile) throws IOException {
    if (blockFile == null || !blockFile.exists()) {
        return false;
    }
    // Corrupt replica by writing random bytes into replica
    Random random = new Random();
    RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
    FileChannel channel = raFile.getChannel();
    String badString = "BADBAD";
    int rand = random.nextInt((int) channel.size() / 2);
    raFile.seek(rand);
    raFile.write(badString.getBytes());
    raFile.close();
    LOG.warn("Corrupting the block " + blockFile);
    return true;
}
From source file:com.yobidrive.diskmap.needles.NeedleManager.java
private void initializeNeedleFiles() throws NeedleManagerException {
    // Browse directory for needle files (won't find any if rebuild required)
    String files[] = logDir.list();
    if (files != null) {
        for (String file : files) {
            // Verifies file name pattern and extracts file number
            int extIndex = file.indexOf(EXTENSION);
            if (extIndex <= 0)
                continue;
            int fileNumber = -1;
            String hexString = file.substring(0, extIndex);
            try {
                fileNumber = Integer.parseInt(hexString, 16);
            } catch (Throwable th) {
                fileNumber = -1;
            }
            if (fileNumber < 0) {
                // Normal situation: non log files (including the bucket file)
                continue;
            }
            if (fileNumber > logNumber)
                logNumber = fileNumber;
            File needleFile = new File(logDir, file);
            if (!needleFile.canRead() || !needleFile.canWrite()) {
                logger.error("No read/write access to " + logPath + "/" + file);
                throw new NeedleManagerException();
            }
            RandomAccessFile needleRFile;
            try {
                needleRFile = new RandomAccessFile(needleFile, mode); // AutoCommit of content writes (rws), content + meta (rwd), or nothing (rw)
            } catch (Throwable th) {
                logger.error("File not found " + logPath + "/" + file);
                throw new NeedleManagerException();
            }
            FileChannel needleChannel = needleRFile.getChannel();
            channelMap.putIfAbsent(fileNumber, needleChannel);
        }
    }
}
From source file:com.yobidrive.diskmap.needles.NeedleManager.java
private FileChannel getChannel(int logNumber) throws NeedleManagerException {
    FileChannel fc = channelMap.get(new Integer(logNumber));
    if (fc == null) {
        // File Channel not created yet: create and keep
        String fileName = Integer.toHexString(logNumber);
        while (fileName.length() < 8)
            fileName = "0" + fileName;
        fileName = fileName + EXTENSION;
        File needleFile = new File(logDir, fileName);
        if (needleFile.exists() && needleFile.canRead()) {
            RandomAccessFile needleRFile;
            try {
                needleRFile = new RandomAccessFile(needleFile, mode); // AutoCommit of content writes (rws), content + meta (rwd), or nothing (rw)
            } catch (Throwable th) {
                logger.error("Needle log file not found " + logPath + "/" + fileName);
                throw new NeedleManagerException();
            }
            FileChannel needleChannel = needleRFile.getChannel();
            channelMap.putIfAbsent(logNumber, needleChannel);
        }
        return channelMap.get(new Integer(logNumber));
    } else {
        return fc;
    }
}
From source file:com.eincs.decanter.handler.StaticFileHandler.java
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
    if (!(e.getMessage() instanceof DecanterRequest)) {
        super.messageReceived(ctx, e);
        return;
    }
    final DecanterRequest request = (DecanterRequest) e.getMessage();
    final String path = request.getPath();
    if (!path.startsWith(this.path)) {
        super.messageReceived(ctx, e);
        return;
    }
    if (request.getMethod() != GET) {
        DecanterChannels.writeError(ctx, request, METHOD_NOT_ALLOWED);
        return;
    }
    final String subPath = path.substring(this.path.length());
    final String filePath = sanitizeUri(directory, subPath);
    if (filePath == null) {
        DecanterChannels.writeError(ctx, request, FORBIDDEN);
        return;
    }
    final File file = new File(filePath);
    if (file.isHidden() || !file.exists()) {
        DecanterChannels.writeError(ctx, request, NOT_FOUND);
        return;
    }
    if (!file.isFile()) {
        DecanterChannels.writeError(ctx, request, FORBIDDEN);
        return;
    }

    // Cache Validation
    String ifModifiedSince = request.getHeaders().get(IF_MODIFIED_SINCE);
    if (ifModifiedSince != null && ifModifiedSince.length() != 0) {
        Date ifModifiedSinceDate = HttpHeaderValues.parseDate(ifModifiedSince);
        // Only compare up to the second because the datetime format we send
        // to the client does not have milliseconds
        long ifModifiedSinceDateSeconds = ifModifiedSinceDate.getTime() / 1000;
        long fileLastModifiedSeconds = file.lastModified() / 1000;
        if (ifModifiedSinceDateSeconds == fileLastModifiedSeconds) {
            DecanterChannels.writeNotModified(ctx, request);
            return;
        }
    }

    RandomAccessFile raf;
    try {
        raf = new RandomAccessFile(file, "r");
    } catch (FileNotFoundException fnfe) {
        DecanterChannels.writeError(ctx, request, NOT_FOUND);
        return;
    }
    long fileLength = raf.length();

    // Add cache headers
    long timeMillis = System.currentTimeMillis();
    long expireMillis = timeMillis + HTTP_CACHE_SECONDS * 1000;

    HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);
    response.setHeader(CONTENT_TYPE, DecanterContentType.create(file));
    response.setHeader(CONTENT_LENGTH, String.valueOf(fileLength));
    response.setHeader(DATE, HttpHeaderValues.getCurrentDate());
    response.setHeader(EXPIRES, HttpHeaderValues.getDateString(expireMillis));
    response.setHeader(CACHE_CONTROL, "private, max-age=" + HTTP_CACHE_SECONDS);
    response.setHeader(LAST_MODIFIED, HttpHeaderValues.getDateString(file.lastModified()));

    Channel ch = e.getChannel();

    // Write the initial line and the header.
    ch.write(response);

    // Write the content.
    ChannelFuture writeFuture;
    if (ch.getPipeline().get(SslHandler.class) != null) {
        // Cannot use zero-copy with HTTPS.
        writeFuture = ch.write(new ChunkedFile(raf, 0, fileLength, 8192));
    } else {
        // No encryption - use zero-copy.
        final FileRegion region = new DefaultFileRegion(raf.getChannel(), 0, fileLength);
        writeFuture = ch.write(region);
        writeFuture.addListener(new ChannelFutureProgressListener() {
            public void operationComplete(ChannelFuture future) {
                region.releaseExternalResources();
            }

            public void operationProgressed(ChannelFuture future, long amount, long current, long total) {
                System.out.printf("%s: %d / %d (+%d)%n", filePath, current, total, amount);
            }
        });
    }
}
From source file:com.koda.integ.hbase.storage.FileExtStorage.java
@Override
public StorageHandle getData(StorageHandle storeHandle, ByteBuffer buf) {
    FileStorageHandle fsh = (FileStorageHandle) storeHandle;
    // Check if current file and offset > currentFileOffset
    int id = maxId.get();
    if (fsh.getId() > id || (fsh.getId() == id && fsh.getOffset() >= currentFileOffset.get())) {
        // not found
        buf.putInt(0, 0);
        return fsh;
    }
    RandomAccessFile file = getFile(fsh.getId()); // openFile(fsh.getId(), "r");
    boolean needSecondChance = needSecondChance(fsh.getId());
    try {
        if (file == null) {
            // return null
            buf.putInt(0, 0);
        } else {
            buf.clear();
            int toRead = fsh.getSize();
            buf.putInt(fsh.getSize());
            buf.limit(4 + toRead);
            try {
                FileChannel fc = file.getChannel();
                int total = 0;
                int c = 0;
                // offset start with overall object length .add +4
                int off = fsh.getOffset() + 4;
                while (total < toRead) {
                    c = fc.read(buf, off);
                    off += c;
                    if (c < 0) {
                        // return not found
                        buf.putInt(0, 0);
                        break;
                    }
                    total += c;
                }
            } catch (IOException e) {
                // return not found
                if (fsh.getId() > minId.get()) {
                    e.printStackTrace();
                }
                buf.putInt(0, 0);
            }
        }
        if (buf.getInt(0) != 0 && needSecondChance) {
            // store again
            fsh = (FileStorageHandle) storeData(buf);
        }
        return fsh;
    } finally {
        if (file != null) {
            // return file back
            // PUT we need for old version
            putFile(fsh.getId(), file);
        }
    }
}
From source file:org.commoncrawl.service.listcrawler.CrawlHistoryManager.java
/**
 * @return a sorted map of urlfp to item
 * @throws IOException
 */
TreeMap<URLFP, ProxyCrawlHistoryItem> loadLocalLogItemMap() throws IOException {
    TreeMap<URLFP, ProxyCrawlHistoryItem> itemMap = new TreeMap<URLFP, ProxyCrawlHistoryItem>();
    LOG.info("Reading Local Log File");
    RandomAccessFile file = new RandomAccessFile(getActiveLogFilePath(), "rw");
    // valid length indicator ...
    long validLength = 0;
    try {
        // skip header ...
        file.seek(LocalLogFileHeader.SIZE);
        validLength = file.getFilePointer();
        // ok walk n items ...
        for (int itemIdx = 0; itemIdx < _header._itemCount
                && file.getChannel().position() <= _header._fileSize; ++itemIdx) {
            try {
                ProxyCrawlHistoryItem item = readItem(file);
                // update valid length ...
                validLength = file.getFilePointer();
                // ok compute fingerprint for item ...
                URLFP fingerprintObject = URLUtils.getURLFPFromURL(item.getOriginalURL(), true);
                if (fingerprintObject == null) {
                    LOG.error("Could not compute fingerprint for URL:" + item.getOriginalURL());
                } else {
                    itemMap.put(fingerprintObject, item);
                }
            } catch (IOException e) {
                LOG.error(CCStringUtils.stringifyException(e));
                try {
                    if (!seekToNextSyncBytesPos(file)) {
                        LOG.error("Hit EOF While Seeking for next SyncByte Sequence!");
                        break;
                    } else {
                        LOG.info("Seek to Next SyncByte Succeeded! Continuing Load");
                    }
                } catch (IOException e2) {
                    LOG.error(CCStringUtils.stringifyException(e2));
                    LOG.error("Got IO Exception Reading SyncBytes - Bailing!");
                    break;
                }
            }
        }
    } finally {
        if (file.length() > validLength) {
            LOG.warn("File Length is:" + file.length() + " Truncating Length to:" + validLength);
            file.setLength(validLength);
        }
        file.close();
    }
    LOG.info("Done Reading Local Log File");
    return itemMap;
}
From source file:org.apache.hadoop.hive.llap.cache.BuddyAllocator.java
private ByteBuffer preallocateArenaBuffer(int arenaSize) {
    if (isMapped) {
        RandomAccessFile rwf = null;
        File rf = null;
        Preconditions.checkArgument(isDirect, "All memory mapped allocations have to be direct buffers");
        try {
            rf = File.createTempFile("arena-", ".cache", cacheDir.toFile());
            rwf = new RandomAccessFile(rf, "rw");
            rwf.setLength(arenaSize); // truncate (TODO: posix_fallocate?)
            // Use RW, not PRIVATE because the copy-on-write is irrelevant for a deleted file
            // see discussion in YARN-5551 for the memory accounting discussion
            ByteBuffer rwbuf = rwf.getChannel().map(MapMode.READ_WRITE, 0, arenaSize);
            return rwbuf;
        } catch (IOException ioe) {
            LlapIoImpl.LOG.warn("Failed trying to allocate memory mapped arena", ioe);
            // fail similarly when memory allocations fail
            throw new OutOfMemoryError("Failed trying to allocate memory mapped arena: " + ioe.getMessage());
        } finally {
            // A mapping, once established, is not dependent upon the file channel that was used to
            // create it. delete file and hold onto the map
            IOUtils.closeQuietly(rwf);
            if (rf != null) {
                rf.delete();
            }
        }
    }
    return isDirect ? ByteBuffer.allocateDirect(arenaSize) : ByteBuffer.allocate(arenaSize);
}
From source file:org.commoncrawl.service.listcrawler.CrawlList.java
/**
 * update list state of a recently crawled item
 *
 * @param fingerprint - the fingerprint of the updated item
 * @param newData - the updated crawl history data for the given item
 * @throws IOException
 */
@Override
public void updateItemState(URLFP fingerprint, ProxyCrawlHistoryItem newData) throws IOException {
    if (_listState == LoadState.LOADED) {
        // check for membership ...
        if (_bloomFilter.isPresent(fingerprint)) {
            //LOG.info("UpdateItemState Called for URL:" + newData.getOriginalURL() + " List:" + getListId());
            //LOG.info("UpdateItemState Loading OnDisk Item for URL:" + newData.getOriginalURL() + " List:" + getListId());
            // extract existing item from disk
            OnDiskCrawlHistoryItem originalItem = loadOnDiskItemForURLFP(fingerprint);
            // if present (null if false cache hit)
            if (originalItem != null) {
                // build an on disk item data structure for any potential changes ...
                OnDiskCrawlHistoryItem newItem = onDiskItemFromHistoryItem(fingerprint, newData);
                // set initial offset information
                newItem._fileOffset = originalItem._fileOffset;
                newItem._stringsOffset = originalItem._stringsOffset;
                // LOG.info("UpdateItemState Comparing OnDisk Item to New Item for URL:" + newData.getOriginalURL() + " List:" + getListId());
                // compare the two items ...
                if (!newItem.equals(originalItem)) {
                    //LOG.info("UpdateItemState Items Don't Match for URL:" + newData.getOriginalURL() + " List:" + getListId());
                    // ok items do not match ... figure out if strings are different ...
                    if (newItem._stringsCRC != originalItem._stringsCRC) {
                        RandomAccessFile stringsFile = new RandomAccessFile(_variableDataFile, "rw");
                        try {
                            // seek to end
                            stringsFile.seek(stringsFile.length());
                            // update offset info
                            newItem._stringsOffset = stringsFile.length();
                            // write out string data length
                            WritableUtils.writeVInt(stringsFile, _stringBuffer1.getLength());
                            // write strings to log file
                            stringsFile.write(_stringBuffer1.getData(), 0, _stringBuffer1.getLength());
                        } finally {
                            stringsFile.close();
                        }
                    }
                    // otherwise take the offset from old item
                    else {
                        newItem._stringsOffset = originalItem._stringsOffset;
                    }
                    //LOG.info("Opening Data File for OnDiskItem load for Fingerprint:" + newItem._urlFingerprint);
                    // ok, different paths depending on whether this is an in memory update or not ...
                    boolean wroteToMemory = false;
                    synchronized (this) {
                        if (_tempFixedDataBuffer != null) {
                            wroteToMemory = true;
                            // reset output buffer
                            _tempOutputBuffer.reset();
                            // serialize to output buffer
                            newItem.serialize(_tempOutputBuffer);
                            // copy to appropriate location
                            System.arraycopy(_tempOutputBuffer.getData(), 0, _tempFixedDataBuffer,
                                    (int) originalItem._fileOffset, OnDiskCrawlHistoryItem.ON_DISK_SIZE);
                        }
                    }
                    if (!wroteToMemory) {
                        // write to disk
                        RandomAccessFile file = new RandomAccessFile(_fixedDataFile, "rw");
                        try {
                            while (true) {
                                try {
                                    //LOG.info("*** TRYING UPDATE LOCK FOR OFFSET:" + originalItem._fileOffset);
                                    FileLock lock = file.getChannel().tryLock(originalItem._fileOffset,
                                            OnDiskCrawlHistoryItem.ON_DISK_SIZE, false);
                                    try {
                                        //LOG.info("*** GOT UPDATE LOCK FOR OFFSET:" + originalItem._fileOffset);
                                        file.seek(originalItem._fileOffset);
                                        newItem.serialize(file);
                                        //LOG.info("Updated Data File for OnDiskItem for Fingerprint:" + originalItem._urlFingerprint);
                                        break;
                                    } finally {
                                        //LOG.info("*** RELEASED UPDATE LOCK FOR OFFSET:" + originalItem._fileOffset);
                                        lock.release();
                                    }
                                } catch (OverlappingFileLockException e) {
                                    LOG.error("###LockConflict(RETRY):" + CCStringUtils.stringifyException(e));
                                }
                            }
                        } finally {
                            file.close();
                        }
                    }
                    // ok now update metadata ...
                    synchronized (_metadata) {
                        int updateFlags = calculateUpdateFlags(originalItem, newItem);
                        if (updateFlags != 0) {
                            int metadataDirtyFlags = updateMetadata(newItem, _metadata, 0);
                            // only write metadata to disk if temp data buffer is null
                            if (metadataDirtyFlags != 0 && !wroteToMemory) {
                                if ((metadataDirtyFlags & MetadataUpdateFlag_ModifiedCrawlStatus) != 0) {
                                    _metadata.setQueuedItemCount(_metadata.getQueuedItemCount() - 1);
                                }
                                writeMetadataToDisk();
                            }
                            // if not writing to memory then update subdomain metadata
                            if (!wroteToMemory) {
                                synchronized (_subDomainMetadataFile) {
                                    CrawlListMetadata subDomainMetadata = getSubDomainMetadataByURL(
                                            newData.getOriginalURL());
                                    int subDomainMetadataDirtyFlags = updateMetadata(newItem, subDomainMetadata,
                                            processFileOffsets);
                                    if (subDomainMetadataDirtyFlags != 0 && !wroteToMemory) {
                                        if ((subDomainMetadataDirtyFlags & MetadataUpdateFlag_ModifiedCrawlStatus) != 0) {
                                            subDomainMetadata.setQueuedItemCount(
                                                    subDomainMetadata.getQueuedItemCount() - 1);
                                        }
                                        writeSubDomainMetadataToDisk(subDomainMetadata);
                                    }
                                }
                            }
                        }
                    }
                    synchronized (this) {
                        if (_eventListener != null) {
                            _eventListener.itemUpdated(fingerprint);
                        }
                    }
                }
            }
        }
    }
}
From source file:io.minio.MinioClient.java
/**
 * Creates Request object for given request parameters.
 *
 * @param method        HTTP method.
 * @param bucketName    Bucket name.
 * @param objectName    Object name in the bucket.
 * @param region        Amazon S3 region of the bucket.
 * @param headerMap     Map of HTTP headers for the request.
 * @param queryParamMap Map of HTTP query parameters of the request.
 * @param contentType   Content type of the request body.
 * @param body          HTTP request body.
 * @param length        Length of HTTP request body.
 */
private Request createRequest(Method method, String bucketName, String objectName, String region,
        Map<String, String> headerMap, Map<String, String> queryParamMap, final String contentType,
        final Object body, final int length)
        throws InvalidBucketNameException, NoSuchAlgorithmException, InsufficientDataException, IOException {
    if (bucketName == null && objectName != null) {
        throw new InvalidBucketNameException(NULL_STRING, "null bucket name for object '" + objectName + "'");
    }

    HttpUrl.Builder urlBuilder = this.baseUrl.newBuilder();
    if (bucketName != null) {
        checkBucketName(bucketName);

        String host = this.baseUrl.host();
        if (host.equals(S3_AMAZONAWS_COM)) {
            // special case: handle s3.amazonaws.com separately
            if (region != null) {
                host = AwsS3Endpoints.INSTANCE.endpoint(region);
            }

            boolean usePathStyle = false;
            if (method == Method.PUT && objectName == null && queryParamMap == null) {
                // use path style for make bucket to workaround "AuthorizationHeaderMalformed" error from s3.amazonaws.com
                usePathStyle = true;
            } else if (queryParamMap != null && queryParamMap.containsKey("location")) {
                // use path style for location query
                usePathStyle = true;
            } else if (bucketName.contains(".") && this.baseUrl.isHttps()) {
                // use path style where '.' in bucketName causes SSL certificate validation error
                usePathStyle = true;
            }

            if (usePathStyle) {
                urlBuilder.host(host);
                urlBuilder.addPathSegment(bucketName);
            } else {
                urlBuilder.host(bucketName + "." + host);
            }
        } else {
            urlBuilder.addPathSegment(bucketName);
        }
    }

    if (objectName != null) {
        for (String pathSegment : objectName.split("/")) {
            // Limitation:
            // 1. OkHttp does not allow to add '.' and '..' as path segment.
            // 2. It's not allowed to add path segment as '/', '//', '/usr' or 'usr/'.
            urlBuilder.addPathSegment(pathSegment);
        }
    }

    if (queryParamMap != null) {
        for (Map.Entry<String, String> entry : queryParamMap.entrySet()) {
            urlBuilder.addQueryParameter(entry.getKey(), entry.getValue());
        }
    }

    RequestBody requestBody = null;
    if (body != null) {
        requestBody = new RequestBody() {
            @Override
            public MediaType contentType() {
                if (contentType != null) {
                    return MediaType.parse(contentType);
                } else {
                    return MediaType.parse("application/octet-stream");
                }
            }

            @Override
            public long contentLength() {
                if (body instanceof InputStream || body instanceof RandomAccessFile || body instanceof byte[]) {
                    return length;
                }

                if (length == 0) {
                    return -1;
                } else {
                    return length;
                }
            }

            @Override
            public void writeTo(BufferedSink sink) throws IOException {
                if (body instanceof InputStream) {
                    InputStream stream = (InputStream) body;
                    sink.write(Okio.source(stream), length);
                } else if (body instanceof RandomAccessFile) {
                    RandomAccessFile file = (RandomAccessFile) body;
                    sink.write(Okio.source(Channels.newInputStream(file.getChannel())), length);
                } else if (body instanceof byte[]) {
                    byte[] data = (byte[]) body;
                    sink.write(data, 0, length);
                } else {
                    sink.writeUtf8(body.toString());
                }
            }
        };
    }

    HttpUrl url = urlBuilder.build();
    // urlBuilder does not encode some characters properly for Amazon S3.
    // Encode such characters properly here.
    List<String> pathSegments = url.encodedPathSegments();
    urlBuilder = url.newBuilder();
    for (int i = 0; i < pathSegments.size(); i++) {
        urlBuilder.setEncodedPathSegment(i,
                pathSegments.get(i).replaceAll("\\!", "%21").replaceAll("\\$", "%24").replaceAll("\\&", "%26")
                        .replaceAll("\\'", "%27").replaceAll("\\(", "%28").replaceAll("\\)", "%29")
                        .replaceAll("\\*", "%2A").replaceAll("\\+", "%2B").replaceAll("\\,", "%2C")
                        .replaceAll("\\:", "%3A").replaceAll("\\;", "%3B").replaceAll("\\=", "%3D")
                        .replaceAll("\\@", "%40").replaceAll("\\[", "%5B").replaceAll("\\]", "%5D"));
    }
    url = urlBuilder.build();

    Request.Builder requestBuilder = new Request.Builder();
    requestBuilder.url(url);
    requestBuilder.method(method.toString(), requestBody);
    if (headerMap != null) {
        for (Map.Entry<String, String> entry : headerMap.entrySet()) {
            requestBuilder.header(entry.getKey(), entry.getValue());
        }
    }

    String sha256Hash = null;
    String md5Hash = null;
    if (this.accessKey != null && this.secretKey != null) {
        // No need to compute sha256 if endpoint scheme is HTTPS. Issue #415.
        if (url.isHttps()) {
            sha256Hash = "UNSIGNED-PAYLOAD";
            if (body instanceof BufferedInputStream) {
                md5Hash = Digest.md5Hash((BufferedInputStream) body, length);
            } else if (body instanceof RandomAccessFile) {
                md5Hash = Digest.md5Hash((RandomAccessFile) body, length);
            } else if (body instanceof byte[]) {
                byte[] data = (byte[]) body;
                md5Hash = Digest.md5Hash(data, length);
            }
        } else {
            if (body == null) {
                sha256Hash = Digest.sha256Hash(new byte[0]);
            } else {
                if (body instanceof BufferedInputStream) {
                    String[] hashes = Digest.sha256md5Hashes((BufferedInputStream) body, length);
                    sha256Hash = hashes[0];
                    md5Hash = hashes[1];
                } else if (body instanceof RandomAccessFile) {
                    String[] hashes = Digest.sha256md5Hashes((RandomAccessFile) body, length);
                    sha256Hash = hashes[0];
                    md5Hash = hashes[1];
                } else if (body instanceof byte[]) {
                    byte[] data = (byte[]) body;
                    sha256Hash = Digest.sha256Hash(data, length);
                    md5Hash = Digest.md5Hash(data, length);
                } else {
                    sha256Hash = Digest.sha256Hash(body.toString());
                }
            }
        }
    }

    if (md5Hash != null) {
        requestBuilder.header("Content-MD5", md5Hash);
    }
    if (url.port() == 80 || url.port() == 443) {
        requestBuilder.header("Host", url.host());
    } else {
        requestBuilder.header("Host", url.host() + ":" + url.port());
    }
    requestBuilder.header("User-Agent", this.userAgent);
    if (sha256Hash != null) {
        requestBuilder.header("x-amz-content-sha256", sha256Hash);
    }
    DateTime date = new DateTime();
    requestBuilder.header("x-amz-date", date.toString(DateFormat.AMZ_DATE_FORMAT));

    return requestBuilder.build();
}