List of usage examples for the java.io.RandomAccessFile constructor
public RandomAccessFile(File file, String mode) throws FileNotFoundException
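Before the examples from real projects, here is a minimal, self-contained sketch of the constructor itself (the file name "example.dat" and the demo class are hypothetical): mode "r" opens an existing file read-only and throws FileNotFoundException if it is missing, while "rw" opens the file for reading and writing and creates it if it does not exist.

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;

public class RandomAccessFileDemo {
    public static void main(String[] args) throws IOException {
        File file = new File("example.dat");

        // "rw" creates the file if it does not exist and allows writing.
        try (RandomAccessFile out = new RandomAccessFile(file, "rw")) {
            out.writeInt(42);
        }

        // "r" opens an existing file read-only; a missing file throws FileNotFoundException.
        try (RandomAccessFile in = new RandomAccessFile(file, "r")) {
            System.out.println(in.readInt()); // prints 42
        }
    }
}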
From source file:com.netflix.imfutility.itunes.image.ImageValidator.java
private static boolean checkBytes(File file, byte[] start, byte[] end) throws IOException {
    // try-with-resources so the file is closed even when the method returns early
    try (RandomAccessFile randomAccessFile = new RandomAccessFile(file, "r")) {
        byte[] destStart = new byte[start.length];
        if (start.length != 0) {
            int res = randomAccessFile.read(destStart, 0, start.length);
            if (res == -1) {
                return false;
            }
        }
        byte[] destEnd = new byte[end.length];
        if (end.length != 0) {
            randomAccessFile.seek(file.length() - end.length);
            int res = randomAccessFile.read(destEnd, 0, end.length);
            if (res == -1) {
                return false;
            }
        }
        return Arrays.equals(start, destStart) && Arrays.equals(end, destEnd);
    }
}
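As a usage sketch (hypothetical caller, and the file name is made up), the helper above could verify a JPEG payload, since JPEG files start with the bytes FF D8 and end with FF D9:

// Hypothetical caller: check the JPEG start-of-image and end-of-image markers.
boolean looksLikeJpeg = checkBytes(new File("poster.jpg"),
        new byte[] { (byte) 0xFF, (byte) 0xD8 },
        new byte[] { (byte) 0xFF, (byte) 0xD9 });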
From source file:biz.neustar.nexus.plugins.gitlab.GitlabAuthenticatingRealmIT.java
protected byte[] loadBody(String fileName) {
    try (RandomAccessFile bodyFile = new RandomAccessFile(testData().resolveFile(fileName), "r")) {
        byte[] body = new byte[(int) bodyFile.length()];
        bodyFile.readFully(body);
        return body;
    } catch (Exception e) {
        e.printStackTrace();
        throw new RuntimeException(e);
    }
}
From source file:com.microsoft.azure.management.datalake.store.uploader.UploadMetadataGenerator.java
/**
 * Aligns segments to match record boundaries (where a record boundary = a new line).
 * If this is not possible (maximum record size = 4MB), throws an exception.
 *
 * @param metadata The metadata to realign.
 * @throws IOException Thrown if the input file path in the metadata is invalid or inaccessible.
 * @throws UploadFailedException Thrown if the length adjustment cannot be determined.
 */
private void alignSegmentsToRecordBoundaries(UploadMetadata metadata) throws IOException, UploadFailedException {
    int remainingSegments = 0;

    try (RandomAccessFile stream = new RandomAccessFile(metadata.getInputFilePath(), "r")) {
        long offset = 0;
        for (int i = 0; i < metadata.getSegments().length; i++) {
            UploadSegmentMetadata segment = metadata.getSegments()[i];

            // Updating a segment's length means that both the offset and the length of the next
            // segment need to be recalculated, to keep the segment lengths somewhat balanced.
            long diff = segment.getOffset() - offset;
            segment.setOffset(offset);
            segment.setLength(segment.getLength() + diff);
            if (segment.getOffset() >= metadata.getFileLength()) {
                continue;
            }

            if (segment.getSegmentNumber() == metadata.getSegments().length - 1) {
                // The last segment picks up the slack.
                segment.setLength(metadata.getFileLength() - segment.getOffset());
            } else {
                // Figure out how much the segment length must be adjusted so that it ends on a
                // record boundary (this can be negative or positive).
                int lengthAdjustment = determineLengthAdjustment(segment, stream,
                        Charset.forName(metadata.getEncodingName()), metadata.getDelimiter()) + 1;

                // Adjust the segment length accordingly.
                segment.setLength(segment.getLength() + lengthAdjustment);
            }
            offset += segment.getLength();
            remainingSegments++;
        }
    }

    // Since the segment lengths were adjusted, the trailing segment(s) may have become zero-length;
    // remove them (subarray returns a new, shortened array).
    UploadSegmentMetadata[] segments = metadata.getSegments();
    if (remainingSegments < segments.length) {
        segments = ArrayUtils.subarray(segments, 0, remainingSegments);
        metadata.setSegments(segments);
        metadata.setSegmentCount(segments.length);
    }

    // NOTE: consistency is not validated here; this method is called by createNewMetadata, which
    // calls save() afterwards, and save() validates consistency anyway.
}
From source file:com.linkedin.pinot.core.io.writer.impl.v2.FixedBitMultiValueWriter.java
public FixedBitMultiValueWriter(File file, int numDocs, int totalNumValues, int columnSizeInBits)
        throws Exception {
    // Use floating-point division; integer division would truncate the average values per doc.
    float averageValuesPerDoc = (float) totalNumValues / numDocs;
    this.docsPerChunk = (int) (Math.ceil(PREFERRED_NUM_VALUES_PER_CHUNK / averageValuesPerDoc));
    this.numChunks = (numDocs + docsPerChunk - 1) / docsPerChunk;
    chunkOffsetHeaderSize = numChunks * SIZE_OF_INT * NUM_COLS_IN_HEADER;
    bitsetSize = (totalNumValues + 7) / 8;
    rawDataSize = SizeUtil.computeBytesRequired(totalNumValues, columnSizeInBits,
            SizeUtil.BIT_UNPACK_BATCH_SIZE);
    LOGGER.info("Allocating:{} for rawDataSize to store {} values of bits:{}", rawDataSize, totalNumValues,
            columnSizeInBits);
    totalSize = chunkOffsetHeaderSize + bitsetSize + rawDataSize;
    raf = new RandomAccessFile(file, "rw");
    chunkOffsetsBuffer = MmapUtils.mmapFile(raf, FileChannel.MapMode.READ_WRITE, 0, chunkOffsetHeaderSize,
            file, this.getClass().getSimpleName() + " chunkOffsetsBuffer");
    bitsetBuffer = MmapUtils.mmapFile(raf, FileChannel.MapMode.READ_WRITE, chunkOffsetHeaderSize, bitsetSize,
            file, this.getClass().getSimpleName() + " bitsetBuffer");
    rawDataBuffer = MmapUtils.mmapFile(raf, FileChannel.MapMode.READ_WRITE,
            chunkOffsetHeaderSize + bitsetSize, rawDataSize, file,
            this.getClass().getSimpleName() + " rawDataBuffer");
    chunkOffsetsWriter = new FixedByteSingleValueMultiColWriter(chunkOffsetsBuffer, numDocs,
            NUM_COLS_IN_HEADER, new int[] { SIZE_OF_INT });
    customBitSet = CustomBitSet.withByteBuffer(bitsetSize, bitsetBuffer);
    rawDataWriter = new FixedBitSingleValueWriter(rawDataBuffer, totalNumValues, columnSizeInBits);
}
From source file:org.spf4j.perf.tsdb.TimeSeriesDatabase.java
public TimeSeriesDatabase(final String pathToDatabaseFile, final boolean isWrite, final byte... metaData)
        throws IOException {
    file = new RandomAccessFile(pathToDatabaseFile, isWrite ? "rw" : "r");
    // unique per-process string for sync purposes
    this.path = INTERNER.intern(new File(pathToDatabaseFile).getPath());
    tables = new ConcurrentHashMap<>();
    writeDataFragments = new HashMap<>();
    // read or create the header
    synchronized (path) {
        this.ch = file.getChannel();
        FileLock lock;
        if (isWrite) {
            lock = ch.lock();
        } else {
            lock = ch.lock(0, Long.MAX_VALUE, true); // shared lock for read-only access
        }
        try {
            if (file.length() == 0) {
                this.header = new Header(VERSION, metaData);
                this.header.writeTo(file);
                this.toc = new TableOfContents(file.getFilePointer());
                this.toc.writeTo(file);
            } else {
                this.header = new Header(file);
                this.toc = new TableOfContents(file);
            }
        } catch (IOException | RuntimeException e) {
            // Release the lock; if releasing fails, rethrow that failure with the original
            // exception attached as suppressed, otherwise rethrow the original exception.
            try {
                lock.release();
            } catch (IOException ex) {
                ex.addSuppressed(e);
                throw ex;
            }
            throw e;
        }
        lock.release();

        lock = ch.lock(0, Long.MAX_VALUE, true);
        try {
            readTableInfos();
        } catch (IOException | RuntimeException e) {
            try {
                lock.release();
            } catch (IOException ex) {
                ex.addSuppressed(e);
                throw ex;
            }
            throw e;
        }
        lock.release();
    }
}
From source file:eu.medsea.mimeutil.detector.OpendesktopMimeDetector.java
private void init(final String mimeCacheFile) {
    String cacheFile = mimeCacheFile;
    if (!new File(cacheFile).exists()) {
        cacheFile = internalMimeCacheFile;
    }
    // Map the mime.cache file as a memory mapped file
    FileChannel rCh = null;
    try {
        RandomAccessFile raf = new RandomAccessFile(cacheFile, "r");
        rCh = raf.getChannel();
        content = rCh.map(FileChannel.MapMode.READ_ONLY, 0, rCh.size());

        // Read all of the MIME types from the Alias list
        initMimeTypes();

        if (log.isDebugEnabled()) {
            log.debug("Registering a FileWatcher for [" + cacheFile + "]");
        }
        TimerTask task = new FileWatcher(new File(cacheFile)) {
            protected void onChange(File file) {
                initMimeTypes();
            }
        };
        timer = new Timer();
        // repeat the check every 10 seconds
        timer.schedule(task, new Date(), 10000);
    } catch (Exception e) {
        throw new MimeException(e);
    } finally {
        if (rCh != null) {
            try {
                rCh.close();
            } catch (Exception e) {
                log.error(e.getLocalizedMessage(), e);
            }
        }
    }
}
From source file:de.ailis.wlandsuite.htds.Htds.java
/**
 * Returns the offsets of the tileset MSQ blocks in the specified file.
 * The offsets are determined by reading the raw data of each block and
 * looking at the position in the file.
 *
 * @param file
 *            The file
 * @return The offsets
 * @throws IOException
 *             When a file operation fails.
 */
public static List<Integer> getMsqOffsets(final File file) throws IOException {
    List<Integer> offsets;
    RandomAccessFile access;
    FileInputStream stream;
    MsqHeader header;
    long offset;
    byte[] dummy;
    HuffmanInputStream huffmanStream;

    offsets = new ArrayList<Integer>();
    access = new RandomAccessFile(file, "r");
    try {
        stream = new FileInputStream(access.getFD());
        offset = 0;
        while ((header = MsqHeader.read(stream)) != null) {
            offsets.add(Integer.valueOf((int) offset));
            // Decode and discard the block's data so the file pointer advances to the next MSQ block.
            huffmanStream = new HuffmanInputStream(stream);
            dummy = new byte[header.getSize()];
            huffmanStream.read(dummy);
            offset = access.getFilePointer();
        }
    } finally {
        access.close();
    }
    return offsets;
}
From source file:de.ailis.wlandsuite.pics.Pics.java
/**
 * Returns the offsets of the base frame MSQ blocks in the specified file.
 * The offsets are determined by reading the raw data of each block and
 * looking at the current position in the file.
 *
 * @param file
 *            The file
 * @return The offsets
 * @throws IOException
 *             When a file operation fails.
 */
public static List<Integer> getMsqOffsets(final File file) throws IOException {
    List<Integer> offsets;
    RandomAccessFile access;
    FileInputStream stream;
    MsqHeader header;
    long offset;
    byte[] dummy;
    boolean baseFrame = true;
    HuffmanInputStream huffmanStream;

    offsets = new ArrayList<Integer>();
    access = new RandomAccessFile(file, "r");
    try {
        stream = new FileInputStream(access.getFD());
        offset = 0;
        while ((header = MsqHeader.read(stream)) != null) {
            // Only every other MSQ block is a base frame; record only those offsets.
            if (baseFrame) {
                offsets.add(Integer.valueOf((int) offset));
            }
            baseFrame = !baseFrame;
            // Decode and discard the block's data so the file pointer advances to the next MSQ block.
            huffmanStream = new HuffmanInputStream(stream);
            dummy = new byte[header.getSize()];
            huffmanStream.read(dummy);
            offset = access.getFilePointer();
        }
    } finally {
        access.close();
    }
    return offsets;
}
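A usage sketch (the input file name below is hypothetical): since the method is public and static, a caller can obtain the base-frame offsets directly and then seek to individual pictures.

// Hypothetical caller: print the base-frame MSQ block offsets of a pics file.
List<Integer> offsets = Pics.getMsqOffsets(new File("allpics1"));
for (Integer offset : offsets) {
    System.out.println("Base frame MSQ block at offset " + offset);
}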
From source file:com.mmj.app.common.util.IPTools.java
/**
 * Private constructor: opens the IP location data file and reads its index range.
 */
private IPTools() {
    ipCache = new Hashtable<String, IPLocation>();
    loc = new IPLocation();
    buf = new byte[100];
    b4 = new byte[4];
    b3 = new byte[3];
    try {
        ipFile = new RandomAccessFile(IP_FILE, "r");
    } catch (FileNotFoundException e) {
        System.out.println(IP_FILE);
        System.out.println("IP data file not found; IP location lookup is unavailable.");
        ipFile = null;
    }
    // Read the index range (start and end offsets) from the file header.
    if (ipFile != null) {
        try {
            ipBegin = readLong4(0);
            ipEnd = readLong4(4);
            if (ipBegin == -1 || ipEnd == -1) {
                ipFile.close();
                ipFile = null;
            }
        } catch (IOException e) {
            System.out.println("Invalid IP data file; IP location lookup is unavailable.");
            ipFile = null;
        }
    }
}
From source file:com.octo.captcha.engine.bufferedengine.buffer.DiskCaptchaBuffer.java
private final void initialiseFiles() throws Exception {
    dataFile = new File(name + ".data");
    indexFile = new File(name + ".index");

    readIndex();

    if (diskElements == null || !persistant) {
        if (log.isDebugEnabled()) {
            log.debug("Index file dirty or empty. Deleting data file " + getDataFileName());
        }
        dataFile.delete();
        diskElements = new HashedMap();
    }

    // Open the data file as random access. The dataFile is created if necessary.
    randomAccessFile = new RandomAccessFile(dataFile, "rw");

    isInitalized = true;
    log.info("Buffer initialized");
}