Usage examples for java.io.RandomAccessFile.seek
public void seek(long pos) throws IOException
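seek sets the file pointer to an absolute byte offset from the start of the file; the next read or write begins there, and seeking past the current end does not grow the file until something is written. Before the project examples below, here is a minimal self-contained sketch (the file name and values are illustrative):

import java.io.IOException;
import java.io.RandomAccessFile;

public class SeekBasics {
    public static void main(String[] args) throws IOException {
        // try-with-resources closes the file even if an I/O error occurs
        try (RandomAccessFile raf = new RandomAccessFile("demo.bin", "rw")) {
            raf.writeInt(42);       // occupies bytes 0-3
            raf.writeLong(7L);      // occupies bytes 4-11
            raf.seek(4);            // jump back to the long's offset
            System.out.println(raf.readLong());  // prints 7
            raf.seek(0);            // rewind to the start
            System.out.println(raf.readInt());   // prints 42
        }
    }
}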
From source file:com.amazonaws.services.glacier.transfer.ArchiveTransferManager.java
/**
 * Download one chunk from Amazon Glacier. Retries if any errors are
 * encountered while streaming the data from Amazon Glacier.
 */
private void downloadOneChunk(String accountId, String vaultName, String jobId,
        RandomAccessFile output, long currentPosition, long endPosition,
        ProgressListener progressListener) {
    final long chunkSize = endPosition - currentPosition + 1;
    TreeHashInputStream input = null;
    int retries = 0;
    while (true) {
        try {
            GetJobOutputRequest req = new GetJobOutputRequest().withAccountId(accountId)
                    .withVaultName(vaultName)
                    .withRange("bytes=" + currentPosition + "-" + endPosition)
                    .withJobId(jobId)
                    .withGeneralProgressListener(progressListener);
            GetJobOutputResult jobOutputResult = glacier.getJobOutput(req);

            try {
                input = new TreeHashInputStream(new BufferedInputStream(jobOutputResult.getBody()));
                appendToFile(output, input);
            } catch (NoSuchAlgorithmException e) {
                throw failure(e, "Unable to compute hash for data integrity");
            } finally {
                closeQuietly(input, log);
            }

            // Only do the tree-hash check when the output checksum is returned from Glacier
            if (null != jobOutputResult.getChecksum()) {
                // Checksum does not match
                if (!input.getTreeHash().equalsIgnoreCase(jobOutputResult.getChecksum())) {
                    // Discard the chunk of bytes received
                    publishResponseBytesDiscarded(progressListener, chunkSize);
                    if (log.isDebugEnabled())
                        log.debug("reverting " + chunkSize);
                    throw new IOException(
                            "Client side computed hash doesn't match server side hash; possible data corruption");
                }
            } else {
                log.warn("Cannot validate the downloaded output since no tree-hash checksum is returned from Glacier. "
                        + "Make sure the InitiateJob and GetJobOutput requests use tree-hash-aligned ranges.");
            }

            // Successfully downloaded
            return;

        // Retry on IOException
        } catch (IOException ioe) {
            if (retries < DEFAULT_MAX_RETRIES) {
                retries++;
                if (log.isDebugEnabled()) {
                    log.debug(retries + " retry downloadOneChunk accountId=" + accountId
                            + ", vaultName=" + vaultName + ", jobId=" + jobId
                            + ", currentPosition=" + currentPosition
                            + " endPosition=" + endPosition);
                }
                try {
                    // Rewind so the retried download overwrites the partial chunk
                    output.seek(currentPosition);
                } catch (IOException e) {
                    throw new AmazonClientException("Unable to download the archive: " + ioe.getMessage(), e);
                }
            } else {
                throw new AmazonClientException("Unable to download the archive: " + ioe.getMessage(), ioe);
            }
        }
    }
}
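The seek idiom to note here is rewind-on-retry: if the stream fails mid-chunk, output.seek(currentPosition) moves the file pointer back to the chunk's start so the retried download overwrites any partially written bytes. A stripped-down sketch of just that idiom; ChunkSource and MAX_RETRIES are hypothetical stand-ins for the Glacier call and the SDK's retry limit:

import java.io.IOException;
import java.io.RandomAccessFile;

interface ChunkSource {
    // hypothetical abstraction over the remote stream being copied into the file
    void writeTo(RandomAccessFile output) throws IOException;
}

class RetryingDownloader {
    static final int MAX_RETRIES = 3;   // illustrative limit

    void downloadWithRetry(ChunkSource source, RandomAccessFile output, long chunkStart)
            throws IOException {
        for (int attempt = 0; ; attempt++) {
            try {
                source.writeTo(output);     // may fail after writing some bytes
                return;
            } catch (IOException e) {
                if (attempt >= MAX_RETRIES) throw e;
                output.seek(chunkStart);    // rewind so the retry overwrites partial data
            }
        }
    }
}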
From source file:org.commoncrawl.service.listcrawler.CrawlList.java
/**
 * Resubmit failed items.
 *
 * @param loader
 */
public void requeueFailedItems(CrawlQueueLoader loader) throws IOException {
    synchronized (this) {
        _queueState = QueueState.QUEUEING;
    }
    RandomAccessFile fixedDataReader = new RandomAccessFile(_fixedDataFile, "rw");
    RandomAccessFile stringDataReader = new RandomAccessFile(_variableDataFile, "rw");
    try {
        OnDiskCrawlHistoryItem item = new OnDiskCrawlHistoryItem();
        URLFP fingerprint = new URLFP();
        while (fixedDataReader.getFilePointer() != fixedDataReader.length()) {
            item.deserialize(fixedDataReader);
            boolean queueItem = false;
            if (item.isFlagSet(OnDiskCrawlHistoryItem.FLAG_HAS_CRAWL_STATUS)) {
                if (item.isFlagSet(OnDiskCrawlHistoryItem.FLAG_HAS_REDIRECT_STATUS)) {
                    queueItem = (item._redirectStatus != 0);
                    if (!queueItem) {
                        if (item._redirectHttpResult != 200 && item._redirectHttpResult != 404) {
                            queueItem = true;
                        }
                    }
                } else {
                    queueItem = (item._crawlStatus != 0);
                    if (!queueItem) {
                        if (item._httpResultCode != 200 && item._httpResultCode != 404) {
                            queueItem = true;
                        }
                    }
                }
                if (queueItem) {
                    // seek to string data
                    stringDataReader.seek(item._stringsOffset);
                    // and skip buffer length
                    WritableUtils.readVInt(stringDataReader);
                    // and read primary string
                    String url = stringDataReader.readUTF();
                    // and spill
                    fingerprint.setDomainHash(item._domainHash);
                    fingerprint.setUrlHash(item._urlFingerprint);
                    loader.queueURL(fingerprint, url);
                }
            }
        }
    } catch (IOException e) {
        LOG.error("Encountered Exception Queueing Items for List:" + _listId + " Exception:"
                + CCStringUtils.stringifyException(e));
        _queueState = QueueState.QUEUED;
    } finally {
        fixedDataReader.close();
        stringDataReader.close();
    }
}
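Both CrawlList examples use the same two-file layout: fixed-size history records scanned sequentially, each carrying an offset into a separate variable-length string file. The record offset drives stringDataReader.seek, after which a varint buffer length is skipped and the URL is read with readUTF. A reduced sketch of the lookup side, assuming offsets were recorded when the strings were written (names are illustrative):

import java.io.IOException;
import java.io.RandomAccessFile;

class StringLookup {
    /**
     * Reads the UTF string stored at the given offset in a side file.
     * The original code also skips a varint buffer-length prefix here;
     * this sketch assumes the offset points directly at the UTF data.
     */
    String readStringAt(RandomAccessFile stringData, long stringsOffset) throws IOException {
        stringData.seek(stringsOffset);  // jump straight to this record's string block
        return stringData.readUTF();     // length-prefixed UTF, as written by writeUTF
    }
}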
From source file:okuyama.imdst.util.FileBaseDataMap.java
/**
 * Returns all keys stored in the next non-empty data file.<br>
 * Returns null when no keys remain.<br>
 *
 * @return list of keys, or null
 */
public List getAllOneFileInKeys() {
    List keys = null;
    byte[] datas = null;
    StringBuilder keysBuf = null;
    RandomAccessFile raf = null;

    try {
        for (int retryCnt = 0; retryCnt < 2; retryCnt++) {
            if (this.nowIterationFileIndex < this.dataFileList.length) {
                // advance to the next data file that actually contains data
                int assistIdx = 0;
                for (assistIdx = this.nowIterationFileIndex; assistIdx < this.dataFileList.length; assistIdx++) {
                    if (this.dataFileList[assistIdx].length() > 1L)
                        break;
                }
                if (assistIdx < this.dataFileList.length)
                    this.nowIterationFileIndex = assistIdx;
                keys = new ArrayList();

                long oneFileLength = new Long(this.dataFileList[this.nowIterationFileIndex].length()).longValue();
                long readSize = lineDataSize * 10;
                int readLoop = new Long(oneFileLength / readSize).intValue();
                if ((oneFileLength % readSize) > 0)
                    readLoop++;

                raf = new RandomAccessFile(this.dataFileList[this.nowIterationFileIndex], "rwd");
                raf.seek(0);

                for (int readLoopIdx = 0; readLoopIdx < readLoop; readLoopIdx++) {
                    datas = new byte[new Long(readSize).intValue()];
                    int readLen = -1;
                    readLen = SystemUtil.diskAccessSync(raf, datas);

                    if (readLen > 0) {
                        int loop = readLen / lineDataSize;

                        for (int loopIdx = 0; loopIdx < loop; loopIdx++) {
                            int assist = (lineDataSize * loopIdx);

                            keysBuf = new StringBuilder(ImdstDefine.stringBufferLarge_3Size);
                            int idx = 0;
                            // the key ends at the first padding symbol
                            while (true) {
                                if (datas[assist + idx] != FileBaseDataMap.paddingSymbol) {
                                    keysBuf.append(new String(datas, assist + idx, 1));
                                } else {
                                    break;
                                }
                                idx++;
                            }
                            String keyStr = keysBuf.toString();
                            if (!keyStr.equals(FileBaseDataMap.sizeSaveKey)) {
                                keys.add(keyStr);
                            }
                            keysBuf = null;
                        }
                    }
                }
            }
            this.nowIterationFileIndex++;
            if (keys != null && keys.size() == 0)
                continue;
            retryCnt = 2;
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        try {
            if (raf != null)
                raf.close();
            raf = null;
            datas = null;
        } catch (Exception e2) {
            e2.printStackTrace();
        }
    }
    if (keys != null && keys.size() == 0)
        keys = null;
    return keys;
}
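Here seek(0) simply rewinds before a bulk sequential scan: the file is read in multi-line blocks and each fixed-width line is sliced out of the buffer up to the padding symbol. A trimmed sketch of that scan, assuming each read returns whole lines (line width and padding byte are illustrative):

import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.ArrayList;
import java.util.List;

class BlockScanner {
    static final int LINE_SIZE = 145;   // illustrative fixed line width
    static final byte PADDING = '&';    // illustrative padding byte

    /** Scans the whole file and returns each line's key, stripped of padding. */
    List<String> scanKeys(RandomAccessFile raf) throws IOException {
        raf.seek(0);                    // start the scan from the first record
        List<String> keys = new ArrayList<>();
        byte[] block = new byte[LINE_SIZE * 10];  // read several lines per call
        int read;
        while ((read = raf.read(block)) > 0) {
            // slice complete fixed-width lines out of the buffer
            for (int base = 0; base + LINE_SIZE <= read; base += LINE_SIZE) {
                int end = base;
                while (end < base + LINE_SIZE && block[end] != PADDING) end++;
                keys.add(new String(block, base, end - base));
            }
        }
        return keys;
    }
}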
From source file:org.apache.catalina.servlets.DefaultServlet.java
/**
 * Handle a partial PUT. New content specified in request is appended to
 * existing content in oldRevisionContent (if present). This code does
 * not support simultaneous partial updates to the same resource.
 *
 * @param req Description of the Parameter
 * @param range Description of the Parameter
 * @param path Description of the Parameter
 * @return Description of the Return Value
 * @throws IOException Description of the Exception
 */
protected File executePartialPut(HttpServletRequest req, Range range, String path) throws IOException {

    // Append data specified in ranges to existing content for this
    // resource - create a temp. file on the local filesystem to
    // perform this operation
    File tempDir = (File) getServletContext().getAttribute("javax.servlet.context.tempdir");

    // Convert all '/' characters to '.' in resourcePath
    String convertedResourcePath = path.replace('/', '.');
    File contentFile = new File(tempDir, convertedResourcePath);
    if (contentFile.createNewFile()) {
        // Clean up contentFile when Tomcat is terminated
        contentFile.deleteOnExit();
    }

    RandomAccessFile randAccessContentFile = new RandomAccessFile(contentFile, "rw");

    Resource oldResource = null;
    try {
        Object obj = getResources().lookup(path);
        if (obj instanceof Resource) {
            oldResource = (Resource) obj;
        }
    } catch (NamingException e) {
        // Ignore: no old revision of this resource exists
    }

    // Copy data in oldRevisionContent to contentFile
    if (oldResource != null) {
        BufferedInputStream bufOldRevStream =
                new BufferedInputStream(oldResource.streamContent(), BUFFER_SIZE);

        int numBytesRead;
        byte[] copyBuffer = new byte[BUFFER_SIZE];
        while ((numBytesRead = bufOldRevStream.read(copyBuffer)) != -1) {
            randAccessContentFile.write(copyBuffer, 0, numBytesRead);
        }

        bufOldRevStream.close();
    }

    randAccessContentFile.setLength(range.length);

    // Append data in request input stream to contentFile
    randAccessContentFile.seek(range.start);
    int numBytesRead;
    byte[] transferBuffer = new byte[BUFFER_SIZE];
    BufferedInputStream requestBufInStream =
            new BufferedInputStream(req.getInputStream(), BUFFER_SIZE);
    while ((numBytesRead = requestBufInStream.read(transferBuffer)) != -1) {
        randAccessContentFile.write(transferBuffer, 0, numBytesRead);
    }
    randAccessContentFile.close();
    requestBufInStream.close();

    return contentFile;
}
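This is the classic partial-PUT idiom, which also reappears essentially unchanged in the OpenCms and GSS servlets below: copy the old content into a temp file, call setLength for the full expected size, then seek to the range start so the request body lands at the requested offset. A condensed sketch, assuming start and totalLength come from a parsed Range header:

import java.io.IOException;
import java.io.InputStream;
import java.io.RandomAccessFile;

class PartialWriter {
    /** Writes the request body into the file starting at the range offset. */
    void applyRange(RandomAccessFile file, InputStream body, long start, long totalLength)
            throws IOException {
        file.setLength(totalLength);   // make the file span the full expected length
        file.seek(start);              // position the pointer at the range start
        byte[] buf = new byte[8192];
        int n;
        while ((n = body.read(buf)) != -1) {
            file.write(buf, 0, n);     // bytes land at start, start+1, ...
        }
    }
}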
From source file:org.commoncrawl.service.listcrawler.CrawlList.java
/**
 * Queue uncrawled urls via the CrawlQueueLoader.
 *
 * @param loader
 */
public void queueUnCrawledItems(CrawlQueueLoader loader) throws IOException {
    _queueState = QueueState.QUEUEING;

    int metadataVersion = getMetadata().getVersion();

    synchronized (_metadata) {
        // reset metadata PERIOD
        int urlCount = _metadata.getUrlCount();
        _metadata.clear();
        _metadata.setUrlCount(urlCount);
    }

    RandomAccessFile fixedDataReader = new RandomAccessFile(_fixedDataFile, "rw");
    RandomAccessFile stringDataReader = new RandomAccessFile(_variableDataFile, "rw");
    try {
        OnDiskCrawlHistoryItem item = new OnDiskCrawlHistoryItem();
        URLFP fingerprint = new URLFP();

        while (fixedDataReader.getFilePointer() != fixedDataReader.length()) {
            long position = fixedDataReader.getFilePointer();

            //LOG.info("*** TRYING READ LOCK FOR OFFSET:" + position);
            while (true) {
                // get read lock on position ...
                try {
                    FileLock lock = fixedDataReader.getChannel().tryLock(position,
                            OnDiskCrawlHistoryItem.ON_DISK_SIZE, false);
                    try {
                        //LOG.info("*** GOT READ LOCK FOR OFFSET:" + position);
                        item.deserialize(fixedDataReader);
                        break;
                    } finally {
                        lock.release();
                        //LOG.info("*** RELEASED READ LOCK FOR OFFSET:" + position);
                    }
                } catch (OverlappingFileLockException e) {
                    LOG.error("*** LOCK CONTENTION AT:" + position + " Exception:"
                            + CCStringUtils.stringifyException(e));
                }
            }

            // seek to string data
            stringDataReader.seek(item._stringsOffset);
            // and skip buffer length
            WritableUtils.readVInt(stringDataReader);
            // and read primary string
            String url = stringDataReader.readUTF();
            // setup fingerprint
            fingerprint.setDomainHash(item._domainHash);
            fingerprint.setUrlHash(item._urlFingerprint);

            // first, if it has never been crawled, crawl it no matter what ...
            boolean crawlItem = !item.isFlagSet(OnDiskCrawlHistoryItem.FLAG_HAS_CRAWL_STATUS);

            // if it has been crawled ... check list metadata version ...
            if (!crawlItem && metadataVersion >= 1) {
                // ok this is a newer version of the list ...
                // check refresh time if specified ...
                int refreshIntervalInSeconds = DEFAULT_REFRESH_INTERVAL_IN_SECS;
                if (getMetadata().getRefreshInterval() != 0) {
                    refreshIntervalInSeconds = getMetadata().getRefreshInterval();
                }
                if (item._updateTimestamp > 0) {
                    long timeSinceLastCrawl = item._updateTimestamp;
                    if (System.currentTimeMillis() - timeSinceLastCrawl >= (refreshIntervalInSeconds * 1000)) {
                        crawlItem = true;
                    }
                }
            }

            if (crawlItem) {
                loader.queueURL(fingerprint, url);
                synchronized (_metadata) {
                    // update queued item count
                    _metadata.setQueuedItemCount(_metadata.getQueuedItemCount() + 1);
                }
            } else {
                updateMetadata(item, _metadata, 0);
            }

            // ok update subdomain stats
            updateSubDomainMetadataForItemDuringLoad(item, url, fingerprint, crawlItem);
        }

        flushCachedSubDomainMetadata();

        loader.flush();

        _queueState = QueueState.QUEUED;
    } catch (IOException e) {
        LOG.error("Encountered Exception Queueing Items for List:" + _listId + " Exception:"
                + CCStringUtils.stringifyException(e));
        _queueState = QueueState.ERROR;
    } finally {
        fixedDataReader.close();
        stringDataReader.close();
    }
}
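This variant adds region locking on top of the layout shown earlier: before deserializing a record, it locks just that record's bytes via the channel, so a concurrent writer cannot tear the read. A reduced sketch of locking a record region; it uses a blocking shared lock where the original uses a non-blocking exclusive tryLock, and RECORD_SIZE is an illustrative stand-in for ON_DISK_SIZE:

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileLock;

class LockedReader {
    static final int RECORD_SIZE = 64;   // illustrative on-disk record size

    /** Reads one record under a region lock so concurrent writers can't tear it. */
    byte[] readLocked(RandomAccessFile file, long position) throws IOException {
        FileLock lock = file.getChannel().lock(position, RECORD_SIZE, true); // shared lock
        try {
            file.seek(position);         // position the pointer inside the locked region
            byte[] record = new byte[RECORD_SIZE];
            file.readFully(record);
            return record;
        } finally {
            lock.release();
        }
    }
}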
From source file:org.opencms.webdav.CmsWebdavServlet.java
/**
 * Handle a partial PUT.<p>
 *
 * New content specified in request is appended to
 * existing content in oldRevisionContent (if present). This code does
 * not support simultaneous partial updates to the same resource.<p>
 *
 * @param req the servlet request we are processing
 * @param range the range of the content in the file
 * @param path the path where to find the resource
 *
 * @return the new content file with the appended data
 *
 * @throws IOException if an input/output error occurs
 */
protected File executePartialPut(HttpServletRequest req, CmsWebdavRange range, String path)
        throws IOException {

    // Append data specified in ranges to existing content for this
    // resource - create a temp. file on the local filesystem to
    // perform this operation
    File tempDir = (File) getServletContext().getAttribute(ATT_SERVLET_TEMPDIR);

    // Convert all '/' characters to '.' in resourcePath
    String convertedResourcePath = path.replace('/', '.');
    File contentFile = new File(tempDir, convertedResourcePath);
    contentFile.createNewFile();

    RandomAccessFile randAccessContentFile = new RandomAccessFile(contentFile, "rw");

    InputStream oldResourceStream = null;
    try {
        I_CmsRepositoryItem item = m_session.getItem(path);
        oldResourceStream = new ByteArrayInputStream(item.getContent());
    } catch (CmsException e) {
        if (LOG.isErrorEnabled()) {
            LOG.error(Messages.get().getBundle().key(Messages.LOG_ITEM_NOT_FOUND_1, path), e);
        }
    }

    // Copy data in oldRevisionContent to contentFile
    if (oldResourceStream != null) {
        int numBytesRead;
        byte[] copyBuffer = new byte[BUFFER_SIZE];
        while ((numBytesRead = oldResourceStream.read(copyBuffer)) != -1) {
            randAccessContentFile.write(copyBuffer, 0, numBytesRead);
        }

        oldResourceStream.close();
    }

    randAccessContentFile.setLength(range.getLength());

    // Append data in request input stream to contentFile
    randAccessContentFile.seek(range.getStart());
    int numBytesRead;
    byte[] transferBuffer = new byte[BUFFER_SIZE];
    BufferedInputStream requestBufInStream = new BufferedInputStream(req.getInputStream(), BUFFER_SIZE);
    while ((numBytesRead = requestBufInStream.read(transferBuffer)) != -1) {
        randAccessContentFile.write(transferBuffer, 0, numBytesRead);
    }
    randAccessContentFile.close();
    requestBufInStream.close();

    return contentFile;
}
From source file:org.commoncrawl.service.listcrawler.CrawlList.java
void resetSubDomainCounts() throws IOException {

    LOG.info("*** LIST:" + getListId() + " Reset SubDomain Queued Counts.");

    if (_subDomainMetadataFile.exists()) {

        LOG.info("*** LIST:" + getListId() + " FILE EXISTS .");

        RandomAccessFile file = new RandomAccessFile(_subDomainMetadataFile, "rw");
        DataInputBuffer inputBuffer = new DataInputBuffer();
        DataOutputBuffer outputBuffer = new DataOutputBuffer(CrawlListMetadata.Constants.FixedDataSize);

        try {
            // skip version
            file.read();
            // read item count
            int itemCount = file.readInt();

            LOG.info("*** LIST:" + getListId() + " SUBDOMAIN ITEM COUNT:" + itemCount);

            CrawlListMetadata newMetadata = new CrawlListMetadata();

            for (int i = 0; i < itemCount; ++i) {

                long originalPos = file.getFilePointer();
                file.readFully(outputBuffer.getData(), 0, CrawlListMetadata.Constants.FixedDataSize);
                inputBuffer.reset(outputBuffer.getData(), CrawlListMetadata.Constants.FixedDataSize);
                try {
                    newMetadata.deserialize(inputBuffer, new BinaryProtocol());
                } catch (Exception e) {
                    LOG.error("-----Failed to Deserialize Metadata at Index:" + i + " Exception:"
                            + CCStringUtils.stringifyException(e));
                }

                // ok reset everything except hashes and first/last url pointers
                int urlCount = newMetadata.getUrlCount();
                long firstRecordOffset = newMetadata.getFirstRecordOffset();
                long lastRecordOffset = newMetadata.getLastRecordOffset();
                String domainName = newMetadata.getDomainName();
                long domainHash = newMetadata.getDomainHash();

                // reset
                newMetadata.clear();
                // restore
                newMetadata.setUrlCount(urlCount);
                newMetadata.setFirstRecordOffset(firstRecordOffset);
                newMetadata.setLastRecordOffset(lastRecordOffset);
                newMetadata.setDomainName(domainName);
                newMetadata.setDomainHash(domainHash);

                // serialize it ...
                outputBuffer.reset();
                newMetadata.serialize(outputBuffer, new BinaryProtocol());

                // write it back to disk
                file.seek(originalPos);
                // and rewrite it ...
                file.write(outputBuffer.getData(), 0, CrawlListMetadata.Constants.FixedDataSize);
            }
        } finally {
            file.close();
        }

        LOG.info("*** LIST:" + getListId() + " DONE RESETTING SUBDOMAIN METADATA QUEUE COUNTS");
    }
}
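The pattern here is read-modify-write in place over fixed-size records: capture getFilePointer() before reading, and seek back to it before rewriting, so the updated record lands exactly where the original was. A minimal sketch with a hypothetical record size:

import java.io.IOException;
import java.io.RandomAccessFile;

class RecordUpdater {
    static final int RECORD_SIZE = 64;   // illustrative fixed record size

    /** Rewrites record i in place without disturbing its neighbours. */
    void updateRecord(RandomAccessFile file, int i) throws IOException {
        long pos = (long) i * RECORD_SIZE;
        file.seek(pos);
        byte[] record = new byte[RECORD_SIZE];
        file.readFully(record);          // read the old record; pointer is now past it
        // ... selectively reset fields in `record` here ...
        file.seek(pos);                  // rewind to where the record started
        file.write(record);              // overwrite it in place
    }
}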
From source file:org.gss_project.gss.server.rest.Webdav.java
/**
 * Handle a partial PUT. New content specified in request is appended to
 * existing content in oldRevisionContent (if present). This code does not
 * support simultaneous partial updates to the same resource.
 *
 * @param req
 * @param range
 * @param path
 * @return
 * @throws IOException
 * @throws RpcException
 * @throws InsufficientPermissionsException
 * @throws ObjectNotFoundException
 */
protected File executePartialPut(HttpServletRequest req, Range range, String path)
        throws IOException, RpcException, ObjectNotFoundException, InsufficientPermissionsException {
    // Append data specified in ranges to existing content for this
    // resource - create a temporary file on the local file system to
    // perform this operation.
    File tempDir = (File) getServletContext().getAttribute("javax.servlet.context.tempdir");
    // Convert all '/' characters to '.' in resourcePath
    String convertedResourcePath = path.replace('/', '.');
    File contentFile = new File(tempDir, convertedResourcePath);
    if (contentFile.createNewFile())
        // Clean up contentFile when Tomcat is terminated.
        contentFile.deleteOnExit();

    RandomAccessFile randAccessContentFile = new RandomAccessFile(contentFile, "rw");

    User user = getUser(req);
    User owner = getOwner(req);
    FileHeader oldResource = null;
    try {
        Object obj = getService().getResourceAtPath(owner.getId(), path, true);
        if (obj instanceof FileHeader)
            oldResource = (FileHeader) obj;
    } catch (ObjectNotFoundException e) {
        // Do nothing.
    }

    // Copy data in oldRevisionContent to contentFile
    if (oldResource != null) {
        InputStream contents = getService().getFileContents(user.getId(), oldResource.getId());
        BufferedInputStream bufOldRevStream = new BufferedInputStream(contents, BUFFER_SIZE);

        int numBytesRead;
        byte[] copyBuffer = new byte[BUFFER_SIZE];
        while ((numBytesRead = bufOldRevStream.read(copyBuffer)) != -1)
            randAccessContentFile.write(copyBuffer, 0, numBytesRead);

        bufOldRevStream.close();
    }

    randAccessContentFile.setLength(range.length);

    // Append data in request input stream to contentFile
    randAccessContentFile.seek(range.start);
    int numBytesRead;
    byte[] transferBuffer = new byte[BUFFER_SIZE];
    BufferedInputStream requestBufInStream = new BufferedInputStream(req.getInputStream(), BUFFER_SIZE);
    while ((numBytesRead = requestBufInStream.read(transferBuffer)) != -1)
        randAccessContentFile.write(transferBuffer, 0, numBytesRead);
    randAccessContentFile.close();
    requestBufInStream.close();

    return contentFile;
}
From source file:edu.umass.cs.gigapaxos.SQLPaxosLogger.java
private byte[] getJournaledMessage(String logfile, long offset, int length, RandomAccessFile raf)
        throws IOException {
    assert (logfile != null);
    if (!new File(logfile).exists())
        return null;
    boolean locallyOpened = false;
    if (raf == null) {
        locallyOpened = true;
        raf = new RandomAccessFile(logfile, "r");
    }
    boolean error = false;
    String msg = null;
    byte[] buf = null;
    try {
        raf.seek(offset);
        assert (raf.length() > offset) : this + " " + raf.length() + " <= " + offset
                + " while reading logfile " + logfile;
        int readLength = raf.readInt();
        try {
            assert (readLength == length) : this + " : " + readLength + " != " + length;
        } catch (Error e) {
            error = true;
            log.severe(this + ": " + e);
            e.printStackTrace();
        }
        int bufLength = length;
        buf = new byte[bufLength];
        raf.readFully(buf);
        if (JOURNAL_COMPRESSION)
            buf = inflate(buf);
        msg = new String(buf, CHARSET);
    } catch (IOException | Error e) {
        log.log(Level.INFO, "{0} incurred IOException while retrieving journaled message {1}:{2}",
                new Object[] { this, logfile, offset + ":" + length });
        e.printStackTrace();
        if (locallyOpened)
            raf.close();
        throw e;
    }
    log.log(error ? Level.INFO : Level.FINEST, "{0} returning journaled message from {1}:{2} = [{3}]",
            new Object[] { this, logfile, offset + ":" + length, msg });
    return buf; // msg;
}
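This example pairs seek with a length-prefixed record format: seek to the journal offset, readInt yields the payload length, and readFully pulls exactly that many bytes. A compact sketch of the same framing, without the compression and cross-checking of the original (names are illustrative):

import java.io.IOException;
import java.io.RandomAccessFile;

class JournalReader {
    /** Reads one length-prefixed record stored at the given offset. */
    byte[] readRecord(String logfile, long offset) throws IOException {
        try (RandomAccessFile raf = new RandomAccessFile(logfile, "r")) {
            raf.seek(offset);            // position at the record header
            int length = raf.readInt();  // 4-byte length prefix
            byte[] payload = new byte[length];
            raf.readFully(payload);      // read exactly `length` bytes or throw EOFException
            return payload;
        }
    }
}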
From source file:okuyama.imdst.util.FileBaseDataMap.java
/**
 * put Method.<br>
 *
 * @param key
 * @param value
 * @param hashCode This is a key value hash code
 */
public void put(String key, String value, int hashCode) {
    /*
    long start1 = 0L;
    long start2 = 0L;
    long start3 = 0L;
    long start4 = 0L;
    long end1 = 0L;
    long end2 = 0L;
    long end3 = 0L;
    long end4 = 0L;
    */
    try {
        //start1 = System.nanoTime();
        //start2 = System.nanoTime();

        File file = dataFileList[hashCode % numberOfDataFiles];

        StringBuilder buf = new StringBuilder(this.keyDataLength);
        boolean callMapSizeCalc = true;
        if (key != null && key.equals(FileBaseDataMap.sizeSaveKey))
            callMapSizeCalc = false;

        // TODO: pad the key out to the fixed key field width
        buf.append(this.fillCharacter(key, keyDataLength));
        //buf.append(this.fillCharacter(value, oneDataLength));

        CacheContainer accessor = (CacheContainer) innerCache.get(file.getAbsolutePath());
        RandomAccessFile raf = null;
        BufferedWriter wr = null;

        if (accessor == null || accessor.isClosed == true) {
            raf = new RandomAccessFile(file, "rwd");
            wr = new BufferedWriter(new FileWriter(file, true));
            accessor = new CacheContainer();
            accessor.raf = raf;
            accessor.wr = wr;
            accessor.file = file;
            innerCache.put(file.getAbsolutePath(), accessor);
        } else {
            raf = accessor.raf;
            wr = accessor.wr;
        }

        //end2 = System.nanoTime();
        //start3 = System.nanoTime();

        // KeyData Write File
        for (int tryIdx = 0; tryIdx < 2; tryIdx++) {
            try {
                // find the line that already holds this key, if any
                long[] dataLineNoRet = this.getLinePoint(key, raf);
                //end3 = System.nanoTime();
                //start4 = System.nanoTime();

                if (dataLineNoRet[0] == -1) {
                    // key not found: append key and value to the end of the file
                    wr.write(buf.toString());
                    SystemUtil.diskAccessSync(wr);
                    wr.write(value);
                    SystemUtil.diskAccessSync(wr);

                    // pad the rest of the value field with the padding symbol
                    int valueSize = value.length();
                    byte[] fillByte = new byte[1];
                    fillByte[0] = new Integer(FileBaseDataMap.paddingSymbol).byteValue();

                    int paddingSize = (oneDataLength - valueSize);
                    int writeSetCount = paddingSize / 512;
                    int singleWriteCount = paddingSize % 512;

                    for (int i = 0; i < writeSetCount; i++) {
                        wr.write(FileBaseDataMap.paddingSymbolSetString);
                        if ((i % 14) == 0)
                            SystemUtil.diskAccessSync(wr);
                    }
                    SystemUtil.diskAccessSync(wr);

                    byte[] fillBytes = new byte[singleWriteCount];
                    for (int i = 0; i < singleWriteCount; i++) {
                        fillBytes[i] = fillByte[0];
                    }
                    wr.write(new String(fillBytes));
                    SystemUtil.diskAccessSync(wr);

                    // The size of an increment
                    if (callMapSizeCalc)
                        this.getAndIncrement();
                } else {
                    // key found: overwrite the existing line in place;
                    // increment the size only if the slot held no live value
                    boolean increMentFlg = false;
                    if (dataLineNoRet[1] == -1)
                        increMentFlg = true;
                    //if (this.get(key, hashCode) == null) increMentFlg = true;

                    raf.seek(dataLineNoRet[0] * (lineDataSize));
                    raf.write(buf.toString().getBytes(), 0, keyDataLength);
                    raf.write(value.getBytes());

                    // pad the rest of the value field with the padding symbol
                    int valueSize = value.length();
                    byte[] fillByte = new byte[1];
                    fillByte[0] = new Integer(FileBaseDataMap.paddingSymbol).byteValue();
                    int paddingSize = (oneDataLength - valueSize);
                    int writeSetCount = paddingSize / (4096);
                    int singleWriteCount = paddingSize % (4096);

                    for (int i = 0; i < writeSetCount; i++) {
                        raf.write(FileBaseDataMap.fillStream.toByteArray());
                    }

                    byte[] remainderPaddingBytes = new byte[singleWriteCount];
                    for (int i = 0; i < singleWriteCount; i++) {
                        remainderPaddingBytes[i] = fillByte[0];
                    }
                    if (remainderPaddingBytes.length > 0)
                        raf.write(remainderPaddingBytes);

                    if (callMapSizeCalc) {
                        if (increMentFlg)
                            this.getAndIncrement();
                    }
                }
                //end4 = System.nanoTime();
                break;
            } catch (IOException ie) {
                // on IOException, reopen the file handles and retry once;
                // rethrow if the second attempt also fails
                if (tryIdx == 1)
                    throw ie;

                try {
                    if (raf != null)
                        raf.close();
                    if (wr != null)
                        wr.close();

                    raf = new RandomAccessFile(file, "rwd");
                    wr = new BufferedWriter(new FileWriter(file, true));
                    accessor = new CacheContainer();
                    accessor.raf = raf;
                    accessor.wr = wr;
                    accessor.file = file;
                    innerCache.put(file.getAbsolutePath(), accessor);
                } catch (Exception e) {
                    throw e;
                }
            }
        }
    } catch (Exception e2) {
        e2.printStackTrace();
    }
    //end1 = System.nanoTime();
    //if (ImdstDefine.fileBaseMapTimeDebug) {
    //    System.out.println("1=" + (end1 - start1) + " 2=" + (end2 - start2) + " 3=" + (end3 - start3) + " 4=" + (end4 - start4));
    //}
}
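The seek call in put computes an absolute offset from a record index (dataLineNoRet[0] * lineDataSize) and overwrites the fixed-width key and value fields in place. Distilled to just that slot write, with illustrative record geometry:

import java.io.IOException;
import java.io.RandomAccessFile;

class SlotWriter {
    static final int KEY_LEN = 129;                   // illustrative key field width
    static final int VALUE_LEN = 16;                  // illustrative value field width
    static final int LINE_SIZE = KEY_LEN + VALUE_LEN; // total fixed slot width

    /** Overwrites the fixed-width slot at the given line number. */
    void writeSlot(RandomAccessFile raf, long lineNo, byte[] key, byte[] value)
            throws IOException {
        raf.seek(lineNo * LINE_SIZE);   // absolute offset of the slot
        raf.write(key, 0, Math.min(key.length, KEY_LEN));
        raf.write(value);               // the real code pads the remainder of the field
    }
}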