List of usage examples for java.io.RandomAccessFile.write(byte[], int, int)
public void write(byte b[], int off, int len) throws IOException
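The method writes len bytes from the given array, starting at array offset off, at the file's current file-pointer position. Before the longer real-world examples below, here is a minimal self-contained sketch; the file name "example.bin" and the payload are made up for illustration only.

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.charset.StandardCharsets;

public class RandomAccessFileWriteExample {
    public static void main(String[] args) throws IOException {
        byte[] payload = "hello, random access".getBytes(StandardCharsets.UTF_8);
        // try-with-resources closes the file even if write() throws
        try (RandomAccessFile raf = new RandomAccessFile("example.bin", "rw")) {
            raf.seek(raf.length());   // position the file pointer at the end to append
            raf.write(payload, 7, 6); // writes the 6 bytes "random", taken from array offset 7
        }
    }
}

Opening the file in "rw" mode and seeking before writing is the same seek-then-write pattern the examples below use for resumable downloads, partial PUTs, and log-file appends.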
From source file:com.mediatek.systemupdate.HttpManager.java
int writeFile(HttpResponse response, long currSize) {
    Xlog.i(TAG, "writeFile");
    if (mDownloadInfo.getDLSessionStatus() != DownloadInfo.STATE_QUERYNEWVERSION) {
        //mNotification.clearNotification(NotifyManager.NOTIFY_DOWNLOADING);
        mNotification.showDownloadingNotificaton(mDownloadInfo.getVerNum(),
                (int) (((double) Util.getFileSize(Util.getPackageFileName(mContext))
                        / (double) mDownloadInfo.getUpdateImageSize()) * 100), true);
    }
    Util.cancelAlarm(mContext, Util.Action.ACTION_AUTO_DL_TIME_OUT);
    mDownloadInfo.setOtaAutoDlStatus(false);
    mDownloadInfo.setIfPauseWithinTime(false);
    try {
        // response.getParams().setIntParameter(CoreConnectionPNames.CONNECTION_TIMEOUT,
        // 10000);
        InputStream in = response.getEntity().getContent();
        File ifolder = new File(Util.getPackagePathName(mContext));
        if (!ifolder.exists()) {
            ifolder.mkdirs();
        }
        RandomAccessFile out = null;
        String pkgFile = Util.getPackageFileName(mContext);
        if (pkgFile == null) {
            Xlog.e(TAG, "pkgFile is null");
            mErrorCode = HTTP_DETECTED_SDCARD_CRASH_OR_UNMOUNT;
            return mErrorCode;
        }
        try {
            out = new RandomAccessFile(pkgFile, "rws");
            out.seek(currSize);
        } catch (IOException e) {
            e.printStackTrace();
            onShutdownConn();
            mErrorCode = HTTP_DETECTED_SDCARD_CRASH_OR_UNMOUNT;
            return mErrorCode;
        }
        byte[] buff = new byte[4096];
        int rc = 0;
        int i = 0;
        int j = 0;
        boolean rightnow = false;
        boolean finish = false;
        File fPkg = new File(pkgFile);
        if (fPkg == null) {
            out.close();
            mErrorCode = HTTP_DETECTED_SDCARD_CRASH_OR_UNMOUNT;
            return mErrorCode;
        }
        while ((rc = in.read(buff, 0, 4096)) > 0) {
            // to-do: handle Intent.ACTION_MEDIA_EJECT
            /*
             * synchronized (this) { if (mEjectFlag) { try { out.close(); }
             * catch (IOException e) { e.printStackTrace(); }
             * onShutdownConn(); return mErrorCode =
             * HTTP_DETECTED_SDCARD_CRASH_OR_UNMOUNT; } }
             */
            try {
                if (fPkg.exists()) {
                    out.write(buff, 0, rc);
                } else {
                    Xlog.e(TAG, "file not exist during downloading ");
                    setPauseState();
                    out.close();
                    onShutdownConn();
                    mErrorCode = HTTP_FILE_NOT_EXIST;
                    sendErrorMessage();
                    return mErrorCode;
                }
            } catch (IOException e) {
                e.printStackTrace();
                out.close();
                onShutdownConn();
                mErrorCode = HTTP_DETECTED_SDCARD_CRASH_OR_UNMOUNT;
                return mErrorCode;
            }
            i++;
            int status = mDownloadInfo.getDLSessionStatus();
            if (status == DownloadInfo.STATE_PAUSEDOWNLOAD || status == DownloadInfo.STATE_QUERYNEWVERSION) {
                Xlog.i(TAG, "writeFile, DownloadInfo = " + status);
                mCookies = null;
                finish = false;
                out.close();
                onShutdownConn();
                return 0;
            }
            if (mHandler == null) {
                if (rightnow) {
                    i = 200;
                    rightnow = false;
                }
                if (i == 200) {
                    onDownloadProcessUpdate();
                    i = 0;
                }
            } else {
                if (!rightnow) {
                    i = 18;
                    rightnow = true;
                }
                if (i == 20) {
                    i = 0;
                    onDownloadProcessUpdate();
                }
            }
            j++;
            if (j == 20) {
                onTransferRatio();
                j = 0;
            }
            finish = true;
        }
        Xlog.i(TAG, "writeFile, finish, rc = " + rc + "bytes" + ". finish = " + finish);
        if (finish) {
            onTransferRatio();
            onDownloadProcessUpdate();
        }
        long curSize = Util.getFileSize(Util.getPackageFileName(mContext));
        Xlog.i(TAG, "curSize = " + curSize + " mNewVersionInfo.mSize = " + mDownloadInfo.getUpdateImageSize());
        out.close();
        if (curSize >= mDownloadInfo.getUpdateImageSize()) {
            onShutdownConn();
            return 0;
        }
    } catch (SocketTimeoutException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    }
    showNoNetworkToast();
    if (mDownloadInfo.getDLSessionStatus() == DownloadInfo.STATE_DOWNLOADING) {
        setPauseState();
        Xlog.e(TAG, "writeFile, exception to set pause state");
        mDownloadInfo.setOtaAutoDlStatus(true);
        mDownloadInfo.setIfPauseWithinTime(true);
        Util.setAlarm(mContext, AlarmManager.RTC,
                Calendar.getInstance().getTimeInMillis() + AUTO_DL_TIME,
                Util.Action.ACTION_AUTO_DL_TIME_OUT);
    }
    onShutdownConn();
    mErrorCode = HTTP_RESPONSE_NETWORK_ERROR;
    sendErrorMessage();
    return mErrorCode;
}
From source file:org.commoncrawl.service.crawler.CrawlList.java
private static void appendTargetsToLogFile(File logFileName, IntrusiveList<CrawlTarget> list) throws IOException {

    LogFileHeader header = new LogFileHeader();

    boolean preExistingHeader = logFileName.exists();

    RandomAccessFile file = new RandomAccessFile(logFileName, "rw");

    try {
        long headerOffset = 0;

        if (preExistingHeader) {
            headerOffset = readLogFileHeader(file, header);

            if (header._writePos == 0) {
                file.seek(headerOffset);
            } else {
                // seek to appropriate write position
                file.seek(header._writePos);
            }
        } else {
            headerOffset = writeLogFileHeader(file, header);
        }

        CustomByteArrayOutputStream bufferOutputStream = new CustomByteArrayOutputStream(1 << 17);
        DataOutputStream dataOutputStream = new DataOutputStream(bufferOutputStream);
        CRC32 crc = new CRC32();

        for (CrawlTarget target : list) {

            PersistentCrawlTarget persistentTarget = target.createPersistentTarget();

            bufferOutputStream.reset();
            // write to intermediate stream ...
            persistentTarget.write(dataOutputStream);
            // and crc the data ...
            crc.reset();
            crc.update(bufferOutputStream.getBuffer(), 0, bufferOutputStream.size());

            // write out length first
            file.writeInt(bufferOutputStream.size());
            // crc next
            long computedValue = crc.getValue();
            // TODO: waste of space - write 32 bit values as long because having problems
            // with java sign promotion rules during read...
            file.writeLong(computedValue);
            // and then the data
            file.write(bufferOutputStream.getBuffer(), 0, bufferOutputStream.size());
        }

        // now update header ...
        header._itemCount += list.size();
        header._writePos = file.getFilePointer();

        // now write out header anew ...
        writeLogFileHeader(file, header);

    } finally {
        if (file != null) {
            file.close();
        }
    }
}
From source file:org.commoncrawl.service.listcrawler.CacheManager.java
private final void flushLocalLog(final long bytesToRemove, final int itemsToRemove,
        final List<FingerprintAndOffsetTuple> flushedTupleList,
        final ArrayList<IndexDataFileTriple> tempFileTriples) {

    LOG.info("Acquiring Log Access Semaphores");

    // first boost this thread's priority ...
    int originalThreadPriority = Thread.currentThread().getPriority();
    Thread.currentThread().setPriority(Thread.MAX_PRIORITY);

    // next acquire all permits to the local access log ... block until we get there ...
    getLocalLogAccessSemaphore().acquireUninterruptibly(LOG_ACCESS_SEMAPHORE_COUNT);

    // now that we have all the semaphores we need, reduce the thread's priority to normal
    Thread.currentThread().setPriority(originalThreadPriority);

    LOG.info("Acquired ALL Log Access Semaphores");

    long timeStart = System.currentTimeMillis();

    // now we have exclusive access to the local transaction log ...
    File activeLogFilePath = getActiveLogFilePath();
    File checkpointLogFilePath = getCheckpointLogFilePath();

    try {
        // delete checkpoint file if it existed ...
        checkpointLogFilePath.delete();
        // now rename activelog to checkpoint path
        activeLogFilePath.renameTo(checkpointLogFilePath);

        long logFileConsolidationStartTime = System.currentTimeMillis();

        // now trap for exceptions in case something fails
        try {
            // fix up the header ...
            _header._fileSize -= bytesToRemove;
            _header._itemCount -= itemsToRemove;

            // open an old file and new file
            RandomAccessFile newFile = new RandomAccessFile(activeLogFilePath, "rw");
            RandomAccessFile oldFile = new RandomAccessFile(checkpointLogFilePath, "r");

            LOG.info("Opened new and old files. New Header FileSize is:" + _header._fileSize
                    + " ItemCount:" + _header._itemCount);

            try {
                // write out header ...
                long bytesRemainingInLogFile = _header._fileSize;

                LOG.info("Writing Header to New File. Bytes Remaining for Data are:" + bytesRemainingInLogFile);

                // write header to new file ...
                _header.writeHeader(newFile);

                // decrement bytes available ...
                bytesRemainingInLogFile -= LocalLogFileHeader.SIZE;

                if (bytesRemainingInLogFile != 0) {

                    byte transferBuffer[] = new byte[(1 << 20) * 16];

                    LOG.info("Seeking old file past flushed data (pos:" + LocalLogFileHeader.SIZE + bytesToRemove + ")");

                    // seek past old data ...
                    oldFile.seek(LocalLogFileHeader.SIZE + bytesToRemove);

                    // and copy across remaining data
                    while (bytesRemainingInLogFile != 0) {

                        int bytesToReadWriteThisIteration = Math.min((int) bytesRemainingInLogFile,
                                transferBuffer.length);

                        oldFile.read(transferBuffer, 0, bytesToReadWriteThisIteration);
                        newFile.write(transferBuffer, 0, bytesToReadWriteThisIteration);

                        LOG.info("Copied " + bytesToReadWriteThisIteration + " from Old to New");

                        bytesRemainingInLogFile -= bytesToReadWriteThisIteration;
                    }
                }
            } finally {
                if (newFile != null) {
                    newFile.close();
                }
                if (oldFile != null) {
                    oldFile.close();
                }
            }

            // if we reached here then checkpoint was successful ...
            LOG.info("Checkpoint - Log Consolidation Successfull! TOOK:"
                    + (System.currentTimeMillis() - logFileConsolidationStartTime));

            LOG.info("Loading Index Files");
            for (IndexDataFileTriple triple : tempFileTriples) {
                LOG.info("Loading Index File:" + triple._localIndexFilePath);
                final HDFSFileIndex fileIndex = new HDFSFileIndex(_remoteFileSystem, triple._localIndexFilePath,
                        triple._dataFilePath);
                LOG.info("Loaded Index File");
                // update hdfs index list ...
                synchronized (CacheManager.this) {
                    LOG.info("Adding HDFS Index to list");
                    _hdfsIndexList.addElement(fileIndex);
                }
            }

            // create a semaphore to wait on
            final Semaphore semaphore = new Semaphore(0);

            LOG.info("Scheduling Async Event");

            // now we need to schedule an async call to main thread to update data structures safely ...
            _eventLoop.setTimer(new Timer(0, false, new Timer.Callback() {

                @Override
                public void timerFired(Timer timer) {
                    LOG.info("Cleaning Map");

                    synchronized (CacheManager.this) {
                        // walk tuples
                        for (FingerprintAndOffsetTuple tuple : flushedTupleList) {
                            // TODO: HACK!
                            // remove from collection ...
                            _fingerprintToLocalLogPos.removeAll(tuple._fingerprint);
                        }
                    }
                    LOG.info("Increment Offset Info");
                    // finally increment locallog offset by bytes removed ...
                    _localLogStartOffset += bytesToRemove;

                    LOG.info("Releasing Wait Semaphore");
                    // release wait semaphore
                    semaphore.release();
                }
            }));

            LOG.info("Waiting for Async Event to Complete");
            // wait for async operation to complete ...
            semaphore.acquireUninterruptibly();
            LOG.info("Async Event to Completed");

        } catch (IOException e) {
            LOG.error("Checkpoint Failed with Exception:" + CCStringUtils.stringifyException(e));
            // delete new file ...
            activeLogFilePath.delete();
            // and rename checkpoint file to active file ...
            checkpointLogFilePath.renameTo(activeLogFilePath);
        }
    } finally {
        LOG.info("Releasing ALL Log Access Semaphores. HELD FOR:" + (System.currentTimeMillis() - timeStart));
        getLocalLogAccessSemaphore().release(LOG_ACCESS_SEMAPHORE_COUNT);
    }
}
From source file:org.apache.catalina.servlets.DefaultServlet.java
/**
 * Handle a partial PUT. New content specified in request is appended to
 * existing content in oldRevisionContent (if present). This code does
 * not support simultaneous partial updates to the same resource.
 *
 * @param req Description of the Parameter
 * @param range Description of the Parameter
 * @param path Description of the Parameter
 * @return Description of the Return Value
 * @throws IOException Description of the Exception
 */
protected File executePartialPut(HttpServletRequest req, Range range, String path) throws IOException {

    // Append data specified in ranges to existing content for this
    // resource - create a temp. file on the local filesystem to
    // perform this operation
    File tempDir = (File) getServletContext().getAttribute("javax.servlet.context.tempdir");

    // Convert all '/' characters to '.' in resourcePath
    String convertedResourcePath = path.replace('/', '.');

    File contentFile = new File(tempDir, convertedResourcePath);
    if (contentFile.createNewFile()) {
        // Clean up contentFile when Tomcat is terminated
        contentFile.deleteOnExit();
    }

    RandomAccessFile randAccessContentFile = new RandomAccessFile(contentFile, "rw");

    Resource oldResource = null;
    try {
        Object obj = getResources().lookup(path);
        if (obj instanceof Resource) {
            oldResource = (Resource) obj;
        }
    } catch (NamingException e) {
    }

    // Copy data in oldRevisionContent to contentFile
    if (oldResource != null) {
        BufferedInputStream bufOldRevStream = new BufferedInputStream(oldResource.streamContent(), BUFFER_SIZE);

        int numBytesRead;
        byte[] copyBuffer = new byte[BUFFER_SIZE];
        while ((numBytesRead = bufOldRevStream.read(copyBuffer)) != -1) {
            randAccessContentFile.write(copyBuffer, 0, numBytesRead);
        }

        bufOldRevStream.close();
    }

    randAccessContentFile.setLength(range.length);

    // Append data in request input stream to contentFile
    randAccessContentFile.seek(range.start);
    int numBytesRead;
    byte[] transferBuffer = new byte[BUFFER_SIZE];
    BufferedInputStream requestBufInStream = new BufferedInputStream(req.getInputStream(), BUFFER_SIZE);
    while ((numBytesRead = requestBufInStream.read(transferBuffer)) != -1) {
        randAccessContentFile.write(transferBuffer, 0, numBytesRead);
    }
    randAccessContentFile.close();
    requestBufInStream.close();

    return contentFile;
}
From source file:cc.arduino.utils.network.FileDownloader.java
private void downloadFile(boolean noResume) throws InterruptedException {
    RandomAccessFile file = null;

    try {
        // Open file and seek to the end of it
        file = new RandomAccessFile(outputFile, "rw");
        initialSize = file.length();

        if (noResume && initialSize > 0) {
            // delete file and restart downloading
            Files.delete(outputFile.toPath());
            initialSize = 0;
        }

        file.seek(initialSize);

        setStatus(Status.CONNECTING);

        Proxy proxy = new CustomProxySelector(PreferencesData.getMap()).getProxyFor(downloadUrl.toURI());
        if ("true".equals(System.getProperty("DEBUG"))) {
            System.err.println("Using proxy " + proxy);
        }

        HttpURLConnection connection = (HttpURLConnection) downloadUrl.openConnection(proxy);
        connection.setRequestProperty("User-agent", userAgent);
        if (downloadUrl.getUserInfo() != null) {
            String auth = "Basic " + new String(new Base64().encode(downloadUrl.getUserInfo().getBytes()));
            connection.setRequestProperty("Authorization", auth);
        }

        connection.setRequestProperty("Range", "bytes=" + initialSize + "-");
        connection.setConnectTimeout(5000);
        setDownloaded(0);

        // Connect
        connection.connect();
        int resp = connection.getResponseCode();

        if (resp == HttpURLConnection.HTTP_MOVED_PERM || resp == HttpURLConnection.HTTP_MOVED_TEMP) {
            URL newUrl = new URL(connection.getHeaderField("Location"));

            proxy = new CustomProxySelector(PreferencesData.getMap()).getProxyFor(newUrl.toURI());

            // open the new connection again
            connection = (HttpURLConnection) newUrl.openConnection(proxy);
            connection.setRequestProperty("User-agent", userAgent);
            if (downloadUrl.getUserInfo() != null) {
                String auth = "Basic " + new String(new Base64().encode(downloadUrl.getUserInfo().getBytes()));
                connection.setRequestProperty("Authorization", auth);
            }

            connection.setRequestProperty("Range", "bytes=" + initialSize + "-");
            connection.setConnectTimeout(5000);
            connection.connect();
            resp = connection.getResponseCode();
        }

        if (resp < 200 || resp >= 300) {
            throw new IOException("Received invalid http status code from server: " + resp);
        }

        // Check for valid content length.
        long len = connection.getContentLength();
        if (len >= 0) {
            setDownloadSize(len);
        }
        setStatus(Status.DOWNLOADING);

        synchronized (this) {
            stream = connection.getInputStream();
        }

        byte buffer[] = new byte[10240];
        while (status == Status.DOWNLOADING) {
            int read = stream.read(buffer);
            if (read == -1)
                break;

            file.write(buffer, 0, read);
            setDownloaded(getDownloaded() + read);

            if (Thread.interrupted()) {
                file.close();
                throw new InterruptedException();
            }
        }

        if (getDownloadSize() != null) {
            if (getDownloaded() < getDownloadSize())
                throw new Exception("Incomplete download");
        }
        setStatus(Status.COMPLETE);
    } catch (InterruptedException e) {
        setStatus(Status.CANCELLED);
        // lets InterruptedException go up to the caller
        throw e;
    } catch (SocketTimeoutException e) {
        setStatus(Status.CONNECTION_TIMEOUT_ERROR);
        setError(e);
    } catch (Exception e) {
        setStatus(Status.ERROR);
        setError(e);
    } finally {
        IOUtils.closeQuietly(file);
        synchronized (this) {
            IOUtils.closeQuietly(stream);
        }
    }
}
From source file:edu.umass.cs.gigapaxos.SQLPaxosLogger.java
private static void mergeLogfiles(File prev, File cur, PaxosPacketizer packetizer,
        MessageLogDiskMap msgLog, FileIDMap fidMap) throws IOException, JSONException {

    File tmpFile = new File(cur.toString() + TMP_FILE_SUFFIX);
    RandomAccessFile rafTmp = null, rafPrev = null, rafCur = null;
    long t = System.currentTimeMillis();

    try {
        rafTmp = new RandomAccessFile(tmpFile.toString(), "rw");
        rafPrev = new RandomAccessFile(prev.toString(), "r");
        rafCur = new RandomAccessFile(cur.toString(), "r");
        byte[] buf = new byte[1024];
        int numRead = 0;

        // copy prev file to tmp file
        while ((numRead = rafPrev.read(buf)) > 0)
            rafTmp.write(buf, 0, numRead);

        // copy cur file to tmp file
        while ((numRead = rafCur.read(buf)) > 0)
            rafTmp.write(buf, 0, numRead);
    } finally {
        if (rafTmp != null)
            rafTmp.close();
        if (rafPrev != null)
            rafPrev.close();
        if (rafCur != null)
            rafCur.close();
    }

    // copy tmp file index into memory
    HashMap<String, ArrayList<LogIndexEntry>> logIndexEntries = new HashMap<String, ArrayList<LogIndexEntry>>();
    try {
        rafTmp = new RandomAccessFile(tmpFile.toString(), "r");
        while (rafTmp.getFilePointer() < rafTmp.length()) {
            long offset = rafTmp.getFilePointer();
            int length = rafTmp.readInt();
            byte[] msg = new byte[length];
            rafTmp.readFully(msg);
            PaxosPacket pp = packetizer != null ? packetizer.stringToPaxosPacket(msg
                    // new String(msg, CHARSET)
                    ) : PaxosPacket.getPaxosPacket(new String(msg, CHARSET));
            assert (pp != null) : " read logged message " + new String(msg, CHARSET);
            if (!logIndexEntries.containsKey(pp.getPaxosID()))
                logIndexEntries.put(pp.getPaxosID(), new ArrayList<LogIndexEntry>());
            logIndexEntries.get(pp.getPaxosID())
                    .add(new LogIndexEntry(getSlot(pp), getBallot(pp).ballotNumber, getBallot(pp).coordinatorID,
                            pp.getType().getInt(), cur.toString(), offset, length));
        }
    } finally {
        if (rafTmp != null)
            rafTmp.close();
    }

    // atomically copy tmpFile to cur, adjust log index, delete prev
    synchronized (msgLog) {
        modifyLogfileAndLogIndex(cur, tmpFile, logIndexEntries, msgLog, fidMap);
        if (prev.delete())
            fidMap.remove(prev.toString());
    }
    DelayProfiler.updateDelay("merge", t);
    log.log(Level.INFO, "{0} merged logfile {1} into {2}", new Object[] { msgLog, prev, cur });
}
From source file:com.aliyun.android.oss.task.GetObjectTask.java
/**
 * Downloads the object content and writes it to the local file.
 *
 * @param object the target {@link OSSObject}
 * @return a result code (see {@link CloudUtil})
 * @throws OSSException if the request or transfer fails
 */
public int getResult(OSSObject object) throws OSSException {
    int result = CloudUtil.CLOUDCLIENT_RESULT_OK;
    HttpResponse response = null;
    RandomAccessFile randomAccessFile = null;
    InputStream inputStream = null;
    object = new OSSObject(this.bucketName, this.objectKey);
    try {
        mLogMission.i("getResult", "before network request", missionObject);
        response = this.execute();
        mLogMission.i("getResult", "network response", missionObject);
        Header rangeHeader = response.getFirstHeader("Content-Range");
        if (rangeHeader != null) {
            String range = rangeHeader.getValue();
            mLogMission.i("getResult", "range:" + range.toString(), missionObject);
        }
        object.setObjectMetaData(OSSHttpTool.getObjectMetadataFromResponse(response));

        if (response.getEntity() != null) {
            mLogMission.i("getResult",
                    "the content length get from server:" + response.getEntity().getContentLength(),
                    missionObject);
            if (missionObject.getFileLength() <= 0L) {
                missionObject.setFileLength(response.getEntity().getContentLength());
                mLogMission.i("getResult",
                        "get content length and set file length. fileLength:" + missionObject.getFileLength(),
                        missionObject);
            }
            missionObject.setPaused(false);
            inputStream = response.getEntity().getContent();
            if (listener != null) {
                listener.setTotalSize(missionObject.getFileLength());
            }
        }

        result = mDatabaseAccessManager.updateDownloadFile(missionObject);
        if (result != CloudUtil.CLOUDCLIENT_RESULT_OK) {
            return result;
        }

        randomAccessFile = new RandomAccessFile(filePath, "rwd");
        long offset = 0;
        if (range != null) {
            offset = range.getStart();
            mLogMission.i("getResult", "set offset before write file, offset:" + offset, missionObject);
        }
        randomAccessFile.seek(offset);
        mLogMission.i("getResult", "before write to local, file offset:" + offset, missionObject);

        byte[] buffer = new byte[1024 * 4];
        long readTotal = offset;
        int readLength = 0;
        while (true) {
            if (readTotal == missionObject.getFileLength()) {
                missionObject.setFinished(true);
                result = CloudUtil.CLOUDCLIENT_RESULT_OK;
                mLogMission.i("getResult", "readTotal == missionObject's fileLength, readTotal:" + readTotal,
                        missionObject);
                break;
            }
            if (listener != null && listener.isCancel()) {
                missionObject.setPaused(true);
                result = CloudUtil.CLOUD_FILE_MISSION_CANCEL;
                mLogMission.i("getResult", "cancel download missionObject", missionObject);
                break;
            }
            readLength = inputStream.read(buffer);
            mLogMission.i("getResult", "read buffer length:" + readLength, missionObject);
            if (readLength == -1) {
                missionObject.setFinished(true);
                result = CloudUtil.CLOUDCLIENT_RESULT_OK;
                mLogMission.i("getResult", "buffer read finish, readLength:" + readLength, missionObject);
                break;
            }
            mLogMission.i("getResult", "write to local, length:" + readLength, missionObject);
            randomAccessFile.write(buffer, 0, readLength);
            readTotal += readLength;
            mLogMission.i("getResult", "readTotal:" + readTotal, missionObject);
            missionObject.setTransferredLength(readTotal);
            missionObject.setLastTime(System.currentTimeMillis());
            // result = mDatabaseAccessManager.updateDownloadFile(missionObject);
            // if (result != CloudUtil.CLOUDCLIENT_RESULT_OK) {
            //     break;
            // }
            if (listener != null) {
                listener.transferred(readTotal);
            }
        }
    } catch (OSSException osse) {
        throw osse;
    } catch (ParseException pe) {
        OSSException ossException = new OSSException(pe);
        ossException.setErrorCode(OSSErrorCode.PARSE_METADATA_ERROR);
        throw ossException;
    } catch (IOException ioe) {
        OSSException ossException = new OSSException(ioe);
        ossException.setErrorCode(OSSErrorCode.GET_ENTITY_CONTENT_ERROR);
        throw ossException;
    } finally {
        mDatabaseAccessManager.updateDownloadFile(missionObject);
        mLogMission.i("getResult", "finally, update db", missionObject);
        this.releaseHttpClient();
        if (randomAccessFile != null) {
            try {
                inputStream.close();
                randomAccessFile.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
    return result;
}
From source file:org.commoncrawl.service.listcrawler.CrawlList.java
void writeInitialSubDomainMetadataToDisk() throws IOException {

    RandomAccessFile file = new RandomAccessFile(_subDomainMetadataFile, "rw");

    try {
        file.writeByte(0); // version
        file.writeInt(_transientSubDomainStats.size());

        ArrayList<CrawlListMetadata> sortedMetadata = new ArrayList<CrawlListMetadata>();
        sortedMetadata.addAll(_transientSubDomainStats.values());
        _transientSubDomainStats = null;
        CrawlListMetadata metadataArray[] = sortedMetadata.toArray(new CrawlListMetadata[0]);
        Arrays.sort(metadataArray, new Comparator<CrawlListMetadata>() {

            @Override
            public int compare(CrawlListMetadata o1, CrawlListMetadata o2) {
                int result = ((Integer) o2.getUrlCount()).compareTo(o1.getUrlCount());
                if (result == 0) {
                    result = o1.getDomainName().compareTo(o2.getDomainName());
                }
                return result;
            }
        });

        DataOutputBuffer outputBuffer = new DataOutputBuffer(CrawlListMetadata.Constants.FixedDataSize);

        TreeMap<Long, Integer> idToOffsetMap = new TreeMap<Long, Integer>();

        for (CrawlListMetadata entry : metadataArray) {
            // reset output buffer
            outputBuffer.reset();
            // write item to disk
            entry.serialize(outputBuffer, new BinaryProtocol());

            if (outputBuffer.getLength() > CrawlListMetadata.Constants.FixedDataSize) {
                LOG.fatal("Metadata Serialization for List:" + getListId() + " SubDomain:" + entry.getDomainName());
                System.out.println("Metadata Serialization for List:" + getListId() + " SubDomain:"
                        + entry.getDomainName());
            }
            // save offset
            idToOffsetMap.put(entry.getDomainHash(), (int) file.getFilePointer());
            // write out fixed data size
            file.write(outputBuffer.getData(), 0, CrawlListMetadata.Constants.FixedDataSize);
        }

        // write lookup table
        _offsetLookupTable = new DataOutputBuffer(idToOffsetMap.size() * OFFSET_TABLE_ENTRY_SIZE);

        for (Map.Entry<Long, Integer> entry : idToOffsetMap.entrySet()) {
            _offsetLookupTable.writeLong(entry.getKey());
            _offsetLookupTable.writeInt(entry.getValue());
        }
    } finally {
        file.close();
    }

    _transientSubDomainStats = null;
}
From source file:org.opencms.webdav.CmsWebdavServlet.java
/**
 * Handle a partial PUT.<p>
 *
 * New content specified in request is appended to
 * existing content in oldRevisionContent (if present). This code does
 * not support simultaneous partial updates to the same resource.<p>
 *
 * @param req the servlet request we are processing
 * @param range the range of the content in the file
 * @param path the path where to find the resource
 *
 * @return the new content file with the appended data
 *
 * @throws IOException if an input/output error occurs
 */
protected File executePartialPut(HttpServletRequest req, CmsWebdavRange range, String path) throws IOException {

    // Append data specified in ranges to existing content for this
    // resource - create a temp. file on the local filesystem to
    // perform this operation
    File tempDir = (File) getServletContext().getAttribute(ATT_SERVLET_TEMPDIR);

    // Convert all '/' characters to '.' in resourcePath
    String convertedResourcePath = path.replace('/', '.');
    File contentFile = new File(tempDir, convertedResourcePath);
    contentFile.createNewFile();

    RandomAccessFile randAccessContentFile = new RandomAccessFile(contentFile, "rw");

    InputStream oldResourceStream = null;
    try {
        I_CmsRepositoryItem item = m_session.getItem(path);
        oldResourceStream = new ByteArrayInputStream(item.getContent());
    } catch (CmsException e) {
        if (LOG.isErrorEnabled()) {
            LOG.error(Messages.get().getBundle().key(Messages.LOG_ITEM_NOT_FOUND_1, path), e);
        }
    }

    // Copy data in oldRevisionContent to contentFile
    if (oldResourceStream != null) {
        int numBytesRead;
        byte[] copyBuffer = new byte[BUFFER_SIZE];
        while ((numBytesRead = oldResourceStream.read(copyBuffer)) != -1) {
            randAccessContentFile.write(copyBuffer, 0, numBytesRead);
        }
        oldResourceStream.close();
    }

    randAccessContentFile.setLength(range.getLength());

    // Append data in request input stream to contentFile
    randAccessContentFile.seek(range.getStart());
    int numBytesRead;
    byte[] transferBuffer = new byte[BUFFER_SIZE];
    BufferedInputStream requestBufInStream = new BufferedInputStream(req.getInputStream(), BUFFER_SIZE);
    while ((numBytesRead = requestBufInStream.read(transferBuffer)) != -1) {
        randAccessContentFile.write(transferBuffer, 0, numBytesRead);
    }
    randAccessContentFile.close();
    requestBufInStream.close();

    return contentFile;
}