Usage examples for java.io.RandomAccessFile.close() — code listings collected from open-source projects
public void close() throws IOException
From source file:se.kth.ssvl.tslab.bytewalla.androiddtn.servlib.bundling.BundleDaemon.java
// [review docs] BundleDaemon.handle_bundle_received — dispatches a BundleReceivedEvent by source.
// EVENTSRC_PEER: reads the entire payload file into memory as US-ASCII, then either forwards/
// stores the bundle via MultiHopStorage (multi-hop relay path keyed on bundle.custodian()) or
// records it as a locally-delivered message; other sources only update stats_/source_str.
// After the switch, all sources share one path: log the reception, update the bundle's
// forwarding log, validate peer bundles (possibly deleting them and generating status reports),
// suppress duplicates, add the bundle to the pending queue/data store, take custody when
// configured, and defer delivery of fragments to reassembly.
// NOTE(review): file_handle.close() in the finally block throws NullPointerException when the
// payload file could not be opened (FileNotFoundException path leaves file_handle null) — needs
// a null check before close().
// NOTE(review): the scraped text below is collapsed onto long lines; several '// ...' comments
// swallow the code that followed them in the original file (e.g. the 'ArrayList<String> items'
// declaration that later statements depend on), so read the inline comments with care.
protected void handle_bundle_received(BundleReceivedEvent event) { Bundle bundle = event.bundle();//from w w w. java2 s . c o m String notify_msg = ""; String ID = local_eid().toString() + "/"; // Log.v(TAG, "daemon_bundle_id" + (int) bundle.bundleid()); // Log.v(TAG,"daemon_source" + bundle.source().toString()); // Log.v(TAG,"daemon_destination" + bundle.custodian().toString()); // Toast.makeText(,"local : "+local_eid().toString()+"\nDest : "+bundle.act_dest().toString(),Toast.LENGTH_SHORT).show(); // Log.d(TAG, " time ID " + bundle.creation_ts().seconds()); // Log.d(TAG, " custodian ID " + ID); // Log.d(TAG, " LOCAL ID " + local_eid().toString()); Log.d(TAG, " handle bundle received from " + event.source() + ", id = " + bundle.bundleid()); // "update statistics and store an appropriate event descriptor" [DTN2] String source_str = ""; switch (event.source()) { case EVENTSRC_PEER: stats_.received_bundles_++; // MultiHopStorage.getInstance().impt_sqlite_().get_records("bundle","id>0","id"); String output = ""; byte[] result = new byte[bundle.payload().length()]; RandomAccessFile file_handle = null; try { file_handle = new RandomAccessFile(bundle.payload().file(), "r"); file_handle.read(result); output = new String(result, "US-ASCII"); } catch (UnsupportedEncodingException e) { Log.e(TAG, e.getMessage()); } catch (FileNotFoundException e) { Log.e(TAG, e.getMessage()); } catch (IOException e) { Log.e(TAG, e.getMessage()); } finally { try { file_handle.close(); } catch (IOException e) { Log.e(TAG, e.getMessage()); } } if (bundle.custodian() != null && !bundle.custodian().equals(EndpointID.NULL_EID())) { //String encrypted = bundle.custodian().toString(); //String [] both = encrypted.split("\n"); //bundle.set_custodian(); //String custodian = both[0]; //if(!custodian.equals(ID)) { if (!bundle.custodian().toString().equals(ID)) { //String msg = both[1]; //bundle.set_owner(msg); notify_msg += "Guest "; // if this was my bundle that i have sent but some one sent me, 
discard it if (bundle.source().toString().equals(BundleDaemon.getInstance().local_eid().toString())) return; // ArrayList<String> items = new ArrayList<String>(Arrays.asList(output.split(" ,"))); output = items.get(0); bundle.set_bundleid((Integer.parseInt(items.get(1)))); // Checking if already sent to actual destination if (MultiHopStorage .getInstance().impt_sqlite_().get_records("Forwards", "bundle_id = " + bundle.bundleid() + " and destination = '" + bundle.custodian().toString() + "'", "id") .size() > 0) { return; } // check if this bundle already exists in my database, discard it if (MultiHopStorage.getInstance().impt_sqlite_() .get_records("bundle", "bundle_id = " + bundle.bundleid() + " and source = '" + bundle.source() + "'" + " and destination = '" + bundle.custodian().toString() + "'", "id") .size() > 0) { return; } else { // so its a new bundle destined to some node , i am the intermediate one. // add this bundle to db after adding current alive nodes to forward list to indicate that // bundle is already been sent to these nodes // LinkSet EntriesList = ContactManager.getInstance().links(); Iterator<String> i = DTNDiscovery.getNodes().iterator(); // Link.set_link_counter(0); while (i.hasNext()) { // Link element = i.next(); String Eid = i.next(); MultiHopStorage.getInstance().add(bundle.bundleid(), Eid + "/"); // here it is adding in forwards table } MultiHopStorage.getInstance().add(bundle, output); } } else if (!bundle.source().toString().contains("prophet")) { if (output.contains(" ,")) { ArrayList<String> items = new ArrayList<String>(Arrays.asList(output.split(" ,"))); output = items.get(0); } MultiHopStorage.getInstance().add_messages(bundle.source().toString() + "/", output); // DTNMessageView.update(); } } // else Toast.makeText(DTNManager.getInstance().getApplicationContext(),"From " + bundle.source().toString(),Toast.LENGTH_LONG).show(); Log.d("RECIVINGS", "forwarded From " + bundle.source().toString()); 
// (peer-receive bookkeeping done above; notify the user, then fall through to the shared
// validation / duplicate-suppression / pending-queue path that every event source takes)
DTNManager.getInstance().notify_user(notify_msg + "DTN Bundle Received", "From " + bundle.source().toString()); break; case EVENTSRC_APP: stats_.received_bundles_++; source_str = " (from app)"; break; case EVENTSRC_STORE: source_str = " (from data store)"; break; case EVENTSRC_ADMIN: stats_.generated_bundles_++; source_str = " (generated)"; break; case EVENTSRC_FRAGMENTATION: stats_.generated_bundles_++; source_str = " (from fragmentation)"; break; case EVENTSRC_ROUTER: stats_.generated_bundles_++; source_str = " (from router)"; break; default: Log.e(TAG, "Bundle Daemon: handle_bundle_received"); } StringBuffer buf = new StringBuffer(); bundle.format(buf); Log.i(TAG, String.format("BUNDLE_RECEIVED %s bundle id (%d) prevhop %s (%d bytes recvd)", source_str, bundle.bundleid(), event.prevhop().toString(), event.bytes_received())); // log the reception in the bundle's forwarding log if (event.source() == event_source_t.EVENTSRC_PEER && event.link() != null) { bundle.fwdlog().add_entry(event.link(), ForwardingInfo.action_t.FORWARD_ACTION, ForwardingInfo.state_t.RECEIVED); } else if (event.source() == event_source_t.EVENTSRC_APP) { if (event.registration() != null) { bundle.fwdlog().add_entry(event.registration(), ForwardingInfo.action_t.FORWARD_ACTION, ForwardingInfo.state_t.RECEIVED); } } // "log a warning if the bundle doesn't have any expiration time or // has a creation time that's in the future. 
in either case, we // proceed as normal" [DTN2] if (bundle.expiration() == 0) { Log.w(TAG, String.format("bundle id %d arrived with zero expiration time", bundle.bundleid())); } long now = TimeHelper.current_seconds_from_ref(); if ((bundle.creation_ts().seconds() > now) && (bundle.creation_ts().seconds() - now > 30000)) { Log.w(TAG, String.format("bundle id %d arrived with creation time in the future " + "(%d > %d)", bundle.bundleid(), bundle.creation_ts().seconds(), now)); } /* * "If a previous hop block wasn't included, but we know the remote * endpoint id of the link where the bundle arrived, assign the prevhop_ * field in the bundle so it's available for routing." [DTN2] */ if (event.source() == event_source_t.EVENTSRC_PEER) { if (bundle.prevhop() == null || bundle.prevhop().uri() == null) { bundle.set_prevhop(new EndpointID(EndpointID.NULL_EID())); } if (bundle.prevhop().is_null()) { bundle.prevhop().assign(event.prevhop()); } if (!bundle.prevhop().equals(event.prevhop())) { Log.w(TAG, String.format( "previous hop mismatch: prevhop header contains '%s' but " + "convergence layer indicates prevhop is '%s'", bundle.prevhop().toString(), event.prevhop().toString())); } } /* * "validate a bundle, including all bundle blocks, received from a peer" [DTN2] */ if (event.source() == event_source_t.EVENTSRC_PEER) { /* * "Check all BlockProcessors to validate the bundle. 
Initialize the * value in case the Bundle Protocol didn't give reason" [DTN2] */ status_report_reason_t[] reception_reason = new status_report_reason_t[1]; reception_reason[0] = status_report_reason_t.REASON_NO_ADDTL_INFO; status_report_reason_t[] deletion_reason = new status_report_reason_t[1]; deletion_reason[0] = status_report_reason_t.REASON_NO_ADDTL_INFO; boolean valid = BundleProtocol.validate(bundle, reception_reason, deletion_reason); /* * "Send the reception receipt if requested within the primary block * or some other error occurs that requires a reception status * report but may or may not require deleting the whole bundle." [DTN2] */ if (bundle.receive_rcpt() || reception_reason[0] != BundleProtocol.status_report_reason_t.REASON_NO_ADDTL_INFO) { generate_status_report(bundle, BundleStatusReport.flag_t.STATUS_RECEIVED, reception_reason[0]); } /* * "If the bundle is valid, probe the router to see if it wants to * accept the bundle." [DTN2] */ boolean accept_bundle = false; if (valid) { BundleProtocol.status_report_reason_t[] reason = new BundleProtocol.status_report_reason_t[1]; // "initialize the value in case the router didn't set the value // reason for us" [DTN2] reason[0] = status_report_reason_t.REASON_NO_ADDTL_INFO; accept_bundle = router_.accept_bundle(bundle, reason); deletion_reason[0] = reason[0]; } /* * "Delete a bundle if a validation error was encountered or the * router doesn't want to accept the bundle, in both cases not * giving the reception event to the router." [DTN2] */ if (!accept_bundle) { delete_bundle(bundle, deletion_reason[0]); event.set_daemon_only(true); return; } } /* * "Check if the bundle is a duplicate, i.e. shares a source id, * timestamp, and fragmentation information with some other bundle in * the system." [DTN2] */ Bundle duplicate = find_duplicate(bundle); if (duplicate != null) { Log.i(TAG, String.format("got duplicate bundle: %s . 
%s creation timestamp %d.%d", bundle.source().toString(), bundle.dest().toString(), bundle.creation_ts().seconds(), bundle.creation_ts().seqno())); stats_.duplicate_bundles_++; if (bundle.custody_requested() && duplicate.local_custody()) { generate_custody_signal(bundle, false, BundleProtocol.custody_signal_reason_t.CUSTODY_REDUNDANT_RECEPTION); } if (params_.suppress_duplicates_) { // "since we don't want the bundle to be processed by the rest // of the system, we mark the event as daemon_only (meaning it // won't be forwarded to routers) and return, which should // eventually remove all references on the bundle and then it // will be deleted" [DTN2] event.set_daemon_only(true); return; } // "The BP says that the "dispatch pending" retention constraint // must be removed from this bundle if there is a duplicate we // currently have custody of. This would cause the bundle to have // no retention constraints and it now "may" be discarded. Assuming // this means it is supposed to be discarded, we have to suppress // a duplicate in this situation regardless of the parameter // setting. We would then be relying on the custody transfer timer // to cause a new forwarding attempt in the case of routing loops // instead of the receipt of a duplicate, so in theory we can indeed // suppress this bundle. It may not be strictly required to do so, // in which case we can remove the following block." [DTN2] if (bundle.custody_requested() && duplicate.local_custody()) { event.set_daemon_only(true); return; } } /* * "Add the bundle to the master pending queue and the data store (unless * the bundle was just reread from the data store on startup) * * Note that if add_to_pending returns false, the bundle has already * expired so we immediately return instead of trying to deliver and/or * forward the bundle. Otherwise there's a chance that expired bundles * will persist in the network." 
[DTN2] */ boolean ok_to_route = add_to_pending(bundle, (event.source() != event_source_t.EVENTSRC_STORE)); if (!ok_to_route) { event.set_daemon_only(true); return; } /* * "If the bundle is a custody bundle and we're configured to take * custody, then do so. In case the event was delivered due to a reload * from the data store, then if we have local custody, make sure it's * added to the custody bundles list." [DTN2] */ if (bundle.custody_requested() && params_.accept_custody_ && (duplicate == null || !duplicate.local_custody())) { if (event.source() != event_source_t.EVENTSRC_STORE) { accept_custody(bundle); } else if (bundle.local_custody()) { custody_bundles_.push_back(bundle); } } /* * "If this bundle is a duplicate and it has not been suppressed, we can * assume the bundle it duplicates has already been delivered or added * to the fragment manager if required, so do not do so again. We can * bounce out now. Comments/jmmikkel If the extension blocks differ and * we care to do something with them, we can't bounce out quite yet." [DTN2] */ if (duplicate != null) { // We have to delete the Bundle here delete_bundle(bundle, status_report_reason_t.REASON_NO_ADDTL_INFO); return; } /* * "Check if this is a complete (non-fragment) bundle that obsoletes any * fragments that we know about." [DTN2] */ if (!bundle.is_fragment() && DTNService.context().getResources() .getString(R.string.DTNEnableProactiveFragmentation).equals("true")) { fragmentmgr_.delete_obsoleted_fragments(bundle); } /* * "Deliver the bundle to any local registrations that it matches, unless * it's generated by the router or is a bundle fragment. Delivery of * bundle fragments is deferred until after re-assembly." [DTN2] */ boolean is_local = check_local_delivery(bundle, (event.source() != event_source_t.EVENTSRC_ROUTER) && (bundle.is_fragment() == false)); /* * "Re-assemble bundle fragments that are destined to the local node." 
[DTN2] */ if (bundle.is_fragment() && is_local) { Log.d(TAG, String.format("deferring delivery of bundle %d " + "since bundle is a fragment", bundle.bundleid())); fragmentmgr_.process_for_reassembly(bundle); } /* * "Finally, bounce out so the router(s) can do something further with * the bundle in response to the event." [DTN2] */ }
From source file:com.dotmarketing.servlets.taillog.Tailer.java
/** * Follows changes in the file, calling the TailerListener's handle method for each new line. *///from w w w. j ava 2 s .c o m public void run() { RandomAccessFile reader = null; try { long last = 0; // The last time the file was checked for changes long position = 0; // position within the file // Open the file while (run && reader == null) { try { reader = new RandomAccessFile(file, "r"); } catch (FileNotFoundException e) { listener.fileNotFound(); } if (reader == null) { try { Thread.sleep(delay); } catch (InterruptedException e) { } } else { // The current position in the file position = end ? file.length() : startPosition; last = System.currentTimeMillis(); reader.seek(position); readLine(reader); position = reader.getFilePointer(); } } while (run) { // Check the file length to see if it was rotated long length = file.length(); if (length < position) { // File was rotated listener.fileRotated(); // Reopen the reader after rotation try { // Ensure that the old file is closed iff we re-open it successfully RandomAccessFile save = reader; reader = new RandomAccessFile(file, "r"); position = 0; // close old file explicitly rather than relying on GC picking up previous RAF IOUtils.closeQuietly(save); } catch (FileNotFoundException e) { // in this case we continue to use the previous reader and position values listener.fileNotFound(); } continue; } else { // File was not rotated // See if the file needs to be read again if (length > position) { // The file has more content than it did last time last = System.currentTimeMillis(); position = readLines(reader); } else if (FileUtils.isFileNewer(file, last)) { /* This can happen if the file is truncated or overwritten * with the exact same length of information. 
In cases like * this, the file position needs to be reset */ position = 0; reader.seek(position); // cannot be null here // Now we can read new lines last = System.currentTimeMillis(); position = readLines(reader); } } try { Thread.sleep(delay); } catch (InterruptedException e) { } } } catch (Exception e) { listener.handle(e); } finally { try { reader.close(); } catch (Exception e) { Logger.error(this.getClass(), "Unable to close: " + e.getMessage()); } } }
From source file:org.commoncrawl.service.listcrawler.CacheManager.java
private final void flushLocalLog(final long bytesToRemove, final int itemsToRemove, final List<FingerprintAndOffsetTuple> flushedTupleList, final ArrayList<IndexDataFileTriple> tempFileTriples) { LOG.info("Acquiring Log Access Semaphores"); // first boost this thread's priority ... int originalThreadPriority = Thread.currentThread().getPriority(); Thread.currentThread().setPriority(Thread.MAX_PRIORITY); // next acquire all permits to the local access log ... block until we get there ... getLocalLogAccessSemaphore().acquireUninterruptibly(LOG_ACCESS_SEMAPHORE_COUNT); // now that we have all the semaphores we need, reduce the thread's priority to normal Thread.currentThread().setPriority(originalThreadPriority); LOG.info("Acquired ALL Log Access Semaphores"); long timeStart = System.currentTimeMillis(); // now we have exclusive access to the local transaction log ... File activeLogFilePath = getActiveLogFilePath(); File checkpointLogFilePath = getCheckpointLogFilePath(); try {//from w w w. j a v a 2s. c o m // delete checkpoint file if it existed ... checkpointLogFilePath.delete(); // now rename activelog to checkpoint path activeLogFilePath.renameTo(checkpointLogFilePath); long logFileConsolidationStartTime = System.currentTimeMillis(); // now trap for exceptions in case something fails try { // fix up the header ... _header._fileSize -= bytesToRemove; _header._itemCount -= itemsToRemove; // open a old file and new file RandomAccessFile newFile = new RandomAccessFile(activeLogFilePath, "rw"); RandomAccessFile oldFile = new RandomAccessFile(checkpointLogFilePath, "r"); LOG.info("Opened new and old files. New Header FileSize is:" + _header._fileSize + " ItemCount:" + _header._itemCount); try { // write out header ... long bytesRemainingInLogFile = _header._fileSize; LOG.info("Writing Header to New File. Bytes Remaining for Data are:" + bytesRemainingInLogFile); // write header to new file ... _header.writeHeader(newFile); // decrement bytes available ... 
bytesRemainingInLogFile -= LocalLogFileHeader.SIZE; if (bytesRemainingInLogFile != 0) { byte transferBuffer[] = new byte[(1 << 20) * 16]; LOG.info("Seeking old file past flushed data (pos:" + LocalLogFileHeader.SIZE + bytesToRemove + ")"); // seek past old data ... oldFile.seek(LocalLogFileHeader.SIZE + bytesToRemove); // and copy across remaining data while (bytesRemainingInLogFile != 0) { int bytesToReadWriteThisIteration = Math.min((int) bytesRemainingInLogFile, transferBuffer.length); oldFile.read(transferBuffer, 0, bytesToReadWriteThisIteration); newFile.write(transferBuffer, 0, bytesToReadWriteThisIteration); LOG.info("Copied " + bytesToReadWriteThisIteration + " from Old to New"); bytesRemainingInLogFile -= bytesToReadWriteThisIteration; } } } finally { if (newFile != null) { newFile.close(); } if (oldFile != null) { oldFile.close(); } } // if we reached here then checkpoint was successfull ... LOG.info("Checkpoint - Log Consolidation Successfull! TOOK:" + (System.currentTimeMillis() - logFileConsolidationStartTime)); LOG.info("Loading Index Files"); for (IndexDataFileTriple triple : tempFileTriples) { LOG.info("Loading Index File:" + triple._localIndexFilePath); final HDFSFileIndex fileIndex = new HDFSFileIndex(_remoteFileSystem, triple._localIndexFilePath, triple._dataFilePath); LOG.info("Loaded Index File"); // update hdfs index list ... synchronized (CacheManager.this) { LOG.info("Adding HDFS Index to list"); _hdfsIndexList.addElement(fileIndex); } } // create a semaphore to wait on final Semaphore semaphore = new Semaphore(0); LOG.info("Scheduling Async Event"); // now we need to schedule an async call to main thread to update data structures safely ... _eventLoop.setTimer(new Timer(0, false, new Timer.Callback() { @Override public void timerFired(Timer timer) { LOG.info("Cleaning Map"); synchronized (CacheManager.this) { // walk tuples for (FingerprintAndOffsetTuple tuple : flushedTupleList) { //TODO: HACK! // remove from collection ... 
_fingerprintToLocalLogPos.removeAll(tuple._fingerprint); } } LOG.info("Increment Offset Info"); // finally increment locallog offset by bytes removed ... _localLogStartOffset += bytesToRemove; LOG.info("Releasing Wait Semaphore"); //release wait sempahore semaphore.release(); } })); LOG.info("Waiting for Async Event to Complete"); //wait for async operation to complete ... semaphore.acquireUninterruptibly(); LOG.info("Async Event to Completed"); } catch (IOException e) { LOG.error("Checkpoint Failed with Exception:" + CCStringUtils.stringifyException(e)); // delete new file ... activeLogFilePath.delete(); // and rename checkpoint file to active file ... checkpointLogFilePath.renameTo(activeLogFilePath); } } finally { LOG.info("Releasing ALL Log Access Semaphores. HELD FOR:" + (System.currentTimeMillis() - timeStart)); getLocalLogAccessSemaphore().release(LOG_ACCESS_SEMAPHORE_COUNT); } }
From source file:FileBaseDataMap.java
// [review docs] FileBaseDataMap.get — looks up the value stored for 'key' in the fixed-width
// data file chosen by (hashCode % numberOfDataFiles). Builds the match prefix 'key' + '&'
// (38 == '&' is the field terminator), obtains a cached RandomAccessFile/BufferedWriter pair
// (CacheContainer in innerCache, reopened if absent or closed), then scans the file in
// lineDataSize-byte records: a record matches when its first bytes equal the prefix. On a
// match, the value bytes after keyDataLength up to the next '&' are decoded as UTF-8 and
// returned; returns null when no record matches.
// NOTE(review): the outer 'catch (Exception e) { e.printStackTrace(); }' swallows I/O errors
// and returns null — callers cannot distinguish "missing key" from "read failure".
// NOTE(review): the original comments were non-ASCII and were mangled to '??' in scraping;
// hedged translations are added below rather than guessed inline.
/** * ??value??.<br>// w ww. j a v a2 s. c om * * @param key * @param hashCode This is a key value hash code * @return * @throws */ public String get(String key, int hashCode) { byte[] tmpBytes = null; String ret = null; byte[] keyBytes = key.getBytes(); byte[] equalKeyBytes = new byte[keyBytes.length + 1]; byte[] lineBufs = new byte[this.getDataSize]; boolean matchFlg = true; // ??? for (int idx = 0; idx < keyBytes.length; idx++) { equalKeyBytes[idx] = keyBytes[idx]; } equalKeyBytes[equalKeyBytes.length - 1] = 38; try { File file = dataFileList[hashCode % numberOfDataFiles]; CacheContainer accessor = (CacheContainer) innerCache.get(file.getAbsolutePath()); RandomAccessFile raf = null; BufferedWriter wr = null; if (accessor == null || accessor.isClosed) { raf = new RandomAccessFile(file, "rwd"); wr = new BufferedWriter(new FileWriter(file, true)); accessor = new CacheContainer(); accessor.raf = raf; accessor.wr = wr; accessor.file = file; innerCache.put(file.getAbsolutePath(), accessor); } else { raf = accessor.raf; } for (int tryIdx = 0; tryIdx < 2; tryIdx++) { try { raf.seek(0); int readLen = -1; while ((readLen = raf.read(lineBufs)) != -1) { matchFlg = true; int loop = readLen / lineDataSize; for (int loopIdx = 0; loopIdx < loop; loopIdx++) { int assist = (lineDataSize * loopIdx); matchFlg = true; if (equalKeyBytes[equalKeyBytes.length - 1] == lineBufs[assist + (equalKeyBytes.length - 1)]) { for (int i = 0; i < equalKeyBytes.length; i++) { if (equalKeyBytes[i] != lineBufs[assist + i]) { matchFlg = false; break; } } } else { matchFlg = false; } // ??????? if (matchFlg) { tmpBytes = new byte[lineDataSize]; for (int i = 0; i < lineDataSize; i++) { tmpBytes[i] = lineBufs[assist + i]; } break; } } if (matchFlg) break; } break; } catch (IOException ie) { // IOException???1???? 
// (garbled comment above — presumably: on IOException, reopen the file handles and
// retry the scan once; rethrow when the retry (tryIdx == 1) fails too — TODO confirm)
if (tryIdx == 1) throw ie; try { if (raf != null) raf.close(); if (wr != null) wr.close(); raf = new RandomAccessFile(file, "rwd"); wr = new BufferedWriter(new FileWriter(file, true)); accessor = new CacheContainer(); accessor.raf = raf; accessor.wr = wr; accessor.file = file; innerCache.put(file.getAbsolutePath(), accessor); } catch (Exception e) { throw e; } } } // ? if (tmpBytes != null) { if (tmpBytes[keyDataLength] != 38) { int i = keyDataLength; int counter = 0; for (; i < tmpBytes.length; i++) { if (tmpBytes[i] == 38) break; counter++; } ret = new String(tmpBytes, keyDataLength, counter, "UTF-8"); } } } catch (Exception e) { e.printStackTrace(); } return ret; }
From source file:org.commoncrawl.service.listcrawler.CrawlList.java
public ArrayList<CrawlListDomainItem> getSubDomainList(int offset, int count) { synchronized (_metadata) { ArrayList<CrawlListDomainItem> itemsOut = new ArrayList<CrawlListDomainItem>(); try {//from w ww. ja v a2 s . c o m synchronized (_subDomainMetadataFile) { RandomAccessFile file = new RandomAccessFile(_subDomainMetadataFile, "rw"); DataInputBuffer inputBuffer = new DataInputBuffer(); byte fixedDataBlock[] = new byte[CrawlListMetadata.Constants.FixedDataSize]; try { // skip version file.read(); // read item count int itemCount = file.readInt(); int i = offset; int end = Math.min(i + count, itemCount); LOG.info("*** LIST:" + getListId() + " SUBDOMAIN ITEM COUNT:" + itemCount); if (i < itemCount) { file.seek(5 + (CrawlListMetadata.Constants.FixedDataSize * offset)); CrawlListMetadata newMetadata = new CrawlListMetadata(); for (; i < end; ++i) { long orignalPos = file.getFilePointer(); file.readFully(fixedDataBlock, 0, fixedDataBlock.length); inputBuffer.reset(fixedDataBlock, fixedDataBlock.length); newMetadata.deserialize(inputBuffer, new BinaryProtocol()); itemsOut.add(buildSubDomainSummary(newMetadata.getDomainName(), newMetadata)); } } } finally { file.close(); } } } catch (IOException e) { LOG.error(CCStringUtils.stringifyException(e)); } LOG.info("*** LIST:" + getListId() + " DONE LOADING SUBDOMAIN DATA FROM DISK"); return itemsOut; } }
From source file:org.openmeetings.servlet.outputhandler.DownloadHandler.java
// [review docs] DownloadHandler.service — authenticated file-download servlet entry point.
// Flow: validate the session (sid -> users_id -> user_level > 0); resolve a working directory
// from 'moduleName' (lzRecorderApp recordings, videoconf1 room files, user/remote profiles,
// chat avatars, or the plain room folder); if the requested file is missing or unreadable,
// substitute a module-specific default placeholder (.jpg / .swf / generic); reject any
// resolved path escaping the webapp folder via getCanonicalPath().startsWith(); then stream
// the file through a RandomAccessFile with browser-specific Content-Disposition headers
// (browserType 1 = Firefox/Opera uses the RFC 5987 filename*= form).
// NOTE(review): rf.close() / out.close() are not in a finally block — an IOException while
// streaming leaks the RandomAccessFile, since the outer catch only logs the error.
// NOTE(review): scraped text below is collapsed onto long lines; several breaks fall
// mid-expression (e.g. the UPLOAD_DIR concatenation and the f2.exists() condition).
@Override protected void service(HttpServletRequest httpServletRequest, HttpServletResponse httpServletResponse) throws ServletException, IOException { try {/*from w ww . j av a2 s . c o m*/ if (getUserManagement() == null || getSessionManagement() == null) { return; } httpServletRequest.setCharacterEncoding("UTF-8"); log.debug("\nquery = " + httpServletRequest.getQueryString()); log.debug("\n\nfileName = " + httpServletRequest.getParameter("fileName")); log.debug("\n\nparentPath = " + httpServletRequest.getParameter("parentPath")); String queryString = httpServletRequest.getQueryString(); if (queryString == null) { queryString = ""; } String sid = httpServletRequest.getParameter("sid"); if (sid == null) { sid = "default"; } log.debug("sid: " + sid); Long users_id = getSessionManagement().checkSession(sid); Long user_level = getUserManagement().getUserLevelByID(users_id); if (user_level != null && user_level > 0) { String room_id = httpServletRequest.getParameter("room_id"); if (room_id == null) { room_id = "default"; } String moduleName = httpServletRequest.getParameter("moduleName"); if (moduleName == null) { moduleName = "nomodule"; } String parentPath = httpServletRequest.getParameter("parentPath"); if (parentPath == null) { parentPath = "nomodule"; } String requestedFile = httpServletRequest.getParameter("fileName"); if (requestedFile == null) { requestedFile = ""; } String fileExplorerItemIdParam = httpServletRequest.getParameter("fileExplorerItemId"); Long fileExplorerItemId = null; if (fileExplorerItemIdParam != null) { fileExplorerItemId = Long.parseLong(fileExplorerItemIdParam); } // make a complete name out of domain(organisation) + roomname String roomName = room_id; // trim whitespaces cause it is a directory name roomName = StringUtils.deleteWhitespace(roomName); // Get the current User-Directory String current_dir = getServletContext().getRealPath("/"); String working_dir = ""; working_dir = current_dir + OpenmeetingsVariables.UPLOAD_DIR + 
File.separatorChar; // Add the Folder for the Room if (moduleName.equals("lzRecorderApp")) { working_dir = current_dir + OpenmeetingsVariables.STREAMS_DIR + File.separatorChar + "hibernate" + File.separatorChar; } else if (moduleName.equals("videoconf1")) { if (parentPath.length() != 0) { if (parentPath.equals("/")) { working_dir = working_dir + roomName + File.separatorChar; } else { working_dir = working_dir + roomName + File.separatorChar + parentPath + File.separatorChar; } } else { working_dir = current_dir + roomName + File.separatorChar; } } else if (moduleName.equals("userprofile")) { working_dir += "profiles" + File.separatorChar; logNonExistentFolder(working_dir); working_dir += ScopeApplicationAdapter.profilesPrefix + users_id + File.separatorChar; logNonExistentFolder(working_dir); } else if (moduleName.equals("remoteuserprofile")) { working_dir += "profiles" + File.separatorChar; logNonExistentFolder(working_dir); String remoteUser_id = httpServletRequest.getParameter("remoteUserid"); if (remoteUser_id == null) { remoteUser_id = "0"; } working_dir += ScopeApplicationAdapter.profilesPrefix + remoteUser_id + File.separatorChar; logNonExistentFolder(working_dir); } else if (moduleName.equals("remoteuserprofilebig")) { working_dir += "profiles" + File.separatorChar; logNonExistentFolder(working_dir); String remoteUser_id = httpServletRequest.getParameter("remoteUserid"); if (remoteUser_id == null) { remoteUser_id = "0"; } working_dir += ScopeApplicationAdapter.profilesPrefix + remoteUser_id + File.separatorChar; logNonExistentFolder(working_dir); requestedFile = this.getBigProfileUserName(working_dir); } else if (moduleName.equals("chat")) { working_dir += "profiles" + File.separatorChar; logNonExistentFolder(working_dir); String remoteUser_id = httpServletRequest.getParameter("remoteUserid"); if (remoteUser_id == null) { remoteUser_id = "0"; } working_dir += ScopeApplicationAdapter.profilesPrefix + remoteUser_id + File.separatorChar; 
// (profile/chat branches above resolve per-user directories; below: default room folder,
// existence checks, placeholder substitution, path-traversal guard, then streaming)
logNonExistentFolder(working_dir); requestedFile = this.getChatUserName(working_dir); } else { working_dir = working_dir + roomName + File.separatorChar; } if (!moduleName.equals("nomodule")) { log.debug("requestedFile: " + requestedFile + " current_dir: " + working_dir); String full_path = working_dir + requestedFile; File f = new File(full_path); // If the File does not exist or is not readable show/load a // place-holder picture if (!f.exists() || !f.canRead()) { if (!f.canRead()) { log.debug("LOG DownloadHandler: The request file is not readable"); } else { log.debug( "LOG DownloadHandler: The request file does not exist / has already been deleted"); } log.debug("LOG ERROR requestedFile: " + requestedFile); // replace the path with the default picture/document if (requestedFile.endsWith(".jpg")) { log.debug("LOG endsWith d.jpg"); log.debug("LOG moduleName: " + moduleName); requestedFile = DownloadHandler.defaultImageName; if (moduleName.equals("remoteuserprofile")) { requestedFile = DownloadHandler.defaultProfileImageName; } else if (moduleName.equals("remoteuserprofilebig")) { requestedFile = DownloadHandler.defaultProfileImageNameBig; } else if (moduleName.equals("userprofile")) { requestedFile = DownloadHandler.defaultProfileImageName; } else if (moduleName.equals("chat")) { requestedFile = DownloadHandler.defaultChatImageName; } // request for an image full_path = current_dir + "default" + File.separatorChar + requestedFile; } else if (requestedFile.endsWith(".swf")) { requestedFile = DownloadHandler.defaultSWFName; // request for a SWFPresentation full_path = current_dir + "default" + File.separatorChar + DownloadHandler.defaultSWFName; } else { // Any document, must be a download request // OR a Moodle Loggedin User requestedFile = DownloadHandler.defaultImageName; full_path = current_dir + "default" + File.separatorChar + DownloadHandler.defaultImageName; } } log.debug("full_path: " + full_path); File f2 = new File(full_path); if (!f2.exists() || 
!f2.canRead()) { if (!f2.canRead()) { log.debug( "DownloadHandler: The request DEFAULT-file does not exist / has already been deleted"); } else { log.debug( "DownloadHandler: The request DEFAULT-file does not exist / has already been deleted"); } // no file to handle abort processing return; } // Requested file is outside OM webapp folder File curDirFile = new File(current_dir); if (!f2.getCanonicalPath().startsWith(curDirFile.getCanonicalPath())) { throw new Exception("Invalid file requested: f2.cp == " + f2.getCanonicalPath() + "; curDir.cp == " + curDirFile.getCanonicalPath()); } // Get file and handle download RandomAccessFile rf = new RandomAccessFile(full_path, "r"); // Default type - Explorer, Chrome and others int browserType = 0; // Firefox and Opera browsers if (httpServletRequest.getHeader("User-Agent") != null) { if ((httpServletRequest.getHeader("User-Agent").contains("Firefox")) || (httpServletRequest.getHeader("User-Agent").contains("Opera"))) { browserType = 1; } } log.debug("Detected browser type:" + browserType); httpServletResponse.reset(); httpServletResponse.resetBuffer(); OutputStream out = httpServletResponse.getOutputStream(); if (requestedFile.endsWith(".swf")) { // trigger download to SWF => THIS is a workaround for // Flash Player 10, FP 10 does not seem // to accept SWF-Downloads with the Content-Disposition // in the Header httpServletResponse.setContentType("application/x-shockwave-flash"); httpServletResponse.setHeader("Content-Length", "" + rf.length()); } else { httpServletResponse.setContentType("APPLICATION/OCTET-STREAM"); String fileNameResult = requestedFile; if (fileExplorerItemId != null && fileExplorerItemId > 0) { FileExplorerItem fileExplorerItem = getFileExplorerItemDaoImpl() .getFileExplorerItemsById(fileExplorerItemId); if (fileExplorerItem != null) { fileNameResult = fileExplorerItem.getFileName().substring(0, fileExplorerItem.getFileName().length() - 4) + fileNameResult.substring(fileNameResult.length() - 4, 
fileNameResult.length()); } } if (browserType == 0) { httpServletResponse.setHeader("Content-Disposition", "attachment; filename=" + java.net.URLEncoder.encode(fileNameResult, "UTF-8")); } else { httpServletResponse.setHeader("Content-Disposition", "attachment; filename*=UTF-8'en'" + java.net.URLEncoder.encode(fileNameResult, "UTF-8")); } httpServletResponse.setHeader("Content-Length", "" + rf.length()); } byte[] buffer = new byte[1024]; int readed = -1; while ((readed = rf.read(buffer, 0, buffer.length)) > -1) { out.write(buffer, 0, readed); } rf.close(); out.flush(); out.close(); } } else { System.out.println("ERROR DownloadHandler: not authorized FileDownload " + (new Date())); } } catch (Exception er) { log.error("Error downloading: ", er); // er.printStackTrace(); } }
From source file:com.polyvi.xface.extension.advancedfiletransfer.XFileDownloader.java
/**
 * Starts (or resumes) an asynchronous HTTP download on a background thread.
 * <p>
 * Progress is persisted via {@code mDownloadInfo} so an interrupted transfer can
 * resume with an HTTP {@code Range} request. Data is written to a
 * {@code mLocalFilePath + TEMP_FILE_SUFFIX} temp file and renamed to the final
 * path only on completion. On {@link IOException} the loop retries up to
 * {@code RETRY} times, sleeping {@code RETRY_INTERVAL} ms between attempts.
 *
 * @param callbackCtx callback context used to report progress/success/error
 */
@Override
public void transfer(XCallbackContext callbackCtx) {
    initDownloadInfo();
    // Already downloading: ignore re-entrant calls.
    if (mState == DOWNLOADING) {
        return;
    }
    mCallbackCtx = callbackCtx;
    if (null == mDownloadInfo) {
        onError(CONNECTION_ERR);
    } else {
        setState(DOWNLOADING);
        new Thread(new Runnable() {
            @Override
            public void run() {
                HttpURLConnection connection = null;
                RandomAccessFile randomAccessFile = null;
                InputStream is = null;
                int retry = RETRY;
                // TODO (original, garbled): retry-policy note lost to encoding damage.
                do {
                    // Bytes already on disk from a previous attempt; resume point.
                    int completeSize = mDownloadInfo.getCompleteSize();
                    try {
                        URL url = new URL(mUrl);
                        connection = (HttpURLConnection) url.openConnection();
                        connection.setConnectTimeout(TIME_OUT_MILLISECOND);
                        connection.setRequestMethod("GET");
                        // Request only the remaining bytes: "Range: bytes=<completeSize>-".
                        connection.setRequestProperty("Range", "bytes=" + completeSize + "-");
                        // Attach any stored cookies for this URL.
                        setCookieProperty(connection, mUrl);
                        // Write into the ".temp" file, positioned at the resume offset.
                        randomAccessFile = new RandomAccessFile(mLocalFilePath + TEMP_FILE_SUFFIX, "rwd");
                        randomAccessFile.seek(completeSize);
                        is = connection.getInputStream();
                        byte[] buffer = new byte[mBufferSize];
                        int length = -1;
                        while ((length = is.read(buffer)) != -1) {
                            try {
                                randomAccessFile.write(buffer, 0, length);
                            } catch (Exception e) {
                                // Local write failure (e.g. disk full): abort all retries.
                                retry = -1;
                                break;
                            }
                            completeSize += length;
                            onProgressUpdated(completeSize, 0);
                            // User paused: stop reading; progress is preserved for resume.
                            if (PAUSE == mState) {
                                break;
                            }
                        }
                        if (mDownloadInfo.isDownloadCompleted()) {
                            // Rename ".temp" file to the final destination.
                            renameFile(mLocalFilePath + TEMP_FILE_SUFFIX, mLocalFilePath);
                            onSuccess();
                            break;
                        }
                    } catch (IOException e) {
                        // Only surface the error once retries are exhausted.
                        if (retry <= 0) {
                            onError(CONNECTION_ERR);
                            XLog.e(CLASS_NAME, e.getMessage());
                        }
                        // Wait before the next retry attempt.
                        try {
                            Thread.sleep(RETRY_INTERVAL);
                        } catch (InterruptedException ex) {
                            XLog.e(CLASS_NAME, "sleep be interrupted", ex);
                        }
                    } finally {
                        try {
                            if (null != is) {
                                is.close();
                            }
                            // May still be null if new URL()/openConnection() threw first.
                            if (null != randomAccessFile) {
                                randomAccessFile.close();
                            }
                            // May still be null if new URL() threw first.
                            if (null != connection) {
                                connection.disconnect();
                            }
                        } catch (IOException e) {
                            XLog.e(CLASS_NAME, e.getMessage());
                        }
                    }
                    // NOTE(review): retry-- is evaluated even on the pause path; each
                    // loop iteration consumes one retry regardless of cause.
                } while ((DOWNLOADING == mState) && (0 < retry--));
            }
        }).start();
    }
}
From source file:org.commoncrawl.service.listcrawler.CrawlHistoryManager.java
/** * //from w ww.j a va 2s.c o m * @return a sorted map of urlfp to item * @throws IOException */ TreeMap<URLFP, ProxyCrawlHistoryItem> loadLocalLogItemMap() throws IOException { TreeMap<URLFP, ProxyCrawlHistoryItem> itemMap = new TreeMap<URLFP, ProxyCrawlHistoryItem>(); LOG.info("Reading Local Log File"); RandomAccessFile file = new RandomAccessFile(getActiveLogFilePath(), "rw"); // valid length indicator ... long validLength = 0; try { // skip header ... file.seek(LocalLogFileHeader.SIZE); validLength = file.getFilePointer(); // ok walk n items ... for (int itemIdx = 0; itemIdx < _header._itemCount && file.getChannel().position() <= _header._fileSize; ++itemIdx) { try { ProxyCrawlHistoryItem item = readItem(file); // update valid length ... validLength = file.getFilePointer(); // ok compute fingerprint for item ... URLFP fingerprintObject = URLUtils.getURLFPFromURL(item.getOriginalURL(), true); if (fingerprintObject == null) { LOG.error("Could not compute fingerprint for URL:" + item.getOriginalURL()); } else { itemMap.put(fingerprintObject, item); } } catch (IOException e) { LOG.error(CCStringUtils.stringifyException(e)); try { if (!seekToNextSyncBytesPos(file)) { LOG.error("Hit EOF While Seeking for next SyncByte Sequence!"); break; } else { LOG.info("Seek to Next SyncByte Succeeded! Continuing Load"); } } catch (IOException e2) { LOG.error(CCStringUtils.stringifyException(e2)); LOG.error("Got IO Exception Reading SyncBytes - Bailing!"); break; } } } } finally { if (file.length() > validLength) { LOG.warn("File Length is:" + file.length() + " Truncating Length to:" + validLength); file.setLength(validLength); } file.close(); } LOG.info("Done Reading Local Log File"); return itemMap; }
From source file:com.clustercontrol.agent.job.PublicKeyThread.java
/**
 * Removes one public key line from the exec user's authorized_keys file.
 * <p>
 * The file path is resolved from agent properties; the file is opened with an
 * exclusive {@link FileLock} (polled with {@code tryLock} up to
 * {@code FILELOCK_TIMEOUT / FILELOCK_WAIT} attempts), decoded as UTF-8, split
 * into lines, the last occurrence of {@code publicKey} is dropped, and the file
 * is rewritten and truncated in place.
 *
 * @param publicKey exact authorized_keys line to remove
 * @return true on success (or when key-file updates are disabled); false on
 *         missing configuration or lock timeout
 */
private synchronized boolean deleteKey(String publicKey) {
    m_log.debug("delete key start");

    if (SKIP_KEYFILE_UPDATE) {
        m_log.info("skipped deleting publicKey");
        return true;
    }

    Charset charset = Charset.forName("UTF-8");
    CharsetEncoder encoder = charset.newEncoder();
    CharsetDecoder decoder = charset.newDecoder();

    // Resolve the per-user authorized_keys path from agent configuration.
    String fileName = AgentProperties.getProperty(execUser.toLowerCase() + AUTHORIZED_KEY_PATH);
    if (fileName == null || fileName.length() == 0)
        return false;

    // Open the key file for in-place rewrite.
    File fi = new File(fileName);

    RandomAccessFile randomAccessFile = null;
    FileChannel channel = null;
    FileLock lock = null;
    boolean delete = false;
    try {
        randomAccessFile = new RandomAccessFile(fi, "rw");
        channel = randomAccessFile.getChannel();

        // Poll for an exclusive file lock instead of blocking indefinitely.
        for (int i = 0; i < (FILELOCK_TIMEOUT / FILELOCK_WAIT); i++) {
            if (null != (lock = channel.tryLock())) {
                break;
            }
            m_log.info("waiting for locked file... [" + (i + 1) + "/" + (FILELOCK_TIMEOUT / FILELOCK_WAIT)
                    + " : " + fileName + "]");
            Thread.sleep(FILELOCK_WAIT);
        }
        if (null == lock) {
            m_log.warn("file locking timeout.");
            return false;
        }

        // Guard the read-modify-write against concurrent key updates in-process.
        synchronized (authKeyLock) {
            // Read the entire file into a buffer sized to its current length.
            ByteBuffer buffer = ByteBuffer.allocate((int) channel.size());
            channel.read(buffer);
            // Switch the buffer from write mode to read mode (position -> 0).
            buffer.flip();
            // Decode the whole file as UTF-8 text.
            String contents = decoder.decode(buffer).toString();

            m_log.debug("contents " + contents.length() + " : " + contents);

            // Split into individual key lines.
            List<String> keyCheck = new ArrayList<String>();
            StringTokenizer tokenizer = new StringTokenizer(contents, "\n");
            while (tokenizer.hasMoreTokens()) {
                keyCheck.add(tokenizer.nextToken());
            }

            // Drop the last matching key line, if present.
            int s = keyCheck.lastIndexOf(publicKey);
            if (s != -1) {
                m_log.debug("remobe key : " + keyCheck.get(s));
                keyCheck.remove(s);
            }

            // Re-encode the surviving lines back into the same buffer.
            // NOTE(review): reuses the original-size buffer; safe only because the
            // rewritten content is never longer than the original — confirm.
            encoder.reset();
            buffer.clear();
            int i;
            if (keyCheck.size() > 0) {
                for (i = 0; i < keyCheck.size() - 1; i++) {
                    encoder.encode(CharBuffer.wrap(keyCheck.get(i) + "\n"), buffer, false);
                }
                // Final line written without a trailing newline, endOfInput=true.
                encoder.encode(CharBuffer.wrap(keyCheck.get(i)), buffer, true);
            }

            // Truncate and rewrite the file from offset 0.
            buffer.flip();
            channel.truncate(0);
            channel.position(0);
            channel.write(buffer);
        }

        delete = true;

    } catch (IOException e) {
        m_log.error(e.getMessage(), e);
    } catch (RuntimeException e) {
        m_log.error(e.getMessage(), e);
    } catch (InterruptedException e) {
        m_log.error(e.getMessage(), e);
    } finally {
        try {
            if (channel != null) {
                channel.close();
            }
            if (randomAccessFile != null) {
                randomAccessFile.close();
            }
            // NOTE(review): release() after channel.close() throws
            // ClosedChannelException (closing the channel already releases the
            // lock); the exception is swallowed below — consider releasing first.
            if (lock != null) {
                lock.release();
            }
        } catch (Exception e) {
            // NOTE(review): cleanup failures are silently ignored here; at
            // minimum a debug log would aid diagnosis.
        }
    }

    return delete;
}
From source file:okuyama.imdst.util.KeyManagerValueMap.java
/** * ??????????.<br>//www .j ava2s .c o m * ?MapMap????????super?Map???Key???<br> * ?????value??????Data????????<br> * Map?Key?????put?.<br> * ??Key??????????????????<br> * ???????super?Map??Map?super?Map?put?.<br> * ??super?Map?????????????????? * ?????????????????? * ????Vacuum.<br> */ public boolean vacuumData() { boolean ret = false; BufferedWriter tmpBw = null; RandomAccessFile raf = null; Map vacuumWorkMap = null; boolean userMap = false; String dataStr = null; Set entrySet = null; Iterator entryIte = null; String key = null; int putCounter = 0; synchronized (sync) { if (this.vacuumDiffDataList != null) { this.vacuumDiffDataList.clear(); this.vacuumDiffDataList = null; } this.vacuumDiffDataList = new FileBaseDataList(this.tmpVacuumeLineFile); vacuumExecFlg = true; } //vacuumWorkMap = new ConcurrentHashMap(super.size()); if (JavaSystemApi.getUseMemoryPercent() > 40) { userMap = true; vacuumWorkMap = new FileBaseDataMap(this.tmpVacuumeCopyMapDirs, super.size(), 0.20); } else { vacuumWorkMap = new HashMap(super.size()); } try { tmpBw = new BufferedWriter( new OutputStreamWriter(new FileOutputStream(new File(this.lineFile + ".tmp"), true), ImdstDefine.keyWorkFileEncoding), 1024 * 256); raf = new RandomAccessFile(new File(this.lineFile), "r"); entrySet = super.entrySet(); entryIte = entrySet.iterator(); while (entryIte.hasNext()) { Map.Entry obj = (Map.Entry) entryIte.next(); key = (String) obj.getKey(); if (key != null && (dataStr = (String) getNoCnv(key)) != null) { tmpBw.write(dataStr); tmpBw.write("\n"); putCounter++; if (mapValueInSize) { vacuumWorkMap.put(key, new Integer(putCounter).toString() + ":" + dataStr.length()); } else { vacuumWorkMap.put(key, new Integer(putCounter).toString()); } } } } catch (Exception e) { e.printStackTrace(); // StatusUtil.setStatusAndMessage(1, "KeyManagerValueMap - vacuumData - Error [" + e.getMessage() + "]"); } finally { try { // ????? 
if (StatusUtil.getStatus() == 0) { // flush SystemUtil.diskAccessSync(tmpBw); // close tmpBw.close(); // ????? synchronized (sync) { raf.close(); if (this.raf != null) this.raf.close(); if (this.bw != null) this.bw.close(); File dataFile = new File(this.lineFile); if (dataFile.exists()) { dataFile.delete(); } dataFile = null; // KeyMapKeyMap??? File tmpFile = new File(this.lineFile + ".tmp"); tmpFile.renameTo(new File(this.lineFile)); // super?Map? super.clear(); // workMap? Integer workMapData = null; Set workEntrySet = vacuumWorkMap.entrySet(); Iterator workEntryIte = workEntrySet.iterator(); String workKey = null; while (workEntryIte.hasNext()) { Map.Entry obj = (Map.Entry) workEntryIte.next(); workKey = (String) obj.getKey(); if (workKey != null) { if (mapValueInSize) { super.put(key, (String) vacuumWorkMap.get(workKey)); } else { super.put(workKey, new Integer((String) vacuumWorkMap.get(workKey))); } } } // ? this.nowKeySize = super.size(); // ?? this.initNoMemoryModeSetting(this.lineFile); // Vacuum????synchronized????? int vacuumDiffDataSize = this.vacuumDiffDataList.size(); if (vacuumDiffDataSize > 0) { Object[] diffObj = null; for (int i = 0; i < vacuumDiffDataSize; i++) { // ?? diffObj = (Object[]) this.vacuumDiffDataList.get(i); if (diffObj[0].equals("1")) { // put put(diffObj[1], diffObj[2]); } else if (diffObj[0].equals("2")) { // remove remove(diffObj[1]); } } } this.vacuumDiffDataList.clear(); this.vacuumDiffDataList = null; if (userMap) { ((FileBaseDataMap) vacuumWorkMap).finishClear(); } vacuumWorkMap = null; // Vacuum vacuumExecFlg = false; ret = true; } } } catch (Exception e2) { e2.printStackTrace(); try { File tmpFile = new File(this.lineFile + ".tmp"); if (tmpFile.exists()) { tmpFile.delete(); } } catch (Exception e3) { e3.printStackTrace(); // StatusUtil.setStatusAndMessage(1, "KeyManagerValueMap - vacuumData - Error [" + e3.getMessage() + e3.getMessage() + "]"); } } } return ret; }