List of usage examples for java.io File setLastModified
public boolean setLastModified(long time)
From source file:org.rhq.enterprise.server.core.plugin.ServerPluginScanner.java
/**
 * Scans the database for any new or updated server plugins and makes sure this server
 * has a plugin file on the filesystem for each of those new/updated server plugins.
 *
 * This also checks to see if the enabled flag changed for plugins that we already know about.
 * If it did, and its plugin container has the plugin already loaded, the plugin will be reloaded.
 *
 * @return a list of files that appear to be new or updated and should be deployed
 * @throws Exception if the database cannot be queried or a plugin file cannot be written
 */
private List<File> serverPluginScanDatabase() throws Exception {
    // these are plugins (name/path/md5/mtime) that have changed in the DB but are missing from the file system
    List<ServerPlugin> updatedPlugins = new ArrayList<ServerPlugin>();

    // the same list as above, only they are the files that are written to the filesystem and no longer missing
    List<File> updatedFiles = new ArrayList<File>();

    // process all the installed plugins
    ServerPluginsLocal serverPluginsManager = LookupUtil.getServerPlugins();
    List<ServerPlugin> installedPlugins = serverPluginsManager.getServerPlugins();
    for (ServerPlugin installedPlugin : installedPlugins) {
        String name = installedPlugin.getName();
        String path = installedPlugin.getPath();
        String md5 = installedPlugin.getMd5();
        long mtime = installedPlugin.getMtime();
        String version = installedPlugin.getVersion();
        ServerPluginType pluginType = new ServerPluginType(installedPlugin.getType());

        // let's see if we have this logical plugin on the filesystem (it may or may not be under the same filename)
        File expectedFile = new File(this.getServerPluginDir(), path);
        File currentFile = null; // will be non-null if we find that we have this plugin on the filesystem already
        PluginWithDescriptor pluginWithDescriptor = this.serverPluginsOnFilesystem.get(expectedFile);

        if (pluginWithDescriptor != null) {
            currentFile = expectedFile; // we have it where we are expected to have it
            if (!pluginWithDescriptor.plugin.getName().equals(name)
                || !pluginType.equals(pluginWithDescriptor.pluginType)) {
                // happens if someone wrote a plugin of one type but later changed it to a different type (or changed names)
                log.warn("For some reason, the server plugin file [" + expectedFile + "] is plugin ["
                    + pluginWithDescriptor.plugin.getName() + "] of type [" + pluginWithDescriptor.pluginType
                    + "] but the database says it should be [" + name + "] of type [" + pluginType + "]");
            } else {
                log.debug("File system and db agree on server plugin location for [" + expectedFile + "]");
            }
        } else {
            // the plugin might still be on the file system but under a different filename, see if we can find it
            for (Map.Entry<File, PluginWithDescriptor> cacheEntry : this.serverPluginsOnFilesystem.entrySet()) {
                if (cacheEntry.getValue().plugin.getName().equals(name)
                    && cacheEntry.getValue().pluginType.equals(pluginType)) {
                    currentFile = cacheEntry.getKey();
                    pluginWithDescriptor = cacheEntry.getValue();
                    log.info("Filesystem has a server plugin [" + name + "] at the file [" + currentFile
                        + "] which is different than where the DB thinks it should be [" + expectedFile + "]");
                    break; // we found it, no need to continue the loop
                }
            }
        }

        if (pluginWithDescriptor != null && currentFile != null && currentFile.exists()) {
            // build a representation of the DB row so it can be compared with what is on disk
            ServerPlugin dbPlugin = new ServerPlugin(name, path);
            dbPlugin.setType(pluginType.stringify());
            dbPlugin.setMd5(md5);
            dbPlugin.setVersion(version);
            dbPlugin.setMtime(mtime);

            ServerPlugin obsoletePlugin = ServerPluginDescriptorUtil.determineObsoletePlugin(dbPlugin,
                pluginWithDescriptor.plugin);

            if (obsoletePlugin == pluginWithDescriptor.plugin) { // yes use == for reference equality!
                // the DB row is newer than the file on disk - delete the stale file and mark for re-deploy
                StringBuilder logMsg = new StringBuilder();
                logMsg.append("Found server plugin [").append(name);
                logMsg.append("] in the DB that is newer than the one on the filesystem: ");
                logMsg.append("DB path=[").append(path);
                logMsg.append("]; file path=[").append(currentFile.getName());
                logMsg.append("]; DB MD5=[").append(md5);
                logMsg.append("]; file MD5=[").append(pluginWithDescriptor.plugin.getMd5());
                logMsg.append("]; DB version=[").append(version);
                logMsg.append("]; file version=[").append(pluginWithDescriptor.plugin.getVersion());
                logMsg.append("]; DB timestamp=[").append(new Date(mtime));
                logMsg.append("]; file timestamp=[").append(new Date(pluginWithDescriptor.plugin.getMtime()));
                logMsg.append("]");
                log.info(logMsg.toString());

                updatedPlugins.add(dbPlugin);

                if (currentFile.delete()) {
                    log.info("Deleted the obsolete server plugin file to be updated: " + currentFile);
                    this.serverPluginsOnFilesystem.remove(currentFile);
                } else {
                    log.warn("Failed to delete the obsolete (to-be-updated) server plugin: " + currentFile);
                }
            } else if (obsoletePlugin == null) {
                // the db is up-to-date, but update the cache so we don't check MD5 or parse the descriptor again
                boolean succeeded = currentFile.setLastModified(mtime);
                if (!succeeded && log.isDebugEnabled()) {
                    log.debug(
                        "Failed to set mtime to [" + new Date(mtime) + "] on file [" + currentFile + "].");
                }
                pluginWithDescriptor.plugin.setMtime(mtime);
                pluginWithDescriptor.plugin.setVersion(version);
                pluginWithDescriptor.plugin.setMd5(md5);
            } else {
                // the file on disk is the newer of the two; a later pass will push it into the DB
                log.info("It appears that the server plugin [" + dbPlugin
                    + "] in the database may be obsolete. If so, it will be updated later.");
            }
        } else {
            // the DB has a plugin we have no file for at all - mark it for deployment
            log.info("Found server plugin in the DB that we do not yet have: " + name);
            ServerPlugin plugin = new ServerPlugin(name, path, md5);
            plugin.setType(pluginType.stringify());
            plugin.setMtime(mtime);
            plugin.setVersion(version);
            updatedPlugins.add(plugin);
            this.serverPluginsOnFilesystem.remove(expectedFile); // paranoia, make sure the cache doesn't have this
        }
    }

    // write all our updated plugins to the file system
    if (!updatedPlugins.isEmpty()) {
        Connection conn = null;
        PreparedStatement ps = null;
        ResultSet rs = null;
        try {
            DataSource ds = LookupUtil.getDataSource();
            conn = ds.getConnection();
            ps = conn.prepareStatement("SELECT CONTENT FROM " + ServerPlugin.TABLE_NAME
                + " WHERE DEPLOYMENT = 'SERVER' AND STATUS = 'INSTALLED' AND NAME = ? AND PTYPE = ?");
            for (ServerPlugin plugin : updatedPlugins) {
                File file = new File(this.getServerPluginDir(), plugin.getPath());
                ps.setString(1, plugin.getName());
                ps.setString(2, plugin.getType());
                rs = ps.executeQuery();
                // NOTE(review): the rs.next() result is not checked - if the row vanished between the
                // scan above and this query, getBinaryStream will throw a raw SQLException. Confirm
                // whether a friendlier error is wanted here.
                rs.next();
                InputStream content = rs.getBinaryStream(1);
                // presumably StreamUtil.copy closes both streams - TODO confirm against its javadoc
                StreamUtil.copy(content, new FileOutputStream(file));
                rs.close();
                boolean succeeded = file.setLastModified(plugin.getMtime());// so our file matches the database mtime
                if (!succeeded && log.isDebugEnabled()) {
                    log.debug("Failed to set mtime to [" + plugin.getMtime() + "] on file [" + file + "].");
                }
                updatedFiles.add(file);

                // we are writing a new file to the filesystem, cache it since we know about it now
                cacheFilesystemServerPluginJar(file, null);
            }
        } finally {
            JDBCUtil.safeClose(conn, ps, rs);
        }
    }

    return updatedFiles;
}
From source file:org.andrewberman.sync.PDFDownloader.java
void retrievePDFsFromHTML() throws Exception { if (!baseDir.exists() || !baseDir.canWrite() || !baseDir.isDirectory()) { throw new Exception("Error: Destination is read-only or does not exist."); }//from ww w. j a v a 2s. c om String base = baseDir.getCanonicalPath() + sep; syncBibTex(); do { Thread.sleep(InheritMe.BETWEEN_PDF_DOWNLOADS_SLEEP_TIME); getArticleInfo(); /* * First thing's first, create folders and insert the URL links if necessary. */ if (subTags) { status("Creating folders and links..."); createTagFoldersAndLinks(tagSet); status("Folders and links created!"); // Thread.sleep(PAUSE_TIME/10); } List<CiteULikeReference> articlesWithoutPDFs = this.getArticlesWithoutPDFs(this.refs); List<CiteULikeReference> articlesWithPDFs = new ArrayList<CiteULikeReference>(); articlesWithPDFs.addAll(this.refs); articlesWithPDFs.removeAll(articlesWithoutPDFs); itemMax = articlesWithPDFs.size(); itemNum = 0; int i = -1; for (CiteULikeReference ref : articlesWithPDFs) { System.out.println(ref.userfiles); itemNum++; i++; waitOrExit(); setArticleLink("Current article ID: " + ref.article_id, ref.href); try { waitOrExit(); // Grab the article page's text to get the date. Date remote = null; // if (this.uploadNewer) { String articleContent = get(ref.href); remote = getStampFromArticlePage(articleContent); System.out.println("Remote timestamp: " + remote); // } status("Checking for existing file..."); for (String fileName : ref.userfiles.keySet()) { waitOrExit(); String fullPath = ref.userfiles.get(fileName); System.out.println(fileName + " -> " + fullPath); /* * Try and put the year first. 
*/ String[] bits = fileName.split("_"); String yearS = ""; String otherFilename = ""; String targetFilename = ""; if (bits.length == 3) { String flipped = bits[1] + "_" + bits[0] + "_" + bits[2]; if (flipFilename) { otherFilename = fileName; targetFilename = flipped; } else { targetFilename = fileName; otherFilename = flipped; } if (subYears) { yearS = String.valueOf(bits[1]); } } else { targetFilename = fileName; otherFilename = fileName; } /* * If we're organized by tags, add a destination file for each of this PDf's tags. * If not, then just add the one outputFile to the outputFiles array. */ ArrayList<File> outputFiles = new ArrayList<File>(); ArrayList<File> maybeNewFiles = new ArrayList<File>(); File altFile = null; File myFile = null; // Keep track of the newest file and its timestamp. File newestFile = null; long newestStamp = 0; if (subTags && ref.getTags().size() > 0) { for (String tag : ref.getTags()) { if (tag.equalsIgnoreCase("no-tag")) tag = ""; String curDir = base + tag + sep; curDir += yearS; myFile = new File(curDir + sep + targetFilename); altFile = new File(curDir + sep + otherFilename); if (myFile.exists()) { newestFile = returnNewer(newestFile, myFile); } else if (altFile.exists()) { try { altFile.renameTo(myFile); } catch (Exception e) { e.printStackTrace(); } // outputFiles.add(myFile); } else { outputFiles.add(myFile); } } } else { myFile = new File(base + yearS + sep + targetFilename); altFile = new File(base + yearS + sep + otherFilename); maybeNewFiles.add(myFile); maybeNewFiles.add(altFile); if (myFile.exists()) { newestFile = returnNewer(newestFile, myFile); } else if (altFile.exists()) { try { altFile.renameTo(myFile); } catch (Exception e) { e.printStackTrace(); System.exit(0); } // outputFiles.add(myFile); } else { outputFiles.add(myFile); } } // If we have a newest file, check against the server to see if it's much newer. 
if (newestFile != null && remote != null) { Date local = new Date(newestFile.lastModified()); long lT = local.getTime(); long rT = remote.getTime(); if (lT - rT > (1000) * (60) * (1)) // Fudge factor of 1 minute. { if (uploadNewer) { System.err.println("Local file is newer than remote! Uploading..."); status("Local file was modified! Uploading..."); // OK. Since CiteULike now uses hashing to evaluate the identity of files, we have to first delete // the existing attachment with this filename. String url = this.BASE_URL + "personal_pdf_delete?"; url += "username=" + this.username; url += "&article_id=" + ref.article_id; // Parse the userfile_id from the filename: String userFileId = ref.userfileIds.get(fileName); url += "&userfile_id=" + userFileId; System.out.println(url); get(url); uploadPDF(ref.article_id, newestFile); // Re-collect the timestamp, and re-stamp the local file. This is done so they match after re-uploading. String newContent = get(ref.href); Date remote2 = getStampFromArticlePage(newContent); newestFile.setLastModified(remote2.getTime()); } } } if (outputFiles.size() == 0) { status("Already up-to-date!"); utd++; continue; } Thread.sleep(200); /* * Download the PDF to the first file. */ waitOrExit(); status("Downloading..."); File f = outputFiles.remove(0); try { String fileUrl = this.BASE_URL + fullPath; this.retriesLeft = 2; downloadURLToFile(fileUrl, f); if (remote != null) f.setLastModified(remote.getTime()); } catch (Exception e) { e.printStackTrace(); f.delete(); throw e; } dl++; /* * Go through rest of tags, and copy file accordingly. * NOTE: This is only entered if this file needs to be copied over locally. 
*/ for (int j = 0; j < outputFiles.size(); j++) { status("Copying PDF..."); File f2 = outputFiles.get(j); if (f2.exists()) { if (f2.lastModified() > newestStamp) { newestFile = f2; newestStamp = f2.lastModified(); } continue; } f2.getParentFile().mkdirs(); f2.createNewFile(); RandomAccessFile in = new RandomAccessFile(f, "r"); RandomAccessFile out = new RandomAccessFile(f2, "rw"); byte[] b = new byte[(int) in.length()]; in.readFully(b); out.write(b); in.close(); out.close(); f2.setLastModified(remote.getTime()); } } } catch (Exception e) { err++; e.printStackTrace(); status("Failed. See the Java console for more info."); Thread.sleep(PAUSE_TIME / 2); continue; } } } while (this.refs.size() > 0); this.pageNum = 0; status("Finished. " + dl + " new, " + utd + " up-to-date and " + err + " failed."); out.println("Done!"); }
From source file:org.opencms.synchronize.CmsSynchronize.java
/**
 * Exports a resource from the VFS to the FS and updates the
 * synchronization lists.<p>
 *
 * @param res the resource to be exported
 *
 * @throws CmsException if something goes wrong
 */
private void exportToRfs(CmsResource res) throws CmsException {
    CmsFile vfsFile;
    File fsFile;
    String resourcename;
    // to get the name of the file in the FS, we must look it up in the
    // sync list. This is necessary, since the VFS could use a translated
    // filename.
    CmsSynchronizeList sync = (CmsSynchronizeList) m_syncList.get(translate(m_cms.getSitePath(res)));
    // if no entry in the sync list was found, it's a new resource and we
    // can use the name of the VFS resource.
    if (sync != null) {
        resourcename = sync.getResName();
    } else {
        // otherwise use the original non-translated name
        resourcename = m_cms.getSitePath(res);
        // the parent folder could contain translated names as well, so
        // make a lookup in the sync list to get its original
        // non-translated name
        String parent = CmsResource.getParentFolder(resourcename);
        CmsSynchronizeList parentSync = (CmsSynchronizeList) m_newSyncList.get(parent);
        // use the non-translated pathname
        if (parentSync != null) {
            resourcename = parentSync.getResName() + res.getName();
        }
    }
    // folder paths must carry a trailing slash before the RFS lookup
    if ((res.isFolder()) && (!resourcename.endsWith("/"))) {
        resourcename += "/";
    }
    fsFile = getFileInRfs(resourcename);
    try {
        // if the resource is marked for deletion, do not export it!
        if (!res.getState().isDeleted()) {
            // report the running count for this export step
            m_report.print(org.opencms.report.Messages.get()
                .container(org.opencms.report.Messages.RPT_SUCCESSION_1, String.valueOf(m_count++)),
                I_CmsReport.FORMAT_NOTE);
            if (res.isFile()) {
                // if it's a file, export the file to the FS
                m_report.print(Messages.get().container(Messages.RPT_EXPORT_FILE_0), I_CmsReport.FORMAT_NOTE);
                m_report.print(org.opencms.report.Messages.get()
                    .container(org.opencms.report.Messages.RPT_ARGUMENT_1, m_cms.getSitePath(res)));
                m_report.print(Messages.get().container(Messages.RPT_TO_FS_AS_0), I_CmsReport.FORMAT_NOTE);
                m_report.print(
                    org.opencms.report.Messages.get().container(org.opencms.report.Messages.RPT_ARGUMENT_1,
                        fsFile.getAbsolutePath().replace('\\', '/')));
                m_report.print(
                    org.opencms.report.Messages.get().container(org.opencms.report.Messages.RPT_DOTS_0));
                // create the resource if necessary
                if (!fsFile.exists()) {
                    createNewLocalFile(fsFile);
                }
                // write the file content to the FS
                vfsFile = m_cms.readFile(m_cms.getSitePath(res), CmsResourceFilter.IGNORE_EXPIRATION);
                try {
                    writeFileByte(vfsFile.getContents(), fsFile);
                } catch (IOException e) {
                    throw new CmsSynchronizeException(Messages.get().container(Messages.ERR_WRITE_FILE_0));
                }
                // now check if there is some external method to be called
                // which should modify the exported resource in the FS
                Iterator i = m_synchronizeModifications.iterator();
                while (i.hasNext()) {
                    try {
                        ((I_CmsSynchronizeModification) i.next()).modifyFs(m_cms, vfsFile, fsFile);
                    } catch (CmsSynchronizeException e) {
                        if (LOG.isWarnEnabled()) {
                            LOG.warn(Messages.get().getBundle().key(Messages.LOG_SYNCHRONIZE_EXPORT_FAILED_1,
                                res.getRootPath()), e);
                        }
                        break;
                    }
                }
                // stamp the exported file with the VFS modification date so the
                // sync lists below record matching timestamps
                // NOTE(review): the boolean result of setLastModified is ignored;
                // a silent failure here would leave the sync list slightly stale.
                fsFile.setLastModified(res.getDateLastModified());
            } else {
                m_report.print(Messages.get().container(Messages.RPT_EXPORT_FOLDER_0), I_CmsReport.FORMAT_NOTE);
                m_report.print(org.opencms.report.Messages.get()
                    .container(org.opencms.report.Messages.RPT_ARGUMENT_1, m_cms.getSitePath(res)));
                m_report.print(Messages.get().container(Messages.RPT_TO_FS_AS_0), I_CmsReport.FORMAT_NOTE);
                m_report.print(
                    org.opencms.report.Messages.get().container(org.opencms.report.Messages.RPT_ARGUMENT_1,
                        fsFile.getAbsolutePath().replace('\\', '/')));
                m_report.print(
                    org.opencms.report.Messages.get().container(org.opencms.report.Messages.RPT_DOTS_0));
                // it's a folder, so create a folder in the FS
                fsFile.mkdirs();
            }
            // add resource to synchronization list
            CmsSynchronizeList syncList = new CmsSynchronizeList(resourcename, translate(resourcename),
                res.getDateLastModified(), fsFile.lastModified());
            m_newSyncList.put(translate(resourcename), syncList);
            // and remove it from the old one
            m_syncList.remove(translate(resourcename));
            m_report.println(org.opencms.report.Messages.get().container(org.opencms.report.Messages.RPT_OK_0),
                I_CmsReport.FORMAT_OK);
        }
        // free mem
        vfsFile = null;
    } catch (CmsException e) {
        throw new CmsSynchronizeException(e.getMessageContainer(), e);
    }
}
From source file:io.pyd.synchro.SyncJob.java
protected Map<String, Object[]> applyChanges(Map<String, Object[]> changes, IProgressMonitor monitor, MonitorTaskType taskType) {// w w w. jav a 2 s . co m Set<Entry<String, Object[]>> changesEntrySet = changes.entrySet(); Iterator<Map.Entry<String, Object[]>> it = changesEntrySet.iterator(); Map<String, Object[]> notApplied = createMapDBFile("notApplied"); // Make sure to apply those one at the end Map<String, Object[]> moves = createMapDBFile("moves"); Map<String, Object[]> deletes = createMapDBFile("deletes"); RestRequest rest = this.getRequest(); int total = changes.size(); int work = 0; if (monitor != null) { monitor.begin(currentJobNodeID, getMonitorTaskName(taskType)); } while (it.hasNext()) { notifyProgressMonitor(monitor, total, work++); Map.Entry<String, Object[]> entry = it.next(); String k = entry.getKey(); Object[] value = entry.getValue().clone(); Integer v = (Integer) value[0]; Node n = (Node) value[1]; if (n == null) continue; if (this.interruptRequired) { value[2] = STATUS_INTERRUPTED; notApplied.put(k, value); continue; } // Thread.sleep(2000); try { Map<String, Node> tmpNodes = findNodesInTmpSnapshot(k); if (n.isLeaf() && value[2].equals(STATUS_CONFLICT_SOLVED)) { if (v.equals(TASK_SOLVE_KEEP_MINE)) { v = TASK_REMOTE_PUT_CONTENT; } else if (v.equals(TASK_SOLVE_KEEP_THEIR)) { v = TASK_LOCAL_GET_CONTENT; } else if (v.equals(TASK_SOLVE_KEEP_BOTH)) { // COPY LOCAL FILE AND GET REMOTE COPY File origFile = new File(currentLocalFolder, k); File targetFile = new File(currentLocalFolder, k + ".mine"); InputStream in = new FileInputStream(origFile); OutputStream out = new FileOutputStream(targetFile); byte[] buf = new byte[1024]; int len; while ((len = in.read(buf)) > 0) { out.write(buf, 0, len); } in.close(); out.close(); v = TASK_LOCAL_GET_CONTENT; } } if (v == TASK_LOCAL_GET_CONTENT) { if (direction.equals("up")) continue; if (tmpNodes.get("local") != null && tmpNodes.get("remote") != null) { if (tmpNodes.get("local").getPropertyValue("md5") == null) { 
updateLocalMD5(tmpNodes.get("local")); } if (tmpNodes.get("local").getPropertyValue("md5") != null && tmpNodes.get("local") .getPropertyValue("md5").equals(tmpNodes.get("remote").getPropertyValue("md5"))) { continue; } } Node node = new Node(Node.NODE_TYPE_ENTRY, "", null); node.setPath(k); File targetFile = new File(currentLocalFolder, k); this.logChange(getMessage("job_log_downloading"), k); try { this.updateNode(node, targetFile, n); } catch (IllegalStateException e) { if (this.statRemoteFile(node, "file", rest) == null) continue; else throw e; } if (!targetFile.exists() || targetFile.length() != Integer.parseInt(n.getPropertyValue("bytesize"))) { JSONObject obj = this.statRemoteFile(node, "file", rest); if (obj == null || obj.get("size").equals(0)) continue; else throw new Exception("Error while downloading file from server"); } if (n != null) { targetFile.setLastModified(n.getLastModified().getTime()); } countFilesDownloaded++; } else if (v == TASK_LOCAL_MKDIR) { if (direction.equals("up")) continue; File f = new File(currentLocalFolder, k); if (!f.exists()) { this.logChange(getMessage("job_log_mkdir"), k); boolean res = f.mkdirs(); if (!res) { throw new Exception("Error while creating local folder"); } countResourcesSynchronized++; } } else if (v == TASK_LOCAL_REMOVE) { if (direction.equals("up")) continue; deletes.put(k, value); } else if (v == TASK_REMOTE_REMOVE) { if (direction.equals("down")) continue; deletes.put(k, value); } else if (v == TASK_REMOTE_MKDIR) { if (direction.equals("down")) continue; this.logChange(getMessage("job_log_mkdir_remote"), k); Node currentDirectory = new Node(Node.NODE_TYPE_ENTRY, "", null); int lastSlash = k.lastIndexOf("/"); currentDirectory.setPath(k.substring(0, lastSlash)); RestStateHolder.getInstance().setDirectory(currentDirectory); rest.getStatusCodeForRequest(AjxpAPI.getInstance().getMkdirUri(k.substring(lastSlash + 1))); JSONObject object = rest.getJSonContent(AjxpAPI.getInstance().getStatUri(k)); if 
(!object.has("mtime")) { throw new Exception("Could not create remote folder"); } countResourcesSynchronized++; } else if (v == TASK_REMOTE_PUT_CONTENT) { if (direction.equals("down")) continue; if (tmpNodes.get("local") != null && tmpNodes.get("remote") != null) { if (tmpNodes.get("local").getPropertyValue("md5") == null) { updateLocalMD5(tmpNodes.get("local")); } if (tmpNodes.get("local").getPropertyValue("md5") != null && tmpNodes.get("local") .getPropertyValue("md5").equals(tmpNodes.get("remote").getPropertyValue("md5"))) { continue; } } this.logChange(getMessage("job_log_uploading"), k); Node currentDirectory = new Node(Node.NODE_TYPE_ENTRY, "", null); int lastSlash = k.lastIndexOf("/"); currentDirectory.setPath(k.substring(0, lastSlash)); RestStateHolder.getInstance().setDirectory(currentDirectory); File sourceFile = new File(currentLocalFolder, k); if (!sourceFile.exists()) { // Silently ignore, or it will continously try to // reupload it. continue; } boolean checked = false; if (sourceFile.length() == 0) { rest.getStringContent(AjxpAPI.getInstance().getMkfileUri(sourceFile.getName())); } else { checked = this.synchronousUP(currentDirectory, sourceFile, n); } if (!checked) { JSONObject object = null; String path = n.getPath(true); try { object = rest.getJSonContent(AjxpAPI.getInstance().getStatUri(path)); } catch (Exception e) { Logger.getRootLogger().error("Error during uploading file: " + path, e); continue; } if (object != null && (!object.has("size") || object.getInt("size") != (int) sourceFile.length())) { throw new Exception("Could not upload file to the server"); } } countFilesUploaded++; } else if (v == TASK_DO_NOTHING && value[2] == STATUS_CONFLICT) { // Recheck that it's a real conflict? 
this.logChange(getMessage("job_log_conflict"), k); notApplied.put(k, value); countConflictsDetected++; } else if (v == TASK_LOCAL_MOVE_FILE || v == TASK_REMOTE_MOVE_FILE) { if (v == TASK_LOCAL_MOVE_FILE && direction.equals("up")) continue; if (v == TASK_REMOTE_MOVE_FILE && direction.equals("down")) continue; moves.put(k, value); } } catch (FileNotFoundException ex) { addSyncDetailMessage(k, ex); ex.printStackTrace(); countResourcesErrors++; // Do not put in the notApplied again, otherwise it will // indefinitely happen. } catch (Exception e) { addSyncDetailMessage(k, e); Logger.getRootLogger().error("Synchro", e); countResourcesErrors++; value[2] = STATUS_ERROR; notApplied.put(k, value); } } if (monitor != null) { monitor.end(currentJobNodeID); monitor.begin(currentJobNodeID, getMonitorTaskName(taskType) + " - " + getMonitorTaskName(MonitorTaskType.APPLY_CHANGES_MOVES)); } // APPLY MOVES Set<Entry<String, Object[]>> movesEntrySet = moves.entrySet(); Iterator<Map.Entry<String, Object[]>> mIt = movesEntrySet.iterator(); total = moves.size(); work = 0; while (mIt.hasNext()) { notifyProgressMonitor(monitor, total, work++); Map.Entry<String, Object[]> entry = mIt.next(); String k = entry.getKey(); Object[] value = entry.getValue().clone(); Integer v = (Integer) value[0]; Node n = (Node) value[1]; if (this.interruptRequired) { value[2] = STATUS_INTERRUPTED; notApplied.put(k, value); continue; } try { if (v == TASK_LOCAL_MOVE_FILE && value.length == 4) { this.logChange("Moving resource locally", k); Node dest = (Node) value[3]; File origFile = new File(currentLocalFolder, n.getPath()); if (!origFile.exists()) { // Cannot move a non-existing file! Download instead! 
value[0] = TASK_LOCAL_GET_CONTENT; value[1] = dest; value[2] = STATUS_TODO; notApplied.put(dest.getPath(true), value); continue; } File destFile = new File(currentLocalFolder, dest.getPath()); origFile.renameTo(destFile); if (!destFile.exists()) { throw new Exception("Error while creating " + dest.getPath()); } countResourcesSynchronized++; } else if (v == TASK_REMOTE_MOVE_FILE && value.length == 4) { this.logChange("Moving resource remotely", k); Node dest = (Node) value[3]; JSONObject object = rest.getJSonContent(AjxpAPI.getInstance().getStatUri(n.getPath())); if (!object.has("size")) { value[0] = TASK_REMOTE_PUT_CONTENT; value[1] = dest; value[2] = STATUS_TODO; notApplied.put(dest.getPath(true), value); continue; } rest.getStatusCodeForRequest(AjxpAPI.getInstance().getRenameUri(n, dest)); object = rest.getJSonContent(AjxpAPI.getInstance().getStatUri(dest.getPath())); if (!object.has("size")) { throw new Exception("Could not move remote file to " + dest.getPath()); } countResourcesSynchronized++; } } catch (FileNotFoundException ex) { addSyncDetailMessage(k, ex); ex.printStackTrace(); countResourcesErrors++; // Do not put in the notApplied again, otherwise it will // indefinitely happen. 
} catch (Exception e) { addSyncDetailMessage(k, e); Logger.getRootLogger().error("Synchro", e); countResourcesErrors++; value[2] = STATUS_ERROR; notApplied.put(k, value); } } // APPLY DELETES if (monitor != null) { monitor.end(currentJobNodeID); monitor.begin(currentJobNodeID, getMonitorTaskName(taskType) + " - " + getMonitorTaskName(MonitorTaskType.APPLY_CHANGES_DELETES)); } Set<Entry<String, Object[]>> deletesEntrySet = deletes.entrySet(); Iterator<Map.Entry<String, Object[]>> dIt = deletesEntrySet.iterator(); total = deletes.size(); work = 0; while (dIt.hasNext()) { notifyProgressMonitor(monitor, total, work++); Map.Entry<String, Object[]> entry = dIt.next(); String k = entry.getKey(); Object[] value = entry.getValue().clone(); Integer v = (Integer) value[0]; // Node n = (Node)value[1]; if (this.interruptRequired) { value[2] = STATUS_INTERRUPTED; notApplied.put(k, value); continue; } try { if (v == TASK_LOCAL_REMOVE) { this.logChange(getMessage("job_log_rmlocal"), k); File f = new File(currentLocalFolder, k); if (f.exists()) { boolean res = f.delete(); if (!res) { throw new Exception("Error while removing local resource: " + f.getPath()); } countResourcesSynchronized++; } } else if (v == TASK_REMOTE_REMOVE) { this.logChange(getMessage("job_log_rmremote"), k); Node currentDirectory = new Node(Node.NODE_TYPE_ENTRY, "", null); int lastSlash = k.lastIndexOf("/"); currentDirectory.setPath(k.substring(0, lastSlash)); RestStateHolder.getInstance().setDirectory(currentDirectory); rest.getStatusCodeForRequest(AjxpAPI.getInstance().getDeleteUri(k)); JSONObject object = rest.getJSonContent(AjxpAPI.getInstance().getStatUri(k)); if (object.has("mtime")) { // Still exists, should be empty! 
throw new Exception("Could not remove the resource from the server"); } countResourcesSynchronized++; } } catch (FileNotFoundException ex) { addSyncDetailMessage(k, ex); ex.printStackTrace(); countResourcesErrors++; // Do not put in the notApplied again, otherwise it will // indefinitely happen. } catch (Exception e) { addSyncDetailMessage(k, e); Logger.getRootLogger().error("Synchro", e); countResourcesErrors++; value[2] = STATUS_ERROR; notApplied.put(k, value); } } if (monitor != null) { monitor.end(currentJobNodeID); } rest.release(); return notApplied; }
From source file:com.moviejukebox.MovieJukebox.java
private void generateLibrary() throws Throwable { /**//w w w . j a v a2 s.c o m * ****************************************************************************** * @author Gabriel Corneanu * * The tools used for parallel processing are NOT thread safe (some operations are, but not all) therefore all are added to * a container which is instantiated one per thread * * - xmlWriter looks thread safe<br> * - htmlWriter was not thread safe<br> * - getTransformer is fixed (simple workaround)<br> * - MovieImagePlugin : not clear, made thread specific for safety<br> * - MediaInfoScanner : not sure, made thread specific * * Also important: <br> * The library itself is not thread safe for modifications (API says so) it could be adjusted with concurrent versions, but * it needs many changes it seems that it is safe for subsequent reads (iterators), so leave for now... * * - DatabasePluginController is also fixed to be thread safe (plugins map for each thread) * */ class ToolSet { private final MovieImagePlugin imagePlugin = MovieJukebox .getImagePlugin(getProperty("mjb.image.plugin", "com.moviejukebox.plugin.DefaultImagePlugin")); private final MovieImagePlugin backgroundPlugin = MovieJukebox.getBackgroundPlugin( getProperty("mjb.background.plugin", "com.moviejukebox.plugin.DefaultBackgroundPlugin")); private final MediaInfoScanner miScanner = new MediaInfoScanner(); private final OpenSubtitlesPlugin subtitlePlugin = new OpenSubtitlesPlugin(); private final TrailerScanner trailerScanner = new TrailerScanner(); // FANART.TV TV Artwork Scanners private final ArtworkScanner clearArtScanner = new FanartTvScanner(ArtworkType.CLEARART); private final ArtworkScanner clearLogoScanner = new FanartTvScanner(ArtworkType.CLEARLOGO); private final ArtworkScanner tvThumbScanner = new FanartTvScanner(ArtworkType.TVTHUMB); private final ArtworkScanner seasonThumbScanner = new FanartTvScanner(ArtworkType.SEASONTHUMB); // FANART.TV Movie Artwork Scanners private final ArtworkScanner 
movieArtScanner = new FanartTvScanner(ArtworkType.MOVIEART); private final ArtworkScanner movieLogoScanner = new FanartTvScanner(ArtworkType.MOVIELOGO); private final ArtworkScanner movieDiscScanner = new FanartTvScanner(ArtworkType.MOVIEDISC); } final ThreadLocal<ToolSet> threadTools = new ThreadLocal<ToolSet>() { @Override protected ToolSet initialValue() { return new ToolSet(); } }; final MovieJukeboxXMLReader xmlReader = new MovieJukeboxXMLReader(); final MovieJukeboxXMLWriter xmlWriter = new MovieJukeboxXMLWriter(); final MovieJukeboxHTMLWriter htmlWriter = new MovieJukeboxHTMLWriter(); File mediaLibraryRoot = new File(movieLibraryRoot); final File jukeboxDetailsRootFile = new FileTools.FileEx(jukebox.getJukeboxRootLocationDetails()); MovieListingPlugin listingPlugin = getListingPlugin( getProperty("mjb.listing.plugin", "com.moviejukebox.plugin.MovieListingPluginBase")); videoimageDownload = PropertiesUtil.getBooleanProperty("mjb.includeVideoImages", Boolean.FALSE); bannerDownload = PropertiesUtil.getBooleanProperty("mjb.includeWideBanners", Boolean.FALSE); photoDownload = PropertiesUtil.getBooleanProperty("mjb.includePhoto", Boolean.FALSE); backdropDownload = PropertiesUtil.getBooleanProperty("mjb.includeBackdrop", Boolean.FALSE); boolean processExtras = PropertiesUtil.getBooleanProperty("filename.extras.process", Boolean.TRUE); boolean moviejukeboxListing = PropertiesUtil.getBooleanProperty("mjb.listing.generate", Boolean.FALSE); // Multi-thread: Processing thread settings maxThreadsProcess = Integer.parseInt(getProperty("mjb.MaxThreadsProcess", "0")); if (maxThreadsProcess <= 0) { maxThreadsProcess = Runtime.getRuntime().availableProcessors(); } maxThreadsDownload = Integer.parseInt(getProperty("mjb.MaxThreadsDownload", "0")); if (maxThreadsDownload <= 0) { maxThreadsDownload = maxThreadsProcess; } LOG.info("Using {} processing threads and {} downloading threads...", maxThreadsProcess, maxThreadsDownload); if (maxThreadsDownload + maxThreadsProcess == 2) { 
// Display the note about the performance, otherwise assume that the user knows how to change // these parameters as they aren't set to the minimum LOG.info("See README.TXT for increasing performance using these settings."); } /* * ****************************************************************************** * * PART 1 : Preparing the temporary environment * */ SystemTools.showMemory(); LOG.info("Preparing environment..."); // create the ".mjbignore" and ".no_photo.nmj" file in the jukebox folder try { FileTools.makeDirs(jukebox.getJukeboxRootLocationDetailsFile()); new File(jukebox.getJukeboxRootLocationDetailsFile(), ".mjbignore").createNewFile(); FileTools.addJukeboxFile(".mjbignore"); if (getBooleanProperty("mjb.nmjCompliant", Boolean.FALSE)) { new File(jukebox.getJukeboxRootLocationDetailsFile(), ".no_photo.nmj").createNewFile(); FileTools.addJukeboxFile(".no_photo.nmj"); } } catch (IOException error) { LOG.error("Failed creating jukebox directory. Ensure this directory is read/write!"); LOG.error(SystemTools.getStackTrace(error)); return; } // Delete the existing filecache.txt try { (new File("filecache.txt")).delete(); } catch (Exception error) { LOG.error("Failed to delete the filecache.txt file."); LOG.error(SystemTools.getStackTrace(error)); return; } // Save the current state of the preferences to the skin directory for use by the skin // The forceHtmlOverwrite is set by the user or by the JukeboxProperties if there has been a skin change if (PropertiesUtil.getBooleanProperty("mjb.forceHTMLOverwrite", Boolean.FALSE) || !(new File(PropertiesUtil.getPropertiesFilename(Boolean.TRUE))).exists()) { PropertiesUtil.writeProperties(); } SystemTools.showMemory(); LOG.info("Initializing..."); try { FileTools.deleteDir(jukebox.getJukeboxTempLocation()); } catch (Exception error) { LOG.error( "Failed deleting the temporary jukebox directory ({}), please delete this manually and try again", jukebox.getJukeboxTempLocation()); return; } // Try and create the temp 
directory LOG.debug("Creating temporary jukebox location: {}", jukebox.getJukeboxTempLocation()); FileTools.makeDirs(jukebox.getJukeboxTempLocationDetailsFile()); /* * ****************************************************************************** * * PART 2 : Scan movie libraries for files... * */ SystemTools.showMemory(); LOG.info("Scanning library directory {}", mediaLibraryRoot); LOG.info("Jukebox output goes to {}", jukebox.getJukeboxRootLocation()); if (PropertiesUtil.getBooleanProperty("mjb.dirHash", Boolean.FALSE)) { // Add all folders 2 deep to the fileCache FileTools.fileCache.addDir(jukeboxDetailsRootFile, 2); /* * TODO: Need to watch for any issues when we have scanned the whole * jukebox, such as the watched folder, NFO folder, etc now existing * in the cache */ } else { // If the dirHash is not needed, just scan to the root level plus the watched and people folders FileTools.fileCache.addDir(jukeboxDetailsRootFile, 0); // Add the watched folder File watchedFileHandle = new FileTools.FileEx( jukebox.getJukeboxRootLocationDetails() + File.separator + "Watched"); FileTools.fileCache.addDir(watchedFileHandle, 0); // Add the people folder if needed if (isValidString(peopleFolder)) { File peopleFolderHandle = new FileTools.FileEx( jukebox.getJukeboxRootLocationDetails() + File.separator + peopleFolder); FileTools.fileCache.addDir(peopleFolderHandle, 0); } } ThreadExecutor<Void> tasks = new ThreadExecutor<>(maxThreadsProcess, maxThreadsDownload); final Library library = new Library(); for (final MediaLibraryPath mediaLibraryPath : mediaLibraryPaths) { // Multi-thread parallel processing tasks.submit(new Callable<Void>() { @Override public Void call() { LOG.debug("Scanning media library {}", mediaLibraryPath.getPath()); MovieDirectoryScanner mds = new MovieDirectoryScanner(); // scan uses synchronized method Library.addMovie mds.scan(mediaLibraryPath, library); System.out.print("\n"); return null; } }); } tasks.waitFor(); SystemTools.showMemory(); // If the 
user asked to preserve the existing movies, scan the output directory as well if (isJukeboxPreserve()) { LOG.info("Scanning output directory for additional videos"); OutputDirectoryScanner ods = new OutputDirectoryScanner(jukebox.getJukeboxRootLocationDetails()); ods.scan(library); } // Now that everything's been scanned, add all extras to library library.mergeExtras(); LOG.info("Found {} videos in your media library", library.size()); LOG.info("Stored {} files in the info cache", FileTools.fileCache.size()); if (enableWatchTraktTv) { // if Trakt.TV watched is enabled then refresh if necessary and preLoad watched data TraktTV.getInstance().initialize().refreshIfNecessary().preloadWatched(); } JukeboxStatistics.setJukeboxTime(JukeboxStatistics.JukeboxTimes.SCAN_END, System.currentTimeMillis()); JukeboxStatistics.setStatistic(JukeboxStatistic.VIDEOS, library.size()); tasks.restart(); if (!library.isEmpty()) { // Issue 1882: Separate index files for each category boolean separateCategories = PropertiesUtil.getBooleanProperty("mjb.separateCategories", Boolean.FALSE); LOG.info("Searching for information on the video files..."); int movieCounter = 0; for (final Movie movie : library.values()) { // Issue 997: Skip the processing of extras if not required if (movie.isExtra() && !processExtras) { continue; } final int count = ++movieCounter; final String movieTitleExt = movie.getOriginalTitle() + (movie.isTVShow() ? (" [Season " + movie.getSeason() + "]") : "") + (movie.isExtra() ? 
" [Extra]" : ""); if (movie.isTVShow()) { JukeboxStatistics.increment(JukeboxStatistic.TVSHOWS); } else { JukeboxStatistics.increment(JukeboxStatistic.MOVIES); } // Multi-thread parallel processing tasks.submit(new Callable<Void>() { @Override public Void call() throws FileNotFoundException, XMLStreamException { ToolSet tools = threadTools.get(); // Change the output message depending on the existance of the XML file boolean xmlExists = FileTools.fileCache .fileExists(StringTools.appendToPath(jukebox.getJukeboxRootLocationDetails(), movie.getBaseName()) + EXT_DOT_XML); if (xmlExists) { LOG.info("Checking existing video: {}", movieTitleExt); JukeboxStatistics.increment(JukeboxStatistic.EXISTING_VIDEOS); } else { LOG.info("Processing new video: {}", movieTitleExt); JukeboxStatistics.increment(JukeboxStatistic.NEW_VIDEOS); } if (ScanningLimit.getToken()) { // First get movie data (title, year, director, genre, etc...) library.toggleDirty( updateMovieData(xmlReader, tools.miScanner, jukebox, movie, library)); if (!movie.getMovieType().equals(Movie.REMOVE)) { // Check for watched and unwatched files if (enableWatchScanner || enableWatchTraktTv) { // Issue 1938 library.toggleDirty(WatchedScanner.checkWatched(jukebox, movie)); } // Get subtitle tools.subtitlePlugin.generate(movie); // Get Trailers if (trailersScannerEnable) { tools.trailerScanner.getTrailers(movie); } // Then get this movie's poster LOG.debug("Updating poster for: {}", movieTitleExt); updateMoviePoster(jukebox, movie); // Download episode images if required if (videoimageDownload) { VideoImageScanner.scan(tools.imagePlugin, jukebox, movie); } // Get FANART only if requested // Note that the FanartScanner will check if the file is newer / different if ((fanartMovieDownload && !movie.isTVShow()) || (fanartTvDownload && movie.isTVShow())) { FanartScanner.scan(tools.backgroundPlugin, jukebox, movie); } // Get BANNER if requested and is a TV show if (bannerDownload && movie.isTVShow()) { if 
(!BannerScanner.scan(tools.imagePlugin, jukebox, movie)) { updateTvBanner(jukebox, movie, tools.imagePlugin); } } // Get ClearART/LOGOS/etc if (movie.isTVShow()) { // Only scan using the TV Show artwork scanners tools.clearArtScanner.scan(jukebox, movie); tools.clearLogoScanner.scan(jukebox, movie); tools.tvThumbScanner.scan(jukebox, movie); tools.seasonThumbScanner.scan(jukebox, movie); } else { // Only scan using the Movie artwork scanners tools.movieArtScanner.scan(jukebox, movie); tools.movieDiscScanner.scan(jukebox, movie); tools.movieLogoScanner.scan(jukebox, movie); } for (int i = 0; i < footerCount; i++) { if (FOOTER_ENABLE.get(i)) { updateFooter(jukebox, movie, tools.imagePlugin, i, forceFooterOverwrite || movie.isDirty()); } } // If we are multipart, we need to make sure all archives have expanded names. if (PropertiesUtil.getBooleanProperty("mjb.scanner.mediainfo.rar.extended.url", Boolean.FALSE)) { Collection<MovieFile> partsFiles = movie.getFiles(); for (MovieFile mf : partsFiles) { String filename; filename = mf.getFile().getAbsolutePath(); // Check the filename is a mediaInfo extension (RAR, ISO) ? 
if (tools.miScanner.extendedExtension(filename) == Boolean.TRUE) { if (mf.getArchiveName() == null) { LOG.debug("MovieJukebox: Attempting to get archive name for {}", filename); String archive = tools.miScanner.archiveScan(filename); if (archive != null) { LOG.debug("MovieJukebox: Setting archive name to {}", archive); mf.setArchiveName(archive); } // got archivename } // not already set } // is extension } // for all files } // property is set if (!movie.isDirty()) { ScanningLimit.releaseToken(); } } else { ScanningLimit.releaseToken(); library.remove(movie); } LOG.info(LOG_FINISHED, movieTitleExt, count, library.size()); } else { movie.setSkipped(true); JukeboxProperties.setScanningLimitReached(Boolean.TRUE); LOG.info("Skipped: {} ({}/{})", movieTitleExt, count, library.size()); } // Show memory every (processing count) movies if (showMemory && (count % maxThreadsProcess) == 0) { SystemTools.showMemory(); } return null; } }); } tasks.waitFor(); // Add the new extra files (like trailers that were downloaded) to the library and to the corresponding movies library.mergeExtras(); OpenSubtitlesPlugin.logOut(); AniDbPlugin.anidbClose(); JukeboxStatistics.setJukeboxTime(JukeboxStatistics.JukeboxTimes.PROCESSING_END, System.currentTimeMillis()); if (peopleScan && peopleScrape && !ScanningLimit.isLimitReached()) { LOG.info("Searching for people information..."); int peopleCounter = 0; Map<String, Person> popularPeople = new TreeMap<>(); for (Movie movie : library.values()) { // Issue 997: Skip the processing of extras if not required if (movie.isExtra() && !processExtras) { continue; } if (popularity > 0) { for (Filmography person : movie.getPeople()) { boolean exists = Boolean.FALSE; String name = person.getName(); for (Map.Entry<String, Person> entry : popularPeople.entrySet()) { if (entry.getKey().substring(3).equalsIgnoreCase(name)) { entry.getValue().addDepartment(person.getDepartment()); entry.getValue().popularityUp(movie); exists = Boolean.TRUE; } } if (!exists) { 
Person p = new Person(person); p.addDepartment(p.getDepartment()); String key = String.format("%03d", person.getOrder()) + person.getName(); popularPeople.put(key, p); popularPeople.get(key).popularityUp(movie); } } } else { peopleCounter += movie.getPeople().size(); } } tasks.restart(); if (popularity > 0) { List<Person> as = new ArrayList<>(popularPeople.values()); Collections.sort(as, new PersonComparator()); List<Person> stars = new ArrayList<>(); Iterator<Person> itr = as.iterator(); while (itr.hasNext()) { if (peopleCounter >= peopleMax) { break; } Person person = itr.next(); if (popularity > person.getPopularity()) { break; } stars.add(person); peopleCounter++; } final int peopleCount = peopleCounter; peopleCounter = 0; for (final Person person : stars) { final int count = ++peopleCounter; final String personName = person.getName(); final Person p = new Person(person); // Multi-thread parallel processing tasks.submit(new Callable<Void>() { @Override public Void call() throws FileNotFoundException, XMLStreamException { ToolSet tools = threadTools.get(); // Get person data (name, birthday, etc...), download photo updatePersonData(xmlReader, jukebox, p, tools.imagePlugin); library.addPerson(p); LOG.info(LOG_FINISHED, personName, count, peopleCount); // Show memory every (processing count) movies if (showMemory && (count % maxThreadsProcess) == 0) { SystemTools.showMemory(); } return null; } }); } } else { final int peopleCount = peopleCounter; peopleCounter = 0; for (Movie movie : library.values()) { // Issue 997: Skip the processing of extras if not required if (movie.isExtra() && !processExtras) { continue; } Map<String, Integer> typeCounter = new TreeMap<>(); for (Filmography person : movie.getPeople()) { final int count = ++peopleCounter; String job = person.getJob(); if (!typeCounter.containsKey(job)) { typeCounter.put(job, 1); } else if (typeCounter.get(job) == peopleMax) { continue; } else { typeCounter.put(job, typeCounter.get(job) + 1); } final Person 
p = new Person(person); final String personName = p.getName(); // Multi-thread parallel processing tasks.submit(new Callable<Void>() { @Override public Void call() throws FileNotFoundException, XMLStreamException { ToolSet tools = threadTools.get(); // Get person data (name, birthday, etc...), download photo and put to library updatePersonData(xmlReader, jukebox, p, tools.imagePlugin); library.addPerson(p); LOG.info(LOG_FINISHED, personName, count, peopleCount); // Show memory every (processing count) movies if (showMemory && (count % maxThreadsProcess) == 0) { SystemTools.showMemory(); } return null; } }); } } } tasks.waitFor(); LOG.info("Add/update people information to the videos..."); boolean dirty; for (Movie movie : library.values()) { // Issue 997: Skip the processing of extras if not required if (movie.isExtra() && !processExtras) { continue; } for (Filmography person : movie.getPeople()) { dirty = Boolean.FALSE; for (Person p : library.getPeople()) { if (Filmography.comparePersonName(person, p) || comparePersonId(person, p)) { if (!person.getFilename().equals(p.getFilename()) && isValidString(p.getFilename())) { person.setFilename(p.getFilename()); dirty = Boolean.TRUE; } if (!person.getUrl().equals(p.getUrl()) && isValidString(p.getUrl())) { person.setUrl(p.getUrl()); dirty = Boolean.TRUE; } for (Map.Entry<String, String> e : p.getIdMap().entrySet()) { if (isNotValidString(e.getValue())) { continue; } if (person.getId(e.getKey()).equals(e.getValue())) { continue; } person.setId(e.getKey(), e.getValue()); dirty = Boolean.TRUE; } if (!person.getPhotoFilename().equals(p.getPhotoFilename()) && isValidString(p.getPhotoFilename())) { person.setPhotoFilename(p.getPhotoFilename()); dirty = Boolean.TRUE; } break; } } if (dirty) { movie.setDirty(DirtyFlag.INFO, Boolean.TRUE); } } for (Person p : library.getPeople()) { for (Filmography film : p.getFilmography()) { if (Filmography.compareMovieAndFilm(movie, film)) { film.setFilename(movie.getBaseName()); 
film.setTitle(movie.getTitle()); if (film.isDirty()) { p.setDirty(); } break; } } } } for (Person p : library.getPeople()) { for (Filmography film : p.getFilmography()) { if (film.isDirty() || StringTools.isNotValidString(film.getFilename())) { continue; } dirty = Boolean.FALSE; for (Movie movie : library.values()) { if (movie.isExtra() && !processExtras) { continue; } dirty = Filmography.compareMovieAndFilm(movie, film); if (dirty) { break; } } if (!dirty) { film.clearFilename(); p.setDirty(); } } } JukeboxStatistics.setJukeboxTime(JukeboxStatistics.JukeboxTimes.PEOPLE_END, System.currentTimeMillis()); } /* * ****************************************************************************** * * PART 3 : Indexing the library * */ SystemTools.showMemory(); // This is for programs like NMTServer where they don't need the indexes. if (skipIndexGeneration) { LOG.info("Indexing of libraries skipped."); } else { LOG.info("Indexing libraries..."); library.buildIndex(tasks); JukeboxStatistics.setJukeboxTime(JukeboxStatistics.JukeboxTimes.INDEXING_END, System.currentTimeMillis()); SystemTools.showMemory(); } /* * ****************************************************************************** * * PART 3B - Indexing masters */ LOG.info("Indexing masters..."); /* * This is kind of a hack -- library.values() are the movies that * were found in the library and library.getMoviesList() are the * ones that are there now. So the movies that are in getMoviesList * but not in values are the index masters. 
*/ List<Movie> indexMasters = new ArrayList<>(library.getMoviesList()); indexMasters.removeAll(library.values()); JukeboxStatistics.setStatistic(JukeboxStatistic.SETS, indexMasters.size()); // Multi-thread: Parallel Executor tasks.restart(); final boolean autoCollection = PropertiesUtil.getBooleanProperty("themoviedb.collection", Boolean.FALSE); final TheMovieDbPlugin tmdb = new TheMovieDbPlugin(); for (final Movie movie : indexMasters) { // Multi-tread: Start Parallel Processing tasks.submit(new Callable<Void>() { @Override public Void call() throws FileNotFoundException, XMLStreamException { ToolSet tools = threadTools.get(); String safeSetMasterBaseName = FileTools.makeSafeFilename(movie.getBaseName()); /* * The master's movie XML is used for generating the * playlist it will be overwritten by the index XML */ LOG.debug("Updating set artwork for: {}...", movie.getOriginalTitle()); // If we can find a set artwork file, use it; otherwise, stick with the first movie's artwork String oldArtworkFilename = movie.getPosterFilename(); // Set a default poster name in case it's not found during the scan movie.setPosterFilename(safeSetMasterBaseName + "." 
+ posterExtension); if (isNotValidString(PosterScanner.scan(jukebox, movie))) { LOG.debug("Local set poster ({}) not found.", safeSetMasterBaseName); String collectionId = movie.getId(TheMovieDbPlugin.CACHE_COLLECTION); if (autoCollection && StringUtils.isNumeric(collectionId)) { LOG.debug("MovieDb Collection detected with ID {}", collectionId); movie.setPosterURL(tmdb.getCollectionPoster(Integer.parseInt(collectionId))); movie.setFanartURL(tmdb.getCollectionFanart(Integer.parseInt(collectionId))); updateMoviePoster(jukebox, movie); } else { movie.setPosterFilename(oldArtworkFilename); } } // If this is a TV Show and we want to download banners, then also check for a banner Set file if (movie.isTVShow() && bannerDownload) { // Set a default banner filename in case it's not found during the scan movie.setBannerFilename(safeSetMasterBaseName + bannerToken + "." + bannerExtension); movie.setWideBannerFilename( safeSetMasterBaseName + wideBannerToken + "." + bannerExtension); if (!BannerScanner.scan(tools.imagePlugin, jukebox, movie)) { updateTvBanner(jukebox, movie, tools.imagePlugin); LOG.debug("Local set banner ({}{}.*) not found.", safeSetMasterBaseName, bannerToken); } else { LOG.debug("Local set banner found, using {}", movie.getBannerFilename()); } } // Check for Set FANART if (setIndexFanart) { // Set a default fanart filename in case it's not found during the scan movie.setFanartFilename(safeSetMasterBaseName + fanartToken + "." 
+ fanartExtension); if (!FanartScanner.scan(tools.backgroundPlugin, jukebox, movie)) { LOG.debug("Local set fanart ({}{}.*) not found.", safeSetMasterBaseName, fanartToken); } else { LOG.debug("Local set fanart found, using {}", movie.getFanartFilename()); } } StringBuilder artworkFilename = new StringBuilder(safeSetMasterBaseName); artworkFilename.append(thumbnailToken).append(".").append(thumbnailExtension); movie.setThumbnailFilename(artworkFilename.toString()); artworkFilename = new StringBuilder(safeSetMasterBaseName); artworkFilename.append(posterToken).append(".").append(posterExtension); movie.setDetailPosterFilename(artworkFilename.toString()); // Generate footer filenames for (int inx = 0; inx < footerCount; inx++) { if (FOOTER_ENABLE.get(inx)) { artworkFilename = new StringBuilder(safeSetMasterBaseName); if (FOOTER_NAME.get(inx).contains("[")) { artworkFilename.append(footerToken).append("_").append(inx); } else { artworkFilename.append(".").append(FOOTER_NAME.get(inx)); } artworkFilename.append(".").append(FOOTER_EXTENSION.get(inx)); movie.setFooterFilename(artworkFilename.toString(), inx); } } // No playlist for index masters // htmlWriter.generatePlaylist(jukeboxDetailsRoot, tempJukeboxDetailsRoot, movie); // Add all the movie files to the exclusion list FileTools.addMovieToJukeboxFilenames(movie); return null; } }); } tasks.waitFor(); // Clear the cache if we've used it CacheMemory.clear(); JukeboxStatistics.setJukeboxTime(JukeboxStatistics.JukeboxTimes.MASTERS_END, System.currentTimeMillis()); SystemTools.showMemory(); // Issue 1886: Html indexes recreated every time StringBuilder indexFilename; for (Movie setMovie : library.getMoviesList()) { if (setMovie.isSetMaster()) { indexFilename = new StringBuilder(jukebox.getJukeboxRootLocationDetails()); indexFilename.append(File.separator).append(setMovie.getBaseName()).append(EXT_DOT_XML); File xmlFile = FileTools.fileCache.getFile(indexFilename.toString()); if (xmlFile.exists()) { 
xmlReader.parseSetXML(xmlFile, setMovie, library.getMoviesList()); } } } // Issue 1882: Separate index files for each category List<String> categoriesList = Arrays.asList( getProperty("mjb.categories.indexList", "Other,Genres,Title,Certification,Year,Library,Set") .split(",")); if (!skipIndexGeneration) { LOG.info("Writing Indexes XML..."); xmlWriter.writeIndexXML(jukebox, library, tasks); // Issue 2235: Update artworks after masterSet changed ToolSet tools = threadTools.get(); StringBuilder idxName; boolean createPosters = PropertiesUtil.getBooleanProperty("mjb.sets.createPosters", Boolean.FALSE); for (IndexInfo idx : library.getGeneratedIndexes()) { if (!idx.canSkip && idx.categoryName.equals(Library.INDEX_SET)) { idxName = new StringBuilder(idx.categoryName); idxName.append("_").append(FileTools.makeSafeFilename(idx.key)).append("_1"); for (Movie movie : indexMasters) { if (!movie.getBaseName().equals(idxName.toString())) { continue; } if (createPosters) { // Create/update a detail poster for setMaster LOG.debug("Create/update detail poster for set: {}", movie.getBaseName()); createPoster(tools.imagePlugin, jukebox, SkinProperties.getSkinHome(), movie, Boolean.TRUE); } // Create/update a thumbnail for setMaster LOG.debug("Create/update thumbnail for set: {}, isTV: {}, isHD: {}", movie.getBaseName(), movie.isTVShow(), movie.isHD()); createThumbnail(tools.imagePlugin, jukebox, SkinProperties.getSkinHome(), movie, Boolean.TRUE); for (int inx = 0; inx < footerCount; inx++) { if (FOOTER_ENABLE.get(inx)) { LOG.debug("Create/update footer for set: {}, footerName: {}", movie.getBaseName(), FOOTER_NAME.get(inx)); updateFooter(jukebox, movie, tools.imagePlugin, inx, Boolean.TRUE); } } } } } LOG.info("Writing Category XML..."); library.setDirty(library.isDirty() || forceIndexOverwrite); xmlWriter.writeCategoryXML(jukebox, library, "Categories", library.isDirty()); // Issue 1882: Separate index files for each category if (separateCategories) { for (String categoryName : 
categoriesList) { xmlWriter.writeCategoryXML(jukebox, library, categoryName, library.isDirty()); } } } SystemTools.showMemory(); LOG.info("Writing Library data..."); // Multi-thread: Parallel Executor tasks.restart(); int totalCount = library.values().size(); int currentCount = 1; for (final Movie movie : library.values()) { System.out.print("\r Processing library #" + currentCount++ + "/" + totalCount); // Issue 997: Skip the processing of extras if not required if (movie.isExtra() && !processExtras) { continue; } if (movie.isSkipped()) { continue; } // Multi-tread: Start Parallel Processing tasks.submit(new Callable<Void>() { @Override public Void call() throws FileNotFoundException, XMLStreamException { ToolSet tools = threadTools.get(); // Update movie XML files with computed index information LOG.debug("Writing index data to movie: {}", movie.getBaseName()); xmlWriter.writeMovieXML(jukebox, movie, library); // Create a detail poster for each movie LOG.debug("Creating detail poster for movie: {}", movie.getBaseName()); createPoster(tools.imagePlugin, jukebox, SkinProperties.getSkinHome(), movie, forcePosterOverwrite); // Create a thumbnail for each movie LOG.debug("Creating thumbnails for movie: {}", movie.getBaseName()); createThumbnail(tools.imagePlugin, jukebox, SkinProperties.getSkinHome(), movie, forceThumbnailOverwrite); if (!skipIndexGeneration && !skipHtmlGeneration) { // write the movie details HTML LOG.debug("Writing detail HTML to movie: {}", movie.getBaseName()); htmlWriter.generateMovieDetailsHTML(jukebox, movie); // write the playlist for the movie if needed if (!skipPlaylistGeneration) { FileTools.addJukeboxFiles(htmlWriter.generatePlaylist(jukebox, movie)); } } // Add all the movie files to the exclusion list FileTools.addMovieToJukeboxFilenames(movie); return null; } }); } tasks.waitFor(); System.out.print("\n"); SystemTools.showMemory(); JukeboxStatistics.setJukeboxTime(JukeboxStatistics.JukeboxTimes.WRITE_INDEX_END, 
System.currentTimeMillis()); if (peopleScan) { LOG.info("Writing people data..."); // Multi-thread: Parallel Executor tasks.restart(); totalCount = library.getPeople().size(); currentCount = 1; for (final Person person : library.getPeople()) { // Multi-tread: Start Parallel Processing System.out.print("\r Processing person #" + currentCount++ + "/" + totalCount); tasks.submit(new Callable<Void>() { @Override public Void call() throws FileNotFoundException, XMLStreamException { // ToolSet tools = threadTools.get(); // Update person XML files with computed index information LOG.debug("Writing index data to person: {}", person.getName()); xmlWriter.writePersonXML(jukebox, person); if (!skipIndexGeneration && !skipHtmlGeneration) { // write the person details HTML htmlWriter.generatePersonDetailsHTML(jukebox, person); } return null; } }); } tasks.waitFor(); System.out.print("\n"); SystemTools.showMemory(); JukeboxStatistics.setJukeboxTime(JukeboxStatistics.JukeboxTimes.WRITE_PEOPLE_END, System.currentTimeMillis()); } if (!skipIndexGeneration) { if (!skipHtmlGeneration) { LOG.info("Writing Indexes HTML..."); LOG.info(" Video indexes..."); htmlWriter.generateMoviesIndexHTML(jukebox, library, tasks); LOG.info(" Category indexes..."); htmlWriter.generateMoviesCategoryHTML(jukebox, "Categories", "categories.xsl", library.isDirty()); // Issue 1882: Separate index files for each category if (separateCategories) { LOG.info(" Separate category indexes..."); for (String categoryName : categoriesList) { htmlWriter.generateMoviesCategoryHTML(jukebox, categoryName, "category.xsl", library.isDirty()); } } } /* * Generate the index file. 
* * Do not skip this part as it's the index that starts the jukebox */ htmlWriter.generateMainIndexHTML(jukebox, library); JukeboxStatistics.setJukeboxTime(JukeboxStatistics.JukeboxTimes.WRITE_HTML_END, System.currentTimeMillis()); /* Generate extra pages if required */ String pageList = PropertiesUtil.getProperty("mjb.customPages", ""); if (StringUtils.isNotBlank(pageList)) { List<String> newPages = new ArrayList<>(Arrays.asList(pageList.split(","))); for (String page : newPages) { LOG.info("Transforming skin custom page '{}'", page); htmlWriter.transformXmlFile(jukebox, page); } } } if (enableCompleteMovies) { CompleteMoviesWriter.generate(library, jukebox); } /** * ****************************************************************************** * * PART 4 : Copy files to target directory * */ SystemTools.showMemory(); LOG.info("Copying new files to Jukebox directory..."); String index = getProperty("mjb.indexFile", "index.htm"); FileTools.copyDir(jukebox.getJukeboxTempLocationDetails(), jukebox.getJukeboxRootLocationDetails(), Boolean.TRUE); FileTools.copyFile(new File(jukebox.getJukeboxTempLocation() + File.separator + index), new File(jukebox.getJukeboxRootLocation() + File.separator + index)); String skinDate = jukebox.getJukeboxRootLocationDetails() + File.separator + "pictures" + File.separator + "skin.date"; File skinFile = new File(skinDate); File propFile = new File(userPropertiesName); // Only check the property file date if the jukebox properties are not being monitored. boolean copySkin = JukeboxProperties.isMonitor() ? 
Boolean.FALSE : FileTools.isNewer(propFile, skinFile); // If forceSkinOverwrite is set, the skin file doesn't exist, the user properties file doesn't exist or is newer than the skin.date file if (forceSkinOverwrite || !skinFile.exists() || !propFile.exists() || (SkinProperties.getFileDate() > skinFile.lastModified()) || copySkin) { if (forceSkinOverwrite) { LOG.info("Copying skin files to Jukebox directory (forceSkinOverwrite)..."); } else if (SkinProperties.getFileDate() > skinFile.lastModified()) { LOG.info("Copying skin files to Jukebox directory (Skin is newer)..."); } else if (!propFile.exists()) { LOG.info("Copying skin files to Jukebox directory (No property file)..."); } else if (FileTools.isNewer(propFile, skinFile)) { LOG.info("Copying skin files to Jukebox directory ({} is newer)...", propFile.getName()); } else { LOG.info("Copying skin files to Jukebox directory..."); } StringTokenizer st = new StringTokenizer(PropertiesUtil.getProperty("mjb.skin.copyDirs", "html"), " ,;|"); while (st.hasMoreTokens()) { String skinDirName = st.nextToken(); String skinDirFull = StringTools.appendToPath(SkinProperties.getSkinHome(), skinDirName); if ((new File(skinDirFull).exists())) { LOG.info("Copying the {} directory...", skinDirName); FileTools.copyDir(skinDirFull, jukebox.getJukeboxRootLocationDetails(), Boolean.TRUE); } } if (skinFile.exists()) { skinFile.setLastModified(JukeboxStatistics.getTime(JukeboxStatistics.JukeboxTimes.START)); } else { FileTools.makeDirsForFile(skinFile); skinFile.createNewFile(); } } else { LOG.info("Skin copying skipped."); LOG.debug("Use mjb.forceSkinOverwrite=true to force the overwitting of the skin files"); } FileTools.fileCache.saveFileList("filecache.txt"); JukeboxStatistics.setJukeboxTime(JukeboxStatistics.JukeboxTimes.COPYING_END, System.currentTimeMillis()); /** * ****************************************************************************** * * PART 5: Clean-up the jukebox directory * */ SystemTools.showMemory(); // Clean the 
jukebox folder of unneeded files cleanJukeboxFolder(); if (moviejukeboxListing) { LOG.info("Generating listing output..."); listingPlugin.generate(jukebox, library); } LOG.info("Clean up temporary files"); File rootIndex = new File(appendToPath(jukebox.getJukeboxTempLocation(), index)); rootIndex.delete(); FileTools.deleteDir(jukebox.getJukeboxTempLocation()); // clean up extracted attachments AttachmentScanner.cleanUp(); } // Set the end time JukeboxStatistics.setTimeEnd(System.currentTimeMillis()); // Write the jukebox details file at the END of the run (Issue 1830) JukeboxProperties.writeFile(jukebox, library, mediaLibraryPaths); // Output the statistics JukeboxStatistics.writeFile(jukebox, library, mediaLibraryPaths); LOG.info(""); LOG.info("MovieJukebox process completed at {}", new Date()); LOG.info("Processing took {}", JukeboxStatistics.getProcessingTime()); }
From source file: com.android.sdklib.repository.legacy.remote.internal.DownloadCache.java
/** * Downloads a small file, typically XML manifests. * The current {@link Strategy} governs whether the file is served as-is * from the cache, potentially updated first or directly downloaded. * <p>//from w ww .j a v a2 s. co m * For large downloads (e.g. installable archives) please do not invoke the * cache and instead use the {@link #openDirectUrl} method. * <p> * For details on realm authentication and user/password handling, * see {@link HttpConfigurable#openHttpConnection(String)}. * * @param urlString the URL string to be opened. * @param monitor {@link ITaskMonitor} which is related to this URL * fetching. * @return Returns an {@link InputStream} holding the URL content. * Returns null if there's no content (e.g. resource not found.) * Returns null if the document is not cached and strategy is {@link Strategy#ONLY_CACHE}. * @throws IOException Exception thrown when there are problems retrieving * the URL or its content. * @throws ProcessCanceledException Exception thrown if the user cancels the * authentication dialog. */ @NonNull public InputStream openCachedUrl(@NonNull String urlString, @NonNull ITaskMonitor monitor) throws IOException { // Don't cache in direct mode. if (mStrategy == Strategy.DIRECT) { Pair<InputStream, URLConnection> result = openUrl(urlString, true /*needsMarkResetSupport*/, monitor, null /*headers*/); return result.getFirst(); } File cached = new File(mCacheRoot, getCacheFilename(urlString)); File info = new File(mCacheRoot, getInfoFilename(cached.getName())); boolean useCached = mFileOp.exists(cached); if (useCached && mStrategy == Strategy.FRESH_CACHE) { // Check whether the file should be served from the cache or // refreshed first. long cacheModifiedMs = mFileOp.lastModified(cached); /* last mod time in epoch/millis */ boolean checkCache = true; Properties props = readInfo(info); if (props == null) { // No properties, no chocolate for you. 
useCached = false; } else { long minExpiration = System.currentTimeMillis() - MIN_TIME_EXPIRED_MS; checkCache = cacheModifiedMs < minExpiration; if (!checkCache && DEBUG) { System.out.println(String.format("%s : Too fresh [%,d ms], not checking yet.", //$NON-NLS-1$ urlString, cacheModifiedMs - minExpiration)); } } if (useCached && checkCache) { assert props != null; // Right now we only support 200 codes and will requery all 404s. String code = props.getProperty(KEY_STATUS_CODE, ""); //$NON-NLS-1$ useCached = Integer.toString(HttpStatus.SC_OK).equals(code); if (!useCached && DEBUG) { System.out.println(String.format("%s : cache disabled by code %s", //$NON-NLS-1$ urlString, code)); } if (useCached) { // Do we have a valid Content-Length? If so, it should match the file size. try { long length = Long.parseLong(props.getProperty(HttpHeaders.CONTENT_LENGTH, "-1")); //$NON-NLS-1$ if (length >= 0) { useCached = length == mFileOp.length(cached); if (!useCached && DEBUG) { System.out.println( String.format("%s : cache disabled by length mismatch %d, expected %d", //$NON-NLS-1$ urlString, length, cached.length())); } } } catch (NumberFormatException ignore) { } } if (useCached) { // Do we have an ETag and/or a Last-Modified? String etag = props.getProperty(HttpHeaders.ETAG); String lastMod = props.getProperty(HttpHeaders.LAST_MODIFIED); if (etag != null || lastMod != null) { // Details on how to use them is defined at // http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.3.4 // Bottom line: // - if there's an ETag, it should be used first with an // If-None-Match header. That's a strong comparison for HTTP/1.1 servers. // - otherwise use a Last-Modified if an If-Modified-Since header exists. // In this case, we place both and the rules indicates a spec-abiding // server should strongly match ETag and weakly the Modified-Since. // TODO there are some servers out there which report ETag/Last-Mod // yet don't honor them when presented with a precondition. 
In this // case we should identify it in the reply and invalidate ETag support // for these servers and instead fallback on the pure-timeout case below. AtomicInteger statusCode = new AtomicInteger(0); InputStream is = null; List<Header> headers = new ArrayList<Header>(2); if (etag != null) { headers.add(new BasicHeader(HttpHeaders.IF_NONE_MATCH, etag)); } if (lastMod != null) { headers.add(new BasicHeader(HttpHeaders.IF_MODIFIED_SINCE, lastMod)); } if (!headers.isEmpty()) { is = downloadAndCache(urlString, monitor, cached, info, headers.toArray(new Header[headers.size()]), statusCode); } if (is != null && statusCode.get() == HttpStatus.SC_OK) { // The resource was modified, the server said there was something // new, which has been cached. We can return that to the caller. return is; } // If we get here, we should have is == null and code // could be: // - 304 for not-modified -- same resource, still available, in // which case we'll use the cached one. // - 404 -- resource doesn't exist anymore in which case there's // no point in retrying. // - For any other code, just retry a download. if (is != null) { try { is.close(); } catch (Exception ignore) { } is = null; } if (statusCode.get() == HttpStatus.SC_NOT_MODIFIED) { // Cached file was not modified. // Change its timestamp for the next MIN_TIME_EXPIRED_MS check. cached.setLastModified(System.currentTimeMillis()); // At this point useCached==true so we'll return // the cached file below. } else { // URL fetch returned something other than 200 or 304. // For 404, we're done, no need to check the server again. // For all other codes, we'll retry a download below. useCached = false; if (statusCode.get() == HttpStatus.SC_NOT_FOUND) { return null; } } } else { // If we don't have an Etag nor Last-Modified, let's use a // basic file timestamp and compare to a 1 hour threshold. 
long maxExpiration = System.currentTimeMillis() - MAX_TIME_EXPIRED_MS; useCached = cacheModifiedMs >= maxExpiration; if (!useCached && DEBUG) { System.out.println( String.format("[%1$s] cache disabled by timestamp %2$tD %2$tT < %3$tD %3$tT", //$NON-NLS-1$ urlString, cacheModifiedMs, maxExpiration)); } } } } } if (useCached) { // The caller needs an InputStream that supports the reset() operation. // The default FileInputStream does not, so load the file into a byte // array and return that. try { InputStream is = readCachedFile(cached); if (is != null) { if (DEBUG) { System.out.println(String.format("%s : Use cached file", urlString)); //$NON-NLS-1$ } return is; } } catch (IOException ignore) { } } if (!useCached && mStrategy == Strategy.ONLY_CACHE) { // We don't have a document to serve from the cache. if (DEBUG) { System.out.println(String.format("%s : file not in cache", urlString)); //$NON-NLS-1$ } return null; } // If we're not using the cache, try to remove the cache and download again. try { mFileOp.delete(cached); mFileOp.delete(info); } catch (SecurityException ignore) { } return downloadAndCache(urlString, monitor, cached, info, null /*headers*/, null /*statusCode*/); }
From source file:com.android.sdklib.internal.repository.DownloadCache.java
/** * Downloads a small file, typically XML manifests. * The current {@link Strategy} governs whether the file is served as-is * from the cache, potentially updated first or directly downloaded. * <p/>//from w w w .j a v a 2 s . com * For large downloads (e.g. installable archives) please do not invoke the * cache and instead use the {@link #openDirectUrl} method. * <p/> * For details on realm authentication and user/password handling, * check the underlying {@link UrlOpener#openUrl(String, boolean, ITaskMonitor, Header[])} * documentation. * * @param urlString the URL string to be opened. * @param monitor {@link ITaskMonitor} which is related to this URL * fetching. * @return Returns an {@link InputStream} holding the URL content. * Returns null if there's no content (e.g. resource not found.) * Returns null if the document is not cached and strategy is {@link Strategy#ONLY_CACHE}. * @throws IOException Exception thrown when there are problems retrieving * the URL or its content. * @throws CanceledByUserException Exception thrown if the user cancels the * authentication dialog. */ @NonNull public InputStream openCachedUrl(@NonNull String urlString, @NonNull ITaskMonitor monitor) throws IOException, CanceledByUserException { // Don't cache in direct mode. if (mStrategy == Strategy.DIRECT) { Pair<InputStream, HttpResponse> result = openUrl(urlString, true /*needsMarkResetSupport*/, monitor, null /*headers*/); return result.getFirst(); } File cached = new File(mCacheRoot, getCacheFilename(urlString)); File info = new File(mCacheRoot, getInfoFilename(cached.getName())); boolean useCached = mFileOp.exists(cached); if (useCached && mStrategy == Strategy.FRESH_CACHE) { // Check whether the file should be served from the cache or // refreshed first. long cacheModifiedMs = mFileOp.lastModified(cached); /* last mod time in epoch/millis */ boolean checkCache = true; Properties props = readInfo(info); if (props == null) { // No properties, no chocolate for you. 
useCached = false; } else { long minExpiration = System.currentTimeMillis() - MIN_TIME_EXPIRED_MS; checkCache = cacheModifiedMs < minExpiration; if (!checkCache && DEBUG) { System.out.println(String.format("%s : Too fresh [%,d ms], not checking yet.", //$NON-NLS-1$ urlString, cacheModifiedMs - minExpiration)); } } if (useCached && checkCache) { assert props != null; // Right now we only support 200 codes and will requery all 404s. String code = props.getProperty(KEY_STATUS_CODE, ""); //$NON-NLS-1$ useCached = Integer.toString(HttpStatus.SC_OK).equals(code); if (!useCached && DEBUG) { System.out.println(String.format("%s : cache disabled by code %s", //$NON-NLS-1$ urlString, code)); } if (useCached) { // Do we have a valid Content-Length? If so, it should match the file size. try { long length = Long.parseLong(props.getProperty(HttpHeaders.CONTENT_LENGTH, "-1")); //$NON-NLS-1$ if (length >= 0) { useCached = length == mFileOp.length(cached); if (!useCached && DEBUG) { System.out.println( String.format("%s : cache disabled by length mismatch %d, expected %d", //$NON-NLS-1$ urlString, length, cached.length())); } } } catch (NumberFormatException ignore) { } } if (useCached) { // Do we have an ETag and/or a Last-Modified? String etag = props.getProperty(HttpHeaders.ETAG); String lastMod = props.getProperty(HttpHeaders.LAST_MODIFIED); if (etag != null || lastMod != null) { // Details on how to use them is defined at // http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.3.4 // Bottom line: // - if there's an ETag, it should be used first with an // If-None-Match header. That's a strong comparison for HTTP/1.1 servers. // - otherwise use a Last-Modified if an If-Modified-Since header exists. // In this case, we place both and the rules indicates a spec-abiding // server should strongly match ETag and weakly the Modified-Since. // TODO there are some servers out there which report ETag/Last-Mod // yet don't honor them when presented with a precondition. 
In this // case we should identify it in the reply and invalidate ETag support // for these servers and instead fallback on the pure-timeout case below. AtomicInteger statusCode = new AtomicInteger(0); InputStream is = null; List<Header> headers = new ArrayList<Header>(2); if (etag != null) { headers.add(new BasicHeader(HttpHeaders.IF_NONE_MATCH, etag)); } if (lastMod != null) { headers.add(new BasicHeader(HttpHeaders.IF_MODIFIED_SINCE, lastMod)); } if (!headers.isEmpty()) { is = downloadAndCache(urlString, monitor, cached, info, headers.toArray(new Header[headers.size()]), statusCode); } if (is != null && statusCode.get() == HttpStatus.SC_OK) { // The resource was modified, the server said there was something // new, which has been cached. We can return that to the caller. return is; } // If we get here, we should have is == null and code // could be: // - 304 for not-modified -- same resource, still available, in // which case we'll use the cached one. // - 404 -- resource doesn't exist anymore in which case there's // no point in retrying. // - For any other code, just retry a download. if (is != null) { try { is.close(); } catch (Exception ignore) { } is = null; } if (statusCode.get() == HttpStatus.SC_NOT_MODIFIED) { // Cached file was not modified. // Change its timestamp for the next MIN_TIME_EXPIRED_MS check. cached.setLastModified(System.currentTimeMillis()); // At this point useCached==true so we'll return // the cached file below. } else { // URL fetch returned something other than 200 or 304. // For 404, we're done, no need to check the server again. // For all other codes, we'll retry a download below. useCached = false; if (statusCode.get() == HttpStatus.SC_NOT_FOUND) { return null; } } } else { // If we don't have an Etag nor Last-Modified, let's use a // basic file timestamp and compare to a 1 hour threshold. 
long maxExpiration = System.currentTimeMillis() - MAX_TIME_EXPIRED_MS; useCached = cacheModifiedMs >= maxExpiration; if (!useCached && DEBUG) { System.out.println( String.format("[%1$s] cache disabled by timestamp %2$tD %2$tT < %3$tD %3$tT", //$NON-NLS-1$ urlString, cacheModifiedMs, maxExpiration)); } } } } } if (useCached) { // The caller needs an InputStream that supports the reset() operation. // The default FileInputStream does not, so load the file into a byte // array and return that. try { InputStream is = readCachedFile(cached); if (is != null) { if (DEBUG) { System.out.println(String.format("%s : Use cached file", urlString)); //$NON-NLS-1$ } return is; } } catch (IOException ignore) { } } if (!useCached && mStrategy == Strategy.ONLY_CACHE) { // We don't have a document to serve from the cache. if (DEBUG) { System.out.println(String.format("%s : file not in cache", urlString)); //$NON-NLS-1$ } return null; } // If we're not using the cache, try to remove the cache and download again. try { mFileOp.delete(cached); mFileOp.delete(info); } catch (SecurityException ignore) { } return downloadAndCache(urlString, monitor, cached, info, null /*headers*/, null /*statusCode*/); }
From source file:com.zoffcc.applications.zanavi.Navit.java
static void copyDirectoryOneLocationToAnotherLocation(File sourceLocation, File targetLocation) throws IOException { if (sourceLocation.isDirectory()) { if (!targetLocation.exists()) { targetLocation.mkdir();/* ww w . j av a 2s . c om*/ } String[] children = sourceLocation.list(); for (int i = 0; i < sourceLocation.listFiles().length; i++) { copyDirectoryOneLocationToAnotherLocation(new File(sourceLocation, children[i]), new File(targetLocation, children[i])); } } else { long last_mod = 0L; boolean use_last_mod = true; try { last_mod = sourceLocation.lastModified(); } catch (Exception e) { use_last_mod = false; } InputStream in = new FileInputStream(sourceLocation); OutputStream out = new FileOutputStream(targetLocation); // Copy the bits from instream to outstream byte[] buf = new byte[1024]; int len; while ((len = in.read(buf)) > 0) { out.write(buf, 0, len); } in.close(); out.close(); if (use_last_mod) { try { targetLocation.setLastModified(last_mod); } catch (Exception e) { } } } }
From source file:com.microsoft.tfs.core.clients.versioncontrol.engines.internal.GetEngine.java
/** * Process one operation in the context of an on-going * {@link AsyncGetOperation}. This handles files and directories; gets, * deletes, moves, etc. If the operation completes successfully, it queues * an acknowledgement to the server./*from w w w .jav a 2 s . c o m*/ */ private void processOperation(final GetOperation action, final AsyncGetOperation asyncOp) { Check.notNull(action, "action"); //$NON-NLS-1$ Check.notNull(asyncOp, "asyncOp"); //$NON-NLS-1$ // Get the new local item once since it has to be computed for // GetOperations. final String newLocalItem = action.getTargetLocalItem(); /* * Check the path length here for compatibility with .NET, which * discovers the condition when ItemSpec.GetFullPath() is used. * * Only do for non-preview, as the .NET implementation would only * encounter the limit when writing files to disk. */ if (newLocalItem != null && !asyncOp.isPreview()) { try { LocalPath.checkLocalItem(newLocalItem, null, false, false, false, true); } catch (final PathTooLongException e) { log.warn("Path too long, not getting", e); //$NON-NLS-1$ onNonFatalError(new VersionControlException( MessageFormat.format(Messages.getString("GetEngine.LocalPathTooLongFormat"), //$NON-NLS-1$ newLocalItem)), asyncOp.getWorkspace()); return; } } /* * If this get operation has been overridden by the target conflict * management code below (code that checks the existingLocalHash), then * just ignore this. To better understand this see the code below where * targetAction.ClearLocalItem() is called. */ if (action.isDownloadCompleted()) { return; } // For conflicts, fire the conflict event. if (action.hasConflict()) { asyncOp.addConflict(action); recordEvent(asyncOp, OperationStatus.CONFLICT, action); return; } // Determine whether this is a pending rename that is changing the // path's case. 
final boolean isCaseChangingRename = action.isCaseChangingRename(); /* * Tracks whether the operation's download is completed in this method, * versus being queued for asynchronous processing, so we can set * .tpattributes if it is complete. */ boolean downloadCompletedHere = false; try { // ************************************** // Error checks against the source item. // ************************************** // Determine if the local item at the location we currently have it // exists. FileSystemAttributes existingLocalAttrs = new FileSystemAttributes(); boolean existingLocalExists = false; if (action.getCurrentLocalItem() != null) { existingLocalAttrs = FileSystemUtils.getInstance().getAttributes(action.getCurrentLocalItem()); existingLocalExists = existingLocalAttrs.exists(); log.debug(MessageFormat.format("existingLocalAttrs = {0}, existingLocalExists = {1}", //$NON-NLS-1$ existingLocalAttrs, existingLocalExists)); /* * If we are undoing an edit that is not also an add, set the * file back to read-only so that we will not see it as a * writable file later. */ if (asyncOp.getType() == ProcessType.UNDO) { if (action.getChangeType().contains(ChangeType.EDIT) && action.getChangeType().contains(ChangeType.ADD) == false) { action.setOkayToOverwriteExistingLocal(true); if (WorkspaceLocation.SERVER == asyncOp.getWorkspace().getLocation() && existingLocalAttrs.isReadOnly() == false) { existingLocalAttrs.setReadOnly(true); existingLocalAttrs.setArchive(false); FileSystemUtils.getInstance().setAttributes(action.getCurrentLocalItem(), existingLocalAttrs); log.debug(MessageFormat.format( "Setting file to read only (archive=true) as part of undoing an edit: {0}", //$NON-NLS-1$ action.getCurrentLocalItem())); } } } // Check for problems deleting the source local file/directory. 
if (existingLocalExists && (newLocalItem == null || LocalPath.equals(action.getCurrentLocalItem(), newLocalItem) == false)) { // Check if we are getting a file but the source is actually // a directory. if (action.getItemType() == ItemType.FILE && !existingLocalAttrs.isSymbolicLink() && existingLocalAttrs.isDirectory()) { // I have decided to not make this an error, but rather // to skip the deletion of the source. existingLocalExists = false; } } } // ************************************** // Error checks against the target item. // ************************************** /* * Check if there is a get operation against the target item. This * code is critical for breaking rename cycles (the case where all * items involved in the rename cycle have pending changes just * results in a conflict for each -- otherwise, get should make * progress). */ FileSystemAttributes newLocalAttrs = new FileSystemAttributes(); boolean newLocalExists = false; GetOperation targetAction = null; if (newLocalItem != null) { newLocalAttrs = FileSystemUtils.getInstance().getAttributes(newLocalItem); newLocalExists = newLocalAttrs.exists(); log.debug(MessageFormat.format("newLocalAttrs = {0}, newLocalExists = {1}", //$NON-NLS-1$ newLocalAttrs.toString(), newLocalExists)); log.debug(MessageFormat.format("NewContentNeeded = {0}", action.isNewContentNeeded())); //$NON-NLS-1$ // Check if we are getting a file but the target is actually a // directory. if (newLocalExists && action.getItemType() != ItemType.FOLDER && !newLocalAttrs.isSymbolicLink() && newLocalAttrs.isDirectory()) { asyncOp.addWarning(OperationStatus.TARGET_IS_DIRECTORY, action); log.debug(MessageFormat.format("TargetIsDirectory, newLocalItem = {0}", newLocalItem)); //$NON-NLS-1$ return; } targetAction = asyncOp.getExistingLocalHash().get(newLocalItem); if (targetAction != null && targetAction != action && !isCaseChangingRename) { /* * Check if there is a pending change against the target * item. 
If there is a target pending change and we're not * processing the results of a merge or unshelve or undo * (e.g., unshelve cyclic rename), we'll stop processing the * current action. None of these errors are possible if this * is a case changing rename. */ if (newLocalExists && action.getType() != ProcessType.UNSHELVE && action.getType() != ProcessType.MERGE && action.getType() != ProcessType.ROLLBACK && action.getType() != ProcessType.UNDO && targetAction.getEffectiveChangeType().isEmpty() == false) { asyncOp.addWarning(OperationStatus.TARGET_LOCAL_PENDING, action, targetAction); log.debug(MessageFormat.format( "TargetLocalPending, newLocalItem = {0}, targetAction.ChangeType = {1}", //$NON-NLS-1$ newLocalItem, targetAction.getEffectiveChangeType())); return; } else if (newLocalExists && !asyncOp.isOverwrite() && isWritableFileConflict(asyncOp, action, newLocalAttrs) && newLocalAttrs.isSymbolicLink() == false) { // We have to stop if the target item is a writable // file. asyncOp.addWarning(OperationStatus.TARGET_WRITABLE, action); log.debug(MessageFormat.format("TargetWritable, newLocalItem = {0}", newLocalItem)); //$NON-NLS-1$ return; } else { /* * We have a get operation with this target as the * source but it doesn't have a pending change. If the * target is just a file, processing this action will * handle it and there is no reason to complete the * source portion of the target action. If the target is * a folder, it will get added to the hash of items to * not delete when the directory is created (see further * down). */ if (targetAction.getItemType() == ItemType.FILE) { /* * In order to make this work in the face of * crashes, I need to actually tell the server that * I no longer have it. We must lock the target * action across both clearing the local item and * posting the update to prevent a race condition * where the ULV call for a download could happen in * between. 
*/ synchronized (targetAction) { log.debug(MessageFormat.format("ProcessOperation: clearing source local item {0}", //$NON-NLS-1$ action.getCurrentLocalItem())); if (!asyncOp.isPreview() && !targetAction.isDownloadCompleted()) { /* * For a delete, we can complete the * operation (for merge, we need to ack it * as resolved as well). Otherwise, just * tell the server we don't have the item. */ if (targetAction.isDelete()) { asyncOp.queueLocalVersionUpdate(targetAction, null, targetAction.getVersionLocal()); } } /* * Only set the DownloadCompleted flag if the * target action is a delete; othwerwise, the * action hasn't been completed. */ if (targetAction.isDelete() && !targetAction.isDownloadCompleted()) { targetAction.setDownloadCompleted(true); downloadCompletedHere = true; recordEvent(asyncOp, OperationStatus.DELETING, targetAction); } // We no longer have this item at this location // -- // don't call until after using // the local item path in the ULV call. targetAction.clearLocalItem(); // Now remove the location from the hash. asyncOp.getExistingLocalHash().remove(newLocalItem); } } } } } // if true, this is pending add which is being undone and we are // asked to delete it afterwards final boolean deleteAsUndoAdd = shouldDeleteAsUndoAdd(asyncOp, action); // ************************************** // Time to perform the get. // ************************************** // Check if we have something to get (rather than just deleting // something). if (!action.isDelete()) { // Handle getting folders very differently from getting files. if (action.getItemType() == ItemType.FOLDER) { // Check if the target item is a writable file. if (!asyncOp.isOverwrite() && newLocalExists && isWritableFileConflict(asyncOp, action, newLocalAttrs)) { asyncOp.addWarning(OperationStatus.TARGET_WRITABLE, action); log.debug(MessageFormat.format("TargetWritable, newLocalItem = {0}", newLocalItem)); //$NON-NLS-1$ return; } // Check if an item exists at the target location. 
if (!asyncOp.isPreview() && !asyncOp.isNoDiskUpdate() && newLocalExists) { // If it is just a file (we've already confirmed it's // read-only) just delete it. if (!newLocalAttrs.isDirectory()) { if (!new File(newLocalItem).delete()) { throw new IOException(MessageFormat.format( Messages.getString("GetEngine.CouldNotDeleteFileFormat"), //$NON-NLS-1$ newLocalItem)); } else { log.debug(MessageFormat.format( "Deleting read-only file that''s in the way of a directory: {0}", //$NON-NLS-1$ newLocalItem)); } newLocalExists = false; } } String sourceLocalItem = null; // if we are case changing rename and the item exists // locally if (isCaseChangingRename && existingLocalExists) { sourceLocalItem = action.getCurrentLocalItem(); } else if (newLocalExists) { // if the target already exists and we have a delete on // the same path // we convert it to a rename -- this takes care of the // get /remap case // where the case changes. if (targetAction != null && targetAction.getItemType() == ItemType.FOLDER && targetAction.isDelete() && LocalPath.lastPartEqualsCaseSensitive(targetAction.getCurrentLocalItem(), newLocalItem) == false) { sourceLocalItem = targetAction.getCurrentLocalItem(); } } // Create the directory. The call to create a directory does // not throw an exception // if the dir already exists (e.g., due to a race // condition). 
if (!asyncOp.isPreview() && (!newLocalExists || sourceLocalItem != null)) { // If this is a case changing rename then we can safely // do a Directory.Move, so we do, // but only if source directory exists (Bug: 448888) if (sourceLocalItem != null) { final File sourceLocalItemFile = new File(sourceLocalItem); if (sourceLocalItemFile.renameTo(new File(newLocalItem)) == false) { onNonFatalError(new IOException(MessageFormat.format( Messages.getString("GetEngine.FailedToRenameDirectoryFormat"), //$NON-NLS-1$ sourceLocalItemFile, newLocalItem)), asyncOp.getWorkspace()); } else { log.debug(MessageFormat.format("Renamed directory: {0} -> {1}", //$NON-NLS-1$ action.getCurrentLocalItem(), newLocalItem)); } } else { if (!asyncOp.isNoDiskUpdate()) { final File newLocalFile = new File(newLocalItem); if (newLocalFile.mkdirs() == false) { /* * Double-check that the directory does not * exist to avoid race conditions in mkdirs. */ if (!newLocalFile.isDirectory()) { onNonFatalError(new IOException(MessageFormat.format( Messages.getString("GetEngine.FailedToCreateDirectoryFormat"), //$NON-NLS-1$ newLocalItem)), asyncOp.getWorkspace()); } } else { log.debug(MessageFormat.format("Created directory: {0}", newLocalItem)); //$NON-NLS-1$ } } } } if (deleteAsUndoAdd) { if (!asyncOp.getDeletes().containsKey(newLocalItem)) { asyncOp.getDeletes().put(newLocalItem, action); } } // Ensure that the newLocalItem folder will never be deleted // by another operation. else if (!asyncOp.getDontDeleteFolderHash().containsKey(newLocalItem)) { asyncOp.getDontDeleteFolderHash().put(newLocalItem, action); } // Schedule the source file/directory for deletion if it is // different from the target. if (existingLocalExists && !LocalPath.equals(action.getCurrentLocalItem(), newLocalItem)) { if (!asyncOp.getDeletes().containsKey(action.getCurrentLocalItem())) { asyncOp.getDeletes().put(action.getCurrentLocalItem(), action); } // Go ahead and record the "move" and notify the server. 
recordEvent(asyncOp, action.getCurrentLocalItem() == null ? OperationStatus.GETTING : OperationStatus.REPLACING, action); if (!asyncOp.isPreview()) { asyncOp.queueLocalVersionUpdate(action, action.getTargetLocalItem(), action.getVersionServer()); action.setDownloadCompleted(true); downloadCompletedHere = true; } } else { recordEvent(asyncOp, action.getCurrentLocalItem() == null ? OperationStatus.GETTING : OperationStatus.REPLACING, action); if (!asyncOp.isPreview()) { if (asyncOp.getType() != ProcessType.PEND && asyncOp.getType() != ProcessType.UNDO || !action.getEffectiveChangeType().contains(ChangeType.ADD)) { /* * Queue a request to tell the server that I got * it. In a local workspace, when getting a * folder that we already have, use the force * option. */ asyncOp.queueLocalVersionUpdate(action, action.getTargetLocalItem(), action.getVersionServer(), asyncOp.getWorkspace().getLocation() == WorkspaceLocation.LOCAL); action.setDownloadCompleted(true); downloadCompletedHere = true; } } } } else // Getting a file. { /* * If we are editing an existing file or the file exists at * a different location, GetAll is false, and version local * is the same as on the server, move the file. */ if (existingLocalExists && (action.getEffectiveChangeType().contains(ChangeType.EDIT) && action.getVersionLocal() == action.getVersionServer() || !asyncOp.isGetAll() && !action.isNewContentNeeded())) { try { // Force ignore case here so we can detect // case-changing renames on all platforms. if (LocalPath.equals(action.getCurrentLocalItem(), newLocalItem, true)) { // When edit is true and we get to this point // with the path not having // changed, it is either a GetAll or there is // nothing to download (the // content didn't change on the server even // though the version number did). if (action.getEffectiveChangeType().contains(ChangeType.EDIT) && asyncOp.isGetAll()) { // When GetAll is specified and the file is // being edited, we obviously cannot // download the file. 
Do NOT add it to the // retry list! recordEvent(asyncOp, OperationStatus.UNABLE_TO_REFRESH, action); asyncOp.getStatus().incrementNumWarnings(); } else { // If this isn't a preview and this is a // case changing rename. (i.e. rename $/project // -> $/PROJECT) if (!asyncOp.isPreview() && isCaseChangingRename && !asyncOp.isNoDiskUpdate()) { if (new File(action.getCurrentLocalItem()) .renameTo(new File(newLocalItem)) == false) { onNonFatalError( new IOException(MessageFormat.format( Messages.getString( "GetEngine.FailedToRenameFileFormat"), //$NON-NLS-1$ action.getCurrentLocalItem(), newLocalItem)), asyncOp.getWorkspace()); } else { log.debug(MessageFormat.format("Renamed file from {0} to {1}", //$NON-NLS-1$ action.getCurrentLocalItem(), newLocalItem)); } } // For get there's nothing to do -- just go // ahead and fire the event. For pend/undo // the action happened on the server and we // need to fire the event, though there // is no disk update. if (asyncOp.getType() != ProcessType.GET || isCaseChangingRename) { recordEvent(asyncOp, OperationStatus.GETTING, action); } } // There's nothing that needs to be downloaded. if (!asyncOp.isPreview()) { Check.isTrue( !action.isNewContentNeeded() || (action.getEffectiveChangeType().contains(ChangeType.EDIT) && asyncOp.isGetAll()), MessageFormat.format( "Local and server versions expected to be equal except for edit: {0}", //$NON-NLS-1$ action)); if (deleteAsUndoAdd) { if (new File(newLocalItem).delete() == false) { throw new IOException(MessageFormat.format( Messages.getString("GetEngine.CouldNotDeleteFileFormat"), //$NON-NLS-1$ newLocalItem)); } } // No need to queue a local version update // for the undo of a pending add. The call // to UndoPendingChanges removes the local // version row as part of that // call. 
else if (!(ProcessType.UNDO == asyncOp.getType() && action.getChangeType().contains(ChangeType.ADD))) { asyncOp.queueLocalVersionUpdate(action, action.getTargetLocalItem(), action.getVersionServer()); action.setDownloadCompleted(true); downloadCompletedHere = true; } } return; } } finally { /* * If the file is checked out, make sure it is * writable. This is also necessary for the code * further down that maintains the read-only setting * of the source when performing the copy/delete * move. */ if (!asyncOp.isPreview() && action.getEffectiveChangeType().contains(ChangeType.EDIT) && existingLocalAttrs.isReadOnly() && !existingLocalAttrs.isSymbolicLink()) { existingLocalAttrs.setReadOnly(false); existingLocalAttrs.setArchive(true); FileSystemUtils.getInstance().setAttributes(action.getCurrentLocalItem(), existingLocalAttrs); log.debug(MessageFormat.format("Set edited file to read/write (archive=true): {0}", //$NON-NLS-1$ action.getCurrentLocalItem())); } } // Check for a writable target before attempting the // move (we know it's not a directory). if (!asyncOp.isOverwrite() && newLocalExists && isWritableFileConflict(asyncOp, action, newLocalAttrs) && !asyncOp.isNoDiskUpdate()) { asyncOp.addWarning(OperationStatus.TARGET_WRITABLE, action); return; } if (!asyncOp.isPreview() && !deleteAsUndoAdd && !asyncOp.isNoDiskUpdate()) { /** * We may get a rename for a file to a directory * that does not exist. (Particularly when handling * name conflicts and the user chooses the * destination filename.) */ FileHelpers.createDirectoryIfNecessary(LocalPath.getParent(newLocalItem)); // Copy the source over the target file (we know at // this point that the target // must be read-only). If we are undoing pending add // which is under pending rename, we just don't copy // the file. 
if (newLocalAttrs.exists()) { new File(newLocalItem).delete(); } if (!existingLocalAttrs.isSymbolicLink()) { FileCopyHelper.copy(action.getCurrentLocalItem(), newLocalItem); log.debug(MessageFormat.format("Copied file from {0} to {1}", //$NON-NLS-1$ action.getCurrentLocalItem(), newLocalItem)); // Apply the appropriate last-write time // forward. if (asyncOp.getWorkspace().getOptions() .contains(WorkspaceOptions.SET_FILE_TO_CHECKIN) && !DotNETDate.MIN_CALENDAR.equals(action.getVersionServerDate())) { final File newLocalFile = new File(newLocalItem); if (existingLocalAttrs.isReadOnly()) { existingLocalAttrs.setReadOnly(false); FileSystemUtils.getInstance().setAttributes(newLocalFile, existingLocalAttrs); existingLocalAttrs.setReadOnly(true); } if (action.getEffectiveChangeType().contains(ChangeType.EDIT)) { newLocalFile .setLastModified(action.getVersionServerDate().getTimeInMillis()); } else { // Pending edit; carry the existing // timestamp forward final FileSystemAttributes attrs = FileSystemUtils.getInstance() .getAttributes(action.getCurrentLocalItem()); newLocalFile.setLastModified(attrs.getModificationTime().getJavaTime()); } } } else { final FileSystemUtils util = FileSystemUtils.getInstance(); final String destinationPath = util.getSymbolicLink(action.getCurrentLocalItem()); util.createSymbolicLink(destinationPath, newLocalItem); } // We must preserve the read/write setting since the // user may have attrib'ed the file outside of this program. FileSystemUtils.getInstance().setAttributes(newLocalItem, existingLocalAttrs); } // Report that we are actually getting the file. recordEvent(asyncOp, action.getCurrentLocalItem() == null ? OperationStatus.GETTING : OperationStatus.REPLACING, action); if (asyncOp.isPreview()) { return; } // Tell the server that the file is in the new location. 
asyncOp.queueLocalVersionUpdate(action, action.getTargetLocalItem(), action.getVersionServer()); synchronized (action) { action.setDownloadCompleted(true); downloadCompletedHere = true; // Delete the source. if (!asyncOp.isNoDiskUpdate()) { if (new File(action.getCurrentLocalItem()).delete() == false) { throw new IOException(MessageFormat.format( Messages.getString("GetEngine.CouldNotDeleteFileFormat"), //$NON-NLS-1$ action.getCurrentLocalItem())); } else { log.debug(MessageFormat.format("Deleted file source of move: {0}", //$NON-NLS-1$ action.getCurrentLocalItem())); } } } } else { // Download the file unless the source or target is // writable and Overwrite is not specified. if (action.getEffectiveChangeType().contains(ChangeType.ADD) && !action.isNewContentNeeded()) { // If the action is for a file with a pending add // and we don't have or it is not on disk, there is // nothing more we can do. For pend or undo, the // change still happened on the server, so fire the // regular event. if (asyncOp.getType() == ProcessType.PEND || asyncOp.getType() == ProcessType.UNDO) { recordEvent(asyncOp, OperationStatus.GETTING, action); } // Let the user know that there is an error unless // we are processing an Undo request or a Pend that // has Preview turned on (happens in the VSIP code). 
if (!asyncOp.isNoDiskUpdate() && asyncOp.getType() != ProcessType.UNDO && (!asyncOp.isPreview() || asyncOp.getType() != ProcessType.PEND)) { if (newLocalItem != null) { onNonFatalError(new VersionControlException(MessageFormat.format( Messages.getString("GetEngine.AddedItemMissingLocallyFormat"), //$NON-NLS-1$ newLocalItem)), asyncOp.getWorkspace()); } else { onNonFatalError(new VersionControlException(MessageFormat.format( Messages.getString("GetEngine.AddedItemMissingLocallyFormat"), //$NON-NLS-1$ action.getCurrentLocalItem())), asyncOp.getWorkspace()); } } return; } else if (!asyncOp.isOverwrite() && WorkspaceLocation.SERVER == asyncOp.getWorkspace().getLocation() && existingLocalExists && !existingLocalAttrs.isReadOnly() && !action.isOkayToOverwriteExistingLocal() && !isCaseChangingRename && !localContentIsRedundant(action.getCurrentLocalItem(), action.getHashValue()) && !existingLocalAttrs.isSymbolicLink()) { asyncOp.addWarning(OperationStatus.SOURCE_WRITABLE, action, null); } else if (!asyncOp.isOverwrite() && newLocalExists && isWritableFileConflict(asyncOp, action, newLocalAttrs) && !isCaseChangingRename && !newLocalAttrs.isSymbolicLink()) { asyncOp.addWarning(OperationStatus.TARGET_WRITABLE, action); return; } else { // Report that we are actually getting the file. recordEvent(asyncOp, action.getCurrentLocalItem() == null ? OperationStatus.GETTING : OperationStatus.REPLACING, action); if (action.isContentDestroyed()) { onNonFatalError( new DestroyedContentUnavailableException(MessageFormat.format( Messages.getString( "GetEngine.DestroyedFileContentUnavailableExceptionFormat"), //$NON-NLS-1$ action.getVersionServer(), action.getTargetLocalItem())), asyncOp.getWorkspace()); return; } // Don't go any further if we aren't actually // getting it. 
if (asyncOp.isPreview()) { return; } Check.isTrue(!deleteAsUndoAdd, "We are downloading file which is not needed (undo of pending add)"); //$NON-NLS-1$ if (action.isUndo() && null != action.getBaselineFileGUID() && null == action.getDownloadURL()) { // Local workspace offline undo (baseline folder // restore) if (!asyncOp.isNoDiskUpdate()) { // check symbolic link first final boolean isSymlink = PropertyConstants.IS_SYMLINK .equals(PropertyUtils.selectMatching(action.getPropertyValues(), PropertyConstants.SYMBOLIC_KEY)); asyncOp.getBaselineFolders().copyBaselineToTarget(action.getBaselineFileGUID(), action.getTargetLocalItem(), -1, action.getHashValue(), isSymlink); } if (asyncOp.getWorkspace().getOptions() .contains(WorkspaceOptions.SET_FILE_TO_CHECKIN) && !DotNETDate.MIN_CALENDAR.equals(action.getVersionServerDate())) { new File(action.getTargetLocalItem()) .setLastModified(action.getVersionServerDate().getTimeInMillis()); } asyncOp.queueLocalVersionUpdate(new ClientLocalVersionUpdate( action.getSourceServerItem(), action.getItemID(), action.getTargetLocalItem(), action.getVersionServer(), action.getEncoding(), false, action.getPropertyValues())); if (existingLocalExists && action.getCurrentLocalItem() != null && !LocalPath .equals(action.getCurrentLocalItem(), action.getTargetLocalItem()) && !asyncOp.isNoDiskUpdate()) { Check.isTrue(action.getItemType() != ItemType.FOLDER, MessageFormat.format("Should not try to delete a folder here: {0}", //$NON-NLS-1$ action.toString())); deleteSource(action, existingLocalAttrs); } downloadCompletedHere = true; } else if (null != action.getDownloadURL()) { // Download URL get (common case) asyncGetFile(action, existingLocalExists, existingLocalAttrs, newLocalExists, newLocalAttrs, asyncOp); } } } } } else // Operation is a delete. 
{ // if the server sent back any and we want to do this if the // existing item is a directory if (action.getItemType() == ItemType.FOLDER || (action.getItemType() == ItemType.ANY && !existingLocalAttrs.isSymbolicLink() && existingLocalAttrs.isDirectory())) { // Normally, we just have to queue folder deletes. This is // because we don't want to do it until folders are empty // and the async nature of get makes it really hard to // determine when that is. When the current local path is // null, we just fire an event. if (action.getCurrentLocalItem() == null) { recordEvent(asyncOp, OperationStatus.DELETING, action); } else if (!asyncOp.getDeletes().containsKey(action.getCurrentLocalItem())) { asyncOp.getDeletes().put(action.getCurrentLocalItem(), action); } } else { // If the file is writable, stop. Otherwise, delete the // file. if (!asyncOp.isOverwrite() && existingLocalExists && WorkspaceLocation.SERVER == asyncOp.getWorkspace().getLocation() && !existingLocalAttrs.isReadOnly() && !existingLocalAttrs.isSymbolicLink() && !action.isOkayToOverwriteExistingLocal()) { Check.isTrue(!action.getEffectiveChangeType().contains(ChangeType.EDIT), MessageFormat.format( "The edit bit is set, yet we are trying to delete this file: {0}", //$NON-NLS-1$ action)); asyncOp.addWarning(OperationStatus.SOURCE_WRITABLE, action, null); } else { recordEvent(asyncOp, OperationStatus.DELETING, action); // Don't go any further if we aren't actually deleting // it. if (asyncOp.isPreview()) { return; } // Delete the file and acknowledge it. deleteSource(action, existingLocalAttrs); asyncOp.queueLocalVersionUpdate(action, null, action.getVersionServer() != 0 ? 
action.getVersionServer() : action.getVersionLocal()); action.setDownloadCompleted(true); downloadCompletedHere = true; } } } } catch (final PathTooLongException e) { /* * We already checked the target item at the top of this method, but * LocalPath.canonicalize or LocalPath.checkLocalItem may have * detected another path that exceeds the limit. */ log.warn("Path too long, not getting", e); //$NON-NLS-1$ onNonFatalError(new VersionControlException( MessageFormat.format(Messages.getString("GetEngine.LocalPathTooLongFormat"), //$NON-NLS-1$ newLocalItem)), asyncOp.getWorkspace()); } catch (final CanceledException e) { // Don't convert these to non-fatals throw e; } catch (final Exception e) { // Note that we'll catch exceptions due to problems such as unable // to open a file for writing because another process has it locked. log.warn("Caught and converted an exception: ", e); //$NON-NLS-1$ onNonFatalError(e, asyncOp.getWorkspace()); } finally { /* * Apply .tpattributes if the download completed here. If it was * queued for asynch processing, it will not be marked completed * here and attributes get applied elsewhere. */ if (downloadCompletedHere && !asyncOp.isPreview() && !asyncOp.isNoDiskUpdate()) { applyFileAttributesAfterGet(asyncOp, action); } } }