Usage examples for java.util.concurrent.Future.isDone()
boolean isDone();
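Before the project excerpts below, a minimal self-contained sketch of the method's contract: isDone() never blocks and returns true once the task has completed, whether normally, with an exception, or through cancellation. The class name and timings here are illustrative only, not taken from any of the projects below.

import java.util.concurrent.*;

public class IsDoneExample {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        Future<Integer> future = pool.submit(() -> {
            Thread.sleep(500); // simulate work
            return 42;
        });
        // isDone() never blocks; it only reports whether the task has finished.
        while (!future.isDone()) {
            System.out.println("still working...");
            Thread.sleep(100);
        }
        System.out.println("result = " + future.get()); // returns immediately once done
        pool.shutdown();
    }
}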
From source file:com.numenta.core.service.DataSyncService.java
/**
 * This method is executed periodically and updates {@link com.numenta.core.data.CoreDatabase}
 * with new data from the server.
 */
protected void synchronizeWithServer() throws IOException {
    Log.i(TAG, "synchronizeWithServer");
    if (_synchronizingWithServer) {
        return;
    }
    if (!NetUtils.isConnected()) {
        // Not connected, skip until we connect
        return;
    }
    final CoreDatabase database = HTMApplication.getDatabase();
    if (database == null) {
        return;
    }
    synchronized (this) {
        if (_synchronizingWithServer) {
            return;
        }
        _synchronizingWithServer = true;
    }
    String result = null;
    try {
        // Guard against blocking the UI Thread
        if (Looper.myLooper() == Looper.getMainLooper()) {
            throw new IllegalStateException("You should not access the database from the UI thread");
        }
        fireRefreshStateEvent(_synchronizingWithServer);
        final Context context = _service.getApplicationContext();
        final long now = System.currentTimeMillis();
        // Check if enough time has passed since we checked for new data
        SharedPreferences prefs = PreferenceManager.getDefaultSharedPreferences(context);
        final long lastConnectedTime = prefs.getLong(PREF_LAST_CONNECTED_TIME, 0);
        if (now - lastConnectedTime < DataUtils.METRIC_DATA_INTERVAL) {
            return;
        }
        // Calculate hours since last update. This information will be
        // passed to the user together with error message
        final CharSequence hoursSinceData = DateUtils.getRelativeTimeSpanString(
                database.getLastTimestamp(), now, DateUtils.MINUTE_IN_MILLIS);
        Future<?> pendingIO = null;
        try {
            // Try to connect to server
            if (_htmClient == null) {
                _htmClient = _service.connectToServer();
            }
            if (_htmClient == null) {
                throw new IOException("Unable to connect to server");
            }
            // Update last connected time
            SharedPreferences.Editor editor = prefs.edit();
            editor.putLong(PREF_LAST_CONNECTED_TIME, now);
            editor.apply();
            // Start by downloading all the metrics available from backend
            // in a background IO thread
            pendingIO = _service.getIOThreadPool().submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    try {
                        // First load metrics
                        loadAllMetrics();
                        // Load all annotations after metrics
                        loadAllAnnotations();
                        // Load all data after annotations
                        loadAllData();
                        // Synchronize notifications after data
                        synchronizeNotifications();
                        // Synchronize application data last
                        HTMApplication.getInstance().loadApplicationData(_htmClient);
                    } catch (android.database.sqlite.SQLiteFullException e) {
                        // Try to delete old records to make room if possible
                        Log.e(TAG, "Failed to save data into database", e);
                        database.deleteOldRecords();
                    }
                    return null;
                }
            });
            // Wait for metric data to finish
            pendingIO.get();
        } catch (InterruptedException e) {
            // Cancel pending tasks
            if (!pendingIO.isDone()) {
                pendingIO.cancel(true);
            }
            Log.w(TAG, "Interrupted while loading data");
        } catch (ExecutionException e) {
            // Cancel pending tasks
            if (!pendingIO.isDone()) {
                pendingIO.cancel(true);
            }
            Throwable original = e.getCause();
            if (original instanceof AuthenticationException) {
                _service.fireAuthenticationFailedEvent();
            } else if (original instanceof ObjectNotFoundException) {
                Log.e(TAG, "Error loading data", e);
                result = context.getString(R.string.refresh_update_error, hoursSinceData);
            } else if (original instanceof IOException) {
                Log.e(TAG, "Unable to connect", e);
                result = context.getString(R.string.refresh_server_unreachable, hoursSinceData);
            } else {
                Log.e(TAG, "Error loading data", e);
                result = context.getString(R.string.refresh_update_error, hoursSinceData);
            }
        } catch (AuthenticationException e) {
            _service.fireAuthenticationFailedEvent();
        } catch (HTMException e) {
            Log.e(TAG, "Error loading data", e);
            result = context.getString(R.string.refresh_update_error, hoursSinceData);
        } catch (IOException e) {
            Log.e(TAG, "Unable to connect", e);
            result = context.getString(R.string.refresh_server_unreachable, hoursSinceData);
        }
    } finally {
        _synchronizingWithServer = false;
        fireRefreshStateEvent(_synchronizingWithServer, result);
    }
}
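A condensed, runnable sketch of the guard used above: if waiting on the Future fails, cancel it, but only when it has not already completed. The sleeping task and the plain ExecutorService are placeholders standing in for the real metric download.

import java.util.concurrent.*;

public class CancelIfNotDone {
    public static void main(String[] args) {
        ExecutorService ioPool = Executors.newSingleThreadExecutor();
        Future<Void> pendingIO = ioPool.submit(() -> {
            Thread.sleep(1_000);          // stands in for loadAllMetrics()/loadAllData()
            return null;
        });
        try {
            pendingIO.get();              // wait for the background sync to finish
        } catch (InterruptedException | ExecutionException e) {
            if (!pendingIO.isDone()) {    // not finished yet, so stop it
                pendingIO.cancel(true);   // true = interrupt the running task
            }
        } finally {
            ioPool.shutdown();
        }
    }
}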
From source file:com.splout.db.dnode.HttpFileExchanger.java
public void send(final String tablespace, final int partition, final long version, final File binaryFile,
        final String url, boolean blockUntilComplete) {
    Future<?> future = clientExecutors.submit(new Runnable() {
        @Override
        public void run() {
            DataOutputStream writer = null;
            InputStream input = null;
            try {
                HttpURLConnection connection = (HttpURLConnection) new URL(url).openConnection();
                connection.setChunkedStreamingMode(config.getInt(FetcherProperties.DOWNLOAD_BUFFER));
                connection.setDoOutput(true);
                connection.setRequestProperty("filename", binaryFile.getName());
                connection.setRequestProperty("tablespace", tablespace);
                connection.setRequestProperty("partition", partition + "");
                connection.setRequestProperty("version", version + "");
                Checksum checkSum = new CRC32();
                writer = new DataOutputStream(new GZIPOutputStream(connection.getOutputStream()));
                // 1 - write file size
                writer.writeLong(binaryFile.length());
                writer.flush();
                // 2 - write file content
                input = new FileInputStream(binaryFile);
                byte[] buffer = new byte[config.getInt(FetcherProperties.DOWNLOAD_BUFFER)];
                long wrote = 0;
                for (int length = 0; (length = input.read(buffer)) > 0;) {
                    writer.write(buffer, 0, length);
                    checkSum.update(buffer, 0, length);
                    wrote += length;
                }
                // 3 - add the CRC so that we can verify the download
                writer.writeLong(checkSum.getValue());
                writer.flush();
                log.info("Sent file " + binaryFile + " to " + url + " with #bytes: " + wrote
                        + " and checksum: " + checkSum.getValue());
            } catch (IOException e) {
                log.error(e);
            } finally {
                try {
                    if (input != null) {
                        input.close();
                    }
                    if (writer != null) {
                        writer.close();
                    }
                } catch (IOException ignore) {
                }
            }
        }
    });
    try {
        if (blockUntilComplete) {
            // Poll until the upload task has completed or been cancelled
            while (!future.isDone() && !future.isCancelled()) {
                Thread.sleep(1000);
            }
        }
    } catch (InterruptedException e) {
        // interrupted!
    }
}
From source file:com.mirth.connect.connectors.tcp.TcpReceiver.java
/**
 * Attempts to get the result of any Future tasks which may still be running. Any completed
 * tasks are removed from the Future list.
 *
 * This can ensure that all client socket threads are disposed, so that a remote client wouldn't
 * be able to still send a message after a channel has been stopped or undeployed (even though
 * it wouldn't be processed through the channel anyway).
 *
 * @param block
 *            - If true, then each Future task will be joined to this one, blocking until the
 *            task thread dies.
 * @param interrupt
 *            - If true, each currently running task thread will be interrupted in an attempt to
 *            stop the task. Any interrupted exceptions will be caught and not thrown, in a best
 *            effort to ensure that all results are taken care of.
 * @param remove
 *            - If true, each completed result will be removed from the Future set during
 *            iteration.
 */
private void cleanup(boolean block, boolean interrupt, boolean remove) throws InterruptedException {
    for (Iterator<Future<Throwable>> it = results.iterator(); it.hasNext();) {
        Future<Throwable> result = it.next();
        if (interrupt) {
            // Cancel the task, with the option of whether or not to forcefully interrupt it
            result.cancel(true);
        }
        if (block) {
            // Attempt to get the result (which blocks until it returns)
            Throwable t = null;
            try {
                // If the return value is not null, then an exception was raised somewhere in the client socket thread
                if ((t = result.get()) != null) {
                    logger.debug("Client socket thread returned unsuccessfully ("
                            + connectorProperties.getName() + " \"Source\" on channel " + getChannelId() + ").", t);
                }
            } catch (Exception e) {
                logger.debug("Error retrieving client socket thread result for "
                        + connectorProperties.getName() + " \"Source\" on channel " + getChannelId() + ".", e);
                // Unwrap the exception that was actually thrown by get()
                Throwable cause;
                if (e instanceof ExecutionException) {
                    cause = e.getCause();
                } else {
                    cause = e;
                }
                if (cause instanceof InterruptedException) {
                    Thread.currentThread().interrupt();
                    if (!interrupt) {
                        throw (InterruptedException) cause;
                    }
                }
            }
        }
        if (remove) {
            // Remove the task from the list if it's done, or if it's been cancelled
            if (result.isDone()) {
                it.remove();
            }
        }
    }
}
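A condensed sketch of the cancel-then-prune pattern above: after cancel(true), isDone() also reports true for cancelled tasks, which is what lets the iterator drop every tracked Future. The pool, task, and class names are placeholders, not part of the Mirth source.

import java.util.*;
import java.util.concurrent.*;

public class FutureCleanup {
    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        List<Future<Throwable>> results = new ArrayList<>();
        for (int i = 0; i < 4; i++) {
            results.add(pool.submit(() -> {
                Thread.sleep(5_000);   // long-running client socket thread
                return null;
            }));
        }
        for (Iterator<Future<Throwable>> it = results.iterator(); it.hasNext();) {
            Future<Throwable> result = it.next();
            result.cancel(true);       // interrupt the task if it is still running
            if (result.isDone()) {     // true for completed *and* cancelled tasks
                it.remove();
            }
        }
        pool.shutdown();
        System.out.println("futures still tracked: " + results.size());
    }
}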
From source file:com.hygenics.parser.ParseDispatcher.java
/** * Fork/Join Pool Solution Maximizes Speed. JSon increases ease of use * // w w w . j ava2 s .c o m */ public void run() { log.info("Starting Clock and Parsing @" + Calendar.getInstance().getTime().toString()); long t = Calendar.getInstance().getTimeInMillis(); int pid = 0; int id = 0; int checkattempts = 0; String add = null; this.schema = Properties.getProperty(this.schema); this.select = Properties.getProperty(this.select); this.extracondition = Properties.getProperty(this.extracondition); this.column = Properties.getProperty(this.column); ArrayList<String> parsedrows = new ArrayList<String>(); Set<Callable<String>> collect = new HashSet<Callable<String>>(); List<Future<String>> futures; List<Future<ArrayList<String>>> qfutures; Set<Callable<ArrayList<String>>> qcollect = new HashSet<Callable<ArrayList<String>>>(4); ForkJoinPool fjp = new ForkJoinPool((int) Math.ceil(Runtime.getRuntime().availableProcessors() * procnum)); if (schema != null) { createTables(); } boolean run = true; String condition; int w = 0; int start = offset; int chunksize = (int) Math.ceil(pullsize / qnum); // attempt to query the database from multiple threads do { // query for pages pages = new ArrayList<String>(pullsize); log.info("Looking for Pages."); for (int conn = 0; conn < qnum; conn++) { // create condition condition = " WHERE " + pullid + " >= " + (start + (conn * chunksize)) + " AND " + pullid + " < " + Integer.toString(start + (chunksize * (conn + 1))); if (extracondition != null) { condition += " " + extracondition.trim(); } // get queries qcollect.add(new SplitQuery(template, (select + condition))); log.info("Fetching " + select + condition); } start += (chunksize * qnum); qfutures = fjp.invokeAll(qcollect); w = 0; while (fjp.getActiveThreadCount() > 0 && fjp.isQuiescent() == false) { w++; } log.info("Waited for " + w + " cycles"); for (Future<ArrayList<String>> f : qfutures) { try { ArrayList<String> test = f.get(); if (test != null) { if (test.size() > 0) { pages.addAll(test); } } if (f.isDone() == false) { f.cancel(true); } f = null; } catch (Exception e) { log.warn("Encoding Error!"); e.printStackTrace(); } } qcollect = new HashSet<Callable<ArrayList<String>>>(4); qfutures = null; log.info("Finished Getting Pages"); // if no records then get records that may have been dropped if (pages.size() == 0 && checkstring != null && checkstring.trim().length() > 0 && checkattempts < reattempts) { checkattempts += 1; log.info("Checking for Drops"); qcollect.add(new SplitQuery(template, (checkstring))); qfutures = fjp.invokeAll(qcollect); w = 0; while (fjp.getActiveThreadCount() > 0 && fjp.isQuiescent() == false) { w++; } log.info("Waited for " + w + " cycles"); for (Future<ArrayList<String>> f : qfutures) { try { ArrayList<String> test = f.get(); if (test != null) { if (test.size() > 0) { pages.addAll(test); } } if (f.isDone() == false) { f.cancel(true); } f = null; } catch (Exception e) { log.warn("Encoding Error!"); e.printStackTrace(); } } qfutures = null; qcollect = new HashSet<Callable<ArrayList<String>>>(4); } else if (checkattempts >= reattempts) { pages.clear(); } log.info("Found " + pages.size() + " records!"); // get hashes if necessary if (getHash) { log.info("Hashing " + pages.size() + " Records"); ArrayList<String> hashedrows = new ArrayList<String>(); for (String row : pages) { collect.add(new CreateHash(row, pid)); pid++; } log.info("Invoking"); futures = fjp.invokeAll(collect); w = 0; while (fjp.getActiveThreadCount() > 0 && fjp.isQuiescent() == false) { w++; } log.info("Waited " + w + " 
Cycles!"); for (Future<String> f : futures) { if (f != null) { String json; try { json = f.get(termtime, TimeUnit.MILLISECONDS); if (json != null) { hashedrows.add(json); } } catch (Exception e) { log.warn("Encoding Error!"); e.printStackTrace(); } } } log.info("Hashed " + hashedrows.size() + " Records!"); pages = hashedrows; collect = new HashSet<Callable<String>>(pullsize); futures.clear(); log.info("Completed Hashing"); } log.info("Performing Regex"); // handle single patterns int i = 0; if (singlepats != null) { log.info("Found Singlepats"); int subs = 0; int rows = 0; for (String row : pages) { rows += 1; String inrow = row; try { inrow = inrow.replaceAll("\t|\r|\r\n|\n", ""); Map<String, Json> jmap = Json.read(inrow).asJsonMap(); if (singlepats.containsKey("table")) { subs += 1; if (fjp.isShutdown()) { fjp = new ForkJoinPool((Runtime.getRuntime().availableProcessors() * procnum)); } if (jmap.get(column) != null) { if (test) { System.out.println("//////////////////////HTML////////////////////////\n" + jmap.get(column).asString() + "\n///////////////////////////////END///////////////////////////\n\n"); } if (mustcontain != null) { if (jmap.get(column).asString().contains(mustcontain)) { if (cannotcontain != null) { if (jmap.get(column).asString().contains(cannotcontain) == false) collect.add(new ParsePage(unescape, replacementPattern, singlepats.get("table"), jmap.get(column).asString().replaceAll("\\s\\s", " "), singlepats, Calendar.getInstance().getTime().toString(), jmap.get("offenderhash").asString())); } else { collect.add(new ParsePage(unescape, replacementPattern, singlepats.get("table"), jmap.get(column).asString().replaceAll("\\s\\s", " "), singlepats, Calendar.getInstance().getTime().toString(), jmap.get("offenderhash").asString())); } } } else if (cannotcontain != null) { if (jmap.get(column).asString().contains(cannotcontain) == false) { collect.add( new ParsePage(unescape, replacementPattern, singlepats.get("table"), jmap.get(column).asString().replaceAll("\\s\\s", " "), singlepats, Calendar.getInstance().getTime().toString(), jmap.get("offenderhash").asString())); } } else { collect.add(new ParsePage(unescape, replacementPattern, singlepats.get("table"), jmap.get(column).asString().replaceAll("\\s\\s", " "), singlepats, Calendar.getInstance().getTime().toString(), jmap.get("offenderhash").asString())); } } } i++; if (((i % commit_size) == 0 & i != 0) || i == pages.size() || pages.size() == 1 && singlepats != null) { log.info("Getting Regex Results"); log.info("Getting Tasks"); futures = fjp.invokeAll(collect); w = 0; while (fjp.getActiveThreadCount() > 0 && fjp.isQuiescent() == false) { w++; } log.info("Waited for " + w + " cycles"); for (Future<String> r : futures) { try { add = r.get(); if (add.contains("No Data") == false) { parsedrows.add(add); } add = null; } catch (Exception e) { log.warn("Encoding Error!"); e.printStackTrace(); } } futures = null; collect = new HashSet<Callable<String>>(); if (parsedrows.size() >= commit_size) { log.info("INSERTING " + parsedrows.size() + " records!"); if (parsedrows.size() >= SPLITSIZE) { sendToDb(parsedrows, true); } else { sendToDb(parsedrows, false); } parsedrows = new ArrayList<String>(pullsize); } // hint to the gc in case it actually pays off; use // -X:compactexplicitgc to improve odds and // -XX:UseConcMarkSweepGC for improving odds on // older generation strings // (think if i were a gambling man) System.gc(); Runtime.getRuntime().gc(); } } catch (Exception e) { log.warn("Encoding Error!"); e.printStackTrace(); } } 
log.info("Submitted " + subs + " records. Found " + rows + " rows"); } log.info("REMAINING ROWS TO COMMIT " + parsedrows.size()); log.info("Rows Left" + parsedrows.size()); if (parsedrows.size() > 0) { if (parsedrows.size() >= SPLITSIZE) { sendToDb(parsedrows, true); } else { sendToDb(parsedrows, false); } parsedrows = new ArrayList<String>(); } // handle multi patterns if (multipats != null) { // parse multiple pages for the run int subs = 0; for (String row : pages) { try { for (String k : multipats.keySet()) { if (fjp.isShutdown()) { fjp = new ForkJoinPool(Runtime.getRuntime().availableProcessors()); } Map<String, Json> jmap = Json.read(row).asJsonMap(); if (jmap.get(column) != null) { subs += 1; if (test) { System.out.println("//////////////////////HTML////////////////////////\n" + jmap.get(column).asString() + "\n///////////////////////////////END///////////////////////////\n\n"); } if (mustcontain != null) { if (jmap.get(column).asString().contains(mustcontain)) { if (cannotcontain != null) { if (jmap.get(column).asString().contains(cannotcontain) == false) { collect.add( new ParseMultiPage(unescape, replacementPattern, k, jmap.get(column).asString().replaceAll("\\s\\s", " "), jmap.get("offenderhash").asString(), Calendar.getInstance().getTime().toString(), multipats.get(k))); } } else { collect.add(new ParseMultiPage(unescape, replacementPattern, k, jmap.get(column).asString(), jmap.get("offenderhash").asString().replaceAll("\\s\\s", " "), Calendar.getInstance().getTime().toString(), multipats.get(k))); } } } else if (cannotcontain != null) { if (jmap.get(column).asString().contains(cannotcontain) == false) { collect.add(new ParseMultiPage(unescape, replacementPattern, k, jmap.get(column).asString().replaceAll("\\s\\s", " "), jmap.get("offenderhash").asString(), Calendar.getInstance().getTime().toString(), multipats.get(k))); } } else { collect.add(new ParseMultiPage(unescape, replacementPattern, k, jmap.get(column).asString().replaceAll("\\s\\s", " "), jmap.get("offenderhash").asString(), Calendar.getInstance().getTime().toString(), multipats.get(k))); } } i++; if (((i % commit_size) == 0 & i != 0) || i == pages.size() || pages.size() == 1 && multipats != null) { futures = fjp.invokeAll(collect); w = 0; while (fjp.getActiveThreadCount() > 0 && fjp.isQuiescent() == false) { w++; } log.info("Waited " + w + " Cycles"); for (Future<String> r : futures) { try { add = r.get(); if (add.contains("No Data") == false) { for (String js : add.split("~")) { parsedrows.add(js); } } add = null; if (r.isDone() == false) { r.cancel(true); } r = null; } catch (InterruptedException e) { // TODO Auto-generated catch block e.printStackTrace(); } catch (ExecutionException e) { // TODO Auto-generated catch block e.printStackTrace(); } } futures = null; collect = new HashSet<Callable<String>>(); if (parsedrows.size() >= commit_size) { log.info("INSERTING " + parsedrows.size() + " records!"); if (parsedrows.size() >= SPLITSIZE) { sendToDb(parsedrows, true); } else { sendToDb(parsedrows, false); } parsedrows = new ArrayList<String>(pullsize); } // hint to the gc in case it actually pays off System.gc(); Runtime.getRuntime().gc(); } } } catch (Exception e) { log.warn("Encoding Error!"); } } log.info("Submitted " + subs + " records."); } // handle looped patterns if (loopedpats != null) { log.info("Looped Patterns Found"); int subs = 0; if (fjp.isShutdown()) { fjp = new ForkJoinPool(Runtime.getRuntime().availableProcessors() * procnum); } for (String row : pages) { try { for (String k : loopedpats.keySet()) { 
if (fjp.isShutdown()) { fjp = new ForkJoinPool(Runtime.getRuntime().availableProcessors() * procnum); } Map<String, Json> jmap = Json.read(row).asJsonMap(); if (jmap.get(column) != null) { subs += 1; if (mustcontain != null) { if (jmap.get(column).asString().contains(mustcontain)) { if (cannotcontain != null) { if (jmap.get(column).asString().contains(cannotcontain) == false) { collect.add( new LoopRegex(unescape, jmap.get(column).asString().replaceAll("\\s\\s", " "), jmap.get("offenderhash").asString(), Calendar.getInstance().getTime().toString(), k, replacementPattern, loopedpats.get(k), test)); } } else { collect.add(new LoopRegex(unescape, jmap.get(column).asString().replaceAll("\\s\\s", " "), jmap.get("offenderhash").asString(), Calendar.getInstance().getTime().toString(), k, replacementPattern, loopedpats.get(k), test)); } } } else if (cannotcontain != null) { if (jmap.get(column).asString().contains(cannotcontain) == false) { collect.add(new LoopRegex(unescape, jmap.get(column).asString().replaceAll("\\s\\s", " "), jmap.get("offenderhash").asString(), Calendar.getInstance().getTime().toString(), k, replacementPattern, loopedpats.get(k), test)); } } else { collect.add(new LoopRegex(unescape, jmap.get(column).asString().replaceAll("\\s\\s", " "), jmap.get("offenderhash").asString(), Calendar.getInstance().getTime().toString(), k, replacementPattern, loopedpats.get(k), test)); } jmap.remove(k); } i++; if (((i % commit_size) == 0 & i != 0) || (i % (pages.size() - 1)) == 0 || pages.size() == 1) { futures = fjp.invokeAll(collect); w = 0; while (fjp.getActiveThreadCount() > 0 && fjp.isQuiescent() == false) { w++; } log.info("Waited " + w + " Cycles"); for (Future<String> r : futures) { try { add = r.get(); if (add.contains("No Data") == false) { for (String toarr : add.split("~")) { parsedrows.add(toarr); } } if (r.isDone() == false) { r.cancel(true); } add = null; } catch (Exception e) { log.warn("Encoding Error!"); e.printStackTrace(); } } futures = null; collect = new HashSet<Callable<String>>(); // hint to the gc in case it actually pays off System.gc(); Runtime.getRuntime().gc(); } } if (parsedrows.size() >= this.commit_size) { log.info("INSERTING " + parsedrows.size() + " records!"); if (parsedrows.size() >= SPLITSIZE) { sendToDb(parsedrows, true); } else { sendToDb(parsedrows, false); } parsedrows = new ArrayList<String>(pullsize); } } catch (Exception e) { log.warn("Encoding Error!"); } } log.info("Submitted " + subs + " records."); } if (collect.size() > 0) { log.info("Getting Last Regex Results for Iteration"); log.info("Getting Tasks"); futures = fjp.invokeAll(collect); w = 0; while (fjp.getActiveThreadCount() > 0 && fjp.isQuiescent() == false) { w++; } log.info("Waited for " + w + " cycles"); for (Future<String> r : futures) { try { add = r.get(); if (add.contains("No Data") == false) { parsedrows.add(add); } add = null; } catch (Exception e) { log.warn("Encoding Error!"); e.printStackTrace(); } } futures = null; collect = new HashSet<Callable<String>>(pullsize); // hint to the gc in case it actually pays off; use // -X:compactexplicitgc to improve odds and // -XX:UseConcMarkSweepGC for improving odds on older generation // strings // (think if i were a gambling man) System.gc(); Runtime.getRuntime().gc(); } log.info("REMAINING ROWS TO COMMIT " + parsedrows.size()); log.info("Rows Left" + parsedrows.size()); if (parsedrows.size() > 0) { if (parsedrows.size() >= SPLITSIZE) { sendToDb(parsedrows, true); } else { sendToDb(parsedrows, false); } parsedrows = new ArrayList<String>(); 
} } while (pages != null && pages.size() > 0); // ensure that nothing is still caught in limbo // final parser to ensure that nothing is left out if (collect.size() > 0) { log.info("More Rows Caught in FJP, Completing Process"); futures = fjp.invokeAll(collect); w = 0; while (fjp.getActiveThreadCount() > 0 && fjp.isQuiescent() == false) { w++; } log.info("Waited " + w + " Cycles"); for (Future<String> r : futures) { try { add = r.get(); if (add.contains("No Data") == false) { for (String js : add.split("~")) { parsedrows.add(js); } } add = null; if (r.isDone() == false) { r.cancel(true); } r = null; } catch (InterruptedException e) { e.printStackTrace(); } catch (ExecutionException e) { e.printStackTrace(); } } futures = null; collect = null; } // send any remaining parsed rows to the db if (parsedrows.size() > 0) { if (parsedrows.size() >= SPLITSIZE) { sendToDb(parsedrows, true); } else { sendToDb(parsedrows, false); } parsedrows = new ArrayList<String>(); } log.info("Shutting Down Fork Join Pool"); if (fjp.isShutdown() == false) { fjp.shutdownNow(); } fjp = null; log.info("Complete @" + Calendar.getInstance().getTime().toString()); log.info("Total Runtime(seconds): " + Double.toString((double) (Calendar.getInstance().getTimeInMillis() - t) / 1000)); // hint to the gc in case it actually pays off System.gc(); Runtime.getRuntime().gc(); }
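A note on the pattern above: ExecutorService.invokeAll() (which ForkJoinPool inherits) only returns once every submitted task has completed, and the Javadoc guarantees isDone() is true for each element of the returned list, so the cancel-if-not-done guard after invokeAll is a defensive no-op. A minimal demonstration, with placeholder task names:

import java.util.*;
import java.util.concurrent.*;

public class InvokeAllIsDone {
    public static void main(String[] args) {
        ForkJoinPool fjp = new ForkJoinPool(2);
        Set<Callable<String>> collect = new HashSet<>();
        for (int i = 0; i < 4; i++) {
            final int n = i;
            collect.add(() -> "row-" + n);   // stands in for a ParsePage task
        }
        List<Future<String>> futures = fjp.invokeAll(collect);
        for (Future<String> f : futures) {
            System.out.println(f.isDone()); // always true after invokeAll returns
        }
        fjp.shutdown();
    }
}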
From source file:au.org.ala.biocache.dao.SearchDAOImpl.java
/** * Writes the index fields to the supplied output stream in CSV format. * * DM: refactored to split the query by month to improve performance. * Further enhancements possible:/*from ww w .j a va 2 s . c o m*/ * 1) Multi threaded * 2) More filtering, by year or decade.. * * @param downloadParams * @param out * @param includeSensitive * @throws Exception */ public Map<String, Integer> writeResultsFromIndexToStream(final DownloadRequestParams downloadParams, OutputStream out, boolean includeSensitive, final DownloadDetailsDTO dd, boolean checkLimit) throws Exception { long start = System.currentTimeMillis(); final Map<String, Integer> uidStats = new HashMap<String, Integer>(); if (server == null) { initServer(); } try { SolrQuery solrQuery = new SolrQuery(); formatSearchQuery(downloadParams); String dFields = downloadParams.getFields(); if (includeSensitive) { //include raw latitude and longitudes dFields = dFields .replaceFirst("decimalLatitude.p", "sensitive_latitude,sensitive_longitude,decimalLatitude.p") .replaceFirst(",locality,", ",locality,sensitive_locality,"); } StringBuilder sb = new StringBuilder(dFields); if (!downloadParams.getExtra().isEmpty()) { sb.append(",").append(downloadParams.getExtra()); } String[] requestedFields = sb.toString().split(","); List<String>[] indexedFields = downloadFields.getIndexFields(requestedFields); logger.debug("Fields included in download: " + indexedFields[0]); logger.debug("Fields excluded from download: " + indexedFields[1]); logger.debug("The headers in downloads: " + indexedFields[2]); //set the fields to the ones that are available in the index final String[] fields = indexedFields[0].toArray(new String[] {}); solrQuery.setFields(fields); StringBuilder qasb = new StringBuilder(); if (!"none".equals(downloadParams.getQa())) { solrQuery.addField("assertions"); if (!"all".equals(downloadParams.getQa())) { //add all the qa fields qasb.append(downloadParams.getQa()); } } solrQuery.addField("institution_uid").addField("collection_uid").addField("data_resource_uid") .addField("data_provider_uid"); //add context information updateQueryContext(downloadParams); solrQuery.setQuery(buildSpatialQueryString(downloadParams)); solrQuery.setFacetMinCount(1); solrQuery.setFacetLimit(-1); //get the assertion facets to add them to the download fields boolean getAssertionsFromFacets = "all".equals(downloadParams.getQa()); SolrQuery monthAssertionsQuery = getAssertionsFromFacets ? solrQuery.getCopy().addFacetField("month", "assertions") : solrQuery.getCopy().addFacetField("month"); if (getAssertionsFromFacets) { //set the order for the facet to be based on the index - this will force the assertions to be returned in the same order each time //based on alphabetical sort. The number of QA's may change between searches so we can't guarantee that the order won't change monthAssertionsQuery.add("f.assertions.facet.sort", "index"); } QueryResponse facetQuery = runSolrQuery(monthAssertionsQuery, downloadParams.getFq(), 0, 0, "score", "asc"); //set the totalrecords for the download details dd.setTotalRecords(facetQuery.getResults().getNumFound()); if (checkLimit && dd.getTotalRecords() < MAX_DOWNLOAD_SIZE) { checkLimit = false; } //get the month facets to add them to the download fields get the assertion facets. 
List<Count> splitByFacet = null; for (FacetField facet : facetQuery.getFacetFields()) { if (facet.getName().equals("assertions") && facet.getValueCount() > 0) { for (FacetField.Count facetEntry : facet.getValues()) { if (qasb.length() > 0) qasb.append(","); qasb.append(facetEntry.getName()); } } if (facet.getName().equals("month") && facet.getValueCount() > 0) { splitByFacet = facet.getValues(); } } String qas = qasb.toString(); final String[] qaFields = qas.equals("") ? new String[] {} : qas.split(","); String[] qaTitles = downloadFields.getHeader(qaFields, false); String[] header = org.apache.commons.lang3.ArrayUtils.addAll(indexedFields[2].toArray(new String[] {}), qaTitles); //construct correct RecordWriter based on the supplied fileType final au.org.ala.biocache.RecordWriter rw = downloadParams.getFileType().equals("csv") ? new CSVRecordWriter(out, header, downloadParams.getSep(), downloadParams.getEsc()) : new ShapeFileRecordWriter(downloadParams.getFile(), out, (String[]) ArrayUtils.addAll(fields, qaFields)); if (rw instanceof ShapeFileRecordWriter) { dd.setHeaderMap(((ShapeFileRecordWriter) rw).getHeaderMappings()); } //order the query by _docid_ for faster paging solrQuery.addSortField("_docid_", ORDER.asc); //for each month create a separate query that pages through 500 records per page List<SolrQuery> queries = new ArrayList<SolrQuery>(); if (splitByFacet != null) { for (Count facet : splitByFacet) { if (facet.getCount() > 0) { SolrQuery splitByFacetQuery = solrQuery.getCopy() .addFilterQuery(facet.getFacetField().getName() + ":" + facet.getName()); splitByFacetQuery.setFacet(false); queries.add(splitByFacetQuery); } } SolrQuery remainderQuery = solrQuery.getCopy() .addFilterQuery("-" + splitByFacet.get(0).getFacetField().getName() + ":[* TO *]"); queries.add(0, remainderQuery); } else { queries.add(0, solrQuery); } //multi-thread the requests... 
ExecutorService pool = Executors.newFixedThreadPool(6); Set<Future<Integer>> futures = new HashSet<Future<Integer>>(); final AtomicInteger resultsCount = new AtomicInteger(0); final boolean threadCheckLimit = checkLimit; //execute each query, writing the results to stream for (final SolrQuery splitByFacetQuery : queries) { //define a thread Callable<Integer> solrCallable = new Callable<Integer>() { int startIndex = 0; @Override public Integer call() throws Exception { QueryResponse qr = runSolrQuery(splitByFacetQuery, downloadParams.getFq(), downloadBatchSize, startIndex, "_docid_", "asc"); int recordsForThread = 0; logger.debug(splitByFacetQuery.getQuery() + " - results: " + qr.getResults().size()); while (qr != null && !qr.getResults().isEmpty()) { logger.debug("Start index: " + startIndex + ", " + splitByFacetQuery.getQuery()); int count = 0; synchronized (rw) { count = processQueryResults(uidStats, fields, qaFields, rw, qr, dd, threadCheckLimit, resultsCount); recordsForThread += count; } startIndex += downloadBatchSize; //we have already set the Filter query the first time the query was constructed rerun with he same params but different startIndex if (!threadCheckLimit || resultsCount.intValue() < MAX_DOWNLOAD_SIZE) { if (!threadCheckLimit) { //throttle the download by sleeping try { Thread.currentThread().sleep(throttle); } catch (InterruptedException e) { //don't care if the sleep was interrupted } } qr = runSolrQuery(splitByFacetQuery, null, downloadBatchSize, startIndex, "_docid_", "asc"); } else { qr = null; } } return recordsForThread; } }; futures.add(pool.submit(solrCallable)); } //check the futures until all have finished int totalDownload = 0; Set<Future<Integer>> completeFutures = new HashSet<Future<Integer>>(); boolean allComplete = false; while (!allComplete) { for (Future future : futures) { if (!completeFutures.contains(future)) { if (future.isDone()) { totalDownload += (Integer) future.get(); completeFutures.add(future); } } } allComplete = completeFutures.size() == futures.size(); if (!allComplete) { Thread.sleep(1000); } } pool.shutdown(); rw.finalise(); out.flush(); long finish = System.currentTimeMillis(); long timeTakenInSecs = (finish - start) / 1000; if (timeTakenInSecs == 0) timeTakenInSecs = 1; logger.info("Download of " + resultsCount + " records in " + timeTakenInSecs + " seconds. Record/sec: " + resultsCount.intValue() / timeTakenInSecs); } catch (SolrServerException ex) { logger.error("Problem communicating with SOLR server while processing download. " + ex.getMessage(), ex); } return uidStats; }
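A condensed sketch of the completion loop above: poll each Future with isDone(), collect its result exactly once, and sleep between passes until every task has finished. The task bodies and counts are placeholders. Where busy-polling is undesirable, java.util.concurrent.ExecutorCompletionService is the stock alternative, handing back futures as they complete.

import java.util.*;
import java.util.concurrent.*;

public class PollUntilAllDone {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(6);
        Set<Future<Integer>> futures = new HashSet<>();
        for (int i = 0; i < 6; i++) {
            final int records = (i + 1) * 100;
            futures.add(pool.submit(() -> {
                Thread.sleep(records);      // stand-in for one paged SOLR download
                return records;
            }));
        }
        int totalDownload = 0;
        Set<Future<Integer>> completeFutures = new HashSet<>();
        boolean allComplete = false;
        while (!allComplete) {
            for (Future<Integer> future : futures) {
                if (!completeFutures.contains(future) && future.isDone()) {
                    totalDownload += future.get();   // safe: the task has already finished
                    completeFutures.add(future);
                }
            }
            allComplete = completeFutures.size() == futures.size();
            if (!allComplete) {
                Thread.sleep(100);
            }
        }
        pool.shutdown();
        System.out.println("records downloaded: " + totalDownload);
    }
}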
From source file:com.eucalyptus.blockstorage.S3SnapshotTransfer.java
/** * Compresses the snapshot and uploads it to a bucket in objectstorage gateway as a single or multipart upload based on the configuration in * {@link StorageInfo}. Bucket name should be configured before invoking this method. It can be looked up and initialized by {@link #prepareForUpload()} or * explicitly set using {@link #setBucketName(String)} * /*from w w w . ja v a 2s . co m*/ * @param sourceFileName * absolute path to the snapshot on the file system */ @Override public void upload(String sourceFileName) throws SnapshotTransferException { validateInput(); // Validate input loadTransferConfig(); // Load the transfer configuration parameters from database SnapshotProgressCallback progressCallback = new SnapshotProgressCallback(snapshotId); // Setup the progress callback Boolean error = Boolean.FALSE; ArrayBlockingQueue<SnapshotPart> partQueue = null; SnapshotPart part = null; SnapshotUploadInfo snapUploadInfo = null; Future<List<PartETag>> uploadPartsFuture = null; Future<String> completeUploadFuture = null; byte[] buffer = new byte[READ_BUFFER_SIZE]; Long readOffset = 0L; Long bytesRead = 0L; Long bytesWritten = 0L; int len; int partNumber = 1; try { // Get the uncompressed file size for uploading as metadata Long uncompressedSize = getFileSize(sourceFileName); // Setup the snapshot and part entities. snapUploadInfo = SnapshotUploadInfo.create(snapshotId, bucketName, keyName); Path zipFilePath = Files.createTempFile(keyName + '-', '-' + String.valueOf(partNumber)); part = SnapshotPart.createPart(snapUploadInfo, zipFilePath.toString(), partNumber, readOffset); FileInputStream inputStream = new FileInputStream(sourceFileName); ByteArrayOutputStream baos = new ByteArrayOutputStream(); GZIPOutputStream gzipStream = new GZIPOutputStream(baos); FileOutputStream outputStream = new FileOutputStream(zipFilePath.toString()); try { LOG.debug("Reading snapshot " + snapshotId + " and compressing it to disk in chunks of size " + partSize + " bytes or greater"); while ((len = inputStream.read(buffer)) > 0) { bytesRead += len; gzipStream.write(buffer, 0, len); if ((bytesWritten + baos.size()) < partSize) { baos.writeTo(outputStream); bytesWritten += baos.size(); baos.reset(); } else { gzipStream.close(); baos.writeTo(outputStream); // Order is important. Closing the gzip stream flushes stuff bytesWritten += baos.size(); baos.reset(); outputStream.close(); if (partNumber > 1) {// Update the part status part = part.updateStateCreated(bytesWritten, bytesRead, Boolean.FALSE); } else {// Initialize multipart upload only once after the first part is created LOG.info("Uploading snapshot " + snapshotId + " to objectstorage using multipart upload"); progressCallback.setUploadSize(uncompressedSize); uploadId = initiateMulitpartUpload(uncompressedSize); snapUploadInfo = snapUploadInfo.updateUploadId(uploadId); part = part.updateStateCreated(uploadId, bytesWritten, bytesRead, Boolean.FALSE); partQueue = new ArrayBlockingQueue<SnapshotPart>(queueSize); uploadPartsFuture = Threads.enqueue(serviceConfig, UploadPartTask.class, poolSize, new UploadPartTask(partQueue, progressCallback)); } // Check for the future task before adding part to the queue. if (uploadPartsFuture != null && uploadPartsFuture.isDone()) { // This task shouldn't be done until the last part is added. If it is done at this point, then something might have gone wrong throw new SnapshotUploadPartException( "Error uploading parts, aborting part creation process. 
Check previous log messages for the exact error"); } // Add part to the queue partQueue.put(part); // Prep the metadata for the next part readOffset += bytesRead; bytesRead = 0L; bytesWritten = 0L; // Setup the part entity for next part zipFilePath = Files.createTempFile(keyName + '-', '-' + String.valueOf((++partNumber))); part = SnapshotPart.createPart(snapUploadInfo, zipFilePath.toString(), partNumber, readOffset); gzipStream = new GZIPOutputStream(baos); outputStream = new FileOutputStream(zipFilePath.toString()); } } gzipStream.close(); baos.writeTo(outputStream); bytesWritten += baos.size(); baos.reset(); outputStream.close(); inputStream.close(); // Update the part status part = part.updateStateCreated(bytesWritten, bytesRead, Boolean.TRUE); // Update the snapshot upload info status snapUploadInfo = snapUploadInfo.updateStateCreatedParts(partNumber); } catch (Exception e) { LOG.error("Failed to upload " + snapshotId + " due to: ", e); error = Boolean.TRUE; throw new SnapshotTransferException("Failed to upload " + snapshotId + " due to: ", e); } finally { if (inputStream != null) { inputStream.close(); } if (gzipStream != null) { gzipStream.close(); } if (outputStream != null) { outputStream.close(); } baos.reset(); } if (partNumber > 1) { // Check for the future task before adding the last part to the queue. if (uploadPartsFuture != null && uploadPartsFuture.isDone()) { // This task shouldn't be done until the last part is added. If it is done at this point, then something might have gone wrong throw new SnapshotUploadPartException( "Error uploading parts, aborting part upload process. Check previous log messages for the exact error"); } // Add the last part to the queue partQueue.put(part); // Kick off the completion task completeUploadFuture = Threads.enqueue(serviceConfig, CompleteMpuTask.class, poolSize, new CompleteMpuTask(uploadPartsFuture, snapUploadInfo, partNumber)); } else { try { LOG.info("Uploading snapshot " + snapshotId + " to objectstorage as a single object. Compressed size of snapshot (" + bytesWritten + " bytes) is less than minimum part size (" + partSize + " bytes) for multipart upload"); PutObjectResult putResult = uploadSnapshotAsSingleObject(zipFilePath.toString(), bytesWritten, uncompressedSize, progressCallback); markSnapshotAvailable(); try { part = part.updateStateUploaded(putResult.getETag()); snapUploadInfo = snapUploadInfo.updateStateUploaded(putResult.getETag()); } catch (Exception e) { LOG.debug("Failed to update status in DB for " + snapUploadInfo); } LOG.info("Uploaded snapshot " + snapshotId + " to objectstorage"); } catch (Exception e) { error = Boolean.TRUE; LOG.error("Failed to upload snapshot " + snapshotId + " due to: ", e); throw new SnapshotTransferException("Failed to upload snapshot " + snapshotId + " due to: ", e); } finally { deleteFile(zipFilePath); } } } catch (SnapshotTransferException e) { error = Boolean.TRUE; throw e; } catch (Exception e) { error = Boolean.TRUE; LOG.error("Failed to upload snapshot " + snapshotId + " due to: ", e); throw new SnapshotTransferException("Failed to upload snapshot " + snapshotId + " due to: ", e); } finally { if (error) { abortUpload(snapUploadInfo); if (uploadPartsFuture != null && !uploadPartsFuture.isDone()) { uploadPartsFuture.cancel(true); } if (completeUploadFuture != null && !completeUploadFuture.isDone()) { completeUploadFuture.cancel(true); } } } }
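A condensed sketch of the guard used above: the part-upload worker is only supposed to finish after the final part has been queued, so seeing isDone() == true while parts are still being produced is treated as an early-failure signal. The queue protocol and names below are placeholders, not the Eucalyptus API.

import java.util.concurrent.*;

public class ProducerConsumerGuard {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        BlockingQueue<String> partQueue = new ArrayBlockingQueue<>(4);
        Future<Integer> uploadPartsFuture = pool.submit(() -> {
            int uploaded = 0;
            while (true) {
                String part = partQueue.take();
                if ("LAST".equals(part)) {
                    return uploaded;          // normal completion after the final part
                }
                uploaded++;
            }
        });
        for (int partNumber = 1; partNumber <= 3; partNumber++) {
            if (uploadPartsFuture.isDone()) { // finished before the last part => it failed
                throw new IllegalStateException("upload worker stopped early");
            }
            partQueue.put("part-" + partNumber);
        }
        partQueue.put("LAST");
        System.out.println("parts uploaded: " + uploadPartsFuture.get());
        pool.shutdown();
    }
}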
From source file:hudson.plugins.sshslaves.SSHLauncher.java
/**
 * {@inheritDoc}
 */
@Override
public synchronized void afterDisconnect(SlaveComputer slaveComputer, final TaskListener listener) {
    if (connection != null) {
        boolean connectionLost = reportTransportLoss(connection, listener);
        if (session != null) {
            // give the process 3 seconds to write out its dying message before we cut the loss
            // and give up on this process. if the slave process had JVM crash, OOME, or any other
            // critical problem, this will allow us to capture that.
            // exit code is also an useful info to figure out why the process has died.
            try {
                listener.getLogger().println(getSessionOutcomeMessage(session, connectionLost));
                session.getStdout().close();
                session.close();
            } catch (Throwable t) {
                t.printStackTrace(listener.error(Messages.SSHLauncher_ErrorWhileClosingConnection()));
            }
            session = null;
        }
        Slave n = slaveComputer.getNode();
        if (n != null && !connectionLost) {
            String workingDirectory = getWorkingDirectory(n);
            final String fileName = workingDirectory + "/slave.jar";
            Future<?> tidyUp = Computer.threadPoolForRemoting.submit(new Runnable() {
                public void run() {
                    // this would fail if the connection is already lost, so we want to check that.
                    // TODO: Connection class should expose whether it is still connected or not.
                    SFTPv3Client sftpClient = null;
                    try {
                        sftpClient = new SFTPv3Client(connection);
                        sftpClient.rm(fileName);
                    } catch (Exception e) {
                        if (sftpClient == null) { // system without SFTP
                            try {
                                connection.exec("rm " + fileName, listener.getLogger());
                            } catch (Error error) {
                                throw error;
                            } catch (Throwable x) {
                                x.printStackTrace(
                                        listener.error(Messages.SSHLauncher_ErrorDeletingFile(getTimestamp())));
                                // We ignore other Exception types
                            }
                        } else {
                            e.printStackTrace(
                                    listener.error(Messages.SSHLauncher_ErrorDeletingFile(getTimestamp())));
                        }
                    } finally {
                        if (sftpClient != null) {
                            sftpClient.close();
                        }
                    }
                }
            });
            try {
                // the delete is best effort only and if it takes longer than 60 seconds - or the launch
                // timeout (if specified) - then we should just give up and leave the file there.
                tidyUp.get(launchTimeoutSeconds == null ? 60 : launchTimeoutSeconds, TimeUnit.SECONDS);
            } catch (InterruptedException e) {
                e.printStackTrace(listener.error(Messages.SSHLauncher_ErrorDeletingFile(getTimestamp())));
                // we should either re-apply our interrupt flag or propagate... we don't want to propagate, so...
                Thread.currentThread().interrupt();
            } catch (ExecutionException e) {
                e.printStackTrace(listener.error(Messages.SSHLauncher_ErrorDeletingFile(getTimestamp())));
            } catch (TimeoutException e) {
                e.printStackTrace(listener.error(Messages.SSHLauncher_ErrorDeletingFile(getTimestamp())));
            } finally {
                if (!tidyUp.isDone()) {
                    tidyUp.cancel(true);
                }
            }
        }
        PluginImpl.unregister(connection);
        cleanupConnection(listener);
    }
}
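A condensed sketch of the bounded tidy-up above: wait with a timed get(), then in the finally block cancel the task if it still is not done, rather than leaking the background thread. Class, pool, and timing values are placeholders.

import java.util.concurrent.*;

public class BestEffortCleanup {
    public static void main(String[] args) {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        Future<?> tidyUp = pool.submit(() -> {
            try { Thread.sleep(5_000); } catch (InterruptedException ignore) { } // slow remote delete
        });
        try {
            tidyUp.get(1, TimeUnit.SECONDS);  // best effort: give up after the timeout
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        } catch (ExecutionException | TimeoutException e) {
            System.err.println("cleanup did not finish: " + e);
        } finally {
            if (!tidyUp.isDone()) {           // still running after the timeout?
                tidyUp.cancel(true);          // interrupt it rather than leave it running
            }
            pool.shutdown();
        }
    }
}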
From source file:de.fiz.ddb.aas.auxiliaryoperations.ThreadOrganisationSetApprove.java
@PreAuthorize(privileges = { PrivilegeEnum.ADMIN }, scope = Scope.ORGANIZATION, cacheUpdate = true) public Organisation call() throws AASUnauthorizedException, ExecutionException, IllegalAccessException { if (ConstEnumOrgStatus.approved.equals(this._organisation.getStatus())) { throw new ExecutionException("Die Institution ist bereits in der Status 'approved'.", null); }//from ww w .j a va 2 s .co m Future<Organisation> submitOrgOnWorkDir = null; Future<Organisation> submitOrgOnLicencedDir = null; Future<Organisation> submitOrgParentOnLicencedDir = null; Future<Organisation> submitOrgParentOnWorkDir = null; Organisation vOrgParentOnLicenceDir = null; Organisation vOrgParentOnWorkDir = null; try { // -- set a new status: this._organisation.setStatus(ConstEnumOrgStatus.approved); // -- save status: ThreadOrganisationUpdate threadOrganisationUpdate = new ThreadOrganisationUpdate(_ready, _organisation, false, _performer); threadOrganisationUpdate.setChangeOfStatus(true); submitOrgOnWorkDir = LDAPConnector.getSingletonInstance().getExecutorServiceOne() .submit(threadOrganisationUpdate); // -- Ist diese Organisation unter Licensed schon vorhanden? // -- Read organization on the license directory: ThreadOrganisationRead threadOrgOnLicencedDirRead = new ThreadOrganisationRead( new OIDs(this._organisation.getOIDs().getOrgName(), false), this.getPerformer()); // -- the request goes to the branch with licensed organizations: threadOrgOnLicencedDirRead.setLicensedOrgs(true); submitOrgOnLicencedDir = LDAPConnector.getSingletonInstance().getExecutorServiceOne() .submit(threadOrgOnLicencedDirRead); // -- Operations in the licensed area... Boolean vIsOrgParentLicense = null; if (this._organisation.getOrgParent() != null) { // -- Parent on the license directory: ThreadOrganisationRead threadOrgParentOnLicencedDirRead = new ThreadOrganisationRead( new OIDs(this._organisation.getOrgParent(), false), this.getPerformer()); // -- the request goes to the branch with licensed organizations: threadOrgParentOnLicencedDirRead.setLicensedOrgs(true); submitOrgParentOnLicencedDir = LDAPConnector.getSingletonInstance().getExecutorServiceOne() .submit(threadOrgParentOnLicencedDirRead); // -- Parent on the work directory: ThreadOrganisationRead threadOrgParentOnWorkDirRead = new ThreadOrganisationRead( new OIDs(this._organisation.getOrgParent(), false), this.getPerformer()); // -- the request goes to the branch with licensed organizations: submitOrgParentOnWorkDir = LDAPConnector.getSingletonInstance().getExecutorServiceOne() .submit(threadOrgParentOnWorkDirRead); // +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ // -- Parent on the license directory: try { //vIsOrgParentLicense = (threadOrgParentOnLicencedDirRead.call() != null); vIsOrgParentLicense = ((vOrgParentOnLicenceDir = submitOrgParentOnLicencedDir.get(3, TimeUnit.SECONDS)) != null); } /* catch (NameNotFoundException ex) { // hier gibt es keinen Grund zur Panik! ;-) vIsOrgParentLicense = Boolean.FALSE; } */ catch (ExecutionException ex) { if ((ex.getCause() != null) && (ex.getCause().getClass().isAssignableFrom(NameNotFoundException.class))) { // hier gibt es keinen Grund zur Panik! 
;-) vIsOrgParentLicense = Boolean.FALSE; } else { throw ex; } } catch (InterruptedException ex) { throw new ExecutionException(ex); } catch (TimeoutException ex) { throw new ExecutionException(ex); } } try { // -- Update abwarten this._organisation = submitOrgOnWorkDir.get(3, TimeUnit.SECONDS); // -- die Organisation wenn mglich in der lizenzierte Verzeichnis schreiben: if ((vIsOrgParentLicense == null) || (vIsOrgParentLicense.booleanValue())) { // -- ! This institution is classified to the licensed organizations: Organisation vOrgOnLicensedDir = null; try { vOrgOnLicensedDir = submitOrgOnLicencedDir.get(3, TimeUnit.SECONDS); if (!vOrgOnLicensedDir.getOrgRDN().equalsIgnoreCase(this._organisation.getOrgRDN())) { /* * The shift operation works beautifully but may cause to error, because there are * potential changes from the sub-organizations in the Work Directory will not be included. * ...therefore the Orgnanisation is first deleted and then will be re-copied ThreadOrganisationMove threadOrganisationMove = new ThreadOrganisationMove(vOrgOnLicensedDir.getOIDs().getOrgName(), this._organisation .getOrgParent(), true, _performer); vOrgOnLicensedDir = threadOrganisationMove.call(); */ this.deletingFromLicensedOrgsDir(vOrgOnLicensedDir); // -- !!! very important for further processing: vOrgOnLicensedDir = null; } } /* catch (NameNotFoundException ex) { // es gibt keinen Grund zur Panik! ;-) } */ catch (ExecutionException ex) { if ((ex.getCause() != null) && (ex.getCause().getClass().isAssignableFrom(NameNotFoundException.class))) { // hier gibt es keinen Grund zur Panik... } else { // hier aber schon... throw ex; } } catch (InterruptedException ex) { throw new ExecutionException(ex); } catch (TimeoutException ex) { throw new ExecutionException(ex); } if (vOrgOnLicensedDir != null) { if (!ConstEnumOrgStatus.revised.equals(this._oldStatus)) { // -- This should be never happen: LOG.log(Level.WARNING, "The old status is not ''revised'' but this organization is between the Licensed: this should never be happen! Old status: ''{0}''", this._oldStatus.name()); } // -- !!! The organization could not be moved: if (vOrgOnLicensedDir.getOrgRDN().equals(this._organisation.getOrgRDN())) { // -- Update licensed organization: try { threadOrganisationUpdate = new ThreadOrganisationUpdate(_ready, _organisation, false, _performer); threadOrganisationUpdate.setUpdatingOfLicensedOrgs(true); threadOrganisationUpdate.call(); } catch (NameNotFoundException ex) { throw new ExecutionException(ex); } catch (AttributeModificationException ex) { throw new ExecutionException(ex); } } else { LOG.log(Level.WARNING, "The licensed (RDN='" + vOrgOnLicensedDir.getOrgRDN() + "') organization can not be updated because it has been postponed to new RDN='" + this._organisation.getOrgRDN() + "'"); } } else { // -- Der Knoten sollte kopiert werden aber nur unter einem Bedingung... if (submitOrgParentOnWorkDir != null) { // -- Parent on the work directory: try { vOrgParentOnWorkDir = submitOrgParentOnWorkDir.get(3, TimeUnit.SECONDS); } catch (ExecutionException ex) { if ((ex.getCause() != null) && (ex.getCause().getClass() .isAssignableFrom(NameNotFoundException.class))) { // hier gibt es keinen Grund zur Panik! 
;-) } else { throw ex; } } catch (InterruptedException ex) { throw new ExecutionException(ex); } catch (TimeoutException ex) { throw new ExecutionException(ex); } } // ...dass die RDN des Parnts- Knoten stimmt, das heit, dass die nicht verschoben wurde: if (((vOrgParentOnWorkDir != null) && (vOrgParentOnLicenceDir != null) && (vOrgParentOnWorkDir.getOrgRDN().equals(vOrgParentOnLicenceDir.getOrgRDN()))) || ((vOrgParentOnWorkDir == null) && (vOrgParentOnLicenceDir == null))) { this.copyingToLicensedOrgs(_organisation); } } } } catch (InterruptedException ex) { throw new ExecutionException(ex); } catch (TimeoutException ex) { throw new ExecutionException(ex); } } finally { if ((submitOrgOnWorkDir != null) && (!submitOrgOnWorkDir.isDone()) && (!submitOrgOnWorkDir.isCancelled())) { submitOrgOnWorkDir.cancel(true); } if ((submitOrgOnLicencedDir != null) && (!submitOrgOnLicencedDir.isDone()) && (!submitOrgOnLicencedDir.isCancelled())) { submitOrgOnLicencedDir.cancel(true); } if ((submitOrgParentOnWorkDir != null) && (!submitOrgParentOnWorkDir.isDone()) && (!submitOrgParentOnWorkDir.isCancelled())) { submitOrgParentOnWorkDir.cancel(true); } if ((submitOrgParentOnLicencedDir != null) && (!submitOrgParentOnLicencedDir.isDone()) && (!submitOrgParentOnLicencedDir.isCancelled())) { submitOrgParentOnLicencedDir.cancel(true); } } return this._organisation; }
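A condensed sketch of the finally block above: several futures are awaited with a timeout, and any that are neither done nor already cancelled get cancelled so no background directory work is left running. Per the Future Javadoc a cancelled task also reports isDone() == true, so the extra isCancelled() test mirrors the original as a belt-and-braces check. Names and timings are placeholders, not the AAS API.

import java.util.Arrays;
import java.util.concurrent.*;

public class CancelOutstandingFutures {
    public static void main(String[] args) {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        Future<String> submitOrgOnWorkDir = executor.submit(() -> "updated");
        Future<String> submitOrgOnLicencedDir = executor.submit(() -> {
            Thread.sleep(10_000);             // a lookup that may not return in time
            return "licensed copy";
        });
        try {
            System.out.println(submitOrgOnWorkDir.get(3, TimeUnit.SECONDS));
        } catch (InterruptedException | ExecutionException | TimeoutException e) {
            e.printStackTrace();
        } finally {
            for (Future<String> f : Arrays.asList(submitOrgOnWorkDir, submitOrgOnLicencedDir)) {
                if (!f.isDone() && !f.isCancelled()) {
                    f.cancel(true);           // stop any work that is still outstanding
                }
            }
            executor.shutdown();
        }
    }
}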