List of usage examples for the java.lang.InterruptedException constructor
public InterruptedException(String s)
Constructs an InterruptedException with the specified detail message.
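Before the project examples, a minimal, self-contained sketch of the constructor in use. The class and method names here are illustrative, not taken from any of the projects below:

public class InterruptibleWorker {

    // Minimal sketch: construct and throw an InterruptedException with a
    // detail message when an interrupt is detected mid-task.
    static void checkInterrupt(String taskName) throws InterruptedException {
        if (Thread.interrupted()) { // checks and clears the interrupt flag
            throw new InterruptedException("Task '" + taskName + "' was interrupted");
        }
    }

    public static void main(String[] args) throws InterruptedException {
        Thread.currentThread().interrupt(); // simulate an external interrupt
        checkInterrupt("demo");             // throws with the detail message
    }
}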
From source file: de.cenote.jasperstarter.Report.java
private Map<String, Object> promptForParams(JRParameter[] reportParams, Map<String, Object> params,
        String reportName) throws InterruptedException {
    boolean isForPromptingOnly = false;
    boolean isUserDefinedOnly = false;
    boolean emptyOnly = false;
    switch (config.getAskFilter()) {
    case ae:
        emptyOnly = true;
        // falls through
    case a:
        isForPromptingOnly = false;
        isUserDefinedOnly = false;
        break;
    case ue:
        emptyOnly = true;
        // falls through
    case u:
        isUserDefinedOnly = true;
        break;
    case pe:
        emptyOnly = true;
        // falls through
    case p:
        isUserDefinedOnly = true;
        isForPromptingOnly = true;
        break;
    }
    Report.setLookAndFeel();
    ParameterPrompt prompt = new ParameterPrompt(null, reportParams, params, reportName,
            isForPromptingOnly, isUserDefinedOnly, emptyOnly);
    if (JOptionPane.OK_OPTION != prompt.show()) {
        throw new InterruptedException("User aborted at parameter prompt!");
    }
    if (config.isVerbose()) {
        System.out.println("----------------------------");
        System.out.println("Parameter prompt:");
        for (Object key : params.keySet()) {
            System.out.println(key + " = " + params.get(key));
        }
        System.out.println("----------------------------");
    }
    return params;
}
From source file: org.apache.solr.handler.IndexFetcher.java

/**
 * This command downloads all the necessary files from master to install an index commit point.
 * Only changed files are downloaded. It also downloads the conf files (if they are modified).
 *
 * @param forceReplication force a replication in all cases
 * @param forceCoreReload force a core reload in all cases
 * @return true on success, false if slave is already in sync
 * @throws IOException if an exception occurs
 */
boolean fetchLatestIndex(boolean forceReplication, boolean forceCoreReload)
        throws IOException, InterruptedException {
    boolean cleanupDone = false;
    boolean successfulInstall = false;
    markReplicationStart();
    Directory tmpIndexDir = null;
    String tmpIndex;
    Directory indexDir = null;
    String indexDirPath;
    boolean deleteTmpIdxDir = true;
    File tmpTlogDir = null;

    if (!solrCore.getSolrCoreState().getLastReplicateIndexSuccess()) {
        // if the last replication was not a success, we force a full replication
        // when we are a bit more confident we may want to try a partial replication
        // if the error is connection related or something, but we have to be careful
        forceReplication = true;
    }

    try {
        // get the current 'replicateable' index version in the master
        NamedList response;
        try {
            response = getLatestVersion();
        } catch (Exception e) {
            LOG.error("Master at: " + masterUrl + " is not available. Index fetch failed. Exception: "
                    + e.getMessage());
            return false;
        }

        long latestVersion = (Long) response.get(CMD_INDEX_VERSION);
        long latestGeneration = (Long) response.get(GENERATION);

        LOG.info("Master's generation: " + latestGeneration);
        LOG.info("Master's version: " + latestVersion);

        // TODO: make sure that getLatestCommit only returns commit points for the main index (i.e. no side-car indexes)
        IndexCommit commit = solrCore.getDeletionPolicy().getLatestCommit();
        if (commit == null) {
            // Presumably the IndexWriter hasn't been opened yet, and hence the deletion policy hasn't been updated with commit points
            RefCounted<SolrIndexSearcher> searcherRefCounted = null;
            try {
                searcherRefCounted = solrCore.getNewestSearcher(false);
                if (searcherRefCounted == null) {
                    LOG.warn("No open searcher found - fetch aborted");
                    return false;
                }
                commit = searcherRefCounted.get().getIndexReader().getIndexCommit();
            } finally {
                if (searcherRefCounted != null)
                    searcherRefCounted.decref();
            }
        }

        LOG.info("Slave's generation: " + commit.getGeneration());

        if (latestVersion == 0L) {
            if (forceReplication && commit.getGeneration() != 0) {
                // since we won't get the files for an empty index,
                // we just clear ours and commit
                RefCounted<IndexWriter> iw = solrCore.getUpdateHandler().getSolrCoreState()
                        .getIndexWriter(solrCore);
                try {
                    iw.get().deleteAll();
                } finally {
                    iw.decref();
                }
                SolrQueryRequest req = new LocalSolrQueryRequest(solrCore, new ModifiableSolrParams());
                solrCore.getUpdateHandler().commit(new CommitUpdateCommand(req, false));
            }

            // there is nothing to be replicated
            successfulInstall = true;
            return true;
        }

        // TODO: Should we be comparing timestamps (across machines) here?
        if (!forceReplication && IndexDeletionPolicyWrapper.getCommitTimestamp(commit) == latestVersion) {
            // master and slave are already in sync just return
            LOG.info("Slave in sync with master.");
            successfulInstall = true;
            return true;
        }
        LOG.info("Starting replication process");
        // get the list of files first
        fetchFileList(latestGeneration);
        // this can happen if the commit point is deleted before we fetch the file list.
        if (filesToDownload.isEmpty()) {
            return false;
        }
        LOG.info("Number of files in latest index in master: " + filesToDownload.size());
        if (tlogFilesToDownload != null) {
            LOG.info("Number of tlog files in master: " + tlogFilesToDownload.size());
        }

        // Create the sync service
        fsyncService = ExecutorUtil.newMDCAwareSingleThreadExecutor(new DefaultSolrThreadFactory("fsyncService"));
        // use a synchronized list because the list is read by other threads (to show details)
        filesDownloaded = Collections.synchronizedList(new ArrayList<Map<String, Object>>());
        // if the generation of master is older than that of the slave, it means they are not compatible to be copied
        // then a new index directory to be created and all the files need to be copied
        boolean isFullCopyNeeded = IndexDeletionPolicyWrapper.getCommitTimestamp(commit) >= latestVersion
                || commit.getGeneration() >= latestGeneration || forceReplication;

        String timestamp = new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(new Date());
        String tmpIdxDirName = "index." + timestamp;
        tmpIndex = solrCore.getDataDir() + tmpIdxDirName;

        tmpIndexDir = solrCore.getDirectoryFactory().get(tmpIndex, DirContext.DEFAULT,
                solrCore.getSolrConfig().indexConfig.lockType);

        // tmp dir for tlog files
        if (tlogFilesToDownload != null) {
            tmpTlogDir = new File(solrCore.getUpdateHandler().getUpdateLog().getLogDir(), "tlog." + timestamp);
        }

        // current index dir
        indexDirPath = solrCore.getIndexDir();
        indexDir = solrCore.getDirectoryFactory().get(indexDirPath, DirContext.DEFAULT,
                solrCore.getSolrConfig().indexConfig.lockType);

        try {
            // We will compare all the index files from the master vs the index files on disk to see if there is a mismatch
            // in the metadata. If there is a mismatch for the same index file then we download the entire index again.
            if (!isFullCopyNeeded && isIndexStale(indexDir)) {
                isFullCopyNeeded = true;
            }

            if (!isFullCopyNeeded) {
                // a searcher might be using some flushed but not committed segments
                // because of soft commits (which open a searcher on IW's data)
                // so we need to close the existing searcher on the last commit
                // and wait until we are able to clean up all unused lucene files
                if (solrCore.getCoreDescriptor().getCoreContainer().isZooKeeperAware()) {
                    solrCore.closeSearcher();
                }

                // rollback and reopen index writer and wait until all unused files
                // are successfully deleted
                solrCore.getUpdateHandler().newIndexWriter(true);
                RefCounted<IndexWriter> writer = solrCore.getUpdateHandler().getSolrCoreState().getIndexWriter(null);
                try {
                    IndexWriter indexWriter = writer.get();
                    int c = 0;
                    indexWriter.deleteUnusedFiles();
                    while (hasUnusedFiles(indexDir, commit)) {
                        indexWriter.deleteUnusedFiles();
                        LOG.info("Sleeping for 1000ms to wait for unused lucene index files to be delete-able");
                        Thread.sleep(1000);
                        c++;
                        if (c >= 30) {
                            LOG.warn("IndexFetcher unable to cleanup unused lucene index files so we must do a full copy instead");
                            isFullCopyNeeded = true;
                            break;
                        }
                    }
                    if (c > 0) {
                        LOG.info("IndexFetcher slept for " + (c * 1000)
                                + "ms for unused lucene index files to be delete-able");
                    }
                } finally {
                    writer.decref();
                }
            }

            boolean reloadCore = false;

            try {
                // we have to be careful and do this after we know isFullCopyNeeded won't be flipped
                if (!isFullCopyNeeded) {
                    solrCore.getUpdateHandler().getSolrCoreState().closeIndexWriter(solrCore, true);
                }

                LOG.info("Starting download (fullCopy={}) to {}", isFullCopyNeeded, tmpIndexDir);
                successfulInstall = false;

                long bytesDownloaded = downloadIndexFiles(isFullCopyNeeded, indexDir, tmpIndexDir, latestGeneration);
                if (tlogFilesToDownload != null) {
                    bytesDownloaded += downloadTlogFiles(tmpTlogDir, latestGeneration);
                    reloadCore = true; // reload update log
                }
                final long timeTakenSeconds = getReplicationTimeElapsed();
                final Long bytesDownloadedPerSecond = (timeTakenSeconds != 0
                        ? new Long(bytesDownloaded / timeTakenSeconds)
                        : null);
                LOG.info("Total time taken for download (fullCopy={},bytesDownloaded={}) : {} secs ({} bytes/sec) to {}",
                        isFullCopyNeeded, bytesDownloaded, timeTakenSeconds, bytesDownloadedPerSecond, tmpIndexDir);

                Collection<Map<String, Object>> modifiedConfFiles = getModifiedConfFiles(confFilesToDownload);
                if (!modifiedConfFiles.isEmpty()) {
                    reloadCore = true;
                    downloadConfFiles(confFilesToDownload, latestGeneration);
                    if (isFullCopyNeeded) {
                        successfulInstall = solrCore.modifyIndexProps(tmpIdxDirName);
                        deleteTmpIdxDir = false;
                    } else {
                        successfulInstall = moveIndexFiles(tmpIndexDir, indexDir);
                    }
                    if (tlogFilesToDownload != null) {
                        // move tlog files and refresh ulog only if we successfully installed a new index
                        successfulInstall &= moveTlogFiles(tmpTlogDir);
                    }
                    if (successfulInstall) {
                        if (isFullCopyNeeded) {
                            // let the system know we are changing dir's and the old one
                            // may be closed
                            if (indexDir != null) {
                                solrCore.getDirectoryFactory().doneWithDirectory(indexDir);
                                // Cleanup all index files not associated with any *named* snapshot.
                                solrCore.deleteNonSnapshotIndexFiles(indexDirPath);
                            }
                        }
                        LOG.info("Configuration files are modified, core will be reloaded");
                        // write to a file the time of replication and the conf files
                        logReplicationTimeAndConfFiles(modifiedConfFiles, successfulInstall);
                    }
                } else {
                    terminateAndWaitFsyncService();
                    if (isFullCopyNeeded) {
                        successfulInstall = solrCore.modifyIndexProps(tmpIdxDirName);
                        deleteTmpIdxDir = false;
                    } else {
                        successfulInstall = moveIndexFiles(tmpIndexDir, indexDir);
                    }
                    if (tlogFilesToDownload != null) {
                        // move tlog files and refresh ulog only if we successfully installed a new index
                        successfulInstall &= moveTlogFiles(tmpTlogDir);
                    }
                    if (successfulInstall) {
                        logReplicationTimeAndConfFiles(modifiedConfFiles, successfulInstall);
                    }
                }
            } finally {
                if (!isFullCopyNeeded) {
                    solrCore.getUpdateHandler().getSolrCoreState().openIndexWriter(solrCore);
                }
            }

            // we must reload the core after we open the IW back up
            if (successfulInstall && (reloadCore || forceCoreReload)) {
                LOG.info("Reloading SolrCore {}", solrCore.getName());
                reloadCore();
            }

            if (successfulInstall) {
                if (isFullCopyNeeded) {
                    // let the system know we are changing dir's and the old one
                    // may be closed
                    if (indexDir != null) {
                        LOG.info("removing old index directory " + indexDir);
                        solrCore.getDirectoryFactory().doneWithDirectory(indexDir);
                        solrCore.getDirectoryFactory().remove(indexDir);
                    }
                }
                if (isFullCopyNeeded) {
                    solrCore.getUpdateHandler().newIndexWriter(isFullCopyNeeded);
                }

                openNewSearcherAndUpdateCommitPoint();
            }

            if (!isFullCopyNeeded && !forceReplication && !successfulInstall) {
                cleanup(solrCore, tmpIndexDir, indexDir, deleteTmpIdxDir, tmpTlogDir, successfulInstall);
                cleanupDone = true;
                // we try with a full copy of the index
                LOG.warn("Replication attempt was not successful - trying a full index replication reloadCore={}",
                        reloadCore);
                successfulInstall = fetchLatestIndex(true, reloadCore);
            }

            markReplicationStop();
            return successfulInstall;
        } catch (ReplicationHandlerException e) {
            LOG.error("User aborted Replication");
            return false;
        } catch (SolrException e) {
            throw e;
        } catch (InterruptedException e) {
            throw new InterruptedException("Index fetch interrupted");
        } catch (Exception e) {
            throw new SolrException(ErrorCode.SERVER_ERROR, "Index fetch failed : ", e);
        }
    } finally {
        if (!cleanupDone) {
            cleanup(solrCore, tmpIndexDir, indexDir, deleteTmpIdxDir, tmpTlogDir, successfulInstall);
        }
    }
}
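One detail worth noting in the example above: the catch block rethrows a brand-new InterruptedException("Index fetch interrupted"), which drops the original stack trace and leaves the interrupt status cleared. A hypothetical variant that preserves both is sketched below; doFetch() is an illustrative stand-in, not a real Solr method:

static boolean fetchWithContext() throws InterruptedException {
    try {
        return doFetch(); // hypothetical stand-in for the fetch body
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore the interrupt status
        InterruptedException wrapped = new InterruptedException("Index fetch interrupted");
        wrapped.initCause(e);               // keep the original stack trace reachable
        throw wrapped;
    }
}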
From source file: com.sxit.crawler.utils.ArchiveUtils.java
/**
 * Perform checks as to whether normal execution should proceed.
 *
 * If an external interrupt is detected, throw an interrupted exception.
 * Used before anything that should not be attempted by a 'zombie' thread
 * that the Frontier/Crawl has given up on.
 *
 * @throws InterruptedException
 */
public static void continueCheck() throws InterruptedException {
    if (Thread.interrupted()) {
        throw new InterruptedException("interrupt detected");
    }
}
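A sketch of a typical call site: a long-running loop invokes continueCheck() on every iteration so an abandoned thread stops promptly. hasMoreUris() and processNextUri() are illustrative placeholders, not methods from the crawler:

// Illustrative caller: bail out of a long crawl loop as soon as an
// external interrupt is detected.
while (hasMoreUris()) {
    ArchiveUtils.continueCheck(); // throws InterruptedException if interrupted
    processNextUri();             // hypothetical unit of work
}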
From source file: org.jenkinsci.plugins.os_ci.model.Product.java
public void registerToExternalDNS(String fqdn, String ipaddress, String nameservers) {
    try {
        LogUtils.log(listener, "DNS registration: " + fqdn + " to " + ipaddress);
        if (!new File("/usr/local/bin/do_nsupdate").exists()) {
            LogUtils.log(listener,
                    "Cannot Register records to DNS. Expected script /usr/local/bin/do_nsupdate is missing");
        } else if (!System.getProperty("os.name").toLowerCase().startsWith("windows")
                && !nameservers.isEmpty()) {
            // re-register ip to fqdn
            if (fqdn.contains("_"))
                throw new InterruptedException("FQDN string cannot contain '_'");
            ExecUtils.executeLocalCommand(
                    "/usr/local/bin/do_nsupdate -D -N " + fqdn + " -R A -S " + nameservers,
                    "/usr/local/bin/");
            ExecUtils.executeLocalCommand(
                    "/usr/local/bin/do_nsupdate -I " + ipaddress + " -A -N " + fqdn + " -R A -S " + nameservers,
                    "/usr/local/bin/");
        } else if (nameservers.isEmpty()) {
            LogUtils.log(listener,
                    "could not register IP Address to an empty DNS - please enter a valid nameserver IP or hostname");
        } else {
            LogUtils.log(listener, "Cannot Register records to DNS under Windows OS");
        }
    } catch (InterruptedException e) {
        LogUtils.log(listener, "could not register IP Address to external DNS " + e.getMessage());
    }
}
From source file: org.apache.flume.sink.hdfs.BucketWriter.java
/**
 * This method checks whether the current thread has been interrupted and
 * throws an exception if it has.
 *
 * @throws InterruptedException
 */
private static void checkAndThrowInterruptedException() throws InterruptedException {
    // Thread.interrupted() is static: it checks and clears the current
    // thread's interrupt flag, so the static form is the idiomatic call.
    if (Thread.interrupted()) {
        throw new InterruptedException("Timed out before HDFS call was made. "
                + "Your hdfs.callTimeout might be set too low or HDFS calls are "
                + "taking too long.");
    }
}
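A sketch of how a caller might use this guard before a blocking filesystem call. The append() method and the outputStream field are hypothetical, not taken from BucketWriter:

// Illustrative caller: fail fast with InterruptedException rather than
// starting a filesystem call after a timeout/interrupt has occurred.
private void append(byte[] record) throws IOException, InterruptedException {
    checkAndThrowInterruptedException(); // bail out before touching HDFS
    outputStream.write(record);          // hypothetical blocking HDFS write
}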
From source file: com.orange.clara.cloud.servicedbdumper.integrations.AbstractIntegrationTest.java
protected InterruptedException generateInterruptedExceptionFromProcess(Process process) throws IOException {
    return new InterruptedException("\nError during process (exit code is " + process.exitValue() + "): \n"
            + this.getInputStreamToStringFromProcess(process.getErrorStream()) + "\n"
            + this.getInputStreamToStringFromProcess(process.getInputStream()));
}
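Sketched usage: after waiting for an external process, a test can throw this exception when the exit code signals failure. The runAndCheck() wrapper below is illustrative, not part of the test class:

// Illustrative caller: turn a failed external process into an
// InterruptedException carrying its output for the test report.
protected void runAndCheck(String... command) throws IOException, InterruptedException {
    Process process = new ProcessBuilder(command).start();
    process.waitFor(); // may itself throw InterruptedException
    if (process.exitValue() != 0) {
        throw generateInterruptedExceptionFromProcess(process);
    }
}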
From source file: org.geoserver.gwc.GWCTest.java
@Test
public void testLayerAdded() throws Exception {
    when(diskQuotaMonitor.isEnabled()).thenReturn(false);
    mediator.layerAdded("someLayer");
    verify(quotaStore, never()).createLayer(anyString());

    when(diskQuotaMonitor.isEnabled()).thenReturn(true);
    mediator.layerAdded("someLayer");
    verify(quotaStore, times(1)).createLayer(eq("someLayer"));

    doThrow(new InterruptedException("fake")).when(quotaStore).createLayer(eq("someLayer"));
    try {
        mediator.layerAdded("someLayer");
        fail("Expected RTE");
    } catch (RuntimeException e) {
        assertTrue(e.getCause() instanceof InterruptedException);
    }
}
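The production-side pattern this test exercises, sketched: a method that cannot declare throws InterruptedException restores the interrupt flag and wraps the exception in an unchecked one. The body below is illustrative, not GWC's actual implementation:

// Illustrative sketch of the wrapping the test asserts on.
public void layerAdded(String layerName) {
    try {
        quotaStore.createLayer(layerName);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore the interrupt status
        throw new RuntimeException(e);      // cause is the InterruptedException
    }
}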
From source file: org.alfresco.extension.bulkfilesystemimport.impl.AbstractBulkFilesystemImporter.java
private final int importContentVersions(final NodeRef nodeRef, final ImportableItem importableItem,
        final boolean inPlaceImport) throws InterruptedException {
    int result = 0;
    int previousMajorVersion = 0;

    for (final ImportableItem.VersionedContentAndMetadata versionEntry : importableItem.getVersionEntries()) {
        if (importStatus.isStopping() || Thread.currentThread().isInterrupted())
            throw new InterruptedException(
                    Thread.currentThread().getName() + " was interrupted. Terminating early.");

        Map<String, Serializable> versionProperties = new HashMap<String, Serializable>();
        MetadataLoader.Metadata metadata = loadMetadata(importableItem.getFileType(), versionEntry);

        importContentAndMetadata(nodeRef, versionEntry, inPlaceImport, metadata);

        if (log.isDebugEnabled())
            log.debug("Creating v" + String.valueOf(versionEntry.getVersionLabel()) + " of node '"
                    + String.valueOf(nodeRef)
                    + "' (note: version label in Alfresco will not be the same - it is not currently possible"
                    + " to explicitly force a particular version label - see"
                    + " https://code.google.com/p/alfresco-bulk-filesystem-import/issues/detail?id=85).");

        // Note: PROP_VERSION_LABEL is a "reserved" property, and cannot be modified by custom code.
        // In other words, we can't use the version label on disk as the version label in Alfresco. :-(
        // See: http://code.google.com/p/alfresco-bulk-filesystem-import/issues/detail?id=85
        // versionProperties.put(ContentModel.PROP_VERSION_LABEL.toPrefixString(), versionEntry.getVersionLabel());

        if (versionEntry.getMajorVersion() > previousMajorVersion) {
            versionProperties.put(VersionModel.PROP_VERSION_TYPE, VersionType.MAJOR);
            previousMajorVersion = versionEntry.getMajorVersion();
        } else {
            versionProperties.put(VersionModel.PROP_VERSION_TYPE, VersionType.MINOR);
        }

        versionService.createVersion(nodeRef, versionProperties);
        result += metadata.getProperties().size() + 4; // Add 4 for "standard" metadata properties read from filesystem
    }

    return (result);
}