Example usage for java.lang InterruptedException InterruptedException

List of usage examples for java.lang InterruptedException InterruptedException

Introduction

On this page you can find example usage for java.lang InterruptedException InterruptedException.

Prototype

public InterruptedException() 

Document

Constructs an InterruptedException with no detail message.
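
For orientation, here is a minimal self-contained sketch of this constructor in action: the thread's interrupt flag is converted into the exception, and the catch block restores the flag. getMessage() returns null because no detail message was supplied.

public class InterruptedExceptionDemo {

    static void cancellableWork() throws InterruptedException {
        // Cooperative cancellation: turn the thread's interrupt flag
        // into a checked exception (Thread.interrupted() clears the flag).
        if (Thread.interrupted()) {
            throw new InterruptedException();
        }
        // ... long-running work would go here ...
    }

    public static void main(String[] args) {
        Thread.currentThread().interrupt(); // simulate a pending interrupt
        try {
            cancellableWork();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the flag for callers
            System.out.println("detail message: " + e.getMessage()); // prints: detail message: null
        }
    }
}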

Usage

From source file:org.eclipse.jubula.client.core.persistence.ProjectPM.java

/**
 * Persists the given project to the DB. This is performed in a new session.
 * When this method returns, the project will not be attached to any session.
 * @param proj ProjectPO to be saved.
 * @param newProjectName
 *            name part of the ProjectNamePO. If there is no new name, this
 *            parameter must be null (same project, different version)
 * @param mapperList a List of INameMapper to persist names (Parameter).
 * @param compNameBindingList a List of Component Name mappers to persist 
 *                            names (Component).
 * @throws PMException in case of any db error
 * @throws ProjectDeletedException if project is already deleted
 * @throws InterruptedException if the operation is canceled
 */
public static void saveProject(IProjectPO proj, String newProjectName, List<INameMapper> mapperList,
        List<IWritableComponentNameMapper> compNameBindingList)
        throws PMException, ProjectDeletedException, InterruptedException {

    final EntityManager saveSession = Persistor.instance().openSession();
    EntityTransaction tx = null;
    try {
        tx = Persistor.instance().getTransaction(saveSession);

        saveSession.persist(proj);
        proj.setParentProjectId(proj.getId());

        saveSession.flush();
        if (newProjectName != null) {
            ProjectNameBP.getInstance().setName(saveSession, proj.getGuid(), newProjectName);
        }
        ProjectNameBP.getInstance().storeTransientNames(saveSession);
        for (INameMapper mapper : mapperList) {
            mapper.persist(saveSession, proj.getId());
        }
        for (IWritableComponentNameMapper compNameBinding : compNameBindingList) {
            CompNamePM.flushCompNames(saveSession, proj.getId(), compNameBinding);
        }
        Persistor.instance().commitTransaction(saveSession, tx);
        for (INameMapper mapper : mapperList) {
            mapper.updateStandardMapperAndCleanup(proj.getId());
        }
        for (IComponentNameMapper compNameCache : compNameBindingList) {
            compNameCache.getCompNameCache().updateStandardMapperAndCleanup(proj.getId());
        }
    } catch (PersistenceException e) {
        if (tx != null) {
            Persistor.instance().rollbackTransaction(saveSession, tx);
        }
        if (e.getCause() instanceof InterruptedException) {
            // Operation was canceled.
            throw new InterruptedException();
        }
        String msg = Messages.CantSaveProject + StringConstants.DOT;
        throw new PMSaveException(msg + e.getMessage(), MessageIDs.E_ATTACH_PROJECT);
    } catch (IncompatibleTypeException ite) {
        if (tx != null) {
            Persistor.instance().rollbackTransaction(saveSession, tx);
        }
        String msg = Messages.CantSaveProject + StringConstants.DOT;
        throw new PMSaveException(msg + ite.getMessage(), MessageIDs.E_ATTACH_PROJECT);
    } finally {
        Persistor.instance().dropSession(saveSession);
    }
}
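
The catch block above shows a recurring shape on this page: a lower layer wraps the interrupt inside an unchecked exception, and the caller restores the checked type. A distilled sketch (the RuntimeException parameter stands in for PersistenceException):

final class UnwrapInterrupt {

    static void rethrowIfInterrupted(RuntimeException wrapper) throws InterruptedException {
        if (wrapper.getCause() instanceof InterruptedException) {
            // Restore the checked type; the no-arg constructor carries no message.
            throw new InterruptedException();
        }
        throw wrapper; // not an interrupt: let the wrapper propagate
    }
}

As in the source, the original stack trace is dropped; chaining with initCause(wrapper) would preserve it.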

From source file:org.eclipse.jubula.client.archive.XmlImporter.java

/**
 * Checks whether the operation has been canceled. If the operation has been
 * canceled, an <code>InterruptedException</code> will be thrown.
 *
 * @throws InterruptedException if the operation has been canceled.
 */
private void checkCancel() throws InterruptedException {
    if (m_monitor.isCanceled()) {
        throw new InterruptedException();
    }
}
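
The same poll-and-throw shape works with any cancellation flag. A self-contained sketch, with an AtomicBoolean standing in for the Eclipse IProgressMonitor used above:

import java.util.concurrent.atomic.AtomicBoolean;

class CancellableTask {

    private final AtomicBoolean canceled = new AtomicBoolean(); // stands in for m_monitor.isCanceled()

    void requestCancel() {
        canceled.set(true); // typically called from another thread
    }

    private void checkCancel() throws InterruptedException {
        if (canceled.get()) {
            throw new InterruptedException();
        }
    }

    void run(int steps) throws InterruptedException {
        for (int i = 0; i < steps; i++) {
            checkCancel(); // poll once per unit of work
            // ... perform one step ...
        }
    }
}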

From source file:nl.nn.adapterframework.pipes.MessageSendingPipe.java

protected String sendMessage(Object input, IPipeLineSession session, String correlationID, ISender sender,
        Map threadContext) throws SenderException, TimeOutException, InterruptedException {
    String sendResult = sendTextMessage(input, session, correlationID, getSender(), threadContext);
    if (Thread.currentThread().isInterrupted()) {
        throw new InterruptedException();
    }
    if (StringUtils.isNotEmpty(getTimeOutOnResult()) && getTimeOutOnResult().equals(sendResult)) {
        throw new TimeOutException(getLogPrefix(session) + "timeOutOnResult [" + getTimeOutOnResult() + "]");
    }
    if (StringUtils.isNotEmpty(getExceptionOnResult()) && getExceptionOnResult().equals(sendResult)) {
        throw new SenderException(getLogPrefix(session) + "exceptionOnResult [" + getExceptionOnResult() + "]");
    }
    return sendResult;
}
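
Note that this check uses Thread.currentThread().isInterrupted(), which, unlike Thread.interrupted(), does not clear the interrupt flag, so the interrupted status remains visible to the caller while the exception propagates.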

From source file:com.splout.db.dnode.DNodeHandler.java

/**
 * Runs a deploy action. Downloads file and warm up the data.
 * Interruptible.
 */
private void runDeployAction(Fetcher.Reporter reporter, DeployAction action, long version)
        throws IOException, URISyntaxException, DNodeException, InterruptedException {

    log.info("Running deployAction[" + action + "] for version[" + version + "].");
    // 1- Call the fetcher for fetching
    File fetchedContent = fetcher.fetch(action.getDataURI(), reporter);
    // If we reach this point then the fetch has been OK
    // 2- Create the local folder where the fetched data will be moved
    File dbFolder = getLocalStorageFolder(action.getTablespace(), action.getPartition(), version);
    if (dbFolder.exists()) {
        // A pre-existing folder is stale data left over from a previous
        // failed deploy, so it is safe to delete it
        FileUtils.deleteDirectory(dbFolder);
    }
    // 3- Perform a "mv" for finally making the data available
    FileUtils.moveDirectory(fetchedContent, dbFolder);

    // 4- Check if interrupted. In this case, we remove the folder before returning
    if (Thread.interrupted()) {
        try {
            FileUtils.deleteDirectory(dbFolder);
        } catch (IOException e) {
            log.warn("Not possible to remove " + dbFolder + " when trying to cancel de deployment.");
        }
        throw new InterruptedException();
    }

    // 5- Store metadata about the partition
    writePartitionMetadata(action, version);

    // 6- Preemptively load the Manager in case initialization is slow
    // Managers might warm up for a while (e.g. loading data into memory)
    loadManagerInEHCache(action.getTablespace(), action.getVersion(), action.getPartition(), dbFolder,
            action.getMetadata());
    log.info("Finished deployAction[" + action + "] for version[" + version + "].");
}
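
Step 4 above is the cleanup-then-throw idiom: discard partial output before propagating the cancellation. A reduced sketch (the tempFile parameter is illustrative):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

final class InterruptibleStep {

    static void finishOrAbort(Path tempFile) throws IOException, InterruptedException {
        if (Thread.interrupted()) { // checks and clears the interrupt flag
            try {
                Files.deleteIfExists(tempFile); // discard the partial result
            } catch (IOException e) {
                System.err.println("Could not remove " + tempFile + ": " + e);
            }
            throw new InterruptedException();
        }
        // ... publish the completed file ...
    }
}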

From source file:com.orange.cepheus.broker.controller.NgsiControllerTest.java

@Test
public void postUpdateContextWithThrowInterruptedException() throws Exception {

    // localRegistrations mock always returns a providingApplication
    when(providingApplication.hasNext()).thenReturn(true);
    when(providingApplication.next()).thenReturn(new URI("http://iotagent:1234"));
    when(localRegistrations.findProvidingApplication(any(), any())).thenReturn(providingApplication);

    // subscriptions mock always returns no matched subscriptions
    when(matchedSubscriptions.hasNext()).thenReturn(false);
    when(subscriptions.findSubscriptions(any(), any())).thenReturn(matchedSubscriptions);

    when(updateContextResponseListenableFuture.get()).thenThrow(new InterruptedException());

    // ngsiClient mock always returns updateContextResponseListenableFuture when updateContext is called
    when(ngsiClient.updateContext(any(), any(), any())).thenReturn(updateContextResponseListenableFuture);

    mockMvc.perform(post("/v1/updateContext").content(json(mapper, createUpdateContextTempSensorAndPressure()))
            .contentType(MediaType.APPLICATION_JSON)).andExpect(status().isOk())
            .andExpect(MockMvcResultMatchers.jsonPath("$.errorCode.code").value("500"))
            .andExpect(
                    MockMvcResultMatchers.jsonPath("$.errorCode.reasonPhrase").value("Receiver internal error"))
            .andExpect(MockMvcResultMatchers.jsonPath("$.errorCode.details")
                    .value("An unknown error at the receiver has occured"));

    // check that ngsiClient.notifyContext is never called
    verify(ngsiClient, never()).notifyContext(any(), any(), any());
}
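
Stubbing a future to simulate an interrupt, as this test does, works for any Future.get(): Mockito accepts the checked exception because get() declares it. A minimal sketch, assuming Mockito is on the classpath:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.concurrent.Future;

class FutureInterruptStub {

    @SuppressWarnings("unchecked")
    static Future<String> interruptedFuture() throws Exception {
        Future<String> future = mock(Future.class);
        // Legal because Future.get() declares InterruptedException
        when(future.get()).thenThrow(new InterruptedException());
        return future;
    }
}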

From source file:info.ajaxplorer.synchro.SyncJob.java

protected boolean synchronousUP(Node folderNode, final File sourceFile, Node remoteNode) throws Exception {

    if (Manager.getInstance().getRdiffProc() != null && Manager.getInstance().getRdiffProc().rdiffEnabled()) {
        // RDIFF ! 
        File signatureFile = tmpFileName(sourceFile, "sig");
        boolean remoteHasSignature = false;
        try {
            this.uriContentToFile(AjxpAPI.getInstance().getFilehashSignatureUri(remoteNode), signatureFile,
                    null);
            remoteHasSignature = true;
        } catch (IllegalStateException e) {
            // Remote has no signature for this file; fall through to a full upload
        }
        if (remoteHasSignature && signatureFile.exists() && signatureFile.length() > 0) {
            // Compute delta
            File deltaFile = tmpFileName(sourceFile, "delta");
            RdiffProcessor proc = Manager.getInstance().getRdiffProc();
            proc.delta(signatureFile, sourceFile, deltaFile);
            signatureFile.delete();
            if (deltaFile.exists()) {
                // Send back to server
                RestRequest rest = new RestRequest();
                logChange(Manager.getMessage("job_log_updelta"), sourceFile.getName());
                String patchedFileMd5 = rest.getStringContent(
                        AjxpAPI.getInstance().getFilehashPatchUri(remoteNode), null, deltaFile, null);
                rest.release();
                deltaFile.delete();
                //String localMD5 = (folderNode)
                if (patchedFileMd5.trim().equals(SyncJob.computeMD5(sourceFile))) {
                    // OK !
                    return true;
                }
            }
        }
    }

    final long totalSize = sourceFile.length();
    if (!sourceFile.exists() || totalSize == 0) {
        throw new FileNotFoundException("Cannot find file: " + sourceFile.getAbsolutePath());
    }
    Logger.getRootLogger().info("Uploading " + totalSize + " bytes");
    RestRequest rest = new RestRequest();
    // Ping to make sure the user is logged in
    rest.getStatusCodeForRequest(AjxpAPI.getInstance().getAPIUri());
    //final long filesize = totalSize; 
    rest.setUploadProgressListener(new CountingMultipartRequestEntity.ProgressListener() {
        private int previousPercent = 0;
        private int currentPart = 0;
        private int currentTotal = 1;

        @Override
        public void transferred(long num) throws IOException {
            if (SyncJob.this.interruptRequired) {
                throw new IOException("Upload interrupted on demand");
            }
            int currentPercent = (int) (num * 100 / totalSize);
            if (this.currentTotal > 1) {
                long partsSize = totalSize / this.currentTotal;
                currentPercent = (int) (((partsSize * this.currentPart) + num) * 100 / totalSize);
            }
            currentPercent = Math.min(Math.max(currentPercent, 0), 100);
            if (currentPercent > previousPercent) {
                logChange(Manager.getMessage("job_log_uploading"),
                        sourceFile.getName() + " - " + currentPercent + "%");
            }
            previousPercent = currentPercent;
        }

        @Override
        public void partTransferred(int part, int total) throws IOException {
            this.currentPart = part;
            this.currentTotal = total;
            if (SyncJob.this.interruptRequired) {
                throw new IOException("Upload interrupted on demand");
            }
            Logger.getRootLogger().info("PARTS " + " [" + (part + 1) + "/" + total + "]");
            logChange(Manager.getMessage("job_log_uploading"),
                    sourceFile.getName() + " [" + (part + 1) + "/" + total + "]");
        }
    });
    String targetName = sourceFile.getName();
    try {
        rest.getStringContent(AjxpAPI.getInstance().getUploadUri(folderNode.getPath(true)), null, sourceFile,
                targetName);
    } catch (IOException ex) {
        if (this.interruptRequired) {
            rest.release();
            throw new InterruptedException();
        }
    }
    rest.release();
    return false;

}
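
This upload path shows another translation: the progress listener aborts the HTTP transfer with an IOException, and the outer catch re-labels it as an InterruptedException when the abort was self-inflicted. A condensed sketch (the Send interface and interruptRequired flag mirror the class's own cancel signal; unlike the original, a genuine failure is rethrown here rather than swallowed):

import java.io.IOException;

class UploadSketch {

    interface Send {
        void run() throws IOException;
    }

    private volatile boolean interruptRequired; // set by another thread to cancel

    void upload(Send send) throws IOException, InterruptedException {
        try {
            send.run(); // a progress listener may abort this with an IOException
        } catch (IOException ex) {
            if (interruptRequired) {
                // The IOException was self-inflicted; report cancellation instead
                throw new InterruptedException();
            }
            throw ex; // a genuine transfer failure
        }
    }
}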

From source file:org.eclipse.jubula.client.archive.businessprocess.FileStorageBP.java

/**
 * @param projectList The list of projects to export
 * @param exportDirName The export directory of the projects
 * @param exportSession The session to be used for Persistence (JPA / EclipseLink)
 * @param monitor The progress monitor
 * @param writeToSystemTempDir Indicates whether the projects have to be 
 *                             written to the system temp directory
 * @param listOfProjectFiles The written project files are added to this 
 *                           list, if the temp dir was used and the list  
 *                           is not null.
 * @param console
 *              The console used to display progress and error messages.
 * @throws InterruptedException if the operation was canceled
 */
public static void exportProjectList(List<IProjectPO> projectList, String exportDirName,
        EntityManager exportSession, IProgressMonitor monitor, boolean writeToSystemTempDir,
        List<File> listOfProjectFiles, IProgressConsole console) throws JBException, InterruptedException {

    SubMonitor subMonitor = SubMonitor.convert(monitor, Messages.ExportAllBPExporting,
            XmlStorage.getWorkToSave(projectList));

    for (IProjectPO proj : projectList) {
        if (subMonitor.isCanceled()) {
            throw new InterruptedException();
        }
        IProjectPO projectToExport = ProjectPM.loadProjectById(proj.getId(), exportSession);
        String projectFileName = projectToExport.getDisplayName() + ".xml"; //$NON-NLS-1$
        final String exportFileName;

        if (writeToSystemTempDir) {
            exportFileName = projectFileName;
        } else {
            if (projectToExport.equals(GeneralStorage.getInstance().getProject())) {
                // The project is the currently loaded one, so export the
                // in-memory instance instead of the freshly loaded copy
                projectToExport = GeneralStorage.getInstance().getProject();
            }

            exportFileName = exportDirName + projectFileName;
        }

        if (subMonitor.isCanceled()) {
            throw new InterruptedException();
        }
        console.writeLine(
                NLS.bind(Messages.ExportAllBPInfoStartingExportProject, new Object[] { projectFileName }));
        try {
            if (subMonitor.isCanceled()) {
                throw new InterruptedException();
            }
            XmlStorage.save(projectToExport, exportFileName, true,
                    subMonitor.newChild(XmlStorage.getWorkToSave(projectToExport)), writeToSystemTempDir,
                    listOfProjectFiles);

            if (subMonitor.isCanceled()) {
                throw new InterruptedException();
            }

            console.writeLine(
                    NLS.bind(Messages.ExportAllBPInfoFinishedExportProject, new Object[] { projectFileName }));

        } catch (final PMSaveException e) {
            LOG.error(Messages.CouldNotExportProject, e);
            console.writeErrorLine(NLS.bind(Messages.ExportAllBPErrorExportFailedProject,
                    new Object[] { projectFileName, e.getMessage() }));
        }
        exportSession.detach(projectToExport);
    }

}
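
Note the polling granularity: the monitor is consulted before loading each project, before saving, and again after saving, so a cancel request takes effect at the next phase boundary rather than mid-write, and files that were already exported remain intact.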

From source file:info.ajaxplorer.synchro.SyncJob.java

protected void uriContentToFile(URI uri, File targetFile, File uploadFile) throws Exception {

    RestRequest rest = new RestRequest();
    int postedProgress = 0;
    int buffersize = 16384;
    int count = 0;
    HttpEntity entity = rest.getNotConsumedResponseEntity(uri, null, uploadFile);
    long fullLength = entity.getContentLength();
    Logger.getRootLogger().info("Downloaded " + fullLength + " bytes");

    InputStream input = entity.getContent();
    BufferedInputStream in = new BufferedInputStream(input, buffersize);

    FileOutputStream output = new FileOutputStream(targetFile.getPath());
    BufferedOutputStream out = new BufferedOutputStream(output);

    byte[] data = new byte[buffersize];
    int total = 0;

    long startTime = System.nanoTime();
    long lastTime = startTime;
    int lastTimeTotal = 0;

    long secondLength = 1000000000;
    long interval = (long) 2 * secondLength;

    while ((count = in.read(data)) != -1) {
        long duration = System.nanoTime() - lastTime;

        int tmpTotal = total + count;
        // publishing the progress....
        int tmpProgress = (int) ((long) tmpTotal * 100 / fullLength); // widen to long to avoid int overflow
        if (tmpProgress - postedProgress > 0 || duration > secondLength) {
            if (duration > interval) {
                lastTime = System.nanoTime();
                // Average speed over the last interval, in KB per second
                double speedKBs = ((double) (tmpTotal - lastTimeTotal) / 1024) / ((double) duration / secondLength);
                lastTimeTotal = tmpTotal;
                double bytesLeftKB = ((double) fullLength - tmpTotal) / 1024;
                @SuppressWarnings("unused")
                double etcSeconds = bytesLeftKB / Math.max(speedKBs, 0.001); // estimated time remaining (not displayed)
            }
            if (tmpProgress != postedProgress) {
                logChange(Manager.getMessage("job_log_downloading"),
                        targetFile.getName() + " - " + tmpProgress + "%");
            }
            postedProgress = tmpProgress;
        }
        out.write(data, 0, count);
        total = tmpTotal;
        if (this.interruptRequired) {
            break;
        }
    }
    out.flush();
    out.close();
    in.close();
    if (this.interruptRequired) {
        rest.release();
        throw new InterruptedException();
    }
    rest.release();
}
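
Note that the download loop merely breaks when interruptRequired is set; the streams are still flushed and closed, and the REST request is released, before the InterruptedException is thrown, so cancellation does not leak resources.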

From source file:org.apache.hadoop.yarn.server.applicationhistoryservice.timeline.LeveldbTimelineStore.java

/**
 * Discards entities with start timestamp less than or equal to the given
 * timestamp.
 */
@VisibleForTesting
void discardOldEntities(long timestamp) throws IOException, InterruptedException {
    byte[] reverseTimestamp = writeReverseOrderedLong(timestamp);
    long totalCount = 0;
    long t1 = System.currentTimeMillis();
    try {
        List<String> entityTypes = getEntityTypes();
        for (String entityType : entityTypes) {
            DBIterator iterator = null;
            DBIterator pfIterator = null;
            long typeCount = 0;
            try {
                deleteLock.writeLock().lock();
                iterator = getDbIterator(false);
                pfIterator = getDbIterator(false);

                if (deletionThread != null && deletionThread.isInterrupted()) {
                    throw new InterruptedException();
                }
                boolean seeked = false;
                while (deleteNextEntity(entityType, reverseTimestamp, iterator, pfIterator, seeked)) {
                    typeCount++;
                    totalCount++;
                    seeked = true;
                    if (deletionThread != null && deletionThread.isInterrupted()) {
                        throw new InterruptedException();
                    }
                }
            } catch (IOException e) {
                LOG.error("Got IOException while deleting entities for type " + entityType
                        + ", continuing to next type", e);
            } finally {
                IOUtils.cleanup(LOG, iterator, pfIterator);
                deleteLock.writeLock().unlock();
                if (typeCount > 0) {
                    LOG.info("Deleted " + typeCount + " entities of type " + entityType);
                }
            }
        }
    } finally {
        long t2 = System.currentTimeMillis();
        LOG.info("Discarded " + totalCount + " entities for timestamp " + timestamp + " and earlier in "
                + (t2 - t1) / 1000.0 + " seconds");
    }
}
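
The following variant from a newer package is nearly identical; the main difference is that it iterates with LeveldbIterator instead of DBIterator.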

From source file:org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore.java

/**
 * Discards entities with start timestamp less than or equal to the given
 * timestamp.
 */
@VisibleForTesting
void discardOldEntities(long timestamp) throws IOException, InterruptedException {
    byte[] reverseTimestamp = writeReverseOrderedLong(timestamp);
    long totalCount = 0;
    long t1 = System.currentTimeMillis();
    try {
        List<String> entityTypes = getEntityTypes();
        for (String entityType : entityTypes) {
            LeveldbIterator iterator = null;
            LeveldbIterator pfIterator = null;
            long typeCount = 0;
            try {
                deleteLock.writeLock().lock();
                iterator = getDbIterator(false);
                pfIterator = getDbIterator(false);

                if (deletionThread != null && deletionThread.isInterrupted()) {
                    throw new InterruptedException();
                }
                boolean seeked = false;
                while (deleteNextEntity(entityType, reverseTimestamp, iterator, pfIterator, seeked)) {
                    typeCount++;
                    totalCount++;
                    seeked = true;
                    if (deletionThread != null && deletionThread.isInterrupted()) {
                        throw new InterruptedException();
                    }
                }
            } catch (IOException e) {
                LOG.error("Got IOException while deleting entities for type " + entityType
                        + ", continuing to next type", e);
            } finally {
                IOUtils.cleanup(LOG, iterator, pfIterator);
                deleteLock.writeLock().unlock();
                if (typeCount > 0) {
                    LOG.info("Deleted " + typeCount + " entities of type " + entityType);
                }
            }
        }
    } finally {
        long t2 = System.currentTimeMillis();
        LOG.info("Discarded " + totalCount + " entities for timestamp " + timestamp + " and earlier in "
                + (t2 - t1) / 1000.0 + " seconds");
    }
}
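
Both variants guard the scan with the same check: the deletion thread's interrupt flag is polled between entries via isInterrupted(), which, unlike Thread.interrupted(), does not clear the flag. A minimal sketch of that loop shape:

import java.util.Iterator;

final class BatchDeleter {

    static long deleteAll(Iterator<String> keys, Thread owner) throws InterruptedException {
        long count = 0;
        while (keys.hasNext()) {
            if (owner != null && owner.isInterrupted()) {
                throw new InterruptedException(); // stop between deletions, not mid-entry
            }
            keys.next();
            // ... delete the entry ...
            count++;
        }
        return count;
    }
}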