Example usage for java.lang InterruptedException getLocalizedMessage

List of usage examples for java.lang InterruptedException getLocalizedMessage

Introduction

On this page you can find usage examples for java.lang.InterruptedException.getLocalizedMessage(), drawn from real open-source projects.

Prototype

public String getLocalizedMessage() 

Document

Creates a localized description of this throwable.
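
Note that getLocalizedMessage() is inherited from java.lang.Throwable: unless a subclass overrides it, it returns the same string as getMessage(). Before the real-world examples below, here is a minimal, self-contained sketch (not taken from any of the projects listed under Usage) showing the typical pattern: catch the InterruptedException, log its localized message, and restore the thread's interrupt status.

import java.util.concurrent.TimeUnit;

public class GetLocalizedMessageDemo {

    public static void main(String[] args) throws Exception {
        Thread worker = new Thread(() -> {
            try {
                // A blocking call that may throw InterruptedException.
                TimeUnit.SECONDS.sleep(10);
            } catch (InterruptedException e) {
                // getLocalizedMessage() falls back to getMessage() unless
                // a subclass provides a locale-specific override.
                System.err.println("Interrupted: " + e.getLocalizedMessage());
                // Restore the interrupt status so callers can observe it.
                Thread.currentThread().interrupt();
            }
        });
        worker.start();
        worker.interrupt(); // provoke the InterruptedException
        worker.join();
    }
}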

Usage

From source file:org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrClient.java

@Override
public NamedList<Object> request(final SolrRequest request, String collection)
        throws SolrServerException, IOException {
    if (!(request instanceof UpdateRequest)) {
        return client.request(request, collection);
    }
    UpdateRequest req = (UpdateRequest) request;

    // this happens for commit...
    if (streamDeletes) {
        if ((req.getDocuments() == null || req.getDocuments().isEmpty())
                && (req.getDeleteById() == null || req.getDeleteById().isEmpty())
                && (req.getDeleteByIdMap() == null || req.getDeleteByIdMap().isEmpty())) {
            if (req.getDeleteQuery() == null) {
                blockUntilFinished();
                return client.request(request, collection);
            }
        }
    } else {
        if ((req.getDocuments() == null || req.getDocuments().isEmpty())) {
            blockUntilFinished();
            return client.request(request, collection);
        }
    }

    SolrParams params = req.getParams();
    if (params != null) {
        // check if it is waiting for the searcher
        if (params.getBool(UpdateParams.WAIT_SEARCHER, false)) {
            log.info("blocking for commit/optimize");
            blockUntilFinished(); // empty the queue
            return client.request(request, collection);
        }
    }

    try {
        CountDownLatch tmpLock = lock;
        if (tmpLock != null) {
            tmpLock.await();
        }

        Update update = new Update(req, collection);
        boolean success = queue.offer(update);

        for (;;) {
            synchronized (runners) {
                // see if queue is half full and we can add more runners
                // special case: if only using a threadCount of 1 and the queue
                // is filling up, allow 1 add'l runner to help process the queue
                if (runners.isEmpty()
                        || (queue.remainingCapacity() < queue.size() && runners.size() < threadCount)) {
                    // We need more runners, so start a new one.
                    addRunner();
                } else {
                    // break out of the retry loop if we added the element to the queue
                    // successfully, *and*
                    // while we are still holding the runners lock to prevent race
                    // conditions.
                    if (success)
                        break;
                }
            }

            // Retry to add to the queue w/o the runners lock held (else we risk
            // temporary deadlock)
            // This retry could also fail because
            // 1) existing runners were not able to take off any new elements in the
            // queue
            // 2) the queue was filled back up since our last try
            // If we succeed, the queue may have been completely emptied, and all
            // runners stopped.
            // In all cases, we should loop back to the top to see if we need to
            // start more runners.
            //
            if (!success) {
                success = queue.offer(update, 100, TimeUnit.MILLISECONDS);
            }
        }
    } catch (InterruptedException e) {
        log.error("interrupted", e);
        throw new IOException(e.getLocalizedMessage());
    }

    // RETURN A DUMMY result
    NamedList<Object> dummy = new NamedList<>();
    dummy.add("NOTE", "the request is processed in a background stream");
    return dummy;
}
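
Note that the catch block above clears the thread's interrupt status and does not restore it before converting the InterruptedException into an IOException. A common variant (a sketch, not part of the Solr source) re-asserts the flag and chains the original exception as the cause:

    } catch (InterruptedException e) {
        log.error("interrupted", e);
        // Re-assert the interrupt flag so callers further up the stack
        // can still detect the interruption.
        Thread.currentThread().interrupt();
        throw new IOException(e.getLocalizedMessage(), e);
    }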

From source file:org.ut.biolab.medsavant.server.db.variants.annotation.BatchVariantAnnotator.java

/**
 * Perform the prepared batch annotation in parallel
 *
 * @throws IOException
 * @throws SQLException
 */
public void performBatchAnnotationInParallel()
        throws IOException, SQLException, SessionExpiredException, IllegalArgumentException {

    org.ut.biolab.medsavant.server.serverapi.LogManager.getInstance().addServerLog(sid,
            LogManagerAdapter.LogType.INFO, "Annotation of " + inputTDFFile.getAbsolutePath() + " was started. "
                    + annotations.length + " annotation(s) will be performed.");

    LOG.info("Annotation of " + inputTDFFile.getAbsolutePath() + " was started. " + annotations.length
            + " annotation(s) will be performed.");
    //EmailLogger.logByEmail("Annotation started", "Annotation of " + inputTDFFile.getAbsolutePath() + " was started. " + annotations.length + " annotation(s) will be performed.");

    CSVReader recordReader = null;
    CSVWriter recordWriter = null;
    AnnotationCursor[] cursors = null;
    try {
        // no annotations to perform, copy input to output
        if (annotations.length == 0) {
            jobProgress.setMessage("No annotations to perform, processing intermediate files");
            MedSavantIOController.requestIO(new IOJob("Copy File") {
                @Override
                protected void doIO() throws IOException {
                    IOUtils.copyFile(inputTDFFile, outputTDFFile);
                }
            });

            return;
        }

        // otherwise, perform annotations
        jobProgress.setMessage("Performing " + annotations.length + " annotations");
        LOG.info("Performing " + annotations.length + " annotations");

        // the number of columns in the input file
        int numFieldsInInputFile = getNumFieldsInTDF(inputTDFFile);
        if (numFieldsInInputFile == 0) {
            org.ut.biolab.medsavant.server.serverapi.LogManager.getInstance().addServerLog(sid,
                    LogManagerAdapter.LogType.ERROR,
                    "Error parsing input file " + inputTDFFile.getAbsolutePath() + " . Is it tab delimited?");
            throw new IOException("Error parsing input file. Is it tab delimited?");
        }

        // the number of fields that will be in the output file
        int numFieldsInOutputFile = numFieldsInInputFile;

        // create cursors for all annotations
        cursors = new AnnotationCursor[annotations.length];
        for (int i = 0; i < annotations.length; i++) {
            AnnotationCursor ac = new AnnotationCursor(sid, annotations[i]);
            numFieldsInOutputFile += ac.getNumNonDefaultFields();
            cursors[i] = ac;
        }

        final int[] numlines = new int[1];

        MedSavantIOController.requestIO(new IOJob("Line counter") {
            @Override
            protected void doIO() throws IOException {
                jobProgress.setMessage("Counting number of variants to annotate");
                //LOG.info("DEBUG: Inside doIO of LineCounter, working with TDF File "+inputTDFFile+"\n");
                int numLines = 0;
                BufferedReader reader = null;
                try {
                    reader = new BufferedReader(new FileReader(inputTDFFile));
                    while (reader.readLine() != null) {
                        numLines++;
                    }
                } finally {
                    if (reader != null) {
                        reader.close();
                    }
                }
                numlines[0] = numLines;
                //LOG.info("DEBUG: Read "+numLines+" from "+inputTDFFile);
            }
        });

        // open the input and output files
        recordReader = new CSVReader(new FileReader(inputTDFFile),
                VariantManagerUtils.FIELD_DELIMITER.charAt(0), CSVWriter.DEFAULT_QUOTE_CHARACTER, '\\');
        //recordWriter = new CSVWriter(new FileWriter(outputTDFFile), VariantManagerUtils.FIELD_DELIMITER.charAt(0), CSVWriter.DEFAULT_QUOTE_CHARACTER, '\\', "\r\n");
        recordWriter = new CSVWriter(new FileWriter(outputTDFFile),
                VariantManagerUtils.FIELD_DELIMITER.charAt(0), CSVWriter.DEFAULT_QUOTE_CHARACTER, "\r\n");

        //LOG.info("Reading from " + inputTDFFile.getAbsolutePath());
        //LOG.info("Writing to " + outputTDFFile.getAbsolutePath());
        // read the input, line by line
        String[] inputLine;

        MedSavantIOController.requestIO(new VariantAnnotatorIOJob(cursors, recordReader, recordWriter,
                numlines[0], numFieldsInOutputFile));

        org.ut.biolab.medsavant.server.serverapi.LogManager.getInstance().addServerLog(sid,
                LogManagerAdapter.LogType.INFO, "Annotation of " + inputTDFFile.getAbsolutePath()
                        + " completed. " + annotations.length + " annotations were performed.");

        // report success
        LOG.info("Annotation of " + inputTDFFile.getAbsolutePath() + " completed. " + annotations.length
                + " annotations were performed.");
        //EmailLogger.logByEmail("Annotation completed", "Annotation of " + inputTDFFile.getAbsolutePath() + " completed. " + annotations.length + " annotations were performed.");
    } catch (InterruptedException ie) {

        org.ut.biolab.medsavant.server.serverapi.LogManager.getInstance().addServerLog(sid,
                LogManagerAdapter.LogType.ERROR, "Error performing annotation(s). " + ie.getLocalizedMessage());

        LOG.error("performBatchAnnotationInParallell interrupted: " + ie);
    } finally {
        // clean up
        try {
            /*
            if (cursors != null) {
                    
            for (AnnotationCursor c : cursors) {
                c.cleanup();
            }
            }*/
            if (recordReader != null) {
                recordReader.close();
            }
            if (recordWriter != null) {
                recordWriter.close();
            }
            inputTDFFile.delete(); //no longer need input TDF file.
        } catch (NullPointerException nex) {
            LOG.error("Caught nullpointerexception ");
            nex.printStackTrace();
        }
    }
}

From source file:it.geosolutions.geobatch.flow.file.FileBasedFlowManager.java

/**
 * Main thread loop.
 * <ul>
 * <LI>Create and tear down generators when the flow is paused.</LI>
 * <LI>Init the dispatcher.</LI>
 * </UL>
 * 
 * TODO the stopping condition is never used...
 */
public synchronized void run() {
    for (;;) {
        if (terminationRequest) {
            if (initialized) {
                dispatcher.shutdown();
                eventGenerator.dispose();
                initialized = false;
            }

            paused = true;

            break;
        }

        while (paused) {
            try {
                if (initialized && ((eventGenerator != null) && eventGenerator.isRunning())) {

                    eventGenerator.pause();
                }

                this.wait();

                if (terminationRequest) {
                    break;
                }
            } catch (InterruptedException e) {
                final String message = "Error on dispatcher initialization: " + e.getLocalizedMessage();
                LOGGER.error(message);
                throw new RuntimeException(message, e);
            }
        }

        if (!initialized) {
            // Initialize objects
            this.dispatcher = new FileBasedEventDispatcher(this, eventMailBox);
            dispatcher.start();
            initialized = true;
        }

        while (!paused) {
            try {
                if (initialized) {
                    if (eventGenerator == null) {
                        // (re)Creating the FileBasedEventGenerator, which
                        // waits for new events
                        try {
                            createGenerator();
                        } catch (Exception t) {
                            String message = "Error on FS-Monitor initialization for '" + name + "': "
                                    + t.getLocalizedMessage();
                            LOGGER.error(message, t);
                            throw new RuntimeException(message, t);
                        }
                    } else {
                        eventGenerator.start();
                    }
                }

                this.wait();

                if (terminationRequest) {
                    break;
                }
            } catch (InterruptedException e) {
                LOGGER.error("FlowManager cycle exception: " + e.getLocalizedMessage(), e);
                throw new RuntimeException(e);
            }
        }
    }
}

From source file:de.thischwa.pmcms.view.renderer.ExportRenderer.java

private void renderRenderables() throws RenderingException {
    exportController.addAll(buildThreads(exportController, renderableObjects));
    SWTUtils.asyncExec(exportController, display);
    int oldThreadCount = 0;
    do {
        isInterruptByUser = (monitor != null && monitor.isCanceled());
        try {
            Thread.sleep(25);
        } catch (InterruptedException e) {
            logger.debug("Controller interrupted.");
        }
        int threadCount = exportController.getTerminatedThreadCount();
        if (oldThreadCount < threadCount) {
            incProgressValue(threadCount - oldThreadCount);
            oldThreadCount = threadCount;
        }
    } while (!exportController.isError() && !exportController.isTerminated() && !isInterruptByUser);

    if (exportController.isError())
        throw new RenderingException(exportController.getThreadException());
    if (isInterruptByUser) {
        exportController.cancel();
        try {
            FileUtils.cleanDirectory(exportDir);
        } catch (IOException e) {
            logger.error("While cleaning the export directory: " + e.getLocalizedMessage(), e);
        }
        logger.debug("Export was interrupt by user, export dir will be deleted!");

    }
}

From source file:org.rhq.plugins.storage.StorageNodeComponent.java

private OperationResult moveDataFiles(Configuration params) {
    ResourceContext context = getResourceContext();
    OperationResult result = new OperationResult();

    log.info("Preparing to move " + this + "'s datafiles to new locations");

    String newCommitLogDirectory = params.getSimpleValue("CommitLogLocation");
    String newSavedCachesDirectory = params.getSimpleValue("SavedCachesLocation");
    PropertyList allDataFileLocations = params.getList("AllDataFileLocations");
    String newDataFileDirectory = null;
    if (allDataFileLocations != null) {
        List<String> dataDirectories = new LinkedList<String>();
        for (Property property : allDataFileLocations.getList()) {
            PropertySimple dataFileLocation = (PropertySimple) property;
            dataDirectories.add(dataFileLocation.getStringValue());
        }
        if (dataDirectories.size() > 1) {
            result.setErrorMessage(
                    "This process does not support more than one active directory for StorageNode data locations. ");
            return result;
        }
        newDataFileDirectory = dataDirectories.get(0);
    }

    if (newCommitLogDirectory == null && newSavedCachesDirectory == null && newDataFileDirectory == null) {
        return new OperationResult("No new directories were specified");
    }

    log.info("Stopping storage node");
    OperationResult shutdownResult = super.shutdownNode(); // CassandraNodeComponent.shutDownNode() does draining before shutting down
    try {
        waitForNodeToGoDown();
    } catch (InterruptedException e) {
        log.error("Received " + e.getLocalizedMessage() + " while waiting for storage node "
                + getResourceContext().getResourceKey() + " to shutdown", e);
        result.setErrorMessage("Failed to stop the storage node. The storage node must be shut down in order "
                + "for the changes made by this operation to take effect. The attempt to stop shut down the storage "
                + "node failed with this error: " + shutdownResult.getErrorMessage());
        return result;
    }
    if (shutdownResult.getErrorMessage() != null) {
        log.error("Failed to stop storage node " + getResourceContext().getResourceKey() + ". The storage node "
                + "must be shut down in order for the changes made by this operation to take effect.");
        result.setErrorMessage("Failed to stop the storage node. The storage node must be shut down in order "
                + "for the changes made by this operation to take effect. The attempt to stop shut down the storage "
                + "node failed with this error: " + shutdownResult.getErrorMessage());
        return result;
    }

    log.info("Storage node shutdown, preparing to move datafiles");

    List<String> originalDataDirectories = new LinkedList<String>();
    List<String> createdDataDirectories = new LinkedList<String>();

    ConfigEditor configEditor = getYamlConfigEditor();
    try {
        configEditor.load();

        // Moving the data directory
        List<String> dataFileDirectories = configEditor.getDataFileDirectories();
        if (dataFileDirectories.size() > 1) {
            // We do not support this scenario
            log.error(
                    "More than one datadirectory configured for the StorageNode. This operation mode is not supported by this tool");
            StringBuilder pathListBuilder = new StringBuilder();
            for (String dataFileDir : dataFileDirectories) {
                pathListBuilder.append(dataFileDir).append(", ");
            }
            result.setErrorMessage("Could not proceed with moving datafiles from " + pathListBuilder.toString()
                    + "this tool does not support" + " multiple datafile paths.");
            return result;
        } else if (dataFileDirectories.size() == 1) {
            String currentDataFileLocation = dataFileDirectories.get(0);
            boolean dataFilesMoved = copyDataDirectoryIfChanged(currentDataFileLocation, newDataFileDirectory);
            if (dataFilesMoved) {
                originalDataDirectories.add(currentDataFileLocation);
                createdDataDirectories.add(newDataFileDirectory);
                List<String> newDataFileDirectories = new LinkedList<String>();
                newDataFileDirectories.add(newDataFileDirectory);
                configEditor.setDataFileDirectories(newDataFileDirectories);
            }
        }

        // In theory we wouldn't need to copy these, as draining should empty these
        String currentCommitLogDirectory = configEditor.getCommitLogDirectory();

        boolean commitLogCopied = copyDataDirectoryIfChanged(currentCommitLogDirectory, newCommitLogDirectory);
        if (commitLogCopied) {
            originalDataDirectories.add(currentCommitLogDirectory);
            createdDataDirectories.add(newCommitLogDirectory);
            configEditor.setCommitLogDirectory(newCommitLogDirectory);
        }

        // Not so dangerous if we lose these, but lets try to keep them
        String currentSavedCachesDirectory = configEditor.getSavedCachesDirectory();

        boolean savedCachesCopied = copyDataDirectoryIfChanged(currentSavedCachesDirectory,
                newSavedCachesDirectory);
        if (savedCachesCopied) {
            originalDataDirectories.add(currentSavedCachesDirectory);
            createdDataDirectories.add(newSavedCachesDirectory);
            configEditor.setSavedCachesDirectory(newSavedCachesDirectory);
        }

        log.info(this + " datafiles have been moved. Restarting storage node...");
        OperationResult startResult = startNode();
        if (startResult.getErrorMessage() != null) {
            log.error("Failed to restart storage node:\n" + startResult.getErrorMessage());
            result.setErrorMessage("Failed to restart storage node:\n" + startResult.getErrorMessage());
            // rollback here
            configEditor.restore();
            purgeDirectories(createdDataDirectories);
        } else {
            result.setSimpleResult("The storage node was succesfully updated.");
            // Commit changes, remove old directories
            configEditor.save(); // This can still throw an exception, in which case we need to rollback
            purgeDirectories(originalDataDirectories);
        }

        return result;
    } catch (ConfigEditorException e) {
        log.error("There was an error while trying to update " + configEditor.getConfigFile(), e);
        if (e.getCause() instanceof YAMLException) {
            log.info("Attempting to restore " + configEditor.getConfigFile());
            try {
                configEditor.restore();
                purgeDirectories(createdDataDirectories);
                result.setErrorMessage("Failed to update configuration file [" + configEditor.getConfigFile()
                        + "]: " + ThrowableUtil.getAllMessages(e.getCause()));
            } catch (ConfigEditorException e1) {
                log.error("Failed to restore " + configEditor.getConfigFile()
                        + ". A copy of the file prior to any modifications " + "can be found at "
                        + configEditor.getBackupFile());
                result.setErrorMessage("There was an error updating [" + configEditor.getConfigFile()
                        + "] and undoing the changes " + "Failed. A copy of the file can be found at "
                        + configEditor.getBackupFile() + ". See the " + "agent logs for more details");
            }
        }

        EmsConnection emsConnection = getEmsConnection();
        EmsBean storageService = emsConnection.getBean("org.apache.cassandra.db:type=StorageService");
        EmsAttribute attribute = storageService.getAttribute("OperationMode");
        String operationMode = (String) attribute.refresh();

        if (!operationMode.equals("NORMAL")) {
            result.setErrorMessage(
                    "Bootstrapping " + getHost() + " failed. The StorageService is reporting " + operationMode
                            + " for its operation mode but it should be reporting NORMAL. The StorageService "
                            + "operation mode is not to be confused with the Storage Node operation mode.");
        }
        return result;
    } catch (IOException e) {
        log.error("Moving datafiles failed", e);
        purgeDirectories(createdDataDirectories);
        configEditor.restore();
        result.setErrorMessage("Failed to move all the files to new destinations, " + e.getLocalizedMessage()
                + ". StorageService was left offline" + ", investigate before restarting the node");
        //            OperationResult startResult = startNode(); // return the StorageNode online, but what if IOException was out of diskspace?
        return result;
    }
}

From source file:com.github.drbookings.ui.controller.MainController.java

public void shutDown() {
    if (logger.isInfoEnabled()) {
        logger.info("Shutting down");
    }
    EXECUTOR.shutdown();
    try {
        EXECUTOR.awaitTermination(DEFAULT_THREAD_WAIT_SECONDS, TimeUnit.SECONDS);
    } catch (final InterruptedException e) {
        if (logger.isErrorEnabled()) {
            logger.error(e.getLocalizedMessage(), e);
        }
    }
}
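
This example waits once and gives up silently if the timeout elapses or the wait is interrupted. The ExecutorService Javadoc suggests a slightly sturdier shutdown pattern; the sketch below adapts it to the fields assumed above (EXECUTOR, logger, DEFAULT_THREAD_WAIT_SECONDS) and is not part of the drbookings source:

public void shutDown() {
    EXECUTOR.shutdown(); // stop accepting new tasks
    try {
        if (!EXECUTOR.awaitTermination(DEFAULT_THREAD_WAIT_SECONDS, TimeUnit.SECONDS)) {
            EXECUTOR.shutdownNow(); // cancel tasks that are still running
        }
    } catch (final InterruptedException e) {
        if (logger.isErrorEnabled()) {
            logger.error(e.getLocalizedMessage(), e);
        }
        EXECUTOR.shutdownNow();
        Thread.currentThread().interrupt(); // preserve the interrupt status
    }
}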

From source file:net.anidb.udp.UdpConnection.java

/**
 * This method cares for the flood protection, if enabled.
 * @throws UdpConnectionException If the flood protection was interrupted
 * by an {@link InterruptedException}.
 */
private void doFloodProtection() throws UdpConnectionException {
    long now, timeDifference, waitUntil, sleepTime;

    if (!this.floodProtection) {
        return;
    }
    LOG.debug("packet bonus count: " + this.packetBonusCount);
    // Calculating time difference between this and the last request
    now = System.currentTimeMillis();
    timeDifference = now - this.timestampLastPacketSend;
    if (timeDifference < 0) {
        timeDifference = 0;
    }
    // Increase bonus, if possible.
    if (timeDifference > TIME_BETWEEN_NORMAL_PACKETS) {
        this.packetBonusCount += (timeDifference / TIME_BETWEEN_NORMAL_PACKETS);
        if (this.packetBonusCount > MAXMIMUM_NUMBER_OF_BONUS_PACKETS) {
            this.packetBonusCount = MAXMIMUM_NUMBER_OF_BONUS_PACKETS;
        }
        LOG.debug("New packet bonus count: " + this.packetBonusCount);
        this.timestampLastPacketSend += (timeDifference % TIME_BETWEEN_NORMAL_PACKETS);
    }
    if (this.packetBonusCount > 0) {
        waitUntil = this.timestampLastPacketSend + TIME_BETWEEN_BONUS_PACKETS;
        this.packetBonusCount--;
        LOG.debug("Bonus packet wait time.");
    } else {
        waitUntil = this.timestampLastPacketSend + TIME_BETWEEN_NORMAL_PACKETS;
        LOG.debug("Normal packet wait time.");
    }
    sleepTime = waitUntil - now;
    if (sleepTime > 0) {
        try {
            Thread.sleep(sleepTime);
        } catch (InterruptedException ie) {
            throw new UdpConnectionException(
                    "The flood protection was interrupted: " + ie.getLocalizedMessage(), ie);
        }
    }
}

From source file:com.funambol.tools.test.PostSyncML.java

public void syncAndTest() throws IOException, TestFailedException {
    //
    // First of all clean up!
    //
    clean();

    SyncML response = null;
    String respURI = null;

    File responseFile = null;
    boolean firstMessage = false;

    File fb = new File(baseDir, "header.properties");
    if (fb.exists()) {
        propsHeader = new Properties();
        propsHeader.load(new FileInputStream(fb));
    }

    existResponseProcessor();

    int msgCount = 0;
    for (int i = 0; i < msgs.length; ++i) {
        msgCount++;

        firstMessage = msgs[i].indexOf("<MsgID>1</MsgID>") != -1;

        if (firstMessage) {
            try {
                Thread.sleep(sessionsDelay);
            } catch (InterruptedException ex) {
                // best-effort delay between sessions; nothing to clean up
            }
        }

        try {
            log.info("Trying to execute target: " + msgFiles[i]);
            AntUtil.runAntTarget(antProject, baseDir, xmlBuildFile, msgFiles[i]);
            log.info("Target " + msgFiles[i] + " executed");
        } catch (Exception ex) {

            boolean isIn = containsException(ex.getCause(), ComparisonFailure.class.getName());

            if (isIn) {
                try {
                    log.info("Comparison Failure ");
                    saveEndTestError("Actual Report different than expected.\n" + ex.getLocalizedMessage(),
                            new File(errorDir, "msg-end-test-error.xml"));
                } catch (IOException e) {
                    // saving the failure report is best-effort
                }
            }
            log.info("Error executing target " + msgFiles[i] + " (" + ex + ")");
            //do nothing
        }

        //
        // Read ant properties
        //
        Properties prop = new Properties();

        File f = new File(baseDir, msgFiles[i] + ".properties");
        if (f.exists()) {
            prop.load(new FileInputStream(f));
        }

        String propertiesValues = prop.getProperty("replace");

        //
        // Replace value_N into message before sending response
        //
        if (propertiesValues != null) {
            StringTokenizer values = new StringTokenizer(propertiesValues, ",");
            int y = 1;
            while (values.hasMoreTokens()) {
                String v = values.nextToken();
                msgs[i] = msgs[i].replaceAll("VALUE_" + y, v);
                y++;
            }
        }

        File header = new File(baseDir, "header" + msgCount + ".properties");

        if (header.exists()) {
            propsHeader = new Properties();
            propsHeader.load(new FileInputStream(header));
        }
        log.info("Sending " + msgFiles[i]);

        if (firstMessage) {
            //
            // It is a first message so we can set the
            // respURI to empty string
            //
            log.info("Message with id 1. Start new session");

            nextURL = initialURL;
        }

        try {
            response = postRequest(msgs[i]);
        } catch (RepresentationException e) {
            Timestamp t = new Timestamp(System.currentTimeMillis());
            String message = "<!-- " + t.toString() + " -->\n" + e.getMessage();
            IOTools.writeFile(message, new File(errorDir, msgFiles[i]));
            throw new TestFailedException("XML syntax error: " + e.getMessage(), e);
        } catch (Sync4jException e) {
            Timestamp t = new Timestamp(System.currentTimeMillis());
            String message = "<!-- " + t.toString() + " -->\n" + e.getMessage();
            IOTools.writeFile(message, new File(errorDir, msgFiles[i]));
            throw new TestFailedException("XML syntax error: " + e.getMessage(), e);
        }

        //
        // Write the messages responded by the server, than read the reference
        // and make the comparison (excluding the XPaths specified by
        // ignoreXPaths
        //
        responseFile = new File(responseDir, msgFiles[i]);
        log.info("Writing the response into " + responseFile);

        try {

            String xmlMsg = marshallSyncML(response);

            // Preprocess response message before comparing it with the
            // reference message
            if (preProcessResp) {
                xmlMsg = preProcessResponse(responseProcessor, xmlMsg);
            }

            IOTools.writeFile(xmlMsg, responseFile);

        } catch (Exception e) {
            e.printStackTrace();
            throw new TestFailedException("XML syntax error: " + e.getMessage(), e);
        }

        compare(msgFiles[i]);

        respURI = response.getSyncHdr().getRespURI();

        if (respURI != null) {
            nextURL = respURI;
        }
    }
}

From source file:com.amalto.core.server.DefaultItem.java

/**
 * Extract results through a view and transform them using a transformer<br/>
 * This call is asynchronous and results will be pushed via the passed {@link com.amalto.core.objects.transformers.util.TransformerCallBack}
 *
 * @param dataClusterPOJOPK The Data Cluster where to run the query
 * @param transformerPOJOPK The transformer to use
 * @param viewPOJOPK A filtering view
 * @param whereItem The condition
 * @param spellThreshold The condition spell checking threshold. A negative value de-activates spell
 * @param orderBy The full path of the item user to order
 * @param direction One of {@link com.amalto.xmlserver.interfaces.IXmlServerSLWrapper#ORDER_ASCENDING} or
 * {@link com.amalto.xmlserver.interfaces.IXmlServerSLWrapper#ORDER_DESCENDING}
 * @param start The first item index (starts at zero)
 * @param limit The maximum number of items to return
 */
@Override
public TransformerContext extractUsingTransformerThroughView(DataClusterPOJOPK dataClusterPOJOPK,
        TransformerV2POJOPK transformerPOJOPK, ViewPOJOPK viewPOJOPK, IWhereItem whereItem, int spellThreshold,
        String orderBy, String direction, int start, int limit) throws XtentisException {
    try {
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("extractUsingTransformerThroughView() ");
        }
        TransformerContext context = new TransformerContext(transformerPOJOPK);
        ArrayList<TypedContent> content = new ArrayList<TypedContent>();
        context.put("com.amalto.core.itemctrl2.content", content); //$NON-NLS-1$
        context.put("com.amalto.core.itemctrl2.ready", false); //$NON-NLS-1$
        TransformerCallBack globalCallBack = new TransformerCallBack() {
            @Override
            public void contentIsReady(TransformerContext context) throws XtentisException {
            }

            @Override
            public void done(TransformerContext context) throws XtentisException {
                context.put("com.amalto.core.itemctrl2.ready", true); //$NON-NLS-1$
            }
        };
        extractUsingTransformerThroughView(dataClusterPOJOPK, context, globalCallBack, viewPOJOPK, whereItem,
                spellThreshold, orderBy, direction, start, limit);
        while (!(Boolean) context.get("com.amalto.core.itemctrl2.ready")) {
            try {
                Thread.sleep(50);
            } catch (InterruptedException e) {
                LOGGER.error("Error while waiting for transformer's end", e);
            }
        }
        return context;
    } catch (XtentisException e) {
        throw (e);
    } catch (Exception e) {
        String err = "Unable to extract items using transformer " + transformerPOJOPK.getUniqueId()
                + " through view " + viewPOJOPK.getUniqueId() + ": " + e.getClass().getName() + ": "
                + e.getLocalizedMessage();
        LOGGER.error(err, e);
        throw new XtentisException(err, e);
    }
}
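
The busy-wait loop above polls a context flag every 50 ms and logs, but otherwise ignores, any interruption. A CountDownLatch expresses the same hand-off without polling; the sketch below is an assumed rewrite, not part of the original API (TransformerCallBack, TransformerContext and XtentisException are the types used above):

import java.util.concurrent.CountDownLatch;

final CountDownLatch ready = new CountDownLatch(1);
TransformerCallBack globalCallBack = new TransformerCallBack() {
    @Override
    public void contentIsReady(TransformerContext context) throws XtentisException {
    }

    @Override
    public void done(TransformerContext context) throws XtentisException {
        ready.countDown(); // signal completion instead of setting a flag
    }
};
// ... run the extraction as above, then block until done() fires:
try {
    ready.await();
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    LOGGER.error("Interrupted while waiting for transformer's end: " + e.getLocalizedMessage(), e);
}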

From source file:it.lilik.capturemjpeg.CaptureMJPEG.java

public void run() {
    BufferedInputStream is = null;
    InputStream responseBody = null;
    String boundary = "";

    while (!this.shouldStop) {

        this.isChangePending = false;

        if (!parent.isFocused() && parent.bwSaverActive()) { // Not focused -> wait one second
            try {
                java.lang.Thread.sleep(1000L);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
            continue;
        }

        // Open HTTP client
        try {
            this.client.executeMethod(this.method);
            responseBody = this.method.getResponseBodyAsStream();
            if (this.method.getStatusCode() == 404) {
                TextImage error = new TextImage("Unable to find '" + this.method.getPath() + "' on host '"
                        + this.method.getURI().getHost() + "'");
                setErrorImage(error);

                // Try again 
                System.out.println("Unable to find endpoint");
                continue;
            }
            is = new BufferedInputStream(responseBody);
        } catch (Exception e) {
            TextImage error;
            try {
                error = new TextImage("Unable to connect to '" + this.method.getURI().getHost() + "' ("
                        + e.getLocalizedMessage() + ")");
                setErrorImage(error);
                System.out.println("Unable to connect");
            } catch (URIException e1) {
                e1.printStackTrace();
            }

            // Try again
            continue;
        }

        try {

            // automagically guess the boundary
            Header contentType = this.method.getResponseHeader("Content-Type");
            String contentTypeS = contentType.toString();
            int startIndex = contentTypeS.indexOf("boundary=");
            int endIndex = contentTypeS.indexOf(';', startIndex);
            if (endIndex == -1) {//boundary is the last option
                /* some servers, like mjpeg-streamer puts
                 * a '\r' character at the end of each line.
                 */
                if ((endIndex = contentTypeS.indexOf('\r', startIndex)) == -1)
                    endIndex = contentTypeS.length();
            }
            boundary = contentTypeS.substring(startIndex + 9, endIndex);
            // some cameras prefix the boundary with "--", some do not
            if (!boundary.startsWith("--"))
                boundary = "--" + boundary;

        } catch (Exception e) {
            // Who cares? Running the next cycle will solve the issue, whatever...
        }

        while (!this.shouldStop) {

            synchronized (this.method) {
                this.buffer.clear();

            } //end synchronized         

            byte[] img;
            MJPEGInputStream mis = new MJPEGInputStream(is, boundary);
            //System.out.println("Created");
            try {
                synchronized (method) {
                    img = mis.readImage();
                    //System.out.println("Read");
                }
                //synchronized (lastImage) {
                if (captureEventMethod != null) {
                    try {
                        //System.out.println("Call invoker");
                        Image tmp = getImage(new ByteArrayInputStream(img));
                        captureEventMethod.invoke(parent, new Object[] { this.assign(tmp) });
                    } catch (Exception e) {
                        System.err.println(
                                "Disabling captureEvent() for " + parent.getName() + " because of an error.");
                        e.printStackTrace();
                        captureEventMethod = null;
                    }
                } else {
                    this.buffer.push(new ByteArrayInputStream(img));
                }
                //}
            } catch (IOException e) {
                TextImage error = new TextImage(e.getLocalizedMessage());

                setErrorImage(error);

                break;

            }

            if ((!parent.isFocused() && parent.bwSaverActive()) || this.isChangePending)
                break;
        }

        // Reading from the stream failed -> close it; on the next iteration it will be opened again
        this.method.releaseConnection();
        try {
            is.close();
        } catch (IOException e) {
            e.printStackTrace();
        }

    }
}