Example usage for java.lang InterruptedException getMessage

Introduction

On this page you can find example usages of java.lang.InterruptedException.getMessage(), drawn from open-source projects.

Prototype

public String getMessage() 

Document

Returns the detail message string of this throwable.
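
Before the project examples below, here is a minimal, self-contained sketch of the typical pattern: catch InterruptedException, use getMessage() for diagnostics, and restore the thread's interrupt status so callers can still observe the interruption. All class and variable names here are illustrative, not taken from the projects on this page.

public class GetMessageDemo {

    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            try {
                // simulate a blocking call that can be interrupted
                Thread.sleep(60_000);
            } catch (InterruptedException e) {
                // getMessage() returns the detail message, which may be null
                System.out.println("Interrupted: " + e.getMessage());
                // restore the interrupt flag for code further up the stack
                Thread.currentThread().interrupt();
            }
        });
        worker.start();
        worker.interrupt(); // triggers the InterruptedException inside the worker
        worker.join();
    }
}

Note that the detail message is not guaranteed to be set: depending on the JDK and the blocking call, getMessage() may return null or a short text such as "sleep interrupted", so callers should tolerate a null result.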

Usage

From source file:egovframework.com.utl.sys.fsm.service.FileSystemUtils.java

/**
 * Performs the OS command.
 *
 * @param cmdAttribs  the command line parameters
 * @param max The maximum limit for the lines returned
 * @return the parsed data
 * @throws IOException if an error occurs
 */
List performCommand(String[] cmdAttribs, int max) throws IOException {
    // this method does what it can to avoid the 'Too many open files' error
    // based on trial and error and these links:
    // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4784692
    // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4801027
    // http://forum.java.sun.com/thread.jspa?threadID=533029&messageID=2572018
    // however, it's still not perfect as the JDK support is so poor
    // (see commons-exec or Ant for a better multi-threaded, multi-OS solution)

    List lines = new ArrayList(20);
    Process proc = null;
    InputStream in = null;
    OutputStream out = null;
    InputStream err = null;
    BufferedReader inr = null;
    try {
        proc = openProcess(cmdAttribs);
        in = proc.getInputStream();
        out = proc.getOutputStream();
        err = proc.getErrorStream();
        inr = new BufferedReader(new InputStreamReader(in));
        String line = inr.readLine();
        while (line != null && lines.size() < max) {
            line = line.toLowerCase().trim();
            lines.add(line);
            line = inr.readLine();
        }

        proc.waitFor();
        if (proc.exitValue() != 0) {
            // os command problem, throw exception
            throw new IOException("Command line returned OS error code '" + proc.exitValue() + "' for command "
                    + Arrays.asList(cmdAttribs));
        }
        if (lines.size() == 0) {
            // unknown problem, throw exception
            throw new IOException(
                    "Command line did not return any info " + "for command " + Arrays.asList(cmdAttribs));
        }
        return lines;

    } catch (InterruptedException ex) {
        throw new IOException("Command line threw an InterruptedException '" + ex.getMessage()
                + "' for command " + Arrays.asList(cmdAttribs));
    } finally {
        IOUtils.closeQuietly(in);
        IOUtils.closeQuietly(out);
        IOUtils.closeQuietly(err);
        IOUtils.closeQuietly(inr);
        if (proc != null) {
            proc.destroy();
        }
    }
}

From source file:org.grails.datastore.mapping.redis.engine.RedisEntityPersister.java

@Override
protected void lockEntry(PersistentEntity persistentEntity, @SuppressWarnings("hiding") String entityFamily,
        Serializable id, int timeout) {
    String redisKey = getRedisKey(entityFamily, id);
    final TimeUnit milliUnit = TimeUnit.MILLISECONDS;
    final long waitTime = TimeUnit.SECONDS.toMillis(timeout);
    final String lockName = lockName(redisKey);
    int sleepTime = 0;
    while (true) {
        if (redisTemplate.setnx(lockName, System.currentTimeMillis())
                && redisTemplate.expire(lockName, timeout)) {
            break;
        }
        if (redisTemplate.ttl(lockName) > 0) {
            try {
                if (sleepTime > waitTime) {
                    throw new CannotAcquireLockException(
                            "Failed to acquire lock on key [" + redisKey + "]. Wait time exceeded timeout.");
                }
                // wait for previous lock to expire
                sleepTime += 500;
                milliUnit.sleep(500);
            } catch (InterruptedException e) {
                throw new CannotAcquireLockException(
                        "Failed to acquire lock on key [" + redisKey + "]: " + e.getMessage(), e);
            }
        } else {
            if (redisTemplate.getset(lockName, System.currentTimeMillis()) != null
                    && redisTemplate.expire(lockName, timeout)) {
                break;
            }
        }
    }
}

From source file:fi.helsinki.cs.iot.hub.IotHubHTTPDTest.java

@Test
public void testServiceAPI() {
    String res = DuktapeJavascriptEngineWrapper.performJavaHttpRequest("GET",
            "http://127.0.0.1:" + port + "/services/", null);
    assertEquals("[]", res.trim());

    //Now I want to create a javascript plugin to attach to my service
    String pluginName = "MyPlugin";
    //now I want to add a javascript plugin
    File pluginFile = makePluginFileForService(pluginName);
    JSONObject jsonObject = makeJsonObjectForPlugin(pluginName, null, Type.JAVASCRIPT, pluginFile, true);
    assertNotNull(jsonObject);
    String myPluginString = DuktapeJavascriptEngineWrapper.performJavaHttpRequest("POST",
            "http://127.0.0.1:" + port + "/plugins/", jsonObject.toString());
    JSONObject jPlugin = null;
    try {
        jPlugin = new JSONObject(myPluginString);
        long pluginId = jPlugin.getLong("id");
        String name = "MyService";
        String metadata = "A freshly created service";
        JSONObject jservice = new JSONObject();
        jservice.put("plugin", pluginId);
        jservice.put("name", name);
        jservice.put("metadata", metadata);
        jservice.put("bootAtStartup", false);

        // the created service should come back with no features, as it is not yet configured
        res = DuktapeJavascriptEngineWrapper.performJavaHttpRequest("POST",
                "http://127.0.0.1:" + port + "/services/", jservice.toString());
        JSONObject jexpectedService = new JSONObject();
        jexpectedService.put("id", 1);
        jexpectedService.put("name", name);
        jexpectedService.put("metadata", metadata);
        jexpectedService.put("plugin", jPlugin);
        jexpectedService.put("bootAtStartup", false);
        assertEquals(jexpectedService.toString(), res.trim());

        JSONObject data = new JSONObject();
        JSONObject config = new JSONObject();
        config.put("value", "Text to print");
        data.put("configuration", config);

        res = DuktapeJavascriptEngineWrapper.performJavaHttpRequest("PUT",
                "http://127.0.0.1:" + port + "/services/" + name, data.toString());
        jexpectedService.put("id", 1);
        jexpectedService.put("config", config.toString());
        assertEquals(jexpectedService.toString(), res.trim());

        res = DuktapeJavascriptEngineWrapper.performJavaHttpRequest("GET",
                "http://127.0.0.1:" + port + "/services/" + name + "/start", null);
        try {
            Thread.sleep(500);
        } catch (InterruptedException e) {
            e.printStackTrace();
            fail(e.getMessage());
        }

    } catch (JSONException e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}

From source file:com.datatorrent.stram.StramLocalCluster.java

@Override
@SuppressWarnings({ "SleepWhileInLoop", "ResultOfObjectAllocationIgnored" })
public void run(long runMillis) {
    long endMillis = System.currentTimeMillis() + runMillis;

    while (!appDone) {

        for (String containerIdStr : dnmgr.containerStopRequests.values()) {
            // teardown child thread
            StreamingContainer c = childContainers.get(containerIdStr);
            if (c != null) {
                ContainerHeartbeatResponse r = new ContainerHeartbeatResponse();
                r.shutdown = true;
                c.processHeartbeatResponse(r);
            }
            dnmgr.containerStopRequests.remove(containerIdStr);
            LOG.info("Container {} restart.", containerIdStr);
            dnmgr.scheduleContainerRestart(containerIdStr);
            //dnmgr.removeContainerAgent(containerIdStr);
        }

        // start containers
        while (!dnmgr.containerStartRequests.isEmpty()) {
            ContainerStartRequest cdr = dnmgr.containerStartRequests.poll();
            if (cdr != null) {
                new LocalStramChildLauncher(cdr);
            }
        }

        if (heartbeatMonitoringEnabled) {
            // monitor child containers
            dnmgr.monitorHeartbeat();
        }

        if (childContainers.isEmpty() && dnmgr.containerStartRequests.isEmpty()) {
            appDone = true;
        }

        if (runMillis > 0 && System.currentTimeMillis() > endMillis) {
            appDone = true;
        }

        try {
            if (exitCondition != null && exitCondition.call()) {
                appDone = true;
            }
        } catch (Exception ex) {
            break;
        }

        if (Thread.interrupted()) {
            break;
        }

        if (!appDone) {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                LOG.info("Sleep interrupted " + e.getMessage());
                break;
            }
        }
    }

    for (LocalStreamingContainer lsc : childContainers.values()) {
        injectShutdown.put(lsc.getContainerId(), lsc);
        lsc.triggerHeartbeat();
    }

    dnmgr.teardown();

    LOG.info("Application finished.");
    if (!perContainerBufferServer) {
        StreamingContainer.eventloop.stop(bufferServer);
        StreamingContainer.eventloop.stop();
    }
}

From source file:ch.cyberduck.core.openstack.SwiftLargeObjectUploadFeature.java

@Override
public StorageObject upload(final Path file, final Local local, final BandwidthThrottle throttle,
        final StreamListener listener, final TransferStatus status, final ConnectionCallback callback)
        throws BackgroundException {
    final DefaultThreadPool pool = new DefaultThreadPool("multipart", concurrency);
    final List<Path> existingSegments = new ArrayList<Path>();
    if (status.isAppend() || status.isRetry()) {
        // Get a lexicographically ordered list of the existing file segments
        existingSegments
                .addAll(listService
                        .list(segmentService.getSegmentsDirectory(file,
                                status.getOffset() + status.getLength()), new DisabledListProgressListener())
                        .toList());
    }
    // Get the results of the uploads in the order they were submitted
    // this is important for building the manifest, and is not a problem in terms of performance
    // because we should only continue when all segments have uploaded successfully
    final List<StorageObject> completed = new ArrayList<StorageObject>();
    // Submit file segments for concurrent upload
    final List<Future<StorageObject>> segments = new ArrayList<Future<StorageObject>>();
    long remaining = status.getLength();
    long offset = 0;
    for (int segmentNumber = 1; remaining > 0; segmentNumber++) {
        final Long length = Math.min(segmentSize, remaining);
        // Segment name with left padded segment number
        final Path segment = segmentService.getSegment(file, status.getOffset() + status.getLength(),
                segmentNumber);
        if (existingSegments.contains(segment)) {
            final Path existingSegment = existingSegments.get(existingSegments.indexOf(segment));
            if (log.isDebugEnabled()) {
                log.debug(String.format("Skip segment %s", existingSegment));
            }
            final StorageObject stored = new StorageObject(containerService.getKey(segment));
            if (HashAlgorithm.md5.equals(existingSegment.attributes().getChecksum().algorithm)) {
                stored.setMd5sum(existingSegment.attributes().getChecksum().hash);
            }
            stored.setSize(existingSegment.attributes().getSize());
            offset += existingSegment.attributes().getSize();
            completed.add(stored);
        } else {
            // Submit to queue
            segments.add(
                    this.submit(pool, segment, local, throttle, listener, status, offset, length, callback));
            if (log.isDebugEnabled()) {
                log.debug(String.format("Segment %s submitted with size %d and offset %d", segment, length,
                        offset));
            }
            remaining -= length;
            offset += length;
        }
    }
    try {
        for (Future<StorageObject> futureSegment : segments) {
            completed.add(futureSegment.get());
        }
    } catch (InterruptedException e) {
        log.error("Part upload failed with interrupt failure");
        status.setCanceled();
        throw new ConnectionCanceledException(e);
    } catch (ExecutionException e) {
        log.warn(String.format("Part upload failed with execution failure %s", e.getMessage()));
        if (e.getCause() instanceof BackgroundException) {
            throw (BackgroundException) e.getCause();
        }
        throw new DefaultExceptionMappingService().map(e.getCause());
    } finally {
        pool.shutdown(false);
    }
    // Mark parent status as complete
    status.setComplete();
    if (log.isInfoEnabled()) {
        log.info(String.format("Finished large file upload %s with %d parts", file, completed.size()));
    }
    // Create and upload the large object manifest. It is best to upload all the segments first and
    // then create or update the manifest.
    try {
        // Static Large Object.
        final String manifest = segmentService.manifest(containerService.getContainer(file).getName(),
                completed);
        if (log.isDebugEnabled()) {
            log.debug(String.format("Creating SLO manifest %s for %s", manifest, file));
        }
        final StorageObject stored = new StorageObject(containerService.getKey(file));
        final String checksum = session.getClient().createSLOManifestObject(
                regionService.lookup(containerService.getContainer(file)),
                containerService.getContainer(file).getName(), status.getMime(), containerService.getKey(file),
                manifest, Collections.emptyMap());
        // The value of the Content-Length header is the total size of all segment objects, and the value of the ETag header is calculated by taking
        // the ETag value of each segment, concatenating them together, and then returning the MD5 checksum of the result.
        stored.setMd5sum(checksum);
        return stored;
    } catch (GenericException e) {
        throw new SwiftExceptionMappingService().map("Upload {0} failed", e);
    } catch (IOException e) {
        throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
    }
}
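
The comment in the method above describes how Swift computes the ETag of a static large object: take the ETag of each segment, concatenate them, and return the MD5 checksum of the result. Below is a minimal standalone sketch of that calculation; the class and method names are illustrative, and real Swift deployments may additionally wrap the returned ETag in quotes.

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.List;

public class SloEtagCheck {

    // expected manifest ETag: MD5 over the concatenated segment ETags, hex-encoded
    static String expectedManifestEtag(List<String> segmentEtags) throws Exception {
        MessageDigest md5 = MessageDigest.getInstance("MD5");
        for (String etag : segmentEtags) {
            md5.update(etag.getBytes(StandardCharsets.US_ASCII));
        }
        StringBuilder hex = new StringBuilder();
        for (byte b : md5.digest()) {
            hex.append(String.format("%02x", b));
        }
        return hex.toString();
    }
}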

From source file:com.piketec.jenkins.plugins.tpt.TptPluginSlaveExecutor.java

/**
 * Executes a small chunk of tests. It binds to the TPT API, checks whether the given execution
 * configuration exists, prepares the test and data directories, and creates a temporary test set
 * from the chunk of tests if no test set is given. It then executes the test cases through the
 * TPT API and copies the results to the master workspace.
 *
 * @return true if the TPT execution was successful.
 */
public boolean execute() {
    logger = new TptLogger(listener.getLogger());
    try {
        // start TPT and receive the API
        TptApi api;
        try {
            api = Utils.getTptApi(build, launcher, logger, exePaths, tptPort, tptBindingName,
                    tptStartupWaitTime);
        } catch (InterruptedException e) {
            logger.interrupt(e.getMessage());
            return false;
        }
        if (api == null) {
            return false;
        }
        // open TPT File
        OpenResult openProject = api.openProject(tptFile);
        if (openProject.getProject() == null) {
            logger.error("Could not open project:\n" + Utils.toString(openProject.getLogs(), "\n"));
            return false;
        }
        new CleanUpTask(openProject.getProject(), masterId);
        // search execution configuration by name
        Collection<ExecutionConfiguration> execConfigs = openProject.getProject().getExecutionConfigurations()
                .getItems();
        ExecutionConfiguration config = null;
        for (ExecutionConfiguration elem : execConfigs) {
            if (elem.getName().equals(execCfg)) {
                config = elem;
                break;
            }
        }
        if (config == null) {
            logger.error("Could not find config");
            return false;
        }
        // adjust config to execute only the given one test case
        File oldReportDir = config.getReportDir();
        File oldTestDataDir = config.getDataDir();

        Collection<Scenario> foundScenearios = new HashSet<>();
        find(openProject.getProject().getTopLevelTestlet().getTopLevelScenarioOrGroup().getItems(),
                testSetString, foundScenearios);
        if (foundScenearios.size() != testSetString.size()) {
            logger.error("Could only find " + foundScenearios.size() + " of " + testSetString.size() + ".");
            return false;
        }

        FilePath slaveDataDir = null;
        FilePath slaveReportDir = null;
        try {
            slaveDataDir = new FilePath(build.getWorkspace(), testDataDir).absolutize();
            if (!masterWorkspace.equals(build.getWorkspace())) {
                logger.info("Creating and/or cleaning test data directory");
                Utils.deleteFiles(slaveDataDir);
            }
        } catch (IOException e) {
            logger.error("Could not create or clear test data dir");
            return false;
        } catch (InterruptedException e) {
            logger.interrupt(e.getMessage());
            return false;
        }
        logger.info("Setting test data directory to " + slaveDataDir.getRemote());
        config.setDataDir(new File(slaveDataDir.getRemote()));

        try {
            slaveReportDir = new FilePath(build.getWorkspace(), reportDir).absolutize();
            if (!masterWorkspace.equals(build.getWorkspace())) {
                logger.info("Creating and/or cleaning report directory");
                slaveReportDir.mkdirs();
                slaveReportDir.deleteContents();
            }
        } catch (IOException e) {
            logger.error(e.getMessage());
            config.setDataDir(oldTestDataDir);
            return false;
        } catch (InterruptedException e) {
            logger.interrupt(e.getMessage());
            config.setDataDir(oldTestDataDir);
            return false;
        }
        logger.info("Setting report directory to " + slaveReportDir.getRemote());
        config.setReportDir(new File(slaveReportDir.getRemote()));

        // store information to undo changes
        List<TestSet> oldTestSets = new ArrayList<>();
        List<TestSet> newTestSets = new ArrayList<>();
        List<ExecutionConfigurationItem> deactivated = new ArrayList<>();
        int i = 0;
        if (StringUtils.isEmpty(testSetName)) {
            for (ExecutionConfigurationItem item : config.getItems()) {
                oldTestSets.add(item.getTestSet());
                if (item.isActive()) {
                    Collection<Scenario> intersectionSet = intersectByHash(
                            item.getTestSet().getTestCases().getItems(), foundScenearios);
                    if (intersectionSet.isEmpty()) {
                        item.setActive(false);
                        deactivated.add(item);
                    } else {
                        String tmpTestSetName = "JENKINS Exec " + i;
                        i++;
                        logger.info("Create test set \"" + tmpTestSetName + "\" for execution of \""
                                + remoteScenarioSetToString(intersectionSet) + "\"");
                        TestSet testSet = openProject.getProject().createTestSet(tmpTestSetName);
                        newTestSets.add(testSet);
                        for (Scenario scen : intersectionSet) {
                            testSet.addTestCase(scen);
                        }
                        item.setTestSet(testSet);
                    }
                }
            }
        } else {
            String tmpTestSetName = "JENKINS Exec " + testSetName;
            logger.info("Create test set \"" + tmpTestSetName + "\" for execution of \""
                    + remoteScenarioSetToString(foundScenearios) + "\" from File " + tptFile.getName());

            TestSet testSet = openProject.getProject().createTestSet(tmpTestSetName);
            newTestSets.add(testSet);
            for (Scenario scen : foundScenearios) {
                testSet.addTestCase(scen);
            }
            for (ExecutionConfigurationItem item : config.getItems()) {
                oldTestSets.add(item.getTestSet());
                if (item.isActive()) {
                    item.setTestSet(testSet);
                }
            }
        }
        // execute test
        ExecutionStatus execStatus = api.run(config);
        while (execStatus.isRunning() || execStatus.isPending()) {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                logger.interrupt(e.getMessage());
                execStatus.cancel();
                break;
            }
        }
        // undo changes
        logger.info("Set test sets in execution config to old values.");
        for (ExecutionConfigurationItem item : config.getItems()) {
            item.setTestSet(oldTestSets.remove(0));
        }
        try {
            slaveDataDir.copyRecursiveTo(new FilePath(masterWorkspace, testDataDir));
            slaveReportDir.copyRecursiveTo(new FilePath(masterWorkspace, reportDir));

            logger.info("Copied all data to master from File " + tptFile.getName() + " to "
                    + masterWorkspace.getRemote());

        } catch (InterruptedException e) {
            logger.interrupt(e.getMessage());
            return false;
        } catch (IOException e) {
            logger.error("could not copy results to master: " + e.getMessage());
        }
        logger.info("reset test data and report directory to " + oldTestDataDir.getPath() + " and "
                + oldReportDir.getPath());
        config.setDataDir(oldTestDataDir);
        config.setReportDir(oldReportDir);
        for (TestSet testSet : newTestSets) {
            logger.info("delete temporary test set \"" + testSet.getName() + "\"");
            openProject.getProject().getTestSets().delete(testSet);
        }
        logger.info("Reactivate temporary deactivated execution config items.");
        for (ExecutionConfigurationItem item : deactivated) {
            item.setActive(true);
        }
    } catch (RemoteException e) {
        logger.error(e.getLocalizedMessage());
        e.printStackTrace(logger.getLogger());
        return false;
    } catch (ApiException e) {
        logger.error(e.getLocalizedMessage());
        e.printStackTrace(logger.getLogger());
        return false;
    }
    return true;
}

From source file:com.socrata.datasync.job.MetadataJob.java

public JobStatus run() {
    SocrataConnectionInfo connectionInfo = userPrefs.getConnectionInfo();
    JobStatus runStatus = JobStatus.SUCCESS;
    String runErrorMessage = null;
    JobStatus validationStatus = validate(connectionInfo);
    //boolean workingCopyCreated = false;
    String workingCopyDatasetId = null;

    if (validationStatus.isError()) {
        runStatus = validationStatus;
    } else {

        final SodaDdl updater = SodaDdl.newDdl(connectionInfo.getUrl(), connectionInfo.getUser(),
                connectionInfo.getPassword(), connectionInfo.getToken());
        final SodaWorkflow workflower = SodaWorkflow.newWorkflow(connectionInfo.getUrl(),
                connectionInfo.getUser(), connectionInfo.getPassword(), connectionInfo.getToken());

        boolean noExceptions = false;
        try {
            DatasetInfo datasetInfo = updater.loadDatasetInfo(datasetID);

            if (datasetInfo == null) {
                runErrorMessage = "Dataset with that ID does not exist or you do not have permission to publish to it";
                runStatus = JobStatus.PUBLISH_ERROR;
            } else {
                if (datasetInfo.PUBLISHED.equals(datasetInfo.getPublicationStage())) {
                    DatasetInfo workingCopyDatasetInfo = workflower.createWorkingCopy(datasetInfo.getId());
                    datasetInfo = updater.loadDatasetInfo(workingCopyDatasetInfo.getId());
                    workingCopyDatasetId = datasetInfo.getId();
                }

                datasetInfo.setName(title);
                datasetInfo.setDescription(description);
                datasetInfo.setCategory(category);
                if (keywords != null && !keywords.isEmpty()) {
                    datasetInfo.setTags(keywords);
                } else {
                    datasetInfo.setTags(new ArrayList<String>());
                }
                if (licenseType != null) {
                    //TODO: once the issue with clearing the license via the API is resolved, update below
                    if (licenseType == LicenseType.no_license) {
                        datasetInfo.setLicenseId(""); // null, "", "''" and "\"\"" all fail validation, so use "" for now
                    } else {
                        datasetInfo.setLicenseId(licenseType.getValue());
                    }
                }
                datasetInfo.setAttribution(dataProvidedBy);
                datasetInfo.setAttributionLink(sourceLink);
                Map<String, Object> privateMetadata = datasetInfo.getPrivateMetadata();
                if (privateMetadata == null) {
                    privateMetadata = new HashMap<String, Object>();
                }
                privateMetadata.put("contactEmail", contactInfo);

                updater.updateDatasetInfo(datasetInfo);

                if (!StringUtils.isBlank(workingCopyDatasetId)) {
                    workflower.publish(datasetInfo.getId());
                    workingCopyDatasetId = null;
                }
                noExceptions = true;
            }
        } catch (SodaError sodaError) {
            runErrorMessage = sodaError.getMessage();
            runStatus = JobStatus.PUBLISH_ERROR;
        } catch (InterruptedException interruptedException) {
            runErrorMessage = interruptedException.getMessage();
            runStatus = JobStatus.PUBLISH_ERROR;
        } catch (Exception other) {
            runErrorMessage = other.toString() + ": " + other.getMessage() + " \r\n "
                    + ExceptionUtils.getStackTrace(other);
            runStatus = JobStatus.PUBLISH_ERROR;
        } finally {
            try {
                if (!StringUtils.isBlank(workingCopyDatasetId)) {
                    workflower.publish(workingCopyDatasetId);
                }
            } catch (Exception e) {
                runErrorMessage = (runErrorMessage == null ? "" : runErrorMessage + " | ")
                        + "Unable to publish dataset after updates";
                runStatus = JobStatus.PUBLISH_ERROR;
            }
        }
    }

    String adminEmail = userPrefs.getAdminEmail();
    String logDatasetID = userPrefs.getLogDatasetID();
    JobStatus logStatus = JobStatus.SUCCESS;
    if (!logDatasetID.equals("")) {
        if (runErrorMessage != null)
            runStatus.setMessage(runErrorMessage);
        logStatus = addLogEntry(logDatasetID, connectionInfo, this, runStatus);
    }
    //Send email if there was an error updating log or target dataset
    if (userPrefs.emailUponError() && !adminEmail.equals("")) {
        String errorEmailMessage = "";
        String urlToLogDataset = connectionInfo.getUrl() + "/d/" + logDatasetID;
        if (runStatus.isError()) {
            errorEmailMessage += "There was an error updating dataset metadata.\n" + "\nDataset: "
                    + connectionInfo.getUrl() + "/d/" + getDatasetID() + "\nJob File: " + pathToSavedJobFile
                    + "\nError message: " + runErrorMessage + "\nLog dataset: " + urlToLogDataset + "\n\n";
        }
        if (logStatus.isError()) {
            errorEmailMessage += "There was an error updating the log dataset: " + urlToLogDataset + "\n"
                    + "Error message: " + logStatus.getMessage() + "\n\n";
        }
        if (runStatus.isError() || logStatus.isError()) {
            try {
                SMTPMailer.send(adminEmail, "Socrata DataSync Error", errorEmailMessage);
            } catch (Exception e) {
                System.out.println("Error sending email to: " + adminEmail + "\n" + e.getMessage());
            }
        }
    }

    // IMPORTANT: re-apply the message here because setMessage during logging interferes with the enum
    if (runErrorMessage != null)
        runStatus.setMessage(runErrorMessage);

    return runStatus;
}

From source file:me.mast3rplan.phantombot.cache.FollowersCache.java

@Override
@SuppressWarnings("SleepWhileInLoop")
public void run() {
    try {
        Thread.sleep(30 * 1000);
    } catch (InterruptedException e) {
        com.gmt2001.Console.out.println(
                "FollowersCache.run>>Failed to initial sleep: [InterruptedException] " + e.getMessage());
        com.gmt2001.Console.err.logStackTrace(e);
    }

    try {
        try {
            quickUpdate(channel);
        } catch (Exception e) {
            if (e.getMessage().startsWith("[SocketTimeoutException]")
                    || e.getMessage().startsWith("[IOException]")) {
                Calendar c = Calendar.getInstance();

                if (lastFail.after(new Date())) {
                    numfail++;
                } else {
                    numfail = 1;
                }

                c.add(Calendar.MINUTE, 1);

                lastFail = c.getTime();

                if (numfail >= 5) {
                    timeoutExpire = c.getTime();
                }
            }

            com.gmt2001.Console.out
                    .println("FollowersCache.run>>Failed to update followers: " + e.getMessage());
            com.gmt2001.Console.err.logStackTrace(e);
        }
    } catch (Exception e) {
        com.gmt2001.Console.err.printStackTrace(e);
    }

    EventBus.instance()
            .post(new TwitchFollowsInitializedEvent(PhantomBot.instance().getChannel("#" + this.channel)));

    while (!killed) {
        try {
            try {
                if (new Date().after(timeoutExpire)) {
                    /*
                     * int newCount =
                     */
                    quickUpdate(channel);

                    /*
                     * if (new Date().after(timeoutExpire) &&
                     * (Math.abs(newCount - count) > 30 || firstUpdate ||
                     * new Date().after(nextFull))) {
                     * this.updateCache(newCount); }
                     */

                    /*
                     * if (firstUpdate) { firstUpdate = false;
                     * EventBus.instance().post(new
                     * TwitchFollowsInitializedEvent(PhantomBot.instance().getChannel(this.channel)));
                     * }
                     */
                }
            } catch (Exception e) {
                if (e.getMessage().startsWith("[SocketTimeoutException]")
                        || e.getMessage().startsWith("[IOException]")) {
                    Calendar c = Calendar.getInstance();

                    if (lastFail.after(new Date())) {
                        numfail++;
                    } else {
                        numfail = 1;
                    }

                    c.add(Calendar.MINUTE, 1);

                    lastFail = c.getTime();

                    if (numfail >= 5) {
                        timeoutExpire = c.getTime();
                    }
                }

                com.gmt2001.Console.out
                        .println("FollowersCache.run>>Failed to update followers: " + e.getMessage());
                com.gmt2001.Console.err.logStackTrace(e);
            }
        } catch (Exception e) {
            com.gmt2001.Console.err.printStackTrace(e);
        }

        try {
            Thread.sleep(30 * 1000);
        } catch (InterruptedException e) {
            com.gmt2001.Console.out
                    .println("FollowersCache.run>>Failed to sleep: [InterruptedException] " + e.getMessage());
            com.gmt2001.Console.err.logStackTrace(e);
        }
    }
}

From source file:com.mgmtp.jfunk.core.mail.MailAccountManager.java

private MailAccount reserveAvailableMailAccount(final String accountReservationKey,
        final List<MailAccount> addressPool) {
    checkNotNull(accountReservationKey, "'accountReservationKey' must not be null");
    checkNotNull(addressPool, "'addressPool' must not be null");
    checkState(emailAddressPools.keySet().size() > 0, "'addressPool' must not be empty.");

    lock.lock();
    try {
        while (true) {
            String fixedAccountId = config.get("mail." + accountReservationKey + ".accountId");

            MailAccount account = lookupUsedMailAccountForCurrentThread(accountReservationKey);
            if (!isNullOrEmpty(fixedAccountId)) {
                if (account != null) {
                    checkState(account.getAccountId().equals(fixedAccountId),
                            "Fixed configured mail account does not match that already reserved (configured=%s, reserved=%s)",
                            fixedAccountId, account.getAccountId());
                } else {
                    account = lookupMailAccount(fixedAccountId);
                    if (isReserved(account)) {
                        // Already reserved by another thread, so we cannot use it.
                        // Set it to null in order to trigger the wait below.
                        account = null;
                    }
                }
            } else {
                checkState(!addressPool.isEmpty(),
                        "No fixed e-mail account configured and specified pool is empty.");

                boolean subaddressing = config.getBoolean(EmailConstants.MAIL_SUBADDRESSING);
                if (subaddressing) {
                    checkState(addressPool.size() == 1,
                            "Mail subaddressing is active, specified pool must contain only one e-mail account.");
                }
                if (account != null) {
                    checkState(addressPool.contains(account), "Account '%s' is already reserved under key: %s",
                            account, accountReservationKey);
                    log.info("Using already reserved e-mail account: {}", account.getAccountId());
                    return account;
                }

                if (subaddressing) {
                    MailAccount acc = addressPool.get(0);
                    MailAuthenticator auth = (MailAuthenticator) acc.getAuthenticator();

                    StringBuilder uniqueAddress = new StringBuilder();
                    uniqueAddress.append(StringUtils.substringBefore(acc.getAddress(), "@"));
                    uniqueAddress.append("+");
                    uniqueAddress.append(UUID.randomUUID());
                    uniqueAddress.append("@");
                    uniqueAddress.append(StringUtils.substringAfter(acc.getAddress(), "@"));

                    account = new MailAccount(acc.getAccountId(), uniqueAddress.toString(), auth.getUser(),
                            auth.getPassword());
                } else {
                    // Try to find a free account.
                    for (MailAccount acc : addressPool) {
                        ThreadReservationKeyWrapper wrapper = usedAccounts.get(acc);
                        if (wrapper == null) {
                            account = acc;
                            break;
                        }
                    }
                }
            }

            if (account == null) {
                // No free account available. We wait and then start over with the loop.
                log.info("No free e-mail account available. Waiting...");
                condition.await();
            } else {
                // We've found a free account and return it.
                String accountId = account.getAccountId();
                log.info("Found free e-mail account={} with address={}", accountId, account.getAddress());

                usedAccounts.put(account,
                        new ThreadReservationKeyWrapper(Thread.currentThread(), accountReservationKey));

                // post account reservation
                eventBusProvider.get().post(new MailAccountReservationEvent(accountReservationKey, account));

                return account;
            }
        }
    } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
        throw new JFunkException(ex.getMessage(), ex);
    } finally {
        lock.unlock();
    }
}

From source file:gov.us.fhim.ui.actions.ImportSpreadsheet.java

/**
 * @see IActionDelegate#run(IAction)
 */
public void run(IAction action) {

    ProgressMonitorDialog progressDialog = new ProgressMonitorDialog(shell);

    ObjectPluginAction opa = (ObjectPluginAction) action;

    final TreeSelection selection = (TreeSelection) opa.getSelection();

    final String ActionTitle = "Import Terminology";

    final FileDialog fdlg = new FileDialog(shell, SWT.SINGLE);

    fdlg.setText("Select Terminology Source File");

    fdlg.setFilterNames(FILTER_NAMES);
    fdlg.setFilterExtensions(FILTER_EXTS);

    IRunnableWithProgress runnableWithProgress = new IRunnableWithProgress() {

        public void run(IProgressMonitor monitor) throws InvocationTargetException, InterruptedException {

            IWorkspaceRoot myWorkspaceRoot = ResourcesPlugin.getWorkspace().getRoot();

            File f = (File) selection.getFirstElement();

            String umlPath = myWorkspaceRoot.getLocation().toOSString() + f.getFullPath().toOSString();

            try {
                importMapping(monitor, umlPath, fdlg);
            } catch (Exception e) {
                e.printStackTrace();
            }

            try {
                myWorkspaceRoot.refreshLocal(IResource.DEPTH_INFINITE, null);
            } catch (CoreException e) {
                // refresh failure is non-fatal here; intentionally ignored
            }

            if (monitor.isCanceled()) {
                monitor.done();
                return;
            }

            monitor.done();

        }

    };

    try {
        if (fdlg.open() != null) {
            progressDialog.run(false, true, runnableWithProgress);
            MetricsDialog dlg = new MetricsDialog(shell);
            dlg.create();
            dlg.open();
        }

    } catch (InvocationTargetException invocationTargetException) {
        MessageDialog.openError(shell, ActionTitle,
                "Error Processing Import " + invocationTargetException.getMessage());

    } catch (InterruptedException interruptedException) {
        MessageDialog.openError(shell, ActionTitle,
                "Error Processing Import " + interruptedException.getMessage());

    } finally {
        progressDialog.close();

    }
}