Example usage for the java.util.concurrent.TimeoutException(String) constructor

Introduction

This page collects usage examples for the java.util.concurrent.TimeoutException(String) constructor, drawn from open-source projects.

Prototype

public TimeoutException(String message) 

Document

Constructs a TimeoutException with the specified detail message.
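
The constructor is typically used to convert a low-level timeout signal (an expired Future.get, a watchdog, or a polling deadline) into an exception whose detail message tells the caller what timed out and after how long. A minimal, self-contained sketch of that pattern (the class name and the simulated task are illustrative, not taken from any of the projects below):

import java.util.concurrent.*;

public class TimeoutExceptionDemo {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        Future<String> future = pool.submit(() -> {
            Thread.sleep(5_000); // simulate slow work
            return "done";
        });
        try {
            System.out.println(future.get(1, TimeUnit.SECONDS));
        } catch (TimeoutException e) {
            future.cancel(true); // interrupt the straggler
            // Rethrow with a descriptive detail message instead of the bare exception.
            throw new TimeoutException("Task did not finish within 1 second");
        } finally {
            pool.shutdownNow();
        }
    }
}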

Usage

From source file:ch.ivyteam.ivy.maven.engine.EngineControl.java

private static long waitFor(Supplier<Boolean> condition, long duration, TimeUnit unit) throws Exception {
    StopWatch watch = new StopWatch();
    watch.start();
    long timeout = unit.toMillis(duration);

    while (!condition.get()) {
        Thread.sleep(1_000);
        if (watch.getTime() > timeout) {
            throw new TimeoutException("Condition not reached in " + duration + " " + unit);
        }
    }

    return watch.getTime();
}
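
The StopWatch here is presumably Apache Commons Lang's org.apache.commons.lang3.time.StopWatch. The helper polls once per second, so the measured time can overshoot the requested timeout by up to one polling interval; the detail message passed to TimeoutException echoes both the duration and the unit, which makes the failure self-explanatory in a build log.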

From source file:io.github.retz.web.ClientHelper.java

public static Job waitForStart(Job job, Client c, Callable<Boolean> terminate)
        throws IOException, TimeoutException {
    Job current = job;
    int interval = INITAL_INTERVAL_MSEC;
    while (current.state() == Job.JobState.QUEUED) {
        maybeSleep(interval);
        interval = Math.min(interval * 2, MAX_INTERVAL_MSEC);

        try {
            if (terminate != null && terminate.call()) {
                throw new TimeoutException("Timeout at waitForStart");
            }
        } catch (TimeoutException e) {
            throw e;
        } catch (Exception e) {
            LOG.error(e.toString(), e);
            return null; // I don't know how to handle it
        }

        Response res = c.getJob(job.id());
        if (res instanceof GetJobResponse) {
            GetJobResponse getJobResponse = (GetJobResponse) res;
            if (getJobResponse.job().isPresent()) {
                current = getJobResponse.job().get();
                continue;
            }
        } else {
            LOG.error(res.status());
            throw new IOException(res.status());
        }
    }
    return current;
}
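
This variant combines exponential backoff (the polling interval doubles up to MAX_INTERVAL_MSEC) with a caller-supplied terminate callable that decides when to give up. A TimeoutException thrown by that callable is rethrown untouched, while any other exception is logged and swallowed by returning null.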

From source file:org.apache.marmotta.platform.sparql.services.sparql.SparqlServiceImpl.java

@Override
@Deprecated
public void query(final QueryLanguage queryLanguage, final String query,
        final TupleQueryResultWriter tupleWriter, final BooleanQueryResultWriter booleanWriter,
        final SPARQLGraphResultWriter graphWriter, int timeoutInSeconds)
        throws MarmottaException, MalformedQueryException, QueryEvaluationException, TimeoutException {

    log.debug("executing SPARQL query:\n{}", query);

    Future<Boolean> future = executorService.submit(new Callable<Boolean>() {
        @Override
        public Boolean call() throws Exception {
            long start = System.currentTimeMillis();
            try {
                RepositoryConnection connection = sesameService.getConnection();
                try {
                    connection.begin();
                    Query sparqlQuery = connection.prepareQuery(queryLanguage, query,
                            configurationService.getBaseUri());

                    if (sparqlQuery instanceof TupleQuery) {
                        query((TupleQuery) sparqlQuery, tupleWriter);
                    } else if (sparqlQuery instanceof BooleanQuery) {
                        query((BooleanQuery) sparqlQuery, booleanWriter);
                    } else if (sparqlQuery instanceof GraphQuery) {
                        query((GraphQuery) sparqlQuery, graphWriter.getOutputStream(), graphWriter.getFormat());
                    } else {
                        connection.rollback();
                        throw new InvalidArgumentException(
                                "SPARQL query type " + sparqlQuery.getClass() + " not supported!");
                    }

                    connection.commit();
                } catch (Exception ex) {
                    connection.rollback();
                    throw ex;
                } finally {
                    connection.close();
                }
            } catch (RepositoryException e) {
                log.error("error while getting repository connection: {}", e);
                throw new MarmottaException("error while getting repository connection", e);
            } catch (QueryEvaluationException e) {
                log.error("error while evaluating query: {}", e.getMessage());
                throw new MarmottaException("error while writing query result in format ", e);
            }

            log.debug("SPARQL execution took {}ms", System.currentTimeMillis() - start);

            return Boolean.TRUE;
        }
    });

    try {
        future.get(timeoutInSeconds, TimeUnit.SECONDS);
    } catch (InterruptedException | TimeoutException e) {
        log.info("SPARQL query execution aborted due to timeout");
        future.cancel(true);
        throw new TimeoutException("SPARQL query execution aborted due to timeout ("
                + configurationService.getIntConfiguration("sparql.timeout", 60) + "s)");
    } catch (ExecutionException e) {
        log.info("SPARQL query execution aborted due to exception");
        log.debug("exception details", e);
        if (e.getCause() instanceof MarmottaException) {
            throw (MarmottaException) e.getCause();
        } else if (e.getCause() instanceof MalformedQueryException) {
            throw (MalformedQueryException) e.getCause();
        } else {
            throw new MarmottaException("unknown exception while evaluating SPARQL query", e.getCause());
        }
    }
}
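
Here the query runs on an executor thread and the timeout is enforced from outside via future.get(timeoutInSeconds, TimeUnit.SECONDS): on expiry the future is cancelled (interrupting the worker) and a fresh TimeoutException is thrown whose message reports the configured sparql.timeout value. If that configuration differs from the timeoutInSeconds argument actually passed in, the message can be misleading.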

From source file:edu.biu.scapi.comm.twoPartyComm.QueueCommunicationSetup.java

@Override
public Map<String, Channel> prepareForCommunication(String[] connectionsIds, long timeOut)
        throws TimeoutException {
    //Start the watch dog with the given timeout.
    watchdog = new Watchdog(timeOut);
    //Add this instance as the observer in order to receive the event of time out.
    watchdog.addTimeoutObserver(this);
    watchdog.start();

    //Create a map to hold each created channel.
    Map<String, Channel> connectedChannels = new HashMap<String, Channel>();

    //For each connection between the two parties, create a Queue channel.
    int size = connectionsIds.length;
    for (int i = 0; i < size && !bTimedOut; i++) {
        QueueChannel channel = new QueueChannel(me, other, connection, connectionsIds[i], destroyer);
        //put the created channel in the map.
        connectedChannels.put(connectionsIds[i], channel);
    }

    watchdog.stop();

    if (bTimedOut) {
        Object[] channels = connectedChannels.values().toArray();
        int len = channels.length;
        for (int i = 0; i < len; i++) {
            ((Channel) channels[i]).close();
        }
        throw new TimeoutException("timeout has occurred");
    }

    return connectedChannels;
}
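
The Watchdog signals expiry asynchronously by setting bTimedOut (this class registers itself as the timeout observer), and the creation loop checks the flag between channels. On timeout, every channel opened so far is closed before the TimeoutException is thrown, so no queue resources leak to the caller.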

From source file:org.apache.hive.hcatalog.templeton.JobRequestExecutor.java

public T execute(JobCallable<T> jobExecuteCallable)
        throws InterruptedException, TimeoutException, TooManyRequestsException, ExecutionException {
    /*
     * The callable shouldn't be null to execute. The thread pool also should be configured
     * to execute requests.
     */
    assert (jobExecuteCallable != null);
    assert (this.jobExecutePool != null);

    String type = this.requestType.toString().toLowerCase();

    String retryMessageForConcurrentRequests = "Please wait for some time before retrying "
            + "the operation. Please refer to the config " + concurrentRequestsConfigName
            + " to configure concurrent requests.";

    LOG.debug("Starting new " + type + " job request with time out " + this.requestExecutionTimeoutInSec
            + "seconds.");
    Future<T> future = null;

    try {
        future = this.jobExecutePool.submit(jobExecuteCallable);
    } catch (RejectedExecutionException rejectedException) {
        /*
         * Not able to find thread to execute the job request. Raise Busy exception and client
         * can retry the operation.
         */
        String tooManyRequestsExceptionMessage = "Unable to service the " + type + " job request as "
                + "templeton service is busy with too many " + type + " job requests. "
                + retryMessageForConcurrentRequests;

        LOG.warn(tooManyRequestsExceptionMessage);
        throw new TooManyRequestsException(tooManyRequestsExceptionMessage);
    }

    T result = null;

    try {
        result = this.requestExecutionTimeoutInSec > 0
                ? future.get(this.requestExecutionTimeoutInSec, TimeUnit.SECONDS)
                : future.get();
    } catch (TimeoutException e) {
        /*
         * See if the execution thread has just completed operation and result is available.
         * If result is available then return the result. Otherwise, raise exception.
         */
        if ((result = tryGetJobResultOrSetJobStateFailed(jobExecuteCallable)) == null) {
            String message = this.requestType + " job request got timed out. Please wait for some time "
                    + "before retrying the operation. Please refer to the config " + jobTimeoutConfigName
                    + " to configure job request time out.";
            LOG.warn(message);

            /*
             * Throw TimeoutException to caller.
             */
            throw new TimeoutException(message);
        }
    } catch (InterruptedException e) {
        /*
         * See if the execution thread has just completed operation and result is available.
         * If result is available then return the result. Otherwise, raise exception.
         */
        if ((result = tryGetJobResultOrSetJobStateFailed(jobExecuteCallable)) == null) {
            String message = this.requestType + " job request got interrupted. Please wait for some time "
                    + "before retrying the operation.";
            LOG.warn(message);

            /*
             * Throw InterruptedException to caller.
             */
            throw new InterruptedException(message);
        }
    } catch (CancellationException e) {
        /*
         * See if the execution thread has just completed operation and result is available.
         * If result is available then return the result. Otherwise, raise exception.
         */
        if ((result = tryGetJobResultOrSetJobStateFailed(jobExecuteCallable)) == null) {
            String message = this.requestType + " job request got cancelled and thread got interrupted. "
                    + "Please wait for some time before retrying the operation.";
            LOG.warn(message);

            throw new InterruptedException(message);
        }
    } finally {
        /*
         * If the thread is still active and needs to be cancelled then cancel it. This may
         * happen in case task got interrupted, or timed out.
         */
        if (enableCancelTask) {
            cancelExecutePoolThread(future);
        }
    }

    LOG.debug("Completed " + type + " job request.");

    return result;
}
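
A notable detail: on timeout, interruption, or cancellation the code first asks tryGetJobResultOrSetJobStateFailed whether the worker finished just before the signal arrived, and only throws if no result is available. This avoids discarding a job that actually completed, at the cost of slightly delaying the exception.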

From source file:org.cloudifysource.utilitydomain.context.blockstorage.VolumeUtils.java

private static String executeSilentCommandLineReturnOutput(final String commandLine, final long timeout)
        throws LocalStorageOperationException, TimeoutException {

    Executor executor = new DefaultExecutor();
    executor.setExitValue(0);
    ExecuteWatchdog watchdog = new ExecuteWatchdog(timeout);
    executor.setWatchdog(watchdog);
    ProcessOutputStream outAndErr = new ProcessOutputStream();
    try {
        PumpStreamHandler streamHandler = new PumpStreamHandler(outAndErr);
        executor.setStreamHandler(streamHandler);
        executor.execute(CommandLine.parse(commandLine));
    } catch (final Exception e) {
        if (watchdog.killedProcess()) {
            throw new TimeoutException("Timed out while executing commandLine : '" + commandLine + "'");
        }

        throw new LocalStorageOperationException("Failed executing commandLine : '" + commandLine
                + ". Process output was : " + outAndErr.getOutput(), e);
    }

    return outAndErr.getOutput();
}
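
ExecuteWatchdog comes from Apache Commons Exec: it kills the child process once the timeout elapses, which then surfaces as a failed execution. The code distinguishes a timeout from an ordinary failure by asking watchdog.killedProcess(), mapping the former to TimeoutException and the latter to LocalStorageOperationException with the captured process output. A minimal sketch of the same pattern, assuming commons-exec on the classpath and a POSIX sleep command:

import org.apache.commons.exec.CommandLine;
import org.apache.commons.exec.DefaultExecutor;
import org.apache.commons.exec.ExecuteWatchdog;

import java.util.concurrent.TimeoutException;

public class WatchdogDemo {
    public static void main(String[] args) throws Exception {
        DefaultExecutor executor = new DefaultExecutor();
        ExecuteWatchdog watchdog = new ExecuteWatchdog(2_000); // kill the child after 2s
        executor.setWatchdog(watchdog);
        try {
            executor.execute(CommandLine.parse("sleep 10"));
        } catch (Exception e) {
            if (watchdog.killedProcess()) {
                // The watchdog fired: report a timeout rather than a generic failure.
                throw new TimeoutException("Timed out while executing commandLine : 'sleep 10'");
            }
            throw e;
        }
    }
}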

From source file:org.jenkinsci.plugins.os_ci.model.Product.java

public boolean deploy(NexusClient nexusClient, OpenStackClient openStackClient,
        YumRepoParameters yumRepoParameters, DeployParmeters deployParmeters) throws Exception {
    final String targetFolder = Joiner.on(File.separator).join(build.getWorkspace().getRemote(), "archive");

    LogUtils.logSection(listener, "Deploy Product " + artifact.getArtifactId());

    List<String> fileTypes = new ArrayList<String>();
    fileTypes.add("pom");
    fileTypes.add("tar.gz");
    fileTypes.add("rpm");
    fileTypes.add("rh6");
    nexusClient.downloadProductArtifacts(getArtifact(),
            Joiner.on(File.separator).join(targetFolder, artifact.getArtifactId()), fileTypes);
    //****************** get pom file dependencies ******************
    MavenPom mavenPom = new MavenPom(
            new File(Joiner.on(File.separator).join(targetFolder, artifact.getArtifactId(), "pom.xml")));
    List<ArtifactParameters> subProducts = mavenPom.getPomProductDependencies();

    for (ArtifactParameters m : subProducts) {
        Product p = new Product(m, build, listener);
        boolean return_ = p.deploy(nexusClient, openStackClient, yumRepoParameters, deployParmeters);
        if (!return_)
            throw new ProductDeployPluginException(
                    "Deploy dependent product " + m.getArtifactId() + " failed.");
    }

    String buildId = Joiner.on("-").join(new SimpleDateFormat("yyyy-MM-dd_HH-mm-ss-SS").format(build.getTime()),
            deployParmeters.getDeployCounter());

    if (!new File(Joiner.on(File.separator).join(targetFolder, artifact.getArtifactId(),
            "external_dependencies.tar.gz")).exists()) {
        // if there isn't an external_dependencies.tar.gz file
        // only a pom file => we're deploying a profile
        LogUtils.log(listener, "Finish Deploy: " + artifact.getArtifactId());
        return true;
    }

    CompressUtils.untarFile(new File(Joiner.on(File.separator).join(targetFolder, artifact.getArtifactId(),
            "external_dependencies.tar.gz")));
    LogUtils.log(listener, "Untar File: " + Joiner.on(File.separator).join(targetFolder,
            artifact.getArtifactId(), "external_dependencies.tar.gz"));

    //****************** move deployment-scripts ******************
    // Push scripts to YUM repo machine
    copyFolderToRepoMachine(build, listener, yumRepoParameters,
            Joiner.on(File.separator).join(targetFolder, artifact.getArtifactId(), "archive", "deploy-scripts"),
            Joiner.on("/").join("/var", "www", "html", "build", buildId));
    LogUtils.log(listener, "Copy deployment-scripts folder to Yum Repo machine.");

    // move external rpms from archive/product/archive/rpms  to /archive/repo folder

    if (new File(Joiner.on(File.separator).join(targetFolder, artifact.getArtifactId(), "archive", "repo"))
            .exists()) {
        moveExternalRpmsToRepoDirectory(
                Joiner.on(File.separator).join(targetFolder, artifact.getArtifactId(), "archive", "repo"),
                Joiner.on(File.separator).join(targetFolder, "repo"));
        LogUtils.log(listener, "Copied external RPMS to repo directory.");
    }

    File deploymentScriptsRPM = new File(Joiner.on(File.separator).join(targetFolder, artifact.getArtifactId(),
            artifact.getArtifactId() + "-" + artifact.getVersion() + ".rpm"));
    if (deploymentScriptsRPM.exists()) {
        // Rename the RPM file according to its metadata
        if (!System.getProperty("os.name").toLowerCase().startsWith("windows")) {
            ExecUtils.executeLocalCommand(
                    "/usr/local/bin/download_rpm.sh " + deploymentScriptsRPM.getPath().replaceAll(" ", "\\ "),
                    deploymentScriptsRPM.getParentFile().getPath().replaceAll(" ", "\\ "));
            deploymentScriptsRPM.delete();
            deploymentScriptsRPM = new File(Joiner.on(File.separator).join(targetFolder,
                    artifact.getArtifactId(), "nds_" + artifact.getArtifactId() + "_deployment-scripts" + "-"
                            + artifact.getVersion() + "_1.noarch.rpm"));
        }

        LogUtils.log(listener, deploymentScriptsRPM.getParentFile().list().toString());
        FileUtils.moveFileToDirectory(deploymentScriptsRPM,
                new File(Joiner.on(File.separator).join(targetFolder, "repo")), true);
        LogUtils.log(listener, "Copied deployment-scripts to repo directory");
    }
    MavenPom mp = new MavenPom(
            new File(Joiner.on(File.separator).join(targetFolder, artifact.getArtifactId(), "pom.xml")));
    List<ArtifactParameters> rpms = mp.getPomModuleDependencies();

    // download rpms to archive/repo
    LogUtils.logSection(listener, "Download dependent RPMS.");
    for (ArtifactParameters m : rpms) {
        new NexusClient(m, build, listener).downloadRPM(Joiner.on(File.separator).join(targetFolder, "repo"));
    }

    // create yum repo
    createAndMoveYumRepo(build, listener, yumRepoParameters,
            Joiner.on(File.separator).join(targetFolder, "repo"),
            String.valueOf(deployParmeters.getDeployCounter()));
    LogUtils.log(listener, "YUM repository have been created.");

    // deploy stack
    String stackName = artifact.getArtifactId().toLowerCase().replace("-product", "");

    openStackClient.createStack(stackName, deployParmeters.getOverridingParameters(),
            deployParmeters.getGlobalOutputs(), Joiner.on(File.separator).join(targetFolder,
                    artifact.getArtifactId(), "archive", "heat", stackName));

    long startTime = System.currentTimeMillis();
    boolean createComplete = false;

    while (!createComplete && System.currentTimeMillis() - startTime < CREATE_TIMEOUT) {
        StackStatus stackStatus = openStackClient.getStackStatus(stackName);
        LogUtils.log(listener, "Waiting for stack creation for " + stackName + ". Status is: " + stackStatus);
        if (stackStatus == StackStatus.CREATE_COMPLETE) {
            createComplete = true;
            // update outputs map
            Map<String, String> stackOutputs = openStackClient.getStackOutputs(stackName);
            deployParmeters.setGlobalOutputsWithNewOutputs(stackOutputs);
        } else if (stackStatus == StackStatus.FAILED || stackStatus == StackStatus.CREATE_FAILED
                || stackStatus == StackStatus.UNDEFINED)
            throw new ProductDeployPluginException("Failed to Launch Stack " + stackName);
        else
            Thread.sleep(SLEEP_TIME);

    }
    // if the stack is not complete after 40 minutes, throw a timeout exception
    if (!createComplete)
        throw new TimeoutException("Create Stack- timeout exception");

    // clean files
    try {
        FileUtils.cleanDirectory(new File(Joiner.on(File.separator).join(targetFolder, "repo")));
    } catch (IOException e) {
        /* swallow */
    }

    deployParmeters.increaseDeployCounter();
    LogUtils.log(listener, "Increased deployment counter.");

    return true;

}
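
The deploy flow polls the Heat stack status in a bounded loop: it sleeps SLEEP_TIME between checks and gives up once CREATE_TIMEOUT elapses (40 minutes, per the comment), converting an unfinished creation into a TimeoutException rather than waiting forever on a stuck stack.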

From source file:org.construct_infrastructure.io.MessageReader.java

/**
 * Returns the next message available. Will block if no messages are available.
 *
 * @return the next message available.
 * @throws InterruptedException
 *            if the message reader is instructed to close.
 * @throws TimeoutException
 *            if no message arrives before the configured timeout elapses.
 */
public Message getMessage() throws InterruptedException, TimeoutException {
    while (!available()) {
        synchronized (this) {
            wait(ONE_HUNDRED);
        }
        if (!my_keepReading) {
            throw new InterruptedException("MessageReader instructed to close");
        }
        // System.err.println(this + " - " + (my_timeout - splitTime));
        if (hasTimedOut()) {
            my_errorOccured = true;
            throw new TimeoutException("Socket Timeout Occured (" + my_timeout + ")");
        }
    }
    // Remove the message at the head of the list and return
    final Message message = (Message) my_messageList.get(0);
    my_messageList.remove(0);
    my_stopwatch.stop();
    my_stopwatch.reset();
    my_stopwatch.start();
    return message;
}
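
getMessage combines three exit conditions in one polling loop: a bounded wait on the monitor (ONE_HUNDRED is presumably 100 ms), a shutdown flag that surfaces as InterruptedException, and a stopwatch-based deadline that surfaces as TimeoutException carrying the configured timeout in its message. The stopwatch is restarted after each successful read, so the timeout measures the gap since the last retrieved message rather than total connection lifetime.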

From source file:org.opensilk.video.data.DataService.java

/**
 * This observable completes after one item.
 */
public Observable<VideoFileInfo> getVideoFileInfo(final MediaBrowser.MediaItem mediaItem) {
    final Uri mediaUri = MediaDescriptionUtil.getMediaUri(mediaItem.getDescription());
    final MediaDescription description = mediaItem.getDescription();
    return Observable.<VideoFileInfo.Builder, Media>using(() -> {
        return new Media(mVlcInstance.get(), mediaUri);
    }, media -> {
        return Observable.create(subscriber -> {
            final Subscription timeout = Observable.timer(30, TimeUnit.SECONDS).subscribe(l -> {
                if (!subscriber.isUnsubscribed()) {
                    subscriber.onError(new TimeoutException("Took too long to parse"));
                }
            });
            subscriber.add(timeout);
            media.setEventListener(event -> {
                timeout.unsubscribe(); //cancel error task
                switch (event.type) {
                case Media.Event.ParsedChanged: {
                    VideoFileInfo.Builder bob = VideoFileInfo.builder(mediaUri)
                            .setTitle(MediaDescriptionUtil.getMediaTitle(description))
                            .setDuration(media.getDuration());
                    for (int ii = 0; ii < media.getTrackCount(); ii++) {
                        Media.Track track = media.getTrack(ii);
                        if (track == null) {
                            continue;
                        }
                        switch (track.type) {
                        case Media.Track.Type.Audio: {
                            Media.AudioTrack audioTrack = (Media.AudioTrack) track;
                            bob.addAudioTrack(audioTrack.codec, audioTrack.bitrate, audioTrack.rate,
                                    audioTrack.channels);
                            break;
                        }
                        case Media.Track.Type.Video: {
                            Media.VideoTrack videoTrack = (Media.VideoTrack) track;
                            bob.addVideoTrack(videoTrack.codec, videoTrack.width, videoTrack.height,
                                    videoTrack.bitrate, videoTrack.frameRateNum, videoTrack.frameRateDen);
                            break;
                        }
                        case Media.Track.Type.Text: {
                            //TODO
                            break;
                        }
                        }
                    }
                    if (subscriber.isUnsubscribed()) {
                        return;
                    }
                    subscriber.onNext(bob);
                    subscriber.onCompleted();
                    break;
                }
                default: {
                    if (subscriber.isUnsubscribed()) {
                        return;
                    }
                    subscriber.onError(new Exception("unexpected event"));
                    break;
                }
                }
            });
            //TODO parse() doesn't synchronously parse
            media.parseAsync(Media.Parse.ParseNetwork);
        });
    }, media -> {
        media.setEventListener(null);
        media.release();
    }).zipWith(getFileSize(mediaItem).toObservable(), (builder, size) -> {
        return builder.setSize(size).build();
    }); // no observeOn as the media callback is posted to the main thread.
}
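
Observable.using ties the native Media object's lifetime to the subscription: the dispose callback clears the event listener and releases the VLC handle regardless of outcome. The 30-second timer acts as a watchdog that fails the stream with a TimeoutException if VLC never delivers a ParsedChanged event, and the listener unsubscribes it as soon as any event arrives.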

From source file:com.alu.e3.logger.LogCollector.java

/**
 * Traverse the topology and collect logs from each instance found.
 *
 * @param waitTimeout   The time to wait if another collector is running
 * @param unit   The time unit for waitTimeout
 * @return The number of bytes collected in all logs
 * @throws InterruptedException
 * @throws TimeoutException
 * @throws NonExistingManagerException 
 */
public long collectAllLogs(long waitTimeout, TimeUnit unit)
        throws InterruptedException, TimeoutException, NonExistingManagerException {
    DateFormat dateFormat = new SimpleDateFormat("yyyy.MM.dd 'at' HH:mm:ss z");
    String launchDate = dateFormat.format(new Date());
    logger.debug("Launching system-manager log-file collection ({}) ...", launchDate);

    if (!LogCollector.writerLock.tryLock(waitTimeout, unit)) {
        logger.warn(
                "Attempt to run log collector but cannot acquire lock (another collection must be running)");
        throw new TimeoutException("Timeout waiting to acquire log collector write lock");
    }
    long collectionCount = 0L;
    logger.debug("LogCollector ID: {}", getCollectorID());

    try {
        Set<String> visitedIPs = new HashSet<String>();

        // Create the top-level log collection directory, if this is the first time run
        File collectionDir = new File(COLLECTION_PATH);
        if (!collectionDir.exists() && !collectionDir.mkdirs()) {
            logger.error("Unable to create log-collection directory: {}", COLLECTION_PATH);
            return 0L;
        }

        // Iterate through all instances in the current topology         
        Instance logCollectorInstance = Utilities.getManagerByIP(CommonTools.getLocalHostname(),
                CommonTools.getLocalAddress(), topology.getInstancesByType(E3Constant.E3MANAGER), logger);
        List<Instance> instances = getInstanceList(this.topology);
        if (logger.isDebugEnabled()) {
            logger.debug("There are {} instances in the current topology", instances.size());
        }
        for (Instance logSource : instances) {
            LogCollector.logInstance(logSource); // for debugging

            // Avoid visiting the same address twice
            String ipAddress = logSource.getInternalIP();
            if (ipAddress == null) {
                logger.warn("Encountered instance node with null ipAddress during log collection!");
                continue;
            }
            if (CommonTools.isLocal(ipAddress)) {
                ipAddress = E3Constant.localhost; // stay consistent
            }
            if (visitedIPs.contains(ipAddress)) {
                if (logger.isDebugEnabled()) {
                    logger.debug("Skipping already-visited address: {}", ipAddress);
                }
                continue;
            }
            visitedIPs.add(ipAddress);

            // Create or verify the existence of a log-collection target directory
            String sanitizedHost = ipToCollectionDirectory(ipAddress);
            File instanceCollectionDir = new File(COLLECTION_PATH, sanitizedHost);
            if (instanceCollectionDir.exists()) {
                if (!instanceCollectionDir.isDirectory()) {
                    logger.error("Log-collection target exists but is not a directory: {}",
                            instanceCollectionDir.getAbsolutePath());
                    continue;
                }
            } else {
                if (!instanceCollectionDir.mkdirs()) {
                    logger.error("Unable to create log-collection directory: {}",
                            instanceCollectionDir.getAbsolutePath());
                    continue;
                }
            }

            // Finally, perform log collection
            // There may be a chance for parallelism here by farming the collection work for each instance
            // out to a separate worker thread.  At a minimum the local collection could occur in parallel with
            // collection on a remote host.
            if (ipAddress.equalsIgnoreCase(E3Constant.localhost)) {
                try {
                    collectionCount += collectAllLocalLogs(instanceCollectionDir);
                } catch (IOException ex) {
                    logger.warn("Error trying to copy local log files to {}",
                            instanceCollectionDir.getAbsolutePath());
                }
            } else {
                try {
                    collectionCount += collectAllRemoteLogs(logSource, logCollectorInstance,
                            instanceCollectionDir);
                } catch (JSchException ex) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("Could not connect to host: {}", logSource.getInternalIP());
                        logger.debug(ex.getLocalizedMessage());
                    }
                } catch (IOException ex) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("Got IOException while connecting to or transferring files from host: {}",
                                logSource.getInternalIP());
                        logger.debug(ex.getLocalizedMessage());
                    }
                }
            }
            // At this point the collection has "completed", even if IOExceptions could have
            // occurred and been caught above
            LogCollector.lastCompletedCollector.set(getCollectorID());
            logger.debug("Completed log collection with ID: {} ({})", getCollectorID(),
                    dateFormat.format(new Date()));
        }
    } finally {
        LogCollector.writerLock.unlock();
    }
    return collectionCount;
}
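
writerLock.tryLock(waitTimeout, unit) converts lock contention into a TimeoutException, letting callers distinguish "another collection is still running" from genuine failures. Note that lastCompletedCollector is updated inside the per-instance loop, so it is refreshed after each instance rather than once after the whole traversal completes.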