Example usage for java.util.concurrent ExecutorCompletionService submit

List of usage examples for java.util.concurrent ExecutorCompletionService submit

Introduction

On this page you can find usage examples for java.util.concurrent.ExecutorCompletionService.submit.

Prototype

public Future<V> submit(Callable<V> task) 
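
Before the project examples below, here is a minimal, self-contained sketch of the usual submit/take pattern (the class name, pool size and simulated work are illustrative, not taken from any project on this page): each Callable is handed to submit, and the resulting futures are consumed in completion order rather than submission order.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class CompletionServiceExample {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        ExecutorCompletionService<Integer> completionService = new ExecutorCompletionService<Integer>(pool);
        int taskCount = 10;

        // submit(Callable) wraps each task in a FutureTask and places it on the
        // internal completion queue as soon as it finishes
        for (int i = 0; i < taskCount; i++) {
            final int n = i;
            completionService.submit(new Callable<Integer>() {
                @Override
                public Integer call() throws Exception {
                    Thread.sleep((long) (Math.random() * 100)); // simulated work
                    return n * n;
                }
            });
        }

        // take() blocks until some task has completed and returns its Future,
        // in completion order rather than submission order
        for (int i = 0; i < taskCount; i++) {
            Future<Integer> done = completionService.take();
            System.out.println("completed: " + done.get());
        }

        pool.shutdown();
    }
}

Several of the examples below bound the wait by using poll(timeout, unit) instead of the blocking take().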

Usage

From source file:org.apache.solr.client.solrj.impl.BackupRequestLBHttpSolrServer.java

/**
 * Tries to query a live server from the list provided in Req. Servers in the
 * dead pool are skipped. If a request fails due to an IOException, the server
 * is moved to the dead pool for a certain period of time, or until a test
 * request on that server succeeds.
 *
 * If a request takes longer than backUpRequestDelay the request will be sent
 * to the next server in the list, this will continue until there is a
 * response, the server list is exhausted or the number of requests in flight
 * equals maximumConcurrentRequests.
 *
 * Servers are queried in the exact order given (except servers currently in
 * the dead pool are skipped). If no live servers from the provided list
 * remain to be tried, a number of previously skipped dead servers will be
 * tried. Req.getNumDeadServersToTry() controls how many dead servers will be
 * tried.
 *
 * If no live servers are found a SolrServerException is thrown.
 *
 * @param req
 *          contains both the request as well as the list of servers to query
 *
 * @return the result of the request
 */
@Override
public Rsp request(Req req) throws SolrServerException, IOException {
    ArrayBlockingQueue<Future<RequestTaskState>> queue = new ArrayBlockingQueue<Future<RequestTaskState>>(
            maximumConcurrentRequests + 1);
    ExecutorCompletionService<RequestTaskState> executer = new ExecutorCompletionService<RequestTaskState>(
            threadPoolExecuter, queue);
    List<ServerWrapper> skipped = new ArrayList<ServerWrapper>(req.getNumDeadServersToTry());
    int inFlight = 0;
    RequestTaskState returnedRsp = null;
    Exception ex = null;

    for (String serverStr : req.getServers()) {
        serverStr = normalize(serverStr);
        // if the server is currently a zombie, just skip to the next one
        ServerWrapper wrapper = zombieServers.get(serverStr);
        if (wrapper != null) {
            if (tryDeadServers && skipped.size() < req.getNumDeadServersToTry()) {
                skipped.add(wrapper);
            }
            continue;
        }
        HttpSolrServer server = makeServer(serverStr);
        Callable<RequestTaskState> task = createRequestTask(server, req, false);
        executer.submit(task);
        inFlight++;
        returnedRsp = getResponseIfReady(executer, inFlight >= maximumConcurrentRequests);
        if (returnedRsp == null) {
            // null response signifies that the response took too long.
            log.info("Server :{} did not respond before the backUpRequestDelay time of {} elapsed",
                    server.getBaseURL(), backUpRequestDelay);
            continue;
        }
        inFlight--;
        if (returnedRsp.stateDescription == TaskState.ResponseReceived) {
            return returnedRsp.response;
        } else if (returnedRsp.stateDescription == TaskState.ServerException) {
            ex = returnedRsp.exception;
        } else if (returnedRsp.stateDescription == TaskState.RequestException) {
            throw new SolrServerException(returnedRsp.exception);
        }
    }

    // no response so try the zombie servers
    if (tryDeadServers) {
        if (returnedRsp == null || returnedRsp.stateDescription == TaskState.ServerException) {
            // try the servers we previously skipped
            for (ServerWrapper wrapper : skipped) {
                Callable<RequestTaskState> task = createRequestTask(wrapper.solrServer, req, true);
                executer.submit(task);
                inFlight++;
                returnedRsp = getResponseIfReady(executer, inFlight >= maximumConcurrentRequests);
                if (returnedRsp == null) {
                    log.info("Server :{} did not respond before the backUpRequestDelay time of {} elapsed",
                            wrapper.getKey(), backUpRequestDelay);
                    continue;
                }
                inFlight--;
                if (returnedRsp.stateDescription == TaskState.ResponseReceived) {
                    return returnedRsp.response;
                } else if (returnedRsp.stateDescription == TaskState.ServerException) {
                    ex = returnedRsp.exception;
                } else if (returnedRsp.stateDescription == TaskState.RequestException) {
                    throw new SolrServerException(returnedRsp.exception);
                }
            }
        }
    }

    // All current attempts could be slower than backUpRequestPause or returned
    // response could be from struggling server
    // so we need to wait until we get a good response or tasks all are
    // exhausted.
    if (returnedRsp == null || returnedRsp.stateDescription == TaskState.ServerException) {
        while (inFlight > 0) {
            returnedRsp = getResponseIfReady(executer, true);
            inFlight--;
            if (returnedRsp.stateDescription == TaskState.ResponseReceived) {
                return returnedRsp.response;
            } else if (returnedRsp.stateDescription == TaskState.ServerException) {
                ex = returnedRsp.exception;
            } else if (returnedRsp.stateDescription == TaskState.RequestException) {
                throw new SolrServerException(returnedRsp.exception);
            }
        }
    }

    if (ex == null) {
        throw new SolrServerException("No live SolrServers available to handle this request");
    } else {
        throw new SolrServerException(
                "No live SolrServers available to handle this request:" + zombieServers.keySet(), ex);
    }
}

From source file:com.netflix.curator.framework.recipes.locks.TestInterProcessSemaphoreCluster.java

@Test
public void testKilledServerWithEnsembleProvider() throws Exception {
    final int CLIENT_QTY = 10;
    final Timing timing = new Timing();
    final String PATH = "/foo/bar/lock";

    ExecutorService executorService = Executors.newFixedThreadPool(CLIENT_QTY);
    ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<Void>(executorService);
    TestingCluster cluster = new TestingCluster(3);
    try {
        cluster.start();

        final AtomicReference<String> connectionString = new AtomicReference<String>(
                cluster.getConnectString());
        final EnsembleProvider provider = new EnsembleProvider() {
            @Override
            public void start() throws Exception {
            }

            @Override
            public String getConnectionString() {
                return connectionString.get();
            }

            @Override
            public void close() throws IOException {
            }
        };

        final Semaphore acquiredSemaphore = new Semaphore(0);
        final AtomicInteger acquireCount = new AtomicInteger(0);
        final CountDownLatch suspendedLatch = new CountDownLatch(CLIENT_QTY);
        for (int i = 0; i < CLIENT_QTY; ++i) {
            completionService.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    CuratorFramework client = CuratorFrameworkFactory.builder().ensembleProvider(provider)
                            .sessionTimeoutMs(timing.session()).connectionTimeoutMs(timing.connection())
                            .retryPolicy(new ExponentialBackoffRetry(100, 3)).build();
                    try {
                        final Semaphore suspendedSemaphore = new Semaphore(0);
                        client.getConnectionStateListenable().addListener(new ConnectionStateListener() {
                            @Override
                            public void stateChanged(CuratorFramework client, ConnectionState newState) {
                                if ((newState == ConnectionState.SUSPENDED)
                                        || (newState == ConnectionState.LOST)) {
                                    suspendedLatch.countDown();
                                    suspendedSemaphore.release();
                                }
                            }
                        });

                        client.start();

                        InterProcessSemaphoreV2 semaphore = new InterProcessSemaphoreV2(client, PATH, 1);

                        while (!Thread.currentThread().isInterrupted()) {
                            Lease lease = null;
                            try {
                                lease = semaphore.acquire();
                                acquiredSemaphore.release();
                                acquireCount.incrementAndGet();
                                suspendedSemaphore.acquire();
                            } catch (Exception e) {
                                // just retry
                            } finally {
                                if (lease != null) {
                                    acquireCount.decrementAndGet();
                                    IOUtils.closeQuietly(lease);
                                }
                            }
                        }
                    } finally {
                        IOUtils.closeQuietly(client);
                    }
                    return null;
                }
            });
        }

        Assert.assertTrue(timing.acquireSemaphore(acquiredSemaphore));
        Assert.assertEquals(1, acquireCount.get());

        cluster.close();
        timing.awaitLatch(suspendedLatch);
        timing.forWaiting().sleepABit();
        Assert.assertEquals(0, acquireCount.get());

        cluster = new TestingCluster(3);
        cluster.start();

        connectionString.set(cluster.getConnectString());
        timing.forWaiting().sleepABit();

        Assert.assertTrue(timing.acquireSemaphore(acquiredSemaphore));
        timing.forWaiting().sleepABit();
        Assert.assertEquals(1, acquireCount.get());
    } finally {
        executorService.shutdown();
        executorService.awaitTermination(10, TimeUnit.SECONDS);
        executorService.shutdownNow();
        IOUtils.closeQuietly(cluster);
    }
}

From source file:com.liferay.sync.engine.lan.session.LanSession.java

protected SyncLanClientQueryResult findSyncLanClient(SyncFile syncFile) throws Exception {

    SyncAccount syncAccount = SyncAccountService.fetchSyncAccount(syncFile.getSyncAccountId());

    List<String> syncLanClientUuids = SyncLanEndpointService
            .findSyncLanClientUuids(syncAccount.getLanServerUuid(), syncFile.getRepositoryId());

    if (syncLanClientUuids.isEmpty()) {
        return null;
    }

    final List<Callable<SyncLanClientQueryResult>> syncLanClientQueryResultCallables = Collections
            .synchronizedList(new ArrayList<Callable<SyncLanClientQueryResult>>(syncLanClientUuids.size()));

    for (String syncLanClientUuid : syncLanClientUuids) {
        SyncLanClient syncLanClient = SyncLanClientService.fetchSyncLanClient(syncLanClientUuid);

        syncLanClientQueryResultCallables.add(createSyncLanClientQueryResultCallable(syncLanClient, syncFile));
    }

    int queryPoolSize = Math.min(syncLanClientUuids.size(), PropsValues.SYNC_LAN_SESSION_QUERY_POOL_MAX_SIZE);

    List<Future<SyncLanClientQueryResult>> pendingSyncLanClientQueryResults = new ArrayList<>(queryPoolSize);

    ExecutorCompletionService<SyncLanClientQueryResult> executorCompletionService = new ExecutorCompletionService<>(
            getExecutorService());

    for (int i = 0; i < queryPoolSize; i++) {
        Callable<SyncLanClientQueryResult> callable = new Callable<SyncLanClientQueryResult>() {

            @Override
            public synchronized SyncLanClientQueryResult call() throws Exception {

                if (syncLanClientQueryResultCallables.isEmpty()) {
                    return null;
                }

                Callable<SyncLanClientQueryResult> syncLanClientQueryResultCallable = syncLanClientQueryResultCallables
                        .remove(0);

                try {
                    return syncLanClientQueryResultCallable.call();
                } catch (Exception e) {
                    return call();
                }
            }

        };

        pendingSyncLanClientQueryResults.add(executorCompletionService.submit(callable));
    }

    List<Future<SyncLanClientQueryResult>> completedSyncLanClientQueryResult = new ArrayList<>(queryPoolSize);

    long timeout = PropsValues.SYNC_LAN_SESSION_QUERY_TOTAL_TIMEOUT;

    long endTime = System.currentTimeMillis() + timeout;

    for (int i = 0; i < queryPoolSize; i++) {
        Future<SyncLanClientQueryResult> future = executorCompletionService.poll(timeout,
                TimeUnit.MILLISECONDS);

        if (future == null) {
            for (Future<SyncLanClientQueryResult> pendingSyncLanClientQueryResult : pendingSyncLanClientQueryResults) {

                if (!pendingSyncLanClientQueryResult.isDone()) {
                    pendingSyncLanClientQueryResult.cancel(true);
                }
            }

            break;
        }

        completedSyncLanClientQueryResult.add(future);

        timeout = endTime - System.currentTimeMillis();
    }

    SyncLanClientQueryResult candidateSyncLanClientQueryResult = null;
    int candidateDownloadRatePerConnection = 0;

    for (Future<SyncLanClientQueryResult> completedFuture : completedSyncLanClientQueryResult) {

        SyncLanClientQueryResult syncLanClientQueryResult = null;

        try {
            syncLanClientQueryResult = completedFuture.get();
        } catch (Exception e) {
            continue;
        }

        if (syncLanClientQueryResult == null) {
            continue;
        }

        if (syncLanClientQueryResult.getConnectionsCount() >= syncLanClientQueryResult.getMaxConnections()) {

            if (candidateSyncLanClientQueryResult == null) {
                candidateSyncLanClientQueryResult = syncLanClientQueryResult;
            }

            continue;
        }

        if (syncLanClientQueryResult.getConnectionsCount() == 0) {
            return syncLanClientQueryResult;
        }

        int downloadRatePerConnection = syncLanClientQueryResult.getDownloadRate()
                / (syncLanClientQueryResult.getConnectionsCount() + 1);

        if (downloadRatePerConnection >= candidateDownloadRatePerConnection) {

            candidateDownloadRatePerConnection = downloadRatePerConnection;
            candidateSyncLanClientQueryResult = syncLanClientQueryResult;
        }
    }

    return candidateSyncLanClientQueryResult;
}

From source file:org.zenoss.zep.dao.impl.EventSummaryDaoImplIT.java

@Test
public void testSummaryMultiThreadDedup() throws ZepException, InterruptedException, ExecutionException {
    // Attempts to create the same event from multiple threads - verifies we get the appropriate de-duping behavior
    // for the count and that we are holding the lock on the database appropriately.
    int poolSize = 10;
    final CyclicBarrier barrier = new CyclicBarrier(poolSize);
    ExecutorService executorService = Executors.newFixedThreadPool(poolSize);
    ExecutorCompletionService<String> ecs = new ExecutorCompletionService<String>(executorService);
    final Event event = EventTestUtils.createSampleEvent();
    final EventPreCreateContext context = new EventPreCreateContextImpl();
    for (int i = 0; i < poolSize; i++) {
        ecs.submit(new Callable<String>() {
            @Override
            public String call() throws Exception {
                barrier.await();
                return eventSummaryDao.create(event, context);
            }
        });
    }
    String uuid = null;
    for (int i = 0; i < poolSize; i++) {
        String thisUuid = ecs.take().get();
        if (uuid == null) {
            assertNotNull(thisUuid);
            uuid = thisUuid;
        } else {
            assertEquals(uuid, thisUuid);
        }
    }
    // Now look up the event and make sure the count is equal to the number of submitted workers
    assertEquals(poolSize, this.eventSummaryDao.findByUuid(uuid).getCount());
}

From source file:com.netflix.curator.framework.recipes.queue.TestBoundedDistributedQueue.java

@SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
@Test
public void testMulti() throws Exception {
    final String PATH = "/queue";
    final int CLIENT_QTY = 4;
    final int MAX_ITEMS = 10;
    final int ADD_ITEMS = MAX_ITEMS * 100;
    final int SLOP_FACTOR = 2;

    final QueueConsumer<String> consumer = new QueueConsumer<String>() {
        @Override
        public void consumeMessage(String message) throws Exception {
            Thread.sleep(10);
        }

        @Override
        public void stateChanged(CuratorFramework client, ConnectionState newState) {
        }
    };

    final Timing timing = new Timing();
    final ExecutorService executor = Executors.newCachedThreadPool();
    ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<Void>(executor);

    final CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
            timing.session(), timing.connection(), new RetryOneTime(1));
    try {
        client.start();
        client.create().forPath(PATH);

        final CountDownLatch isWaitingLatch = new CountDownLatch(1);
        final AtomicBoolean isDone = new AtomicBoolean(false);
        final List<Integer> counts = new CopyOnWriteArrayList<Integer>();
        final Object lock = new Object();
        executor.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                Watcher watcher = new Watcher() {
                    @Override
                    public void process(WatchedEvent event) {
                        synchronized (lock) {
                            lock.notifyAll();
                        }
                    }
                };

                while (!Thread.currentThread().isInterrupted() && client.isStarted() && !isDone.get()) {
                    synchronized (lock) {
                        int size = client.getChildren().usingWatcher(watcher).forPath(PATH).size();
                        counts.add(size);
                        isWaitingLatch.countDown();
                        lock.wait();
                    }
                }
                return null;
            }
        });
        isWaitingLatch.await();

        for (int i = 0; i < CLIENT_QTY; ++i) {
            final int index = i;
            completionService.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    CuratorFramework client = null;
                    DistributedQueue<String> queue = null;

                    try {
                        client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(),
                                timing.connection(), new RetryOneTime(1));
                        client.start();
                        queue = QueueBuilder.builder(client, consumer, serializer, PATH).executor(executor)
                                .maxItems(MAX_ITEMS).putInBackground(false).lockPath("/locks").buildQueue();
                        queue.start();

                        for (int i = 0; i < ADD_ITEMS; ++i) {
                            queue.put("" + index + "-" + i);
                        }
                    } finally {
                        IOUtils.closeQuietly(queue);
                        IOUtils.closeQuietly(client);
                    }
                    return null;
                }
            });
        }

        for (int i = 0; i < CLIENT_QTY; ++i) {
            completionService.take().get();
        }

        isDone.set(true);
        synchronized (lock) {
            lock.notifyAll();
        }

        for (int count : counts) {
            Assert.assertTrue(counts.toString(), count <= (MAX_ITEMS * SLOP_FACTOR));
        }
    } finally {
        executor.shutdownNow();
        IOUtils.closeQuietly(client);
    }
}

From source file:org.ocelotds.integration.AbstractOcelotTest.java

/**
 *
 * @param <T>
 * @param nb
 * @param client
 * @param returnClass
 * @param ds
 * @param methodName
 * @param params
 * @return
 */
protected <T> Collection<T> testCallMultiMethodsInClient(int nb, final Client client,
        final Class<T> returnClass, final Class ds, final String methodName, final String... params) {
    ExecutorCompletionService<ResultMonitored<T>> executorCompletionService = new ExecutorCompletionService(
            managedExecutor);
    Collection<T> results = new ArrayList<>();
    long t0 = System.currentTimeMillis();
    for (int i = 0; i < nb; i++) {
        final int num = i;
        Callable<ResultMonitored<T>> task = new Callable() {
            @Override
            public ResultMonitored<T> call() {
                Client cl = client;
                if (cl == null) {
                    cl = getClient();
                }
                long t0 = System.currentTimeMillis();
                T result = getJava(returnClass,
                        (String) testRSCallWithoutResult(cl, ds, methodName, params).getResponse());
                ResultMonitored resultMonitored = new ResultMonitored(result, num);
                long t1 = System.currentTimeMillis();
                resultMonitored.setTime(t1 - t0);
                return resultMonitored;
            }
        };
        executorCompletionService.submit(task);
    }
    for (int i = 0; i < nb; i++) {
        try {
            Future<ResultMonitored<T>> fut = executorCompletionService.take();
            ResultMonitored<T> res = fut.get();
            //            System.out.println("Time of execution of service " + res.getNum() + ": " + res.getTime() + " ms");
            results.add(res.getResult());
        } catch (InterruptedException | ExecutionException e) {
        }
    }
    long t1 = System.currentTimeMillis();
    System.out.println("Time of execution of all services : " + (t1 - t0) + " ms");
    assertThat(results).hasSize(nb);
    return results;
}

From source file:com.alibaba.otter.node.etl.common.pipe.impl.http.archive.ArchiveBean.java

/**
 * Packs the given file data into the target archive file, retrieving file
 * contents in parallel via an ExecutorCompletionService.
 */
@SuppressWarnings("resource")
private boolean doPack(final File targetArchiveFile, List<FileData> fileDatas,
        final ArchiveRetriverCallback<FileData> callback) {
    // delete any existing target archive before packing
    if (true == targetArchiveFile.exists() && false == NioUtils.delete(targetArchiveFile, 3)) {
        throw new ArchiveException(
                String.format("[%s] exist and delete failed", targetArchiveFile.getAbsolutePath()));
    }

    boolean exist = false;
    ZipOutputStream zipOut = null;
    Set<String> entryNames = new HashSet<String>();
    BlockingQueue<Future<ArchiveEntry>> queue = new LinkedBlockingQueue<Future<ArchiveEntry>>(); // completion queue shared with the ExecutorCompletionService
    ExecutorCompletionService completionService = new ExecutorCompletionService(executor, queue);

    final File targetDir = new File(targetArchiveFile.getParentFile(),
            FilenameUtils.getBaseName(targetArchiveFile.getPath()));
    try {
        // create the temporary working directory
        FileUtils.forceMkdir(targetDir);

        zipOut = new ZipOutputStream(new BufferedOutputStream(new FileOutputStream(targetArchiveFile)));
        zipOut.setLevel(Deflater.BEST_SPEED);
        // submit a retrieval task for each file data entry
        for (final FileData fileData : fileDatas) {
            if (fileData.getEventType().isDelete()) {
                continue; // skip delete events
            }

            String namespace = fileData.getNameSpace();
            String path = fileData.getPath();
            boolean isLocal = StringUtils.isBlank(namespace);
            String entryName = null;
            if (true == isLocal) {
                entryName = FilenameUtils.getPath(path) + FilenameUtils.getName(path);
            } else {
                entryName = namespace + File.separator + path;
            }

            // skip duplicate entry names
            if (entryNames.contains(entryName) == false) {
                entryNames.add(entryName);
            } else {
                continue;
            }

            final String name = entryName;
            if (true == isLocal && !useLocalFileMutliThread) {
                // local files are retrieved synchronously
                queue.add(new DummyFuture(new ArchiveEntry(name, callback.retrive(fileData))));
            } else {
                completionService.submit(new Callable<ArchiveEntry>() {

                    public ArchiveEntry call() throws Exception {
                        // retrieve the file content and copy it into a temp file
                        InputStream input = null;
                        OutputStream output = null;
                        try {
                            input = callback.retrive(fileData);

                            if (input instanceof LazyFileInputStream) {
                                input = ((LazyFileInputStream) input).getInputSteam(); // unwrap the lazy stream
                            }

                            if (input != null) {
                                File tmp = new File(targetDir, name);
                                NioUtils.create(tmp.getParentFile(), false, 3); // ensure the parent directory exists
                                output = new FileOutputStream(tmp);
                                NioUtils.copy(input, output); // copy the content into the temp file
                                return new ArchiveEntry(name, new File(targetDir, name));
                            } else {
                                return new ArchiveEntry(name);
                            }
                        } finally {
                            IOUtils.closeQuietly(input);
                            IOUtils.closeQuietly(output);
                        }
                    }
                });
            }
        }

        for (int i = 0; i < entryNames.size(); i++) {
            // take each completed entry and write it into the zip
            ArchiveEntry input = null;
            InputStream stream = null;
            try {
                input = queue.take().get();
                if (input == null) {
                    continue;
                }

                stream = input.getStream();
                if (stream == null) {
                    continue;
                }

                if (stream instanceof LazyFileInputStream) {
                    stream = ((LazyFileInputStream) stream).getInputSteam(); // unwrap the lazy stream
                }

                exist = true;
                zipOut.putNextEntry(new ZipEntry(input.getName()));
                NioUtils.copy(stream, zipOut); // copy the entry content into the zip
                zipOut.closeEntry();
            } finally {
                IOUtils.closeQuietly(stream);
            }
        }

        if (exist) {
            zipOut.finish();
        }
    } catch (Exception e) {
        throw new ArchiveException(e);
    } finally {
        IOUtils.closeQuietly(zipOut);
        try {
            FileUtils.deleteDirectory(targetDir); // clean up the temporary directory
        } catch (IOException e) {
            // ignore
        }
    }

    return exist;
}

From source file:org.apache.solr.client.solrj.impl.BackupRequestLBHttpSolrClient.java

/**
 * Tries to query a live server from the list provided in Req. Servers in the
 * dead pool are skipped. If a request fails due to an IOException, the server
 * is moved to the dead pool for a certain period of time, or until a test
 * request on that server succeeds.
 *
 * If a request takes longer than defaultBackUpRequestDelay the request will be sent
 * to the next server in the list, this will continue until there is a
 * response, the server list is exhausted or the number of requests in flight
 * equals defaultMaximumConcurrentRequests.
 *
 * Servers are queried in the exact order given (except servers currently in
 * the dead pool are skipped). If no live servers from the provided list
 * remain to be tried, a number of previously skipped dead servers will be
 * tried. Req.getNumDeadServersToTry() controls how many dead servers will be
 * tried.
 *
 * If no live servers are found a SolrServerException is thrown.
 *
 * @param req
 *          contains both the request as well as the list of servers to query
 *
 * @return the result of the request
 */
@Override
public Rsp request(Req req) throws SolrServerException, IOException {
    SolrParams reqParams = req.getRequest().getParams();

    int maximumConcurrentRequests = reqParams == null ? defaultMaximumConcurrentRequests
            : reqParams.getInt(HttpBackupRequestShardHandlerFactory.MAX_CONCURRENT_REQUESTS,
                    defaultMaximumConcurrentRequests);

    // If we can't do anything useful, fall back to the stock solr code
    if (maximumConcurrentRequests < 0) {
        return super.request(req);
    }

    // if there's an explicit backupDelay in the request, use that
    int backupDelay = reqParams == null ? -1
            : reqParams.getInt(HttpBackupRequestShardHandlerFactory.BACKUP_REQUEST_DELAY, -1);

    BackupPercentile backupPercentile = defaultBackupPercentile;
    String backupPercentileParam = reqParams == null ? null
            : reqParams.get(HttpBackupRequestShardHandlerFactory.BACKUP_PERCENTILE);
    if (backupPercentileParam != null) {
        backupPercentile = getPercentile(backupPercentileParam);
    }

    String performanceClass = reqParams == null ? req.getRequest().getPath() // getPath is typically the request handler name
            : reqParams.get(HttpBackupRequestShardHandlerFactory.PERFORMANCE_CLASS,
                    reqParams.get(CommonParams.QT, req.getRequest().getPath())); // TODO: Is QT getting filtered out of the distrib requests?

    if (backupDelay < 0 && backupPercentile != BackupPercentile.NONE) {
        // no explicit backup delay, consider a backup percentile for the delay.
        double rate = getCachedRate(performanceClass);
        if (rate > 0.1) { // 1 request per 10 seconds minimum.
            backupDelay = getCachedPercentile(performanceClass, backupPercentile);
            log.debug("Using delay of {}ms for percentile {} for performanceClass {}", backupDelay,
                    backupPercentile.name(), performanceClass);
        } else {
            log.info(
                    "Insufficient query rate ({} per sec) to rely on latency percentiles for performanceClass {}",
                    rate, performanceClass);
        }
    } else {
        // not using a percentile to track backupDelay
        performanceClass = null;
    }

    if (backupDelay < 0) {
        backupDelay = defaultBackUpRequestDelay;
    }

    // If we are using a backupPercentile, we need to proceed regardless of backupDelay so we can record and build the percentile info.
    // If not, and we still don't have a backupDelay, fall back to stock solr code.
    if (backupPercentile == BackupPercentile.NONE && backupDelay < 0) {
        return super.request(req);
    }

    // Reaching this point with a backupDelay < 0 means backup requests are effectively disabled, but we're executing
    // this codepath anyway. Presumably in order to build latency percentile data for future requests.

    ArrayBlockingQueue<Future<RequestTaskState>> queue = new ArrayBlockingQueue<Future<RequestTaskState>>(
            maximumConcurrentRequests + 1);
    ExecutorCompletionService<RequestTaskState> executer = new ExecutorCompletionService<RequestTaskState>(
            threadPoolExecuter, queue);

    final int numDeadServersToTry = req.getNumDeadServersToTry();
    final boolean isUpdate = req.getRequest() instanceof IsUpdateRequest;
    List<ServerWrapper> skipped = null;
    int inFlight = 0;
    RequestTaskState returnedRsp = null;
    Exception ex = null;

    long timeAllowedNano = getTimeAllowedInNanos(req.getRequest());
    long timeOutTime = System.nanoTime() + timeAllowedNano;

    for (String serverStr : req.getServers()) {
        if (isTimeExceeded(timeAllowedNano, timeOutTime)) {
            break;
        }

        serverStr = normalize(serverStr);
        // if the server is currently a zombie, just skip to the next one
        ServerWrapper wrapper = zombieServers.get(serverStr);
        if (wrapper != null) {
            if (tryDeadServers && numDeadServersToTry > 0) {
                if (skipped == null) {
                    skipped = new ArrayList<>(numDeadServersToTry);
                    skipped.add(wrapper);
                } else if (skipped.size() < numDeadServersToTry) {
                    skipped.add(wrapper);
                }
            }

            continue;
        }
        HttpSolrClient client = makeSolrClient(serverStr);
        Callable<RequestTaskState> task = createRequestTask(client, req, isUpdate, false, null,
                performanceClass, inFlight > 0);
        executer.submit(task);
        inFlight++;

        returnedRsp = getResponseIfReady(executer, patience(inFlight, maximumConcurrentRequests, backupDelay));
        if (returnedRsp == null) {
            // null response signifies that the response took too long.
            log.debug("Server :{} did not respond before the backupRequestDelay time of {} elapsed",
                    client.baseUrl, backupDelay);
            continue;
        }
        inFlight--;
        if (returnedRsp.stateDescription == TaskState.ResponseReceived) {
            return returnedRsp.response;
        } else if (returnedRsp.stateDescription == TaskState.ServerException) {
            ex = returnedRsp.exception;
        } else if (returnedRsp.stateDescription == TaskState.RequestException) {
            throw new SolrServerException(returnedRsp.exception);
        }
    }

    // no response so try the zombie servers
    if (tryDeadServers && skipped != null) {
        if (returnedRsp == null || returnedRsp.stateDescription == TaskState.ServerException) {
            // try the servers we previously skipped
            for (ServerWrapper wrapper : skipped) {
                if (isTimeExceeded(timeAllowedNano, timeOutTime)) {
                    break;
                }
                Callable<RequestTaskState> task = createRequestTask(wrapper.client, req, isUpdate, true,
                        wrapper.getKey(), performanceClass, inFlight > 0);
                executer.submit(task);
                inFlight++;
                returnedRsp = getResponseIfReady(executer,
                        patience(inFlight, maximumConcurrentRequests, backupDelay));
                if (returnedRsp == null) {
                    log.debug("Server :{} did not respond before the backupRequestDelay time of {} elapsed",
                            wrapper.getKey(), backupDelay);
                    continue;
                }
                inFlight--;
                if (returnedRsp.stateDescription == TaskState.ResponseReceived) {
                    return returnedRsp.response;
                } else if (returnedRsp.stateDescription == TaskState.ServerException) {
                    ex = returnedRsp.exception;
                } else if (returnedRsp.stateDescription == TaskState.RequestException) {
                    throw new SolrServerException(returnedRsp.exception);
                }
            }
        }
    }

    // All current attempts could be slower than backUpRequestPause or returned
    // response could be from struggling server
    // so we need to wait until we get a good response or tasks all are
    // exhausted.
    if (returnedRsp == null || returnedRsp.stateDescription == TaskState.ServerException) {
        while (inFlight > 0) {
            returnedRsp = getResponseIfReady(executer, -1);
            inFlight--;
            if (returnedRsp.stateDescription == TaskState.ResponseReceived) {
                return returnedRsp.response;
            } else if (returnedRsp.stateDescription == TaskState.ServerException) {
                ex = returnedRsp.exception;
            } else if (returnedRsp.stateDescription == TaskState.RequestException) {
                throw new SolrServerException(returnedRsp.exception);
            }
        }
    }

    if (ex == null) {
        throw new SolrServerException("No live SolrServers available to handle this request");
    } else {
        throw new SolrServerException(
                "No live SolrServers available to handle this request:" + zombieServers.keySet(), ex);
    }
}

From source file:com.clustercontrol.http.factory.RunMonitorHttpScenario.java

/**
 * Runs the HTTP scenario monitoring.
 *
 * For each target facility a dedicated RunMonitorHttpScenario instance is
 * created and submitted to an ExecutorCompletionService; the results are then
 * collected, notified and stored as collected data.
 */
@Override
protected boolean runMonitorInfo()
        throws FacilityNotFound, MonitorNotFound, EntityExistsException, InvalidRole, HinemosUnknown {
    m_log.debug("runMonitorInfo()");

    m_now = new Date(HinemosTime.currentTimeMillis());

    m_priorityMap = new HashMap<Integer, ArrayList<String>>();
    m_priorityMap.put(Integer.valueOf(PriorityConstant.TYPE_INFO), new ArrayList<String>());
    m_priorityMap.put(Integer.valueOf(PriorityConstant.TYPE_WARNING), new ArrayList<String>());
    m_priorityMap.put(Integer.valueOf(PriorityConstant.TYPE_CRITICAL), new ArrayList<String>());
    m_priorityMap.put(Integer.valueOf(PriorityConstant.TYPE_UNKNOWN), new ArrayList<String>());

    try {
        // load the monitor configuration
        boolean run = this.setMonitorInfo(m_monitorTypeId, m_monitorId);
        if (!run) {
            // monitoring is not executed; nothing to do
            return true;
        }

        // load the judgement settings
        setJudgementInfo();

        // load the check settings
        setCheckInfo();

        ArrayList<String> facilityList = null;
        ExecutorCompletionService<ArrayList<MonitorRunResultInfo>> ecs = new ExecutorCompletionService<ArrayList<MonitorRunResultInfo>>(
                ParallelExecution.instance().getExecutorService());
        int taskCount = 0;

        if (!m_isMonitorJob) {
            // regular monitoring
            // resolve the target facility IDs for this monitor's scope
            facilityList = new RepositoryControllerBean().getExecTargetFacilityIdList(m_facilityId,
                    m_monitor.getOwnerRoleId());
            if (facilityList.size() == 0) {
                return true;
            }

            m_isNode = new RepositoryControllerBean().isNode(m_facilityId);

            // cache node info for each target facility
            nodeInfo = new HashMap<String, NodeInfo>();
            for (String facilityId : facilityList) {
                try {
                    synchronized (this) {
                        nodeInfo.put(facilityId, new RepositoryControllerBean().getNode(facilityId));
                    }
                } catch (FacilityNotFound e) {
                    // ignore facilities that no longer exist
                }
            }

            m_log.debug("monitor start : monitorTypeId : " + m_monitorTypeId + ", monitorId : " + m_monitorId);

            String facilityId = null;

            // submit one monitoring task per facility ID
            Iterator<String> itr = facilityList.iterator();
            while (itr.hasNext()) {
                facilityId = itr.next();
                if (facilityId != null && !"".equals(facilityId)) {

                    // create a dedicated RunMonitorHttpScenario per task so that
                    // concurrently running tasks do not share mutable state
                    RunMonitorHttpScenario runMonitor = new RunMonitorHttpScenario();

                    // copy the settings into the new instance
                    runMonitor.m_monitorTypeId = this.m_monitorTypeId;
                    runMonitor.m_monitorId = this.m_monitorId;
                    runMonitor.m_now = this.m_now;
                    runMonitor.m_priorityMap = this.m_priorityMap;
                    runMonitor.setMonitorInfo(runMonitor.m_monitorTypeId, runMonitor.m_monitorId);
                    runMonitor.setJudgementInfo();
                    runMonitor.setCheckInfo();
                    runMonitor.nodeInfo = this.nodeInfo;

                    ecs.submit(new CallableTaskHttpScenario(runMonitor, facilityId));
                    taskCount++;
                } else {
                    itr.remove();
                }
            }
        } else {
            // monitor job: the target must be the single specified facility
            facilityList = new RepositoryControllerBean().getExecTargetFacilityIdList(m_facilityId,
                    m_monitor.getOwnerRoleId());
            if (facilityList.size() != 1 || !facilityList.get(0).equals(m_facilityId)) {
                return true;
            }

            m_isNode = new RepositoryControllerBean().isNode(m_facilityId);

            // cache node info for the target facility
            nodeInfo = new HashMap<String, NodeInfo>();
            for (String facilityId : facilityList) {
                try {
                    synchronized (this) {
                        nodeInfo.put(facilityId, new RepositoryControllerBean().getNode(m_facilityId));
                    }
                } catch (FacilityNotFound e) {
                    // ignore facilities that no longer exist
                }
            }
            m_log.debug("monitor start : monitorTypeId : " + m_monitorTypeId + ", monitorId : " + m_monitorId);

            // create a dedicated RunMonitorHttpScenario for the task so that
            // it does not share mutable state with this instance
            RunMonitorHttpScenario runMonitor = new RunMonitorHttpScenario();

            // copy the settings into the new instance
            runMonitor.m_isMonitorJob = this.m_isMonitorJob;
            runMonitor.m_monitorTypeId = this.m_monitorTypeId;
            runMonitor.m_monitorId = this.m_monitorId;
            runMonitor.m_now = this.m_now;
            runMonitor.m_priorityMap = this.m_priorityMap;
            runMonitor.setMonitorInfo(runMonitor.m_monitorTypeId, runMonitor.m_monitorId);
            runMonitor.setJudgementInfo();
            runMonitor.setCheckInfo();
            runMonitor.nodeInfo = this.nodeInfo;

            ecs.submit(new CallableTaskHttpScenario(runMonitor, m_facilityId));
            taskCount++;
        }
        // collect the results of the submitted tasks
        ArrayList<MonitorRunResultInfo> resultList = null;

        m_log.debug("total start : monitorTypeId : " + m_monitorTypeId + ", monitorId : " + m_monitorId);

        // prepare the collected-data sample
        List<Sample> sampleList = new ArrayList<Sample>();
        Sample sample = null;
        if (m_monitor.getCollectorFlg()) {
            sample = new Sample(HinemosTime.getDateInstance(), m_monitor.getMonitorId());
        }

        for (int i = 0; i < taskCount; i++) {
            Future<ArrayList<MonitorRunResultInfo>> future = ecs.take();
            resultList = future.get(); // wait for the task to finish

            for (MonitorRunResultInfo result : resultList) {
                m_nodeDate = result.getNodeDate();

                String facilityId = result.getFacilityId();

                // notify (monitor) or record the result (monitor job)
                if (!m_isMonitorJob) {
                    if (result.getMonitorFlg()) {
                        notify(true, facilityId, result.getCheckResult(), new Date(m_nodeDate), result);
                    }
                } else {
                    m_monitorRunResultInfo = new MonitorRunResultInfo();
                    m_monitorRunResultInfo.setPriority(result.getPriority());
                    m_monitorRunResultInfo.setCheckResult(result.getCheckResult());
                    m_monitorRunResultInfo.setNodeDate(m_nodeDate);
                    m_monitorRunResultInfo
                            .setMessageOrg(makeJobOrgMessage(result.getMessageOrg(), result.getMessage()));
                }

                // record the collected value
                if (sample != null && result.getCollectorFlg()) {
                    int errorCode = -1;
                    if (result.isCollectorResult()) {
                        errorCode = CollectedDataErrorTypeConstant.NOT_ERROR;
                    } else {
                        errorCode = CollectedDataErrorTypeConstant.UNKNOWN;
                    }
                    sample.set(facilityId, m_monitor.getItemName(), result.getValue(), errorCode,
                            result.getDisplayName());
                }
            }
        }

        // store the collected samples
        if (sample != null) {
            sampleList.add(sample);
        }
        if (!sampleList.isEmpty()) {
            CollectDataUtil.put(sampleList);
        }
        m_log.debug("monitor end : monitorTypeId : " + m_monitorTypeId + ", monitorId : " + m_monitorId);

        return true;

    } catch (EntityExistsException e) {
        throw e;
    } catch (FacilityNotFound e) {
        throw e;
    } catch (InvalidRole e) {
        throw e;
    } catch (InterruptedException e) {
        m_log.info("runMonitorInfo() monitorTypeId = " + m_monitorTypeId + ", monitorId = " + m_monitorId
                + " : " + e.getClass().getSimpleName() + ", " + e.getMessage());
        return false;
    } catch (ExecutionException e) {
        m_log.info("runMonitorInfo() monitorTypeId = " + m_monitorTypeId + ", monitorId = " + m_monitorId
                + " : " + e.getClass().getSimpleName() + ", " + e.getMessage());
        return false;
    }
}

From source file:com.clustercontrol.monitor.run.factory.RunMonitor.java

/**
 * Runs the monitoring.
 * <p>
 * <ol>
 * <li>Loads the monitor configuration ({@link #setMonitorInfo(String, String)})</li>
 * <li>Loads the judgement settings ({@link #setJudgementInfo()})</li>
 * <li>Loads the check settings ({@link #setCheckInfo()})</li>
 * <li>Collects a value for each target facility ({@link #collect(String)})</li>
 * <li>Determines the check result ({@link #getCheckResult(boolean)})</li>
 * <li>Determines the priority ({@link #getPriority(int)})</li>
 * <li>Sends the notification ({@link #notify(boolean, String, int, Date)})</li>
 * </ol>
 *
 * @return <code>true</code> if the monitoring ran successfully
 * @throws FacilityNotFound
 * @throws MonitorNotFound
 * @throws InvalidRole
 * @throws EntityExistsException
 * @throws HinemosUnknown
 *
 * @see #setMonitorInfo(String, String)
 * @see #setJudgementInfo()
 * @see #setCheckInfo()
 * @see #collect(String)
 * @see #getCheckResult(boolean)
 * @see #getPriority(int)
 * @see #notify(boolean, String, int, Date)
 */
protected boolean runMonitorInfo()
        throws FacilityNotFound, MonitorNotFound, InvalidRole, EntityExistsException, HinemosUnknown {

    m_now = HinemosTime.getDateInstance();

    m_priorityMap = new HashMap<Integer, ArrayList<String>>();
    m_priorityMap.put(Integer.valueOf(PriorityConstant.TYPE_INFO), new ArrayList<String>());
    m_priorityMap.put(Integer.valueOf(PriorityConstant.TYPE_WARNING), new ArrayList<String>());
    m_priorityMap.put(Integer.valueOf(PriorityConstant.TYPE_CRITICAL), new ArrayList<String>());
    m_priorityMap.put(Integer.valueOf(PriorityConstant.TYPE_UNKNOWN), new ArrayList<String>());
    List<Sample> sampleList = new ArrayList<Sample>();
    List<StringSample> collectedSamples = new ArrayList<>();

    try {
        // load the monitor configuration
        boolean run = this.setMonitorInfo(m_monitorTypeId, m_monitorId);
        if (!run) {
            // monitoring is not executed; nothing to do
            return true;
        }

        // load the judgement settings
        setJudgementInfo();

        // load the check settings
        setCheckInfo();

        ArrayList<String> facilityList = null;
        ExecutorCompletionService<MonitorRunResultInfo> ecs = new ExecutorCompletionService<MonitorRunResultInfo>(
                ParallelExecution.instance().getExecutorService());
        int taskCount = 0;

        if (!m_isMonitorJob) {
            // regular monitoring
            // resolve the target facility IDs for this monitor's scope
            facilityList = new RepositoryControllerBean().getExecTargetFacilityIdList(m_facilityId,
                    m_monitor.getOwnerRoleId());
            if (facilityList.size() == 0) {
                return true;
            }

            m_isNode = new RepositoryControllerBean().isNode(m_facilityId);

            // cache node info for each target facility
            nodeInfo = new HashMap<String, NodeInfo>();
            for (String facilityId : facilityList) {
                try {
                    synchronized (this) {
                        nodeInfo.put(facilityId, new RepositoryControllerBean().getNode(facilityId));
                    }
                } catch (FacilityNotFound e) {
                    // ignore facilities that no longer exist
                }
            }

            m_log.debug("monitor start : monitorTypeId : " + m_monitorTypeId + ", monitorId : " + m_monitorId);

            // submit one monitoring task per facility ID
            Iterator<String> itr = facilityList.iterator();
            while (itr.hasNext()) {
                String facilityId = itr.next();
                if (facilityId != null && !"".equals(facilityId)) {

                    // create a dedicated RunMonitor instance per task so that
                    // concurrently running tasks do not share mutable state
                    RunMonitor runMonitor = this.createMonitorInstance();

                    // copy the settings into the new instance
                    runMonitor.m_monitorTypeId = this.m_monitorTypeId;
                    runMonitor.m_monitorId = this.m_monitorId;
                    runMonitor.m_now = this.m_now;
                    runMonitor.m_priorityMap = this.m_priorityMap;
                    runMonitor.setMonitorInfo(runMonitor.m_monitorTypeId, runMonitor.m_monitorId);
                    runMonitor.setJudgementInfo();
                    runMonitor.setCheckInfo();
                    runMonitor.nodeInfo = this.nodeInfo;

                    ecs.submit(new MonitorExecuteTask(runMonitor, facilityId));
                    taskCount++;

                    if (m_log.isDebugEnabled()) {
                        m_log.debug("starting monitor result : monitorId = " + m_monitorId + ", facilityId = "
                                + facilityId);
                    }
                } else {
                    facilityList.remove(facilityId);
                }
            }

        } else {
            // monitor job: the target must be the single specified facility
            facilityList = new RepositoryControllerBean().getExecTargetFacilityIdList(m_facilityId,
                    m_monitor.getOwnerRoleId());
            if (facilityList.size() != 1 || !facilityList.get(0).equals(m_facilityId)) {
                return true;
            }

            m_isNode = true;

            // cache node info for the target facility
            nodeInfo = new HashMap<String, NodeInfo>();
            try {
                synchronized (this) {
                    nodeInfo.put(m_facilityId, new RepositoryControllerBean().getNode(m_facilityId));
                }
            } catch (FacilityNotFound e) {
                // ignore facilities that no longer exist
            }
            m_log.debug("monitor start : monitorTypeId : " + m_monitorTypeId + ", monitorId : " + m_monitorId);

            // create a dedicated RunMonitor instance for the task so that
            // it does not share mutable state with this instance
            RunMonitor runMonitor = this.createMonitorInstance();

            // copy the settings into the new instance
            runMonitor.m_isMonitorJob = this.m_isMonitorJob;
            runMonitor.m_monitorTypeId = this.m_monitorTypeId;
            runMonitor.m_monitorId = this.m_monitorId;
            runMonitor.m_now = this.m_now;
            runMonitor.m_priorityMap = this.m_priorityMap;
            runMonitor.setMonitorInfo(runMonitor.m_monitorTypeId, runMonitor.m_monitorId);
            runMonitor.setJudgementInfo();
            runMonitor.setCheckInfo();
            runMonitor.nodeInfo = this.nodeInfo;
            runMonitor.m_prvData = this.m_prvData;

            ecs.submit(new MonitorExecuteTask(runMonitor, m_facilityId));
            taskCount++;

            if (m_log.isDebugEnabled()) {
                m_log.debug("starting monitor result : monitorId = " + m_monitorId + ", facilityId = "
                        + m_facilityId);
            }
        }

        // collect the results of the submitted tasks
        MonitorRunResultInfo result = new MonitorRunResultInfo();

        m_log.debug("total start : monitorTypeId : " + m_monitorTypeId + ", monitorId : " + m_monitorId);

        // prepare the collected-data samples
        StringSample strSample = null;
        Sample sample = null;
        if (m_monitor.getCollectorFlg()) {
            // string-type monitors collect string samples
            if (m_monitor.getMonitorType() == MonitorTypeConstant.TYPE_STRING
                    || m_monitor.getMonitorType() == MonitorTypeConstant.TYPE_TRAP) {
                strSample = new StringSample(HinemosTime.getDateInstance(), m_monitor.getMonitorId());
            }
            // numeric-type monitors collect numeric samples
            else {
                sample = new Sample(HinemosTime.getDateInstance(), m_monitor.getMonitorId());
            }
        }

        for (int i = 0; i < taskCount; i++) {
            Future<MonitorRunResultInfo> future = ecs.take();
            result = future.get(); // wait for the task to finish

            String facilityId = result.getFacilityId();
            m_nodeDate = result.getNodeDate();

            if (m_log.isDebugEnabled()) {
                m_log.debug("finished monitor : monitorId = " + m_monitorId + ", facilityId = " + facilityId);
            }

            // string-type monitors store the original message as the collected string
            if (m_monitor.getMonitorType() == MonitorTypeConstant.TYPE_STRING
                    || m_monitor.getMonitorType() == MonitorTypeConstant.TYPE_TRAP) {
                if (strSample != null) {
                    strSample.set(facilityId, m_monitor.getMonitorTypeId(), result.getMessageOrg());
                }
            }

            if (!m_isMonitorJob) {
                // notify when the process type requires it
                if (result.getProcessType().booleanValue()) {
                    // send a notification for the result
                    notify(true, facilityId, result.getCheckResult(), new Date(m_nodeDate), result);
                    // record the collected value
                    if (sample != null) {
                        int errorType = -1;
                        if (result.isCollectorResult()) {
                            errorType = CollectedDataErrorTypeConstant.NOT_ERROR;
                        } else {
                            errorType = CollectedDataErrorTypeConstant.UNKNOWN;
                        }
                        sample.set(facilityId, m_monitor.getItemName(), result.getValue(), errorType);
                    }
                }
            } else {
                m_monitorRunResultInfo = new MonitorRunResultInfo();
                m_monitorRunResultInfo.setPriority(result.getPriority());
                m_monitorRunResultInfo.setCheckResult(result.getCheckResult());
                m_monitorRunResultInfo.setNodeDate(m_nodeDate);
                m_monitorRunResultInfo
                        .setMessageOrg(makeJobOrgMessage(result.getMessageOrg(), result.getMessage()));
                m_monitorRunResultInfo.setCurData(result.getCurData());
            }
        }

        // store the collected samples
        if (m_monitor.getMonitorType() == MonitorTypeConstant.TYPE_STRING
                || m_monitor.getMonitorType() == MonitorTypeConstant.TYPE_TRAP) {
            // string samples
            if (strSample != null) {
                collectedSamples.add(strSample);
            }
            if (!collectedSamples.isEmpty()) {
                CollectStringDataUtil.store(collectedSamples);
            }
        } else {
            if (sample != null) {
                sampleList.add(sample);
            }
            if (!sampleList.isEmpty()) {
                CollectDataUtil.put(sampleList);
            }
        }

        m_log.debug("monitor end : monitorTypeId : " + m_monitorTypeId + ", monitorId : " + m_monitorId);

        return true;

    } catch (FacilityNotFound e) {
        throw e;
    } catch (InterruptedException e) {
        m_log.info("runMonitorInfo() monitorTypeId = " + m_monitorTypeId + ", monitorId  = " + m_monitorId
                + " : " + e.getClass().getSimpleName() + ", " + e.getMessage());
        return false;
    } catch (ExecutionException e) {
        m_log.info("runMonitorInfo() monitorTypeId = " + m_monitorTypeId + ", monitorId  = " + m_monitorId
                + " : " + e.getClass().getSimpleName() + ", " + e.getMessage());
        return false;
    }
}