Example usage for java.util.concurrent ThreadPoolExecutor getQueue

Introduction

This page collects example usages of java.util.concurrent.ThreadPoolExecutor.getQueue(), drawn from open-source projects.

Prototype

public BlockingQueue<Runnable> getQueue() 

Document

Returns the task queue used by this executor. Access to the task queue is intended primarily for debugging and monitoring. This queue may be in active use. Retrieving the task queue does not prevent queued tasks from executing.
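
Before the project examples, here is a minimal self-contained sketch (class name, pool sizes, and timings are illustrative, not taken from any project below) of what getQueue() exposes:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class GetQueueDemo {
    public static void main(String[] args) throws InterruptedException {
        // One worker and a small bounded queue, so submitted tasks pile up visibly.
        ThreadPoolExecutor executor = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS,
                new ArrayBlockingQueue<Runnable>(4));
        for (int i = 0; i < 4; i++) {
            executor.execute(() -> {
                try {
                    Thread.sleep(100);
                } catch (InterruptedException ignored) {
                }
            });
        }
        // getQueue() exposes the live work queue; size() counts tasks not yet running.
        System.out.println("Queued tasks: " + executor.getQueue().size());
        executor.shutdown();
        executor.awaitTermination(5, TimeUnit.SECONDS);
    }
}

Because the returned queue is the executor's live work queue, the examples below use it for two things: applying back-pressure on task submitters, and observing the executor's backlog.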

Usage

From source file:org.openmrs.module.openconceptlab.updater.Updater.java

private ThreadPoolExecutor newRunner() {
    return new ThreadPoolExecutor(0, THREAD_POOL_SIZE, 60, TimeUnit.SECONDS,
            new ArrayBlockingQueue<Runnable>(THREAD_POOL_SIZE / 2), new RejectedExecutionHandler() {

                @Override
                public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
                    try {
                        executor.getQueue().put(r);
                    } catch (InterruptedException e) {
                        throw new RejectedExecutionException("Work discarded", e);
                    }
                }
            });
}
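
The handler above converts rejection into back-pressure: instead of discarding work, it blocks the submitting thread with a blocking put() until the queue has room. A reusable sketch of the same idea (the class name is ours):

import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadPoolExecutor;

public class CallerBlocksPolicy implements RejectedExecutionHandler {
    @Override
    public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
        try {
            // Block the submitting thread until the work queue has room.
            executor.getQueue().put(r);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RejectedExecutionException("Interrupted while re-queuing task", e);
        }
    }
}

One caveat, ours rather than the source's: re-queuing through getQueue().put() bypasses the executor's shutdown check, so a task enqueued this way after shutdown() may never run.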

From source file:com.jkoolcloud.tnt4j.streams.custom.dirStream.DirStreamingManager.java

private void initialize() {
    executorService = new ThreadPoolExecutor(CORE_TREAD_POOL_SIZE, MAX_TREAD_POOL_SIZE, KEEP_ALIVE_TIME,
            TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(MAX_TREAD_POOL_SIZE * 2),
            new TNTInputStream.StreamsThreadFactory("DirStreamingManagerExecutorThread-")); // NON-NLS

    executorService.setRejectedExecutionHandler(new RejectedExecutionHandler() {
        @Override
        public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
            try {
                boolean added = executor.getQueue().offer(r, offerTimeout, TimeUnit.SECONDS);
                if (!added) {
                    LOGGER.log(OpLevel.WARNING,
                            StreamsResources.getString(StreamsResources.RESOURCE_BUNDLE_NAME,
                                    "TNTInputStream.tasks.buffer.limit"),
                            offerTimeout);
                    notifyStreamingJobRejected(r);
                }
            } catch (InterruptedException exc) {
                LOGGER.log(OpLevel.WARNING,
                        StreamsResources.getString(StreamsResources.RESOURCE_BUNDLE_NAME,
                                "DirStreamingManager.job.offer.interrupted"),
                        ((StreamingJob) r).getJobId(), exc);
            }
        }
    });

    dirWatchdog = new DirWatchdog(dirPath, DirWatchdog.getDefaultFilter(fileWildcardName));
    dirWatchdog.addObserverListener(new FileAlterationListenerAdaptor() {
        @Override
        public void onFileCreate(File file) {
            handleJobConfigCreate(file);
        }

        @Override
        public void onFileChange(File file) {
            handleJobConfigChange(file);
        }

        @Override
        public void onFileDelete(File file) {
            handleJobConfigRemoval(file);
        }
    });

    LOGGER.log(OpLevel.DEBUG, StreamsResources.getString(StreamsResources.RESOURCE_BUNDLE_NAME,
            "DirStreamingManager.dir.monitoring.started"), dirPath, fileWildcardName);
}
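
The rejection handler here is the same timed-offer pattern documented in full in the TNTInputStream example further down this page; a generic sketch of it follows that example. The rest of the method wires a DirWatchdog so that created, changed, and deleted job configuration files are dispatched to their respective handlers.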

From source file:org.geppetto.frontend.messaging.DefaultMessageSender.java

private void submitTask(ThreadPoolExecutor executor, Runnable task) throws InterruptedException {

    if (discardMessagesIfQueueFull) {
        executor.execute(task);
    } else {
        executor.getQueue().put(task);
    }
}
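
This method picks between two submission behaviors: execute() lets the pool's configured rejection policy deal with a full queue, while getQueue().put() blocks the caller until space frees up. A sketch of the same switch, with a caveat that is ours rather than the source's:

import java.util.concurrent.ThreadPoolExecutor;

final class QueueSubmit {
    /**
     * Mirrors the submitTask logic above. Caveat: getQueue().put() inserts the
     * task without ThreadPoolExecutor's logic for starting new worker threads,
     * so it only behaves as intended when core threads are already running
     * (e.g. after prestartAllCoreThreads()).
     */
    static void submit(ThreadPoolExecutor executor, Runnable task, boolean discardIfFull)
            throws InterruptedException {
        if (discardIfFull) {
            executor.execute(task); // rejection policy decides when the queue is full
        } else {
            executor.getQueue().put(task); // blocks until the queue has room
        }
    }
}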

From source file:org.jmangos.commons.threadpool.CommonThreadPoolManager.java

/**
 * @see org.jmangos.commons.threadpool.ThreadPoolManager#fillPoolStats(org.jmangos.commons.threadpool.model.ThreadPoolType)
 */
@Override
public PoolStats fillPoolStats(final ThreadPoolType poolType) {

    ThreadPoolExecutor executor = null;
    switch (poolType) {
    case INSTANT:
        executor = this.instantPool;
        break;
    case SCHEDULED:
    default:
        executor = this.scheduledPool;
        break;
    }
    final PoolStats stats = new PoolStats(poolType);
    stats.setActiveCount(executor.getActiveCount());
    stats.setCompletedTaskCount(executor.getCompletedTaskCount());
    stats.setCorePoolSize(executor.getCorePoolSize());
    stats.setLargestPoolSize(executor.getLargestPoolSize());
    stats.setMaximumPoolSize(executor.getMaximumPoolSize());
    stats.setPoolSize(executor.getPoolSize());
    stats.setQueueSize(executor.getQueue().size());
    stats.setTaskCount(executor.getTaskCount());
    return stats;
}
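
The snapshot combines getQueue().size() with the executor's other counters; note that, per the ThreadPoolExecutor javadoc, several of these values are approximations that may change during computation. A dependency-free sketch that gathers the same fields (PoolStats and ThreadPoolType are project classes, so this version just formats a string):

import java.util.concurrent.ThreadPoolExecutor;

final class PoolStatsPrinter {
    static String snapshot(ThreadPoolExecutor executor) {
        return String.format(
                "active=%d completed=%d core=%d largest=%d max=%d pool=%d queued=%d total=%d",
                executor.getActiveCount(), executor.getCompletedTaskCount(),
                executor.getCorePoolSize(), executor.getLargestPoolSize(),
                executor.getMaximumPoolSize(), executor.getPoolSize(),
                executor.getQueue().size(), executor.getTaskCount());
    }
}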

From source file:org.batoo.jpa.benchmark.BenchmarkTest.java

private void waitUntilFinish(ThreadPoolExecutor executor) {
    final BlockingQueue<Runnable> workQueue = executor.getQueue();
    try {
        final long started = System.currentTimeMillis();

        int lastToGo = workQueue.size();

        final int total = workQueue.size();
        int performed = 0;

        int maxStatusMessageLength = 0;
        while (!workQueue.isEmpty()) {
            final float doneNow = lastToGo - workQueue.size();
            performed += doneNow;

            final float elapsed = (System.currentTimeMillis() - started) / 1000;

            lastToGo = workQueue.size();

            if (performed > 0) {
                final float throughput = performed / elapsed;
                final float eta = ((elapsed * total) / performed) - elapsed;

                final float percentDone = (100 * (float) lastToGo) / total;
                final int gaugeDone = (int) ((100 - percentDone) / 5);
                final String gauge = "[" + StringUtils.repeat("=", gaugeDone)
                        + StringUtils.repeat("-", 20 - gaugeDone) + "]";

                final String sampling = this.profilingQueue.size() > 0
                        ? MessageFormat.format(" | Samples {0}", this.profilingQueue.size())
                        : "";

                if ((maxStatusMessageLength != 0) || (eta > 5)) {
                    String statusMessage = MessageFormat.format(
                            "\r{4} %{5,number,00.00} | ETA {2} | LAST TPS {0} ops / sec | AVG TPS {1,number,#.0} | LEFT {3}{6}", //
                            doneNow, throughput, this.etaToString((int) eta), workQueue.size(), gauge,
                            percentDone, sampling);

                    maxStatusMessageLength = Math.max(statusMessage.length(), maxStatusMessageLength);
                    statusMessage = StringUtils.leftPad(statusMessage,
                            maxStatusMessageLength - statusMessage.length());
                    System.out.print(statusMessage);
                }
            }

            if (elapsed > BenchmarkTest.MAX_TEST_TIME) {
                throw new IllegalStateException("Max allowed test time exceeded");
            }

            Thread.sleep(1000);
        }

        if (maxStatusMessageLength > 0) {
            System.out.print("\r" + StringUtils.repeat(" ", maxStatusMessageLength) + "\r");
        }

        executor.shutdown();

        if (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
            BenchmarkTest.LOG.warn("Forcefully shutting down the thread pool");

            executor.shutdownNow();
        }

        BenchmarkTest.LOG.warn("Iterations completed");
    } catch (final InterruptedException e) {
        throw new RuntimeException(e);
    }
}
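
One detail this method gets right: an empty work queue only means no tasks are waiting; previously dequeued tasks may still be executing, which is why the polling loop is followed by shutdown() and awaitTermination(). That completion idiom on its own, as a minimal sketch:

import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

final class PoolShutdown {
    /** Waits for queued and running tasks to finish, forcing shutdown on timeout. */
    static void drainAndStop(ThreadPoolExecutor executor, long timeoutSeconds)
            throws InterruptedException {
        executor.shutdown(); // stop accepting new tasks; queued tasks still run
        if (!executor.awaitTermination(timeoutSeconds, TimeUnit.SECONDS)) {
            executor.shutdownNow(); // interrupt workers that are still running
        }
    }
}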

From source file:org.openhab.io.transport.modbus.internal.ModbusManagerImpl.java

private void logTaskQueueInfo() {
    synchronized (pollMonitorLogger) {
        ScheduledExecutorService scheduledThreadPoolExecutor = this.scheduledThreadPoolExecutor;
        if (scheduledThreadPoolExecutor == null) {
            return;
        }
        // Avoid excessive spamming with queue monitor when many tasks are executed
        if (System.currentTimeMillis() - lastQueueMonitorLog < MONITOR_QUEUE_INTERVAL_MILLIS) {
            return;
        }
        lastQueueMonitorLog = System.currentTimeMillis();
        pollMonitorLogger.trace("<POLL MONITOR>");
        this.scheduledPollTasks.forEach((task, future) -> {
            pollMonitorLogger.trace(
                    "POLL MONITOR: scheduled poll task. FC: {}, start {}, length {}, done: {}, canceled: {}, delay: {}. Full task {}",
                    task.getRequest().getFunctionCode(), task.getRequest().getReference(),
                    task.getRequest().getDataLength(), future.isDone(), future.isCancelled(),
                    future.getDelay(TimeUnit.MILLISECONDS), task);
        });
        if (scheduledThreadPoolExecutor instanceof ThreadPoolExecutor) {
            ThreadPoolExecutor executor = ((ThreadPoolExecutor) scheduledThreadPoolExecutor);
            pollMonitorLogger.trace(
                    "POLL MONITOR: scheduledThreadPoolExecutor queue size: {}, remaining space {}. Active threads {}",
                    executor.getQueue().size(), executor.getQueue().remainingCapacity(),
                    executor.getActiveCount());
            if (executor.getQueue().size() >= WARN_QUEUE_SIZE) {
                pollMonitorLogger.warn(
                        "Many ({}) tasks queued in scheduledThreadPoolExecutor! This might be sign of bad design or bug in the binding code.",
                        executor.getQueue().size());
            }
        }

        pollMonitorLogger.trace("</POLL MONITOR>");
    }
}
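
A generic version of this kind of queue monitor, stripped of the binding-specific logging (the names and the warn threshold are illustrative, and System.out stands in for a real logger):

import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

final class QueueMonitor {
    /** Samples the monitored executor's queue every 10 seconds. */
    static ScheduledFuture<?> start(ScheduledExecutorService scheduler,
                                    ThreadPoolExecutor monitored, int warnThreshold) {
        return scheduler.scheduleAtFixedRate(() -> {
            int queued = monitored.getQueue().size();
            System.out.printf("queue size: %d, remaining space: %d, active threads: %d%n",
                    queued, monitored.getQueue().remainingCapacity(), monitored.getActiveCount());
            if (queued >= warnThreshold) {
                System.err.printf("Warning: %d tasks queued%n", queued);
            }
        }, 0, 10, TimeUnit.SECONDS);
    }
}

Sampling getQueue().size() is cheap for the common queue implementations, though BlockingQueue itself makes no complexity guarantee for size().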

From source file:com.jkoolcloud.tnt4j.streams.inputs.TNTInputStream.java

/**
 * Creates a thread pool executor service for a given number of threads with a bounded task queue - the queue
 * size is 2x{@code threadsQty}. When the queue is full, new tasks are offered to the queue using the defined
 * offer timeout. If a task can't be put into the queue within this time, it is skipped and a warning is logged.
 * Thus memory use does not grow drastically if consumers can't keep up with the pace of producers filling the
 * queue, making producers synchronize with consumers.
 *
 * @param threadsQty
 *            the number of threads in the pool
 * @param offerTimeout
 *            how long to wait before giving up on offering task to queue
 *
 * @return the newly created thread pool executor
 *
 * @see ThreadPoolExecutor#ThreadPoolExecutor(int, int, long, TimeUnit, BlockingQueue, ThreadFactory)
 */
private ExecutorService getBoundedExecutorService(int threadsQty, final int offerTimeout) {
    StreamsThreadFactory stf = new StreamsThreadFactory("StreamBoundedExecutorThread-"); // NON-NLS
    stf.addThreadFactoryListener(new StreamsThreadFactoryListener());

    ThreadPoolExecutor tpe = new ThreadPoolExecutor(threadsQty, threadsQty, 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(threadsQty * 2), stf);

    tpe.setRejectedExecutionHandler(new RejectedExecutionHandler() {
        @Override
        public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
            try {
                boolean added = executor.getQueue().offer(r, offerTimeout, TimeUnit.SECONDS);
                if (!added) {
                    logger().log(OpLevel.WARNING,
                            StreamsResources.getString(StreamsResources.RESOURCE_BUNDLE_NAME,
                                    "TNTInputStream.tasks.buffer.limit"),
                            offerTimeout);
                    notifyStreamTaskRejected(r);
                }
            } catch (InterruptedException exc) {
                halt(true);
            }
        }
    });

    return tpe;
}
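
The essence of this recipe, reduced to a dependency-free sketch (the class name is ours, and the dropped-task handling is simplified to a stderr message where the original logs and notifies listeners):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

final class BoundedExecutors {
    /** Bounded pool whose submitters wait up to offerTimeout seconds, then drop the task. */
    static ThreadPoolExecutor newBounded(int threads, long offerTimeout) {
        ThreadPoolExecutor tpe = new ThreadPoolExecutor(threads, threads, 0L, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<Runnable>(threads * 2));
        tpe.setRejectedExecutionHandler((r, executor) -> {
            try {
                boolean added = executor.getQueue().offer(r, offerTimeout, TimeUnit.SECONDS);
                if (!added) {
                    System.err.println("Task dropped after waiting " + offerTimeout + "s");
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        return tpe;
    }
}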

From source file:com.emc.ecs.smart.SmartUploader.java

/**
 * Performs a segmented upload to ECS using the SmartClient and the ECS byte range PUT extensions.  The upload
 * URL will be parsed and the hostname will be enumerated in DNS to see if it contains multiple 'A' records.  If
 * so, those will be used to populate the software load balancer.
 */
private void doSegmentedUpload() {
    try {
        long start = System.currentTimeMillis();
        fileSize = Files.size(fileToUpload);

        // Verify md5Save file path is legit.
        PrintWriter pw = null;
        try {
            if (saveMD5 != null) {
                pw = new PrintWriter(saveMD5);
            }
        } catch (IOException e) {
            System.err.println("Invalid path specified to save local file MD5: " + e.getMessage());
            System.exit(3);
        }

        // Figure out which segment size to use.
        if (segmentSize == -1) {
            if (fileSize >= LARGE_SEGMENT) {
                segmentSize = LARGE_SEGMENT;
            } else {
                segmentSize = SMALL_SEGMENT;
            }
        }

        // Expand the host
        String host = uploadUrl.getHost();
        InetAddress addr = InetAddress.getByName(host);
        List<String> ipAddresses = new ArrayList<>();
        try {
            ipAddresses = getIPAddresses(host);
        } catch (NamingException e) {
            LogMF.warn(l4j, "Could not resolve hostname: {0}: {1}.  Using as-is.", host, e);
            ipAddresses.add(host);
        }
        LogMF.info(l4j, "Host {0} resolves to {1}", host, ipAddresses);

        // Initialize the SmartClient
        SmartConfig smartConfig = new SmartConfig(ipAddresses.toArray(new String[ipAddresses.size()]));
        // We don't need to update the host list
        smartConfig.setHostUpdateEnabled(false);

        // Configure the load balancer
        Client pingClient = SmartClientFactory.createStandardClient(smartConfig,
                new URLConnectionClientHandler());
        pingClient.addFilter(new HostnameVerifierFilter(uploadUrl.getHost()));
        LoadBalancer loadBalancer = smartConfig.getLoadBalancer();
        EcsHostListProvider hostListProvider = new EcsHostListProvider(pingClient, loadBalancer, null, null);
        hostListProvider.setProtocol(uploadUrl.getProtocol());
        if (uploadUrl.getPort() != -1) {
            hostListProvider.setPort(uploadUrl.getPort());
        }
        smartConfig.setHostListProvider(hostListProvider);

        client = SmartClientFactory.createSmartClient(smartConfig, new URLConnectionClientHandler());

        // Add our retry handler
        client.addFilter(new HostnameVerifierFilter(uploadUrl.getHost()));
        client.addFilter(new MD5CheckFilter());
        client.addFilter(new RetryFilter(retryDelay, retryCount));

        // Create a FileChannel for the upload
        fileChannel = new RandomAccessFile(fileToUpload.toFile(), "r").getChannel();

        System.out.printf("Starting upload at %s\n", new Date().toString());
        // The first upload is done without a range to create the initial object.
        doUploadSegment(0);

        // See how many more segments we have
        int segmentCount = (int) (fileSize / (long) segmentSize);
        long remainder = fileSize % segmentSize;
        if (remainder != 0) {
            // Additional bytes at end
            segmentCount++;
        }

        if (segmentCount > 1) {
            // Build a thread pool to upload the segments.
            ThreadPoolExecutor executor = new ThreadPoolExecutor(threadCount, threadCount, 15, TimeUnit.SECONDS,
                    new LinkedBlockingQueue<Runnable>());

            for (int i = 1; i < segmentCount; i++) {
                executor.execute(new SegmentUpload(i));
            }

            // Wait for completion
            while (true) {
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
                if (failed) {
                    // Abort!
                    l4j.warn("Error detected, terminating upload");
                    executor.shutdownNow();
                    break;
                }
                if (executor.getQueue().isEmpty()) {
                    l4j.info("All tasks complete, awaiting shutdown");
                    try {
                        executor.shutdown();
                        executor.awaitTermination(1, TimeUnit.MINUTES);
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                    break;
                }
            }
        }

        // Done!
        long elapsed = System.currentTimeMillis() - start;
        printRate(fileSize, elapsed);

        // Release buffers
        LogMF.debug(l4j, "buffer count at end: {0}", buffers.size());
        buffers = new LinkedList<>();
        System.out.printf("\nUpload completed at %s\n", new Date().toString());

        // Verify
        if (verifyUrl != null) {

            System.out.printf("starting remote MD5...\n");

            String objectMD5 = computeObjectMD5();
            System.out.printf("Object MD5 = %s\n", objectMD5);

            System.out.printf("Remote MD5 complete at %s\nStarting local MD5\n", new Date().toString());

            // At this point we don't need the clients anymore.
            l4j.debug("Shutting down SmartClient");
            SmartClientFactory.destroy(client);
            SmartClientFactory.destroy(pingClient);

            String fileMD5 = standardChecksum ? computeFileMD5Standard() : computeFileMD5();
            System.out.printf("\nFile on disk MD5 = %s\n", fileMD5);
            System.out.printf("Local MD5 complete at %s\n", new Date().toString());
            if (!fileMD5.equals(objectMD5)) {
                System.err.printf("ERROR: file MD5 does not match object MD5! %s != %s", fileMD5, objectMD5);
                System.exit(10);
            }

            if (saveMD5 != null && pw != null) {
                pw.write(fileMD5);
                pw.close();
            }

            System.out.printf("\nObject verification passed!\n");
        }

    } catch (IOException e) {
        e.printStackTrace();
        System.exit(4);
    }
}
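
The completion wait here polls getQueue().isEmpty() once per second and then relies on shutdown() plus awaitTermination() to let in-flight segments finish. For a fixed, known number of tasks, a CountDownLatch is a more direct signal; a sketch under that assumption (the class name and the IntConsumer callback are illustrative):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.function.IntConsumer;

final class SegmentWait {
    static void runSegments(ThreadPoolExecutor executor, int segmentCount,
                            IntConsumer uploadSegment) throws InterruptedException {
        CountDownLatch done = new CountDownLatch(segmentCount - 1);
        for (int i = 1; i < segmentCount; i++) {
            final int segment = i;
            executor.execute(() -> {
                try {
                    uploadSegment.accept(segment);
                } finally {
                    done.countDown(); // count even on failure so the wait can't hang
                }
            });
        }
        done.await(); // no polling; returns when every segment task has finished
    }
}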

From source file:org.apache.bookkeeper.common.util.OrderedExecutor.java

/**
 * Constructs a safe executor.
 *
 * @param numThreads
 *            - number of threads
 * @param baseName
 *            - base name of executor threads
 * @param threadFactory
 *            - for constructing threads
 * @param statsLogger
 *            - for reporting executor stats
 * @param traceTaskExecution
 *            - should we stat task execution
 * @param preserveMdcForTaskExecution
 *            - should we preserve MDC for task execution
 * @param warnTimeMicroSec
 *            - log long task exec warning after this interval
 * @param maxTasksInQueue
 *            - maximum items allowed in a thread queue. -1 for no limit
 */
protected OrderedExecutor(String baseName, int numThreads, ThreadFactory threadFactory, StatsLogger statsLogger,
        boolean traceTaskExecution, boolean preserveMdcForTaskExecution, long warnTimeMicroSec,
        int maxTasksInQueue, boolean enableBusyWait) {
    checkArgument(numThreads > 0);
    checkArgument(!StringUtils.isBlank(baseName));

    this.maxTasksInQueue = maxTasksInQueue;
    this.warnTimeMicroSec = warnTimeMicroSec;
    this.enableBusyWait = enableBusyWait;
    name = baseName;
    threads = new ExecutorService[numThreads];
    threadIds = new long[numThreads];
    for (int i = 0; i < numThreads; i++) {
        ThreadPoolExecutor thread = createSingleThreadExecutor(new ThreadFactoryBuilder()
                .setNameFormat(name + "-" + getClass().getSimpleName() + "-" + i + "-%d")
                .setThreadFactory(threadFactory).build());

        threads[i] = addExecutorDecorators(getBoundedExecutor(thread));

        final int idx = i;
        try {
            threads[idx].submit(() -> {
                threadIds[idx] = Thread.currentThread().getId();

                if (enableBusyWait) {
                    // Try to acquire 1 CPU core to the executor thread. If it fails we
                    // are just logging the error and continuing, falling back to
                    // non-isolated CPUs.
                    try {
                        CpuAffinity.acquireCore();
                    } catch (Throwable t) {
                        log.warn("Failed to acquire CPU core for thread {}", Thread.currentThread().getName(),
                                t.getMessage(), t);
                    }
                }
            }).get();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException("Couldn't start thread " + i, e);
        } catch (ExecutionException e) {
            throw new RuntimeException("Couldn't start thread " + i, e);
        }

        // Register gauges
        statsLogger.registerGauge(String.format("%s-queue-%d", name, idx), new Gauge<Number>() {
            @Override
            public Number getDefaultValue() {
                return 0;
            }

            @Override
            public Number getSample() {
                return thread.getQueue().size();
            }
        });
        statsLogger.registerGauge(String.format("%s-completed-tasks-%d", name, idx), new Gauge<Number>() {
            @Override
            public Number getDefaultValue() {
                return 0;
            }

            @Override
            public Number getSample() {
                return thread.getCompletedTaskCount();
            }
        });
        statsLogger.registerGauge(String.format("%s-total-tasks-%d", name, idx), new Gauge<Number>() {
            @Override
            public Number getDefaultValue() {
                return 0;
            }

            @Override
            public Number getSample() {
                return thread.getTaskCount();
            }
        });
    }

    // Stats
    this.taskExecutionStats = statsLogger.scope(name).getOpStatsLogger("task_execution");
    this.taskPendingStats = statsLogger.scope(name).getOpStatsLogger("task_queued");
    this.traceTaskExecution = traceTaskExecution;
    this.preserveMdcForTaskExecution = preserveMdcForTaskExecution;
}
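
Registering getQueue().size() as a gauge gives a per-thread backlog metric that is sampled lazily, only when stats are read. The same three gauges in a library-free form, using Supplier in place of the Gauge interface:

import java.util.concurrent.ThreadPoolExecutor;
import java.util.function.Supplier;

final class ExecutorGauges {
    static Supplier<Number> queueSize(ThreadPoolExecutor executor) {
        return () -> executor.getQueue().size();
    }

    static Supplier<Number> completedTasks(ThreadPoolExecutor executor) {
        return executor::getCompletedTaskCount;
    }

    static Supplier<Number> totalTasks(ThreadPoolExecutor executor) {
        return executor::getTaskCount;
    }
}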

From source file:org.entrystore.harvesting.oaipmh.jobs.ListRecordsJob.java

/**
 * @param out
 * @throws Exception
 */
synchronized public void run(OutputStream out, JobExecutionContext jobContext) throws Exception {
    JobDataMap dataMap = jobContext.getJobDetail().getJobDataMap();
    RepositoryManagerImpl rm = (RepositoryManagerImpl) dataMap.get("rm");
    ContextManager cm = rm.getContextManager();
    final PrincipalManager pm = rm.getPrincipalManager();

    initXpath();

    URI contextURI = (URI) dataMap.get("contextURI");
    String contextId = contextURI.toString().substring(contextURI.toString().lastIndexOf("/") + 1);
    final Context context = cm.getContext(contextId);
    final String metadataType = dataMap.getString("metadataType");
    final String target = dataMap.getString("target");
    String from = dataMap.getString("from");
    String until = dataMap.getString("until");
    String set = dataMap.getString("set");
    replaceMetadata = "replace"
            .equalsIgnoreCase(rm.getConfiguration().getString(Settings.HARVESTER_OAI_METADATA_POLICY, "skip"));
    boolean fromAutoDetect = "on"
            .equalsIgnoreCase(rm.getConfiguration().getString(Settings.HARVESTER_OAI_FROM_AUTO_DETECT, "on"));

    if (from == null && fromAutoDetect) {
        Date latestEntry = null;
        Set<URI> allEntries = context.getEntries();
        for (URI uri : allEntries) {
            Entry entry = context.getByEntryURI(uri);
            if (entry != null && (EntryType.Reference.equals(entry.getEntryType())
                    || EntryType.LinkReference.equals(entry.getEntryType()))) {
                Date cachedDate = entry.getExternalMetadataCacheDate();
                if (cachedDate != null) {
                    if (latestEntry == null || cachedDate.after(latestEntry)) {
                        latestEntry = cachedDate;
                    }
                }
            }
        }
        if (latestEntry != null) {
            from = new SimpleDateFormat("yyyy-MM-dd").format(latestEntry);
        }
    }

    log.info("OAI-PMH metadataType: " + metadataType);
    log.info("OAI-PMH target: " + target);
    log.info("OAI-PMH from: " + from);
    log.info("OAI-PMH until: " + until);
    log.info("OAI-PMH set: " + set);

    // Get the listrecord from the OAI-PMH target
    ListRecords listRecords = null;
    try {
        listRecords = new ListRecords(target, from, until, set, metadataType);
    } catch (UnknownHostException e) {
        // TODO: handle exception, write it into the RDF tree
        log.info("UnknownHostException since the target is unknown; the harvester will be deleted");
        jobContext.getScheduler().interrupt(jobContext.getJobDetail().getName(),
                jobContext.getJobDetail().getGroup());
        return;
    }

    ThreadPoolExecutor exService = null;
    if ("on".equalsIgnoreCase(rm.getConfiguration().getString(Settings.HARVESTER_OAI_MULTITHREADED, "off"))) {
        int cpuCount = Runtime.getRuntime().availableProcessors();
        if (cpuCount == 1) {
            log.info("Multi-threaded harvesting activated, but only one CPU found; continuing single-threaded");
        } else {
            int threadCount = cpuCount + 1;
            log.info("Creating executor for multi-threaded harvesting, using thread pool of " + threadCount
                    + " (available CPUs + 1) threads");
            exService = (ThreadPoolExecutor) Executors.newFixedThreadPool(threadCount);
        }
    } else {
        log.info("Performing single-threaded harvesting");
    }

    Date before = new Date();
    int j = 0;
    while (listRecords != null) {
        NodeList errors = listRecords.getErrors();
        if (errors != null && errors.getLength() > 0) {
            log.error("Found errors");
            int length = errors.getLength();
            for (int i = 0; i < length; ++i) {
                Node item = errors.item(i);
                System.out.println(item);
            }
            log.error("Error record: " + listRecords.toString());
            break;
        }

        //out.write(listRecords.toString().getBytes()); 

        // Get the <Root>-element
        final Element el = listRecords.getDocument().getDocumentElement();
        if (el.getElementsByTagName("ListRecords").getLength() == 0) {
            log.error("No ListRecords");
            throw new Exception("No ListRecords");
        }

        // Get the <ListRecords> element
        Element listRecordsElement = (Element) el.getElementsByTagName("ListRecords").item(0);
        NodeList recordList = listRecordsElement.getElementsByTagName("record");
        // old NodeList recordList = getRecords(listRecordsElement);

        // Create entries from the XML
        for (int i = 0; i < recordList.getLength(); i++) {
            final Element recordElement = (Element) recordList.item(i).cloneNode(true);

            if (exService == null) {
                try {
                    createEntry(context, recordElement, target, metadataType);
                } catch (XPathExpressionException e) {
                    log.error(e.getMessage());
                }
            } else {
                exService.execute(new Runnable() {
                    public void run() {
                        try {
                            pm.setAuthenticatedUserURI(pm.getAdminUser().getURI());
                            createEntry(context, recordElement, target, metadataType);
                        } catch (XPathExpressionException e) {
                            log.error(e.getMessage());
                        }
                    }
                });
                // not sure whether the following is necessary
                while (exService.getQueue().size() > 250) {
                    log.info("Waiting before submitting additional Runnables, current queue size is "
                            + exService.getQueue().size());
                    Thread.sleep(50);
                    log.info("Continuing, the current queue size is " + exService.getQueue().size());
                }
            }
            log.debug("total index: " + j++);
        }

        // Check if there is any resumption token
        String resumptionToken = listRecords.getResumptionToken();
        if (resumptionToken == null || resumptionToken.length() == 0) {
            listRecords = null;
        } else {
            log.info("Got resumption token");
            listRecords = new ListRecords(target, resumptionToken);
        }
    }

    if (exService != null) {
        while (exService.getQueue().size() > 0) {
            log.info("Runnables left in queue: " + exService.getQueue().size() + ", waiting");
            Thread.sleep(2000);
        }
        exService.shutdown();
    }

    log.info("OAI-PMH harvester done with execution");
    long diff = new Date().getTime() - before.getTime();
    if (j > 0) {
        log.info("Harvesting of " + j + " records took " + diff + " ms (average of " + diff / j
                + " ms per record)");
    }
}
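
The throttling loop above caps the backlog by sleep-polling getQueue().size() between submissions. A bounded queue with a blocking rejection handler (as in the first example on this page) achieves the same back-pressure without polling; so does a semaphore sized to the maximum number of pending tasks, sketched here (the class name is ours):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.Semaphore;

final class ThrottledSubmit {
    private final ExecutorService executor;
    private final Semaphore slots;

    ThrottledSubmit(ExecutorService executor, int maxPending) {
        this.executor = executor;
        this.slots = new Semaphore(maxPending);
    }

    /** Blocks when maxPending tasks are queued or running; no sleep-polling. */
    void submit(Runnable task) throws InterruptedException {
        slots.acquire();
        try {
            executor.execute(() -> {
                try {
                    task.run();
                } finally {
                    slots.release();
                }
            });
        } catch (RejectedExecutionException e) {
            slots.release();
            throw e;
        }
    }
}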