Example usage for java.util.concurrent ThreadPoolExecutor ThreadPoolExecutor

Introduction

On this page you can find example usages of the java.util.concurrent.ThreadPoolExecutor constructor.

Prototype

public ThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
        BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory, RejectedExecutionHandler handler) 

Document

Creates a new ThreadPoolExecutor with the given initial parameters.
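
For orientation, here is a minimal sketch of a call to this constructor; the pool sizes, queue capacity, factory, and policy below are illustrative assumptions, not taken from the examples that follow:

ExecutorService pool = new ThreadPoolExecutor(2, // corePoolSize: threads kept alive even when idle
        4, // maximumPoolSize: hard upper bound on threads
        60L, TimeUnit.SECONDS, // keepAliveTime: idle timeout for threads above the core size
        new LinkedBlockingQueue<Runnable>(100), // workQueue: buffers tasks before execution
        Executors.defaultThreadFactory(), // threadFactory: used to create worker threads
        new ThreadPoolExecutor.AbortPolicy()); // handler: rejects tasks once pool and queue are saturated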

Usage

From source file:org.pentaho.reporting.engine.classic.core.testsupport.gold.GoldTestBase.java

protected void runAllGoldReportsInParallel(int threads) throws Exception {
    initializeTestEnvironment();

    final List<Throwable> errors = Collections.synchronizedList(new ArrayList<Throwable>());

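    // Fixed-size pool of 'threads' workers over an unbounded queue; with an unbounded queue the AbortPolicy can only fire after shutdown.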
    final ExecutorService threadPool = new ThreadPoolExecutor(threads, threads, 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(), new TestThreadFactory(), new ThreadPoolExecutor.AbortPolicy());

    List<ExecuteReportRunner> reports = new ArrayList<ExecuteReportRunner>();
    reports.addAll(collectReports("reports", ReportProcessingMode.legacy, errors));
    reports.addAll(collectReports("reports", ReportProcessingMode.migration, errors));
    reports.addAll(collectReports("reports", ReportProcessingMode.current, errors));
    reports.addAll(collectReports("reports-4.0", ReportProcessingMode.migration, errors));
    reports.addAll(collectReports("reports-4.0", ReportProcessingMode.current, errors));

    for (ExecuteReportRunner report : reports) {
        threadPool.submit(report);
    }

    threadPool.shutdown();
    while (!threadPool.isTerminated()) {
        threadPool.awaitTermination(5, TimeUnit.MINUTES);
    }
    if (!errors.isEmpty()) {
        Log log = LogFactory.getLog(GoldTestBase.class);
        for (Throwable throwable : errors) {
            log.error("Failed", throwable);
        }
        Assert.fail();
    }
}

From source file:org.solmix.datax.mybatis.MybatisDataService.java

private ExecutorService createExecutorServiceIfNS(DataSourceInfo dsi) {
    int coreSize = Runtime.getRuntime().availableProcessors();
    int poolSize = dsi.getPoolSize();
    if (poolSize < coreSize) {
        coreSize = poolSize;
    }
    ThreadFactory tf = new NamedThreadFactory("MybatisDataService", true);
    BlockingQueue<Runnable> queueToUse = new LinkedBlockingQueue<Runnable>(coreSize);
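    // The small bounded queue plus CallerRunsPolicy pushes work back onto the submitting thread once the queue fills.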
    final ThreadPoolExecutor executor = new ThreadPoolExecutor(coreSize, poolSize, 60, TimeUnit.SECONDS,
            queueToUse, tf, new ThreadPoolExecutor.CallerRunsPolicy());
    dsi.setExecutorService(executor);
    return executor;
}

From source file:org.alfresco.repo.search.impl.lucene.index.IndexInfo.java

/**
 * Construct an index in the given directory.
 *
 * @param indexDirectory File
 * @param config LuceneConfig
 */
private IndexInfo(File indexDirectory, LuceneConfig config) {
    super();
    initialiseTransitions();
    this.config = config;

    if (config != null) {
        this.readWriteLock = new ReentrantReadWriteLock(config.getFairLocking());
        this.maxFieldLength = config.getIndexerMaxFieldLength();
        this.threadPoolExecutor = config.getThreadPoolExecutor();
        IndexInfo.useNIOMemoryMapping = config.getUseNioMemoryMapping();
        this.maxDocsForInMemoryMerge = config.getMaxDocsForInMemoryMerge();
        this.maxRamInMbForInMemoryMerge = config.getMaxRamInMbForInMemoryMerge();
        this.maxDocsForInMemoryIndex = config.getMaxDocsForInMemoryIndex();
        this.maxRamInMbForInMemoryIndex = config.getMaxRamInMbForInMemoryIndex();
        this.writerMaxBufferedDocs = config.getWriterMaxBufferedDocs();
        this.writerRamBufferSizeMb = config.getWriterRamBufferSizeMb();
        this.writerMergeFactor = config.getWriterMergeFactor();
        this.writerMaxMergeDocs = config.getWriterMaxMergeDocs();
        this.mergerMaxBufferedDocs = config.getMergerMaxBufferedDocs();
        this.mergerRamBufferSizeMb = config.getMergerRamBufferSizeMb();
        this.mergerMergeFactor = config.getMergerMergeFactor();
        this.mergerMaxMergeDocs = config.getMergerMaxMergeDocs();
        this.termIndexInterval = config.getTermIndexInterval();
        this.mergerTargetOverlays = config.getMergerTargetOverlayCount();
        this.mergerTargetIndexes = config.getMergerTargetIndexCount();
        this.mergerTargetOverlaysBlockingFactor = config.getMergerTargetOverlaysBlockingFactor();
        // Work out the relative path of the index
        try {
            String indexRoot = new File(config.getIndexRootLocation()).getCanonicalPath();
            this.relativePath = indexDirectory.getCanonicalPath().substring(indexRoot.length() + 1);
        } catch (IOException e) {
            throw new AlfrescoRuntimeException("Failed to determine index relative path", e);
        }
    } else {
        this.readWriteLock = new ReentrantReadWriteLock(false);

        // need a default thread pool ....
        TraceableThreadFactory threadFactory = new TraceableThreadFactory();
        threadFactory.setThreadDaemon(true);
        threadFactory.setThreadPriority(5);

        threadPoolExecutor = new ThreadPoolExecutor(10, 10, 90, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>(), threadFactory, new ThreadPoolExecutor.CallerRunsPolicy());

        // Create a 'fake' relative path
        try {
            this.relativePath = indexDirectory.getCanonicalPath();
            int sepIndex = this.relativePath.indexOf(File.separator);
            if (sepIndex != -1) {
                if (this.relativePath.length() > sepIndex + 1) {
                    this.relativePath = this.relativePath.substring(sepIndex + 1);
                } else {
                    this.relativePath = "";
                }
            }
        } catch (IOException e) {
            throw new AlfrescoRuntimeException("Failed to determine index relative path", e);
        }

    }

    // Create an empty in memory index
    IndexWriter writer;
    try {
        writer = new IndexWriter(emptyIndex, new AlfrescoStandardAnalyser(), true, MaxFieldLength.LIMITED);
        writer.setUseCompoundFile(writerUseCompoundFile);
        writer.setMaxBufferedDocs(writerMaxBufferedDocs);
        writer.setRAMBufferSizeMB(writerRamBufferSizeMb);
        writer.setMergeFactor(writerMergeFactor);
        writer.setMaxMergeDocs(writerMaxMergeDocs);
        writer.setWriteLockTimeout(writeLockTimeout);
        writer.setMaxFieldLength(maxFieldLength);
        writer.setTermIndexInterval(termIndexInterval);
        writer.setMergeScheduler(new SerialMergeScheduler());
        writer.setMergePolicy(new LogDocMergePolicy());
        writer.close();
    } catch (IOException e) {
        throw new IndexerException("Failed to create an empty in memory index!");
    }

    this.indexDirectory = indexDirectory;

    // Make sure the directory exists
    if (!this.indexDirectory.exists()) {
        if (!this.indexDirectory.mkdirs()) {
            throw new AlfrescoRuntimeException("Failed to create index directory");
        }
    }
    if (!this.indexDirectory.isDirectory()) {
        throw new AlfrescoRuntimeException("The index must be held in a directory");
    }

    // Create the info files.
    File indexInfoFile = new File(this.indexDirectory, INDEX_INFO);
    File indexInfoBackupFile = new File(this.indexDirectory, INDEX_INFO_BACKUP);
    if (createFile(indexInfoFile) && createFile(indexInfoBackupFile)) {
        // If both files required creation this is a new index
        version = 0;
    }

    // Open the files and channels for the index info file and the backup
    this.indexInfoRAF = openFile(indexInfoFile);
    this.indexInfoChannel = this.indexInfoRAF.getChannel();

    this.indexInfoBackupRAF = openFile(indexInfoBackupFile);
    this.indexInfoBackupChannel = this.indexInfoBackupRAF.getChannel();

    // If no info files were found (i.e. the index is new), check whether
    // there is an old-style index and convert it.
    if (version == 0) {
        // Check if an old style index exists

        final File oldIndex = new File(this.indexDirectory, OLD_INDEX);
        if (IndexReader.indexExists(oldIndex)) {
            getWriteLock();
            try {
                doWithFileLock(new LockWork<Object>() {
                    public Object doWork() throws Exception {
                        IndexWriter writer;
                        try {
                            writer = new IndexWriter(oldIndex, new AlfrescoStandardAnalyser(), false,
                                    MaxFieldLength.LIMITED);
                            writer.setUseCompoundFile(writerUseCompoundFile);
                            writer.setMaxBufferedDocs(writerMaxBufferedDocs);
                            writer.setRAMBufferSizeMB(writerRamBufferSizeMb);
                            writer.setMergeFactor(writerMergeFactor);
                            writer.setMaxMergeDocs(writerMaxMergeDocs);
                            writer.setWriteLockTimeout(writeLockTimeout);
                            writer.setMaxFieldLength(maxFieldLength);
                            writer.setTermIndexInterval(termIndexInterval);
                            writer.setMergeScheduler(new SerialMergeScheduler());
                            writer.setMergePolicy(new LogDocMergePolicy());
                            writer.optimize();
                            long docs = writer.numDocs();
                            writer.close();

                            IndexEntry entry = new IndexEntry(IndexType.INDEX, OLD_INDEX, "",
                                    TransactionStatus.COMMITTED, "", docs, 0, false);
                            indexEntries.put(OLD_INDEX, entry);

                            writeStatus();

                            // The index exists and we should initialise the single reader
                            registerReferenceCountingIndexReader(entry.getName(),
                                    buildReferenceCountingIndexReader(entry.getName(),
                                            entry.getDocumentCount()));
                        } catch (IOException e) {
                            throw new IndexerException("Failed to optimise old index");
                        }
                        return null;
                    }

                    public boolean canRetry() {
                        return false;
                    }
                });
            } finally {
                releaseWriteLock();
            }

        }
    }

    // The index exists
    else if (version == -1) {
        getWriteLock();
        try {
            doWithFileLock(new LockWork<Object>() {
                public Object doWork() throws Exception {
                    setStatusFromFile();

                    // If the index is not shared we can do some easy clean
                    // up
                    if (!indexIsShared) {
                        HashSet<String> deletable = new HashSet<String>();
                        // clean up
                        for (IndexEntry entry : indexEntries.values()) {
                            switch (entry.getStatus()) {
                            // states which can be deleted
                            // We could check prepared states can be
                            // committed.
                            case ACTIVE:
                            case MARKED_ROLLBACK:
                            case NO_TRANSACTION:
                            case PREPARING:
                            case ROLLEDBACK:
                            case ROLLINGBACK:
                            case MERGE_TARGET:
                            case UNKNOWN:
                            case PREPARED:
                            case DELETABLE:
                                if (s_logger.isInfoEnabled()) {
                                    s_logger.info("Deleting index entry " + entry);
                                }
                                entry.setStatus(TransactionStatus.DELETABLE);
                                deletable.add(entry.getName());
                                break;
                            // States which are in mid-transition which we
                            // can roll back to the committed state
                            case COMMITTED_DELETING:
                            case MERGE:
                                if (s_logger.isInfoEnabled()) {
                                    s_logger.info("Resetting merge to committed " + entry);
                                }
                                entry.setStatus(TransactionStatus.COMMITTED);
                                registerReferenceCountingIndexReader(entry.getName(),
                                        buildReferenceCountingIndexReader(entry.getName(),
                                                entry.getDocumentCount()));
                                break;
                            // Complete committing (which is post database
                            // commit)
                            case COMMITTING:
                                // do the commit
                                if (s_logger.isInfoEnabled()) {
                                    s_logger.info("Committing " + entry);
                                }
                                entry.setStatus(TransactionStatus.COMMITTED);
                                registerReferenceCountingIndexReader(entry.getName(),
                                        buildReferenceCountingIndexReader(entry.getName(),
                                                entry.getDocumentCount()));
                                break;
                            // States that require no action
                            case COMMITTED:
                                registerReferenceCountingIndexReader(entry.getName(),
                                        buildReferenceCountingIndexReader(entry.getName(),
                                                entry.getDocumentCount()));
                                break;
                            default:
                                // nothing to do
                                break;
                            }
                        }
                        // Delete entries that are not required
                        invalidateMainReadersFromFirst(deletable);
                        for (String id : deletable) {
                            indexEntries.remove(id);
                        }
                        clearOldReaders();

                        cleaner.schedule();

                        merger.schedule();

                        // persist the new state
                        writeStatus();
                    }
                    return null;
                }

                public boolean canRetry() {
                    return false;
                }

            });
        } finally {
            releaseWriteLock();
        }
    }
    // Need to do with file lock - must share info about other readers to support this with shared indexer
    // implementation

    getWriteLock();
    try {
        LockWork<Object> work = new DeleteUnknownGuidDirectories();
        doWithFileLock(work);
    } finally {
        releaseWriteLock();
    }

    // Run the cleaner roughly every 20 seconds - this just submits the request to the thread pool
    timer.schedule(new TimerTask() {
        @Override
        public void run() {
            cleaner.schedule();
        }
    }, 0, 20000);

    publishDiscoveryEvent();
}

From source file:org.opendedup.collections.ShardedProgressiveFileBasedCSMap.java

/**
 * Initializes the object set of this hash table.
 *
 * @return a <code>long</code> value
 * @throws HashtableFullException
 * @throws FileNotFoundException
 */
public long setUp() throws Exception {
    File _fs = new File(fileName);
    if (!_fs.getParentFile().exists()) {
        _fs.getParentFile().mkdirs();
    }
    SDFSLogger.getLog().info("Folder = " + _fs.getPath());
    SDFSLogger.getLog().info("Loading freebits bitset");
    long rsz = 0;
    this.setMaxSize(maxSz);
    File[] files = _fs.getParentFile().listFiles(new DBFileFilter());
    if (files.length > 0) {
        CommandLineProgressBar bar = new CommandLineProgressBar("Loading Existing Hash Tables", files.length,
                System.out);
        this.loadEvent.maxCt = files.length + 128;
        for (int i = 0; i < files.length; i++) {
            this.loadEvent.curCt = this.loadEvent.curCt + 1;
            int sz = NextPrime.getNextPrimeI((int) (this.hashTblSz));
            // SDFSLogger.getLog().debug("will create byte array of size "
            // + sz + " propsize was " + propsize);
            ShardedFileByteArrayLongMap m = null;
            String pth = files[i].getPath();
            String pfx = pth.substring(0, pth.length() - 5);
            m = new ShardedFileByteArrayLongMap(pfx, sz);
            long mep = m.setUp();
            if (mep > endPos)
                endPos = mep;
            maps.add(m);
            rsz = rsz + m.size();
            bar.update(i);
            if (!m.isFull() && this.activeWMap == null) {
                m.activate();
                this.activeWMap = m;
            } else {
                m.inActive();
                m.full = true;
            }
        }
        bar.finish();
    }

    this.loadEvent.shortMsg = "Loading BloomFilters";

    if (maps.size() != 0 && !LargeBloomFilter.exists(_fs.getParentFile())) {
        lbf = new LargeBloomFilter(_fs.getParentFile(), maxSz, .001, true, true, false);
        SDFSLogger.getLog().warn("Recreating BloomFilters...");
        this.loadEvent.shortMsg = "Recreating BloomFilters";

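        // worksQueue and executionHandler are defined elsewhere in this class; the pool rebuilds each shard's filter in parallel.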
        executor = new ThreadPoolExecutor(Main.writeThreads, Main.writeThreads, 10, TimeUnit.SECONDS,
                worksQueue, new ProcessPriorityThreadFactory(Thread.MIN_PRIORITY), executionHandler);
        CommandLineProgressBar bar = new CommandLineProgressBar("ReCreating BloomFilters", maps.size(),
                System.out);
        Iterator<AbstractShard> iter = maps.iterator();
        int i = 0;
        ArrayList<LBFReconstructThread> al = new ArrayList<LBFReconstructThread>();
        while (iter.hasNext()) {
            AbstractShard m = iter.next();
            LBFReconstructThread th = new LBFReconstructThread(lbf, m);
            executor.execute(th);
            al.add(th);
            i++;
            bar.update(i);
        }
        executor.shutdown();
        bar.finish();
        try {
            System.out.print("Waiting for all BloomFilters creation threads to finish");
            while (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
                SDFSLogger.getLog().debug("Awaiting fdisk completion of threads.");
                System.out.print(".");

            }
            for (LBFReconstructThread th : al) {
                if (th.ex != null)
                    throw th.ex;
            }
            System.out.println(" done");
        } catch (Exception e1) {
            throw new IOException(e1);
        }
    } else {
        lbf = new LargeBloomFilter(_fs.getParentFile(), maxSz, .001, true, true, false);
    }
    if (this.activeWMap == null) {
        boolean written = false;
        while (!written) {
            String guid = RandomGUID.getGuid();

            File f = new File(fileName + "-" + guid + ".keys");
            if (!f.exists()) {
                activeWMap = new ShardedFileByteArrayLongMap(fileName + "-" + guid, this.hashTblSz);
                activeWMap.activate();
                activeWMap.setUp();

                this.maps.add(activeWMap);
                written = true;

            }
        }
    }
    this.loadEvent.endEvent("Loaded entries " + rsz);
    System.out.println("Loaded entries " + rsz);
    SDFSLogger.getLog().info("Loaded entries " + rsz);
    SDFSLogger.getLog().info("Loading BloomFilters " + rsz);
    this.kSz.set(rsz);
    this.closed = false;
    return size;
}

From source file:org.hyperic.hq.measurement.agent.server.ScheduleThread.java

private void collect(ResourceSchedule rs, List<ScheduledMeasurement> items) {
    final boolean debug = log.isDebugEnabled();
    for (int i = 0; (i < items.size()) && (!shouldDie.get()); i++) {
        ScheduledMeasurement meas = items.get(i);
        ParsedTemplate tmpl = toParsedTemplate(meas);
        if (tmpl == null) {
            log.warn("template for meas id=" + meas.getDerivedID() + " is null");
            continue;
        }
        ThreadPoolExecutor executor;
        String plugin;
        synchronized (executors) {
            try {
                GenericPlugin p = manager.getPlugin(tmpl.plugin).getProductPlugin();
                plugin = p.getName();
            } catch (PluginNotFoundException e) {
                if (debug) {
                    log.debug("Could not find plugin name from template '" + tmpl.plugin
                            + "'. Associated plugin might not be initialized yet.");
                }
                continue;
            }
            executor = executors.get(plugin);
            if (executor == null) {
                final int poolSize = getPoolSize(plugin);
                final int queueSize = getQueueSize(plugin);
                log.info("Creating executor for plugin '" + plugin + "' with a poolsize=" + poolSize
                        + " queuesize=" + queueSize);
                final ThreadFactory factory = getFactory(plugin);
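                // AbortPolicy makes submit() throw RejectedExecutionException when the bounded queue is full; the catch below counts the failure.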
                executor = new ThreadPoolExecutor(poolSize, poolSize, 60, TimeUnit.SECONDS,
                        new LinkedBlockingQueue<Runnable>(queueSize), factory,
                        new ThreadPoolExecutor.AbortPolicy());
                executors.put(plugin, executor);
            }
        }
        MetricTask metricTask = new MetricTask(rs, meas);
        statsCollector.addStat(1, SCHEDULE_THREAD_METRIC_TASKS_SUBMITTED);
        try {
            Future<?> task = executor.submit(metricTask);
            synchronized (metricCollections) {
                metricCollections.put(task, metricTask);
            }
        } catch (RejectedExecutionException e) {
            log.warn("Executor[" + plugin + "] rejected metric task " + metricTask);
            statNumMetricsFailed++;
        }
    }
}

From source file:org.apache.hadoop.hbase.mob.MobUtils.java

/**
 * Creates a thread pool.
 * @param conf the Configuration
 * @return A thread pool.
 */
public static ExecutorService createMobCompactorThreadPool(Configuration conf) {
    int maxThreads = conf.getInt(MobConstants.MOB_COMPACTION_THREADS_MAX,
            MobConstants.DEFAULT_MOB_COMPACTION_THREADS_MAX);
    if (maxThreads == 0) {
        maxThreads = 1;
    }
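    // A SynchronousQueue performs direct handoff; the handler below blocks the submitter until a worker is free instead of rejecting.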
    final SynchronousQueue<Runnable> queue = new SynchronousQueue<Runnable>();
    ThreadPoolExecutor pool = new ThreadPoolExecutor(1, maxThreads, 60, TimeUnit.SECONDS, queue,
            Threads.newDaemonThreadFactory("MobCompactor"), new RejectedExecutionHandler() {
                @Override
                public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
                    try {
                        // waiting for a thread to pick up instead of throwing exceptions.
                        queue.put(r);
                    } catch (InterruptedException e) {
                        throw new RejectedExecutionException(e);
                    }
                }
            });
    pool.allowCoreThreadTimeOut(true);
    return pool;
}

From source file:org.apache.hadoop.hbase.mob.compactions.TestPartitionedMobCompactor.java

private static ExecutorService createThreadPool() {
    int maxThreads = 10;
    long keepAliveTime = 60;
    final SynchronousQueue<Runnable> queue = new SynchronousQueue<Runnable>();
    ThreadPoolExecutor pool = new ThreadPoolExecutor(1, maxThreads, keepAliveTime, TimeUnit.SECONDS, queue,
            Threads.newDaemonThreadFactory("MobFileCompactionChore"), new RejectedExecutionHandler() {
                @Override
                public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
                    try {
                        // waiting for a thread to pick up instead of throwing exceptions.
                        queue.put(r);
                    } catch (InterruptedException e) {
                        throw new RejectedExecutionException(e);
                    }
                }
            });
    pool.allowCoreThreadTimeOut(true);
    return pool;
}

From source file:org.apache.hadoop.hbase.mob.compactions.TestMobCompactor.java

private static ExecutorService createThreadPool(Configuration conf) {
    int maxThreads = 10;
    long keepAliveTime = 60;
    final SynchronousQueue<Runnable> queue = new SynchronousQueue<Runnable>();
    ThreadPoolExecutor pool = new ThreadPoolExecutor(1, maxThreads, keepAliveTime, TimeUnit.SECONDS, queue,
            Threads.newDaemonThreadFactory("MobFileCompactionChore"), new RejectedExecutionHandler() {
                @Override
                public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
                    try {
                        // waiting for a thread to pick up instead of throwing exceptions.
                        queue.put(r);
                    } catch (InterruptedException e) {
                        throw new RejectedExecutionException(e);
                    }
                }
            });
    pool.allowCoreThreadTimeOut(true);
    return pool;
}

From source file:com.alibaba.cobar.client.CobarSqlMapClientTemplate.java

private ExecutorService createCustomExecutorService(int poolSize, final String method) {
    int coreSize = Runtime.getRuntime().availableProcessors();
    if (poolSize < coreSize) {
        coreSize = poolSize;
    }
    ThreadFactory tf = new ThreadFactory() {
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r, "thread created at CobarSqlMapClientTemplate method [" + method + "]");
            t.setDaemon(true);
            return t;
        }
    };
    BlockingQueue<Runnable> queueToUse = new LinkedBlockingQueue<Runnable>(coreSize);
    final ThreadPoolExecutor executor = new ThreadPoolExecutor(coreSize, poolSize, 60, TimeUnit.SECONDS,
            queueToUse, tf, new ThreadPoolExecutor.CallerRunsPolicy());

    return executor;
}

From source file:org.orcid.core.manager.impl.OrcidProfileManagerImpl.java

private ExecutorService createThreadPoolForIndexing() {
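    // Bounded queue of one indexing batch; CallerRunsPolicy throttles the producer when indexing falls behind.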
    return new ThreadPoolExecutor(numberOfIndexingThreads, numberOfIndexingThreads, 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(INDEXING_BATCH_SIZE), Executors.defaultThreadFactory(),
            new ThreadPoolExecutor.CallerRunsPolicy());
}