Example usage for java.util.concurrent ThreadPoolExecutor ThreadPoolExecutor

Introduction

On this page you can find example usages of the java.util.concurrent.ThreadPoolExecutor constructor.

Prototype

public ThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
        BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory, RejectedExecutionHandler handler) 

Document

Creates a new ThreadPoolExecutor with the given initial parameters.
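
For reference, here is a minimal, self-contained sketch that exercises this seven-argument constructor directly. All names and sizes are illustrative values chosen for the demonstration, not recommendations.

import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;

public class SevenArgConstructorDemo {
    public static void main(String[] args) throws InterruptedException {
        // A named, daemon thread factory so pool threads are easy to spot in thread dumps
        ThreadFactory factory = new ThreadFactory() {
            private final AtomicInteger count = new AtomicInteger(1);

            public Thread newThread(Runnable r) {
                Thread t = new Thread(r, "demo-worker-" + count.getAndIncrement());
                t.setDaemon(true);
                return t;
            }
        };
        // 2 core threads, up to 4 total; idle non-core threads die after 30s;
        // a bounded queue of 10; CallerRunsPolicy pushes overflow back on the caller.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(2, 4, 30L, TimeUnit.SECONDS,
                new ArrayBlockingQueue<Runnable>(10), factory,
                new ThreadPoolExecutor.CallerRunsPolicy());
        pool.execute(() -> System.out.println("ran on " + Thread.currentThread().getName()));
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
    }
}

The examples below show how real projects vary each of these parameters: queue type and bound, thread factory, and rejection policy.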

Usage

From source file:org.apache.tez.dag.app.launcher.TezContainerLauncherImpl.java

@Override
public void start() throws TezException {
    // pass a copy of config to ContainerManagementProtocolProxy until YARN-3497 is fixed
    cmProxy = new ContainerManagementProtocolProxy(conf);

    ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat("ContainerLauncher #%d").setDaemon(true)
            .build();

    // Start with a default core-pool size of 10 and change it dynamically.
    launcherPool = new ThreadPoolExecutor(INITIAL_POOL_SIZE, Integer.MAX_VALUE, 1, TimeUnit.HOURS,
            new LinkedBlockingQueue<Runnable>(), tf, new CustomizedRejectedExecutionHandler());
    eventHandlingThread = new Thread() {
        @Override
        public void run() {
            ContainerOp event = null;
            while (!Thread.currentThread().isInterrupted()) {
                try {
                    event = eventQueue.take();
                } catch (InterruptedException e) {
                    if (!serviceStopped.get()) {
                        LOG.error("Returning, interrupted : " + e);
                    }
                    return;
                }
                int poolSize = launcherPool.getCorePoolSize();

                // Bump up the pool size only if we haven't reached the
                // maximum limit yet.
                if (poolSize != limitOnPoolSize) {

                    // Number of nodes where containers will run at *this* point in
                    // time. This is *not* the cluster size and doesn't need to be.
                    int numNodes = getContext().getNumNodes(TezConstants.getTezYarnServicePluginName());
                    int idealPoolSize = Math.min(limitOnPoolSize, numNodes);

                    if (poolSize < idealPoolSize) {
                        // Bump up the pool size to idealPoolSize + INITIAL_POOL_SIZE;
                        // the latter is just a buffer so we are not constantly
                        // increasing the pool size.
                        int newPoolSize = Math.min(limitOnPoolSize, idealPoolSize + INITIAL_POOL_SIZE);
                        LOG.info("Setting ContainerLauncher pool size to " + newPoolSize
                                + " as number-of-nodes to talk to is " + numNodes);
                        launcherPool.setCorePoolSize(newPoolSize);
                    }
                }

                // the events from the queue are handled in parallel
                // using a thread pool
                launcherPool.execute(createEventProcessor(event));

                // TODO: Group launching of multiple containers to a single
                // NodeManager into a single connection
            }
        }
    };
    eventHandlingThread.setName("ContainerLauncher Event Handler");
    eventHandlingThread.start();
    boolean cleanupDagDataOnComplete = ShuffleUtils.isTezShuffleHandler(conf)
            && conf.getBoolean(TezConfiguration.TEZ_AM_DAG_CLEANUP_ON_COMPLETION,
                    TezConfiguration.TEZ_AM_DAG_CLEANUP_ON_COMPLETION_DEFAULT);
    if (cleanupDagDataOnComplete) {
        String deletionTrackerClassName = conf.get(TezConfiguration.TEZ_AM_DELETION_TRACKER_CLASS,
                TezConfiguration.TEZ_AM_DELETION_TRACKER_CLASS_DEFAULT);
        deletionTracker = ReflectionUtils.createClazzInstance(deletionTrackerClassName,
                new Class[] { Configuration.class }, new Object[] { conf });
    }
}
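
A point worth noting about this example: because the work queue is an unbounded LinkedBlockingQueue, the pool never grows past corePoolSize (the queue never rejects an offer, so the maximumPoolSize of Integer.MAX_VALUE is effectively unused). That is why the event loop resizes the pool via setCorePoolSize rather than relying on the maximum. A minimal sketch of the same resize-on-demand idea, with illustrative thresholds:

import java.util.concurrent.*;

public class ResizableCorePool {
    public static void main(String[] args) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(10, Integer.MAX_VALUE, 1L,
                TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>());
        // With an unbounded queue, raising the core size is the only way
        // to add workers; the hypothetical limit of 32 caps the growth.
        if (pool.getCorePoolSize() < 32) {
            pool.setCorePoolSize(Math.min(32, pool.getCorePoolSize() + 10));
        }
        pool.shutdown();
    }
}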

From source file:com.indeed.lsmtree.recordcache.PersistentRecordCache.java

/**
 * Performs lookup for multiple keys and returns a streaming iterator to results.
 * Each element in the iterator is one of
 *  (1) an exception associated with a single lookup
 *  (2) a key-value tuple
 *
 * @param keys      lookup keys
 * @param progress  (optional) an AtomicInteger for tracking progress
 * @param skipped   (optional) an AtomicInteger for tracking missing keys
 * @return          iterator of lookup results
 */
public Iterator<Either<Exception, P2<K, V>>> getStreaming(final @Nonnull Iterator<K> keys,
        final @Nullable AtomicInteger progress, final @Nullable AtomicInteger skipped) {
    log.info("starting store lookups");
    LongArrayList addressList = new LongArrayList();
    int notFound = 0;
    while (keys.hasNext()) {
        final K key = keys.next();
        final Long address;
        try {
            address = index.get(key);
        } catch (IOException e) {
            log.error("error", e);
            return Iterators.singletonIterator(Left.<Exception, P2<K, V>>of(new IndexReadException(e)));
        }
        if (address != null) {
            addressList.add(address);
        } else {
            notFound++;
        }
    }
    if (progress != null)
        progress.addAndGet(notFound);
    if (skipped != null)
        skipped.addAndGet(notFound);
    log.info("store lookups complete, sorting addresses");

    final long[] addresses = addressList.elements();
    Arrays.sort(addresses, 0, addressList.size());

    log.info("initializing store lookup iterator");
    final BlockingQueue<Runnable> taskQueue = new ArrayBlockingQueue<Runnable>(100);
    final Iterator<List<Long>> iterable = Iterators.partition(addressList.iterator(), 1000);
    final ExecutorService primerThreads = new ThreadPoolExecutor(10, 10, 0L, TimeUnit.MILLISECONDS, taskQueue,
            new NamedThreadFactory("store priming thread", true, log), new RejectedExecutionHandler() {
                @Override
                public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
                    try {
                        taskQueue.put(r);
                    } catch (InterruptedException e) {
                        log.error("error", e);
                        throw new RuntimeException(e);
                    }
                }
            });
    final BlockingQueue<List<Either<Exception, P2<K, V>>>> completionQueue = new ArrayBlockingQueue<List<Either<Exception, P2<K, V>>>>(
            10);
    final AtomicLong runningTasks = new AtomicLong(0);
    final AtomicBoolean taskSubmitterRunning = new AtomicBoolean(true);

    new Thread(new Runnable() {
        @Override
        public void run() {
            while (iterable.hasNext()) {
                runningTasks.incrementAndGet();
                final List<Long> addressesSublist = iterable.next();
                primerThreads.submit(new FutureTask<List<Either<Exception, P2<K, V>>>>(
                        new RecordLookupTask(addressesSublist)) {
                    @Override
                    protected void done() {
                        try {
                            final List<Either<Exception, P2<K, V>>> results = get();
                            if (progress != null) {
                                progress.addAndGet(results.size());
                            }
                            completionQueue.put(results);
                        } catch (InterruptedException e) {
                            log.error("error", e);
                            throw new RuntimeException(e);
                        } catch (ExecutionException e) {
                            log.error("error", e);
                            throw new RuntimeException(e);
                        }
                    }
                });
            }
            taskSubmitterRunning.set(false);
        }
    }, "RecordLookupTaskSubmitterThread").start();

    return new Iterator<Either<Exception, P2<K, V>>>() {

        Iterator<Either<Exception, P2<K, V>>> currentIterator;

        @Override
        public boolean hasNext() {
            if (currentIterator != null && currentIterator.hasNext())
                return true;
            while (taskSubmitterRunning.get() || runningTasks.get() > 0) {
                try {
                    final List<Either<Exception, P2<K, V>>> list = completionQueue.poll(1, TimeUnit.SECONDS);
                    if (list != null) {
                        log.debug("remaining: " + runningTasks.decrementAndGet());
                        currentIterator = list.iterator();
                        if (currentIterator.hasNext())
                            return true;
                    }
                } catch (InterruptedException e) {
                    log.error("error", e);
                    throw new RuntimeException(e);
                }
            }
            primerThreads.shutdown();
            return false;
        }

        @Override
        public Either<Exception, P2<K, V>> next() {
            return currentIterator.next();
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    };
}
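
The interesting piece here is the rejection handler: instead of discarding work when the bounded taskQueue fills, it re-inserts the rejected task with put(), which blocks the submitting thread until a slot frees up, turning submission into a natural backpressure mechanism. A stripped-down sketch of just that pattern (pool sizes are illustrative; the re-queue can race with shutdown, which this sketch ignores):

import java.util.concurrent.*;

public class BlockingSubmitPool {
    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<Runnable> taskQueue = new ArrayBlockingQueue<>(100);
        ThreadPoolExecutor pool = new ThreadPoolExecutor(10, 10, 0L, TimeUnit.MILLISECONDS,
                taskQueue, Executors.defaultThreadFactory(),
                (task, executor) -> {
                    try {
                        // Block the submitter until the queue has room rather
                        // than throwing RejectedExecutionException.
                        executor.getQueue().put(task);
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        throw new RejectedExecutionException(e);
                    }
                });
        for (int i = 0; i < 1_000; i++) {
            pool.execute(() -> { /* simulated lookup work */ });
        }
        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.MINUTES);
    }
}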

From source file:org.opendedup.collections.ShardedProgressiveFileBasedCSMap.java

@Override
public synchronized long claimRecords(SDFSEvent evt) throws IOException {
    if (this.isClosed())
        throw new IOException("Hashtable " + this.fileName + " is closed");
    executor = new ThreadPoolExecutor(Main.writeThreads + 1, Main.writeThreads + 1, 10, TimeUnit.SECONDS,
            worksQueue, new ProcessPriorityThreadFactory(Thread.MIN_PRIORITY), executionHandler);
    csz = new AtomicLong(0);

    try {
        Lock l = this.gcLock.writeLock();
        l.lock();
        this.runningGC = true;
        try {
            File _fs = new File(fileName);
            lbf = null;
            lbf = new LargeBloomFilter(_fs.getParentFile(), maxSz, .001, true, true, false);
        } finally {
            l.unlock();
        }

        SDFSLogger.getLog().info("Claiming Records [" + this.getSize() + "] from [" + this.fileName + "]");
        SDFSEvent tEvt = SDFSEvent
                .claimInfoEvent("Claiming Records [" + this.getSize() + "] from [" + this.fileName + "]", evt);
        tEvt.maxCt = this.maps.size();
        Iterator<AbstractShard> iter = maps.iterator();
        ArrayList<ClaimShard> excs = new ArrayList<ClaimShard>();
        while (iter.hasNext()) {
            tEvt.curCt++;
            AbstractShard m = null;
            try {
                m = iter.next();
                ClaimShard cms = new ClaimShard(m, csz, lbf);
                excs.add(cms);
                executor.execute(cms);
            } catch (Exception e) {
                tEvt.endEvent("Unable to claim records for " + m + " because : [" + e.toString() + "]",
                        SDFSEvent.ERROR);
                SDFSLogger.getLog().error("Unable to claim records for " + m, e);
                throw new IOException(e);
            }
        }
        executor.shutdown();
        try {
            while (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
                SDFSLogger.getLog().debug("Awaiting fdisk completion of threads.");
            }
        } catch (InterruptedException e) {
            throw new IOException(e);
        }
        for (ClaimShard cms : excs) {
            if (cms.ex != null)
                throw new IOException(cms.ex);
        }
        this.kSz.getAndAdd(-1 * csz.get());
        tEvt.endEvent("removed [" + csz.get() + "] records");
        SDFSLogger.getLog().info("removed [" + csz.get() + "] records");
        iter = maps.iterator();
        while (iter.hasNext()) {
            AbstractShard m = null;
            try {
                m = iter.next();
                if (!m.isFull() && !m.isActive()) {

                    // SDFSLogger.getLog().info("deleting " +
                    // m.toString());
                    m.iterInit();
                    KVPair p = m.nextKeyValue();
                    while (p != null) {
                        AbstractShard _m = this.getWriteMap();
                        try {
                            _m.put(p.key, p.value, p.loc);
                            this.keyLookup.invalidate(new ByteArrayWrapper(p.key));
                            this.lbf.put(p.key);
                            p = m.nextKeyValue();
                        } catch (HashtableFullException e) {

                        }

                    }
                    int mapsz = maps.size();
                    l = this.gcLock.writeLock();
                    l.lock();
                    try {
                        maps.remove(m);
                    } finally {
                        l.unlock();
                    }
                    mapsz = mapsz - maps.size();
                    SDFSLogger.getLog()
                            .info("removing map " + m.toString() + " sz=" + maps.size() + " rm=" + mapsz);
                    m.vanish();

                    m = null;
                } else if (m.isMaxed()) {
                    SDFSLogger.getLog().info("deleting maxed " + m.toString());
                    m.iterInit();
                    KVPair p = m.nextKeyValue();
                    while (p != null) {
                        ShardedFileByteArrayLongMap _m = this.getWriteMap();
                        try {
                            _m.put(p.key, p.value, p.loc);
                            p = m.nextKeyValue();
                        } catch (HashtableFullException e) {

                        }

                    }
                    int mapsz = maps.size();
                    l = this.gcLock.writeLock();
                    l.lock();
                    try {
                        maps.remove(m);
                    } finally {
                        l.unlock();
                    }
                    mapsz = mapsz - maps.size();
                    SDFSLogger.getLog()
                            .info("removing map " + m.toString() + " sz=" + maps.size() + " rm=" + mapsz);
                    m.vanish();

                    m = null;
                }
            } catch (Exception e) {
                tEvt.endEvent("Unable to compact " + m + " because : [" + e.toString() + "]", SDFSEvent.ERROR);
                SDFSLogger.getLog().error("Unable to compact " + m, e);
                throw new IOException(e);
            }
        }
        l.lock();
        this.runningGC = false;
        l.unlock();
        return csz.get();
    } finally {
        executor = null;
    }
}
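
The shutdown idiom used above, shutdown() followed by awaitTermination in a loop, waits for all queued ClaimShard tasks to drain while periodically logging that the wait is still in progress. As a reusable sketch:

import java.util.concurrent.*;

public class DrainThenAwait {
    // Stop accepting new tasks, then wait (indefinitely, with periodic
    // log output) for already-submitted work to finish.
    static void drain(ExecutorService executor) throws InterruptedException {
        executor.shutdown();
        while (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
            System.out.println("Awaiting completion of worker threads...");
        }
    }
}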

From source file:com.alibaba.otter.node.etl.common.pipe.impl.http.archive.ArchiveBean.java

public void afterPropertiesSet() throws Exception {
    executor = new ThreadPoolExecutor(poolSize, poolSize, 0L, TimeUnit.MILLISECONDS,
            new ArrayBlockingQueue(poolSize * 4), new NamedThreadFactory(WORKER_NAME),
            new ThreadPoolExecutor.CallerRunsPolicy());
}
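
This bean pairs a bounded queue (four slots per worker) with CallerRunsPolicy, so when producers outrun the pool, the submitting thread executes tasks itself and is throttled to the pool's pace. A small demonstration of that effect (sizes are illustrative):

import java.util.concurrent.*;

public class CallerRunsDemo {
    public static void main(String[] args) throws InterruptedException {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS,
                new ArrayBlockingQueue<Runnable>(1),
                new ThreadPoolExecutor.CallerRunsPolicy());
        for (int i = 0; i < 4; i++) {
            pool.execute(() -> System.out.println("ran on " + Thread.currentThread().getName()));
        }
        // Once the single worker and the one queue slot are occupied, the
        // remaining submissions print "ran on main": the caller does the work.
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
    }
}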

From source file:API.amazon.mws.feeds.service.MarketplaceWebServiceClient.java

/**
 * Constructs MarketplaceWebServiceClient with AWS Access Key ID, AWS Secret Key
 * and MarketplaceWebServiceConfig. Use MarketplaceWebServiceConfig to pass additional
 * configuration that affects how service is being called.
 *
 * @param awsAccessKeyId
 *          AWS Access Key ID
 * @param awsSecretAccessKey
 *          AWS Secret Access Key
 * @param config
 *          Additional configuration options
 */
@SuppressWarnings("serial")
public MarketplaceWebServiceClient(String awsAccessKeyId, String awsSecretAccessKey, String applicationName,
        String applicationVersion, MarketplaceWebServiceConfig config) {
    this.awsAccessKeyId = awsAccessKeyId;
    this.awsSecretAccessKey = awsSecretAccessKey;
    this.config = config;
    this.httpClient = configureHttpClient(applicationName, applicationVersion);
    this.asyncExecutor = new ThreadPoolExecutor(config.getMaxAsyncThreads(), config.getMaxAsyncThreads(), 60L,
            TimeUnit.SECONDS, new ArrayBlockingQueue<Runnable>(config.getMaxAsyncQueueSize()) {

                @Override
                public boolean offer(Runnable task) {
                    log.debug("Maximum number of concurrent threads reached, queuing task...");
                    return super.offer(task);
                }
            }, new ThreadFactory() {

                private final AtomicInteger threadNumber = new AtomicInteger(1);

                public Thread newThread(Runnable task) {
                    Thread thread = new Thread(task,
                            "MarketplaceWebServiceClient-Thread-" + threadNumber.getAndIncrement());
                    thread.setDaemon(true);
                    if (thread.getPriority() != Thread.NORM_PRIORITY) {
                        thread.setPriority(Thread.NORM_PRIORITY);
                    }
                    log.debug("ThreadFactory created new thread: " + thread.getName());
                    return thread;
                }
            }, new RejectedExecutionHandler() {

                public void rejectedExecution(Runnable task, ThreadPoolExecutor executor) {
                    log.debug("Maximum number of concurrent threads reached, and queue is full. "
                            + "Running task in the calling thread..." + Thread.currentThread().getName());
                    if (!executor.isShutdown()) {
                        task.run();
                    }
                }
            });
}
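
Two details here are worth calling out. The anonymous ArrayBlockingQueue subclass overrides offer() purely to log; queueing behavior is unchanged. Note also that ThreadPoolExecutor calls offer() whenever all core threads are busy, which here, with corePoolSize equal to maximumPoolSize, does coincide with the maximum being reached. A reusable version of the same logging-queue idea (the class name is hypothetical):

import java.util.concurrent.ArrayBlockingQueue;

// Logs whenever a task is parked in the queue instead of starting a new thread;
// delegates to the parent for the actual queueing behavior.
class LoggingTaskQueue extends ArrayBlockingQueue<Runnable> {
    LoggingTaskQueue(int capacity) {
        super(capacity);
    }

    @Override
    public boolean offer(Runnable task) {
        boolean queued = super.offer(task);
        System.out.println(queued ? "All workers busy, task queued"
                                  : "Queue full, task will be rejected");
        return queued;
    }
}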

From source file:alma.acs.nc.AcsEventSubscriberImplBase.java

/**
 * Subclass may override, but must call super.createConnectionAction().
 */
protected void createConnectionAction(EventDispatcher evtDispatcher, ErrorReporter errRep,
        SCInstance scInstance, Collection<TriggerEvent> derivedEvents) throws AcsJStateMachineActionEx {
    eventHandlingExecutor = new ThreadPoolExecutor(0, 1, 1L, TimeUnit.MINUTES,
            new ArrayBlockingQueue<Runnable>(EVENT_QUEUE_CAPACITY), services.getThreadFactory(),
            new ThreadPoolExecutor.AbortPolicy());
}
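
Note the unusual sizing: corePoolSize 0 with maximumPoolSize 1 means at most one worker exists and it exits after a minute of idleness, while AbortPolicy makes overflow explicit by throwing RejectedExecutionException once the bounded event queue fills. A minimal sketch of how that failure mode surfaces (the capacity here is illustrative, standing in for EVENT_QUEUE_CAPACITY):

import java.util.concurrent.*;

public class AbortOnOverflowDemo {
    public static void main(String[] args) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(0, 1, 1L, TimeUnit.MINUTES,
                new ArrayBlockingQueue<Runnable>(2),
                new ThreadPoolExecutor.AbortPolicy());
        try {
            for (int i = 0; i < 10; i++) {
                pool.execute(() -> {
                    try { Thread.sleep(500); } catch (InterruptedException ignored) { }
                });
            }
        } catch (RejectedExecutionException e) {
            // With one slow worker and a two-slot queue, later submissions
            // are rejected rather than silently dropped or buffered.
            System.out.println("event overflow: " + e);
        } finally {
            pool.shutdownNow();
        }
    }
}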

From source file:com.alibaba.otter.node.etl.load.loader.db.FileLoadAction.java

public void afterPropertiesSet() throws Exception {
    executor = new ThreadPoolExecutor(poolSize, poolSize, 0L, TimeUnit.MILLISECONDS,
            new ArrayBlockingQueue<Runnable>(poolSize * 4), new NamedThreadFactory(WORKER_NAME),
            new ThreadPoolExecutor.CallerRunsPolicy());
}

From source file:org.codice.ddf.spatial.ogc.csw.catalog.transformer.CswQueryResponseTransformer.java

public void init() {
    int numThreads = Runtime.getRuntime().availableProcessors();
    LOGGER.debug(QUERY_POOL_NAME + " size: {}", numThreads);

    /*
    - when first two args the same, get fixed size thread pool.
    - 3rd arg, keepAliveTime, ignored when !allowsCoreThreadTimeOut (the default); thus pass zero.
    - fixed (and arbitrarily) size blocking queue.
    - CswThreadFactory gives pool threads a name to ease debug.
    - tried arbitrarily large numThreads/queue-size, but did not see performance gain.
    - big queue + small pool minimizes CPU usage, OS resources, and context-switching overhead,
      but *can* lead to artificially low throughput.
    - todo: externalize config to support runtime tuning.
    */
    queryExecutor = new ThreadPoolExecutor(numThreads, numThreads, 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingDeque<Runnable>(BLOCKING_Q_INITIAL_SIZE), new CswThreadFactory(),
            new ThreadPoolExecutor.CallerRunsPolicy());

    queryExecutor.prestartAllCoreThreads();
}
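
The prestartAllCoreThreads() call at the end eagerly creates all core workers so the first queries do not pay thread-startup latency; by default, core threads are only created lazily as tasks arrive. A minimal illustration:

import java.util.concurrent.*;

public class PrestartDemo {
    public static void main(String[] args) {
        int numThreads = Runtime.getRuntime().availableProcessors();
        ThreadPoolExecutor pool = new ThreadPoolExecutor(numThreads, numThreads, 0L,
                TimeUnit.MILLISECONDS, new LinkedBlockingDeque<Runnable>());
        System.out.println("pool size before prestart: " + pool.getPoolSize()); // 0
        System.out.println("threads started eagerly: " + pool.prestartAllCoreThreads());
        pool.shutdown();
    }
}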

From source file:org.opendedup.collections.ProgressiveFileBasedCSMap.java

/**
 * Initializes the object set of this hash table.
 *
 * @return the size of the table, as a <code>long</code> value
 * @throws HashtableFullException
 * @throws FileNotFoundException
 */
public long setUp() throws Exception {
    File _fs = new File(fileName);
    if (!_fs.getParentFile().exists()) {
        _fs.getParentFile().mkdirs();
    }
    SDFSLogger.getLog().info("Folder = " + _fs.getPath());
    SDFSLogger.getLog().info("Loading freebits bitset");
    long rsz = 0;
    this.setMaxSize(maxSz);
    File[] files = _fs.getParentFile().listFiles(new DBFileFilter());
    if (files.length > 0) {
        CommandLineProgressBar bar = new CommandLineProgressBar("Loading Existing Hash Tables", files.length,
                System.out);
        this.loadEvent.maxCt = files.length + 128;

        for (int i = 0; i < files.length; i++) {
            this.loadEvent.curCt = this.loadEvent.curCt + 1;
            int sz = NextPrime.getNextPrimeI((int) (this.hashTblSz));
            // SDFSLogger.getLog().debug("will create byte array of size "
            // + sz + " propsize was " + propsize);
            ProgressiveFileByteArrayLongMap m = null;
            String pth = files[i].getPath();
            String pfx = pth.substring(0, pth.length() - 5);
            m = new ProgressiveFileByteArrayLongMap(pfx, sz);
            long mep = m.setUp();
            if (mep > endPos)
                endPos = mep;
            maps.add(m);
            rsz = rsz + m.size();
            bar.update(i);
            if (!m.isFull() && this.activeWriteMaps.size() < AMS) {
                m.activate();
                this.activeWriteMaps.add(m);
            } else {
                m.inActive();
                m.full = true;
            }
        }
        bar.finish();
    }

    this.loadEvent.shortMsg = "Loading BloomFilters";

    if (maps.size() != 0 && !LargeBloomFilter.exists(_fs.getParentFile())) {
        lbf = new LargeBloomFilter(_fs.getParentFile(), maxSz, .01, true, true, false);
        SDFSLogger.getLog().warn("Recreating BloomFilters...");
        this.loadEvent.shortMsg = "Recreating BloomFilters";

        executor = new ThreadPoolExecutor(Main.writeThreads, Main.writeThreads, 10, TimeUnit.SECONDS,
                worksQueue, new ProcessPriorityThreadFactory(Thread.MIN_PRIORITY), executionHandler);
        CommandLineProgressBar bar = new CommandLineProgressBar("ReCreating BloomFilters", maps.size(),
                System.out);
        Iterator<AbstractShard> iter = maps.iterator();
        int i = 0;
        ArrayList<LBFReconstructThread> al = new ArrayList<LBFReconstructThread>();
        while (iter.hasNext()) {
            AbstractShard m = iter.next();
            LBFReconstructThread th = new LBFReconstructThread(lbf, m);
            executor.execute(th);
            al.add(th);
            i++;
            bar.update(i);
        }
        executor.shutdown();
        bar.finish();
        try {
            System.out.print("Waiting for all BloomFilter creation threads to finish");
            while (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
                SDFSLogger.getLog().debug("Awaiting fdisk completion of threads.");
                System.out.print(".");

            }
            for (LBFReconstructThread th : al) {
                if (th.ex != null)
                    throw th.ex;
            }
            System.out.println(" done");
        } catch (Exception e1) {
            throw new IOException(e1);
        }
    } else {
        lbf = new LargeBloomFilter(_fs.getParentFile(), maxSz, .01, true, true, false);
    }
    while (this.activeWriteMaps.size() < AMS) {
        boolean written = false;
        while (!written) {
            String guid = RandomGUID.getGuid();

            File f = new File(fileName + "-" + guid + ".keys");
            if (!f.exists()) {
                ProgressiveFileByteArrayLongMap activeWMap = new ProgressiveFileByteArrayLongMap(
                        fileName + "-" + guid, this.hashTblSz);
                activeWMap.activate();
                activeWMap.setUp();

                this.maps.add(activeWMap);
                written = true;

                this.activeWriteMaps.add(activeWMap);
            }
        }
    }
    this.loadEvent.endEvent("Loaded entries " + rsz);
    System.out.println("Loaded entries " + rsz);
    SDFSLogger.getLog().info("Loaded entries " + rsz);
    SDFSLogger.getLog().info("Loading BloomFilters " + rsz);
    this.kSz.set(rsz);
    this.closed = false;
    return size;
}

From source file:org.opendedup.collections.ShardedProgressiveFileBasedCSMap.java

@Override
public synchronized long claimRecords(SDFSEvent evt, LargeBloomFilter bf) throws IOException {
    if (this.isClosed())
        throw new IOException("Hashtable " + this.fileName + " is closed");
    executor = new ThreadPoolExecutor(Main.writeThreads + 1, Main.writeThreads + 1, 10, TimeUnit.SECONDS,
            worksQueue, new ProcessPriorityThreadFactory(Thread.MIN_PRIORITY), executionHandler);
    csz = new AtomicLong(0);

    try {
        Lock l = this.gcLock.writeLock();
        l.lock();
        this.runningGC = true;
        try {
            File _fs = new File(fileName);
            lbf = null;
            lbf = new LargeBloomFilter(_fs.getParentFile(), maxSz, .001, true, true, false);
        } finally {
            l.unlock();
        }

        SDFSLogger.getLog().info("Claiming Records [" + this.getSize() + "] from [" + this.fileName + "]");
        SDFSEvent tEvt = SDFSEvent
                .claimInfoEvent("Claiming Records [" + this.getSize() + "] from [" + this.fileName + "]", evt);
        tEvt.maxCt = this.maps.size();
        Iterator<AbstractShard> iter = maps.iterator();
        ArrayList<ClaimShard> excs = new ArrayList<ClaimShard>();
        while (iter.hasNext()) {
            tEvt.curCt++;
            AbstractShard m = null;
            try {
                m = iter.next();
                ClaimShard cms = new ClaimShard(m, bf, lbf, csz);
                excs.add(cms);
                executor.execute(cms);
            } catch (Exception e) {
                tEvt.endEvent("Unable to claim records for " + m + " because : [" + e.toString() + "]",
                        SDFSEvent.ERROR);
                SDFSLogger.getLog().error("Unable to claim records for " + m, e);
                throw new IOException(e);
            }
        }
        executor.shutdown();
        try {
            while (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
                SDFSLogger.getLog().debug("Awaiting fdisk completion of threads.");
            }
        } catch (InterruptedException e) {
            throw new IOException(e);
        }
        for (ClaimShard cms : excs) {
            if (cms.ex != null)
                throw new IOException(cms.ex);
        }
        this.kSz.getAndAdd(-1 * csz.get());
        tEvt.endEvent("removed [" + csz.get() + "] records");
        SDFSLogger.getLog().info("removed [" + csz.get() + "] records");
        iter = maps.iterator();
        while (iter.hasNext()) {
            AbstractShard m = null;
            try {
                m = iter.next();
                if (!m.isFull() && !m.isActive()) {

                    // SDFSLogger.getLog().info("deleting " +
                    // m.toString());
                    m.iterInit();
                    KVPair p = m.nextKeyValue();
                    while (p != null) {
                        AbstractShard _m = this.getWriteMap();
                        try {
                            _m.put(p.key, p.value, p.loc);
                            this.keyLookup.invalidate(new ByteArrayWrapper(p.key));
                            this.lbf.put(p.key);
                            p = m.nextKeyValue();
                        } catch (HashtableFullException e) {

                        }

                    }
                    int mapsz = maps.size();
                    l = this.gcLock.writeLock();
                    l.lock();
                    try {
                        maps.remove(m);
                    } finally {
                        l.unlock();
                    }
                    mapsz = mapsz - maps.size();
                    SDFSLogger.getLog()
                            .info("removing map " + m.toString() + " sz=" + maps.size() + " rm=" + mapsz);
                    m.vanish();

                    m = null;
                } else if (m.isMaxed()) {
                    SDFSLogger.getLog().info("deleting maxed " + m.toString());
                    m.iterInit();
                    KVPair p = m.nextKeyValue();
                    while (p != null) {
                        ShardedFileByteArrayLongMap _m = this.getWriteMap();
                        try {
                            _m.put(p.key, p.value);
                            p = m.nextKeyValue();
                        } catch (HashtableFullException e) {

                        }

                    }
                    int mapsz = maps.size();
                    l = this.gcLock.writeLock();
                    l.lock();
                    try {
                        maps.remove(m);
                    } finally {
                        l.unlock();
                    }
                    mapsz = mapsz - maps.size();
                    SDFSLogger.getLog()
                            .info("removing map " + m.toString() + " sz=" + maps.size() + " rm=" + mapsz);
                    m.vanish();

                    m = null;
                }
            } catch (Exception e) {
                tEvt.endEvent("Unable to compact " + m + " because : [" + e.toString() + "]", SDFSEvent.ERROR);
                SDFSLogger.getLog().error("Unable to compact " + m, e);
                throw new IOException(e);
            }
        }
        l.lock();
        this.runningGC = false;
        l.unlock();
        return csz.get();
    } finally {
        executor = null;
    }
}