// io.netty.util.Recycler — light-weight thread-local object pool (Netty).
/*
 * Copyright 2013 The Netty Project
 *
 * The Netty Project licenses this file to you under the Apache License,
 * version 2.0 (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at:
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */
package io.netty.util;

import io.netty.util.concurrent.FastThreadLocal;
import io.netty.util.internal.ObjectPool;
import io.netty.util.internal.SystemPropertyUtil;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;

import java.lang.ref.WeakReference;
import java.util.Arrays;
import java.util.Map;
import java.util.WeakHashMap;
import java.util.concurrent.atomic.AtomicInteger;

import static io.netty.util.internal.MathUtil.safeFindNextPositivePowerOfTwo;
import static java.lang.Math.max;
import static java.lang.Math.min;

/**
 * Light-weight object pool based on a thread-local stack.
 *
 * <p>Objects allocated via {@link #get()} carry a {@link Handle} that pushes them back onto the
 * owning thread's {@link Stack} when recycled. Recycling from a foreign thread is deferred through
 * a per-(stack, thread) {@link WeakOrderQueue} and transferred back lazily when the owner's stack
 * runs dry (see {@link Stack#scavenge()}).
 *
 * @param <T> the type of the pooled object
 */
public abstract class Recycler<T> {

    private static final InternalLogger logger = InternalLoggerFactory.getInstance(Recycler.class);

    // Shared no-op handle returned when pooling is disabled (maxCapacityPerThread == 0):
    // recycling such objects is simply a no-op and they are left for the GC.
    @SuppressWarnings("rawtypes")
    private static final Handle NOOP_HANDLE = new Handle() {
        @Override
        public void recycle(Object object) {
            // NOOP
        }
    };
    private static final AtomicInteger ID_GENERATOR = new AtomicInteger(Integer.MIN_VALUE);
    // Id stamped on handles recycled by the owning thread itself (see Stack.pushNow).
    private static final int OWN_THREAD_ID = ID_GENERATOR.getAndIncrement();
    private static final int DEFAULT_INITIAL_MAX_CAPACITY_PER_THREAD = 4 * 1024; // Use 4k instances as default.
    private static final int DEFAULT_MAX_CAPACITY_PER_THREAD;
    private static final int INITIAL_CAPACITY;
    private static final int MAX_SHARED_CAPACITY_FACTOR;
    private static final int MAX_DELAYED_QUEUES_PER_THREAD;
    private static final int LINK_CAPACITY;
    private static final int RATIO;

    static {
        // In the future, we might have different maxCapacity for different object types.
        // e.g. io.netty.recycler.maxCapacity.writeTask
        //      io.netty.recycler.maxCapacity.outboundBuffer
        int maxCapacityPerThread = SystemPropertyUtil.getInt("io.netty.recycler.maxCapacityPerThread",
                SystemPropertyUtil.getInt("io.netty.recycler.maxCapacity", DEFAULT_INITIAL_MAX_CAPACITY_PER_THREAD));
        if (maxCapacityPerThread < 0) {
            maxCapacityPerThread = DEFAULT_INITIAL_MAX_CAPACITY_PER_THREAD;
        }

        DEFAULT_MAX_CAPACITY_PER_THREAD = maxCapacityPerThread;

        MAX_SHARED_CAPACITY_FACTOR = max(2,
                SystemPropertyUtil.getInt("io.netty.recycler.maxSharedCapacityFactor",
                        2));

        MAX_DELAYED_QUEUES_PER_THREAD = max(0,
                SystemPropertyUtil.getInt("io.netty.recycler.maxDelayedQueuesPerThread",
                        // We use the same value as default EventLoop number
                        NettyRuntime.availableProcessors() * 2));

        LINK_CAPACITY = safeFindNextPositivePowerOfTwo(
                max(SystemPropertyUtil.getInt("io.netty.recycler.linkCapacity", 16), 16));

        // By default we allow one push to a Recycler for each 8th try on handles that were never recycled before.
        // This should help to slowly increase the capacity of the recycler while not be too sensitive to allocation
        // bursts.
        RATIO = safeFindNextPositivePowerOfTwo(SystemPropertyUtil.getInt("io.netty.recycler.ratio", 8));

        if (logger.isDebugEnabled()) {
            if (DEFAULT_MAX_CAPACITY_PER_THREAD == 0) {
                logger.debug("-Dio.netty.recycler.maxCapacityPerThread: disabled");
                logger.debug("-Dio.netty.recycler.maxSharedCapacityFactor: disabled");
                logger.debug("-Dio.netty.recycler.linkCapacity: disabled");
                logger.debug("-Dio.netty.recycler.ratio: disabled");
            } else {
                logger.debug("-Dio.netty.recycler.maxCapacityPerThread: {}", DEFAULT_MAX_CAPACITY_PER_THREAD);
                logger.debug("-Dio.netty.recycler.maxSharedCapacityFactor: {}", MAX_SHARED_CAPACITY_FACTOR);
                logger.debug("-Dio.netty.recycler.linkCapacity: {}", LINK_CAPACITY);
                logger.debug("-Dio.netty.recycler.ratio: {}", RATIO);
            }
        }

        INITIAL_CAPACITY = min(DEFAULT_MAX_CAPACITY_PER_THREAD, 256);
    }

    private final int maxCapacityPerThread;
    private final int maxSharedCapacityFactor;
    // Power-of-two recycle ratio: only 1 out of every 'interval' never-recycled handles is kept.
    private final int interval;
    private final int maxDelayedQueuesPerThread;

    private final FastThreadLocal<Stack<T>> threadLocal = new FastThreadLocal<Stack<T>>() {
        @Override
        protected Stack<T> initialValue() {
            return new Stack<T>(Recycler.this, Thread.currentThread(), maxCapacityPerThread, maxSharedCapacityFactor,
                    interval, maxDelayedQueuesPerThread);
        }

        @Override
        protected void onRemoval(Stack<T> value) {
            // Let us remove the WeakOrderQueue from the WeakHashMap directly if its safe to remove some overhead
            if (value.threadRef.get() == Thread.currentThread()) {
                if (DELAYED_RECYCLED.isSet()) {
                    DELAYED_RECYCLED.get().remove(value);
                }
            }
        }
    };

    protected Recycler() {
        this(DEFAULT_MAX_CAPACITY_PER_THREAD);
    }

    protected Recycler(int maxCapacityPerThread) {
        this(maxCapacityPerThread, MAX_SHARED_CAPACITY_FACTOR);
    }

    protected Recycler(int maxCapacityPerThread, int maxSharedCapacityFactor) {
        this(maxCapacityPerThread, maxSharedCapacityFactor, RATIO, MAX_DELAYED_QUEUES_PER_THREAD);
    }

    protected Recycler(int maxCapacityPerThread, int maxSharedCapacityFactor,
                       int ratio, int maxDelayedQueuesPerThread) {
        // Round the ratio up to a power of two so the drop decision stays cheap.
        interval = safeFindNextPositivePowerOfTwo(ratio);
        if (maxCapacityPerThread <= 0) {
            // Pooling disabled: normalize all limits so get() always allocates fresh objects.
            this.maxCapacityPerThread = 0;
            this.maxSharedCapacityFactor = 1;
            this.maxDelayedQueuesPerThread = 0;
        } else {
            this.maxCapacityPerThread = maxCapacityPerThread;
            this.maxSharedCapacityFactor = max(1, maxSharedCapacityFactor);
            this.maxDelayedQueuesPerThread = max(0, maxDelayedQueuesPerThread);
        }
    }

    /**
     * Returns a pooled instance if one is available on the calling thread's stack,
     * otherwise allocates a new one via {@link #newObject(Handle)}.
     */
    @SuppressWarnings("unchecked")
    public final T get() {
        if (maxCapacityPerThread == 0) {
            // Pooling disabled — hand out a handle whose recycle() is a no-op.
            return newObject((Handle<T>) NOOP_HANDLE);
        }
        Stack<T> stack = threadLocal.get();
        DefaultHandle<T> handle = stack.pop();
        if (handle == null) {
            handle = stack.newHandle();
            handle.value = newObject(handle);
        }
        return (T) handle.value;
    }

    /**
     * @deprecated use {@link Handle#recycle(Object)}.
     */
    @Deprecated
    public final boolean recycle(T o, Handle<T> handle) {
        if (handle == NOOP_HANDLE) {
            return false;
        }

        DefaultHandle<T> h = (DefaultHandle<T>) handle;
        if (h.stack.parent != this) {
            // Handle belongs to a different Recycler — refuse to recycle it here.
            return false;
        }

        h.recycle(o);
        return true;
    }

    final int threadLocalCapacity() {
        return threadLocal.get().elements.length;
    }

    final int threadLocalSize() {
        return threadLocal.get().size;
    }

    /**
     * Creates a fresh pooled object bound to the given {@code handle}.
     */
    protected abstract T newObject(Handle<T> handle);

    public interface Handle<T> extends ObjectPool.Handle<T>  { }

    /**
     * Handle tying a pooled object to its owning {@link Stack}. The id fields are used to
     * detect double-recycling: {@code lastRecycledId} is stamped when the handle is queued
     * for recycling, {@code recycleId} when it actually lands back on a stack.
     */
    private static final class DefaultHandle<T> implements Handle<T> {
        int lastRecycledId;
        int recycleId;

        boolean hasBeenRecycled;

        Stack<?> stack;
        Object value;

        DefaultHandle(Stack<?> stack) {
            this.stack = stack;
        }

        @Override
        public void recycle(Object object) {
            if (object != value) {
                throw new IllegalArgumentException("object does not belong to handle");
            }

            Stack<?> stack = this.stack;
            if (lastRecycledId != recycleId || stack == null) {
                throw new IllegalStateException("recycled already");
            }

            stack.push(this);
        }
    }

    // Per-thread map of foreign Stacks to the WeakOrderQueue this thread recycles into.
    // Keys are weak so a dead owner thread's Stack can be collected.
    private static final FastThreadLocal<Map<Stack<?>, WeakOrderQueue>> DELAYED_RECYCLED =
            new FastThreadLocal<Map<Stack<?>, WeakOrderQueue>>() {
        @Override
        protected Map<Stack<?>, WeakOrderQueue> initialValue() {
            return new WeakHashMap<Stack<?>, WeakOrderQueue>();
        }
    };

    // a queue that makes only moderate guarantees about visibility: items are seen in the correct order,
    // but we aren't absolutely guaranteed to ever see anything at all, thereby keeping the queue cheap to maintain
    private static final class WeakOrderQueue extends WeakReference<Thread> {

        // Sentinel stored in DELAYED_RECYCLED once maxDelayedQueues is reached; objects mapped
        // to it are simply dropped.
        static final WeakOrderQueue DUMMY = new WeakOrderQueue();

        // Let Link extend AtomicInteger for intrinsics. The Link itself will be used as writerIndex.
        @SuppressWarnings("serial")
        static final class Link extends AtomicInteger {
            final DefaultHandle<?>[] elements = new DefaultHandle[LINK_CAPACITY];

            int readIndex;
            Link next;
        }

        // Its important this does not hold any reference to either Stack or WeakOrderQueue.
        private static final class Head {
            private final AtomicInteger availableSharedCapacity;

            Link link;

            Head(AtomicInteger availableSharedCapacity) {
                this.availableSharedCapacity = availableSharedCapacity;
            }

            /**
             * Reclaim all used space and also unlink the nodes to prevent GC nepotism.
             */
            void reclaimAllSpaceAndUnlink() {
                Link head = link;
                link = null;
                int reclaimSpace = 0;
                while (head != null) {
                    reclaimSpace += LINK_CAPACITY;
                    Link next = head.next;
                    // Unlink to help  GC and guard against GC nepotism.
                    head.next = null;
                    head = next;
                }
                if (reclaimSpace > 0) {
                    reclaimSpace(reclaimSpace);
                }
            }

            private void reclaimSpace(int space) {
                availableSharedCapacity.addAndGet(space);
            }

            void relink(Link link) {
                reclaimSpace(LINK_CAPACITY);
                this.link = link;
            }

            /**
             * Creates a new {@link} and returns it if we can reserve enough space for it, otherwise it
             * returns {@code null}.
             */
            Link newLink() {
                return reserveSpaceForLink(availableSharedCapacity) ? new Link() : null;
            }

            // CAS-loop reservation of LINK_CAPACITY slots from the shared budget; returns false
            // when the budget is exhausted (caller then drops the object instead of growing).
            static boolean reserveSpaceForLink(AtomicInteger availableSharedCapacity) {
                for (;;) {
                    int available = availableSharedCapacity.get();
                    if (available < LINK_CAPACITY) {
                        return false;
                    }
                    if (availableSharedCapacity.compareAndSet(available, available - LINK_CAPACITY)) {
                        return true;
                    }
                }
            }
        }

        // chain of data items
        private final Head head;
        private Link tail;
        // pointer to another queue of delayed items for the same stack
        private WeakOrderQueue next;
        private final int id = ID_GENERATOR.getAndIncrement();
        private final int interval;
        private int handleRecycleCount;

        private WeakOrderQueue() {
            super(null);
            head = new Head(null);
            interval = 0;
        }

        private WeakOrderQueue(Stack<?> stack, Thread thread) {
            super(thread);
            tail = new Link();

            // Its important that we not store the Stack itself in the WeakOrderQueue as the Stack also is used in
            // the WeakHashMap as key. So just store the enclosed AtomicInteger which should allow to have the
            // Stack itself GCed.
            head = new Head(stack.availableSharedCapacity);
            head.link = tail;
            interval = stack.interval;
            handleRecycleCount = interval; // Start at interval so the first one will be recycled.
        }

        static WeakOrderQueue newQueue(Stack<?> stack, Thread thread) {
            // We allocated a Link so reserve the space
            if (!Head.reserveSpaceForLink(stack.availableSharedCapacity)) {
                return null;
            }
            final WeakOrderQueue queue = new WeakOrderQueue(stack, thread);
            // Done outside of the constructor to ensure WeakOrderQueue.this does not escape the constructor and so
            // may be accessed while its still constructed.
            stack.setHead(queue);

            return queue;
        }

        WeakOrderQueue getNext() {
            return next;
        }

        void setNext(WeakOrderQueue next) {
            assert next != this;
            this.next = next;
        }

        void reclaimAllSpaceAndUnlink() {
            head.reclaimAllSpaceAndUnlink();
            this.next = null;
        }

        void add(DefaultHandle<?> handle) {
            handle.lastRecycledId = id;

            // While we also enforce the recycling ratio when we transfer objects from the WeakOrderQueue to the
            // Stack we better should enforce it as well early. Missing to do so may let the WeakOrderQueue grow
            // very fast without control if the Stack is only drained slowly.
            if (handleRecycleCount < interval) {
                handleRecycleCount++;
                // Drop the item to prevent from recycling too aggressively.
                return;
            }
            handleRecycleCount = 0;

            Link tail = this.tail;
            int writeIndex;
            if ((writeIndex = tail.get()) == LINK_CAPACITY) {
                Link link = head.newLink();
                if (link == null) {
                    // Drop it.
                    return;
                }
                // We allocate a Link so reserve the space
                this.tail = tail = tail.next = link;

                writeIndex = tail.get();
            }
            tail.elements[writeIndex] = handle;
            handle.stack = null;
            // we lazy set to ensure that setting stack to null appears before we unnull it in the owning thread;
            // this also means we guarantee visibility of an element in the queue if we see the index updated
            tail.lazySet(writeIndex + 1);
        }

        boolean hasFinalData() {
            return tail.readIndex != tail.get();
        }

        // transfer as many items as we can from this queue to the stack, returning true if any were transferred
        @SuppressWarnings("rawtypes")
        boolean transfer(Stack<?> dst) {
            Link head = this.head.link;
            if (head == null) {
                return false;
            }

            if (head.readIndex == LINK_CAPACITY) {
                // Current link fully drained — move on to the next one (if any).
                if (head.next == null) {
                    return false;
                }
                head = head.next;
                this.head.relink(head);
            }

            final int srcStart = head.readIndex;
            int srcEnd = head.get();
            final int srcSize = srcEnd - srcStart;
            if (srcSize == 0) {
                return false;
            }

            final int dstSize = dst.size;
            final int expectedCapacity = dstSize + srcSize;

            if (expectedCapacity > dst.elements.length) {
                final int actualCapacity = dst.increaseCapacity(expectedCapacity);
                // The stack could not grow to the full expected capacity — only transfer what fits.
                srcEnd = min(srcStart + actualCapacity - dstSize, srcEnd);
            }

            if (srcStart != srcEnd) {
                final DefaultHandle[] srcElems = head.elements;
                final DefaultHandle[] dstElems = dst.elements;
                int newDstSize = dstSize;
                for (int i = srcStart; i < srcEnd; i++) {
                    DefaultHandle<?> element = srcElems[i];
                    if (element.recycleId == 0) {
                        element.recycleId = element.lastRecycledId;
                    } else if (element.recycleId != element.lastRecycledId) {
                        throw new IllegalStateException("recycled already");
                    }
                    srcElems[i] = null;

                    if (dst.dropHandle(element)) {
                        // Drop the object.
                        continue;
                    }
                    element.stack = dst;
                    dstElems[newDstSize ++] = element;
                }

                if (srcEnd == LINK_CAPACITY && head.next != null) {
                    // Add capacity back as the Link is GCed.
                    this.head.relink(head.next);
                }

                head.readIndex = srcEnd;
                if (dst.size == newDstSize) {
                    // Everything transferred was dropped by the recycle ratio — report no progress.
                    return false;
                }
                dst.size = newDstSize;
                return true;
            } else {
                // The destination stack is full already.
                return false;
            }
        }
    }

    private static final class Stack<T> {

        // we keep a queue of per-thread queues, which is appended to once only, each time a new thread other
        // than the stack owner recycles: when we run out of items in our stack we iterate this collection
        // to scavenge those that can be reused. this permits us to incur minimal thread synchronisation whilst
        // still recycling all items.
        final Recycler<T> parent;

        // We store the Thread in a WeakReference as otherwise we may be the only ones that still hold a strong
        // Reference to the Thread itself after it died because DefaultHandle will hold a reference to the Stack.
        //
        // The biggest issue is if we do not use a WeakReference the Thread may not be able to be collected at all if
        // the user will store a reference to the DefaultHandle somewhere and never clear this reference (or not clear
        // it in a timely manner).
        final WeakReference<Thread> threadRef;
        final AtomicInteger availableSharedCapacity;
        private final int maxDelayedQueues;

        private final int maxCapacity;
        private final int interval;
        DefaultHandle<?>[] elements;
        int size;
        private int handleRecycleCount;
        private WeakOrderQueue cursor, prev;
        private volatile WeakOrderQueue head;

        Stack(Recycler<T> parent, Thread thread, int maxCapacity, int maxSharedCapacityFactor,
              int interval, int maxDelayedQueues) {
            this.parent = parent;
            threadRef = new WeakReference<Thread>(thread);
            this.maxCapacity = maxCapacity;
            availableSharedCapacity = new AtomicInteger(max(maxCapacity / maxSharedCapacityFactor, LINK_CAPACITY));
            elements = new DefaultHandle[min(INITIAL_CAPACITY, maxCapacity)];
            this.interval = interval;
            handleRecycleCount = interval; // Start at interval so the first one will be recycled.
            this.maxDelayedQueues = maxDelayedQueues;
        }

        // Marked as synchronized to ensure this is serialized.
        synchronized void setHead(WeakOrderQueue queue) {
            queue.setNext(head);
            head = queue;
        }

        int increaseCapacity(int expectedCapacity) {
            int newCapacity = elements.length;
            int maxCapacity = this.maxCapacity;
            do {
                newCapacity <<= 1;
            } while (newCapacity < expectedCapacity && newCapacity < maxCapacity);

            newCapacity = min(newCapacity, maxCapacity);
            if (newCapacity != elements.length) {
                elements = Arrays.copyOf(elements, newCapacity);
            }

            return newCapacity;
        }

        @SuppressWarnings({ "unchecked", "rawtypes" })
        DefaultHandle<T> pop() {
            int size = this.size;
            if (size == 0) {
                if (!scavenge()) {
                    return null;
                }
                size = this.size;
                if (size <= 0) {
                    // double check, avoid races
                    return null;
                }
            }
            size --;
            DefaultHandle ret = elements[size];
            elements[size] = null;
            // As we already set the element[size] to null we also need to store the updated size before we do
            // any validation. Otherwise we may see a null value when later try to pop again without a new element
            // added before.
            this.size = size;

            if (ret.lastRecycledId != ret.recycleId) {
                throw new IllegalStateException("recycled multiple times");
            }
            ret.recycleId = 0;
            ret.lastRecycledId = 0;
            return ret;
        }

        private boolean scavenge() {
            // continue an existing scavenge, if any
            if (scavengeSome()) {
                return true;
            }

            // reset our scavenge cursor
            prev = null;
            cursor = head;
            return false;
        }

        private boolean scavengeSome() {
            WeakOrderQueue prev;
            WeakOrderQueue cursor = this.cursor;
            if (cursor == null) {
                prev = null;
                cursor = head;
                if (cursor == null) {
                    return false;
                }
            } else {
                prev = this.prev;
            }

            boolean success = false;
            do {
                if (cursor.transfer(this)) {
                    success = true;
                    break;
                }
                WeakOrderQueue next = cursor.getNext();
                if (cursor.get() == null) {
                    // If the thread associated with the queue is gone, unlink it, after
                    // performing a volatile read to confirm there is no data left to collect.
                    // We never unlink the first queue, as we don't want to synchronize on updating the head.
                    if (cursor.hasFinalData()) {
                        for (;;) {
                            if (cursor.transfer(this)) {
                                success = true;
                            } else {
                                break;
                            }
                        }
                    }

                    if (prev != null) {
                        // Ensure we reclaim all space before dropping the WeakOrderQueue to be GC'ed.
                        cursor.reclaimAllSpaceAndUnlink();
                        prev.setNext(next);
                    }
                } else {
                    prev = cursor;
                }

                cursor = next;

            } while (cursor != null && !success);

            this.prev = prev;
            this.cursor = cursor;
            return success;
        }

        void push(DefaultHandle<?> item) {
            Thread currentThread = Thread.currentThread();
            if (threadRef.get() == currentThread) {
                // The current Thread is the thread that belongs to the Stack, we can try to push the object now.
                pushNow(item);
            } else {
                // The current Thread is not the one that belongs to the Stack
                // (or the Thread that belonged to the Stack was collected already), we need to signal that the push
                // happens later.
                pushLater(item, currentThread);
            }
        }

        private void pushNow(DefaultHandle<?> item) {
            if ((item.recycleId | item.lastRecycledId) != 0) {
                throw new IllegalStateException("recycled already");
            }
            item.recycleId = item.lastRecycledId = OWN_THREAD_ID;

            int size = this.size;
            if (size >= maxCapacity || dropHandle(item)) {
                // Hit the maximum capacity or should drop - drop the possibly youngest object.
                return;
            }
            if (size == elements.length) {
                elements = Arrays.copyOf(elements, min(size << 1, maxCapacity));
            }

            elements[size] = item;
            this.size = size + 1;
        }

        private void pushLater(DefaultHandle<?> item, Thread thread) {
            if (maxDelayedQueues == 0) {
                // We don't support recycling across threads and should just drop the item on the floor.
                return;
            }

            // we don't want to have a ref to the queue as the value in our weak map
            // so we null it out; to ensure there are no races with restoring it later
            // we impose a memory ordering here (no-op on x86)
            Map<Stack<?>, WeakOrderQueue> delayedRecycled = DELAYED_RECYCLED.get();
            WeakOrderQueue queue = delayedRecycled.get(this);
            if (queue == null) {
                if (delayedRecycled.size() >= maxDelayedQueues) {
                    // Add a dummy queue so we know we should drop the object
                    delayedRecycled.put(this, WeakOrderQueue.DUMMY);
                    return;
                }
                // Check if we already reached the maximum number of delayed queues and if we can allocate at all.
                if ((queue = newWeakOrderQueue(thread)) == null) {
                    // drop object
                    return;
                }
                delayedRecycled.put(this, queue);
            } else if (queue == WeakOrderQueue.DUMMY) {
                // drop object
                return;
            }

            queue.add(item);
        }

        /**
         * Allocate a new {@link WeakOrderQueue} or return {@code null} if not possible.
         */
        private WeakOrderQueue newWeakOrderQueue(Thread thread) {
            return WeakOrderQueue.newQueue(this, thread);
        }

        // Enforces the recycle ratio on never-before-recycled handles: returns true (drop) for all
        // but one out of every 'interval' candidates.
        boolean dropHandle(DefaultHandle<?> handle) {
            if (!handle.hasBeenRecycled) {
                if (handleRecycleCount < interval) {
                    handleRecycleCount++;
                    // Drop the object.
                    return true;
                }
                handleRecycleCount = 0;
                handle.hasBeenRecycled = true;
            }
            return false;
        }

        DefaultHandle<T> newHandle() {
            return new DefaultHandle<T>(this);
        }
    }
}