Example usage for java.util.concurrent ThreadFactory

Introduction

On this page you can find example usage of java.util.concurrent.ThreadFactory, drawn from open source projects.

Prototype

public interface ThreadFactory {
    Thread newThread(Runnable r);
}
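
Implementations usually just wrap new Thread(r) and set a name, daemon flag, or context class loader before returning it, as the examples below show. A minimal, self-contained sketch of that pattern (the class and thread names here are illustrative, not taken from any of the projects below):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class NamedThreadFactoryExample {
    public static void main(String[] args) throws InterruptedException {
        // Factory that gives every pool thread a readable, numbered name.
        ThreadFactory factory = new ThreadFactory() {
            private final AtomicInteger counter = new AtomicInteger(0);

            @Override
            public Thread newThread(Runnable r) {
                Thread t = new Thread(r, "example-worker-" + counter.incrementAndGet());
                t.setDaemon(true); // illustrative: do not let these workers keep the JVM alive
                return t;
            }
        };

        ExecutorService pool = Executors.newFixedThreadPool(2, factory);
        pool.submit(new Runnable() {
            @Override
            public void run() {
                System.out.println(Thread.currentThread().getName());
            }
        });
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
    }
}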

Usage

From source file:io.gravitee.gateway.services.apikeyscache.ApiKeysCacheService.java

@Override
protected void doStart() throws Exception {
    if (enabled) {
        super.doStart();

        LOGGER.info("Overriding API key repository implementation with cached API Key repository");
        DefaultListableBeanFactory beanFactory = (DefaultListableBeanFactory) ((ConfigurableApplicationContext) applicationContext
                .getParent()).getBeanFactory();

        this.apiKeyRepository = beanFactory.getBean(ApiKeyRepository.class);
        LOGGER.debug("Current API key repository implementation is {}", apiKeyRepository.getClass().getName());

        String[] beanNames = beanFactory.getBeanNamesForType(ApiKeyRepository.class);
        String oldBeanName = beanNames[0];

        beanFactory.destroySingleton(oldBeanName);

        LOGGER.debug("Register API key repository implementation {}", ApiKeyRepositoryWrapper.class.getName());
        beanFactory.registerSingleton(ApiKeyRepository.class.getName(),
                new ApiKeyRepositoryWrapper(this.apiKeyRepository, cache));

        eventManager.subscribeForEvents(this, ReactorEvent.class);

        executorService = Executors.newScheduledThreadPool(threads, new ThreadFactory() {
            private int counter = 0;
            private String prefix = "apikeys-refresher";

            @Override
            public Thread newThread(Runnable r) {
                return new Thread(r, prefix + '-' + counter++);
            }
        });
    }
}
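
One caveat worth noting in the factory above: counter is a plain int incremented without synchronization, so thread names could repeat if newThread were ever invoked concurrently (later examples on this page synchronize or use atomics for exactly this reason). A possible variant of the same snippet using AtomicInteger (an illustration, not the Gravitee source; it assumes java.util.concurrent.atomic.AtomicInteger is imported):

        executorService = Executors.newScheduledThreadPool(threads, new ThreadFactory() {
            private final AtomicInteger counter = new AtomicInteger(0);
            private final String prefix = "apikeys-refresher";

            @Override
            public Thread newThread(Runnable r) {
                // getAndIncrement keeps the numbering correct even under concurrent calls
                return new Thread(r, prefix + '-' + counter.getAndIncrement());
            }
        });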

From source file:org.bonitasoft.engine.classloader.VirtualClassLoaderTest.java

/**
 * BS-7152 : test the loading of class when calling the JavaMethodInvoker
 *
 * @throws Exception
 */
@Test
public void loadStudentInformation_toVirtualClassLoader_should_be_usable_via_JavaMethodInvoker()
        throws Exception {
    final VirtualClassLoader vcl = new VirtualClassLoader("org.bonitasoft", 1L,
            Thread.currentThread().getContextClassLoader());
    final Map<String, byte[]> resources = new HashMap<String, byte[]>(1);
    resources.put("UOSFaasApplication.jar",
            FileUtils.readFileToByteArray(new File("src/test/resources/UOSFaasApplication.jar")));
    final File tempDir = new File(System.getProperty("java.io.tmpdir"), "VirtualClassLoaderTest");
    final BonitaClassLoader bonitaClassLoader = new BonitaClassLoader(resources, "here", 154L, tempDir.toURI(),
            BonitaClassLoader.class.getClassLoader());

    vcl.setClassLoader(bonitaClassLoader);
    final Object objectToInvokeJavaMethodOn = vcl
            .loadClass("au.edu.sydney.faas.applicationstudent.StudentRequest").getConstructors()[0]
                    .newInstance();
    final Object valueToSetObjectWith = vcl
            .loadClass("au.edu.sydney.faas.applicationstudent.StudentInformation").getConstructors()[0]
                    .newInstance();

    ExecutorService executor = Executors.newSingleThreadExecutor(new ThreadFactory() {

        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setContextClassLoader(vcl);
            return t;
        }
    });

    Future<Object> jmiFuture = executor.submit(new Callable<Object>() {

        @Override
        public Object call() throws Exception {
            try {
                JavaMethodInvoker jmi = new JavaMethodInvoker();
                jmi.invokeJavaMethod("au.edu.sydney.faas.applicationstudent.StudentInformation",
                        valueToSetObjectWith, objectToInvokeJavaMethodOn, "setStudentInformation",
                        "au.edu.sydney.faas.applicationstudent.StudentInformation");
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
            return null;
        }
    });
    jmiFuture.get();

    // To clean
    bonitaClassLoader.destroy();
}

From source file:net.kungfoo.grizzly.proxy.impl.ProxyAdapter.java

/**
 * {@inheritDoc}
 */
public void service(Request request, Response response) throws Exception {
    String uri = request.unparsedURI().toString();

    final MessageBytes method = request.method();
    logURIAndMethod(uri, method);

    if (maxForwards(request, response, method))
        return;

    String targetHost = request.serverName().toString();
    int targetPort = request.getServerPort();

    ProxyProcessingInfo proxyTask = new ProxyProcessingInfo();

    // TODO: think of it.
    synchronized (proxyTask) {

        // from connected

        // Initialize connection state
        proxyTask.setTarget(new HttpHost(targetHost, targetPort));
        proxyTask.setRequest(convert(method.getString(), uri, request));
        proxyTask.setOriginalRequest(request);
        Runnable completion = (Runnable) request.getAttribute(CALLBACK_KEY);
        proxyTask.setCompletion(completion);
        proxyTask.setResponse(response);

        InetSocketAddress address = new InetSocketAddress(targetHost, targetPort);

        if (!IOReactorStatus.ACTIVE.equals(connectingIOReactor.getStatus())) {
            System.err.println("Connecting reactor not running.");
            response.setStatus(500);
            response.setMessage("Internal Booo");
            // complete request.
            ExecutorService executorService = Executors.newFixedThreadPool(1, new ThreadFactory() {
                @Override
                public Thread newThread(Runnable r) {
                    return new Thread(r, "EmergencyService"); //To change body of implemented methods use File | Settings | File Templates.
                }
            });
            executorService.submit(completion);
            return;
        } else {
            connectingIOReactor.connect(address, null, proxyTask, null);
        }

        // from requestReceived
        try {
            System.out.println(request + " [client->proxy] >> " + request.unparsedURI().toString());

            // Update connection state
            proxyTask.setClientState(ConnState.REQUEST_RECEIVED);

            if (request.getContentLength() != 0) {
                proxyTask.setClientState(ConnState.REQUEST_BODY_DONE);
            }
            // See if the client expects a 100-Continue
            if (isExpectContinue(request)) {
                response.setStatus(HttpStatus.SC_CONTINUE);
                response.sendHeaders();
            }
        } catch (IOException ignore) {
            System.out.println("err " + ignore.getMessage());
        }
    }

    // handle "Via", TODO: should go after we have headers from target server.
    response.setHeader(Via.name(), request.protocol() + " antares");// TODO hostname, and Via from response

}

From source file:com.ganji.cateye.flume.kestrel.KestrelRpcClient.java

public KestrelRpcClient() {
    stateLock = new ReentrantLock(true);
    connState = State.INIT;

    threadCounter = new AtomicLong(0);
    // OK to use cached threadpool, because this is simply meant to timeout
    // the calls - and is IO bound.
    callTimeoutPool = Executors.newCachedThreadPool(new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setName(KestrelRpcClient.this.sinkName + "-" + String.valueOf(threadCounter.incrementAndGet()));
            return t;
        }
    });
}
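
The inline comment is the key design point: callTimeoutPool exists only to bound the latency of IO-bound calls. A hedged sketch of how such a pool is typically used to enforce a deadline (doSend, event and requestTimeoutMillis are illustrative names, not taken from KestrelRpcClient):

    // Illustrative only: run the IO-bound call on callTimeoutPool and give up after a deadline.
    Future<Void> future = callTimeoutPool.submit(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
            doSend(event); // hypothetical IO-bound operation
            return null;
        }
    });
    try {
        future.get(requestTimeoutMillis, TimeUnit.MILLISECONDS);
    } catch (TimeoutException e) {
        future.cancel(true); // interrupt the call that overran its deadline
        throw new RuntimeException("Kestrel call timed out", e);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
    } catch (ExecutionException e) {
        throw new RuntimeException(e.getCause());
    }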

From source file:org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService.java

private void addExecutorForVolume(final File volume) {
    ThreadFactory threadFactory = new ThreadFactory() {
        int counter = 0;

        @Override
        public Thread newThread(Runnable r) {
            int thisIndex;
            synchronized (this) {
                thisIndex = counter++;
            }
            Thread t = new Thread(threadGroup, r);
            t.setName("Async disk worker #" + thisIndex + " for volume " + volume);
            return t;
        }
    };

    ThreadPoolExecutor executor = new ThreadPoolExecutor(CORE_THREADS_PER_VOLUME, MAXIMUM_THREADS_PER_VOLUME,
            THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), threadFactory);

    // This can reduce the number of running threads
    executor.allowCoreThreadTimeOut(true);
    executors.put(volume, executor);
}

From source file:org.wso2.carbon.humantask.core.HumanTaskServer.java

/**
 * Scheduler initialisation.
 */
private void initScheduler() {
    ThreadFactory threadFactory = new ThreadFactory() {
        private int threadNumber = 0;

        public Thread newThread(Runnable r) {
            threadNumber += 1;
            Thread t = new Thread(r, "HumanTaskServer-" + threadNumber);
            t.setDaemon(true);
            return t;
        }
    };

    ExecutorService executorService = Executors.newFixedThreadPool(serverConfig.getThreadPoolMaxSize(),
            threadFactory);

    SimpleScheduler simpleScheduler = new SimpleScheduler(new GUID().toString());
    simpleScheduler.setExecutorService(executorService);
    simpleScheduler.setTransactionManager(tnxManager);
    taskEngine.setScheduler(simpleScheduler);
    simpleScheduler.setJobProcessor(new JobProcessorImpl());
    // Start the scheduler within the HumanTaskSchedulerInitializer to ensure that all the tasks are deployed
    // when the scheduler actually starts.
    // simpleScheduler.start();

    scheduler = simpleScheduler;
}

From source file:org.apache.synapse.commons.jmx.ThreadingView.java

public ThreadingView(final String threadNamePrefix) {
    this.threadNamePrefix = threadNamePrefix;
    this.scheduler = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(r, "Thread-view-" + threadNamePrefix);
        }
    });
    initMBean();
}

From source file:org.apache.hadoop.hbase.regionserver.CompactSplitThread.java

/** @param server */
CompactSplitThread(HRegionServer server) {
    super();
    this.server = server;
    this.conf = server.getConfiguration();
    this.regionSplitLimit = conf.getInt("hbase.regionserver.regionSplitLimit", Integer.MAX_VALUE);

    int largeThreads = Math.max(1, conf.getInt("hbase.regionserver.thread.compaction.large", 1));
    int smallThreads = conf.getInt("hbase.regionserver.thread.compaction.small", 1);

    int splitThreads = conf.getInt("hbase.regionserver.thread.split", 1);

    // if we have throttle threads, make sure the user also specified size
    Preconditions.checkArgument(largeThreads > 0 && smallThreads > 0);

    final String n = Thread.currentThread().getName();

    this.longCompactions = new ThreadPoolExecutor(largeThreads, largeThreads, 60, TimeUnit.SECONDS,
            new PriorityBlockingQueue<Runnable>(), new ThreadFactory() {
                @Override
                public Thread newThread(Runnable r) {
                    Thread t = new Thread(r);
                    t.setName(n + "-longCompactions-" + System.currentTimeMillis());
                    return t;
                }
            });
    this.longCompactions.setRejectedExecutionHandler(new Rejection());
    this.shortCompactions = new ThreadPoolExecutor(smallThreads, smallThreads, 60, TimeUnit.SECONDS,
            new PriorityBlockingQueue<Runnable>(), new ThreadFactory() {
                @Override
                public Thread newThread(Runnable r) {
                    Thread t = new Thread(r);
                    t.setName(n + "-shortCompactions-" + System.currentTimeMillis());
                    return t;
                }
            });
    this.shortCompactions.setRejectedExecutionHandler(new Rejection());
    this.splits = (ThreadPoolExecutor) Executors.newFixedThreadPool(splitThreads, new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setName(n + "-splits-" + System.currentTimeMillis());
            return t;
        }
    });
    int mergeThreads = conf.getInt("hbase.regionserver.thread.merge", 1);
    this.mergePool = (ThreadPoolExecutor) Executors.newFixedThreadPool(mergeThreads, new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setName(n + "-merges-" + System.currentTimeMillis());
            return t;
        }
    });
}
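
All four pools above repeat the same anonymous factory, differing only in the name suffix. Purely as an illustration (this helper is not part of the HBase CompactSplitThread source), the duplication could be factored into a small method:

    // Hypothetical helper: builds a factory that names threads "<parentName>-<suffix>-<timestamp>".
    private static ThreadFactory namedFactory(final String parentName, final String suffix) {
        return new ThreadFactory() {
            @Override
            public Thread newThread(Runnable r) {
                Thread t = new Thread(r);
                t.setName(parentName + "-" + suffix + "-" + System.currentTimeMillis());
                return t;
            }
        };
    }

With it, the splits pool, for example, could be created as Executors.newFixedThreadPool(splitThreads, namedFactory(n, "splits")).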

From source file:org.hyperic.hq.measurement.agent.server.TopNScheduler.java

private void createSender() {
    sender = Executors.newScheduledThreadPool(1, new ThreadFactory() {
        private final AtomicLong i = new AtomicLong(0);

        public Thread newThread(Runnable r) {
            return new Thread(r, "TopNSender" + i.getAndIncrement());
        }
    });

    sender.scheduleAtFixedRate(new Runnable() {
        public void run() {
            boolean success;
            List<TopReport> reports = new ArrayList<TopReport>();
            for (TopReport report : storage.<TopReport>getObjectsFromFolder(DATA_FOLDERNAME, MAX_BATCHSIZE)) {
                reports.add(report);

            }
            // If we don't have anything to send -- move along
            if (reports.isEmpty()) {
                log.debug("No TopN records were found in the storage");
                return;
            }
            log.debug("Sending " + reports.size() + " TopN entries " + "to server");
            success = false;
            try {
                TopNSendReport_args report = new TopNSendReport_args();
                if (agentToken == null) {
                    agentToken = storage.getValue(CommandsAPIInfo.PROP_AGENT_TOKEN);
                }
                report.setAgentToken(agentToken);
                report.setTopReports(reports);
                client.topNSendReport(report);
                success = true;
            } catch (AgentCallbackClientException exc) {
                log.error("Error sending TOPN data to server: " + exc.getMessage());
            }

            // delete the records we sent from the storage
            if (success) {
                List<String> filesToDelete = new ArrayList<String>();
                for (TopReport report : reports) {
                    filesToDelete.add(String.valueOf(report.getCreateTime()));
                }
                storage.deleteObjectsFromFolder(DATA_FOLDERNAME,
                        filesToDelete.toArray(new String[filesToDelete.size()]));
            }
        }
        // TimeUnit.MINUTES is not available on Java 5, so the interval is expressed in seconds
    }, SEND_INTERVAL * 60, SEND_INTERVAL * 60, TimeUnit.SECONDS);

}

From source file:com.reactivetechnologies.platform.rest.WebbitRestServerBean.java

/**
 * Instantiate a REST server
 * @param port listening port
 * @param nThreads no of worker threads
 * @param annotatedPkgToScan base package to scan for JAX-RS annotated classes
 */
public WebbitRestServerBean(int port, int nThreads, String annotatedPkgToScan) {
    super();
    this.annotatedPkgToScan = annotatedPkgToScan;
    server = new NettyWebServer(Executors.newFixedThreadPool(nThreads, new ThreadFactory() {
        private int n = 0;

        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r, "REST.Handler-" + (n++));
            t.setDaemon(false);
            return t;
        }
    }), port).uncaughtExceptionHandler(new UncaughtExceptionHandler() {

        @Override
        public void uncaughtException(Thread t, Throwable e) {
            log.error("Worker thread [" + t + "] caught unexpected exception:", e);

        }
    });
    restWrapper = new Rest(server);

}