List of usage examples for java.util.concurrent.ExecutorService.execute(Runnable)
void execute(Runnable command);
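Before the examples taken from real projects, here is a minimal sketch of the basic pattern they all share: submit Runnable tasks with execute(), then shut the pool down and wait for the work to finish. The pool size and task body below are illustrative only and are not taken from any of the projects listed further down.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ExecuteExample {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 10; i++) {
            final int taskId = i;
            // execute() takes a Runnable and returns nothing; use submit() if a Future is needed
            pool.execute(new Runnable() {
                @Override
                public void run() {
                    System.out.println("Running task " + taskId + " on " + Thread.currentThread().getName());
                }
            });
        }
        pool.shutdown();                            // stop accepting new tasks
        pool.awaitTermination(1, TimeUnit.MINUTES); // wait for the submitted tasks to finish
    }
}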
From source file: org.deeplearning4j.text.invertedindex.LuceneInvertedIndex.java

@Override
public void eachDocWithLabel(final Function<Pair<List<T>, String>, Void> func, ExecutorService exec) {
    int[] docIds = allDocs();
    for (int i : docIds) {
        final int j = i;
        exec.execute(new Runnable() {
            @Override
            public void run() {
                func.apply(documentWithLabel(j));
            }
        });
    }
}
From source file: org.deeplearning4j.text.invertedindex.LuceneInvertedIndex.java

@Override
public void eachDoc(final Function<List<T>, Void> func, ExecutorService exec) {
    int[] docIds = allDocs();
    for (int i : docIds) {
        final int j = i;
        exec.execute(new Runnable() {
            @Override
            public void run() {
                func.apply(document(j));
            }
        });
    }
}
From source file: org.deeplearning4j.text.invertedindex.LuceneInvertedIndex.java

@Override
public void eachDocWithLabels(final Function<Pair<List<T>, Collection<String>>, Void> func, ExecutorService exec) {
    int[] docIds = allDocs();
    for (int i : docIds) {
        final int j = i;
        exec.execute(new Runnable() {
            @Override
            public void run() {
                func.apply(documentWithLabels(j));
            }
        });
    }
}
From source file: org.copperengine.core.persistent.cassandra.CassandraStorage.java

@Override
public void initialize(final HybridDBStorageAccessor internalStorageAccessor, int numberOfThreads) throws Exception {
    createSchema(session, cluster);
    prepareStatements();
    // TODO: instead of blocking startup until all active workflow instances are read and resumed, it is
    // sufficient to read just their existing IDs in COP_WFI_ID and resume them in the background while
    // already starting the engine and accepting new instances.
    if (numberOfThreads <= 0)
        numberOfThreads = 1;
    logger.info("Starting to initialize with {} threads ...", numberOfThreads);
    final ExecutorService execService = Executors.newFixedThreadPool(numberOfThreads);
    final long startTS = System.currentTimeMillis();
    final ResultSet rs = session.execute(preparedStatements.get(CQL_SEL_WFI_ID_ALL).bind().setFetchSize(500)
            .setConsistencyLevel(ConsistencyLevel.ONE));
    int counter = 0;
    Row row;
    while ((row = rs.one()) != null) {
        counter++;
        final String wfId = row.getString("ID");
        execService.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    resume(wfId, internalStorageAccessor);
                } catch (Exception e) {
                    logger.error("resume failed", e);
                }
            }
        });
    }
    logger.info("Read {} IDs in {} msec", counter, System.currentTimeMillis() - startTS);
    execService.shutdown();
    final boolean timeoutHappened = !execService.awaitTermination(initializationTimeoutSeconds, TimeUnit.SECONDS);
    if (timeoutHappened) {
        throw new CopperRuntimeException("initialize timed out!");
    }
    logger.info("Finished initialization - read {} rows in {} msec", counter,
            System.currentTimeMillis() - startTS);
    runtimeStatisticsCollector.submit("storage.init", counter, System.currentTimeMillis() - startTS,
            TimeUnit.MILLISECONDS);
}
From source file: org.apache.nifi.controller.scheduling.TestStandardProcessScheduler.java

/**
 * Validates that in a multi-threaded environment an enabling service can still
 * be disabled. This test is set up in such a way that disabling of the
 * service could be initiated by both the disable and the enable methods. In other
 * words, it tests two conditions in
 * {@link StandardControllerServiceNode#disable(java.util.concurrent.ScheduledExecutorService, Heartbeater)}
 * where the disabling of the service can be initiated right there (if
 * ENABLED), or, if the service is still enabling, its disabling will be deferred
 * to the logic in
 * {@link StandardControllerServiceNode#enable(java.util.concurrent.ScheduledExecutorService, long, Heartbeater)}.
 * In any event, the resulting state of the service is DISABLED.
 */
@Test
@Ignore
public void validateEnabledDisableMultiThread() throws Exception {
    final ProcessScheduler scheduler = createScheduler();
    final StandardControllerServiceProvider provider = new StandardControllerServiceProvider(controller,
            scheduler, null, stateMgrProvider, variableRegistry, nifiProperties);
    final ExecutorService executor = Executors.newCachedThreadPool();
    for (int i = 0; i < 200; i++) {
        final ControllerServiceNode serviceNode = provider.createControllerService(
                RandomShortDelayEnablingService.class.getName(), "1",
                systemBundle.getBundleDetails().getCoordinate(), null, false);
        executor.execute(new Runnable() {
            @Override
            public void run() {
                scheduler.enableControllerService(serviceNode);
            }
        });
        Thread.sleep(10); // ensure that enable gets initiated before disable
        executor.execute(new Runnable() {
            @Override
            public void run() {
                scheduler.disableControllerService(serviceNode);
            }
        });
        Thread.sleep(100);
        assertFalse(serviceNode.isActive());
        assertTrue(serviceNode.getState() == ControllerServiceState.DISABLED);
    }
    // need to sleep a while since we are emulating async invocations on
    // a method that is also internally async
    Thread.sleep(500);
    executor.shutdown();
    executor.awaitTermination(5000, TimeUnit.MILLISECONDS);
}
From source file: de.appsolve.padelcampus.utils.HtmlResourceUtil.java

public void updateCss(final ServletContext context) throws Exception {
    List<Customer> customers = customerDAO.findAll();
    if (customers.isEmpty()) {
        applyCustomerCss(context, getDefaultCssAttributes(), "");
    } else {
        lessCompiler = new LessCompiler();
        lessCompiler.init();
        int availableProcessors = Runtime.getRuntime().availableProcessors();
        LOG.info(String.format("Compiling lesscss with %s cores", availableProcessors));
        ExecutorService executor = Executors.newFixedThreadPool(availableProcessors);
        List<FutureTask<Void>> taskList = new ArrayList<>();
        for (final Customer customer : customers) {
            FutureTask<Void> futureTask = new FutureTask<>(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    try {
                        updateCss(context, customer);
                    } catch (Exception ex) {
                        LOG.error(ex, ex);
                    }
                    return null;
                }
            });
            taskList.add(futureTask);
            executor.execute(futureTask);
        }
        for (FutureTask task : taskList) {
            task.get();
        }
        executor.shutdown();
    }
}
From source file: co.pugo.convert.ConvertServlet.java

/**
 * Downloads image data and encodes it as Base64.
 * @param imageLinks set of image links extracted with extractImageLinks()
 * @return map, key = imageLink, value = Base64-encoded image
 */
private HashMap<String, String> downloadImageData(Set<String> imageLinks) {
    HashMap<String, String> imageData = new HashMap<>();
    ExecutorService service = Executors.newCachedThreadPool();
    for (final String imageLink : imageLinks) {
        RunnableFuture<byte[]> future = new FutureTask<>(new Callable<byte[]>() {
            @Override
            public byte[] call() {
                try {
                    URL srcUrl = new URL(imageLink);
                    URLConnection urlConnection = srcUrl.openConnection();
                    return IOUtils.toByteArray(urlConnection.getInputStream());
                } catch (IOException e) {
                    LOG.severe(e.getMessage());
                    return null;
                }
            }
        });
        service.execute(future);
        try {
            // note: get() is called right after execute(), so each download
            // completes before the next one is submitted
            imageData.put(imageLink, Base64.encodeBase64String(future.get()));
        } catch (InterruptedException | ExecutionException e) {
            LOG.severe(e.getMessage());
        }
    }
    service.shutdown();
    try {
        service.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
    } catch (InterruptedException e) {
        LOG.severe(e.getMessage());
    }
    return imageData;
}
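Because the method above calls future.get() immediately after execute(), the cached thread pool never runs more than one download at a time. The following is a minimal sketch of a variant, not taken from the ConvertServlet source, that submits every task first and collects the results afterwards so the downloads can actually overlap. It uses java.util.Base64 and plain stream copying instead of the Apache helpers, and assumes the usual java.io, java.net, java.util, and java.util.concurrent imports; failed downloads are simply skipped rather than stored as null.

private Map<String, String> downloadImageDataParallel(Set<String> imageLinks) throws InterruptedException {
    Map<String, Future<byte[]>> pending = new HashMap<>();
    ExecutorService service = Executors.newCachedThreadPool();
    // submit every download first so they run concurrently
    for (final String imageLink : imageLinks) {
        FutureTask<byte[]> task = new FutureTask<>(new Callable<byte[]>() {
            @Override
            public byte[] call() throws IOException {
                try (InputStream in = new URL(imageLink).openConnection().getInputStream();
                     ByteArrayOutputStream out = new ByteArrayOutputStream()) {
                    byte[] buffer = new byte[8192];
                    int read;
                    while ((read = in.read(buffer)) != -1) {
                        out.write(buffer, 0, read);
                    }
                    return out.toByteArray();
                }
            }
        });
        pending.put(imageLink, task);
        service.execute(task);
    }
    // collect the results only after everything has been submitted
    Map<String, String> imageData = new HashMap<>();
    for (Map.Entry<String, Future<byte[]>> entry : pending.entrySet()) {
        try {
            imageData.put(entry.getKey(), Base64.getEncoder().encodeToString(entry.getValue().get()));
        } catch (ExecutionException e) {
            // skip images that failed to download
        }
    }
    service.shutdown();
    return imageData;
}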
From source file: configurator.Configurator.java

/**
 * The main method for configuring network devices over telnet. Prepares the data
 * and creates a thread for each device.
 *
 * @param ipText   list of ip ranges, subnets and single ip addresses
 * @param authData authorization data for work on devices
 * @param models   list of models for telnet configuration
 */
void telnetWork(String ipText, AuthenticationData authData, List<String> models) {
    try {
        List<String> ipList = parseIp(ipText);
        ExecutorService exec = Executors.newCachedThreadPool();
        ipList.forEach((ip) -> {
            Snmp snmp = new Snmp(authData.getCommunity());
            String modelOid = mainProperites.getProperty("Identifier_Oid");
            String model = snmp.get(ip, modelOid).trim();
            if (models.contains(model)) {
                try {
                    String modelAddress = getAddressByModelName(model);
                    if (modelAddress != null) {
                        exec.execute(new TelnetConfigureThread(ip, authData, modelAddress));
                    } else {
                        throw new FileNotFoundException();
                    }
                } catch (FileNotFoundException ex) {
                    System.exit(0);
                }
            }
        });
        exec.shutdown();
    } catch (Exception e) {
        System.out.println(e); // ip address is incorrect
    }
}
From source file: org.wso2.carbon.membership.scheme.kubernetes.MesosBasedKubernetesMembershipScheme.java

@Override
public void init() throws ClusteringFault {
    try {
        log.info("Initializing Mesos based Kubernetes membership scheme...");
        nwConfig.getJoin().getMulticastConfig().setEnabled(false);
        nwConfig.getJoin().getAwsConfig().setEnabled(false);
        TcpIpConfig tcpIpConfig = nwConfig.getJoin().getTcpIpConfig();
        tcpIpConfig.setEnabled(true);

        // Try to read parameters from env variables
        String kubernetesMaster = System
                .getenv(KubernetesMembershipSchemeConstants.PARAMETER_NAME_KUBERNETES_MASTER);
        String kubernetesNamespace = System
                .getenv(KubernetesMembershipSchemeConstants.PARAMETER_NAME_KUBERNETES_NAMESPACE);
        String kubernetesMasterUsername = System
                .getenv(KubernetesMembershipSchemeConstants.PARAMETER_NAME_KUBERNETES_MASTER_USERNAME);
        String kubernetesMasterPassword = System
                .getenv(KubernetesMembershipSchemeConstants.PARAMETER_NAME_KUBERNETES_MASTER_PASSWORD);
        String skipMasterVerificationValue = System.getenv(
                KubernetesMembershipSchemeConstants.PARAMETER_NAME_KUBERNETES_MASTER_SKIP_SSL_VERIFICATION);
        String clusterIds = System.getenv(PARAMETER_NAME_CLUSTER_IDS);
        memberId = System.getenv(PARAMETER_NAME_MEMBER_ID);

        // If not available, read from the clustering configuration
        if (StringUtils.isEmpty(kubernetesMaster)) {
            kubernetesMaster = getParameterValue(
                    KubernetesMembershipSchemeConstants.PARAMETER_NAME_KUBERNETES_MASTER);
            if (StringUtils.isEmpty(kubernetesMaster)) {
                throw new ClusteringFault("Kubernetes master parameter not found");
            }
        }
        if (StringUtils.isEmpty(kubernetesNamespace)) {
            kubernetesNamespace = getParameterValue(
                    KubernetesMembershipSchemeConstants.PARAMETER_NAME_KUBERNETES_NAMESPACE, "default");
        }
        if (StringUtils.isEmpty(kubernetesMasterUsername)) {
            kubernetesMasterUsername = getParameterValue(
                    KubernetesMembershipSchemeConstants.PARAMETER_NAME_KUBERNETES_MASTER_USERNAME, "");
        }
        if (StringUtils.isEmpty(kubernetesMasterPassword)) {
            kubernetesMasterPassword = getParameterValue(
                    KubernetesMembershipSchemeConstants.PARAMETER_NAME_KUBERNETES_MASTER_PASSWORD, "");
        }
        if (StringUtils.isEmpty(skipMasterVerificationValue)) {
            skipMasterVerificationValue = getParameterValue(
                    KubernetesMembershipSchemeConstants.PARAMETER_NAME_KUBERNETES_MASTER_SKIP_SSL_VERIFICATION,
                    "false");
        }
        skipMasterSSLVerification = Boolean.parseBoolean(skipMasterVerificationValue);

        log.info(String.format(
                "Mesos kubernetes clustering configuration: [master] %s [namespace] %s [skip-master-ssl-verification] %s",
                kubernetesMaster, kubernetesNamespace, skipMasterSSLVerification));

        if (StringUtils.isEmpty(clusterIds)) {
            clusterIds = getParameterValue(PARAMETER_NAME_CLUSTER_IDS);
        }
        if (clusterIds == null) {
            throw new RuntimeException(PARAMETER_NAME_CLUSTER_IDS + " parameter not found");
        }
        if (memberId == null) {
            throw new RuntimeException(
                    PARAMETER_NAME_MEMBER_ID + " parameter not found in System parameters");
        }
        String[] clusterIdArray = clusterIds.split(",");

        if (!waitForTopologyInitialization()) {
            return;
        }

        List<KubernetesService> kubernetesServices = new ArrayList<>();
        try {
            TopologyManager.acquireReadLock();
            for (String clusterId : clusterIdArray) {
                Cluster cluster = TopologyManager.getTopology().getCluster(clusterId.trim());
                if (cluster == null) {
                    throw new RuntimeException("Cluster not found in topology: [cluster-id] " + clusterId);
                }
                if (cluster.isKubernetesCluster()) {
                    log.info("Reading Kubernetes services of cluster: [cluster-id] " + clusterId);
                    kubernetesServices.addAll(getKubernetesServicesOfCluster(cluster));
                } else {
                    log.info("Cluster " + clusterId + " is not a Kubernetes cluster");
                }
            }
        } finally {
            TopologyManager.releaseReadLock();
        }

        for (KubernetesService k8sService : kubernetesServices) {
            // Check if the Service is related to clustering, by checking if the service name
            // is equal to the port mapping name. Only that particular Service will be selected.
            if (HZ_CLUSTERING_PORT_MAPPING_NAME.equalsIgnoreCase(k8sService.getPortName())) {
                log.info("Found the relevant Service [ " + k8sService.getId() + " ] for the "
                        + "port mapping name: " + HZ_CLUSTERING_PORT_MAPPING_NAME);
                this.hazelcastMappingPort = k8sService.getContainerPort();
                log.info("Kubernetes service: " + k8sService.getId() + ", clustering port: "
                        + k8sService.getContainerPort());
                List<String> hostIPandPortTuples = findHostIPandPortTuples(kubernetesMaster,
                        kubernetesNamespace, k8sService.getId(), kubernetesMasterUsername,
                        kubernetesMasterPassword, k8sService.getContainerPort());
                for (String hostIPandPortTuple : hostIPandPortTuples) {
                    tcpIpConfig.addMember(hostIPandPortTuple);
                    log.info("Member added to cluster configuration: [host-ip,host-port] " + hostIPandPortTuple);
                }
            }
        }
        log.info("Mesos based Kubernetes membership scheme initialized successfully");

        ExecutorService executorService = Executors.newSingleThreadExecutor();
        final TCPForwardServer tcpForwardServer = new TCPForwardServer(this.hazelcastMappingPort,
                this.memberHostPort);
        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                TCPForwardServer.isRunning = false;
            }
        });
        executorService.execute(tcpForwardServer);
        log.info("TCP forwarding server started for [source-port]: " + this.hazelcastMappingPort
                + " [destination-port]: " + this.memberHostPort);
    } catch (Exception e) {
        log.error(e);
        throw new ClusteringFault("Mesos based Kubernetes membership initialization failed.", e);
    }
}
From source file: es.us.lsi.restest.engine.UnirestTest.java

private void makeParallelRequests() throws InterruptedException {
    ExecutorService newFixedThreadPool = Executors.newFixedThreadPool(10);
    final AtomicInteger counter = new AtomicInteger(0);
    for (int i = 0; i < 200; i++) {
        newFixedThreadPool.execute(new Runnable() {
            public void run() {
                try {
                    Unirest.get("http://httpbin.org/get").queryString("index", counter.incrementAndGet())
                            .asJson();
                } catch (UnirestException e) {
                    throw new RuntimeException(e);
                }
            }
        });
    }
    newFixedThreadPool.shutdown();
    newFixedThreadPool.awaitTermination(10, TimeUnit.MINUTES);
}
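Most of the examples above end with shutdown() followed by awaitTermination(). None of them handle the case where the timeout elapses while tasks are still running. Below is a minimal sketch of the commonly recommended shutdown sequence that also forces termination in that case; the method name and the 60-second timeout are illustrative and not taken from any of the projects above.

void shutdownAndAwait(ExecutorService pool) {
    pool.shutdown(); // stop accepting new tasks, let queued tasks finish
    try {
        if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
            pool.shutdownNow(); // interrupt tasks that are still running
            if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                System.err.println("Pool did not terminate");
            }
        }
    } catch (InterruptedException e) {
        pool.shutdownNow();
        Thread.currentThread().interrupt(); // preserve the interrupt status
    }
}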