List of usage examples for java.util.concurrent.TimeUnit.MINUTES
TimeUnit.MINUTES
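Quick reference (not from a project source file): a minimal, runnable sketch of the conversions TimeUnit.MINUTES provides. All methods shown are standard JDK; the values are illustrative.

import java.util.concurrent.TimeUnit;

public class TimeUnitMinutesDemo {
    public static void main(String[] args) throws InterruptedException {
        // Convert 5 minutes into finer units.
        long ms = TimeUnit.MINUTES.toMillis(5);       // 300000
        long sec = TimeUnit.MINUTES.toSeconds(5);     // 300
        long nanos = TimeUnit.MINUTES.toNanos(1);     // 60000000000

        // Convert the other way: 120000 ms expressed in minutes.
        long min = TimeUnit.MINUTES.convert(120000, TimeUnit.MILLISECONDS); // 2

        System.out.printf("%d ms, %d s, %d ns, %d min%n", ms, sec, nanos, min);

        // Sleep for a duration expressed in minutes
        // (equivalent to Thread.sleep(TimeUnit.MINUTES.toMillis(n))).
        TimeUnit.MINUTES.sleep(0); // 0 here so the demo returns immediately
    }
}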
From source file: com.attribyte.essem.ApplicationCache.java

ApplicationCache(final AsyncClient client, final RequestOptions requestOptions,
        final ESEndpoint esEndpoint, final Logger logger) {
    this.client = client;
    this.requestOptions = requestOptions;
    this.esEndpoint = esEndpoint;
    this.logger = logger;

    final BlockingQueue<Runnable> requestQueue = new ArrayBlockingQueue<>(4096);
    final Gauge<Integer> requestQueueSize = new Gauge<Integer>() {
        @Override
        public Integer getValue() {
            return requestQueue.size();
        }
    };

    final ThreadPoolExecutor requestExecutor = new ThreadPoolExecutor(2, 8, 5L, TimeUnit.MINUTES,
            requestQueue, new ThreadFactoryBuilder().setNameFormat("application-cache-%d").build());
    requestExecutor.prestartAllCoreThreads();

    final Counter rejectedRequests = new Counter();
    requestExecutor.setRejectedExecutionHandler(new RejectedExecutionHandler() {
        @Override
        public void rejectedExecution(final Runnable r, final ThreadPoolExecutor executor) {
            rejectedRequests.inc();
        }
    });
    this.requestExecutor = MoreExecutors
            .listeningDecorator(MoreExecutors.getExitingExecutorService(requestExecutor));

    this.appRequestTimer = new Timer();
    this.appRequestErrors = new Counter();
    this.nameRequestTimer = new Timer();
    this.nameRequestErrors = new Counter();
    this.statsRequestTimer = new Timer();
    this.statsRequestErrors = new Counter();

    Gauge<Integer> appCacheSize = new Gauge<Integer>() {
        @Override
        public Integer getValue() {
            return appCache.size();
        }
    };

    this.metrics = ImmutableMap.<String, com.codahale.metrics.Metric>builder()
            .put("request-queue-size", requestQueueSize)
            .put("rejected-background-requests", rejectedRequests)
            .put("app-requests", appRequestTimer)
            .put("app-request-errors", appRequestErrors)
            .put("name-requests", nameRequestTimer)
            .put("name-request-errors", nameRequestErrors)
            .put("app-cache-size", appCacheSize)
            .put("stats-requests", statsRequestTimer)
            .put("stats-request-errors", statsRequestErrors)
            .build();
}
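Aside (not from a source file): the 5L, TimeUnit.MINUTES pair above is the executor's keep-alive, i.e. how long surplus threads beyond the core size may sit idle before being reclaimed. A minimal sketch of just that idiom, with illustrative pool sizes and queue capacity:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

class KeepAliveSketch {
    public static void main(String[] args) {
        // 2 core threads, at most 8; surplus threads die after 5 idle minutes.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(2, 8, 5L, TimeUnit.MINUTES,
                new ArrayBlockingQueue<>(4096));
        // Optionally let the core threads time out on the same 5-minute clock.
        pool.allowCoreThreadTimeOut(true);
        pool.shutdown();
    }
}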
From source file: com.pearson.eidetic.driver.threads.MonitorSnapshotVolumeTime.java

@Override
public void run() {
    Calendar calendar_int = Calendar.getInstance();
    today_ = calendar_int.get(Calendar.DAY_OF_YEAR); // 0-365

    ConcurrentHashMap<Region, ArrayList<Volume>> localVolumeTime;
    localVolumeTime = awsAccount_.getVolumeTime_Copy();

    for (Map.Entry<Region, ArrayList<Volume>> entry : localVolumeTime.entrySet()) {
        Region region = entry.getKey();
        splitFactorDay_.put(region, 10);
        HashSet<Date> newHashSet = new HashSet<>();
        didMySnapshotDay_.put(entry.getKey(), newHashSet);
    }

    addAlreadyDoneTodaySnapshots(localVolumeTime);

    while (true) {
        try {
            // Reset my stuff
            if (isItTomorrow(today_)) {
                calendar_int = Calendar.getInstance();
                today_ = calendar_int.get(Calendar.DAY_OF_YEAR);
                resetDidMySnapshotDay();
            }

            localVolumeTime = awsAccount_.getVolumeTime_Copy();

            for (Map.Entry<Region, ArrayList<Volume>> entry : localVolumeTime.entrySet()) {
                Region region = entry.getKey();
                if (localVolumeTime.get(region).isEmpty()) {
                    continue;
                }
                timeDay_.put(region, extractRunAt(localVolumeTime.get(region)));
            }

            for (Map.Entry<Region, ArrayList<Volume>> entry : localVolumeTime.entrySet()) {
                Region region = entry.getKey();
                if (localVolumeTime.get(region).isEmpty()) {
                    continue;
                }
                timeDay_.get(region).keySet().removeAll(didMySnapshotDay_.get(region));

                Calendar calendar = Calendar.getInstance();
                Date now = calendar.getTime();
                now = dayFormat_.parse(dayFormat_.format(now));

                List<Date> lessThanNow = findLessThanNow(timeDay_.get(region).keySet(), now);

                if (!lessThanNow.isEmpty()) {
                    for (Date date : lessThanNow) {
                        ArrayList<Volume> volumes = timeDay_.get(region).get(date);
                        List<List<Volume>> listOfLists = Lists.partition(volumes, splitFactorDay_.get(region));

                        if (localVolumeTimeListDay_.get(region) == null
                                || localVolumeTimeListDay_.get(region).isEmpty()) {
                            localVolumeTimeListDay_.put(region, listsToArrayLists(listOfLists));
                        } else {
                            try {
                                localVolumeTimeListDay_.get(region).add(listsToArrayLists(listOfLists).get(0));
                            } catch (Exception e) {
                                // ignored
                            }
                        }

                        ArrayList<SnapshotVolumeTime> threads = new ArrayList<>();
                        for (ArrayList<Volume> vols : listsToArrayLists(listOfLists)) {
                            threads.add(new SnapshotVolumeTime(awsAccount_.getAwsAccessKeyId(),
                                    awsAccount_.getAwsSecretKey(), awsAccount_.getUniqueAwsAccountIdentifier(),
                                    awsAccount_.getMaxApiRequestsPerSecond(),
                                    ApplicationConfiguration.getAwsCallRetryAttempts(), region, vols));
                        }

                        didMySnapshotDay_.get(region).add(date);
                        EideticSubThreads_.put(region, threads);
                    }
                }
            }

            // localVolumeTimeListDay_ now has hashmaps of regions with keys of arrays
            // of arrays of volumes to take snapshots of.
            HashMap<Region, Integer> secsSlept = new HashMap<>();
            HashMap<Region, Boolean> allDead = new HashMap<>();

            for (Map.Entry<Region, ArrayList<Volume>> entry : localVolumeTime.entrySet()) {
                Region region = entry.getKey();
                if (localVolumeTimeListDay_.get(region) == null
                        || localVolumeTimeListDay_.get(region).isEmpty()) {
                    continue;
                }
                // Initializing content
                secsSlept.put(region, 0);
                allDead.put(region, false);

                // Run the snapshot threads in a fixed pool, waiting at most 300 minutes.
                Threads.threadExecutorFixedPool(EideticSubThreads_.get(region), splitFactorDay_.get(region),
                        300, TimeUnit.MINUTES);
            }

            // Let's see if they're dead.
            Boolean ejection = false;
            Boolean theyreDead;
            while (true) {
                for (Map.Entry<Region, ArrayList<SnapshotVolumeTime>> entry : EideticSubThreads_.entrySet()) {
                    Region region = entry.getKey();
                    if (areAllThreadsDead(EideticSubThreads_.get(region))) {
                        allDead.put(region, true);
                    } else {
                        secsSlept.replace(region, secsSlept.get(region), secsSlept.get(region) + 1);
                        if (secsSlept.get(region) > 1800) {
                            splitFactorDay_.replace(region, splitFactorDay_.get(region),
                                    splitFactorDay_.get(region) + 1);
                            logger.info("Event=\"increasing_splitFactor\", Monitor=\"SnapshotVolumeTime\", splitFactor=\""
                                    + Integer.toString(splitFactorDay_.get(region)) + "\", VolumeTimeSize=\""
                                    + Integer.toString(localVolumeTime.get(region).size()) + "\"");
                            ejection = true;
                            break;
                        }
                    }
                }

                // I don't like this
                theyreDead = true;
                for (Map.Entry<Region, ArrayList<SnapshotVolumeTime>> entry : EideticSubThreads_.entrySet()) {
                    Region region = entry.getKey();
                    // If any of them have false
                    if (!allDead.get(region)) {
                        theyreDead = false;
                    }
                }

                if (ejection || theyreDead) {
                    break;
                }

                Threads.sleepSeconds(1);
            }

            // See if we can decrease the split factor.
            for (Map.Entry<Region, ArrayList<SnapshotVolumeTime>> entry : EideticSubThreads_.entrySet()) {
                Region region = entry.getKey();
                int timeRemaining = 1800 - secsSlept.get(region);
                if ((splitFactorDay_.get(region) > 5) && (timeRemaining > 60)) {
                    splitFactorDay_.replace(region, splitFactorDay_.get(region),
                            splitFactorDay_.get(region) - 1);
                    logger.info("awsAccountNickname=\"" + awsAccount_.getUniqueAwsAccountIdentifier()
                            + "\",Event=\"decreasing_splitFactor\", Monitor=\"SnapshotVolumeNoTime\", splitFactor=\""
                            + Integer.toString(splitFactorDay_.get(region)) + "\", VolumeNoTimeSize=\""
                            + Integer.toString(localVolumeTime.get(region).size()) + "\"");
                }
            }

            localVolumeTimeListDay_.clear();
            EideticSubThreads_.clear();

            Threads.sleepSeconds(30);
        } catch (Exception e) {
            logger.error("awsAccountNickname=\"" + awsAccount_.getUniqueAwsAccountIdentifier()
                    + "\",Error=\"MonitorSnapshotVolumeTimeFailure\", stacktrace=\"" + e.toString()
                    + System.lineSeparator() + StackTrace.getStringFromStackTrace(e) + "\"");
        }
    }
}
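Aside: Threads.threadExecutorFixedPool above is a project helper that runs the threads and waits up to 300 minutes. With the plain JDK, the usual way to run a batch with a bounded wait is ExecutorService.invokeAll with a timeout; a minimal sketch with an illustrative task list:

import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

class BoundedBatchSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(10);
        List<Callable<Void>> tasks = List.of(() -> null, () -> null);
        // Wait at most 300 minutes; tasks still running at the deadline are cancelled.
        List<Future<Void>> futures = pool.invokeAll(tasks, 300, TimeUnit.MINUTES);
        pool.shutdown();
    }
}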
From source file: com.tfm.utad.sqoopdata.SqoopVerticaDB.java

private static void findBetweenMinIDAndMaxID(Connection conn, Long minID, Long maxID) {
    Statement stmt = null;
    String query;
    try {
        stmt = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
        query = "SELECT * FROM s1.coordinates WHERE id > " + minID + " AND id <= " + maxID;
        LOG.info("Query execution: " + query);
        ResultSet rs = stmt.executeQuery(query);
        int batch = 0;
        List<CoordinateCartoDB> result = new ArrayList<>();
        long start_time = System.currentTimeMillis();
        while (rs.next()) {
            batch++;
            CoordinateCartoDB cdb = new CoordinateCartoDB((long) rs.getInt("id"), rs.getString("userstr"),
                    rs.getString("created_date"), rs.getString("activity"), rs.getFloat("latitude"),
                    rs.getFloat("longitude"), (long) rs.getInt("userid"));
            result.add(cdb);
            if (batch == 50) {
                sendDataToCartoDB(result);
                batch = 0;
                result = new ArrayList<>();
            }
        }
        if (batch > 0) {
            sendDataToCartoDB(result);
        }
        long end_time = System.currentTimeMillis();
        long difference = end_time - start_time;
        LOG.info("CartoDB API execution time: "
                + String.format("%d min %d sec", TimeUnit.MILLISECONDS.toMinutes(difference),
                        TimeUnit.MILLISECONDS.toSeconds(difference)
                                - TimeUnit.MINUTES.toSeconds(TimeUnit.MILLISECONDS.toMinutes(difference))));
    } catch (SQLException e) {
        LOG.error("SQLException error: " + e.toString());
    } finally {
        if (stmt != null) {
            try {
                stmt.close();
            } catch (SQLException ex) {
                LOG.error("Statement error: " + ex.toString());
            }
        }
        if (conn != null) {
            try {
                conn.close();
            } catch (SQLException ex) {
                LOG.error("Connection error: " + ex.toString());
            }
        }
    }
}
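Aside: the log line above uses a common idiom for rendering elapsed milliseconds as whole minutes plus leftover seconds: total seconds minus the seconds covered by the whole minutes. A worked check with an illustrative duration:

import java.util.concurrent.TimeUnit;

class ElapsedFormatSketch {
    public static void main(String[] args) {
        long difference = 185_000L; // 3 min 5 sec, in milliseconds
        long minutes = TimeUnit.MILLISECONDS.toMinutes(difference);           // 3
        long seconds = TimeUnit.MILLISECONDS.toSeconds(difference)
                - TimeUnit.MINUTES.toSeconds(minutes);                        // 185 - 180 = 5
        System.out.println(String.format("%d min %d sec", minutes, seconds)); // "3 min 5 sec"
    }
}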
From source file: com.turn.griffin.control.GriffinControlManager.java

public boolean isMessageRecent(long messageTimeStamp) {
    return (messageTimeStamp > (System.currentTimeMillis()
            - TimeUnit.MINUTES.toMillis(MESSAGE_ACTIVE_LIFE_MINUTES)));
}
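Aside: the same now-minus-TTL freshness check as a standalone sketch. The 15-minute lifetime is an illustrative stand-in for MESSAGE_ACTIVE_LIFE_MINUTES, whose real value is not shown here:

import java.util.concurrent.TimeUnit;

class RecencySketch {
    static final long ACTIVE_LIFE_MINUTES = 15; // illustrative

    // A timestamp is "recent" if it falls within the last ACTIVE_LIFE_MINUTES minutes.
    static boolean isRecent(long timestampMillis) {
        return timestampMillis > System.currentTimeMillis()
                - TimeUnit.MINUTES.toMillis(ACTIVE_LIFE_MINUTES);
    }

    public static void main(String[] args) {
        System.out.println(isRecent(System.currentTimeMillis()));                                  // true
        System.out.println(isRecent(System.currentTimeMillis() - TimeUnit.MINUTES.toMillis(20)));  // false
    }
}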
From source file: org.cloudifysource.quality.iTests.test.cli.cloudify.UninstallOutputTest.java

/**
 * Uninstalls the application and checks whether the output contains all services
 * and their number of running instances as expected.
 *
 * @throws IOException
 * @throws InterruptedException
 * @throws DSLException
 * @throws PackagingException
 * @throws RestClientException
 */
@Test(timeOut = AbstractTestSupport.DEFAULT_TEST_TIMEOUT, groups = "1", enabled = true)
public void uninstallApplicationTest() throws Exception {
    ProcessingUnitInstanceAddedEventListener addedListener = getAddedListener();
    ProcessingUnitInstanceRemovedEventListener removedListener = getRemovedListener();
    admin.getProcessingUnits().getProcessingUnitInstanceAdded().add(addedListener);
    admin.getProcessingUnits().getProcessingUnitInstanceRemoved().add(removedListener);
    try {
        // install
        File applicationFolder = new File(PETCLINIC_PATH);
        InstallApplicationResponse response = NewRestTestUtils.installApplicationUsingNewRestApi(restUrl,
                PETCLINIC_NAME, applicationFolder);

        // get all services planned instances
        Map<String, Integer> servicesPlannedInstances = new HashMap<String, Integer>();
        final RestClient restClient = NewRestTestUtils.createAndConnect(restUrl);
        List<ServiceDescription> serviceDescriptions = restClient
                .getServiceDescriptions(response.getDeploymentID());
        for (ServiceDescription serviceDescription : serviceDescriptions) {
            int instanceCount = serviceDescription.getInstanceCount();
            String serviceName = serviceDescription.getServiceName();
            if (instanceCount == 0) {
                ServiceDescription serviceDescription2 = restClient.getServiceDescription(PETCLINIC_NAME,
                        serviceName);
                LogUtils.log("Instances count of " + serviceName
                        + " after installation was 0, got it again from service's description and it was : "
                        + serviceDescription2.getInstanceCount());
            }
            int plannedInstances = serviceDescription.getPlannedInstances();
            Assert.assertEquals("instanceCount [" + instanceCount + "] is not equal to plannedInstances ["
                    + plannedInstances + "]", instanceCount, plannedInstances);
            servicesPlannedInstances.put(serviceName, instanceCount);
        }
        LogUtils.log("Instances count after install: " + servicesPlannedInstances);
        ServiceDescription apacheLBDescription = restClient.getServiceDescription(PETCLINIC_NAME,
                APACHE_LB_NAME);
        LogUtils.log("ApacheLB instance count after install: " + apacheLBDescription.getInstanceCount());

        // wait for apacheLB to restart so the running instances of apacheLB will be 1 before uninstall:
        // 1. wait for port 8090 to be available (restart occurred).
        final String uri = "http://127.0.0.1:" + PETCLINIC_PORT_NUMBER;
        RepetitiveConditionProvider getCondition = new RepetitiveConditionProvider() {
            @Override
            public boolean getCondition() {
                HttpResponse httpResponse;
                try {
                    httpResponse = NewRestTestUtils.sendGetRequest(uri);
                } catch (Exception e) {
                    LogUtils.log("failed to execute get request to " + uri, e);
                    return false;
                }
                StatusLine statusLine = httpResponse.getStatusLine();
                int statusCode = statusLine.getStatusCode();
                if (statusCode != HttpStatus.SC_OK) {
                    LogUtils.log("get request to " + uri + " returned status: " + statusCode + ", reason: "
                            + statusLine.getReasonPhrase());
                    return false;
                }
                String responseBody;
                try {
                    responseBody = RestClientExecutor.getResponseBody(httpResponse);
                } catch (RestClientIOException e) {
                    LogUtils.log("failed to transform the response into string.", e);
                    return false;
                }
                Assert.assertTrue("The response to the GET request to " + uri
                        + " doesn't contain \"welcome\". Response's body: " + responseBody,
                        responseBody.contains("welcome"));
                return true;
            }
        };
        AssertUtils.repetitiveAssertTrue("failed to execute get request to " + uri + " after 5 minutes",
                getCondition, TimeUnit.MINUTES.toMillis(5));

        // 2. wait for the number of instances to become 1 again (increased from 0).
        RepetitiveConditionProvider apacheLBRunningCondition = new RepetitiveConditionProvider() {
            @Override
            public boolean getCondition() {
                try {
                    ServiceDescription serviceDescription = restClient.getServiceDescription(PETCLINIC_NAME,
                            APACHE_LB_NAME);
                    return serviceDescription.getInstanceCount() > 0;
                } catch (RestClientException e) {
                    LogUtils.log("failed to get service description from rest client.", e);
                    return false;
                }
            }
        };
        AssertUtils.repetitiveAssertTrue("apacheLB number of running instances is still 0 after 5 minutes",
                apacheLBRunningCondition, TimeUnit.MINUTES.toMillis(5));

        // dump machines before uninstall
        if (!enableLogstash) {
            try {
                CloudTestUtils.dumpMachinesNewRestAPI(restUrl, SecurityConstants.USER_PWD_ALL_ROLES,
                        SecurityConstants.USER_PWD_ALL_ROLES);
            } catch (Exception e) {
                LogUtils.log("Failed to dump machines.", e);
            }
        }

        // un-install
        final String uninstallOutput = runCommand(
                "connect " + restUrl + ";uninstall-application --verbose " + PETCLINIC_NAME);
        AssertUtils.repetitiveAssertConditionHolds("", new AssertUtils.RepetitiveConditionProvider() {
            @Override
            public boolean getCondition() {
                return uninstallOutput.contains("Application " + PETCLINIC_NAME + " uninstalled successfully");
            }
        }, 5);
        LogUtils.log("uninstalled.");

        // asserts
        for (Entry<String, Integer> entry : servicesPlannedInstances.entrySet()) {
            String serviceName = entry.getKey();
            Integer installed = entry.getValue();
            int indexOfService = uninstallOutput.indexOf(serviceName + ":");
            Assert.assertTrue(
                    "the output doesn't contain service name [" + serviceName + "], output: " + uninstallOutput,
                    indexOfService != -1);
            int indexOfInstalled = uninstallOutput.toLowerCase().indexOf("installed", indexOfService);
            Assert.assertTrue("the output doesn't contain the string \"installed\" after service name ["
                    + serviceName + "], output: " + uninstallOutput, indexOfInstalled != -1);
            int indexOfPlanned = uninstallOutput.toLowerCase().indexOf("planned", indexOfInstalled);
            Assert.assertTrue("the output doesn't contain the string \"planned\" after \"installed\" for service ["
                    + serviceName + "], output: " + uninstallOutput, indexOfPlanned != -1);
            String initialInstalledCount = uninstallOutput.substring(indexOfInstalled + 9, indexOfPlanned).trim();
            String currentServiceLine = uninstallOutput.substring(indexOfService, indexOfPlanned);
            Assert.assertEquals(currentServiceLine, installed, Integer.valueOf(initialInstalledCount));
        }
    } finally {
        admin.getProcessingUnits().getProcessingUnitInstanceAdded().remove(addedListener);
        admin.getProcessingUnits().getProcessingUnitInstanceRemoved().remove(removedListener);
    }
}
From source file: org.excalibur.aqmp.handler.InstanceConfigurationHandler.java

public void handleInstanceConfigurationTasks(RemoteTasks tasks) {
    NodeManagerFactory.getManagerReference();

    final ListeningExecutorService executionHandlerService = newListeningDynamicScalingThreadPool(
            "instances-remote-task-result-handler", tasks.size());

    VirtualMachine remoteHost = instanceService_
            .getInstanceByPublicIp(tasks.first().getHostAndPort().getHost());

    if (remoteHost == null) {
        LOG.debug("Remote host was null. Ignoring [{}] task(s)", tasks.size());
        return;
    }

    UserProviderCredentials credentials = null;
    LoginCredentials loginCredentials = null;
    File sshKey = null;

    final AtomicInteger count = new AtomicInteger();

    for (final RemoteTask task : tasks) {
        try {
            LOG.debug("Configuring the execution of task [{}] on host [{}] with username [{}]",
                    task.getApplication().getName(), task.getHostAndPort().getHost(), task.getUsername());

            ProviderSupport provider = (ProviderSupport) this.providerRepository_
                    .findByExactlyProviderName(task.getHostAndPort().getProvider());

            Zone zone = this.regionRepository_.findZoneByName(task.getZone().getName());

            if (remoteHost == null) {
                LOG.debug("Reference for host [{}] was null!", task.getHostAndPort().getHost());
                VmConfiguration configuration = new VmConfiguration()
                        .setKeyName(task.getKeyPairs().getPrivateKey().getKeyName())
                        .setKeyPairs(new KeyPairs().setPrivateKey(task.getKeyPairs().getPrivateKey()))
                        .setPlatformUserName(task.getUsername())
                        .setPublicIpAddress(task.getHostAndPort().getHost())
                        .setPublicDnsName(task.getHostAndPort().getHost());
                remoteHost = new VirtualMachine().setConfiguration(configuration)
                        .setType(new InstanceType().setProvider(provider));
            }

            if (credentials == null) {
                credentials = this.userService_.findUserProviderCredentials(task.getOwner(), provider);
                credentials
                        .setLoginCredentials(credentials.getLoginCredentials().toBuilder()
                                .credentialName(task.getKeyPairs().getName()).authenticateAsSudo(true).build())
                        .setRegion(zone.getRegion());
                remoteHost.getConfiguration().setCredentials(credentials);

                sshKey = File.createTempFile(String.format("%s_key_", task.getUsername()), ".key");
                Files.write(decrypt(task.getKeyPairs().getPrivateKey().getKeyMaterial()).getBytes(), sshKey);

                loginCredentials = new LoginCredentials.Builder().authenticateAsSudo(true).privateKey(sshKey)
                        .user(task.getUsername()).build();
            }

            try (RemoteScriptStatementExecutor executor = new RemoteScriptStatementExecutor(remoteHost,
                    loginCredentials, executionHandlerService)) {
                executor.execute(task.getApplication(), new RemoteTaskExecutionHandler(task, count));
            }

            LOG.debug("Task [{}] executed on host [{}]", task.getApplication().getName(),
                    task.getHostAndPort().getHost());
        } catch (Exception exception) {
            LOG.error("Error on executing the task: [{}] on host/username [{}/{}]. Error message: [{}]. Cause: [{}]",
                    task.getApplication().getName(), task.getHostAndPort().getHost(), task.getUsername(),
                    exception.getMessage(),
                    exception.getCause() != null ? exception.getCause().getMessage() : "", exception);
            AnyThrow.throwUncheked(exception);
        }
    }

    awaitTerminationAndShutdownAndIgnoreInterruption(executionHandlerService, 1, TimeUnit.MINUTES);

    LOG.debug("[{}] of [{}] tasks finished successfully on node [{}]", count.get(), tasks.size(),
            remoteHost.getConfiguration().getPublicDnsName());

    boolean isExcaliburRunning = startExcaliburApplication(remoteHost, tasks.first(), loginCredentials);

    if (isExcaliburRunning) {
        LOG.debug("Registering the new instance [{}/{}] on application manager", remoteHost.getName(),
                remoteHost.getConfiguration().getPublicIpAddress());

        NodeManagerFactory.getManagerReference().addIdleInstance(remoteHost);

        LOG.debug("Instance [{}] registered on application manager [{}]", remoteHost.getName(),
                NodeManagerFactory.getManagerReference().getThisNodeReference().getName());
    }
}
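Aside: awaitTerminationAndShutdownAndIgnoreInterruption is a project helper; the plain ExecutorService equivalent of a one-minute bounded shutdown is:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

class ShutdownSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        pool.shutdown(); // stop accepting new tasks
        if (!pool.awaitTermination(1, TimeUnit.MINUTES)) {
            pool.shutdownNow(); // give up after a minute and interrupt stragglers
        }
    }
}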
From source file: eu.stratuslab.marketplace.server.MarketPlaceApplication.java

private void init(String dataDir, String storeType, String fileStoreType) {
    setName("StratusLab Marketplace");
    setDescription("Marketplace for StratusLab images");
    setOwner("StratusLab");
    setAuthor("Stuart Kenny");

    getMetadataService().addExtension("multipart", MediaType.MULTIPART_FORM_DATA, false);
    getMetadataService().addExtension("www_form", MediaType.APPLICATION_WWW_FORM, false);
    getMetadataService().addExtension("application_rdf", MediaType.APPLICATION_RDF_XML, true);
    getMetadataService().addExtension("application_xml", MediaType.APPLICATION_XML, false);
    getMetadataService().setDefaultMediaType(MediaType.APPLICATION_XML);

    setStatusService(new MarketPlaceStatusService());
    getTunnelService().setUserAgentTunnel(true);

    this.dataDir = dataDir;

    boolean success = MetadataFileUtils.createIfNotExists(dataDir);
    if (!success) {
        LOGGER.severe("Unable to create directory: " + dataDir);
    }
    success = MetadataFileUtils.createIfNotExists(Configuration.getParameterValue(PENDING_DIR));
    if (!success) {
        LOGGER.severe("Unable to create directory: " + Configuration.getParameterValue(PENDING_DIR));
    }

    whitelist = new EndorserWhitelist();

    RdfStoreFactory factory = new RdfStoreFactoryImpl();
    store = factory.createRdfStore(RdfStoreFactory.SESAME_PROVIDER, storeType);
    store.initialize();

    FileStore internalStore = null;
    if (fileStoreType.equals("file")) {
        internalStore = new FlatFileStore(dataDir);
    } else if (fileStoreType.equals("couchbase")) {
        internalStore = new CouchbaseStore(dataDir);
    }

    if (Configuration.getParameterValueAsBoolean(REPLICATION_ENABLED)) {
        fileStore = new GitStore(dataDir, internalStore);
        final Runnable update = new Runnable() {
            public void run() {
                rdfIndexUpdate();
            }
        };
        rdfUpdater = new RdfStoreUpdater(fileStore, new Processor(this));
        indexUpdateHandle = indexUpdater.scheduleWithFixedDelay(update, 1, 5, TimeUnit.MINUTES);
    } else {
        fileStore = internalStore;
    }

    queryBuilder = new SparqlBuilder();

    final Runnable remind = new Runnable() {
        public void run() {
            remind();
        }
    };
    final Runnable expires = new Runnable() {
        public void run() {
            expiry();
        }
    };

    reminder = new Reminder(this);
    expiry = new Reminder(this);
    if (Configuration.getParameterValueAsBoolean(ENDORSER_REMINDER)) {
        reminderHandle = scheduler.scheduleWithFixedDelay(remind, REMINDER_INTERVAL, REMINDER_INTERVAL,
                TimeUnit.DAYS);
        reminderHandle = scheduler.scheduleWithFixedDelay(expires, EXPIRY_INTERVAL, EXPIRY_INTERVAL,
                TimeUnit.DAYS);
    }
}
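Aside: in scheduleWithFixedDelay the TimeUnit applies to both the initial delay and the gap measured from the end of one run to the start of the next. A standalone version of the five-minute index-update schedule above, with an illustrative task body:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

class SchedulerSketch {
    public static void main(String[] args) {
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
        // First run after 1 minute, then 5 minutes after each run completes.
        ScheduledFuture<?> handle = scheduler.scheduleWithFixedDelay(
                () -> System.out.println("updating index..."), 1, 5, TimeUnit.MINUTES);
        // The JVM stays up until handle.cancel(...) / scheduler.shutdown() is called.
    }
}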
From source file: com.codelanx.codelanxlib.util.auth.UUIDFetcher.java

/**
 * Calls each supplied name individually to Mojang's servers, treating them
 * as previously used names which henceforth were changed. This method is
 * much slower than the other call methods, and should only be used
 * if there is a need to retrieve names which are now changed.
 *
 * @since 0.1.0
 * @version 0.1.0
 *
 * @param output Whether or not to print output
 * @param log The {@link Logger} to print to
 * @param doOutput A {@link Predicate} representing when to output a number
 * @return A {@link Map} of supplied names to relevant {@link UserInfo}.
 *         Note that this map will contain the supplied names even if they
 *         are invalid or not actual usernames (in which case, they will
 *         be mapped to {@code null}). Note names that have never been
 *         changed before will be mapped as invalid per this method
 * @throws IOException If there's a problem sending or receiving the request
 * @throws ParseException If the request response cannot be read
 * @throws InterruptedException If the thread is interrupted while sleeping
 */
public Map<String, UserInfo> callFromOldNames(boolean output, Logger log, Predicate<? super Integer> doOutput)
        throws IOException, ParseException, InterruptedException {
    Map<String, UserInfo> back = new HashMap<>();
    int completed = 0;
    int failed = 0;
    for (String s : names) {
        HttpURLConnection connection = UUIDFetcher.createSingleProfileConnection(s);
        if (connection.getResponseCode() == 429 && this.rateLimiting) {
            log.warning("[UUIDFetcher] Rate limit hit! Waiting 10 minutes until continuing conversion...");
            Thread.sleep(TimeUnit.MINUTES.toMillis(10));
            connection = UUIDFetcher.createSingleProfileConnection(s);
        }
        if (connection.getResponseCode() == 200) {
            JSONObject o = (JSONObject) this.jsonParser
                    .parse(new InputStreamReader(connection.getInputStream()));
            back.put(s, new UserInfo((String) o.get("name"), UUIDFetcher.getUUID((String) o.get("id"))));
            completed++;
        } else { // e.g. 400, 204
            if (output) {
                log.warning(String.format("No profile found for '%s', skipping...", s));
            }
            back.put(s, null);
            failed++;
            continue; // nothing can be done with the return
        }
        if (output) {
            int processed = completed + failed;
            if (doOutput.test(processed) || processed == this.names.size()) {
                log.info(String.format("[UUIDFetcher] Progress: %d/%d, %.2f%%, Failed names: %d", processed,
                        this.names.size(), ((double) processed / this.names.size()) * 100D, failed));
            }
        }
    }
    return back;
}
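Aside: the ten-minute back-off above is written as Thread.sleep(TimeUnit.MINUTES.toMillis(10)); TimeUnit also provides the same pause directly:

import java.util.concurrent.TimeUnit;

class BackoffSketch {
    public static void main(String[] args) throws InterruptedException {
        // Equivalent to Thread.sleep(TimeUnit.MINUTES.toMillis(10));
        // blocks the calling thread for ten minutes if actually run.
        TimeUnit.MINUTES.sleep(10);
    }
}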
From source file: org.carewebframework.logging.perf4j.PerformanceMonitor.java

/**
 * Sets the expiration time in minutes.
 *
 * @param expirationTimeMinutes expiration time in minutes.
 */
@Override
public void setExpirationTimeMinutes(final long expirationTimeMinutes) {
    this.expirationTimeMs = TimeUnit.MINUTES.toMillis(expirationTimeMinutes);
}
From source file: com.cloud.agent.Agent.java

public Agent(final IAgentShell shell, final int localAgentId, final ServerResource resource)
        throws ConfigurationException {
    _shell = shell;
    _resource = resource;
    _link = null;

    resource.setAgentControl(this);

    final String value = _shell.getPersistentProperty(getResourceName(), "id");
    _id = value != null ? Long.parseLong(value) : null;
    s_logger.info("id is " + (_id != null ? _id : ""));

    final Map<String, Object> params = PropertiesUtil.toMap(_shell.getProperties());

    // merge with properties from command line to let resource access command line parameters
    for (final Map.Entry<String, Object> cmdLineProp : _shell.getCmdLineProperties().entrySet()) {
        params.put(cmdLineProp.getKey(), cmdLineProp.getValue());
    }

    if (!_resource.configure(getResourceName(), params)) {
        throw new ConfigurationException("Unable to configure " + _resource.getName());
    }

    final String host = _shell.getHost();
    _connection = new NioClient("Agent", host, _shell.getPort(), _shell.getWorkers(), this);

    // ((NioClient)_connection).setBindAddress(_shell.getPrivateIp());

    s_logger.debug("Adding shutdown hook");
    Runtime.getRuntime().addShutdownHook(new ShutdownThread(this));

    _ugentTaskPool = new ThreadPoolExecutor(shell.getPingRetries(), 2 * shell.getPingRetries(), 10,
            TimeUnit.MINUTES, new SynchronousQueue<Runnable>(), new NamedThreadFactory("UgentTask"));

    _executor = new ThreadPoolExecutor(_shell.getWorkers(), 5 * _shell.getWorkers(), 1, TimeUnit.DAYS,
            new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("agentRequest-Handler"));

    s_logger.info("Agent [id = " + (_id != null ? _id : "new") + " : type = " + getResourceName() + " : zone = "
            + _shell.getZone() + " : pod = " + _shell.getPod() + " : workers = " + _shell.getWorkers()
            + " : host = " + host + " : port = " + _shell.getPort());
}