Usage examples for java.util.concurrent.TimeUnit.MINUTES.
Each entry below shows how TimeUnit.MINUTES is used in a real-world source file.
To view the full source code for an example, click its Source Link.
From source file:DIA_Umpire_SE.DIA_Umpire_SE.java
/** * @param args the command line arguments DIA_Umpire parameterfile *///from ww w .ja v a 2 s . com public static void main(String[] args) throws InterruptedException, FileNotFoundException, ExecutionException, IOException, ParserConfigurationException, DataFormatException, SAXException, Exception { System.out.println( "================================================================================================="); System.out.println( "DIA-Umpire singal extraction analysis (version: " + UmpireInfo.GetInstance().Version + ")"); if (args.length < 2 || args.length > 3) { System.out.println( "command format error, the correct format is: java -jar -Xmx8G DIA_Umpire_SE.jar mzMXL_file diaumpire_se.params"); System.out.println( "To fix DIA setting, use : java -jar -Xmx8G DIA_Umpire_SE.jar mzMXL_file diaumpire_se.params -f"); return; } try { //Define logger level for console ConsoleLogger.SetConsoleLogger(Level.INFO); //Define logger level and file path for text log file ConsoleLogger.SetFileLogger(Level.DEBUG, FilenameUtils.getFullPath(args[0]) + "diaumpire_se.log"); } catch (Exception e) { } boolean Fix = false; boolean Resume = false; if (args.length == 3 && args[2].equals("-f")) { Fix = true; } String parameterfile = args[1]; String MSFilePath = args[0]; Logger.getRootLogger().info("Version: " + UmpireInfo.GetInstance().Version); Logger.getRootLogger().info("Parameter file:" + parameterfile); Logger.getRootLogger().info("Spectra file:" + MSFilePath); BufferedReader reader = new BufferedReader(new FileReader(parameterfile)); String line = ""; InstrumentParameter param = new InstrumentParameter(InstrumentParameter.InstrumentType.TOF5600); param.DetermineBGByID = false; param.EstimateBG = true; int NoCPUs = 2; SpectralDataType.DataType dataType = SpectralDataType.DataType.DIA_F_Window; String WindowType = ""; int WindowSize = 25; ArrayList<XYData> WindowList = new ArrayList<>(); boolean ExportPrecursorPeak = false; boolean ExportFragmentPeak = false; //<editor-fold 
defaultstate="collapsed" desc="Read parameter file"> while ((line = reader.readLine()) != null) { Logger.getRootLogger().info(line); if (!"".equals(line) && !line.startsWith("#")) { //System.out.println(line); if (line.equals("==window setting begin")) { while (!(line = reader.readLine()).equals("==window setting end")) { if (!"".equals(line)) { WindowList.add(new XYData(Float.parseFloat(line.split("\t")[0]), Float.parseFloat(line.split("\t")[1]))); } } continue; } if (line.split("=").length < 2) { continue; } String type = line.split("=")[0].trim(); if (type.startsWith("para.")) { type = type.replace("para.", "SE."); } String value = line.split("=")[1].trim(); switch (type) { case "Thread": { NoCPUs = Integer.parseInt(value); break; } case "ExportPrecursorPeak": { ExportPrecursorPeak = Boolean.parseBoolean(value); break; } case "ExportFragmentPeak": { ExportFragmentPeak = Boolean.parseBoolean(value); break; } //<editor-fold defaultstate="collapsed" desc="instrument parameters"> case "RPmax": { param.PrecursorRank = Integer.parseInt(value); break; } case "RFmax": { param.FragmentRank = Integer.parseInt(value); break; } case "CorrThreshold": { param.CorrThreshold = Float.parseFloat(value); break; } case "DeltaApex": { param.ApexDelta = Float.parseFloat(value); break; } case "RTOverlap": { param.RTOverlapThreshold = Float.parseFloat(value); break; } case "BoostComplementaryIon": { param.BoostComplementaryIon = Boolean.parseBoolean(value); break; } case "AdjustFragIntensity": { param.AdjustFragIntensity = Boolean.parseBoolean(value); break; } case "SE.MS1PPM": { param.MS1PPM = Float.parseFloat(value); break; } case "SE.MS2PPM": { param.MS2PPM = Float.parseFloat(value); break; } case "SE.SN": { param.SNThreshold = Float.parseFloat(value); break; } case "SE.MS2SN": { param.MS2SNThreshold = Float.parseFloat(value); break; } case "SE.MinMSIntensity": { param.MinMSIntensity = Float.parseFloat(value); break; } case "SE.MinMSMSIntensity": { param.MinMSMSIntensity = 
Float.parseFloat(value); break; } case "SE.MinRTRange": { param.MinRTRange = Float.parseFloat(value); break; } case "SE.MaxNoPeakCluster": { param.MaxNoPeakCluster = Integer.parseInt(value); param.MaxMS2NoPeakCluster = Integer.parseInt(value); break; } case "SE.MinNoPeakCluster": { param.MinNoPeakCluster = Integer.parseInt(value); param.MinMS2NoPeakCluster = Integer.parseInt(value); break; } case "SE.MinMS2NoPeakCluster": { param.MinMS2NoPeakCluster = Integer.parseInt(value); break; } case "SE.MaxCurveRTRange": { param.MaxCurveRTRange = Float.parseFloat(value); break; } case "SE.Resolution": { param.Resolution = Integer.parseInt(value); break; } case "SE.RTtol": { param.RTtol = Float.parseFloat(value); break; } case "SE.NoPeakPerMin": { param.NoPeakPerMin = Integer.parseInt(value); break; } case "SE.StartCharge": { param.StartCharge = Integer.parseInt(value); break; } case "SE.EndCharge": { param.EndCharge = Integer.parseInt(value); break; } case "SE.MS2StartCharge": { param.MS2StartCharge = Integer.parseInt(value); break; } case "SE.MS2EndCharge": { param.MS2EndCharge = Integer.parseInt(value); break; } case "SE.NoMissedScan": { param.NoMissedScan = Integer.parseInt(value); break; } case "SE.Denoise": { param.Denoise = Boolean.valueOf(value); break; } case "SE.EstimateBG": { param.EstimateBG = Boolean.valueOf(value); break; } case "SE.RemoveGroupedPeaks": { param.RemoveGroupedPeaks = Boolean.valueOf(value); break; } case "SE.MinFrag": { param.MinFrag = Integer.parseInt(value); break; } case "SE.IsoPattern": { param.IsoPattern = Float.valueOf(value); break; } case "SE.StartRT": { param.startRT = Float.valueOf(value); break; } case "SE.EndRT": { param.endRT = Float.valueOf(value); break; } case "SE.RemoveGroupedPeaksRTOverlap": { param.RemoveGroupedPeaksRTOverlap = Float.valueOf(value); break; } case "SE.RemoveGroupedPeaksCorr": { param.RemoveGroupedPeaksCorr = Float.valueOf(value); break; } case "SE.MinMZ": { param.MinMZ = Float.valueOf(value); break; } case 
"SE.MinPrecursorMass": { param.MinPrecursorMass = Float.valueOf(value); break; } case "SE.MaxPrecursorMass": { param.MaxPrecursorMass = Float.valueOf(value); break; } case "SE.IsoCorrThreshold": { param.IsoCorrThreshold = Float.valueOf(value); break; } case "SE.MassDefectFilter": { param.MassDefectFilter = Boolean.parseBoolean(value); break; } case "SE.MassDefectOffset": { param.MassDefectOffset = Float.valueOf(value); break; } //</editor-fold>//</editor-fold> case "WindowType": { WindowType = value; switch (WindowType) { case "SWATH": { dataType = SpectralDataType.DataType.DIA_F_Window; break; } case "V_SWATH": { dataType = SpectralDataType.DataType.DIA_V_Window; break; } case "MSX": { dataType = SpectralDataType.DataType.MSX; break; } case "MSE": { dataType = SpectralDataType.DataType.MSe; break; } } break; } case "WindowSize": { WindowSize = Integer.parseInt(value); break; } } } } //</editor-fold> try { File MSFile = new File(MSFilePath); if (MSFile.exists()) { long time = System.currentTimeMillis(); Logger.getRootLogger().info( "================================================================================================="); Logger.getRootLogger().info("Processing " + MSFilePath + "...."); //Initialize a DIA file data structure DIAPack DiaFile = new DIAPack(MSFile.getAbsolutePath(), NoCPUs); DiaFile.Resume = Resume; DiaFile.SetDataType(dataType); DiaFile.SetParameter(param); //Set DIA isolation window setting if (dataType == SpectralDataType.DataType.DIA_F_Window) { DiaFile.SetWindowSize(WindowSize); } else if (dataType == SpectralDataType.DataType.DIA_V_Window) { for (XYData window : WindowList) { DiaFile.AddVariableWindow(window); } } DiaFile.SaveDIASetting(); DiaFile.SaveParams(); if (Fix) { DiaFile.FixScanidx(); return; } DiaFile.ExportPrecursorPeak = ExportPrecursorPeak; DiaFile.ExportFragmentPeak = ExportFragmentPeak; Logger.getRootLogger().info("Module A: Signal extraction"); //Start DIA signal extraction process to generate pseudo MS/MS files 
DiaFile.process(); time = System.currentTimeMillis() - time; Logger.getRootLogger().info(MSFilePath + " processed time:" + String.format("%d hour, %d min, %d sec", TimeUnit.MILLISECONDS.toHours(time), TimeUnit.MILLISECONDS.toMinutes(time) - TimeUnit.HOURS.toMinutes(TimeUnit.MILLISECONDS.toHours(time)), TimeUnit.MILLISECONDS.toSeconds(time) - TimeUnit.MINUTES.toSeconds(TimeUnit.MILLISECONDS.toMinutes(time)))); } else { throw new RuntimeException("file: " + MSFile + "? does not exist!"); } Logger.getRootLogger().info("Job complete"); Logger.getRootLogger().info( "================================================================================================="); } catch (Exception e) { Logger.getRootLogger().error(ExceptionUtils.getStackTrace(e)); throw e; } }
From source file:gov.ca.cwds.cals.service.ComplaintsService.java
@SuppressWarnings("squid:S2142") //Logging and informing client instead of shutdown private List<ComplaintDto> aggregateComplaintsFromDifferentSources(String facilityNumber) { List<ComplaintDto> complaints = new ArrayList<>(); ExecutorService executorService = Executors.newFixedThreadPool(3); try {/*from ww w. ja va 2s . c o m*/ List<Future<List<ComplaintDto>>> futures = executorService.invokeAll(prepareListOfTasks(facilityNumber), 1, TimeUnit.MINUTES); for (Future<List<ComplaintDto>> future : futures) { complaints.addAll(future.get()); } } catch (InterruptedException e) { String message = "One of complaints execution threads has been interrupted"; LOGGER.error(message, e); throw new ServiceException(message, e); } catch (ExecutionException e) { LOGGER.error(e.getMessage(), e); throw new ServiceException(e.getMessage(), e); } shutdownExecutionService(executorService); return complaints; }
From source file:br.unb.cic.bionimbuz.services.storage.bucket.CloudStorageService.java
/**
 * Starts the cloud storage service: registers itself as a listener, ensures the
 * ZooKeeper bucket node exists, loads provider credentials from the config file,
 * remounts all known buckets, and schedules this service to re-run every 5 minutes.
 *
 * NOTE(review): statement order matters here — credentials must be set on
 * CloudStorageMethods before buckets are authenticated/mounted below.
 */
@Override
public void start(List<Listeners> listeners) {
    this.listeners = listeners;
    if (listeners != null) {
        listeners.add(this);
    }
    // Create the ZooKeeper folder for the storage module if it is missing.
    // (comment translated from Portuguese)
    if (!this.cms.getZNodeExist(Path.BUCKETS.getFullPath(), null)) {
        this.cms.createZNode(CreateMode.PERSISTENT, Path.BUCKETS.getFullPath(), null);
    }
    LOGGER.info("[CloudStorageService] Starting");
    // Provider-specific implementation of the storage operations (Amazon/Google).
    methodsInstance = new CloudMethodsAmazonGoogle();
    // Load parameters from the config file into static provider state.
    bucketsFolder = BioNimbusConfig.get().getBucketsFolder();
    CloudStorageMethods.setKeyGoogle(BioNimbusConfig.get().getKeyGoogle());
    CloudStorageMethods.setKeyAmazon(BioNimbusConfig.get().getKeyAmazon());
    CloudStorageMethods.setGcloudFolder(BioNimbusConfig.get().getGcloudFolder());
    CloudStorageMethods.setMyId(BioNimbusConfig.get().getId());
    // Instantiate all buckets known to this node.
    LOGGER.info("[CloudStorageService] Instancing Buckets");
    this.InstanceBuckets();
    // Unmount buckets possibly left mounted by a previous (crashed) execution.
    for (final BioBucket aux : bucketList) {
        try {
            methodsInstance.StorageUmount(aux);
        } catch (final Throwable t) {
            // Ignore: bucket was simply not mounted; nothing to clean up.
        }
    }
    try {
        // Authenticate each provider, then mount every bucket.
        LOGGER.info("[CloudStorageService] Authenticating Providers");
        for (final StorageProvider aux : StorageProvider.values()) {
            methodsInstance.StorageAuth(aux);
        }
        LOGGER.info("[CloudStorageService] Mounting Buckets");
        for (final BioBucket aux : bucketList) {
            methodsInstance.StorageMount(aux);
        }
    } catch (final Throwable t) {
        // Auth/mount failures are logged but do not abort startup.
        LOGGER.error("[CloudStorageService] Exception: " + t.getMessage());
        t.printStackTrace();
    }
    // Background bandwidth monitor for the buckets.
    final BandwithCheckerBucket checker = new BandwithCheckerBucket();
    checker.start();
    // Re-run this service every 5 minutes (initial delay 0).
    this.executorService.scheduleAtFixedRate(this, 0, 5, TimeUnit.MINUTES);
}
From source file:com.ning.metrics.collector.TestPerformance.java
private static long scheduleScribeAgents() throws InterruptedException { ExecutorService e = Executors.newFixedThreadPool(THREADPOOL_SIZE, new NamedThreadFactory("Performance tests (Scribe client)")); long startTime = System.currentTimeMillis(); for (int i = 0; i < NUMBER_OF_SCRIBE_CLIENTS; i++) { e.execute(new ScribeClient()); log.debug(String.format("Thread %d/%d submitted", i + 1, NUMBER_OF_SCRIBE_CLIENTS)); }// ww w .ja v a 2 s . c o m e.shutdown(); e.awaitTermination(10, TimeUnit.MINUTES); return startTime; }
From source file:edu.wisc.jmeter.dao.JdbcMonitorDao.java
/**
 * Creates the DAO with a named-parameter JDBC template and a programmatic
 * transaction template over the given DataSource.
 *
 * @param dataSource       database connection source
 * @param purgeOldFailures failure-retention window, in minutes
 * @param purgeOldStatus   status-retention window, in minutes
 */
public JdbcMonitorDao(DataSource dataSource, int purgeOldFailures, int purgeOldStatus) {
    this.jdbcTemplate = new NamedParameterJdbcTemplate(dataSource);

    // Transaction support bound to the same DataSource.
    final DataSourceTransactionManager txManager = new DataSourceTransactionManager(dataSource);
    txManager.afterPropertiesSet();
    this.transactionTemplate = new TransactionTemplate(txManager);
    this.transactionTemplate.afterPropertiesSet();

    // Retention windows arrive in minutes; store them as milliseconds.
    this.purgeOldFailure = TimeUnit.MINUTES.toMillis(purgeOldFailures);
    this.purgeOldStatus = TimeUnit.MINUTES.toMillis(purgeOldStatus);
}
From source file:br.com.jbugbrasil.bot.service.jbossbooks.JBossBooksService.java
/**
 * Default scheduler: fetches the book list from JBoss Books (GitBook) every 20 minutes
 * and caches the JSON response for one hour. Returns early when the cache is already
 * populated. (Javadoc translated from Portuguese.)
 */
@Schedule(minute = "0/20", hour = "*", persistent = false)
public synchronized void initialize() {
    if (cache.containsKey("jsonResponse")) {
        log.fine("Cache populado, retornando");
        return;
    }
    try {
        log.fine("Buscando informaes dos livros em [" + GIT_BOOK_ENDPOINT + "]");
        HttpGet request = new HttpGet(GIT_BOOK_ENDPOINT);
        request.setHeader("Authorization", "Bearer " + gitBookToken);
        // 2xx -> response body as String (null when there is no entity);
        // anything else -> ClientProtocolException.
        ResponseHandler<String> responseHandler = new ResponseHandler<String>() {
            @Override
            public String handleResponse(final HttpResponse response) throws IOException {
                int status = response.getStatusLine().getStatusCode();
                if (status >= 200 && status < 300) {
                    HttpEntity entity = response.getEntity();
                    return entity != null ? EntityUtils.toString(entity) : null;
                } else {
                    throw new ClientProtocolException("Unexpected response status: " + status);
                }
            }
        };
        ObjectMapper mapper = new ObjectMapper();
        JSONResponse jsonResponse = mapper.readValue(client().execute(request, responseHandler),
                JSONResponse.class);
        // Cache for 60 minutes; the @Schedule above refreshes well before expiry.
        cache.put("jsonResponse", jsonResponse, 60, TimeUnit.MINUTES);
        verifyNewBook(jsonResponse.getTotal());
        verifyBookUpdates(jsonResponse);
    } catch (final Exception e) {
        // Fix: route the stack trace through the logger instead of e.printStackTrace(),
        // which bypassed log configuration and duplicated the report.
        log.log(java.util.logging.Level.WARNING,
                "Falha ao buscar informaes em [" + GIT_BOOK_ENDPOINT + "]: " + e.getMessage(), e);
    }
}
From source file:eu.roschi.obdkinesis.utils.DynamoDBUtils.java
/**
 * Creates the table to store our counts in, with a hash key of "resource" and a range key of
 * "timestamp", so we can query counts for a given resource by time. This uses an initial
 * provisioned throughput of 1 read capacity unit and 1 write capacity unit.
 *
 * NOTE(review): the original comment claimed 10 read / 5 write units, but the code below
 * requests ProvisionedThroughput(1L, 1L).
 *
 * @param tableName The name of the table to create.
 */
public void createCountTableIfNotExists(String tableName) {
    // Key schema order matters: HASH key first, then RANGE key.
    List<KeySchemaElement> ks = new ArrayList<>();
    ks.add(new KeySchemaElement().withKeyType(KeyType.HASH).withAttributeName(ATTRIBUTE_NAME_HASH_KEY));
    ks.add(new KeySchemaElement().withKeyType(KeyType.RANGE).withAttributeName(ATTRIBUTE_NAME_RANGE_KEY));
    ArrayList<AttributeDefinition> attributeDefinitions = new ArrayList<>();
    attributeDefinitions.add(new AttributeDefinition().withAttributeName(ATTRIBUTE_NAME_HASH_KEY)
            .withAttributeType(ScalarAttributeType.S));
    // Range key must be a String. DynamoDBMapper translates Dates to ISO8601 strings.
    attributeDefinitions.add(new AttributeDefinition().withAttributeName(ATTRIBUTE_NAME_RANGE_KEY)
            .withAttributeType(ScalarAttributeType.S));
    // The provisioned throughput (1 read / 1 write) will need to be adjusted if the
    // cardinality of the input data or the interval for updates changes.
    CreateTableRequest createTableRequest = new CreateTableRequest().withTableName(tableName)
            .withProvisionedThroughput(new ProvisionedThroughput(1L, 1L)).withKeySchema(ks)
            .withAttributeDefinitions(attributeDefinitions);
    try {
        dynamoDB.createTable(createTableRequest);
        LOG.info(String.format("Created DynamoDB table: %s. Waiting up to 5 minutes for it to become ACTIVE...",
                tableName));
        // Wait (poll every 10s, presumably) up to 5 minutes for the table to become ACTIVE.
        if (!waitUntilTableIsActive(tableName, 10, TimeUnit.MINUTES.toSeconds(5))) {
            throw new IllegalStateException(
                    String.format("Timed out while waiting for DynamoDB table %s to become ready", tableName));
        }
    } catch (ResourceInUseException ex) {
        // Assume table exists and is ready to use
    }
}
From source file:com.alertlogic.aws.analytics.poc.DynamoDBUtils.java
/** * Creates the table to store our counts in with a hash key of "resource" and a range key of "timestamp" so we can * query counts for a given resource by time. This uses an initial provisioned throughput of 10 read capacity units * and 5 write capacity units//from w w w.j av a2 s . com * * @param tableName The name of the table to create. */ public void createCountTableIfNotExists(String tableName) { List<KeySchemaElement> ks = new ArrayList<>(); ks.add(new KeySchemaElement().withKeyType(KeyType.HASH).withAttributeName(ATTRIBUTE_NAME_HASH_KEY)); ks.add(new KeySchemaElement().withKeyType(KeyType.RANGE).withAttributeName(ATTRIBUTE_NAME_RANGE_KEY)); ArrayList<AttributeDefinition> attributeDefinitions = new ArrayList<>(); attributeDefinitions.add(new AttributeDefinition().withAttributeName(ATTRIBUTE_NAME_HASH_KEY) .withAttributeType(ScalarAttributeType.S)); // Range key must be a String. DynamoDBMapper translates Dates to ISO8601 strings. attributeDefinitions.add(new AttributeDefinition().withAttributeName(ATTRIBUTE_NAME_RANGE_KEY) .withAttributeType(ScalarAttributeType.S)); // Create the table with enough write IOPS to handle 5 distinct resources updated every 1 second: // 1 update/second * 5 resources = 5 write IOPS. // The provisioned throughput will need to be adjusted if the cadinality of the input data or the interval for // updates changes. CreateTableRequest createTableRequest = new CreateTableRequest().withTableName(tableName) .withProvisionedThroughput(new ProvisionedThroughput(10L, 5L)).withKeySchema(ks) .withAttributeDefinitions(attributeDefinitions); try { dynamoDB.createTable(createTableRequest); LOG.info(String.format("Created DynamoDB table: %s. 
Waiting up to 5 minutes for it to become ACTIVE...", tableName)); // Wait 5 minutes for the table to become ACTIVE if (!waitUntilTableIsActive(tableName, 10, TimeUnit.MINUTES.toSeconds(5))) { throw new IllegalStateException( String.format("Timed out while waiting for DynamoDB table %s to become ready", tableName)); } } catch (ResourceInUseException ex) { // Assume table exists and is ready to use } }
From source file:io.dropwizard.revolver.http.RevolverHttpClientFactory.java
private static OkHttpClient getOkHttpClient(RevolverHttpServiceConfig serviceConfiguration) throws KeyStoreException, NoSuchAlgorithmException, CertificateException, IOException, KeyManagementException, UnrecoverableKeyException { final OkHttpClient.Builder builder = new OkHttpClient.Builder(); if (serviceConfiguration.isAuthEnabled()) { switch (serviceConfiguration.getAuth().getType().toLowerCase()) { case "basic": val basicAuthConfig = (BasicAuthConfig) serviceConfiguration.getAuth(); if (!Strings.isNullOrEmpty(basicAuthConfig.getUsername())) { throw new RuntimeException(String.format("No valid authentication data for service %s", serviceConfiguration.getAuth().getType())); }//from w ww. ja v a 2 s . co m builder.authenticator((route, response) -> { String credentials = Credentials.basic(basicAuthConfig.getUsername(), basicAuthConfig.getPassword()); return response.request().newBuilder().addHeader(HttpHeaders.AUTHORIZATION, credentials) .build(); }); break; case "token": val tokenAuthConfig = (TokenAuthConfig) serviceConfiguration.getAuth(); if (Strings.isNullOrEmpty(tokenAuthConfig.getPrefix())) { //No prefix check builder.authenticator((route, response) -> response.request().newBuilder() .addHeader(HttpHeaders.AUTHORIZATION, tokenAuthConfig.getToken()).build()); } else { //with configured prefix builder.authenticator((route, response) -> response.request().newBuilder() .addHeader(HttpHeaders.AUTHORIZATION, String.format("%s %s", tokenAuthConfig.getPrefix(), tokenAuthConfig.getToken())) .build()); } break; default: throw new RuntimeException(String.format("Authentication type %s is not supported", serviceConfiguration.getAuth().getType())); } } if (serviceConfiguration.isSecured()) { final ConnectionSpec spec = new ConnectionSpec.Builder(ConnectionSpec.MODERN_TLS) .allEnabledTlsVersions().allEnabledCipherSuites().build(); builder.connectionSpecs(Collections.singletonList(spec)); final String keystorePath = serviceConfiguration.getKeyStorePath(); final String 
keystorePassword = (serviceConfiguration.getKeystorePassword() == null) ? "" : serviceConfiguration.getKeystorePassword(); if (!StringUtils.isBlank(keystorePath)) { SSLSocketFactory socketFactory = getSSLContext(keystorePath, keystorePassword).getSocketFactory(); builder.sslSocketFactory(socketFactory); builder.hostnameVerifier(OkHostnameVerifier.INSTANCE); } else { HostnameVerifier hostNameVerifier = (s, sslSession) -> true; builder.hostnameVerifier(hostNameVerifier); } } if (serviceConfiguration.getConnectionKeepAliveInMillis() <= 0) { builder.connectionPool( new ConnectionPool(serviceConfiguration.getConnectionPoolSize(), 5, TimeUnit.MINUTES)); } else { builder.connectionPool(new ConnectionPool(serviceConfiguration.getConnectionPoolSize(), serviceConfiguration.getConnectionKeepAliveInMillis(), TimeUnit.MILLISECONDS)); } builder.connectTimeout(serviceConfiguration.getRuntime().getThreadPool().getTimeout(), TimeUnit.MILLISECONDS); return builder.build(); }
From source file:cc.arduino.packages.Uploader.java
/**
 * Runs an external upload/flash command, streaming its stdout/stderr through
 * MessageSiphons, and reports success (exit code 0).
 *
 * Empty commands succeed trivially. The process is given 5 minutes before being
 * forcibly killed to avoid a 100% CPU spin from a hung flasher.
 */
protected boolean executeUploadCommand(String command[]) throws Exception {
    // Skip empty commands
    if (command == null || command.length == 0)
        return true;
    notFoundError = false;
    int result = -1; // non-zero default => failure unless the process completes
    try {
        if (verbose) {
            // Echo the full command line when verbose mode is on.
            for (String c : command)
                System.out.print(c + " ");
            System.out.println();
        }
        Process process = ProcessUtils.exec(command);
        programmerPid = process; // remembered so the process can be killed externally
        // Drain stdout and stderr concurrently to avoid pipe-buffer deadlock.
        new MessageSiphon(process.getInputStream(), this, 100);
        new MessageSiphon(process.getErrorStream(), this, 100);
        // wait for the process to finish, but not forever
        // kill the flasher process after 5 minutes to avoid 100% cpu spinning
        if (!process.waitFor(5, TimeUnit.MINUTES)) {
            process.destroyForcibly();
        }
        if (!process.isAlive()) {
            result = process.exitValue();
        } else {
            // NOTE(review): a process still alive after destroyForcibly() is reported as
            // SUCCESS (result = 0). That looks inverted — confirm this is intentional
            // before changing it; destroyForcibly() is asynchronous, so this branch can
            // be reached while the kill is still in progress.
            result = 0;
        }
    } catch (Exception e) {
        // Swallowed: result stays -1, so the method returns false on any exception.
        e.printStackTrace();
    }
    return result == 0;
}