List of usage examples for java.util.concurrent TimeUnit MINUTES
TimeUnit MINUTES
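Before the project examples below, here is a minimal, self-contained sketch of the most common TimeUnit.MINUTES idioms: converting minutes to other units, bounding an executor shutdown, and sleeping. It uses only the standard java.util.concurrent API; the class name and the demo task are illustrative.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class TimeUnitMinutesDemo {
    public static void main(String[] args) throws InterruptedException {
        // Conversions: 5 minutes expressed in finer units
        System.out.println(TimeUnit.MINUTES.toMillis(5));  // 300000
        System.out.println(TimeUnit.MINUTES.toSeconds(5)); // 300
        // The inverse direction truncates: 90 seconds is 1 minute
        System.out.println(TimeUnit.MINUTES.convert(90, TimeUnit.SECONDS));

        // Bound a thread-pool shutdown to at most one minute,
        // the same pattern used by several examples below
        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.submit(() -> System.out.println("demo task"));
        executor.shutdown();
        executor.awaitTermination(1, TimeUnit.MINUTES);

        // Block the current thread for one minute
        TimeUnit.MINUTES.sleep(1);
    }
}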
From source file:io.wcm.caravan.pipeline.impl.JsonPipelineMultipleSubscriptionsTest.java
@Test
public void subscribeConcurrentlyToTransformedPipelineOutputs() throws InterruptedException {
    // this test verifies that pipeline actions are only executed once,
    // even if there are multiple concurrent subscribers
    firstStep = newPipelineWithResponseBody("{id:123}");
    secondStep = firstStep.applyAction(action);
    when(action.execute(any(), any())).thenReturn(firstStep.getOutput());

    // create multiple simultaneous threads that subscribe to the same pipeline output
    // and use a CountDownLatch to delay the subscription until all threads have been started
    CountDownLatch countDown = new CountDownLatch(100);
    ExecutorService executorService = Executors.newCachedThreadPool();
    while (countDown.getCount() > 0) {
        executorService.submit(() -> {
            countDown.await();
            secondStep.getOutput().subscribe(Subscribers.empty());
            // this is required for the lambda to be considered a Callable<Void>
            // and therefore be allowed to throw exceptions
            return null;
        });
        countDown.countDown();
    }

    executorService.shutdown();
    executorService.awaitTermination(1, TimeUnit.MINUTES);

    verify(action, times(1)).execute(any(), any());
}
From source file:br.com.semanticwot.cd.conf.AppWebConfiguration.java
@Bean
public CacheManager cacheManager() {
    // A more advanced cache, where it is possible to set, for example, how long entries live
    CacheBuilder<Object, Object> builder = CacheBuilder.newBuilder()
            .maximumSize(100)
            .expireAfterAccess(5, TimeUnit.MINUTES);
    GuavaCacheManager cacheManager = new GuavaCacheManager();
    cacheManager.setCacheBuilder(builder);
    return cacheManager;
}
From source file:com.clearspring.metriccatcher.MetricCatcher.java
/**
 * Create a Metric object from a JSONMetric
 *
 * @param jsonMetric A JSONMetric to make a Metric from
 * @return A Metric equivalent to the given JSONMetric
 */
protected Metric createMetric(JSONMetric jsonMetric) {
    // Split the name from the JSON on dots for the metric group/type/name
    MetricName metricName;
    ArrayList<String> parts = new ArrayList<String>(Arrays.asList(jsonMetric.getName().split("\\.")));
    if (parts.size() >= 3) {
        metricName = new MetricName(parts.remove(0), parts.remove(0), StringUtils.join(parts, "."));
    } else {
        metricName = new MetricName(jsonMetric.getName(), "", "");
    }

    Class<?> metricType = jsonMetric.getMetricClass();
    if (metricType == Gauge.class) {
        return Metrics.newGauge(metricName, new GaugeMetricImpl());
    } else if (metricType == Counter.class) {
        return Metrics.newCounter(metricName);
    } else if (metricType == Meter.class) {
        // TODO timeunit
        return Metrics.newMeter(metricName, jsonMetric.getName(), TimeUnit.MINUTES);
    } else if (metricType == Histogram.class) {
        if (jsonMetric.getType().equals("biased")) {
            return Metrics.newHistogram(metricName, true);
        } else {
            return Metrics.newHistogram(metricName, false);
        }
    } else if (metricType == Timer.class) {
        return Metrics.newTimer(metricName, TimeUnit.MILLISECONDS, TimeUnit.SECONDS);
    }

    // Uh-oh
    return null;
}
From source file:hudson.plugins.ec2.EC2RetentionStrategy.java
private long internalCheck(EC2Computer computer) {
    /*
     * If we've been told never to terminate, or the node is null (deleted), there are no checks to perform
     */
    if (idleTerminationMinutes == 0 || computer.getNode() == null) {
        return 1;
    }

    if (computer.isIdle() && !DISABLED) {
        final long uptime;
        try {
            uptime = computer.getUptime();
        } catch (AmazonClientException | InterruptedException e) {
            // We'll just retry next time we test for idleness.
            LOGGER.fine("Exception while checking host uptime for " + computer.getName()
                    + ", will retry next check. Exception: " + e);
            return 1;
        }

        // On rare occasions AWS may return a faulty instance that shows as running in the AWS console
        // but cannot be connected to; such an instance needs to be terminated after {@link #STARTUP_TIMEOUT}
        if (computer.isOffline() && uptime < TimeUnit2.MINUTES.toMillis(STARTUP_TIMEOUT)) {
            return 1;
        }

        final long idleMilliseconds = System.currentTimeMillis() - computer.getIdleStartMilliseconds();

        if (idleTerminationMinutes > 0) {
            // TODO: really think about the right strategy here, see JENKINS-23792
            if (idleMilliseconds > TimeUnit2.MINUTES.toMillis(idleTerminationMinutes)) {
                LOGGER.info("Idle timeout of " + computer.getName() + " after "
                        + TimeUnit2.MILLISECONDS.toMinutes(idleMilliseconds) + " idle minutes");
                computer.getNode().idleTimeout();
            }
        } else {
            final int freeSecondsLeft = (60 * 60)
                    - (int) (TimeUnit2.SECONDS.convert(uptime, TimeUnit2.MILLISECONDS) % (60 * 60));
            // if we have less "free" (aka already paid for) time left than our idle time,
            // stop/terminate the instance. See JENKINS-23821
            if (freeSecondsLeft <= TimeUnit.MINUTES.toSeconds(Math.abs(idleTerminationMinutes))) {
                LOGGER.info("Idle timeout of " + computer.getName() + " after "
                        + TimeUnit2.MILLISECONDS.toMinutes(idleMilliseconds) + " idle minutes, with "
                        + TimeUnit2.SECONDS.toMinutes(freeSecondsLeft)
                        + " minutes remaining in billing period");
                computer.getNode().idleTimeout();
            }
        }
    }
    return 1;
}
From source file:com.linkedin.pinot.integration.tests.HybridClusterIntegrationTest.java
@BeforeClass
public void setUp() throws Exception {
    // Clean up
    ensureDirectoryExistsAndIsEmpty(_tmpDir);
    ensureDirectoryExistsAndIsEmpty(_segmentDir);
    ensureDirectoryExistsAndIsEmpty(_tarDir);

    // Start Zk, Kafka and Pinot
    startHybridCluster();

    // Unpack the Avro files
    TarGzCompressionUtils.unTar(new File(TestUtils.getFileFromResourceUrl(OfflineClusterIntegrationTest.class
            .getClassLoader().getResource("On_Time_On_Time_Performance_2014_100k_subset_nonulls.tar.gz"))),
            _tmpDir);
    _tmpDir.mkdirs();

    final List<File> avroFiles = getAllAvroFiles();

    File schemaFile = getSchemaFile();
    schema = Schema.fromFile(schemaFile);
    addSchema(schemaFile, schema.getSchemaName());

    final List<String> invertedIndexColumns = makeInvertedIndexColumns();
    final String sortedColumn = makeSortedColumn();

    // Create Pinot table
    addHybridTable("mytable", "DaysSinceEpoch", "daysSinceEpoch", KafkaStarterUtils.DEFAULT_ZK_STR, KAFKA_TOPIC,
            schema.getSchemaName(), TENANT_NAME, TENANT_NAME, avroFiles.get(0), sortedColumn,
            invertedIndexColumns, null);
    LOGGER.info("Running with Sorted column=" + sortedColumn + " and inverted index columns = "
            + invertedIndexColumns);

    // Create a subset of the first 8 segments (for offline) and the last 6 segments (for realtime)
    final List<File> offlineAvroFiles = getOfflineAvroFiles(avroFiles);
    final List<File> realtimeAvroFiles = getRealtimeAvroFiles(avroFiles);

    // Load data into H2
    ExecutorService executor = Executors.newCachedThreadPool();
    setupH2AndInsertAvro(avroFiles, executor);

    // Create segments from Avro data
    LOGGER.info("Creating offline segments from avro files " + offlineAvroFiles);
    buildSegmentsFromAvro(offlineAvroFiles, executor, 0, _segmentDir, _tarDir, "mytable", false, null);

    // Initialize query generator
    setupQueryGenerator(avroFiles, executor);

    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);

    // Set up a Helix spectator to count the number of segments that are uploaded
    // and unlock the latch once all offline segments are online
    final CountDownLatch latch = new CountDownLatch(1);
    HelixManager manager = HelixManagerFactory.getZKHelixManager(getHelixClusterName(), "test_instance",
            InstanceType.SPECTATOR, ZkStarter.DEFAULT_ZK_STR);
    manager.connect();
    manager.addExternalViewChangeListener(new ExternalViewChangeListener() {
        @Override
        public void onExternalViewChange(List<ExternalView> externalViewList, NotificationContext changeContext) {
            for (ExternalView externalView : externalViewList) {
                if (externalView.getId().contains("mytable")) {
                    Set<String> partitionSet = externalView.getPartitionSet();
                    if (partitionSet.size() == offlineSegmentCount) {
                        int onlinePartitionCount = 0;
                        for (String partitionId : partitionSet) {
                            Map<String, String> partitionStateMap = externalView.getStateMap(partitionId);
                            if (partitionStateMap.containsValue("ONLINE")) {
                                onlinePartitionCount++;
                            }
                        }
                        if (onlinePartitionCount == offlineSegmentCount) {
                            System.out.println("Got " + offlineSegmentCount
                                    + " online tables, unlatching the main thread");
                            latch.countDown();
                        }
                    }
                }
            }
        }
    });

    // Upload the segments
    int i = 0;
    for (String segmentName : _tarDir.list()) {
        System.out.println("Uploading segment " + (i++) + " : " + segmentName);
        File file = new File(_tarDir, segmentName);
        FileUploadUtils.sendSegmentFile("localhost", "8998", segmentName, new FileInputStream(file),
                file.length());
    }

    // Wait for all offline segments to be online
    latch.await();

    // Load realtime data into Kafka
    LOGGER.info("Pushing data from realtime avro files " + realtimeAvroFiles);
    pushAvroIntoKafka(realtimeAvroFiles, KafkaStarterUtils.DEFAULT_KAFKA_BROKER, KAFKA_TOPIC);

    // Wait until the Pinot record count matches the number of records in the Avro files
    long timeInFiveMinutes = System.currentTimeMillis() + 5 * 60 * 1000L;
    Statement statement = _connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
    statement.execute("select count(*) from mytable");
    ResultSet rs = statement.getResultSet();
    rs.first();
    int h2RecordCount = rs.getInt(1);
    rs.close();
    waitForRecordCountToStabilizeToExpectedCount(h2RecordCount, timeInFiveMinutes);
}
From source file:com.spotify.helios.ZooKeeperTestingClusterManager.java
@Override
public void start() {
    try {
        start0();
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
    curator = createCurator(connectString(zkAddresses));
    try {
        awaitUp(2, TimeUnit.MINUTES);
    } catch (TimeoutException e) {
        throw Throwables.propagate(e);
    }
}
From source file:io.fabric8.apiman.rest.Kubernetes2ApimanFilter.java
/**
 * @see javax.servlet.Filter#init(javax.servlet.FilterConfig)
 */
@Override
public void init(FilterConfig config) throws ServletException {
    // maximum 10000 tokens in the cache
    Number nsCacheMaxsize = Systems.getEnvVarOrSystemProperty(NS_CACHE_MAXSIZE, 10000);
    // cache entries expire after NS_TTL minutes (default 10)
    Number nsTTL = Systems.getEnvVarOrSystemProperty(NS_TTL, 10);
    if (nsCache == null) {
        nsCache = CacheBuilder.newBuilder()
                .concurrencyLevel(4) // allowed concurrency among update operations
                .maximumSize(nsCacheMaxsize.longValue())
                .expireAfterWrite(nsTTL.longValue(), TimeUnit.MINUTES)
                .build(new CacheLoader<String, ApimanInfo>() {
                    public ApimanInfo load(String authToken) throws Exception {
                        return syncKubernetesToApiman(authToken);
                    }
                });
    }
    boolean isTestMode = getSystemPropertyOrEnvVar(ApimanStarter.APIMAN_TESTMODE, false);
    if (!isTestMode && !isWatching) {
        watchNamespaces();
    }
}
From source file:ms1quant.MS1Quant.java
/**
 * @param args the command line arguments: MS1Quant parameterfile
 */
public static void main(String[] args) throws Exception {
    BufferedReader reader = null;
    try {
        System.out.println(
                "=================================================================================================");
        System.out.println("Umpire MS1 quantification and feature detection analysis (version: "
                + UmpireInfo.GetInstance().Version + ")");
        if (args.length < 3 || !args[1].startsWith("-mode")) {
            System.out.println("command : java -jar -Xmx10G MS1Quant.jar ms1quant.params -mode[1 or 2] [Option]");
            System.out.println("\n-mode");
            System.out.println("\t1:Single file mode--> mzXML_file PepXML_file");
            System.out.println("\t\tEx: -mode1 file1.mzXML file1.pep.xml");
            System.out.println("\t2:Folder mode--> mzXML_Folder PepXML_Folder, all generated csv tables will be merged into a single csv file");
            System.out.println("\t\tEx: -mode2 /data/mzxml/ /data/pepxml/");
            System.out.println("\nOptions");
            System.out.println("\t-C\tNo of concurrent files to be processed (only for folder mode), Ex. -C5, default:1");
            System.out.println("\t-p\tMinimum probability, Ex. -p0.9, default:0.9");
            System.out.println("\t-ID\tDetect identified feature only");
            System.out.println("\t-O\toutput folder, Ex. -O/data/");
            return;
        }
        ConsoleLogger consoleLogger = new ConsoleLogger();
        consoleLogger.SetConsoleLogger(Level.DEBUG);
        consoleLogger.SetFileLogger(Level.DEBUG, FilenameUtils.getFullPath(args[0]) + "ms1quant_debug.log");
        Logger logger = Logger.getRootLogger();
        logger.debug("Command: " + Arrays.toString(args));
        logger.info("MS1Quant version: " + UmpireInfo.GetInstance().Version);

        String parameterfile = args[0];
        logger.info("Parameter file: " + parameterfile);
        File paramfile = new File(parameterfile);
        if (!paramfile.exists()) {
            logger.error("Parameter file " + paramfile.getAbsolutePath()
                    + " cannot be found. The program will exit.");
            return;
        }
        reader = new BufferedReader(new FileReader(paramfile.getAbsolutePath()));
        String line = "";
        InstrumentParameter param = new InstrumentParameter(InstrumentParameter.InstrumentType.TOF5600);
        int NoCPUs = 2;
        int NoFile = 1;
        param.DetermineBGByID = false;
        param.EstimateBG = true;

        //<editor-fold defaultstate="collapsed" desc="Read parameter file">
        while ((line = reader.readLine()) != null) {
            if (!"".equals(line) && !line.startsWith("#")) {
                logger.info(line);
                if (line.split("=").length < 2) {
                    continue;
                }
                String type = line.split("=")[0].trim();
                if (type.startsWith("para.")) {
                    type = type.replace("para.", "SE.");
                }
                String value = line.split("=")[1].trim();
                switch (type) {
                    case "Thread":
                        NoCPUs = Integer.parseInt(value);
                        break;
                    //<editor-fold defaultstate="collapsed" desc="instrument parameters">
                    case "SE.MS1PPM":
                        param.MS1PPM = Float.parseFloat(value);
                        break;
                    case "SE.MS2PPM":
                        param.MS2PPM = Float.parseFloat(value);
                        break;
                    case "SE.SN":
                        param.SNThreshold = Float.parseFloat(value);
                        break;
                    case "SE.MS2SN":
                        param.MS2SNThreshold = Float.parseFloat(value);
                        break;
                    case "SE.MinMSIntensity":
                        param.MinMSIntensity = Float.parseFloat(value);
                        break;
                    case "SE.MinMSMSIntensity":
                        param.MinMSMSIntensity = Float.parseFloat(value);
                        break;
                    case "SE.MinRTRange":
                        param.MinRTRange = Float.parseFloat(value);
                        break;
                    case "SE.MaxNoPeakCluster":
                        param.MaxNoPeakCluster = Integer.parseInt(value);
                        param.MaxMS2NoPeakCluster = Integer.parseInt(value);
                        break;
                    case "SE.MinNoPeakCluster":
                        param.MinNoPeakCluster = Integer.parseInt(value);
                        param.MinMS2NoPeakCluster = Integer.parseInt(value);
                        break;
                    case "SE.MinMS2NoPeakCluster":
                        param.MinMS2NoPeakCluster = Integer.parseInt(value);
                        break;
                    case "SE.MaxCurveRTRange":
                        param.MaxCurveRTRange = Float.parseFloat(value);
                        break;
                    case "SE.Resolution":
                        param.Resolution = Integer.parseInt(value);
                        break;
                    case "SE.RTtol":
                        param.RTtol = Float.parseFloat(value);
                        break;
                    case "SE.NoPeakPerMin":
                        param.NoPeakPerMin = Integer.parseInt(value);
                        break;
                    case "SE.StartCharge":
                        param.StartCharge = Integer.parseInt(value);
                        break;
                    case "SE.EndCharge":
                        param.EndCharge = Integer.parseInt(value);
                        break;
                    case "SE.MS2StartCharge":
                        param.MS2StartCharge = Integer.parseInt(value);
                        break;
                    case "SE.MS2EndCharge":
                        param.MS2EndCharge = Integer.parseInt(value);
                        break;
                    case "SE.NoMissedScan":
                        param.NoMissedScan = Integer.parseInt(value);
                        break;
                    case "SE.Denoise":
                        param.Denoise = Boolean.valueOf(value);
                        break;
                    case "SE.EstimateBG":
                        param.EstimateBG = Boolean.valueOf(value);
                        break;
                    case "SE.RemoveGroupedPeaks":
                        param.RemoveGroupedPeaks = Boolean.valueOf(value);
                        break;
                    case "SE.MinFrag":
                        param.MinFrag = Integer.parseInt(value);
                        break;
                    case "SE.IsoPattern":
                        param.IsoPattern = Float.valueOf(value);
                        break;
                    case "SE.StartRT":
                        param.startRT = Float.valueOf(value);
                        break;
                    case "SE.EndRT":
                        param.endRT = Float.valueOf(value);
                        break;
                    //</editor-fold>
                }
            }
        }
        //</editor-fold>

        int mode = 1;
        if (args[1].equals("-mode2")) {
            mode = 2;
        } else if (args[1].equals("-mode1")) {
            mode = 1;
        } else {
            logger.error("-mode number not recognized. The program will exit.");
            return;
        }

        String mzXML = "";
        String pepXML = "";
        String mzXMLPath = "";
        String pepXMLPath = "";
        File mzXMLfile = null;
        File pepXMLfile = null;
        File mzXMLfolder = null;
        File pepXMLfolder = null;
        int idx = 0;
        if (mode == 1) {
            mzXML = args[2];
            logger.info("Mode1 mzXML file: " + mzXML);
            mzXMLfile = new File(mzXML);
            if (!mzXMLfile.exists()) {
                logger.error("Mode1 mzXML file " + mzXMLfile.getAbsolutePath()
                        + " cannot be found. The program will exit.");
                return;
            }
            pepXML = args[3];
            logger.info("Mode1 pepXML file: " + pepXML);
            pepXMLfile = new File(pepXML);
            if (!pepXMLfile.exists()) {
                logger.error("Mode1 pepXML file " + pepXMLfile.getAbsolutePath()
                        + " cannot be found. The program will exit.");
                return;
            }
            idx = 4;
        } else if (mode == 2) {
            mzXMLPath = args[2];
            logger.info("Mode2 mzXML folder: " + mzXMLPath);
            mzXMLfolder = new File(mzXMLPath);
            if (!mzXMLfolder.exists()) {
                logger.error("Mode2 mzXML folder " + mzXMLfolder.getAbsolutePath()
                        + " does not exist. The program will exit.");
                return;
            }
            pepXMLPath = args[3];
            logger.info("Mode2 pepXML folder: " + pepXMLPath);
            pepXMLfolder = new File(pepXMLPath);
            if (!pepXMLfolder.exists()) {
                logger.error("Mode2 pepXML folder " + pepXMLfolder.getAbsolutePath()
                        + " does not exist. The program will exit.");
                return;
            }
            idx = 4;
        }

        String outputfolder = "";
        float MinProb = 0f;
        for (int i = idx; i < args.length; i++) {
            if (args[i].startsWith("-")) {
                if (args[i].equals("-ID")) {
                    param.TargetIDOnly = true;
                    logger.info("Detect ID feature only: true");
                }
                if (args[i].startsWith("-O")) {
                    outputfolder = args[i].substring(2);
                    logger.info("Output folder: " + outputfolder);
                    File outputfile = new File(outputfolder);
                    // append a trailing separator if one is missing
                    if (!outputfolder.endsWith("\\") && !outputfolder.endsWith("/")) {
                        outputfolder += "/";
                    }
                    if (!outputfile.exists()) {
                        outputfile.mkdir();
                    }
                }
                if (args[i].startsWith("-C")) {
                    try {
                        NoFile = Integer.parseInt(args[i].substring(2));
                        logger.info("No of concurrent files: " + NoFile);
                    } catch (Exception ex) {
                        logger.error(args[i] + " is not a correct integer format, will process only one file at a time.");
                    }
                }
                if (args[i].startsWith("-p")) {
                    try {
                        MinProb = Float.parseFloat(args[i].substring(2));
                        logger.info("probability threshold: " + MinProb);
                    } catch (Exception ex) {
                        logger.error(args[i] + " is not a correct format, will use 0 as threshold instead.");
                    }
                }
            }
        }
        reader.close();

        TandemParam tandemparam = new TandemParam(DBSearchParam.SearchInstrumentType.TOF5600);
        PTMManager.GetInstance();
        if (param.TargetIDOnly) {
            param.EstimateBG = false;
            param.ApexDelta = 1.5f;
            param.NoMissedScan = 10;
            param.MiniOverlapP = 0.2f;
            param.RemoveGroupedPeaks = false;
            param.CheckMonoIsotopicApex = false;
            param.DetectByCWT = false;
            param.FillGapByBK = false;
            param.IsoCorrThreshold = -1f;
            param.SmoothFactor = 3;
        }

        if (mode == 1) {
            logger.info("Processing " + mzXMLfile.getAbsolutePath() + "....");
            long time = System.currentTimeMillis();
            LCMSPeakMS1 LCMS1 = new LCMSPeakMS1(mzXMLfile.getAbsolutePath(), NoCPUs);
            LCMS1.SetParameter(param);
            LCMS1.Resume = false;
            if (!param.TargetIDOnly) {
                LCMS1.CreatePeakFolder();
            }
            LCMS1.ExportPeakClusterTable = true;
            if (pepXMLfile.exists()) {
                tandemparam.InteractPepXMLPath = pepXMLfile.getAbsolutePath();
                LCMS1.ParsePepXML(tandemparam, MinProb);
                logger.info("No. of PSMs included: " + LCMS1.IDsummary.PSMList.size());
                logger.info("No. of Peptide ions included: " + LCMS1.IDsummary.GetPepIonList().size());
            }
            if (param.TargetIDOnly) {
                LCMS1.SaveSerializationFile = false;
            }
            if (param.TargetIDOnly || !LCMS1.ReadPeakCluster()) {
                LCMS1.PeakClusterDetection();
            }
            if (pepXMLfile.exists()) {
                LCMS1.AssignQuant(false);
                LCMS1.IDsummary.ExportPepID(outputfolder);
            }
            time = System.currentTimeMillis() - time;
            logger.info(LCMS1.ParentmzXMLName + " processed time:"
                    + String.format("%d hour, %d min, %d sec", TimeUnit.MILLISECONDS.toHours(time),
                            TimeUnit.MILLISECONDS.toMinutes(time)
                                    - TimeUnit.HOURS.toMinutes(TimeUnit.MILLISECONDS.toHours(time)),
                            TimeUnit.MILLISECONDS.toSeconds(time)
                                    - TimeUnit.MINUTES.toSeconds(TimeUnit.MILLISECONDS.toMinutes(time))));
            LCMS1.BaseClearAllPeaks();
            LCMS1.SetSpectrumParser(null);
            LCMS1.IDsummary = null;
            LCMS1 = null;
            System.gc();
        } else if (mode == 2) {
            LCMSID IDsummary = new LCMSID("", "", "");
            logger.info("Parsing all pepXML files in " + pepXMLPath + "....");
            for (File file : pepXMLfolder.listFiles()) {
                if (file.getName().toLowerCase().endsWith("pep.xml")
                        || file.getName().toLowerCase().endsWith("pepxml")) {
                    PepXMLParser pepXMLParser = new PepXMLParser(IDsummary, file.getAbsolutePath(), MinProb);
                }
            }
            HashMap<String, LCMSID> LCMSIDMap = IDsummary.GetLCMSIDFileMap();

            ExecutorService executorPool = Executors.newFixedThreadPool(NoFile);
            logger.info("Processing all mzXML files in " + mzXMLPath + "....");
            for (File file : mzXMLfolder.listFiles()) {
                if (file.getName().toLowerCase().endsWith("mzxml")) {
                    LCMSID id = LCMSIDMap.get(FilenameUtils.getBaseName(file.getName()));
                    if (id == null || id.PSMList == null) {
                        logger.warn("No IDs found in :" + FilenameUtils.getBaseName(file.getName())
                                + ". Quantification for this file is skipped");
                        continue;
                    }
                    if (!id.PSMList.isEmpty()) {
                        MS1TargetQuantThread thread = new MS1TargetQuantThread(file, id, NoCPUs, outputfolder, param);
                        executorPool.execute(thread);
                    }
                }
            }
            LCMSIDMap.clear();
            LCMSIDMap = null;
            IDsummary = null;
            executorPool.shutdown();
            try {
                executorPool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
            } catch (InterruptedException e) {
                logger.info("interrupted..");
            }

            if (outputfolder == null || outputfolder.equals("")) {
                outputfolder = mzXMLPath;
            }
            logger.info("Merging PSM files..");
            File output = new File(outputfolder);
            FileWriter writer = new FileWriter(output.getAbsolutePath() + "/PSM_merge.csv");
            boolean header = false;
            for (File csvfile : output.listFiles()) {
                if (csvfile.getName().toLowerCase().endsWith("_psms.csv")) {
                    BufferedReader outreader = new BufferedReader(new FileReader(csvfile));
                    String outline = outreader.readLine();
                    if (!header) {
                        writer.write(outline + "\n");
                        header = true;
                    }
                    while ((outline = outreader.readLine()) != null) {
                        writer.write(outline + "\n");
                    }
                    outreader.close();
                    csvfile.delete();
                }
            }
            writer.close();
        }
        logger.info("MS1 quant module is complete.");
    } catch (Exception e) {
        Logger.getRootLogger().error(ExceptionUtils.getStackTrace(e));
        throw e;
    }
}
From source file:gov.llnl.lc.smt.command.gui.SmtGui.java
@Override
public boolean doCommand(SmtConfig config) throws Exception {
    boolean exit = false;

    MessageManager.getInstance()
            .postMessage(new SmtMessage(SmtMessageType.SMT_MSG_INIT, "** Initializing the ServiceUpdater"));
    initServiceUpdater(config);
    MessageManager.getInstance().postMessage(
            new SmtMessage(SmtMessageType.SMT_MSG_INIT, "** Finished ServiceUpdater initialization"));

    if (OMService == null) {
        System.err.println("The service is null");
    } else {
        // this is the GUI command, and it can take a subcommand and an argument
        String subCommand = null;
        String subCommandArg = null;
        if (config != null) {
            Map<String, String> map = config.getConfigMap();
            subCommand = map.get(SmtProperty.SMT_SUBCOMMAND.getName());
            subCommandArg = map.get(subCommand);
        }

        // start all managers
        /* refer to SmtConsole, may need a manager to do this more cleanly */
        exit = (UpdateService == null) || (OMService == null);
        if (!exit) {
            logger.severe("Starting the SMT GUI Application Now");
            MessageManager.getInstance().postMessage(
                    new SmtMessage(SmtMessageType.SMT_MSG_INIT, "ready to start the graphical interface"));
            SmtGuiApplication.go(UpdateService, OMService, this);
        } else {
            logger.severe("Could not start the SMT GUI Application, check service updater");
        }

        // infinite wait loop
        while (!exit) {
            TimeUnit.MINUTES.sleep(5);
        }
    }
    return true;
}
From source file:it.anyplace.sync.bep.IndexBrowser.java
private IndexBrowser(IndexRepository indexRepository, IndexHandler indexHandler, String folder,
        boolean includeParentInList, boolean allowParentInRoot, Comparator<FileInfo> ordering) {
    checkNotNull(indexRepository);
    checkNotNull(indexHandler);
    checkNotNull(emptyToNull(folder));
    this.indexRepository = indexRepository;
    this.indexHandler = indexHandler;
    this.indexHandler.getEventBus().register(indexHandlerEventListener);
    this.folder = folder;
    this.includeParentInList = includeParentInList;
    this.allowParentInRoot = allowParentInRoot;
    this.ordering = ordering;
    PARENT_FILE_INFO = FileInfo.newBuilder().setFolder(folder).setTypeDir().setPath(PARENT_PATH).build();
    ROOT_FILE_INFO = FileInfo.newBuilder().setFolder(folder).setTypeDir().setPath(ROOT_PATH).build();
    this.currentPath = ROOT_PATH;
    executorService.scheduleWithFixedDelay(new Runnable() {
        @Override
        public void run() {
            logger.debug("folder cache cleanup");
            listFolderCache.cleanUp();
            fileInfoCache.cleanUp();
        }
    }, 1, 1, TimeUnit.MINUTES);
}