List of usage examples for java.util.concurrent.TimeUnit.NANOSECONDS
TimeUnit.NANOSECONDS
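The examples below all revolve around the same handful of JDK calls: taking a System.nanoTime() delta and handing it, together with TimeUnit.NANOSECONDS, to a conversion or timeout-style API. As a baseline, here is a minimal, self-contained sketch of those calls; the class name is illustrative only, the TimeUnit methods are standard java.util.concurrent API.

import java.util.concurrent.TimeUnit;

// Illustrative only: the TimeUnit.NANOSECONDS calls that recur throughout the examples below.
public final class NanosDemo {
    public static void main(String[] args) throws InterruptedException {
        long start = System.nanoTime();
        TimeUnit.NANOSECONDS.sleep(5_000_000L);      // sleep roughly 5 ms, expressed in nanoseconds
        long elapsedNanos = System.nanoTime() - start;

        // Convert the elapsed nanoseconds into coarser units for reporting.
        System.out.println("elapsed ms  = " + TimeUnit.NANOSECONDS.toMillis(elapsedNanos));
        System.out.println("elapsed sec = " + TimeUnit.NANOSECONDS.toSeconds(elapsedNanos));
    }
}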
From source file:org.wso2.carbon.metrics.jdbc.reporter.JdbcReporterTest.java
@SuppressWarnings("rawtypes")
private long reportGauge(TimeUnit timestampUnit) {
    final Gauge gauge = mock(Gauge.class);
    when(gauge.getValue()).thenReturn(1);
    JdbcReporter reporter = JdbcReporter.forRegistry(registry)
            .convertRatesTo(TimeUnit.SECONDS)
            .convertDurationsTo(TimeUnit.NANOSECONDS)
            .convertTimestampTo(timestampUnit)
            .withClock(clock)
            .filter(MetricFilter.ALL)
            .build(SOURCE, dataSource);
    reporter.report(map("gauge", gauge), map(), map(), map(), map());
    List<Map<String, Object>> result = template.queryForList("SELECT * FROM METRIC_GAUGE");
    Assert.assertEquals(result.size(), 1);
    return (Long) result.get(0).get("TIMESTAMP");
}
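The WSO2 test above configures its reporter to express durations in nanoseconds. The same builder idiom exists on the stock Dropwizard Metrics reporters; the following is a minimal sketch assuming the standard com.codahale.metrics API rather than the WSO2 JdbcReporter, with metric names chosen purely for illustration.

import com.codahale.metrics.ConsoleReporter;
import com.codahale.metrics.MetricFilter;
import com.codahale.metrics.MetricRegistry;
import java.util.concurrent.TimeUnit;

public final class NanosecondReporterSketch {
    public static void main(String[] args) {
        MetricRegistry registry = new MetricRegistry();
        registry.timer("demo.timer").update(1234, TimeUnit.NANOSECONDS); // record one sample

        // Durations are printed in nanoseconds, rates per second.
        ConsoleReporter reporter = ConsoleReporter.forRegistry(registry)
                .convertRatesTo(TimeUnit.SECONDS)
                .convertDurationsTo(TimeUnit.NANOSECONDS)
                .filter(MetricFilter.ALL)
                .build();
        reporter.report(); // one-off report; reporter.start(period, unit) would schedule it instead
    }
}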
From source file:io.cloudslang.worker.management.services.WorkerManager.java
public void doRecovery() {
    // Attempts to stop all actively executing tasks, halts the
    // processing of waiting tasks, and returns a list of the tasks
    // that were awaiting execution.
    //
    // This method does not wait for actively executing tasks to
    // terminate.
    //
    // There are no guarantees beyond best-effort attempts to stop
    // processing actively executing tasks. For example, typical
    // implementations will cancel via {@link Thread#interrupt}, so any
    // task that fails to respond to interrupts may never terminate.
    try {
        synchronized (this) {
            executorService.shutdownNow(); // shutting down current running threads
            threadPoolVersion++; // updating the thread pool version to a new one - so current running threads will exit
            logger.warn(
                    "Worker is in doRecovery(). Cleaning state and cancelling running tasks. It may take up to 30 seconds...");
        }
        boolean finished = executorService.awaitTermination(30, TimeUnit.SECONDS);
        if (finished) {
            logger.warn("Worker succeeded to cancel running tasks during doRecovery().");
        } else {
            logger.warn("Not all running tasks responded to cancel.");
        }
    } catch (InterruptedException ex) {
        /* ignore */
    }

    mapOfRunningTasks.clear();

    // Make new executor
    executorService = new ThreadPoolExecutor(numberOfThreads, numberOfThreads, Long.MAX_VALUE,
            TimeUnit.NANOSECONDS, inBuffer,
            new WorkerThreadFactory((threadPoolVersion) + "_WorkerExecutionThread"));
}
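The recovery code above rebuilds its pool with a keep-alive of Long.MAX_VALUE nanoseconds, which for a fixed-size pool effectively means "never time threads out". A reduced sketch of that construction, plus the shutdownNow/awaitTermination pattern, using only JDK types (the pool size and queue here are placeholders, not CloudSlang's values):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public final class RecoverablePoolSketch {
    private static final int THREADS = 4; // placeholder pool size

    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>();
        // core == max and an effectively infinite keep-alive: threads are created once and never expire.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(THREADS, THREADS, Long.MAX_VALUE,
                TimeUnit.NANOSECONDS, queue);

        pool.submit(() -> System.out.println("working"));

        pool.shutdownNow();                                   // interrupt running tasks, drop queued ones
        boolean finished = pool.awaitTermination(30, TimeUnit.SECONDS);
        System.out.println(finished ? "all tasks stopped" : "some tasks ignored the interrupt");
    }
}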
From source file:com.yahoo.labs.yamall.local.Yamall.java
public static void main(String[] args) {
    String[] remainingArgs = null;
    String inputFile = null;
    String predsFile = null;
    String saveModelFile = null;
    String initialModelFile = null;
    String lossName = null;
    String parserName = null;
    String linkName = null;
    String invertHashName = null;
    double learningRate = 1;
    String minPredictionString = null;
    String maxPredictionString = null;
    String fmNumberFactorsString = null;
    int bitsHash;
    int numberPasses;
    int holdoutPeriod = 10;
    boolean testOnly = false;
    boolean exponentialProgress;
    double progressInterval;

    options.addOption("h", "help", false, "displays this help");
    options.addOption("t", false, "ignore label information and just test");
    options.addOption(Option.builder().hasArg(false).required(false).longOpt("binary")
            .desc("reports loss as binary classification with -1,1 labels").build());
    options.addOption(Option.builder().hasArg(false).required(false).longOpt("solo")
            .desc("uses SOLO optimizer").build());
    options.addOption(Option.builder().hasArg(false).required(false).longOpt("pcsolo")
            .desc("uses Per Coordinate SOLO optimizer").build());
    options.addOption(Option.builder().hasArg(false).required(false).longOpt("pistol")
            .desc("uses PiSTOL optimizer").build());
    options.addOption(Option.builder().hasArg(false).required(false).longOpt("kt")
            .desc("(EXPERIMENTAL) uses KT optimizer").build());
    options.addOption(Option.builder().hasArg(false).required(false).longOpt("pckt")
            .desc("(EXPERIMENTAL) uses Per Coordinate KT optimizer").build());
    options.addOption(Option.builder().hasArg(false).required(false).longOpt("pccocob")
            .desc("(EXPERIMENTAL) uses Per Coordinate COCOB optimizer").build());
    options.addOption(Option.builder().hasArg(false).required(false).longOpt("cocob")
            .desc("(EXPERIMENTAL) uses COCOB optimizer").build());
    options.addOption(Option.builder().hasArg(false).required(false).longOpt("fm")
            .desc("Factorization Machine").build());
    options.addOption(Option.builder("f").hasArg(true).required(false).desc("final regressor to save")
            .type(String.class).longOpt("final_regressor").build());
    options.addOption(Option.builder("p").hasArg(true).required(false).desc("file to output predictions to")
            .longOpt("predictions").type(String.class).build());
    options.addOption(Option.builder("i").hasArg(true).required(false)
            .desc("initial regressor(s) to load into memory")
            .longOpt("initial_regressor").type(String.class).build());
    options.addOption(Option.builder().hasArg(true).required(false)
            .desc("specify the loss function to be used. Currently available ones are: absolute, squared (default), hinge, logistic")
            .longOpt("loss_function").type(String.class).build());
    options.addOption(Option.builder().hasArg(true).required(false)
            .desc("specify the link function used in the output of the predictions. Currently available ones are: identity (default), logistic")
            .longOpt("link").type(String.class).build());
    options.addOption(Option.builder().hasArg(true).required(false)
            .desc("output human-readable final regressor with feature names").longOpt("invert_hash")
            .type(String.class).build());
    options.addOption(Option.builder("l").hasArg(true).required(false)
            .desc("set (initial) learning Rate, default = 1.0")
            .longOpt("learning_rate").type(String.class).build());
    options.addOption(Option.builder("b").hasArg(true).required(false)
            .desc("number of bits in the feature table, default = 18").longOpt("bit_precision")
            .type(String.class).build());
    options.addOption(Option.builder("P").hasArg(true).required(false)
            .desc("progress update frequency, integer: additive; float: multiplicative, default = 2.0")
            .longOpt("progress").type(String.class).build());
    options.addOption(Option.builder().hasArg(true).required(false)
            .desc("smallest prediction to output, before the link function, default = -50")
            .longOpt("min_prediction").type(String.class).build());
    options.addOption(Option.builder().hasArg(true).required(false)
            .desc("smallest prediction to output, before the link function, default = 50")
            .longOpt("max_prediction").type(String.class).build());
    options.addOption(Option.builder().hasArg(true).required(false)
            .desc("ignore namespaces beginning with the characters in <arg>").longOpt("ignore")
            .type(String.class).build());
    options.addOption(Option.builder().hasArg(true).required(false).desc("number of training passes")
            .longOpt("passes").type(String.class).build());
    options.addOption(Option.builder().hasArg(true).required(false)
            .desc("holdout period for test only, default = 10")
            .longOpt("holdout_period").type(String.class).build());
    options.addOption(Option.builder().hasArg(true).required(false)
            .desc("number of factors for Factorization Machines default = 8").longOpt("fmNumberFactors")
            .type(String.class).build());
    options.addOption(Option.builder().hasArg(true).required(false)
            .desc("specify the parser to use. Currently available ones are: vw (default), libsvm, tsv")
            .longOpt("parser").type(String.class).build());
    options.addOption(Option.builder().hasArg(true).required(false).desc("schema file for the TSV input")
            .longOpt("schema").type(String.class).build());

    CommandLineParser parser = new DefaultParser();
    CommandLine cmd = null;
    try {
        cmd = parser.parse(options, args);
    } catch (ParseException e) {
        System.out.println("Unrecognized option");
        help();
    }
    if (cmd.hasOption("h"))
        help();
    if (cmd.hasOption("t"))
        testOnly = true;
    if (cmd.hasOption("binary")) {
        binary = true;
        System.out.println("Reporting binary loss");
    }
    initialModelFile = cmd.getOptionValue("i");
    predsFile = cmd.getOptionValue("p");
    lossName = cmd.getOptionValue("loss_function", "squared");
    linkName = cmd.getOptionValue("link", "identity");
    saveModelFile = cmd.getOptionValue("f");
    learningRate = Double.parseDouble(cmd.getOptionValue("l", "1.0"));
    bitsHash = Integer.parseInt(cmd.getOptionValue("b", "18"));
    invertHashName = cmd.getOptionValue("invert_hash");
    minPredictionString = cmd.getOptionValue("min_prediction", "-50");
    maxPredictionString = cmd.getOptionValue("max_prediction", "50");
    fmNumberFactorsString = cmd.getOptionValue("fmNumberFactors", "8");
    parserName = cmd.getOptionValue("parser", "vw");

    numberPasses = Integer.parseInt(cmd.getOptionValue("passes", "1"));
    System.out.println("Number of passes = " + numberPasses);
    if (numberPasses > 1) {
        holdoutPeriod = Integer.parseInt(cmd.getOptionValue("holdout_period", "10"));
        System.out.println("Holdout period = " + holdoutPeriod);
    }

    remainingArgs = cmd.getArgs();
    if (remainingArgs.length == 1)
        inputFile = remainingArgs[0];

    InstanceParser instanceParser = null;
    if (parserName.equals("vw"))
        instanceParser = new VWParser(bitsHash, cmd.getOptionValue("ignore"), (invertHashName != null));
    else if (parserName.equals("libsvm"))
        instanceParser = new LIBSVMParser(bitsHash, (invertHashName != null));
    else if (parserName.equals("tsv")) {
        String schema = cmd.getOptionValue("schema");
        if (schema == null) {
            System.out.println("TSV parser requires a schema file.");
            System.exit(0);
        } else {
            String spec = null;
            try {
                spec = new String(Files.readAllBytes(Paths.get(schema)));
            } catch (IOException e) {
                System.out.println("Error reading the TSV schema file.");
                e.printStackTrace();
                System.exit(0);
            }
            instanceParser = new TSVParser(bitsHash, cmd.getOptionValue("ignore"), (invertHashName != null), spec);
        }
    } else {
        System.out.println("Unknown parser.");
        System.exit(0);
    }
    System.out.println("Num weight bits = " + bitsHash);

    // setup progress
    String progress = cmd.getOptionValue("P", "2.0");
    if (progress.indexOf('.') >= 0) {
        exponentialProgress = true;
        progressInterval = (double) Double.parseDouble(progress);
    } else {
        exponentialProgress = false;
        progressInterval = (double) Integer.parseInt(progress);
    }

    // min and max predictions
    minPrediction = (double) Double.parseDouble(minPredictionString);
    maxPrediction = (double) Double.parseDouble(maxPredictionString);

    // number of factors for Factorization Machines
    fmNumberFactors = (int) Integer.parseInt(fmNumberFactorsString);

    // configure the learner
    Loss lossFnc = null;
    LinkFunction link = null;
    if (initialModelFile == null) {
        if (cmd.hasOption("kt")) {
            learner = new KT(bitsHash);
        } else if (cmd.hasOption("pckt")) {
            learner = new PerCoordinateKT(bitsHash);
        } else if (cmd.hasOption("pcsolo")) {
            learner = new PerCoordinateSOLO(bitsHash);
        } else if (cmd.hasOption("solo")) {
            learner = new SOLO(bitsHash);
        } else if (cmd.hasOption("pccocob")) {
            learner = new PerCoordinateCOCOB(bitsHash);
        } else if (cmd.hasOption("cocob")) {
            learner = new COCOB(bitsHash);
        } else if (cmd.hasOption("pistol")) {
            learner = new PerCoordinatePiSTOL(bitsHash);
        } else if (cmd.hasOption("fm")) {
            learner = new SGD_FM(bitsHash, fmNumberFactors);
        } else
            learner = new SGD_VW(bitsHash);
    } else {
        learner = IOLearner.loadLearner(initialModelFile);
    }

    // setup link function
    if (linkName.equals("identity")) {
        link = new IdentityLinkFunction();
    } else if (linkName.equals("logistic")) {
        link = new LogisticLinkFunction();
    } else {
        System.out.println("Unknown link function.");
        System.exit(0);
    }

    // setup loss function
    if (lossName.equals("squared")) {
        lossFnc = new SquareLoss();
    } else if (lossName.equals("hinge")) {
        lossFnc = new HingeLoss();
    } else if (lossName.equals("logistic")) {
        lossFnc = new LogisticLoss();
    } else if (lossName.equals("absolute")) {
        lossFnc = new AbsLoss();
    } else {
        System.out.println("Unknown loss function.");
        System.exit(0);
    }

    learner.setLoss(lossFnc);
    learner.setLearningRate(learningRate);

    // maximum range predictions
    System.out.println("Max prediction = " + maxPrediction + ", Min Prediction = " + minPrediction);
    // print information about the learner
    System.out.println(learner.toString());
    // print information about the link function
    System.out.println(link.toString());
    // print information about the parser
    System.out.println(instanceParser.toString());
    // print information about ignored namespaces
    System.out.println("Ignored namespaces = " + cmd.getOptionValue("ignore", ""));

    long start = System.nanoTime();

    FileInputStream fstream;
    try {
        BufferedReader br = null;
        if (inputFile != null) {
            fstream = new FileInputStream(inputFile);
            System.out.println("Reading datafile = " + inputFile);
            br = new BufferedReader(new InputStreamReader(fstream));
        } else {
            System.out.println("Reading from console");
            br = new BufferedReader(new InputStreamReader(System.in));
        }

        File fout = null;
        FileOutputStream fos = null;
        BufferedWriter bw = null;
        if (predsFile != null) {
            fout = new File(predsFile);
            fos = new FileOutputStream(fout);
            bw = new BufferedWriter(new OutputStreamWriter(fos));
        }

        try {
            System.out.println("average example current current current");
            System.out.println("loss counter label predict features");
            int iter = 0;
            double cumLoss = 0;
            double weightedSampleSum = 0;
            double sPlus = 0;
            double sMinus = 0;
            Instance sample = null;
            boolean justPrinted = false;
            int pass = 0;
            ObjectOutputStream ooutTr = null;
            ObjectOutputStream ooutHO = null;
            ObjectInputStream oinTr = null;
            double pred = 0;
            int limit = 1;
            double hError = Double.MAX_VALUE;
            double lastHError = Double.MAX_VALUE;
            int numTestSample = 0;
            int numTrainingSample = 0;
            int idx = 0;

            if (numberPasses > 1) {
                ooutTr = new ObjectOutputStream(new FileOutputStream("cache_training.bin"));
                ooutHO = new ObjectOutputStream(new FileOutputStream("cache_holdout.bin"));
                oinTr = new ObjectInputStream(new FileInputStream("cache_training.bin"));
            }

            do {
                while (true) {
                    double score;
                    if (pass > 0 && numberPasses > 1) {
                        Instance tmp = (Instance) oinTr.readObject();
                        if (tmp != null)
                            sample = tmp;
                        else
                            break;
                    } else {
                        String strLine = br.readLine();
                        if (strLine != null)
                            sample = instanceParser.parse(strLine);
                        else
                            break;
                    }

                    justPrinted = false;
                    idx++;
                    if (numberPasses > 1 && pass == 0 && idx % holdoutPeriod == 0) {
                        // store the current sample for the holdout set
                        ooutHO.writeObject(sample);
                        ooutHO.reset();
                        numTestSample++;
                    } else {
                        if (numberPasses > 1 && pass == 0) {
                            ooutTr.writeObject(sample);
                            ooutTr.reset();
                            numTrainingSample++;
                        }
                        iter++;
                        if (testOnly) {
                            // predict the sample
                            score = learner.predict(sample);
                        } else {
                            // predict the sample and update the classifier using the sample
                            score = learner.update(sample);
                        }
                        score = Math.min(Math.max(score, minPrediction), maxPrediction);
                        pred = link.apply(score);
                        if (!binary)
                            cumLoss += learner.getLoss().lossValue(score, sample.getLabel()) * sample.getWeight();
                        else if (Math.signum(score) != sample.getLabel())
                            cumLoss += sample.getWeight();

                        weightedSampleSum += sample.getWeight();
                        if (sample.getLabel() > 0)
                            sPlus = sPlus + sample.getWeight();
                        else
                            sMinus = sMinus + sample.getWeight();

                        // output predictions to file
                        if (predsFile != null) {
                            bw.write(String.format("%.6f %s", pred, sample.getTag()));
                            bw.newLine();
                        }

                        // print statistics to screen
                        if (iter == limit) {
                            justPrinted = true;
                            System.out.printf("%.6f %12d % .4f % .4f %d\n", cumLoss / weightedSampleSum, iter,
                                    sample.getLabel(), pred, sample.getVector().size());
                            if (exponentialProgress)
                                limit *= progressInterval;
                            else
                                limit += progressInterval;
                        }
                    }
                }

                if (numberPasses > 1) {
                    if (pass == 0) {
                        // finished first pass of many
                        // write a null at the end of the files
                        ooutTr.writeObject(null);
                        ooutHO.writeObject(null);
                        ooutTr.flush();
                        ooutHO.flush();
                        ooutTr.close();
                        ooutHO.close();
                        System.out.println("finished first epoch");
                        System.out.println(numTrainingSample + " training samples");
                        System.out.println(numTestSample + " holdout samples saved");
                    }
                    lastHError = hError;
                    hError = evalHoldoutError();
                }

                if (numberPasses > 1) {
                    System.out.printf("Weighted loss on holdout on epoch %d = %.6f\n", pass + 1, hError);
                    oinTr.close();
                    oinTr = new ObjectInputStream(new FileInputStream("cache_training.bin"));
                    if (hError > lastHError) {
                        System.out.println("Early stopping");
                        break;
                    }
                }

                pass++;
            } while (pass < numberPasses);

            if (justPrinted == false) {
                System.out.printf("%.6f %12d % .4f % .4f %d\n", cumLoss / weightedSampleSum, iter,
                        sample.getLabel(), pred, sample.getVector().size());
            }
            System.out.println("finished run");
            System.out.println(String.format("average loss best constant predictor: %.6f",
                    lossFnc.lossConstantBinaryLabels(sPlus, sMinus)));
            if (saveModelFile != null)
                IOLearner.saveLearner(learner, saveModelFile);
            if (invertHashName != null)
                IOLearner.saveInvertHash(learner.getWeights(), instanceParser.getInvertHashMap(), invertHashName);
        } catch (IOException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        } catch (ClassNotFoundException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }

        // close the input stream
        try {
            br.close();
        } catch (IOException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }

        // close the output stream
        if (predsFile != null) {
            try {
                bw.close();
            } catch (IOException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            }
        }

        long millis = System.nanoTime() - start;
        System.out.printf("Elapsed time: %d min, %d sec\n", TimeUnit.NANOSECONDS.toMinutes(millis),
                TimeUnit.NANOSECONDS.toSeconds(millis) - 60 * TimeUnit.NANOSECONDS.toMinutes(millis));
    } catch (FileNotFoundException e) {
        System.out.println("Error opening the input file");
        e.printStackTrace();
    }
}
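The last statement of the Yamall example above is a common idiom for printing an elapsed System.nanoTime() interval as minutes and seconds (note that the local variable is named millis even though it actually holds nanoseconds). Isolated, with only JDK calls, the same arithmetic looks like this:

import java.util.concurrent.TimeUnit;

public final class ElapsedTimeSketch {
    public static void main(String[] args) {
        long start = System.nanoTime();
        // ... do some work ...
        long elapsedNanos = System.nanoTime() - start;

        long minutes = TimeUnit.NANOSECONDS.toMinutes(elapsedNanos);
        long seconds = TimeUnit.NANOSECONDS.toSeconds(elapsedNanos) - TimeUnit.MINUTES.toSeconds(minutes);
        System.out.printf("Elapsed time: %d min, %d sec%n", minutes, seconds);
    }
}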
From source file:com.netflix.genie.web.services.impl.JobSpecificationServiceImpl.java
private Command getCommand(final String commandId, final String jobId) throws GenieException {
    final long start = System.nanoTime();
    final Set<Tag> tags = Sets.newHashSet();
    try {
        log.info("Selecting command for job {} ", jobId);
        final Command command = this.commandPersistenceService.getCommand(commandId);
        log.info("Selected command {} for job {} ", commandId, jobId);
        MetricsUtils.addSuccessTags(tags);
        return command;
    } catch (final Throwable t) {
        MetricsUtils.addFailureTagsWithException(tags, t);
        throw t;
    } finally {
        this.registry.timer(SELECT_COMMAND_TIMER_NAME, tags).record(System.nanoTime() - start,
                TimeUnit.NANOSECONDS);
    }
}
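The Genie code above records the elapsed nanoseconds into a Micrometer timer from a finally block so that both the success and failure paths are measured. A stripped-down sketch of that pattern, assuming the standard Micrometer API (the timer name, tags, and class are placeholders, not Genie's):

import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.Tag;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.TimeUnit;

public final class TimedCallSketch {
    private final MeterRegistry registry = new SimpleMeterRegistry();

    public String doWork() {
        final long start = System.nanoTime();
        final Set<Tag> tags = new HashSet<>();
        try {
            final String result = "ok";              // the actual work would go here
            tags.add(Tag.of("status", "success"));
            return result;
        } catch (final RuntimeException e) {
            tags.add(Tag.of("status", "failure"));
            throw e;
        } finally {
            // Record the elapsed wall-clock time in nanoseconds, whatever the outcome.
            registry.timer("demo.work.timer", tags).record(System.nanoTime() - start, TimeUnit.NANOSECONDS);
        }
    }

    public static void main(String[] args) {
        System.out.println(new TimedCallSketch().doWork());
    }
}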
From source file:com.netflix.genie.web.services.loadbalancers.script.ScriptLoadBalancer.java
/**
 * Check if the script file needs to be refreshed.
 */
public void refresh() {
    log.debug("Refreshing");
    final long updateStart = System.nanoTime();
    final Set<Tag> tags = Sets.newHashSet();
    try {
        this.isUpdating.set(true);

        // Update the script timeout
        this.timeoutLength.set(this.environment.getProperty(ScriptLoadBalancerProperties.TIMEOUT_PROPERTY,
                Long.class, DEFAULT_TIMEOUT_LENGTH));

        final String scriptFileSourceValue = this.environment
                .getProperty(ScriptLoadBalancerProperties.SCRIPT_FILE_SOURCE_PROPERTY);
        if (StringUtils.isBlank(scriptFileSourceValue)) {
            throw new IllegalStateException("Invalid empty value for script source file property: "
                    + ScriptLoadBalancerProperties.SCRIPT_FILE_SOURCE_PROPERTY);
        }
        final String scriptFileSource = new URI(scriptFileSourceValue).toString();

        final String scriptFileDestinationValue = this.environment
                .getProperty(ScriptLoadBalancerProperties.SCRIPT_FILE_DESTINATION_PROPERTY);
        if (StringUtils.isBlank(scriptFileDestinationValue)) {
            throw new IllegalStateException("Invalid empty value for script destination directory property: "
                    + ScriptLoadBalancerProperties.SCRIPT_FILE_DESTINATION_PROPERTY);
        }
        final Path scriptDestinationDirectory = Paths.get(new URI(scriptFileDestinationValue));

        // Check the validity of the destination directory
        if (!Files.exists(scriptDestinationDirectory)) {
            Files.createDirectories(scriptDestinationDirectory);
        } else if (!Files.isDirectory(scriptDestinationDirectory)) {
            throw new IllegalStateException("The script destination directory " + scriptDestinationDirectory
                    + " exists but is not a directory");
        }

        final String fileName = StringUtils.substringAfterLast(scriptFileSource, SLASH);
        if (StringUtils.isBlank(fileName)) {
            throw new IllegalStateException("No file name found from " + scriptFileSource);
        }
        final String scriptExtension = StringUtils.substringAfterLast(fileName, PERIOD);
        if (StringUtils.isBlank(scriptExtension)) {
            throw new IllegalStateException("No file extension available in " + fileName);
        }

        final Path scriptDestinationPath = scriptDestinationDirectory.resolve(fileName);

        // Download and cache the file (if it's not already there)
        this.fileTransferService.getFile(scriptFileSource, scriptDestinationPath.toUri().toString());

        final ScriptEngine engine = this.scriptEngineManager.getEngineByExtension(scriptExtension);
        // We want a compilable engine so we can cache the script
        if (!(engine instanceof Compilable)) {
            throw new IllegalArgumentException("Script engine must be of type " + Compilable.class.getName());
        }
        final Compilable compilable = (Compilable) engine;
        try (InputStream fis = Files.newInputStream(scriptDestinationPath);
                InputStreamReader reader = new InputStreamReader(fis, UTF_8)) {
            log.debug("Compiling {}", scriptFileSource);
            this.script.set(compilable.compile(reader));
        }
        tags.add(Tag.of(MetricsConstants.TagKeys.STATUS, STATUS_TAG_OK));
        this.isConfigured.set(true);
    } catch (final GenieException | IOException | ScriptException | RuntimeException | URISyntaxException e) {
        tags.add(Tag.of(MetricsConstants.TagKeys.STATUS, STATUS_TAG_FAILED));
        tags.add(Tag.of(MetricsConstants.TagKeys.EXCEPTION_CLASS, e.getClass().getName()));
        log.error("Refreshing the load balancing script for ScriptLoadBalancer failed due to {}", e.getMessage(), e);
        this.isConfigured.set(false);
    } finally {
        this.isUpdating.set(false);
        this.registry.timer(UPDATE_TIMER_NAME, tags).record(System.nanoTime() - updateStart, TimeUnit.NANOSECONDS);
        log.debug("Refresh completed");
    }
}
From source file:org.agatom.springatom.cmp.action.DefaultActionsModelReader.java
private void parseActionModels() throws Exception {
    Assert.notNull(this.actionModel);
    LOGGER.trace("Starting parsing actionModel");
    try {
        final long startTime = System.nanoTime();
        {
            final JsonNode actionModelsNode = this.actionModel.get(ACTION_MODELS_KEY);
            Assert.isTrue(actionModelsNode.getNodeType().equals(JsonNodeType.ARRAY));

            final ArrayNode actionModels = (ArrayNode) actionModelsNode;
            final int length = actionModels.size();
            LOGGER.trace(String.format("%d actionModels found", length));

            final Map<String, ActionModelReferenceMap> referenceMap = Maps.newHashMap();
            for (final JsonNode node : actionModels) {
                referenceMap.put(node.get("name").textValue(), this.flattenActionModels((ObjectNode) node));
            }
            this.flattenActionModel = referenceMap;
        }
        LOGGER.info(String.format("Loaded actionModel in %dms",
                TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime)));
    } catch (Exception exp) {
        LOGGER.error("Error in parsing actionModel", exp);
        throw exp;
    }
}
From source file:com.linkedin.pinot.core.data.manager.realtime.RealtimeSegmentDataManager.java
public RealtimeSegmentDataManager(final RealtimeSegmentZKMetadata segmentMetadata,
        final AbstractTableConfig tableConfig, InstanceZKMetadata instanceMetadata,
        RealtimeTableDataManager realtimeResourceManager, final String resourceDataDir, final ReadMode mode,
        final Schema schema, final ServerMetrics serverMetrics) throws Exception {
    super();
    this.schema = schema;
    this.extractor = (PlainFieldExtractor) FieldExtractorFactory.getPlainFieldExtractor(schema);
    this.serverMetrics = serverMetrics;
    this.segmentName = segmentMetadata.getSegmentName();
    this.tableName = tableConfig.getTableName();

    IndexingConfig indexingConfig = tableConfig.getIndexingConfig();
    if (indexingConfig.getSortedColumn().isEmpty()) {
        LOGGER.info("RealtimeDataResourceZKMetadata contains no information about sorted column for segment {}",
                segmentName);
        this.sortedColumn = null;
    } else {
        String firstSortedColumn = indexingConfig.getSortedColumn().get(0);
        if (this.schema.hasColumn(firstSortedColumn)) {
            LOGGER.info("Setting sorted column name: {} from RealtimeDataResourceZKMetadata for segment {}",
                    firstSortedColumn, segmentName);
            this.sortedColumn = firstSortedColumn;
        } else {
            LOGGER.warn(
                    "Sorted column name: {} from RealtimeDataResourceZKMetadata is not existed in schema for segment {}.",
                    firstSortedColumn, segmentName);
            this.sortedColumn = null;
        }
    }

    // inverted index columns
    invertedIndexColumns = indexingConfig.getInvertedIndexColumns();
    this.segmentMetatdaZk = segmentMetadata;

    // create and init stream provider config
    // TODO : ideally resourceMetatda should create and give back a streamProviderConfig
    this.kafkaStreamProviderConfig = new KafkaHighLevelStreamProviderConfig();
    this.kafkaStreamProviderConfig.init(tableConfig, instanceMetadata, schema);
    segmentLogger = LoggerFactory.getLogger(RealtimeSegmentDataManager.class.getName() + "_" + segmentName
            + "_" + kafkaStreamProviderConfig.getStreamName());
    segmentLogger.info("Created segment data manager with Sorted column:{}, invertedIndexColumns:{}",
            sortedColumn, invertedIndexColumns);

    segmentEndTimeThreshold = start + kafkaStreamProviderConfig.getTimeThresholdToFlushSegment();

    this.resourceDir = new File(resourceDataDir);
    this.resourceTmpDir = new File(resourceDataDir, "_tmp");
    if (!resourceTmpDir.exists()) {
        resourceTmpDir.mkdirs();
    }

    // create and init stream provider
    final String tableName = tableConfig.getTableName();
    this.kafkaStreamProvider = StreamProviderFactory.buildStreamProvider();
    this.kafkaStreamProvider.init(kafkaStreamProviderConfig, tableName, serverMetrics);
    this.kafkaStreamProvider.start();
    this.tableStreamName = tableName + "_" + kafkaStreamProviderConfig.getStreamName();

    // lets create a new realtime segment
    segmentLogger.info("Started kafka stream provider");
    realtimeSegment = new RealtimeSegmentImpl(schema, kafkaStreamProviderConfig.getSizeThresholdToFlushSegment(),
            tableName, segmentMetadata.getSegmentName(), kafkaStreamProviderConfig.getStreamName(), serverMetrics);
    realtimeSegment.setSegmentMetadata(segmentMetadata, this.schema);
    notifier = realtimeResourceManager;

    segmentStatusTask = new TimerTask() {
        @Override
        public void run() {
            computeKeepIndexing();
        }
    };

    // start the indexing thread
    indexingThread = new Thread(new Runnable() {
        @Override
        public void run() {
            // continue indexing until criteria is met
            boolean notFull = true;
            long exceptionSleepMillis = 50L;
            segmentLogger.info("Starting to collect rows");

            do {
                GenericRow row = null;
                try {
                    row = kafkaStreamProvider.next();
                    row = extractor.transform(row);
                    if (row != null) {
                        notFull = realtimeSegment.index(row);
                        exceptionSleepMillis = 50L;
                    }
                } catch (Exception e) {
                    segmentLogger.warn("Caught exception while indexing row, sleeping for {} ms, row contents {}",
                            exceptionSleepMillis, row, e);
                    // Sleep for a short time as to avoid filling the logs with exceptions too quickly
                    Uninterruptibles.sleepUninterruptibly(exceptionSleepMillis, TimeUnit.MILLISECONDS);
                    exceptionSleepMillis = Math.min(60000L, exceptionSleepMillis * 2);
                } catch (Error e) {
                    segmentLogger.error("Caught error in indexing thread", e);
                    throw e;
                }
            } while (notFull && keepIndexing && (!isShuttingDown));

            if (isShuttingDown) {
                segmentLogger.info("Shutting down indexing thread!");
                return;
            }
            try {
                int numErrors, numConversions, numNulls, numNullCols;
                if ((numErrors = extractor.getTotalErrors()) > 0) {
                    serverMetrics.addMeteredTableValue(tableStreamName, ServerMeter.ROWS_WITH_ERRORS, (long) numErrors);
                }
                Map<String, Integer> errorCount = extractor.getError_count();
                for (String column : errorCount.keySet()) {
                    if ((numErrors = errorCount.get(column)) > 0) {
                        segmentLogger.warn("Column {} had {} rows with errors", column, numErrors);
                    }
                }
                if ((numConversions = extractor.getTotalConversions()) > 0) {
                    serverMetrics.addMeteredTableValue(tableStreamName, ServerMeter.ROWS_NEEDING_CONVERSIONS,
                            (long) numConversions);
                    segmentLogger.info("{} rows needed conversions ", numConversions);
                }
                if ((numNulls = extractor.getTotalNulls()) > 0) {
                    serverMetrics.addMeteredTableValue(tableStreamName, ServerMeter.ROWS_WITH_NULL_VALUES, (long) numNulls);
                    segmentLogger.info("{} rows had null columns", numNulls);
                }
                if ((numNullCols = extractor.getTotalNullCols()) > 0) {
                    serverMetrics.addMeteredTableValue(tableStreamName, ServerMeter.COLUMNS_WITH_NULL_VALUES,
                            (long) numNullCols);
                    segmentLogger.info("{} columns had null values", numNullCols);
                }
                segmentLogger.info("Indexing threshold reached, proceeding with index conversion");

                // kill the timer first
                segmentStatusTask.cancel();
                updateCurrentDocumentCountMetrics();
                segmentLogger.info("Indexed {} raw events, current number of docs = {}",
                        realtimeSegment.getRawDocumentCount(), realtimeSegment.getSegmentMetadata().getTotalDocs());
                File tempSegmentFolder = new File(resourceTmpDir, "tmp-" + String.valueOf(System.currentTimeMillis()));

                // lets convert the segment now
                RealtimeSegmentConverter converter = new RealtimeSegmentConverter(realtimeSegment,
                        tempSegmentFolder.getAbsolutePath(), schema, segmentMetadata.getTableName(),
                        segmentMetadata.getSegmentName(), sortedColumn, invertedIndexColumns);

                segmentLogger.info("Trying to build segment");
                final long buildStartTime = System.nanoTime();
                converter.build();
                final long buildEndTime = System.nanoTime();
                segmentLogger.info("Built segment in {} ms",
                        TimeUnit.MILLISECONDS.convert((buildEndTime - buildStartTime), TimeUnit.NANOSECONDS));
                File destDir = new File(resourceDataDir, segmentMetadata.getSegmentName());
                FileUtils.deleteQuietly(destDir);
                FileUtils.moveDirectory(tempSegmentFolder.listFiles()[0], destDir);
                FileUtils.deleteQuietly(tempSegmentFolder);
                long segStartTime = realtimeSegment.getMinTime();
                long segEndTime = realtimeSegment.getMaxTime();

                TimeUnit timeUnit = schema.getTimeFieldSpec().getOutgoingGranularitySpec().getTimeType();
                Configuration configuration = new PropertyListConfiguration();
                configuration.setProperty(IndexLoadingConfigMetadata.KEY_OF_LOADING_INVERTED_INDEX, invertedIndexColumns);
                IndexLoadingConfigMetadata configMetadata = new IndexLoadingConfigMetadata(configuration);
                IndexSegment segment = Loaders.IndexSegment
                        .load(new File(resourceDir, segmentMetatdaZk.getSegmentName()), mode, configMetadata);

                segmentLogger.info("Committing Kafka offsets");
                boolean commitSuccessful = false;
                try {
                    kafkaStreamProvider.commit();
                    commitSuccessful = true;
                    kafkaStreamProvider.shutdown();
                    segmentLogger.info("Successfully committed Kafka offsets, consumer release requested.");
                } catch (Throwable e) {
                    // If we got here, it means that either the commit or the shutdown failed. Considering that the
                    // KafkaConsumerManager delays shutdown and only adds the consumer to be released in a deferred way, this
                    // likely means that writing the Kafka offsets failed.
                    //
                    // The old logic (mark segment as done, then commit offsets and shutdown the consumer immediately) would die
                    // in a terrible way, leaving the consumer open and causing us to only get half the records from that point
                    // on. In this case, because we keep the consumer open for a little while, we should be okay if the
                    // controller reassigns us a new segment before the consumer gets released. Hopefully by the next time that
                    // we get to committing the offsets, the transient ZK failure that caused the write to fail will not
                    // happen again and everything will be good.
                    //
                    // Several things can happen:
                    // - The controller reassigns us a new segment before we release the consumer (KafkaConsumerManager will
                    //   keep the consumer open for about a minute, which should be enough time for the controller to reassign
                    //   us a new segment) and the next time we close the segment the offsets commit successfully; we're good.
                    // - The controller reassigns us a new segment, but after we released the consumer (if the controller was
                    //   down or there was a ZK failure on writing the Kafka offsets but not the Helix state). We lose whatever
                    //   data was in this segment. Not good.
                    // - The server crashes after this comment and before we mark the current segment as done; if the Kafka
                    //   offsets didn't get written, then when the server restarts it'll start consuming the current segment
                    //   from the previously committed offsets; we're good.
                    // - The server crashes after this comment, the Kafka offsets were written but the segment wasn't marked as
                    //   done in Helix, but we got a failure (or not) on the commit; we lose whatever data was in this segment
                    //   if we restart the server (not good). If we manually mark the segment as done in Helix by editing the
                    //   state in ZK, everything is good, we'll consume a new segment that starts from the correct offsets.
                    //
                    // This is still better than the previous logic, which would have these failure modes:
                    // - Consumer was left open and the controller reassigned us a new segment; consume only half the events
                    //   (because there are two consumers and Kafka will try to rebalance partitions between those two)
                    // - We got a segment assigned to us before we got around to committing the offsets, reconsume the data that
                    //   we got in this segment again, as we're starting consumption from the previously committed offset (eg.
                    //   duplicate data).
                    //
                    // This is still not very satisfactory, which is why this part is due for a redesign.
                    //
                    // Assuming you got here because the realtime offset commit metric has fired, check the logs to determine
                    // which of the above scenarios happened. If you're in one of the good scenarios, then there's nothing to
                    // do. If you're not, then based on how critical it is to get those rows back, then your options are:
                    // - Wipe the realtime table and reconsume everything (mark the replica as disabled so that clients don't
                    //   see query results from partially consumed data, then re-enable it when this replica has caught up)
                    // - Accept that those rows are gone in this replica and move on (they'll be replaced by good offline data
                    //   soon anyway)
                    // - If there's a replica that has consumed properly, you could shut it down, copy its segments onto this
                    //   replica, assign a new consumer group id to this replica, rename the copied segments and edit their
                    //   metadata to reflect the new consumer group id, copy the Kafka offsets from the shutdown replica onto
                    //   the new consumer group id and then restart both replicas. This should get you the missing rows.
                    segmentLogger.error("FATAL: Exception committing or shutting down consumer commitSuccessful={}",
                            commitSuccessful, e);
                    serverMetrics.addMeteredTableValue(tableName, ServerMeter.REALTIME_OFFSET_COMMIT_EXCEPTIONS, 1L);
                    if (!commitSuccessful) {
                        kafkaStreamProvider.shutdown();
                    }
                }

                try {
                    segmentLogger.info("Marking current segment as completed in Helix");
                    RealtimeSegmentZKMetadata metadataToOverwrite = new RealtimeSegmentZKMetadata();
                    metadataToOverwrite.setTableName(segmentMetadata.getTableName());
                    metadataToOverwrite.setSegmentName(segmentMetadata.getSegmentName());
                    metadataToOverwrite.setSegmentType(SegmentType.OFFLINE);
                    metadataToOverwrite.setStatus(Status.DONE);
                    metadataToOverwrite.setStartTime(segStartTime);
                    metadataToOverwrite.setEndTime(segEndTime);
                    metadataToOverwrite.setTotalRawDocs(realtimeSegment.getSegmentMetadata().getTotalDocs());
                    metadataToOverwrite.setTimeUnit(timeUnit);
                    notifier.notifySegmentCommitted(metadataToOverwrite, segment);
                    segmentLogger.info(
                            "Completed write of segment completion to Helix, waiting for controller to assign a new segment");
                } catch (Exception e) {
                    if (commitSuccessful) {
                        segmentLogger.error(
                                "Offsets were committed to Kafka but we were unable to mark this segment as completed in Helix. Manually mark the segment as completed in Helix; restarting this instance will result in data loss.",
                                e);
                    } else {
                        segmentLogger.warn(
                                "Caught exception while marking segment as completed in Helix. Offsets were not written, restarting the instance should be safe.",
                                e);
                    }
                }
            } catch (Exception e) {
                segmentLogger.error("Caught exception in the realtime indexing thread", e);
            }
        }
    });
    indexingThread.start();

    serverMetrics.addValueToTableGauge(tableName, ServerGauge.SEGMENT_COUNT, 1L);
    segmentLogger.debug("scheduling keepIndexing timer check");
    // start a schedule timer to keep track of the segment
    TimerService.timer.schedule(segmentStatusTask, ONE_MINUTE_IN_MILLSEC, ONE_MINUTE_IN_MILLSEC);
    segmentLogger.info("finished scheduling keepIndexing timer check");
}
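The segment build above is timed with System.nanoTime() and reported through TimeUnit.MILLISECONDS.convert(delta, TimeUnit.NANOSECONDS), which is simply the two-argument spelling of TimeUnit.NANOSECONDS.toMillis(delta). A tiny JDK-only sketch of that step in isolation:

import java.util.concurrent.TimeUnit;

public final class BuildTimingSketch {
    public static void main(String[] args) {
        final long buildStartTime = System.nanoTime();
        // ... converter.build() would run here ...
        final long buildEndTime = System.nanoTime();

        long deltaNanos = buildEndTime - buildStartTime;
        // Both lines below print the same value.
        System.out.println("Built segment in " + TimeUnit.MILLISECONDS.convert(deltaNanos, TimeUnit.NANOSECONDS) + " ms");
        System.out.println("Built segment in " + TimeUnit.NANOSECONDS.toMillis(deltaNanos) + " ms");
    }
}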
From source file:monasca.persister.repository.cassandra.CassandraMetricRepo.java
@Override
public int flush(String id) throws RepoException {
    long startTime = System.nanoTime();
    List<ResultSetFuture> results = new ArrayList<>();
    List<Deque<BatchStatement>> list = batches.getAllBatches();
    for (Deque<BatchStatement> q : list) {
        BatchStatement b;
        while ((b = q.poll()) != null) {
            results.add(session.executeAsync(b));
        }
    }

    List<ListenableFuture<ResultSet>> futures = Futures.inCompletionOrder(results);

    boolean cancel = false;
    Exception ex = null;
    for (ListenableFuture<ResultSet> future : futures) {
        if (cancel) {
            future.cancel(false);
            continue;
        }
        try {
            future.get();
        } catch (InterruptedException | ExecutionException e) {
            cancel = true;
            ex = e;
        }
    }

    this.commitTimer.update(System.nanoTime() - startTime, TimeUnit.NANOSECONDS);

    if (ex != null) {
        metricFailed.inc(metricCount);
        throw new RepoException(ex);
    }

    batches.clear();
    int flushCnt = metricCount;
    metricCount = 0;
    metricCompleted.inc(flushCnt);
    return flushCnt;
}
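The Cassandra repository above records flush latency with a Dropwizard Metrics Timer, passing the raw System.nanoTime() delta together with TimeUnit.NANOSECONDS. A minimal sketch of that call, assuming the standard com.codahale.metrics Timer (the metric name and class are chosen for illustration):

import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import java.util.concurrent.TimeUnit;

public final class CommitTimerSketch {
    private final MetricRegistry metrics = new MetricRegistry();
    private final Timer commitTimer = metrics.timer("cassandra.commit");

    public void flush() {
        long startTime = System.nanoTime();
        // ... execute the batch statements here ...
        // Timer.update takes the raw duration plus the unit it is expressed in.
        commitTimer.update(System.nanoTime() - startTime, TimeUnit.NANOSECONDS);
    }

    public static void main(String[] args) {
        new CommitTimerSketch().flush();
    }
}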
From source file:com.netflix.genie.web.services.impl.S3FileTransferImplTest.java
/**
 * Test the getFile method for valid s3 path.
 *
 * @throws GenieException If there is any problem
 */
@Test(expected = GenieServerException.class)
public void testGetFileMethodFailureToFetch() throws GenieException {
    Mockito.when(this.s3Client.getObject(Mockito.any(GetObjectRequest.class), Mockito.any(File.class)))
            .thenThrow(new AmazonS3Exception("something"));
    final ArgumentCaptor<GetObjectRequest> argument = ArgumentCaptor.forClass(GetObjectRequest.class);

    try {
        this.s3FileTransfer.getFile(S3_PATH, LOCAL_PATH);
    } finally {
        Mockito.verify(this.s3Client).getObject(argument.capture(), Mockito.any());
        Assert.assertEquals(S3_BUCKET, argument.getValue().getBucketName());
        Assert.assertEquals(S3_KEY, argument.getValue().getKey());
        Mockito.verify(this.downloadTimer, Mockito.times(1)).record(Mockito.anyLong(),
                Mockito.eq(TimeUnit.NANOSECONDS));
        Mockito.verify(this.registry, Mockito.times(1))
                .timer(Mockito.eq(S3FileTransferImpl.DOWNLOAD_TIMER_NAME), this.tagsCaptor.capture());
        Assert.assertEquals(MetricsUtils.newFailureTagsSetForException(new GenieServerException("blah")),
                this.tagsCaptor.getValue());
    }
}
From source file:com.netflix.genie.core.services.impl.S3FileTransferImplUnitTests.java
/**
 * Test the getFile method for valid s3 path.
 *
 * @throws GenieException If there is any problem
 */
@Test(expected = GenieServerException.class)
public void testGetFileMethodFailureToFetch() throws GenieException {
    Mockito.when(this.s3Client.getObject(Mockito.any(GetObjectRequest.class), Mockito.any(File.class)))
            .thenThrow(new AmazonS3Exception("something"));
    final ArgumentCaptor<GetObjectRequest> argument = ArgumentCaptor.forClass(GetObjectRequest.class);

    try {
        s3FileTransfer.getFile(S3_PATH, LOCAL_PATH);
    } finally {
        Mockito.verify(this.s3Client).getObject(argument.capture(), Mockito.any());
        Assert.assertEquals(S3_BUCKET, argument.getValue().getBucketName());
        Assert.assertEquals(S3_KEY, argument.getValue().getKey());
        Mockito.verify(this.downloadTimer, Mockito.times(1)).record(Mockito.anyLong(),
                Mockito.eq(TimeUnit.NANOSECONDS));
        Mockito.verify(this.downloadTimerId, Mockito.times(1)).withTags(tagsCaptor.capture());
        Assert.assertEquals(FAILURE_TAGS, tagsCaptor.getValue());
    }
}
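Both Genie test variants above verify the same interaction: that the timer received some elapsed duration with TimeUnit.NANOSECONDS as the unit. The essential Mockito pattern, reduced to a self-contained sketch in which a hypothetical DownloadTimer interface stands in for the real timer type:

import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

import java.util.concurrent.TimeUnit;

public final class TimerVerificationSketch {

    /** Hypothetical stand-in for the timer type used by the class under test. */
    interface DownloadTimer {
        void record(long amount, TimeUnit unit);
    }

    public static void main(String[] args) {
        DownloadTimer downloadTimer = mock(DownloadTimer.class);

        // The code under test would normally make this call from a finally block.
        downloadTimer.record(System.nanoTime(), TimeUnit.NANOSECONDS);

        // Assert the duration was recorded exactly once, expressed in nanoseconds.
        verify(downloadTimer, times(1)).record(anyLong(), eq(TimeUnit.NANOSECONDS));
    }
}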