List of usage examples for java.util.concurrent TimeUnit NANOSECONDS
TimeUnit NANOSECONDS
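Before the per-project examples, here is a minimal, self-contained sketch of the TimeUnit.NANOSECONDS operations the snippets below rely on; everything used is standard java.util.concurrent.TimeUnit and System.nanoTime() API (doWork is a hypothetical stand-in for a measured operation):

import java.util.concurrent.TimeUnit;

public class NanosDemo {
    public static void main(String[] args) {
        // Two equivalent ways to turn 250 ms into nanoseconds.
        long viaConvert = TimeUnit.NANOSECONDS.convert(250, TimeUnit.MILLISECONDS);
        long viaToNanos = TimeUnit.MILLISECONDS.toNanos(250); // 250_000_000

        // Converting back truncates toward zero (no rounding).
        long millis = TimeUnit.NANOSECONDS.toMillis(viaConvert); // 250

        // The elapsed-time idiom most of the examples below feed into timers.
        long start = System.nanoTime();
        doWork(); // hypothetical stand-in for the measured operation
        long elapsedNanos = System.nanoTime() - start;

        System.out.printf("%d ns = %d ms, elapsed %d ns%n", viaConvert, millis, elapsedNanos);
    }

    private static void doWork() {
    }
}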
From source file:fr.xebia.management.statistics.ServiceStatistics.java
/**
 * Increment {@link #totalDurationInNanosCounter}.
 *
 * @param deltaInMillis
 *            delta in millis
 */
public void incrementTotalDurationWithMillis(long deltaInMillis) {
    incrementTotalDurationWithNanos(TimeUnit.NANOSECONDS.convert(deltaInMillis, TimeUnit.MILLISECONDS));
}
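A note on the conversion above: TimeUnit.NANOSECONDS.convert(deltaInMillis, TimeUnit.MILLISECONDS) is equivalent to the more direct TimeUnit.MILLISECONDS.toNanos(deltaInMillis). Both saturate at Long.MAX_VALUE (or Long.MIN_VALUE) on overflow instead of wrapping.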
From source file:com.tinspx.util.concurrent.DelayedSemaphoreTest.java
private String acquire(DelayedSemaphore ls, int count) throws InterruptedException {
    final long MAX_WAIT = TimeUnit.MICROSECONDS.toNanos(50000),
            MAX_CHECK = TimeUnit.MICROSECONDS.toNanos(75000);
    switch (count % 3) {
    case 0:
        ls.acquire();
        return null;
    case 1:
        while (!ls.tryAcquire()) {
        }
        return null;
    case 2:
        while (true) {
            long time = ls.ticker().read();
            boolean acquired = ls.tryAcquire(MAX_WAIT, TimeUnit.NANOSECONDS);
            time = ls.ticker().read() - time;
            if (time > MAX_CHECK) {
                return String.format("%d > %d (max %d), acquired: %b", time, MAX_CHECK, MAX_WAIT, acquired);
            }
            // System.out.println("passed");
            if (acquired) {
                return null;
            } else {
                attempts++;
            }
        }
    default:
        throw new AssertionError();
    }
}
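The timed-acquire branch above bounds how long tryAcquire may block and then checks the observed wait against a ticker. DelayedSemaphore is a third-party class, so as an analogous illustration only, here is the same pattern with the plain JDK java.util.concurrent.Semaphore:

import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

public class TimedAcquireDemo {
    public static void main(String[] args) throws InterruptedException {
        Semaphore permits = new Semaphore(1);
        long maxWaitNanos = TimeUnit.MICROSECONDS.toNanos(50_000); // 50 ms

        long start = System.nanoTime();
        // Blocks for at most maxWaitNanos; returns false on timeout.
        boolean acquired = permits.tryAcquire(maxWaitNanos, TimeUnit.NANOSECONDS);
        long waited = System.nanoTime() - start;

        System.out.printf("acquired=%b after %d ns%n", acquired, waited);
        if (acquired) {
            permits.release();
        }
    }
}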
From source file:com.netflix.genie.core.services.impl.S3FileTransferImplUnitTests.java
/**
 * Test the putFile method for invalid s3 path.
 *
 * @throws GenieException If there is any problem
 */
@Test(expected = GenieServerException.class)
public void testPutFileMethodInvalidS3Path() throws GenieException {
    final String invalidS3Path = "filepath";
    try {
        s3FileTransfer.putFile(LOCAL_PATH, invalidS3Path);
    } finally {
        Mockito.verify(this.uploadTimer, Mockito.times(1))
                .record(Mockito.anyLong(), Mockito.eq(TimeUnit.NANOSECONDS));
        Mockito.verify(this.uploadTimerId, Mockito.times(1)).withTags(tagsCaptor.capture());
        Assert.assertEquals(FAILURE_TAGS, tagsCaptor.getValue());
    }
}
From source file:com.netflix.genie.web.services.impl.S3FileTransferImplTest.java
/**
 * Test the putFile method for invalid s3 path.
 *
 * @throws GenieException If there is any problem
 */
@Test(expected = GenieBadRequestException.class)
public void testPutFileMethodInvalidS3Path() throws GenieException {
    final String invalidS3Path = "filepath";
    try {
        s3FileTransfer.putFile(LOCAL_PATH, invalidS3Path);
    } finally {
        Mockito.verify(this.uploadTimer, Mockito.times(1))
                .record(Mockito.anyLong(), Mockito.eq(TimeUnit.NANOSECONDS));
        Mockito.verify(this.registry, Mockito.times(1))
                .timer(Mockito.eq(S3FileTransferImpl.UPLOAD_TIMER_NAME), this.tagsCaptor.capture());
        Assert.assertEquals(FAILURE_TAGS, this.tagsCaptor.getValue());
    }
}
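These two Genie tests are near-duplicates from different versions of the codebase: both verify that the upload timer records a nanosecond duration exactly once even when putFile fails, but the newer variant expects GenieBadRequestException instead of GenieServerException and verifies the registry.timer(name, tags) lookup rather than a pre-built timer id.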
From source file:org.apache.jackrabbit.oak.spi.blob.AbstractBlobStore.java
private void convertBlobToId(InputStream in, ByteArrayOutputStream idStream, int level, long totalLength)
        throws IOException {
    int count = 0;
    // try to re-use the block (but not concurrently)
    byte[] block = blockBuffer.getAndSet(null);
    if (block == null || block.length != blockSize) {
        // not yet initialized, already in use, or wrong size:
        // create a new one
        block = new byte[blockSize];
    }
    while (true) {
        int blockLen = IOUtils.readFully(in, block, 0, block.length);
        count++;
        if (blockLen == 0) {
            break;
        } else if (blockLen < blockSizeMin) {
            idStream.write(TYPE_DATA);
            IOUtils.writeVarInt(idStream, blockLen);
            idStream.write(block, 0, blockLen);
            totalLength += blockLen;
        } else {
            MessageDigest messageDigest;
            try {
                messageDigest = MessageDigest.getInstance(HASH_ALGORITHM);
            } catch (NoSuchAlgorithmException e) {
                throw new IOException(e);
            }
            messageDigest.update(block, 0, blockLen);
            byte[] digest = messageDigest.digest();
            idStream.write(TYPE_HASH);
            IOUtils.writeVarInt(idStream, level);
            if (level > 0) {
                // level > 0: total size (size of all sub-blocks)
                // (see class level javadoc for details)
                IOUtils.writeVarLong(idStream, totalLength);
            }
            // level = 0: size (size of this block)
            // level > 0: size of the indirection block
            // (see class level javadoc for details)
            IOUtils.writeVarLong(idStream, blockLen);
            totalLength += blockLen;
            IOUtils.writeVarInt(idStream, digest.length);
            idStream.write(digest);
            long start = System.nanoTime();
            storeBlock(digest, level, Arrays.copyOf(block, blockLen));
            statsCollector.uploaded(System.nanoTime() - start, TimeUnit.NANOSECONDS, blockLen);
        }
        if (idStream.size() > blockSize / 2) {
            // convert large ids to a block, but ensure it can be stored as
            // one block (otherwise the indirection no longer works)
            byte[] idBlock = idStream.toByteArray();
            idStream.reset();
            convertBlobToId(new ByteArrayInputStream(idBlock), idStream, level + 1, totalLength);
            count = 1;
        }
    }
    // re-use the block
    blockBuffer.set(block);
    if (count > 0 && idStream.size() > blockSizeMin) {
        // at the very end, convert large ids to a block,
        // because large block ids are not handy
        // (especially if they are used to read data in small chunks)
        byte[] idBlock = idStream.toByteArray();
        idStream.reset();
        convertBlobToId(new ByteArrayInputStream(idBlock), idStream, level + 1, totalLength);
    }
    in.close();
}
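The timing here follows the same shape as the other examples: System.nanoTime() is read immediately before storeBlock, and the raw difference is handed to statsCollector.uploaded together with TimeUnit.NANOSECONDS, making the unit of the delta explicit, and with blockLen, so the collector can relate time to bytes uploaded.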
From source file:com.netflix.genie.core.services.impl.LocalJobRunner.java
private JobExecution executeJob(final Map<String, Object> context, final File runScript) throws GenieException {
    final long start = System.nanoTime();
    try (final Writer writer = new OutputStreamWriter(new FileOutputStream(runScript), StandardCharsets.UTF_8)) {
        final String jobId = ((JobExecutionEnvironment) context.get(JobConstants.JOB_EXECUTION_ENV_KEY))
                .getJobRequest().getId()
                .orElseThrow(() -> new GenieServerException("No job id. Unable to execute"));
        log.info("Executing job workflow for job {}", jobId);
        context.put(JobConstants.WRITER_KEY, writer);

        for (WorkflowTask workflowTask : this.jobWorkflowTasks) {
            workflowTask.executeTask(context);
            if (Thread.currentThread().isInterrupted()) {
                log.info("Interrupted job workflow for job {}", jobId);
                break;
            }
        }

        log.info("Finished Executing job workflow for job {}", jobId);
        return (JobExecution) context.get(JobConstants.JOB_EXECUTION_DTO_KEY);
    } catch (final IOException ioe) {
        throw new GenieServerException("Failed to execute job due to: " + ioe.getMessage(), ioe);
    } finally {
        this.executeJobTimer.record(System.nanoTime() - start, TimeUnit.NANOSECONDS);
    }
}
From source file:org.apache.solr.cloud.HttpPartitionTest.java
protected void testLeaderZkSessionLoss() throws Exception {

    String testCollectionName = "c8n_1x2_leader_session_loss";
    createCollectionRetry(testCollectionName, 1, 2, 1);
    cloudClient.setDefaultCollection(testCollectionName);

    sendDoc(1);

    List<Replica> notLeaders = ensureAllReplicasAreActive(testCollectionName, "shard1", 1, 2,
            maxWaitSecsToSeeAllActive);
    assertTrue("Expected 1 replicas for collection " + testCollectionName + " but found " + notLeaders.size()
            + "; clusterState: " + printClusterStateInfo(testCollectionName), notLeaders.size() == 1);

    Replica leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1");
    assertNotNull("Could not find leader for shard1 of " + testCollectionName + "; clusterState: "
            + printClusterStateInfo(testCollectionName), leader);
    String leaderNode = leader.getNodeName();
    JettySolrRunner leaderJetty = getJettyOnPort(getReplicaPort(leader));

    SolrInputDocument doc = new SolrInputDocument();
    doc.addField(id, String.valueOf(2));
    doc.addField("a_t", "hello" + 2);

    // cause leader migration by expiring the current leader's zk session
    chaosMonkey.expireSession(leaderJetty);

    String expectedNewLeaderCoreNodeName = notLeaders.get(0).getName();
    long timeout = System.nanoTime() + TimeUnit.NANOSECONDS.convert(60, TimeUnit.SECONDS);
    while (System.nanoTime() < timeout) {
        String currentLeaderName = null;
        try {
            Replica currentLeader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1");
            currentLeaderName = currentLeader.getName();
        } catch (Exception exc) {
        }

        if (expectedNewLeaderCoreNodeName.equals(currentLeaderName))
            break; // new leader was elected after zk session expiration

        Thread.sleep(500);
    }

    Replica currentLeader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1");
    assertEquals(expectedNewLeaderCoreNodeName, currentLeader.getName());

    // TODO: This test logic seems to be timing dependent and fails on Jenkins
    // need to come up with a better approach
    log.info("Sending doc 2 to old leader " + leader.getName());
    try (HttpSolrClient leaderSolr = getHttpSolrClient(leader, testCollectionName)) {
        leaderSolr.add(doc);
        // if the add worked, then the doc must exist on the new leader
        try (HttpSolrClient newLeaderSolr = getHttpSolrClient(currentLeader, testCollectionName)) {
            assertDocExists(newLeaderSolr, testCollectionName, "2");
        }
    } catch (SolrException exc) {
        // this is ok provided the doc doesn't exist on the current leader
        try (HttpSolrClient client = getHttpSolrClient(currentLeader, testCollectionName)) {
            client.add(doc); // this should work
        }
    }

    List<Replica> participatingReplicas = getActiveOrRecoveringReplicas(testCollectionName, "shard1");
    Set<String> replicasToCheck = new HashSet<>();
    for (Replica stillUp : participatingReplicas)
        replicasToCheck.add(stillUp.getName());
    waitToSeeReplicasActive(testCollectionName, "shard1", replicasToCheck, 20);
    assertDocsExistInAllReplicas(participatingReplicas, testCollectionName, 1, 2);

    log.info("testLeaderZkSessionLoss succeeded ... deleting the " + testCollectionName + " collection");

    // try to clean up
    try {
        CollectionAdminRequest.Delete req = new CollectionAdminRequest.Delete();
        req.setCollectionName(testCollectionName);
        req.process(cloudClient);
    } catch (Exception e) {
        // don't fail the test
        log.warn("Could not delete collection {} after test completed", testCollectionName);
    }
}
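One caveat on the polling loop above: the System.nanoTime() javadoc recommends comparing against a deadline via subtraction (t1 - t0 < 0) rather than a direct <, because the returned values are meaningful only as differences and may wrap. A minimal overflow-safe variant of the same wait loop; conditionMet is a hypothetical stand-in for the new-leader check, while the 60-second bound and 500 ms sleep come from the test above:

import java.util.concurrent.TimeUnit;

public class DeadlineDemo {
    public static void main(String[] args) throws InterruptedException {
        long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(60);
        while (System.nanoTime() - deadline < 0) { // overflow-safe deadline check
            if (conditionMet()) {
                System.out.println("condition met before the deadline");
                return;
            }
            Thread.sleep(500); // same poll interval as the test above
        }
        System.out.println("timed out after 60 seconds");
    }

    // Hypothetical stand-in for the new-leader check in the test above.
    private static boolean conditionMet() {
        return false;
    }
}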
From source file:com.netflix.genie.web.services.impl.JobSpecificationServiceImpl.java
private List<Application> getApplications(final String id, final JobRequest jobRequest, final Command command)
        throws GenieException {
    final long start = System.nanoTime();
    final Set<Tag> tags = Sets.newHashSet();
    try {
        final String commandId = command.getId();
        log.info("Selecting applications for job {} and command {}", id, commandId);
        // TODO: What do we do about application status? Should probably check here
        final List<Application> applications = Lists.newArrayList();
        if (jobRequest.getCriteria().getApplicationIds().isEmpty()) {
            applications.addAll(this.commandPersistenceService.getApplicationsForCommand(commandId));
        } else {
            for (final String applicationId : jobRequest.getCriteria().getApplicationIds()) {
                applications.add(this.applicationPersistenceService.getApplication(applicationId));
            }
        }
        log.info("Selected applications {} for job {}", applications.stream().map(Application::getId)
                .reduce((one, two) -> one + "," + two).orElse(NO_ID_FOUND), id);
        MetricsUtils.addSuccessTags(tags);
        return applications;
    } catch (final Throwable t) {
        MetricsUtils.addFailureTagsWithException(tags, t);
        throw t;
    } finally {
        this.registry.timer(SELECT_APPLICATIONS_TIMER_NAME, tags)
                .record(System.nanoTime() - start, TimeUnit.NANOSECONDS);
    }
}
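The try/catch/finally shape above recurs throughout these Genie examples: success or failure tags are applied in the respective branch, and the timer records exactly once in finally. A stripped-down sketch of that idiom follows; the Timer interface and the tag handling are simplified stand-ins for the actual Spectator/Micrometer types, not the real Genie API:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

public class TimedCallDemo {

    // Hypothetical stand-in for a Spectator/Micrometer-style timer.
    interface Timer {
        void record(long amount, TimeUnit unit);
    }

    static <T> T timed(Timer timer, Map<String, String> tags, Supplier<T> op) {
        long start = System.nanoTime();
        try {
            T result = op.get();
            tags.put("status", "success"); // analogous to MetricsUtils.addSuccessTags
            return result;
        } catch (RuntimeException e) {
            tags.put("status", "failure"); // analogous to MetricsUtils.addFailureTagsWithException
            throw e;
        } finally {
            // Record exactly once, on both the success and the failure path.
            timer.record(System.nanoTime() - start, TimeUnit.NANOSECONDS);
        }
    }

    public static void main(String[] args) {
        Map<String, String> tags = new HashMap<>();
        Timer timer = (amount, unit) -> System.out.println(unit.toMillis(amount) + " ms, tags=" + tags);
        System.out.println(timed(timer, tags, () -> "done"));
    }
}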
From source file:com.netflix.genie.core.services.impl.JobCoordinatorServiceImpl.java
/**
 * {@inheritDoc}
 */
@Override
public String coordinateJob(
        @Valid @NotNull(message = "No job request provided. Unable to execute.") final JobRequest jobRequest,
        @Valid @NotNull(message = "No job metadata provided. Unable to execute.") final JobMetadata jobMetadata)
        throws GenieException {
    final long coordinationStart = System.nanoTime();
    final Map<String, String> tags = MetricsUtils.newSuccessTagsMap();
    final String jobId = jobRequest.getId()
            .orElseThrow(() -> new GenieServerException("Id of the jobRequest cannot be null"));
    JobStatus jobStatus = JobStatus.FAILED;
    try {
        log.info("Called to schedule job launch for job {}", jobId);
        // create the job object in the database with status INIT
        final Job.Builder jobBuilder = new Job.Builder(jobRequest.getName(), jobRequest.getUser(),
                jobRequest.getVersion(), jobRequest.getCommandArgs()).withId(jobId)
                        .withTags(jobRequest.getTags()).withStatus(JobStatus.INIT)
                        .withStatusMsg("Job Accepted and in initialization phase.");
        jobRequest.getDescription().ifPresent(jobBuilder::withDescription);
        if (!jobRequest.isDisableLogArchival()) {
            jobBuilder.withArchiveLocation(this.jobsProperties.getLocations().getArchives()
                    + JobConstants.FILE_PATH_DELIMITER + jobId + ".tar.gz");
        }

        final JobExecution jobExecution = new JobExecution.Builder(this.hostName).withId(jobId).build();

        // Log all the initial job information
        this.jobPersistenceService.createJob(jobRequest, jobMetadata, jobBuilder.build(), jobExecution);
        this.jobStateService.init(jobId);

        // TODO: Combine the cluster and command selection into a single method/database query for efficiency
        // Resolve the cluster for the job request based on the tags specified
        final Cluster cluster = this.getCluster(jobRequest);
        // Resolve the command for the job request based on command tags and cluster chosen
        final Command command = this.getCommand(jobRequest, cluster);
        // Resolve the applications to use based on the command that was selected
        final List<Application> applications = this.getApplications(jobRequest, command);

        // Now that we have the command, how much memory should the job use?
        final int memory = jobRequest.getMemory()
                .orElse(command.getMemory().orElse(this.jobsProperties.getMemory().getDefaultJobMemory()));

        // Save all the runtime information
        this.setRuntimeEnvironment(jobId, cluster, command, applications, memory);

        final int maxJobMemory = this.jobsProperties.getMemory().getMaxJobMemory();
        if (memory > maxJobMemory) {
            jobStatus = JobStatus.INVALID;
            throw new GeniePreconditionException("Requested " + memory
                    + " MB to run job which is more than the " + maxJobMemory + " MB allowed");
        }

        log.info("Checking if can run job {} from user {}", jobRequest.getId(), jobRequest.getUser());
        final JobsUsersActiveLimitProperties activeLimit = this.jobsProperties.getUsers().getActiveLimit();
        if (activeLimit.isEnabled()) {
            final long activeJobsLimit = activeLimit.getCount();
            final long activeJobsCount = this.jobSearchService.getActiveJobCountForUser(jobRequest.getUser());
            if (activeJobsCount >= activeJobsLimit) {
                throw GenieUserLimitExceededException.createForActiveJobsLimit(jobRequest.getUser(),
                        activeJobsCount, activeJobsLimit);
            }
        }

        synchronized (this) {
            log.info("Checking if can run job {} on this node", jobRequest.getId());
            final int maxSystemMemory = this.jobsProperties.getMemory().getMaxSystemMemory();
            final int usedMemory = this.jobStateService.getUsedMemory();
            if (usedMemory + memory <= maxSystemMemory) {
                log.info("Job {} can run on this node as only {}/{} MB are used and requested {} MB", jobId,
                        usedMemory, maxSystemMemory, memory);
                // Tell the system a new job has been scheduled so any actions can be taken
                log.info("Publishing job scheduled event for job {}", jobId);
                this.jobStateService.schedule(jobId, jobRequest, cluster, command, applications, memory);
                return jobId;
            } else {
                throw new GenieServerUnavailableException("Job " + jobId + " can't run on this node "
                        + usedMemory + "/" + maxSystemMemory + " MB are used and requested " + memory + " MB");
            }
        }
    } catch (final GenieConflictException e) {
        MetricsUtils.addFailureTagsWithException(tags, e);
        // Job has not been initiated so we don't have to call JobStateService.done()
        throw e;
    } catch (final GenieException e) {
        MetricsUtils.addFailureTagsWithException(tags, e);
        // Need to check if the job exists in the JobStateService
        // because this error can happen before the job is initiated.
        if (this.jobStateService.jobExists(jobId)) {
            this.jobStateService.done(jobId);
            this.jobPersistenceService.updateJobStatus(jobId, jobStatus, e.getMessage());
        }
        throw e;
    } catch (final Exception e) {
        MetricsUtils.addFailureTagsWithException(tags, e);
        // Need to check if the job exists in the JobStateService
        // because this error can happen before the job is initiated.
        if (this.jobStateService.jobExists(jobId)) {
            this.jobStateService.done(jobId);
            this.jobPersistenceService.updateJobStatus(jobId, jobStatus, e.getMessage());
        }
        throw new GenieServerException("Failed to coordinate job launch", e);
    } catch (final Throwable t) {
        MetricsUtils.addFailureTagsWithException(tags, t);
        throw t;
    } finally {
        this.registry.timer(this.coordinationTimerId.withTags(tags))
                .record(System.nanoTime() - coordinationStart, TimeUnit.NANOSECONDS);
    }
}
From source file:com.netflix.spinnaker.clouddriver.kubernetes.v2.security.KubernetesV2Credentials.java
private <T> T runAndRecordMetrics(String action, List<KubernetesKind> kinds, String namespace, Supplier<T> op) {
    T result = null;
    Throwable failure = null;
    KubectlException apiException = null;
    long startTime = clock.monotonicTime();
    try {
        result = op.get();
    } catch (KubectlException e) {
        apiException = e;
    } catch (Exception e) {
        failure = e;
    } finally {
        Map<String, String> tags = new HashMap<>();
        tags.put("action", action);
        if (kinds.size() == 1) {
            tags.put("kind", kinds.get(0).toString());
        } else {
            tags.put("kinds",
                    String.join(",", kinds.stream().map(KubernetesKind::toString).collect(Collectors.toList())));
        }
        tags.put("account", accountName);
        tags.put("namespace", StringUtils.isEmpty(namespace) ? "none" : namespace);
        if (failure == null) {
            tags.put("success", "true");
        } else {
            tags.put("success", "false");
            tags.put("reason", failure.getClass().getSimpleName() + ": " + failure.getMessage());
        }

        registry.timer(registry.createId("kubernetes.api", tags))
                .record(clock.monotonicTime() - startTime, TimeUnit.NANOSECONDS);

        if (failure != null) {
            throw new KubectlJobExecutor.KubectlException(
                    "Failure running " + action + " on " + kinds + ": " + failure.getMessage(), failure);
        } else if (apiException != null) {
            throw apiException;
        } else {
            return result;
        }
    }
}
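A closing note on the clock choice: this example reads clock.monotonicTime() (the earlier ones read System.nanoTime()) rather than System.currentTimeMillis(), because monotonic readings are unaffected by wall-clock and NTP adjustments. Only the difference between two readings is meaningful, which is why every example pairs a start reading with an end reading and passes the delta to the timer as TimeUnit.NANOSECONDS.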