TimeUnit.NANOSECONDS
A list of usage examples for java.util.concurrent.TimeUnit.NANOSECONDS, collected from open-source projects.
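Before the project examples, here is a minimal, self-contained sketch of the most common idioms, assuming nothing beyond the JDK (the class name NanosExample is illustrative only): measure an interval with System.nanoTime(), then convert it to coarser units with toMillis or convert.

import java.util.concurrent.TimeUnit;

public class NanosExample {
    public static void main(String[] args) throws InterruptedException {
        long start = System.nanoTime();
        Thread.sleep(25); // stand-in for real work
        long elapsedNanos = System.nanoTime() - start;

        // Convert an elapsed nanosecond interval to coarser units.
        long millis = TimeUnit.NANOSECONDS.toMillis(elapsedNanos);
        long micros = TimeUnit.MICROSECONDS.convert(elapsedNanos, TimeUnit.NANOSECONDS);
        System.out.println("Elapsed: " + millis + " ms (" + micros + " us)");

        // NANOSECONDS also appears as the unit argument of other APIs,
        // for example a ThreadPoolExecutor keep-alive time or a metrics timer.
    }
}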
From source file: org.apache.hadoop.hdfs.qjournal.server.Journal.java
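Hadoop's JournalNode times each edit-log flush with a StopWatch and converts the nanosecond reading twice: TimeUnit.MICROSECONDS.convert(nanoSeconds, TimeUnit.NANOSECONDS) feeds the sync metric, and TimeUnit.MILLISECONDS.convert(...) drives the slow-sync warning threshold.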
/**
 * Write a batch of edits to the journal.
 * {@see QJournalProtocol#journal(RequestInfo, long, long, int, byte[])}
 */
synchronized void journal(RequestInfo reqInfo, long segmentTxId, long firstTxnId, int numTxns,
        byte[] records) throws IOException {
    checkFormatted();
    checkWriteRequest(reqInfo);

    checkSync(curSegment != null, "Can't write, no segment open");

    if (curSegmentTxId != segmentTxId) {
        // Sanity check: it is possible that the writer will fail IPCs
        // on both the finalize() and then the start() of the next segment.
        // This could cause us to continue writing to an old segment
        // instead of rolling to a new one, which breaks one of the
        // invariants in the design. If it happens, abort the segment
        // and throw an exception.
        JournalOutOfSyncException e = new JournalOutOfSyncException(
                "Writer out of sync: it thinks it is writing segment " + segmentTxId
                        + " but current segment is " + curSegmentTxId);
        abortCurSegment();
        throw e;
    }

    checkSync(nextTxId == firstTxnId, "Can't write txid " + firstTxnId + " expecting nextTxId=" + nextTxId);

    long lastTxnId = firstTxnId + numTxns - 1;
    if (LOG.isTraceEnabled()) {
        LOG.trace("Writing txid " + firstTxnId + "-" + lastTxnId);
    }

    // If the edit has already been marked as committed, we know
    // it has been fsynced on a quorum of other nodes, and we are
    // "catching up" with the rest. Hence we do not need to fsync.
    boolean isLagging = lastTxnId <= committedTxnId.get();
    boolean shouldFsync = !isLagging;

    curSegment.writeRaw(records, 0, records.length);
    curSegment.setReadyToFlush();
    StopWatch sw = new StopWatch();
    sw.start();
    curSegment.flush(shouldFsync);
    sw.stop();

    long nanoSeconds = sw.now();
    metrics.addSync(TimeUnit.MICROSECONDS.convert(nanoSeconds, TimeUnit.NANOSECONDS));
    long milliSeconds = TimeUnit.MILLISECONDS.convert(nanoSeconds, TimeUnit.NANOSECONDS);

    if (milliSeconds > WARN_SYNC_MILLIS_THRESHOLD) {
        LOG.warn("Sync of transaction range " + firstTxnId + "-" + lastTxnId + " took " + milliSeconds + "ms");
    }

    if (isLagging) {
        // This batch of edits has already been committed on a quorum of other
        // nodes. So, we are in "catch up" mode. This gets its own metric.
        metrics.batchesWrittenWhileLagging.incr(1);
    }

    metrics.batchesWritten.incr(1);
    metrics.bytesWritten.incr(records.length);
    metrics.txnsWritten.incr(numTxns);

    highestWrittenTxId = lastTxnId;
    nextTxId = lastTxnId + 1;
}
From source file: com.netflix.genie.web.services.impl.JobCoordinatorServiceImpl.java
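Genie times the persistence of a job's runtime environment: the elapsed System.nanoTime() difference is recorded on a registry timer with TimeUnit.NANOSECONDS as the unit, from a finally block so that failures are timed as well.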
private void setRuntimeEnvironment(final String jobId, final Cluster cluster, final Command command,
        final List<Application> applications, final int memory) throws GenieException {
    final long jobEnvironmentStart = System.nanoTime();
    final Set<Tag> tags = Sets.newHashSet();
    try {
        final String clusterId = cluster.getId();
        final String commandId = command.getId();
        this.jobPersistenceService.updateJobWithRuntimeEnvironment(jobId, clusterId, commandId,
                applications.stream().map(Application::getId).collect(Collectors.toList()), memory);
        MetricsUtils.addSuccessTags(tags);
    } catch (final Throwable t) {
        MetricsUtils.addFailureTagsWithException(tags, t);
        throw t;
    } finally {
        this.registry.timer(SET_JOB_ENVIRONMENT_TIMER_NAME, tags)
                .record(System.nanoTime() - jobEnvironmentStart, TimeUnit.NANOSECONDS);
    }
}
From source file: net.nzcorp.hbase.tableevent_signaler.TableEventSignaler.java
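This HBase-to-AMQP signaler logs the duration of each stage of a publish, converting the running System.nanoTime() delta to milliseconds with TimeUnit.NANOSECONDS.toMillis for the debug messages.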
private void publishMessage(String queueName, AMQP.BasicProperties headers, String message) throws IOException {
    long pmStart = System.nanoTime();
    LOGGER.trace("Getting channel");
    final Channel channel = getChannel();
    try {
        LOGGER.trace(String.format("Ensuring that queue: %s exists", queueName));
        ensureQueue(channel, queueName);
        LOGGER.debug(String.format("Ensured channel in %d ms",
                TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - pmStart)));
        LOGGER.trace(String.format("Sending message to queue: %s", queueName));
        channel.basicPublish("", queueName, headers, message.getBytes());
        // Channel seems to work. Use it again.
        LOGGER.debug(String.format("Sent message in %d ms",
                TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - pmStart)));
        LOGGER.trace("Message sent, releasing channel");
        releaseChannel(channel);
        LOGGER.debug(String.format("Released channel in %d ms",
                TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - pmStart)));
    } catch (Throwable t) {
        // There was an error on the channel, throw it away.
        try {
            channel.close();
        } catch (Exception e) {
        }
        LOGGER.error(String.format("Error sending message to channel: %s", queueName), t);
        throw t;
    }
}
From source file: com.vmware.identity.idm.server.ServerUtils.java
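VMware's identity server measures the LDAP bind with System.nanoTime(), converts the delta to milliseconds via TimeUnit.NANOSECONDS.toMillis, and reports it both to the trace log and to a performance-data sink.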
private static ILdapConnectionEx getLdapConnection(URI uri, String userName, String password,
        AuthenticationType authType, boolean useGcPort,
        LdapCertificateValidationSettings certValidationsettings) throws Exception {
    ValidateUtil.validateNotNull(uri, "uri");

    boolean isLdaps = uri.getScheme().compareToIgnoreCase(DirectoryStoreProtocol.LDAPS.getName()) == 0;

    List<LdapSetting> connOptions = null;
    List<LdapSetting> settings = new ArrayList<LdapSetting>();
    settings.add(new LdapSetting(LdapOption.LDAP_OPT_PROTOCOL_VERSION, LdapConstants.LDAP_VERSION3));
    settings.add(new LdapSetting(LdapOption.LDAP_OPT_REFERRALS, Boolean.FALSE));
    settings.add(new LdapSetting(LdapOption.LDAP_OPT_NETWORK_TIMEOUT, DEFAULT_LDAP_NETWORK_TIMEOUT));

    if (isLdaps) {
        // If this is an ldaps connection and certificate validation is enabled, set the options for validation.
        boolean isLdapsCertValidationEnabled = certValidationsettings != null
                && (certValidationsettings.isForceValidation()
                        || IdmServerConfig.getInstance().isLdapsCertValidationEnabled()
                        || !certValidationsettings.isLegacy());
        if (isLdapsCertValidationEnabled) {
            ISslX509VerificationCallback certVerifierCallback = certValidationsettings
                    .getCertVerificationCallback(uri);
            settings.add(new LdapSetting(LdapOption.LDAP_OPT_X_TLS_REQUIRE_CERT,
                    LdapConstants.LDAP_OPT_X_TLS_DEMAND));
            settings.add(new LdapSetting(LdapOption.LDAP_OPT_X_CLIENT_TRUSTED_FP_CALLBACK,
                    certVerifierCallback));
            int sslMinProtocol = certValidationsettings.isLegacy()
                    ? LdapSSLProtocols.getDefaultLegacyMinProtocol().getCode()
                    : LdapSSLProtocols.getDefaultMinProtocol().getCode();
            settings.add(new LdapSetting(LdapOption.LDAP_OPT_X_TLS_PROTOCOL, sslMinProtocol));
        } else {
            settings.add(new LdapSetting(LdapOption.LDAP_OPT_X_TLS_REQUIRE_CERT,
                    LdapConstants.LDAP_OPT_X_TLS_NEVER));
        }
    }

    // When doing GSSAPI authentication, LDAP SASL binding by default does a reverse DNS lookup to validate
    // the target name. This causes authentication failures because most DNS servers in AD do not have PTR
    // records registered for all DCs, any of which could be the binding target.
    if (!SystemUtils.IS_OS_WINDOWS && authType == AuthenticationType.USE_KERBEROS
            || authType == AuthenticationType.SRP) {
        settings.add(new LdapSetting(LdapOption.LDAP_OPT_X_SASL_NOCANON, LdapConstants.LDAP_OPT_ON));
    }

    connOptions = Collections.unmodifiableList(settings);

    ILdapConnectionEx connection = null;
    // If no port number, or the default port of 389 (ldap) or 636 (ldaps), is specified then useGcPort takes
    // effect; otherwise, go with the explicitly specified port number.
    if (authType == AuthenticationType.SRP) {
        connection = (ILdapConnectionEx) LdapConnectionFactory.getInstance().getLdapConnection(uri,
                connOptions, true);
    } else if ((uri.getPort() == -1 || uri.getPort() == LdapConstants.LDAP_PORT
            || uri.getPort() == LdapConstants.LDAP_SSL_PORT) && useGcPort) {
        connection = LdapConnectionFactoryEx.getInstance().getLdapConnection(uri.getHost(),
                isLdaps ? LdapConstants.LDAP_SSL_GC_PORT : LdapConstants.LDAP_GC_PORT, connOptions);
    } else {
        connection = LdapConnectionFactoryEx.getInstance().getLdapConnection(uri, connOptions);
    }

    try {
        // All the client options are set, bind now.
        long startTime = System.nanoTime();
        if (AuthenticationType.SRP == authType) {
            ValidateUtil.validateNotEmpty(userName, "userName");
            ValidateUtil.validateNotEmpty(password, "password");
            ((ILdapConnectionExWithGetConnectionString) connection).bindSaslSrpConnection(userName, password);
        } else if (AuthenticationType.USE_KERBEROS == authType) {
            String userUPN = null;
            int idxSep = 0;
            if (!ServerUtils.isNullOrEmpty(userName)) {
                userUPN = ValidateUtil.normalizeIdsKrbUserName(userName);
                idxSep = userUPN.indexOf(ValidateUtil.UPN_SEPARATOR);
            }
            connection.bindSaslConnection(
                    ServerUtils.isNullOrEmpty(userUPN) ? null : userUPN.substring(0, idxSep),
                    ServerUtils.isNullOrEmpty(userUPN) ? null : userUPN.substring(idxSep + 1), password);
        } else if (AuthenticationType.PASSWORD == authType) {
            ValidateUtil.validateNotEmpty(userName, "userName");
            ValidateUtil.validateNotEmpty(password, "password");
            connection.bindConnection(userName, password, LdapBindMethod.LDAP_BIND_SIMPLE);
        } else {
            String errMsg = String.format("Unsupported authenticationType to bind connection: [%s, %s]", uri,
                    userName);
            logger.warn(errMsg);
            throw new IllegalStateException(errMsg);
        }
        long delta = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
        if (logger.isTraceEnabled()) {
            logger.trace(String.format("\tbinding connection took [%d] ms", delta));
        }
        if (IdmServer.getPerfDataSinkInstance() != null) {
            IdmServer.getPerfDataSinkInstance().addMeasurement(
                    new PerfBucketKey(PerfMeasurementPoint.LdapBindConnection, uri.toString()), delta);
        }
    } catch (Exception ex) {
        logger.warn(String.format("cannot bind connection: [%s, %s]", uri, userName));
        if (connection != null) {
            connection.close();
        }
        throw ex;
    }
    return connection;
}
From source file: com.yahoo.pulsar.broker.service.BrokerService.java
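Pulsar's broker times the graceful unload of all owned namespace bundles, converting the elapsed nanoseconds to milliseconds and dividing by 1000.0 to log the total in seconds.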
/**
 * Unloads all owned namespace bundles gracefully.
 * <ul>
 * <li>First it makes the current broker unavailable and isolates it from the cluster, so it will not serve any
 * new requests.</li>
 * <li>Second it starts unloading namespace bundles one by one without closing the connection, in order to avoid
 * disruption for other namespace bundles which are sharing the same connection from the same client.</li>
 * </ul>
 */
public void unloadNamespaceBundlesGracefully() {
    try {
        // make broker-node unavailable from the cluster
        if (pulsar.getLoadManager() != null) {
            pulsar.getLoadManager().disableBroker();
        }
        // unload all namespace-bundles gracefully
        long closeTopicsStartTime = System.nanoTime();
        Set<NamespaceBundle> serviceUnits = pulsar.getNamespaceService().getOwnedServiceUnits();
        serviceUnits.forEach(su -> {
            if (su instanceof NamespaceBundle) {
                try {
                    pulsar.getNamespaceService().unloadNamespaceBundle((NamespaceBundle) su);
                } catch (Exception e) {
                    log.warn("Failed to unload namespace bundle {}", su, e);
                }
            }
        });
        double closeTopicsTimeSeconds = TimeUnit.NANOSECONDS
                .toMillis(System.nanoTime() - closeTopicsStartTime) / 1000.0;
        log.info("Unloading {} namespace-bundles completed in {} seconds", serviceUnits.size(),
                closeTopicsTimeSeconds);
    } catch (Exception e) {
        log.error("Failed to disable broker from loadbalancer list {}", e.getMessage(), e);
    }
}
From source file: fr.xebia.management.statistics.ServiceStatistics.java
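Here the conversion runs the other way: a millisecond value set over JMX is stored internally in nanoseconds using TimeUnit.NANOSECONDS.convert(millis, TimeUnit.MILLISECONDS).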
@ManagedAttribute(description = "Max acquisition duration for the max active semaphore")
public void setSemaphoreAcquisitionMaxTimeInMillis(long semaphoreAcquisitionMaxTimeInMillis) {
    this.maxActiveSemaphoreAcquisitionMaxTimeInNanos = TimeUnit.NANOSECONDS
            .convert(semaphoreAcquisitionMaxTimeInMillis, TimeUnit.MILLISECONDS);
}
From source file: com.netflix.genie.core.services.impl.S3FileTransferImplUnitTests.java
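This Mockito test asserts that a successful S3 upload records exactly one measurement on the upload timer with TimeUnit.NANOSECONDS as the unit, along with the expected bucket, key, and success tags.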
/**
 * Test the putFile method for a valid S3 path.
 *
 * @throws GenieException If there is any problem
 */
@Test
public void testPutFileMethodValidS3Path() throws GenieException {
    final PutObjectResult putObjectResult = Mockito.mock(PutObjectResult.class);
    Mockito.when(this.s3Client.putObject(Mockito.any(), Mockito.any(), Mockito.any(File.class)))
            .thenReturn(putObjectResult);
    final ArgumentCaptor<String> bucketArgument = ArgumentCaptor.forClass(String.class);
    final ArgumentCaptor<String> keyArgument = ArgumentCaptor.forClass(String.class);

    s3FileTransfer.putFile(LOCAL_PATH, S3_PATH);

    Mockito.verify(this.s3Client).putObject(bucketArgument.capture(), keyArgument.capture(),
            Mockito.any(File.class));
    Assert.assertEquals(S3_BUCKET, bucketArgument.getValue());
    Assert.assertEquals(S3_KEY, keyArgument.getValue());
    Mockito.verify(this.uploadTimer, Mockito.times(1)).record(Mockito.anyLong(),
            Mockito.eq(TimeUnit.NANOSECONDS));
    Mockito.verify(this.uploadTimerId, Mockito.times(1)).withTags(tagsCaptor.capture());
    Assert.assertEquals(SUCCESS_TAGS, tagsCaptor.getValue());
}
From source file: com.netflix.genie.web.services.impl.S3FileTransferImplTest.java
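A variant of the previous test from a different package: it still verifies that the upload timer is recorded in TimeUnit.NANOSECONDS, but looks the timer up on the registry by name and tags instead of through a timer id.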
/**
 * Test the putFile method for a valid S3 path.
 *
 * @throws GenieException If there is any problem
 */
@Test
public void testPutFileMethodValidS3Path() throws GenieException {
    final PutObjectResult putObjectResult = Mockito.mock(PutObjectResult.class);
    Mockito.when(this.s3Client.putObject(Mockito.any(), Mockito.any(), Mockito.any(File.class)))
            .thenReturn(putObjectResult);
    final ArgumentCaptor<String> bucketArgument = ArgumentCaptor.forClass(String.class);
    final ArgumentCaptor<String> keyArgument = ArgumentCaptor.forClass(String.class);

    s3FileTransfer.putFile(LOCAL_PATH, S3_PATH);

    Mockito.verify(this.s3Client).putObject(bucketArgument.capture(), keyArgument.capture(),
            Mockito.any(File.class));
    Assert.assertEquals(S3_BUCKET, bucketArgument.getValue());
    Assert.assertEquals(S3_KEY, keyArgument.getValue());
    Mockito.verify(this.uploadTimer, Mockito.times(1)).record(Mockito.anyLong(),
            Mockito.eq(TimeUnit.NANOSECONDS));
    Mockito.verify(this.registry, Mockito.times(1)).timer(Mockito.eq(S3FileTransferImpl.UPLOAD_TIMER_NAME),
            this.tagsCaptor.capture());
    Assert.assertEquals(SUCCESS_TAGS, this.tagsCaptor.getValue());
}
From source file: org.apache.htrace.impl.HTracedRESTReceiver.java
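HTrace turns the monotonic System.nanoTime() clock into a millisecond reading with TimeUnit.MILLISECONDS.convert(System.nanoTime(), TimeUnit.NANOSECONDS) and uses it to rate-limit its buffer-at-capacity warning.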
@Override
public void receiveSpan(Span span) {
    boolean added = false;
    lock.lock();
    try {
        if (shutdown) {
            LOG.trace("receiveSpan(span=" + span + "): HTracedRESTReceiver is already shut down.");
            return;
        }
        if (spans.size() < capacity) {
            spans.add(span);
            added = true;
            if (spans.size() >= maxToSendAtATime) {
                cond.signal();
            }
        } else {
            cond.signal();
        }
    } finally {
        lock.unlock();
    }
    if (!added) {
        long now = TimeUnit.MILLISECONDS.convert(System.nanoTime(), TimeUnit.NANOSECONDS);
        long last = lastAtCapacityWarningLog.get();
        if (now - last > WARN_TIMEOUT_MS) {
            // Only log every 5 minutes. Any more than this for a guest process
            // is obnoxious.
            if (lastAtCapacityWarningLog.compareAndSet(last, now)) {
                // If the atomic-compare-and-set succeeds, we should log. Otherwise,
                // we should assume another thread already logged and bumped up the
                // value of lastAtCapacityWarning sometime between our get and the
                // "if" statement.
                LOG.warn("There are too many HTrace spans to buffer! We have already buffered " + capacity
                        + " spans. Dropping spans.");
            }
        }
    }
}
From source file: gobblin.compaction.mapreduce.MRCompactor.java
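Gobblin passes Long.MAX_VALUE nanoseconds as the keep-alive time when constructing its job-runner thread pool, which effectively disables idle-thread timeout.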
private JobRunnerExecutor createJobExecutor() {
    int threadPoolSize = getThreadPoolSize();
    BlockingQueue<Runnable> queue = new PriorityBlockingQueue<Runnable>();
    return new JobRunnerExecutor(threadPoolSize, threadPoolSize, Long.MAX_VALUE, TimeUnit.NANOSECONDS, queue);
}