Example usage for java.util.concurrent TimeUnit NANOSECONDS

List of usage examples for java.util.concurrent TimeUnit NANOSECONDS

Introduction

On this page you can find example usages of java.util.concurrent TimeUnit NANOSECONDS, collected from open source projects.

Prototype

TimeUnit NANOSECONDS

To view the source code for java.util.concurrent TimeUnit NANOSECONDS, click the Source Link.

Document

Time unit representing one thousandth of a microsecond (one billionth of a second).
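
Most of the examples below reduce to a few recurring patterns: measuring elapsed time with System.nanoTime() and converting the nanosecond delta for logging, or passing TimeUnit.NANOSECONDS as the unit argument to metrics and sleep APIs. As a quick orientation, here is a minimal, self-contained sketch of the basic conversions; the class name and printed values are illustrative only.

import java.util.concurrent.TimeUnit;

public class NanosecondsBasics {
    public static void main(String[] args) throws InterruptedException {
        // Convert a fixed nanosecond duration into coarser units.
        long nanos = 1_500_000_000L;
        System.out.println(TimeUnit.NANOSECONDS.toMillis(nanos));                       // 1500
        System.out.println(TimeUnit.NANOSECONDS.toSeconds(nanos));                      // 1
        System.out.println(TimeUnit.MILLISECONDS.convert(nanos, TimeUnit.NANOSECONDS)); // 1500, same as toMillis

        // Measure elapsed time with System.nanoTime() and convert the delta for logging.
        long start = System.nanoTime();
        Thread.sleep(25);
        System.out.println("slept for roughly "
                + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start) + " ms");
    }
}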

Usage

From source file:com.linkedin.pinot.core.data.manager.realtime.HLRealtimeSegmentDataManager.java

public HLRealtimeSegmentDataManager(final RealtimeSegmentZKMetadata segmentMetadata,
        final AbstractTableConfig tableConfig, InstanceZKMetadata instanceMetadata,
        final RealtimeTableDataManager realtimeTableDataManager, final String resourceDataDir,
        final ReadMode mode, final Schema schema, final ServerMetrics serverMetrics) throws Exception {
    super();
    _realtimeTableDataManager = realtimeTableDataManager;
    final String segmentVersionStr = tableConfig.getIndexingConfig().getSegmentFormatVersion();
    _segmentVersion = SegmentVersion.fromStringOrDefault(segmentVersionStr);
    this.schema = schema;
    this.extractor = (PlainFieldExtractor) FieldExtractorFactory.getPlainFieldExtractor(schema);
    this.serverMetrics = serverMetrics;
    this.segmentName = segmentMetadata.getSegmentName();
    this.tableName = tableConfig.getTableName();

    IndexingConfig indexingConfig = tableConfig.getIndexingConfig();
    if (indexingConfig.getSortedColumn().isEmpty()) {
        LOGGER.info("RealtimeDataResourceZKMetadata contains no information about sorted column for segment {}",
                segmentName);
        this.sortedColumn = null;
    } else {
        String firstSortedColumn = indexingConfig.getSortedColumn().get(0);
        if (this.schema.hasColumn(firstSortedColumn)) {
            LOGGER.info("Setting sorted column name: {} from RealtimeDataResourceZKMetadata for segment {}",
                    firstSortedColumn, segmentName);
            this.sortedColumn = firstSortedColumn;
        } else {
            LOGGER.warn(
                    "Sorted column name: {} from RealtimeDataResourceZKMetadata is not existed in schema for segment {}.",
                    firstSortedColumn, segmentName);
            this.sortedColumn = null;
        }
    }
    //inverted index columns
    invertedIndexColumns = indexingConfig.getInvertedIndexColumns();
    if (sortedColumn != null && !invertedIndexColumns.contains(sortedColumn)) {
        invertedIndexColumns.add(sortedColumn);
    }
    this.segmentMetatdaZk = segmentMetadata;

    // create and init stream provider config
    // TODO : ideally resourceMetadata should create and give back a streamProviderConfig
    this.kafkaStreamProviderConfig = new KafkaHighLevelStreamProviderConfig();
    this.kafkaStreamProviderConfig.init(tableConfig, instanceMetadata, schema);
    segmentLogger = LoggerFactory.getLogger(HLRealtimeSegmentDataManager.class.getName() + "_" + segmentName
            + "_" + kafkaStreamProviderConfig.getStreamName());
    segmentLogger.info("Created segment data manager with Sorted column:{}, invertedIndexColumns:{}",
            sortedColumn, invertedIndexColumns);

    segmentEndTimeThreshold = start + kafkaStreamProviderConfig.getTimeThresholdToFlushSegment();

    this.resourceDir = new File(resourceDataDir);
    this.resourceTmpDir = new File(resourceDataDir, "_tmp");
    if (!resourceTmpDir.exists()) {
        resourceTmpDir.mkdirs();
    }
    // create and init stream provider
    final String tableName = tableConfig.getTableName();
    this.kafkaStreamProvider = StreamProviderFactory.buildStreamProvider();
    this.kafkaStreamProvider.init(kafkaStreamProviderConfig, tableName, serverMetrics);
    this.kafkaStreamProvider.start();
    this.tableStreamName = tableName + "_" + kafkaStreamProviderConfig.getStreamName();

    // lets create a new realtime segment
    segmentLogger.info("Started kafka stream provider");
    realtimeSegment = new RealtimeSegmentImpl(schema,
            kafkaStreamProviderConfig.getSizeThresholdToFlushSegment(), tableName,
            segmentMetadata.getSegmentName(), kafkaStreamProviderConfig.getStreamName(), serverMetrics,
            invertedIndexColumns);
    realtimeSegment.setSegmentMetadata(segmentMetadata, this.schema);
    notifier = realtimeTableDataManager;

    segmentStatusTask = new TimerTask() {
        @Override
        public void run() {
            computeKeepIndexing();
        }
    };

    // start the indexing thread
    indexingThread = new Thread(new Runnable() {
        @Override
        public void run() {
            // continue indexing until criteria is met
            boolean notFull = true;
            long exceptionSleepMillis = 50L;
            segmentLogger.info("Starting to collect rows");

            do {
                GenericRow row = null;
                try {
                    row = kafkaStreamProvider.next();

                    if (row != null) {
                        row = extractor.transform(row);
                        notFull = realtimeSegment.index(row);
                        exceptionSleepMillis = 50L;
                    }
                } catch (Exception e) {
                    segmentLogger.warn(
                            "Caught exception while indexing row, sleeping for {} ms, row contents {}",
                            exceptionSleepMillis, row, e);

                    // Sleep for a short time as to avoid filling the logs with exceptions too quickly
                    Uninterruptibles.sleepUninterruptibly(exceptionSleepMillis, TimeUnit.MILLISECONDS);
                    exceptionSleepMillis = Math.min(60000L, exceptionSleepMillis * 2);
                } catch (Error e) {
                    segmentLogger.error("Caught error in indexing thread", e);
                    throw e;
                }
            } while (notFull && keepIndexing && (!isShuttingDown));

            if (isShuttingDown) {
                segmentLogger.info("Shutting down indexing thread!");
                return;
            }
            try {
                int numErrors, numConversions, numNulls, numNullCols;
                if ((numErrors = extractor.getTotalErrors()) > 0) {
                    serverMetrics.addMeteredTableValue(tableStreamName, ServerMeter.ROWS_WITH_ERRORS,
                            (long) numErrors);
                }
                Map<String, Integer> errorCount = extractor.getErrorCount();
                for (String column : errorCount.keySet()) {
                    if ((numErrors = errorCount.get(column)) > 0) {
                        segmentLogger.warn("Column {} had {} rows with errors", column, numErrors);
                    }
                }
                if ((numConversions = extractor.getTotalConversions()) > 0) {
                    serverMetrics.addMeteredTableValue(tableStreamName, ServerMeter.ROWS_NEEDING_CONVERSIONS,
                            (long) numConversions);
                    segmentLogger.info("{} rows needed conversions ", numConversions);
                }
                if ((numNulls = extractor.getTotalNulls()) > 0) {
                    serverMetrics.addMeteredTableValue(tableStreamName, ServerMeter.ROWS_WITH_NULL_VALUES,
                            (long) numNulls);
                    segmentLogger.info("{} rows had null columns", numNulls);
                }
                if ((numNullCols = extractor.getTotalNullCols()) > 0) {
                    serverMetrics.addMeteredTableValue(tableStreamName, ServerMeter.COLUMNS_WITH_NULL_VALUES,
                            (long) numNullCols);
                    segmentLogger.info("{} columns had null values", numNullCols);
                }
                segmentLogger.info("Indexing threshold reached, proceeding with index conversion");
                // kill the timer first
                segmentStatusTask.cancel();
                updateCurrentDocumentCountMetrics();
                segmentLogger.info("Indexed {} raw events, current number of docs = {}",
                        realtimeSegment.getRawDocumentCount(),
                        realtimeSegment.getSegmentMetadata().getTotalDocs());
                File tempSegmentFolder = new File(resourceTmpDir,
                        "tmp-" + String.valueOf(System.currentTimeMillis()));

                // lets convert the segment now
                RealtimeSegmentConverter converter = new RealtimeSegmentConverter(realtimeSegment,
                        tempSegmentFolder.getAbsolutePath(), schema, segmentMetadata.getTableName(),
                        segmentMetadata.getSegmentName(), sortedColumn, invertedIndexColumns);

                segmentLogger.info("Trying to build segment");
                final long buildStartTime = System.nanoTime();
                converter.build(_segmentVersion);
                final long buildEndTime = System.nanoTime();
                segmentLogger.info("Built segment in {} ms",
                        TimeUnit.MILLISECONDS.convert((buildEndTime - buildStartTime), TimeUnit.NANOSECONDS));
                File destDir = new File(resourceDataDir, segmentMetadata.getSegmentName());
                FileUtils.deleteQuietly(destDir);
                FileUtils.moveDirectory(tempSegmentFolder.listFiles()[0], destDir);

                FileUtils.deleteQuietly(tempSegmentFolder);
                long segStartTime = realtimeSegment.getMinTime();
                long segEndTime = realtimeSegment.getMaxTime();

                TimeUnit timeUnit = schema.getTimeFieldSpec().getOutgoingGranularitySpec().getTimeType();
                IndexSegment segment = Loaders.IndexSegment.load(
                        new File(resourceDir, segmentMetatdaZk.getSegmentName()), mode,
                        realtimeTableDataManager.getIndexLoadingConfigMetadata());

                segmentLogger.info("Committing Kafka offsets");
                boolean commitSuccessful = false;
                try {
                    kafkaStreamProvider.commit();
                    commitSuccessful = true;
                    kafkaStreamProvider.shutdown();
                    segmentLogger.info("Successfully committed Kafka offsets, consumer release requested.");
                } catch (Throwable e) {
                    // If we got here, it means that either the commit or the shutdown failed. Considering that the
                    // KafkaConsumerManager delays shutdown and only adds the consumer to be released in a deferred way, this
                    // likely means that writing the Kafka offsets failed.
                    //
                    // The old logic (mark segment as done, then commit offsets and shutdown the consumer immediately) would die
                    // in a terrible way, leaving the consumer open and causing us to only get half the records from that point
                    // on. In this case, because we keep the consumer open for a little while, we should be okay if the
                    // controller reassigns us a new segment before the consumer gets released. Hopefully by the next time that
                    // we get to committing the offsets, the transient ZK failure that caused the write to fail will not
                    // happen again and everything will be good.
                    //
                    // Several things can happen:
                    // - The controller reassigns us a new segment before we release the consumer (KafkaConsumerManager will
                    //   keep the consumer open for about a minute, which should be enough time for the controller to reassign
                    //   us a new segment) and the next time we close the segment the offsets commit successfully; we're good.
                    // - The controller reassigns us a new segment, but after we released the consumer (if the controller was
                    //   down or there was a ZK failure on writing the Kafka offsets but not the Helix state). We lose whatever
                    //   data was in this segment. Not good.
                    // - The server crashes after this comment and before we mark the current segment as done; if the Kafka
                    //   offsets didn't get written, then when the server restarts it'll start consuming the current segment
                    //   from the previously committed offsets; we're good.
                    // - The server crashes after this comment, the Kafka offsets were written but the segment wasn't marked as
                    //   done in Helix, but we got a failure (or not) on the commit; we lose whatever data was in this segment
                    //   if we restart the server (not good). If we manually mark the segment as done in Helix by editing the
                    //   state in ZK, everything is good, we'll consume a new segment that starts from the correct offsets.
                    //
                    // This is still better than the previous logic, which would have these failure modes:
                    // - Consumer was left open and the controller reassigned us a new segment; consume only half the events
                    //   (because there are two consumers and Kafka will try to rebalance partitions between those two)
                    // - We got a segment assigned to us before we got around to committing the offsets, reconsume the data that
                    //   we got in this segment again, as we're starting consumption from the previously committed offset (eg.
                    //   duplicate data).
                    //
                    // This is still not very satisfactory, which is why this part is due for a redesign.
                    //
                    // Assuming you got here because the realtime offset commit metric has fired, check the logs to determine
                    // which of the above scenarios happened. If you're in one of the good scenarios, then there's nothing to
                    // do. If you're not, then based on how critical it is to get those rows back, then your options are:
                    // - Wipe the realtime table and reconsume everything (mark the replica as disabled so that clients don't
                    //   see query results from partially consumed data, then re-enable it when this replica has caught up)
                    // - Accept that those rows are gone in this replica and move on (they'll be replaced by good offline data
                    //   soon anyway)
                    // - If there's a replica that has consumed properly, you could shut it down, copy its segments onto this
                    //   replica, assign a new consumer group id to this replica, rename the copied segments and edit their
                    //   metadata to reflect the new consumer group id, copy the Kafka offsets from the shutdown replica onto
                    //   the new consumer group id and then restart both replicas. This should get you the missing rows.

                    segmentLogger.error(
                            "FATAL: Exception committing or shutting down consumer commitSuccessful={}",
                            commitSuccessful, e);
                    serverMetrics.addMeteredTableValue(tableName, ServerMeter.REALTIME_OFFSET_COMMIT_EXCEPTIONS,
                            1L);
                    if (!commitSuccessful) {
                        kafkaStreamProvider.shutdown();
                    }
                }

                try {
                    segmentLogger.info("Marking current segment as completed in Helix");
                    RealtimeSegmentZKMetadata metadataToOverwrite = new RealtimeSegmentZKMetadata();
                    metadataToOverwrite.setTableName(segmentMetadata.getTableName());
                    metadataToOverwrite.setSegmentName(segmentMetadata.getSegmentName());
                    metadataToOverwrite.setSegmentType(SegmentType.OFFLINE);
                    metadataToOverwrite.setStatus(Status.DONE);
                    metadataToOverwrite.setStartTime(segStartTime);
                    metadataToOverwrite.setEndTime(segEndTime);
                    metadataToOverwrite.setTotalRawDocs(realtimeSegment.getSegmentMetadata().getTotalDocs());
                    metadataToOverwrite.setTimeUnit(timeUnit);
                    notifier.notifySegmentCommitted(metadataToOverwrite, segment);
                    segmentLogger.info(
                            "Completed write of segment completion to Helix, waiting for controller to assign a new segment");
                } catch (Exception e) {
                    if (commitSuccessful) {
                        segmentLogger.error(
                                "Offsets were committed to Kafka but we were unable to mark this segment as completed in Helix. Manually mark the segment as completed in Helix; restarting this instance will result in data loss.",
                                e);
                    } else {
                        segmentLogger.warn(
                                "Caught exception while marking segment as completed in Helix. Offsets were not written, restarting the instance should be safe.",
                                e);
                    }
                }
            } catch (Exception e) {
                segmentLogger.error("Caught exception in the realtime indexing thread", e);
            }
        }
    });

    indexingThread.start();
    serverMetrics.addValueToTableGauge(tableName, ServerGauge.SEGMENT_COUNT, 1L);
    segmentLogger.debug("scheduling keepIndexing timer check");
    // start a schedule timer to keep track of the segment
    TimerService.timer.schedule(segmentStatusTask, ONE_MINUTE_IN_MILLSEC, ONE_MINUTE_IN_MILLSEC);
    segmentLogger.info("finished scheduling keepIndexing timer check");
}
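
The NANOSECONDS usage in this example is the segment-build timing near the end of the indexing thread: capture System.nanoTime() before and after the build, then convert the delta to milliseconds for the "Built segment in {} ms" log line. Below is a stripped-down sketch of that pattern; the timeMillis helper and the stand-in workload (in place of converter.build(...)) are hypothetical.

import java.util.concurrent.TimeUnit;

public final class BuildTimer {

    // Run a task and return how long it took, converting the nanosecond measurement to milliseconds.
    public static long timeMillis(Runnable task) {
        final long start = System.nanoTime();
        task.run();
        final long end = System.nanoTime();
        return TimeUnit.MILLISECONDS.convert(end - start, TimeUnit.NANOSECONDS);
    }

    public static void main(String[] args) {
        long ms = timeMillis(() -> {
            // Stand-in for the real work, e.g. converter.build(segmentVersion).
            for (int i = 0; i < 1_000_000; i++) {
                Math.sqrt(i);
            }
        });
        System.out.println("Built segment in " + ms + " ms");
    }
}

The same example also leans on TimeUnit as a unit argument: Uninterruptibles.sleepUninterruptibly(exceptionSleepMillis, TimeUnit.MILLISECONDS) implements the exponential backoff between indexing errors.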

From source file:org.apache.drill.exec.store.http.HttpGroupScan.java

/**
 * Distributes the scan work units across the incoming Drillbit endpoints.
 *
 * @param incomingEndpoints Drillbit endpoints available to run fragments of this scan
 */
@Override
public void applyAssignments(List<DrillbitEndpoint> incomingEndpoints) {
    watch.reset();
    watch.start();

    final int numSlots = incomingEndpoints.size();
    logger.info("incomingEndpoints size: " + numSlots);
    logger.info("incomingEndpoints: " + incomingEndpoints.toString());

    Preconditions.checkArgument(numSlots <= httpWorks.size(), String.format(
            "Incoming endpoints %d is greater than number of scan regions %d", numSlots, httpWorks.size()));

    /*
     * Minimum/Maximum number of assignment per slot
     */
    final int minPerEndpointSlot = (int) Math.floor((double) httpWorks.size() / numSlots);
    final int maxPerEndpointSlot = (int) Math.ceil((double) httpWorks.size() / numSlots);

    endpointFragmentMapping = Maps.newHashMapWithExpectedSize(numSlots);

    Boolean executeLimitFlg = calcExecuteLimitFlg();
    List<String> orderByCols = httpScanSpec.generateOrderByCols();

    for (int i = 0; i < httpWorks.size(); i++) {

        int slotIndex = i % numSlots;
        HttpWork work = httpWorks.get(i);

        List<HttpSubScanSpec> endpointSlotScanList = endpointFragmentMapping.get(slotIndex);
        if (endpointSlotScanList == null) {
            endpointSlotScanList = new ArrayList<HttpSubScanSpec>(maxPerEndpointSlot);
        }

        //TODO
        HttpSubScanSpec tmpScanSpec = new HttpSubScanSpec(work.getDbName(), httpScanSpec.getTableName(),
                storagePluginConfig.getConnection(), storagePluginConfig.getResultKey(),
                work.getPartitionKeyStart(), work.getPartitionKeyEnd(), httpScanSpec.getFilterArgs(),
                httpScanSpec.getGroupByCols(), orderByCols,
                executeLimitFlg ? httpScanSpec.getLimitValue() : null, this.columns);

        endpointSlotScanList.add(tmpScanSpec);
        endpointFragmentMapping.put(slotIndex, endpointSlotScanList);
        logger.info("endpointSlotScanList: " + endpointSlotScanList);
    }

    logger.info("applyAssignments endpointFragmentMapping: " + endpointFragmentMapping);

    logger.debug("Built assignment map in {} s.\nEndpoints: {}.\nAssignment Map: {}",
            watch.elapsed(TimeUnit.NANOSECONDS) / 1000, incomingEndpoints, endpointFragmentMapping.toString());
}
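
Here the elapsed time comes from a Guava Stopwatch read in NANOSECONDS and divided by 1000, i.e. microseconds. A minimal sketch of the same measurement, assuming Guava is on the classpath (Stopwatch.elapsed(TimeUnit.MICROSECONDS) would return the same value without the manual division):

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class StopwatchMicros {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch watch = Stopwatch.createStarted();
        Thread.sleep(5); // stand-in for building the assignment map
        // elapsed(NANOSECONDS) / 1000 yields microseconds, matching the log message above.
        long micros = watch.elapsed(TimeUnit.NANOSECONDS) / 1000;
        System.out.println("Built assignment map in " + micros + " µs");
    }
}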

From source file:org.apache.nifi.processors.aws.wag.InvokeAWSGatewayApi.java

@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    ComponentLog logger = getLogger();
    FlowFile requestFlowFile = session.get();

    // Checking to see if the property to put the body of the response in an attribute was set
    boolean putToAttribute = context.getProperty(PROP_PUT_OUTPUT_IN_ATTRIBUTE).isSet();
    if (requestFlowFile == null) {
        String request = context.getProperty(PROP_METHOD).evaluateAttributeExpressions().getValue()
                .toUpperCase();
        if ("POST".equals(request) || "PUT".equals(request) || "PATCH".equals(request)) {
            return;
        } else if (putToAttribute) {
            requestFlowFile = session.create();
        }
    }

    // Every request/response cycle has a unique transaction id which will be stored as a flowfile attribute.
    final UUID txId = UUID.randomUUID();
    FlowFile responseFlowFile = null;

    try {
        final int maxAttributeSize = context.getProperty(PROP_PUT_ATTRIBUTE_MAX_LENGTH).asInteger();

        final String resourceName = context.getProperty(PROP_RESOURCE_NAME).getValue();

        final GenericApiGatewayClient client = getClient();

        final GenericApiGatewayRequest request = configureRequest(context, session, resourceName,
                requestFlowFile);

        logRequest(logger, client.getEndpoint(), request);
        final long startNanos = System.nanoTime();
        GenericApiGatewayResponse response = null;
        GenericApiGatewayException exception = null;
        try {
            response = client.execute(request);
            logResponse(logger, response);
        } catch (GenericApiGatewayException gag) {
            // ERROR response codes may come back as exceptions, 404 for example
            exception = gag;
        }

        final int statusCode;
        if (exception != null) {
            statusCode = exception.getStatusCode();
        } else {
            statusCode = response.getHttpResponse().getStatusCode();
        }

        if (statusCode == 0) {
            throw new IllegalStateException("Status code unknown, connection hasn't been attempted.");
        }
        final String endpoint = context.getProperty(PROP_AWS_GATEWAY_API_ENDPOINT).getValue();
        boolean outputRegardless = context.getProperty(PROP_OUTPUT_RESPONSE_REGARDLESS).asBoolean();

        boolean outputBodyToResponseContent = (isSuccess(statusCode) && !putToAttribute || outputRegardless);
        boolean outputBodyToRequestAttribute = (!isSuccess(statusCode) || putToAttribute)
                && requestFlowFile != null;
        boolean bodyExists = response != null && response.getBody() != null;

        final String statusExplanation;
        if (exception != null) {
            statusExplanation = EnglishReasonPhraseCatalog.INSTANCE.getReason(statusCode, null);
        } else {
            statusExplanation = response.getHttpResponse().getStatusText();
        }

        // Create a map of the status attributes that are always written to the request and response FlowFiles
        final Map<String, String> statusAttributes = new HashMap<>();
        statusAttributes.put(STATUS_CODE, String.valueOf(statusCode));
        statusAttributes.put(STATUS_MESSAGE, statusExplanation);
        statusAttributes.put(ENDPOINT_ATTR, client.getEndpointPrefix());
        statusAttributes.put(RESOURCE_NAME_ATTR, resourceName);
        statusAttributes.put(TRANSACTION_ID, txId.toString());

        if (outputBodyToResponseContent) {
            /*
             * If successful and putting to response flowfile, store the response body as the flowfile payload
             * we include additional flowfile attributes including the response headers and the status codes.
             */

            // clone the flowfile to capture the response
            if (requestFlowFile != null) {
                responseFlowFile = session.create(requestFlowFile);
                // write attributes to request flowfile
                requestFlowFile = session.putAllAttributes(requestFlowFile, statusAttributes);
                // If the property to add the response headers to the request flowfile is true then add them
                if (context.getProperty(PROP_ADD_HEADERS_TO_REQUEST).asBoolean()) {
                    // write the response headers as attributes
                    // this will overwrite any existing flowfile attributes
                    requestFlowFile = session.putAllAttributes(requestFlowFile,
                            convertAttributesFromHeaders(response));
                }
            } else {
                responseFlowFile = session.create();
            }

            // write attributes to response flowfile
            responseFlowFile = session.putAllAttributes(responseFlowFile, statusAttributes);

            // write the response headers as attributes
            // this will overwrite any existing flowfile attributes
            if (response != null) {
                responseFlowFile = session.putAllAttributes(responseFlowFile,
                        convertAttributesFromHeaders(response));
            } else {
                responseFlowFile = session.putAllAttributes(responseFlowFile, exception.getHttpHeaders());
            }
            // transfer the message body to the payload
            // can potentially be null in edge cases
            if (bodyExists) {
                final String contentType = response.getHttpResponse().getHeaders().get("Content-Type");
                if (!(contentType == null) && !contentType.trim().isEmpty()) {
                    responseFlowFile = session.putAttribute(responseFlowFile, CoreAttributes.MIME_TYPE.key(),
                            contentType.trim());
                }

                responseFlowFile = session.importFrom(new ByteArrayInputStream(response.getBody().getBytes()),
                        responseFlowFile);

                // emit provenance event
                final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
                if (requestFlowFile != null) {
                    session.getProvenanceReporter().fetch(responseFlowFile, endpoint, millis);
                } else {
                    session.getProvenanceReporter().receive(responseFlowFile, endpoint, millis);
                }
            } else if (exception != null) {
                final String contentType = "application/json";
                responseFlowFile = session.putAttribute(responseFlowFile, CoreAttributes.MIME_TYPE.key(),
                        contentType.trim());

                responseFlowFile = session.importFrom(new ByteArrayInputStream(exception.getRawResponse()),
                        responseFlowFile);

                // emit provenance event
                final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
                if (requestFlowFile != null) {
                    session.getProvenanceReporter().fetch(responseFlowFile, endpoint, millis);
                } else {
                    session.getProvenanceReporter().receive(responseFlowFile, endpoint, millis);
                }
            }
        }
        // if not successful and request flowfile is not null, store the response body into a flowfile attribute
        if (outputBodyToRequestAttribute) {
            String attributeKey = context.getProperty(PROP_PUT_OUTPUT_IN_ATTRIBUTE)
                    .evaluateAttributeExpressions(requestFlowFile).getValue();
            if (attributeKey == null) {
                attributeKey = RESPONSE_BODY;
            }
            byte[] outputBuffer;
            int size = 0;
            outputBuffer = new byte[maxAttributeSize];
            if (bodyExists) {
                size = StreamUtils.fillBuffer(new ByteArrayInputStream(response.getBody().getBytes()),
                        outputBuffer, false);
            } else if (exception != null && exception.getRawResponse() != null
                    && exception.getRawResponse().length > 0) {
                size = StreamUtils.fillBuffer(new ByteArrayInputStream(exception.getRawResponse()),
                        outputBuffer, false);
            }

            if (size > 0) {
                String bodyString = new String(outputBuffer, 0, size, "UTF-8");
                requestFlowFile = session.putAttribute(requestFlowFile, attributeKey, bodyString);
            }

            requestFlowFile = session.putAllAttributes(requestFlowFile, statusAttributes);

            final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
            session.getProvenanceReporter().modifyAttributes(requestFlowFile,
                    "The " + attributeKey + " has been added. The value of which is the body of a http call to "
                            + endpoint + resourceName + ". It took " + millis + " millis.");
        }

        route(requestFlowFile, responseFlowFile, session, context, statusCode, getRelationships());
    } catch (Exception e) {
        // penalize or yield
        if (requestFlowFile != null) {
            logger.error("Routing to {} due to exception: {}", new Object[] { REL_FAILURE.getName(), e }, e);
            requestFlowFile = session.penalize(requestFlowFile);
            requestFlowFile = session.putAttribute(requestFlowFile, EXCEPTION_CLASS, e.getClass().getName());
            requestFlowFile = session.putAttribute(requestFlowFile, EXCEPTION_MESSAGE, e.getMessage());
            // transfer original to failure
            session.transfer(requestFlowFile, getRelationshipForName(REL_FAILURE_NAME, getRelationships()));
        } else {
            logger.error("Yielding processor due to exception encountered as a source processor: {}", e);
            context.yield();
        }

        // cleanup response flowfile, if applicable
        try {
            if (responseFlowFile != null) {
                session.remove(responseFlowFile);
            }
        } catch (final Exception e1) {
            logger.error("Could not cleanup response flowfile due to exception: {}", new Object[] { e1 }, e1);
        }
    }
}

From source file:org.apache.bookkeeper.bookie.InterleavedLedgerStorage.java

@Override
public ByteBuf getEntry(long ledgerId, long entryId) throws IOException {
    long offset;
    /*
     * If entryId is BookieProtocol.LAST_ADD_CONFIRMED, then return the last written.
     */
    if (entryId == BookieProtocol.LAST_ADD_CONFIRMED) {
        entryId = ledgerCache.getLastEntry(ledgerId);
    }

    // Get Offset
    long startTimeNanos = MathUtils.nowInNano();
    boolean success = false;
    try {
        offset = ledgerCache.getEntryOffset(ledgerId, entryId);
        if (offset == 0) {
            throw new Bookie.NoEntryException(ledgerId, entryId);
        }
        success = true;
    } finally {
        if (success) {
            getOffsetStats.registerSuccessfulEvent(MathUtils.elapsedNanos(startTimeNanos),
                    TimeUnit.NANOSECONDS);
        } else {
            getOffsetStats.registerFailedEvent(MathUtils.elapsedNanos(startTimeNanos), TimeUnit.NANOSECONDS);
        }
    }
    // Get Entry
    startTimeNanos = MathUtils.nowInNano();
    success = false;
    try {
        ByteBuf retBytes = entryLogger.readEntry(ledgerId, entryId, offset);
        success = true;
        return retBytes;
    } finally {
        if (success) {
            getEntryStats.registerSuccessfulEvent(MathUtils.elapsedNanos(startTimeNanos), TimeUnit.NANOSECONDS);
        } else {
            getEntryStats.registerFailedEvent(MathUtils.elapsedNanos(startTimeNanos), TimeUnit.NANOSECONDS);
        }
    }
}
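
The pattern in getEntry is to time each phase with a nanosecond clock and, in a finally block, record the latency into a success or failure statistic, passing TimeUnit.NANOSECONDS as the unit. The sketch below mirrors that shape; the OpStats class is a hypothetical stand-in for BookKeeper's stats logger, and System.nanoTime() replaces MathUtils.

import java.util.concurrent.TimeUnit;

public class EntryReadTiming {

    // Hypothetical latency recorder mirroring the registerSuccessfulEvent/registerFailedEvent calls above;
    // a real implementation would feed a histogram or a metrics registry.
    static class OpStats {
        void registerSuccessfulEvent(long duration, TimeUnit unit) {
            System.out.println("success latency: " + unit.toMicros(duration) + " µs");
        }

        void registerFailedEvent(long duration, TimeUnit unit) {
            System.out.println("failure latency: " + unit.toMicros(duration) + " µs");
        }
    }

    private final OpStats getEntryStats = new OpStats();

    String readEntry() {
        long startTimeNanos = System.nanoTime();
        boolean success = false;
        try {
            String entry = "entry-bytes"; // stand-in for entryLogger.readEntry(ledgerId, entryId, offset)
            success = true;
            return entry;
        } finally {
            long elapsedNanos = System.nanoTime() - startTimeNanos;
            if (success) {
                getEntryStats.registerSuccessfulEvent(elapsedNanos, TimeUnit.NANOSECONDS);
            } else {
                getEntryStats.registerFailedEvent(elapsedNanos, TimeUnit.NANOSECONDS);
            }
        }
    }

    public static void main(String[] args) {
        new EntryReadTiming().readEntry();
    }
}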

From source file:co.paralleluniverse.galaxy.core.BackupImpl.java

private BACKUP_PACKET flush1() {
    currentBackupsLock.lock();
    try {
        if (lastSent == null) {
            shouldFlush = false;
            this.lastFlush = System.nanoTime();
            if (currentBackups.isEmpty())
                return null;
            final BACKUP_PACKET packet;
            packet = Message.BACKUP_PACKET(nextId, currentBackups.values());
            nextId++;
            lastSent = packet;
            currentBackups.clear();
            return packet;
        } else { // last backup not yet acked
            LOG.debug("Last backup not acked. Not sending.");
            final long passedMillis = TimeUnit.MILLISECONDS.convert(System.nanoTime() - lastFlush,
                    TimeUnit.NANOSECONDS);
            if (passedMillis > 2000)
                LOG.warn("SLAVE HAS NOT ACKED IN {} MILLISECONDS. SOMETHING IS SERIOUSLY WRONG!", passedMillis);
            shouldFlush = true;
            return null;
        }
    } finally {
        currentBackupsLock.unlock();
    }
}
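
flush1 uses TimeUnit.MILLISECONDS.convert(..., TimeUnit.NANOSECONDS) as a staleness check: if the previous backup packet has gone unacknowledged for more than two seconds, it logs a loud warning. A hedged sketch of that watchdog idea, with the threshold and messages as illustrative placeholders:

import java.util.concurrent.TimeUnit;

public class AckWatchdog {
    private static final long WARN_AFTER_MILLIS = 2000; // illustrative threshold

    private final long lastFlushNanos = System.nanoTime();

    void checkAck(boolean lastPacketAcked) {
        if (!lastPacketAcked) {
            long passedMillis = TimeUnit.MILLISECONDS.convert(
                    System.nanoTime() - lastFlushNanos, TimeUnit.NANOSECONDS);
            if (passedMillis > WARN_AFTER_MILLIS) {
                System.err.println("Slave has not acked in " + passedMillis + " ms; something is seriously wrong");
            }
        }
    }

    public static void main(String[] args) throws InterruptedException {
        AckWatchdog watchdog = new AckWatchdog();
        Thread.sleep(10);
        watchdog.checkAck(false); // below the threshold, so no warning is printed
    }
}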

From source file:org.apache.drill.exec.store.hive.HiveMetadataProvider.java

/**
 * Gets list of input splits based on table location.
 * These input splits are grouped logically by file name
 * if skip header / footer logic should be applied later on.
 *
 * @param properties table or partition properties
 * @param sd storage descriptor
 * @param partition hive partition
 * @return list of logically grouped input splits
 */
private List<LogicalInputSplit> splitInputWithUGI(final Properties properties, final StorageDescriptor sd,
        final Partition partition) {
    watch.start();
    try {
        return ugi.doAs(new PrivilegedExceptionAction<List<LogicalInputSplit>>() {
            public List<LogicalInputSplit> run() throws Exception {
                final List<LogicalInputSplit> splits = Lists.newArrayList();
                final JobConf job = new JobConf(hiveConf);
                HiveUtilities.addConfToJob(job, properties);
                HiveUtilities.verifyAndAddTransactionalProperties(job, sd);
                job.setInputFormat(HiveUtilities.getInputFormatClass(job, sd, hiveReadEntry.getTable()));
                final Path path = new Path(sd.getLocation());
                final FileSystem fs = path.getFileSystem(job);
                if (fs.exists(path)) {
                    FileInputFormat.addInputPath(job, path);
                    final InputFormat<?, ?> format = job.getInputFormat();
                    InputSplit[] inputSplits = format.getSplits(job, 1);

                    // if current table with text input format and has header / footer,
                    // we need to make sure that splits of the same file are grouped together
                    if (TextInputFormat.class.getCanonicalName().equals(sd.getInputFormat())
                            && HiveUtilities.hasHeaderOrFooter(hiveReadEntry.getTable())) {
                        Multimap<Path, FileSplit> inputSplitMultimap = transformFileSplits(inputSplits);
                        for (Collection<FileSplit> logicalInputSplit : inputSplitMultimap.asMap().values()) {
                            splits.add(new LogicalInputSplit(logicalInputSplit, partition));
                        }
                    } else {
                        for (final InputSplit split : inputSplits) {
                            splits.add(new LogicalInputSplit(split, partition));
                        }
                    }
                }
                return splits;
            }
        });
    } catch (final InterruptedException | IOException e) {
        final String errMsg = String.format("Failed to create input splits: %s", e.getMessage());
        logger.error(errMsg, e);
        throw new DrillRuntimeException(errMsg, e);
    } finally {
        logger.trace("Took {} s to get splits from {}", watch.elapsed(TimeUnit.NANOSECONDS) / 1000,
                sd.getLocation());
        watch.stop();
    }
}

From source file:com.netflix.genie.web.services.impl.JobCoordinatorServiceImpl.java

/**
 * {@inheritDoc}
 */
@Override
public String coordinateJob(
        @Valid @NotNull(message = "No job request provided. Unable to execute.") final JobRequest jobRequest,
        @Valid @NotNull(message = "No job metadata provided. Unable to execute.") final JobMetadata jobMetadata)
        throws GenieException {
    final long coordinationStart = System.nanoTime();
    final Set<Tag> tags = Sets.newHashSet();
    final String jobId = jobRequest.getId()
            .orElseThrow(() -> new GenieServerException("Id of the jobRequest cannot be null"));
    JobStatus jobStatus = JobStatus.FAILED;
    try {
        log.info("Called to schedule job launch for job {}", jobId);
        // create the job object in the database with status INIT
        final Job.Builder jobBuilder = new Job.Builder(jobRequest.getName(), jobRequest.getUser(),
                jobRequest.getVersion()).withId(jobId).withTags(jobRequest.getTags()).withStatus(JobStatus.INIT)
                        .withStatusMsg("Job Accepted and in initialization phase.");

        jobRequest.getCommandArgs().ifPresent(commandArgs -> jobBuilder.withCommandArgs(
                Lists.newArrayList(StringUtils.splitByWholeSeparator(commandArgs, StringUtils.SPACE))));
        jobRequest.getDescription().ifPresent(jobBuilder::withDescription);
        if (!jobRequest.isDisableLogArchival()) {
            jobBuilder.withArchiveLocation(this.jobsProperties.getLocations().getArchives()
                    + JobConstants.FILE_PATH_DELIMITER + jobId + ".tar.gz");
        }

        final JobExecution jobExecution = new JobExecution.Builder(this.hostname).withId(jobId).build();

        // Log all the job initial job information
        this.jobPersistenceService.createJob(jobRequest, jobMetadata, jobBuilder.build(), jobExecution);
        this.jobStateService.init(jobId);
        log.info("Finding possible clusters and commands for job {}", jobRequest.getId().orElse(NO_ID_FOUND));
        final JobSpecification jobSpecification;
        try {
            jobSpecification = this.specificationService.resolveJobSpecification(jobId,
                    DtoConverters.toV4JobRequest(jobRequest));
        } catch (final RuntimeException re) {
            //TODO: Here for now as we figure out what to do with exceptions for JobSpecificationServiceImpl
            throw new GeniePreconditionException(re.getMessage(), re);
        }
        final Cluster cluster = this.clusterPersistenceService
                .getCluster(jobSpecification.getCluster().getId());
        final Command command = this.commandPersistenceService
                .getCommand(jobSpecification.getCommand().getId());

        // Now that we have command how much memory should the job use?
        final int memory = jobRequest.getMemory()
                .orElse(command.getMemory().orElse(this.jobsProperties.getMemory().getDefaultJobMemory()));

        final ImmutableList.Builder<Application> applicationsBuilder = ImmutableList.builder();
        for (final JobSpecification.ExecutionResource applicationResource : jobSpecification
                .getApplications()) {
            applicationsBuilder
                    .add(this.applicationPersistenceService.getApplication(applicationResource.getId()));
        }
        final ImmutableList<Application> applications = applicationsBuilder.build();

        // Save all the runtime information
        this.setRuntimeEnvironment(jobId, cluster, command, applications, memory);

        final int maxJobMemory = this.jobsProperties.getMemory().getMaxJobMemory();
        if (memory > maxJobMemory) {
            jobStatus = JobStatus.INVALID;
            throw new GeniePreconditionException("Requested " + memory
                    + " MB to run job which is more than the " + maxJobMemory + " MB allowed");
        }

        log.info("Checking if can run job {} from user {}", jobRequest.getId(), jobRequest.getUser());
        final JobsUsersActiveLimitProperties activeLimit = this.jobsProperties.getUsers().getActiveLimit();
        if (activeLimit.isEnabled()) {
            final long activeJobsLimit = activeLimit.getCount();
            final long activeJobsCount = this.jobSearchService.getActiveJobCountForUser(jobRequest.getUser());
            if (activeJobsCount >= activeJobsLimit) {
                throw GenieUserLimitExceededException.createForActiveJobsLimit(jobRequest.getUser(),
                        activeJobsCount, activeJobsLimit);
            }
        }

        synchronized (this) {
            log.info("Checking if can run job {} on this node", jobRequest.getId());
            final int maxSystemMemory = this.jobsProperties.getMemory().getMaxSystemMemory();
            final int usedMemory = this.jobStateService.getUsedMemory();
            if (usedMemory + memory <= maxSystemMemory) {
                log.info("Job {} can run on this node as only {}/{} MB are used and requested {} MB", jobId,
                        usedMemory, maxSystemMemory, memory);
                // Tell the system a new job has been scheduled so any actions can be taken
                log.info("Publishing job scheduled event for job {}", jobId);
                this.jobStateService.schedule(jobId, jobRequest, cluster, command, applications, memory);
                MetricsUtils.addSuccessTags(tags);
                return jobId;
            } else {
                throw new GenieServerUnavailableException("Job " + jobId + " can't run on this node "
                        + usedMemory + "/" + maxSystemMemory + " MB are used and requested " + memory + " MB");
            }
        }
    } catch (final GenieConflictException e) {
        MetricsUtils.addFailureTagsWithException(tags, e);
        // Job has not been initiated so we don't have to call JobStateService.done()
        throw e;
    } catch (final GenieException e) {
        MetricsUtils.addFailureTagsWithException(tags, e);
        //
        // Need to check if the job exists in the JobStateService
        // because this error can happen before the job is initiated.
        //
        if (this.jobStateService.jobExists(jobId)) {
            this.jobStateService.done(jobId);
            this.jobPersistenceService.updateJobStatus(jobId, jobStatus, e.getMessage());
        }
        throw e;
    } catch (final Exception e) {
        MetricsUtils.addFailureTagsWithException(tags, e);
        //
        // Need to check if the job exists in the JobStateService
        // because this error can happen before the job is initiated.
        //
        if (this.jobStateService.jobExists(jobId)) {
            this.jobStateService.done(jobId);
            this.jobPersistenceService.updateJobStatus(jobId, jobStatus, e.getMessage());
        }
        throw new GenieServerException("Failed to coordinate job launch", e);
    } catch (final Throwable t) {
        MetricsUtils.addFailureTagsWithException(tags, t);
        throw t;
    } finally {
        this.registry.timer(OVERALL_COORDINATION_TIMER_NAME, tags).record(System.nanoTime() - coordinationStart,
                TimeUnit.NANOSECONDS);
    }
}
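
The finally block here records the whole coordination into a registry timer with nanosecond precision. Assuming a Micrometer MeterRegistry like the one this service uses, a minimal sketch of the record-in-finally pattern looks like this; the timer name, tag keys and the body of the try block are illustrative.

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.TimeUnit;

import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.Tag;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;

public class CoordinationTimerSketch {
    private static final String TIMER_NAME = "jobs.coordination.timer"; // illustrative metric name

    private final MeterRegistry registry = new SimpleMeterRegistry();

    public String coordinate(String jobId) {
        final long coordinationStart = System.nanoTime();
        final Set<Tag> tags = new HashSet<>();
        try {
            // ... resolve cluster, command and memory, then schedule the job ...
            tags.add(Tag.of("status", "success"));
            return jobId;
        } catch (RuntimeException e) {
            tags.add(Tag.of("status", "failure"));
            throw e;
        } finally {
            // Record the full coordination time with nanosecond precision, whatever the outcome.
            registry.timer(TIMER_NAME, tags)
                    .record(System.nanoTime() - coordinationStart, TimeUnit.NANOSECONDS);
        }
    }

    public static void main(String[] args) {
        System.out.println(new CoordinationTimerSketch().coordinate("job-123"));
    }
}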

From source file:org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.java

private long elapsedMs() {
    return TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - nextCallStartNs);
}

From source file:org.agatom.springatom.cmp.wizards.core.AbstractWizardProcessor.java

@SuppressWarnings("UnusedAssignment")
private void doValidate(final WizardResult result, final DataBinder binder, String step,
        final Map<String, Object> formData, final Locale locale) throws Exception {

    final Object target = binder.getTarget();

    if (!StringUtils.hasText(step)) {
        // Wizard submission, because step is null
        step = this.stepHelperDelegate.getLastStep();
        if (!this.stepHelperDelegate.isValidationEnabled(step)) {
            // reset the step again
            step = null;
        }
    }

    try {
        final BindingResult bindingResult = binder.getBindingResult();
        boolean alreadyValidate = false;

        if (this.localValidator != null) {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug(String.format("Validating via Validator instance=%s", this.localValidator));
            }
            this.validationService.validate(this.localValidator, bindingResult, result);
            alreadyValidate = true;
        }

        if (!alreadyValidate) {
            final ValidationBean bean = new ValidationBean();

            bean.setPartialResult(result);
            bean.setStepId(step);
            bean.setCommandBean(bindingResult.getTarget());
            bean.setCommandBeanName(this.getContextObjectName());
            bean.setFormData(formData);
            bean.setBindingModel(formData);

            if (this.validationService.canValidate(bean)) {
                if (LOGGER.isDebugEnabled()) {
                    LOGGER.debug(
                            String.format("Validating via validation service for validationBean=%s", bean));
                }
                this.validationService.validate(bean);
                alreadyValidate = true;
            }
        }

        /* Will validate only if not yet validated, it is not a step submission, and the wizard is allowed to validate.
         * This is a last opportunity to validate; however, unlike validation via
         * - localValidator
         * - validationService
         * this validation will be run only if
         * - not yet validated
         * - current (or last) step has validation flag set
         * - entire wizard has validation flag set
         */
        if (!alreadyValidate && this.isValidationEnabledForStep(step) && this.isValidationEnabled()) {
            LOGGER.debug(String.format(
                    "Not yet validated (tried localValidator and via validationService), assuming that is wizard submission due to step===null, validating through binder"));
            final Validator validator = binder.getValidator();
            if (validator != null) {

                final long startTime = System.nanoTime();
                validator.validate(target, bindingResult);
                final long endTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);

                result.addDebugData(WizardDebugDataKeys.VALIDATION_TIME, endTime);
                result.addDebugData(WizardDebugDataKeys.VALIDATOR,
                        ClassUtils.getShortName(validator.getClass()));

            }
        }

        if (LOGGER.isDebugEnabled()) {
            final Set<Message> messages = result.getValidationMessages();
            final short count = (short) (messages == null ? 0 : messages.size());
            LOGGER.debug(String.format("Validation completed, found %d validation errors", count));
        }
    } catch (Exception exp) {
        // Catch any validation exception and add it as an error
        LOGGER.error("Validation failed either via [localValidator,validationService,binder#validator", exp);
        result.addError(exp);
        result.addFeedbackMessage(FeedbackMessage.newError()
                .setTitle(this.messageSource.getMessage("sa.wiz.validationError.title", locale))
                .setMessage(this.messageSource.getMessage("sa.wiz.validationError.msg",
                        new Object[] { target.toString() }, locale)));
    }

}

From source file:com.linkedin.pinot.controller.helix.SegmentStatusChecker.java

/**
 * Runs a segment status pass over the currently loaded tables.
 */
public void runSegmentMetrics() {
    if (!_pinotHelixResourceManager.isLeader()) {
        LOGGER.info("Skipping Segment Status check, not leader!");
        setStatusToDefault();
        stop();
        return;
    }

    long startTime = System.nanoTime();

    LOGGER.info("Starting Segment Status check for metrics");

    // Fetch the list of tables
    List<String> allTableNames = _pinotHelixResourceManager.getAllTables();
    String helixClusterName = _pinotHelixResourceManager.getHelixClusterName();
    HelixAdmin helixAdmin = _pinotHelixResourceManager.getHelixAdmin();
    int realTimeTableCount = 0;
    int offlineTableCount = 0;
    ZkHelixPropertyStore<ZNRecord> propertyStore = _pinotHelixResourceManager.getPropertyStore();

    for (String tableName : allTableNames) {
        if (TableNameBuilder.getTableTypeFromTableName(tableName) == TableType.OFFLINE) {
            offlineTableCount++;
        } else {
            realTimeTableCount++;
        }
        IdealState idealState = helixAdmin.getResourceIdealState(helixClusterName, tableName);
        if ((idealState == null) || (idealState.getPartitionSet().isEmpty())) {
            int nReplicasFromIdealState = 1;
            try {
                if (idealState != null) {
                    nReplicasFromIdealState = Integer.valueOf(idealState.getReplicas());
                }
            } catch (NumberFormatException e) {
                // Ignore
            }
            _metricsRegistry.setValueOfTableGauge(tableName, ControllerGauge.NUMBER_OF_REPLICAS,
                    nReplicasFromIdealState);
            _metricsRegistry.setValueOfTableGauge(tableName, ControllerGauge.PERCENT_OF_REPLICAS, 100);
            _metricsRegistry.setValueOfTableGauge(tableName, ControllerGauge.PERCENT_SEGMENTS_AVAILABLE, 100);
            continue;
        }
        _metricsRegistry.setValueOfTableGauge(tableName, ControllerGauge.IDEALSTATE_ZNODE_SIZE,
                idealState.toString().length());
        ExternalView externalView = helixAdmin.getResourceExternalView(helixClusterName, tableName);

        int nReplicasIdealMax = 0; // Keeps track of maximum number of replicas in ideal state
        int nReplicasExternal = -1; // Keeps track of minimum number of replicas in external view
        int nErrors = 0; // Keeps track of number of segments in error state
        int nOffline = 0; // Keeps track of number segments with no online replicas
        int nSegments = 0; // Counts number of segments
        for (String partitionName : idealState.getPartitionSet()) {
            int nReplicas = 0;
            int nIdeal = 0;
            nSegments++;
            // Skip segments not online in ideal state
            for (Map.Entry<String, String> serverAndState : idealState.getInstanceStateMap(partitionName)
                    .entrySet()) {
                if (serverAndState == null) {
                    break;
                }
                if (serverAndState.getValue().equals(ONLINE)) {
                    nIdeal++;
                    break;
                }
            }
            if (nIdeal == 0) {
                // No online segments in ideal state
                continue;
            }
            nReplicasIdealMax = (idealState.getInstanceStateMap(partitionName).size() > nReplicasIdealMax)
                    ? idealState.getInstanceStateMap(partitionName).size()
                    : nReplicasIdealMax;
            if ((externalView == null) || (externalView.getStateMap(partitionName) == null)) {
                // No replicas for this segment
                TableType tableType = TableNameBuilder.getTableTypeFromTableName(tableName);
                if ((tableType != null) && (tableType.equals(TableType.OFFLINE))) {
                    OfflineSegmentZKMetadata segmentZKMetadata = ZKMetadataProvider
                            .getOfflineSegmentZKMetadata(propertyStore, tableName, partitionName);
                    if (segmentZKMetadata != null && segmentZKMetadata
                            .getPushTime() > System.currentTimeMillis() - _waitForPushTimeSeconds * 1000) {
                        // push not yet finished, skip
                        continue;
                    }
                }
                nOffline++;
                if (nOffline < MaxOfflineSegmentsToLog) {
                    LOGGER.warn("Segment {} of table {} has no replicas", partitionName, tableName);
                }
                nReplicasExternal = 0;
                continue;
            }
            for (Map.Entry<String, String> serverAndState : externalView.getStateMap(partitionName)
                    .entrySet()) {
                // Count number of online replicas. Ignore if state is CONSUMING.
                // It is possible for a segment to be ONLINE in idealstate, and CONSUMING in EV for a short period of time.
                // So, ignore this combination. If a segment exists in this combination for a long time, we will get
                // kafka-partition-not-consuming alert anyway.
                if (serverAndState.getValue().equals(ONLINE) || serverAndState.getValue().equals(CONSUMING)) {
                    nReplicas++;
                }
                if (serverAndState.getValue().equals(ERROR)) {
                    nErrors++;
                }
            }
            if (nReplicas == 0) {
                if (nOffline < MaxOfflineSegmentsToLog) {
                    LOGGER.warn("Segment {} of table {} has no online replicas", partitionName, tableName);
                }
                nOffline++;
            }
            nReplicasExternal = ((nReplicasExternal > nReplicas) || (nReplicasExternal == -1)) ? nReplicas
                    : nReplicasExternal;
        }
        if (nReplicasExternal == -1) {
            nReplicasExternal = (nReplicasIdealMax == 0) ? 1 : 0;
        }
        // Synchronization provided by Controller Gauge to make sure that only one thread updates the gauge
        _metricsRegistry.setValueOfTableGauge(tableName, ControllerGauge.NUMBER_OF_REPLICAS, nReplicasExternal);
        _metricsRegistry.setValueOfTableGauge(tableName, ControllerGauge.PERCENT_OF_REPLICAS,
                (nReplicasIdealMax > 0) ? (nReplicasExternal * 100 / nReplicasIdealMax) : 100);
        _metricsRegistry.setValueOfTableGauge(tableName, ControllerGauge.SEGMENTS_IN_ERROR_STATE, nErrors);
        _metricsRegistry.setValueOfTableGauge(tableName, ControllerGauge.PERCENT_SEGMENTS_AVAILABLE,
                (nSegments > 0) ? (100 - (nOffline * 100 / nSegments)) : 100);
        if (nOffline > 0) {
            LOGGER.warn("Table {} has {} segments with no online replicas", tableName, nOffline);
        }
        if (nReplicasExternal < nReplicasIdealMax) {
            LOGGER.warn("Table {} has {} replicas, below replication threshold :{}", tableName,
                    nReplicasExternal, nReplicasIdealMax);
        }
    }
    _metricsRegistry.setValueOfGlobalGauge(ControllerGauge.REALTIME_TABLE_COUNT, realTimeTableCount);
    _metricsRegistry.setValueOfGlobalGauge(ControllerGauge.OFFLINE_TABLE_COUNT, offlineTableCount);
    long totalNanos = System.nanoTime() - startTime;
    LOGGER.info("Segment status metrics completed in {}ms",
            TimeUnit.MILLISECONDS.convert(totalNanos, TimeUnit.NANOSECONDS));
}