Example usage for java.util.concurrent.atomic AtomicLong get

Introduction

On this page you can find example usages of java.util.concurrent.atomic.AtomicLong.get().

Prototype

public final long get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
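
Before the project examples below, here is a minimal, self-contained sketch (not taken from any of those projects) showing get() reading a counter that several threads update:

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongGetExample {
    public static void main(String[] args) throws InterruptedException {
        final AtomicLong counter = new AtomicLong(0L);

        // Four threads increment the shared counter concurrently.
        Thread[] workers = new Thread[4];
        for (int i = 0; i < workers.length; i++) {
            workers[i] = new Thread(() -> {
                for (int j = 0; j < 1000; j++) {
                    counter.incrementAndGet();
                }
            });
            workers[i].start();
        }
        for (Thread worker : workers) {
            worker.join();
        }

        // get() performs a volatile read, so after join() the value below is 4000.
        System.out.println("Final count: " + counter.get());
    }
}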

Usage

From source file: org.apache.nifi.processors.kite.ConvertAvroSchema.java

@Override
public void onTrigger(ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile incomingAvro = session.get();
    if (incomingAvro == null) {
        return;
    }

    String inputSchemaProperty = context.getProperty(INPUT_SCHEMA).evaluateAttributeExpressions(incomingAvro)
            .getValue();
    final Schema inputSchema;
    try {
        inputSchema = getSchema(inputSchemaProperty, DefaultConfiguration.get());
    } catch (SchemaNotFoundException e) {
        getLogger().error("Cannot find schema: " + inputSchemaProperty);
        session.transfer(incomingAvro, FAILURE);
        return;
    }
    String outputSchemaProperty = context.getProperty(OUTPUT_SCHEMA).evaluateAttributeExpressions(incomingAvro)
            .getValue();
    final Schema outputSchema;
    try {
        outputSchema = getSchema(outputSchemaProperty, DefaultConfiguration.get());
    } catch (SchemaNotFoundException e) {
        getLogger().error("Cannot find schema: " + outputSchemaProperty);
        session.transfer(incomingAvro, FAILURE);
        return;
    }
    final Map<String, String> fieldMapping = new HashMap<>();
    for (final Map.Entry<PropertyDescriptor, String> entry : context.getProperties().entrySet()) {
        if (entry.getKey().isDynamic()) {
            fieldMapping.put(entry.getKey().getName(), entry.getValue());
        }
    }
    // Set locale
    final String localeProperty = context.getProperty(LOCALE).getValue();
    final Locale locale = localeProperty.equals(DEFAULT_LOCALE_VALUE) ? Locale.getDefault()
            : LocaleUtils.toLocale(localeProperty);
    final AvroRecordConverter converter = new AvroRecordConverter(inputSchema, outputSchema, fieldMapping,
            locale);

    final DataFileWriter<Record> writer = new DataFileWriter<>(
            AvroUtil.newDatumWriter(outputSchema, Record.class));
    writer.setCodec(getCodecFactory(context.getProperty(COMPRESSION_TYPE).getValue()));

    final DataFileWriter<Record> failureWriter = new DataFileWriter<>(
            AvroUtil.newDatumWriter(outputSchema, Record.class));
    failureWriter.setCodec(getCodecFactory(context.getProperty(COMPRESSION_TYPE).getValue()));

    try {
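        // The counter must be an AtomicLong (not a local long) so the anonymous
        // StreamCallback below can update it; the result is read back with get().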
        final AtomicLong written = new AtomicLong(0L);
        final FailureTracker failures = new FailureTracker();

        final List<Record> badRecords = Lists.newLinkedList();
        FlowFile incomingAvroCopy = session.clone(incomingAvro);
        FlowFile outgoingAvro = session.write(incomingAvro, new StreamCallback() {
            @Override
            public void process(InputStream in, OutputStream out) throws IOException {
                try (DataFileStream<Record> stream = new DataFileStream<Record>(in,
                        new GenericDatumReader<Record>(converter.getInputSchema()))) {
                    try (DataFileWriter<Record> w = writer.create(outputSchema, out)) {
                        for (Record record : stream) {
                            try {
                                Record converted = converter.convert(record);
                                w.append(converted);
                                written.incrementAndGet();
                            } catch (AvroConversionException e) {
                                failures.add(e);
                                getLogger().error("Error converting data: " + e.getMessage());
                                badRecords.add(record);
                            }
                        }
                    }
                }
            }
        });

        FlowFile badOutput = session.write(incomingAvroCopy, new StreamCallback() {
            @Override
            public void process(InputStream in, OutputStream out) throws IOException {

                try (DataFileWriter<Record> w = failureWriter.create(inputSchema, out)) {
                    for (Record record : badRecords) {
                        w.append(record);
                    }
                }

            }
        });

        long errors = failures.count();

        // update only if file transfer is successful
        session.adjustCounter("Converted records", written.get(), false);
        // update only if file transfer is successful
        session.adjustCounter("Conversion errors", errors, false);

        if (written.get() > 0L) {
            session.transfer(outgoingAvro, SUCCESS);
        } else {
            session.remove(outgoingAvro);

            if (errors == 0L) {
                badOutput = session.putAttribute(badOutput, "errors", "No incoming records");
                session.transfer(badOutput, FAILURE);
            }
        }

        if (errors > 0L) {
            getLogger().warn("Failed to convert {}/{} records between Avro Schemas",
                    new Object[] { errors, errors + written.get() });
            badOutput = session.putAttribute(badOutput, "errors", failures.summary());
            session.transfer(badOutput, FAILURE);
        } else {
            session.remove(badOutput);
        }
    } catch (ProcessException | DatasetIOException e) {
        getLogger().error("Failed reading or writing", e);
        session.transfer(incomingAvro, FAILURE);
    } catch (DatasetException e) {
        getLogger().error("Failed to read FlowFile", e);
        session.transfer(incomingAvro, FAILURE);
    } finally {
        try {
            writer.close();
        } catch (IOException e) {
            getLogger().warn("Unable to close writer ressource", e);
        }
        try {
            failureWriter.close();
        } catch (IOException e) {
            getLogger().warn("Unable to close writer ressource", e);
        }
    }
}

From source file: io.pravega.segmentstore.server.writer.SegmentAggregatorTests.java

/**
 * Tests the flush() method only with Append operations.
 * Verifies both length-based and time-based flush triggers, as well as flushing rather large operations.
 */
@Test
public void testFlushAppend() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    final int appendCount = config.getFlushThresholdBytes() * 10;

    @Cleanup
    TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT, executorService()).join();

    @Cleanup
    ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
    AtomicLong outstandingSize = new AtomicLong(); // Number of bytes remaining to be flushed.
    SequenceNumberCalculator sequenceNumbers = new SequenceNumberCalculator(context, outstandingSize);

    // Part 1: flush triggered by accumulated size.
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        outstandingSize.addAndGet(appendOp.getLength());
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
        sequenceNumbers.record(appendOp);

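        // get() returns the bytes appended so far minus the bytes already flushed.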
        boolean expectFlush = outstandingSize.get() >= config.getFlushThresholdBytes();
        Assert.assertEquals("Unexpected value returned by mustFlush() (size threshold).", expectFlush,
                context.segmentAggregator.mustFlush());
        Assert.assertEquals(
                "Unexpected value returned by getLowestUncommittedSequenceNumber() before flush (size threshold).",
                sequenceNumbers.getLowestUncommitted(),
                context.segmentAggregator.getLowestUncommittedSequenceNumber());

        // Call flush() and inspect the result.
        FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT, executorService()).join();
        if (expectFlush) {
            AssertExtensions.assertGreaterThanOrEqual("Not enough bytes were flushed (size threshold).",
                    config.getFlushThresholdBytes(), flushResult.getFlushedBytes());
            outstandingSize.addAndGet(-flushResult.getFlushedBytes());
            Assert.assertEquals(
                    "Unexpected value returned by getLowestUncommittedSequenceNumber() after flush (size threshold).",
                    sequenceNumbers.getLowestUncommitted(),
                    context.segmentAggregator.getLowestUncommittedSequenceNumber());
        } else {
            Assert.assertEquals(String.format("Not expecting a flush. OutstandingSize=%s, Threshold=%d",
                    outstandingSize, config.getFlushThresholdBytes()), 0, flushResult.getFlushedBytes());
        }

        Assert.assertFalse("Unexpected value returned by mustFlush() after flush (size threshold).",
                context.segmentAggregator.mustFlush());
        Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
    }

    // Part 2: flush triggered by time.
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        outstandingSize.addAndGet(appendOp.getLength());
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
        sequenceNumbers.record(appendOp);

        // Call flush() and inspect the result.
        context.increaseTime(config.getFlushThresholdTime().toMillis() + 1); // Force a flush by incrementing the time by a lot.
        Assert.assertTrue("Unexpected value returned by mustFlush() (time threshold).",
                context.segmentAggregator.mustFlush());
        Assert.assertEquals(
                "Unexpected value returned by getLowestUncommittedSequenceNumber() before flush (time threshold).",
                sequenceNumbers.getLowestUncommitted(),
                context.segmentAggregator.getLowestUncommittedSequenceNumber());
        FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT, executorService()).join();

        // We are always expecting a flush.
        AssertExtensions.assertGreaterThan("Not enough bytes were flushed (time threshold).", 0,
                flushResult.getFlushedBytes());
        outstandingSize.addAndGet(-flushResult.getFlushedBytes());

        Assert.assertFalse("Unexpected value returned by mustFlush() after flush (time threshold).",
                context.segmentAggregator.mustFlush());
        Assert.assertEquals(
                "Unexpected value returned by getLowestUncommittedSequenceNumber() after flush (time threshold).",
                sequenceNumbers.getLowestUncommitted(),
                context.segmentAggregator.getLowestUncommittedSequenceNumber());
        Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
    }

    // Part 3: Transaction appends. This will force an internal loop inside flush() to run repeatedly.
    final int transactionSize = 100;
    for (int i = 0; i < appendCount / 10; i++) {
        for (int j = 0; j < transactionSize; j++) {
            // Add another operation and record its length.
            StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
            outstandingSize.addAndGet(appendOp.getLength());
            context.segmentAggregator.add(appendOp);
            getAppendData(appendOp, writtenData, context);
            sequenceNumbers.record(appendOp);
            Assert.assertEquals(
                    "Unexpected value returned by getLowestUncommittedSequenceNumber() before flush (Transaction appends).",
                    sequenceNumbers.getLowestUncommitted(),
                    context.segmentAggregator.getLowestUncommittedSequenceNumber());
        }

        // Call flush() and inspect the result.
        Assert.assertTrue("Unexpected value returned by mustFlush() (Transaction appends).",
                context.segmentAggregator.mustFlush());
        FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT, executorService()).join();

        // We are always expecting a flush.
        AssertExtensions.assertGreaterThan("Not enough bytes were flushed (Transaction appends).", 0,
                flushResult.getFlushedBytes());
        outstandingSize.addAndGet(-flushResult.getFlushedBytes());

        Assert.assertFalse("Unexpected value returned by mustFlush() after flush (Transaction appends).",
                context.segmentAggregator.mustFlush());
        Assert.assertEquals(
                "Unexpected value returned by getLowestUncommittedSequenceNumber() after flush (Transaction appends).",
                sequenceNumbers.getLowestUncommitted(),
                context.segmentAggregator.getLowestUncommittedSequenceNumber());
        Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
    }

    // Part 4: large appends (larger than MaxFlushSize).
    Random random = new Random();
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        byte[] largeAppendData = new byte[config.getMaxFlushSizeBytes() * 10 + 1];
        random.nextBytes(largeAppendData);
        StorageOperation appendOp = generateAppendAndUpdateMetadata(SEGMENT_ID, largeAppendData, context);
        outstandingSize.addAndGet(appendOp.getLength());
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
        sequenceNumbers.record(appendOp);

        // Call flush() and inspect the result.
        context.increaseTime(config.getFlushThresholdTime().toMillis() + 1); // Force a flush by incrementing the time by a lot.
        Assert.assertTrue("Unexpected value returned by mustFlush() (large appends).",
                context.segmentAggregator.mustFlush());
        Assert.assertEquals(
                "Unexpected value returned by getLowestUncommittedSequenceNumber() before flush (large appends).",
                sequenceNumbers.getLowestUncommitted(),
                context.segmentAggregator.getLowestUncommittedSequenceNumber());
        FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT, executorService()).join();

        // We are always expecting a flush.
        AssertExtensions.assertGreaterThan("Not enough bytes were flushed (large appends).", 0,
                flushResult.getFlushedBytes());
        outstandingSize.addAndGet(-flushResult.getFlushedBytes());

        Assert.assertFalse("Unexpected value returned by mustFlush() after flush (time threshold).",
                context.segmentAggregator.mustFlush());
        Assert.assertEquals(
                "Unexpected value returned by getLowestUncommittedSequenceNumber() after flush (large appends).",
                sequenceNumbers.getLowestUncommitted(),
                context.segmentAggregator.getLowestUncommittedSequenceNumber());
        Assert.assertEquals("Not expecting any merged bytes in this test (large appends).", 0,
                flushResult.getMergedBytes());
    }

    // Verify data.
    Assert.assertEquals("Not expecting leftover data not flushed.", 0, outstandingSize.get());
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    long storageLength = context.storage
            .getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join()
            .getLength();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageLength);
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0,
            actualData.length, TIMEOUT).join();

    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}

From source file: org.apache.nifi.processors.cassandra.QueryCassandra.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile fileToProcess = null;
    if (context.hasIncomingConnection()) {
        fileToProcess = session.get();

        // If we have no FlowFile, and all incoming connections are self-loops then we can continue on.
        // However, if we have no FlowFile and we have connections coming from other Processors, then
        // we know that we should run only if we have a FlowFile.
        if (fileToProcess == null && context.hasNonLoopConnection()) {
            return;
        }
    }

    final ComponentLog logger = getLogger();
    final String selectQuery = context.getProperty(CQL_SELECT_QUERY).evaluateAttributeExpressions(fileToProcess)
            .getValue();
    final long queryTimeout = context.getProperty(QUERY_TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS);
    final String outputFormat = context.getProperty(OUTPUT_FORMAT).getValue();
    final Charset charset = Charset.forName(context.getProperty(CHARSET).getValue());
    final StopWatch stopWatch = new StopWatch(true);

    if (fileToProcess == null) {
        fileToProcess = session.create();
    }

    try {
        // The documentation for the driver recommends the session remain open the entire time the processor is running
        // and states that it is thread-safe. This is why connectionSession is not in a try-with-resources.
        final Session connectionSession = cassandraSession.get();
        final ResultSetFuture queryFuture = connectionSession.executeAsync(selectQuery);
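        // nrOfRows is set inside the OutputStreamCallback and read back with get()
        // once session.write() has completed.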
        final AtomicLong nrOfRows = new AtomicLong(0L);

        fileToProcess = session.write(fileToProcess, new OutputStreamCallback() {
            @Override
            public void process(final OutputStream out) throws IOException {
                try {
                    logger.debug("Executing CQL query {}", new Object[] { selectQuery });
                    final ResultSet resultSet;
                    if (queryTimeout > 0) {
                        resultSet = queryFuture.getUninterruptibly(queryTimeout, TimeUnit.MILLISECONDS);
                        if (AVRO_FORMAT.equals(outputFormat)) {
                            nrOfRows.set(
                                    convertToAvroStream(resultSet, out, queryTimeout, TimeUnit.MILLISECONDS));
                        } else if (JSON_FORMAT.equals(outputFormat)) {
                            nrOfRows.set(convertToJsonStream(resultSet, out, charset, queryTimeout,
                                    TimeUnit.MILLISECONDS));
                        }
                    } else {
                        resultSet = queryFuture.getUninterruptibly();
                        if (AVRO_FORMAT.equals(outputFormat)) {
                            nrOfRows.set(convertToAvroStream(resultSet, out, 0, null));
                        } else if (JSON_FORMAT.equals(outputFormat)) {
                            nrOfRows.set(convertToJsonStream(resultSet, out, charset, 0, null));
                        }
                    }

                } catch (final TimeoutException | InterruptedException | ExecutionException e) {
                    throw new ProcessException(e);
                }
            }
        });

        // set attribute for how many rows were selected
        fileToProcess = session.putAttribute(fileToProcess, RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));

        logger.info("{} contains {} Avro records; transferring to 'success'",
                new Object[] { fileToProcess, nrOfRows.get() });
        session.getProvenanceReporter().modifyContent(fileToProcess, "Retrieved " + nrOfRows.get() + " rows",
                stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        session.transfer(fileToProcess, REL_SUCCESS);

    } catch (final NoHostAvailableException nhae) {
        getLogger().error(
                "No host in the Cassandra cluster can be contacted successfully to execute this query", nhae);
        // Log up to 10 error messages. Otherwise if a 1000-node cluster was specified but there was no connectivity,
        // a thousand error messages would be logged. However we would like information from Cassandra itself, so
        // cap the error limit at 10, format the messages, and don't include the stack trace (it is displayed by the
        // logger message above).
        getLogger().error(nhae.getCustomMessage(10, true, false));
        fileToProcess = session.penalize(fileToProcess);
        session.transfer(fileToProcess, REL_RETRY);

    } catch (final QueryExecutionException qee) {
        logger.error("Cannot execute the query with the requested consistency level successfully", qee);
        fileToProcess = session.penalize(fileToProcess);
        session.transfer(fileToProcess, REL_RETRY);

    } catch (final QueryValidationException qve) {
        if (context.hasIncomingConnection()) {
            logger.error(
                    "The CQL query {} is invalid due to syntax error, authorization issue, or another "
                            + "validation problem; routing {} to failure",
                    new Object[] { selectQuery, fileToProcess }, qve);
            fileToProcess = session.penalize(fileToProcess);
            session.transfer(fileToProcess, REL_FAILURE);
        } else {
            // This can happen if any exceptions occur while setting up the connection, statement, etc.
            logger.error("The CQL query {} is invalid due to syntax error, authorization issue, or another "
                    + "validation problem", new Object[] { selectQuery }, qve);
            session.remove(fileToProcess);
            context.yield();
        }
    } catch (final ProcessException e) {
        if (context.hasIncomingConnection()) {
            logger.error("Unable to execute CQL select query {} for {} due to {}; routing to failure",
                    new Object[] { selectQuery, fileToProcess, e });
            fileToProcess = session.penalize(fileToProcess);
            session.transfer(fileToProcess, REL_FAILURE);
        } else {
            logger.error("Unable to execute CQL select query {} due to {}", new Object[] { selectQuery, e });
            session.remove(fileToProcess);
            context.yield();
        }
    }
}

From source file: io.pravega.test.integration.selftest.Producer.java

/**
 * Executes the given operation.
 */
private CompletableFuture<Void> executeOperation(ProducerOperation operation) {
    CompletableFuture<Void> result;
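    // startTime is re-set immediately before each store call so that the latency
    // computed from get() at the end covers only the store operation itself.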
    final AtomicLong startTime = new AtomicLong(TIME_PROVIDER.get());
    if (operation.getType() == ProducerOperationType.CREATE_TRANSACTION) {
        // Create the Transaction, then record its name in the operation's result.
        StoreAdapter.Feature.Transaction.ensureSupported(this.store, "create transaction");
        startTime.set(TIME_PROVIDER.get());
        result = this.store.createTransaction(operation.getTarget(), this.config.getTimeout())
                .thenAccept(operation::setResult);
    } else if (operation.getType() == ProducerOperationType.MERGE_TRANSACTION) {
        // Merge the Transaction.
        StoreAdapter.Feature.Transaction.ensureSupported(this.store, "merge transaction");
        startTime.set(TIME_PROVIDER.get());
        result = this.store.mergeTransaction(operation.getTarget(), this.config.getTimeout());
    } else if (operation.getType() == ProducerOperationType.ABORT_TRANSACTION) {
        // Abort the Transaction.
        StoreAdapter.Feature.Transaction.ensureSupported(this.store, "abort transaction");
        startTime.set(TIME_PROVIDER.get());
        result = this.store.abortTransaction(operation.getTarget(), this.config.getTimeout());
    } else if (operation.getType() == ProducerOperationType.APPEND) {
        // Generate some random data, then append it.
        StoreAdapter.Feature.Append.ensureSupported(this.store, "append");
        Event event = this.dataSource.nextEvent(operation.getTarget(), this.id);
        operation.setLength(event.getSerialization().getLength());
        startTime.set(TIME_PROVIDER.get());
        result = this.store.append(operation.getTarget(), event, this.config.getTimeout());
    } else if (operation.getType() == ProducerOperationType.SEAL) {
        // Seal the target.
        StoreAdapter.Feature.Seal.ensureSupported(this.store, "seal");
        startTime.set(TIME_PROVIDER.get());
        result = this.store.seal(operation.getTarget(), this.config.getTimeout());
    } else {
        throw new IllegalArgumentException("Unsupported Operation Type: " + operation.getType());
    }

    return result.exceptionally(ex -> attemptReconcile(ex, operation)).thenRun(
            () -> operation.completed((TIME_PROVIDER.get() - startTime.get()) / AbstractTimer.NANOS_TO_MILLIS));
}

From source file: com.indeed.lsmtree.recordcache.PersistentRecordCache.java

/**
 * Performs lookup for multiple keys and returns a streaming iterator to results.
 * Each element in the iterator is one of
 *  (1) an exception associated with a single lookup
 *  (2) a key-value tuple
 *
 * @param keys      lookup keys
 * @param progress  (optional) an AtomicInteger for tracking progress
 * @param skipped   (optional) an AtomicInteger for tracking missing keys
 * @return          iterator of lookup results
 */
public Iterator<Either<Exception, P2<K, V>>> getStreaming(final @Nonnull Iterator<K> keys,
        final @Nullable AtomicInteger progress, final @Nullable AtomicInteger skipped) {
    log.info("starting store lookups");
    LongArrayList addressList = new LongArrayList();
    int notFound = 0;
    while (keys.hasNext()) {
        final K key = keys.next();
        final Long address;
        try {
            address = index.get(key);
        } catch (IOException e) {
            log.error("error", e);
            return Iterators.singletonIterator(Left.<Exception, P2<K, V>>of(new IndexReadException(e)));
        }
        if (address != null) {
            addressList.add(address);
        } else {
            notFound++;
        }
    }
    if (progress != null)
        progress.addAndGet(notFound);
    if (skipped != null)
        skipped.addAndGet(notFound);
    log.info("store lookups complete, sorting addresses");

    final long[] addresses = addressList.elements();
    Arrays.sort(addresses, 0, addressList.size());

    log.info("initializing store lookup iterator");
    final BlockingQueue<Runnable> taskQueue = new ArrayBlockingQueue<Runnable>(100);
    final Iterator<List<Long>> iterable = Iterators.partition(addressList.iterator(), 1000);
    final ExecutorService primerThreads = new ThreadPoolExecutor(10, 10, 0L, TimeUnit.MILLISECONDS, taskQueue,
            new NamedThreadFactory("store priming thread", true, log), new RejectedExecutionHandler() {
                @Override
                public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
                    try {
                        taskQueue.put(r);
                    } catch (InterruptedException e) {
                        log.error("error", e);
                        throw new RuntimeException(e);
                    }
                }
            });
    final BlockingQueue<List<Either<Exception, P2<K, V>>>> completionQueue = new ArrayBlockingQueue<List<Either<Exception, P2<K, V>>>>(
            10);
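    // runningTasks is incremented by the submitter thread and decremented by the
    // consumer; hasNext() polls it with get() to know when all lookups have drained.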
    final AtomicLong runningTasks = new AtomicLong(0);
    final AtomicBoolean taskSubmitterRunning = new AtomicBoolean(true);

    new Thread(new Runnable() {
        @Override
        public void run() {
            while (iterable.hasNext()) {
                runningTasks.incrementAndGet();
                final List<Long> addressesSublist = iterable.next();
                primerThreads.submit(new FutureTask<List<Either<Exception, P2<K, V>>>>(
                        new RecordLookupTask(addressesSublist)) {
                    @Override
                    protected void done() {
                        try {
                            final List<Either<Exception, P2<K, V>>> results = get();
                            if (progress != null) {
                                progress.addAndGet(results.size());
                            }
                            completionQueue.put(results);
                        } catch (InterruptedException e) {
                            log.error("error", e);
                            throw new RuntimeException(e);
                        } catch (ExecutionException e) {
                            log.error("error", e);
                            throw new RuntimeException(e);
                        }
                    }
                });
            }
            taskSubmitterRunning.set(false);
        }
    }, "RecordLookupTaskSubmitterThread").start();

    return new Iterator<Either<Exception, P2<K, V>>>() {

        Iterator<Either<Exception, P2<K, V>>> currentIterator;

        @Override
        public boolean hasNext() {
            if (currentIterator != null && currentIterator.hasNext())
                return true;
            while (taskSubmitterRunning.get() || runningTasks.get() > 0) {
                try {
                    final List<Either<Exception, P2<K, V>>> list = completionQueue.poll(1, TimeUnit.SECONDS);
                    if (list != null) {
                        log.debug("remaining: " + runningTasks.decrementAndGet());
                        currentIterator = list.iterator();
                        if (currentIterator.hasNext())
                            return true;
                    }
                } catch (InterruptedException e) {
                    log.error("error", e);
                    throw new RuntimeException(e);
                }
            }
            primerThreads.shutdown();
            return false;
        }

        @Override
        public Either<Exception, P2<K, V>> next() {
            return currentIterator.next();
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    };
}

From source file: org.apache.nifi.processors.standard.TailFile.java

private void processTailFile(final ProcessContext context, final ProcessSession session,
        final String tailFile) {
    // If user changes the file that is being tailed, we need to consume the already-rolled-over data according
    // to the Initial Start Position property
    boolean rolloverOccurred;
    TailFileObject tfo = states.get(tailFile);

    if (tfo.isTailFileChanged()) {
        rolloverOccurred = false;
        final String recoverPosition = context.getProperty(START_POSITION).getValue();

        if (START_BEGINNING_OF_TIME.getValue().equals(recoverPosition)) {
            recoverRolledFiles(context, session, tailFile, tfo.getExpectedRecoveryChecksum(),
                    tfo.getState().getTimestamp(), tfo.getState().getPosition());
        } else if (START_CURRENT_FILE.getValue().equals(recoverPosition)) {
            cleanup();
            tfo.setState(new TailFileState(tailFile, null, null, 0L, 0L, 0L, null, tfo.getState().getBuffer()));
        } else {
            final String filename = tailFile;
            final File file = new File(filename);

            try {
                final FileChannel fileChannel = FileChannel.open(file.toPath(), StandardOpenOption.READ);
                getLogger().debug("Created FileChannel {} for {}", new Object[] { fileChannel, file });

                final Checksum checksum = new CRC32();
                final long position = file.length();
                final long timestamp = file.lastModified();

                try (final InputStream fis = new FileInputStream(file);
                        final CheckedInputStream in = new CheckedInputStream(fis, checksum)) {
                    StreamUtils.copy(in, new NullOutputStream(), position);
                }

                fileChannel.position(position);
                cleanup();
                tfo.setState(new TailFileState(filename, file, fileChannel, position, timestamp, file.length(),
                        checksum, tfo.getState().getBuffer()));
            } catch (final IOException ioe) {
                getLogger().error(
                        "Attempted to position Reader at current position in file {} but failed to do so due to {}",
                        new Object[] { file, ioe.toString() }, ioe);
                context.yield();
                return;
            }
        }

        tfo.setTailFileChanged(false);
    } else {
        // Recover any data that may have rolled over since the last time that this processor ran.
        // If expectedRecoveryChecksum != null, that indicates that this is the first iteration since processor was started, so use whatever checksum value
        // was present when the state was last persisted. In this case, we must then null out the value so that the next iteration won't keep using the "recovered"
        // value. If the value is null, then we know that either the processor has already recovered that data, or there was no state persisted. In either case,
        // use whatever checksum value is currently in the state.
        Long expectedChecksumValue = tfo.getExpectedRecoveryChecksum();
        if (expectedChecksumValue == null) {
            expectedChecksumValue = tfo.getState().getChecksum() == null ? null
                    : tfo.getState().getChecksum().getValue();
        }

        rolloverOccurred = recoverRolledFiles(context, session, tailFile, expectedChecksumValue,
                tfo.getState().getTimestamp(), tfo.getState().getPosition());
        tfo.setExpectedRecoveryChecksum(null);
    }

    // initialize local variables from state object; this is done so that we can easily change the values throughout
    // the onTrigger method and then create a new state object after we finish processing the files.
    TailFileState state = tfo.getState();
    File file = state.getFile();
    FileChannel reader = state.getReader();
    Checksum checksum = state.getChecksum();
    if (checksum == null) {
        checksum = new CRC32();
    }
    long position = state.getPosition();
    long timestamp = state.getTimestamp();
    long length = state.getLength();

    // Create a reader if necessary.
    if (file == null || reader == null) {
        file = new File(tailFile);
        reader = createReader(file, position);
        if (reader == null) {
            context.yield();
            return;
        }
    }

    final long startNanos = System.nanoTime();

    // Check if file has rotated
    if (rolloverOccurred || (timestamp <= file.lastModified() && length > file.length())
            || (timestamp < file.lastModified() && length >= file.length())) {

        // Since file has rotated, we close the reader, create a new one, and then reset our state.
        try {
            reader.close();
            getLogger().debug("Closed FileChannel {}", new Object[] { reader, reader });
        } catch (final IOException ioe) {
            getLogger().warn("Failed to close reader for {} due to {}", new Object[] { file, ioe });
        }

        reader = createReader(file, 0L);
        position = 0L;
        checksum.reset();
    }

    if (file.length() == position || !file.exists()) {
        // no data to consume so rather than continually running, yield to allow other processors to use the thread.
        getLogger().debug("No data to consume; created no FlowFiles");
        tfo.setState(new TailFileState(tailFile, file, reader, position, timestamp, length, checksum,
                state.getBuffer()));
        persistState(tfo, context);
        context.yield();
        return;
    }

    // If there is data to consume, read as much as we can.
    final TailFileState currentState = state;
    final Checksum chksum = checksum;
    // data has been written to file. Stream it to a new FlowFile.
    FlowFile flowFile = session.create();

    final FileChannel fileReader = reader;
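    // The write callback stores the new file offset in positionHolder; it is read
    // back with get() after session.write() returns.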
    final AtomicLong positionHolder = new AtomicLong(position);
    flowFile = session.write(flowFile, new OutputStreamCallback() {
        @Override
        public void process(final OutputStream rawOut) throws IOException {
            try (final OutputStream out = new BufferedOutputStream(rawOut)) {
                positionHolder.set(readLines(fileReader, currentState.getBuffer(), out, chksum));
            }
        }
    });

    // If there ended up being no data, just remove the FlowFile
    if (flowFile.getSize() == 0) {
        session.remove(flowFile);
        getLogger().debug("No data to consume; removed created FlowFile");
    } else {
        // determine filename for FlowFile by using <base filename of log file>.<initial offset>-<final offset>.<extension>
        final String tailFilename = file.getName();
        final String baseName = StringUtils.substringBeforeLast(tailFilename, ".");
        final String flowFileName;
        if (baseName.length() < tailFilename.length()) {
            flowFileName = baseName + "." + position + "-" + positionHolder.get() + "."
                    + StringUtils.substringAfterLast(tailFilename, ".");
        } else {
            flowFileName = baseName + "." + position + "-" + positionHolder.get();
        }

        final Map<String, String> attributes = new HashMap<>(3);
        attributes.put(CoreAttributes.FILENAME.key(), flowFileName);
        attributes.put(CoreAttributes.MIME_TYPE.key(), "text/plain");
        attributes.put("tailfile.original.path", tailFile);
        flowFile = session.putAllAttributes(flowFile, attributes);

        session.getProvenanceReporter().receive(flowFile, file.toURI().toString(),
                "FlowFile contains bytes " + position + " through " + positionHolder.get() + " of source file",
                TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos));
        session.transfer(flowFile, REL_SUCCESS);
        position = positionHolder.get();

        // Set timestamp to the latest of when the file was modified and the current timestamp stored in the state.
        // We do this because when we read a file that has been rolled over, we set the state to 1 millisecond later than the last mod date
        // in order to avoid ingesting that file again. If we then read from this file during the same second (or millisecond, depending on the
        // operating system file last mod precision), then we could set the timestamp to a smaller value, which could result in reading in the
        // rotated file a second time.
        timestamp = Math.max(state.getTimestamp(), file.lastModified());
        length = file.length();
        getLogger().debug("Created {} and routed to success", new Object[] { flowFile });
    }

    // Create a new state object to represent our current position, timestamp, etc.
    tfo.setState(new TailFileState(tailFile, file, reader, position, timestamp, length, checksum,
            state.getBuffer()));

    // We must commit session before persisting state in order to avoid data loss on restart
    session.commit();
    persistState(tfo, context);
}

From source file: org.apache.tinkerpop.gremlin.structure.TransactionTest.java

@Test
@org.junit.Ignore("Ignoring this test for now. Perhaps it will have relevance later. see - https://github.org/apache/tinkerpop/tinkerpop3/issues/31")
@FeatureRequirement(featureClass = Graph.Features.VertexFeatures.class, feature = Graph.Features.VertexFeatures.FEATURE_ADD_VERTICES)
@FeatureRequirement(featureClass = Graph.Features.GraphFeatures.class, feature = Graph.Features.GraphFeatures.FEATURE_TRANSACTIONS)
public void shouldSupportTransactionIsolationWithSeparateThreads() throws Exception {
    // one thread modifies the graph and a separate thread reads before the transaction is committed.
    // the expectation is that the changes in the transaction are isolated to the thread that made the change
    // and the second thread should not see the change until commit() in the first thread.
    final CountDownLatch latchCommit = new CountDownLatch(1);
    final CountDownLatch latchFirstRead = new CountDownLatch(1);
    final CountDownLatch latchSecondRead = new CountDownLatch(1);

    final Thread threadMod = new Thread() {
        @Override
        public void run() {
            graph.addVertex();

            latchFirstRead.countDown();

            try {
                latchCommit.await();
            } catch (InterruptedException ie) {
                throw new RuntimeException(ie);
            }

            graph.tx().commit();

            latchSecondRead.countDown();
        }
    };

    threadMod.start();

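    // These AtomicLongs give the main thread a safe view of the counts recorded in
    // threadRead; the assertions below read them with get() after join().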
    final AtomicLong beforeCommitInOtherThread = new AtomicLong(0);
    final AtomicLong afterCommitInOtherThreadButBeforeRollbackInCurrentThread = new AtomicLong(0);
    final AtomicLong afterCommitInOtherThread = new AtomicLong(0);
    final Thread threadRead = new Thread() {
        @Override
        public void run() {
            try {
                latchFirstRead.await();
            } catch (InterruptedException ie) {
                throw new RuntimeException(ie);
            }

            // reading vertex before tx from other thread is committed...should have zero vertices
            beforeCommitInOtherThread.set(IteratorUtils.count(graph.vertices()));

            latchCommit.countDown();

            try {
                latchSecondRead.await();
            } catch (InterruptedException ie) {
                throw new RuntimeException(ie);
            }

            // tx in other thread is committed...should have one vertex.  rollback first to start a new tx
            // to get a fresh read given the commit
            afterCommitInOtherThreadButBeforeRollbackInCurrentThread.set(IteratorUtils.count(graph.vertices()));
            graph.tx().rollback();
            afterCommitInOtherThread.set(IteratorUtils.count(graph.vertices()));
        }
    };

    threadRead.start();

    threadMod.join();
    threadRead.join();

    assertEquals(0L, beforeCommitInOtherThread.get());
    assertEquals(0L, afterCommitInOtherThreadButBeforeRollbackInCurrentThread.get());
    assertEquals(1L, afterCommitInOtherThread.get());
}

From source file: org.apache.nifi.processors.hive.SelectHive_1_1QL.java

private void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile fileToProcess = (context.hasIncomingConnection() ? session.get() : null);
    FlowFile flowfile = null;

    // If we have no FlowFile, and all incoming connections are self-loops then we can continue on.
    // However, if we have no FlowFile and we have connections coming from other Processors, then
    // we know that we should run only if we have a FlowFile.
    if (context.hasIncomingConnection()) {
        if (fileToProcess == null && context.hasNonLoopConnection()) {
            return;
        }
    }

    final ComponentLog logger = getLogger();
    final Hive_1_1DBCPService dbcpService = context.getProperty(HIVE_DBCP_SERVICE)
            .asControllerService(Hive_1_1DBCPService.class);
    final Charset charset = Charset.forName(context.getProperty(CHARSET).getValue());

    List<String> preQueries = getQueries(
            context.getProperty(HIVEQL_PRE_QUERY).evaluateAttributeExpressions(fileToProcess).getValue());
    List<String> postQueries = getQueries(
            context.getProperty(HIVEQL_POST_QUERY).evaluateAttributeExpressions(fileToProcess).getValue());

    final boolean flowbased = !(context.getProperty(HIVEQL_SELECT_QUERY).isSet());

    // Source the SQL
    String hqlStatement;

    if (context.getProperty(HIVEQL_SELECT_QUERY).isSet()) {
        hqlStatement = context.getProperty(HIVEQL_SELECT_QUERY).evaluateAttributeExpressions(fileToProcess)
                .getValue();
    } else {
        // If the query is not set, then an incoming flow file is required, and expected to contain a valid SQL select query.
        // If there is no incoming connection, onTrigger will not be called as the processor will fail when scheduled.
        final StringBuilder queryContents = new StringBuilder();
        session.read(fileToProcess, in -> queryContents.append(IOUtils.toString(in, charset)));
        hqlStatement = queryContents.toString();
    }

    final Integer fetchSize = context.getProperty(FETCH_SIZE).evaluateAttributeExpressions(fileToProcess)
            .asInteger();
    final Integer maxRowsPerFlowFile = context.getProperty(MAX_ROWS_PER_FLOW_FILE)
            .evaluateAttributeExpressions(fileToProcess).asInteger();
    final Integer maxFragments = context.getProperty(MAX_FRAGMENTS).isSet()
            ? context.getProperty(MAX_FRAGMENTS).evaluateAttributeExpressions(fileToProcess).asInteger()
            : 0;
    final String outputFormat = context.getProperty(HIVEQL_OUTPUT_FORMAT).getValue();
    final boolean convertNamesForAvro = context.getProperty(NORMALIZE_NAMES_FOR_AVRO).asBoolean();
    final StopWatch stopWatch = new StopWatch(true);
    final boolean header = context.getProperty(HIVEQL_CSV_HEADER).asBoolean();
    final String altHeader = context.getProperty(HIVEQL_CSV_ALT_HEADER)
            .evaluateAttributeExpressions(fileToProcess).getValue();
    final String delimiter = context.getProperty(HIVEQL_CSV_DELIMITER)
            .evaluateAttributeExpressions(fileToProcess).getValue();
    final boolean quote = context.getProperty(HIVEQL_CSV_QUOTE).asBoolean();
    final boolean escape = context.getProperty(HIVEQL_CSV_HEADER).asBoolean();
    final String fragmentIdentifier = UUID.randomUUID().toString();

    try (final Connection con = dbcpService
            .getConnection(fileToProcess == null ? Collections.emptyMap() : fileToProcess.getAttributes());
            final Statement st = (flowbased ? con.prepareStatement(hqlStatement) : con.createStatement())) {
        Pair<String, SQLException> failure = executeConfigStatements(con, preQueries);
        if (failure != null) {
            // In case of failure, assign the config query to "hqlStatement" to follow the current error-handling path
            hqlStatement = failure.getLeft();
            flowfile = (fileToProcess == null) ? session.create() : fileToProcess;
            fileToProcess = null;
            throw failure.getRight();
        }
        if (fetchSize != null && fetchSize > 0) {
            try {
                st.setFetchSize(fetchSize);
            } catch (SQLException se) {
                // Not all drivers support this, just log the error (at debug level) and move on
                logger.debug("Cannot set fetch size to {} due to {}",
                        new Object[] { fetchSize, se.getLocalizedMessage() }, se);
            }
        }

        final List<FlowFile> resultSetFlowFiles = new ArrayList<>();
        try {
            logger.debug("Executing query {}", new Object[] { hqlStatement });
            if (flowbased) {
                // Hive JDBC Doesn't Support this yet:
                // ParameterMetaData pmd = ((PreparedStatement)st).getParameterMetaData();
                // int paramCount = pmd.getParameterCount();

                // Alternate way to determine number of params in SQL.
                int paramCount = StringUtils.countMatches(hqlStatement, "?");

                if (paramCount > 0) {
                    setParameters(1, (PreparedStatement) st, paramCount, fileToProcess.getAttributes());
                }
            }

            final ResultSet resultSet;

            try {
                resultSet = (flowbased ? ((PreparedStatement) st).executeQuery()
                        : st.executeQuery(hqlStatement));
            } catch (SQLException se) {
                // If an error occurs during the query, a flowfile is expected to be routed to failure, so ensure one here
                flowfile = (fileToProcess == null) ? session.create() : fileToProcess;
                fileToProcess = null;
                throw se;
            }

            int fragmentIndex = 0;
            String baseFilename = (fileToProcess != null)
                    ? fileToProcess.getAttribute(CoreAttributes.FILENAME.key())
                    : null;
            while (true) {
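                // A fresh counter for each fragment: the write callback sets the row
                // count, and get() is checked afterwards to decide routing.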
                final AtomicLong nrOfRows = new AtomicLong(0L);
                flowfile = (fileToProcess == null) ? session.create() : session.create(fileToProcess);
                if (baseFilename == null) {
                    baseFilename = flowfile.getAttribute(CoreAttributes.FILENAME.key());
                }
                try {
                    flowfile = session.write(flowfile, out -> {
                        try {
                            if (AVRO.equals(outputFormat)) {
                                nrOfRows.set(HiveJdbcCommon.convertToAvroStream(resultSet, out,
                                        maxRowsPerFlowFile, convertNamesForAvro));
                            } else if (CSV.equals(outputFormat)) {
                                CsvOutputOptions options = new CsvOutputOptions(header, altHeader, delimiter,
                                        quote, escape, maxRowsPerFlowFile);
                                nrOfRows.set(HiveJdbcCommon.convertToCsvStream(resultSet, out, options));
                            } else {
                                nrOfRows.set(0L);
                                throw new ProcessException("Unsupported output format: " + outputFormat);
                            }
                        } catch (final SQLException | RuntimeException e) {
                            throw new ProcessException("Error during database query or conversion of records.",
                                    e);
                        }
                    });
                } catch (ProcessException e) {
                    // Add flowfile to results before rethrowing so it will be removed from session in outer catch
                    resultSetFlowFiles.add(flowfile);
                    throw e;
                }

                if (nrOfRows.get() > 0 || resultSetFlowFiles.isEmpty()) {
                    final Map<String, String> attributes = new HashMap<>();
                    // Set attribute for how many rows were selected
                    attributes.put(RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));

                    try {
                        // Set input/output table names by parsing the query
                        attributes.putAll(toQueryTableAttributes(findTableNames(hqlStatement)));
                    } catch (Exception e) {
                        // If failed to parse the query, just log a warning message, but continue.
                        getLogger().warn("Failed to parse query: {} due to {}",
                                new Object[] { hqlStatement, e }, e);
                    }

                    // Set MIME type on output document and add extension to filename
                    if (AVRO.equals(outputFormat)) {
                        attributes.put(CoreAttributes.MIME_TYPE.key(), MIME_TYPE_AVRO_BINARY);
                        attributes.put(CoreAttributes.FILENAME.key(),
                                baseFilename + "." + fragmentIndex + ".avro");
                    } else if (CSV.equals(outputFormat)) {
                        attributes.put(CoreAttributes.MIME_TYPE.key(), CSV_MIME_TYPE);
                        attributes.put(CoreAttributes.FILENAME.key(),
                                baseFilename + "." + fragmentIndex + ".csv");
                    }

                    if (maxRowsPerFlowFile > 0) {
                        attributes.put("fragment.identifier", fragmentIdentifier);
                        attributes.put("fragment.index", String.valueOf(fragmentIndex));
                    }

                    flowfile = session.putAllAttributes(flowfile, attributes);

                    logger.info("{} contains {} " + outputFormat + " records; transferring to 'success'",
                            new Object[] { flowfile, nrOfRows.get() });

                    if (context.hasIncomingConnection()) {
                        // If the flow file came from an incoming connection, issue a Fetch provenance event
                        session.getProvenanceReporter().fetch(flowfile, dbcpService.getConnectionURL(),
                                "Retrieved " + nrOfRows.get() + " rows",
                                stopWatch.getElapsed(TimeUnit.MILLISECONDS));
                    } else {
                        // If we created a flow file from rows received from Hive, issue a Receive provenance event
                        session.getProvenanceReporter().receive(flowfile, dbcpService.getConnectionURL(),
                                stopWatch.getElapsed(TimeUnit.MILLISECONDS));
                    }
                    resultSetFlowFiles.add(flowfile);
                } else {
                    // If there were no rows returned (and the first flow file has already been sent), we're done processing, so remove the flowfile and carry on
                    session.remove(flowfile);
                    if (resultSetFlowFiles != null && resultSetFlowFiles.size() > 0) {
                        flowfile = resultSetFlowFiles.get(resultSetFlowFiles.size() - 1);
                    }
                    break;
                }

                fragmentIndex++;
                if (maxFragments > 0 && fragmentIndex >= maxFragments) {
                    break;
                }
            }

            for (int i = 0; i < resultSetFlowFiles.size(); i++) {
                // Set count on all FlowFiles
                if (maxRowsPerFlowFile > 0) {
                    resultSetFlowFiles.set(i, session.putAttribute(resultSetFlowFiles.get(i), "fragment.count",
                            Integer.toString(fragmentIndex)));
                }
            }

        } catch (final SQLException e) {
            throw e;
        }

        failure = executeConfigStatements(con, postQueries);
        if (failure != null) {
            hqlStatement = failure.getLeft();
            if (resultSetFlowFiles != null) {
                resultSetFlowFiles.forEach(ff -> session.remove(ff));
            }
            flowfile = (fileToProcess == null) ? session.create() : fileToProcess;
            fileToProcess = null;
            throw failure.getRight();
        }

        session.transfer(resultSetFlowFiles, REL_SUCCESS);
        if (fileToProcess != null) {
            session.remove(fileToProcess);
        }
    } catch (final ProcessException | SQLException e) {
        logger.error("Issue processing SQL {} due to {}.", new Object[] { hqlStatement, e });
        if (flowfile == null) {
            // This can happen if any exceptions occur while setting up the connection, statement, etc.
            logger.error("Unable to execute HiveQL select query {} due to {}. No FlowFile to route to failure",
                    new Object[] { hqlStatement, e });
            context.yield();
        } else {
            if (context.hasIncomingConnection()) {
                logger.error("Unable to execute HiveQL select query {} for {} due to {}; routing to failure",
                        new Object[] { hqlStatement, flowfile, e });
                flowfile = session.penalize(flowfile);
            } else {
                logger.error("Unable to execute HiveQL select query {} due to {}; routing to failure",
                        new Object[] { hqlStatement, e });
                context.yield();
            }
            session.transfer(flowfile, REL_FAILURE);
        }
    }
}

From source file: org.apache.nifi.processors.hive.SelectHive3QL.java

private void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile fileToProcess = (context.hasIncomingConnection() ? session.get() : null);
    FlowFile flowfile = null;

    // If we have no FlowFile, and all incoming connections are self-loops then we can continue on.
    // However, if we have no FlowFile and we have connections coming from other Processors, then
    // we know that we should run only if we have a FlowFile.
    if (context.hasIncomingConnection()) {
        if (fileToProcess == null && context.hasNonLoopConnection()) {
            return;
        }
    }

    final ComponentLog logger = getLogger();
    final Hive3DBCPService dbcpService = context.getProperty(HIVE_DBCP_SERVICE)
            .asControllerService(Hive3DBCPService.class);
    final Charset charset = Charset.forName(context.getProperty(CHARSET).getValue());

    List<String> preQueries = getQueries(
            context.getProperty(HIVEQL_PRE_QUERY).evaluateAttributeExpressions(fileToProcess).getValue());
    List<String> postQueries = getQueries(
            context.getProperty(HIVEQL_POST_QUERY).evaluateAttributeExpressions(fileToProcess).getValue());

    final boolean flowbased = !(context.getProperty(HIVEQL_SELECT_QUERY).isSet());

    // Source the SQL
    String hqlStatement;

    if (context.getProperty(HIVEQL_SELECT_QUERY).isSet()) {
        hqlStatement = context.getProperty(HIVEQL_SELECT_QUERY).evaluateAttributeExpressions(fileToProcess)
                .getValue();
    } else {
        // If the query is not set, then an incoming flow file is required, and expected to contain a valid SQL select query.
        // If there is no incoming connection, onTrigger will not be called as the processor will fail when scheduled.
        final StringBuilder queryContents = new StringBuilder();
        session.read(fileToProcess, in -> queryContents.append(IOUtils.toString(in, charset)));
        hqlStatement = queryContents.toString();
    }

    final Integer fetchSize = context.getProperty(FETCH_SIZE).evaluateAttributeExpressions(fileToProcess)
            .asInteger();
    final Integer maxRowsPerFlowFile = context.getProperty(MAX_ROWS_PER_FLOW_FILE)
            .evaluateAttributeExpressions(fileToProcess).asInteger();
    final Integer maxFragments = context.getProperty(MAX_FRAGMENTS).isSet()
            ? context.getProperty(MAX_FRAGMENTS).evaluateAttributeExpressions(fileToProcess).asInteger()
            : 0;
    final String outputFormat = context.getProperty(HIVEQL_OUTPUT_FORMAT).getValue();
    final boolean convertNamesForAvro = context.getProperty(NORMALIZE_NAMES_FOR_AVRO).asBoolean();
    final StopWatch stopWatch = new StopWatch(true);
    final boolean header = context.getProperty(HIVEQL_CSV_HEADER).asBoolean();
    final String altHeader = context.getProperty(HIVEQL_CSV_ALT_HEADER)
            .evaluateAttributeExpressions(fileToProcess).getValue();
    final String delimiter = context.getProperty(HIVEQL_CSV_DELIMITER)
            .evaluateAttributeExpressions(fileToProcess).getValue();
    final boolean quote = context.getProperty(HIVEQL_CSV_QUOTE).asBoolean();
    final boolean escape = context.getProperty(HIVEQL_CSV_ESCAPE).asBoolean();
    final String fragmentIdentifier = UUID.randomUUID().toString();

    try (final Connection con = dbcpService
            .getConnection(fileToProcess == null ? Collections.emptyMap() : fileToProcess.getAttributes());
            final Statement st = (flowbased ? con.prepareStatement(hqlStatement) : con.createStatement())) {
        Pair<String, SQLException> failure = executeConfigStatements(con, preQueries);
        if (failure != null) {
            // In case of failure, assign the config query to "hqlStatement" to follow the current error handling
            hqlStatement = failure.getLeft();
            flowfile = (fileToProcess == null) ? session.create() : fileToProcess;
            fileToProcess = null;
            throw failure.getRight();
        }
        st.setQueryTimeout(
                context.getProperty(QUERY_TIMEOUT).evaluateAttributeExpressions(fileToProcess).asInteger());

        if (fetchSize != null && fetchSize > 0) {
            try {
                st.setFetchSize(fetchSize);
            } catch (SQLException se) {
                // Not all drivers support this, just log the error (at debug level) and move on
                logger.debug("Cannot set fetch size to {} due to {}",
                        new Object[] { fetchSize, se.getLocalizedMessage() }, se);
            }
        }

        final List<FlowFile> resultSetFlowFiles = new ArrayList<>();
        try {
            logger.debug("Executing query {}", new Object[] { hqlStatement });
            if (flowbased) {
                // Hive JDBC doesn't support this yet:
                // ParameterMetaData pmd = ((PreparedStatement)st).getParameterMetaData();
                // int paramCount = pmd.getParameterCount();

                // Alternate way to determine number of params in SQL.
                int paramCount = StringUtils.countMatches(hqlStatement, "?");

                if (paramCount > 0) {
                    setParameters(1, (PreparedStatement) st, paramCount, fileToProcess.getAttributes());
                }
            }

            final ResultSet resultSet;

            try {
                resultSet = (flowbased ? ((PreparedStatement) st).executeQuery()
                        : st.executeQuery(hqlStatement));
            } catch (SQLException se) {
                // If an error occurs during the query, a flowfile is expected to be routed to failure, so ensure one here
                flowfile = (fileToProcess == null) ? session.create() : fileToProcess;
                fileToProcess = null;
                throw se;
            }

            int fragmentIndex = 0;
            String baseFilename = (fileToProcess != null)
                    ? fileToProcess.getAttribute(CoreAttributes.FILENAME.key())
                    : null;
            while (true) {
                final AtomicLong nrOfRows = new AtomicLong(0L);
                flowfile = (fileToProcess == null) ? session.create() : session.create(fileToProcess);
                if (baseFilename == null) {
                    baseFilename = flowfile.getAttribute(CoreAttributes.FILENAME.key());
                }
                try {
                    flowfile = session.write(flowfile, out -> {
                        try {
                            if (AVRO.equals(outputFormat)) {
                                nrOfRows.set(HiveJdbcCommon.convertToAvroStream(resultSet, out,
                                        maxRowsPerFlowFile, convertNamesForAvro));
                            } else if (CSV.equals(outputFormat)) {
                                CsvOutputOptions options = new CsvOutputOptions(header, altHeader, delimiter,
                                        quote, escape, maxRowsPerFlowFile);
                                nrOfRows.set(HiveJdbcCommon.convertToCsvStream(resultSet, out, options));
                            } else {
                                nrOfRows.set(0L);
                                throw new ProcessException("Unsupported output format: " + outputFormat);
                            }
                        } catch (final SQLException | RuntimeException e) {
                            throw new ProcessException("Error during database query or conversion of records.",
                                    e);
                        }
                    });
                } catch (ProcessException e) {
                    // Add flowfile to results before rethrowing so it will be removed from session in outer catch
                    resultSetFlowFiles.add(flowfile);
                    throw e;
                }

                if (nrOfRows.get() > 0 || resultSetFlowFiles.isEmpty()) {
                    final Map<String, String> attributes = new HashMap<>();
                    // Set attribute for how many rows were selected
                    attributes.put(RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));

                    try {
                        // Set input/output table names by parsing the query
                        attributes.putAll(toQueryTableAttributes(findTableNames(hqlStatement)));
                    } catch (Exception e) {
                        // If failed to parse the query, just log a warning message, but continue.
                        getLogger().warn("Failed to parse query: {} due to {}",
                                new Object[] { hqlStatement, e }, e);
                    }

                    // Set MIME type on output document and add extension to filename
                    if (AVRO.equals(outputFormat)) {
                        attributes.put(CoreAttributes.MIME_TYPE.key(), MIME_TYPE_AVRO_BINARY);
                        attributes.put(CoreAttributes.FILENAME.key(),
                                baseFilename + "." + fragmentIndex + ".avro");
                    } else if (CSV.equals(outputFormat)) {
                        attributes.put(CoreAttributes.MIME_TYPE.key(), CSV_MIME_TYPE);
                        attributes.put(CoreAttributes.FILENAME.key(),
                                baseFilename + "." + fragmentIndex + ".csv");
                    }

                    if (maxRowsPerFlowFile > 0) {
                        attributes.put("fragment.identifier", fragmentIdentifier);
                        attributes.put("fragment.index", String.valueOf(fragmentIndex));
                    }

                    flowfile = session.putAllAttributes(flowfile, attributes);

                    logger.info("{} contains {} " + outputFormat + " records; transferring to 'success'",
                            new Object[] { flowfile, nrOfRows.get() });

                    if (context.hasIncomingConnection()) {
                        // If the flow file came from an incoming connection, issue a Fetch provenance event
                        session.getProvenanceReporter().fetch(flowfile, dbcpService.getConnectionURL(),
                                "Retrieved " + nrOfRows.get() + " rows",
                                stopWatch.getElapsed(TimeUnit.MILLISECONDS));
                    } else {
                        // If we created a flow file from rows received from Hive, issue a Receive provenance event
                        session.getProvenanceReporter().receive(flowfile, dbcpService.getConnectionURL(),
                                stopWatch.getElapsed(TimeUnit.MILLISECONDS));
                    }
                    resultSetFlowFiles.add(flowfile);
                } else {
                    // If there were no rows returned (and the first flow file has been sent), we're done processing, so remove the flowfile and carry on
                    session.remove(flowfile);
                    if (!resultSetFlowFiles.isEmpty()) {
                        flowfile = resultSetFlowFiles.get(resultSetFlowFiles.size() - 1);
                    }
                    break;
                }

                fragmentIndex++;
                if (maxFragments > 0 && fragmentIndex >= maxFragments) {
                    break;
                }
            }

            for (int i = 0; i < resultSetFlowFiles.size(); i++) {
                // Set count on all FlowFiles
                if (maxRowsPerFlowFile > 0) {
                    resultSetFlowFiles.set(i, session.putAttribute(resultSetFlowFiles.get(i), "fragment.count",
                            Integer.toString(fragmentIndex)));
                }
            }

        } catch (final SQLException e) {
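            // Rethrow so the outer catch can log the error and route the FlowFile to failure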
            throw e;
        }

        failure = executeConfigStatements(con, postQueries);
        if (failure != null) {
            hqlStatement = failure.getLeft();
            resultSetFlowFiles.forEach(session::remove);
            flowfile = (fileToProcess == null) ? session.create() : fileToProcess;
            fileToProcess = null;
            throw failure.getRight();
        }

        session.transfer(resultSetFlowFiles, REL_SUCCESS);
        if (fileToProcess != null) {
            session.remove(fileToProcess);
        }

    } catch (final ProcessException | SQLException e) {
        logger.error("Issue processing SQL {} due to {}.", new Object[] { hqlStatement, e });
        if (flowfile == null) {
            // This can happen if any exceptions occur while setting up the connection, statement, etc.
            logger.error("Unable to execute HiveQL select query {} due to {}. No FlowFile to route to failure",
                    new Object[] { hqlStatement, e });
            context.yield();
        } else {
            if (context.hasIncomingConnection()) {
                logger.error("Unable to execute HiveQL select query {} for {} due to {}; routing to failure",
                        new Object[] { hqlStatement, flowfile, e });
                flowfile = session.penalize(flowfile);
            } else {
                logger.error("Unable to execute HiveQL select query {} due to {}; routing to failure",
                        new Object[] { hqlStatement, e });
                context.yield();
            }
            session.transfer(flowfile, REL_FAILURE);
        }
    }
}
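
In the Hive select examples above, the AtomicLong is an effectively-final holder: the row count is computed inside the session.write() callback with set() and read back outside it with get(). A lambda cannot assign to a plain local variable, so the atomic wrapper is what carries the value out of the callback. The following is a minimal standalone sketch of the same idiom; the StreamCallback interface, write() helper, and sample data are hypothetical stand-ins for the NiFi API, not part of it.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.atomic.AtomicLong;

public class CallbackRowCountExample {

    // Hypothetical stand-in for a callback-based API such as NiFi's session.write()
    interface StreamCallback {
        void process(InputStream in) throws IOException;
    }

    static void write(InputStream in, StreamCallback callback) throws IOException {
        callback.process(in);
    }

    public static void main(String[] args) throws IOException {
        // A lambda may only capture effectively-final locals, so a plain long
        // cannot be assigned inside the callback; a final AtomicLong holder can.
        final AtomicLong nrOfRows = new AtomicLong(0L);

        InputStream data = new ByteArrayInputStream("row1\nrow2\nrow3\n".getBytes());
        write(data, in -> {
            long rows = 0;
            int b;
            while ((b = in.read()) != -1) {
                if (b == '\n') {
                    rows++; // count newline-terminated "rows"
                }
            }
            nrOfRows.set(rows);
        });

        // get() returns the value set inside the callback, with volatile read semantics
        System.out.println("Rows processed: " + nrOfRows.get()); // prints 3
    }
}

Because the callback runs on the calling thread here, a one-element long[] would also work; AtomicLong makes the intent explicit and additionally gives volatile visibility if the value were ever set and read on different threads.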

From source file:org.apache.activemq.usecases.NetworkBridgeProducerFlowControlTest.java

public void doTestSendFailIfNoSpaceDoesNotBlockNetwork(ActiveMQDestination slowDestination,
        ActiveMQDestination fastDestination) throws Exception {

    final int NUM_MESSAGES = 100;
    final long TEST_MESSAGE_SIZE = 1024;
    final long SLOW_CONSUMER_DELAY_MILLIS = 100;

    // Start a local and a remote broker.
    createBroker(new URI("broker:(tcp://localhost:0" + ")?brokerName=broker0&persistent=false&useJmx=true"));
    BrokerService remoteBroker = createBroker(
            new URI("broker:(tcp://localhost:0" + ")?brokerName=broker1&persistent=false&useJmx=true"));
    remoteBroker.getSystemUsage().setSendFailIfNoSpace(true);

    // Set a policy on the remote broker that limits the maximum size of the
    // slow shared queue.
    PolicyEntry policyEntry = new PolicyEntry();
    policyEntry.setMemoryLimit(5 * TEST_MESSAGE_SIZE);
    PolicyMap policyMap = new PolicyMap();
    policyMap.put(slowDestination, policyEntry);
    remoteBroker.setDestinationPolicy(policyMap);

    // Create an outbound bridge from the local broker to the remote broker.
    // The bridge is configured with the remoteDispatchType enhancement.
    NetworkConnector nc = bridgeBrokers("broker0", "broker1");
    nc.setAlwaysSyncSend(true);
    nc.setPrefetchSize(1);

    startAllBrokers();
    waitForBridgeFormation();

    // Start two asynchronous consumers on the remote broker, one for each
    // of the two shared queues, and keep track of how long it takes for
    // each of the consumers to receive all the messages.
    final CountDownLatch fastConsumerLatch = new CountDownLatch(NUM_MESSAGES);
    final CountDownLatch slowConsumerLatch = new CountDownLatch(NUM_MESSAGES);

    final long startTimeMillis = System.currentTimeMillis();
    final AtomicLong fastConsumerTime = new AtomicLong();
    final AtomicLong slowConsumerTime = new AtomicLong();

    Thread fastWaitThread = new Thread() {
        @Override
        public void run() {
            try {
                fastConsumerLatch.await();
                fastConsumerTime.set(System.currentTimeMillis() - startTimeMillis);
            } catch (InterruptedException ex) {
                exceptions.add(ex);
                Assert.fail(ex.getMessage());
            }
        }
    };

    Thread slowWaitThread = new Thread() {
        @Override
        public void run() {
            try {
                slowConsumerLatch.await();
                slowConsumerTime.set(System.currentTimeMillis() - startTimeMillis);
            } catch (InterruptedException ex) {
                exceptions.add(ex);
                Assert.fail(ex.getMessage());
            }
        }
    };

    fastWaitThread.start();
    slowWaitThread.start();

    createConsumer("broker1", fastDestination, fastConsumerLatch);
    MessageConsumer slowConsumer = createConsumer("broker1", slowDestination, slowConsumerLatch);
    MessageIdList messageIdList = brokers.get("broker1").consumers.get(slowConsumer);
    messageIdList.setProcessingDelay(SLOW_CONSUMER_DELAY_MILLIS);

    // Send the test messages to the local broker's shared queues. The
    // messages are either persistent or non-persistent to demonstrate the
    // difference between synchronous and asynchronous dispatch.
    persistentDelivery = false;
    sendMessages("broker0", fastDestination, NUM_MESSAGES);
    sendMessages("broker0", slowDestination, NUM_MESSAGES);

    fastWaitThread.join(TimeUnit.SECONDS.toMillis(60));
    slowWaitThread.join(TimeUnit.SECONDS.toMillis(60));

    assertTrue("no exceptions on the wait threads:" + exceptions, exceptions.isEmpty());

    LOG.info("Fast consumer duration (ms): " + fastConsumerTime.get());
    LOG.info("Slow consumer duration (ms): " + slowConsumerTime.get());

    assertTrue("fast time set", fastConsumerTime.get() > 0);
    assertTrue("slow time set", slowConsumerTime.get() > 0);

    // Verify the behaviour described in this class's documentation: the fast
    // consumer should finish at least 10x faster than the slow consumer.
    Assert.assertTrue(fastConsumerTime.get() < slowConsumerTime.get() / 10);
}
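
The test above uses AtomicLong for a cross-thread handoff: each waiter thread records its elapsed time with set(), and the main thread reads it with get() after join(). The volatile semantics of get(), together with the happens-before edge established by join(), guarantee the main thread sees the recorded value. Below is a minimal sketch of that handoff; the class name, latch count, and sleep duration are arbitrary choices for illustration.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

public class ConsumerTimingExample {
    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch consumerLatch = new CountDownLatch(1);
        final long startTimeMillis = System.currentTimeMillis();
        final AtomicLong consumerTime = new AtomicLong();

        // Waiter thread: blocks until the latch opens, then records the duration
        Thread waitThread = new Thread(() -> {
            try {
                consumerLatch.await();
                consumerTime.set(System.currentTimeMillis() - startTimeMillis);
            } catch (InterruptedException ex) {
                Thread.currentThread().interrupt();
            }
        });
        waitThread.start();

        // Simulate the consumer finishing its work after a short delay
        TimeUnit.MILLISECONDS.sleep(100);
        consumerLatch.countDown();

        waitThread.join(TimeUnit.SECONDS.toMillis(60));

        // get() performs a volatile read, so the value set by the waiter
        // thread is visible here
        System.out.println("Consumer duration (ms): " + consumerTime.get());
    }
}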