Example usage for java.sql SQLException getLocalizedMessage

Introduction

On this page you can find example usages of java.sql.SQLException#getLocalizedMessage, collected from open-source projects.

Prototype

public String getLocalizedMessage() 

Document

Creates a localized description of this throwable. Subclasses may override this method to produce a locale-specific message; the default implementation returns the same result as getMessage().
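
Before the project examples below, here is a minimal, self-contained sketch of the basic pattern: catch the SQLException and report getLocalizedMessage() together with getSQLState() and getErrorCode(). The in-memory H2 URL, the class name and the deliberately failing query are illustrative placeholders (they assume an H2 driver on the classpath); any JDBC data source behaves the same way.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class LocalizedMessageExample {

    public static void main(String[] args) {
        // Placeholder connection: an in-memory H2 database (assumes the H2 driver is on the classpath).
        final String url = "jdbc:h2:mem:demo";
        try (Connection con = DriverManager.getConnection(url); Statement st = con.createStatement()) {
            // Querying a table that does not exist forces the driver to raise an SQLException.
            st.executeQuery("SELECT * FROM no_such_table");
        } catch (SQLException ex) {
            // getLocalizedMessage() gives the driver's (possibly locale-specific) description;
            // getSQLState() and getErrorCode() add the standard and vendor-specific codes.
            System.err.println(ex.getSQLState() + " - " + ex.getErrorCode() + " - " + ex.getLocalizedMessage());
        }
    }
}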

Usage

From source file: broadwick.data.readers.DataFileReader.java

/**
 * Perform the insertion into the database.
 * @param connection      the connection to the database.
 * @param tableName       the name of the table into which the data will be put.
 * @param insertString    the command used to insert a row into the database.
 * @param dataFile        the CSV file that contains the data.
 * @param dateFormat      the format of the date in the file.
 * @param insertedColInfo a map of column name to column in the data file.
 * @param dateFields      a collection of columns in the CSV file that contain date fields.
 * @return the number of rows inserted.
 */
protected final int insert(final Connection connection, final String tableName, final String insertString,
        final String dataFile, final String dateFormat, final Map<String, Integer> insertedColInfo,
        final Collection<Integer> dateFields) {

    int inserted = 0;
    try {
        // Now do the insertion.
        log.trace("Inserting into {} via {}", tableName, insertString);
        PreparedStatement pstmt = connection.prepareStatement(insertString);
        log.trace("Prepared statement = {}", pstmt.toString());

        try (FileInput instance = new FileInput(dataFile, ",")) {
            final StopWatch sw = new StopWatch();
            sw.start();
            List<String> data = instance.readLine();
            while (data != null && !data.isEmpty()) {
                int parameterIndex = 1;
                for (Map.Entry<String, Integer> entry : insertedColInfo.entrySet()) {
                    if (entry.getValue() == -1) {
                        pstmt.setObject(parameterIndex, null);
                    } else {
                        final String value = data.get(entry.getValue() - 1);
                        if (dateFields.contains(entry.getValue())) {
                            int dateField = Integer.MAX_VALUE;
                            if (value != null && !value.isEmpty()) {
                                dateField = BroadwickConstants.getDate(value, dateFormat);
                            }
                            pstmt.setObject(parameterIndex, dateField);
                        } else {
                            pstmt.setObject(parameterIndex, value);
                        }
                    }
                    parameterIndex++;
                }
                pstmt.addBatch();
                try {
                    pstmt.executeUpdate();
                    inserted++;
                } catch (SQLException ex) {
                    if ("23505".equals(ex.getSQLState())) {
                        // The database flagged a duplicate key: log it, skip this row and move on
                        // to the next line of the file.
                        log.warn("Duplicate data found for {}: continuing despite errors: {}", data.get(0),
                                ex.getLocalizedMessage());
                        log.trace("{}", Throwables.getStackTraceAsString(ex));
                        data = instance.readLine();
                        continue;
                    } else {
                        log.trace("{}", Throwables.getStackTraceAsString(ex));
                        throw ex;
                    }
                }
                if (inserted % 250000 == 0) {
                    log.trace("Inserted {} rows in {}", inserted, sw.toString());
                    connection.commit();
                    pstmt.close();
                    pstmt = connection.prepareStatement(insertString);
                }

                data = instance.readLine();
            }
            connection.commit();

        } catch (IOException ex) {
            log.error("IO error : {}", ex.getLocalizedMessage());
            log.trace("{}", Throwables.getStackTraceAsString(ex));
        } catch (SQLException ex) {
            log.error("SQL Error : {}", ex.getLocalizedMessage());
            log.trace("{}", Throwables.getStackTraceAsString(ex));
            throw ex;
        } finally {
            pstmt.close();
        }
    } catch (SQLException ex) {
        log.error("{}", ex.getLocalizedMessage());
        log.trace("{}", Throwables.getStackTraceAsString(ex));
        throw new BroadwickException(ex);
    }

    return inserted;
}
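
A reduced sketch of the duplicate-handling idiom used above: inspect getSQLState() before deciding whether the error is merely reported via getLocalizedMessage() or rethrown. The table, column and class names are hypothetical and only serve to isolate the pattern.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

public class DuplicateAwareInsert {

    // SQLState 23505 is the standard code for a unique-constraint violation.
    private static final String DUPLICATE_KEY_STATE = "23505";

    /**
     * Tries to insert one row and returns true if it was inserted, false if the row
     * already existed. Any other failure is rethrown after its localized message is logged.
     */
    public static boolean insertIgnoringDuplicates(final Connection connection, final String id)
            throws SQLException {
        // "demo_table" and its single "id" column are hypothetical.
        try (PreparedStatement pstmt = connection.prepareStatement("INSERT INTO demo_table (id) VALUES (?)")) {
            pstmt.setString(1, id);
            pstmt.executeUpdate();
            return true;
        } catch (SQLException ex) {
            if (DUPLICATE_KEY_STATE.equals(ex.getSQLState())) {
                System.err.println("Duplicate row " + id + ": " + ex.getLocalizedMessage());
                return false;
            }
            throw ex;
        }
    }
}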

From source file: seava.j4e.web.controller.AbstractBaseController.java

/**
 * Handles a managed exception: extracts the error details and writes them to the
 * response in the requested output format.
 * 
 * @param errorCode
 * @param e
 * @param response
 * @param outputType
 * @return
 * @throws IOException
 */
@ResponseBody
protected String handleManagedException(IErrorCode errorCode, Exception e, HttpServletResponse response,
        String outputType) throws IOException {

    IErrorCode err = errorCode;
    Map<String, String> result = new HashMap<String, String>();

    if (!(e instanceof BusinessException)) {
        e.printStackTrace();
    }
    if (err == null) {
        err = ErrorCode.G_RUNTIME_ERROR;
    }

    // if (e instanceof BusinessException) {
    // err = ((BusinessException) e).getErrorCode();
    // } else {
    // if (e.getCause() != null) {
    // Throwable t = e;
    // while (t.getCause() != null) {
    // t = t.getCause();
    // if (t instanceof BusinessException) {
    // err = ((BusinessException) t).getErrorCode();
    // break;
    // }
    // }
    // }
    // }

    result.put("err_group", err.getErrGroup());
    result.put("err_no", err.getErrNo() + "");
    result.put("err_msg", err.getErrMsg());

    // --------------------

    if (e.getCause() != null) {
        Throwable t = e;
        while (t.getCause() != null) {
            t = t.getCause();
        }

        if (t instanceof SQLException) {
            SQLException sqlException = (SQLException) t;
            result.put("msg", sqlException.getErrorCode() + " - " + sqlException.getSQLState() + " - "
                    + sqlException.getLocalizedMessage());

        } else {
            result.put("msg", t.getMessage());
        }

    }

    // ---------------------

    if (!result.containsKey("msg")) {
        result.put("msg", e.getMessage());
    } else {
        result.put("details", e.getMessage());
    }

    StringBuffer sb = new StringBuffer();

    if (outputType.matches("txt")) {
        sb.append(result.get("err_group") + "-" + result.get("err_no") + "\n||\n");
        sb.append(result.get("err_msg") + "\n||\n");
        sb.append(result.get("msg") + "\n||\n");
        sb.append(result.get("details") + "\n||\n");
    } else if (outputType.matches("html")) {
        sb.append("<html><body><div>" + result.get("msg") + "</div></body></html>");
    }

    response.setStatus(HttpStatus.INTERNAL_SERVER_ERROR.value());
    response.getOutputStream().print(sb.toString());
    return null;
}
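
The cause-chain walk in the handler above can be distilled into a small helper that returns the "errorCode - SQLState - localized message" string when the root cause is an SQLException. The class and method names are illustrative, not part of the original controller.

import java.sql.SQLException;

public final class SqlErrorMessages {

    private SqlErrorMessages() {
    }

    /**
     * Walks the cause chain of the given throwable; if the root cause is an SQLException,
     * returns "errorCode - SQLState - localized message", otherwise the root cause's message.
     */
    public static String describeRootCause(final Throwable e) {
        Throwable t = e;
        while (t.getCause() != null) {
            t = t.getCause();
        }
        if (t instanceof SQLException) {
            final SQLException sqlException = (SQLException) t;
            return sqlException.getErrorCode() + " - " + sqlException.getSQLState() + " - "
                    + sqlException.getLocalizedMessage();
        }
        return t.getMessage();
    }
}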

From source file: org.apache.kylin.common.persistence.JDBCResourceDAO.java

private void createIndex(final String indexName, final String tableName, final String colName) {
    try {
        executeSql(new SqlOperation() {
            @Override
            public void execute(Connection connection) throws SQLException {
                pstat = connection.prepareStatement(getCreateIndexSql(indexName, tableName, colName));
                pstat.executeUpdate();
            }
        });
    } catch (SQLException ex) {
        logger.info("Create index failed with message: " + ex.getLocalizedMessage());
    }
}

From source file: org.pentaho.platform.dataaccess.datasource.wizard.service.impl.DSWDatasourceServiceImpl.java

public boolean testDataSourceConnection(String connectionName) throws DatasourceServiceException {
    if (!hasDataAccessPermission()) {
        logger.error(Messages.getErrorString("DatasourceServiceImpl.ERROR_0001_PERMISSION_DENIED")); //$NON-NLS-1$
        throw new DatasourceServiceException(
                Messages.getErrorString("DatasourceServiceImpl.ERROR_0001_PERMISSION_DENIED")); //$NON-NLS-1$
    }
    java.sql.Connection conn = null;
    try {
        conn = DatasourceServiceHelper.getDataSourceConnection(connectionName,
                PentahoSessionHolder.getSession());
        if (conn == null) {
            logger.error(Messages.getErrorString("DatasourceServiceImpl.ERROR_0018_UNABLE_TO_TEST_CONNECTION", //$NON-NLS-1$
                    connectionName));
            throw new DatasourceServiceException(Messages.getErrorString(
                    "DatasourceServiceImpl.ERROR_0018_UNABLE_TO_TEST_CONNECTION", connectionName)); //$NON-NLS-1$
        }
    } finally {
        try {
            if (conn != null) {
                conn.close();
            }
        } catch (SQLException e) {
            logger.error(Messages.getErrorString("DatasourceServiceImpl.ERROR_0018_UNABLE_TO_TEST_CONNECTION",
                    connectionName, e.getLocalizedMessage()), e); //$NON-NLS-1$
            throw new DatasourceServiceException(
                    Messages.getErrorString("DatasourceServiceImpl.ERROR_0018_UNABLE_TO_TEST_CONNECTION",
                            connectionName, e.getLocalizedMessage()),
                    e); //$NON-NLS-1$
        }
    }
    return true;
}

From source file: org.apache.nifi.processors.standard.QueryDatabaseTable.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSessionFactory sessionFactory)
        throws ProcessException {
    ProcessSession session = sessionFactory.createSession();
    final List<FlowFile> resultSetFlowFiles = new ArrayList<>();

    final ComponentLog logger = getLogger();

    final DBCPService dbcpService = context.getProperty(DBCP_SERVICE).asControllerService(DBCPService.class);
    final DatabaseAdapter dbAdapter = dbAdapters.get(context.getProperty(DB_TYPE).getValue());
    final String tableName = context.getProperty(TABLE_NAME).evaluateAttributeExpressions().getValue();
    final String columnNames = context.getProperty(COLUMN_NAMES).evaluateAttributeExpressions().getValue();
    final String maxValueColumnNames = context.getProperty(MAX_VALUE_COLUMN_NAMES)
            .evaluateAttributeExpressions().getValue();
    final Integer fetchSize = context.getProperty(FETCH_SIZE).evaluateAttributeExpressions().asInteger();
    final Integer maxRowsPerFlowFile = context.getProperty(MAX_ROWS_PER_FLOW_FILE)
            .evaluateAttributeExpressions().asInteger();
    final Integer maxFragments = context.getProperty(MAX_FRAGMENTS).isSet()
            ? context.getProperty(MAX_FRAGMENTS).evaluateAttributeExpressions().asInteger()
            : 0;
    final boolean convertNamesForAvro = context.getProperty(NORMALIZE_NAMES_FOR_AVRO).asBoolean();

    final Map<String, String> maxValueProperties = getDefaultMaxValueProperties(context.getProperties());

    final StateManager stateManager = context.getStateManager();
    final StateMap stateMap;

    try {
        stateMap = stateManager.getState(Scope.CLUSTER);
    } catch (final IOException ioe) {
        getLogger().error("Failed to retrieve observed maximum values from the State Manager. Will not perform "
                + "query until this is accomplished.", ioe);
        context.yield();
        return;
    }
    // Make a mutable copy of the current state property map. This will be updated by the result row callback, and eventually
    // set as the current state map (after the session has been committed)
    final Map<String, String> statePropertyMap = new HashMap<>(stateMap.toMap());

    //If an initial max value for column(s) has been specified using properties, and this column is not in the state manager, sync them to the state property map
    for (final Map.Entry<String, String> maxProp : maxValueProperties.entrySet()) {
        String maxPropKey = maxProp.getKey().toLowerCase();
        String fullyQualifiedMaxPropKey = getStateKey(tableName, maxPropKey);
        if (!statePropertyMap.containsKey(fullyQualifiedMaxPropKey)) {
            String newMaxPropValue;
            // If we can't find the value at the fully-qualified key name, it is possible (under a previous scheme)
            // the value has been stored under a key that is only the column name. Fall back to check the column name,
            // but store the new initial max value under the fully-qualified key.
            if (statePropertyMap.containsKey(maxPropKey)) {
                newMaxPropValue = statePropertyMap.get(maxPropKey);
            } else {
                newMaxPropValue = maxProp.getValue();
            }
            statePropertyMap.put(fullyQualifiedMaxPropKey, newMaxPropValue);

        }
    }

    List<String> maxValueColumnNameList = StringUtils.isEmpty(maxValueColumnNames) ? null
            : Arrays.asList(maxValueColumnNames.split("\\s*,\\s*"));
    final String selectQuery = getQuery(dbAdapter, tableName, columnNames, maxValueColumnNameList,
            statePropertyMap);
    final StopWatch stopWatch = new StopWatch(true);
    final String fragmentIdentifier = UUID.randomUUID().toString();

    try (final Connection con = dbcpService.getConnection(); final Statement st = con.createStatement()) {

        if (fetchSize != null && fetchSize > 0) {
            try {
                st.setFetchSize(fetchSize);
            } catch (SQLException se) {
                // Not all drivers support this, just log the error (at debug level) and move on
                logger.debug("Cannot set fetch size to {} due to {}",
                        new Object[] { fetchSize, se.getLocalizedMessage() }, se);
            }
        }

        String jdbcURL = "DBCPService";
        try {
            DatabaseMetaData databaseMetaData = con.getMetaData();
            if (databaseMetaData != null) {
                jdbcURL = databaseMetaData.getURL();
            }
        } catch (SQLException se) {
            // Ignore and use default JDBC URL. This shouldn't happen unless the driver doesn't implement getMetaData() properly
        }

        final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).evaluateAttributeExpressions()
                .asTimePeriod(TimeUnit.SECONDS).intValue();
        st.setQueryTimeout(queryTimeout); // timeout in seconds
        try {
            logger.debug("Executing query {}", new Object[] { selectQuery });
            final ResultSet resultSet = st.executeQuery(selectQuery);
            int fragmentIndex = 0;
            while (true) {
                final AtomicLong nrOfRows = new AtomicLong(0L);

                FlowFile fileToProcess = session.create();
                try {
                    fileToProcess = session.write(fileToProcess, out -> {
                        // Max values will be updated in the state property map by the callback
                        final MaxValueResultSetRowCollector maxValCollector = new MaxValueResultSetRowCollector(
                                tableName, statePropertyMap, dbAdapter);
                        try {
                            nrOfRows.set(JdbcCommon.convertToAvroStream(resultSet, out, tableName,
                                    maxValCollector, maxRowsPerFlowFile, convertNamesForAvro));
                        } catch (SQLException | RuntimeException e) {
                            throw new ProcessException(
                                    "Error during database query or conversion of records to Avro.", e);
                        }
                    });
                } catch (ProcessException e) {
                    // Add flowfile to results before rethrowing so it will be removed from session in outer catch
                    resultSetFlowFiles.add(fileToProcess);
                    throw e;
                }

                if (nrOfRows.get() > 0) {
                    // set attribute how many rows were selected
                    fileToProcess = session.putAttribute(fileToProcess, RESULT_ROW_COUNT,
                            String.valueOf(nrOfRows.get()));
                    fileToProcess = session.putAttribute(fileToProcess, RESULT_TABLENAME, tableName);
                    if (maxRowsPerFlowFile > 0) {
                        fileToProcess = session.putAttribute(fileToProcess, "fragment.identifier",
                                fragmentIdentifier);
                        fileToProcess = session.putAttribute(fileToProcess, "fragment.index",
                                String.valueOf(fragmentIndex));
                    }

                    logger.info("{} contains {} Avro records; transferring to 'success'",
                            new Object[] { fileToProcess, nrOfRows.get() });

                    session.getProvenanceReporter().receive(fileToProcess, jdbcURL,
                            stopWatch.getElapsed(TimeUnit.MILLISECONDS));
                    resultSetFlowFiles.add(fileToProcess);
                } else {
                    // If there were no rows returned, don't send the flowfile
                    session.remove(fileToProcess);
                    context.yield();
                    break;
                }

                fragmentIndex++;
                if (maxFragments > 0 && fragmentIndex >= maxFragments) {
                    break;
                }
            }

            for (int i = 0; i < resultSetFlowFiles.size(); i++) {
                // Add maximum values as attributes
                for (Map.Entry<String, String> entry : statePropertyMap.entrySet()) {
                    // Get just the column name from the key
                    String key = entry.getKey();
                    String colName = key
                            .substring(key.lastIndexOf(NAMESPACE_DELIMITER) + NAMESPACE_DELIMITER.length());
                    resultSetFlowFiles.set(i, session.putAttribute(resultSetFlowFiles.get(i),
                            "maxvalue." + colName, entry.getValue()));
                }

                //set count on all FlowFiles
                if (maxRowsPerFlowFile > 0) {
                    resultSetFlowFiles.set(i, session.putAttribute(resultSetFlowFiles.get(i), "fragment.count",
                            Integer.toString(fragmentIndex)));
                }
            }

        } catch (final SQLException e) {
            throw e;
        }

        session.transfer(resultSetFlowFiles, REL_SUCCESS);

    } catch (final ProcessException | SQLException e) {
        logger.error("Unable to execute SQL select query {} due to {}", new Object[] { selectQuery, e });
        if (!resultSetFlowFiles.isEmpty()) {
            session.remove(resultSetFlowFiles);
        }
        context.yield();
    } finally {
        session.commit();
        try {
            // Update the state
            stateManager.setState(statePropertyMap, Scope.CLUSTER);
        } catch (IOException ioe) {
            getLogger().error("{} failed to update State Manager, maximum observed values will not be recorded",
                    new Object[] { this, ioe });
        }
    }
}

From source file: org.talend.dq.indicators.AbstractSchemaEvaluator.java

/**
 * Counts the primary key columns of the given table.
 * 
 * @param catalog
 * @param schema
 * @param table
 * @return the number of primary key columns
 * @throws SQLException
 */
@SuppressWarnings("deprecation")
private int getPKCount(String catalog, String schema, String table) throws SQLException {
    int pkCount = 0;
    ResultSet pk = null;
    try {
        // MOD xqliu 2009-07-13 bug 7888
        pk = org.talend.utils.sql.ConnectionUtils.getConnectionMetadata(getConnection()).getPrimaryKeys(catalog,
                schema, table);
        // ~
    } catch (SQLException e1) {
        log.warn(Messages.getString("AbstractSchemaEvaluator.PrimaryException", //$NON-NLS-1$
                this.dbms().toQualifiedName(catalog, schema, table), e1.getLocalizedMessage()), e1);
        reloadConnectionAfterException(catalog);
    }
    if (pk != null) {
        while (pk.next()) {
            pkCount += 1;
        }
        pk.close();
    }
    return pkCount;
}

From source file: org.talend.dq.indicators.AbstractSchemaEvaluator.java

/**
 * Counts the indexes of the given table, excluding statistic indexes.
 * 
 * @param catalog
 * @param schema
 * @param table
 * @return the number of indexes
 * @throws SQLException
 */
@SuppressWarnings("deprecation")
private int getIndexCount(String catalog, String schema, String table) throws SQLException {
    int idxCount = 0;
    ResultSet idx = null;
    try {
        // MOD xqliu 2009-07-13 bug 7888
        idx = org.talend.utils.sql.ConnectionUtils.getConnectionMetadata(getConnection()).getIndexInfo(catalog,
                schema, table, false, true);
        // ~
    } catch (SQLException e) {

        log.warn(Messages.getString("AbstractSchemaEvaluator.IndexException", //$NON-NLS-1$
                this.dbms().toQualifiedName(catalog, schema, table), e.getLocalizedMessage()), e);
        // Oracle increments the number of cursors to close each time a new query is executed after this exception!
        reloadConnectionAfterException(catalog);
    }
    // TODO unicity of index could be a parameter
    if (idx != null) {
        while (idx.next()) {
            // MOD msjian 2011-10-9 TDQ-3566: incorrect index number result in overview analysis
            // MOD 20130418 TDQ-6823 use type!=tableIndexStatistic to filter the statistic index(do not show this
            // type)
            if (0 != idx.getShort("TYPE")) { //$NON-NLS-1$
                idxCount += 1;
            }
            // TDQ-3566 ~
        }
        idx.close();
    }
    return idxCount;
}

From source file: org.apache.nifi.processors.standard.AbstractQueryDatabaseTable.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSessionFactory sessionFactory)
        throws ProcessException {
    // Fetch the column/table info once
    if (!setupComplete.get()) {
        super.setup(context);
    }
    ProcessSession session = sessionFactory.createSession();
    final List<FlowFile> resultSetFlowFiles = new ArrayList<>();

    final ComponentLog logger = getLogger();

    final DBCPService dbcpService = context.getProperty(DBCP_SERVICE).asControllerService(DBCPService.class);
    final DatabaseAdapter dbAdapter = dbAdapters.get(context.getProperty(DB_TYPE).getValue());
    final String tableName = context.getProperty(TABLE_NAME).evaluateAttributeExpressions().getValue();
    final String columnNames = context.getProperty(COLUMN_NAMES).evaluateAttributeExpressions().getValue();
    final String sqlQuery = context.getProperty(SQL_QUERY).evaluateAttributeExpressions().getValue();
    final String maxValueColumnNames = context.getProperty(MAX_VALUE_COLUMN_NAMES)
            .evaluateAttributeExpressions().getValue();
    final String customWhereClause = context.getProperty(WHERE_CLAUSE).evaluateAttributeExpressions()
            .getValue();
    final Integer fetchSize = context.getProperty(FETCH_SIZE).evaluateAttributeExpressions().asInteger();
    final Integer maxRowsPerFlowFile = context.getProperty(MAX_ROWS_PER_FLOW_FILE)
            .evaluateAttributeExpressions().asInteger();
    final Integer outputBatchSizeField = context.getProperty(OUTPUT_BATCH_SIZE).evaluateAttributeExpressions()
            .asInteger();
    final int outputBatchSize = outputBatchSizeField == null ? 0 : outputBatchSizeField;
    final Integer maxFragments = context.getProperty(MAX_FRAGMENTS).isSet()
            ? context.getProperty(MAX_FRAGMENTS).evaluateAttributeExpressions().asInteger()
            : 0;

    SqlWriter sqlWriter = configureSqlWriter(session, context);

    final StateManager stateManager = context.getStateManager();
    final StateMap stateMap;

    try {
        stateMap = stateManager.getState(Scope.CLUSTER);
    } catch (final IOException ioe) {
        getLogger().error("Failed to retrieve observed maximum values from the State Manager. Will not perform "
                + "query until this is accomplished.", ioe);
        context.yield();
        return;
    }
    // Make a mutable copy of the current state property map. This will be updated by the result row callback, and eventually
    // set as the current state map (after the session has been committed)
    final Map<String, String> statePropertyMap = new HashMap<>(stateMap.toMap());

    //If an initial max value for column(s) has been specified using properties, and this column is not in the state manager, sync them to the state property map
    for (final Map.Entry<String, String> maxProp : maxValueProperties.entrySet()) {
        String maxPropKey = maxProp.getKey().toLowerCase();
        String fullyQualifiedMaxPropKey = getStateKey(tableName, maxPropKey, dbAdapter);
        if (!statePropertyMap.containsKey(fullyQualifiedMaxPropKey)) {
            String newMaxPropValue;
            // If we can't find the value at the fully-qualified key name, it is possible (under a previous scheme)
            // the value has been stored under a key that is only the column name. Fall back to check the column name,
            // but store the new initial max value under the fully-qualified key.
            if (statePropertyMap.containsKey(maxPropKey)) {
                newMaxPropValue = statePropertyMap.get(maxPropKey);
            } else {
                newMaxPropValue = maxProp.getValue();
            }
            statePropertyMap.put(fullyQualifiedMaxPropKey, newMaxPropValue);

        }
    }

    List<String> maxValueColumnNameList = StringUtils.isEmpty(maxValueColumnNames) ? null
            : Arrays.asList(maxValueColumnNames.split("\\s*,\\s*"));
    final String selectQuery = getQuery(dbAdapter, tableName, sqlQuery, columnNames, maxValueColumnNameList,
            customWhereClause, statePropertyMap);
    final StopWatch stopWatch = new StopWatch(true);
    final String fragmentIdentifier = UUID.randomUUID().toString();

    try (final Connection con = dbcpService.getConnection(Collections.emptyMap());
            final Statement st = con.createStatement()) {

        if (fetchSize != null && fetchSize > 0) {
            try {
                st.setFetchSize(fetchSize);
            } catch (SQLException se) {
                // Not all drivers support this, just log the error (at debug level) and move on
                logger.debug("Cannot set fetch size to {} due to {}",
                        new Object[] { fetchSize, se.getLocalizedMessage() }, se);
            }
        }

        String jdbcURL = "DBCPService";
        try {
            DatabaseMetaData databaseMetaData = con.getMetaData();
            if (databaseMetaData != null) {
                jdbcURL = databaseMetaData.getURL();
            }
        } catch (SQLException se) {
            // Ignore and use default JDBC URL. This shouldn't happen unless the driver doesn't implement getMetaData() properly
        }

        final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).evaluateAttributeExpressions()
                .asTimePeriod(TimeUnit.SECONDS).intValue();
        st.setQueryTimeout(queryTimeout); // timeout in seconds
        if (logger.isDebugEnabled()) {
            logger.debug("Executing query {}", new Object[] { selectQuery });
        }
        try (final ResultSet resultSet = st.executeQuery(selectQuery)) {
            int fragmentIndex = 0;
            // Max values will be updated in the state property map by the callback
            final MaxValueResultSetRowCollector maxValCollector = new MaxValueResultSetRowCollector(tableName,
                    statePropertyMap, dbAdapter);

            while (true) {
                final AtomicLong nrOfRows = new AtomicLong(0L);

                FlowFile fileToProcess = session.create();
                try {
                    fileToProcess = session.write(fileToProcess, out -> {
                        try {
                            nrOfRows.set(
                                    sqlWriter.writeResultSet(resultSet, out, getLogger(), maxValCollector));
                        } catch (Exception e) {
                            throw new ProcessException("Error during database query or conversion of records.",
                                    e);
                        }
                    });
                } catch (ProcessException e) {
                    // Add flowfile to results before rethrowing so it will be removed from session in outer catch
                    resultSetFlowFiles.add(fileToProcess);
                    throw e;
                }

                if (nrOfRows.get() > 0) {
                    // set attributes
                    final Map<String, String> attributesToAdd = new HashMap<>();
                    attributesToAdd.put(RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));
                    attributesToAdd.put(RESULT_TABLENAME, tableName);

                    if (maxRowsPerFlowFile > 0) {
                        attributesToAdd.put(FRAGMENT_ID, fragmentIdentifier);
                        attributesToAdd.put(FRAGMENT_INDEX, String.valueOf(fragmentIndex));
                    }

                    attributesToAdd.putAll(sqlWriter.getAttributesToAdd());
                    fileToProcess = session.putAllAttributes(fileToProcess, attributesToAdd);
                    sqlWriter.updateCounters(session);

                    logger.info("{} contains {} records; transferring to 'success'",
                            new Object[] { fileToProcess, nrOfRows.get() });

                    session.getProvenanceReporter().receive(fileToProcess, jdbcURL,
                            stopWatch.getElapsed(TimeUnit.MILLISECONDS));
                    resultSetFlowFiles.add(fileToProcess);
                    // If we've reached the batch size, send out the flow files
                    if (outputBatchSize > 0 && resultSetFlowFiles.size() >= outputBatchSize) {
                        session.transfer(resultSetFlowFiles, REL_SUCCESS);
                        session.commit();
                        resultSetFlowFiles.clear();
                    }
                } else {
                    // If there were no rows returned, don't send the flowfile
                    session.remove(fileToProcess);
                    // If no rows and this was first FlowFile, yield
                    if (fragmentIndex == 0) {
                        context.yield();
                    }
                    break;
                }

                fragmentIndex++;
                if (maxFragments > 0 && fragmentIndex >= maxFragments) {
                    break;
                }

                // If we aren't splitting up the data into flow files or fragments, then the result set has been entirely fetched so don't loop back around
                if (maxFragments == 0 && maxRowsPerFlowFile == 0) {
                    break;
                }

                // If we are splitting up the data into flow files, don't loop back around if we've gotten all results
                if (maxRowsPerFlowFile > 0 && nrOfRows.get() < maxRowsPerFlowFile) {
                    break;
                }
            }

            // Apply state changes from the Max Value tracker
            maxValCollector.applyStateChanges();

            // Even though the maximum value and total count are known at this point, to maintain consistent behavior if Output Batch Size is set, do not store the attributes
            if (outputBatchSize == 0) {
                for (int i = 0; i < resultSetFlowFiles.size(); i++) {
                    // Add maximum values as attributes
                    for (Map.Entry<String, String> entry : statePropertyMap.entrySet()) {
                        // Get just the column name from the key
                        String key = entry.getKey();
                        String colName = key
                                .substring(key.lastIndexOf(NAMESPACE_DELIMITER) + NAMESPACE_DELIMITER.length());
                        resultSetFlowFiles.set(i, session.putAttribute(resultSetFlowFiles.get(i),
                                "maxvalue." + colName, entry.getValue()));
                    }

                    //set count on all FlowFiles
                    if (maxRowsPerFlowFile > 0) {
                        resultSetFlowFiles.set(i, session.putAttribute(resultSetFlowFiles.get(i),
                                FRAGMENT_COUNT, Integer.toString(fragmentIndex)));
                    }
                }
            }
        } catch (final SQLException e) {
            throw e;
        }

        session.transfer(resultSetFlowFiles, REL_SUCCESS);

    } catch (final ProcessException | SQLException e) {
        logger.error("Unable to execute SQL select query {} due to {}", new Object[] { selectQuery, e });
        if (!resultSetFlowFiles.isEmpty()) {
            session.remove(resultSetFlowFiles);
        }
        context.yield();
    } finally {
        session.commit();
        try {
            // Update the state
            stateManager.setState(statePropertyMap, Scope.CLUSTER);
        } catch (IOException ioe) {
            getLogger().error("{} failed to update State Manager, maximum observed values will not be recorded",
                    new Object[] { this, ioe });
        }
    }
}

From source file: org.apache.nifi.processors.hive.SelectHive_1_1QL.java

private void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile fileToProcess = (context.hasIncomingConnection() ? session.get() : null);
    FlowFile flowfile = null;

    // If we have no FlowFile, and all incoming connections are self-loops then we can continue on.
    // However, if we have no FlowFile and we have connections coming from other Processors, then
    // we know that we should run only if we have a FlowFile.
    if (context.hasIncomingConnection()) {
        if (fileToProcess == null && context.hasNonLoopConnection()) {
            return;
        }
    }

    final ComponentLog logger = getLogger();
    final Hive_1_1DBCPService dbcpService = context.getProperty(HIVE_DBCP_SERVICE)
            .asControllerService(Hive_1_1DBCPService.class);
    final Charset charset = Charset.forName(context.getProperty(CHARSET).getValue());

    List<String> preQueries = getQueries(
            context.getProperty(HIVEQL_PRE_QUERY).evaluateAttributeExpressions(fileToProcess).getValue());
    List<String> postQueries = getQueries(
            context.getProperty(HIVEQL_POST_QUERY).evaluateAttributeExpressions(fileToProcess).getValue());

    final boolean flowbased = !(context.getProperty(HIVEQL_SELECT_QUERY).isSet());

    // Source the SQL
    String hqlStatement;

    if (context.getProperty(HIVEQL_SELECT_QUERY).isSet()) {
        hqlStatement = context.getProperty(HIVEQL_SELECT_QUERY).evaluateAttributeExpressions(fileToProcess)
                .getValue();
    } else {
        // If the query is not set, then an incoming flow file is required, and expected to contain a valid SQL select query.
        // If there is no incoming connection, onTrigger will not be called as the processor will fail when scheduled.
        final StringBuilder queryContents = new StringBuilder();
        session.read(fileToProcess, in -> queryContents.append(IOUtils.toString(in, charset)));
        hqlStatement = queryContents.toString();
    }

    final Integer fetchSize = context.getProperty(FETCH_SIZE).evaluateAttributeExpressions(fileToProcess)
            .asInteger();
    final Integer maxRowsPerFlowFile = context.getProperty(MAX_ROWS_PER_FLOW_FILE)
            .evaluateAttributeExpressions(fileToProcess).asInteger();
    final Integer maxFragments = context.getProperty(MAX_FRAGMENTS).isSet()
            ? context.getProperty(MAX_FRAGMENTS).evaluateAttributeExpressions(fileToProcess).asInteger()
            : 0;
    final String outputFormat = context.getProperty(HIVEQL_OUTPUT_FORMAT).getValue();
    final boolean convertNamesForAvro = context.getProperty(NORMALIZE_NAMES_FOR_AVRO).asBoolean();
    final StopWatch stopWatch = new StopWatch(true);
    final boolean header = context.getProperty(HIVEQL_CSV_HEADER).asBoolean();
    final String altHeader = context.getProperty(HIVEQL_CSV_ALT_HEADER)
            .evaluateAttributeExpressions(fileToProcess).getValue();
    final String delimiter = context.getProperty(HIVEQL_CSV_DELIMITER)
            .evaluateAttributeExpressions(fileToProcess).getValue();
    final boolean quote = context.getProperty(HIVEQL_CSV_QUOTE).asBoolean();
    final boolean escape = context.getProperty(HIVEQL_CSV_HEADER).asBoolean();
    final String fragmentIdentifier = UUID.randomUUID().toString();

    try (final Connection con = dbcpService
            .getConnection(fileToProcess == null ? Collections.emptyMap() : fileToProcess.getAttributes());
            final Statement st = (flowbased ? con.prepareStatement(hqlStatement) : con.createStatement())) {
        Pair<String, SQLException> failure = executeConfigStatements(con, preQueries);
        if (failure != null) {
            // In case of failure, assigning config query to "hqlStatement"  to follow current error handling
            hqlStatement = failure.getLeft();
            flowfile = (fileToProcess == null) ? session.create() : fileToProcess;
            fileToProcess = null;
            throw failure.getRight();
        }
        if (fetchSize != null && fetchSize > 0) {
            try {
                st.setFetchSize(fetchSize);
            } catch (SQLException se) {
                // Not all drivers support this, just log the error (at debug level) and move on
                logger.debug("Cannot set fetch size to {} due to {}",
                        new Object[] { fetchSize, se.getLocalizedMessage() }, se);
            }
        }

        final List<FlowFile> resultSetFlowFiles = new ArrayList<>();
        try {
            logger.debug("Executing query {}", new Object[] { hqlStatement });
            if (flowbased) {
                // Hive JDBC Doesn't Support this yet:
                // ParameterMetaData pmd = ((PreparedStatement)st).getParameterMetaData();
                // int paramCount = pmd.getParameterCount();

                // Alternate way to determine number of params in SQL.
                int paramCount = StringUtils.countMatches(hqlStatement, "?");

                if (paramCount > 0) {
                    setParameters(1, (PreparedStatement) st, paramCount, fileToProcess.getAttributes());
                }
            }

            final ResultSet resultSet;

            try {
                resultSet = (flowbased ? ((PreparedStatement) st).executeQuery()
                        : st.executeQuery(hqlStatement));
            } catch (SQLException se) {
                // If an error occurs during the query, a flowfile is expected to be routed to failure, so ensure one here
                flowfile = (fileToProcess == null) ? session.create() : fileToProcess;
                fileToProcess = null;
                throw se;
            }

            int fragmentIndex = 0;
            String baseFilename = (fileToProcess != null)
                    ? fileToProcess.getAttribute(CoreAttributes.FILENAME.key())
                    : null;
            while (true) {
                final AtomicLong nrOfRows = new AtomicLong(0L);
                flowfile = (fileToProcess == null) ? session.create() : session.create(fileToProcess);
                if (baseFilename == null) {
                    baseFilename = flowfile.getAttribute(CoreAttributes.FILENAME.key());
                }
                try {
                    flowfile = session.write(flowfile, out -> {
                        try {
                            if (AVRO.equals(outputFormat)) {
                                nrOfRows.set(HiveJdbcCommon.convertToAvroStream(resultSet, out,
                                        maxRowsPerFlowFile, convertNamesForAvro));
                            } else if (CSV.equals(outputFormat)) {
                                CsvOutputOptions options = new CsvOutputOptions(header, altHeader, delimiter,
                                        quote, escape, maxRowsPerFlowFile);
                                nrOfRows.set(HiveJdbcCommon.convertToCsvStream(resultSet, out, options));
                            } else {
                                nrOfRows.set(0L);
                                throw new ProcessException("Unsupported output format: " + outputFormat);
                            }
                        } catch (final SQLException | RuntimeException e) {
                            throw new ProcessException("Error during database query or conversion of records.",
                                    e);
                        }
                    });
                } catch (ProcessException e) {
                    // Add flowfile to results before rethrowing so it will be removed from session in outer catch
                    resultSetFlowFiles.add(flowfile);
                    throw e;
                }

                if (nrOfRows.get() > 0 || resultSetFlowFiles.isEmpty()) {
                    final Map<String, String> attributes = new HashMap<>();
                    // Set attribute for how many rows were selected
                    attributes.put(RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));

                    try {
                        // Set input/output table names by parsing the query
                        attributes.putAll(toQueryTableAttributes(findTableNames(hqlStatement)));
                    } catch (Exception e) {
                        // If failed to parse the query, just log a warning message, but continue.
                        getLogger().warn("Failed to parse query: {} due to {}",
                                new Object[] { hqlStatement, e }, e);
                    }

                    // Set MIME type on output document and add extension to filename
                    if (AVRO.equals(outputFormat)) {
                        attributes.put(CoreAttributes.MIME_TYPE.key(), MIME_TYPE_AVRO_BINARY);
                        attributes.put(CoreAttributes.FILENAME.key(),
                                baseFilename + "." + fragmentIndex + ".avro");
                    } else if (CSV.equals(outputFormat)) {
                        attributes.put(CoreAttributes.MIME_TYPE.key(), CSV_MIME_TYPE);
                        attributes.put(CoreAttributes.FILENAME.key(),
                                baseFilename + "." + fragmentIndex + ".csv");
                    }

                    if (maxRowsPerFlowFile > 0) {
                        attributes.put("fragment.identifier", fragmentIdentifier);
                        attributes.put("fragment.index", String.valueOf(fragmentIndex));
                    }

                    flowfile = session.putAllAttributes(flowfile, attributes);

                    logger.info("{} contains {} " + outputFormat + " records; transferring to 'success'",
                            new Object[] { flowfile, nrOfRows.get() });

                    if (context.hasIncomingConnection()) {
                        // If the flow file came from an incoming connection, issue a Fetch provenance event
                        session.getProvenanceReporter().fetch(flowfile, dbcpService.getConnectionURL(),
                                "Retrieved " + nrOfRows.get() + " rows",
                                stopWatch.getElapsed(TimeUnit.MILLISECONDS));
                    } else {
                        // If we created a flow file from rows received from Hive, issue a Receive provenance event
                        session.getProvenanceReporter().receive(flowfile, dbcpService.getConnectionURL(),
                                stopWatch.getElapsed(TimeUnit.MILLISECONDS));
                    }
                    resultSetFlowFiles.add(flowfile);
                } else {
                    // If there were no rows returned (and the first flow file has been sent), we're done processing, so remove the flowfile and carry on
                    session.remove(flowfile);
                    if (resultSetFlowFiles != null && resultSetFlowFiles.size() > 0) {
                        flowfile = resultSetFlowFiles.get(resultSetFlowFiles.size() - 1);
                    }
                    break;
                }

                fragmentIndex++;
                if (maxFragments > 0 && fragmentIndex >= maxFragments) {
                    break;
                }
            }

            for (int i = 0; i < resultSetFlowFiles.size(); i++) {
                // Set count on all FlowFiles
                if (maxRowsPerFlowFile > 0) {
                    resultSetFlowFiles.set(i, session.putAttribute(resultSetFlowFiles.get(i), "fragment.count",
                            Integer.toString(fragmentIndex)));
                }
            }

        } catch (final SQLException e) {
            throw e;
        }

        failure = executeConfigStatements(con, postQueries);
        if (failure != null) {
            hqlStatement = failure.getLeft();
            if (resultSetFlowFiles != null) {
                resultSetFlowFiles.forEach(ff -> session.remove(ff));
            }
            flowfile = (fileToProcess == null) ? session.create() : fileToProcess;
            fileToProcess = null;
            throw failure.getRight();
        }

        session.transfer(resultSetFlowFiles, REL_SUCCESS);
        if (fileToProcess != null) {
            session.remove(fileToProcess);
        }
    } catch (final ProcessException | SQLException e) {
        logger.error("Issue processing SQL {} due to {}.", new Object[] { hqlStatement, e });
        if (flowfile == null) {
            // This can happen if any exceptions occur while setting up the connection, statement, etc.
            logger.error("Unable to execute HiveQL select query {} due to {}. No FlowFile to route to failure",
                    new Object[] { hqlStatement, e });
            context.yield();
        } else {
            if (context.hasIncomingConnection()) {
                logger.error("Unable to execute HiveQL select query {} for {} due to {}; routing to failure",
                        new Object[] { hqlStatement, flowfile, e });
                flowfile = session.penalize(flowfile);
            } else {
                logger.error("Unable to execute HiveQL select query {} due to {}; routing to failure",
                        new Object[] { hqlStatement, e });
                context.yield();
            }
            session.transfer(flowfile, REL_FAILURE);
        }
    }
}

From source file: org.apache.nifi.processors.hive.SelectHive3QL.java

private void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile fileToProcess = (context.hasIncomingConnection() ? session.get() : null);
    FlowFile flowfile = null;

    // If we have no FlowFile, and all incoming connections are self-loops then we can continue on.
    // However, if we have no FlowFile and we have connections coming from other Processors, then
    // we know that we should run only if we have a FlowFile.
    if (context.hasIncomingConnection()) {
        if (fileToProcess == null && context.hasNonLoopConnection()) {
            return;
        }
    }

    final ComponentLog logger = getLogger();
    final Hive3DBCPService dbcpService = context.getProperty(HIVE_DBCP_SERVICE)
            .asControllerService(Hive3DBCPService.class);
    final Charset charset = Charset.forName(context.getProperty(CHARSET).getValue());

    List<String> preQueries = getQueries(
            context.getProperty(HIVEQL_PRE_QUERY).evaluateAttributeExpressions(fileToProcess).getValue());
    List<String> postQueries = getQueries(
            context.getProperty(HIVEQL_POST_QUERY).evaluateAttributeExpressions(fileToProcess).getValue());

    final boolean flowbased = !(context.getProperty(HIVEQL_SELECT_QUERY).isSet());

    // Source the SQL
    String hqlStatement;

    if (context.getProperty(HIVEQL_SELECT_QUERY).isSet()) {
        hqlStatement = context.getProperty(HIVEQL_SELECT_QUERY).evaluateAttributeExpressions(fileToProcess)
                .getValue();
    } else {
        // If the query is not set, then an incoming flow file is required, and expected to contain a valid SQL select query.
        // If there is no incoming connection, onTrigger will not be called as the processor will fail when scheduled.
        final StringBuilder queryContents = new StringBuilder();
        session.read(fileToProcess, in -> queryContents.append(IOUtils.toString(in, charset)));
        hqlStatement = queryContents.toString();
    }

    final Integer fetchSize = context.getProperty(FETCH_SIZE).evaluateAttributeExpressions(fileToProcess)
            .asInteger();
    final Integer maxRowsPerFlowFile = context.getProperty(MAX_ROWS_PER_FLOW_FILE)
            .evaluateAttributeExpressions(fileToProcess).asInteger();
    final Integer maxFragments = context.getProperty(MAX_FRAGMENTS).isSet()
            ? context.getProperty(MAX_FRAGMENTS).evaluateAttributeExpressions(fileToProcess).asInteger()
            : 0;
    final String outputFormat = context.getProperty(HIVEQL_OUTPUT_FORMAT).getValue();
    final boolean convertNamesForAvro = context.getProperty(NORMALIZE_NAMES_FOR_AVRO).asBoolean();
    final StopWatch stopWatch = new StopWatch(true);
    final boolean header = context.getProperty(HIVEQL_CSV_HEADER).asBoolean();
    final String altHeader = context.getProperty(HIVEQL_CSV_ALT_HEADER)
            .evaluateAttributeExpressions(fileToProcess).getValue();
    final String delimiter = context.getProperty(HIVEQL_CSV_DELIMITER)
            .evaluateAttributeExpressions(fileToProcess).getValue();
    final boolean quote = context.getProperty(HIVEQL_CSV_QUOTE).asBoolean();
    final boolean escape = context.getProperty(HIVEQL_CSV_HEADER).asBoolean();
    final String fragmentIdentifier = UUID.randomUUID().toString();

    try (final Connection con = dbcpService
            .getConnection(fileToProcess == null ? Collections.emptyMap() : fileToProcess.getAttributes());
            final Statement st = (flowbased ? con.prepareStatement(hqlStatement) : con.createStatement())) {
        Pair<String, SQLException> failure = executeConfigStatements(con, preQueries);
        if (failure != null) {
            // In case of failure, assigning config query to "hqlStatement"  to follow current error handling
            hqlStatement = failure.getLeft();
            flowfile = (fileToProcess == null) ? session.create() : fileToProcess;
            fileToProcess = null;
            throw failure.getRight();
        }
        st.setQueryTimeout(
                context.getProperty(QUERY_TIMEOUT).evaluateAttributeExpressions(fileToProcess).asInteger());

        if (fetchSize != null && fetchSize > 0) {
            try {
                st.setFetchSize(fetchSize);
            } catch (SQLException se) {
                // Not all drivers support this, just log the error (at debug level) and move on
                logger.debug("Cannot set fetch size to {} due to {}",
                        new Object[] { fetchSize, se.getLocalizedMessage() }, se);
            }
        }

        final List<FlowFile> resultSetFlowFiles = new ArrayList<>();
        try {
            logger.debug("Executing query {}", new Object[] { hqlStatement });
            if (flowbased) {
                // Hive JDBC Doesn't Support this yet:
                // ParameterMetaData pmd = ((PreparedStatement)st).getParameterMetaData();
                // int paramCount = pmd.getParameterCount();

                // Alternate way to determine number of params in SQL.
                int paramCount = StringUtils.countMatches(hqlStatement, "?");

                if (paramCount > 0) {
                    setParameters(1, (PreparedStatement) st, paramCount, fileToProcess.getAttributes());
                }
            }

            final ResultSet resultSet;

            try {
                resultSet = (flowbased ? ((PreparedStatement) st).executeQuery()
                        : st.executeQuery(hqlStatement));
            } catch (SQLException se) {
                // If an error occurs during the query, a flowfile is expected to be routed to failure, so ensure one here
                flowfile = (fileToProcess == null) ? session.create() : fileToProcess;
                fileToProcess = null;
                throw se;
            }

            int fragmentIndex = 0;
            String baseFilename = (fileToProcess != null)
                    ? fileToProcess.getAttribute(CoreAttributes.FILENAME.key())
                    : null;
            while (true) {
                final AtomicLong nrOfRows = new AtomicLong(0L);
                flowfile = (fileToProcess == null) ? session.create() : session.create(fileToProcess);
                if (baseFilename == null) {
                    baseFilename = flowfile.getAttribute(CoreAttributes.FILENAME.key());
                }
                try {
                    flowfile = session.write(flowfile, out -> {
                        try {
                            if (AVRO.equals(outputFormat)) {
                                nrOfRows.set(HiveJdbcCommon.convertToAvroStream(resultSet, out,
                                        maxRowsPerFlowFile, convertNamesForAvro));
                            } else if (CSV.equals(outputFormat)) {
                                CsvOutputOptions options = new CsvOutputOptions(header, altHeader, delimiter,
                                        quote, escape, maxRowsPerFlowFile);
                                nrOfRows.set(HiveJdbcCommon.convertToCsvStream(resultSet, out, options));
                            } else {
                                nrOfRows.set(0L);
                                throw new ProcessException("Unsupported output format: " + outputFormat);
                            }
                        } catch (final SQLException | RuntimeException e) {
                            throw new ProcessException("Error during database query or conversion of records.",
                                    e);
                        }
                    });
                } catch (ProcessException e) {
                    // Add flowfile to results before rethrowing so it will be removed from session in outer catch
                    resultSetFlowFiles.add(flowfile);
                    throw e;
                }

                if (nrOfRows.get() > 0 || resultSetFlowFiles.isEmpty()) {
                    final Map<String, String> attributes = new HashMap<>();
                    // Set attribute for how many rows were selected
                    attributes.put(RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));

                    try {
                        // Set input/output table names by parsing the query
                        attributes.putAll(toQueryTableAttributes(findTableNames(hqlStatement)));
                    } catch (Exception e) {
                        // If failed to parse the query, just log a warning message, but continue.
                        getLogger().warn("Failed to parse query: {} due to {}",
                                new Object[] { hqlStatement, e }, e);
                    }

                    // Set MIME type on output document and add extension to filename
                    if (AVRO.equals(outputFormat)) {
                        attributes.put(CoreAttributes.MIME_TYPE.key(), MIME_TYPE_AVRO_BINARY);
                        attributes.put(CoreAttributes.FILENAME.key(),
                                baseFilename + "." + fragmentIndex + ".avro");
                    } else if (CSV.equals(outputFormat)) {
                        attributes.put(CoreAttributes.MIME_TYPE.key(), CSV_MIME_TYPE);
                        attributes.put(CoreAttributes.FILENAME.key(),
                                baseFilename + "." + fragmentIndex + ".csv");
                    }

                    if (maxRowsPerFlowFile > 0) {
                        attributes.put("fragment.identifier", fragmentIdentifier);
                        attributes.put("fragment.index", String.valueOf(fragmentIndex));
                    }

                    flowfile = session.putAllAttributes(flowfile, attributes);

                    logger.info("{} contains {} " + outputFormat + " records; transferring to 'success'",
                            new Object[] { flowfile, nrOfRows.get() });

                    if (context.hasIncomingConnection()) {
                        // If the flow file came from an incoming connection, issue a Fetch provenance event
                        session.getProvenanceReporter().fetch(flowfile, dbcpService.getConnectionURL(),
                                "Retrieved " + nrOfRows.get() + " rows",
                                stopWatch.getElapsed(TimeUnit.MILLISECONDS));
                    } else {
                        // If we created a flow file from rows received from Hive, issue a Receive provenance event
                        session.getProvenanceReporter().receive(flowfile, dbcpService.getConnectionURL(),
                                stopWatch.getElapsed(TimeUnit.MILLISECONDS));
                    }
                    resultSetFlowFiles.add(flowfile);
                } else {
                    // If there were no rows returned (and the first flow file has been sent), we're done processing, so remove the flowfile and carry on
                    session.remove(flowfile);
                    if (resultSetFlowFiles != null && resultSetFlowFiles.size() > 0) {
                        flowfile = resultSetFlowFiles.get(resultSetFlowFiles.size() - 1);
                    }
                    break;
                }

                fragmentIndex++;
                if (maxFragments > 0 && fragmentIndex >= maxFragments) {
                    break;
                }
            }

            for (int i = 0; i < resultSetFlowFiles.size(); i++) {
                // Set count on all FlowFiles
                if (maxRowsPerFlowFile > 0) {
                    resultSetFlowFiles.set(i, session.putAttribute(resultSetFlowFiles.get(i), "fragment.count",
                            Integer.toString(fragmentIndex)));
                }
            }

        } catch (final SQLException e) {
            throw e;
        }

        failure = executeConfigStatements(con, postQueries);
        if (failure != null) {
            hqlStatement = failure.getLeft();
            if (resultSetFlowFiles != null) {
                resultSetFlowFiles.forEach(ff -> session.remove(ff));
            }
            flowfile = (fileToProcess == null) ? session.create() : fileToProcess;
            fileToProcess = null;
            throw failure.getRight();
        }

        session.transfer(resultSetFlowFiles, REL_SUCCESS);
        if (fileToProcess != null) {
            session.remove(fileToProcess);
        }

    } catch (final ProcessException | SQLException e) {
        logger.error("Issue processing SQL {} due to {}.", new Object[] { hqlStatement, e });
        if (flowfile == null) {
            // This can happen if any exceptions occur while setting up the connection, statement, etc.
            logger.error("Unable to execute HiveQL select query {} due to {}. No FlowFile to route to failure",
                    new Object[] { hqlStatement, e });
            context.yield();
        } else {
            if (context.hasIncomingConnection()) {
                logger.error("Unable to execute HiveQL select query {} for {} due to {}; routing to failure",
                        new Object[] { hqlStatement, flowfile, e });
                flowfile = session.penalize(flowfile);
            } else {
                logger.error("Unable to execute HiveQL select query {} due to {}; routing to failure",
                        new Object[] { hqlStatement, e });
                context.yield();
            }
            session.transfer(flowfile, REL_FAILURE);
        }
    }
}