Example usage for java.sql Statement setMaxRows

List of usage examples for java.sql Statement setMaxRows

Introduction

On this page you can find example usage for java.sql Statement setMaxRows.

Prototype

void setMaxRows(int max) throws SQLException;

Document

Sets the limit for the maximum number of rows that any ResultSet object generated by this Statement object can contain to the given number.
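
Below is a minimal, self-contained sketch of the call. The in-memory H2 connection URL, table, and column names are placeholders for illustration only; they are not taken from the project examples on this page.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class SetMaxRowsExample {
    public static void main(String[] args) throws Exception {
        try (Connection con = DriverManager.getConnection("jdbc:h2:mem:demo");
             Statement stmt = con.createStatement()) {
            // Every ResultSet produced by this Statement is silently truncated to 100 rows.
            stmt.setMaxRows(100);
            try (ResultSet rs = stmt.executeQuery("SELECT id FROM some_table")) {
                while (rs.next()) {
                    System.out.println(rs.getString(1));
                }
            }
        }
    }
}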

Usage

From source file:jade.domain.DFDBKB.java

/**
 * Retrieve the DFDs matching the given template
 */
protected List searchSingle(Object template, int maxResult) throws SQLException {
    List matchingAIDs = new ArrayList();

    // Get the names of all DFDs matching the template
    String select = null;
    ResultSet rs = null;
    Statement s = null;

    try {
        select = createSelect((DFAgentDescription) template);

        s = getConnectionWrapper().getConnection().createStatement();
        if (maxResult >= 0) {
            s.setMaxRows(maxResult);
            s.setFetchSize(maxResult);
        }
        rs = s.executeQuery(select);

        while (rs.next()) {
            String aidS = rs.getString("aid");
            matchingAIDs.add(aidS);
        }
    } catch (SQLException sqle) {
        // Let it through
        throw sqle;
    } catch (Exception e) {
        logger.log(Logger.SEVERE, "Couldn't create the SQL SELECT statement.", e);
        throw new SQLException("Couldn't create the SQL SELECT statement. " + e.getMessage());
    } finally {
        closeResultSet(rs);
        closeStatement(s);
    }

    // For each matching AID reconstruct the complete DFD
    List dfds = new ArrayList(matchingAIDs.size());
    Iterator it = matchingAIDs.iterator();
    // FIXME: Define a proper constant and possibly a proper configuration option
    if (matchingAIDs.size() < 10) {
        while (it.hasNext()) {
            dfds.add(getDFD((String) it.next()));
        }
    } else {
        // If we found several matching agents we preload protocols languages and ontologies once for all 
        // instead of making several queries one per agent.
        PreparedStatements pss = getPreparedStatements();
        Map allLanguages = preloadIdValueTable(pss.stm_selCountAllLanguages, pss.stm_selAllLanguages);
        Map allOntologies = preloadIdValueTable(pss.stm_selCountAllOntologies, pss.stm_selAllOntologies);
        Map allProtocols = preloadIdValueTable(pss.stm_selCountAllProtocols, pss.stm_selAllProtocols);
        while (it.hasNext()) {
            dfds.add(getDFD((String) it.next(), allLanguages, allOntologies, allProtocols));
        }
    }

    return dfds;
}
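
Note that the example above pairs setMaxRows with setFetchSize, as several other examples on this page do. The two calls are different: setMaxRows is a hard cap on how many rows any ResultSet produced by the Statement may contain (excess rows are silently dropped), while setFetchSize is only a hint about how many rows the driver should fetch per round trip. A small sketch of that distinction, assuming the usual java.sql and java.util imports; the query, table, and column names are placeholders:

static List<String> loadNames(Statement stmt, int maxResult) throws SQLException {
    List<String> names = new ArrayList<>();
    stmt.setMaxRows(maxResult);   // hard cap: no ResultSet from this Statement exceeds maxResult rows
    stmt.setFetchSize(maxResult); // hint only: suggested number of rows per fetch round trip
    try (ResultSet rs = stmt.executeQuery("SELECT name FROM agents")) {
        while (rs.next()) {
            names.add(rs.getString("name"));
        }
    }
    return names;
}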

From source file:com.streamsets.pipeline.stage.origin.jdbc.JdbcSource.java

@Override
public String produce(String lastSourceOffset, int maxBatchSize, BatchMaker batchMaker) throws StageException {
    int batchSize = Math.min(this.commonSourceConfigBean.maxBatchSize, maxBatchSize);
    String nextSourceOffset = lastSourceOffset == null ? initialOffset : lastSourceOffset;

    long now = System.currentTimeMillis();
    long delay = Math.max(0, (lastQueryCompletedTime + queryIntervalMillis) - now);

    if (delay > 0) {
        // Sleep in one second increments so we don't tie up the app.
        LOG.debug("{}ms remaining until next fetch.", delay);
        ThreadUtil.sleep(Math.min(delay, 1000));
    } else {
        Statement statement = null;
        Hasher hasher = HF.newHasher();
        try {
            if (null == resultSet || resultSet.isClosed()) {
                // The result set got closed outside of us, so we also clean up the connection (if any)
                closeQuietly(connection);

                connection = dataSource.getConnection();

                if (!txnColumnName.isEmpty()) {
                    // CDC requires scrollable cursors.
                    statement = connection.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
                            ResultSet.CONCUR_READ_ONLY);
                } else {
                    statement = connection.createStatement(ResultSet.TYPE_FORWARD_ONLY,
                            ResultSet.CONCUR_READ_ONLY);
                }

                int fetchSize = batchSize;
                // MySQL does not support cursors or fetch size except 0 and "streaming" (1 at a time).
                if (hikariConfigBean.getConnectionString().toLowerCase().contains("mysql")) {
                    // Enable MySQL streaming mode.
                    fetchSize = Integer.MIN_VALUE;
                }
                LOG.debug("Using query fetch size: {}", fetchSize);
                statement.setFetchSize(fetchSize);

                if (getContext().isPreview()) {
                    statement.setMaxRows(batchSize);
                }
                preparedQuery = prepareQuery(query, lastSourceOffset);
                LOG.trace("Executing query: " + preparedQuery);
                hashedQuery = hasher.putString(preparedQuery, Charsets.UTF_8).hash().toString();
                LOG.debug("Executing query: " + hashedQuery);
                resultSet = statement.executeQuery(preparedQuery);
                queryRowCount = 0;
                numQueryErrors = 0;
                firstQueryException = null;
            }

            // Read Data and track last offset
            int rowCount = 0;
            String lastTransactionId = "";
            boolean haveNext = true;
            while (continueReading(rowCount, batchSize) && (haveNext = resultSet.next())) {
                final Record record = processRow(resultSet, rowCount);

                if (null != record) {
                    if (!txnColumnName.isEmpty()) {
                        String newTransactionId = resultSet.getString(txnColumnName);
                        if (lastTransactionId.isEmpty()) {
                            lastTransactionId = newTransactionId;
                            batchMaker.addRecord(record);
                        } else if (lastTransactionId.equals(newTransactionId)) {
                            batchMaker.addRecord(record);
                        } else {
                            // The Transaction ID Column Name config should not be used with MySQL as it
                            // does not provide a change log table and the JDBC driver may not support scrollable cursors.
                            resultSet.relative(-1);
                            break; // Complete this batch without including the new record.
                        }
                    } else {
                        batchMaker.addRecord(record);
                    }
                }

                // Get the offset column value for this record
                if (isIncrementalMode) {
                    nextSourceOffset = resultSet.getString(offsetColumn);
                } else {
                    nextSourceOffset = initialOffset;
                }
                ++rowCount;
                ++queryRowCount;
                ++noMoreDataRecordCount;
                shouldFire = true;
            }
            LOG.debug("Processed rows: " + rowCount);

            if (!haveNext || rowCount == 0) {
                // We didn't have any data left in the cursor. Close everything
                // We may not have the statement here if we're not producing the
                // same batch as when we got it, so get it from the result set
                // Get it before we close the result set, just to be safe!
                statement = resultSet.getStatement();
                closeQuietly(resultSet);
                closeQuietly(statement);
                closeQuietly(connection);
                lastQueryCompletedTime = System.currentTimeMillis();
                LOG.debug("Query completed at: {}", lastQueryCompletedTime);
                QUERY_SUCCESS.create(getContext()).with(QUERY, preparedQuery)
                        .with(TIMESTAMP, lastQueryCompletedTime).with(ROW_COUNT, queryRowCount)
                        .with(SOURCE_OFFSET, nextSourceOffset).createAndSend();

                // In case of non-incremental mode, we need to generate no-more-data event as soon as we hit end of the
                // result set. Incremental mode will try to run the query again and generate the event if and only if
                // the next query results in zero rows.
                if (!isIncrementalMode) {
                    generateNoMoreDataEvent();
                }
            }

            /*
             * We want to generate no-more data event on next batch if:
             * 1) We run a query in this batch and returned empty.
             * 2) We consumed at least some data since last time (to not generate the event all the time)
             */

            if (isIncrementalMode && rowCount == 0 && !haveNext && shouldFire && !firstTime) {
                generateNoMoreDataEvent();
                shouldFire = false;
            }
            firstTime = false;

        } catch (SQLException e) {
            if (++numQueryErrors == 1) {
                firstQueryException = e;
            }
            String formattedError = jdbcUtil.formatSqlException(e);
            LOG.error(formattedError, e);
            if (resultSet != null) {
                try {
                    statement = resultSet.getStatement();
                } catch (SQLException e1) {
                    LOG.debug("Error while getting statement from result set: {}", e1.toString(), e1);
                }
                closeQuietly(resultSet);
                closeQuietly(statement);
            }
            closeQuietly(connection);
            lastQueryCompletedTime = System.currentTimeMillis();
            QUERY_FAILURE.create(getContext()).with(QUERY, preparedQuery)
                    .with(TIMESTAMP, lastQueryCompletedTime).with(ERROR, formattedError)
                    .with(ROW_COUNT, queryRowCount).with(SOURCE_OFFSET, nextSourceOffset).createAndSend();
            LOG.debug("Query '{}' failed at: {}; {} errors so far", preparedQuery, lastQueryCompletedTime,
                    numQueryErrors);
            if (numQueryErrors > commonSourceConfigBean.numSQLErrorRetries) {
                throw new StageException(JdbcErrors.JDBC_77, e.getClass().getSimpleName(), preparedQuery,
                        numQueryErrors, jdbcUtil.formatSqlException(firstQueryException));
            } // else allow nextSourceOffset to be returned, to retry
        }
    }
    return nextSourceOffset;
}

From source file:com.hangum.tadpole.rdb.core.editors.main.composite.ResultSetComposite.java

/**
 * Execute a SELECT statement and return the query result.
 *
 * @param reqQuery the query request to execute
 * @param queryTimeOut query timeout in seconds
 * @param strUserEmail e-mail of the requesting user, used to look up the transaction connection
 * @param intSelectLimitCnt maximum number of rows to fetch
 * @param intStartCnt index of the first row to fetch (0 to start from the beginning)
 * @return the query execution result
 * @throws Exception
 */
public QueryExecuteResultDTO runSelect(final RequestQuery reqQuery, final int queryTimeOut,
        final String strUserEmail, final int intSelectLimitCnt, final int intStartCnt) throws Exception {
    String strSQL = reqQuery.getSql();
    if (!PermissionChecker.isExecute(getDbUserRoleType(), getUserDB(), strSQL)) {
        throw new Exception(Messages.get().MainEditor_21);
    }
    if (logger.isDebugEnabled())
        logger.debug("==> real execute query : " + strSQL);

    tadpole_system_message = "";
    QueryExecuteResultDTO queryResultDAO = null;

    // Give the registered editor extensions a chance to customize the SQL before it is executed.
    IMainEditorExtension[] extensions = getRdbResultComposite().getMainEditor().getMainEditorExtions();
    if (extensions != null) {
        for (IMainEditorExtension iMainEditorExtension : extensions) {
            String strCostumSQL = iMainEditorExtension.sqlCostume(strSQL);
            if (!strCostumSQL.equals(strSQL)) {
                if (logger.isDebugEnabled())
                    logger.debug("** extension costume sql is : " + strCostumSQL); //$NON-NLS-1$
                strSQL = strCostumSQL;
            }
        }
    }

    // Execute the (possibly customized) SQL and build the result DTO.
    ResultSet resultSet = null;
    java.sql.Connection javaConn = null;
    Statement statement = null;
    PreparedStatement preparedStatement = null;

    try {
        if (DBGroupDefine.TAJO_GROUP == getUserDB().getDBGroup()) {
            javaConn = ConnectionPoolManager.getDataSource(getUserDB()).getConnection();
        } else {
            if (reqQuery.isAutoCommit()) {
                SqlMapClient client = TadpoleSQLManager.getInstance(getUserDB());
                javaConn = client.getDataSource().getConnection();
            } else {
                javaConn = TadpoleSQLTransactionManager.getInstance(strUserEmail, getUserDB());
            }
        }
        if (javaConn == null) {
            throw new Exception("Cann't create session. Please check system.");
        }

        // if statement type is prepared statement?
        if (reqQuery.getSqlStatementType() == SQL_STATEMENT_TYPE.NONE) {
            statement = javaConn.createStatement();

            statement.setFetchSize(intSelectLimitCnt);
            if (DBGroupDefine.HIVE_GROUP != getUserDB().getDBGroup()) {
                statement.setQueryTimeout(queryTimeOut);
                statement.setMaxRows(intSelectLimitCnt);
            }

            // check stop thread
            esCheckStop = Executors.newSingleThreadExecutor();
            CheckStopThread cst = new CheckStopThread(statement);
            cst.setName("TDB Query Stop checker"); //$NON-NLS-1$
            esCheckStop.execute(cst);

            // execute query
            execServiceQuery = Executors.newSingleThreadExecutor();
            if (intStartCnt == 0) {
                resultSet = _runSQLSelect(statement, strSQL);
            } else {
                strSQL = PartQueryUtil.makeSelect(getUserDB(), strSQL, intStartCnt, intSelectLimitCnt);

                if (logger.isDebugEnabled())
                    logger.debug("part sql called : " + strSQL);
                resultSet = _runSQLSelect(statement, strSQL);
            }

        } else if (reqQuery.getSqlStatementType() == SQL_STATEMENT_TYPE.PREPARED_STATEMENT) {
            preparedStatement = javaConn.prepareStatement(strSQL);

            preparedStatement.setFetchSize(intSelectLimitCnt);
            if (DBGroupDefine.HIVE_GROUP != getUserDB().getDBGroup()) {
                preparedStatement.setQueryTimeout(queryTimeOut);
                preparedStatement.setMaxRows(intSelectLimitCnt);
            }

            // check stop thread
            esCheckStop = Executors.newSingleThreadExecutor();
            CheckStopThread cst = new CheckStopThread(preparedStatement);
            cst.setName("TDB Query Stop checker"); //$NON-NLS-1$
            esCheckStop.execute(cst);

            // execute query
            execServiceQuery = Executors.newSingleThreadExecutor();
            if (intStartCnt == 0) {
                resultSet = _runSQLSelect(preparedStatement, reqQuery.getStatementParameter());
            } else {
                strSQL = PartQueryUtil.makeSelect(getUserDB(), strSQL, intStartCnt, intSelectLimitCnt);

                if (logger.isDebugEnabled())
                    logger.debug("part sql called : " + strSQL);
                resultSet = _runSQLSelect(preparedStatement, reqQuery.getStatementParameter());
            }
        }

        queryResultDAO = new QueryExecuteResultDTO(getUserDB(), reqQuery.getSql(), true, resultSet,
                intSelectLimitCnt, intStartCnt);
        if (resultSet == null) {
            if (StringUtils.isEmpty(StringUtils.deleteWhitespace(tadpole_system_message))) {
                tadpole_system_message = CMD_COMPLETE_MSG;
            }

        }
        queryResultDAO.setQueryMsg(tadpole_system_message);

    } catch (Exception e) {
        throw e;
    } finally {
        isCheckRunning = false;
        try {
            if (preparedStatement != null)
                preparedStatement.close();
        } catch (Exception e) {
        }
        try {
            if (statement != null)
                statement.close();
        } catch (Exception e) {
        }
        try {
            if (resultSet != null)
                resultSet.close();
        } catch (Exception e) {
        }

        if (reqQuery.isAutoCommit()) {
            try {
                if (javaConn != null)
                    javaConn.close();
            } catch (Exception e) {
            }
        }
    }

    return queryResultDAO;
}

From source file:helma.objectmodel.db.NodeManager.java

/**
 *  Loads subnodes via subnode relation. This is similar to getNodeIDs, but it
 *  actually loads all nodes in one go, which is better for small node collections.
 *  This method is used when xxx.loadmode=aggressive is specified.
 */
public List getNodes(Node home, Relation rel) throws Exception {
    // This does not apply for groupby nodes - use getNodeIDs instead
    assert rel.groupby == null;

    if ((rel == null) || (rel.otherType == null) || !rel.otherType.isRelational()) {
        // this should never be called for embedded nodes
        throw new RuntimeException("getNodes called for non-relational node " + home);
    }

    List retval = new ArrayList();
    DbMapping dbm = rel.otherType;

    Connection con = dbm.getConnection();
    // set connection to read-only mode
    if (!con.isReadOnly())
        con.setReadOnly(true);

    Statement stmt = con.createStatement();
    DbColumn[] columns = dbm.getColumns();
    Relation[] joins = dbm.getJoins();
    String query = null;
    long logTimeStart = logSql ? System.currentTimeMillis() : 0;

    try {
        StringBuffer b = dbm.getSelect(rel);

        if (home.getSubnodeRelation() != null) {
            b.append(home.getSubnodeRelation());
        } else {
            // let relation object build the query
            rel.buildQuery(b, home, true, false);
        }

        query = b.toString();

        if (rel.maxSize > 0) {
            stmt.setMaxRows(rel.maxSize);
        }

        ResultSet rs = stmt.executeQuery(query);

        while (rs.next()) {
            // create new Nodes.
            Node node = createNode(rel.otherType, rs, columns, 0);
            if (node == null) {
                continue;
            }
            Key primKey = node.getKey();

            retval.add(new NodeHandle(primKey));

            registerNewNode(node, null);

            fetchJoinedNodes(rs, joins, columns.length);
        }

    } finally {
        if (logSql) {
            long logTimeStop = System.currentTimeMillis();
            logSqlStatement("SQL SELECT_ALL", dbm.getTableName(), logTimeStart, logTimeStop, query);
        }
        if (stmt != null) {
            try {
                stmt.close();
            } catch (Exception ignore) {
            }
        }
    }

    return retval;
}

From source file:helma.objectmodel.db.NodeManager.java

/**
 *  Loads subnodes via subnode relation. Only the ID index is loaded; the nodes are
 *  loaded later on demand.
 */
public List getNodeIDs(Node home, Relation rel) throws Exception {
    DbMapping type = rel == null ? null : rel.otherType;
    if (type == null || !type.isRelational()) {
        // this should never be called for embedded nodes
        throw new RuntimeException("getNodeIDs called for non-relational node " + home);
    }
    List retval = new ArrayList();

    // if we do a groupby query (creating an intermediate layer of groupby nodes),
    // retrieve the value of that field instead of the primary key
    Connection con = type.getConnection();
    // set connection to read-only mode
    if (!con.isReadOnly())
        con.setReadOnly(true);

    Statement stmt = null;
    long logTimeStart = logSql ? System.currentTimeMillis() : 0;
    String query = null;

    try {
        StringBuffer b = rel.getIdSelect();

        if (home.getSubnodeRelation() != null) {
            // subnode relation was explicitly set
            query = b.append(" ").append(home.getSubnodeRelation()).toString();
        } else {
            // let relation object build the query
            rel.buildQuery(b, home, true, false);
            query = b.toString();
        }

        stmt = con.createStatement();

        if (rel.maxSize > 0) {
            stmt.setMaxRows(rel.maxSize);
        }

        ResultSet result = stmt.executeQuery(query);

        // problem: how do we derive a SyntheticKey from a not-yet-persistent Node?
        Key k = (rel.groupby != null) ? home.getKey() : null;

        while (result.next()) {
            String kstr = result.getString(1);

            // jump over null values - this can happen especially when the selected
            // column is a group-by column.
            if (kstr == null) {
                continue;
            }

            // make the proper key for the object, either a generic DB key or a groupby key
            Key key = (rel.groupby == null) ? (Key) new DbKey(rel.otherType, kstr)
                    : (Key) new SyntheticKey(k, kstr);
            retval.add(new NodeHandle(key));

            // if these are groupby nodes, evict nullNode keys
            if (rel.groupby != null) {
                Node n = (Node) cache.get(key);

                if ((n != null) && n.isNullNode()) {
                    evictKey(key);
                }
            }
        }
    } finally {
        if (logSql) {
            long logTimeStop = System.currentTimeMillis();
            logSqlStatement("SQL SELECT_IDS", type.getTableName(), logTimeStart, logTimeStop, query);
        }
        if (stmt != null) {
            try {
                stmt.close();
            } catch (Exception ignore) {
            }
        }
    }

    return retval;
}

From source file:com.baifendian.swordfish.execserver.engine.hive.HiveSqlExec.java

/**
 * Execute a list of SQL statements. Statements recognized as queries or SHOW-like
 * statements are run with executeQuery and their results collected; everything else
 * is run with execute.
 *
 * @param createFuncs SQL statements that create the functions required by the job
 * @param sqls the SQL statements to execute
 * @param isContinue whether to continue with the remaining statements when one fails
 * @param resultCallback callback invoked with the result of each statement
 * @param queryLimit maximum number of rows to fetch for a query (falls back to the default limit when null)
 * @param remainTime remaining time budget for the execution; if not positive, nothing is executed
 */
public boolean execute(List<String> createFuncs, List<String> sqls, boolean isContinue,
        ResultCallback resultCallback, Integer queryLimit, int remainTime) {

    // No time budget left; give up immediately.
    if (remainTime <= 0) {
        return false;
    }

    // Fall back to the default query limit if none was supplied.
    queryLimit = (queryLimit != null) ? queryLimit : defaultQueryLimit;

    HiveConnection hiveConnection = null;
    Statement sta = null;
    Thread logThread = null;

    // Look up the HiveServer2 connection information for this user.
    HiveService2ConnectionInfo hiveService2ConnectionInfo = hiveUtil.getHiveService2ConnectionInfo(userName);

    logger.info("execution connection information:{}", hiveService2ConnectionInfo);

    HiveService2Client hiveService2Client = hiveUtil.getHiveService2Client();

    try {
        try {
            hiveConnection = hiveService2Client.borrowClient(hiveService2ConnectionInfo);

            sta = hiveConnection.createStatement();
            //        sta.setQueryTimeout(remainTime);

            // Start a daemon thread that streams the Hive job logs for this statement.
            logThread = new Thread(new JdbcLogRunnable(sta));
            logThread.setDaemon(true);
            logThread.start();

            // set queue
            if (queueSQL != null) {
                logger.info("hive queue : {}", queueSQL);
                sta.execute(queueSQL);
            }

            // Create the user-defined functions required by the job.
            if (createFuncs != null) {
                for (String createFunc : createFuncs) {
                    logger.info("hive create function sql: {}", createFunc);
                    sta.execute(createFunc);
                }
            }
        } catch (Exception e) {
            logger.error("execute query exception", e);

            // Connection or setup failed: mark every statement as failed and stop.
            handlerResults(0, sqls, FlowStatus.FAILED, resultCallback);

            return false;
        }

        // Execute the SQL statements one by one.
        for (int index = 0; index < sqls.size(); ++index) {
            String sql = sqls.get(index);

            Date startTime = new Date();

            logger.info("hive execute sql: {}", sql);

            ExecResult execResult = new ExecResult();
            execResult.setIndex(index);
            execResult.setStm(sql);

            try {
                // Queries and SHOW-like statements are run with executeQuery so their results can be collected.
                if (HiveUtil.isTokQuery(sql) || HiveUtil.isLikeShowStm(sql)) {
                    sta.setMaxRows(queryLimit);
                    ResultSet res = sta.executeQuery(sql);

                    ResultSetMetaData resultSetMetaData = res.getMetaData();
                    int count = resultSetMetaData.getColumnCount();

                    List<String> colums = new ArrayList<>();
                    for (int i = 1; i <= count; i++) {
                        colums.add(resultSetMetaData.getColumnLabel(
                                i)/*parseColumnName(resultSetMetaData.getColumnLabel(i), colums)*/);
                    }

                    execResult.setTitles(colums);

                    List<List<String>> datas = new ArrayList<>();

                    // More than one column, or a real query: collect every row as a list of column values.
                    if (count > 1 || HiveUtil.isTokQuery(sql)) {
                        while (res.next()) {
                            List<String> values = new ArrayList<>();
                            for (int i = 1; i <= count; ++i) {
                                values.add(res.getString(i));
                            }

                            datas.add(values);
                        }
                    } else {
                        StringBuffer buffer = new StringBuffer();

                        while (res.next()) {
                            buffer.append(res.getString(1));
                            buffer.append("\n");
                        }

                        List<String> values = new ArrayList<>();
                        values.add(buffer.toString().trim());

                        datas.add(values);
                    }

                    execResult.setValues(datas);
                } else {
                    sta.execute(sql);
                }

                // Mark the statement as successful.
                execResult.setStatus(FlowStatus.SUCCESS);

                // Report the result through the callback.
                if (resultCallback != null) {
                    Date endTime = new Date();
                    resultCallback.handleResult(execResult, startTime, endTime);
                }
            } catch (SQLTimeoutException e) {
                // The SQL statement timed out.
                logger.error("executeQuery timeout exception", e);

                handlerResults(index, sqls, FlowStatus.FAILED, resultCallback);
                return false;
            } catch (DaoSemanticException | HiveSQLException e) {
                // Semantic or Hive SQL error.
                logger.error("executeQuery exception", e);

                if (isContinue) {
                    handlerResult(index, sql, FlowStatus.FAILED, resultCallback);
                } else {
                    handlerResults(index, sqls, FlowStatus.FAILED, resultCallback);
                    return false;
                }
            } catch (Exception e) {
                // Thrift transport error: the connection to HiveServer2 is broken.
                if (e.toString().contains("TTransportException")) {
                    logger.error("Get TTransportException return a client", e);
                    // The broken client could be invalidated here (call currently disabled).
                    //            hiveService2Client.invalidateObject(hiveService2ConnectionInfo, hiveConnection);
                    handlerResults(index, sqls, FlowStatus.FAILED, resultCallback);
                    return false;
                }

                // Socket error: clear the connection pool and fail.
                if (e.toString().contains("SocketException")) {
                    logger.error("SocketException clear pool", e);
                    hiveService2Client.clear();
                    handlerResults(index, sqls, FlowStatus.FAILED, resultCallback);
                    return false;
                }

                logger.error("executeQuery exception", e);

                if (isContinue) {
                    handlerResult(index, sql, FlowStatus.FAILED, resultCallback);
                } else {
                    handlerResults(index, sqls, FlowStatus.FAILED, resultCallback);
                    return false;
                }
            }
        }
    } finally {
        // Close the statement.
        try {
            if (sta != null) {
                sta.close();
            }
        } catch (Exception e) {
            logger.error("Catch an exception", e);
        }

        try {
            // Close the Hive connection and return the borrowed client to the pool.
            if (hiveConnection != null) {
                hiveConnection.close();

                // Return the client so it can be reused.
                hiveService2Client.returnClient(hiveService2ConnectionInfo, hiveConnection);
            }
        } catch (Exception e) {
            logger.error("Catch an exception", e);
        }

        // Stop the log streaming thread.
        try {
            if (logThread != null) {
                logThread.interrupt();
                logThread.join(HiveUtil.DEFAULT_QUERY_PROGRESS_THREAD_TIMEOUT);
            }
        } catch (Exception e) {
            //        logger.error("Catch an exception", e);
        }
    }

    return true;
}

From source file:axiom.objectmodel.db.NodeManager.java

/**
 *  Loads subnodes via subnode relation. This is similar to getNodeIDs, but it
 *  actually loads all nodes in one go, which is better for small node collections.
 *  This method is used when xxx.loadmode=aggressive is specified.
 */
public Collection<NodeHandle> getNodes(Node home, Relation rel) throws Exception {
    // This does not apply for groupby nodes - use getNodeIDs instead
    if (rel.groupby != null) {
        return getNodeIDs(home, rel);
    }

    // Transactor tx = (Transactor) Thread.currentThread ();
    // tx.timer.beginEvent ("getNodes "+home);
    if ((rel == null) || (rel.otherType == null) || !rel.otherType.isRelational()) {
        // this should never be called for embedded nodes
        throw new RuntimeException("NodeMgr.getNodes called for non-relational node " + home);
    } else {
        Collection<NodeHandle> retval = home.createSubnodeList();
        DbMapping dbm = rel.otherType;

        Connection con = dbm.getConnection();
        // set connection to read-only mode
        if (!con.isReadOnly())
            con.setReadOnly(true);

        Statement stmt = con.createStatement();
        DbColumn[] columns = dbm.getColumns();
        Relation[] joins = dbm.getJoins();
        String query = null;
        long logTimeStart = logSql ? System.currentTimeMillis() : 0;

        try {
            StringBuffer b = dbm.getSelect(rel);

            if (home.getSubnodeRelation() != null) {
                b.append(home.getSubnodeRelation());
            } else {
                // let relation object build the query
                b.append(rel.buildQuery(home, home.getNonVirtualParent(), null, " WHERE ", true));
            }

            query = b.toString();

            if (rel.maxSize > 0) {
                stmt.setMaxRows(rel.maxSize);
            }

            ResultSet rs = stmt.executeQuery(query);

            while (rs.next()) {
                // create new Nodes.
                Node node = createNode(rel.otherType, rs, columns, 0);
                if (node == null) {
                    continue;
                }
                Key primKey = node.getKey();

                if (retval instanceof SubnodeList) {
                    ((SubnodeList) retval).addSorted(new NodeHandle(primKey));
                } else {
                    retval.add(new NodeHandle(primKey));
                }

                // do we need to synchronize on primKey here?
                synchronized (cache) {
                    Node oldnode = (Node) cache.put(primKey, node);

                    if ((oldnode != null) && (oldnode.getState() != INode.INVALID)) {
                        cache.put(primKey, oldnode);
                    }
                }

                fetchJoinedNodes(rs, joins, columns.length);
            }

        } finally {
            if (logSql) {
                long logTimeStop = System.currentTimeMillis();
                logSqlStatement("SQL SELECT_ALL", dbm.getTableName(), logTimeStart, logTimeStop, query);
            }
            if (stmt != null) {
                try {
                    stmt.close();
                } catch (Exception ignore) {
                    app.logEvent(ignore.getMessage());
                }
            }
        }

        return retval;
    }
}

From source file:axiom.objectmodel.db.NodeManager.java

/**
 *  Loads subnodes via subnode relation. Only the ID index is loaded; the nodes are
 *  loaded later on demand.
 */
public Collection<NodeHandle> getNodeIDs(Node home, Relation rel) throws Exception {
    // Transactor tx = (Transactor) Thread.currentThread ();
    // tx.timer.beginEvent ("getNodeIDs "+home);

    if ((rel == null) || (rel.otherType == null) || !rel.otherType.isRelational()) {
        // this should never be called for embedded nodes
        throw new RuntimeException("NodeMgr.getNodeIDs called for non-relational node " + home);
    } else {
        Collection<NodeHandle> retval = home.createSubnodeList();

        // if we do a groupby query (creating an intermediate layer of groupby nodes),
        // retrieve the value of that field instead of the primary key
        String idfield = (rel.groupby == null) ? rel.otherType.getIDField() : rel.groupby;
        Connection con = rel.otherType.getConnection();
        // set connection to read-only mode
        if (!con.isReadOnly())
            con.setReadOnly(true);

        String table = rel.otherType.getTableName();

        Statement stmt = null;
        long logTimeStart = logSql ? System.currentTimeMillis() : 0;
        String query = null;

        try {
            StringBuffer b = new StringBuffer("SELECT ");

            if (rel.queryHints != null) {
                b.append(rel.queryHints).append(" ");
            }

            /*if (idfield.indexOf('(') == -1 && idfield.indexOf('.') == -1) {
            b.append(table).append('.');
            }*/
            b/*.append(rel.otherType.getTableName(0)).append(".")*/.append(idfield).append(" FROM ")
                    .append(table);

            rel.appendAdditionalTables(b);

            if (home.getSubnodeRelation() != null) {
                // subnode relation was explicitly set
                query = b.append(" ").append(home.getSubnodeRelation()).toString();
            } else {
                // let relation object build the query
                query = b.append(rel.buildQuery(home, home.getNonVirtualParent(), null, " WHERE ", true))
                        .toString();
            }

            stmt = con.createStatement();
            int primary = 1;
            if (query.indexOf("WHERE") > -1) {
                primary = 0;
            }
            query += rel.otherType.getTableJoinClause(primary);

            if (rel.maxSize > 0) {
                stmt.setMaxRows(rel.maxSize);
            }

            ResultSet result = stmt.executeQuery(query);

            // problem: how do we derive a SyntheticKey from a not-yet-persistent Node?
            Key k = (rel.groupby != null) ? home.getKey() : null;

            while (result.next()) {
                String kstr = result.getString(1);

                // jump over null values - this can happen especially when the selected
                // column is a group-by column.
                if (kstr == null) {
                    continue;
                }

                // make the proper key for the object, either a generic DB key or a groupby key
                Key key = (rel.groupby == null)
                        ? (Key) new DbKey(rel.otherType, kstr, this.app.getCurrentRequestEvaluator().getLayer())
                        : (Key) new SyntheticKey(k, kstr);

                if (retval instanceof SubnodeList) {
                    ((SubnodeList) retval).addSorted(new NodeHandle(key));
                } else {
                    retval.add(new NodeHandle(key));
                }

                // if these are groupby nodes, evict nullNode keys
                if (rel.groupby != null) {
                    Node n = this.getNodeFromCache(key);

                    if ((n != null) && n.isNullNode()) {
                        evictKey(key);
                    }
                }
            }
        } finally {
            if (logSql) {
                long logTimeStop = System.currentTimeMillis();
                logSqlStatement("SQL SELECT_IDS", table, logTimeStart, logTimeStop, query);
            }
            if (stmt != null) {
                try {
                    stmt.close();
                } catch (Exception ignore) {
                }
            }
        }

        return retval;
    }
}

From source file:axiom.objectmodel.db.NodeManager.java

/**
 * Update an UpdateableSubnodeList, retrieving all values ranked higher
 * according to the update criteria set for this Collection's Relation.
 * Note that the number of records returned by the select statement and the number of
 * nodes actually added to the collection may differ if a max size is defined for this
 * collection and a new node would fall outside that limit because of the collection's ordering.
 * @param home the home of this subnode list
 * @param rel the relation the home node has to the nodes contained in the subnode list
 * @return the number of nodes retrieved by the select statement
 * @throws Exception
 */
public int updateSubnodeList(Node home, Relation rel) throws Exception {
    if ((rel == null) || (rel.otherType == null) || !rel.otherType.isRelational()) {
        // this should never be called for embedded nodes
        throw new RuntimeException("NodeMgr.updateSubnodeList called for non-relational node " + home);
    } else {

        Collection<NodeHandle> list = home.getSubnodeList();
        if (list == null)
            list = home.createSubnodeList();

        if (!(list instanceof UpdateableSubnodeList))
            throw new RuntimeException(
                    "unable to update SubnodeList not marked as updateable (" + rel.propName + ")");

        UpdateableSubnodeList sublist = (UpdateableSubnodeList) list;

        // FIXME: grouped subnodes aren't supported yet
        if (rel.groupby != null)
            throw new RuntimeException("update not yet supported on grouped collections");

        String idfield = rel.otherType.getIDField();
        Connection con = rel.otherType.getConnection();
        String table = rel.otherType.getTableName();

        Statement stmt = null;

        try {
            String q = null;

            StringBuffer b = new StringBuffer();
            if (rel.loadAggressively()) {
                b.append(rel.otherType.getSelect(rel));
            } else {
                b.append("SELECT ");
                if (rel.queryHints != null) {
                    b.append(rel.queryHints).append(" ");
                }
                b/*.append(table).append('.')*/
                        .append(idfield).append(" FROM ").append(table);

                rel.appendAdditionalTables(b);
            }
            String updateCriteria = sublist.getUpdateCriteria();
            if (home.getSubnodeRelation() != null) {
                if (updateCriteria != null) {
                    b.append(" WHERE ");
                    b.append(sublist.getUpdateCriteria());
                    b.append(" AND ");
                    b.append(home.getSubnodeRelation());
                } else {
                    b.append(" WHERE ");
                    b.append(home.getSubnodeRelation());
                }
            } else {
                if (updateCriteria != null) {
                    b.append(" WHERE ");
                    b.append(updateCriteria);
                    b.append(rel.buildQuery(home, home.getNonVirtualParent(), null, " AND ", true));
                } else {
                    b.append(rel.buildQuery(home, home.getNonVirtualParent(), null, " WHERE ", true));
                }
            }
            q = b.toString();

            long logTimeStart = logSql ? System.currentTimeMillis() : 0;

            stmt = con.createStatement();

            if (rel.maxSize > 0) {
                stmt.setMaxRows(rel.maxSize);
            }

            ResultSet result = stmt.executeQuery(q);

            if (logSql) {
                long logTimeStop = System.currentTimeMillis();
                logSqlStatement("SQL SELECT_UPDATE_SUBNODE_LIST", table, logTimeStart, logTimeStop, q);
            }

            // problem: how do we derive a SyntheticKey from a not-yet-persistent Node?
            // Key k = (rel.groupby != null) ? home.getKey() : null;
            // int cntr = 0;

            DbColumn[] columns = rel.loadAggressively() ? rel.otherType.getColumns() : null;
            List newNodes = new ArrayList(rel.maxSize);
            while (result.next()) {
                String kstr = result.getString(1);

                // jump over null values - this can happen especially when the selected
                // column is a group-by column.
                if (kstr == null) {
                    continue;
                }

                // make the proper key for the object, either a generic DB key or a groupby key
                Key key;
                if (rel.loadAggressively()) {
                    Node node = createNode(rel.otherType, result, columns, 0);
                    if (node == null) {
                        continue;
                    }
                    key = node.getKey();
                    synchronized (cache) {
                        Node oldnode = (Node) cache.put(key, node);
                        if ((oldnode != null) && (oldnode.getState() != INode.INVALID)) {
                            cache.put(key, oldnode);
                        }
                    }
                } else {
                    key = new DbKey(rel.otherType, kstr, this.app.getCurrentRequestEvaluator().getLayer());
                }
                newNodes.add(new NodeHandle(key));

                // if these are groupby nodes, evict nullNode keys
                if (rel.groupby != null) {
                    Node n = this.getNodeFromCache(key);

                    if ((n != null) && n.isNullNode()) {
                        evictKey(key);
                    }
                }
            }
            // System.err.println("GOT NEW NODES: " + newNodes);
            if (!newNodes.isEmpty())
                sublist.addAll(newNodes);
            return newNodes.size();
        } finally {
            if (stmt != null) {
                try {
                    stmt.close();
                } catch (Exception ignore) {
                    app.logEvent(ignore.getMessage());
                }
            }
        }
    }
}

From source file:org.apache.calcite.avatica.jdbc.JdbcMeta.java

public ExecuteResult prepareAndExecute(StatementHandle h, String sql, long maxRowCount,
        PrepareCallback callback) {
    try {
        final StatementInfo info = statementCache.getIfPresent(h.id);
        if (info == null) {
            throw new RuntimeException("Statement not found, potentially expired. " + h);
        }
        final Statement statement = info.statement;
        // Special handling of maxRowCount: in JDBC, 0 means unlimited, while in our metadata layer 0 means zero rows
        if (maxRowCount > 0) {
            AvaticaUtils.setLargeMaxRows(statement, maxRowCount);
        } else if (maxRowCount < 0) {
            statement.setMaxRows(0);
        }
        boolean ret = statement.execute(sql);
        info.resultSet = statement.getResultSet();
        assert ret || info.resultSet == null;
        final List<MetaResultSet> resultSets = new ArrayList<>();
        if (info.resultSet == null) {
            // Create a special result set that just carries update count
            resultSets.add(
                    MetaResultSet.count(h.connectionId, h.id, AvaticaUtils.getLargeUpdateCount(statement)));
        } else {
            resultSets.add(JdbcResultSet.create(h.connectionId, h.id, info.resultSet, maxRowCount));
        }
        if (LOG.isTraceEnabled()) {
            LOG.trace("prepAndExec statement " + h);
        }
        // TODO: review client to ensure statementId is updated when appropriate
        return new ExecuteResult(resultSets);
    } catch (SQLException e) {
        throw propagate(e);
    }
}