Example usage for java.sql PreparedStatement setFetchSize

Introduction

On this page you can find example usage for java.sql PreparedStatement setFetchSize.

Prototype

void setFetchSize(int rows) throws SQLException;

Document

Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for ResultSet objects generated by this Statement.
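
Before looking at the real-world sources below, here is a minimal, self-contained sketch of the typical pattern. The connection URL, table, and column names are illustrative only:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

public class FetchSizeExample {
    public static void main(String[] args) throws SQLException {
        // Illustrative connection details; substitute your own driver and URL.
        try (Connection conn = DriverManager.getConnection("jdbc:postgresql://localhost/demo", "user", "pass")) {
            conn.setAutoCommit(false); // PostgreSQL honors the fetch size only with auto-commit off
            try (PreparedStatement ps = conn.prepareStatement("SELECT id, payload FROM big_table WHERE id > ?")) {
                ps.setLong(1, 0L);
                ps.setFetchSize(500); // hint: pull about 500 rows per network round trip
                try (ResultSet rs = ps.executeQuery()) {
                    while (rs.next()) {
                        System.out.println(rs.getLong("id") + ": " + rs.getString("payload"));
                    }
                }
            }
        }
    }
}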

Usage

From source file:edu.mayo.informatics.lexgrid.convert.directConversions.MetaThesaurusToSQL.java

/**
 * Carry out the mapping of relationships from UMLS RRF-based format to the
 * LexGrid model.
 * 
 * @param assoc the association definition to map
 * @param codingSchemeName the name of the target coding scheme
 * @return The number of association instances added to the LexGrid
 *         repository.
 * @throws SQLException
 */
protected int loadRelationsHelper(Association assoc, String codingSchemeName) throws SQLException {
    // Statement to get count of items to process,
    // qualified by SAB if association is SAB-specific ...
    StringBuffer sb;
    sb = new StringBuffer(256).append("SELECT COUNT(*) AS cnt FROM MRREL WHERE ").append(assoc.rrfField)
            .append(" = ?");
    if (StringUtils.isNotBlank(assoc.rrfSAB))
        sb.append(" AND SAB = ?");
    PreparedStatement getRelationsCount = umlsConnection2_.prepareStatement(sb.toString());

    // Statement to get items to process,
    // qualified by SAB if association is SAB-specific ...
    // Note: results are ordered so the cache can avoid loading duplicates.
    sb = new StringBuffer(256).append("SELECT CUI1, CUI2, AUI1, AUI2, SAB, RG FROM MRREL WHERE ")
            .append(assoc.rrfField).append(" = ?");
    if (StringUtils.isNotBlank(assoc.rrfSAB))
        sb.append(" AND SAB = ?");
    sb.append(" ORDER BY CUI1 {LIMIT}");
    PreparedStatement getRelations = umlsConnection2_
            .prepareStatement(umlsSqlModifier_.modifySQL(sb.toString()));

    int count = 0;
    try {
        // Retrieve and print the count for progress monitoring ...
        int start = 0;
        int total = 0;
        getRelationsCount.setString(1, assoc.rrfName);
        if (StringUtils.isNotBlank(assoc.rrfSAB))
            getRelationsCount.setString(2, assoc.rrfSAB);

        messages_.info("Counting relations for " + assoc.rrfName);
        ResultSet results = getRelationsCount.executeQuery();
        results.next();
        total = results.getInt("cnt");
        results.close();

        // Process matching entries ...
        while (start < total) {
            int param = 0;
            getRelations.setString(++param, assoc.rrfName);
            if (StringUtils.isNotBlank(assoc.rrfSAB))
                getRelations.setString(++param, assoc.rrfSAB);

            // MySQL doesn't stream results; the {LIMIT} placeholder above is
            // expanded here into a LIMIT clause so we can page through the rows.
            if (umlsSqlModifier_.getDatabaseType().equals("MySQL")) {
                getRelations.setInt(++param, start);
                getRelations.setInt(++param, batchSize);
                start += batchSize;
            }
            // PostgreSQL streams results properly, so we can just set the
            // fetch size and loop only once.
            else if (umlsSqlModifier_.getDatabaseType().equals("PostgreSQL")) {
                getRelations.setFetchSize(batchSize);
                umlsConnection2_.setAutoCommit(false);
                start = total;
            } else if (umlsSqlModifier_.getDatabaseType().equals("ACCESS")) {
                //Don't set the fetch size for MS Access
                start = total;
            } else {
                getRelations.setFetchSize(batchSize);
                start = total;
            }

            messages_.info("Getting a batch of relations for " + assoc.rrfName);
            results = getRelations.executeQuery();

            while (results.next()) {
                // If the source association definition is reversed, we need
                // to flip
                // the source and target concepts accordingly.
                boolean isReversed = assoc.rrfSourceDirectionalityReversed;
                String sourceCode = results.getString(isReversed ? "CUI2" : "CUI1");
                String targetCode = results.getString(isReversed ? "CUI1" : "CUI2");
                String sab = results.getString("SAB");

                String aui1 = results.getString("AUI1");
                String aui2 = results.getString("AUI2");
                String roleGroup = results.getString("RG");

                if (sourceCode.equals(targetCode)) {
                    try {
                        insertIntoConceptPropertyLinks(codingSchemeName, SQLTableConstants.ENTITYTYPE_CONCEPT,
                                sourceCode, getPresentationFromAUI(codingSchemeName, sourceCode, aui1),
                                assoc.name, getPresentationFromAUI(codingSchemeName, sourceCode, aui2));

                        supportedPropertyLinks_.add(assoc.name);
                        propertyLinksCounter_++;
                    } catch (Exception e) {
                        // Just in case there are duplicate listings -- do
                        // nothing.
                    }

                } else {

                    boolean result = addConceptAssociationToConceptsHelper(codingSchemeName, sourceCode,
                            assoc.name, targetCode, sab, roleGroup, assoc.rrfName, assoc.rrfInverse, count);

                    if (result) {
                        count++;

                        if (count % 100 == 0)
                            messages_.busy();
                        if (count % 10000 == 0)
                            messages_.info("Loaded " + count + " out of a possible total of " + total);
                    }
                }
            }
            results.close();
        }
        messages_.info("Loaded " + count + " out of a possible total of " + total);
    } finally {
        getRelations.close();
        getRelationsCount.close();
    }
    return count;
}
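
The branching above reflects real driver differences. As a rough guide (exact behavior varies by driver and version): MySQL's Connector/J ignores ordinary fetch-size hints and only streams row by row when the fetch size is Integer.MIN_VALUE, which is why this example pages with LIMIT instead; PostgreSQL honors setFetchSize only when auto-commit is off; and some drivers, like the one used for MS Access here, are best left at their defaults. A minimal sketch of that dispatch, assuming a forward-only, read-only statement:

// Sketch only: the product-name checks and batchSize parameter are illustrative;
// verify the behavior against the driver version you actually use.
static void applyFetchHint(Connection conn, PreparedStatement ps, int batchSize) throws SQLException {
    String product = conn.getMetaData().getDatabaseProductName();
    if ("MySQL".equals(product)) {
        // Connector/J special case: stream one row at a time.
        ps.setFetchSize(Integer.MIN_VALUE);
    } else if ("PostgreSQL".equals(product)) {
        conn.setAutoCommit(false); // required, or the driver fetches the whole result
        ps.setFetchSize(batchSize);
    } else {
        ps.setFetchSize(batchSize); // a hint only; drivers may ignore it
    }
}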

From source file:com.hangum.tadpole.rdb.core.editors.main.composite.ResultSetComposite.java

/**
 * Execute a SELECT query and wrap the results in a QueryExecuteResultDTO.
 * 
 * @param reqQuery the query request
 * @param queryTimeOut query timeout in seconds
 * @param strUserEmail the requesting user's email
 * @param intSelectLimitCnt maximum number of rows to select
 * @param intStartCnt starting row offset
 * @return the query execution result
 * @throws Exception
 */
public QueryExecuteResultDTO runSelect(final RequestQuery reqQuery, final int queryTimeOut,
        final String strUserEmail, final int intSelectLimitCnt, final int intStartCnt) throws Exception {
    String strSQL = reqQuery.getSql();
    if (!PermissionChecker.isExecute(getDbUserRoleType(), getUserDB(), strSQL)) {
        throw new Exception(Messages.get().MainEditor_21);
    }
    if (logger.isDebugEnabled())
        logger.debug("==> real execute query : " + strSQL);

    tadpole_system_message = "";
    QueryExecuteResultDTO queryResultDAO = null;

    // Give editor extensions a chance to rewrite the SQL before execution.
    IMainEditorExtension[] extensions = getRdbResultComposite().getMainEditor().getMainEditorExtions();
    if (extensions != null) {
        for (IMainEditorExtension iMainEditorExtension : extensions) {
            String strCostumSQL = iMainEditorExtension.sqlCostume(strSQL);
            if (!strCostumSQL.equals(strSQL)) {
                if (logger.isDebugEnabled())
                    logger.debug("** extension costume sql is : " + strCostumSQL); //$NON-NLS-1$
                strSQL = strCostumSQL;
            }
        }
    }

    // Acquire a connection and execute the query with the appropriate statement type.
    ResultSet resultSet = null;
    java.sql.Connection javaConn = null;
    Statement statement = null;
    PreparedStatement preparedStatement = null;

    try {
        if (DBGroupDefine.TAJO_GROUP == getUserDB().getDBGroup()) {
            javaConn = ConnectionPoolManager.getDataSource(getUserDB()).getConnection();
        } else {
            if (reqQuery.isAutoCommit()) {
                SqlMapClient client = TadpoleSQLManager.getInstance(getUserDB());
                javaConn = client.getDataSource().getConnection();
            } else {
                javaConn = TadpoleSQLTransactionManager.getInstance(strUserEmail, getUserDB());
            }
        }
        if (javaConn == null) {
            throw new Exception("Can't create session. Please check the system.");
        }

        // Is this a plain Statement or a PreparedStatement?
        if (reqQuery.getSqlStatementType() == SQL_STATEMENT_TYPE.NONE) {
            statement = javaConn.createStatement();

            statement.setFetchSize(intSelectLimitCnt);
            if (DBGroupDefine.HIVE_GROUP != getUserDB().getDBGroup()) {
                statement.setQueryTimeout(queryTimeOut);
                statement.setMaxRows(intSelectLimitCnt);
            }

            // check stop thread
            esCheckStop = Executors.newSingleThreadExecutor();
            CheckStopThread cst = new CheckStopThread(statement);
            cst.setName("TDB Query Stop checker"); //$NON-NLS-1$
            esCheckStop.execute(cst);

            // execute query
            execServiceQuery = Executors.newSingleThreadExecutor();
            if (intStartCnt == 0) {
                resultSet = _runSQLSelect(statement, strSQL);
            } else {
                strSQL = PartQueryUtil.makeSelect(getUserDB(), strSQL, intStartCnt, intSelectLimitCnt);

                if (logger.isDebugEnabled())
                    logger.debug("part sql called : " + strSQL);
                resultSet = _runSQLSelect(statement, strSQL);
            }

        } else if (reqQuery.getSqlStatementType() == SQL_STATEMENT_TYPE.PREPARED_STATEMENT) {
            preparedStatement = javaConn.prepareStatement(strSQL);

            preparedStatement.setFetchSize(intSelectLimitCnt);
            if (DBGroupDefine.HIVE_GROUP != getUserDB().getDBGroup()) {
                preparedStatement.setQueryTimeout(queryTimeOut);
                preparedStatement.setMaxRows(intSelectLimitCnt);
            }

            // check stop thread
            esCheckStop = Executors.newSingleThreadExecutor();
            CheckStopThread cst = new CheckStopThread(preparedStatement);
            cst.setName("TDB Query Stop checker"); //$NON-NLS-1$
            esCheckStop.execute(cst);

            // execute query
            execServiceQuery = Executors.newSingleThreadExecutor();
            if (intStartCnt == 0) {
                resultSet = _runSQLSelect(preparedStatement, reqQuery.getStatementParameter());
            } else {
                strSQL = PartQueryUtil.makeSelect(getUserDB(), strSQL, intStartCnt, intSelectLimitCnt);

                if (logger.isDebugEnabled())
                    logger.debug("part sql called : " + strSQL);
                resultSet = _runSQLSelect(preparedStatement, reqQuery.getStatementParameter());
            }
        }

        queryResultDAO = new QueryExecuteResultDTO(getUserDB(), reqQuery.getSql(), true, resultSet,
                intSelectLimitCnt, intStartCnt);
        if (resultSet == null) {
            if (StringUtils.isEmpty(StringUtils.deleteWhitespace(tadpole_system_message))) {
                tadpole_system_message = CMD_COMPLETE_MSG;
            }

        }
        queryResultDAO.setQueryMsg(tadpole_system_message);

    } catch (Exception e) {
        throw e;
    } finally {
        isCheckRunning = false;
        try {
            if (preparedStatement != null)
                preparedStatement.close();
        } catch (Exception e) {
        }
        try {
            if (statement != null)
                statement.close();
        } catch (Exception e) {
        }
        try {
            if (resultSet != null)
                resultSet.close();
        } catch (Exception e) {
        }

        if (reqQuery.isAutoCommit()) {
            try {
                if (javaConn != null)
                    javaConn.close();
            } catch (Exception e) {
            }
        }
    }

    return queryResultDAO;
}
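
Note how the example sets both setFetchSize and setMaxRows to the same limit. The two calls do different jobs: setMaxRows is a hard cap on how many rows the ResultSet will ever return, while setFetchSize is only a hint about how many rows the driver should pull per network round trip. In sketch form (the numbers are illustrative):

ps.setMaxRows(1000);  // hard cap: the ResultSet silently stops after 1000 rows
ps.setFetchSize(100); // transport hint: roughly 100 rows per round trip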

From source file:com.streamsets.pipeline.stage.origin.jdbc.cdc.oracle.OracleCDCSource.java

private void generateRecords(Offset startingOffset, PreparedStatement selectChanges) {
    // When this is called the first time, Logminer was started either from SCN or from a start date, so we just keep
    // track of the start date etc.
    LOG.info("Attempting to generate records");
    boolean error;
    StringBuilder query = new StringBuilder();
    BigDecimal lastCommitSCN = new BigDecimal(startingOffset.scn);
    int sequenceNumber = startingOffset.sequence;
    LocalDateTime startTime = adjustStartTime(startingOffset.timestamp);
    String lastTxnId = startingOffset.txnId;
    LocalDateTime endTime = getEndTimeForStartTime(startTime);
    ResultSet resultSet = null;
    while (!getContext().isStopped()) {
        error = false;
        generationStarted = true;
        try {
            recordQueue.put(new RecordOffset(dummyRecord,
                    new Offset(version, startTime, lastCommitSCN.toPlainString(), sequenceNumber, lastTxnId)));
            selectChanges = getSelectChangesStatement();
            if (!useLocalBuffering) {
                selectChanges.setBigDecimal(1, lastCommitSCN);
                selectChanges.setInt(2, sequenceNumber);
                selectChanges.setBigDecimal(3, lastCommitSCN);
                if (shouldTrackDDL) {
                    selectChanges.setBigDecimal(4, lastCommitSCN);
                }
            }
            selectChanges.setFetchSize(configBean.jdbcFetchSize);
            resultSet = selectChanges.executeQuery();
            while (resultSet.next() && !getContext().isStopped()) {
                String queryFragment = resultSet.getString(5);
                BigDecimal scnDecimal = resultSet.getBigDecimal(1);
                String scn = scnDecimal.toPlainString();
                String xidUsn = String.valueOf(resultSet.getLong(10));
                String xidSlt = String.valueOf(resultSet.getString(11));
                String xidSqn = String.valueOf(resultSet.getString(12));
                String xid = xidUsn + "." + xidSlt + "." + xidSqn;
                // Query Fragment is not null -> we need to process
                // Query Fragment is null AND the query string buffered from previous rows due to CSF == 0 is null,
                // nothing to do, go to next row
                // Query Fragment is null, but there is previously buffered data in the query, go ahead and process.
                if (queryFragment != null) {
                    query.append(queryFragment);
                } else if (queryFragment == null && query.length() == 0) {
                    LOG.debug(READ_NULL_QUERY_FROM_ORACLE, scn, xid);
                    continue;
                }

                // CSF is 1 if the query is incomplete, so read the next row before parsing
                // CSF being 0 means query is complete, generate the record
                if (resultSet.getInt(9) == 0) {
                    if (query.length() == 0) {
                        LOG.debug(READ_NULL_QUERY_FROM_ORACLE, scn, xid);
                        continue;
                    }
                    String queryString = query.toString();
                    query.setLength(0);
                    String username = resultSet.getString(2);
                    short op = resultSet.getShort(3);
                    String timestamp = resultSet.getString(4);
                    LocalDateTime tsDate = Timestamp.valueOf(timestamp).toLocalDateTime();
                    delay.getValue().put("delay", getDelay(tsDate));
                    String table = resultSet.getString(6);
                    BigDecimal commitSCN = resultSet.getBigDecimal(7);
                    int seq = resultSet.getInt(8);

                    String rsId = resultSet.getString(13);
                    Object ssn = resultSet.getObject(14);
                    String schema = String.valueOf(resultSet.getString(15));
                    int rollback = resultSet.getInt(16);
                    String rowId = resultSet.getString(17);
                    SchemaAndTable schemaAndTable = new SchemaAndTable(schema, table);
                    TransactionIdKey key = new TransactionIdKey(xid);
                    bufferedRecordsLock.lock();
                    try {
                        if (useLocalBuffering && bufferedRecords.containsKey(key) && bufferedRecords.get(key)
                                .contains(new RecordSequence(null, null, 0, 0, rsId, ssn, null))) {
                            continue;
                        }
                    } finally {
                        bufferedRecordsLock.unlock();
                    }
                    Offset offset = null;
                    if (LOG.isDebugEnabled()) {
                        LOG.debug(
                                "Commit SCN = {}, SCN = {}, Operation = {}, Txn Id = {}, Timestamp = {}, Row Id = {}, Redo SQL = {}",
                                commitSCN, scn, op, xid, tsDate, rowId, queryString);
                    }

                    if (op != DDL_CODE && op != COMMIT_CODE && op != ROLLBACK_CODE) {
                        if (!useLocalBuffering) {
                            offset = new Offset(version, tsDate, commitSCN.toPlainString(), seq, xid);
                        }
                        Map<String, String> attributes = new HashMap<>();
                        attributes.put(SCN, scn);
                        attributes.put(USER, username);
                        attributes.put(TIMESTAMP_HEADER, timestamp);
                        attributes.put(TABLE, table);
                        attributes.put(SEQ, String.valueOf(seq));
                        attributes.put(XID, xid);
                        attributes.put(RS_ID, rsId);
                        attributes.put(SSN, ssn.toString());
                        attributes.put(SCHEMA, schema);
                        attributes.put(ROLLBACK, String.valueOf(rollback));
                        attributes.put(ROWID_KEY, rowId);
                        if (!useLocalBuffering || getContext().isPreview()) {
                            if (commitSCN.compareTo(lastCommitSCN) < 0
                                    || (commitSCN.compareTo(lastCommitSCN) == 0 && seq < sequenceNumber)) {
                                continue;
                            }
                            lastCommitSCN = commitSCN;
                            sequenceNumber = seq;
                            if (configBean.keepOriginalQuery) {
                                attributes.put(QUERY_KEY, queryString);
                            }
                            try {
                                Record record = generateRecord(queryString, attributes, op);
                                if (record != null && record.getEscapedFieldPaths().size() > 0) {
                                    recordQueue.put(new RecordOffset(record, offset));
                                }
                            } catch (UnparseableSQLException ex) {
                                LOG.error("Parsing failed", ex);
                                unparseable.offer(queryString);
                            }
                        } else {
                            bufferedRecordsLock.lock();
                            try {
                                HashQueue<RecordSequence> records = bufferedRecords.computeIfAbsent(key, x -> {
                                    x.setTxnStartTime(tsDate);
                                    return createTransactionBuffer(key.txnId);
                                });

                                int nextSeq = records.isEmpty() ? 1 : records.tail().seq + 1;
                                RecordSequence node = new RecordSequence(attributes, queryString, nextSeq, op,
                                        rsId, ssn, tsDate);
                                records.add(node);
                            } finally {
                                bufferedRecordsLock.unlock();
                            }
                        }
                    } else if (!getContext().isPreview() && useLocalBuffering
                            && (op == COMMIT_CODE || op == ROLLBACK_CODE)) {
                        // This commit was previously processed, or it is a rollback, so ignore it.
                        if (op == ROLLBACK_CODE || scnDecimal.compareTo(lastCommitSCN) < 0) {
                            bufferedRecordsLock.lock();
                            try {
                                bufferedRecords.remove(key);
                            } finally {
                                bufferedRecordsLock.unlock();
                            }
                        } else {
                            bufferedRecordsLock.lock();
                            try {
                                HashQueue<RecordSequence> records = bufferedRecords.getOrDefault(key,
                                        EMPTY_LINKED_HASHSET);
                                if (lastCommitSCN.equals(scnDecimal) && xid.equals(lastTxnId)) {
                                    removeProcessedRecords(records, sequenceNumber);
                                }
                                int bufferedRecordsToBeRemoved = records.size();
                                LOG.debug(FOUND_RECORDS_IN_TRANSACTION, bufferedRecordsToBeRemoved, xid);
                                lastCommitSCN = scnDecimal;
                                lastTxnId = xid;
                                sequenceNumber = addRecordsToQueue(tsDate, scn, xid);
                            } finally {
                                bufferedRecordsLock.unlock();
                            }
                        }
                    } else {
                        offset = new Offset(version, tsDate, scn, 0, xid);
                        boolean sendSchema = false;
                        // Commit/rollback in Preview will also end up here, so don't really do any of the following in preview
                        // Don't bother with DDL events here.
                        if (!getContext().isPreview()) {
                            // Event is sent on every DDL, but schema is not always sent.
                            // Schema sending logic:
                            // CREATE/ALTER: Schema is sent if the schema after the ALTER is newer than the cached schema
                            // (which we would have sent as an event earlier, at the last alter)
                            // DROP/TRUNCATE: Schema is not sent, since they don't change schema.
                            DDL_EVENT type = getDdlType(queryString);
                            if (type == DDL_EVENT.ALTER || type == DDL_EVENT.CREATE) {
                                sendSchema = refreshSchema(scnDecimal, new SchemaAndTable(schema, table));
                            }
                            recordQueue.put(new RecordOffset(createEventRecord(type, queryString,
                                    schemaAndTable, offset.toString(), sendSchema, timestamp), offset));
                        }
                    }
                    query.setLength(0);
                }
            }
        } catch (SQLException ex) {
            error = true;
            // force a restart from the same timestamp.
            // Check the specific error codes first; a catch-all test ahead of
            // them would make these branches unreachable.
            if (ex.getErrorCode() == MISSING_LOG_FILE) {
                LOG.warn("SQL Exception while retrieving records", ex);
                addToStageExceptionsQueue(new StageException(JDBC_86, ex));
            } else if (ex.getErrorCode() == QUERY_TIMEOUT) {
                LOG.warn("LogMiner select query timed out");
            } else if (ex.getErrorCode() == LOGMINER_START_MUST_BE_CALLED) {
                LOG.warn("Last LogMiner session did not start successfully. Will retry", ex);
            } else if (ex.getErrorCode() != RESULTSET_CLOSED_AS_LOGMINER_SESSION_CLOSED) {
                // A closed LogMiner session is expected; anything else is an error.
                LOG.error("Error while reading data", ex);
                addToStageExceptionsQueue(new StageException(JDBC_52, ex));
            }
        } catch (StageException e) {
            LOG.error("Error while reading data", e);
            error = true;
            addToStageExceptionsQueue(e);
        } catch (InterruptedException ex) {
            LOG.error("Interrupted while waiting to add data");
            Thread.currentThread().interrupt();
        } catch (Exception ex) {
            LOG.error("Error while reading data", ex);
            error = true;
            addToStageExceptionsQueue(new StageException(JDBC_52, ex));
        } finally {
            // If an incomplete batch is seen, it means we are going to move the window forward
            // Ending this session and starting a new one helps reduce PGA memory usage.
            try {
                if (resultSet != null && !resultSet.isClosed()) {
                    resultSet.close();
                }
                if (selectChanges != null && !selectChanges.isClosed()) {
                    selectChanges.close();
                }
            } catch (SQLException ex) {
                LOG.warn("Error while attempting to close SQL statements", ex);
            }
            try {
                endLogMnr.execute();
            } catch (SQLException ex) {
                LOG.warn("Error while trying to close logminer session", ex);
            }
            try {
                if (error) {
                    resetConnectionsQuietly();
                } else {
                    discardOldUncommitted(startTime);
                    startTime = adjustStartTime(endTime);
                    endTime = getEndTimeForStartTime(startTime);
                }
                startLogMinerUsingGivenDates(startTime.format(dateTimeColumnHandler.dateFormatter),
                        endTime.format(dateTimeColumnHandler.dateFormatter));
            } catch (SQLException ex) {
                LOG.error("Error while attempting to start LogMiner", ex);
                addToStageExceptionsQueue(new StageException(JDBC_52, ex));
            } catch (StageException ex) {
                LOG.error("Error while attempting to start logminer for redo log dictionary", ex);
                addToStageExceptionsQueue(ex);
            }
        }
    }
}
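
One detail worth noticing above: the statement is re-acquired on every pass through the loop (getSelectChangesStatement()), so setFetchSize must be re-applied each time, because the fetch size is per-statement state rather than per-connection. Reduced to its core, with names following the example:

// The fetch hint must be set on each newly created statement before executeQuery().
selectChanges = getSelectChangesStatement();
selectChanges.setFetchSize(configBean.jdbcFetchSize);
resultSet = selectChanges.executeQuery();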

From source file:com.mirth.connect.donkey.server.data.jdbc.JdbcDao.java

@Override
public Attachment getMessageAttachment(String channelId, String attachmentId, Long messageId) {
    ResultSet resultSet = null;
    Attachment attachment = new Attachment();
    try {
        // Get the total size of each attachment by summing the sizes of its segments
        PreparedStatement statement = prepareStatement("selectMessageAttachmentSize", channelId);
        statement.setString(1, attachmentId);
        statement.setLong(2, messageId);
        resultSet = statement.executeQuery();

        int size = 0;
        if (resultSet.next()) {
            // Store the attachment size in a map with the attachment id as the key
            size = resultSet.getInt("attachment_size");
        }

        close(resultSet);

        // Get the attachment data
        statement = prepareStatement("selectMessageAttachment", channelId);
        statement.setString(1, attachmentId);
        statement.setLong(2, messageId);
        // Set the number of rows to be fetched into memory at a time. This limits the amount of memory required for the query.
        statement.setFetchSize(1);
        resultSet = statement.executeQuery();

        // The type of the current attachment
        String type = null;

        // Initialize the buffer to the exact size of the attachment. This should minimize the memory requirements if the numbers are correct.
        byte[] content = null;
        int offset = 0;

        while (resultSet.next()) {
            if (content == null) {
                type = resultSet.getString("type");
                content = new byte[size];
            }

            // Copy the current segment into the content buffer
            byte[] segment = resultSet.getBytes("content");
            System.arraycopy(segment, 0, content, offset, segment.length);

            offset += segment.length;
        }

        // Finish the attachment if one exists by populating the return object
        if (content != null) {
            attachment.setId(attachmentId);
            attachment.setContent(content);
            attachment.setType(type);
        }
        content = null;

        return attachment;
    } catch (SQLException e) {
        throw new DonkeyDaoException(e);
    } finally {
        close(resultSet);
    }
}
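
The fetch size of 1 here is deliberate: every row carries a potentially large binary segment, so fetching one row per round trip keeps at most one segment buffered in the driver at a time. A reduced sketch of the pattern, with a hypothetical table and columns:

// Hypothetical schema: each row holds one segment of a larger attachment.
PreparedStatement ps = conn.prepareStatement(
        "SELECT content FROM attachment_segment WHERE attachment_id = ? ORDER BY segment_no");
ps.setString(1, attachmentId);
ps.setFetchSize(1); // one wide row per round trip keeps driver memory bounded
try (ResultSet rs = ps.executeQuery()) {
    while (rs.next()) {
        byte[] segment = rs.getBytes("content");
        // append the segment to the reassembled attachment ...
    }
}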

From source file:com.mirth.connect.donkey.server.data.jdbc.JdbcDao.java

@Override
public List<Attachment> getMessageAttachment(String channelId, long messageId) {
    ResultSet resultSet = null;

    try {
        // Get the total size of each attachment by summing the sizes of its segments
        PreparedStatement statement = prepareStatement("selectMessageAttachmentSizeByMessageId", channelId);
        statement.setLong(1, messageId);
        resultSet = statement.executeQuery();

        Map<String, Integer> attachmentSize = new HashMap<String, Integer>();
        while (resultSet.next()) {
            // Store the attachment size in a map with the attachment id as the key
            attachmentSize.put(resultSet.getString("id"), resultSet.getInt("attachment_size"));
        }

        close(resultSet);

        // Get the attachment data
        statement = prepareStatement("selectMessageAttachmentByMessageId", channelId);
        statement.setLong(1, messageId);
        // Set the number of rows to be fetched into memory at a time. This limits the amount of memory required for the query.
        statement.setFetchSize(1);
        resultSet = statement.executeQuery();

        // Initialize the return object
        List<Attachment> attachments = new ArrayList<Attachment>();
        // The current attachment id that is being stitched together
        String currentAttachmentId = null;
        // The type of the current attachment
        String type = null;
        // Use a byte array to combine the segments
        byte[] content = null;
        int offset = 0;

        while (resultSet.next()) {
            // Get the attachment id of the current segment
            String attachmentId = resultSet.getString("id");

            // Ensure that the attachmentId is in the map we created earlier, otherwise don't return this attachment
            if (attachmentSize.containsKey(attachmentId)) {
                // If starting a new attachment
                if (!attachmentId.equals(currentAttachmentId)) {
                    // If there was a previous attachment, we need to finish it.
                    if (content != null) {
                        // Add the completed attachment data to the list of attachments to return
                        attachments.add(new Attachment(currentAttachmentId, content, type));
                    }
                    currentAttachmentId = attachmentId;
                    type = resultSet.getString("type");

                    // Initialize the byte array size to the exact size of the attachment. This should minimize the memory requirements if the numbers are correct.
                    // Use 0 as a backup in case the size is not in the map. (If trying to return an attachment that no longer exists)
                    content = new byte[attachmentSize.get(attachmentId)];
                    offset = 0;
                }

                // Copy the current segment into the content buffer
                byte[] segment = resultSet.getBytes("content");
                System.arraycopy(segment, 0, content, offset, segment.length);

                offset += segment.length;
            }
        }

        // Finish the message if one exists by adding it to the list of attachments to return
        if (content != null) {
            attachments.add(new Attachment(currentAttachmentId, content, type));
        }
        content = null;

        return attachments;
    } catch (SQLException e) {
        throw new DonkeyDaoException(e);
    } finally {
        close(resultSet);
    }
}

From source file:edu.mayo.informatics.lexgrid.convert.directConversions.UMLSToSQL.java

private void loadContexts(String UMLSCodingSchemeName, String codingSchemeName) throws SQLException {
    messages_.info("Processing HCD-tagged MRHIER entries.");

    boolean constructHCD = false;

    /*
     * Create a temporary view (that will be destroyed at the end of the
     * method) to speed up the queries in this method
     */
    try {
        messages_.info("loading contexts - getting a total count");

        String getCodingSchemeInfoSQL = "SELECT COUNT(1) as cnt FROM MRHIER" + " WHERE SAB = ? ";
        if (!constructHCD) {
            getCodingSchemeInfoSQL = getCodingSchemeInfoSQL + " AND HCD != ? ";
        }

        PreparedStatement getCodingSchemeInfo = umlsConnection2_
                .prepareStatement(umlsSqlModifier_.modifySQL(getCodingSchemeInfoSQL));
        ResultSet results = null;

        getCodingSchemeInfo.setString(1, UMLSCodingSchemeName);
        // this is weird, but it's the easiest way to get the
        // appropriate quotes around NULL
        if (!constructHCD)
            getCodingSchemeInfo.setString(2, "NULL");
        int total = 0;
        try {
            results = getCodingSchemeInfo.executeQuery();

            results.next();
            total = results.getInt("cnt");

            results.close();
        } finally {
            getCodingSchemeInfo.close();
        }

        int start = 0;
        int contextCount = 0;
        int rowCount = 0;

        getCodingSchemeInfoSQL = "SELECT MRHIER.CUI, MRHIER.AUI, MRHIER.PTR, MRHIER.HCD, "
                + " MRHIER.SAB, MRHIER.RELA, MRHIER.CXN " + " FROM MRHIER " + " WHERE SAB = ? ";
        if (!constructHCD) {
            getCodingSchemeInfoSQL = getCodingSchemeInfoSQL + " AND HCD != ? ";
        }
        getCodingSchemeInfoSQL = getCodingSchemeInfoSQL + " ORDER BY {BINARY} MRHIER.CUI {LIMIT}";
        getCodingSchemeInfo = umlsConnection2_
                .prepareStatement(umlsSqlModifier_.modifySQL(getCodingSchemeInfoSQL));

        // Count var usage to make sure things are placed correctly
        int boundVar = 1;
        getCodingSchemeInfo.setString(boundVar++, UMLSCodingSchemeName);
        // this is weird, but it's the easiest way to get the
        // appropriate quotes around NULL
        if (!constructHCD)
            getCodingSchemeInfo.setString(boundVar++, "NULL");
        try {
            // Collect them all into batches of concept codes. When the code
            // changes, load that code.
            while (start < total) {
                messages_.info("Fetching a batch of results");

                if (umlsSqlModifier_.getDatabaseType().equals("MySQL")) {
                    // MySQL doesn't stream results; the {LIMIT} placeholder
                    // above expands into a LIMIT clause here for paging.
                    getCodingSchemeInfo.setInt(boundVar, start);
                    getCodingSchemeInfo.setInt(boundVar + 1, batchSize);
                    start += batchSize;
                } else if (umlsSqlModifier_.getDatabaseType().equals("PostgreSQL")) {
                    // PostgreSQL streams results properly, so we can just
                    // set the fetch size and loop only once.
                    getCodingSchemeInfo.setFetchSize(batchSize);
                    umlsConnection2_.setAutoCommit(false);
                    start = total;
                } else {
                    start = total;
                }

                results = getCodingSchemeInfo.executeQuery();
                try {
                    messages_.debug("query finished, processing results");

                    while (results.next()) {
                        rowCount++;
                        // store all the data from this row.

                        AssociationQualification aq = new AssociationQualification();

                        aq.codingSchemeName = codingSchemeName;
                        String cui = results.getString("CUI");
                        aq.sourceConceptAUI = results.getString("AUI");
                        aq.sourceConceptCode = mapCUIToCode(cui, aq.sourceConceptAUI,
                                UMLSCodingSchemeName)[0].code;
                        aq.qualifierName = "HCD";
                        aq.qualifierValue = results.getString("HCD");
                        aq.pathToRoot = results.getString("PTR");

                        contextCount += loadContext(aq, constructHCD, results.getString("RELA"), contextCount);

                        if (rowCount % 10 == 0) {
                            messages_.busy();
                        }

                        if (rowCount % 1000 == 0) {
                            messages_.info("On context " + rowCount + " out of " + total + " - found "
                                    + contextCount + " contextual links");
                        }
                    }
                } finally {
                    results.close();
                }
            }
            messages_.info("Loaded " + contextCount + " contextual links from  " + rowCount + " contexts");
        } finally {
            getCodingSchemeInfo.close();
        }
    } finally {
    }
}

From source file:edu.mayo.informatics.lexgrid.convert.directConversions.MetaThesaurusToSQL.java

private void loadContexts(String codingSchemeName) throws SQLException {
    /*
     * Evaluate hierarchy processing.
     */

    boolean constructHCD = false;

    /*
     * Create a temporary view (that will be destroyed at the end of the
     * method) to speed up the queries in this method
     */
    try {
        messages_.info("loading contexts - getting a total count");

        String getCodingSchemeInfoSQL = "SELECT COUNT(*) as cnt FROM MRHIER";
        if (!constructHCD) {
            getCodingSchemeInfoSQL = getCodingSchemeInfoSQL + " WHERE HCD != ? ";
        }

        PreparedStatement getCodingSchemeInfo = umlsConnection2_
                .prepareStatement(umlsSqlModifier_.modifySQL(getCodingSchemeInfoSQL));
        ResultSet results = null;

        // this is weird, but its the easiest way to get the
        // appropriate quotes around NULL
        if (!constructHCD)
            getCodingSchemeInfo.setString(1, "NULL");
        int total = 0;
        try {
            results = getCodingSchemeInfo.executeQuery();

            results.next();
            total = results.getInt("cnt");

            results.close();
        } finally {
            getCodingSchemeInfo.close();
        }

        int start = 0;
        int contextCount = 0;
        int rowCount = 0;

        getCodingSchemeInfoSQL = "SELECT MRHIER.CUI, MRHIER.AUI, MRHIER.PTR, MRHIER.HCD, "
                + " MRHIER.SAB, MRHIER.RELA, MRHIER.CXN " + " FROM MRHIER ";
        if (!constructHCD) {
            getCodingSchemeInfoSQL = getCodingSchemeInfoSQL + " WHERE HCD != ? ";
        }
        getCodingSchemeInfoSQL = getCodingSchemeInfoSQL + " ORDER BY {BINARY} MRHIER.CUI {LIMIT}";
        getCodingSchemeInfo = umlsConnection2_
                .prepareStatement(umlsSqlModifier_.modifySQL(getCodingSchemeInfoSQL));

        // Count var usage to make sure things are placed correctly
        int boundVar = 1;
        // this is weird, but it's the easiest way to get the
        // appropriate quotes around NULL
        if (!constructHCD)
            getCodingSchemeInfo.setString(boundVar++, "NULL");
        try {
            // Collect them all into batches of concept codes. When the code
            // changes, load that code.
            while (start < total) {
                messages_.info("Fetching a batch of results");

                if (umlsSqlModifier_.getDatabaseType().equals("MySQL")) {
                    // MySQL doesn't stream results; the {LIMIT} placeholder
                    // above expands into a LIMIT clause here for paging.
                    getCodingSchemeInfo.setInt(boundVar, start);
                    getCodingSchemeInfo.setInt(boundVar + 1, batchSize);
                    start += batchSize;
                } else if (umlsSqlModifier_.getDatabaseType().equals("PostgreSQL")) {
                    // PostgreSQL streams results properly, so we can just
                    // set the fetch size and loop only once.
                    getCodingSchemeInfo.setFetchSize(batchSize);
                    umlsConnection2_.setAutoCommit(false);
                    start = total;
                } else {
                    start = total;
                }

                results = getCodingSchemeInfo.executeQuery();
                try {
                    messages_.debug("query finished, processing results");

                    while (results.next()) {
                        rowCount++;
                        // store all the data from this row.

                        AssociationQualification aq = new AssociationQualification();
                        aq.codingSchemeName = codingSchemeName;
                        String cui = results.getString("CUI");
                        aq.sourceConceptAUI = results.getString("AUI");
                        aq.sourceConceptCode = cui;
                        aq.qualifierName = "HCD";
                        aq.qualifierValue = results.getString("HCD");
                        aq.pathToRoot = results.getString("PTR");

                        contextCount += loadContext(aq, constructHCD, results.getString("RELA"), contextCount);

                        if (rowCount % 10 == 0) {
                            messages_.busy();
                        }

                        if (rowCount % 1000 == 0) {
                            messages_.info("On context " + rowCount + " out of " + total + " - found "
                                    + contextCount + " contextual links");
                        }
                    }
                } finally {
                    results.close();
                }
            }
            messages_.info("Loaded " + contextCount + " contextual links from  " + rowCount + " contexts");
        } finally {
            getCodingSchemeInfo.close();
        }
    } finally {
    }
}

From source file:org.apache.apex.malhar.lib.db.jdbc.AbstractJdbcPollInputOperator.java

/**
 * Execute the query and transfer results to the emit queue.
 * @param preparedStatement PreparedStatement to execute the query and fetch results.
 */
protected int insertDbDataInQueue(PreparedStatement preparedStatement)
        throws SQLException, InterruptedException {
    int resultCount = 0;
    preparedStatement.setFetchSize(getFetchSize());
    ResultSet result = preparedStatement.executeQuery();
    while (execute && result.next()) {
        T obj = getTuple(result);
        if (obj == null) {
            continue;
        }
        while (execute && !emitQueue.offer(obj)) {
            Thread.sleep(DEFAULT_SLEEP_TIME);
        }
        if (isPollerPartition && rebaseOffset) {
            if (prevKey == null) {
                prevKey = extractKey(result);
            } else if (this.fetchedKeyAndOffset.get() == null) {
                // track key change
                Object nextKey = extractKey(result);
                if (!nextKey.equals(prevKey)) {
                    // new key, ready for rebase (WHERE key > ?)
                    fetchedKeyAndOffset.set(new MutablePair<>(prevKey, lastOffset + resultCount));
                }
            }
        }
        resultCount++;
    }
    result.close();
    preparedStatement.close();
    return resultCount;
}

From source file:org.apache.cayenne.access.jdbc.SQLTemplateAction.java

/**
 * Binds parameters to the PreparedStatement.
 */
protected void bind(PreparedStatement preparedStatement, ParameterBinding[] bindings)
        throws SQLException, Exception {
    // bind parameters
    int i = 1;
    for (ParameterBinding binding : bindings) {
        Object value = binding.getValue();
        ExtendedType extendedType = value != null
                ? getAdapter().getExtendedTypes().getRegisteredType(value.getClass())
                : getAdapter().getExtendedTypes().getDefaultType();
        binding.setExtendedType(extendedType);
        binding.setStatementPosition(i++);
        dataNode.getAdapter().bindParameter(preparedStatement, binding);
    }

    if (queryMetadata.getStatementFetchSize() != 0) {
        preparedStatement.setFetchSize(queryMetadata.getStatementFetchSize());
    }
}
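
The guard against 0 is meaningful: in JDBC, a fetch size of 0 tells the driver to use its own default, so Cayenne forwards the hint only when the query metadata carries an explicit value. The same defensive pattern applies anywhere the fetch size comes from configuration (the config accessor below is hypothetical):

// 0 means "driver default" per the JDBC contract; only forward an explicit hint.
int configuredFetchSize = config.getFetchSize(); // hypothetical accessor
if (configuredFetchSize > 0) {
    preparedStatement.setFetchSize(configuredFetchSize);
}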

From source file:org.apache.ctakes.ytex.kernel.KernelUtilImpl.java

/**
 * The result set can be very large - avoid loading the entire JDBC ResultSet
 * into memory.
 */
@Override
public InstanceData loadInstances(String strQuery) {
    final InstanceData instanceLabel = new InstanceData();
    PreparedStatement s = null;
    Connection conn = null;
    ResultSet rs = null;
    try {
        // jdbcTemplate.query(strQuery, new RowCallbackHandler() {
        RowCallbackHandler ch = new RowCallbackHandler() {

            @Override
            public void processRow(ResultSet rs) throws SQLException {
                String label = "";
                int run = 0;
                int fold = 0;
                boolean train = true;
                long instanceId = rs.getLong(1);
                String className = rs.getString(2);
                if (rs.getMetaData().getColumnCount() >= 3)
                    train = rs.getBoolean(3);
                if (rs.getMetaData().getColumnCount() >= 4) {
                    label = rs.getString(4);
                    if (label == null)
                        label = "";
                }
                if (rs.getMetaData().getColumnCount() >= 5)
                    fold = rs.getInt(5);
                if (rs.getMetaData().getColumnCount() >= 6)
                    run = rs.getInt(6);
                // get runs for label
                SortedMap<Integer, SortedMap<Integer, SortedMap<Boolean, SortedMap<Long, String>>>> runToInstanceMap = instanceLabel
                        .getLabelToInstanceMap().get(label);
                if (runToInstanceMap == null) {
                    runToInstanceMap = new TreeMap<Integer, SortedMap<Integer, SortedMap<Boolean, SortedMap<Long, String>>>>();
                    instanceLabel.getLabelToInstanceMap().put(label, runToInstanceMap);
                }
                // get folds for run
                SortedMap<Integer, SortedMap<Boolean, SortedMap<Long, String>>> foldToInstanceMap = runToInstanceMap
                        .get(run);
                if (foldToInstanceMap == null) {
                    foldToInstanceMap = new TreeMap<Integer, SortedMap<Boolean, SortedMap<Long, String>>>();
                    runToInstanceMap.put(run, foldToInstanceMap);
                }
                // get train/test set for fold
                SortedMap<Boolean, SortedMap<Long, String>> ttToClassMap = foldToInstanceMap.get(fold);
                if (ttToClassMap == null) {
                    ttToClassMap = new TreeMap<Boolean, SortedMap<Long, String>>();
                    foldToInstanceMap.put(fold, ttToClassMap);
                }
                // get instances for train/test set
                SortedMap<Long, String> instanceToClassMap = ttToClassMap.get(train);
                if (instanceToClassMap == null) {
                    instanceToClassMap = new TreeMap<Long, String>();
                    ttToClassMap.put(train, instanceToClassMap);
                }
                // set the instance class
                instanceToClassMap.put(instanceId, className);
                // add the class to the labelToClassMap
                SortedSet<String> labelClasses = instanceLabel.getLabelToClassMap().get(label);
                if (labelClasses == null) {
                    labelClasses = new TreeSet<String>();
                    instanceLabel.getLabelToClassMap().put(label, labelClasses);
                }
                if (!labelClasses.contains(className))
                    labelClasses.add(className);
            }
        };
        conn = this.jdbcTemplate.getDataSource().getConnection();
        s = conn.prepareStatement(strQuery, java.sql.ResultSet.TYPE_FORWARD_ONLY,
                java.sql.ResultSet.CONCUR_READ_ONLY);
        if ("MySQL".equals(conn.getMetaData().getDatabaseProductName())) {
            s.setFetchSize(Integer.MIN_VALUE);
        } else if (s.getClass().getName().equals("com.microsoft.sqlserver.jdbc.SQLServerStatement")) {
            try {
                BeanUtils.setProperty(s, "responseBuffering", "adaptive");
            } catch (IllegalAccessException e) {
                log.warn("error setting responseBuffering", e);
            } catch (InvocationTargetException e) {
                log.warn("error setting responseBuffering", e);
            }
        }
        rs = s.executeQuery();
        while (rs.next()) {
            ch.processRow(rs);
        }
    } catch (SQLException j) {
        log.error("loadInstances failed", j);
        throw new RuntimeException(j);
    } finally {
        if (rs != null) {
            try {
                rs.close();
            } catch (SQLException e) {
            }
        }
        if (s != null) {
            try {
                s.close();
            } catch (SQLException e) {
            }
        }
        if (conn != null) {
            try {
                conn.close();
            } catch (SQLException e) {
            }
        }
    }
    return instanceLabel;
}
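
The Integer.MIN_VALUE seen above is MySQL Connector/J's documented switch into streaming mode: rows are read one at a time instead of being buffered in memory, but the statement must be TYPE_FORWARD_ONLY and CONCUR_READ_ONLY, and no other statement can run on the connection until the ResultSet is closed. The essential lines, with an illustrative query:

// MySQL Connector/J streaming mode; forward-only and read-only are required.
PreparedStatement s = conn.prepareStatement(
        "SELECT instance_id, class_name FROM training_instance", // illustrative
        java.sql.ResultSet.TYPE_FORWARD_ONLY, java.sql.ResultSet.CONCUR_READ_ONLY);
s.setFetchSize(Integer.MIN_VALUE); // stream rows instead of buffering the whole result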