Example usage for java.sql PreparedStatement clearParameters

List of usage examples for java.sql PreparedStatement clearParameters

Introduction

This page presents usage examples for the java.sql PreparedStatement method clearParameters, collected from open-source projects.

Prototype

void clearParameters() throws SQLException;

Document

Clears the current parameter values immediately.
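
Before the project examples below, here is a minimal, self-contained sketch of the method in context (not taken from any of the projects that follow): clearParameters() discards the values bound through the setXxx() methods so the statement can be re-executed with a fresh set. The schema is hypothetical, and the example assumes the H2 driver on the classpath; any JDBC driver behaves the same way.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;

public class ClearParametersDemo {
    public static void main(String[] args) throws SQLException {
        try (Connection con = DriverManager.getConnection("jdbc:h2:mem:demo")) {
            try (Statement ddl = con.createStatement()) {
                ddl.execute("CREATE TABLE person (name VARCHAR(64), age INT)");
            }
            try (PreparedStatement ps = con.prepareStatement(
                    "INSERT INTO person (name, age) VALUES (?, ?)")) {
                ps.setString(1, "Alice");
                ps.setInt(2, 30);
                ps.executeUpdate();

                // Release the previous bind values right away instead of
                // waiting for the next round of setXxx() calls to overwrite them.
                ps.clearParameters();

                ps.setString(1, "Bob");
                ps.setInt(2, 25);
                ps.executeUpdate();
            }
        }
    }
}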

Usage

From source file: dk.netarkivet.harvester.datamodel.JobDBDAO.java

/** Create the entries in the job_configs table for this job.
 * Since some jobs have up to 10000 configs, this must be optimized.
 * The entries are only created, if job.configsChanged is true.
 *
 * @param dbconnection A connection to work on
 * @param job The job to store entries for
 * @throws SQLException If any problems occur during creation of the
 * new entries in the job_configs table.
 */
private void createJobConfigsEntries(Connection dbconnection, Job job) throws SQLException {
    if (job.configsChanged) {
        PreparedStatement statement = null;
        String tmpTable = null;
        Long jobID = job.getJobID();
        try {
            statement = dbconnection.prepareStatement("DELETE FROM job_configs WHERE job_id = ?");
            statement.setLong(1, jobID);
            statement.executeUpdate();
            statement.close();
            tmpTable = DBSpecifics.getInstance().getJobConfigsTmpTable(dbconnection);
            final Map<String, String> domainConfigurationMap = job.getDomainConfigurationMap();
            statement = dbconnection.prepareStatement(
                    "INSERT INTO " + tmpTable + " ( domain_name, config_name ) " + " VALUES ( ?, ?)");
            for (Map.Entry<String, String> entry : domainConfigurationMap.entrySet()) {
                statement.setString(1, entry.getKey());
                statement.setString(2, entry.getValue());
                statement.executeUpdate();
                statement.clearParameters();
            }
            statement.close();
            // Now we have a temp table with all the domains and configs
            statement = dbconnection.prepareStatement("INSERT INTO job_configs " + "( job_id, config_id ) "
                    + "SELECT ?, configurations.config_id " + "  FROM domains, configurations, " + tmpTable
                    + " WHERE domains.name = " + tmpTable + ".domain_name"
                    + "   AND domains.domain_id = configurations.domain_id" + "   AND configurations.name = "
                    + tmpTable + ".config_name");
            statement.setLong(1, jobID);
            int rows = statement.executeUpdate();
            if (rows != domainConfigurationMap.size()) {
                log.debug("Domain or configuration in table for " + job + " missing: Should have "
                        + domainConfigurationMap.size() + ", got " + rows);
            }
            dbconnection.commit();
        } finally {
            if (tmpTable != null) {
                DBSpecifics.getInstance().dropJobConfigsTmpTable(dbconnection, tmpTable);
            }
            job.configsChanged = false;
        }
    }
}
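
Since the loop above issues one executeUpdate() round trip per entry, a natural variation for jobs with thousands of configs is JDBC batching. The following is a sketch only, reusing the tmpTable and domainConfigurationMap names from the method above; successive setString() calls replace the previous bindings before each addBatch(), so no clearParameters() is needed between entries:

    // Sketch: batching variant of the insert loop above.
    statement = dbconnection.prepareStatement(
            "INSERT INTO " + tmpTable + " ( domain_name, config_name ) VALUES ( ?, ? )");
    for (Map.Entry<String, String> entry : domainConfigurationMap.entrySet()) {
        statement.setString(1, entry.getKey());
        statement.setString(2, entry.getValue());
        statement.addBatch(); // queue the row instead of executing immediately
    }
    statement.executeBatch(); // single round trip for all queued rows
    statement.close();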

From source file: org.sakaiproject.search.optimize.shared.impl.DbJournalOptimizationManager.java

/**
 * @throws LockTimeoutException
 * @see org.sakaiproject.search.journal.api.JournalManager#prepareSave(long)
 */
public JournalManagerState prepareSave(long transactionId) throws IndexJournalException {
    PreparedStatement getJournalSavePointPst = null;
    PreparedStatement getEarlierSavePoint = null;
    PreparedStatement getEarlierSavePoint2 = null;
    PreparedStatement lockEarlierSavePoints = null;
    PreparedStatement listMergeSet = null;
    PreparedStatement listJournal = null;

    OptimizeJournalManagerStateImpl jms = new OptimizeJournalManagerStateImpl();
    ResultSet rs = null;
    Connection connection = null;
    try {

        connection = datasource.getConnection();
        getJournalSavePointPst = connection
                .prepareStatement("select serverid, jid from search_node_status order by jid asc ");
        getEarlierSavePoint = connection
                .prepareStatement("select serverid, jid from search_node_status order by jid asc ");

        jms.indexWriter = serverId + ":" + transactionId;
        jms.transactionId = transactionId;
        jms.oldestSavePoint = 0;

        List<String> servers = clusterService.getServers();

        // work out the oldest SavePoint that has not yet been merged
        // by any running cluster node.
        // this assumes that all the cluster nodes are running.
        // Any that are not running will have to be restarted in a clean
        // state
        // so that they update from scratch.

        getJournalSavePointPst.clearParameters();
        rs = getJournalSavePointPst.executeQuery();
        long oldestActiveSavepoint = 0;
        while (oldestActiveSavepoint == 0 && rs.next()) {
            String server = rs.getString(1);
            log.debug("Got Server " + server + " with savePoint " + rs.getLong(2));
            for (String s : servers) {
                int dash = s.lastIndexOf('-');
                if (dash > 0) {
                    s = s.substring(0, dash);
                }
                if (server.equals(s)) {
                    oldestActiveSavepoint = rs.getLong(2);
                    log.debug("   Match against " + s);
                    break;
                } else {
                    log.debug("No Match against " + s);
                }
            }

        }
        rs.close();
        // SRCH-38: we also select on the old misspelled value for backward compatibility
        getEarlierSavePoint2 = connection.prepareStatement(
                "select min(txid),max(txid) from search_journal where txid < ? and  (status = 'commited' or status = 'committed') ");
        getEarlierSavePoint2.clearParameters();
        getEarlierSavePoint2.setLong(1, oldestActiveSavepoint);
        rs = getEarlierSavePoint2.executeQuery();
        jms.oldestSavePoint = 0;
        long earliestSavePoint = 0;
        if (rs.next()) {
            earliestSavePoint = rs.getLong(1);
            jms.oldestSavePoint = rs.getLong(2);
        }

        if (jms.oldestSavePoint <= 0) {
            throw new NoOptimizationRequiredException("Oldest savePoint is 0");
        }
        rs.close();

        long nshared = jms.oldestSavePoint - earliestSavePoint;

        // no point in going further there are not enough segments.
        if (nshared < journalSettings.getMinimumOptimizeSavePoints()) {
            throw new NoOptimizationRequiredException("Insufficient Journal Entries prior to savepoint "
                    + jms.oldestSavePoint + " to optimize, found " + nshared);
        }

        // too many ?
        if (nshared > 2 * journalSettings.getMinimumOptimizeSavePoints()) {
            // adjust the oldestSavePoint
            // the number will be less than this if there are holes
            jms.oldestSavePoint = earliestSavePoint + 2 * journalSettings.getMinimumOptimizeSavePoints();
            // adjust for a potential hole
            getEarlierSavePoint2.setLong(1, jms.oldestSavePoint);
            rs = getEarlierSavePoint2.executeQuery();
            jms.oldestSavePoint = 0;
            earliestSavePoint = 0;
            if (rs.next()) {
                earliestSavePoint = rs.getLong(1);
                jms.oldestSavePoint = rs.getLong(2);
            }
            if (jms.oldestSavePoint <= 0) {
                throw new NoOptimizationRequiredException("Oldest savePoint is 0");
            }
            rs.close();

        }

        log.debug("Optimizing shared Journal Storage to savepoint " + jms.oldestSavePoint);

        // this requires read committed transaction isolation and WILL NOT
        // work on HSQL
        lockEarlierSavePoints = connection.prepareStatement(
                "update search_journal set indexwriter = ?, status = 'merging-prepare', txts = ? where txid <= ? and  (status = 'commited' or status = 'committed' ) ");
        lockEarlierSavePoints.clearParameters();
        lockEarlierSavePoints.setString(1, jms.indexWriter);
        lockEarlierSavePoints.setLong(2, System.currentTimeMillis());
        lockEarlierSavePoints.setLong(3, jms.oldestSavePoint);
        int i = 0;
        try {
            i = lockEarlierSavePoints.executeUpdate();
        } catch (SQLException lockTimeout) {
            throw new LockTimeoutException(lockTimeout.getMessage(), lockTimeout);
        }
        listJournal = connection
                .prepareStatement("select txid, indexwriter, status, txts  from search_journal");

        if (log.isDebugEnabled()) {
            listJournal.clearParameters();
            rs = listJournal.executeQuery();
            while (rs.next()) {
                log.debug("TX[" + rs.getLong(1) + "];indexwriter[" + rs.getString(2) + "];status["
                        + rs.getString(3) + "];timestamp[" + rs.getLong(4) + "]");
            }
            rs.close();
        }

        if (i < journalSettings.getMinimumOptimizeSavePoints()) {
            throw new NoOptimizationRequiredException("Insufficient Journal Entries prior to savepoint "
                    + jms.oldestSavePoint + " to optimize, found " + i);
        }

        log.info("Locked " + i + " savePoints ");
        listJournal.clearParameters();
        rs = listJournal.executeQuery();
        while (rs.next()) {
            log.info("TX[" + rs.getLong(1) + "];indexwriter[" + rs.getString(2) + "];status[" + rs.getString(3)
                    + "];timestamp[" + rs.getLong(4) + "]");
        }
        rs.close();

        jms.mergeList = new ArrayList<Long>();

        listMergeSet = connection
                .prepareStatement("select txid from search_journal where indexwriter = ?  order by txid asc ");
        listMergeSet.clearParameters();
        listMergeSet.setString(1, jms.indexWriter);
        rs = listMergeSet.executeQuery();
        while (rs.next()) {
            jms.mergeList.add(rs.getLong(1));
        }
        log.info("Retrieved " + jms.mergeList.size() + " locked savePoints ");

        connection.commit();

    } catch (IndexJournalException ijex) {
        try {
            connection.rollback();
        } catch (Exception ex2) {
            log.debug(ex2);
        }
        throw ijex;
    } catch (Exception ex) {
        try {
            connection.rollback();
        } catch (Exception ex2) {
            log.debug(ex2);
        }
        if (ex instanceof LockTimeoutException) {
            throw (LockTimeoutException) ex;
        } else {
            throw new IndexJournalException("Failed to lock savePoints to this node ", ex);
        }
    } finally {
        try {
            rs.close();
        } catch (Exception ex) {
            log.debug(ex);
        }
        try {
            getJournalSavePointPst.close();
        } catch (Exception ex) {
            log.debug(ex);
        }
        try {
            getEarlierSavePoint.close();
        } catch (Exception ex) {
            log.debug(ex);
        }
        try {
            getEarlierSavePoint2.close();
        } catch (Exception ex) {
            log.debug(ex);
        }

        try {
            if (lockEarlierSavePoints != null)
                lockEarlierSavePoints.close();
        } catch (Exception ex) {
            log.debug(ex);
        }
        try {
            if (listJournal != null)
                listJournal.close();
        } catch (Exception ex) {
            log.debug(ex);
        }
        try {
            if (listMergeSet != null)
                listMergeSet.close();
        } catch (Exception ex) {
            log.debug(ex);
        }
        try {
            connection.close();
        } catch (Exception ex2) {
            log.debug(ex2);
        }
    }
    return jms;
}
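
A note on the calls above: several of them, such as those on getJournalSavePointPst and listJournal, precede SQL that contains no ? placeholders at all, so clearParameters() acts purely as a defensive reset. Where a statement really is re-executed with fresh values, as with getEarlierSavePoint2, the call guarantees that no binding from the previous execution carries over.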

From source file: com.atlassian.jira.util.BugzillaImportBean.java

private void createAttachments(final Connection conn, final PreparedStatement attachPrepStatement,
        final int bug_id, final GenericValue issue) throws Exception {
    if (applicationProperties.getOption(APKeys.JIRA_OPTION_ALLOWATTACHMENTS)) {
        ResultSet resultSet = null;
        try {
            attachPrepStatement.clearParameters();
            attachPrepStatement.setInt(1, bug_id);
            resultSet = attachPrepStatement.executeQuery();
            while (resultSet.next()) {
                String fileName = resultSet.getString("attachment_title");
                if (fileName.lastIndexOf('\\') > -1) {
                    fileName = fileName.substring(fileName.lastIndexOf('\\') + 1);
                }

                if (fileName.lastIndexOf('/') > -1) {
                    fileName = fileName.substring(fileName.lastIndexOf('/') + 1);
                }

                log("Importing attachment for bug " + bug_id + ".");

                byte[] fileBytes;
                /*
                try {
                */
                fileBytes = resultSet.getBytes("attachment_data");
                /*
                }
                catch (final SQLException e) {
                    final PreparedStatement ps = conn.prepareStatement("select thedata from attach_data where id = ?");
                    ps.setInt(1, resultSet.getInt("attach_id"));
                    final ResultSet attachmentRS = ps.executeQuery();
                    attachmentRS.next();
                    fileBytes = attachmentRS.getBytes("thedata");
                    attachmentRS.close();
                    resultSet.close();
                    return;
                }
                */

                final int submitterId = resultSet.getInt("user_id");
                final Attachment attachment = attachmentManager.createAttachment(issue, getUser(submitterId),
                        resultSet.getString("mimetype"), fileName, new Long(fileBytes.length), null,
                        resultSet.getTimestamp("created_ts"));

                // UtilDateTime.nowTimestamp());

                //we need to set the created date back to when it was created in the original system.
                attachment.getGenericValue().set("created", resultSet.getTimestamp("created_ts"));
                attachment.store();

                CoreFactory.getGenericDelegator().storeAll(EasyList.build(issue));
                cacheManager.flush(CacheManager.ISSUE_CACHE, issue);

                final File realAttachFile = AttachmentUtils.getAttachmentFile(attachment);
                final BufferedOutputStream out = new BufferedOutputStream(new FileOutputStream(realAttachFile));
                out.write(fileBytes, 0, fileBytes.length);
                out.close();
            }
        } catch (final SQLException e) {
            log("Error on importing attachments for bug " + bug_id + ". Error:" + e.getMessage());
        } finally {
            if (resultSet != null) //prevent nullpointer - JRA-6154
            {
                resultSet.close();
            }
        }
    } else {
        log("Attachments will not be imported from phpBB since attachements are disabled in JIRA.");
    }
}

From source file: com.atlassian.jira.util.BugzillaImportBean.java

private void createLinks(final IssueLinkType linkType, final String inwardColumn, final String outwardColumn,
        final PreparedStatement linkInwardPrepStatement, final PreparedStatement linkOutwardPrepStatement,
        final int bug_id, final GenericValue issue) throws SQLException {
    final Long issueId = issue.getLong("id");
    ResultSet resultSet = null;
    try {
        linkInwardPrepStatement.clearParameters();
        linkInwardPrepStatement.setInt(1, bug_id);
        resultSet = linkInwardPrepStatement.executeQuery();
        while (resultSet.next()) {
            final String inward = resultSet.getString(inwardColumn);
            final Long linkedIssueId = (Long) previouslyImportedKeys.get(new Integer(inward));
            if (linkedIssueId != null) {
                try {
                    final GenericValue linkedIssue = issueManager.getIssue(linkedIssueId);
                    if (linkedIssue == null) {
                        log4jLog.error("Could not find issue with id " + linkedIssueId
                                + " although it was once imported from bug #" + inward);
                        continue;
                    }
                    issueLinkManager.createIssueLink(linkedIssueId, issueId, linkType.getId(), null, null);
                    log("Creating link: issue " + issue.getString("key") + " " + linkType.getInward() + " "
                            + linkedIssue.getString("key"));
                } catch (final CreateException e) {
                    log4jLog.error(e, e);
                }
            }
        }
        resultSet.close();

        linkOutwardPrepStatement.clearParameters();
        linkOutwardPrepStatement.setInt(1, bug_id);
        resultSet = linkOutwardPrepStatement.executeQuery();
        while (resultSet.next()) {
            final String outward = resultSet.getString(outwardColumn);
            final Long linkedIssueId = (Long) previouslyImportedKeys.get(new Integer(outward));
            if (linkedIssueId != null) {
                try {
                    final GenericValue linkedIssue = issueManager.getIssue(linkedIssueId);
                    if (linkedIssue == null) {
                        log4jLog.error("Could not find issue with id " + linkedIssueId
                                + " although it was once imported from bug #" + outward);
                        continue;
                    }
                    issueLinkManager.createIssueLink(issueId, linkedIssueId, linkType.getId(), null, null);
                    log("Creating link: issue " + issue.getString("key") + " " + linkType.getOutward() + " "
                            + linkedIssue.getString("key"));

                } catch (final CreateException e) {
                    log4jLog.error(e, e);
                }
            }
        }
    } catch (final SQLException e) {
        log("Error creating dependency link for bug " + bug_id + ". Error:" + e.getMessage());
    } finally {
        if (resultSet != null) //prevent nullpointer - JRA-6154
        {
            resultSet.close();
        }
    }
}
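
Here the inward and outward statements are supplied by the caller, so the clearParameters() call that opens each phase protects against bindings left over from a previous invocation of the method before bug_id is set again.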

From source file: com.commander4j.db.JDBDespatch.java

public boolean confirm() {
    boolean result = false;
    JDBPalletHistory ph = new JDBPalletHistory(getHostID(), getSessionID());
    long txn = 0;

    logger.debug("confirm2 [" + getDespatchNo() + "]");

    if (getStatus().equals("Confirmed")) {
        setErrorMessage("Despatch already confirmed.");
    } else {
        if (isValid(true)) {

            // Run in current thread.
            logger.debug("*NON THREADED DESPATCH*");
            while (txn == 0) {
                txn = ph.generateNewTransactionRef();
                if (txn > 0) {
                    logger.debug("Transaction Number = " + String.valueOf(txn));
                    setDespatchDate(com.commander4j.util.JUtility.getSQLDateTime());
                    logger.debug("Confirm Date       = " + String.valueOf(getDespatchDate()));
                    try {

                        // Update Despatch Status
                        logger.debug("Updating Despatch Status");
                        PreparedStatement stmtupdate0;

                        stmtupdate0 = Common.hostList.getHost(getHostID()).getConnection(getSessionID())
                                .prepareStatement(Common.hostList.getHost(getHostID()).getSqlstatements()
                                        .getSQL("JDBDespatch.setConfirmed"));
                        stmtupdate0.setLong(1, txn);
                        stmtupdate0.setString(2, "Confirmed");
                        stmtupdate0.setTimestamp(3, getDespatchDate());
                        stmtupdate0.setString(4, getDespatchNo());
                        stmtupdate0.setString(5, getDespatchNo());
                        stmtupdate0.execute();
                        stmtupdate0.clearParameters();
                        stmtupdate0.close();

                        // Write FROM Locations to Pallet History
                        logger.debug("Updating Pallet location_id records");
                        PreparedStatement stmtupdate1;

                        stmtupdate1 = Common.hostList.getHost(getHostID()).getConnection(getSessionID())
                                .prepareStatement(Common.hostList.getHost(getHostID()).getSqlstatements()
                                        .getSQL("JDBPalletHistory.insertFromPallet"));
                        stmtupdate1.setLong(1, txn);
                        stmtupdate1.setString(2, "DESPATCH");
                        stmtupdate1.setString(3, "FROM");
                        stmtupdate1.setTimestamp(4, getDespatchDate());
                        stmtupdate1.setString(5, getUserID());
                        stmtupdate1.setString(6, getDespatchNo());
                        stmtupdate1.execute();
                        stmtupdate1.clearParameters();
                        stmtupdate1.close();

                        // Update Pallet Locations to TO
                        logger.debug("Updating Pallet Locations");
                        PreparedStatement stmtupdate2;

                        stmtupdate2 = Common.hostList.getHost(getHostID()).getConnection(getSessionID())
                                .prepareStatement(Common.hostList.getHost(getHostID()).getSqlstatements()
                                        .getSQL("JDBPallet.updateLocationIDByDespatchNo"));
                        stmtupdate2.setString(1, getLocationIDTo());
                        stmtupdate2.setString(2, getUserID());
                        stmtupdate2.setTimestamp(3, getDespatchDate());
                        stmtupdate2.setString(4, getDespatchNo());
                        stmtupdate2.execute();
                        stmtupdate2.clearParameters();
                        stmtupdate2.close();

                        // Write TO Locations to Pallet History
                        logger.debug("Writing TO Pallet History records");
                        PreparedStatement stmtupdate3;

                        stmtupdate3 = Common.hostList.getHost(getHostID()).getConnection(getSessionID())
                                .prepareStatement(Common.hostList.getHost(getHostID()).getSqlstatements()
                                        .getSQL("JDBPalletHistory.insertFromPallet"));
                        stmtupdate3.setLong(1, txn);
                        stmtupdate3.setString(2, "DESPATCH");
                        stmtupdate3.setString(3, "TO");
                        stmtupdate3.setTimestamp(4, getDespatchDate());
                        stmtupdate3.setString(5, getUserID());
                        stmtupdate3.setString(6, getDespatchNo());
                        stmtupdate3.execute();
                        stmtupdate3.clearParameters();
                        stmtupdate3.close();

                        // COMMIT !
                        logger.debug("Commiting updates");
                        Common.hostList.getHost(getHostID()).getConnection(getSessionID()).commit();

                        // Refresh data
                        logger.debug("Refreshing data");
                        getDespatchProperties();

                        // Request Interfaces
                        if (getLocationDBTo().isDespatchConfirmationMessageRequired()) {
                            logger.debug("Requesting outbound despatch message");
                            odc = new OutgoingDespatchConfirmation(getHostID(), getSessionID());
                            odc.submit(txn);
                        }

                        if (getLocationDBTo().isDespatchEquipmentTrackingMessageRequired()) {
                            logger.debug("Requesting outbound equipment message");
                            oet = new OutgoingEquipmentTracking(getHostID(), getSessionID());
                            oet.submit(txn);
                        }

                        if (getLocationDBTo().isDespatchPreAdviceMessageRequired()) {
                            logger.debug("Requesting outbound pre-advice message");
                            opa = new OutgoingDespatchPreAdvice(getHostID(), getSessionID());
                            opa.submit(txn);
                        }

                        if (getLocationDBTo().isDespatchEmailRequired()) {
                            logger.debug("Requesting outbound despatch email message");
                            ode = new OutgoingDespatchEmail(getHostID(), getSessionID());
                            ode.submit(txn);
                        }

                        result = true;

                    } catch (SQLException e) {
                        logger.error("Confirm2 error )" + e.getMessage());
                        try {
                            Common.hostList.getHost(getHostID()).getConnection(getSessionID()).rollback();
                            logger.error("Confirm2 failed (rollback success)");
                        } catch (SQLException e1) {
                            logger.error("Confirm2 failed (rollback failure)" + e1.getMessage());
                        }
                    }

                } else {
                    logger.error("Unable to get Transaction Number - retrying");
                }

            }

        }
    }

    return result;
}
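
Each update statement in this example calls clearParameters() immediately before close(). The close() alone would suffice, but, per the Javadoc quoted at the top of this page, clearing releases the bound parameter values immediately, so the pattern frees them marginally earlier; for a statement that is about to be closed it is otherwise redundant.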

From source file: org.sakaiproject.search.component.service.impl.SearchIndexBuilderWorkerImpl.java

public boolean removeWorkerLock() {
    Connection connection = null;
    PreparedStatement selectLock = null;
    PreparedStatement selectNodeLock = null;
    PreparedStatement clearLock = null;
    ResultSet resultSet = null;

    try {

        // I need to go direct to JDBC since it's just too awful to
        // try and do this in Hibernate.

        connection = dataSource.getConnection();

        selectNodeLock = connection.prepareStatement(SELECT_NODE_LOCK_SQL);
        selectLock = connection.prepareStatement(SELECT_LOCK_SQL);
        clearLock = connection.prepareStatement(CLEAR_LOCK_SQL);

        SearchWriterLock swl = null;
        selectLock.clearParameters();
        selectLock.setString(1, LOCKKEY);
        resultSet = selectLock.executeQuery();
        if (resultSet.next()) {
            swl = new SearchWriterLockImpl();
            swl.setId(resultSet.getString(1));
            swl.setNodename(resultSet.getString(2));
            swl.setLockkey(resultSet.getString(3));
            swl.setExpires(resultSet.getTimestamp(4));
        } else {
            connection.rollback();
            return true;
        }

        resultSet.close();
        resultSet = null;

        selectNodeLock.clearParameters();
        resultSet = selectNodeLock.executeQuery();

        while (resultSet.next()) {
            SearchWriterLock node = new SearchWriterLockImpl();
            node.setId(resultSet.getString(1));
            node.setNodename(resultSet.getString(2));
            node.setLockkey(resultSet.getString(3));
            node.setExpires(resultSet.getTimestamp(4));
            if (swl.getNodename().equals(node.getNodename())) {
                log.info("Cant remove Lock to node " + node.getNodename() + " node exists ");
                connection.rollback();
                return false;
            }
        }

        resultSet.close();
        resultSet = null;

        clearLock.clearParameters();
        clearLock.setString(1, NO_NODE);
        clearLock.setTimestamp(2, new Timestamp(System.currentTimeMillis()));
        clearLock.setString(3, swl.getNodename());
        clearLock.setString(4, LOCKKEY);
        if (clearLock.executeUpdate() == 1) {
            log.warn("NODE UNLOCKED BY USER " + swl.getNodename());
            connection.commit();
        } else {
            log.info("NODE NOT UNLOCKED BY USER " + swl.getNodename());
            connection.commit();
            return false;
        }

        return true;

    } catch (Exception ex) {
        log.error("Failed to unlock ", ex);
        return false;
    } finally {
        if (resultSet != null) {
            try {
                resultSet.close();
            } catch (SQLException e) {
            }
        }
        if (selectLock != null) {
            try {
                selectLock.close();
            } catch (SQLException e) {
            }
        }
        if (selectNodeLock != null) {
            try {
                selectNodeLock.close();
            } catch (SQLException e) {
            }
        }
        if (clearLock != null) {
            try {
                clearLock.close();
            } catch (SQLException e) {
            }
        }

        if (connection != null) {
            try {
                connection.close();
                log.debug("Connection Closed ");
            } catch (SQLException e) {
                log.error("Error Closing Connection ", e);
            }
            connection = null;
        }
    }
}

From source file: dk.netarkivet.harvester.datamodel.DomainDBDAO.java

/**
 * Update the list of owner info for the given domain, keeping IDs where
 * applicable.
 * @param c 
 *            A connection to the database
 * @param d
 *            A domain to update.
 * @throws SQLException
 *             If any database problems occur during the update process.
 */
private void updateOwnerInfo(Connection c, Domain d) throws SQLException {
    List<Long> oldIDs = DBUtils.selectLongList(c, "SELECT ownerinfo_id FROM ownerinfo " + "WHERE domain_id = ?",
            d.getID());
    PreparedStatement s = c.prepareStatement(
            "UPDATE ownerinfo SET " + "created = ?, " + "info = ? " + "WHERE ownerinfo_id = ?");
    for (DomainOwnerInfo doi : d.getAllDomainOwnerInfo()) {
        if (doi.hasID() && oldIDs.remove(doi.getID())) {
            s.setTimestamp(1, new Timestamp(doi.getDate().getTime()));
            DBUtils.setStringMaxLength(s, 2, doi.getInfo(), Constants.MAX_OWNERINFO_SIZE, doi, "owner info");
            s.setLong(3, doi.getID());
            s.executeUpdate();
            s.clearParameters();
        } else {
            insertOwnerInfo(c, d, doi);
        }
    }
    if (oldIDs.size() != 0) {
        String message = "Not allowed to delete ownerinfo " + oldIDs + " on " + d;
        log.debug(message);
        throw new IOFailure(message);
    }
}

From source file: org.sakaiproject.util.conversion.SchemaConversionController.java

public boolean migrate(DataSource datasource, SchemaConversionHandler convert, SchemaConversionDriver driver)
        throws SchemaConversionException {
    // issues:
    // Data size bigger than max size for this type?
    // Failure may cause rest of set to fail?

    boolean alldone = false;
    Connection connection = null;
    PreparedStatement selectNextBatch = null;
    PreparedStatement markNextBatch = null;
    PreparedStatement completeNextBatch = null;
    PreparedStatement selectRecord = null;
    PreparedStatement selectValidateRecord = null;
    PreparedStatement updateRecord = null;
    PreparedStatement reportError = null;
    ResultSet rs = null;
    try {
        connection = datasource.getConnection();
        connection.setAutoCommit(false);
        selectNextBatch = connection.prepareStatement(driver.getSelectNextBatch());
        markNextBatch = connection.prepareStatement(driver.getMarkNextBatch());
        completeNextBatch = connection.prepareStatement(driver.getCompleteNextBatch());
        String selectRecordStr = driver.getSelectRecord();
        selectRecord = connection.prepareStatement(selectRecordStr);
        selectValidateRecord = connection.prepareStatement(driver.getSelectValidateRecord());
        updateRecord = connection.prepareStatement(driver.getUpdateRecord());
        if (reportErrorsInTable) {
            reportError = connection.prepareStatement(driver.getErrorReportSql());
        }
        // log.info("  +++ updateRecord == " + driver.getUpdateRecord());

        // 2. select x at a time
        rs = selectNextBatch.executeQuery();
        List<String> l = new ArrayList<String>();
        while (rs.next()) {
            l.add(rs.getString(1));
        }
        rs.close();
        log.info("Migrating " + l.size() + " records of " + nrecords);

        for (String id : l) {

            markNextBatch.clearParameters();
            markNextBatch.clearWarnings();
            markNextBatch.setString(1, id);
            if (markNextBatch.executeUpdate() != 1) {
                log.warn("  --> Failed to mark id [" + id + "][" + id.length() + "] for processing ");
                insertErrorReport(reportError, id, driver.getHandler(),
                        "Unable to mark this record for processing");
            }
        }

        int count = 1;
        for (String id : l) {
            selectRecord.clearParameters();
            selectRecord.setString(1, id);
            rs = selectRecord.executeQuery();
            Object source = null;
            if (rs.next()) {
                source = convert.getSource(id, rs);
            } else {
                log.warn("  --> Result-set is empty for id: " + id + " [" + count + " of " + l.size() + "]");
                insertErrorReport(reportError, id, driver.getHandler(), "Result set empty getting source");
            }
            rs.close();
            if (source == null) {
                log.warn("  --> Source is null for id: " + id + " [" + count + " of " + l.size() + "]");
                insertErrorReport(reportError, id, driver.getHandler(), "Source null");
            } else {
                try {
                    updateRecord.clearParameters();
                    if (convert.convertSource(id, source, updateRecord)) {
                        if (updateRecord.executeUpdate() == 1) {
                            selectValidateRecord.clearParameters();
                            selectValidateRecord.setString(1, id);
                            rs = selectValidateRecord.executeQuery();
                            Object result = null;
                            if (rs.next()) {
                                result = convert.getValidateSource(id, rs);
                            }

                            convert.validate(id, source, result);
                        } else {
                            log.warn("  --> Failed to update record " + id + " [" + count + " of " + l.size()
                                    + "]");
                            insertErrorReport(reportError, id, driver.getHandler(), "Failed to update record");
                        }
                    } else {
                        log.warn("  --> Did not update record " + id + " [" + count + " of " + l.size() + "]");
                        insertErrorReport(reportError, id, driver.getHandler(), "Failed to write update to db");
                    }
                    rs.close();
                } catch (SQLException e) {
                    String msg = "  --> Failure converting or validating item " + id + " [" + count + " of "
                            + l.size() + "] \n";
                    insertErrorReport(reportError, id, driver.getHandler(),
                            "Exception while updating, converting or verifying item");
                    SQLWarning warnings = updateRecord.getWarnings();
                    while (warnings != null) {
                        msg += "\t\t\t" + warnings.getErrorCode() + "\t" + warnings.getMessage() + "\n";
                        warnings = warnings.getNextWarning();
                    }
                    log.warn(msg, e);
                    updateRecord.clearWarnings();
                    updateRecord.clearParameters();
                }

            }
            completeNextBatch.clearParameters();
            completeNextBatch.setString(1, id);
            if (completeNextBatch.executeUpdate() != 1) {
                log.warn("  --> Failed to mark id " + id + " for processing [" + count + " of " + l.size()
                        + "]");
                insertErrorReport(reportError, id, driver.getHandler(), "Unable to complete next batch");
            }
            count++;
        }

        if (l.size() == 0) {
            dropRegisterTable(connection, convert, driver);
            alldone = true;
        }
        connection.commit();
        nrecords -= l.size();

    } catch (Exception e) {
        log.error("Failed to perform migration ", e);
        try {
            connection.rollback();
            log.error("  ==> Rollback Sucessful ", e);
        } catch (Exception ex) {
            log.error("  ==> Rollback Failed ", e);
        }
        throw new SchemaConversionException(
                "Schema Conversion has been aborted due to earlier errors, please investigate ");

    } finally {
        try {
            rs.close();
        } catch (Exception ex) {
            log.debug("exception closing rs " + ex);
        }
        try {
            selectNextBatch.close();
        } catch (Exception ex) {
            log.debug("exception closing selectNextBatch " + ex);
        }
        try {
            markNextBatch.close();
        } catch (Exception ex) {
            log.debug("exception closing markNextBatch " + ex);
        }
        try {
            completeNextBatch.close();
        } catch (Exception ex) {
            log.debug("exception closing completeNextBatch " + ex);
        }
        try {
            selectRecord.close();
        } catch (Exception ex) {
            log.debug("exception closing selectRecord " + ex);
        }
        try {
            selectValidateRecord.close();
        } catch (Exception ex) {
            log.debug("exception closing selectValidateRecord " + ex);
        }
        try {
            updateRecord.close();
        } catch (Exception ex) {
            log.debug("exception closing updateRecord " + ex);
        }
        if (reportError != null) {
            try {
                reportError.close();
            } catch (Exception ex) {
                log.debug("exception closing reportError " + ex);
            }
        }

        try {
            connection.close();

        } catch (Exception ex) {
            log.debug("Exception closing connection " + ex);
        }

    }
    return !alldone;
}
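
The hand-written finally block above closes each of its eight resources individually because the code predates try-with-resources. As a hedged sketch (not the project's actual code), the same prepare-and-clean-up structure collapses to something like the following on Java 7+, with the driver.get*() SQL accessors standing in for those used above:

    // Sketch: equivalent resource handling with try-with-resources.
    try (Connection connection = datasource.getConnection();
            PreparedStatement selectNextBatch = connection.prepareStatement(driver.getSelectNextBatch());
            PreparedStatement markNextBatch = connection.prepareStatement(driver.getMarkNextBatch());
            PreparedStatement completeNextBatch = connection.prepareStatement(driver.getCompleteNextBatch())) {
        connection.setAutoCommit(false);
        // ... per-record migration loop as in migrate() above, calling
        // clearParameters() before each reuse of a statement ...
        connection.commit();
    } // every statement and the connection are closed here, even on failure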

From source file: org.sakaiproject.search.index.impl.JDBCClusterIndexStore.java

/**
 * Update a segment from the database.
 * 
 * @param connection
 * @param addsi
 */
protected void updateLocalSegmentFilesystem(Connection connection, SegmentInfo addsi)
        throws SQLException, IOException {
    if (log.isDebugEnabled())
        log.debug("Updating local segment from databse " + addsi);
    PreparedStatement segmentSelect = null;
    ResultSet resultSet = null;
    try {
        segmentSelect = connection.prepareStatement("select version_ from search_segments where name_ = ?");
        segmentSelect.clearParameters();
        segmentSelect.setString(1, addsi.getName());
        resultSet = segmentSelect.executeQuery();
        if (resultSet.next()) {
            InputStream packetStream = null;
            try {
                long version = resultSet.getLong(1);
                File f = new File(getSharedFileName(addsi.getName(), sharedStructuredStorage));
                if (f.exists()) {
                    packetStream = new FileInputStream(f);
                    clusterStorage.unpackSegment(addsi, packetStream, version);
                    if (log.isDebugEnabled())
                        log.debug("Updated Local " + addsi);
                } else {
                    log.warn("Shared Segment file is missing " + f.getPath());
                }
            } finally {
                try {
                    if (packetStream != null) {
                        packetStream.close();
                    }
                } catch (Exception ex) {
                    log.debug(ex);
                }
            }
        } else {
            log.error("Didnt find segment in database");
        }
    } finally {
        try {
            resultSet.close();
        } catch (Exception ex) {
            log.debug(ex);
        }
        try {
            segmentSelect.close();
        } catch (Exception ex) {
            log.debug(ex);
        }
    }

}

From source file: org.sakaiproject.search.index.impl.JDBCClusterIndexStore.java

protected void updateLocalPatchFilesystem(Connection connection) throws SQLException, IOException {
    if (log.isDebugEnabled())
        log.debug("Updating local patch ");
    PreparedStatement segmentSelect = null;
    ResultSet resultSet = null;
    try {
        segmentSelect = connection.prepareStatement("select version_ from search_segments where name_ = ?");
        segmentSelect.clearParameters();
        segmentSelect.setString(1, INDEX_PATCHNAME);
        resultSet = segmentSelect.executeQuery();
        if (resultSet.next()) {
            InputStream packetStream = null;
            try {

                File f = new File(getSharedFileName(INDEX_PATCHNAME, sharedStructuredStorage));
                if (f.exists()) {
                    packetStream = new FileInputStream(f);
                    clusterStorage.unpackPatch(packetStream);
                    if (log.isDebugEnabled())
                        log.debug("Updated Patch ");
                } else {
                    log.warn("Shared Segment File does not exist " + f.getPath());
                }
            } finally {
                try {
                    if (packetStream != null) {
                        packetStream.close();
                    }
                } catch (Exception ex) {
                    log.debug(ex);
                }
            }
        } else {
            if (log.isDebugEnabled())
                log.debug("Didnt find patch in database, this is Ok");
        }
    } finally {
        try {
            resultSet.close();
        } catch (Exception ex) {
            log.debug(ex);
        }
        try {
            segmentSelect.close();
        } catch (Exception ex) {
            log.debug(ex);
        }
    }

}
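
In these last two examples clearParameters() is invoked immediately after prepareStatement(). A freshly prepared statement has no bound values, so the first call is purely defensive; it does no harm, and it makes the reset explicit if the surrounding code is later changed to reuse the statement.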