Example usage for java.sql PreparedStatement clearParameters

List of usage examples for java.sql PreparedStatement clearParameters

Introduction

On this page you can find usage examples for java.sql.PreparedStatement.clearParameters().

Prototype

void clearParameters() throws SQLException;

Document

Clears the current parameter values immediately.
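
Before the project examples below, here is a minimal, self-contained sketch of the call. The H2 in-memory JDBC URL, the users table, and the ClearParametersExample class are hypothetical; any JDBC driver behaves the same way.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;

public class ClearParametersExample {
    public static void main(String[] args) throws SQLException {
        // Hypothetical in-memory H2 database; replace with your own JDBC URL.
        try (Connection con = DriverManager.getConnection("jdbc:h2:mem:demo")) {
            try (Statement ddl = con.createStatement()) {
                ddl.execute("CREATE TABLE users (id INT PRIMARY KEY, name VARCHAR(32))");
            }
            try (PreparedStatement ps = con.prepareStatement(
                    "INSERT INTO users (id, name) VALUES (?, ?)")) {
                ps.setInt(1, 1);
                ps.setString(2, "alice");
                ps.executeUpdate();

                // Reset all parameter bindings before reusing the statement.
                // Without clearParameters(), the old values stay bound until
                // each one is explicitly overwritten.
                ps.clearParameters();
                ps.setInt(1, 2);
                ps.setString(2, "bob");
                ps.executeUpdate();
            }
        }
    }
}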

Usage

From source file:org.apache.marmotta.kiwi.persistence.KiWiConnection.java

/**
 * Update the metadata value for the given key; can be used by KiWi modules to set module-specific metadata.
 *
 * @param key   the metadata key to update
 * @param value the new value to store
 * @throws SQLException
 */
public void setMetadata(String key, String value) throws SQLException {
    requireJDBCConnection();

    PreparedStatement statement = getPreparedStatement("meta.get");
    statement.setString(1, key); // bind the key for the existence check
    ResultSet result = statement.executeQuery();
    try {
        if (result.next()) {
            PreparedStatement update = getPreparedStatement("meta.update");
            update.clearParameters();
            update.setString(1, value);
            update.setString(2, key);
            update.executeUpdate();
        } else {
            PreparedStatement insert = getPreparedStatement("meta.insert");
            insert.clearParameters();
            insert.setString(1, key);
            insert.setString(2, value);
            insert.executeUpdate();
        }
    } finally {
        result.close();
    }
}
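
The statements here come from a per-connection cache, so clearParameters() ensures no bindings from an earlier call survive before the new key and value are set.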

From source file:org.apache.marmotta.kiwi.persistence.KiWiConnection.java

public synchronized void flushBatch() throws SQLException {
    if (batchCommit && tripleBatch != null) {
        requireJDBCConnection();

        commitLock.lock();
        try {
            RetryExecution execution = new RetryExecution("FLUSH BATCH");
            execution.setUseSavepoint(true);
            execution.execute(connection, new RetryCommand<Void>() {
                @Override
                public Void run() throws SQLException {
                    PreparedStatement insertTriple = getPreparedStatement("store.triple");
                    insertTriple.clearParameters();
                    insertTriple.clearBatch();

                    synchronized (tripleBatch) {
                        for (KiWiTriple triple : tripleBatch) {
                            // retrieve a new triple ID and set it in the object
                            if (triple.getId() < 0) {
                                triple.setId(getNextSequence());
                            }

                            insertTriple.setLong(1, triple.getId());
                            insertTriple.setLong(2, triple.getSubject().getId());
                            insertTriple.setLong(3, triple.getPredicate().getId());
                            insertTriple.setLong(4, triple.getObject().getId());
                            if (triple.getContext() != null) {
                                insertTriple.setLong(5, triple.getContext().getId());
                            } else {
                                insertTriple.setNull(5, Types.BIGINT);
                            }
                            insertTriple.setBoolean(6, triple.isInferred());
                            insertTriple.setTimestamp(7, new Timestamp(triple.getCreated().getTime()));

                            insertTriple.addBatch();
                        }
                    }
                    insertTriple.executeBatch();

                    tripleBatch.clear();

                    return null;
                }
            });

        } finally {
            commitLock.unlock();
        }

    }

}
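
Because the store.triple statement is cached and reused across flushes, both clearParameters() and clearBatch() are called first, discarding stale bindings and any half-built batch before new rows are added with addBatch().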

From source file:org.sakaiproject.search.index.impl.JDBCClusterIndexStore.java

/**
 * Pack the local patch segment and save it into the database.
 *
 * @param connection
 */
protected void updateDBPatchBLOB(Connection connection) throws SQLException, IOException {

    PreparedStatement segmentUpdate = null;
    PreparedStatement segmentInsert = null;
    InputStream packetStream = null;
    File packetFile = null;
    long newVersion = System.currentTimeMillis();
    try {
        segmentUpdate = connection.prepareStatement(
                "update search_segments set packet_ = ?, version_ = ?, size_ = ? where name_ = ?");
        segmentInsert = connection.prepareStatement(
                "insert into search_segments (packet_, name_, version_, size_ ) values ( ?,?,?,?)");
        packetFile = clusterStorage.packPatch();
        if (packetFile.exists()) {
            packetStream = new FileInputStream(packetFile);
            segmentUpdate.clearParameters();
            segmentUpdate.setBinaryStream(1, packetStream, (int) packetFile.length());
            segmentUpdate.setLong(2, newVersion);
            segmentUpdate.setLong(3, packetFile.length());
            segmentUpdate.setString(4, INDEX_PATCHNAME);
            if (segmentUpdate.executeUpdate() != 1) {
                // the update attempt consumed the stream; re-open it for the insert
                packetStream.close();
                packetStream = new FileInputStream(packetFile);
                segmentInsert.clearParameters();
                segmentInsert.setBinaryStream(1, packetStream, (int) packetFile.length());
                segmentInsert.setString(2, INDEX_PATCHNAME);
                segmentInsert.setLong(3, newVersion);
                segmentInsert.setLong(4, packetFile.length());
                if (segmentInsert.executeUpdate() != 1) {
                    throw new SQLException(" Failed to insert patch  ");
                }
            }
            if (log.isDebugEnabled())
                log.debug("DB Updated Patch ");
        } else {
            log.warn(" Packed Patch does not exist " + packetFile.getPath());
        }
    } finally {
        try {
            if (packetStream != null) {
                packetStream.close();
            }
        } catch (Exception ex) {
            log.debug(ex);
        }
        try {
            packetFile.delete();
        } catch (Exception ex) {
            log.debug(ex);
        }
        try {
            segmentUpdate.close();
        } catch (Exception ex) {
            log.debug(ex);
        }
        try {
            segmentInsert.close();
        } catch (Exception ex) {
            log.debug(ex);
        }
    }

}
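
clearParameters() is called before the binary packet is bound; besides resetting the values, it releases any resources the driver still holds for previous stream parameters.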

From source file:org.apache.roller.weblogger.business.startup.DatabaseInstaller.java

/**
 * Upgrade database for Roller 2.1.0
 */
private void upgradeTo210(Connection con, boolean runScripts) throws StartupException {
    SQLScriptRunner runner = null;
    try {
        if (runScripts) {
            String handle = getDatabaseHandle(con);
            String scriptPath = handle + "/200-to-210-migration.sql";
            successMessage("Running database upgrade script: " + scriptPath);
            runner = new SQLScriptRunner(scripts.getDatabaseScript(scriptPath));
            runner.runScript(con, true);
            messages.addAll(runner.getMessages());
        }

        /*
         * For Roller 2.1.0 we are going to standardize some of the
         * weblog templates and make them less editable.  To do this
         * we need to do a little surgery.
         *
         * The goal for this upgrade is to ensure that ALL weblogs now have
         * the required "Weblog" template as their default template.
         */

        successMessage("Doing upgrade to 210 ...");
        successMessage("Ensuring that all weblogs use the 'Weblog' template as their default page");

        // this query will give us all websites that have modified their
        // default page to link to something other than "Weblog"
        PreparedStatement selectUpdateWeblogs = con
                .prepareStatement("select website.id,template,website.handle from website,webpage "
                        + "where webpage.id = website.defaultpageid " + "and webpage.link != 'Weblog'");

        PreparedStatement selectWeblogTemplate = con
                .prepareStatement("select id from webpage where websiteid = ? and link = 'Weblog'");

        PreparedStatement updateWeblogTemplate = con
                .prepareStatement("update webpage set template = ? where id = ?");

        // insert a new template for a website
        PreparedStatement insertWeblogTemplate = con.prepareStatement("insert into webpage"
                + "(id, name, description, link, websiteid, template, updatetime) " + "values(?,?,?,?,?,?,?)");

        // update the default page for a website
        PreparedStatement updateDefaultPage = con
                .prepareStatement("update website set defaultpageid = ? " + "where id = ?");

        String description = "This template is used to render the main " + "page of your weblog.";
        ResultSet websiteSet = selectUpdateWeblogs.executeQuery();
        Date now = new Date();
        while (websiteSet.next()) {
            String websiteid = websiteSet.getString(1);
            String template = websiteSet.getString(2);
            String handle = websiteSet.getString(3);
            successMessage("Processing website: " + handle);

            String defaultpageid = null;

            // it's possible that this weblog has a "Weblog" template, but just
            // isn't using it as their default.  if so we need to fix that.
            selectWeblogTemplate.clearParameters();
            selectWeblogTemplate.setString(1, websiteid);
            ResultSet weblogPageSet = selectWeblogTemplate.executeQuery();
            if (weblogPageSet.next()) {
                // this person already has a "Weblog" template, so update it
                String id = weblogPageSet.getString(1);

                updateWeblogTemplate.clearParameters();
                updateWeblogTemplate.setString(1, template);
                updateWeblogTemplate.setString(2, id);
                updateWeblogTemplate.executeUpdate();

                // make sure and adjust what default page id we want to use
                defaultpageid = id;
            } else {
                // no "Weblog" template, so insert a new one
                insertWeblogTemplate.clearParameters();
                insertWeblogTemplate.setString(1, websiteid + "q");
                insertWeblogTemplate.setString(2, "Weblog");
                insertWeblogTemplate.setString(3, description);
                insertWeblogTemplate.setString(4, "Weblog");
                insertWeblogTemplate.setString(5, websiteid);
                insertWeblogTemplate.setString(6, template);
                insertWeblogTemplate.setDate(7, new java.sql.Date(now.getTime()));
                insertWeblogTemplate.executeUpdate();

                // set the new default page id
                defaultpageid = websiteid + "q";
            }

            // update defaultpageid value
            updateDefaultPage.clearParameters();
            updateDefaultPage.setString(1, defaultpageid);
            updateDefaultPage.setString(2, websiteid);
            updateDefaultPage.executeUpdate();
        }

        if (!con.getAutoCommit())
            con.commit();

        successMessage("Upgrade to 210 complete.");

    } catch (Exception e) {
        log.error("ERROR running 310 database upgrade script", e);
        if (runner != null)
            messages.addAll(runner.getMessages());

        log.error("Problem upgrading database to version 210", e);
        throw new StartupException("Problem upgrading database to version 210", e);
    }

    updateDatabaseVersion(con, 210);
}
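
Each of the three reused statements is cleared at the top of its branch, so bindings from one weblog cannot leak into the next iteration of the loop.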

From source file:dk.netarkivet.harvester.datamodel.DomainDBDAO.java

/**
 * Update the list of passwords for the given domain, keeping IDs where
 * applicable.
 *
 * @param c
 *            A connection to the database
 * @param d
 *            A domain to update.
 * @throws SQLException
 *             If any database problems occur during the update process.
 */
private void updatePasswords(Connection c, Domain d) throws SQLException {
    Map<String, Long> oldNames = DBUtils.selectStringLongMap(c,
            "SELECT name, password_id FROM passwords " + "WHERE domain_id = ?", d.getID());
    PreparedStatement s = c.prepareStatement("UPDATE passwords SET " + "comments = ?, " + "url = ?, "
            + "realm = ?, " + "username = ?, " + "password = ? " + "WHERE name = ? AND domain_id = ?");
    for (Iterator<Password> pwds = d.getAllPasswords(); pwds.hasNext();) {
        Password pwd = pwds.next();
        if (oldNames.containsKey(pwd.getName())) {
            DBUtils.setComments(s, 1, pwd, Constants.MAX_COMMENT_SIZE);
            DBUtils.setStringMaxLength(s, 2, pwd.getPasswordDomain(), Constants.MAX_URL_SIZE, pwd,
                    "password url");
            DBUtils.setStringMaxLength(s, 3, pwd.getRealm(), Constants.MAX_REALM_NAME_SIZE, pwd,
                    "password realm");
            DBUtils.setStringMaxLength(s, 4, pwd.getUsername(), Constants.MAX_USER_NAME_SIZE, pwd,
                    "password username");
            DBUtils.setStringMaxLength(s, 5, pwd.getPassword(), Constants.MAX_PASSWORD_SIZE, pwd, "password");
            s.setString(6, pwd.getName());
            s.setLong(7, d.getID());
            s.executeUpdate();
            s.clearParameters();
            pwd.setID(oldNames.get(pwd.getName()));
            oldNames.remove(pwd.getName());
        } else {
            insertPassword(c, d, pwd);
        }
    }
    s.close();
    s = c.prepareStatement("DELETE FROM passwords WHERE password_id = ?");
    for (Long gone : oldNames.values()) {
        // Check that we're not deleting something that's in use
        // Since deletion is very rare, this is allowed to take
        // some time.
        String usages = DBUtils.getUsages(c,
                "SELECT configurations.name" + "  FROM configurations, config_passwords"
                        + " WHERE configurations.config_id = " + "config_passwords.config_id"
                        + "   AND config_passwords.password_id = ?",
                gone, gone);
        if (usages != null) {
            String name = DBUtils.selectStringValue(c, "SELECT name FROM passwords WHERE password_id = ?",
                    gone);
            String message = "Cannot delete password " + name + " as it is used in " + usages;
            log.debug(message);
            throw new PermissionDenied(message);
        }
        s.setLong(1, gone);
        s.executeUpdate();
        s.clearParameters();
    }
}
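
Here the statement is cleared immediately after each executeUpdate() rather than before the next one; either order works, as long as the statement is reset between reuses.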

From source file:org.sakaiproject.search.index.impl.JDBCClusterIndexStore.java

/**
 * Pack this local segment and save it into the database.
 *
 * @param connection
 * @param addsi
 */
protected void updateDBSegmentBLOB(Connection connection, SegmentInfo addsi) throws SQLException, IOException {

    PreparedStatement segmentUpdate = null;
    PreparedStatement segmentInsert = null;
    InputStream packetStream = null;
    File packetFile = null;
    long newVersion = System.currentTimeMillis();
    try {
        segmentUpdate = connection.prepareStatement(
                "update search_segments set packet_ = ?, version_ = ?, size_ = ? where name_ = ? and version_ = ?");
        segmentInsert = connection.prepareStatement(
                "insert into search_segments (packet_, name_, version_, size_ ) values ( ?,?,?,?)");
        packetFile = clusterStorage.packSegment(addsi, newVersion);
        if (packetFile.exists()) {
            packetStream = new FileInputStream(packetFile);
            if (addsi.isInDb()) {
                segmentUpdate.clearParameters();
                segmentUpdate.setBinaryStream(1, packetStream, (int) packetFile.length());
                segmentUpdate.setLong(2, newVersion);
                segmentUpdate.setLong(3, packetFile.length());
                segmentUpdate.setString(4, addsi.getName());
                segmentUpdate.setLong(5, addsi.getVersion());
                if (segmentUpdate.executeUpdate() != 1) {
                    throw new SQLException(" ant Find packet to update " + addsi);
                }
            } else {
                segmentInsert.clearParameters();
                segmentInsert.setBinaryStream(1, packetStream, (int) packetFile.length());
                segmentInsert.setString(2, addsi.getName());
                segmentInsert.setLong(3, newVersion);
                segmentInsert.setLong(4, packetFile.length());
                if (segmentInsert.executeUpdate() != 1) {
                    throw new SQLException(" Failed to insert packet  " + addsi);
                }
            }
            addsi.setVersion(newVersion);
            if (log.isDebugEnabled())
                log.debug("DB Updated " + addsi);
            try {
                packetStream.close();
            } catch (Exception ex) {
                log.debug(ex);
            }
            try {
                packetFile.delete();
            } catch (Exception ex) {
                log.debug(ex);
            }
        } else {
            log.warn("Packet file does not exist " + packetFile.getPath());
        }

    } finally {
        try {
            packetStream.close();
        } catch (Exception ex) {
            log.debug(ex);
        }
        try {
            packetFile.delete();
        } catch (Exception ex) {
            log.debug(ex);
        }
        try {
            segmentUpdate.close();
        } catch (Exception ex) {
            log.debug(ex);
        }
        try {
            segmentInsert.close();
        } catch (Exception ex) {
            log.debug(ex);
        }
    }

}

From source file:org.sakaiproject.search.journal.impl.JournaledFSIndexStorage.java

private void deleteJournalSavePoint() {
    Connection connection = null;
    PreparedStatement deleteJournalSavePointPst = null;
    if (datasource != null) {
        try {
            connection = datasource.getConnection();
            boolean tableExists = false;
            PreparedStatement checkJournalSavePoint = null;
            ResultSet rs = null;
            try {
                checkJournalSavePoint = connection.prepareStatement("select count(*) from search_node_status ");
                rs = checkJournalSavePoint.executeQuery();
                if (rs.next()) {
                    tableExists = true;
                }
            } catch (Exception ex) {
                if (log.isDebugEnabled()) {
                    log.debug("Failed to check for existance of table, this is Ok on first start ", ex);
                }
            } finally {
                try {
                    if (rs != null) {
                        rs.close();
                    }
                } catch (Exception ex) {
                    log.debug(ex);

                }
                try {
                    checkJournalSavePoint.close();
                } catch (Exception ex) {
                    log.debug(ex);
                }
            }
            if (tableExists) {
                // SRCH-3: deleting this entirely can lead to search inconsistency
                deleteJournalSavePointPst = connection
                        .prepareStatement("update search_node_status set jid = -1 where serverid = ? ");
                deleteJournalSavePointPst.clearParameters();
                deleteJournalSavePointPst.setString(1, serverId);
                deleteJournalSavePointPst.executeUpdate();
                connection.commit();
            }
        } catch (Exception ex) {
            log.warn("Unable to delete Search Jorunal SavePoint ", ex);
        } finally {
            try {
                deleteJournalSavePointPst.close();
            } catch (Exception ex) {
                log.debug(ex);
            }
            try {
                connection.close();
            } catch (Exception ex) {
                log.debug(ex);
            }
        }
    }
}

From source file:org.apache.marmotta.kiwi.persistence.KiWiConnection.java

/**
 * Internal implementation for actually carrying out the query. Returns a closable iteration that can be used
 * in a repository result. The iteration is forward-only and does not allow removing result rows.
 *
 * @param subject    the subject to query for, or null for a wildcard query
 * @param predicate  the predicate to query for, or null for a wildcard query
 * @param object     the object to query for, or null for a wildcard query
 * @param context    the context to query for, or null for a wildcard query
 * @param inferred   if true, the result will also contain triples inferred by the reasoner, if false not
 * @param wildcardContext if true, a null context will be interpreted as a wildcard, if false, a null context will be interpreted as "no context"
 * @return a ClosableIteration that wraps the database ResultSet; needs to be closed explicitly by the caller
 * @throws SQLException
 */
private CloseableIteration<Statement, SQLException> listTriplesInternal(KiWiResource subject,
        KiWiUriResource predicate, KiWiNode object, KiWiResource context, boolean inferred,
        final boolean wildcardContext) throws SQLException {
    // if one of the database ids is null, there will not be any database results, so we can return an empty result
    if (subject != null && subject.getId() < 0) {
        return new EmptyIteration<Statement, SQLException>();
    }
    if (predicate != null && predicate.getId() < 0) {
        return new EmptyIteration<Statement, SQLException>();
    }
    if (object != null && object.getId() < 0) {
        return new EmptyIteration<Statement, SQLException>();
    }
    if (context != null && context.getId() < 0) {
        return new EmptyIteration<Statement, SQLException>();
    }

    requireJDBCConnection();

    // otherwise we need to create an appropriate SQL query and execute it, the repository result will be read-only
    // and only allow forward iteration, so we can limit the query using the respective flags
    PreparedStatement query = connection.prepareStatement(
            constructTripleQuery(subject, predicate, object, context, inferred, wildcardContext),
            ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
    query.clearParameters();

    if (persistence.getDialect().isCursorSupported()) {
        query.setFetchSize(persistence.getConfiguration().getCursorSize());
    }

    // set query parameters
    int position = 1;
    if (subject != null) {
        query.setLong(position++, subject.getId());
    }
    if (predicate != null) {
        query.setLong(position++, predicate.getId());
    }
    if (object != null) {
        query.setLong(position++, object.getId());
    }
    if (context != null) {
        query.setLong(position++, context.getId());
    }

    final ResultSet result = query.executeQuery();

    return new CloseableIteration<Statement, SQLException>() {

        List<KiWiTriple> batch = null;
        int batchPosition = 0;

        @Override
        public void close() throws SQLException {
            result.close();
        }

        @Override
        public boolean hasNext() throws SQLException {
            fetchBatch();

            return batch.size() > batchPosition;
        }

        @Override
        public Statement next() throws SQLException {
            fetchBatch();

            if (batch.size() > batchPosition) {
                return batch.get(batchPosition++);
            } else {
                return null;
            }
        }

        private void fetchBatch() throws SQLException {
            if (batch == null || batch.size() <= batchPosition) {
                batch = constructTriplesFromDatabase(result, QUERY_BATCH_SIZE);
                batchPosition = 0;
            }
        }

        @Override
        public void remove() throws SQLException {
            throw new UnsupportedOperationException("removing results not supported");
        }
    };
}
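
Note that clearParameters() is called on a statement that was just created by prepareStatement(), so it carries no stale bindings; the call is purely defensive.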

From source file:org.wso2.carbon.idp.mgt.dao.IdPManagementDAO.java

/**
 * @param conn
 * @param idPId
 * @param addedRoles
 * @param deletedRoles
 * @param renamedOldRoles
 * @param renamedNewRoles
 * @throws SQLException
 */
private void updateIdPRoles(Connection conn, int idPId, List<String> addedRoles, List<String> deletedRoles,
        List<String> renamedOldRoles, List<String> renamedNewRoles) throws SQLException {

    PreparedStatement prepStmt = null;
    String sqlStmt = null;

    try {

        for (String deletedRole : deletedRoles) {
            sqlStmt = IdPManagementConstants.SQLQueries.DELETE_IDP_ROLES_SQL;
            prepStmt = conn.prepareStatement(sqlStmt);
            prepStmt.setInt(1, idPId);
            prepStmt.setString(2, deletedRole);
            prepStmt.addBatch();
        }

        prepStmt.executeBatch();
        prepStmt.clearParameters();
        prepStmt.clearBatch();
        IdentityApplicationManagementUtil.closeStatement(prepStmt);

        for (String addedRole : addedRoles) {
            sqlStmt = IdPManagementConstants.SQLQueries.ADD_IDP_ROLES_SQL;
            prepStmt = conn.prepareStatement(sqlStmt);
            prepStmt.setInt(1, idPId);
            prepStmt.setString(2, CharacterEncoder.getSafeText(addedRole));
            prepStmt.addBatch();
        }

        prepStmt.executeBatch();
        prepStmt.clearParameters();
        prepStmt.clearBatch();
        IdentityApplicationManagementUtil.closeStatement(prepStmt);

        for (int i = 0; i < renamedOldRoles.size(); i++) {
            sqlStmt = IdPManagementConstants.SQLQueries.UPDATE_IDP_ROLES_SQL;
            prepStmt = conn.prepareStatement(sqlStmt);
            prepStmt.setString(1, CharacterEncoder.getSafeText(renamedNewRoles.get(i)));
            prepStmt.setInt(2, idPId);
            prepStmt.setString(3, CharacterEncoder.getSafeText(renamedOldRoles.get(i)));
            prepStmt.addBatch();
        }

        prepStmt.executeBatch();

    } finally {
        if (prepStmt != null) {
            prepStmt.clearParameters();
            prepStmt.clearBatch();
            IdentityApplicationManagementUtil.closeStatement(prepStmt);
        }
    }

}
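
The finally block resets and closes the statement even when executeBatch() fails, so the pooled connection is returned without leftover bindings.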

From source file:com.pactera.edg.am.metamanager.extractor.dao.helper.CreateMetadataHelper.java

public Object doInPreparedStatement(PreparedStatement ps) throws SQLException {
    // Batch-insert each metadata instance of this meta-model.

    Map<String, String> mAttrs = metaModel.getMAttrs();
    boolean hasChildMetaModel = metaModel.isHasChildMetaModel();

    // all metadata instances attached to this meta-model
    List<AbstractMetadata> metadatas = metaModel.getMetadatas();
    int size = metadatas.size();
    String code = "";
    String metaModelCode = "";
    MMMetadata parentMetadata = null;
    String logMsg = "";
    try {
        for (int i = 0; i < size; i++) {

            MMMetadata metadata = (MMMetadata) metadatas.get(i);
            if (metadata.isHasExist()) {
                // skip metadata that has already been persisted
                continue;
            }

            parentMetadata = metadata.getParentMetadata();
            if (parentMetadata == null) {
                String error = new StringBuilder("?:").append(metadata.getCode())
                        .append(" ,??!!").toString();
                log.error(error);
                throw new SQLException(error);
            }
            String metadataNamespace = genNamespace(parentMetadata, metadata.getId(), hasChildMetaModel);

            // metadata ID
            ps.setString(1, metadata.getId());
            code = metadata.getCode();
            // metadata code
            ps.setString(2, code);
            // metadata name (fall back to the code when the name is empty)
            ps.setString(3,
                    (metadata.getName() == null || metadata.getName().equals("")) ? code : metadata.getName());
            // meta-model (classifier) code
            metaModelCode = metaModel.getCode();
            ps.setString(4, metaModelCode);

            // namespace ID and parent metadata ID
            ps.setString(5, metadataNamespace);
            ps.setString(6, parentMetadata.getId());
            // START_TIME: 
            ps.setLong(7, this.getGlobalTime());

            int index = setAttrs(ps, metadata, mAttrs);

            setPs(ps, metadata, index + 7);

            if (log.isDebugEnabled()) {
                log.debug(new StringBuilder().append(":parent_id:").append(parentMetadata.getId())
                        .append(",parent_code:").append(parentMetadata.getCode()).append(",instance_code:")
                        .append(code).append(",classifier_id:").append(metaModelCode).toString());
            }
            ps.addBatch();
            // clear bindings so values cannot leak into the next row
            ps.clearParameters();

            if (++super.count % super.batchSize == 0) {
                ps.executeBatch();
                ps.clearBatch();
            }
        }

        if (super.count % super.batchSize != 0) {
            ps.executeBatch();
            ps.clearBatch();

        }
    } catch (SQLException e) {
        logMsg = new StringBuilder().append("?,?:parent_id:")
                .append(parentMetadata.getId()).append(",parent_code:").append(parentMetadata.getCode())
                .append(",instance_code:").append(code).append(",classifier_id:").append(metaModelCode)
                .append("  ?:").append(e.getLocalizedMessage()).toString();
        log.error(logMsg);
        AdapterExtractorContext.addExtractorLog(ExtractorLogLevel.ERROR, logMsg);
        throw e;
    }
    return null;
    // test for callback
    // throw new SQLException();
}
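
Inside the batch loop, clearParameters() is called after every addBatch(), and the accumulated batch is flushed and cleared with executeBatch()/clearBatch() every batchSize rows.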