Example usage for java.sql Statement addBatch

List of usage examples for java.sql Statement addBatch

Introduction

On this page you can find example usages of java.sql Statement addBatch.

Prototype

void addBatch(String sql) throws SQLException;

Source Link

Document

Adds the given SQL command to the current list of commands for this Statement object.

Usage

From source file:com.flexive.core.storage.genericSQL.GenericTreeStorageSpreaded.java

/**
 * {@inheritDoc}
 *
 * Moves the node {@code nodeId} under {@code newParentId} at {@code newPosition},
 * reorganizing the spreaded left/right interval encoding of the tree.
 */
@Override
public void move(Connection con, SequencerEngine seq, FxTreeMode mode, long nodeId, long newParentId,
        int newPosition) throws FxApplicationException {

    // Check both nodes (this throws an Exception if they do not exist)
    FxTreeNodeInfo node = getTreeNodeInfo(con, mode, nodeId);
    FxTreeNodeInfoSpreaded destinationNode = (FxTreeNodeInfoSpreaded) getTreeNodeInfo(con, mode, newParentId);
    final FxTreeNodeInfo parent = getTreeNodeInfo(con, mode, newParentId);

    // Lock the moved node, the new parent and the old parent for the update
    acquireLocksForUpdate(con, mode, Arrays.asList(nodeId, newParentId, node.getParentId()));

    final long currentPos = node.getPosition();

    // Sanity checks for the position: clamp into the destination's child range
    if (newPosition < 0) {
        newPosition = 0;
    } else if (newPosition > parent.getDirectChildCount()) {
        newPosition = parent.getDirectChildCount() == 0 ? 1 : parent.getDirectChildCount();
    }

    final boolean getsNewParent = node.getParentId() != newParentId;

    // Take ourself into account if the node stays at the same level
    if (!getsNewParent) {
        if (node.getPosition() == newPosition) {
            // Nothing to do at all
            return;
        } else if (newPosition < currentPos) {
            // moving towards the front: no adjustment (a decrement here was
            // intentionally disabled in the original implementation)
        } else {
            // moving towards the back: skip over our own current slot
            newPosition = newPosition + 1;
        }
    }
    if (newPosition < 0)
        newPosition = 0;

    final long oldParent = node.getParentId();

    // Node may not be moved inside itself!
    if (nodeId == newParentId || node.isParentOf(destinationNode)) {
        throw new FxTreeException("ex.tree.move.recursion", nodeId);
    }

    // Make space for the new nodes (the moved subtree plus the node itself)
    BigInteger spacing = makeSpace(con, seq, mode, newParentId, newPosition, node.getTotalChildCount() + 1);

    // Reload the node to obtain the new boundary and spacing informations
    destinationNode = (FxTreeNodeInfoSpreaded) getTreeNodeInfo(con, mode, newParentId);
    BigInteger boundaries[] = getBoundaries(con, destinationNode, newPosition);

    // Move the nodes: shift the subtree into the freed interval, adjusting depth
    int depthDelta = (destinationNode.getDepth() + 1) - node.getDepth();
    reorganizeSpace(con, seq, mode, mode, node.getId(), true, spacing, boundaries[0], null, -1, null, null,
            depthDelta, null, false, false, true);

    Statement stmt = null;
    final String TRUE = StorageManager.getBooleanTrueExpression();
    try {
        // Update the parent of the node (and flag it dirty outside Live mode)
        stmt = con.createStatement();
        stmt.addBatch("UPDATE " + getTable(mode) + " SET PARENT=" + newParentId + " WHERE ID=" + nodeId);
        if (mode != FxTreeMode.Live)
            stmt.addBatch("UPDATE " + getTable(mode) + " SET DIRTY=" + TRUE + " WHERE ID=" + nodeId);
        stmt.executeBatch();
        stmt.close();

        // Update the childcount of the new and old parent if needed + set dirty flag
        if (getsNewParent) {
            node = getTreeNodeInfo(con, mode, nodeId);
            stmt = con.createStatement();
            stmt.addBatch("UPDATE " + getTable(mode) + " SET CHILDCOUNT=CHILDCOUNT+1 WHERE ID=" + newParentId);
            stmt.addBatch("UPDATE " + getTable(mode) + " SET CHILDCOUNT=CHILDCOUNT-1 WHERE ID=" + oldParent);
            if (mode != FxTreeMode.Live) {
                final List<Long> newChildren = selectAllChildNodeIds(con, mode, node.getLeft(), node.getRight(),
                        false);
                acquireLocksForUpdate(con, mode, Iterables.concat(newChildren, Arrays.asList(nodeId)));

                // Partition the dirty-flag updates so the IN clause stays within DB limits
                for (List<Long> part : Iterables.partition(newChildren, SQL_IN_PARTSIZE)) {
                    stmt.addBatch("UPDATE " + getTable(mode) + " SET DIRTY=" + TRUE + " WHERE ID IN ("
                            + StringUtils.join(part, ',') + ")");
                }

                stmt.addBatch("UPDATE " + getTable(mode) + " SET DIRTY=" + TRUE + " WHERE ID IN(" + oldParent
                        + "," + newParentId + ")");
            }
            stmt.executeBatch();
            stmt.close();
        }

    } catch (SQLException e) {
        throw new FxTreeException(e, "ex.tree.move.parentUpdate.failed", node.getId(), e.getMessage());
    } finally {
        try {
            if (stmt != null)
                stmt.close();
        } catch (Exception exc) {
            //ignore
        }
    }
}

From source file:org.freebxml.omar.server.persistence.rdb.RegistryObjectDAO.java

/**
 * Update the status of specified objects (homogenous collection) to the specified status.
 * @param statusUnchanged if an id in registryObjectIds is in this ArrayList, no AuditableEvent
 * generated for that RegistryObject/*from   ww  w  . ja v  a  2s  . com*/
 */
public void updateStatus(RegistryObjectType ro, String status) throws RegistryException {
    Statement stmt = null;

    // HIEOS/BHT/AMS: Changed to also update status in the RegistryObject table.
    try {
        stmt = context.getConnection().createStatement();
        String registryObjectTableName = getTableName();
        // First update the concrete table (e.g. ExtrinsicObject, RegistryPackage).
        String sql = this.getSQLStatementFragmentForStatusUpdate(registryObjectTableName, status, ro.getId());
        log.trace("SQL = " + sql);
        stmt.addBatch(sql);

        // Now, update the RegistryObject table (if not already updated above).
        if (!registryObjectTableName.equals(RegistryObjectDAO.getTableNameStatic())) {
            sql = this.getSQLStatementFragmentForStatusUpdate(RegistryObjectDAO.getTableNameStatic(), status,
                    ro.getId());
            log.trace("SQL = " + sql);
            stmt.addBatch(sql);
        }
        stmt.executeBatch();
    } catch (SQLException e) {
        log.error(ServerResourceBundle.getInstance().getString("message.CaughtException"), e);
        throw new RegistryException(e);
    } finally {
        closeStatement(stmt);
    }
}

From source file:cc.tooyoung.common.db.JdbcTemplate.java

/**
 * Runs the given SQL statements as one JDBC batch when the driver supports
 * batching, otherwise executes them one by one.
 *
 * @param sql the SQL statements to execute; must not be empty
 * @return per-statement update counts
 */
public int[] batchUpdate(final String[] sql) throws DataAccessException {
    Assert.notEmpty(sql, "SQL array must not be empty");
    if (ApiLogger.isTraceEnabled()) {
        ApiLogger.trace(new StringBuilder(128).append("Executing SQL batch update of ").append(sql.length)
                .append(" statements"));
    }

    // Tracks the statement currently in flight so getSql() can report it on error.
    class BatchingCallback implements StatementCallback, SqlProvider {
        private String current;

        public Object doInStatement(Statement stmt) throws SQLException, DataAccessException {
            if (!JdbcUtils.supportsBatchUpdates(stmt.getConnection())) {
                // Driver cannot batch: run every statement individually.
                int[] affected = new int[sql.length];
                for (int i = 0; i < sql.length; i++) {
                    this.current = sql[i];
                    if (stmt.execute(sql[i])) {
                        throw new InvalidDataAccessApiUsageException("Invalid batch SQL statement: " + sql[i]);
                    }
                    affected[i] = stmt.getUpdateCount();
                }
                return affected;
            }
            // Batch-capable driver: queue everything, then execute once.
            for (String statement : sql) {
                this.current = statement;
                stmt.addBatch(statement);
            }
            return stmt.executeBatch();
        }

        public String getSql() {
            return current;
        }
    }
    return (int[]) execute(new BatchingCallback(), true);
}

From source file:com.edgenius.wiki.installation.UpgradeServiceImpl.java

/**
 * Upgrades installation data from version 3.0 to 3.1:
 * rewrites the embedded MQ broker URL to the tcp:// form with inactivity
 * checks disabled, runs the 3000-3100 migration SQL, and re-creates the
 * quartz tables.
 *
 * Fix over the previous version: the two Statements and the connection are
 * now closed in finally blocks, so a failed migration no longer leaks them.
 *
 * @throws Exception if the configuration cannot be read/written or a
 *                   non-DROP migration statement fails
 */
@SuppressWarnings("unused")
private void up3000To3100() throws Exception {
    log.info("Version 3.0 to 3.1 is upgrading");

    String root = DataRoot.getDataRoot();
    if (FileUtil.exist(root + Server.FILE)) {
        Server server = new Server();
        Properties prop = FileUtil.loadProperties(root + Server.FILE);
        server.syncFrom(prop);
        if (server.getMqServerEmbedded() == null || BooleanUtils.toBoolean(server.getMqServerEmbedded())) {
            // embedded broker: ensure the URL uses the tcp:// transport
            if (!server.getMqServerUrl().startsWith("tcp://")) {
                server.setMqServerUrl(
                        "tcp://" + server.getMqServerUrl() + "?wireFormat.maxInactivityDuration=0");
                server.syncTo(prop);
                // NOTE(review): the output stream from FileUtil is not closed
                // here — presumably the helper handles that; verify.
                prop.store(FileUtil.getFileOutputStream(root + Server.FILE), "save by system program");
            }
        }
    }

    //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    // database - remove all quartz tables - we don't backup Exportable job(backup and remove space) - it is not perfect but not big issue.
    if (FileUtil.exist(root + Server.FILE)) {
        Server server = new Server();
        Properties prop = FileUtil.loadProperties(root + Server.FILE);
        server.syncFrom(prop);
        String dbType = server.getDbType();

        String migrateSQL = dbType + "-3000-3100.sql";
        DBLoader loader = new DBLoader();
        ConnectionProxy con = loader.getConnection(dbType, server.getDbUrl(), server.getDbSchema(),
                server.getDbUsername(), server.getDbPassword());
        try {
            loader.runSQLFile(dbType, migrateSQL, con);

            //reload quartz table
            log.info("Initialize quartz tables for system...");
            Statement stat = con.createStatement();
            Statement dropStat = con.createStatement();
            try {
                List<String> lines = loader.loadSQLFile(dbType, dbType + "-quartz.sql");
                for (String sql : lines) {
                    sql = sql.replaceAll("\n", " ").trim();
                    if (sql.toLowerCase().startsWith("drop ")) {
                        // DROP failures are expected on a fresh schema; log and continue
                        try {
                            dropStat.execute(sql);
                        } catch (Exception e) {
                            log.error("Drop operation failed...." + sql);
                        }
                        continue;
                    }
                    stat.addBatch(sql);
                }
                stat.executeBatch();
            } finally {
                dropStat.close();
                stat.close();
            }
        } finally {
            con.close();
        }
    }

}

From source file:org.kuali.kpme.core.util.ClearDatabaseLifecycle.java

/**
 * Deletes test rows from every table in TABLES_TO_CLEAR and
 * alternativeTablesToClear for the given schema, batching all DELETEs inside
 * a single transaction.
 */
protected void clearTables(final PlatformTransactionManager transactionManager, final DataSource dataSource,
        final String schemaName) {
    LOG.info("Clearing tables for schema " + schemaName);
    Assert.assertNotNull("DataSource could not be located.", dataSource);

    if (schemaName == null || schemaName.equals("")) {
        Assert.fail("Empty schema name given");
    }
    // All DELETEs run inside one transaction so a failure rolls everything back
    new TransactionTemplate(transactionManager).execute(new TransactionCallback<Object>() {
        public Object doInTransaction(final TransactionStatus status) {
            verifyTestEnvironment(dataSource);
            return new JdbcTemplate(dataSource).execute(new StatementCallback<Object>() {
                public Object doInStatement(Statement statement) throws SQLException {
                    // NOTE(review): reEnableConstraints is never populated, so the
                    // re-enable loop below is currently a no-op — confirm intent.
                    final List<String> reEnableConstraints = new ArrayList<String>();
                    List<List<String>> tableLists = new ArrayList<List<String>>(2);
                    tableLists.add(TABLES_TO_CLEAR);
                    tableLists.add(alternativeTablesToClear);
                    for (List<String> list : tableLists) {
                        for (String tableName : list) {
                            //if there is an id name that doesnt follow convention check and limit accordingly
                            String idName = TABLE_TO_ID_MAP.get(tableName);
                            String deleteStatement = null;
                            // Rows with ids below the clear threshold are kept
                            // (presumably seed data — TODO confirm)
                            Integer clearId = TABLE_START_CLEAR_ID.get(tableName) != null
                                    ? TABLE_START_CLEAR_ID.get(tableName)
                                    : START_CLEAR_ID;
                            if (idName == null) {
                                // Default convention: table FOO_T uses id column FOO_ID
                                deleteStatement = "DELETE FROM " + tableName + " WHERE "
                                        + StringUtils.removeEnd(tableName, "_T") + "_ID" + " >= " + clearId;
                            } else {
                                deleteStatement = "DELETE FROM " + tableName + " WHERE " + idName + " >= "
                                        + clearId;
                            }

                            LOG.debug("Clearing contents using statement ->" + deleteStatement + "<-");
                            statement.addBatch(deleteStatement);
                        }
                    }

                    for (final String constraint : reEnableConstraints) {
                        LOG.debug("Enabling constraints using statement ->" + constraint + "<-");
                        statement.addBatch(constraint);
                    }
                    statement.executeBatch();
                    return null;
                }
            });
        }
    });
    LOG.info("Tables successfully cleared for schema " + schemaName);
}

From source file:org.freebxml.omar.server.persistence.rdb.EmailAddressDAO.java

/**
 * Bulk-inserts the email addresses of every user in {@code users}.
 *
 * Fix over the previous version: values are bound through a
 * PreparedStatement instead of being concatenated into the SQL string, so an
 * address or type containing a single quote can no longer break the statement
 * or inject SQL.
 *
 * @param users list of {@code UserType} instances whose email addresses are stored
 * @throws RegistryException if a database error occurs
 */
public void insert(List users) throws RegistryException {
    if (users.size() == 0) {
        return;
    }

    // Fully-qualified to avoid requiring a new import in this file.
    java.sql.PreparedStatement stmt = null;

    try {
        String sql = "INSERT INTO " + getTableName() + " VALUES( ?, ?, ? )";
        stmt = context.getConnection().prepareStatement(sql);

        Iterator usersIter = users.iterator();

        while (usersIter.hasNext()) {
            UserType user = (UserType) usersIter.next();

            if (log.isDebugEnabled()) {
                try {
                    StringWriter writer = new StringWriter();
                    bu.rimFac.createMarshaller().marshal(user, writer);
                    log.debug("Inserting user: " + writer.getBuffer().toString());
                } catch (Exception e) {
                    log.debug("Failed to marshal user: ", e);
                }
            }

            String parentId = user.getId();

            List emails = user.getEmailAddress();
            Iterator emailsIter = emails.iterator();

            while (emailsIter.hasNext()) {
                EmailAddressType emailAddress = (EmailAddressType) emailsIter.next();

                stmt.setString(1, emailAddress.getAddress());
                // a null type is stored as SQL NULL, matching the old behavior
                stmt.setString(2, emailAddress.getType());
                stmt.setString(3, parentId);
                log.trace("SQL = " + sql);
                stmt.addBatch();
            }
        }

        stmt.executeBatch();
    } catch (SQLException e) {
        throw new RegistryException(e);
    } finally {
        closeStatement(stmt);
    }
}

From source file:org.apache.tajo.catalog.store.DBStore.java

/**
 * Creates the catalog base tables (META, TABLES, COLUMNS, OPTIONS, INDEXES,
 * STATISTICS) together with their indexes, under the write lock.
 *
 * Fixes over the previous version: a single Statement is reused and reliably
 * closed (the old code created five statements and closed none of them), and
 * the "is created" log messages now quote table names consistently.
 *
 * @throws SQLException if any DDL statement fails
 */
private void createBaseTable() throws SQLException {
    wlock.lock();
    try {
        Statement stmt = conn.createStatement();
        try {
            // META
            String meta_ddl = "CREATE TABLE " + TB_META + " (version int NOT NULL)";
            if (LOG.isDebugEnabled()) {
                LOG.debug(meta_ddl);
            }
            stmt.executeUpdate(meta_ddl);
            LOG.info("Table '" + TB_META + "' is created.");

            // TABLES
            String tables_ddl = "CREATE TABLE " + TB_TABLES + " ("
                    + "TID int NOT NULL GENERATED ALWAYS AS IDENTITY (START WITH 1, INCREMENT BY 1), " + C_TABLE_ID
                    + " VARCHAR(256) NOT NULL CONSTRAINT TABLE_ID_UNIQ UNIQUE, " + "path VARCHAR(1024), "
                    + "store_type CHAR(16), " + "options VARCHAR(32672), "
                    + "CONSTRAINT TABLES_PK PRIMARY KEY (TID)" + ")";
            if (LOG.isDebugEnabled()) {
                LOG.debug(tables_ddl);
            }
            stmt.addBatch(tables_ddl);

            String idx_tables_tid = "CREATE UNIQUE INDEX idx_tables_tid on " + TB_TABLES + " (TID)";
            if (LOG.isDebugEnabled()) {
                LOG.debug(idx_tables_tid);
            }
            stmt.addBatch(idx_tables_tid);

            String idx_tables_name = "CREATE UNIQUE INDEX idx_tables_name on " + TB_TABLES + "(" + C_TABLE_ID + ")";
            if (LOG.isDebugEnabled()) {
                LOG.debug(idx_tables_name);
            }
            stmt.addBatch(idx_tables_name);
            stmt.executeBatch();
            LOG.info("Table '" + TB_TABLES + "' is created.");

            // COLUMNS
            String columns_ddl = "CREATE TABLE " + TB_COLUMNS + " (" + "TID INT NOT NULL REFERENCES " + TB_TABLES
                    + " (TID) ON DELETE CASCADE, " + C_TABLE_ID + " VARCHAR(256) NOT NULL REFERENCES " + TB_TABLES
                    + "(" + C_TABLE_ID + ") ON DELETE CASCADE, " + "column_id INT NOT NULL,"
                    + "column_name VARCHAR(256) NOT NULL, " + "data_type CHAR(16), "
                    + "CONSTRAINT C_COLUMN_ID UNIQUE (" + C_TABLE_ID + ", column_name))";
            if (LOG.isDebugEnabled()) {
                LOG.debug(columns_ddl);
            }
            stmt.addBatch(columns_ddl);

            String idx_fk_columns_table_name = "CREATE UNIQUE INDEX idx_fk_columns_table_name on " + TB_COLUMNS
                    + "(" + C_TABLE_ID + ", column_name)";
            if (LOG.isDebugEnabled()) {
                LOG.debug(idx_fk_columns_table_name);
            }
            stmt.addBatch(idx_fk_columns_table_name);
            stmt.executeBatch();
            LOG.info("Table '" + TB_COLUMNS + "' is created.");

            // OPTIONS
            String options_ddl = "CREATE TABLE " + TB_OPTIONS + " (" + C_TABLE_ID
                    + " VARCHAR(256) NOT NULL REFERENCES TABLES (" + C_TABLE_ID + ") " + "ON DELETE CASCADE, "
                    + "key_ VARCHAR(256) NOT NULL, value_ VARCHAR(256) NOT NULL)";
            if (LOG.isDebugEnabled()) {
                LOG.debug(options_ddl);
            }
            stmt.addBatch(options_ddl);

            String idx_options_key = "CREATE INDEX idx_options_key on " + TB_OPTIONS + " (" + C_TABLE_ID + ")";
            if (LOG.isDebugEnabled()) {
                LOG.debug(idx_options_key);
            }
            stmt.addBatch(idx_options_key);
            String idx_options_table_name = "CREATE INDEX idx_options_table_name on " + TB_OPTIONS + "("
                    + C_TABLE_ID + ")";
            if (LOG.isDebugEnabled()) {
                LOG.debug(idx_options_table_name);
            }
            stmt.addBatch(idx_options_table_name);
            stmt.executeBatch();
            LOG.info("Table '" + TB_OPTIONS + "' is created.");

            // INDEXES
            String indexes_ddl = "CREATE TABLE " + TB_INDEXES + "("
                    + "index_name VARCHAR(256) NOT NULL PRIMARY KEY, " + C_TABLE_ID
                    + " VARCHAR(256) NOT NULL REFERENCES TABLES (" + C_TABLE_ID + ") " + "ON DELETE CASCADE, "
                    + "column_name VARCHAR(256) NOT NULL, " + "data_type VARCHAR(256) NOT NULL, "
                    + "index_type CHAR(32) NOT NULL, " + "is_unique BOOLEAN NOT NULL, "
                    + "is_clustered BOOLEAN NOT NULL, " + "is_ascending BOOLEAN NOT NULL)";
            if (LOG.isDebugEnabled()) {
                LOG.debug(indexes_ddl);
            }
            stmt.addBatch(indexes_ddl);

            String idx_indexes_key = "CREATE UNIQUE INDEX idx_indexes_key ON " + TB_INDEXES + " (index_name)";
            if (LOG.isDebugEnabled()) {
                LOG.debug(idx_indexes_key);
            }
            stmt.addBatch(idx_indexes_key);

            String idx_indexes_columns = "CREATE INDEX idx_indexes_columns ON " + TB_INDEXES + " (" + C_TABLE_ID
                    + ", column_name)";
            if (LOG.isDebugEnabled()) {
                LOG.debug(idx_indexes_columns);
            }
            stmt.addBatch(idx_indexes_columns);
            stmt.executeBatch();
            LOG.info("Table '" + TB_INDEXES + "' is created.");

            // STATISTICS
            String stats_ddl = "CREATE TABLE " + TB_STATISTICS + "(" + C_TABLE_ID
                    + " VARCHAR(256) NOT NULL REFERENCES TABLES (" + C_TABLE_ID + ") " + "ON DELETE CASCADE, "
                    + "num_rows BIGINT, " + "num_bytes BIGINT)";
            if (LOG.isDebugEnabled()) {
                LOG.debug(stats_ddl);
            }
            stmt.addBatch(stats_ddl);

            String idx_stats_fk_table_name = "CREATE INDEX idx_stats_table_name ON " + TB_STATISTICS + " ("
                    + C_TABLE_ID + ")";
            if (LOG.isDebugEnabled()) {
                LOG.debug(idx_stats_fk_table_name);
            }
            stmt.addBatch(idx_stats_fk_table_name);
            stmt.executeBatch();
            LOG.info("Table '" + TB_STATISTICS + "' is created.");
        } finally {
            stmt.close();
        }
    } finally {
        wlock.unlock();
    }
}

From source file:lib.JdbcTemplate.java

/**
 * Executes the given SQL statements as a JDBC batch when the driver supports
 * batching, falling back to statement-by-statement execution otherwise.
 *
 * @param sql the statements to run; must not be empty
 * @return per-statement update counts
 * @throws DataAccessException if execution fails or a statement returns a ResultSet
 */
@Override
public int[] batchUpdate(final String... sql) throws DataAccessException {
    Assert.notEmpty(sql, "SQL array must not be empty");
    if (logger.isDebugEnabled()) {
        logger.debug("Executing SQL batch update of " + sql.length + " statements");
    }

    class BatchUpdateStatementCallback implements StatementCallback<int[]>, SqlProvider {

        // SQL reported by getSql() for error context; accumulates all queued
        // statements in the batch path.
        private String currSql;

        @Override
        public int[] doInStatement(Statement stmt) throws SQLException, DataAccessException {
            int[] rowsAffected = new int[sql.length];
            if (JdbcUtils.supportsBatchUpdates(stmt.getConnection())) {
                // Batch path: queue everything, execute once.
                for (String sqlStmt : sql) {
                    this.currSql = appendSql(this.currSql, sqlStmt);
                    stmt.addBatch(sqlStmt);
                }
                try {
                    rowsAffected = stmt.executeBatch();
                } catch (BatchUpdateException ex) {
                    // Narrow currSql down to only the statements that failed,
                    // so the rethrown exception reports the actual culprits.
                    String batchExceptionSql = null;
                    for (int i = 0; i < ex.getUpdateCounts().length; i++) {
                        if (ex.getUpdateCounts()[i] == Statement.EXECUTE_FAILED) {
                            batchExceptionSql = appendSql(batchExceptionSql, sql[i]);
                        }
                    }
                    if (StringUtils.hasLength(batchExceptionSql)) {
                        this.currSql = batchExceptionSql;
                    }
                    throw ex;
                }
            } else {
                // Fallback path: the driver cannot batch; run each statement
                // individually. execute() returning true means a ResultSet was
                // produced, which is invalid for a batch update.
                for (int i = 0; i < sql.length; i++) {
                    this.currSql = sql[i];
                    if (!stmt.execute(sql[i])) {
                        rowsAffected[i] = stmt.getUpdateCount();
                    } else {
                        throw new InvalidDataAccessApiUsageException("Invalid batch SQL statement: " + sql[i]);
                    }
                }
            }
            return rowsAffected;
        }

        // Joins SQL fragments with "; " for readable error/context reporting.
        private String appendSql(String sql, String statement) {
            return (StringUtils.isEmpty(sql) ? statement : sql + "; " + statement);
        }

        @Override
        public String getSql() {
            return this.currSql;
        }
    }

    return execute(new BatchUpdateStatementCallback());
}

From source file:it.cnr.icar.eric.server.persistence.rdb.EmailAddressDAO.java

/**
 * Bulk-inserts the email addresses of every user in {@code users}.
 *
 * Fix over the previous version: values are bound through a
 * PreparedStatement instead of being concatenated into the SQL string, so an
 * address or type containing a single quote can no longer break the statement
 * or inject SQL.
 *
 * @param users list of {@code UserType} instances whose email addresses are stored
 * @throws RegistryException if a database error occurs
 */
public void insert(@SuppressWarnings("rawtypes") List users) throws RegistryException {
    if (users.size() == 0) {
        return;
    }

    // Fully-qualified to avoid requiring a new import in this file.
    java.sql.PreparedStatement stmt = null;

    try {
        String sql = "INSERT INTO " + getTableName() + " VALUES( ?, ?, ? )";
        stmt = context.getConnection().prepareStatement(sql);

        Iterator<?> usersIter = users.iterator();

        while (usersIter.hasNext()) {
            UserType user = (UserType) usersIter.next();

            if (log.isDebugEnabled()) {
                try {
                    StringWriter writer = new StringWriter();
                    bu.getJAXBContext().createMarshaller().marshal(user, writer);
                    log.debug("Inserting user: " + writer.getBuffer().toString());
                } catch (Exception e) {
                    log.debug("Failed to marshal user: ", e);
                }
            }

            String parentId = user.getId();

            List<EmailAddressType> emails = user.getEmailAddress();
            Iterator<EmailAddressType> emailsIter = emails.iterator();

            while (emailsIter.hasNext()) {
                EmailAddressType emailAddress = emailsIter.next();

                stmt.setString(1, emailAddress.getAddress());
                // a null type is stored as SQL NULL, matching the old behavior
                stmt.setString(2, emailAddress.getType());
                stmt.setString(3, parentId);
                log.trace("stmt = " + sql);
                stmt.addBatch();
            }
        }

        stmt.executeBatch();
    } catch (SQLException e) {
        throw new RegistryException(e);
    } finally {
        closeStatement(stmt);
    }
}

From source file:org.freebxml.omar.server.persistence.rdb.TelephoneNumberDAO.java

/**
 * Does a bulk insert of a Collection of objects that match the type for this persister.
 *
 * Fix over the previous version: values are bound through a
 * PreparedStatement instead of being quoted and concatenated into the SQL
 * string, so a field containing a single quote can no longer break the
 * statement or inject SQL. Null fields are bound as SQL NULL, matching the
 * old behavior of emitting an unquoted {@code null} literal.
 *
 * @param parentId         id of the owning object, stored with each number
 * @param telephoneNumbers the {@code TelephoneNumberType} instances to insert
 * @throws RegistryException if a database error occurs
 */
public void insert(String parentId, List telephoneNumbers) throws RegistryException {
    if (telephoneNumbers.size() == 0) {
        return;
    }

    log.debug(ServerResourceBundle.getInstance().getString("message.InsertingTelephoneNumbersSize",
            new Object[] { new Integer(telephoneNumbers.size()) }));

    // Fully-qualified to avoid requiring a new import in this file.
    java.sql.PreparedStatement stmt = null;

    try {
        String sql = "INSERT INTO TelephoneNumber " + "VALUES( ?, ?, ?, ?, ?, ? )";
        stmt = context.getConnection().prepareStatement(sql);

        Iterator iter = telephoneNumbers.iterator();

        while (iter.hasNext()) {
            TelephoneNumberType telephoneNumber = (TelephoneNumberType) iter.next();

            stmt.setString(1, telephoneNumber.getAreaCode());
            stmt.setString(2, telephoneNumber.getCountryCode());
            stmt.setString(3, telephoneNumber.getExtension());
            stmt.setString(4, telephoneNumber.getNumber());
            stmt.setString(5, telephoneNumber.getPhoneType());
            stmt.setString(6, parentId);

            log.trace("SQL = " + sql);
            stmt.addBatch();
        }

        stmt.executeBatch();
    } catch (SQLException e) {
        log.error(ServerResourceBundle.getInstance().getString("message.CaughtException1"), e);
        throw new RegistryException(e);
    } finally {
        closeStatement(stmt);
    }
}