Example usage for java.sql Statement executeBatch

List of usage examples for java.sql Statement executeBatch

Introduction

On this page you can find example usages for java.sql Statement executeBatch.

Prototype

int[] executeBatch() throws SQLException;

Document

Submits a batch of commands to the database for execution and, if all commands execute successfully, returns an array of update counts.
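
Before the full examples below, here is a minimal, self-contained sketch of the typical addBatch()/executeBatch() pattern. It assumes an H2 driver is on the classpath; the JDBC URL, the person table, and the inserted values are illustrative placeholders only, not taken from the examples that follow.

import java.sql.BatchUpdateException;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Arrays;

public class ExecuteBatchSketch {
    public static void main(String[] args) {
        // Hypothetical in-memory H2 database; URL, table and values are placeholders.
        try (Connection con = DriverManager.getConnection("jdbc:h2:mem:demo");
             Statement stmt = con.createStatement()) {
            con.setAutoCommit(false); // group the whole batch into one transaction

            stmt.addBatch("CREATE TABLE person(name VARCHAR(50))");
            stmt.addBatch("INSERT INTO person(name) VALUES ('Alice')");
            stmt.addBatch("INSERT INTO person(name) VALUES ('Bob')");

            // One round trip: returns the update count of each command, in order
            int[] updateCounts = stmt.executeBatch();
            con.commit();
            System.out.println("Update counts: " + Arrays.toString(updateCounts));
        } catch (BatchUpdateException e) {
            // Some commands failed; getUpdateCounts() reports those that did execute
            System.err.println("Batch failed, partial counts: " + Arrays.toString(e.getUpdateCounts()));
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }
}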

Usage

From source file:com.flexive.core.storage.genericSQL.GenericTreeStorageSpreaded.java

/**
 * {@inheritDoc}
 */
@Override
public long copy(Connection con, SequencerEngine seq, FxTreeMode mode, long srcNodeId, long dstParentNodeId,
        int dstPosition, boolean deepReferenceCopy, String copyOfPrefix) throws FxApplicationException {
    // Check both nodes (this throws a FxNotFoundException if they do not exist)
    final FxTreeNodeInfo sourceNode = getTreeNodeInfo(con, mode, srcNodeId);
    getTreeNodeInfo(con, mode, dstParentNodeId);

    // Make space for the new nodes
    BigInteger spacing = makeSpace(con, seq, mode, dstParentNodeId, dstPosition,
            sourceNode.getTotalChildCount() + 1);

    // Reload the node to obtain the new boundary and spacing information
    final FxTreeNodeInfoSpreaded destinationNode = (FxTreeNodeInfoSpreaded) getTreeNodeInfo(con, mode,
            dstParentNodeId);

    acquireLocksForUpdate(con, mode, Arrays.asList(srcNodeId, sourceNode.getParentId(), dstParentNodeId));

    // Copy the data
    BigInteger boundaries[] = getBoundaries(con, destinationNode, dstPosition);
    int depthDelta = (destinationNode.getDepth() + 1) - sourceNode.getDepth();
    long firstCreatedNodeId = reorganizeSpace(con, seq, mode, mode, sourceNode.getId(), true, spacing,
            boundaries[0], null, -1, null, null, depthDelta, dstParentNodeId, true, false, true);

    Statement stmt = null;
    PreparedStatement ps = null;
    try {
        // Update the childcount of the new parents
        stmt = con.createStatement();
        stmt.addBatch("UPDATE " + getTable(mode) + " SET CHILDCOUNT=CHILDCOUNT+1 WHERE ID=" + dstParentNodeId);
        stmt.executeBatch();

        if (deepReferenceCopy) {
            FxTreeNodeInfoSpreaded nodeInfo = (FxTreeNodeInfoSpreaded) getTreeNodeInfo(con, mode,
                    firstCreatedNodeId);
            ps = con.prepareStatement("SELECT ID,REF FROM " + getTable(mode) + " WHERE LFT>=? AND RGT<=?");
            setNodeBounds(ps, 1, nodeInfo.getLeft());
            setNodeBounds(ps, 2, nodeInfo.getRight());
            ResultSet rs = ps.executeQuery();
            final ContentEngine ce = EJBLookup.getContentEngine();
            while (rs != null && rs.next()) {
                FxPK pkRef = new FxPK(rs.getLong(2));
                long nodeId = rs.getLong(1);
                FxPK pkNew = ce.save(ce.load(pkRef).copyAsNewInstance());
                updateReference(con, mode, nodeId, pkNew.getId());
            }
        }
    } catch (SQLException exc) {
        throw new FxTreeException(
                "MoveNode: Failed to update the parent of node#" + srcNodeId + ": " + exc.getMessage());
    } finally {
        Database.closeObjects(GenericTreeStorageSpreaded.class, stmt, ps);
    }
    return firstCreatedNodeId;
}

From source file:org.freebxml.omar.server.persistence.rdb.AbstractDAO.java

/**
 * @see org.freebxml.omar.server.persistence.rdb.OMARDAO#insert(org.oasis.ebxml.registry.bindings.rim.UserType, java.sql.Connection, java.util.List, java.util.HashMap)
 */
public void insert(List objects) throws RegistryException {
    //Return immediately if no objects to insert
    if (objects.size() == 0) {
        return;
    }

    //First process any objects that may already exist in the persistence layer
    // BHT: OPTIMIZE ME!!!!!!!!!!!!!
    objects = processExistingObjects(objects);

    //Return immediately if no objects to insert
    if (objects.size() == 0) {
        return;
    }

    log.trace(ServerResourceBundle.getInstance().getString("message.InsertingRowsInTable",
            new Object[] { new Integer(objects.size()), getTableName() }));
    action = DAO_ACTION_INSERT;

    Statement stmt = null;
    try {
        stmt = context.getConnection().createStatement();

        Iterator iter = objects.iterator();
        while (iter.hasNext()) {
            Object obj = iter.next();

            String str = getSQLStatementFragment(obj);
            log.trace("SQL = " + str);
            stmt.addBatch(str);

            // HIEOS/BHT (Added block to get rid of MySQL performance bug with DB views).
            String mirrorImageStr = this.getSQLStatementFragmentForMirrorImage(obj);
            if (mirrorImageStr != null) {
                log.trace("SQL = " + mirrorImageStr); // HIEOS/BHT (DEBUG)
                stmt.addBatch(mirrorImageStr); // Now, insert into the mirror.
            }

            prepareToInsert(obj);
        }
        long startTime = System.currentTimeMillis();
        log.trace("AbstractDAO.insert: doing executeBatch");
        int[] updateCounts = stmt.executeBatch();
        long endTime = System.currentTimeMillis();
        log.trace("AbstractDAO.insert: done executeBatch elapedTimeMillis=" + (endTime - startTime));
        iter = objects.iterator();
        while (iter.hasNext()) {
            Object obj = iter.next();
            onInsert(obj);
        }

    } catch (SQLException e) {
        log.error(ServerResourceBundle.getInstance().getString("message.CaughtException1"), e);
        throw new RegistryException(e);
    } finally {
        closeStatement(stmt);
    }
}

From source file:jmdbtools.JMdbTools.java

private int[] insertData(Table dataTable, DbTable dbTable, Connection conn) {
    Statement stmt = null;
    int[] execStatus = new int[0];
    try {
        stmt = conn.createStatement();
        log("Creating Insert Statements:" + dbTable.getName(), "info");

        for (Row row : dataTable) {
            InsertQuery insertQuery = new InsertQuery(dbTable);
            for (Map.Entry<String, Object> col : row.entrySet()) {
                //We had to add crap to the column name, so have to muck about to get match

                DbColumn column = dbTable.findColumn(fixColumnName(col.getKey()));

                if (col.getValue() != null) {
                    if (column.getTypeNameSQL().equalsIgnoreCase("DATE")
                            || column.getTypeNameSQL().equalsIgnoreCase("DATETIME")) {
                        java.sql.Timestamp sqlDate = new java.sql.Timestamp(((Date) col.getValue()).getTime());
                        //log(sqlDate.toString(), "debug");
                        insertQuery.addColumn(column, sqlDate);
                    } else {
                        insertQuery.addColumn(column, col.getValue());
                    }
                }
            }
            stmt.addBatch(insertQuery.validate().toString());
        }
        log("Executing Insert", "info");

        execStatus = stmt.executeBatch();
    } catch (SQLException e) {
        e.printStackTrace();
    }

    return execStatus;

}

From source file:net.ymate.platform.persistence.jdbc.base.impl.BatchUpdateOperator.java

protected int __doExecute() throws Exception {
    Statement _statement = null;
    AccessorEventContext _context = null;
    try {
        IAccessor _accessor = new BaseAccessor(this.getAccessorConfig());
        if (StringUtils.isNotBlank(this.getSQL())) {
            _statement = _accessor.getPreparedStatement(this.getConnectionHolder().getConnection(),
                    this.getSQL());
            //
            for (SQLBatchParameter _batchParam : this.__batchParameters) {
                for (int i = 0; i < _batchParam.getParameters().size(); i++) {
                    SQLParameter _param = _batchParam.getParameters().get(i);
                    if (_param.getValue() == null) {
                        ((PreparedStatement) _statement).setNull(i + 1, 0);
                    } else {
                        ((PreparedStatement) _statement).setObject(i + 1, _param.getValue());
                    }
                }
                ((PreparedStatement) _statement).addBatch();
            }
        } else {
            _statement = _accessor.getStatement(this.getConnectionHolder().getConnection());
        }
        //
        for (String _batchSQL : this.__batchSQL) {
            _statement.addBatch(_batchSQL);
        }
        //
        if (this.getAccessorConfig() != null) {
            this.getAccessorConfig().beforeStatementExecution(
                    _context = new AccessorEventContext(_statement, JDBC.DB_OPERATION_TYPE.BATCH_UPDATE));
        }
        effectCounts = _statement.executeBatch();
        // sum the update counts to get the total number of affected rows
        int _count = 0;
        for (int _c : effectCounts) {
            _count += _c;
        }
        return _count;
    } finally {
        if (this.getAccessorConfig() != null && _context != null) {
            this.getAccessorConfig().afterStatementExecution(_context);
        }
        if (_statement != null) {
            _statement.clearBatch();
            _statement.close();
        }
    }
}

From source file:com.edgenius.wiki.installation.UpgradeServiceImpl.java

@SuppressWarnings("unused")
private void up3000To3100() throws Exception {
    log.info("Version 3.0 to 3.1 is upgarding");

    String root = DataRoot.getDataRoot();
    if (FileUtil.exist(root + Server.FILE)) {
        Server server = new Server();
        Properties prop = FileUtil.loadProperties(root + Server.FILE);
        server.syncFrom(prop);
        if (server.getMqServerEmbedded() == null || BooleanUtils.toBoolean(server.getMqServerEmbedded())) {
            //embedded
            if (!server.getMqServerUrl().startsWith("tcp://")) {
                server.setMqServerUrl(
                        "tcp://" + server.getMqServerUrl() + "?wireFormat.maxInactivityDuration=0");
                server.syncTo(prop);
                prop.store(FileUtil.getFileOutputStream(root + Server.FILE), "save by system program");
            }
        }
    }

    //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    // database - remove all quartz tables - we don't back up the Exportable job (backup and remove space) - not perfect, but not a big issue.
    if (FileUtil.exist(root + Server.FILE)) {
        Server server = new Server();
        Properties prop = FileUtil.loadProperties(root + Server.FILE);
        server.syncFrom(prop);
        String dbType = server.getDbType();

        String migrateSQL = dbType + "-3000-3100.sql";
        DBLoader loader = new DBLoader();
        ConnectionProxy con = loader.getConnection(dbType, server.getDbUrl(), server.getDbSchema(),
                server.getDbUsername(), server.getDbPassword());
        loader.runSQLFile(dbType, migrateSQL, con);

        //reload quartz table
        log.info("Initialize quartz tables for system...");
        Statement stat = con.createStatement();
        Statement dropStat = con.createStatement();
        List<String> lines = loader.loadSQLFile(dbType, dbType + "-quartz.sql");
        for (String sql : lines) {
            sql = sql.replaceAll("\n", " ").trim();
            if (sql.toLowerCase().startsWith("drop ")) {
                try {
                    dropStat.execute(sql);
                } catch (Exception e) {
                    log.error("Drop operation failed...." + sql);
                }
                continue;
            }
            stat.addBatch(sql);
        }
        stat.executeBatch();

        dropStat.close();
        stat.close();
        con.close();
    }

}

From source file:com.autentia.tnt.bill.migration.BillToBillPaymentMigration.java

/**
 * Executes a script received by parameter
 * @param script the script to be launched
 */
private static void executeScript(String script) throws Exception {
    Connection con = null;
    Statement stmt = null;
    LineNumberReader file = null;
    String delimiter = ";";

    try {
        log.debug("LOADING DATABASE SCRIPT" + script);

        // connect to database
        Class.forName(DATABASE_DRIVER);
        con = DriverManager.getConnection(DATABASE_CONNECTION, DATABASE_USER, DATABASE_PASS); //NOSONAR
        con.setAutoCommit(false); // DATABASE_PASS is null
        stmt = con.createStatement();

        // load file
        InputStream sqlScript = Thread.currentThread().getContextClassLoader().getResourceAsStream(script);
        if (sqlScript == null) {
            throw new FileNotFoundException(script);
        }
        file = new LineNumberReader(new InputStreamReader(new BufferedInputStream(sqlScript), "UTF-8"));

        // Add batched SQL sentences to statement
        StringBuilder sentence = new StringBuilder();
        String line;
        while ((line = file.readLine()) != null) {
            line = line.trim();
            if (!line.startsWith("--")) {
                // Interpret "DELIMITER" sentences
                if (line.trim().toUpperCase(Locale.ENGLISH).startsWith("DELIMITER")) {
                    delimiter = line.trim().substring("DELIMITER".length()).trim();
                } else {
                    // Add line to sentence
                    if (line.endsWith(delimiter)) {
                        // Remove delimiter
                        String lastLine = line.substring(0, line.length() - delimiter.length());

                        // Append line to sentence
                        sentence.append(lastLine);

                        // Execute sentence
                        log.debug(" " + sentence.toString());
                        stmt.addBatch(sentence.toString());

                        // Prepare new sentence
                        sentence = new StringBuilder();
                    } else {
                        // Append line to sentence
                        sentence.append(line);

                        // Append separator for next line
                        sentence.append(" ");
                    }
                }
            }
        }

        // Execute batch
        log.debug("upgradeDatabase - executing batch of commands");
        stmt.executeBatch();

        // Commit transaction
        log.debug("upgradeDatabase - commiting changes to database");
        con.commit();

        // Report end of migration
        log.debug("END LOADING DATABASE SCRIPT");

    } catch (Exception e) {
        log.error("FAILED: WILL BE ROLLED BACK: ", e);
        if (con != null) {
            con.rollback();
        }

    } finally {
        cierraFichero(file);
        liberaConexion(con, stmt, null);
    }
}

From source file:cc.tooyoung.common.db.JdbcTemplate.java

public int[] batchUpdate(final String[] sql) throws DataAccessException {
    Assert.notEmpty(sql, "SQL array must not be empty");
    if (ApiLogger.isTraceEnabled()) {
        ApiLogger.trace(new StringBuilder(128).append("Executing SQL batch update of ").append(sql.length)
                .append(" statements"));
    }

    class BatchUpdateStatementCallback implements StatementCallback, SqlProvider {
        private String currSql;

        public Object doInStatement(Statement stmt) throws SQLException, DataAccessException {
            int[] rowsAffected = new int[sql.length];
            if (JdbcUtils.supportsBatchUpdates(stmt.getConnection())) {
                for (int i = 0; i < sql.length; i++) {
                    this.currSql = sql[i];
                    stmt.addBatch(sql[i]);
                }
                rowsAffected = stmt.executeBatch();
            } else {
                for (int i = 0; i < sql.length; i++) {
                    this.currSql = sql[i];
                    if (!stmt.execute(sql[i])) {
                        rowsAffected[i] = stmt.getUpdateCount();
                    } else {
                        throw new InvalidDataAccessApiUsageException("Invalid batch SQL statement: " + sql[i]);
                    }
                }
            }
            return rowsAffected;
        }

        public String getSql() {
            return currSql;
        }
    }
    return (int[]) execute(new BatchUpdateStatementCallback(), true);
}

From source file:org.ensembl.healthcheck.util.DBUtils.java

/**
 * Read a database schema from a file and create a temporary database from
 * it.
 * 
 * @param fileName
 *            The name of the schema to read.
 * @return A connection to a database built from the schema.
 * @throws FileNotFoundException
 */
public static Connection importSchema(String fileName, String databaseURL, String user, String password)
        throws FileNotFoundException {
    Connection con = null;

    // ----------------------------------------------------
    // Parse the file first in case there are problems
    SQLParser sqlParser = new SQLParser();

    // try {
    List sqlCommands = sqlParser.parse(fileName);
    // sqlParser.printLines();
    // } catch (FileNotFoundException fnfe) {
    // fnfe.printStackTrace();
    // }

    // ----------------------------------------------------
    // create the database

    String tempDBName = DBUtils.generateTempDatabaseName();

    try {

        Class.forName(System.getProperty("driver"));

        Connection tmpCon = DriverManager.getConnection(databaseURL, user, password);

        String sql = "CREATE DATABASE " + tempDBName;
        logger.finest(sql);
        Statement stmt = tmpCon.createStatement();
        stmt.execute(sql);
        logger.fine("Database " + tempDBName + " created!");

        // close the temporary connection and create a "real" one
        tmpCon.close();
        con = DriverManager.getConnection(databaseURL + tempDBName, user, password);

    } catch (Exception e) {
        String msg = "Could not create database " + tempDBName;
        logger.severe(msg);
        throw new RuntimeException(msg, e);
    }

    // ----------------------------------------------------
    // Build the schema

    try {

        Statement stmt = con.createStatement();

        // Fill the batch of SQL commands
        stmt = sqlParser.populateBatch(stmt);

        // execute the batch that has been built up previously
        logger.info("Creating temporary database ...");
        stmt.executeBatch();
        logger.info("Done.");

        // close statement
        stmt.close();

    } catch (Exception e) {

        String msg = "Could not load schema for database " + tempDBName;
        logger.severe(msg);
        throw new RuntimeException(msg, e);

    }

    return con;

}

From source file:com.flexive.core.storage.genericSQL.GenericTreeStorageSpreaded.java

/**
 * {@inheritDoc}
 */
@Override
public void move(Connection con, SequencerEngine seq, FxTreeMode mode, long nodeId, long newParentId,
        int newPosition) throws FxApplicationException {

    // Check both nodes (this throws an Exception if they do not exist)
    FxTreeNodeInfo node = getTreeNodeInfo(con, mode, nodeId);
    FxTreeNodeInfoSpreaded destinationNode = (FxTreeNodeInfoSpreaded) getTreeNodeInfo(con, mode, newParentId);
    final FxTreeNodeInfo parent = getTreeNodeInfo(con, mode, newParentId);

    acquireLocksForUpdate(con, mode, Arrays.asList(nodeId, newParentId, node.getParentId()));

    final long currentPos = node.getPosition();

    // Sanity checks for the position
    if (newPosition < 0) {
        newPosition = 0;
    } else if (newPosition > parent.getDirectChildCount()) {
        newPosition = parent.getDirectChildCount() == 0 ? 1 : parent.getDirectChildCount();
    }

    final boolean getsNewParent = node.getParentId() != newParentId;

    // Take ourself into account if the node stays at the same level
    //System.out.println("newPos:"+newPosition);
    if (!getsNewParent) {
        if (node.getPosition() == newPosition) {
            // Nothing to do at all
            return;
        } else if (newPosition < currentPos) {
            //newPosition = newPosition - 1;
        } else {
            newPosition = newPosition + 1;
        }
    }
    if (newPosition < 0)
        newPosition = 0;
    //System.out.println("newPosX:"+newPosition);

    final long oldParent = node.getParentId();

    // Node may not be moved inside itself!
    if (nodeId == newParentId || node.isParentOf(destinationNode)) {
        throw new FxTreeException("ex.tree.move.recursion", nodeId);
    }

    // Make space for the new nodes
    BigInteger spacing = makeSpace(con, seq, mode, newParentId, newPosition, node.getTotalChildCount() + 1);

    // Reload the node to obtain the new boundary and spacing information
    destinationNode = (FxTreeNodeInfoSpreaded) getTreeNodeInfo(con, mode, newParentId);
    BigInteger boundaries[] = getBoundaries(con, destinationNode, newPosition);

    // Move the nodes
    int depthDelta = (destinationNode.getDepth() + 1) - node.getDepth();
    reorganizeSpace(con, seq, mode, mode, node.getId(), true, spacing, boundaries[0], null, -1, null, null,
            depthDelta, null, false, false, true);

    Statement stmt = null;
    final String TRUE = StorageManager.getBooleanTrueExpression();
    try {
        // Update the parent of the node
        stmt = con.createStatement();
        stmt.addBatch("UPDATE " + getTable(mode) + " SET PARENT=" + newParentId + " WHERE ID=" + nodeId);
        if (mode != FxTreeMode.Live)
            stmt.addBatch("UPDATE " + getTable(mode) + " SET DIRTY=" + TRUE + " WHERE ID=" + nodeId);
        stmt.executeBatch();
        stmt.close();

        // Update the childcount of the new and old parent if needed + set dirty flag
        if (getsNewParent) {
            node = getTreeNodeInfo(con, mode, nodeId);
            stmt = con.createStatement();
            stmt.addBatch("UPDATE " + getTable(mode) + " SET CHILDCOUNT=CHILDCOUNT+1 WHERE ID=" + newParentId);
            stmt.addBatch("UPDATE " + getTable(mode) + " SET CHILDCOUNT=CHILDCOUNT-1 WHERE ID=" + oldParent);
            if (mode != FxTreeMode.Live) {
                final List<Long> newChildren = selectAllChildNodeIds(con, mode, node.getLeft(), node.getRight(),
                        false);
                acquireLocksForUpdate(con, mode, Iterables.concat(newChildren, Arrays.asList(nodeId)));

                for (List<Long> part : Iterables.partition(newChildren, SQL_IN_PARTSIZE)) {
                    stmt.addBatch("UPDATE " + getTable(mode) + " SET DIRTY=" + TRUE + " WHERE ID IN ("
                            + StringUtils.join(part, ',') + ")");
                }

                stmt.addBatch("UPDATE " + getTable(mode) + " SET DIRTY=" + TRUE + " WHERE ID IN(" + oldParent
                        + "," + newParentId + ")");
            }
            stmt.executeBatch();
            stmt.close();
        }

    } catch (SQLException e) {
        throw new FxTreeException(e, "ex.tree.move.parentUpdate.failed", node.getId(), e.getMessage());
    } finally {
        try {
            if (stmt != null)
                stmt.close();
        } catch (Exception exc) {
            //ignore
        }
    }
}

From source file:com.raulexposito.alarife.sqlexecutor.SQLExecutor.java

/**
 * Updates the database from one version to other by using the scripts
 * @param databaseType database type
 * @param st sql statement to launch the creates and updates
 * @param nextVersion version of the scripts to be launched to upgrade to the next version
 * @throws com.raulexposito.alarife.exception.DatabaseException if something goes wrong
 */
private void updateToVersion(final DatabaseType databaseType, final ApplicationMode applicationMode,
        final Statement st, final Version nextVersion) throws DatabaseException {
    log.info("migrating to '" + nextVersion + "' version");

    final long startTime = System.currentTimeMillis();
    final ScriptsDirectoryUtil scriptsDirectoryUtil = new ScriptsDirectoryUtil();

    try {

        st.executeUpdate(dcfspr.getChangeDatabase(databaseType));
        log.debug("changed to schema '" + dcfspr.getInstance() + "'");

        // read the content of the file with the SQL commands to update tables
        final InputStream upgradeTables = scriptsDirectoryUtil.getUpgradeTablesScript(databaseType,
                applicationMode, nextVersion);
        log.info("reading the content of the upgrade tables script");

        final List<String> upgradeTablesCommands = getSQLCommandsFromScriptFile(upgradeTables);

        for (String command : upgradeTablesCommands) {
            st.addBatch(command);
        }

        // read the content of the file with the SQL commands to insert data
        final InputStream insertData = scriptsDirectoryUtil.getInsertDataScript(databaseType, applicationMode,
                nextVersion);
        log.info("reading the content of the insert data script");

        final List<String> insertDataCommands = getSQLCommandsFromScriptFile(insertData);

        for (String command : insertDataCommands) {
            st.addBatch(command);
        }

        // execution of the different commands
        st.executeBatch();
        log.info("scripts succesfully executed [" + (System.currentTimeMillis() - startTime) + " ms]");
    } catch (Exception e) {
        log.error(CANNOT_UPGRADE_TO_VERSION + "'" + nextVersion + "': " + e);
        throw new DatabaseException(CANNOT_UPGRADE_TO_VERSION + "'" + nextVersion + "'", e);
    }
}