Example usage for java.sql Connection getAutoCommit

List of usage examples for java.sql Connection getAutoCommit

Introduction

On this page you can find example usage for java.sql Connection getAutoCommit.

Prototype

boolean getAutoCommit() throws SQLException;

Document

Retrieves the current auto-commit mode for this Connection object.
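A common pattern, and the one most of the listings below follow, is to read the current auto-commit mode with getAutoCommit(), switch the connection into manual-commit mode for a unit of work, and restore the saved mode afterwards so a pooled connection is returned in the state it was borrowed in. The sketch below is a minimal illustration of that pattern, not taken from any of the source files listed here; the javax.sql.DataSource field named dataSource and the caller-supplied SQL string are assumptions for the example.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

import javax.sql.DataSource;

public class AutoCommitSketch {

    // Hypothetical DataSource; any configured JDBC DataSource would do.
    private final DataSource dataSource;

    public AutoCommitSketch(DataSource dataSource) {
        this.dataSource = dataSource;
    }

    public void runInTransaction(String updateSql) throws SQLException {
        try (Connection con = dataSource.getConnection()) {
            // Remember the mode the connection was handed out with.
            boolean wasAutoCommit = con.getAutoCommit();
            con.setAutoCommit(false);
            try (PreparedStatement ps = con.prepareStatement(updateSql)) {
                ps.executeUpdate();
                con.commit();
            } catch (SQLException e) {
                con.rollback();
                throw e;
            } finally {
                // Restore the original auto-commit mode before the connection
                // goes back to the pool.
                con.setAutoCommit(wasAutoCommit);
            }
        }
    }
}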

Usage

From source file:org.sakaiproject.mailarchive.impl.DbMailArchiveService.java

/**
 * fill in the draft and owner db fields
 */
protected void convertToDraft() {
    M_log.info("convertToDraft");

    try {
        // get a connection
        final Connection connection = m_sqlService.borrowConnection();
        boolean wasCommit = connection.getAutoCommit();
        connection.setAutoCommit(false);

        // read all message records that need conversion
        String sql = "select CHANNEL_ID, MESSAGE_ID, XML from " + m_rTableName /* + " where OWNER is null" */;
        m_sqlService.dbRead(connection, sql, null, new SqlReader() {
            private int count = 0;

            public Object readSqlResultRecord(ResultSet result) {
                try {
                    // create the Resource from the db xml
                    String channelId = result.getString(1);
                    String messageId = result.getString(2);
                    String xml = result.getString(3);

                    // read the xml
                    Document doc = Xml.readDocumentFromString(xml);

                    // verify the root element
                    Element root = doc.getDocumentElement();
                    if (!root.getTagName().equals("message")) {
                        M_log.warn("convertToDraft(): XML root element not message: " + root.getTagName());
                        return null;
                    }
                    Message m = new BaseMessageEdit(null, root);

                    // pick up the fields
                    String owner = m.getHeader().getFrom().getId();
                    boolean draft = m.getHeader().getDraft();

                    // update
                    String update = "update " + m_rTableName
                            + " set OWNER = ?, DRAFT = ? where CHANNEL_ID = ? and MESSAGE_ID = ?";
                    Object fields[] = new Object[4];
                    fields[0] = owner;
                    fields[1] = (draft ? "1" : "0");
                    fields[2] = channelId;
                    fields[3] = messageId;
                    boolean ok = m_sqlService.dbWrite(connection, update, fields);

                    if (!ok)
                        M_log.info("convertToDraft: channel: " + channelId + " message: " + messageId
                                + " owner: " + owner + " draft: " + draft + " ok: " + ok);

                    count++;
                    if (count % 100 == 0) {
                        M_log.info("convertToDraft: " + count);
                    }
                    return null;
                } catch (Exception ignore) {
                    return null;
                }
            }
        });

        connection.commit();
        connection.setAutoCommit(wasCommit);
        m_sqlService.returnConnection(connection);
    } catch (Exception t) {
        M_log.warn("convertToDraft: failed: " + t);
    }

    M_log.info("convertToDraft: done");
}

From source file:org.apache.torque.util.TransactionManagerImpl.java

/**
 * Commit a transaction and close the connection.
 * If the connection is in autocommit mode or the database does not support
 * transactions, only a connection close is performed.
 *
 * @param con The Connection for the transaction.
 * @throws TorqueException Any exceptions caught during processing will be
 *         rethrown wrapped into a TorqueException.
 */
public void commit(Connection con) throws TorqueException {
    if (con == null) {
        throw new NullPointerException(
                "Connection object was null. " + "This could be due to a misconfiguration of the "
                        + "DataSourceFactory. Check the logs and Torque.properties "
                        + "to better determine the cause.");
    }

    try {
        if (con.getMetaData().supportsTransactions() && !con.getAutoCommit()) {
            con.commit();
        }
    } catch (SQLException e) {
        throw new TorqueException(e);
    } finally {
        Torque.closeConnection(con);
    }
}

From source file:org.apache.torque.util.TransactionManagerImpl.java

/**
 * Roll back a transaction and release the connection.
 * In databases that do not support transactions or if autocommit is true,
 * no rollback will be performed, but the connection will be closed anyway.
 *
 * @param con The Connection for the transaction.
 *
 * @throws TorqueException Any exceptions caught during processing will be
 *         rethrown wrapped into a TorqueException.
 */
public void rollback(Connection con) throws TorqueException {
    if (con == null) {
        throw new TorqueException(
                "Connection object was null. " + "This could be due to a misconfiguration of the "
                        + "DataSourceFactory. Check the logs and Torque.properties "
                        + "to better determine the cause.");
    } else {
        try {
            if (con.getMetaData().supportsTransactions() && !con.getAutoCommit()) {
                con.rollback();
            }
        } catch (SQLException e) {
            log.error("An attempt was made to rollback a transaction "
                    + "but the database did not allow the operation to be " + "rolled back.", e);
            throw new TorqueException(e);
        } finally {
            Torque.closeConnection(con);
        }
    }
}

From source file:com.flexive.core.storage.GenericDBStorage.java

/**
 * {@inheritDoc}
 */
@Override
public void importDivision(Connection _con, ZipFile zip) throws Exception {
    long startTime = System.currentTimeMillis();
    GenericDivisionImporter importer = getDivisionImporter();
    FxDivisionExportInfo exportInfo = importer.getDivisionExportInfo(zip);
    if (FxSharedUtils.getDBVersion() != exportInfo.getSchemaVersion()) {
        LOG.warn("DB Version mismatch! Current:" + FxSharedUtils.getDBVersion() + ", exported schema:"
                + exportInfo.getSchemaVersion());
    }
    boolean isNonTX = importer.importRequiresNonTXConnection();
    Connection con = isNonTX ? Database.getNonTXDataSource().getConnection() : _con;

    boolean autoCommit = false;
    if (isNonTX) {
        autoCommit = con.getAutoCommit();
        con.setAutoCommit(false);
        con.commit(); //ensure a "clean" connection
    }
    Exception inner = null;
    try {
        importer.wipeDivisionData(con);
        if (isNonTX)
            con.commit();
        Statement stmt = con.createStatement();
        if (isNonTX)
            con.commit();
        try {
            importer.importLanguages(con, zip);
            if (isNonTX)
                con.commit();
            importer.importMandators(con, zip);
            if (isNonTX)
                con.commit();
            importer.importSecurity(con, zip);
            if (isNonTX)
                con.commit();
            importer.importWorkflows(con, zip);
            if (isNonTX)
                con.commit();
            importer.importConfigurations(con, zip);
            if (isNonTX)
                con.commit();
            importer.importBinaries(con, zip);
            if (isNonTX)
                con.commit();
            stmt.execute(getReferentialIntegrityChecksStatement(false));
            importer.importStructures(con, zip);
            if (isNonTX)
                con.commit();
            importer.importHierarchicalContents(con, zip);
            if (isNonTX)
                con.commit();
            importer.importScripts(con, zip);
            if (isNonTX)
                con.commit();
            importer.importTree(con, zip);
            if (isNonTX)
                con.commit();
            importer.importHistory(con, zip);
            if (isNonTX)
                con.commit();
            importer.importResources(con, zip);
            if (isNonTX)
                con.commit();
            importer.importBriefcases(con, zip);
            if (isNonTX)
                con.commit();
            importer.importFlatStorages(con, zip, exportInfo);
            if (isNonTX)
                con.commit();
            importer.importSequencers(con, zip);
            if (isNonTX)
                con.commit();
        } catch (Exception e) {
            if (isNonTX)
                con.rollback();
            inner = e;
            throw e;
        } finally {
            if (isNonTX)
                con.commit();
            stmt.execute(getReferentialIntegrityChecksStatement(true));
        }
        if (isNonTX)
            con.commit();
        //rebuild fulltext index
        FulltextIndexer ft = StorageManager.getStorageImpl().getContentStorage(TypeStorageMode.Hierarchical)
                .getFulltextIndexer(null, con);
        ft.rebuildIndex();
        if (isNonTX)
            con.commit();
    } catch (Exception e) {
        if (isNonTX)
            con.rollback();
        if (inner != null) {
            LOG.error(e);
            throw inner;
        }
        throw e;
    } finally {
        if (isNonTX) {
            con.commit();
            con.setAutoCommit(autoCommit);
            Database.closeObjects(GenericDBStorage.class, con, null);
        }
        LOG.info(" Importing took " + FxFormatUtils.formatTimeSpan((System.currentTimeMillis() - startTime)));
    }
}

From source file:org.geoserver.security.jdbc.AbstractJDBCService.java

public void createTablesIfRequired(JDBCSecurityServiceConfig config) throws IOException {

    if (this.canCreateStore() == false)
        return;
    if (config.isCreatingTables() == false)
        return;
    if (tablesAlreadyCreated())
        return;

    Connection con = null;
    PreparedStatement ps = null;
    try {
        con = datasource.getConnection();
        if (con.getAutoCommit() == true)
            con.setAutoCommit(false);
        for (String stmt : getOrderedNamesForCreate()) {
            ps = getDDLStatement(stmt, con);
            ps.execute();
            ps.close();
        }
        con.commit();
    } catch (SQLException ex) {
        throw new IOException(ex);
    } finally {
        closeFinally(con, ps, null);
    }
}

From source file:org.web4thejob.module.JobletInstallerImpl.java

@Override
@SuppressWarnings("unchecked")
public <E extends Exception> List<E> install(List<Joblet> joblets) {
    List<E> exceptions = new ArrayList<E>();

    try {

        final Configuration configuration = new Configuration();
        configuration.setProperty(AvailableSettings.DIALECT,
                connInfo.getProperty(DatasourceProperties.DIALECT));
        configuration.setProperty(AvailableSettings.DRIVER, connInfo.getProperty(DatasourceProperties.DRIVER));
        configuration.setProperty(AvailableSettings.URL, connInfo.getProperty(DatasourceProperties.URL));
        configuration.setProperty(AvailableSettings.USER, connInfo.getProperty(DatasourceProperties.USER));
        configuration.setProperty(AvailableSettings.PASS, connInfo.getProperty(DatasourceProperties.PASSWORD));

        final ServiceRegistry serviceRegistry = new StandardServiceRegistryBuilder()
                .applySettings(configuration.getProperties()).build();

        if (StringUtils.hasText(connInfo.getProperty(DatasourceProperties.SCHEMA_SYNTAX))) {
            String schemaSyntax = connInfo.getProperty(DatasourceProperties.SCHEMA_SYNTAX);
            Connection connection = serviceRegistry.getService(ConnectionProvider.class).getConnection();

            for (Joblet joblet : joblets) {
                for (String schema : joblet.getSchemas()) {
                    Statement statement = connection.createStatement();
                    statement.executeUpdate(schemaSyntax.replace("%s", schema));
                    statement.close();
                }
            }

            if (!connection.getAutoCommit()) {
                connection.commit();
            }
        }

        for (Joblet joblet : joblets) {
            for (Resource resource : joblet.getResources()) {
                configuration.addInputStream(resource.getInputStream());
            }
        }

        SchemaExport schemaExport = new SchemaExport(serviceRegistry, configuration);
        schemaExport.execute(Target.EXPORT, SchemaExport.Type.CREATE);
        exceptions.addAll(schemaExport.getExceptions());

    } catch (Exception e) {
        exceptions.add((E) e);
    }

    return exceptions;

}

From source file:org.zenoss.zep.dao.impl.ElapsedTime.java

@Override
public void optimizeTables() throws ZepException {
    final DatabaseType dbType = databaseCompatibility.getDatabaseType();

    final String externalToolName = this.useExternalToolPath + "/pt-online-schema-change";
    final String tableToOptimize = "event_summary";
    // if we want to use percona's pt-online-schema-change to avoid locking the tables due to mysql optimize...
    //checks if external tool is available
    if (this.useExternalTool && dbType == DatabaseType.MYSQL
            && DaoUtils.executeCommand("ls " + externalToolName) == 0) {
        logger.info("Validating state of event_summary");
        this.validateEventSummaryState();
        logger.debug("Optimizing table: " + tableToOptimize + " via percona " + externalToolName);
        eventSummaryOptimizationTime.setStartTime();

        String externalToolCommandPrefix = externalToolName + " --alter \"ENGINE=Innodb\" D=" + this.dbname
                + ",t=";
        String externalToolCommandSuffix = "";
        if (System.getenv("USE_ZENDS") != null && Integer.parseInt(System.getenv("USE_ZENDS").trim()) == 1) {
            externalToolCommandSuffix = " --defaults-file=/opt/zends/etc/zends.cnf";
        }
        externalToolCommandSuffix += " " + this.externalToolOptions
                + " --alter-foreign-keys-method=drop_swap --host=" + this.hostname + " --port=" + this.port
                + " --user=" + this.username + " --password=" + this.password + " --execute";
        int return_code = DaoUtils
                .executeCommand(externalToolCommandPrefix + tableToOptimize + externalToolCommandSuffix);
        if (return_code != 0) {
            logger.error("External tool failed on: " + tableToOptimize + ". Therefore, table:" + tableToOptimize
                    + "will not be optimized.");
        } else {
            logger.debug(
                    "Successfully optimized table: " + tableToOptimize + "using percona " + externalToolName);
        }

        eventSummaryOptimizationTime.setEndTime();
        SendOptimizationTimeEvent(eventSummaryOptimizationTime, tableToOptimize, "percona");

        if (this.tablesToOptimize.contains(tableToOptimize)) {
            this.tablesToOptimize.remove(tableToOptimize);
        }
    } else {
        if (this.useExternalTool) {
            logger.warn(
                    "External tool not available. Table: " + tableToOptimize + " optimization may be slow.");
        }
        if (!this.tablesToOptimize.contains(tableToOptimize)) {
            this.tablesToOptimize.add(tableToOptimize);
        }
    }

    eventSummaryOptimizationTime.setStartTime(); // init so elapsedTime() == 0

    try {
        logger.debug("Optimizing tables: {}", this.tablesToOptimize);
        this.template.execute(new ConnectionCallback<Object>() {
            @Override
            public Object doInConnection(Connection con) throws SQLException, DataAccessException {
                Boolean currentAutoCommit = null;
                Statement statement = null;
                try {
                    currentAutoCommit = con.getAutoCommit();
                    con.setAutoCommit(true);
                    statement = con.createStatement();
                    for (String tableToOptimize : tablesToOptimize) {
                        logger.debug("Optimizing table: {}", tableToOptimize);
                        final String sql;
                        switch (dbType) {
                        case MYSQL:
                            sql = "OPTIMIZE TABLE " + tableToOptimize;
                            break;
                        case POSTGRESQL:
                            sql = "VACUUM ANALYZE " + tableToOptimize;
                            break;
                        default:
                            throw new IllegalStateException("Unsupported database type: " + dbType);
                        }
                        if (tableToOptimize == "event_summary") {
                            eventSummaryOptimizationTime.setStartTime();
                        }
                        statement.execute(sql);
                        if (tableToOptimize == "event_summary") {
                            eventSummaryOptimizationTime.setEndTime();
                        }
                        logger.debug("Completed optimizing table: {}", tableToOptimize);
                    }
                } finally {
                    JdbcUtils.closeStatement(statement);
                    if (currentAutoCommit != null) {
                        con.setAutoCommit(currentAutoCommit);
                    }
                }
                return null;
            }
        });
    } finally {
        logger.info("Validating state of event_summary");
        this.validateEventSummaryState();
    }

    if (eventSummaryOptimizationTime.getElapsedTime() > 0) {
        SendOptimizationTimeEvent(eventSummaryOptimizationTime, "event_summary", "");
    }

    logger.debug("Completed Optimizing tables: {}", tablesToOptimize);
}

From source file:com.glaf.core.jdbc.connection.DruidConnectionProvider.java

public Connection getConnection() throws SQLException {
    Connection connection = null;
    int count = 0;
    while (count < conf.getInt("jdbc.connection.retryCount", 10)) {
        try {
            connection = ds.getConnection();
            if (connection != null) {
                if (isolation != null) {
                    connection.setTransactionIsolation(isolation.intValue());
                }
                if (connection.getAutoCommit() != autocommit) {
                    connection.setAutoCommit(autocommit);
                }
                log.debug("druid connection: " + connection.toString());
                return connection;
            } else {
                count++;
                try {
                    Thread.sleep(conf.getInt("jdbc.connection.retryTimeMs", 500));
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        } catch (SQLException ex) {
            count++;
            try {
                Thread.sleep(conf.getInt("jdbc.connection.retryTimeMs", 500));
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
            if (count >= conf.getInt("jdbc.connection.retryCount", 10)) {
                ex.printStackTrace();
                throw ex;
            }
        }
    }
    return connection;
}

From source file:com.adaptris.jdbc.connection.FailoverDatasourceTest.java

@Test
public void testCommitRollback() throws Exception {
    Connection conn = new MyProxy();

    try {
        try {
            conn.setAutoCommit(conn.getAutoCommit());
        } catch (SQLException e) {

        }
        try {
            conn.setAutoCommit(false);
            conn.commit();
        } catch (SQLException e) {

        }

        try {
            conn.setAutoCommit(false);
            conn.rollback();
        } catch (SQLException e) {

        }

        try {
            conn.setAutoCommit(false);
            conn.rollback(conn.setSavepoint());
        } catch (SQLException e) {

        }
        try {
            conn.setAutoCommit(false);
            conn.rollback(conn.setSavepoint("test"));
        } catch (SQLException e) {

        }
        try {
            conn.setAutoCommit(false);
            conn.releaseSavepoint(conn.setSavepoint("test2"));
        } catch (SQLException e) {

        }
    } finally {
        JdbcUtil.closeQuietly(conn);

    }
}

From source file:org.intermine.api.profile.StorableBag.java

/**
 * Save the values given in input into bagvalues table
 * @param bagValues the values to save
 */
protected void addBagValues(Collection<BagValue> bagValues) {
    Connection conn = null;
    Batch batch = null;
    Boolean oldAuto = null;
    ObjectStoreWriter uosw = getUserProfileWriter();
    Integer sbid = getSavedBagId();
    try {
        conn = ((ObjectStoreWriterInterMineImpl) uosw).getConnection();
        oldAuto = conn.getAutoCommit();
        conn.setAutoCommit(false);
        batch = new Batch(new BatchWriterPostgresCopyImpl());
        String[] colNames = new String[] { "savedbagid", "value", "extra" };
        for (BagValue bagValue : bagValues) {
            batch.addRow(conn, InterMineBag.BAG_VALUES, sbid, colNames,
                    new Object[] { sbid, bagValue.value, bagValue.extra });
        }
        batch.flush(conn);
        conn.commit();
        conn.setAutoCommit(oldAuto);
    } catch (SQLException sqle) {
        LOG.error("Exception committing bagValues for bag: " + sbid, sqle);
        try {
            conn.rollback();
            if (oldAuto != null) {
                conn.setAutoCommit(oldAuto);
            }
        } catch (SQLException sqlex) {
            throw new RuntimeException("Error aborting transaction", sqlex);
        }
    } finally {
        try {
            batch.close(conn);
        } catch (Exception e) {
            LOG.error("Exception caught when closing Batch while addbagValues", e);
        }
        ((ObjectStoreWriterInterMineImpl) uosw).releaseConnection(conn);
    }
}