Example usage for java.sql Connection setAutoCommit

List of usage examples for java.sql Connection setAutoCommit

Introduction

On this page you can find usage examples for java.sql Connection setAutoCommit.

Prototype

void setAutoCommit(boolean autoCommit) throws SQLException;

Source Link

Document

Sets this connection's auto-commit mode to the given state.
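
Before the project snippets below, here is a minimal, self-contained sketch of the usual setAutoCommit pattern: turn auto-commit off, run several statements as one transaction, commit on success, roll back on failure, and restore the previous mode afterwards. The accounts table and its columns are placeholders, not taken from any of the projects on this page.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

public class AutoCommitSketch {
    public static void transfer(Connection conn, int fromId, int toId, int amount) throws SQLException {
        boolean previousMode = conn.getAutoCommit();
        conn.setAutoCommit(false); // group the two updates into a single transaction
        try (PreparedStatement debit = conn.prepareStatement(
                     "UPDATE accounts SET balance = balance - ? WHERE id = ?");
             PreparedStatement credit = conn.prepareStatement(
                     "UPDATE accounts SET balance = balance + ? WHERE id = ?")) {
            debit.setInt(1, amount);
            debit.setInt(2, fromId);
            debit.executeUpdate();
            credit.setInt(1, amount);
            credit.setInt(2, toId);
            credit.executeUpdate();
            conn.commit();                    // make both updates visible atomically
        } catch (SQLException e) {
            conn.rollback();                  // undo the partial work on any failure
            throw e;
        } finally {
            conn.setAutoCommit(previousMode); // restore the caller's original mode
        }
    }
}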

Usage

From source file:com.zimbra.cs.db.DbPool.java

/**
 * Returns a new database connection for maintenance operations, such as
 * restore. Does not specify the name of the default database. This
 * connection is created outside the context of the database connection
 * pool.
 */
public static DbConnection getMaintenanceConnection() throws ServiceException {
    try {
        String user = LC.zimbra_mysql_user.value();
        String pwd = LC.zimbra_mysql_password.value();
        Connection conn = DriverManager.getConnection(sRootUrl + "?user=" + user + "&password=" + pwd);
        conn.setAutoCommit(false);
        return new DbConnection(conn);
    } catch (SQLException e) {
        throw ServiceException.FAILURE("getting database maintenance connection", e);
    }
}

From source file:gridool.util.jdbc.JDBCUtils.java

/**
 * Execute a batch of SQL INSERT, UPDATE, or DELETE queries.
 *
 * @param conn The Connection to use to run the query.  The caller is
 * responsible for closing this Connection.
 * @param sql The SQL to execute.
 * @param params An array of query replacement parameters.  Each row in
 * this array is one set of batch replacement values. 
 * @return The number of rows updated per statement.
 * @throws SQLException
 */
public static int[] batch(Connection conn, String sql, Object[][] params) throws SQLException {
    final boolean autoCommit = conn.getAutoCommit();
    if (autoCommit) {
        // Disable auto-commit so the whole batch runs in one transaction;
        // committing and restoring the previous mode are left to the caller.
        conn.setAutoCommit(false);
    }
    PreparedStatement stmt = null;
    int[] rows = null;
    try {
        stmt = conn.prepareStatement(sql);
        for (int i = 0; i < params.length; i++) {
            fillStatement(stmt, params[i]);
            stmt.addBatch();
        }
        verboseQuery(sql, (Object[]) params);
        rows = stmt.executeBatch();
    } catch (SQLException e) {
        rethrow(e, sql, (Object[]) params);
    } finally {
        close(stmt);
    }
    return rows;
}
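
Note that batch() only switches auto-commit off; it neither commits nor restores the previous mode, so the transaction boundary belongs to the caller. A possible caller-side sketch (the DataSource, table, and columns are placeholders, not part of gridool):

public static void insertPeople(javax.sql.DataSource dataSource) throws SQLException {
    Connection conn = dataSource.getConnection();
    try {
        Object[][] rows = {
                { "alice", 30 },
                { "bob", 42 }
        };
        JDBCUtils.batch(conn, "INSERT INTO person (name, age) VALUES (?, ?)", rows);
        conn.commit();            // batch() left auto-commit disabled, so commit explicitly
    } catch (SQLException e) {
        conn.rollback();          // discard the whole batch on failure
        throw e;
    } finally {
        conn.setAutoCommit(true); // hand the connection back in its usual mode
        conn.close();
    }
}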

From source file:com.mirth.connect.server.util.DatabaseUtil.java

public static void executeScript(String script, boolean ignoreErrors) throws Exception {
    SqlSessionManager sqlSessionManger = SqlConfig.getSqlSessionManager();

    Connection conn = null;
    ResultSet resultSet = null;
    Statement statement = null;

    try {
        sqlSessionManger.startManagedSession();
        conn = sqlSessionManger.getConnection();

        /*
         * Set auto commit to false or an exception will be thrown when trying to rollback
         */
        conn.setAutoCommit(false);

        statement = conn.createStatement();

        Scanner s = new Scanner(script);

        while (s.hasNextLine()) {
            StringBuilder sb = new StringBuilder();
            boolean blankLine = false;

            while (s.hasNextLine() && !blankLine) {
                String temp = s.nextLine();

                if (temp.trim().length() > 0)
                    sb.append(temp + " ");
                else
                    blankLine = true;
            }

            // Trim ending semicolons so Oracle doesn't throw
            // "java.sql.SQLException: ORA-00911: invalid character"
            String statementString = StringUtils.removeEnd(sb.toString().trim(), ";");

            if (statementString.length() > 0) {
                try {
                    statement.execute(statementString);
                    conn.commit();
                } catch (SQLException se) {
                    if (!ignoreErrors) {
                        throw se;
                    } else {
                        logger.error("Error was encountered and ignored while executing statement: "
                                + statementString, se);
                        conn.rollback();
                    }
                }
            }
        }

    } catch (Exception e) {
        throw new Exception(e);
    } finally {
        DbUtils.closeQuietly(statement);
        DbUtils.closeQuietly(resultSet);
        DbUtils.closeQuietly(conn);
        sqlSessionManger.close();
    }
}
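
The loop above treats a blank line as the statement separator and strips a trailing semicolon from each statement, so a caller would pass a script shaped roughly like this (the table and columns are illustrative only, not part of the Mirth schema):

String script =
        "INSERT INTO demo_table (id, name) VALUES ('1', 'first');\n" +
        "\n" +
        "UPDATE demo_table SET name = 'renamed' WHERE id = '1';";

// false: fail fast on the first bad statement instead of logging it and rolling back
DatabaseUtil.executeScript(script, false);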

From source file:com.clustercontrol.platform.infra.InfraJdbcExecutorSupport.java

public static String execSelectFileContent(String fileId, String fileName) throws HinemosUnknown {
    Connection conn = null;

    JpaTransactionManager tm = null;
    PGCopyInputStream pgStream = null;
    OutputStream fos = null;
    try {
        tm = new JpaTransactionManager();
        tm.begin();
        conn = tm.getEntityManager().unwrap(java.sql.Connection.class);
        conn.setAutoCommit(false);

        String exportDirectory = HinemosPropertyUtil.getHinemosPropertyStr("infra.export.dir",
                HinemosPropertyDefault.getString(HinemosPropertyDefault.StringKey.INFRA_EXPORT_DIR));
        String filepath = exportDirectory + "/" + fileName;

        pgStream = new PGCopyInputStream((PGConnection) conn,
                "COPY (select file_content from binarydata.cc_infra_file_content where file_id = '" + fileId
                        + "') TO STDOUT WITH (FORMAT BINARY)");
        fos = Files.newOutputStream(Paths.get(filepath));

        // Skip the COPY binary header (signature, flags, extension length) plus the tuple
        // field count, i.e. the first 21 bytes, to reach the first field length.
        long skipLen = pgStream.skip(21);
        if (skipLen != 21) {
            String message = "error in the binary format file parsing (skip tuple from sign) skipLen = "
                    + skipLen;
            log.warn(message);
            throw new HinemosUnknown(message);
        }

        byte[] lenBuf = new byte[4];
        int ret = pgStream.read(lenBuf, 0, lenBuf.length);
        if (ret == -1) {
            String message = "error in the binary format file parsing (read file length)";
            log.warn(message);
            throw new HinemosUnknown(message);
        }
        int len = ByteBuffer.wrap(lenBuf).getInt();

        byte[] buf = new byte[1024 * 1024];
        int read;
        int readTotalSize = 0;
        while ((read = pgStream.read(buf)) != -1) {
            readTotalSize += read;
            if (readTotalSize > len) {
                // The bytes read past the declared content length are the COPY file trailer; drop them.
                if ((readTotalSize - len) == 2) {
                    fos.write(buf, 0, read - 2);
                    break;
                } else {
                    fos.write(buf, 0, read - 1);
                    break;
                }
            } else {
                fos.write(buf, 0, read);
            }
        }

        if (!tm.isNestedEm()) {
            conn.commit();
        }
        tm.commit();

        return filepath;
    } catch (SQLException | IOException | RuntimeException e) {
        log.warn(e.getMessage(), e);
        if (conn != null) {
            try {
                conn.rollback();
            } catch (SQLException e1) {
                log.warn(e1);
            }
        }
        throw new HinemosUnknown(e.getMessage(), e);
    } finally {
        if (fos != null) {
            try {
                fos.close();
            } catch (IOException e) {
                log.warn(e.getMessage(), e);
                throw new HinemosUnknown(e.getMessage(), e);
            }
        }
        if (pgStream != null) {
            try {
                pgStream.close();
            } catch (IOException e) {
                log.warn(e.getMessage(), e);
                throw new HinemosUnknown(e.getMessage(), e);
            }
        }
        if (tm != null) {
            tm.close();
        }
    }
}
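
The 21-byte skip and the trailer handling above follow the PostgreSQL COPY BINARY layout: an 11-byte signature, a 4-byte flags field, and a 4-byte header-extension length form the 19-byte header, the 2-byte field count of the single tuple brings the offset to 21, and a 2-byte trailer closes the stream. A sketch of those offsets, with constant names invented here for illustration (they do not appear in the Hinemos source):

// Offsets assumed from the PostgreSQL COPY BINARY documentation, for one single-column tuple.
static final int SIGNATURE_LEN   = 11; // "PGCOPY\n\377\r\n\0"
static final int FLAGS_LEN       = 4;  // 32-bit flags field
static final int HEADER_EXT_LEN  = 4;  // 32-bit length of the (normally empty) header extension
static final int FIELD_COUNT_LEN = 2;  // 16-bit field count of the first and only tuple
static final int SKIPPED_BYTES   = SIGNATURE_LEN + FLAGS_LEN + HEADER_EXT_LEN + FIELD_COUNT_LEN; // = 21
static final int TRAILER_LEN     = 2;  // 16-bit -1 that terminates the stream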

From source file:com.krawler.database.DbPool.java

/**
 * Returns a connection to use for the Krawler database.
 *
 * @return a pooled connection with auto-commit disabled
 * @throws ServiceException if a database connection cannot be obtained
 */
public static Connection getConnection() throws ServiceException {
    java.sql.Connection conn = null;

    long start = KrawlerPerf.STOPWATCH_DB_CONN.start();

    try {
        conn = sPoolingDataSource.getConnection();

        if (conn.getAutoCommit())
            conn.setAutoCommit(false);

        // We want READ COMMITTED transaction isolation level for duplicate
        // handling code in BucketBlobStore.newBlobInfo().
        conn.setTransactionIsolation(java.sql.Connection.TRANSACTION_READ_COMMITTED);
    } catch (SQLException e) {
        throw ServiceException.FAILURE("getting database connection", e);
    }

    // If the connection pool is overutilized, warn about potential leaks
    int numActive = sConnectionPool.getNumActive();
    int maxActive = sConnectionPool.getMaxActive();

    if (numActive > maxActive * 0.75) {
        String stackTraceMsg = "Turn on debug logging for KrawlerLog.dbconn to see stack "
                + "traces of connections not returned to the pool.";
        if (KrawlerLog.dbconn.isDebugEnabled()) {
            StringBuffer buf = new StringBuffer();
            synchronized (sConnectionStackCounter) {
                Iterator i = sConnectionStackCounter.iterator();
                while (i.hasNext()) {
                    String stackTrace = (String) i.next();
                    int count = sConnectionStackCounter.getCount(stackTrace);
                    if (count == 0) {
                        i.remove();
                    } else {
                        buf.append(count + " connections allocated at " + stackTrace + "\n");
                    }
                }
            }
            stackTraceMsg = buf.toString();
        }
        KrawlerLog.dbconn.warn("Connection pool is 75% utilized.  " + numActive
                + " connections out of a maximum of " + maxActive + " in use.  " + stackTraceMsg);
    }

    if (KrawlerLog.sqltrace.isDebugEnabled() || KrawlerLog.perf.isDebugEnabled()) {
        // conn = new DebugConnection(conn); //TODO: uncomment later[BS]
    }
    Connection krawlerCon = new Connection(conn);

    // If we're debugging, update the counter with the current stack trace
    if (KrawlerLog.dbconn.isDebugEnabled()) {
        Throwable t = new Throwable();
        krawlerCon.setStackTrace(t);

        String stackTrace = SystemUtil.getStackTrace(t);
        synchronized (sConnectionStackCounter) {
            sConnectionStackCounter.increment(stackTrace);
        }
    }

    KrawlerPerf.STOPWATCH_DB_CONN.stop(start);
    return krawlerCon;
}

From source file:com.hangum.tadpole.db.bander.cubrid.CubridExecutePlanUtils.java

/**
 * cubrid execute plan
 * 
 * @param userDB
 * @param sql
 * @return
 * @throws Exception
 */
public static String plan(UserDBDAO userDB, String sql) throws Exception {
    if (!sql.toLowerCase().startsWith("select")) {
        logger.error("[cubrid execute plan ]" + sql);
        throw new Exception("This statment not select. please check.");
    }
    Connection conn = null;
    ResultSet rs = null;
    PreparedStatement pstmt = null;

    try {
        //         Class.forName("cubrid.jdbc.driver.CUBRIDDriver");
        //         conn = DriverManager.getConnection(userDB.getUrl(), userDB.getUsers(), userDB.getPasswd());
        //         conn.setAutoCommit(false); // set auto-commit to false
        conn = TadpoleSQLManager.getInstance(userDB).getDataSource().getConnection();
        conn.setAutoCommit(false); // set auto-commit to false

        sql = StringUtils.trim(sql).substring(6);
        if (logger.isDebugEnabled())
            logger.debug("[qubrid modifying query]" + sql);
        sql = "select " + RECOMPILE + sql;

        pstmt = conn.prepareStatement(sql);
        ((CUBRIDStatement) pstmt).setQueryInfo(true);
        rs = pstmt.executeQuery();

        String plan = ((CUBRIDStatement) pstmt).getQueryplan(); // retrieve the query plan text
        //         conn.commit();

        if (logger.isDebugEnabled())
            logger.debug("cubrid plan text : " + plan);

        return plan;

    } finally {
        if (rs != null)
            rs.close();
        if (pstmt != null)
            pstmt.close();
        if (conn != null)
            conn.close();
    }
}

From source file:com.cisco.iwe.services.util.EmailMonitor.java

/** This method saves the email contents in the database  **/
public static void saveAttachmentAndText(String from, String subject, byte[] mailAttachment, byte[] MailText,
        String fileType, Date sent, String pdfText) throws Exception {

    Connection conn = null;
    PreparedStatement stmt = null;
    try {
        String query = EmailParseConstants.saveQuery;
        conn = DataBaseUtil.getDevConnection();
        // DataBaseUtil.getConnection(jndiName+"_"+System.getProperty("cisco.life"));
        conn.setAutoCommit(false);
        stmt = conn.prepareStatement(query);
        stmt.setString(1, from);
        stmt.setString(2, subject);
        stmt.setBinaryStream(3, new ByteArrayInputStream(mailAttachment), mailAttachment.length);
        stmt.setBinaryStream(4, new ByteArrayInputStream(MailText), MailText.length);
        stmt.setString(5, fileType);
        stmt.setTimestamp(6, new Timestamp(sent.getTime()));
        stmt.executeUpdate();
        // Auto-commit was disabled above, so commit explicitly; otherwise the insert is lost on close.
        conn.commit();
    } finally {
        try {
            if (stmt != null) {
                stmt.close();
            }
        } finally {
            if (conn != null) {
                conn.close();
            }
        }
    }

}

From source file:com.wso2telco.core.dbutils.DbUtils.java

/**
 * Close connection.
 *
 * @param dbConnection
 *            the db connection
 */
private static void closeConnection(Connection dbConnection) {

    try {

        if (dbConnection != null && !dbConnection.getAutoCommit()) {

            log.debug("database connection is active and auto commit is false");
            dbConnection.setAutoCommit(true);
            dbConnection.close();
            log.debug("database connection set to close and auto commit set to true");
        } else if (dbConnection != null) {

            log.debug("database connection is active");
            dbConnection.close();
            log.debug("database connection set to closed");
        }
    } catch (SQLException e) {

        log.error("database error. Could not close database connection. continuing with others. - "
                + e.getMessage(), e);
    }

}
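
Resetting auto-commit before close() matters mainly for pooled connections, where close() typically returns the physical connection to the pool and the next borrower should not inherit an open transaction. An alternative, sketched here rather than taken from the WSO2 utility, is to restore the original mode in a finally block around the unit of work:

public static void runInTransaction(javax.sql.DataSource dataSource) throws SQLException {
    Connection conn = dataSource.getConnection();
    boolean previousMode = conn.getAutoCommit();
    try {
        conn.setAutoCommit(false);
        // ... transactional work ...
        conn.commit();
    } catch (SQLException e) {
        conn.rollback();
        throw e;
    } finally {
        conn.setAutoCommit(previousMode); // leave the pooled connection as it was found
        conn.close();
    }
}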

From source file:com.wso2telco.dbutils.DbUtils.java

/**
 * Close connection.
 *
 * @param dbConnection
 *            the db connection
 */
private static void closeConnection(Connection dbConnection) {

    try {

        if (dbConnection != null && !dbConnection.getAutoCommit()) {

            log.debug("database connection is active and auto commit is false");
            dbConnection.setAutoCommit(true);
            dbConnection.close();
            log.debug("database connection set to close and auto commit set to true");
        } else if (dbConnection != null) {

            log.debug("database connection is active");
            dbConnection.close();
            log.debug("database connection set to closed");
        }
    } catch (SQLException e) {

        log.error("database error. Could not close database connection. continuing with others. - "
                + e.getMessage(), e);
    }

    /*
     * if (dbConnection != null) { try { dbConnection.close(); } catch
     * (SQLException e) { log.warn(
     * "Database error. Could not close database connection. Continuing with "
     * + "others. - " + e.getMessage(), e); } }
     */
}

From source file:com.clustercontrol.platform.infra.InfraJdbcExecutorSupport.java

public static void execInsertFileContent(String fileId, DataHandler handler)
        throws HinemosUnknown, InfraFileTooLarge {
    Connection conn = null;

    JpaTransactionManager tm = null;
    PGCopyOutputStream pgStream = null;
    FileOutputStream fos = null;
    BufferedInputStream bis = null;
    File tempFile = null;
    try {
        tm = new JpaTransactionManager();
        conn = tm.getEntityManager().unwrap(java.sql.Connection.class);
        conn.setAutoCommit(false);

        pgStream = new PGCopyOutputStream((PGConnection) conn,
                "COPY binarydata.cc_infra_file_content(file_id, file_content) FROM STDIN WITH (FORMAT BINARY)");

        String exportDirectory = HinemosPropertyUtil.getHinemosPropertyStr("infra.export.dir",
                HinemosPropertyDefault.getString(HinemosPropertyDefault.StringKey.INFRA_EXPORT_DIR));
        tempFile = new File(exportDirectory + fileId);
        fos = new FileOutputStream(tempFile);
        handler.writeTo(fos);

        long fileLength = tempFile.length();
        int maxSize = HinemosPropertyUtil.getHinemosPropertyNum(MAX_FILE_KEY, Long.valueOf(1024 * 1024 * 64))
                .intValue(); // 64MB
        if (fileLength > maxSize) {
            throw new InfraFileTooLarge(String.format("File size is larger than the limit size(%d)", maxSize));
        }

        pgStream.write(HEADER_SIGN_PART);
        pgStream.write(HEADER_FLG_FIELD_PART);
        pgStream.write(HEADER_EX_PART);
        pgStream.write(TUPLE_FIELD_COUNT_PART);
        pgStream.write(ByteBuffer.allocate(4).putInt(fileId.getBytes().length).array());
        pgStream.write(fileId.getBytes());
        pgStream.write(ByteBuffer.allocate(4).putInt((int) fileLength).array());

        bis = new BufferedInputStream(new FileInputStream(tempFile));
        byte[] buf = new byte[1024 * 1024];
        int read;
        while ((read = bis.read(buf)) != -1) {
            pgStream.write(buf, 0, read);
        }
        pgStream.write(FILETRAILER);
        pgStream.flush();

        if (!tm.isNestedEm()) {
            conn.commit();
        }
    } catch (InfraFileTooLarge e) {
        log.warn(e.getMessage());
        try {
            pgStream.close();
        } catch (IOException e1) {
            log.warn(e1);
        }
        try {
            conn.rollback();
        } catch (SQLException e1) {
            log.warn(e1);
        }
        throw e;
    } catch (Exception e) {
        log.warn(e.getMessage(), e);
        try {
            if (pgStream != null)
                pgStream.close();
        } catch (IOException e1) {
            log.warn(e1);
        }
        try {
            if (conn != null)
                conn.rollback();
        } catch (SQLException e1) {
            log.warn(e1);
        }
        throw new HinemosUnknown(e.getMessage(), e);
    } finally {
        if (fos != null) {
            try {
                fos.close();
            } catch (IOException e) {
                log.warn(e.getMessage(), e);
                throw new HinemosUnknown(e.getMessage(), e);
            }
        }
        if (bis != null) {
            try {
                bis.close();
            } catch (IOException e) {
                log.warn(e.getMessage(), e);
                throw new HinemosUnknown(e.getMessage(), e);
            }
        }
        if (pgStream != null) {
            try {
                pgStream.close();
            } catch (IOException e) {
                log.warn(e.getMessage(), e);
                throw new HinemosUnknown(e.getMessage(), e);
            }
        }
        if (tm != null) {
            tm.close();
        }
        if (tempFile == null) {
            log.debug("Fail to delete. tempFile is null");
        } else if (!tempFile.delete()) {
            log.debug("Fail to delete " + tempFile.getAbsolutePath());
        }
    }
}