Example usage for java.sql Connection rollback

List of usage examples for java.sql Connection rollback

Introduction

In this page you can find the example usage for java.sql Connection rollback.

Prototype

void rollback() throws SQLException;

Source Link

Document

Undoes all changes made in the current transaction and releases any database locks currently held by this Connection object.

Usage

From source file:dao.RegistrationDaoDb.java

/**
 * Given a login add the user in hdlogin, hdprofile and usertab tables.
 * @param login - login
 * @param fname - first name
 * @param lname - last name
 * @param email - email
 * @param password - password
 * @param aFlag - activation code
 * @param hear - hear about us
 * @param budget - budget
 * @param contact - contact user
 * @param bphone - bphone
 * @param biz - business that this user belongs to
 * @return String - (0) failure (1) success (send activation code msg), (2) login exists
 * @throws BaseDaoException If we have a problem interpreting the data or the data is missing or incorrect
 **/
public String addUser(String login, String fname, String lname, String mname, String email, String password,
        String aFlag, String hear, String budget, String contact, String bphone, String biz)
        throws BaseDaoException {

    // When LDAP handles authentication only login/password are mandatory;
    // otherwise email is required as well (activation code is mailed to it).
    boolean ldapActive = WebUtil.isLdapActive();
    if (ldapActive) {
        if (RegexStrUtil.isNull(login) || RegexStrUtil.isNull(password)) {
            throw new BaseDaoException("params are null");
        }
    } else {
        if (RegexStrUtil.isNull(login) || RegexStrUtil.isNull(email) || RegexStrUtil.isNull(password)) {
            throw new BaseDaoException("params are null");
        }
    }

    if (!RegexStrUtil.isNull(fname))
        logger.info("fname = " + fname);
    if (!RegexStrUtil.isNull(lname))
        logger.info("lname = " + lname);
    if (!RegexStrUtil.isNull(email))
        logger.info("email = " + email);
    if (!RegexStrUtil.isNull(biz))
        logger.info("biz = " + biz);

    /**
    *  bail out early if the login already exists
    */
    Hdlogin hdlogin = getLoginid(login);
    if (hdlogin != null) {
        String loginid = hdlogin.getValue(DbConstants.LOGIN_ID);
        if (loginid != null) {
            return DbConstants.LOGIN_EXISTS;
        }
    }

    /**
     *  hdlogin, hdprofile, usertab - no partitioned
     */
    String sourceName = null;
    sourceName = scalabilityManager.getWriteZeroScalability();
    ds = scalabilityManager.getSource(sourceName);
    if (ds == null) {
        throw new BaseDaoException("ds null " + sourceName);
    }

    int aCode = getActivationCode(login, email);

    Connection conn = null;
    try {
        conn = ds.getConnection();
        // all three inserts run in one transaction
        conn.setAutoCommit(false);

        /**
        * add login, new user, get the latest id of the login
         */
        if (fname == null)
            fname = "";
        if (lname == null)
            lname = "";
        if (mname == null)
            mname = "";

        String[] params = new String[12];
        params[0] = login;
        params[1] = fname;
        params[2] = lname;
        params[3] = mname;
        params[4] = email;
        params[5] = password;
        // Integer.toString avoids the deprecated Integer(int) constructor
        params[6] = Integer.toString(aCode);
        params[7] = aFlag;
        params[8] = hear;
        params[9] = budget;
        params[10] = contact;
        params[11] = biz;
        String queryName = scalabilityManager.getWriteZeroScalability("hdloginaddquery");
        hdloginAddQuery = getQueryMapper().getCommonQuery(queryName);
        hdloginAddQuery.run(conn, params);

        /**
         *  set inform flags to zero
         */

        queryName = scalabilityManager.getWriteZeroScalability("hdprofileaddquery");
        hdprofileAddQuery = getQueryMapper().getCommonQuery(queryName);
        params = new String[3];
        params[0] = "0";
        params[1] = "0";
        params[2] = "0";
        hdprofileAddQuery.run(conn, params);

        /**
         *  set inform flags to zero
         */
        queryName = scalabilityManager.getWriteZeroScalability("personalinfoaddquery");
        personalinfoAddQuery = getQueryMapper().getCommonQuery(queryName);

        params = new String[35];
        for (int i = 0; i < 35; i++)
            params[i] = "";
        params[16] = "0"; // male
        params[25] = bphone; // bphone
        personalinfoAddQuery.run(conn, params);

    } catch (Exception e) {
        try {
            conn.rollback();
        } catch (Exception e1) {
            try {
                if (conn != null) {
                    conn.setAutoCommit(true);
                    conn.close();
                }
            } catch (Exception e2) {
                throw new BaseDaoException("connection close exception", e2);
            }
            throw new BaseDaoException("error occured while rollingback entries from hdlogin/hdprofile/usertab",
                    e1);
        }
        // rollback succeeded: release the connection before propagating
        // (the original leaked the connection on this path)
        try {
            if (conn != null) {
                conn.setAutoCommit(true);
                conn.close();
            }
        } catch (Exception e2) {
            throw new BaseDaoException("connection close exception", e2);
        }
        throw new BaseDaoException("error occured while making entries in hdlogin/hdprofile/usertab", e);
    }

    try {
        conn.commit();
    } catch (Exception e3) {
        throw new BaseDaoException("commit exception", e3);
    }

    try {
        if (conn != null) {
            conn.setAutoCommit(true);
            conn.close();
        }
    } catch (Exception e4) {
        throw new BaseDaoException("connection close exception", e4);
    }

    // mail the activation code only if ldap is not enabled
    if (!ldapActive) {
        mailActivationCode(login, fname, lname, email, aCode);
    }
    return DbConstants.SUCCESS;
}

From source file:com.app.das.business.dao.SearchDAO.java

/**
 * Deletes every tape lending item in the given list within a single transaction.
 * (Original comment text was mis-encoded during extraction; reconstructed from the code.)
 * @param tapeLendingItemDOList List of TapeLendingItemDO entries to delete
 * @param commonDO common request/user context (logged on failure)
 * @throws Exception if a delete or the commit fails
 */
public void deleteTapeLendingItemList(List tapeLendingItemDOList, DASCommonDO commonDO) throws Exception {
    Connection con = null;
    try {
        con = DBService.getInstance().getConnection();
        // run every delete in one transaction so the list is removed atomically
        con.setAutoCommit(false);

        for (Iterator i = tapeLendingItemDOList.iterator(); i.hasNext();) {
            deleteTapeLendingItem(con, (TapeLendingItemDO) i.next());
        }

        con.commit();

    }

    catch (Exception e) {
        logger.error("tapeLendingItemDOList : " + tapeLendingItemDOList);
        logger.error("commonDO : " + commonDO);

        if (con != null) {
            try {
                con.rollback();
            } catch (SQLException e1) {
                // log with the cause instead of printStackTrace() so the
                // rollback failure shows up in the application log
                logger.error("rollback failed in deleteTapeLendingItemList", e1);
            }
        }

        throw e;
    } finally {
        // returns the connection regardless of outcome
        release(null, null, con);
    }

}

From source file:dao.DirectoryAuthorDaoDb.java

/**
 * unblock the blocked directories for this member
 * @param idList the list of the directory ids
 * @param memberId memberId who is to be unblocked or deleted from block list. 
 * @param userId administrator of directory
 * @param userLogin administrator's login of directory
 * @throws BaseDaoException If we have a problem interpreting the data or the data is missing or incorrect
 */
public void unBlockDirectories(ArrayList idList, String memberId, String userId, String userLogin)
        throws BaseDaoException {

    if ((idList == null) || RegexStrUtil.isNull(userId) || RegexStrUtil.isNull(memberId)
            || RegexStrUtil.isNull(userLogin)) {
        throw new BaseDaoException("params are null");
    }

    /**
          *  check user permissions
          */
    /*
             if (!diaryAdmin.isDiaryAdmin(userLogin) && !isAuthor(directoryId, userId) ) {
          throw new BaseDaoException("User does not have permission to list moderators, " + directoryId + " userId = " + userId);
             }
    */

    /**
     *  Get scalability datasource for dirblock, partitioned on loginid
     */
    String sourceName = scalabilityManager.getWriteScalability(memberId);
    ds = scalabilityManager.getSource(sourceName);
    if (ds == null) {
        throw new BaseDaoException("ds null, unBlockDirectories() " + sourceName + " memberId = " + memberId);
    }

    Hdlogin hdlogin = getLogin(memberId);
    List blockedDirs = null;
    if (hdlogin != null) {
        blockedDirs = listBlockedDirectories(memberId, hdlogin.getValue(DbConstants.LOGIN),
                DbConstants.READ_FROM_MASTER);
    }

    /**
     *  unblock the directories for this member
     */
    Connection conn = null;
    try {
        conn = ds.getConnection();
        conn.setAutoCommit(false);
        for (int i = 0; i < idList.size(); i++) {
            /**
             * delete the blocked member, only if this member has been blocked on this directory
             */
            if (blockedDirs != null) {
                for (int j = 0; j < blockedDirs.size(); j++) {
                    if ((DirAuthor) blockedDirs.get(j) == null)
                        continue;
                    String dirId = (String) ((DirAuthor) blockedDirs.get(j)).getValue(DbConstants.DIRECTORY_ID);
                    if (dirId.equals((String) idList.get(i))) {
                        deleteBlockQuery.run(conn, (String) idList.get(i), memberId);
                    }
                }
            }
        }
    } catch (Exception e) {
        try {
            conn.rollback();
        } catch (Exception e1) {
            try {
                if (conn != null) {
                    conn.setAutoCommit(true);
                    conn.close();
                }
            } catch (Exception e2) {
                throw new BaseDaoException("conn.close() error, unBlockDirectories, memberId" + memberId, e2);
            }
            throw new BaseDaoException(" rollback() exception, for unBlockDirectories(),  userId = " + userId,
                    e1);
        }
        // rollback succeeded: close the connection and propagate the original
        // failure instead of falling through and committing an aborted
        // transaction (the original swallowed the exception here)
        try {
            if (conn != null) {
                conn.setAutoCommit(true);
                conn.close();
            }
        } catch (Exception e2) {
            throw new BaseDaoException("conn.close() error, unBlockDirectories, memberId" + memberId, e2);
        }
        throw new BaseDaoException("error in unBlockDirectories(), userId = " + userId, e);
    }

    /**
    * commit the transaction
    */
    try {
        conn.commit();
    } catch (Exception e3) {
        throw new BaseDaoException(" commit() exception, for unBlockDirectories() userId = " + userId, e3);
    }
    try {
        if (conn != null) {
            conn.setAutoCommit(true);
            conn.close();
        }
    } catch (Exception e4) {
        throw new BaseDaoException(
                " conn.close() exception, for commit(), unBlockDirectories() userId = " + userId, e4);
    }

    // invalidate every cache region that may hold the now-stale block lists
    Fqn fqn = cacheUtil.fqn(DbConstants.AUTHOR_BLOCKED_DIRS);
    if (treeCache.exists(fqn, memberId)) {
        treeCache.remove(fqn, memberId);
    }
    fqn = cacheUtil.fqn(DbConstants.DIR_MOVE);
    if (treeCache.exists(fqn, memberId)) {
        treeCache.remove(fqn, memberId);
    }
    fqn = cacheUtil.fqn(DbConstants.DIR_COPY);
    if (treeCache.exists(fqn, memberId)) {
        treeCache.remove(fqn, memberId);
    }

    for (int i = 0; i < idList.size(); i++) {
        fqn = cacheUtil.fqn(DbConstants.DIRECTORY);
        if (treeCache.exists(fqn, (String) idList.get(i))) {
            treeCache.remove(fqn, (String) idList.get(i));
        }
        removeUsersFromDirAuthorsCache((String) idList.get(i));
        fqn = cacheUtil.fqn(DbConstants.DIR_AUTHORS);
        if (treeCache.exists(fqn, (String) idList.get(i))) {
            treeCache.remove(fqn, (String) idList.get(i));
        }
        fqn = cacheUtil.fqn(DbConstants.DIR_ADMINS);
        if (treeCache.exists(fqn, (String) idList.get(i))) {
            treeCache.remove(fqn, (String) idList.get(i));
        }
    }

    fqn = cacheUtil.fqn(DbConstants.ADMIN_DIRS);
    if (treeCache.exists(fqn, DbConstants.ADMIN_DIRS)) {
        treeCache.remove(fqn, DbConstants.ADMIN_DIRS);
    }
}

From source file:es.juntadeandalucia.panelGestion.negocio.utiles.JDBCConnector.java

public int executeFeatureInsertLowLevel(String sql, SimpleFeature feature, List<ColumnVO> columns)
        throws Exception {
    Exception error = null;

    int numRowsAffected = 0;

    if (columns.size() > 0) {
        Connection connection = null;
        PreparedStatement preparedStmnt = null;

        try {
            DataSource dataSource = poolDataSources.get(schemaId);
            connection = dataSource.getConnection();
            connection.setAutoCommit(false);
            preparedStmnt = connection.prepareStatement(sql);

            // bind one parameter per column, in declared order (JDBC is 1-based)
            int paramPosition = 1;
            for (ColumnVO column : columns) {
                String dataValue = null;
                Object attribute = feature.getAttribute(column.getFilePosition());
                if (attribute != null) {
                    dataValue = attribute.toString();
                }
                Integer dataType = column.getSqlType();
                if (dataType == Types.OTHER) { // it is a geometry
                    // ((org.postgresql.PGConnection)connection).addDataType(column.getName(),
                    // column.getTypeClass());
                    preparedStmnt.setObject(paramPosition, dataValue);
                } else {
                    if (StringUtils.isEmpty(dataValue)) {
                        preparedStmnt.setNull(paramPosition, dataType);
                    } else {
                        preparedStmnt.setObject(paramPosition, dataValue, dataType);
                    }
                }
                paramPosition++;
            }

            numRowsAffected = preparedStmnt.executeUpdate();

            connection.commit();
        } catch (SQLException e) {
            // remembered and rethrown after cleanup; also triggers the rollback below
            error = e;
        } finally {
            if (preparedStmnt != null) {
                try {
                    preparedStmnt.close();
                } catch (SQLException se2) {
                    log.warn("No se pudo cerrar el statment: ".concat(se2.getLocalizedMessage()));
                }
            }
            if (connection != null) {
                try {
                    if (error != null) {
                        connection.rollback();
                    }
                } catch (SQLException se) {
                    log.warn("Se produjo un error al manejar la conexin: ".concat(se.getLocalizedMessage()));
                }
                // restore autocommit before handing the connection back to the
                // pool, so later borrowers do not inherit autoCommit=false
                try {
                    connection.setAutoCommit(true);
                } catch (SQLException se) {
                    log.warn("Could not restore autocommit: ".concat(se.getLocalizedMessage()));
                }
                try {
                    connection.close();
                } catch (SQLException se) {
                    log.warn("Se produjo un error al intentar cerrar la conexin: "
                            .concat(se.getLocalizedMessage()));
                }
            }
        }
        if (error != null) {
            throw error;
        }
    }
    return numRowsAffected;
}

From source file:dao.CarryonDaoDb.java

/**
 * updates caption for the blob in carryon.
 * @param btitle - the btitle
 * @param zoom - zoom applies to photos 
 * @param entryid - the entry id
 * @param memberId - the member id
 * @param category - the category
 * @param member - the member
 * @param def - the default
 * @param usertags - usertags
 * @param caption - caption
 * @throws BaseDaoException If we have a problem interpreting the data or the data is missing or incorrect
  */
public void updateCaption(String btitle, String zoom, String entryid, String memberId, String category,
        String member, boolean def, String usertags, String caption) throws BaseDaoException {

    if (RegexStrUtil.isNull(memberId) || RegexStrUtil.isNull(btitle) || RegexStrUtil.isNull(entryid)
            || RegexStrUtil.isNull(member)) {
        throw new BaseDaoException("null parameters passed");
    }

    boolean isFile = true;
    // zoom applies only to photos currently
    if ((category != null) && (category.equalsIgnoreCase("1"))) {
        isFile = false;
        if (RegexStrUtil.isNull(zoom)) {
            throw new BaseDaoException("zoom parameter is null");
        }
    }

    /**
     * Set the source based on scalability
     */
    String sourceName = scalabilityManager.getWriteBlobScalability(memberId);
    ds = scalabilityManager.getSource(sourceName);
    if (ds == null) {
        throw new BaseDaoException("ds is null for sourceName = " + sourceName);
    }

    // does a default-photo entry already exist for this entryid?
    boolean exists = false;
    try {
        Object[] params = { (Object) entryid };
        List result = defaultQuery.execute(params);
        if (result != null && result.size() > 0) {
            exists = true;
        }
    } catch (Exception e) {
        // preserve the cause (the original dropped it)
        throw new BaseDaoException("error while" + defaultQuery.getSql(), e);
    }

    if (WebUtil.isSanEnabled()) {
        Photo photo = getPhoto(memberId, entryid, DbConstants.READ_FROM_SLAVE);
        if (photo != null) {
            String srcFileName = photo.getValue(DbConstants.BTITLE);
            if (!RegexStrUtil.isNull(srcFileName) && !srcFileName.equals(btitle)) {
                try {
                    SanUtils sanUtils = new SanUtils();
                    sanUtils.renameSanFile(member, SanConstants.sanUserPath, srcFileName, btitle);
                } catch (SanException e) {
                    throw new BaseDaoException("renameSanFile() in CarryonDaoDb " + member + " srcFileName "
                            + srcFileName + " destFileName " + btitle, e);
                }
            }
        }
    }

    Connection conn = null;

    try {
        conn = ds.getConnection();
        if (conn != null) {
            /* delete the photo, if this entry exists */
            conn.setAutoCommit(false);
            if (RegexStrUtil.isNull(caption)) {
                caption = btitle;
            }
            updateQuery.run(conn, btitle, zoom, entryid, memberId, isFile, caption);
            /** if this is the default photo and this photo does not exist, add this entry */
            if (def) {
                if (!exists) {
                    deleteDefQuery.run(conn, memberId);
                    addDefQuery.run(conn, entryid, memberId);
                }
            } else {
                /**  no more a default photo, delete this entry */
                if (exists) {
                    deleteDefQuery.run(conn, memberId);
                }
            }
        }
    } catch (Exception e) {
        try {
            conn.rollback();
        } catch (Exception e1) {
            try {
                if (conn != null) {
                    conn.setAutoCommit(true);
                    conn.close();
                }
            } catch (Exception e2) {
                throw new BaseDaoException("connection close exception", e2);
            }
            throw new BaseDaoException("error occured while rollingback entries from carryon/defcarryon", e1);
        }
        // rollback succeeded: close the connection and propagate instead of
        // falling through to commit an aborted transaction (original bug)
        try {
            if (conn != null) {
                conn.setAutoCommit(true);
                conn.close();
            }
        } catch (Exception e2) {
            throw new BaseDaoException("connection close exception", e2);
        }
        throw new BaseDaoException("error occured while updating entries in carryon/defcarryon", e);
    }
    try {
        conn.commit();
    } catch (Exception e3) {
        throw new BaseDaoException("commit exception", e3);
    }
    try {
        if (conn != null) {
            conn.setAutoCommit(true);
            conn.close();
        }
    } catch (Exception e4) {
        throw new BaseDaoException("connection close exception", e4);
    }

    // update the blob title
    updateTitleCarryonHits(entryid, memberId, caption);

    /**
     * updateTags
     */
    sourceName = scalabilityManager.getWriteZeroScalability();
    ds = scalabilityManager.getSource(sourceName);
    if (ds == null) {
        throw new BaseDaoException("ds is null for sourceName = " + sourceName);
    }

    conn = null;
    try {
        conn = ds.getConnection();
        if (conn != null) {
            updateTagsQuery.run(conn, caption, entryid, memberId, usertags);
            updateRecentQuery.run(conn, btitle, zoom, entryid, memberId, isFile, caption);
        }
    } catch (Exception e) {
        try {
            if (conn != null) {
                conn.close();
            }
        } catch (Exception e1) {
            throw new BaseDaoException("connection close exception for updateTagsQuery()", e1);
        }
        StringBuffer sb = new StringBuffer("error occured while executing in updateCaption()  caption = ");
        sb.append(caption);
        sb.append(" zoom = ");
        sb.append(zoom);
        sb.append(" entryid = ");
        sb.append(entryid);
        sb.append(" memberId = ");
        sb.append(memberId);
        throw new BaseDaoException(sb.toString(), e);
    }

    // close the connection
    try {
        if (conn != null) {
            conn.close();
        }
    } catch (Exception e1) {
        throw new BaseDaoException("connection close exception", e1);
    }

    /**
     * remove from userstreamblob based on the key (memberId+entryid)
     */
    StringBuffer sb = new StringBuffer(memberId);
    sb.append("-");
    sb.append(entryid);
    String key = sb.toString();
    Fqn fqn = cacheUtil.fqn(DbConstants.USER_STREAM_BLOB_ENTRY);
    if (treeCache.exists(fqn, key)) {
        treeCache.remove(fqn, key);
    }
    fqn = cacheUtil.fqn(DbConstants.USER_STREAM_BLOB_DATA);
    if (treeCache.exists(fqn, key)) {
        treeCache.remove(fqn, key);
    }

    /**
     * uses memberid-category key
     */
    sb.delete(0, sb.length());
    sb.append(memberId);
    sb.append("-");
    sb.append(category);
    fqn = cacheUtil.fqn(DbConstants.USER_STREAM_BLOBS_CAT);
    if (treeCache.exists(fqn, sb.toString())) {
        treeCache.remove(fqn, sb.toString());
    }

    /**
     * remove all entries for this user, key based on memberId 
     */
    fqn = cacheUtil.fqn(DbConstants.USER_STREAM_BLOB);
    if (treeCache.exists(fqn, memberId)) {
        treeCache.remove(fqn, memberId);
    }

    fqn = cacheUtil.fqn(DbConstants.USER_PAGE);
    if (treeCache.exists(fqn, member)) {
        treeCache.remove(fqn, member);
    }

    fqn = cacheUtil.fqn(DbConstants.DEFAULT_PHOTO);
    if (treeCache.exists(fqn, memberId)) {
        treeCache.remove(fqn, memberId);
    }

    /* remove this from cache only when the blogger matches one of the recent blog entry matches */
    /*
       Fqn fqn = cacheUtil.fqn(DbConstants.RECENT_BLOGS);
            if (treeCache.exists(fqn, DbConstants.RECENT_BLOG_KEY)) {
               treeCache.remove(fqn, DbConstants.RECENT_BLOG_KEY);
            }
    */
}

From source file:org.opennms.ng.services.capsd.BroadcastEventProcessor.java

/**
 * Handle a deleteNode Event. Here we process the event by marking all the
 * appropriate data rows as deleted.
 *
 * @param event The event indicating what node to delete
 * @throws org.opennms.netmgt.capsd.InsufficientInformationException if the required information is not part of the event
 * @throws org.opennms.netmgt.capsd.FailedOperationException         if any.
 */
@EventHandler(uei = EventConstants.DELETE_NODE_EVENT_UEI)
public void handleDeleteNode(Event event) throws InsufficientInformationException, FailedOperationException {
    // validate event
    EventUtils.checkEventId(event);
    EventUtils.checkNodeId(event);
    if (isXmlRpcEnabled()) {
        EventUtils.requireParm(event, EventConstants.PARM_TRANSACTION_NO);
    }

    // log the event
    long nodeid = event.getNodeid();
    LOG.debug("handleDeleteNode: Event\nuei\t\t{}\neventid\t\t{}\nnodeId\t\t{}\neventtime\t{}", event.getUei(),
            event.getDbid(), nodeid, (event.getTime() != null ? event.getTime() : "<null>"));

    long txNo = EventUtils.getLongParm(event, EventConstants.PARM_TRANSACTION_NO, -1L);

    // update the database
    Connection dbConn = null;
    List<Event> eventsToSend = null;
    try {
        dbConn = getConnection();
        dbConn.setAutoCommit(false);

        String source = (event.getSource() == null ? "OpenNMS.Capsd" : event.getSource());

        eventsToSend = doDeleteNode(dbConn, source, nodeid, txNo);
    } catch (SQLException ex) {
        // was mislabelled "handleDeleteService" in the original log message
        LOG.error("handleDeleteNode:  Database error deleting service {} on ipAddr {} for node {}",
                event.getService(), event.getInterface(), nodeid, ex);
        throw new FailedOperationException("database error: " + ex.getMessage(), ex);
    } finally {

        if (dbConn != null) {
            try {
                // eventsToSend is non-null only when doDeleteNode succeeded
                if (eventsToSend != null) {
                    dbConn.commit();
                    for (Event e : eventsToSend) {
                        EventUtils.sendEvent(e, event.getUei(), txNo, isXmlRpcEnabled());
                    }
                } else {
                    dbConn.rollback();
                }
            } catch (SQLException ex) {
                LOG.error("handleDeleteNode: Exception thrown during commit/rollback: ", ex);
                throw new FailedOperationException("exception processing deleteNode: " + ex.getMessage(), ex);
            } finally {
                // dbConn is already known to be non-null here; the original
                // re-checked it redundantly
                try {
                    dbConn.close();
                } catch (SQLException ex) {
                    LOG.error("handleDeleteNode: Exception thrown closing connection: ", ex);
                }
            }
        }
    }
}

From source file:dao.DirectoryAuthorDaoDb.java

/**
 * Block multiple directories for this member
 * @param idList the list of the directory ids
 * @param memberId memberId who is to be blocked. 
 * @param userId administrator of directory
 * @param userLogin administrator's login of directory
 * @throws BaseDaoException If we have a problem interpreting the data or the data is missing or incorrect
 */
public void blockDirectories(ArrayList idList, String memberId, String userId, String userLogin)
        throws BaseDaoException {

    if ((idList == null) || RegexStrUtil.isNull(userId) || RegexStrUtil.isNull(memberId)
            || RegexStrUtil.isNull(userLogin)) {
        throw new BaseDaoException("params are null");
    }

    /**
          *  check user permissions
          */
    /*
             if (!diaryAdmin.isDiaryAdmin(userLogin) && !isAuthor(directoryId, userId) ) {
          throw new BaseDaoException("User does not have permission to list moderators, " + directoryId + " userId = " + userId);
             }
    */

    /**
     *  Get scalability datasource for dirblock, partitioned on loginid
     **/
    String sourceName = scalabilityManager.getWriteScalability(memberId);
    ds = scalabilityManager.getSource(sourceName);
    if (ds == null) {
        throw new BaseDaoException("ds null, blockDirectories() " + sourceName + " memberId = " + memberId);
    }

    Hdlogin hdlogin = getLogin(memberId);
    List blockedDirs = null;
    if (hdlogin != null) {
        blockedDirs = listBlockedDirectories(memberId, hdlogin.getValue(DbConstants.LOGIN),
                DbConstants.READ_FROM_MASTER);
    }

    /**
     *  block the directories for this member
     */
    Connection conn = null;
    try {
        conn = ds.getConnection();
        conn.setAutoCommit(false);
        for (int i = 0; i < idList.size(); i++) {
            String blockDirId = (String) idList.get(i);
            /**
             * if the user is already blocked, dont block it again.
             * (the original 'continue' only skipped the inner loop, so
             * duplicates were still inserted; track it with a flag instead)
             */
            boolean alreadyBlocked = false;
            if (blockedDirs != null) {
                for (int j = 0; j < blockedDirs.size(); j++) {
                    if ((DirAuthor) blockedDirs.get(j) == null)
                        continue;
                    String dirId = (String) ((DirAuthor) blockedDirs.get(j)).getValue(DbConstants.DIRECTORY_ID);
                    if (dirId.equals(blockDirId)) {
                        alreadyBlocked = true;
                        break;
                    }
                }
            }
            if (alreadyBlocked) {
                continue;
            }
            addBlockQuery.run(conn, blockDirId, memberId);
            deleteAutoAuthorQuery.run(conn, blockDirId, memberId);
        }
    } catch (Exception e) {
        try {
            conn.rollback();
        } catch (Exception e1) {
            try {
                if (conn != null) {
                    conn.setAutoCommit(true);
                    conn.close();
                }
            } catch (Exception e2) {
                throw new BaseDaoException("conn.close() error, addBlockDirectories, memberId" + memberId, e2);
            }
            throw new BaseDaoException(" rollback() exception, for addBlockDirectories(),  userId = " + userId,
                    e1);
        }
        // rollback succeeded: close and propagate the original failure instead
        // of falling through to commit an aborted transaction (original bug)
        try {
            if (conn != null) {
                conn.setAutoCommit(true);
                conn.close();
            }
        } catch (Exception e2) {
            throw new BaseDaoException("conn.close() error, addBlockDirectories, memberId" + memberId, e2);
        }
        throw new BaseDaoException("error in blockDirectories(), userId = " + userId, e);
    }

    /**
    * commit the transaction
    */
    try {
        conn.commit();
    } catch (Exception e3) {
        throw new BaseDaoException(" commit() exception, for addBlockDirectories() userId = " + userId, e3);
    }
    try {
        if (conn != null) {
            conn.setAutoCommit(true);
            conn.close();
        }
    } catch (Exception e4) {
        throw new BaseDaoException(
                " conn.close() exception, for commit(), addBlockDirectories() userId = " + userId, e4);
    }

    // invalidate cache regions holding stale block/author lists
    Fqn fqn = cacheUtil.fqn(DbConstants.AUTHOR_BLOCKED_DIRS);
    if (treeCache.exists(fqn, memberId)) {
        treeCache.remove(fqn, memberId);
    }
    fqn = cacheUtil.fqn(DbConstants.DIR_MOVE);
    if (treeCache.exists(fqn, memberId)) {
        treeCache.remove(fqn, memberId);
    }
    fqn = cacheUtil.fqn(DbConstants.DIR_COPY);
    if (treeCache.exists(fqn, memberId)) {
        treeCache.remove(fqn, memberId);
    }

    for (int i = 0; i < idList.size(); i++) {
        fqn = cacheUtil.fqn(DbConstants.DIRECTORY);
        if (treeCache.exists(fqn, (String) idList.get(i))) {
            treeCache.remove(fqn, (String) idList.get(i));
        }
        removeUsersFromDirAuthorsCache((String) idList.get(i));
        fqn = cacheUtil.fqn(DbConstants.DIR_AUTHORS);
        if (treeCache.exists(fqn, (String) idList.get(i))) {
            treeCache.remove(fqn, (String) idList.get(i));
        }
        fqn = cacheUtil.fqn(DbConstants.DIR_ADMINS);
        if (treeCache.exists(fqn, (String) idList.get(i))) {
            treeCache.remove(fqn, (String) idList.get(i));
        }
    }

    fqn = cacheUtil.fqn(DbConstants.ADMIN_DIRS);
    if (treeCache.exists(fqn, DbConstants.ADMIN_DIRS)) {
        treeCache.remove(fqn, DbConstants.ADMIN_DIRS);
    }
}

From source file:com.quest.orahive.HiveJdbcClient.java

/**
 * Streams rows from a Hive {@link ResultSet} into an Oracle table using batched
 * {@code PreparedStatement} inserts, committing every {@code opts.commitBatchCount}
 * executed batches.
 *
 * @param opts             batch/commit sizing options ({@code insertBatchSize},
 *                         {@code commitBatchCount})
 * @param insertSql        the parameterized Oracle INSERT statement
 * @param oracleColumns    target column list; its size drives per-row parameter binding
 * @param oracleConnection open Oracle connection (assumed auto-commit off; commits
 *                         are issued explicitly here — TODO confirm caller disables auto-commit)
 * @param resultSet        Hive result set supplying the rows
 * @param counters         out-parameter populated with row count and fetch/insert timings
 */
private static void insertHiveResultsIntoOracleTable(OraHiveOptions opts, String insertSql,
        List<OracleTableColumn> oracleColumns, Connection oracleConnection, ResultSet resultSet,
        OraHiveCounters counters) {

    long timerHiveFetching = 0;
    long timerOracleInserting = 0;
    long rowsProcessed = 0;

    // Declared outside the try so the finally block can close it on every path;
    // previously the statement leaked whenever an exception was thrown.
    oracle.jdbc.OraclePreparedStatement statement = null;

    try {
        statement = (oracle.jdbc.OraclePreparedStatement) oracleConnection.prepareStatement(insertSql);

        int rowIdx = 0; // rows accumulated in the current (not yet executed) batch
        int batchIdx = 0; // batches executed since the last commit
        int numberOfBatchesCommitted = 0;

        try {
            resultSet.setFetchSize(opts.insertBatchSize);
        } catch (SQLException e) {
            try {
                // Apply fetchN hack for much better performance with pre 0.8 JDBC driver
                LOG.info(
                        "Hive ResultSet does not implement setFetchSize. Wrapping with FetchNResultSet for better performance.");
                resultSet = new FetchNResultSet(resultSet);
                resultSet.setFetchSize(opts.insertBatchSize);
            } catch (IllegalArgumentException iae) {
                LOG.warn(
                        "Wrapping Hive ResultSet with FetchNResultSet failed. Performance may be poor for large result sets.");
                LOG.debug("FetchNResultSet exception was:", iae);
            }
        }

        long start = System.nanoTime();
        while (resultSet.next()) {
            for (int idx = 0; idx < oracleColumns.size(); idx++) { // <- JDBC is 1-based
                statement.setObject(idx + 1, resultSet.getObject(idx + 1));
            }
            timerHiveFetching += System.nanoTime() - start;

            rowsProcessed++;
            statement.addBatch();

            rowIdx++;
            if (rowIdx == opts.insertBatchSize) {
                rowIdx = 0;

                start = System.nanoTime();

                statement.executeBatch();
                statement.clearBatch();

                timerOracleInserting += System.nanoTime() - start;

                batchIdx++;
            }

            // Commit once commitBatchCount full batches have been executed.
            if (batchIdx == opts.commitBatchCount) {
                batchIdx = 0;
                oracleConnection.commit();
                numberOfBatchesCommitted++;
                LOG.info(String.format("Number of rows inserted so far: %d",
                        numberOfBatchesCommitted * (opts.insertBatchSize * opts.commitBatchCount)));
            }
            start = System.nanoTime();
        }

        // Flush any rows left in a partially filled final batch.
        if (rowIdx > 0) {
            start = System.nanoTime();

            statement.executeBatch();

            timerOracleInserting += System.nanoTime() - start;
        }

        oracleConnection.commit();
    } catch (SQLException ex) {

        if (Utilities.oracleSessionHasBeenKilled(ex)) {
            LOG.info("\n*********************************************************"
                    + "\nThe Oracle session in use has been killed by a 3rd party."
                    + "\n*********************************************************");
        } else {
            LOG.error("An error occurred within the process of fetching Hive results "
                    + "and inserting them into an Oracle table. (1)", ex);
        }

        try {
            oracleConnection.rollback();
        } catch (SQLException e) {
            // Best-effort rollback; log rather than silently swallowing the failure.
            LOG.debug("Rollback after failed insert also failed.", e);
        }

        // NOTE(review): hard process exit from a library method prevents callers
        // from recovering; preserved because existing callers may rely on it.
        System.exit(1);
    } catch (Exception ex) {
        LOG.error("An error occurred within the process of fetching Hive results "
                + "and inserting them into an Oracle table. (2)", ex);
    } finally {
        if (statement != null) {
            try {
                statement.close();
            } catch (SQLException e) {
                LOG.debug("Failed to close Oracle insert statement.", e);
            }
        }
        LOG.info(String.format("Number of rows obtained from Hive: %d", rowsProcessed));
    }

    counters.rowsProcessed = rowsProcessed;
    counters.hiveFetchTimeNanoSec = timerHiveFetching;
    counters.oracleInsertTimeNanoSec = timerOracleInserting;
}

From source file:org.opennms.ng.services.capsd.BroadcastEventProcessor.java

/**
 * Handle a deleteService Event. Here we process the event by marking all
 * the appropriate data rows as deleted.
 *
 * @param event The event indicating what service to delete
 * @throws org.opennms.netmgt.capsd.InsufficientInformationException if the required information is not part of the event
 * @throws org.opennms.netmgt.capsd.FailedOperationException         if any.
 */
@EventHandler(uei = EventConstants.DELETE_SERVICE_EVENT_UEI)
public void handleDeleteService(Event event) throws InsufficientInformationException, FailedOperationException {

    // validate event
    EventUtils.checkEventId(event);
    EventUtils.checkNodeId(event);
    EventUtils.checkInterface(event);
    EventUtils.checkService(event);

    // log the event
    LOG.debug(
            "handleDeleteService: Event\nuei\t\t{}\neventid\t\t{}\nnodeid\t\t{}\nipaddr\t\t{}\nservice\t\t{}\neventtime\t{}",
            event.getUei(), event.getDbid(), event.getNodeid(), event.getInterface(), event.getService(),
            (event.getTime() != null ? event.getTime() : "<null>"));

    long txNo = EventUtils.getLongParm(event, EventConstants.PARM_TRANSACTION_NO, -1L);

    // update the database
    Connection dbConn = null;
    List<Event> eventsToSend = null;
    try {
        dbConn = getConnection();
        dbConn.setAutoCommit(false);
        String source = (event.getSource() == null ? "OpenNMS.Capsd" : event.getSource());
        eventsToSend = doDeleteService(dbConn, source, event.getNodeid(), event.getInterface(),
                event.getService(), txNo);
    } catch (SQLException ex) {
        LOG.error("handleDeleteService:  Database error deleting service {} on ipAddr {} for node {}",
                event.getService(), event.getInterface(), event.getNodeid(), ex);
        throw new FailedOperationException("database error: " + ex.getMessage(), ex);
    } finally {
        // eventsToSend is non-null only when doDeleteService completed, so it
        // doubles as the commit-vs-rollback flag here.
        if (dbConn != null) {
            try {
                if (eventsToSend != null) {
                    dbConn.commit();
                    for (Event e : eventsToSend) {
                        EventUtils.sendEvent(e, event.getUei(), txNo, isXmlRpcEnabled());
                    }
                } else {
                    dbConn.rollback();
                }
            } catch (SQLException ex) {
                LOG.error("handleDeleteService: Exception thrown during commit/rollback: ", ex);
                throw new FailedOperationException("exception processing deleteService: " + ex.getMessage(), ex);
            } finally {
                // dbConn is known non-null here; the redundant re-check was removed.
                try {
                    dbConn.close();
                } catch (SQLException ex) {
                    LOG.error("handleDeleteService: Exception thrown closing connection: ", ex);
                }
            }
        }
    }
}

From source file:dao.DirectoryAuthorDaoDb.java

/**
 *  Adds the author for this directory in the same connection
 *  @param directoryId - directory id/*from  w  w  w  . j av a 2  s.  co  m*/
 *  @param userId - user login is used to check if this user has the permission to add authors
 *  @param userLogin - user login is used to check if this user has the permission to add authors
 *  @param authorList  author list
 */
public void addAuthors(String directoryId, String userId, String userLogin, List authorList)
        throws BaseDaoException {

    if (RegexStrUtil.isNull(directoryId) || RegexStrUtil.isNull(userLogin) || (authorList == null)
            || RegexStrUtil.isNull(userId)) {
        throw new BaseDaoException("addAuthors() params are null");
    }
    logger.info("authorList = " + authorList.size());

    /**
          *  check if this user has permission to add authors
          */
    if (!diaryAdmin.isDiaryAdmin(userLogin) && !isAuthor(directoryId, userId)) {
        throw new BaseDaoException("addAuthors(), User does not have permission to add authors to directory, "
                + directoryId + " userId = " + userId);
    }

    /**
     *  Get scalability datasource for diradmin, not partitioned
     */
    String sourceName = scalabilityManager.getWriteZeroScalability();
    ds = scalabilityManager.getSource(sourceName);
    if (ds == null) {
        throw new BaseDaoException("ds null, addAuthors() " + sourceName + " userId = " + userId);
    }

    /**
     *  Add author to this directory
     */
    String memberId = null;
    Connection conn = null;
    try {
        logger.info("authorList = " + authorList.size());
        conn = ds.getConnection();
        conn.setAutoCommit(false);
        for (int i = 0; i < authorList.size(); i++) {
            String member = (String) authorList.get(i);
            logger.info("member = " + authorList.get(i));
            if (!RegexStrUtil.isNull(member)) {
                Hdlogin hdlogin = getLoginid(member);
                if (hdlogin == null) {
                    throw new BaseDaoException(
                            "addAuthors(), Hdlogin for member is null, addAuthor(), member= " + member);
                } else {
                    memberId = hdlogin.getValue(DbConstants.LOGIN_ID);
                    logger.info("memberId = " + memberId);
                    addAdminQuery.run(conn, directoryId, memberId);
                    deleteBlockQuery.run(conn, directoryId, memberId);
                }
            }
        }
    } catch (Exception e) {
        try {
            conn.rollback();
        } catch (Exception e1) {
            try {
                if (conn != null) {
                    conn.setAutoCommit(true);
                    conn.close();
                }
            } catch (Exception e2) {
                throw new BaseDaoException(
                        "conn.close() error, addAuthors(), addAdminQuery()/deleteBlockQuery(), memberId"
                                + memberId,
                        e2);
            }
            throw new BaseDaoException(
                    " rollback() exception, for addAuthors(), addAdminQuery(), deleteBlockQuery()  userId = "
                            + userId,
                    e1);
        }
        throw new BaseDaoException(
                " addAuthors(), addAdminQuery/deleteBlockQuery, exception,  userId = " + userId, e);
    }

    // connection commit
    try {
        conn.commit();
    } catch (Exception e3) {
        throw new BaseDaoException(
                " commit() exception, for addAuthors(), addAdminQuery/deleteBlockQuery, userId = " + userId,
                e3);
    }
    try {
        if (conn != null) {
            conn.setAutoCommit(true);
            conn.close();
        }
    } catch (Exception e4) {
        throw new BaseDaoException(
                " conn.close() exception, for commit(), addAuthors(), addAdminQuery/deleteBlockQuery(), userId = "
                        + userId,
                e4);
    }

    /**
     *  member's list needs to be updated with the new query, 
     *  remove the old entries for this member
     */
    for (int i = 0; i < authorList.size(); i++) {
        String member = (String) authorList.get(i);
        if (!RegexStrUtil.isNull(member)) {
            Hdlogin hdlogin = getLoginid(member);
            if (hdlogin == null) {
                throw new BaseDaoException(
                        "addAuthors(), Hdlogin for member is null, addAuthor(), member= " + member);
            } else {
                memberId = hdlogin.getValue(DbConstants.LOGIN_ID);
            }
        }

        Fqn fqn = cacheUtil.fqn(DbConstants.AUTHORS_LIST);
        if (treeCache.exists(fqn, member)) {
            treeCache.remove(fqn, member);
        }

        fqn = cacheUtil.fqn(DbConstants.USER_PAGE);
        if (treeCache.exists(fqn, member)) {
            treeCache.remove(fqn, member);
        }

        fqn = cacheUtil.fqn(DbConstants.AUTHORS_DIRECTORIES);
        if (treeCache.exists(fqn, member)) {
            treeCache.remove(fqn, member);
        }

        StringBuffer sb = new StringBuffer(directoryId);
        sb.append("-");
        sb.append(memberId);
        fqn = cacheUtil.fqn(DbConstants.DIR_AUTHOR);
        if (treeCache.exists(fqn, sb.toString())) {
            treeCache.remove(fqn, sb.toString());
        }

        fqn = cacheUtil.fqn(DbConstants.AUTHOR_BLOCKED_DIRS);
        if (treeCache.exists(fqn, memberId)) {
            treeCache.remove(fqn, memberId);
        }
    }

    Fqn fqn = cacheUtil.fqn(DbConstants.AUTHORS_LIST);
    if (treeCache.exists(fqn, userLogin)) {
        treeCache.remove(fqn, userLogin);
    }

    fqn = cacheUtil.fqn(DbConstants.DIRECTORY);
    if (treeCache.exists(fqn, directoryId)) {
        treeCache.remove(fqn, directoryId);
    }

    fqn = cacheUtil.fqn(DbConstants.DIR_AUTHORS);
    if (treeCache.exists(fqn, directoryId)) {
        treeCache.remove(fqn, directoryId);
    }

    fqn = cacheUtil.fqn(DbConstants.DIR_ADMINS);
    if (treeCache.exists(fqn, directoryId)) {
        treeCache.remove(fqn, directoryId);
    }

    fqn = cacheUtil.fqn(DbConstants.ADMIN_DIRS);
    if (treeCache.exists(fqn, DbConstants.ADMIN_DIRS)) {
        treeCache.remove(fqn, DbConstants.ADMIN_DIRS);
    }
}