List of usage examples for java.sql Connection rollback
void rollback() throws SQLException;
Connection
object. From source file:dao.DirectoryDaoDb.java
/** * Add a new subdirectory for a directory. * User permissions are checked before the user is allowed to add it. * If the user is the userid or administrator, add subdirectory. * If the parent directory (parentid) permission id is set to addchild, add subdirectory * if the user has permission to addchild for parentid, add subdirectory. * @param dirname - directory name//from w w w . j a v a2 s.c om * @param keywords - keywords for this directory * @param parentId - parent id * @param scopeid - scope identifier * @param desc - description of the directory * @param userId of the parent * @param userLogin - of the parent * @param operations - assign authors/all are authors * @param addSanFlag - true - add san directory, false otherwise * @throws BaseDaoException If we have a problem interpreting the data or the data is missing or incorrect */ public void addDirectory(String dirname, String keywords, String parentId, String scopeid, String desc, String userId, String userLogin, String operations, boolean addSanFlag) throws BaseDaoException { /** * An entry is added in the following tables: directory, dirtree, dirscope, diradmin */ if (RegexStrUtil.isNull(userId) || RegexStrUtil.isNull(dirname)) { throw new BaseDaoException("params are null"); } boolean isAddable = false; String dirpath, dirlink, stateid; dirpath = dirlink = stateid = ""; /** * Does any directory exist in the database * New node is added as the root of the directory */ if (RegexStrUtil.isNull(parentId) && !checkIfDirectoryExists()) { stateid = "3"; if (addSanFlag) { try { addSanDir(dirpath, dirname); } catch (SanException e) { throw new BaseDaoException("addSanDir()" + e.getMessage(), e); } } addRootNode(dirname, keywords, desc, dirlink, dirpath, stateid, userId, scopeid); return; } else { if (diaryAdmin.isDiaryAdmin(userLogin)) { isAddable = true; } // allow the users who login for the first time to add directory to // the sanConstants.sanUserDirectory level with their login name as dirname if 
(GlobalConst.enableMyFilesDirectory) { if (dirname.equals(userLogin)) { isAddable = true; } } } /** * check global flag * operations: 1 (Designate Specific Members As Authors), 2 (Automatically Allow All Members To Be Authors) * status: 1 (Hidden), 2 (Ready) should we check for the status for operations (2) * if set to 1, check if this user is the designated author */ if (!isAddable) { DirScope dirscope = getDirectoryScope(parentId); //if ( dirscope.getValue(DbConstants.OPERATIONS).equals((Object)"2") ) { if (dirscope.getValue(DbConstants.OPERATIONS).equals(dirScope.getAllasauthorsoperation())) { isAddable = true; } else { if (isAuthor(parentId, userId)) { isAddable = true; } } } if (!isAddable) { throw new BaseDaoException("Donot have the permission to add to this directory, userId = " + userId); } /** * Get scalability datasource for directory - not partitioned */ String sourceName = scalabilityManager.getWriteZeroScalability(); ds = scalabilityManager.getSource(sourceName); if (ds == null) { throw new BaseDaoException("ds null, getSource() " + sourceName); } // dirpath ? // dirlink ? // stateid ? /** * Jboss methods * fqn - full qualified name * check if the parent direpath already set in the cache * If it exists, return the parent dirpath from the cache. */ Fqn fqn = cacheUtil.fqn(DbConstants.DIRECTORY); Object obj = treeCache.get(fqn, parentId); if (obj != null) { StringBuffer sb = new StringBuffer(); if (sb == null) { throw new BaseDaoException("new StringBufer() is null"); } String dpath = ((Directory) obj).getValue(DbConstants.DIRPATH); if (!RegexStrUtil.isNull(dpath)) { /** trims leading and trailing white spaces */ sb.append(dpath.trim()); } //logger.info("sb=" + sb.toString()); /** * This takes care of the root such as "Top" where dirpath is null. 
*/ if (!RegexStrUtil.isNull(sb.toString())) { sb.append(DbConstants.DIRPATH_COLON); } //logger.info("sb=" + sb.toString()); sb.append(((Directory) obj).getValue(DbConstants.DIRNAME)); //logger.info("sb=" + sb.toString()); sb.append(DbConstants.DIRPATH_PIPE); sb.append(parentId); dirpath = sb.toString(); } else { Directory parentInfo = getParentInfo(parentId); if (parentInfo != null) { StringBuffer sb = new StringBuffer(); if (sb == null) { throw new BaseDaoException("new StringBufer(), getParentInfo() is null"); } String dpath = parentInfo.getValue(DbConstants.DIRPATH); if (!RegexStrUtil.isNull(dpath)) { sb.append(dpath.trim()); } /** * This takes care of the root such as "Top" where dirpath is null. */ //logger.info("sb=" + sb.toString()); if (!RegexStrUtil.isNull(sb.toString())) { sb.append(DbConstants.DIRPATH_COLON); } sb.append(parentInfo.getValue(DbConstants.DIRNAME)); //logger.info("sb=" + sb.toString()); sb.append(DbConstants.DIRPATH_PIPE); sb.append(parentId); dirpath = sb.toString(); //dirpath = parentInfo.getValue(DbConstants.DIRPATH) + DbConstants.DIRPATH_COLON + parentInfo.getValue(DbConstants.DIRNAME) + DbConstants.DIRPATH_PIPE + parentId; } } if (addSanFlag && WebUtil.isSanEnabled()) { try { addSanDir(dirpath, dirname); } catch (SanException e) { throw new BaseDaoException("addSanDir()" + e.getMessage(), e); } } //logger.info("dirpath dirname = " + dirpath + dirname); /* if (WebUtil.isSanEnabled()) { logger.info("isSanEnabled"); getSanUtils(); if (sanUtils != null) { try { logger.info("dirpath = " + dirpath + " sanPath " + SanConstants.sanPath + " dirName = " + dirname); sanUtils.addSanDir(dirpath, SanConstants.sanPath, dirname); logger.info("addSanDir completed"); } catch(SanException e) { throw new SanException("addSanDirectory() error", e); } } else { throw new BaseDaoException("sanUtils is null in addDirectory()"); } } */ Connection conn = null; try { conn = ds.getConnection(); conn.setAutoCommit(false); directoryAddQuery.run(conn, dirname, 
keywords, desc, dirlink, dirpath, stateid, userId); /** * default dirscope table * Operations is 1 (assign authors) * Operations is 2 (allasauthors) * Scope is 1 (web) * Status is 2 (ready) * * Creator becomes admin in "diradmin" table * Date defaults to creation date */ addChildQuery.run(conn, "LAST_INSERT_ID()", parentId); scopeAddQuery.run(conn, "LAST_INSERT_ID()", operations, dirScope.getReadystatus(), scopeid); /** * This needs to be the last as the order is important, * sql behaviour changes for LAST_INSERT_ID. It will take entryId * for this table as the last_insert_id for the tables below it. */ addAdminQuery.run(conn, "LAST_INSERT_ID()", userId); } catch (Exception e) { try { conn.rollback(); } catch (Exception e1) { try { if (conn != null) { conn.setAutoCommit(true); conn.close(); } } catch (Exception e2) { throw new BaseDaoException( "conn.close() exception for rollback(), for add directory/scope tables params (6) " + " dirname = " + dirname + " keywords = " + keywords + " parentId = " + parentId + " scopeid = " + scopeid + " desc = " + desc + " userId = " + userId, e2); } throw new BaseDaoException("rollback() exception, for add directory/scope tables params (6) " + " dirname = " + dirname + " keywords = " + keywords + " parentId = " + parentId + " scopeid = " + scopeid + " desc = " + desc + " userId = " + userId, e1); } throw new BaseDaoException("for add directory/scope tables params (6) " + " dirname = " + dirname + " keywords = " + keywords + " parentId = " + parentId + " scopeid = " + scopeid + " desc = " + desc + " userId = " + userId, e); } // connection commit try { conn.commit(); } catch (Exception e3) { throw new BaseDaoException("commit() exception, for add directory/permatcat tables params (6) " + " dirname = " + dirname + " keywords = " + keywords + " parentId = " + parentId + " scopeid = " + scopeid + " desc = " + desc + " userId = " + userId, e3); } try { if (conn != null) { conn.setAutoCommit(true); conn.close(); } } catch (Exception e4) 
{ throw new BaseDaoException("conn.close() exception for commit(), add directory/scope tables " + " dirname = " + dirname + " keywords = " + keywords + " parentId = " + parentId + " scopeid = " + scopeid + " desc = " + desc + " userId = " + userId, e4); } /** * remove this directory from cache */ fqn = cacheUtil.fqn(DbConstants.DIRECTORY); if (treeCache.exists(fqn, parentId)) { treeCache.remove(fqn, parentId); } fqn = cacheUtil.fqn(DbConstants.AUTHORS_DIRECTORIES); if (treeCache.exists(fqn, userLogin)) { treeCache.remove(fqn, userLogin); } fqn = cacheUtil.fqn(DbConstants.AUTHORS_LIST); if (treeCache.exists(fqn, userLogin)) { treeCache.remove(fqn, userLogin); } fqn = cacheUtil.fqn(DbConstants.USER_PAGE); if (treeCache.exists(fqn, userLogin)) { treeCache.remove(fqn, userLogin); } }
From source file:dao.DirectoryDaoDb.java
/** * setQuotaSize - sets the user hard quota * @param memberId - members whose disk quota size is set * @param login - admin's login//w w w. ja va 2 s .co m * @param quotaSize - quota size * @return none * @throws BaseDaoException for errors */ public void setQuotaSize(String memberId, String login, String quotaSize) { if (RegexStrUtil.isNull(memberId) || RegexStrUtil.isNull(login) || RegexStrUtil.isNull(quotaSize)) { throw new BaseDaoException("params are null"); } if (!isAdmin(login)) { throw new BaseDaoException("Not an admin, cannot set quota size for the users, " + login); } String sourceName = scalabilityManager.getWriteZeroScalability(); ds = scalabilityManager.getSource(sourceName); if (ds == null) { throw new BaseDaoException("ds null, setQuotaSize() " + sourceName + " memberId = " + memberId); } /** * Get scalability datasource - not partitioned * accessFlag - the access flag */ Connection conn = null; String queryName = scalabilityManager.getWriteZeroScalability("setquotaquery"); setQuotaQuery = getQueryMapper().getCommonQuery(queryName); if (setQuotaQuery == null) { throw new BaseDaoException("setQuotaQuery is null, login= " + login + " memberId = " + memberId); } queryName = scalabilityManager.getWriteZeroScalability("updatequotaquery"); updateQuotaQuery = getQueryMapper().getCommonQuery(queryName); if (updateQuotaQuery == null) { throw new BaseDaoException("updateQuotaQuery is null, login= " + login + " memberId = " + memberId); } /* queryName = scalabilityManager.getWriteZeroScalability("quotaexistsquery"); quotaExistsQuery = getQueryMapper().getQuery(queryName); List result = quotaExistsQuery.execute(); */ List result = getQuotaSize(memberId, DbConstants.READ_FROM_MASTER); try { conn = ds.getConnection(); conn.setAutoCommit(false); String params[] = { memberId, quotaSize }; if (result != null && result.size() > 0) { updateQuotaQuery.run(conn, params); } else { setQuotaQuery.run(conn, params); } } catch (Exception e) { try { conn.rollback(); } catch 
(Exception e1) { try { if (conn != null) { conn.setAutoCommit(true); conn.close(); } } catch (Exception e2) { throw new BaseDaoException("conn.close() exception for rollback(), for setQuotaQuery() login = " + login + " memberId = " + memberId, e2); } throw new BaseDaoException( "rollback() exception, for SetQuotaQuery() login =" + login + " memberId = " + memberId, e1); } throw new BaseDaoException( "error in executing SetQuotaQuery, login=" + login + " memberId = " + memberId, e); } // connection commit try { conn.commit(); } catch (Exception e3) { throw new BaseDaoException("commit() exception, for SetQuotaQuery/UpdateQuotaQuery", e3); } try { if (conn != null) { conn.setAutoCommit(true); conn.close(); } } catch (Exception e4) { throw new BaseDaoException("conn.close() exception for setAutoCommit() SetQuotaQuery/UpdateQuotaQuery", e4); } Fqn fqn = cacheUtil.fqn(DbConstants.QSIZE); if (treeCache.exists(fqn, memberId)) { treeCache.remove(fqn, memberId); } }
From source file:com.pari.nm.utils.db.InventoryDBHelper.java
public static void updateIosVersionDetails(NetworkNode device) throws Exception { PreparedStatement ps = null;//from w w w .j av a2s .c o m Connection c = null; try { c = DBHelper.getConnection(); c.setAutoCommit(false); String query = "delete from ios_version where id=" + device.getNodeId() + ""; DBHelper.executeUpdateNoCommit(c, query); ps = c.prepareStatement(DBHelperConstants.IOS_VERSION_INSERT); Version ver = device.getVersion(); ps.setInt(1, device.getNodeId()); ps.setString(2, ver.getVersionStr()); ps.setString(3, ver.getBuild()); ps.setString(4, ver.getProductId()); ps.setString(5, ver.getDeviceType()); ps.setString(6, ver.getNodeName()); ps.setString(7, ver.getFlash()); ps.setString(8, ver.getMemory()); ps.setString(9, ver.getSerialNumber()); ps.setString(10, ver.getImageFileName()); ps.executeUpdate(); c.commit(); } catch (Exception ee) { System.err.println("NODE ID:" + device.getNodeId()); ee.printStackTrace(); c.rollback(); logger.warn("Error in updateIosVersionDetails", ee); throw ee; } finally { try { c.setAutoCommit(true); } catch (Exception ex) { } try { ps.close(); } catch (Exception ee) { } try { DBHelper.releaseConnection(c); } catch (Exception ee) { } } dumpFullIosVersion(device); }
From source file:com.emr.utilities.CSVLoader.java
/**
 * Parses a CSV file using the OpenCSV library and bulk-loads it into the
 * given database table with batched prepared-statement inserts inside one
 * transaction. When the destination table is "person", each row additionally
 * feeds a person_identifier row keyed by (person_id, uuid), and the name
 * columns are altered to be nullable before loading.
 *
 * @param csvFile {@link String} input CSV file path
 * @param tableName {@link String} database table name to import data into
 * @param truncateBeforeLoad {@code boolean} delete all rows from the table
 *        before inserting the new records
 * @param destinationColumns {@link String[]} array containing the destination columns
 * @param columnsToBeMapped fully-qualified column names ("table.column") whose
 *        values are passed through applyDataMapping() before insert
 * @throws Exception on any read/SQL failure; the transaction is rolled back
 */
public void loadCSV(String csvFile, String tableName, boolean truncateBeforeLoad, String[] destinationColumns,
        List columnsToBeMapped) throws Exception {
    CSVReader csvReader = null;
    if (null == this.connection) {
        throw new Exception("Not a valid connection.");
    }
    try {
        csvReader = new CSVReader(new FileReader(csvFile), this.seprator);
    } catch (Exception e) {
        String stacktrace = org.apache.commons.lang3.exception.ExceptionUtils.getStackTrace(e);
        JOptionPane.showMessageDialog(null, "Error occured while executing file. Error Details: " + stacktrace,
                "File Error", JOptionPane.ERROR_MESSAGE);
        throw new Exception("Error occured while executing file. " + stacktrace);
    }
    // first row is the header; it defines the column count and positions
    String[] headerRow = csvReader.readNext();
    if (null == headerRow) {
        throw new FileNotFoundException(
                "No columns defined in given CSV file." + "Please check the CSV file format.");
    }
    // 0-based indices of header columns whose values need data mapping
    List mapColumnsIndices = new ArrayList();
    for (Object o : columnsToBeMapped) {
        String column = (String) o;
        // strip the "table." prefix to get the bare column name
        column = column.substring(column.lastIndexOf(".") + 1, column.length());
        int i;
        for (i = 0; i < headerRow.length; i++) {
            if (headerRow[i].equals(column)) {
                mapColumnsIndices.add(i);
            }
        }
    }
    // build "INSERT INTO <table>(<cols>) VALUES(?,?,...)" from the template
    String questionmarks = StringUtils.repeat("?,", headerRow.length);
    questionmarks = (String) questionmarks.subSequence(0, questionmarks.length() - 1); // drop trailing comma
    String query = SQL_INSERT.replaceFirst(TABLE_REGEX, tableName);
    query = query.replaceFirst(KEYS_REGEX, StringUtils.join(destinationColumns, ","));
    query = query.replaceFirst(VALUES_REGEX, questionmarks);
    // prefix reused when echoing each inserted row to stdout
    String log_query = query.substring(0, query.indexOf("VALUES("));
    String[] nextLine;
    Connection con = null;
    PreparedStatement ps = null;
    PreparedStatement ps2 = null;
    PreparedStatement reader = null;
    ResultSet rs = null;
    try {
        con = this.connection;
        con.setAutoCommit(false);
        ps = con.prepareStatement(query);
        // local SQLite file -- NOTE(review): its purpose is not visible here;
        // within this method it is only opened, and disposed on error
        File file = new File("sqlite/db");
        if (!file.exists()) {
            file.createNewFile();
        }
        db = new SQLiteConnection(file);
        db.open(true);
        // if the destination table is "person", also add an entry to the
        // person_identifier table; find the person_id and uuid column indices
        int person_id_column_index = -1;
        int uuid_column_index = -1;
        int maxLength = 100; // unused
        int firstname_index = -1; // unused
        int middlename_index = -1; // unused
        int lastname_index = -1; // unused
        int clanname_index = -1; // unused
        int othername_index = -1; // unused
        if (tableName.equals("person")) {
            int i;
            ps2 = con.prepareStatement(
                    "insert ignore into person_identifier(person_id,identifier_type_id,identifier) values(?,?,?)");
            for (i = 0; i < headerRow.length; i++) {
                if (headerRow[i].equals("person_id")) {
                    person_id_column_index = i;
                }
                if (headerRow[i].equals("uuid")) {
                    uuid_column_index = i;
                }
            }
        }
        if (truncateBeforeLoad) {
            // delete data from the table before loading the CSV
            try (Statement stmnt = con.createStatement()) {
                stmnt.execute("DELETE FROM " + tableName);
                stmnt.close();
            }
        }
        if (tableName.equals("person")) {
            // make the name columns nullable so sparse CSV rows can load
            try (Statement stmt2 = con.createStatement()) {
                stmt2.execute(
                        "ALTER TABLE person CHANGE COLUMN first_name first_name VARCHAR(50) NULL DEFAULT NULL AFTER person_guid,CHANGE COLUMN middle_name middle_name VARCHAR(50) NULL DEFAULT NULL AFTER first_name,CHANGE COLUMN last_name last_name VARCHAR(50) NULL DEFAULT NULL AFTER middle_name;");
                stmt2.close();
            }
        }
        final int batchSize = 1000;
        int count = 0;
        Date date = null; // unused
        while ((nextLine = csvReader.readNext()) != null) {
            if (null != nextLine) {
                int index = 1; // 1-based JDBC parameter index
                int person_id = -1;
                String uuid = "";
                int identifier_type_id = 3; // fallback when no UUID type row exists
                if (tableName.equals("person")) {
                    // look up (or create) the 'UUID' identifier type.
                    // NOTE(review): executed once per row; reader/rs are
                    // reassigned without closing the previous instances
                    reader = con.prepareStatement(
                            "select identifier_type_id from identifier_type where identifier_type_name='UUID'");
                    rs = reader.executeQuery();
                    if (!rs.isBeforeFirst()) {
                        // no UUID row -- insert it and capture the generated key
                        Integer numero = 0;
                        Statement stmt = con.createStatement();
                        numero = stmt.executeUpdate(
                                "insert into identifier_type(identifier_type_id,identifier_type_name) values(50,'UUID')",
                                Statement.RETURN_GENERATED_KEYS);
                        ResultSet rs2 = stmt.getGeneratedKeys();
                        if (rs2.next()) {
                            identifier_type_id = rs2.getInt(1);
                        }
                        rs2.close();
                        stmt.close();
                    } else {
                        while (rs.next()) {
                            identifier_type_id = rs.getInt("identifier_type_id");
                        }
                    }
                }
                int counter = 1;
                String temp_log = log_query + "VALUES("; // string to be logged
                for (String string : nextLine) {
                    // if the current index is a mapped column, apply the mapping
                    for (Object o : mapColumnsIndices) {
                        int i = (int) o;
                        if (index == (i + 1)) {
                            string = applyDataMapping(string);
                        }
                    }
                    if (tableName.equals("person")) {
                        // capture person_id and uuid for the identifier insert
                        if (index == (person_id_column_index + 1)) {
                            person_id = Integer.parseInt(string);
                        }
                        if (index == (uuid_column_index + 1)) {
                            uuid = string;
                        }
                    }
                    // values shaped like dd-MMM-yyyy[ HH:mm:ss] are bound as SQL dates
                    if (string.matches("\\d{2}-[a-zA-Z]{3}-\\d{4} \\d{2}:\\d{2}:\\d{2}")
                            || string.matches("\\d{2}-[a-zA-Z]{3}-\\d{4}")) {
                        java.sql.Date dt = formatDate(string);
                        temp_log = temp_log + "'" + dt.toString() + "'";
                        ps.setDate(index++, dt);
                    } else {
                        if ("".equals(string)) {
                            temp_log = temp_log + "''";
                            ps.setNull(index++, Types.NULL);
                        } else {
                            temp_log = temp_log + "'" + string + "'";
                            ps.setString(index++, string);
                        }
                    }
                    if (counter < headerRow.length) {
                        temp_log = temp_log + ",";
                    } else {
                        temp_log = temp_log + ");";
                        System.out.println(temp_log);
                    }
                    counter++;
                }
                if (tableName.equals("person")) {
                    if (!"".equals(uuid) && person_id != -1) {
                        ps2.setInt(1, person_id);
                        ps2.setInt(2, identifier_type_id);
                        ps2.setString(3, uuid);
                        ps2.addBatch();
                    }
                }
                ps.addBatch();
            }
            // flush the batches every batchSize rows
            if (++count % batchSize == 0) {
                ps.executeBatch();
                if (tableName.equals("person")) {
                    ps2.executeBatch();
                }
            }
        }
        ps.executeBatch(); // insert remaining records
        if (tableName.equals("person")) {
            ps2.executeBatch();
        }
        con.commit();
    } catch (Exception e) {
        if (con != null)
            con.rollback();
        if (db != null)
            db.dispose();
        String stacktrace = org.apache.commons.lang3.exception.ExceptionUtils.getStackTrace(e);
        JOptionPane.showMessageDialog(null, "Error occured while executing file. Error Details: " + stacktrace,
                "File Error", JOptionPane.ERROR_MESSAGE);
        throw new Exception("Error occured while executing file. " + stacktrace);
    } finally {
        // NOTE(review): closes the shared this.connection field -- confirm
        // callers expect the connection to be unusable after this method
        if (null != reader)
            reader.close();
        if (null != ps)
            ps.close();
        if (null != ps2)
            ps2.close();
        if (null != con)
            con.close();
        csvReader.close();
    }
}
From source file:com.pari.nm.utils.db.InventoryDBHelper.java
public static void insertSnmpScalarsInBatch(int deviceId, Map<String, ISnmpScalar> snmpScalars, String type) { String insQuery = DBHelperConstants.INSERT_SNMP_MIB_QUERY; Connection con = null; PreparedStatement ps = null;//from ww w. ja va2 s . com try { con = DBHelper.getConnection(); ps = con.prepareStatement(insQuery); con.setAutoCommit(false); for (String oid : snmpScalars.keySet()) { ISnmpScalar value = snmpScalars.get(oid); String xmlValue = value.toXml(); ps.setInt(1, deviceId); ps.setString(2, oid); // There's no table oid for scalar values ps.setString(3, null); ps.setString(6, null); // TODO: Do we need to compress data before storing? if (ps instanceof OraclePreparedStatement) { ((OraclePreparedStatement) ps).setStringForClob(4, xmlValue); } else { logger.debug("PS is not OraclePreparedStatement, inserting as regular string"); ps.setString(4, xmlValue); } ps.setString(5, type); ps.addBatch(); } ps.executeBatch(); con.commit(); } catch (SQLException sqlex) { logger.error("Error while inserting rows to database", sqlex); try { if (con != null) { con.rollback(); } } catch (SQLException ex) { logger.error("Error while calling rollback on db conn", ex); } } catch (Exception ex) { logger.error("Error while inserting snmp data in batch", ex); } finally { try { if (con != null) { con.setAutoCommit(true); } } catch (SQLException sqlex) { logger.error("Error while calling setAutoCommit", sqlex); } try { ps.close(); } catch (SQLException sqlex) { logger.error("Error while closing ps", sqlex); } DBHelper.releaseConnection(con); } }
From source file:com.stratelia.webactiv.beans.admin.Admin.java
/**
 * Adds a space instance to the database inside a transaction spanning both
 * the domain driver manager and a dedicated production connection. Also
 * instantiates the space's components, updates the admin caches and indexes
 * the new space.
 *
 * @param userId    id of the user who adds the space (recorded as creator)
 * @param spaceInst SpaceInst object containing information about the space to
 *                  be created; mutated with its new id and creator
 * @return the created space id (client format)
 * @throws AdminException if creation fails; both transactions are rolled back
 *                        and the admin cache is reset
 */
public String addSpaceInst(String userId, SpaceInst spaceInst) throws AdminException {
    Connection connectionProd = null;
    DomainDriverManager domainDriverManager = DomainDriverManagerFactory.getCurrentDomainDriverManager();
    domainDriverManager.startTransaction(false);
    try {
        SilverTrace.info(MODULE_ADMIN, "admin.addSpaceInst", PARAM_MSG_KEY,
                "Space Name : " + spaceInst.getName() + " NbCompo: " + spaceInst.getNumComponentInst());
        // open the connection with auto-commit set to false
        connectionProd = openConnection(false);
        if (!spaceInst.isRoot()) {
            // it's a subspace: convert the client id into the driver id
            spaceInst.setDomainFatherId(getDriverSpaceId(spaceInst.getDomainFatherId()));
            if (useProfileInheritance && !spaceInst.isInheritanceBlocked()) {
                // inherit profiles from the super space: copy them onto the new space
                setSpaceProfilesToSubSpace(spaceInst, null);
            }
        }
        // create the space instance
        spaceInst.setCreatorUserId(userId);
        String sSpaceInstId = spaceManager.createSpaceInst(spaceInst, domainDriverManager);
        spaceInst.setId(getClientSpaceId(sSpaceInstId));
        // put the new space in the cache
        cache.opAddSpace(getSpaceInstById(sSpaceInstId, true));
        // instantiate the space's components
        ArrayList<ComponentInst> alCompoInst = spaceInst.getAllComponentsInst();
        for (ComponentInst componentInst : alCompoInst) {
            componentInst.setDomainFatherId(spaceInst.getId());
            addComponentInst(userId, componentInst, false);
        }
        // commit both transactions
        domainDriverManager.commit();
        connectionProd.commit();
        SpaceInstLight space = getSpaceInstLight(sSpaceInstId);
        addSpaceInTreeCache(space, true);
        // index the space
        SilverTrace.info(MODULE_ADMIN, "admin.addSpaceInst", PARAM_MSG_KEY,
                "Indexation : spaceInst = " + spaceInst.getName());
        createSpaceIndex(space);
        return spaceInst.getId();
    } catch (Exception e) {
        try {
            // roll back both transactions and reset the cache.
            // NOTE(review): connectionProd may still be null here if
            // openConnection() failed -- the resulting NPE is caught and
            // logged below, but confirm this is the intended behavior
            domainDriverManager.rollback();
            connectionProd.rollback();
            cache.resetCache();
        } catch (Exception e1) {
            SilverTrace.error(MODULE_ADMIN, "Admin.addSpaceInst", "root.EX_ERR_ROLLBACK", e1);
        }
        throw new AdminException("Admin.addSpaceInst", SilverpeasException.ERROR, "admin.EX_ERR_ADD_SPACE",
                "space name : '" + spaceInst.getName() + "'", e);
    } finally {
        // release the organization schema and close the connection
        domainDriverManager.releaseOrganizationSchema();
        DBUtil.close(connectionProd);
    }
}
From source file:com.pari.nm.utils.db.InventoryDBHelper.java
public static void insertCustomerWingSettings(int customerId, String instanceName, String wingSettings) throws PariException { int MAX_WING_SETTINGS_SIZE = 4000; Customer customer = null;// w w w . j ava2s . c o m if (ServerProperties.getInstance().getProductProfile().getProductShortName() .equals(Constants.PARITRA_PRODUCT)) { customer = CustomerManager.getInstance().getCustomerById(customerId); if (customer == null) { throw (new PariException(-1, "Unable to save Customer Wing Settings. \nSelected Customer may have been deleted.")); } } PreparedStatement ps = null; Connection c = null; try { c = DBHelper.getConnection(); c.setAutoCommit(false); String query = null; if (instanceName == null) { query = "delete from customer_wing_settings where customer_id=" + customerId; } else { query = "delete from customer_wing_settings where customer_id=" + customerId + " and instance_name='" + instanceName + "'"; } DBHelper.executeUpdateNoCommit(c, query); int size = wingSettings.length(); int noOfDBRows = size / MAX_WING_SETTINGS_SIZE; if ((noOfDBRows * MAX_WING_SETTINGS_SIZE) < size) { noOfDBRows += 1; } ps = c.prepareStatement(DBHelperConstants.INSERT_CUSTOMER_WING_SETTINGS); for (int i = 0; i < noOfDBRows; i++) { int startIndex = i * MAX_WING_SETTINGS_SIZE; int endIndex = (i + 1) * MAX_WING_SETTINGS_SIZE; if (endIndex > size) { endIndex = size; } String conf = wingSettings.substring(startIndex, endIndex); ps.setInt(1, customerId); ps.setString(2, instanceName); ps.setInt(3, i); ps.setString(4, conf); ps.executeUpdate(); } c.commit(); } catch (Exception ex) { ex.printStackTrace(); try { c.rollback(); } catch (Exception sqlEx) { } logger.warn("Error while trying to save wing settings for customer", ex); throw (new PariException(-1, "Error while saving Customer Wing Settings." + (customer == null ? 
"" : " for the customer " + customer.getCustomerName()))); } finally { if (ps != null) { try { ps.close(); } catch (Exception psex) { } } try { if (c != null) { c.setAutoCommit(true); } } catch (Exception ee) { } DBHelper.releaseConnection(c); } updateWingSettingsDetailsModifiedTime(customerId, instanceName); }
From source file:com.pari.nm.utils.db.InventoryDBHelper.java
public static void insertSnmpColsInBatch(int deviceId, Map<String, ISnmpColumn> snmpCols, String type) { String insQuery = DBHelperConstants.INSERT_SNMP_MIB_QUERY; Connection con = null; PreparedStatement ps = null;/*from w ww. j a v a 2 s . c o m*/ try { con = DBHelper.getConnection(); ps = con.prepareStatement(insQuery); con.setAutoCommit(false); for (String colOid : snmpCols.keySet()) { // e.g. if colOid is .1.3.6.1.2.1.47.1.1.1.1.5.2, then entryOid // is .1.3.6.1.2.1.47.1.1.1.1.5 String entryOid = colOid.substring(0, colOid.lastIndexOf(".")); // and tableOid is .1.3.6.1.2.1.47.1.1.1.1 String tableOid = entryOid.substring(0, entryOid.lastIndexOf(".")); ISnmpColumn snmpCol = snmpCols.get(colOid); String colXml = snmpCol.toXml(); ps.setInt(1, deviceId); ps.setString(2, colOid); ps.setString(3, tableOid); ps.setString(6, snmpCol.getTitle()); // TODO: Do we need to compress data before storing? if (ps instanceof OraclePreparedStatement) { ((OraclePreparedStatement) ps).setStringForClob(4, colXml); } else { logger.debug("PS is not OraclePreparedStatement, inserting as regular string"); ps.setString(4, colXml); } ps.setString(5, type); ps.addBatch(); } ps.executeBatch(); con.commit(); } catch (SQLException sqlex) { logger.error("Error while inserting rows to database", sqlex); try { if (con != null) { con.rollback(); } } catch (SQLException ex) { logger.error("Error while calling rollback on db conn", ex); } } catch (Exception ex) { logger.error("Error while inserting snmp data in batch", ex); } finally { try { if (con != null) { con.setAutoCommit(true); } } catch (SQLException sqlex) { logger.error("Error while calling setAutoCommit", sqlex); } try { ps.close(); } catch (SQLException sqlex) { logger.error("Error while closing ps", sqlex); } DBHelper.releaseConnection(con); } }
From source file:dao.DirectoryDaoDb.java
/** * paste the directory into the parent/current directory * @param parentId - the parent id where a directory is being moved * @param userLogin - the user login//from w w w.j a va2 s .c om * @param userId - the user id whose directory is moved or who is the author of this directory * @throws BaseDaoException - when error occurs */ public void pasteDirectory(String parentId, String userLogin, String userId) throws BaseDaoException { if (RegexStrUtil.isNull(parentId) || RegexStrUtil.isNull(userLogin) || RegexStrUtil.isNull(userId)) { throw new BaseDaoException("params are null in pasteDirectory"); } /** * does not have permission to paste the directory */ if (!diaryAdmin.isDiaryAdmin(userLogin) && !isAuthor(parentId, userId)) { return; } /** * Get scalability datasource for dircopy - not partitioned */ String sourceName = scalabilityManager.getReadZeroScalability(); ds = scalabilityManager.getSource(sourceName); if (ds == null) { throw new BaseDaoException("ds null " + sourceName); } /** * only one directory per user is allowed to be moved, * we don't want to store more than one moves in DB * get the directoryId that needs to be pasted. 
*/ String directoryId = null; String dirName = null; String srcDirPath = null; Object[] myparams = { (Object) userId }; try { Directory obj = isMoveExists(myparams); if (obj != null) { directoryId = ((Directory) obj).getValue(DbConstants.DIRECTORY_ID); } else { throw new BaseDaoException("dirmove for userId is null, userId " + userId); } } catch (BaseDaoException e) { throw new BaseDaoException("isMovExists() error=" + e.getMessage(), e); } if (directoryId == null) { throw new BaseDaoException("directoryId that is being pasted is null"); } /** * get the directory path of the directory that is being moved */ if (!RegexStrUtil.isNull(directoryId)) { Fqn fqn = cacheUtil.fqn(DbConstants.DIRECTORY); Object obj = treeCache.get(fqn, directoryId); if (obj != null) { removeDirPathsFromCache(((Directory) obj).getValue(DbConstants.DIRPATH)); dirName = ((Directory) obj).getValue(DbConstants.DIRNAME); srcDirPath = ((Directory) obj).getValue(DbConstants.DIRPATH); } else { Directory dir = getDirectory(directoryId, userId, DbConstants.READ_FROM_SLAVE); if (dir != null) { dirName = dir.getValue(DbConstants.DIRNAME); srcDirPath = dir.getValue(DbConstants.DIRPATH); } } } /** * Jboss methods * fqn - full qualified name * check if the parent dirpath already set in the cache * or get it from DB. * If it exists, return the parent dirpath from the cache. 
* This parent is the new parent for the above directoryId */ StringBuffer newDirPath = null; Fqn fqn = cacheUtil.fqn(DbConstants.DIRECTORY); Object obj = treeCache.get(fqn, parentId); if (obj != null) { newDirPath = new StringBuffer(((Directory) obj).getValue(DbConstants.DIRPATH)); newDirPath.append(DbConstants.DIRPATH_COLON); newDirPath.append(((Directory) obj).getValue(DbConstants.DIRNAME)); } else { Directory parentInfo = getParentInfo(parentId); if (parentInfo != null) { newDirPath = new StringBuffer(parentInfo.getValue(DbConstants.DIRPATH)); newDirPath.append(DbConstants.DIRPATH_COLON); newDirPath.append(parentInfo.getValue(DbConstants.DIRNAME)); } } String newPath = null; if (newDirPath != null) { newDirPath.append(DbConstants.DIRPATH_PIPE); newDirPath.append(parentId); newPath = newDirPath.toString(); } /** * When a directory is moved, make sure you move all * the subdirectories that belong to subdirectory. */ Connection conn = null; try { conn = ds.getConnection(); conn.setAutoCommit(false); dirMoveDeleteQuery.run(conn, directoryId, userId); /** * Allowing moves + paste(s) when the directory has children * get from dirtree all the children of the directoryid * update the children (directories) dirpath */ if (WebUtil.isDirTreePasteEnabled()) { if (WebUtil.isSanEnabled()) { try { getSanUtils(); sanUtils.moveSanDir(srcDirPath, dirName, SanConstants.sanPath, newPath, dirName); } catch (SanException e) { throw new BaseDaoException( "moveSanDir() error, " + directoryId + " error message " + e.getMessage(), e); } } // complete isSanEnabled /** * If this directory is moving up in the parent tree * then the path replacement requires change * E dirpath = A|1::B|2::C|3::D|4 (srcDirPath) * Directory E moved upto level B: * E dirpath now will be: A|1::B|2 (newDirPath) * E's children dirpath: A|1::B|2::E|5 */ dirPasteQuery.run(conn, directoryId, newPath); /** * for the mainDirectory that is being moved, set it to true * for children directories, update the path */ 
updatePath(directoryId, newPath, srcDirPath, conn, true); } else { /** * * if there are no children, do not recurse */ dirPasteQuery.run(conn, directoryId, newPath); } /** * only the top level directory gets affected with the change * in the dirtree parentid, the rest below the directory i.e * subdirs parents remain the same. So the subdirs are not * changed. */ dirChildUpdateQuery.run(conn, directoryId, parentId); } catch (Exception e) { try { conn.rollback(); } catch (Exception e1) { try { if (conn != null) { conn.setAutoCommit(true); conn.close(); } } catch (Exception e2) { throw new BaseDaoException( "conn.close() exception for rollback(), for pasteDirectory() directoryId =" + directoryId + " parentId = " + parentId, e2); } throw new BaseDaoException("rollback() exception, for pasteDirectory() directoryId =" + directoryId + " parentId = " + parentId, e1); } throw new BaseDaoException( "autocommit() exception, directoryId =" + directoryId + " parentId = " + parentId, e); } // connection commit try { conn.commit(); } catch (Exception e3) { throw new BaseDaoException("commit() exception, for pasteDirectory() directoryId= " + directoryId + "parentId = " + parentId, e3); } try { if (conn != null) { conn.setAutoCommit(true); conn.close(); } } catch (Exception e4) { throw new BaseDaoException("conn.close() exception for commit(), pasteDirectory(), directoryId=" + directoryId + " parentId =" + parentId, e4); } fqn = cacheUtil.fqn(DbConstants.DIRECTORY); if (treeCache.exists(fqn, directoryId)) { treeCache.remove(fqn, directoryId); } if (treeCache.exists(fqn, parentId)) { treeCache.remove(fqn, parentId); } Fqn moveFqn = cacheUtil.fqn(DbConstants.DIR_MOVE); if (treeCache.exists(moveFqn, userId)) { treeCache.remove(moveFqn, userId); } }
From source file:dao.DirectoryDaoDb.java
/** * saveGlobalQuotas - updates/saves global quotas * @param login - admin's login//from w w w . ja v a 2 s. co m * @param entryList - list of entryid that need to be update with quotas * @param entryListVals - list of vals that need to be update with quotas * @param qNameList - list of names for whose quota has to be updated * (these are new entries) * @param qNameListVals - list of vals for whose quota has to be updated * @param qtype - quota type (section/area/division/organization/group) * @return none * @throws BaseDaoException for errors */ public void saveGlobalQuotas(String login, List entryList, List entryListVals, List qNameList, List qNameListVals, List qTypeList) { if (RegexStrUtil.isNull(login)) { throw new BaseDaoException("params are null"); } if (!isAdmin(login)) { throw new BaseDaoException("Not an admin, cannot set quota size for the users, " + login); } String sourceName = scalabilityManager.getWriteZeroScalability(); ds = scalabilityManager.getSource(sourceName); if (ds == null) { throw new BaseDaoException("ds null, saveGlobalQuotas() " + sourceName + " login = " + login); } /** * Get scalability datasource - not partitioned * accessFlag - the access flag */ if (entryList != null && entryList.size() > 0 && entryListVals != null && entryListVals.size() > 0) { String queryName = scalabilityManager.getWriteZeroScalability("updateglobalquotaquery"); updateGlobalQuotaQuery = getQueryMapper().getCommonQuery(queryName); if (updateGlobalQuotaQuery == null) { throw new BaseDaoException("updateGlobalQuotaQuery is null, login= " + login); } } if ((qNameList != null && qNameList.size() > 0) && (qNameListVals != null && qNameListVals.size() > 0) && (qTypeList != null && qTypeList.size() > 0)) { String queryName = scalabilityManager.getWriteZeroScalability("addglobalquotaquery"); addGlobalQuotaQuery = getQueryMapper().getCommonQuery(queryName); if (addGlobalQuotaQuery == null) { throw new BaseDaoException("addGlobalQuotaQuery is null, login= " + login); } } 
/** * update global quotas for existing ones */ Connection conn = null; try { conn = ds.getConnection(); conn.setAutoCommit(false); if (updateGlobalQuotaQuery != null) { for (int i = 0; i < entryList.size(); i++) { if (RegexStrUtil.isNull((String) entryList.get(i))) { continue; } else { if (RegexStrUtil.isNull((String) entryListVals.get(i))) { entryListVals.set(i, (Object) "0"); } String params[] = { (String) entryList.get(i), (String) entryListVals.get(i) }; updateGlobalQuotaQuery.run(conn, params); } } } /** * add global quotas for new ones */ if (addGlobalQuotaQuery != null) { if (qTypeList.size() != qNameList.size()) { throw new BaseDaoException("qType missing for some quota name categories, qNameList.size() = " + qNameList.size() + " qTypeList.size() = " + qTypeList.size()); } if (qNameList.size() != qNameListVals.size()) { throw new BaseDaoException( "quotaValue missing for some quota name categories qNameList.size()= " + qNameList.size() + " qNameListVals.size() = " + qNameListVals.size()); } for (int i = 0; i < qNameList.size(); i++) { if (RegexStrUtil.isNull((String) qNameList.get(i))) { continue; } else { if (RegexStrUtil.isNull((String) qTypeList.get(i))) { throw new BaseDaoException("quota type is missing for quotName =" + qNameList.get(i)); } if (RegexStrUtil.isNull((String) qNameListVals.get(i))) { qNameListVals.set(i, (Object) "0"); } String params[] = { (String) qTypeList.get(i), (String) qNameList.get(i), (String) qNameListVals.get(i) }; addGlobalQuotaQuery.run(conn, params); } } } } catch (Exception e) { try { conn.rollback(); } catch (Exception e1) { try { if (conn != null) { conn.setAutoCommit(true); conn.close(); } } catch (Exception e2) { throw new BaseDaoException( "conn.close() exception for rollback(), for updateGlobalQuotaQuery()/addGlobalQuotaQuery login = " + login, e2); } throw new BaseDaoException( "rollback() exception, for UpdateGlobalQuotaQuery()/addGlobalQuotaQuery login =" + login, e1); } throw new BaseDaoException( "error in 
executing UpdateGlobalQuotaQuery/AddGlobalQuotaQuery, login=" + login, e); } // connection commit try { conn.commit(); } catch (Exception e3) { throw new BaseDaoException("commit() exception, for AddGlobalQuotaQuery/UpdateGlobalQuotaQuery", e3); } try { if (conn != null) { conn.setAutoCommit(true); conn.close(); } } catch (Exception e4) { throw new BaseDaoException( "conn.close() exception for setAutoCommit() addGlobalQuotaQuery/UpdateGlobalQuotaQuery", e4); } }