List of usage examples for java.util.HashSet.iterator()
public Iterator<E> iterator()
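Before the longer real-world examples below, a minimal self-contained sketch of the call itself (the class name and sample values are hypothetical): iterator() returns an Iterator over the set's elements in no guaranteed order, and Iterator.remove() is the only safe way to drop elements mid-iteration.

import java.util.HashSet;
import java.util.Iterator;

public class HashSetIteratorDemo {
    public static void main(String[] args) {
        HashSet<String> colors = new HashSet<String>();
        colors.add("red");
        colors.add("green");
        colors.add("blue");

        // HashSet makes no ordering guarantee, so output order may vary.
        Iterator<String> it = colors.iterator();
        while (it.hasNext()) {
            String color = it.next();
            if (color.startsWith("g")) {
                it.remove(); // safe structural removal during iteration
            } else {
                System.out.println(color);
            }
        }
        System.out.println(colors); // [red, blue] in some order
    }
}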
From source file: org.apache.hadoop.hive.ql.parse.TaskCompiler.java

@SuppressWarnings({ "nls", "unchecked" })
public void compile(final ParseContext pCtx, final List<Task<? extends Serializable>> rootTasks,
        final HashSet<ReadEntity> inputs, final HashSet<WriteEntity> outputs) throws SemanticException {

    Context ctx = pCtx.getContext();
    GlobalLimitCtx globalLimitCtx = pCtx.getGlobalLimitCtx();
    List<Task<MoveWork>> mvTask = new ArrayList<Task<MoveWork>>();

    List<LoadTableDesc> loadTableWork = pCtx.getLoadTableWork();
    List<LoadFileDesc> loadFileWork = pCtx.getLoadFileWork();

    boolean isCStats = pCtx.getQueryProperties().isAnalyzeRewrite();
    int outerQueryLimit = pCtx.getQueryProperties().getOuterQueryLimit();

    if (pCtx.getFetchTask() != null) {
        return;
    }

    optimizeOperatorPlan(pCtx, inputs, outputs);

    /*
     * In case of a select, use a fetch task instead of a move task.
     * If the select is from analyze table column rewrite, don't create a fetch task. Instead create
     * a column stats task later.
     */
    if (pCtx.getQueryProperties().isQuery() && !isCStats) {
        if ((!loadTableWork.isEmpty()) || (loadFileWork.size() != 1)) {
            throw new SemanticException(ErrorMsg.GENERIC_ERROR.getMsg());
        }

        LoadFileDesc loadFileDesc = loadFileWork.get(0);

        String cols = loadFileDesc.getColumns();
        String colTypes = loadFileDesc.getColumnTypes();

        TableDesc resultTab = pCtx.getFetchTableDesc();
        if (resultTab == null) {
            String resFileFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYRESULTFILEFORMAT);
            resultTab = PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes, resFileFormat);
        }

        FetchWork fetch = new FetchWork(loadFileDesc.getSourcePath(), resultTab, outerQueryLimit);
        fetch.setSource(pCtx.getFetchSource());
        fetch.setSink(pCtx.getFetchSink());

        pCtx.setFetchTask((FetchTask) TaskFactory.get(fetch, conf));

        // For the FetchTask, the limit optimization requires we fetch all the rows
        // in memory and count how many rows we get. It's not practical if the
        // limit factor is too big.
        int fetchLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVELIMITOPTMAXFETCH);
        if (globalLimitCtx.isEnable() && globalLimitCtx.getGlobalLimit() > fetchLimit) {
            LOG.info("For FetchTask, LIMIT " + globalLimitCtx.getGlobalLimit() + " > " + fetchLimit
                    + ". Doesn't qualify limit optimization.");
            globalLimitCtx.disableOpt();
        }
        if (outerQueryLimit == 0) {
            // Believe it or not, some tools do generate queries with limit 0 and then expect
            // the query to run quickly. Let's meet their requirement.
            LOG.info("Limit 0. No query execution needed.");
            return;
        }
    } else if (!isCStats) {
        for (LoadTableDesc ltd : loadTableWork) {
            Task<MoveWork> tsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false), conf);
            mvTask.add(tsk);
            // Check to see if we are staling any indexes and auto-update them if we want
            if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEINDEXAUTOUPDATE)) {
                IndexUpdater indexUpdater = new IndexUpdater(loadTableWork, inputs, conf);
                try {
                    List<Task<? extends Serializable>> indexUpdateTasks = indexUpdater.generateUpdateTasks();
                    for (Task<? extends Serializable> updateTask : indexUpdateTasks) {
                        tsk.addDependentTask(updateTask);
                    }
                } catch (HiveException e) {
                    console.printInfo("WARNING: could not auto-update stale indexes, which are not in sync");
                }
            }
        }

        boolean oneLoadFile = true;
        for (LoadFileDesc lfd : loadFileWork) {
            if (pCtx.getQueryProperties().isCTAS()) {
                assert (oneLoadFile); // should not have more than 1 load file for CTAS
                // make the movetask's destination directory the table's destination.
                Path location;
                String loc = pCtx.getCreateTable().getLocation();
                if (loc == null) {
                    // get the table's default location
                    Path targetPath;
                    try {
                        String[] names = Utilities.getDbTableName(pCtx.getCreateTable().getTableName());
                        if (!db.databaseExists(names[0])) {
                            throw new SemanticException("ERROR: The database " + names[0] + " does not exist.");
                        }
                        Warehouse wh = new Warehouse(conf);
                        targetPath = wh.getTablePath(db.getDatabase(names[0]), names[1]);
                    } catch (HiveException e) {
                        throw new SemanticException(e);
                    } catch (MetaException e) {
                        throw new SemanticException(e);
                    }
                    location = targetPath;
                } else {
                    location = new Path(loc);
                }
                lfd.setTargetDir(location);
                oneLoadFile = false;
            }
            mvTask.add(TaskFactory.get(new MoveWork(null, null, null, lfd, false), conf));
        }
    }

    generateTaskTree(rootTasks, pCtx, mvTask, inputs, outputs);

    /*
     * If the query was the result of analyze table column compute statistics rewrite, create
     * a column stats task instead of a fetch task to persist stats to the metastore.
     */
    if (isCStats) {
        genColumnStatsTask(pCtx.getAnalyzeRewrite(), loadTableWork, loadFileWork, rootTasks, outerQueryLimit);
    }

    // For each task, set the key descriptor for the reducer
    for (Task<? extends Serializable> rootTask : rootTasks) {
        GenMapRedUtils.setKeyAndValueDescForTaskTree(rootTask);
    }

    // If a task contains an operator which instructs bucketizedhiveinputformat
    // to be used, please do so
    for (Task<? extends Serializable> rootTask : rootTasks) {
        setInputFormat(rootTask);
    }

    optimizeTaskPlan(rootTasks, pCtx, ctx);

    decideExecMode(rootTasks, ctx, globalLimitCtx);

    if (pCtx.getQueryProperties().isCTAS()) {
        // generate a DDL task and make it a dependent task of the leaf
        CreateTableDesc crtTblDesc = pCtx.getCreateTable();

        crtTblDesc.validate(conf);

        // clear the mapredWork output file from outputs for CTAS;
        // DDLWork at the tail of the chain will have the output
        Iterator<WriteEntity> outIter = outputs.iterator();
        while (outIter.hasNext()) {
            switch (outIter.next().getType()) {
            case DFS_DIR:
            case LOCAL_DIR:
                outIter.remove();
                break;
            default:
                break;
            }
        }

        Task<? extends Serializable> crtTblTask = TaskFactory.get(new DDLWork(inputs, outputs, crtTblDesc), conf);

        // find all leaf tasks and make the DDLTask a dependent task of all of them
        HashSet<Task<? extends Serializable>> leaves = new LinkedHashSet<Task<? extends Serializable>>();
        getLeafTasks(rootTasks, leaves);
        assert (leaves.size() > 0);
        for (Task<? extends Serializable> task : leaves) {
            if (task instanceof StatsTask) {
                // StatsTask requires the table to already exist
                for (Task<? extends Serializable> parentOfStatsTask : task.getParentTasks()) {
                    parentOfStatsTask.addDependentTask(crtTblTask);
                }
                for (Task<? extends Serializable> parentOfCrtTblTask : crtTblTask.getParentTasks()) {
                    parentOfCrtTblTask.removeDependentTask(task);
                }
                crtTblTask.addDependentTask(task);
            } else {
                task.addDependentTask(crtTblTask);
            }
        }
    }

    if (globalLimitCtx.isEnable() && pCtx.getFetchTask() != null) {
        LOG.info("set least row check for FetchTask: " + globalLimitCtx.getGlobalLimit());
        pCtx.getFetchTask().getWork().setLeastNumRows(globalLimitCtx.getGlobalLimit());
    }

    if (globalLimitCtx.isEnable() && globalLimitCtx.getLastReduceLimitDesc() != null) {
        LOG.info("set least row check for LimitDesc: " + globalLimitCtx.getGlobalLimit());
        globalLimitCtx.getLastReduceLimitDesc().setLeastRows(globalLimitCtx.getGlobalLimit());
        List<ExecDriver> mrTasks = Utilities.getMRTasks(rootTasks);
        for (ExecDriver tsk : mrTasks) {
            tsk.setRetryCmdWhenFail(true);
        }
        List<SparkTask> sparkTasks = Utilities.getSparkTasks(rootTasks);
        for (SparkTask sparkTask : sparkTasks) {
            sparkTask.setRetryCmdWhenFail(true);
        }
    }

    Interner<TableDesc> interner = Interners.newStrongInterner();
    for (Task<? extends Serializable> rootTask : rootTasks) {
        GenMapRedUtils.internTableDesc(rootTask, interner);
    }
}
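The HashSet-specific part of this compiler pass is the CTAS branch: it walks outputs through an explicit Iterator so it can call remove() on DFS_DIR/LOCAL_DIR entries while iterating. A reduced sketch of that filter-in-place pattern; the EntryType enum and WriteEntry class below are stand-ins for Hive's WriteEntity, not real Hive API.

import java.util.HashSet;
import java.util.Iterator;

public class OutputFilter {

    enum EntryType { DFS_DIR, LOCAL_DIR, TABLE }

    static class WriteEntry {
        final EntryType type;
        WriteEntry(EntryType type) { this.type = type; }
        EntryType getType() { return type; }
    }

    // Filter-in-place: a for-each loop would throw
    // ConcurrentModificationException on removal, so the code keeps an
    // explicit Iterator and removes through it.
    static void removeDirEntries(HashSet<WriteEntry> outputs) {
        Iterator<WriteEntry> outIter = outputs.iterator();
        while (outIter.hasNext()) {
            switch (outIter.next().getType()) {
            case DFS_DIR:
            case LOCAL_DIR:
                outIter.remove();
                break;
            default:
                break;
            }
        }
    }

    public static void main(String[] args) {
        HashSet<WriteEntry> outputs = new HashSet<WriteEntry>();
        outputs.add(new WriteEntry(EntryType.DFS_DIR));
        outputs.add(new WriteEntry(EntryType.TABLE));
        removeDirEntries(outputs);
        System.out.println(outputs.size()); // 1
    }
}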
From source file: com.krawler.formbuilder.servlet.ReportBuilderDaoImpl.java

public String saveReportGridConfig(String jsonstr, String reportid, boolean createTable, String tbar,
        String bbar) throws ServiceException {
    String result = "{\"success\":true}";
    String tableName = "";
    // String jsonstr = request.getParameter("jsondata");
    try {
        JSONObject jobj = new JSONObject();
        // String reportid = request.getParameter("reportid");
        // boolean createTable = Boolean.parseBoolean(request.getParameter("createtable"));
        mb_reportlist report = (mb_reportlist) get(mb_reportlist.class, reportid);
        if (createTable) {
            tableName = "rb_" + toLZ(report.getReportkey(), 3) + "_"
                    + report.getReportname().replace(" ", "").toLowerCase();
        } else {
            tableName = report.getTablename();
        }
        HashSet<String> hashSet = new HashSet<String>();
        HashSet<String> finalHashSet = new HashSet<String>();
        String hql = "delete from com.krawler.esp.hibernate.impl.mb_gridconfig as mb_gridconfig "
                + "where mb_gridconfig.reportid = ? ";
        int numDelRec = executeUpdate(hql, new Object[] { report });
        JSONArray jsonArray = new JSONArray(jsonstr);
        int confCnt = 0;
        for (int k = 0; k < jsonArray.length(); k++) {
            jobj = jsonArray.getJSONObject(k);
            if (!jobj.getString("name").equals("id")) {
                com.krawler.esp.hibernate.impl.mb_gridconfig gridConf =
                        new com.krawler.esp.hibernate.impl.mb_gridconfig();
                // java.text.SimpleDateFormat sdf = new java.text.SimpleDateFormat("yyyy-MM-d HH:mm:ss");
                // java.sql.Timestamp timestamp1 = Timestamp.valueOf(sdf.format(new java.util.Date()));
                if (jobj.getString("name").indexOf(".") > -1) {
                    String[] tablecolumn = jobj.getString("name").split("\\.");
                    gridConf.setName(tablecolumn[0] + PropsValues.REPORT_HARDCODE_STR
                            + tablecolumn[1].toLowerCase());
                } else {
                    if (jobj.getString("name").indexOf(PropsValues.REPORT_HARDCODE_STR) == -1) {
                        // String Columnname = moduleBuilderMethods.getColumnName(
                        //         moduleBuilderMethods.getcolumnNameStr(jobj.getString("name").toLowerCase()));
                        String Columnname = jobj.getString("name").toLowerCase();
                        gridConf.setName(tableName + PropsValues.REPORT_HARDCODE_STR + Columnname);
                    }
                }
                if (StringUtil.isNullOrEmpty(jobj.getString("displayfield")))
                    gridConf.setDisplayfield(jobj.getString("name"));
                else
                    gridConf.setDisplayfield(jobj.getString("displayfield"));
                if (!StringUtil.isNullOrEmpty(jobj.getString("reftable"))) {
                    gridConf.setReftable(jobj.getString("reftable"));
                } else if (StringUtil.isNullOrEmpty(jobj.getString("reftable"))
                        && !jobj.getString("combogridconfig").equals("-1")) {
                    gridConf.setReftable("");
                } else {
                    if (createTable)
                        gridConf.setReftable(tableName);
                }
                gridConf.setXtype(jobj.getString("xtype"));
                renderer render = null;
                if (jobj.getString("renderer").length() > 0) {
                    render = (renderer) get(renderer.class, jobj.getString("renderer"));
                } else {
                    render = (renderer) get(renderer.class, "0");
                }
                // gridConf.setRenderer(render);
                // gridConf.setFilter(jobj.getString("filter"));
                gridConf.setSummaryType(jobj.getString("summaryType"));
                gridConf.setDefaultValue(jobj.getString("defaultValue"));
                gridConf.setHidden(Boolean.parseBoolean(jobj.getString("hidden")));
                gridConf.setCountflag(Boolean.parseBoolean(jobj.getString("countflag")));
                String combogridconfig = "-1";
                String refTable = jobj.getString("reftable");
                String xtype = jobj.getString("xtype");
                if (xtype.equals("Combobox") && !StringUtil.isNullOrEmpty(refTable)
                        && !refTable.equals(tableName)) {
                    String SELECT_QUERY = "Select mb_reportlist.reportid from com.krawler.esp.hibernate.impl.mb_reportlist as mb_reportlist "
                            + " where mb_reportlist.tablename = ?";
                    List list = find(SELECT_QUERY, new Object[] { refTable });
                    Iterator ite = list.iterator();
                    String reportid1 = null;
                    if (ite.hasNext()) {
                        reportid1 = (String) ite.next();
                    }
                    if (reportid1 != null) {
                        String name = null;
                        if (jobj.getString("name").indexOf(".") > -1) {
                            String[] tablecolumn = jobj.getString("name").split("\\.");
                            name = tablecolumn[0] + PropsValues.REPORT_HARDCODE_STR
                                    + tablecolumn[1].toLowerCase();
                        } else {
                            if (jobj.getString("name").indexOf(PropsValues.REPORT_HARDCODE_STR) == -1)
                                name = tableName + PropsValues.REPORT_HARDCODE_STR
                                        + jobj.getString("name").toLowerCase();
                        }
                        mb_reportlist report1 = (mb_reportlist) get(mb_reportlist.class, reportid1);
                        SELECT_QUERY = "select mb_gridconfig.combogridconfig from com.krawler.esp.hibernate.impl.mb_gridconfig as mb_gridconfig "
                                + "where mb_gridconfig.reportid = ? and mb_gridconfig.name = ?";
                        list = find(SELECT_QUERY, new Object[] { report1, name });
                        ite = list.iterator();
                        if (ite.hasNext()) {
                            combogridconfig = (String) ite.next();
                        }
                    }
                } else if (!jobj.getString("combogridconfig").equals("-1")) {
                    combogridconfig = jobj.getString("combogridconfig");
                }
                gridConf.setCombogridconfig(combogridconfig);
                gridConf.setColumnindex(k);
                gridConf.setReportid(report);
                save(gridConf);
                String strid = gridConf.getId();
                confCnt++;
                if (!StringUtil.isNullOrEmpty(jobj.getString("reftable"))
                        && !jobj.getString("reftable").equals(tableName)) {
                    String fkKeyName = jobj.getString("reftable") + "."
                            + (getPrimaryColName(jobj.getString("reftable")));
                    if (fkKeyName.equals(jobj.getString("name"))) {
                        hashSet.add(fkKeyName);
                        finalHashSet.remove(fkKeyName);
                    } else if (!hashSet.contains(fkKeyName)) {
                        finalHashSet.add(fkKeyName);
                    }
                }
            }
        }
        if (finalHashSet.size() > 0) {
            Iterator itr = finalHashSet.iterator();
            while (itr.hasNext()) {
                // Insert id fields of reference tables
                com.krawler.esp.hibernate.impl.mb_gridconfig gridConf =
                        new com.krawler.esp.hibernate.impl.mb_gridconfig();
                String tablecolumn = itr.next().toString();
                tablecolumn = tablecolumn.replace(".", PropsValues.REPORT_HARDCODE_STR);
                gridConf.setName(tablecolumn);
                gridConf.setDisplayfield(tablecolumn);
                gridConf.setReftable(tablecolumn.split(PropsValues.REPORT_HARDCODE_STR)[0]);
                gridConf.setXtype("None");
                gridConf.setHidden(true);
                renderer render = (renderer) get(renderer.class, "0");
                gridConf.setRenderer(render);
                gridConf.setColumnindex(confCnt++);
                gridConf.setReportid(report);
                gridConf.setCombogridconfig("-1");
                // gridConf.setFilter("");
                gridConf.setCountflag(false);
                save(gridConf);
            }
            // String actionType = "Add Report Grid Config";
            // String details = "Grid Config added for Report " + report.getReportname();
            // long actionId = AuditTrialHandler.getActionId(session, actionType);
            // AuditTrialHandler.insertAuditLog(session, actionId, details, request);
        }
        if (createTable) {
            int cnt = 0;
            // Insert id field of new table
            com.krawler.esp.hibernate.impl.mb_gridconfig gridConf =
                    new com.krawler.esp.hibernate.impl.mb_gridconfig();
            gridConf.setName(tableName + PropsValues.REPORT_HARDCODE_STR + "id");
            gridConf.setDisplayfield("id");
            gridConf.setReftable(tableName);
            gridConf.setXtype("None");
            gridConf.setHidden(true);
            renderer render = (renderer) get(renderer.class, "0");
            gridConf.setRenderer(render);
            gridConf.setColumnindex(confCnt++);
            gridConf.setReportid(report);
            gridConf.setCombogridconfig("-1");
            // gridConf.setFilter("");
            gridConf.setCountflag(false);
            save(gridConf);
            // save report table name
            report.setTablename(tableName);
            save(report);
            ArrayList<Hashtable<String, Object>> aList = new ArrayList<Hashtable<String, Object>>();
            Object[] objArrField = new Object[] { "name", "type", "primaryid", "default" };
            Object[] objArr = new Object[] { "id", "String", "true", "" };
            moduleBuilderGenerateTable.makeEntryToArrayList(cnt, aList, objArr, objArrField);
            objArrField = new Object[] { "name", "type", "default" };
            objArr = new Object[] { "createdby", "String", "" };
            moduleBuilderGenerateTable.makeEntryToArrayList(cnt, aList, objArr, objArrField);
            objArr = new Object[] { "createddate", "Date", "" };
            moduleBuilderGenerateTable.makeEntryToArrayList(cnt, aList, objArr, objArrField);
            objArr = new Object[] { "modifieddate", "Date", "" };
            moduleBuilderGenerateTable.makeEntryToArrayList(cnt, aList, objArr, objArrField);
            objArr = new Object[] { "deleteflag", "double", "" };
            moduleBuilderGenerateTable.makeEntryToArrayList(cnt, aList, objArr, objArrField);
            HashSet<String> hs = new HashSet<String>();
            for (int k = 0; k < jsonArray.length(); k++) {
                JSONObject obj = jsonArray.getJSONObject(k);
                if (!StringUtil.isNullOrEmpty(obj.getString("reftable"))
                        && !obj.getString("reftable").equals(tableName)) {
                    if (!Boolean.parseBoolean(obj.getString("countflag"))) {
                        if (hs.add(obj.getString("reftable"))) {
                            Object[] objArrField1 = new Object[] { "name", "reftable", "type", "foreignid",
                                    "default" };
                            String fkKeyName = obj.getString("reftable")
                                    .concat(getPrimaryColName(obj.getString("reftable")));
                            objArr = new Object[] { fkKeyName, obj.getString("reftable"), "String", true,
                                    obj.getString("defaultValue") };
                            moduleBuilderGenerateTable.makeEntryToArrayList(cnt, aList, objArr, objArrField1);
                        }
                    }
                } else {
                    if (!obj.getString("name").equals("id")) {
                        String type = "";
                        if (obj.getString("xtype").equals("Checkbox") || obj.getString("xtype").equals("Radio")) {
                            type = "boolean";
                        } else if (obj.getString("xtype").equals("Date")) {
                            type = "Date";
                        } else if (obj.getString("xtype").equals("Number(Integer)")) {
                            type = "int";
                        } else if (obj.getString("xtype").equals("Number(Float)")) {
                            type = "double";
                        } else if (obj.getString("xtype").equals("Combobox")) {
                            type = "String";
                        } else {
                            type = "String";
                        }
                        objArr = new Object[] { obj.getString("name").toLowerCase(), type,
                                obj.getString("defaultValue") };
                        moduleBuilderGenerateTable.makeEntryToArrayList(cnt, aList, objArr, objArrField);
                    }
                }
            }
            hs.clear();
            ServiceBuilder sb = new ServiceBuilder();
            // sb.createServiceXMLFile(aList, tableName);
            sb.createJavaFile(tableName, true);
            // String actionType = "Add Report Grid Config Table";
            // String details = "Grid Config Table added for Report " + report.getReportname();
            // long actionId = AuditTrialHandler.getActionId(session, actionType);
            // AuditTrialHandler.insertAuditLog(session, actionId, details, request);
        } else {
            String className = "rb_" + toLZ(report.getReportkey(), 3) + "_"
                    + report.getReportname().replace(" ", "").toLowerCase();
            // save report table name
            // report.setTablename(className);
            // session.save(report);
            // Create only an implementation java class for a report for which no new table is created.
            ServiceBuilder sb = new ServiceBuilder();
            sb.createImplJavaFile(className, true);
        }
        // if (numDelRec == 0) { // if first time store then add permission entry for add/edit/delete action
        //     mb_permgrmaster permgrmaster = new mb_permgrmaster();
        //     accessRight.addPermGrp(session, permgrmaster, report);
        //     com.krawler.esp.hibernate.impl.mb_permmaster permmaster = null;
        //     for (int i = 2; i < 9; i++) {
        //         permmaster = new com.krawler.esp.hibernate.impl.mb_permmaster();
        //         mb_permactions permaction = (mb_permactions) session.load(mb_permactions.class, i);
        //         permmaster.setPermaction(permaction);
        //         permmaster.setPermname(permaction.getName());
        //         permmaster.setDescription(permaction.getName());
        //         permmaster.setPermgrid(permgrmaster);
        //         permmaster.setPermid(accessRight.getMaxPermid(session, permgrmaster.getPermgrid()));
        //         session.save(permmaster);
        //     }
        // }
        storeToolbarConf(reportid, tbar, bbar);
        hql = "SELECT mb_gridconfig.columnindex,mb_gridconfig.hidden,mb_gridconfig.reftable,mb_gridconfig.renderer,mb_gridconfig.xtype,mb_gridconfig.displayfield,mb_gridconfig.name "
                + "FROM com.krawler.esp.hibernate.impl.mb_gridconfig AS mb_gridconfig "
                + "WHERE mb_gridconfig.reportid = ?";
        List list = find(hql, new Object[] { report });
        Iterator ite = list.iterator();
        JSONObject r = new JSONObject();
        while (ite.hasNext()) {
            Object[] row = (Object[]) ite.next();
            JSONObject temp = new JSONObject();
            temp.put("index", row[0]);
            temp.put("hidden", row[1]);
            temp.put("reftable", row[2]);
            temp.put("renderer", row[3]);
            temp.put("xtype", row[4]);
            temp.put("displayfield", row[5]);
            temp.put("name", row[6]);
            r.append("data", temp);
        }
        r.put("success", true);
        r.put("reportId", reportid);
        r.put("tablename", tableName);
        result = r.toString();
    } catch (JSONException e) {
        logger.warn(e.getMessage(), e);
        result = "{\"success\":false}";
        throw ServiceException.FAILURE("reportbuilder.saveReportGridConfig", e);
    } catch (Exception e) {
        logger.warn(e.getMessage(), e);
        result = "{\"success\":false}";
        throw ServiceException.FAILURE("reportbuilder.saveReportGridConfig", e);
    }
    return result;
}
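The finalHashSet walk above uses the raw Iterator type even though the set is declared HashSet<String>, which is why it needs itr.next().toString(). A small sketch of the same walk with a typed iterator; the "$" separator is a hypothetical stand-in for PropsValues.REPORT_HARDCODE_STR, whose value is not shown in the source.

import java.util.HashSet;
import java.util.Iterator;

public class TypedIteration {
    public static void main(String[] args) {
        // Hypothetical separator standing in for PropsValues.REPORT_HARDCODE_STR.
        String REPORT_HARDCODE_STR = "$";

        HashSet<String> finalHashSet = new HashSet<String>();
        finalHashSet.add("reftable.id");

        // Iterator<String> yields Strings directly, so no toString() call is needed.
        Iterator<String> itr = finalHashSet.iterator();
        while (itr.hasNext()) {
            String tablecolumn = itr.next().replace(".", REPORT_HARDCODE_STR);
            System.out.println(tablecolumn); // reftable$id
        }
    }
}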
From source file: dao.CollabrumDaoDb.java

private void deleteRBCollabrum(String directoryId, String collabrumId, String userId, String userLogin)
        throws BaseDaoException {
    if (RegexStrUtil.isNull(userId) || RegexStrUtil.isNull(collabrumId) || RegexStrUtil.isNull(directoryId)
            || RegexStrUtil.isNull(userLogin)) {
        throw new BaseDaoException("params are null");
    }

    List tidList = getTidList(collabrumId);
    List blobEntryList = getBlobEntryList(collabrumId);
    Vector ridVector = new Vector();
    for (int i = 0; i < tidList.size(); i++) {
        /* get list of rids from collmessages */
        List ridList = getRidList((String) ((ColTopic) tidList.get(i)).getValue(DbConstants.TID));
        ridVector.add(ridList);
    }

    /**
     * get the members list from collmembers, then access each record in this table
     * collblock (deleteAllColBlockQuery) partitioned on loginid
     * deleteColBlockQuery.run(conn, collabrumId);
     */
    deleteBlockedMembers(collabrumId);

    /**
     * Get scalability datasource with no partitions for colladmin, collmembers, dircoll, collabrum
     */
    String sourceName = scalabilityManager.getWriteZeroScalability();
    ds = scalabilityManager.getSource(sourceName);
    if (ds == null) {
        StringBuffer sb = new StringBuffer("ds is null, deleteCollabrum() ");
        sb.append(sourceName);
        sb.append(" collabrumId = ");
        sb.append(collabrumId);
        throw new BaseDaoException(sb.toString());
    }

    HashSet result = null;
    Connection conn = null;

    /**
     * non partitioned tables
     */
    try {
        conn = ds.getConnection();
        conn.setAutoCommit(false);
        result = listModeratorQuery.run(conn, collabrumId);

        /**
         * Not partitioned:
         * collabrum (deleteQuery)
         * colladmin (deleteAdminQuery)
         * dircoll (deleteDirColQuery)
         * collmembers (deleteColMembersQuery)
         *
         * collblobtags (deleteColBlobTagsQuery)
         * collblogtags (deleteColBlogTagsQuery)
         * collabrum_ind (deleteCollabrumIndexQuery)
         * collblob_ind (deleteColBlobIndexQuery)
         * collmessages_ind (deleteColMessagesIndexQuery)
         * colltopics_ind (deleteColTopicsIndexQuery)
         */
        deleteQuery.run(conn, collabrumId);
        deleteAdminQuery.run(conn, collabrumId);
        deleteDircollQuery.run(conn, collabrumId);
        deleteAllMembersQuery.run(conn, collabrumId);

        /* new ones */
        deleteColBlobTagsQuery.run(conn, collabrumId);
        deleteColBlogTagsQuery.run(conn, collabrumId);
        deleteCollabrumIndexQuery.run(conn, collabrumId);
        for (int i = 0; i < blobEntryList.size(); i++) {
            deleteColBlobIndexQuery.run(conn,
                    (String) ((Photo) blobEntryList.get(i)).getValue(DbConstants.ENTRYID));
        }
        for (int i = 0; i < tidList.size(); i++) {
            deleteColTopicsIndexQuery.run(conn,
                    (String) ((ColTopic) tidList.get(i)).getValue(DbConstants.TID));
        }
        for (int i = 0; i < ridVector.size(); i++) {
            List ridList = (List) ridVector.elementAt(i);
            for (int j = 0; j < ridList.size(); j++) {
                deleteColMessagesIndexQuery.run(conn,
                        (String) ((ColMessage) ridList.get(j)).getValue(DbConstants.RID));
            }
        }
    } catch (Exception e) {
        try {
            conn.rollback();
        } catch (Exception e1) {
            try {
                if (conn != null) {
                    conn.setAutoCommit(true);
                    conn.close();
                }
            } catch (Exception e2) {
                StringBuffer sb = new StringBuffer("conn.close exception for rollback(), for deleteCollabrum() ");
                sb.append("collabrumId = ");
                sb.append(collabrumId);
                sb.append(" userId = ");
                sb.append(userId);
                throw new BaseDaoException(sb.toString(), e2);
            }
            StringBuffer sb = new StringBuffer(" rollback() exception, for deleteCollabrum() ");
            sb.append("collabrumId = ");
            sb.append(collabrumId);
            sb.append(" userId = ");
            sb.append(userId);
            throw new BaseDaoException(sb.toString(), e1);
        }
    }

    // connection commit
    try {
        conn.commit();
    } catch (Exception e3) {
        StringBuffer sb = new StringBuffer(" commit() exception, for deleteCollabrum() collabrumId = ");
        sb.append(collabrumId);
        sb.append(" userId = ");
        sb.append(userId);
        throw new BaseDaoException(sb.toString(), e3);
    }

    try {
        if (conn != null) {
            conn.setAutoCommit(true);
            conn.close();
        }
    } catch (Exception e4) {
        StringBuffer sb = new StringBuffer(
                " conn.close() exception, for commit(), deleteCollabrum() collabrumId = ");
        sb.append(collabrumId);
        sb.append(" userId = ");
        sb.append(userId);
        throw new BaseDaoException(sb.toString(), e4);
    }

    deleteCollMessages(collabrumId, tidList);
    deleteCollTopics(collabrumId, tidList);

    /**
     * Jboss methods
     * fqn - fully qualified name
     * check if the collabrum already exists in the cache;
     * if it exists, remove the collabrum from the cache
     */
    Fqn fqn = cacheUtil.fqn(DbConstants.COLLABRUM);
    if (treeCache.exists(fqn, collabrumId)) {
        treeCache.remove(fqn, collabrumId);
    }
    fqn = cacheUtil.fqn(DbConstants.ORGANIZERS);
    if (treeCache.exists(fqn, collabrumId)) {
        treeCache.remove(fqn, collabrumId);
    }
    fqn = cacheUtil.fqn(DbConstants.COLLABRUM_EDIT);
    if (treeCache.exists(fqn, collabrumId)) {
        treeCache.remove(fqn, collabrumId);
    }
    fqn = cacheUtil.fqn(DbConstants.DIRECTORY);
    if (treeCache.exists(fqn, directoryId)) {
        treeCache.remove(fqn, directoryId);
    }
    fqn = cacheUtil.fqn(DbConstants.COLTOPICS);
    if (treeCache.exists(fqn, collabrumId)) {
        treeCache.remove(fqn, collabrumId);
    }
    fqn = cacheUtil.fqn(DbConstants.COLTRAFFIC);
    if (treeCache.exists(fqn, collabrumId)) {
        treeCache.remove(fqn, collabrumId);
    }

    /**
     * delete collabrum messages
     */
    for (int i = 0; i < tidList.size(); i++) {
        StringBuffer sb = new StringBuffer(collabrumId);
        sb.append("-");
        sb.append(tidList.get(i));
        String key = sb.toString();
        fqn = cacheUtil.fqn(DbConstants.COLMSGS);
        if (treeCache.exists(fqn, key)) {
            treeCache.remove(fqn, key);
        }
        fqn = cacheUtil.fqn(DbConstants.COLTOPIC);
        if (treeCache.exists(fqn, key)) {
            treeCache.remove(fqn, key);
        }
    }

    fqn = cacheUtil.fqn(DbConstants.COLLABRUM_STREAM_BLOBS);
    if (treeCache.exists(fqn, collabrumId)) {
        treeCache.remove(fqn, collabrumId);
    }

    // deleting user pages for each admin as we want them to be updated
    if ((result != null) && (result.size() > 0)) {
        Iterator it = result.iterator();
        StringBuffer sb = new StringBuffer();
        while (it.hasNext()) {
            Collabrum collabrum = (Collabrum) it.next();
            String adminUser = collabrum.getValue(DbConstants.LOGIN);
            if (!RegexStrUtil.isNull(adminUser)) {
                fqn = cacheUtil.fqn(DbConstants.USER_PAGE);
                if (treeCache.exists(fqn, adminUser)) {
                    treeCache.remove(fqn, adminUser);
                }
                fqn = cacheUtil.fqn(DbConstants.MEM_AS_ORGANIZER_LIST);
                if (treeCache.exists(fqn, adminUser)) {
                    treeCache.remove(fqn, adminUser);
                }
                fqn = cacheUtil.fqn(DbConstants.MEM_AS_MODERATOR_LIST);
                if (treeCache.exists(fqn, adminUser)) {
                    treeCache.remove(fqn, adminUser);
                }
                String adminId = collabrum.getValue(DbConstants.LOGIN_ID);
                fqn = cacheUtil.fqn(DbConstants.BLOCKED_COLLABRUM_LIST);
                if (treeCache.exists(fqn, adminId)) {
                    treeCache.remove(fqn, adminId);
                }
                // delete organizer, key = collabrumid-memberid
                sb.delete(0, sb.length());
                sb.append(collabrumId);
                sb.append("-");
                sb.append(adminId);
                fqn = cacheUtil.fqn(DbConstants.ORGANIZER);
                if (treeCache.exists(fqn, sb.toString())) {
                    treeCache.remove(fqn, sb.toString());
                }
            }
        }
    }

    fqn = cacheUtil.fqn(DbConstants.COLLABRUM_LIST);
    if (treeCache.exists(fqn, directoryId)) {
        treeCache.remove(fqn, directoryId);
    }

    /**
     * Jboss methods -
     * fqn - fully qualified name
     * check if the streamblob is already set in the cache;
     * if it exists, remove the bean from the cache.
     */
    for (int i = 0; i < blobEntryList.size(); i++) {
        String entryId = (String) ((Photo) blobEntryList.get(i)).getValue(DbConstants.ENTRYID);
        fqn = cacheUtil.fqn(DbConstants.PHOTO);
        if (treeCache.exists(fqn, entryId)) {
            treeCache.remove(fqn, entryId);
        }
        StringBuffer buf = new StringBuffer(collabrumId);
        buf.append("-");
        buf.append(entryId);
        String key = buf.toString();
        fqn = cacheUtil.fqn(DbConstants.COL_STREAM_BLOB);
        if (treeCache.exists(fqn, key)) {
            treeCache.remove(fqn, key);
        }
        fqn = cacheUtil.fqn(DbConstants.DEFAULT_PHOTO);
        if (treeCache.exists(fqn, key)) {
            treeCache.remove(fqn, key);
        }
    }

    fqn = cacheUtil.fqn(DbConstants.COLL_CAT);
    StringBuffer sb = new StringBuffer(collabrumId);
    sb.append("-");
    sb.append(DbConstants.PHOTO_CATEGORY);
    if (treeCache.exists(fqn, sb.toString())) {
        treeCache.remove(fqn, sb.toString());
    }
    sb.delete(0, sb.length());
    sb.append(collabrumId);
    sb.append("-");
    sb.append(DbConstants.FILE_CATEGORY);
    if (treeCache.exists(fqn, sb.toString())) {
        treeCache.remove(fqn, sb.toString());
    }
}
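The moderator set here comes back as a raw HashSet, so each element needs a (Collabrum) cast inside the loop. A minimal sketch showing that parameterizing the set removes the cast; the Collabrum class below is a stub standing in for the real DAO bean.

import java.util.HashSet;
import java.util.Iterator;

public class ModeratorWalk {

    // Stub standing in for the real Collabrum bean.
    static class Collabrum {
        final String login;
        Collabrum(String login) { this.login = login; }
    }

    public static void main(String[] args) {
        // Declared as HashSet<Collabrum> instead of the raw HashSet above,
        // so the (Collabrum) cast inside the loop disappears.
        HashSet<Collabrum> result = new HashSet<Collabrum>();
        result.add(new Collabrum("admin1"));
        result.add(new Collabrum("admin2"));

        Iterator<Collabrum> it = result.iterator();
        while (it.hasNext()) {
            Collabrum collabrum = it.next();
            System.out.println(collabrum.login);
        }
    }
}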
From source file: com.zimbra.cs.account.ldap.LdapProvisioning.java

private void removeDynamicGroupMembers(LdapDynamicGroup group, String[] members, boolean externalOnly)
        throws ServiceException {
    if (group.isMembershipDefinedByCustomURL()) {
        throw ServiceException.INVALID_REQUEST(String.format(
                "cannot remove members from dynamic group '%s' with custom memberURL", group.getName()), null);
    }

    String groupId = group.getId();

    List<Account> accts = new ArrayList<Account>();
    List<String> externalAddrs = new ArrayList<String>();
    HashSet<String> failed = new HashSet<String>();

    // check for errors, and put valid accts to the queue
    for (String member : members) {
        String memberName = member.toLowerCase();

        boolean isBadAddr = false;
        try {
            memberName = IDNUtil.toAsciiEmail(memberName);
        } catch (ServiceException e) {
            // if the addr is not a valid email address, maybe they want to
            // remove a bogus addr that somehow got in, just let it through.
            memberName = member;
            isBadAddr = true;
        }

        // always add all addrs to "externalAddrs".
        externalAddrs.add(memberName);

        if (!externalOnly) {
            Account acct = isBadAddr ? null : get(AccountBy.name, member);
            if (acct != null) {
                Set<String> memberOf = acct.getMultiAttrSet(Provisioning.A_zimbraMemberOf);
                if (memberOf.contains(groupId)) {
                    accts.add(acct);
                } else {
                    // else the addr is not in the group, throw exception
                    failed.add(memberName);
                }
            }
        }
    }

    if (!failed.isEmpty()) {
        StringBuilder sb = new StringBuilder();
        Iterator<String> iter = failed.iterator();
        while (true) {
            sb.append(iter.next());
            if (!iter.hasNext()) {
                break;
            }
            sb.append(",");
        }
        throw AccountServiceException.NO_SUCH_MEMBER(group.getName(), sb.toString());
    }

    ZLdapContext zlc = null;
    try {
        zlc = LdapClient.getContext(LdapServerType.MASTER, LdapUsage.REMOVE_GROUP_MEMBER);

        /*
         * remove internal members
         */
        for (Account acct : accts) {
            Map<String, Object> attrs = new HashMap<String, Object>();
            attrs.put("-" + Provisioning.A_zimbraMemberOf, groupId);
            modifyLdapAttrs(acct, zlc, attrs);
            clearUpwardMembershipCache(acct);
        }

        /*
         * remove external members on the static unit
         */
        LdapDynamicGroup.StaticUnit staticUnit = group.getStaticUnit();
        Set<String> existingAddrs = staticUnit.getMembersSet();
        List<String> addrsToRemove = Lists.newArrayList();
        for (String addr : externalAddrs) {
            if (existingAddrs.contains(addr)) {
                addrsToRemove.add(addr);
            }
        }

        if (!addrsToRemove.isEmpty()) {
            Map<String, String[]> attrs = new HashMap<String, String[]>();
            attrs.put("-" + LdapDynamicGroup.StaticUnit.MEMBER_ATTR,
                    addrsToRemove.toArray(new String[addrsToRemove.size()]));
            modifyLdapAttrs(staticUnit, zlc, attrs);
        }
    } finally {
        LdapClient.closeContext(zlc);
    }

    PermissionCache.invalidateCache();
    cleanGroupMembersCache(group);
}
From source file: com.zimbra.cs.account.ldap.LdapProvisioning.java

private void removeDistributionListMembers(DistributionList dl, String[] members) throws ServiceException {
    Set<String> curMembers = dl.getMultiAttrSet(Provisioning.A_zimbraMailForwardingAddress);

    // bug 46219, need a case-insensitive Set
    Set<String> existing = new TreeSet<String>(String.CASE_INSENSITIVE_ORDER);
    existing.addAll(curMembers);

    Set<String> mods = new HashSet<String>();
    HashSet<String> failed = new HashSet<String>();

    for (int i = 0; i < members.length; i++) {
        String memberName = members[i].toLowerCase();
        memberName = IDNUtil.toAsciiEmail(memberName);

        if (memberName.length() == 0) {
            throw ServiceException.INVALID_REQUEST("invalid member email address: " + memberName, null);
        }
        // We do not do any further validation of the remove address for
        // syntax - removes should be liberal so any bad entries added by
        // some other means can be removed.
        //
        // members[] can contain:
        //   - the primary address of an account or another DL
        //   - an alias of an account or another DL
        //   - junk (allAddrs will be returned as null)
        AddrsOfEntry addrsOfEntry = getAllAddressesOfEntry(memberName);
        List<String> allAddrs = addrsOfEntry.getAll();

        if (mods.contains(memberName)) {
            // already been added in mods (is the primary or alias of previous entries in members[])
        } else if (existing.contains(memberName)) {
            if (!allAddrs.isEmpty()) {
                mods.addAll(allAddrs);
            } else {
                mods.add(memberName); // just get rid of it regardless what it is
            }
        } else {
            boolean inList = false;
            if (allAddrs.size() > 0) {
                // go through all addresses of the entry to see if any is on the DL
                for (String addr : allAddrs) {
                    if (inList) {
                        break;
                    }
                    if (existing.contains(addr)) {
                        mods.addAll(allAddrs);
                        inList = true;
                    }
                }
            }
            if (!inList) {
                failed.add(memberName);
            }
        }

        // clear the DL cache on accounts/dl
        String primary = addrsOfEntry.getPrimary();
        if (primary != null) {
            if (addrsOfEntry.isAccount()) {
                Account acct = getFromCache(AccountBy.name, primary);
                if (acct != null) {
                    clearUpwardMembershipCache(acct);
                }
            } else {
                removeGroupFromCache(Key.DistributionListBy.name, primary);
            }
        }
    }

    if (!failed.isEmpty()) {
        StringBuilder sb = new StringBuilder();
        Iterator<String> iter = failed.iterator();
        while (true) {
            sb.append(iter.next());
            if (!iter.hasNext()) {
                break;
            }
            sb.append(",");
        }
        throw AccountServiceException.NO_SUCH_MEMBER(dl.getName(), sb.toString());
    }

    if (mods.isEmpty()) {
        throw ServiceException.INVALID_REQUEST("empty remove set", null);
    }

    PermissionCache.invalidateCache();
    cleanGroupMembersCache(dl);

    Map<String, String[]> modmap = new HashMap<String, String[]>();
    modmap.put("-" + Provisioning.A_zimbraMailForwardingAddress, mods.toArray(new String[0]));
    modifyAttrs(dl, modmap);
}
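Both Zimbra methods build the NO_SUCH_MEMBER detail string by hand-joining the failed HashSet with an iterator; the while (true) shape is safe only because it runs behind a !failed.isEmpty() check. A standalone sketch of the pattern (sample addresses are made up), plus the String.join equivalent available since Java 8:

import java.util.HashSet;
import java.util.Iterator;

public class JoinFailed {
    public static void main(String[] args) {
        HashSet<String> failed = new HashSet<String>();
        failed.add("a@example.com");
        failed.add("b@example.com");

        // Manual iterator join, as in both methods above. The while (true)
        // relies on the set being non-empty, which the callers guarantee.
        StringBuilder sb = new StringBuilder();
        Iterator<String> iter = failed.iterator();
        while (true) {
            sb.append(iter.next());
            if (!iter.hasNext()) {
                break;
            }
            sb.append(",");
        }
        System.out.println(sb);

        // Equivalent one-liner on Java 8+ (also handles the empty set):
        System.out.println(String.join(",", failed));
    }
}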
From source file: StreamFlusher.java

public Object visit(ASTsymtab_report_statement node, Object data) {
    HashSet<String> hs = mainFrame.keySet();
    Iterator<String> i = hs.iterator();

    PseudoTerminalInternalFrame terminal = null;
    if (((InterpData) data).getInGUI() == true) {
        terminal = ((InterpData) data).getGUI().getTerminal();
    }

    while (i.hasNext()) {
        String id = i.next();
        if (terminal != null) {
            terminal.appendToHistory("// " + id);
        } else {
            System.out.println("// " + id);
        }
    }
    return data;
}
From source file: StreamFlusher.java

public Object visit(ASTgsymtab_report_statement node, Object data) {
    HashSet<String> hs = mainFrame.getStaticMother().keySet();
    Iterator<String> i = hs.iterator();

    PseudoTerminalInternalFrame terminal = null;
    if (((InterpData) data).getInGUI() == true) {
        terminal = ((InterpData) data).getGUI().getTerminal();
    }

    while (i.hasNext()) {
        String id = i.next();
        if (terminal != null) {
            terminal.appendToHistory("// " + id);
        } else {
            System.out.println("// " + id);
        }
    }
    return data;
}
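Neither report visitor removes entries while walking the key set, so the explicit iterator buys nothing over the enhanced for loop, which calls iterator() implicitly. A minimal equivalent (sample ids are made up):

import java.util.HashSet;

public class KeyReport {
    public static void main(String[] args) {
        HashSet<String> hs = new HashSet<String>();
        hs.add("$net1");
        hs.add("$net2");

        // No remove() is needed, so the enhanced for loop does the same
        // job as the explicit Iterator in the two visit methods above.
        for (String id : hs) {
            System.out.println("// " + id);
        }
    }
}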
From source file: StreamFlusher.java

private void getSigmaStrings(Fst fst, StringBuilder sbhex, StringBuilder sb) {
    HashSet<Integer> sigma = fst.getSigma();
    int cpv;

    for (Iterator<Integer> iter = sigma.iterator(); iter.hasNext();) {
        cpv = iter.next().intValue();
        sbhex.append(Integer.toString(cpv, 16) + " ");
        sb.append(symmap.getsym(cpv));
        if (iter.hasNext()) {
            sb.append(", ");
        } else {
            sb.append(" ");
        }
    }
}
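The loop above uses the spare hasNext() call after consuming an element to choose between ", " and a trailing space, so the list never ends with a dangling comma. The same lookahead idiom in a runnable form; a plain char cast stands in for symmap.getsym(), which is not part of the JDK.

import java.util.HashSet;
import java.util.Iterator;

public class SeparatorLookahead {
    public static void main(String[] args) {
        HashSet<Integer> sigma = new HashSet<Integer>();
        sigma.add(97); // 'a'
        sigma.add(98); // 'b'
        sigma.add(99); // 'c'

        StringBuilder sbhex = new StringBuilder();
        StringBuilder sb = new StringBuilder();
        for (Iterator<Integer> iter = sigma.iterator(); iter.hasNext();) {
            int cpv = iter.next().intValue();
            sbhex.append(Integer.toString(cpv, 16)).append(" ");
            sb.append((char) cpv); // stands in for symmap.getsym(cpv)
            // Lookahead: hasNext() after consuming decides the separator.
            sb.append(iter.hasNext() ? ", " : " ");
        }
        System.out.println(sbhex + "| " + sb);
    }
}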
From source file: StreamFlusher.java

public Object visit(ASTnet_list_get_sigma_func_call node, Object data) {
    // $@^__getSigma($fst)   one daughter: regexp
    // return the sigma as an array of nets, one for each symbol in
    // the sigma (minus special chars starting "__" that should not be
    // considered when promoting OTHER)
    node.jjtGetChild(0).jjtAccept(this, data);
    Fst fst = (Fst) (stack.pop());

    NetList resultList = new NetList();
    String specialSymbolPrefix = "__";

    HashSet<Integer> sigma = fst.getSigma();
    String symbolName = "";

    if (!sigma.isEmpty()) {
        for (Iterator<Integer> iter = sigma.iterator(); iter.hasNext();) {
            int cpv = iter.next().intValue();
            symbolName = symmap.getsym(cpv);
            if (!(symbolName.startsWith(specialSymbolPrefix))) {
                resultList.add(lib.OneArcFst(cpv));
            }
        }
    }
    stack.push(resultList);
    return data;
}
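This last example filters while iterating: symbols whose printable name starts with "__" are skipped before the result list is built. A self-contained sketch of that filter; String stand-ins replace NetList, symmap.getsym(), and lib.OneArcFst(), which belong to the interpreter, not the JDK.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;

public class SigmaFilter {
    public static void main(String[] args) {
        HashSet<Integer> sigma = new HashSet<Integer>();
        sigma.add(97); // 'a'
        sigma.add(98); // 'b'

        String specialSymbolPrefix = "__";
        List<String> resultList = new ArrayList<String>(); // stands in for NetList
        if (!sigma.isEmpty()) {
            for (Iterator<Integer> iter = sigma.iterator(); iter.hasNext();) {
                int cpv = iter.next().intValue();
                // A plain conversion stands in for symmap.getsym(cpv); in the
                // real interpreter the map can return multi-character special
                // names, which is what the "__" prefix test guards against.
                String symbolName = String.valueOf((char) cpv);
                if (!symbolName.startsWith(specialSymbolPrefix)) {
                    resultList.add(symbolName); // stands in for lib.OneArcFst(cpv)
                }
            }
        }
        System.out.println(resultList); // [a, b] in some order
    }
}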