List of usage examples for java.util HashSet contains
public boolean contains(Object o)
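Before the project-specific examples below, here is a minimal, self-contained sketch of the typical contains() usage pattern; the class name and the sample ids are illustrative only and do not come from any of the projects listed:

import java.util.HashSet;

public class ContainsExample {
    public static void main(String[] args) {
        // contains() returns true only if an element equal to the argument
        // (by equals()/hashCode()) is already in the set
        HashSet<String> processed = new HashSet<String>();
        processed.add("urn:uuid:1");
        System.out.println(processed.contains("urn:uuid:1")); // true
        System.out.println(processed.contains("urn:uuid:2")); // false

        // The recurring pattern in the examples below: skip keys already handled
        String id = "urn:uuid:1";
        if (!processed.contains(id)) {
            // ... process the new id ...
            processed.add(id);
        }
    }
}

Most of the examples that follow use exactly this "have I seen this key before?" check to avoid duplicate work.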
From source file:it.cnr.icar.eric.server.persistence.rdb.RegistryObjectDAO.java
private void getRegistryObjectsIdsFromResultSet(ResultSet rs, int startIndex, int maxResults,
        StringBuffer adhocQuerys, StringBuffer associations, StringBuffer auEvents, StringBuffer classifications,
        StringBuffer schemes, StringBuffer classificationNodes, StringBuffer externalIds, StringBuffer externalLinks,
        StringBuffer extrinsicObjects, StringBuffer federations, StringBuffer organizations, StringBuffer registrys,
        StringBuffer packages, StringBuffer serviceBindings, StringBuffer services, StringBuffer specificationLinks,
        StringBuffer subscriptions, StringBuffer users, StringBuffer persons) throws SQLException, RegistryException {
    HashSet<String> processed = new HashSet<String>();
    if (startIndex > 0) {
        // calling rs.next() is a workaround for some drivers, such
        // as Derby's, that do not set the cursor during call to
        // rs.relative(...)
        rs.next();
        @SuppressWarnings("unused")
        boolean onRow = rs.relative(startIndex - 1);
    }
    int cnt = 0;
    while (rs.next()) {
        String id = rs.getString("id");
        // Only process if not already processed.
        // This avoids OutOfMemoryError when a huge number of objects match.
        // Currently this happens when name and desc are null and their
        // predicates get pruned but the tablename stays.
        // TODO: Fix query pruning so tableName is pruned if not used.
        if (!(processed.contains(id))) {
            cnt++;
            String type = rs.getString("objectType");
            // System.err.println("id=" + id + " objectType=" + type +
            //     " extrinsicObjects=" + extrinsicObjects);
            // log.info(ServerResourceBundle.getInstance().getString("message.objectType=''",
            //     new Object[]{type}));
            if (type.equalsIgnoreCase(BindingUtility.CANONICAL_OBJECT_TYPE_ID_AdhocQuery)) {
                if (adhocQuerys.length() == 0) { adhocQuerys.append("'" + id + "'"); } else { adhocQuerys.append(",'" + id + "'"); }
            } else if (type.equalsIgnoreCase(BindingUtility.CANONICAL_OBJECT_TYPE_ID_Association)) {
                if (associations.length() == 0) { associations.append("'" + id + "'"); } else { associations.append(",'" + id + "'"); }
            } else if (type.equalsIgnoreCase(BindingUtility.CANONICAL_OBJECT_TYPE_ID_AuditableEvent)) {
                if (auEvents.length() == 0) { auEvents.append("'" + id + "'"); } else { auEvents.append(",'" + id + "'"); }
            } else if (type.equalsIgnoreCase(BindingUtility.CANONICAL_OBJECT_TYPE_ID_Classification)) {
                if (classifications.length() == 0) { classifications.append("'" + id + "'"); } else { classifications.append(",'" + id + "'"); }
            } else if (type.equalsIgnoreCase(BindingUtility.CANONICAL_OBJECT_TYPE_ID_ClassificationNode)) {
                if (classificationNodes.length() == 0) { classificationNodes.append("'" + id + "'"); } else { classificationNodes.append(",'" + id + "'"); }
            } else if (type.equalsIgnoreCase(BindingUtility.CANONICAL_OBJECT_TYPE_ID_ClassificationScheme)) {
                if (schemes.length() == 0) { schemes.append("'" + id + "'"); } else { schemes.append(",'" + id + "'"); }
            } else if (type.equalsIgnoreCase(BindingUtility.CANONICAL_OBJECT_TYPE_ID_ExternalIdentifier)) {
                if (externalIds.length() == 0) { externalIds.append("'" + id + "'"); } else { externalIds.append(",'" + id + "'"); }
            } else if (type.equalsIgnoreCase(BindingUtility.CANONICAL_OBJECT_TYPE_ID_ExternalLink)) {
                if (externalLinks.length() == 0) { externalLinks.append("'" + id + "'"); } else { externalLinks.append(",'" + id + "'"); }
            } else if (type.equalsIgnoreCase(BindingUtility.CANONICAL_OBJECT_TYPE_ID_ExtrinsicObject)) {
                if (extrinsicObjects.length() == 0) { extrinsicObjects.append("'" + id + "'"); } else { extrinsicObjects.append(",'" + id + "'"); }
            } else if (type.equalsIgnoreCase(BindingUtility.CANONICAL_OBJECT_TYPE_ID_Federation)) {
                if (federations.length() == 0) { federations.append("'" + id + "'"); } else { federations.append(",'" + id + "'"); }
            } else if (type.equalsIgnoreCase(BindingUtility.CANONICAL_OBJECT_TYPE_ID_Organization)) {
                if (organizations.length() == 0) { organizations.append("'" + id + "'"); } else { organizations.append(",'" + id + "'"); }
            } else if (type.equalsIgnoreCase(BindingUtility.CANONICAL_OBJECT_TYPE_ID_Registry)) {
                if (registrys.length() == 0) { registrys.append("'" + id + "'"); } else { registrys.append(",'" + id + "'"); }
            } else if (type.equalsIgnoreCase(BindingUtility.CANONICAL_OBJECT_TYPE_ID_RegistryPackage)) {
                if (packages.length() == 0) { packages.append("'" + id + "'"); } else { packages.append(",'" + id + "'"); }
            } else if (type.equalsIgnoreCase(BindingUtility.CANONICAL_OBJECT_TYPE_ID_ServiceBinding)) {
                if (serviceBindings.length() == 0) { serviceBindings.append("'" + id + "'"); } else { serviceBindings.append(",'" + id + "'"); }
            } else if (type.equalsIgnoreCase(BindingUtility.CANONICAL_OBJECT_TYPE_ID_Service)) {
                if (services.length() == 0) { services.append("'" + id + "'"); } else { services.append(",'" + id + "'"); }
            } else if (type.equalsIgnoreCase(BindingUtility.CANONICAL_OBJECT_TYPE_ID_SpecificationLink)) {
                if (specificationLinks.length() == 0) { specificationLinks.append("'" + id + "'"); } else { specificationLinks.append(",'" + id + "'"); }
            } else if (type.equalsIgnoreCase(BindingUtility.CANONICAL_OBJECT_TYPE_ID_Subscription)) {
                if (subscriptions.length() == 0) { subscriptions.append("'" + id + "'"); } else { subscriptions.append(",'" + id + "'"); }
            } else if (type.equalsIgnoreCase(BindingUtility.CANONICAL_OBJECT_TYPE_ID_User)) {
                if (users.length() == 0) { users.append("'" + id + "'"); } else { users.append(",'" + id + "'"); }
            } else if (type.equalsIgnoreCase(BindingUtility.CANONICAL_OBJECT_TYPE_ID_Person)) {
                if (persons.length() == 0) { persons.append("'" + id + "'"); } else { persons.append(",'" + id + "'"); }
            } else {
                // Type is user defined. Table could be either ExtrinsicObject or ExternalLink
                SQLPersistenceManagerImpl pm = SQLPersistenceManagerImpl.getInstance();
                ArrayList<String> queryParams = new ArrayList<String>();
                queryParams.add(id.toUpperCase());
                ExtrinsicObjectType eo = (ExtrinsicObjectType) pm.getRegistryObjectMatchingQuery(context,
                        "SELECT * from ExtrinsicObject where UPPER(id) = ?", queryParams, "ExtrinsicObject");
                if (eo != null) {
                    if (extrinsicObjects.length() == 0) { extrinsicObjects.append("'" + id + "'"); } else { extrinsicObjects.append(",'" + id + "'"); }
                } else {
                    ExternalLinkType el = (ExternalLinkType) pm.getRegistryObjectMatchingQuery(context,
                            "SELECT * from ExternalLink where UPPER(id) = ?", queryParams, "ExternalLink");
                    if (el != null) {
                        if (externalLinks.length() == 0) { externalLinks.append("'" + id + "'"); } else { externalLinks.append(",'" + id + "'"); }
                    } else {
                        throw new RegistryException(ServerResourceBundle.getInstance()
                                .getString("message.unknownObjectType", new Object[] { type }));
                    }
                }
            }
            processed.add(id);
            if (cnt == maxResults) {
                break;
            }
        }
    }
    if (cnt > 1000) {
        log.warn(ServerResourceBundle.getInstance().getString("message.WarningExcessiveResultSetSizeQUery",
                new Object[] { new Integer(cnt) }));
    }
}
From source file:com.dell.asm.asmcore.asmmanager.util.deployment.HardwareUtil.java
private RAIDConfiguration assignDisks(List<Controller> controllers, FilterEnvironment filterEnvironment,
        boolean forExternal) {
    AsmManagerNotEnoughDisksException lastError = null;
    RAIDConfiguration raidConfiguration = null;
    // Embedded Controller unable to deploy to ESX or RedHat6/CentOS 6 (Windows and SLES OK)
    if (!filterEnvironment.isWindowsOS()) {
        rejectEmbeddedController(controllers);
    }
    ControllerComparator controllerComparator = new ControllerComparator();
    Collections.sort(controllers, controllerComparator);
    for (Controller controller : controllers) {
        // RaidConfigWrapper will return data related to internal or external depending on how it's initialized.
        RaidConfigWrapper raidConfigWrapper = new RaidConfigWrapper(filterEnvironment.getRaidConfiguration(),
                controller, forExternal);
        // reset prepared configuration
        raidConfigWrapper.reset();
        HashSet<String> enclosures = raidConfigWrapper.getEnclosures();
        try {
            /**
             * For each controller:
             * Create list of SSDs sorted by Size and Drive number (available SSDs)
             * Create list of HDDs sorted by Size and Drive number (available HDDs)
             */
            List<PhysicalDisk> physicalDisks = controller.getPhysicalDisks();
            List<PhysicalDisk> availableSsds = new ArrayList<>();
            List<PhysicalDisk> availableHdds = new ArrayList<>();
            List<PhysicalDisk> ssdPool = new ArrayList<>();
            List<PhysicalDisk> hddPool = new ArrayList<>();
            // all available disks, need to detect disk type when we ask First or Last
            List<PhysicalDisk> diskPool = new ArrayList<>();
            for (PhysicalDisk pd : physicalDisks) {
                // Disk FQDD will be in the form of DiskName:EnclosureFQDD, so we can get the Enclosure FQDD easily from it
                String enclosureFqdd = pd.getFqdd().split(":", 2)[1];
                boolean isEmbeddedController = controller.getFqdd().contains("Embedded");
                // Embedded Controllers do not have enclosures
                if (enclosures.contains(enclosureFqdd) || (isEmbeddedController && !forExternal)) {
                    if (pd.getMediaType() == PhysicalDisk.PhysicalMediaType.SSD) {
                        availableSsds.add(pd);
                        ssdPool.add(pd);
                    } else if (pd.getMediaType() == PhysicalDisk.PhysicalMediaType.HDD) {
                        availableHdds.add(pd);
                        hddPool.add(pd);
                    }
                }
            }
            int requestedDrivesNum = 0;
            int ssdCount = 0;
            int hddCount = 0;
            /**
             * Distribute minimum disks. For each VD:
             * If Type is Require SSD, consume min # required from available SSDs. If not enough, fail.
             * If Type is Require HDD, consume min # required from available HDDs. If not enough, fail.
             * If Type is Any, consume min # required from SSDs, falling back to HDDs if needed. If not enough, fail.
             */
            int vdCnt = 0;
            Comparator<PhysicalDisk> lastComparator = null;
            List<VirtualDisk> currentVirtualDisks = raidConfigWrapper.getWorkingVirtualDisks();
            for (VirtualDiskConfiguration vdc : raidConfigWrapper.getVirtualDisks()) {
                Comparator<PhysicalDisk> comparator;
                // first VD requires special sorting
                if (vdCnt == 0) {
                    if (VirtualDiskConfiguration.DiskMediaType.first.equals(vdc.getDisktype())) {
                        comparator = new DiskComparatorAscending();
                    } else {
                        comparator = new DiskComparator();
                    }
                } else {
                    comparator = new DiskComparator();
                }
                if (comparator != lastComparator) {
                    Collections.sort(availableSsds, comparator);
                    Collections.sort(availableHdds, comparator);
                    Collections.sort(ssdPool, comparator);
                    Collections.sort(hddPool, comparator);
                    diskPool.clear();
                    diskPool.addAll(ssdPool);
                    diskPool.addAll(hddPool);
                    Collections.sort(diskPool, comparator);
                    lastComparator = comparator;
                }
                vdCnt++;
                // for Hadoop cases First and Last we detect media type from the first
                // drive in the list of all available disks. This list is already sorted as needed.
                PhysicalDisk.PhysicalMediaType mt = null;
                if (vdc.getDisktype().isPhysiclaDisdType()) {
                    mt = PhysicalDisk.PhysicalMediaType.fromUIValue(vdc.getDisktype().name());
                } else {
                    if (diskPool.size() > 0) {
                        mt = diskPool.get(0).getMediaType();
                    } else {
                        // this is just a safe harbor, if we don't have available disks - nothing will happen
                        mt = PhysicalDisk.PhysicalMediaType.ANY;
                    }
                }
                VirtualDisk virtualDisk = new VirtualDisk();
                virtualDisk.setController(controller.getFqdd());
                virtualDisk.setConfiguration(vdc);
                virtualDisk.setRaidLevel(vdc.getRaidlevel());
                virtualDisk.setMediaType(mt);
                currentVirtualDisks.add(virtualDisk);
                if (vdc.getComparator() == VirtualDiskConfiguration.ComparatorValue.minimum
                        || vdc.getComparator() == VirtualDiskConfiguration.ComparatorValue.exact)
                    requestedDrivesNum = vdc.getNumberofdisks();
                else {
                    requestedDrivesNum = RaidLevel.fromUIValue(vdc.getRaidlevel().name()).getMinDisksRequired();
                }
                if (PhysicalDisk.PhysicalMediaType.SSD.equals(mt)) {
                    requestedDrivesNum = consumeDisks(availableSsds, virtualDisk.getPhysicalDisks(), requestedDrivesNum);
                    if (requestedDrivesNum > 0) {
                        throw new AsmManagerNotEnoughDisksException("Not enough SSDs for virtual disk #" + vdCnt);
                    }
                } else if (PhysicalDisk.PhysicalMediaType.HDD.equals(mt)) {
                    requestedDrivesNum = consumeDisks(availableHdds, virtualDisk.getPhysicalDisks(), requestedDrivesNum);
                    if (requestedDrivesNum > 0) {
                        throw new AsmManagerNotEnoughDisksException("Not enough HDDs for virtual disk #" + vdCnt);
                    }
                } else {
                    int remains = consumeDisks(availableSsds, virtualDisk.getPhysicalDisks(), requestedDrivesNum);
                    if (remains > 0) {
                        // can't get all from SSD, release
                        releaseDisks(ssdPool, availableSsds, virtualDisk.getPhysicalDisks());
                        // try HDDs
                        remains = consumeDisks(availableHdds, virtualDisk.getPhysicalDisks(), requestedDrivesNum);
                    }
                    if (remains > 0) {
                        throw new AsmManagerNotEnoughDisksException("Not enough SSDs and HDDs for virtual disk #" + vdCnt);
                    }
                }
            }
            // Consume Min SSD Hot Spares from available SSDs. If not enough, fail (go to next controller)
            if (raidConfigWrapper.isEnableGlobalHotspares()) {
                /*
                 * If only SSD type virtual disks are created, the Global Hot spare must be all SSD.
                 * If only HDD type virtual disks are created, the Global Hot spare must be all HDD.
                 */
                for (VirtualDisk vd : raidConfigWrapper.getWorkingVirtualDisks()) {
                    for (String fqdd : vd.getPhysicalDisks()) {
                        for (PhysicalDisk pd : physicalDisks) {
                            if (pd.getFqdd().equals(fqdd)) {
                                if (pd.getMediaType() == PhysicalDisk.PhysicalMediaType.SSD) {
                                    ssdCount++;
                                } else if (pd.getMediaType() == PhysicalDisk.PhysicalMediaType.HDD) {
                                    hddCount++;
                                }
                            }
                        }
                    }
                }
                // if no HDD VD, all hot spares must be SSD
                int requestedSSDHS = 0;
                requestedDrivesNum = (hddCount == 0) ? raidConfigWrapper.getGlobalHotspares()
                        : raidConfigWrapper.getMinimumSsd();
                requestedSSDHS = requestedDrivesNum;
                if (availableSsds.size() >= requestedDrivesNum) {
                    requestedDrivesNum = consumeDisks(availableSsds, raidConfigWrapper.getWorkingSsdHotSpares(),
                            requestedDrivesNum);
                }
                if (requestedDrivesNum > 0) {
                    throw new AsmManagerNotEnoughDisksException("Not enough SSDs for hot spares");
                }
                // Consume (Min total hot spares - Min SSD Hot Spares) from available HDDs,
                // falling back to available SSDs if needed. If not enough, fail.
                requestedDrivesNum = raidConfigWrapper.getGlobalHotspares() - requestedSSDHS;
                if ((availableHdds.size() + availableSsds.size()) >= requestedDrivesNum) {
                    if (hddCount > 0) {
                        requestedDrivesNum = consumeDisks(availableHdds, raidConfigWrapper.getWorkingHddHotSpares(),
                                requestedDrivesNum);
                    }
                    if (requestedDrivesNum > 0 && ssdCount > 0) {
                        requestedDrivesNum = consumeDisks(availableSsds, raidConfigWrapper.getWorkingSsdHotSpares(),
                                requestedDrivesNum);
                    }
                }
                if (requestedDrivesNum > 0) {
                    throw new AsmManagerNotEnoughDisksException("Not enough SSD and HDD for hot spares");
                }
            }
            /**
             * Distribute remaining disks. For each VD:
             * If VD contains SSDs (as determined previously), and has Minimum or Maximum set, consume one SSD
             * if the total number of disks in the VD is less than the Maximum (if any).
             * If VD contains HDDs (as determined previously), and has Minimum or Maximum set, consume one HDD
             * if the total number of disks in the VD is less than the Maximum (if any).
             */
            int maxAllowed = 0;
            int numAllocatedDisks = 0;
            for (VirtualDisk vd : currentVirtualDisks) {
                if (vd.getConfiguration().getComparator() == VirtualDiskConfiguration.ComparatorValue.maximum) {
                    maxAllowed = vd.getConfiguration().getNumberofdisks();
                } else {
                    maxAllowed = Integer.MAX_VALUE;
                }
                numAllocatedDisks += vd.getPhysicalDisks().size();
            }
            while ((availableSsds.size() > 0 || availableHdds.size() > 0) && numAllocatedDisks < maxAllowed) {
                int nAlloc = numAllocatedDisks;
                for (VirtualDisk vd : currentVirtualDisks) {
                    if (vd.getConfiguration().getComparator() != VirtualDiskConfiguration.ComparatorValue.exact) {
                        if (vd.getConfiguration().getComparator() == VirtualDiskConfiguration.ComparatorValue.maximum
                                && vd.getPhysicalDisks().size() == vd.getConfiguration().getNumberofdisks()) {
                            continue;
                        }
                        // special case: limit max drives for raid1
                        if (vd.getRaidLevel() == VirtualDiskConfiguration.UIRaidLevel.raid1
                                && vd.getPhysicalDisks().size() == MAX_DRIVES_RAID1) {
                            continue;
                        }
                        int incrementNumber = getIncrementNumber(vd.getRaidLevel());
                        if (vd.getConfiguration().getDisktype() == VirtualDiskConfiguration.DiskMediaType.requiressd
                                && availableSsds.size() >= incrementNumber) {
                            int remains = consumeDisks(availableSsds, vd.getPhysicalDisks(), incrementNumber);
                            if (remains == 0)
                                numAllocatedDisks += incrementNumber;
                            else
                                releaseDisks(ssdPool, availableSsds, vd.getPhysicalDisks());
                        } else if (vd.getConfiguration().getDisktype() == VirtualDiskConfiguration.DiskMediaType.requirehdd
                                && availableHdds.size() >= incrementNumber) {
                            int remains = consumeDisks(availableHdds, vd.getPhysicalDisks(), incrementNumber);
                            if (remains == 0)
                                numAllocatedDisks += incrementNumber;
                            else
                                releaseDisks(hddPool, availableHdds, vd.getPhysicalDisks());
                        } else {
                            // what type do we choose for ANY? find that by the first disk
                            PhysicalDisk.PhysicalMediaType mediaType = null;
                            for (PhysicalDisk pd : physicalDisks) {
                                if (pd.getFqdd().equals(vd.getPhysicalDisks().get(0))) {
                                    mediaType = pd.getMediaType();
                                    break;
                                }
                            }
                            if (mediaType == PhysicalDisk.PhysicalMediaType.SSD
                                    && availableSsds.size() >= incrementNumber) {
                                int remains = consumeDisks(availableSsds, vd.getPhysicalDisks(), incrementNumber);
                                if (remains == 0)
                                    numAllocatedDisks += incrementNumber;
                                else
                                    releaseDisks(ssdPool, availableSsds, vd.getPhysicalDisks());
                            } else if (mediaType == PhysicalDisk.PhysicalMediaType.HDD
                                    && availableHdds.size() >= incrementNumber) {
                                int remains = consumeDisks(availableHdds, vd.getPhysicalDisks(), incrementNumber);
                                if (remains == 0)
                                    numAllocatedDisks += incrementNumber;
                                else
                                    releaseDisks(hddPool, availableHdds, vd.getPhysicalDisks());
                            }
                        }
                        // check if no more disks left
                        if ((availableSsds.size() == 0 && availableHdds.size() == 0) || numAllocatedDisks == maxAllowed)
                            break;
                    }
                }
                // if we were unable to allocate any disks for any VD - quit
                if (nAlloc == numAllocatedDisks)
                    break;
            }
            /**
             * All remaining disks go into the Global Hot Spare.
             */
            if (raidConfigWrapper.isEnableGlobalHotspares()) {
                // recalculate ssd and hdd VD
                ssdCount = 0;
                hddCount = 0;
                for (VirtualDisk vd : raidConfigWrapper.getWorkingVirtualDisks()) {
                    for (String fqdd : vd.getPhysicalDisks()) {
                        for (PhysicalDisk pd : physicalDisks) {
                            if (pd.getFqdd().equals(fqdd)) {
                                if (pd.getMediaType() == PhysicalDisk.PhysicalMediaType.SSD) {
                                    ssdCount++;
                                } else if (pd.getMediaType() == PhysicalDisk.PhysicalMediaType.HDD) {
                                    hddCount++;
                                }
                            }
                        }
                    }
                }
                if (availableSsds.size() > 0 && ssdCount > 0) {
                    consumeDisks(availableSsds, raidConfigWrapper.getWorkingSsdHotSpares(), availableSsds.size());
                }
                if (availableHdds.size() > 0 && hddCount > 0) {
                    consumeDisks(availableSsds, raidConfigWrapper.getWorkingHddHotSpares(), availableHdds.size());
                }
            }
            /**
             * If here, break and use this controller
             */
            lastError = null;
            raidConfiguration = raidConfigWrapper.getWorkingRaidConfig();
            break;
        } catch (AsmManagerNotEnoughDisksException e) {
            // use next controller
            logger.debug("Controller " + controller.getFqdd() + " failed to satisfy RAID requirements: "
                    + e.getMessage());
            lastError = e;
        }
    }
    if (lastError != null)
        throw lastError;
    return raidConfiguration;
}
From source file:edu.ku.brc.specify.tools.StrLocalizerApp.java
/**
 * @param hideExisting
 * @return
 */
private Locale doChooseLangLocale(final boolean hideExisting) {
    HashSet<String> existingLocs = new HashSet<String>();
    if (hideExisting) {
        for (String nm : rootDir.list()) {
            if (!nm.startsWith(".")) {
                existingLocs.add(nm);
            }
        }
    }
    Vector<Locale> locales = new Vector<Locale>();
    for (Locale l : Locale.getAvailableLocales()) {
        if (!hideExisting || !existingLocs.contains(getFullLang(l))) {
            locales.add(l);
        }
    }
    Collections.sort(locales, new Comparator<Locale>() {
        public int compare(Locale o1, Locale o2) {
            return o1.getDisplayName().compareTo(o2.getDisplayName());
        }
    });
    Vector<String> localeNames = new Vector<String>();
    for (Locale l : locales) {
        localeNames.add(l.getDisplayName());
    }
    ToggleButtonChooserDlg<String> chooser = new ToggleButtonChooserDlg<String>((Frame) null, "CHOOSE_LOCALE",
            localeNames, ToggleButtonChooserPanel.Type.RadioButton);
    chooser.setUseScrollPane(true);
    chooser.setVisible(true);
    if (!chooser.isCancelled()) {
        return locales.get(chooser.getSelectedIndex());
    }
    return null;
}
From source file:com.rinke.solutions.pinball.PinDmdEditor.java
/**
 * checks all pal mappings and releases masks if not used anymore
 */
private void checkReleaseMask() {
    HashSet<Integer> useMasks = new HashSet<>();
    for (PalMapping p : project.palMappings) {
        if (p.withMask) {
            useMasks.add(p.maskNumber);
        }
    }
    for (int i = 0; i < project.masks.size(); i++) {
        project.masks.get(i).locked = useMasks.contains(i);
    }
    switchMask(useMask);
}
From source file:at.treedb.db.Base.java
/**
 * Invokes a callback before updating the entity.
 *
 * @param dao
 *            {@code DAOiface} data access object
 * @param user
 *            user {@code User} who updates the entity
 * @param map
 *            map containing the data
 * @param info
 *            optional context object
 * @return {@code true} if an update takes place, {@code false} if not
 * @throws Exception
 */
private boolean invokeCallbackUpdate(DAOiface dao, User user, UpdateMap map, Object context) throws Exception {
    HashSet<String> set = callbackUpdateFields.get(this.getClass());
    if (set == null) {
        return false;
    }
    for (Enum<?> e : map.getMap().keySet()) {
        if (set.contains(e.name())) {
            this.callbackUpdate(dao, user, map, context);
            return true;
        }
    }
    return false;
}
From source file:com.vmware.identity.idm.server.provider.ldap.LdapProvider.java
Set<Group> getNestedGroups(ILdapConnectionEx connection, String membershipId, boolean groupNameOnly)
        throws NoSuchGroupException, InvalidPrincipalException {
    Set<Group> groups = new HashSet<Group>();
    if (ServerUtils.isNullOrEmpty(membershipId) == false) {
        final String ATTR_NAME_GROUP_CN = _ldapSchemaMapping
                .getGroupAttribute(IdentityStoreAttributeMapping.AttributeIds.GroupAttributeAccountName);
        final String ATTR_DESCRIPTION = _ldapSchemaMapping
                .getGroupAttribute(IdentityStoreAttributeMapping.AttributeIds.GroupAttributeDescription);
        final String ATTR_ENTRY_UUID = _ldapSchemaMapping
                .getGroupAttribute(IdentityStoreAttributeMapping.AttributeIds.GroupAttributeObjectId);
        ArrayList<String> attributeNames = getAttributesList(ATTR_NAME_GROUP_CN, ATTR_ENTRY_UUID,
                ATTR_DESCRIPTION, !groupNameOnly);
        HashSet<String> groupsProcessed = new HashSet<String>();
        Stack<String> groupsToProcess = new Stack<String>();
        groupsToProcess.push(membershipId);
        while (groupsToProcess.isEmpty() == false) {
            String currentMembershipId = groupsToProcess.pop();
            if (groupsProcessed.contains(currentMembershipId) == false) {
                String filter = String.format(_ldapSchemaMapping.getDirectParentGroupsQuery(),
                        LdapFilterString.encode(currentMembershipId));
                Collection<ILdapMessage> messages = null;
                try {
                    messages = ldap_search(connection, getStoreDataEx().getGroupBaseDn(), LdapScope.SCOPE_SUBTREE,
                            filter, attributeNames, DEFAULT_PAGE_SIZE, -1);
                    String groupMembershipId = null;
                    if (messages != null && messages.size() > 0) {
                        for (ILdapMessage message : messages) {
                            ILdapEntry[] entries = message.getEntries();
                            if ((entries != null) && (entries.length > 0)) {
                                for (ILdapEntry entry : entries) {
                                    Group g = buildGroupObject(entry, ATTR_NAME_GROUP_CN, ATTR_ENTRY_UUID,
                                            ATTR_DESCRIPTION, !groupNameOnly);
                                    if (this._groupGroupMembersListLinkIsDn) {
                                        groupMembershipId = entry.getDN();
                                    } else if (this._groupGroupMembersListLinkExists) {
                                        groupMembershipId = getOptionalFirstStringValue(entry
                                                .getAttributeValues(GROUP_GROUP_MEMBERS_LIST_LINK_ATTRIBUTE));
                                    }
                                    groups.add(g);
                                    if (ServerUtils.isNullOrEmpty(groupMembershipId) == false) {
                                        groupsToProcess.push(groupMembershipId);
                                    }
                                }
                            }
                        }
                    }
                } catch (NoSuchObjectLdapException e) {
                    log.error(String.format("Failed to search for group membership for [%s]",
                            currentMembershipId), e);
                    throw e;
                } finally {
                    ServerUtils.disposeLdapMessages(messages);
                } // try
                groupsProcessed.add(currentMembershipId);
            }
        }
    }
    return groups;
}
From source file:gedi.riboseq.inference.orf.OrfFinder.java
private void overlapUniqueCoverage(List<OrfWithCodons> orfs) {
    HashMap<Codon, HashSet<OrfWithCodons>> cod2Orf = new HashMap<Codon, HashSet<OrfWithCodons>>();
    int numCond = -1;
    for (OrfWithCodons orf : orfs)
        for (Codon c : orf.getCodons()) {
            cod2Orf.computeIfAbsent(c, x -> new HashSet<>()).add(orf);
            numCond = c.getActivity().length;
        }
    // now equivalence classes: gives you all codons that are consistent with a specific combination of orfs
    HashMap<HashSet<OrfWithCodons>, HashSet<Codon>> equi = new HashMap<HashSet<OrfWithCodons>, HashSet<Codon>>();
    for (Codon c : cod2Orf.keySet()) {
        equi.computeIfAbsent(cod2Orf.get(c), x -> new HashSet<>()).add(c);
    }
    // compute equi regions for their length
    HashMap<HashSet<OrfWithCodons>, Integer> equiLengths = new HashMap<HashSet<OrfWithCodons>, Integer>();
    for (HashSet<OrfWithCodons> e : equi.keySet()) {
        LinkedList<ArrayGenomicRegion> equiCodons = null;
        for (OrfWithCodons orf : e) {
            if (equiCodons == null) {
                equiCodons = new LinkedList<ArrayGenomicRegion>();
                for (int i = 0; i < orf.getRegion().getTotalLength(); i += 3)
                    equiCodons.add(orf.getRegion().map(new ArrayGenomicRegion(i, i + 3)));
            } else {
                Iterator<ArrayGenomicRegion> it = equiCodons.iterator();
                while (it.hasNext()) {
                    ArrayGenomicRegion cod = it.next();
                    if (!orf.getRegion().containsUnspliced(cod) || orf.getRegion().induce(cod.getStart()) % 3 != 0)
                        it.remove();
                }
            }
        }
        for (OrfWithCodons orf : orfs) {
            if (!e.contains(orf)) {
                Iterator<ArrayGenomicRegion> it = equiCodons.iterator();
                while (it.hasNext()) {
                    ArrayGenomicRegion cod = it.next();
                    if (orf.getRegion().containsUnspliced(cod) && orf.getRegion().induce(cod.getStart()) % 3 == 0)
                        it.remove();
                }
            }
        }
        equiLengths.put(e, equiCodons.size());
    }
    HashMap<OrfWithCodons, double[]> total = estimateByCoverage(equi, equiLengths, c -> c.getTotalActivity());
    double sum = EI.wrap(total.values()).mapToDouble(a -> a[0]).sum();
    for (OrfWithCodons orf : total.keySet())
        orf.setEstimatedTotalActivity(total.get(orf)[0], total.get(orf)[0] / sum);
    for (int i = 0; i < numCond; i++) {
        int ei = i;
        total = estimateByCoverage(equi, equiLengths, c -> c.getActivity()[ei]);
        sum = EI.wrap(total.values()).mapToDouble(a -> a[0]).sum();
        for (OrfWithCodons orf : total.keySet())
            orf.setEstimatedTotalActivity(i, total.get(orf)[0], total.get(orf)[0] / sum);
    }
}
From source file:de.unijena.bioinf.FragmentationTreeConstruction.computation.FragmentationPatternAnalysis.java
/**
 * Step 6: Decomposition
 * Decompose each peak as well as the parent peak
 */
public ProcessedInput performDecomposition(ProcessedInput input) {
    final FormulaConstraints constraints = input.getMeasurementProfile().getFormulaConstraints();
    final Ms2Experiment experiment = input.getExperimentInformation();
    final Deviation parentDeviation = input.getMeasurementProfile().getAllowedMassDeviation();
    // sort again...
    final ArrayList<ProcessedPeak> processedPeaks = new ArrayList<ProcessedPeak>(input.getMergedPeaks());
    Collections.sort(processedPeaks, new ProcessedPeak.MassComparator());
    final ProcessedPeak parentPeak = processedPeaks.get(processedPeaks.size() - 1);
    // decompose peaks
    final PeakAnnotation<DecompositionList> decompositionList = input
            .getOrCreatePeakAnnotation(DecompositionList.class);
    final MassToFormulaDecomposer decomposer = decomposers.getDecomposer(constraints.getChemicalAlphabet());
    final Ionization ion = experiment.getPrecursorIonType().getIonization();
    final Deviation fragmentDeviation = input.getMeasurementProfile().getAllowedMassDeviation();
    final List<MolecularFormula> pmds = decomposer.decomposeToFormulas(
            experiment.getPrecursorIonType().subtractIonAndAdduct(parentPeak.getOriginalMz()), parentDeviation,
            constraints);
    // add adduct to molecular formula of the ion - because the adduct might get lost during fragmentation
    {
        final MolecularFormula adduct = experiment.getPrecursorIonType().getAdduct();
        final ListIterator<MolecularFormula> iter = pmds.listIterator();
        while (iter.hasNext()) {
            final MolecularFormula f = iter.next();
            iter.set(f.add(adduct));
        }
    }
    decompositionList.set(parentPeak, DecompositionList.fromFormulas(pmds));
    int j = 0;
    for (ProcessedPeak peak : processedPeaks.subList(0, processedPeaks.size() - 1)) {
        peak.setIndex(j++);
        decompositionList.set(peak, DecompositionList.fromFormulas(
                decomposer.decomposeToFormulas(peak.getUnmodifiedMass(), fragmentDeviation, constraints)));
    }
    parentPeak.setIndex(processedPeaks.size() - 1);
    assert parentPeak == processedPeaks.get(processedPeaks.size() - 1);
    // important: for each two peaks which are within 2*massrange:
    // => make decomposition list disjoint
    final Deviation window = fragmentDeviation.multiply(2);
    for (int i = 1; i < processedPeaks.size() - 1; ++i) {
        if (window.inErrorWindow(processedPeaks.get(i).getMz(), processedPeaks.get(i - 1).getMz())) {
            final HashSet<MolecularFormula> right = new HashSet<MolecularFormula>(
                    decompositionList.get(processedPeaks.get(i)).getFormulas());
            final ArrayList<MolecularFormula> left = new ArrayList<MolecularFormula>(
                    decompositionList.get(processedPeaks.get(i - 1)).getFormulas());
            final double leftMass = ion.subtractFromMass(processedPeaks.get(i - 1).getMass());
            final double rightMass = ion.subtractFromMass(processedPeaks.get(i).getMass());
            final Iterator<MolecularFormula> leftIter = left.iterator();
            while (leftIter.hasNext()) {
                final MolecularFormula leftFormula = leftIter.next();
                if (right.contains(leftFormula)) {
                    if (Math.abs(leftFormula.getMass() - leftMass) < Math.abs(leftFormula.getMass() - rightMass)) {
                        right.remove(leftFormula);
                    } else {
                        leftIter.remove();
                    }
                }
            }
            decompositionList.set(processedPeaks.get(i - 1), DecompositionList.fromFormulas(left));
            decompositionList.set(processedPeaks.get(i), DecompositionList.fromFormulas(right));
        }
    }
    return postProcess(PostProcessor.Stage.AFTER_DECOMPOSING, input);
}
From source file:com.microsoft.tfs.client.common.ui.controls.vc.FilterItemsExistOnServerCommand.java
@Override
protected IStatus doRun(final IProgressMonitor progressMonitor) throws Exception {
    if (recursion) {
        localItems = listAllFiles(filterPath);
    } else {
        localItems = new File(filterPath).listFiles(filesFilter);
    }
    final TFSRepository repository = getRepository();
    if (localItems == null || repository == null) {
        localItems = new File[0];
        return Status.OK_STATUS;
    }
    final VersionControlClient vcClient = repository.getVersionControlClient();
    final HashSet<String> excludePaths = new HashSet<String>();
    final List<File> filteredFiles = new ArrayList<File>();
    /*
     * Query the pending changes cache to filter files already added.
     */
    for (final File f : localItems) {
        final PendingChange pendingChange = repository.getPendingChangeCache()
                .getPendingChangeByLocalPath(f.getPath());
        if (pendingChange != null && pendingChange.getChangeType().contains(ChangeType.ADD)) {
            excludePaths.add(f.getPath());
        }
    }
    final Workspace workspace = repository.getWorkspace();
    final String serverPath = workspace.getMappedServerPath(filterPath);
    if (serverPath != null) {
        final RecursionType recursionType = recursion ? RecursionType.FULL : RecursionType.ONE_LEVEL;
        final QueryItemsCommand queryCommand = new QueryItemsCommand(vcClient,
                new ItemSpec[] { new ItemSpec(serverPath, recursionType) }, LatestVersionSpec.INSTANCE,
                DeletedState.NON_DELETED, ItemType.ANY, GetItemsOptions.INCLUDE_BRANCH_INFO);
        final IStatus status = new CommandExecutor().execute(queryCommand);
        if (!status.isOK()) {
            return status;
        }
        if (queryCommand.getItemSets() != null && queryCommand.getItemSets().length > 0) {
            final ItemSet itemSet = queryCommand.getItemSets()[0];
            if (itemSet != null) {
                final Item[] items = itemSet.getItems();
                if (items != null) {
                    for (final Item item : items) {
                        final String localPath = workspace.getMappedLocalPath(item.getServerItem());
                        excludePaths.add(localPath);
                    }
                }
            }
        }
    }
    for (final File f : localItems) {
        if (recursion) {
            if (!excludePaths.contains(f.getPath())) {
                filteredFiles.add(f);
            }
        }
        // always show sub-folders when the user browses a folder to add
        else if (f.isDirectory() || !excludePaths.contains(f.getPath())) {
            filteredFiles.add(f);
        }
    }
    localItems = filteredFiles.toArray(new File[filteredFiles.size()]);
    return Status.OK_STATUS;
}
From source file:com.ikanow.infinit.e.api.config.source.SourceHandler.java
/**
 * testSource
 * @param sourceJson
 * @param nNumDocsToReturn
 * @param bReturnFullText
 * @param userIdStr
 * @return
 */
public ResponsePojo testSource(String sourceJson, int nNumDocsToReturn, boolean bReturnFullText,
        boolean bRealDedup, String userIdStr) {
    ResponsePojo rp = new ResponsePojo();
    try {
        SourcePojo source = null;
        SourcePojoSubstitutionApiMap apiMap = new SourcePojoSubstitutionApiMap(new ObjectId(userIdStr));
        try {
            source = ApiManager.mapFromApi(sourceJson, SourcePojo.class, apiMap);
            source.fillInSourcePipelineFields();
        } catch (Exception e) {
            rp.setResponse(new ResponseObject("Test Source", false,
                    "Error deserializing source (JSON is valid but does not match schema): " + e.getMessage()));
            return rp;
        }
        if (null == source.getKey()) {
            source.setKey(source.generateSourceKey()); // (a dummy value, not guaranteed to be unique)
        }
        if ((null == source.getExtractType()) || !source.getExtractType().equals("Federated")) {
            String testUrl = source.getRepresentativeUrl();
            if (null == testUrl) {
                rp.setResponse(
                        new ResponseObject("Test Source", false, "Error, source contains no URL to harvest"));
                return rp;
            }
        }
        if (null == source.getTags()) {
            source.setTags(new HashSet<String>());
        }
        // This is the only field that you don't normally need to specify in save but will cause
        // problems if it's not populated in test.
        ObjectId userId = new ObjectId(userIdStr);
        // Set owner (overwrite, for security reasons)
        source.setOwnerId(userId);
        if (null == source.getCommunityIds()) {
            source.setCommunityIds(new TreeSet<ObjectId>());
        }
        if (!source.getCommunityIds().isEmpty()) {
            // need to check that I'm allowed the specified community...
            if ((1 == source.getCommunityIds().size())
                    && (userId.equals(source.getCommunityIds().iterator().next()))) {
                // we're OK, only community id is user community
            } //TESTED
            else {
                HashSet<ObjectId> communities = SocialUtils.getUserCommunities(userIdStr);
                Iterator<ObjectId> it = source.getCommunityIds().iterator();
                while (it.hasNext()) {
                    ObjectId src = it.next();
                    if (!communities.contains(src)) {
                        rp.setResponse(new ResponseObject("Test Source", false,
                                "Authentication error: you don't belong to this community: " + src));
                        return rp;
                    } //TESTED
                }
            } //TESTED
        }
        // Always add the userId to the source community Id (so harvesters can tell if they're running in test mode or not...)
        source.addToCommunityIds(userId); // (ie user's personal community, always has same _id - not that it matters)
        // Check the source's admin status
        source.setOwnedByAdmin(RESTTools.adminLookup(userId.toString(), false));
        if (bRealDedup) { // Want to test update code, so ignore update cycle
            if (null != source.getRssConfig()) {
                source.getRssConfig().setUpdateCycle_secs(1); // always update
            }
        }
        HarvestController harvester = new HarvestController(true);
        if (nNumDocsToReturn > 100) { // (seems reasonable)
            nNumDocsToReturn = 100;
        }
        harvester.setStandaloneMode(nNumDocsToReturn, bRealDedup);
        List<DocumentPojo> toAdd = new LinkedList<DocumentPojo>();
        List<DocumentPojo> toUpdate = new LinkedList<DocumentPojo>();
        List<DocumentPojo> toRemove = new LinkedList<DocumentPojo>();
        if (null == source.getHarvestStatus()) {
            source.setHarvestStatus(new SourceHarvestStatusPojo());
        }
        String oldMessage = source.getHarvestStatus().getHarvest_message();
        // SPECIAL CASE: FOR FEDERATED QUERIES
        if ((null != source.getExtractType()) && source.getExtractType().equals("Federated")) {
            int federatedQueryEnts = 0;
            SourceFederatedQueryConfigPojo endpoint = null;
            try {
                endpoint = source.getProcessingPipeline().get(0).federatedQuery;
            } catch (Exception e) {
            }
            if (null == endpoint) {
                rp.setResponse(
                        new ResponseObject("Test Source", false, "source error: no federated query specified"));
                return rp;
            }
            AdvancedQueryPojo testQuery = null;
            String errMessage = "no query specified";
            try {
                testQuery = AdvancedQueryPojo.fromApi(endpoint.testQueryJson, AdvancedQueryPojo.class);
            } catch (Exception e) {
                errMessage = e.getMessage();
            }
            if (null == testQuery) {
                rp.setResponse(new ResponseObject("Test Source", false,
                        "source error: need to specify a valid IKANOW query to test federated queries, error: "
                                + errMessage));
                return rp;
            }
            // OK if we're here then we can test the query
            SimpleFederatedQueryEngine testFederatedQuery = new SimpleFederatedQueryEngine();
            endpoint.parentSource = source;
            testFederatedQuery.addEndpoint(endpoint);
            ObjectId queryId = new ObjectId();
            String[] communityIdStrs = new String[source.getCommunityIds().size()];
            int i = 0;
            for (ObjectId commId : source.getCommunityIds()) {
                communityIdStrs[i] = commId.toString();
                i++;
            }
            testFederatedQuery.setTestMode(true);
            testFederatedQuery.preQueryActivities(queryId, testQuery, communityIdStrs);
            StatisticsPojo stats = new StatisticsPojo();
            stats.setSavedScores(0, 0);
            rp.setStats(stats);
            ArrayList<BasicDBObject> toAddTemp = new ArrayList<BasicDBObject>(1);
            testFederatedQuery.postQueryActivities(queryId, toAddTemp, rp);
            for (BasicDBObject docObj : toAddTemp) {
                DocumentPojo doc = DocumentPojo.fromDb(docObj, DocumentPojo.class);
                if (bReturnFullText) {
                    doc.setFullText(docObj.getString(DocumentPojo.fullText_));
                    doc.makeFullTextNonTransient();
                }
                if (null != doc.getEntities()) {
                    federatedQueryEnts += doc.getEntities().size();
                }
                //Metadata workaround:
                @SuppressWarnings("unchecked")
                LinkedHashMap<String, Object[]> meta = (LinkedHashMap<String, Object[]>) docObj
                        .get(DocumentPojo.metadata_);
                if (null != meta) {
                    Object metaJson = meta.get("json");
                    if (metaJson instanceof Object[]) { // (in this case ... non-cached, need to recopy in, I forget why)
                        doc.addToMetadata("json", (Object[]) metaJson);
                    }
                }
                toAdd.add(doc);
            }
            // (currently can't run harvest source federated query)
            if (0 == federatedQueryEnts) { // (more fed query exceptions)
                source.getHarvestStatus().setHarvest_message(
                        "Warning: no entities extracted, probably docConversionMap is wrong?");
            } else {
                source.getHarvestStatus().setHarvest_message(federatedQueryEnts + " entities extracted");
            }
        } //TESTED (END FEDERATED QUERY TEST MODE, WHICH IS A BIT DIFFERENT)
        else {
            harvester.harvestSource(source, toAdd, toUpdate, toRemove);
        }
        // (don't parrot the old message back - v confusing)
        if (oldMessage == source.getHarvestStatus().getHarvest_message()) { // (ptr ==)
            source.getHarvestStatus()
                    .setHarvest_message("(no documents extracted - likely a source or configuration error)");
        } //TESTED
        String message = null;
        if ((null != source.getHarvestStatus()) && (null != source.getHarvestStatus().getHarvest_message())) {
            message = source.getHarvestStatus().getHarvest_message();
        } else {
            message = "";
        }
        List<String> errMessagesFromSourceDeser = apiMap.getErrorMessages();
        if (null != errMessagesFromSourceDeser) {
            StringBuffer sbApiMapErr = new StringBuffer("Substitution errors:\n");
            for (String err : errMessagesFromSourceDeser) {
                sbApiMapErr.append(err).append("\n");
            }
            message = message + "\n" + sbApiMapErr.toString();
        } //TESTED (by hand)
        if ((null != source.getHarvestStatus())
                && (HarvestEnum.error == source.getHarvestStatus().getHarvest_status())) {
            rp.setResponse(new ResponseObject("Test Source", false, "source error: " + message));
            rp.setData(toAdd, new DocumentPojoApiMap());
        } else {
            if ((null == message) || message.isEmpty()) {
                message = "no messages from harvester";
            }
            rp.setResponse(new ResponseObject("Test Source", true,
                    "successfully returned " + toAdd.size() + " docs: " + message));
            try {
                // If grabbing full text
                // Also some logstash/custom specific logic - these aren't docs so just output the entire record
                boolean isLogstash = (null != source.getExtractType())
                        && source.getExtractType().equalsIgnoreCase("logstash");
                boolean isCustom = (null != source.getExtractType())
                        && source.getExtractType().equalsIgnoreCase("custom");
                List<BasicDBObject> records = null;
                if (bReturnFullText || isLogstash || isCustom) {
                    for (DocumentPojo doc : toAdd) {
                        if (isLogstash || isCustom) {
                            if (null == records) {
                                records = new ArrayList<BasicDBObject>(toAdd.size());
                            }
                            BasicDBObject dbo = (BasicDBObject) doc.getMetadata().get("record")[0];
                            Object test = dbo.get("_id");
                            if ((null != test) && (test instanceof ObjectId)) {
                                dbo.remove("_id"); // (unless it's a custom _id added from logstash then remove it)
                            }
                            records.add(dbo);
                        } //TESTED
                        else if (bReturnFullText) {
                            doc.makeFullTextNonTransient();
                        }
                    }
                } //TESTED
                if (null != records) {
                    rp.setData(records, (BasePojoApiMap<BasicDBObject>) null);
                } //TESTED
                else {
                    rp.setData(toAdd, new DocumentPojoApiMap());
                } //TESTED
                //Test deserialization:
                rp.toApi();
            } catch (Exception e) {
                //e.printStackTrace();
                StringBuffer sb = new StringBuffer();
                Globals.populateStackTrace(sb, e);
                rp.setData(new BasicDBObject("error_message", "Error deserializing documents: " + sb.toString()),
                        null);
            }
        }
    } catch (Exception e) {
        // If an exception occurs log the error
        logger.error("Exception Message: " + e.getMessage(), e);
        rp.setResponse(new ResponseObject("Test Source", false, "Error testing source: " + e.getMessage()));
    } catch (Error e) {
        // If an error occurs log it
        logger.error("Exception Message: " + e.getMessage(), e);
        rp.setResponse(new ResponseObject("Test Source", false,
                "Configuration/Installation error: " + e.getMessage()));
    }
    return rp;
}