Example usage for java.util Collection remove

Introduction

This page collects real-world usage examples of java.util.Collection.remove, drawn from open source projects.

Prototype

boolean remove(Object o);

Document

Removes a single instance of the specified element from this collection, if it is present (optional operation).
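
A minimal, self-contained sketch of this contract (the demo class is hypothetical): at most one matching element is removed, matching is performed via equals(), and the boolean return value reports whether the collection changed.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;

public class RemoveContractDemo {
    public static void main(String[] args) {
        Collection<String> items = new ArrayList<>(Arrays.asList("a", "b", "b"));

        // remove deletes at most one matching element, chosen via equals()
        boolean changed = items.remove("b");
        System.out.println(changed); // true
        System.out.println(items);   // [a, b]  (the second "b" is still present)

        // returns false, and leaves the collection untouched, when nothing matches
        System.out.println(items.remove("z")); // false
    }
}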

Usage

From source file:com.hiperf.common.ui.server.storage.impl.PersistenceHelper.java

private void processRemovedManyToMany(ObjectsToPersist toPersist,
        Map<com.hiperf.common.ui.shared.util.Id, INakedObject> res, Map<Object, IdHolder> newIdByOldId,
        EntityManager em) throws ClassNotFoundException, IntrospectionException, PersistenceException,
        IllegalAccessException, InvocationTargetException, NoSuchFieldException {
    Map<String, Map<com.hiperf.common.ui.shared.util.Id, Map<String, List<com.hiperf.common.ui.shared.util.Id>>>> manyToManyRemoved = toPersist
            .getManyToManyRemovedByClassName();
    if (manyToManyRemoved != null && !manyToManyRemoved.isEmpty()) {
        for (Entry<String, Map<com.hiperf.common.ui.shared.util.Id, Map<String, List<com.hiperf.common.ui.shared.util.Id>>>> e : manyToManyRemoved
                .entrySet()) {
            String className = e.getKey();
            Map<com.hiperf.common.ui.shared.util.Id, Map<String, List<com.hiperf.common.ui.shared.util.Id>>> map = e
                    .getValue();
            if (map != null && !map.isEmpty()) {
                Class<?> clazz = Class.forName(className);
                for (Entry<com.hiperf.common.ui.shared.util.Id, Map<String, List<com.hiperf.common.ui.shared.util.Id>>> entry : map
                        .entrySet()) {
                    com.hiperf.common.ui.shared.util.Id id = entry.getKey();
                    Map<String, List<com.hiperf.common.ui.shared.util.Id>> m = entry.getValue();
                    if (m != null && !m.isEmpty()) {
                        Object objId = id.getFieldValues().get(0);
                        if (newIdByOldId.containsKey(objId))
                            objId = newIdByOldId.get(objId).getId();
                        Object o = em.find(clazz, objId);
                        if (o != null) {
                            PropertyDescriptor[] pds = propertyDescriptorsByClassName.get(className);
                            for (Entry<String, List<com.hiperf.common.ui.shared.util.Id>> ee : m.entrySet()) {
                                String attr = ee.getKey();
                                List<com.hiperf.common.ui.shared.util.Id> ll = ee.getValue();
                                if (ll != null && !ll.isEmpty()) {
                                    Collection coll = null;
                                    Class classInColl = null;
                                    PropertyDescriptor myPd = null;
                                    for (PropertyDescriptor pd : pds) {
                                        if (pd.getName().equals(attr)) {
                                            myPd = pd;
                                            coll = (Collection) pd.getReadMethod().invoke(o,
                                                    StorageService.emptyArg);
                                            break;
                                        }
                                    }
                                    if (coll != null) {
                                        ParameterizedType genericType = (ParameterizedType) clazz
                                                .getDeclaredField(myPd.getName()).getGenericType();
                                        if (genericType != null) {
                                            for (Type t : genericType.getActualTypeArguments()) {

                                                if (t instanceof Class
                                                        && INakedObject.class.isAssignableFrom((Class) t)) {
                                                    classInColl = (Class) t;
                                                    break;
                                                }
                                            }
                                        }
                                        for (com.hiperf.common.ui.shared.util.Id i : ll) {
                                            Object idObj = i.getFieldValues().get(0);
                                            if (newIdByOldId.containsKey(idObj))
                                                idObj = newIdByOldId.get(idObj).getId(); // unwrap the new ID, as for objId above
                                            Object linkedObj = em.find(classInColl, idObj);
                                            coll.remove(linkedObj);
                                        }
                                    }
                                }
                            }
                            res.put(id, (INakedObject) em.merge(o));
                        }
                    }

                }
            }
        }
    }
}
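
A note on the coll.remove(linkedObj) call above: Collection.remove matches via equals(), so for entity classes that do not override equals() it only succeeds when the very same instance is passed. That typically holds here because, within one persistence context, em.find returns the already-managed instance. A minimal sketch of the pitfall with plain objects (Entity is a hypothetical class):

import java.util.ArrayList;
import java.util.Collection;

public class IdentityRemoveDemo {
    static class Entity { // no equals()/hashCode() override: identity semantics
        final long id;
        Entity(long id) { this.id = id; }
    }

    public static void main(String[] args) {
        Collection<Entity> coll = new ArrayList<>();
        Entity stored = new Entity(1L);
        coll.add(stored);

        coll.remove(new Entity(1L));     // different instance, identity equals: no-op
        System.out.println(coll.size()); // 1

        coll.remove(stored);             // the same instance: removed
        System.out.println(coll.size()); // 0
    }
}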

From source file:com.ah.ui.actions.BaseAction.java

protected boolean checkCVGUsed(Collection<Long> toRemoveIds) {
    if ((getSelectedL2FeatureKey().equals(L2_FEATURE_MANAGED_HIVE_APS)
            || getSelectedL2FeatureKey().equals(L2_FEATURE_VPN_GATEWAYS)
            || getSelectedL2FeatureKey().equals(L2_FEATURE_CONFIG_HIVE_APS)
            || getSelectedL2FeatureKey().equals(L2_FEATURE_CONFIG_VPN_GATEWAYS)) && toRemoveIds != null
            && !toRemoveIds.isEmpty()) {
        String rmIds = null;
        boolean bindCvg = false;
        for (Long rmId : toRemoveIds) {
            if (rmIds == null || "".equals(rmIds)) {
                rmIds = String.valueOf(rmId);
            } else {
                rmIds += "," + String.valueOf(rmId);
            }
        }
        if (rmIds != null && !"".equals(rmIds)) {
            rmIds = "(" + rmIds + ")";
        }
        String sqlStr = "select hiveApId from VPN_GATEWAY_SETTING where hiveApId in " + rmIds;
        List<?> rmHiveApIds = QueryUtil.executeNativeQuery(sqlStr);
        for (Object rmId : rmHiveApIds) {
            Long id = Long.valueOf(String.valueOf(rmId));
            toRemoveIds.remove(id);
            bindCvg = true;
        }
        if (bindCvg) {
            addActionError(getText("error.hiveap.cvg.rmError.beBind"));
            return false;
        }
    }
    return true;
}
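
The loop above removes the bound IDs one at a time and tracks a flag by hand. Collection.removeAll expresses the same bulk removal and returns whether the collection changed; a minimal sketch with stand-in data:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;

public class RemoveAllDemo {
    public static void main(String[] args) {
        Collection<Long> toRemoveIds = new ArrayList<>(Arrays.asList(1L, 2L, 3L, 4L));
        List<Long> boundIds = Arrays.asList(2L, 4L); // stand-in for the IDs the query returns

        // removeAll returns true if the collection changed, mirroring the bindCvg flag
        boolean bindCvg = toRemoveIds.removeAll(boundIds);
        System.out.println(bindCvg);     // true
        System.out.println(toRemoveIds); // [1, 3]
    }
}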

From source file:org.apache.hadoop.hdfs.server.namenode.FSNamesystem.java

/**
 * Modify (block-->datanode) map.  Possibly generate 
 * replication tasks, if the removed block is still valid.
 */
synchronized void removeStoredBlock(Block block, DatanodeDescriptor node) {
    NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: " + block + " from " + node.getName());
    if (!blocksMap.removeNode(block, node)) {
        NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: " + block
                + " has already been removed from node " + node);
        return;
    }

    //
    // It's possible that the block was removed because of a datanode
    // failure.  If the block is still valid, check if replication is
    // necessary.  In that case, put block on a possibly-will-
    // be-replicated list.
    //
    INode fileINode = blocksMap.getINode(block);
    if (fileINode != null) {
        decrementSafeBlockCount(block);
        updateNeededReplications(block, -1, 0);
    }

    //
    // We've removed a block from a node, so it's definitely no longer
    // in "excess" there.
    //
    Collection<Block> excessBlocks = excessReplicateMap.get(node.getStorageID());
    if (excessBlocks != null) {
        if (excessBlocks.remove(block)) {
            excessBlocksCount--;
            NameNode.stateChangeLog
                    .debug("BLOCK* NameSystem.removeStoredBlock: " + block + " is removed from excessBlocks");
            if (excessBlocks.size() == 0) {
                excessReplicateMap.remove(node.getStorageID());
            }
        }
    }

    // Remove the replica from corruptReplicas
    corruptReplicas.removeFromCorruptReplicasMap(block, node);
}
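
The excessBlocks handling above is a common map-of-collections idiom: remove the element, and when the per-key collection becomes empty, evict the map entry itself so the map does not accumulate empty sets. A generic sketch of the pattern (names are illustrative):

import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;

public class MultimapCleanupDemo {
    // Remove one value from the collection mapped to key; drop the entry once it is empty.
    static <K, V> boolean removeValue(Map<K, Collection<V>> map, K key, V value) {
        Collection<V> values = map.get(key);
        if (values != null && values.remove(value)) {
            if (values.isEmpty()) {
                map.remove(key); // nothing left for this key, evict the entry itself
            }
            return true;
        }
        return false;
    }

    public static void main(String[] args) {
        Map<String, Collection<Integer>> excess = new HashMap<>();
        excess.put("node-1", new HashSet<>(Arrays.asList(42)));
        System.out.println(removeValue(excess, "node-1", 42)); // true
        System.out.println(excess.containsKey("node-1"));      // false
    }
}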

From source file:org.apache.hadoop.hdfs.server.namenode.FSNamesystem.java

/**
 * Append to an existing file in the namespace.
 */
LocatedBlock appendFile(String src, String holder, String clientMachine) throws IOException {
    if (!supportAppends) {
        throw new IOException("Append to hdfs not supported."
                + " Please refer to dfs.support.append configuration parameter.");
    }
    startFileInternal(src, null, holder, clientMachine, false, true, false, (short) maxReplication, (long) 0);
    getEditLog().logSync();

    //
    // Create a LocatedBlock object for the last block of the file
    // to be returned to the client. Return null if the file does not
    // have a partial block at the end.
    //
    LocatedBlock lb = null;
    synchronized (this) {
        // Need to re-check existence here, since the file may have been deleted
        // in between the synchronized blocks
        INodeFileUnderConstruction file = checkLease(src, holder);

        Block[] blocks = file.getBlocks();
        if (blocks != null && blocks.length > 0) {
            Block last = blocks[blocks.length - 1];
            BlockInfo storedBlock = blocksMap.getStoredBlock(last);
            if (file.getPreferredBlockSize() > storedBlock.getNumBytes()) {
                long fileLength = file.computeContentSummary().getLength();
                DatanodeDescriptor[] targets = new DatanodeDescriptor[blocksMap.numNodes(last)];
                Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(last);
                for (int i = 0; it != null && it.hasNext(); i++) {
                    targets[i] = it.next();
                }
                // remove the replica locations of this block from the blocksMap
                for (int i = 0; i < targets.length; i++) {
                    targets[i].removeBlock(storedBlock);
                }
                // set the locations of the last block in the lease record
                file.setLastBlock(storedBlock, targets);

                lb = new LocatedBlock(last, targets, fileLength - storedBlock.getNumBytes());
                if (isAccessTokenEnabled) {
                    lb.setBlockToken(accessTokenHandler.generateToken(lb.getBlock(),
                            EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE)));
                }

                // Remove block from replication queue.
                updateNeededReplications(last, 0, 0);

                // remove this block from the list of pending blocks to be deleted. 
                // This reduces the possibility of triggering HADOOP-1349.
                //
                for (DatanodeDescriptor dd : targets) {
                    String datanodeId = dd.getStorageID();
                    Collection<Block> v = recentInvalidateSets.get(datanodeId);
                    if (v != null && v.remove(last)) {
                        if (v.isEmpty()) {
                            recentInvalidateSets.remove(datanodeId);
                        }
                        pendingDeletionBlocksCount--;
                    }
                }
            }
        }
    }
    if (lb != null) {
        if (NameNode.stateChangeLog.isDebugEnabled()) {
            NameNode.stateChangeLog.debug("DIR* NameSystem.appendFile: file " + src + " for " + holder + " at "
                    + clientMachine + " block " + lb.getBlock() + " block size " + lb.getBlock().getNumBytes());
        }
    }

    if (auditLog.isInfoEnabled() && isExternalInvocation()) {
        logAuditEvent(UserGroupInformation.getCurrentUser(), Server.getRemoteIp(), "append", src, null, null);
    }
    return lb;
}

From source file:org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.java

/**
 * We want "replication" replicas of the block, but we now have too many.
 * In this method, copy enough nodes from 'srcNodes' into 'dstNodes' such that:
 *
 * srcNodes.size() - dstNodes.size() == replication
 *
 * We pick nodes so that replicas stay spread across racks, and we also
 * try hard to pick one with the least free space.
 * The algorithm first picks the node with the least free space among the
 * nodes on a rack holding more than one replica of the block, so removing
 * such a replica won't remove a rack.
 * If no such node is available, it picks the node with the least free space.
 */
private void chooseExcessReplicates(final Collection<DatanodeStorageInfo> nonExcess, Block b, short replication,
        DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint, BlockPlacementPolicy replicator) {
    assert namesystem.hasWriteLock();
    // first form a rack to datanodes map
    BlockCollection bc = getBlockCollection(b);
    final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(bc.getStoragePolicyID());
    final List<StorageType> excessTypes = storagePolicy.chooseExcess(replication,
            DatanodeStorageInfo.toStorageTypes(nonExcess));

    final Map<String, List<DatanodeStorageInfo>> rackMap = new HashMap<String, List<DatanodeStorageInfo>>();
    final List<DatanodeStorageInfo> moreThanOne = new ArrayList<DatanodeStorageInfo>();
    final List<DatanodeStorageInfo> exactlyOne = new ArrayList<DatanodeStorageInfo>();

    // split nodes into two sets
    // moreThanOne contains nodes on rack with more than one replica
    // exactlyOne contains the remaining nodes
    replicator.splitNodesWithRack(nonExcess, rackMap, moreThanOne, exactlyOne);

    // pick one node to delete that favors the delete hint
    // otherwise pick one with least space from priSet if it is not empty
    // otherwise one node with least space from remains
    boolean firstOne = true;
    final DatanodeStorageInfo delNodeHintStorage = DatanodeStorageInfo.getDatanodeStorageInfo(nonExcess,
            delNodeHint);
    final DatanodeStorageInfo addedNodeStorage = DatanodeStorageInfo.getDatanodeStorageInfo(nonExcess,
            addedNode);
    while (nonExcess.size() - replication > 0) {
        final DatanodeStorageInfo cur;
        if (useDelHint(firstOne, delNodeHintStorage, addedNodeStorage, moreThanOne, excessTypes)) {
            cur = delNodeHintStorage;
        } else { // regular excessive replica removal
            cur = replicator.chooseReplicaToDelete(bc, b, replication, moreThanOne, exactlyOne, excessTypes);
        }
        firstOne = false;

        // adjust rackmap, moreThanOne, and exactlyOne
        replicator.adjustSetsWithChosenReplica(rackMap, moreThanOne, exactlyOne, cur);

        nonExcess.remove(cur);
        addToExcessReplicate(cur.getDatanodeDescriptor(), b);

        //
        // The 'excessblocks' tracks blocks until we get confirmation
        // that the datanode has deleted them; the only way we remove them
        // is when we get a "removeBlock" message.  
        //
        // The 'invalidate' list is used to inform the datanode the block 
        // should be deleted.  Items are removed from the invalidate list
        // upon giving instructions to the datanode.
        //
        addToInvalidates(b, cur.getDatanodeDescriptor());
        blockLog.info("BLOCK* chooseExcessReplicates: " + "(" + cur + ", " + b
                + ") is added to invalidated blocks set");
    }
}

From source file:pcgen.core.Equipment.java

/**
 * Where a magic eqmod is present, remove the masterwork eqmod from the
 * list.
 * 
 * @param commonList
 *            The list of eqmods on both heads (or only head)
 */
private void suppressMasterwork(Collection<EquipmentModifier> commonList) {
    // Look for a modifier named "masterwork" (assumption: this is marked as
    // "assigntoall")
    EquipmentModifier eqMaster = commonList.stream().filter(
            eqMod -> "MASTERWORK".equalsIgnoreCase(eqMod.getDisplayName()) || eqMod.isIType("Masterwork"))
            .findFirst().orElse(null);

    if (eqMaster == null) {
        return;
    }
    if (heads.stream().anyMatch(head -> getMagicBonus(head.getListFor(ListKey.EQMOD)) != null)) {
        commonList.remove(eqMaster);
    }
}
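
Because eqMaster is located by streaming the same collection, the remove call is guaranteed to find its match: the reference is identical and equals() is reflexive. A condensed sketch of this find-then-conditionally-remove pattern (data is illustrative):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;

public class FindThenRemoveDemo {
    public static void main(String[] args) {
        Collection<String> mods = new ArrayList<>(Arrays.asList("Flaming", "Masterwork", "Keen"));

        // locate the element first, then remove it only if some other condition holds
        String master = mods.stream()
                .filter("Masterwork"::equalsIgnoreCase)
                .findFirst().orElse(null);

        boolean hasMagicBonus = true; // stand-in for the magic-eqmod check above
        if (master != null && hasMagicBonus) {
            mods.remove(master);
        }
        System.out.println(mods); // [Flaming, Keen]
    }
}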

From source file:com.nextep.designer.sqlgen.generic.impl.JDBCCapturer.java

/**
 * Returns a <code>Collection</code> of the indexes for the specified table
 * present in the data source pointed to by the connection object provided
 * by the specified <code>context</code> and notifies the specified
 * <code>monitor</code> while capturing.
 *
 * @param context
 *            a {@link ICaptureContext} to store the captured objects
 * @param monitor
 *            the {@link IProgressMonitor} to notify while capturing objects
 * @param table
 *            the {@link IBasicTable} for which indexes must be
 *            captured
 * @return a {@link Collection} of {@link IIndex} objects if the specified
 *         table has indexes, an empty <code>Collection</code> otherwise
 */
private Collection<IIndex> getTableIndexes(ICaptureContext context, IProgressMonitor monitor,
        IBasicTable table) {
    Collection<IIndex> indexes = new ArrayList<IIndex>();
    IFormatter formatter = getConnectionVendor(context).getNameFormatter();

    final String tableName = table.getName();
    try {
        final DatabaseMetaData md = ((Connection) context.getConnectionObject()).getMetaData();

        ResultSet rset = null;
        if (md != null) {
            rset = md.getIndexInfo(getObjectOrContextCatalog(context, table),
                    getObjectOrContextSchema(context, table), tableName, false, false);
            CaptureHelper.updateMonitor(monitor, getCounter(), 1, 1);
        }

        if (rset != null) {
            IIndex currIndex = null;
            String currIndexName = null;
            boolean indexIsValid = false;

            try {
                while (rset.next()) {
                    final String indexName = rset.getString(COLUMN_NAME_INDEX_NAME);
                    final boolean nonUnique = rset.getBoolean(COLUMN_NAME_NON_UNIQUE);
                    final String indexColumnName = rset.getString(COLUMN_NAME_COLUMN_NAME);
                    final String ascOrDesc = rset.getString(COLUMN_NAME_ASC_OR_DESC);
                    final short indexType = rset.getShort(COLUMN_NAME_TYPE);

                    if (indexName != null && !"".equals(indexName.trim())) { //$NON-NLS-1$
                        if (LOGGER.isDebugEnabled()) {
                            String logPrefix = "[" + indexName + "]"; //$NON-NLS-1$ //$NON-NLS-2$
                            LOGGER.debug("= " + logPrefix + " Index Metadata ="); //$NON-NLS-1$ //$NON-NLS-2$
                            LOGGER.debug(logPrefix + "[" + COLUMN_NAME_INDEX_NAME + "] " //$NON-NLS-1$ //$NON-NLS-2$
                                    + indexName);
                            LOGGER.debug(logPrefix + "[" + COLUMN_NAME_NON_UNIQUE + "] " //$NON-NLS-1$ //$NON-NLS-2$
                                    + nonUnique);
                            LOGGER.debug(logPrefix + "[" + COLUMN_NAME_COLUMN_NAME + "] " //$NON-NLS-1$ //$NON-NLS-2$
                                    + indexColumnName);
                            LOGGER.debug(logPrefix + "[" + COLUMN_NAME_ASC_OR_DESC + "] " //$NON-NLS-1$ //$NON-NLS-2$
                                    + ascOrDesc);
                            LOGGER.debug(logPrefix + "[" + COLUMN_NAME_TYPE + "] " + indexType); //$NON-NLS-1$ //$NON-NLS-2$
                        }

                        if (null == currIndexName || !currIndexName.equals(indexName) || indexIsValid) {
                            currIndexName = indexName;
                            final String formatIndexName = formatter.format(indexName);
                            final String formatIndexColumnName = formatter.format(indexColumnName);

                            if (null == currIndex || !formatIndexName.equals(currIndex.getIndexName())) {
                                IVersionable<IIndex> v = VersionableFactory.createVersionable(IIndex.class,
                                        context.getConnection().getDBVendor());
                                currIndex = v.getVersionnedObject().getModel();
                                currIndex.setName(formatIndexName);
                                currIndex.setIndexType(
                                        nonUnique ? CaptureHelper.getIndexType(indexType) : IndexType.UNIQUE);
                                indexes.add(currIndex);
                                indexIsValid = true;
                            }

                            final IBasicColumn column = (IBasicColumn) context.getCapturedObject(
                                    IElementType.getInstance(IBasicColumn.TYPE_ID),
                                    CaptureHelper.getUniqueObjectName(tableName, formatIndexColumnName));
                            if (column != null) {
                                /*
                                 * Columns are ordered by INDEX_NAME,
                                 * ORDINAL_POSITION in the returned
                                 * ResultSet, so we don't have to specify
                                 * the position of the index column when
                                 * adding it to the index.
                                 */
                                currIndex.addColumnRef(column.getReference());
                            } else {
                                LOGGER.warn("Index [" + formatIndexName
                                        + "] has been partially captured during import because the referencing column ["
                                        + tableName + "[" + formatIndexColumnName //$NON-NLS-1$
                                        + "]] could not be found in the current workspace");
                                indexIsValid = false;

                                /*
                                 * Now the index is invalid, we remove it
                                 * from the indexes list that will be
                                 * returned to the caller of this method.
                                 */
                                indexes.remove(currIndex);
                            }
                        }
                    }
                }
            } finally {
                CaptureHelper.safeClose(rset, null);
            }
        }
    } catch (SQLException sqle) {
        LOGGER.error("Unable to fetch indexes for table [" + tableName + "] from "
                + getConnectionVendorName(context) + " server: " + sqle.getMessage(), sqle);
    }

    return indexes;
}

From source file:au.org.theark.core.dao.StudyDao.java

private void createOrUpdateFields(SearchVO searchVO) {

    Search search = searchVO.getSearch();

    //start save demographic fields
    Collection<DemographicField> listOfDemographicFieldsFromVO = searchVO.getSelectedDemographicFields();
    List<DemographicFieldSearch> nonPoppableDFS = new ArrayList<DemographicFieldSearch>();
    nonPoppableDFS.addAll(search.getDemographicFieldsToReturn());
    List<DemographicField> nonPoppableDemographicFieldsFromVO = new ArrayList<DemographicField>();
    nonPoppableDemographicFieldsFromVO.addAll(listOfDemographicFieldsFromVO);
    for (DemographicFieldSearch dfs : nonPoppableDFS) {
        boolean toBeDeleted = true; // if we find no match along the way, conclude that it has been deleted.

        for (DemographicField field : nonPoppableDemographicFieldsFromVO) {
            if (dfs.getDemographicField().getId().equals(field.getId())) {
                toBeDeleted = false;
                listOfDemographicFieldsFromVO.remove(field); // we found it, therefore  remove it  from the list that will ultimately be added as DFS's
            }
        }
        if (toBeDeleted) {
            search.getDemographicFieldsToReturn().remove(dfs);
            getSession().update(search);
            getSession().delete(dfs);
            getSession().flush();
            getSession().refresh(search);
        }
    }

    for (DemographicField field : listOfDemographicFieldsFromVO) {
        DemographicFieldSearch dfs = new DemographicFieldSearch(field, search);
        getSession().save(dfs);
    }
    searchVO.setSelectedDemographicFields(nonPoppableDemographicFieldsFromVO);
    //end save demographic fields

    //start save biospecimen fields
    Collection<BiospecimenField> listOfBiospecimenFieldsFromVO = searchVO.getSelectedBiospecimenFields();
    List<BiospecimenFieldSearch> nonPoppableBiospecimenFS = new ArrayList<BiospecimenFieldSearch>();
    nonPoppableBiospecimenFS.addAll(search.getBiospecimenFieldsToReturn());
    List<BiospecimenField> nonPoppableBiospecimenFieldsFromVO = new ArrayList<BiospecimenField>();
    nonPoppableBiospecimenFieldsFromVO.addAll(listOfBiospecimenFieldsFromVO);
    for (BiospecimenFieldSearch bfs : nonPoppableBiospecimenFS) {
        boolean toBeDeleted = true; // if we find no match along the way, conclude that it has been deleted.

        for (BiospecimenField field : nonPoppableBiospecimenFieldsFromVO) {
            if (bfs.getBiospecimenField().getId().equals(field.getId())) {
                toBeDeleted = false;
                listOfBiospecimenFieldsFromVO.remove(field);// we found it, therefore  remove it  from the list that will ultimately be added as DFS's
            }
        }
        if (toBeDeleted) {
            search.getBiospecimenFieldsToReturn().remove(bfs);
            getSession().update(search);
            getSession().delete(bfs);
            getSession().flush();
            getSession().refresh(search);
        }
    }

    for (BiospecimenField field : listOfBiospecimenFieldsFromVO) {
        BiospecimenFieldSearch bfs = new BiospecimenFieldSearch(field, search);
        getSession().save(bfs);
    }
    searchVO.setSelectedBiospecimenFields(nonPoppableBiospecimenFieldsFromVO);
    //end save biospecimen fields

    //start save biocollection fields
    Collection<BiocollectionField> listOfBiocollectionFieldsFromVO = searchVO.getSelectedBiocollectionFields();
    List<BiocollectionFieldSearch> nonPoppableBiocollectionFS = new ArrayList<BiocollectionFieldSearch>();
    nonPoppableBiocollectionFS.addAll(search.getBiocollectionFieldsToReturn());
    List<BiocollectionField> nonPoppableBiocollectionFieldsFromVO = new ArrayList<BiocollectionField>();
    nonPoppableBiocollectionFieldsFromVO.addAll(listOfBiocollectionFieldsFromVO);
    for (BiocollectionFieldSearch bfs : nonPoppableBiocollectionFS) {
        boolean toBeDeleted = true; // if we find no match along the way, conclude that it has been deleted.

        for (BiocollectionField field : nonPoppableBiocollectionFieldsFromVO) {
            if (bfs.getBiocollectionField().getId().equals(field.getId())) {
                toBeDeleted = false;
                listOfBiocollectionFieldsFromVO.remove(field);// we found it, therefore  remove it  from the list that will ultimately be added as DFS's
            }
        }
        if (toBeDeleted) {
            search.getBiocollectionFieldsToReturn().remove(bfs);
            getSession().update(search);
            getSession().delete(bfs);
            getSession().flush();
            getSession().refresh(search);
        }
    }

    for (BiocollectionField field : listOfBiocollectionFieldsFromVO) {
        BiocollectionFieldSearch bfs = new BiocollectionFieldSearch(field, search);
        getSession().save(bfs);
    }
    searchVO.setSelectedBiocollectionFields(nonPoppableBiocollectionFieldsFromVO);
    //end save biocollection fields

    //start saving all custom display fields      
    Collection<CustomFieldDisplay> listOfSubjectCustomFieldDisplaysFromVO = searchVO
            .getSelectedSubjectCustomFieldDisplays();
    Collection<CustomFieldDisplay> listOfBiospecimenCustomFieldDisplaysFromVO = searchVO
            .getSelectedBiospecimenCustomFieldDisplays();
    Collection<CustomFieldDisplay> listOfBiocollectionCustomFieldDisplaysFromVO = searchVO
            .getSelectedBiocollectionCustomFieldDisplays(); // we could really add them all to one collection here
    List<CustomFieldDisplaySearch> nonPoppablePhenoCFDs = new ArrayList<CustomFieldDisplaySearch>();
    nonPoppablePhenoCFDs.addAll(search.getCustomFieldsToReturn());
    List<CustomFieldDisplay> nonPoppableCustomFieldsFromVO = new ArrayList<CustomFieldDisplay>();
    //nonPoppableCustomFieldsFromVO.addAll(listOfPhenoCustomFieldDisplaysFromVO);
    nonPoppableCustomFieldsFromVO.addAll(listOfSubjectCustomFieldDisplaysFromVO);
    nonPoppableCustomFieldsFromVO.addAll(listOfBiospecimenCustomFieldDisplaysFromVO);
    nonPoppableCustomFieldsFromVO.addAll(listOfBiocollectionCustomFieldDisplaysFromVO);
    List<CustomFieldDisplay> poppableCustomFieldsFromVO = new ArrayList<CustomFieldDisplay>();
    //poppableCustomFieldsFromVO.addAll(listOfPhenoCustomFieldDisplaysFromVO);
    poppableCustomFieldsFromVO.addAll(listOfSubjectCustomFieldDisplaysFromVO);
    poppableCustomFieldsFromVO.addAll(listOfBiospecimenCustomFieldDisplaysFromVO);
    poppableCustomFieldsFromVO.addAll(listOfBiocollectionCustomFieldDisplaysFromVO);

    for (CustomFieldDisplaySearch cfds : nonPoppablePhenoCFDs) {
        log.info("fields to return=" + search.getCustomFieldsToReturn().size());
        boolean toBeDeleted = true; // if we find no match along the way, conclude that it has been deleted.

        for (CustomFieldDisplay field : nonPoppableCustomFieldsFromVO) {
            if (cfds.getCustomFieldDisplay().getId().equals(field.getId())) {
                toBeDeleted = false;
                poppableCustomFieldsFromVO.remove(field);// we found it, therefore remove it from the list that will ultimately be added as DFS's
            }
        }
        if (toBeDeleted) {
            search.getCustomFieldsToReturn().remove(cfds);
            getSession().update(search);
            getSession().delete(cfds);
            getSession().flush();
            getSession().refresh(search);
        }
    }

    for (CustomFieldDisplay field : poppableCustomFieldsFromVO) { // listOfPhenoCustomFieldDisplaysFromVO){
        CustomFieldDisplaySearch cfds = new CustomFieldDisplaySearch(field, search);
        getSession().save(cfds);
    }

    //Pheno DataSet Fields:
    Collection<PhenoDataSetFieldDisplay> listOfPhenoDataSetFieldDisplaysFromVO = searchVO
            .getSelectedPhenoDataSetFieldDisplays();
    List<PhenoDataSetFieldDisplaySearch> nonPoppablePhenoDataSetFieldDisplaySearch = new ArrayList<>();
    nonPoppablePhenoDataSetFieldDisplaySearch.addAll(search.getPhenoDataSetFieldsToReturn());
    List<PhenoDataSetFieldDisplay> nonPoppablePhenoDataSetFieldDisplays = new ArrayList<>();
    nonPoppablePhenoDataSetFieldDisplays.addAll(listOfPhenoDataSetFieldDisplaysFromVO);
    List<PhenoDataSetFieldDisplay> poppablePhenoDataSetFieldDisplays = new ArrayList<>();
    poppablePhenoDataSetFieldDisplays.addAll(listOfPhenoDataSetFieldDisplaysFromVO);

    for (PhenoDataSetFieldDisplaySearch phenoSearch : nonPoppablePhenoDataSetFieldDisplaySearch) {
        log.info("pheno fields to return = " + search.getPhenoDataSetFieldsToReturn().size());
        boolean toBeDeleted = true;
        for (PhenoDataSetFieldDisplay field : nonPoppablePhenoDataSetFieldDisplays) {
            if (phenoSearch.getPhenoDataSetFieldDisplay().getId().equals(field.getId())) {
                toBeDeleted = false;
                poppablePhenoDataSetFieldDisplays.remove(field);
            }
        }

        if (toBeDeleted) {
            search.getPhenoDataSetFieldsToReturn().remove(phenoSearch);
            getSession().update(search);
            getSession().delete(phenoSearch);
            getSession().flush();
            getSession().refresh(search);
        }
    }

    for (PhenoDataSetFieldDisplay field : poppablePhenoDataSetFieldDisplays) {
        PhenoDataSetFieldDisplaySearch phenoSearch = new PhenoDataSetFieldDisplaySearch(field, search);
        getSession().save(phenoSearch);
    }

    // is all of this necessary now...investigate// searchVO.setSelectedPhenoCustomFieldDisplays(nonPoppableCustomFieldsFromVO);
    //end save all custom field displays

    Collection<ConsentStatusField> listOfConsentStatusFieldsFromVO = searchVO.getSelectedConsentStatusFields();
    List<ConsentStatusFieldSearch> nonPoppableConsentStatusFieldSearch = new ArrayList<ConsentStatusFieldSearch>();
    nonPoppableConsentStatusFieldSearch.addAll(search.getConsentStatusFieldsToReturn());
    List<ConsentStatusField> nonPoppableConsentStatusFieldsFromVO = new ArrayList<ConsentStatusField>();
    nonPoppableConsentStatusFieldsFromVO.addAll(listOfConsentStatusFieldsFromVO);
    for (ConsentStatusFieldSearch csfs : nonPoppableConsentStatusFieldSearch) {
        boolean toBeDeleted = true;

        for (ConsentStatusField field : nonPoppableConsentStatusFieldsFromVO) {
            log.info("consentstfld = " + field.toString() + " " + field.getPublicFieldName());
            log.info("csfs.getid. == field.getid: "
                    + csfs.getConsentStatusField().getId().equals(field.getId()));
            if (csfs.getConsentStatusField().getId().equals(field.getId())) {
                toBeDeleted = false;
                listOfConsentStatusFieldsFromVO.remove(field);
            }
        }
        if (toBeDeleted) {
            search.getConsentStatusFieldsToReturn().remove(csfs);
            getSession().update(search);
            getSession().delete(csfs);
            getSession().flush();
            getSession().refresh(search);
        }
    }

    for (ConsentStatusField field : listOfConsentStatusFieldsFromVO) {
        ConsentStatusFieldSearch csfs = new ConsentStatusFieldSearch(field, search);
        getSession().save(csfs);
    }
    searchVO.setSelectedConsentStatusFields(getSelectedConsentStatusFieldsForSearch(search));
}
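
Throughout this method the code iterates over copies (the "nonPoppable" lists) while calling remove on the live collections; removing from a collection while iterating it directly would throw ConcurrentModificationException. A minimal sketch of the copy-then-remove idiom (field names are illustrative):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;

public class CopyThenRemoveDemo {
    public static void main(String[] args) {
        Collection<String> selected = new ArrayList<>(Arrays.asList("age", "sex", "height"));

        // iterate a snapshot so that removing from the live collection is safe
        List<String> snapshot = new ArrayList<>(selected);
        for (String field : snapshot) {
            if (field.startsWith("h")) {
                selected.remove(field); // safe: we are not iterating 'selected' itself
            }
        }
        System.out.println(selected); // [age, sex]
    }
}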

From source file:org.apache.hadoop.dfs.FSNamesystem.java

/**
 * We want "replication" replicas of the block, but we now have too many.
 * In this method, copy enough nodes from 'srcNodes' into 'dstNodes' such that:
 *
 * srcNodes.size() - dstNodes.size() == replication
 *
 * We pick nodes so that replicas stay spread across racks, and we also
 * try hard to pick one with the least free space.
 * The algorithm first picks the node with the least free space among the
 * nodes on a rack holding more than one replica of the block, so removing
 * such a replica won't remove a rack.
 * If no such node is available, it picks the node with the least free space.
 */
void chooseExcessReplicates(Collection<DatanodeDescriptor> nonExcess, Block b, short replication,
        DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint) {
    // first form a rack to datanodes map
    HashMap<String, ArrayList<DatanodeDescriptor>> rackMap = new HashMap<String, ArrayList<DatanodeDescriptor>>();
    for (Iterator<DatanodeDescriptor> iter = nonExcess.iterator(); iter.hasNext();) {
        DatanodeDescriptor node = iter.next();
        String rackName = node.getNetworkLocation();
        ArrayList<DatanodeDescriptor> datanodeList = rackMap.get(rackName);
        if (datanodeList == null) {
            datanodeList = new ArrayList<DatanodeDescriptor>();
        }
        datanodeList.add(node);
        rackMap.put(rackName, datanodeList);
    }

    // split nodes into two sets
    // priSet contains nodes on rack with more than one replica
    // remains contains the remaining nodes
    ArrayList<DatanodeDescriptor> priSet = new ArrayList<DatanodeDescriptor>();
    ArrayList<DatanodeDescriptor> remains = new ArrayList<DatanodeDescriptor>();
    for (Iterator<Entry<String, ArrayList<DatanodeDescriptor>>> iter = rackMap.entrySet().iterator(); iter
            .hasNext();) {
        Entry<String, ArrayList<DatanodeDescriptor>> rackEntry = iter.next();
        ArrayList<DatanodeDescriptor> datanodeList = rackEntry.getValue();
        if (datanodeList.size() == 1) {
            remains.add(datanodeList.get(0));
        } else {
            priSet.addAll(datanodeList);
        }
    }

    // pick one node to delete that favors the delete hint
    // otherwise pick one with least space from priSet if it is not empty
    // otherwise one node with least space from remains
    boolean firstOne = true;
    while (nonExcess.size() - replication > 0) {
        DatanodeInfo cur = null;
        long minSpace = Long.MAX_VALUE;

        // check if we can del delNodeHint
        if (firstOne && delNodeHint != null && nonExcess.contains(delNodeHint)
                && (priSet.contains(delNodeHint) || (addedNode != null && !priSet.contains(addedNode)))) {
            cur = delNodeHint;
        } else { // regular excessive replica removal
            Iterator<DatanodeDescriptor> iter = priSet.isEmpty() ? remains.iterator() : priSet.iterator();
            while (iter.hasNext()) {
                DatanodeDescriptor node = iter.next();
                long free = node.getRemaining();

                if (minSpace > free) {
                    minSpace = free;
                    cur = node;
                }
            }
        }

        firstOne = false;
        // adjust rackmap, priSet, and remains
        String rack = cur.getNetworkLocation();
        ArrayList<DatanodeDescriptor> datanodes = rackMap.get(rack);
        datanodes.remove(cur);
        if (datanodes.isEmpty()) {
            rackMap.remove(rack);
        }
        if (priSet.remove(cur)) {
            if (datanodes.size() == 1) {
                priSet.remove(datanodes.get(0));
                remains.add(datanodes.get(0));
            }
        } else {
            remains.remove(cur);
        }

        nonExcess.remove(cur);

        Collection<Block> excessBlocks = excessReplicateMap.get(cur.getStorageID());
        if (excessBlocks == null) {
            excessBlocks = new TreeSet<Block>();
            excessReplicateMap.put(cur.getStorageID(), excessBlocks);
        }
        excessBlocks.add(b);
        NameNode.stateChangeLog.debug("BLOCK* NameSystem.chooseExcessReplicates: " + "(" + cur.getName() + ", "
                + b + ") is added to excessReplicateMap");

        //
        // The 'excessblocks' tracks blocks until we get confirmation
        // that the datanode has deleted them; the only way we remove them
        // is when we get a "removeBlock" message.  
        //
        // The 'invalidate' list is used to inform the datanode the block 
        // should be deleted.  Items are removed from the invalidate list
        // upon giving instructions to the datanode.
        //
        addToInvalidatesNoLog(b, cur);
        NameNode.stateChangeLog.info("BLOCK* NameSystem.chooseExcessReplicates: " + "(" + cur.getName() + ", "
                + b + ") is added to recentInvalidateSets");
    }
}
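
This older implementation also uses remove's boolean return as a membership test: if (priSet.remove(cur)) both removes the node and reveals which of the two sets held it, avoiding a separate contains check. A tiny sketch of the idiom (sets and element are illustrative):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;

public class RemoveAsMembershipTestDemo {
    public static void main(String[] args) {
        Collection<String> priSet = new ArrayList<>(Arrays.asList("a", "b"));
        Collection<String> remains = new ArrayList<>(Arrays.asList("c"));

        String cur = "c";
        // remove doubles as a membership test: true means cur was in priSet
        if (priSet.remove(cur)) {
            System.out.println(cur + " removed from priSet");
        } else {
            remains.remove(cur);
            System.out.println(cur + " removed from remains");
        }
    }
}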

From source file:org.biomart.configurator.controller.MartController.java

private void processParentDatasetTable(TransformationUnit parentTU, DatasetTable parentDsTable,
        DatasetTable dsTable, DatasetTableType type, List<DatasetColumn> sourceDSCols,
        Set<DatasetColumn> unusedCols, Collection<ForeignKey> unusedFKs, Map<String, Integer> uniqueBases) {
    parentTU = new SelectFromTable(parentDsTable);
    dsTable.addTransformationUnit(parentTU);

    // Make a list to hold the child table's FK cols.
    final List<Column> dsTableFKCols = new ArrayList<Column>();

    // Get the primary key of the parent DS table.
    final PrimaryKey parentDSTablePK = parentDsTable.getPrimaryKey();

    // Loop over each column in the parent table. If this is
    // a subclass table, add it. If it is a dimension table,
    // only add it if it is in the PK or is in the first underlying
    // key. In either case, if it is in the PK, add it both to the
    // child PK and the child FK. Also inherit if it is involved
    // in a restriction on the very first join.
    for (final Iterator<Column> i = parentDsTable.getColumnList().iterator(); i.hasNext();) {
        final DatasetColumn parentDSCol = (DatasetColumn) i.next();
        boolean inRelationRestriction = false;
        // If this is not a subclass table, we need to filter columns.
        if (!type.equals(DatasetTableType.MAIN_SUBCLASS)) {
            // Skip columns that are not in the primary key.
            final boolean inPK = parentDSTablePK.getColumns().contains(parentDSCol);
            final boolean inSourceKey = sourceDSCols.contains(parentDSCol);
            // If the column is in a restricted relation
            // on the source relation, we need to inherit it.
            // Inherit it?
            if (!inPK && !inSourceKey && !inRelationRestriction)
                continue;
        }
        // Only unfiltered columns reach this point. Create a copy of
        // the column.
        final InheritedColum dsCol;
        if (!dsTable.getColumnNames().contains(parentDSCol.getName())) {
            WrappedColumn tmpDsc = (WrappedColumn) parentDsTable.getColumnByName(parentDSCol.getName());
            // WrappedColumn tmpDsc = new
            // WrappedColumn(parentDSCol.getSourceColumn(),parentDSCol.getName(),dsTable);
            dsCol = new InheritedColum(dsTable, tmpDsc);
            dsTable.addColumn(dsCol);
            // If any other col has modified name same as
            // inherited col's modified name, then rename the
            // other column to avoid clash.
            for (final Iterator<Column> j = dsTable.getColumnList().iterator(); j.hasNext();) {
                final DatasetColumn cand = (DatasetColumn) j.next();
                if (!(cand instanceof InheritedColum) && cand.getName().equals(dsCol.getName())) {
                    final DatasetColumn renameCol = inRelationRestriction ? cand : (DatasetColumn) dsCol;
                    if (renameCol.getName().endsWith(Resources.get("keySuffix"))) {
                        renameCol.setName(renameCol.getName().substring(0,
                                renameCol.getName().indexOf(Resources.get("keySuffix"))) + "_clash"
                                + Resources.get("keySuffix"));
                        renameCol.setInternalName(renameCol.getInternalName().substring(0,
                                renameCol.getInternalName().indexOf(Resources.get("keySuffix"))) + "_clash"
                                + Resources.get("keySuffix"));
                    } else {
                        renameCol.setName(renameCol.getName() + "_clash");
                        renameCol.setInternalName(renameCol.getInternalName() + "_clash");
                    }
                }
            }
        } else
            dsCol = (InheritedColum) dsTable.getColumnByName(parentDSCol.getName());
        unusedCols.remove(dsCol);
        parentTU.getNewColumnNameMap().put(parentDSCol, (DatasetColumn) dsCol);
        dsCol.setTransformationUnit(parentTU);
        uniqueBases.put(parentDSCol.getName(), new Integer(0));
        // Add the column to the child's FK, but only if it was in
        // the parent PK.
        if (parentDSTablePK.getColumns().contains(parentDSCol))
            dsTableFKCols.add(dsCol);
    }

    try {
        // Create the child FK.
        List<Column> columns = new ArrayList<Column>();
        for (Column cc : dsTableFKCols) {
            columns.add(cc);
        }
        ForeignKey fkObject = new ForeignKey(columns);

        // KeyController dsTableFK = new KeyController(fkObject);

        // Create only if not already exists.
        for (final Iterator<ForeignKey> i = dsTable.getForeignKeys().iterator(); i.hasNext();) {
            final ForeignKey cand = i.next();
            if (cand.equals(fkObject))
                fkObject = cand;
        }
        if (!dsTable.getForeignKeys().contains(fkObject)) {
            dsTable.getForeignKeys().add(fkObject);
            // dsTable.getForeignKeys().add(dsTableFK);
            // Link the child FK to the parent PK.
            new RelationTarget(parentDSTablePK, fkObject, Cardinality.MANY_A);
            // parentDSTablePK.getObject().addRelation(relation);
            // dsTableFK.getObject().addRelation(relation);
        }
        unusedFKs.remove(fkObject);
    } catch (final Throwable t) {
        throw new BioMartError(t);
    }

    // Copy all parent FKs and add to child, but WITHOUT
    // relations. Subclasses only!
    if (type.equals(DatasetTableType.MAIN_SUBCLASS))
        for (final Iterator<ForeignKey> i = parentDsTable.getForeignKeys().iterator(); i.hasNext();) {
            final ForeignKey parentFK = i.next();
            final List<Column> childFKCols = new ArrayList<Column>();
            for (int j = 0; j < parentFK.getColumns().size(); j++)
                childFKCols.add(parentTU.getNewColumnNameMap().get(parentFK.getColumns().get(j)));
            try {
                // Create the child FK.
                List<Column> columns = new ArrayList<Column>();
                for (Column cc : childFKCols) {
                    columns.add(cc);
                }
                ForeignKey fkObject = new ForeignKey(columns);

                // KeyController dsTableFK = (KeyController)(fkObject.getWrapper());

                // Create only if not already exists.
                for (final Iterator<ForeignKey> j = dsTable.getForeignKeys().iterator(); j.hasNext();) {
                    final ForeignKey cand = j.next();
                    if (cand.equals(fkObject))
                        fkObject = cand;
                }
                if (!dsTable.getForeignKeys().contains(fkObject))
                    dsTable.getForeignKeys().add(fkObject);
                // dsTable.getForeignKeys().add(dsTableFK);
                unusedFKs.remove(fkObject);
            } catch (final Throwable t) {
                throw new BioMartError(t);
            }
        }
}