Example usage for java.util Set removeAll

List of usage examples for java.util Set removeAll

Introduction

On this page you can find example usage of java.util.Set.removeAll.

Prototype

boolean removeAll(Collection<?> c);

Document

Removes from this set all of its elements that are contained in the specified collection (optional operation).
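Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing the call and its boolean result:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class RemoveAllExample {
    public static void main(String[] args) {
        // Mutable set that will be modified in place
        Set<String> names = new HashSet<>(Arrays.asList("alice", "bob", "carol", "dave"));
        // Any Collection can be passed; elements not present in the set are simply ignored
        Set<String> toRemove = new HashSet<>(Arrays.asList("bob", "dave", "eve"));

        // removeAll returns true if the set changed as a result of the call
        boolean changed = names.removeAll(toRemove);

        System.out.println(changed); // true
        System.out.println(names);   // [alice, carol] (iteration order not guaranteed for HashSet)
    }
}

Several of the examples below also rely on that boolean return value as a quick intersection test: set.removeAll(other) returns true exactly when the two collections had at least one element in common, while also mutating the receiving set.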

Usage

From source file:org.alfresco.repo.domain.node.AbstractNodeDAOImpl.java

@Override
public boolean addNodeAspects(Long nodeId, Set<QName> aspectQNames) {
    if (aspectQNames.size() == 0) {
        return false;
    }
    // Copy the inbound set
    Set<QName> aspectQNamesToAdd = new HashSet<QName>(aspectQNames);
    // Get existing
    Set<QName> existingAspectQNames = getNodeAspectsCached(nodeId);
    // Find out what needs adding
    aspectQNamesToAdd.removeAll(existingAspectQNames);
    aspectQNamesToAdd.remove(ContentModel.ASPECT_REFERENCEABLE); // Implicit
    aspectQNamesToAdd.remove(ContentModel.ASPECT_LOCALIZED); // Implicit
    if (aspectQNamesToAdd.isEmpty()) {
        // Nothing to do
        return false;
    }
    // Add them
    Set<Long> aspectQNameIds = qnameDAO.convertQNamesToIds(aspectQNamesToAdd, true);
    startBatch();
    try {
        for (Long aspectQNameId : aspectQNameIds) {
            insertNodeAspect(nodeId, aspectQNameId);
        }
    } catch (RuntimeException e) {
        // This could be because the cache is out of date
        invalidateNodeCaches(nodeId);
        throw e;
    } finally {
        executeBatch();
    }

    // Collate the new aspect set, so that touch recognizes the addition of cm:auditable
    Set<QName> newAspectQNames = new HashSet<QName>(existingAspectQNames);
    newAspectQNames.addAll(aspectQNamesToAdd);

    // Handle sys:aspect_root
    if (aspectQNames.contains(ContentModel.ASPECT_ROOT)) {
        // invalidate root nodes cache for the store
        StoreRef storeRef = getNodeNotNull(nodeId, false).getStore().getStoreRef();
        allRootNodesCache.remove(storeRef);
        // Touch the node; parent assocs need invalidation
        touchNode(nodeId, null, newAspectQNames, false, false, true);
    } else {
        // Touch the node; all caches are fine
        touchNode(nodeId, null, newAspectQNames, false, false, false);
    }

    // Manually update the cache
    setNodeAspectsCached(nodeId, newAspectQNames);

    // Done
    return true;
}

From source file:com.confighub.core.store.Store.java

/**
 * @param repository
 * @param user
 * @param id
 * @param name
 * @param type
 * @param assignmentIds
 * @param depthLabel
 * @return
 * @throws ConfigException
 */
public CtxLevel updateOrCreateLevel(final Repository repository, final UserAccount user, final Long id,
        final String name, final CtxLevel.LevelType type, final Collection<Long> assignmentIds,
        final String depthLabel) throws ConfigException {
    if (Utils.anyNull(repository, name)) {
        throw new ConfigException(Error.Code.MISSING_PARAMS);
    }

    if (!repository.hasWriteAccess(user)) {
        throw new ConfigException(Error.Code.USER_ACCESS_DENIED);
    }

    if (!repository.canUserManageContext(user)) {
        throw new ConfigException(Error.Code.CONTEXT_EDIT_DISABLED);
    }

    Depth depth = repository.getDepthFromLabel(depthLabel);
    CtxLevel ctxLevel;

    boolean updatePropertyContextStrings = false;
    boolean isNew = id == null;
    boolean levelTypeChanged = false;

    if (!isNew) {
        ctxLevel = getLevel(id, repository);

        if (null == ctxLevel) {
            throw new ConfigException(Error.Code.NOT_FOUND);
        }

        updatePropertyContextStrings = !ctxLevel.getName().equals(name);
        ctxLevel.setName(name);
        levelTypeChanged = !ctxLevel.getType().equals(type);
    } else {
        ctxLevel = new CtxLevel(repository, depth);
        ctxLevel.setName(name);
    }

    // When do the property contexts need to be rewritten?
    // - if a Level type changes to Group
    // - if a Level type changes from Group
    // - if level name has changed

    updatePropertyContextStrings |= levelTypeChanged
            && (CtxLevel.LevelType.Group.equals(type) || CtxLevel.LevelType.Group.equals(ctxLevel.getType()));

    // Check access to the edited level
    AccessRuleWrapper accessRuleWrapper = null;
    boolean accessControlled = repository.isAccessControlEnabled();
    if (accessControlled) {
        accessRuleWrapper = repository.getRulesWrapper(user);

        if (!isLevelModificationAllowed(accessRuleWrapper, accessControlled, ctxLevel)) {
            throw new ConfigException(Error.Code.LEVEL_EDITING_ACCESS_DENIED, ctxLevel.toJson());
        }
    }

    // Collect all level assignments;
    // Check write access to them
    // Make sure they are assignable to this level
    Set<CtxLevel> assignments = new HashSet<>();
    if (null != assignmentIds) {
        for (Long lid : assignmentIds) {
            CtxLevel l = getLevel(lid, repository);
            if (null == l) {
                throw new ConfigException(Error.Code.NOT_FOUND);
            }

            // ToDo: this should be a better check - not just that the type is the same
            if (l.getType().equals(type)) {
                throw new ConfigException(Error.Code.GROUP_TO_GROUP_ASSIGNMENT, l.toJson());
            }

            if (!isLevelModificationAllowed(accessRuleWrapper, accessControlled, l)) {
                throw new ConfigException(Error.Code.LEVEL_EDITING_ACCESS_DENIED, l.toJson());
            }

            assignments.add(l);
        }
    }

    // If this is a new level, just save and return
    if (isNew) {
        ctxLevel.setType(type);
        if (CtxLevel.LevelType.Group == type) {
            ctxLevel.setMembers(assignments);
        } else if (CtxLevel.LevelType.Member == type) {
            assignments.forEach(group -> group.addMember(ctxLevel));
        }

        // ToDo: should we save assignments

        saveOrUpdateAudited(user, repository, ctxLevel);
        return ctxLevel;
    }

    // This is an edited level, so...

    // Type of the level has not changed.
    if (!levelTypeChanged) {

        if (CtxLevel.LevelType.Group == type) {
            ctxLevel.setMembers(assignments);
        } else if (CtxLevel.LevelType.Member == type) {
            Set<CtxLevel> currentGroups = ctxLevel.getGroups();

            if (null != currentGroups) {
                // get groups that should no longer have this level as a member
                currentGroups.removeAll(assignments);
                currentGroups.forEach(group -> group.removeMember(ctxLevel));
            }

            // refresh assignments
            assignments.forEach(group -> group.addMember(ctxLevel));
        } else if (CtxLevel.LevelType.Standalone == type) {
            ctxLevel.setMembers(null);
        }

        // ToDo: should we save assignments

        saveOrUpdateAudited(user, repository, ctxLevel);

        if (updatePropertyContextStrings) {
            if (null != ctxLevel.getProperties()) {
                ctxLevel.getProperties().forEach(Property::updateContextString);
            }

            if (null != ctxLevel.getFiles()) {
                ctxLevel.getFiles().forEach(RepoFile::updateContextString);
            }
        }

        return ctxLevel;
    }

    // Level type has changed
    switch (ctxLevel.getType()) {
    case Group: {
        ctxLevel.setMembers(null);
        if (CtxLevel.LevelType.Member == type) {
            assignments.forEach(group -> group.addMember(ctxLevel));
        }

        break;
    }

    case Member: {
        Set<CtxLevel> currentGroups = ctxLevel.getGroups();
        if (null != currentGroups) {
            currentGroups.forEach(group -> group.removeMember(ctxLevel));
        }

        if (CtxLevel.LevelType.Group == type) {
            ctxLevel.setMembers(assignments);
        }

        break;
    }

    case Standalone: {
        if (CtxLevel.LevelType.Group == type) {
            ctxLevel.setMembers(assignments);
        }

        else if (CtxLevel.LevelType.Member == type) {
            assignments.forEach(group -> group.addMember(ctxLevel));
        }

        break;
    }
    }

    ctxLevel.setType(type);
    if (updatePropertyContextStrings) {
        if (null != ctxLevel.getProperties()) {
            ctxLevel.getProperties().forEach(Property::updateContextString);
        }
        if (null != ctxLevel.getFiles()) {
            ctxLevel.getFiles().forEach(RepoFile::updateContextString);
        }
    }

    saveOrUpdateAudited(user, repository, ctxLevel);

    return ctxLevel;
}

From source file:com.google.code.facebook.graph.sna.applet.VertexCollapseDemo.java

public VertexCollapseDemo() {

    // create a simple graph for the demo
    graph = TestGraphs.getOneComponentGraph();
    collapser = new GraphCollapser(graph);

    layout = new FRLayout(graph);

    Dimension preferredSize = new Dimension(400, 400);
    final VisualizationModel visualizationModel = new DefaultVisualizationModel(layout, preferredSize);
    vv = new VisualizationViewer(visualizationModel, preferredSize);

    vv.getRenderContext().setVertexShapeTransformer(new ClusterVertexShapeFunction());

    final PredicatedParallelEdgeIndexFunction eif = PredicatedParallelEdgeIndexFunction.getInstance();
    final Set exclusions = new HashSet();
    eif.setPredicate(new Predicate() {

        public boolean evaluate(Object e) {

            return exclusions.contains(e);
        }
    });

    vv.getRenderContext().setParallelEdgeIndexFunction(eif);

    vv.setBackground(Color.white);

    // add a listener for ToolTips
    vv.setVertexToolTipTransformer(new ToStringLabeller() {

        /* (non-Javadoc)
         * @see edu.uci.ics.jung.visualization.decorators.DefaultToolTipFunction#getToolTipText(java.lang.Object)
         */
        @Override
        public String transform(Object v) {
            if (v instanceof Graph) {
                return ((Graph) v).getVertices().toString();
            }
            return super.transform(v);
        }
    });

    /**
     * the regular graph mouse for the normal view
     */
    final DefaultModalGraphMouse graphMouse = new DefaultModalGraphMouse();

    vv.setGraphMouse(graphMouse);

    Container content = getContentPane();
    GraphZoomScrollPane gzsp = new GraphZoomScrollPane(vv);
    content.add(gzsp);

    JComboBox modeBox = graphMouse.getModeComboBox();
    modeBox.addItemListener(graphMouse.getModeListener());
    graphMouse.setMode(ModalGraphMouse.Mode.PICKING);

    final ScalingControl scaler = new CrossoverScalingControl();

    JButton plus = new JButton("+");
    plus.addActionListener(new ActionListener() {
        public void actionPerformed(ActionEvent e) {
            scaler.scale(vv, 1.1f, vv.getCenter());
        }
    });
    JButton minus = new JButton("-");
    minus.addActionListener(new ActionListener() {
        public void actionPerformed(ActionEvent e) {
            scaler.scale(vv, 1 / 1.1f, vv.getCenter());
        }
    });

    JButton collapse = new JButton("Collapse");
    collapse.addActionListener(new ActionListener() {

        public void actionPerformed(ActionEvent e) {
            Collection picked = new HashSet(vv.getPickedVertexState().getPicked());
            if (picked.size() > 1) {
                Graph inGraph = layout.getGraph();
                Graph clusterGraph = collapser.getClusterGraph(inGraph, picked);

                Graph g = collapser.collapse(layout.getGraph(), clusterGraph);
                double sumx = 0;
                double sumy = 0;
                for (Object v : picked) {
                    Point2D p = (Point2D) layout.transform(v);
                    sumx += p.getX();
                    sumy += p.getY();
                }
                Point2D cp = new Point2D.Double(sumx / picked.size(), sumy / picked.size());
                vv.getRenderContext().getParallelEdgeIndexFunction().reset();
                layout.setGraph(g);
                layout.setLocation(clusterGraph, cp);
                vv.getPickedVertexState().clear();
                vv.repaint();
            }
        }
    });

    JButton compressEdges = new JButton("Compress Edges");
    compressEdges.addActionListener(new ActionListener() {

        public void actionPerformed(ActionEvent e) {
            Collection picked = vv.getPickedVertexState().getPicked();
            if (picked.size() == 2) {
                Pair pair = new Pair(picked);
                Graph graph = layout.getGraph();
                Collection edges = new HashSet(graph.getIncidentEdges(pair.getFirst()));
                edges.retainAll(graph.getIncidentEdges(pair.getSecond()));
                exclusions.addAll(edges);
                vv.repaint();
            }

        }
    });

    JButton expandEdges = new JButton("Expand Edges");
    expandEdges.addActionListener(new ActionListener() {

        public void actionPerformed(ActionEvent e) {
            Collection picked = vv.getPickedVertexState().getPicked();
            if (picked.size() == 2) {
                Pair pair = new Pair(picked);
                Graph graph = layout.getGraph();
                Collection edges = new HashSet(graph.getIncidentEdges(pair.getFirst()));
                edges.retainAll(graph.getIncidentEdges(pair.getSecond()));
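                // Dropping the shared edges from the exclusion set lets the parallel-edge index function offset (expand) them again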
                exclusions.removeAll(edges);
                vv.repaint();
            }

        }
    });

    JButton expand = new JButton("Expand");
    expand.addActionListener(new ActionListener() {

        public void actionPerformed(ActionEvent e) {
            Collection picked = new HashSet(vv.getPickedVertexState().getPicked());
            for (Object v : picked) {
                if (v instanceof Graph) {

                    Graph g = collapser.expand(layout.getGraph(), (Graph) v);
                    vv.getRenderContext().getParallelEdgeIndexFunction().reset();
                    layout.setGraph(g);
                }
                vv.getPickedVertexState().clear();
                vv.repaint();
            }
        }
    });

    JButton reset = new JButton("Reset");
    reset.addActionListener(new ActionListener() {

        public void actionPerformed(ActionEvent e) {
            layout.setGraph(graph);
            exclusions.clear();
            vv.repaint();
        }
    });

    JButton help = new JButton("Help");
    help.addActionListener(new ActionListener() {
        public void actionPerformed(ActionEvent e) {
            JOptionPane.showMessageDialog((JComponent) e.getSource(), instructions, "Help",
                    JOptionPane.PLAIN_MESSAGE);
        }
    });

    JPanel controls = new JPanel();
    JPanel zoomControls = new JPanel(new GridLayout(2, 1));
    zoomControls.setBorder(BorderFactory.createTitledBorder("Zoom"));
    zoomControls.add(plus);
    zoomControls.add(minus);
    controls.add(zoomControls);
    JPanel collapseControls = new JPanel(new GridLayout(3, 1));
    collapseControls.setBorder(BorderFactory.createTitledBorder("Picked"));
    collapseControls.add(collapse);
    collapseControls.add(expand);
    collapseControls.add(compressEdges);
    collapseControls.add(expandEdges);
    collapseControls.add(reset);
    controls.add(collapseControls);
    controls.add(modeBox);
    controls.add(help);
    content.add(controls, BorderLayout.SOUTH);
}

From source file:com.google.gwt.emultest.java.util.TreeMapTest.java

public void testNavigableKeySet() {
    K[] keys = getSortedKeys();
    V[] values = getSortedValues();
    NavigableMap<K, V> map = createNavigableMap();
    map.put(keys[0], values[0]);

    Set<K> keySet = map.navigableKeySet();
    _assertEquals(keySet, map.navigableKeySet());

    map.put(keys[1], values[1]);
    map.put(keys[2], values[2]);
    _assertEquals(map.navigableKeySet(), keySet);
    _assertEquals(keySet, keySet);

    try {
        keySet.add(keys[3]);
        fail("should throw UnsupportedOperationException");
    } catch (UnsupportedOperationException expected) {
    }
    try {
        keySet.add(null);
        fail("should throw UnsupportedOperationException");
    } catch (UnsupportedOperationException expected) {
    }
    try {
        keySet.addAll(null);
        fail("should throw NullPointerException");
    } catch (NullPointerException expected) {
    }
    Collection<K> collection = new ArrayList<K>();
    keySet.addAll(collection);
    try {
        collection.add(keys[3]);
        keySet.addAll(collection);
        fail("should throw UnsupportedOperationException");
    } catch (UnsupportedOperationException expected) {
    }

    Iterator<K> iter = keySet.iterator();
    iter.next();
    iter.remove();
    assertFalse(map.containsKey(keys[0]));

    collection = new ArrayList<K>();
    collection.add(keys[2]);
    keySet.retainAll(collection);
    assertEquals(1, map.size());
    assertTrue(keySet.contains(keys[2]));

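    // Removing the only remaining key through the key-set view empties the backing map as well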
    keySet.removeAll(collection);
    _assertEmpty(map);

    map.put(keys[0], values[0]);
    assertEquals(1, map.size());
    assertTrue(keySet.contains(keys[0]));

    keySet.clear();
    _assertEmpty(map);
}

From source file:com.google.code.facebook.graph.sna.applet.VertexCollapseDemoWithLayouts.java

public VertexCollapseDemoWithLayouts() {

    // create a simple graph for the demo
    graph = TestGraphs.getOneComponentGraph();
    collapsedGraph = graph;
    collapser = new GraphCollapser(graph);

    layout = new FRLayout(graph);

    Dimension preferredSize = new Dimension(400, 400);
    final VisualizationModel visualizationModel = new DefaultVisualizationModel(layout, preferredSize);
    vv = new VisualizationViewer(visualizationModel, preferredSize);

    vv.getRenderContext().setVertexShapeTransformer(new ClusterVertexShapeFunction());

    final PredicatedParallelEdgeIndexFunction eif = PredicatedParallelEdgeIndexFunction.getInstance();
    final Set exclusions = new HashSet();
    eif.setPredicate(new Predicate() {

        public boolean evaluate(Object e) {

            return exclusions.contains(e);
        }
    });

    vv.getRenderContext().setParallelEdgeIndexFunction(eif);

    vv.setBackground(Color.white);

    // add a listener for ToolTips
    vv.setVertexToolTipTransformer(new ToStringLabeller() {

        /* (non-Javadoc)
         * @see edu.uci.ics.jung.visualization.decorators.DefaultToolTipFunction#getToolTipText(java.lang.Object)
         */
        @Override
        public String transform(Object v) {
            if (v instanceof Graph) {
                return ((Graph) v).getVertices().toString();
            }
            return super.transform(v);
        }
    });

    /**
     * the regular graph mouse for the normal view
     */
    final DefaultModalGraphMouse graphMouse = new DefaultModalGraphMouse();

    vv.setGraphMouse(graphMouse);

    Container content = getContentPane();
    GraphZoomScrollPane gzsp = new GraphZoomScrollPane(vv);
    content.add(gzsp);

    JComboBox modeBox = graphMouse.getModeComboBox();
    modeBox.addItemListener(graphMouse.getModeListener());
    graphMouse.setMode(ModalGraphMouse.Mode.PICKING);

    final ScalingControl scaler = new CrossoverScalingControl();

    JButton plus = new JButton("+");
    plus.addActionListener(new ActionListener() {
        public void actionPerformed(ActionEvent e) {
            scaler.scale(vv, 1.1f, vv.getCenter());
        }
    });
    JButton minus = new JButton("-");
    minus.addActionListener(new ActionListener() {
        public void actionPerformed(ActionEvent e) {
            scaler.scale(vv, 1 / 1.1f, vv.getCenter());
        }
    });

    JButton collapse = new JButton("Collapse");
    collapse.addActionListener(new ActionListener() {

        public void actionPerformed(ActionEvent e) {
            Collection picked = new HashSet(vv.getPickedVertexState().getPicked());
            if (picked.size() > 1) {
                Graph inGraph = layout.getGraph();
                Graph clusterGraph = collapser.getClusterGraph(inGraph, picked);

                Graph g = collapser.collapse(layout.getGraph(), clusterGraph);
                collapsedGraph = g;
                double sumx = 0;
                double sumy = 0;
                for (Object v : picked) {
                    Point2D p = (Point2D) layout.transform(v);
                    sumx += p.getX();
                    sumy += p.getY();
                }
                Point2D cp = new Point2D.Double(sumx / picked.size(), sumy / picked.size());
                vv.getRenderContext().getParallelEdgeIndexFunction().reset();
                layout.setGraph(g);
                layout.setLocation(clusterGraph, cp);
                vv.getPickedVertexState().clear();
                vv.repaint();
            }
        }
    });

    JButton compressEdges = new JButton("Compress Edges");
    compressEdges.addActionListener(new ActionListener() {

        public void actionPerformed(ActionEvent e) {
            Collection picked = vv.getPickedVertexState().getPicked();
            if (picked.size() == 2) {
                Pair pair = new Pair(picked);
                Graph graph = layout.getGraph();
                Collection edges = new HashSet(graph.getIncidentEdges(pair.getFirst()));
                edges.retainAll(graph.getIncidentEdges(pair.getSecond()));
                exclusions.addAll(edges);
                vv.repaint();
            }

        }
    });

    JButton expandEdges = new JButton("Expand Edges");
    expandEdges.addActionListener(new ActionListener() {

        public void actionPerformed(ActionEvent e) {
            Collection picked = vv.getPickedVertexState().getPicked();
            if (picked.size() == 2) {
                Pair pair = new Pair(picked);
                Graph graph = layout.getGraph();
                Collection edges = new HashSet(graph.getIncidentEdges(pair.getFirst()));
                edges.retainAll(graph.getIncidentEdges(pair.getSecond()));
                exclusions.removeAll(edges);
                vv.repaint();
            }

        }
    });

    JButton expand = new JButton("Expand");
    expand.addActionListener(new ActionListener() {

        public void actionPerformed(ActionEvent e) {
            Collection picked = new HashSet(vv.getPickedVertexState().getPicked());
            for (Object v : picked) {
                if (v instanceof Graph) {

                    Graph g = collapser.expand(layout.getGraph(), (Graph) v);
                    vv.getRenderContext().getParallelEdgeIndexFunction().reset();
                    layout.setGraph(g);
                }
                vv.getPickedVertexState().clear();
                vv.repaint();
            }
        }
    });

    JButton reset = new JButton("Reset");
    reset.addActionListener(new ActionListener() {

        public void actionPerformed(ActionEvent e) {
            layout.setGraph(graph);
            exclusions.clear();
            vv.repaint();
        }
    });

    JButton help = new JButton("Help");
    help.addActionListener(new ActionListener() {
        public void actionPerformed(ActionEvent e) {
            JOptionPane.showMessageDialog((JComponent) e.getSource(), instructions, "Help",
                    JOptionPane.PLAIN_MESSAGE);
        }
    });
    Class[] combos = getCombos();
    final JComboBox jcb = new JComboBox(combos);
    // use a renderer to shorten the layout name presentation
    jcb.setRenderer(new DefaultListCellRenderer() {
        public Component getListCellRendererComponent(JList list, Object value, int index, boolean isSelected,
                boolean cellHasFocus) {
            String valueString = value.toString();
            valueString = valueString.substring(valueString.lastIndexOf('.') + 1);
            return super.getListCellRendererComponent(list, valueString, index, isSelected, cellHasFocus);
        }
    });
    jcb.addActionListener(new LayoutChooser(jcb, vv));
    jcb.setSelectedItem(FRLayout.class);

    JPanel controls = new JPanel();
    JPanel zoomControls = new JPanel(new GridLayout(2, 1));
    zoomControls.setBorder(BorderFactory.createTitledBorder("Zoom"));
    zoomControls.add(plus);
    zoomControls.add(minus);
    controls.add(zoomControls);
    JPanel collapseControls = new JPanel(new GridLayout(3, 1));
    collapseControls.setBorder(BorderFactory.createTitledBorder("Picked"));
    collapseControls.add(collapse);
    collapseControls.add(expand);
    collapseControls.add(compressEdges);
    collapseControls.add(expandEdges);
    collapseControls.add(reset);
    controls.add(collapseControls);
    controls.add(modeBox);
    controls.add(help);
    controls.add(jcb);
    content.add(controls, BorderLayout.SOUTH);
}

From source file:android.content.pm.PackageParser.java

private boolean parseKeySets(Package owner, Resources res, XmlPullParser parser, AttributeSet attrs,
        String[] outError) throws XmlPullParserException, IOException {
    // we've encountered the 'key-sets' tag
    // all the keys and keysets that we want must be defined here
    // so we're going to iterate over the parser and pull out the things we want
    int outerDepth = parser.getDepth();
    int currentKeySetDepth = -1;
    int type;
    String currentKeySet = null;
    ArrayMap<String, PublicKey> publicKeys = new ArrayMap<String, PublicKey>();
    ArraySet<String> upgradeKeySets = new ArraySet<String>();
    ArrayMap<String, ArraySet<String>> definedKeySets = new ArrayMap<String, ArraySet<String>>();
    ArraySet<String> improperKeySets = new ArraySet<String>();
    while ((type = parser.next()) != XmlPullParser.END_DOCUMENT
            && (type != XmlPullParser.END_TAG || parser.getDepth() > outerDepth)) {
        if (type == XmlPullParser.END_TAG) {
            if (parser.getDepth() == currentKeySetDepth) {
                currentKeySet = null;
                currentKeySetDepth = -1;
            }
            continue;
        }
        String tagName = parser.getName();
        if (tagName.equals("key-set")) {
            if (currentKeySet != null) {
                outError[0] = "Improperly nested 'key-set' tag at " + parser.getPositionDescription();
                mParseError = PackageManager.INSTALL_PARSE_FAILED_MANIFEST_MALFORMED;
                return false;
            }
            final TypedArray sa = res.obtainAttributes(attrs,
                    com.android.internal.R.styleable.AndroidManifestKeySet);
            final String keysetName = sa
                    .getNonResourceString(com.android.internal.R.styleable.AndroidManifestKeySet_name);
            definedKeySets.put(keysetName, new ArraySet<String>());
            currentKeySet = keysetName;
            currentKeySetDepth = parser.getDepth();
            sa.recycle();
        } else if (tagName.equals("public-key")) {
            if (currentKeySet == null) {
                outError[0] = "Improperly nested 'key-set' tag at " + parser.getPositionDescription();
                mParseError = PackageManager.INSTALL_PARSE_FAILED_MANIFEST_MALFORMED;
                return false;
            }
            final TypedArray sa = res.obtainAttributes(attrs,
                    com.android.internal.R.styleable.AndroidManifestPublicKey);
            final String publicKeyName = sa
                    .getNonResourceString(com.android.internal.R.styleable.AndroidManifestPublicKey_name);
            final String encodedKey = sa
                    .getNonResourceString(com.android.internal.R.styleable.AndroidManifestPublicKey_value);
            if (encodedKey == null && publicKeys.get(publicKeyName) == null) {
                outError[0] = "'public-key' " + publicKeyName + " must define a public-key value"
                        + " on first use at " + parser.getPositionDescription();
                mParseError = PackageManager.INSTALL_PARSE_FAILED_MANIFEST_MALFORMED;
                sa.recycle();
                return false;
            } else if (encodedKey != null) {
                PublicKey currentKey = parsePublicKey(encodedKey);
                if (currentKey == null) {
                    Slog.w(TAG,
                            "No recognized valid key in 'public-key' tag at " + parser.getPositionDescription()
                                    + " key-set " + currentKeySet
                                    + " will not be added to the package's defined key-sets.");
                    sa.recycle();
                    improperKeySets.add(currentKeySet);
                    XmlUtils.skipCurrentTag(parser);
                    continue;
                }
                if (publicKeys.get(publicKeyName) == null || publicKeys.get(publicKeyName).equals(currentKey)) {

                    /* public-key first definition, or matches old definition */
                    publicKeys.put(publicKeyName, currentKey);
                } else {
                    outError[0] = "Value of 'public-key' " + publicKeyName
                            + " conflicts with previously defined value at " + parser.getPositionDescription();
                    mParseError = PackageManager.INSTALL_PARSE_FAILED_MANIFEST_MALFORMED;
                    sa.recycle();
                    return false;
                }
            }
            definedKeySets.get(currentKeySet).add(publicKeyName);
            sa.recycle();
            XmlUtils.skipCurrentTag(parser);
        } else if (tagName.equals("upgrade-key-set")) {
            final TypedArray sa = res.obtainAttributes(attrs,
                    com.android.internal.R.styleable.AndroidManifestUpgradeKeySet);
            String name = sa
                    .getNonResourceString(com.android.internal.R.styleable.AndroidManifestUpgradeKeySet_name);
            upgradeKeySets.add(name);
            sa.recycle();
            XmlUtils.skipCurrentTag(parser);
        } else if (RIGID_PARSER) {
            outError[0] = "Bad element under <key-sets>: " + parser.getName() + " at " + mArchiveSourcePath
                    + " " + parser.getPositionDescription();
            mParseError = PackageManager.INSTALL_PARSE_FAILED_MANIFEST_MALFORMED;
            return false;
        } else {
            Slog.w(TAG, "Unknown element under <key-sets>: " + parser.getName() + " at " + mArchiveSourcePath
                    + " " + parser.getPositionDescription());
            XmlUtils.skipCurrentTag(parser);
            continue;
        }
    }
    Set<String> publicKeyNames = publicKeys.keySet();
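    // removeAll doubles as an overlap check: it returns true (and strips the offending entries) if any 'public-key' name is also used as a 'key-set' name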
    if (publicKeyNames.removeAll(definedKeySets.keySet())) {
        outError[0] = "Package " + owner.packageName + " AndroidManifest.xml "
                + "'key-set' and 'public-key' names must be distinct.";
        mParseError = PackageManager.INSTALL_PARSE_FAILED_MANIFEST_MALFORMED;
        return false;
    }
    owner.mKeySetMapping = new ArrayMap<String, ArraySet<PublicKey>>();
    for (ArrayMap.Entry<String, ArraySet<String>> e : definedKeySets.entrySet()) {
        final String keySetName = e.getKey();
        if (e.getValue().size() == 0) {
            Slog.w(TAG,
                    "Package " + owner.packageName + " AndroidManifest.xml " + "'key-set' " + keySetName
                            + " has no valid associated 'public-key'."
                            + " Not including in package's defined key-sets.");
            continue;
        } else if (improperKeySets.contains(keySetName)) {
            Slog.w(TAG,
                    "Package " + owner.packageName + " AndroidManifest.xml " + "'key-set' " + keySetName
                            + " contained improper 'public-key'"
                            + " tags. Not including in package's defined key-sets.");
            continue;
        }
        owner.mKeySetMapping.put(keySetName, new ArraySet<PublicKey>());
        for (String s : e.getValue()) {
            owner.mKeySetMapping.get(keySetName).add(publicKeys.get(s));
        }
    }
    if (owner.mKeySetMapping.keySet().containsAll(upgradeKeySets)) {
        owner.mUpgradeKeySets = upgradeKeySets;
    } else {
        outError[0] = "Package " + owner.packageName + " AndroidManifest.xml "
                + "does not define all 'upgrade-key-set's.";
        mParseError = PackageManager.INSTALL_PARSE_FAILED_MANIFEST_MALFORMED;
        return false;
    }
    return true;
}

From source file:com.ephesoft.dcma.webservice.EphesoftWebServiceAPI.java

/**
 * To restart Batch Instance.
 * @param identifier {@link String}
 * @param moduleName {@link String}
 * @param resp {@link HttpServletResponse}
 * @param req {@link HttpServletRequest}
 * @return {@link String}
 */
@RequestMapping(value = "/restartBatchInstance/{batchInstanceIdentifier}/{restartAtModuleName}", method = RequestMethod.GET)
@ResponseBody
public String restartBatchInstance(@PathVariable("batchInstanceIdentifier") final String identifier,
        @PathVariable("restartAtModuleName") String moduleName, final HttpServletResponse resp,
        final HttpServletRequest req) {
    LOGGER.info("Start processing web service for restart batch instance");
    boolean isSuccess = false;
    String moduleNameLocal = moduleName;
    Set<String> loggedInUserRole = getUserRoles(req);
    String respStr = WebServiceUtil.EMPTY_STRING;
    if (identifier != null && !identifier.isEmpty()) {
        LOGGER.info("Start processing of restarting batch for batch instance:" + identifier);
        BatchInstance batchInstance = biService.getBatchInstanceByIdentifier(identifier);
        // only batch instance with these status can be restarted
        if (batchInstance != null && (batchInstance.getStatus().equals(BatchInstanceStatus.ERROR)
                || batchInstance.getStatus().equals(BatchInstanceStatus.READY_FOR_REVIEW)
                || batchInstance.getStatus().equals(BatchInstanceStatus.READY_FOR_VALIDATION)
                || batchInstance.getStatus().equals(BatchInstanceStatus.RUNNING))) {
            LOGGER.info("Batch is in a valid state to restart. Restarting batch instance:" + batchInstance);

            Set<String> batchInstanceRoles = biService.getRolesForBatchInstance(batchInstance);
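            // removeAll works as an intersection test: it returns true if the logged-in user shares at least one role with the batch instance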
            if (isSuperAdmin(req) || batchInstanceRoles.removeAll(loggedInUserRole)) {
                LOGGER.info("User is authorized to perform operation on the batch instance:" + identifier);
                final String batchClassIdentifier = biService.getBatchClassIdentifier(identifier);
                String executedBatchInstanceModules = batchInstance.getExecutedModules();
                if (executedBatchInstanceModules != null) {
                    String[] executedModulesArray = executedBatchInstanceModules.split(";");
                    if (batchClassIdentifier != null) {
                        LOGGER.info(
                                "Restarting the batch instance for the  batch class:" + batchClassIdentifier);
                        final BatchClassModule batchClassModuleItem = bcModuleService
                                .getBatchClassModuleByWorkflowName(batchClassIdentifier, moduleNameLocal);
                        if (batchClassModuleItem != null) {
                            for (String executedModule : executedModulesArray) {
                                if (executedModule.equalsIgnoreCase(
                                        String.valueOf(batchClassModuleItem.getModule().getId()))) {
                                    isSuccess = true;
                                    break;
                                }
                            }
                        }
                    }
                } else {
                    isSuccess = true;
                    List<BatchClassModule> batchClassModuleList = batchInstance.getBatchClass()
                            .getBatchClassModules();
                    moduleNameLocal = batchClassModuleList.get(0).getWorkflowName();
                    LOGGER.info("Restarting the batch from the first module: " + moduleNameLocal
                            + ", as the executed module list is empty.");
                }
                final boolean isZipSwitchOn = bsService.isZipSwitchOn();
                LOGGER.info("Zipped Batch XML switch is:" + isZipSwitchOn);

                final String activeModule = workflowService.getActiveModule(batchInstance);
                LOGGER.info("The activeModule of batch:" + activeModule);
                if (isSuccess) {
                    LOGGER.info("All parameters for restarting the batch are valid.");
                    respStr = processRestartingBatchInternal(identifier, moduleNameLocal, respStr,
                            batchInstance, batchClassIdentifier, isZipSwitchOn, activeModule);
                } else {
                    isSuccess = false;
                    List<BatchClassModule> batchClassModules = bcModuleService
                            .getAllBatchClassModulesByIdentifier(batchClassIdentifier);
                    String[] executedModulesArray = executedBatchInstanceModules.split(";");
                    Set<String> executedWorkflows = new HashSet<String>();
                    for (String executedModuleId : executedModulesArray) {
                        for (BatchClassModule batchClassModule : batchClassModules) {
                            if (batchClassModule != null && executedModuleId
                                    .equalsIgnoreCase(String.valueOf(batchClassModule.getModule().getId()))) {
                                executedWorkflows.add(batchClassModule.getWorkflowName());
                                break;
                            }
                        }
                    }
                    respStr = "Invalid parameter for restarting batch instance. Batch is being restarted from a module: "
                            + moduleNameLocal
                            + " that may not have been executed yet or does not exist. Please select a valid module name for restart from the following: "
                            + executedWorkflows.toString();
                    LOGGER.error(SERVER_ERROR_MSG + respStr);
                }
            } else {
                respStr = "User is not authorized to perform operation on this batch instance." + identifier;
                LOGGER.error(SERVER_ERROR_MSG + respStr);
            }
        } else {
            respStr = "Either no batch instance exists with identifier " + identifier
                    + " or the batch has a status from which it cannot be restarted. Batch instance status must be one of:"
                    + " ERROR, READY_FOR_REVIEW, READY_FOR_VALIDATION, RUNNING";
            isSuccess = false;
        }
    }
    if (!respStr.isEmpty()) {
        try {
            resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, respStr);
            LOGGER.error(SERVER_ERROR_MSG + respStr);
        } catch (final IOException ioe) {
            LOGGER.debug(ERROR_WHILE_SENDING_ERROR_RESPONSE_TO_CLIENT + ioe, ioe);
        }
    }
    return (isSuccess ? "Batch restarted successfully from module:" + moduleNameLocal
            : "Failure while restarting batch instance.");
}

From source file:com.ephesoft.dcma.webservice.util.WebServiceHelper.java

/**
 * Deletes a batch instance and returns a success or error message when the request is processed; raises an exception
 * when the request cannot be processed.
 * 
 * @param identifier {@link PathVariable} The path variable which is the identifier of the batch instance to be deleted
 * @param loggedInUserRole {@link Set} Set of the roles the user is assigned to.
 * @param isSuperAdmin {@link Boolean} The flag which determines the user is SuperAdmin or not
 * @return the success/error message when process the request
 * @throws ValidationException When could not validate the input provided
 * @throws UnAuthorisedAccessException When the user is not authorized to make the request.
 * @throws InternalServerException When the request could not be processed by the server or any run time exception being generated
 * @throws DCMAApplicationException
 */
public String deleteBatchInstance(final String identifier, final Set<String> loggedInUserRole,
        final boolean isSuperAdmin)
        throws ValidationException, UnAuthorisedAccessException, InternalServerException {
    boolean isDeleted = false;
    if (StringUtils.isNotBlank(identifier)) {
        final BatchInstance batchInstance = batchInstanceService.getBatchInstanceByIdentifier(identifier);
        // Status for which a batch can be deleted:
        if (batchInstance == null) {
            LOGGER.error(
                    "Error response at server: " + WebServiceConstants.INVALID_BATCH_INSTANCE_IDENTIFIER_MESSAGE
                            + getAdditionalInfo(identifier));
            throw new ValidationException(
                    WebServiceConstants.INVALID_BATCH_INSTANCE_IDENTIFIER_MESSAGE
                            + getAdditionalInfo(identifier),
                    createUnprocessableEntityRestError(
                            WebServiceConstants.INVALID_BATCH_INSTANCE_IDENTIFIER_MESSAGE
                                    + getAdditionalInfo(identifier),
                            WebServiceConstants.INVALID_BATCH_INSTANCE_IDENTIFIER_CODE));
        } else if (BatchInstanceStatus.DELETED.equals(batchInstance.getStatus())) {
            LOGGER.error(
                    "Error response at server: " + WebServiceConstants.BATCH_INSTANCE_ALREADY_DELETED_MESSAGE
                            + getAdditionalInfo(identifier));
            throw new ValidationException(
                    WebServiceConstants.BATCH_INSTANCE_ALREADY_DELETED_MESSAGE + getAdditionalInfo(identifier),
                    createUnprocessableEntityRestError(
                            WebServiceConstants.BATCH_INSTANCE_ALREADY_DELETED_MESSAGE
                                    + getAdditionalInfo(identifier),
                            WebServiceConstants.BATCH_INSTANCE_ALREADY_DELETED_CODE));
        } else if (!BatchInstanceStatus.restartableStatusList().contains(batchInstance.getStatus())) {
            LOGGER.error("Error response at server: "
                    + WebServiceConstants.BATCH_INSTANCE_CANNOT_BE_DELETED_MESSAGE);
            throw new ValidationException(
                    WebServiceConstants.BATCH_INSTANCE_CANNOT_BE_DELETED_MESSAGE
                            + getAdditionalInfo(identifier + " " + batchInstance.getStatus()),
                    createUnprocessableEntityRestError(
                            WebServiceConstants.BATCH_INSTANCE_CANNOT_BE_DELETED_MESSAGE
                                    + getAdditionalInfo(identifier + " " + batchInstance.getStatus()),
                            WebServiceConstants.BATCH_INSTANCE_CANNOT_BE_DELETED_CODE));
        } else if (StringUtils.isNotBlank(batchInstance.getCurrentUser())) {
            // the batch is locked by some user, cannot be restarted/deleted
            LOGGER.error("Error response at server: " + WebServiceConstants.BATCH_INSTANCE_LOCKED_MESSAGE
                    + getAdditionalInfo(identifier));
            throw new InternalServerException(
                    WebServiceConstants.BATCH_INSTANCE_LOCKED_MESSAGE + getAdditionalInfo(identifier),
                    createUnprocessableEntityRestError(
                            WebServiceConstants.BATCH_INSTANCE_LOCKED_MESSAGE + getAdditionalInfo(identifier),
                            WebServiceConstants.BATCH_INSTANCE_LOCKED_CODE));
        } else {
            final Set<String> batchInstanceRoles = batchInstanceService.getRolesForBatchInstance(batchInstance);
            if (isSuperAdmin || batchInstanceRoles.removeAll(loggedInUserRole)) {
                LOGGER.info("Deleting the batch instance:" + identifier);

                BatchInstanceThread batchInstanceThread = ThreadPool.getBatchInstanceThreadList(identifier);
                if (batchInstanceThread != null) {
                    batchInstanceThread.remove();
                    try {
                        Thread.sleep(90000);
                    } catch (InterruptedException e) {
                        LOGGER.info("Unable to sleep for 90000 milliseconds.");
                    }
                }
                pluginPropertiesService.clearCache(identifier);
                engineService.deleteProcessInstanceByBatchInstance(batchInstance, true);
                batchInstance.setStatus(BatchInstanceStatus.DELETED);
                batchInstanceService.updateBatchInstance(batchInstance);
                batchInstanceGroupsService.deleteBatchInstanceFromGrps(identifier);
                final File uncFile = new File(batchInstance.getUncSubfolder());
                LOGGER.info("uncFile for the batch instance:" + uncFile);
                if (null != uncFile) {
                    FileUtils.deleteDirectoryAndContentsRecursive(uncFile);
                    LOGGER.info("Deleted the unc folders of batch instance:" + identifier + " successfully.");
                }
                deleteBatchFolder(batchInstance);
                deleteSerFile(batchInstance);
                isDeleted = true;
            } else {
                throw new UnAuthorisedAccessException();
            }
        }
    }
    return isDeleted ? WebServiceConstants.BATCH_DELETED_SUCCESS_MESSAGE + getAdditionalInfo(identifier)
            : WebServiceConstants.BATCH_DELETED_FAILURE_MESSAGE;
}

From source file:com.datatorrent.stram.plan.physical.PhysicalPlanTest.java

/**
 * MxN partitioning. When source and sink of a stream are partitioned, a
 * separate unifier is created container local with each downstream partition.
 */
@Test
public void testMxNPartitioning() {

    LogicalPlan dag = new LogicalPlan();

    TestGeneratorInputOperator o1 = dag.addOperator("o1", TestGeneratorInputOperator.class);
    dag.setAttribute(o1, OperatorContext.PARTITIONER, new StatelessPartitioner<TestGeneratorInputOperator>(2));
    dag.setAttribute(o1, OperatorContext.STATS_LISTENERS,
            Lists.newArrayList((StatsListener) new PartitioningTest.PartitionLoadWatch()));
    OperatorMeta o1Meta = dag.getMeta(o1);

    GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
    dag.setAttribute(o2, OperatorContext.PARTITIONER, new StatelessPartitioner<TestGeneratorInputOperator>(3));
    dag.setAttribute(o2, OperatorContext.STATS_LISTENERS,
            Arrays.asList(new StatsListener[] { new PartitioningTest.PartitionLoadWatch() }));
    OperatorMeta o2Meta = dag.getMeta(o2);

    dag.addStream("o1.outport1", o1.outport, o2.inport1);

    int maxContainers = 10;
    dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, maxContainers);

    TestPlanContext ctx = new TestPlanContext();
    dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);

    PhysicalPlan plan = new PhysicalPlan(dag, ctx);
    Assert.assertEquals("number of containers", 5, plan.getContainers().size());

    List<PTOperator> inputOperators = new ArrayList<PTOperator>();
    for (int i = 0; i < 2; i++) {
        PTContainer container = plan.getContainers().get(i);
        Assert.assertEquals("number operators " + container, 1, container.getOperators().size());
        Assert.assertEquals("operators " + container, o1Meta.getName(),
                container.getOperators().get(0).getOperatorMeta().getName());
        inputOperators.add(container.getOperators().get(0));
    }

    for (int i = 2; i < 5; i++) {
        PTContainer container = plan.getContainers().get(i);
        Assert.assertEquals("number operators " + container, 2, container.getOperators().size());
        Assert.assertEquals("operators " + container, o2Meta.getName(),
                container.getOperators().get(0).getOperatorMeta().getName());
        Set<String> expectedLogicalNames = Sets
                .newHashSet(o1Meta.getMeta(o1.outport).getUnifierMeta().getName(), o2Meta.getName());
        Map<String, PTOperator> actualOperators = new HashMap<String, PTOperator>();
        for (PTOperator p : container.getOperators()) {
            actualOperators.put(p.getOperatorMeta().getName(), p);
        }
        Assert.assertEquals("", expectedLogicalNames, actualOperators.keySet());

        PTOperator pUnifier = actualOperators.get(o1Meta.getMeta(o1.outport).getUnifierMeta().getName());
        Assert.assertNotNull("" + pUnifier, pUnifier.getContainer());
        Assert.assertTrue("" + pUnifier, pUnifier.isUnifier());
        // input from each upstream partition
        Assert.assertEquals("" + pUnifier, 2, pUnifier.getInputs().size());
        int numberPartitionKeys = (i == 2) ? 2 : 1;
        for (int inputIndex = 0; inputIndex < pUnifier.getInputs().size(); inputIndex++) {
            PTInput input = pUnifier.getInputs().get(inputIndex);
            Assert.assertEquals("" + pUnifier, "outport", input.source.portName);
            Assert.assertEquals("" + pUnifier, inputOperators.get(inputIndex), input.source.source);
            Assert.assertEquals("partition keys " + input.partitions, numberPartitionKeys,
                    input.partitions.partitions.size());
        }
        // output to single downstream partition
        Assert.assertEquals("" + pUnifier, 1, pUnifier.getOutputs().size());
        Assert.assertTrue("" + actualOperators.get(o2Meta.getName()).getOperatorMeta().getOperator(),
                actualOperators.get(o2Meta.getName()).getOperatorMeta()
                        .getOperator() instanceof GenericTestOperator);

        PTOperator p = actualOperators.get(o2Meta.getName());
        Assert.assertEquals("partition inputs " + p.getInputs(), 1, p.getInputs().size());
        Assert.assertEquals("partition inputs " + p.getInputs(), pUnifier, p.getInputs().get(0).source.source);
        Assert.assertEquals("input partition keys " + p.getInputs(), null, p.getInputs().get(0).partitions);
        Assert.assertTrue("partitioned unifier container local " + p.getInputs().get(0).source,
                p.getInputs().get(0).source.isDownStreamInline());
    }

    // Test Dynamic change
    // for M x N partition
    // scale down N from 3 to 2 and then from 2 to 1
    for (int i = 0; i < 2; i++) {
        List<PTOperator> ptos = plan.getOperators(o2Meta);
        Set<PTOperator> expUndeploy = Sets.newHashSet(ptos);
        for (PTOperator ptOperator : ptos) {
            expUndeploy.addAll(ptOperator.upstreamMerge.values());
            PartitioningTest.PartitionLoadWatch.put(ptOperator, -1);
            plan.onStatusUpdate(ptOperator);
        }
        ctx.backupRequests = 0;
        ctx.events.remove(0).run();
        Set<PTOperator> expDeploy = Sets.newHashSet(plan.getOperators(o2Meta));
        // Either unifiers for each partition or single unifier for single partition is expected to be deployed
        expDeploy.addAll(plan.getMergeOperators(o1Meta));
        for (PTOperator ptOperator : plan.getOperators(o2Meta)) {
            expDeploy.addAll(ptOperator.upstreamMerge.values());
        }
        // From 3 to 2 the container count decreases from 5 to 4, but from 2 to 1 it stays the same because the single unifier is not inline with the single operator partition
        Assert.assertEquals("number of containers", 4, plan.getContainers().size());
        Assert.assertEquals("number of operators", 2 - i, plan.getOperators(o2Meta).size());
        Assert.assertEquals("undeployed operators " + ctx.undeploy, expUndeploy, ctx.undeploy);
        Assert.assertEquals("deployed operators " + ctx.deploy, expDeploy, ctx.deploy);
    }

    // scale up N from 1 to 2 and then from 2 to 3
    for (int i = 0; i < 2; i++) {

        List<PTOperator> unChangedOps = new LinkedList<PTOperator>(plan.getOperators(o2Meta));
        PTOperator o2p1 = unChangedOps.remove(0);
        Set<PTOperator> expUndeploy = Sets.newHashSet(o2p1);
        // Either single unifier for one partition or merged unifiers for each partition is expected to be undeployed
        expUndeploy.addAll(plan.getMergeOperators(o1Meta));
        expUndeploy.addAll(o2p1.upstreamMerge.values());
        List<PTOperator> nOps = new LinkedList<PTOperator>();
        for (Iterator<PTOperator> iterator = unChangedOps.iterator(); iterator.hasNext();) {
            PTOperator ptOperator = iterator.next();
            nOps.addAll(ptOperator.upstreamMerge.values());
        }
        unChangedOps.addAll(nOps);

        PartitioningTest.PartitionLoadWatch.put(o2p1, 1);

        plan.onStatusUpdate(o2p1);
        Assert.assertEquals("repartition event", 1, ctx.events.size());
        ctx.backupRequests = 0;
        ctx.events.remove(0).run();

        Assert.assertEquals("N partitions after scale up " + o2Meta, 2 + i, plan.getOperators(o2Meta).size());
        Assert.assertTrue("no unifiers", plan.getMergeOperators(o1Meta).isEmpty());

        for (PTOperator o : plan.getOperators(o2Meta)) {
            Assert.assertNotNull(o.container);
            PTOperator unifier = o.upstreamMerge.values().iterator().next();
            Assert.assertNotNull(unifier.container);
            Assert.assertSame("unifier in same container", o.container, unifier.container);
            Assert.assertEquals("container operators " + o.container,
                    Sets.newHashSet(o.container.getOperators()), Sets.newHashSet(o, unifier));
        }
        Set<PTOperator> expDeploy = Sets.newHashSet(plan.getOperators(o2Meta));
        for (PTOperator ptOperator : plan.getOperators(o2Meta)) {
            expDeploy.addAll(ptOperator.upstreamMerge.values());
        }
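        // Operators that were left untouched are not redeployed, so remove them from the expected deploy set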
        expDeploy.removeAll(unChangedOps);
        Assert.assertEquals("number of containers", 4 + i, plan.getContainers().size());
        Assert.assertEquals("undeployed operators" + ctx.undeploy, expUndeploy, ctx.undeploy);
        Assert.assertEquals("deployed operators" + ctx.deploy, expDeploy, ctx.deploy);

    }

    // scale down M to 1
    {
        Set<PTOperator> expUndeploy = Sets.newHashSet();
        Set<PTOperator> expDeploy = Sets.newHashSet();
        for (PTOperator o2p : plan.getOperators(o2Meta)) {
            expUndeploy.addAll(o2p.upstreamMerge.values());
            expUndeploy.add(o2p);
            expDeploy.add(o2p);
        }

        for (PTOperator o1p : plan.getOperators(o1Meta)) {
            expUndeploy.add(o1p);
            PartitioningTest.PartitionLoadWatch.put(o1p, -1);
            plan.onStatusUpdate(o1p);
        }

        Assert.assertEquals("repartition event", 1, ctx.events.size());
        ctx.events.remove(0).run();

        Assert.assertEquals("M partitions after scale down " + o1Meta, 1, plan.getOperators(o1Meta).size());
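        // The o1 partition that survives the scale-down is not undeployed, so drop it from the expected undeploy set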
        expUndeploy.removeAll(plan.getOperators(o1Meta));

        for (PTOperator o2p : plan.getOperators(o2Meta)) {
            Assert.assertTrue("merge unifier " + o2p + " " + o2p.upstreamMerge, o2p.upstreamMerge.isEmpty());
        }

        Assert.assertEquals("undeploy", expUndeploy, ctx.undeploy);
        Assert.assertEquals("deploy", expDeploy, ctx.deploy);
    }

    // scale up M to 2
    Assert.assertEquals("M partitions " + o1Meta, 1, plan.getOperators(o1Meta).size());
    {
        Set<PTOperator> expUndeploy = Sets.newHashSet();
        Set<PTOperator> expDeploy = Sets.newHashSet();
        for (PTOperator o1p : plan.getOperators(o1Meta)) {
            expUndeploy.add(o1p);
            PartitioningTest.PartitionLoadWatch.put(o1p, 1);
            plan.onStatusUpdate(o1p);
        }

        Assert.assertEquals("repartition event", 1, ctx.events.size());
        ctx.events.remove(0).run();

        Assert.assertEquals("M partitions after scale up " + o1Meta, 2, plan.getOperators(o1Meta).size());
        expDeploy.addAll(plan.getOperators(o1Meta));
        for (PTOperator o2p : plan.getOperators(o2Meta)) {
            expUndeploy.add(o2p);
            expDeploy.add(o2p);
            Assert.assertEquals("merge unifier " + o2p + " " + o2p.upstreamMerge, 1, o2p.upstreamMerge.size());
            expDeploy.addAll(o2p.upstreamMerge.values());
        }
        Assert.assertEquals("undeploy", expUndeploy, ctx.undeploy);
        Assert.assertEquals("deploy", expDeploy, ctx.deploy);
    }

}

From source file:com.ephesoft.dcma.webservice.EphesoftWebServiceAPI.java

/**
 * To add User Roles to Batch Instance.
 * @param identifier {@link String}
 * @param userRole {@link String}
 * @param resp {@link HttpServletResponse}
 * @param req {@link HttpServletRequest}
 * @return {@link String}
 * @throws IOException in case of error
 */
@RequestMapping(value = "/addUserRolesToBatchInstance/{batchInstanceIdentifier}/{userRole}", method = RequestMethod.GET)
@ResponseBody
public String addUserRolesToBatchInstance(@PathVariable("batchInstanceIdentifier") final String identifier,
        @PathVariable("userRole") final String userRole, final HttpServletResponse resp,
        final HttpServletRequest req) throws IOException {
    LOGGER.info("Start processing web service for adding user roles to batch instance identifier");

    String respStr = WebServiceUtil.EMPTY_STRING;
    boolean isSuccess = false;
    if (identifier != null && !identifier.isEmpty()) {
        final BatchInstance batchInstance = biService.getBatchInstanceByIdentifier(identifier);
        Set<String> allRoles = userConnectivityService.getAllGroups();
        if (allRoles != null && allRoles.contains(userRole)) {
            try {
                if (batchInstance != null) {
                    Set<String> batchInstanceRoles = biService.getRolesForBatchInstance(batchInstance);
                    Set<String> loggedInUserRoles = getUserRoles(req);
                    if (isSuperAdmin(req) || batchInstanceRoles.removeAll(loggedInUserRoles)) {
                        batchInstanceGroupsService.addUserRolesToBatchInstanceIdentifier(identifier, userRole);
                        resp.setStatus(HttpServletResponse.SC_OK);
                        isSuccess = true;
                    } else {
                        respStr = "User is not authorized to perform operation on this batch instance."
                                + identifier;
                        LOGGER.error(SERVER_ERROR_MSG + respStr);
                    }
                } else {
                    respStr = "Batch instance does not exist with batch instance identifier " + identifier;
                    LOGGER.error(SERVER_ERROR_MSG + respStr);
                }
            } catch (final Exception e) {
                respStr = "Error in adding roles to batch instance identifier: " + identifier + "."
                        + e.getMessage();
                LOGGER.error(SERVER_ERROR_MSG + respStr);
            }
        } else {
            respStr = "Invalid role provided. This is not amongst the list of valid roles.";
        }
    }
    if (!respStr.isEmpty()) {
        try {
            resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, respStr);
            LOGGER.error(SERVER_ERROR_MSG + respStr);
        } catch (final IOException ioe) {
            LOGGER.info(ERROR_WHILE_SENDING_ERROR_RESPONSE_TO_CLIENT + ioe, ioe);
        }
    }
    return (isSuccess ? "User role is added successfully to batch instance."
            : "Failure while adding roles to batch instance: " + identifier + ".");
}