Example usage for java.util Set remove

Introduction

This page collects example usages of java.util.Set.remove, drawn from open-source projects.

Prototype

boolean remove(Object o);

Document

Removes the specified element from this set if it is present (optional operation). Returns true if this set contained the element (or equivalently, if this set changed as a result of the call).
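
Before the project examples below, here is a minimal self-contained sketch of the remove contract (the class and value names are illustrative, not taken from any of the projects):

import java.util.HashSet;
import java.util.Set;

public class SetRemoveDemo {
    public static void main(String[] args) {
        Set<String> colors = new HashSet<>();
        colors.add("red");
        colors.add("green");

        // remove returns true when the set contained the element
        boolean removed = colors.remove("red");      // true; set is now [green]
        // and false when it did not, leaving the set unchanged
        boolean removedAgain = colors.remove("red"); // false

        System.out.println(removed + " " + removedAgain + " " + colors);
    }
}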

Usage

From source file:com.ushahidi.swiftriver.core.api.dao.impl.JpaPlaceDao.java

/**
 * Populate the droplet places table.
 * 
 * @param drops
 */
private void insertDropletPlaces(List<Drop> drops) {

    // List of drop IDs in the drops list
    List<Long> dropIds = new ArrayList<Long>();
    // Map of each drop ID to the set of place IDs in that drop
    Map<Long, Set<Long>> dropletPlacesMap = new HashMap<Long, Set<Long>>();
    for (Drop drop : drops) {

        if (drop.getPlaces() == null)
            continue;

        dropIds.add(drop.getId());

        for (Place place : drop.getPlaces()) {
            Set<Long> places = null;
            if (dropletPlacesMap.containsKey(drop.getId())) {
                places = dropletPlacesMap.get(drop.getId());
            } else {
                places = new HashSet<Long>();
                dropletPlacesMap.put(drop.getId(), places);
            }

            places.add(place.getId());
        }
    }

    // Find droplet places that already exist in the db
    String sql = "SELECT droplet_id, place_id FROM droplets_places WHERE droplet_id in (:ids)";

    MapSqlParameterSource params = new MapSqlParameterSource();
    params.addValue("ids", dropIds);

    List<Map<String, Object>> results = this.namedJdbcTemplate.queryForList(sql, params);

    // Remove already existing droplet_places from our Set
    for (Map<String, Object> result : results) {
        long dropletId = ((Number) result.get("droplet_id")).longValue();
        long placeId = ((Number) result.get("place_id")).longValue();

        Set<Long> placeSet = dropletPlacesMap.get(dropletId);
        if (placeSet != null) {
            placeSet.remove(placeId);
        }
    }

    // Insert the remaining items in the set into the db
    sql = "INSERT INTO droplets_places (droplet_id, place_id) VALUES (?,?)";

    final List<long[]> dropletPlacesList = new ArrayList<long[]>();
    for (Long dropletId : dropletPlacesMap.keySet()) {
        for (Long placeId : dropletPlacesMap.get(dropletId)) {
            long[] dropletPlace = { dropletId, placeId };
            dropletPlacesList.add(dropletPlace);
        }
    }
    jdbcTemplate.batchUpdate(sql, new BatchPreparedStatementSetter() {
        public void setValues(PreparedStatement ps, int i) throws SQLException {
            long[] dropletPlace = dropletPlacesList.get(i);
            ps.setLong(1, dropletPlace[0]);
            ps.setLong(2, dropletPlace[1]);
        }

        public int getBatchSize() {
            return dropletPlacesList.size();
        }
    });
}

From source file:edu.cornell.mannlib.vitro.webapp.visualization.coauthorship.CoAuthorshipVisCodeGenerator.java

/**
 * This method is used to setup parameters for the sparkline value object. These parameters
 * will be used in the template to construct the actual html/javascript code.
 * @param visMode
 * @param providedVisContainerID
 */
private SparklineData setupSparklineParameters(String visMode, String providedVisContainerID) {

    SparklineData sparklineData = new SparklineData();

    int numOfYearsToBeRendered = 0;

    /*
     * A downward curve at the end of the sparkline, caused merely by the current
     * year having no publications yet, seemed misleading, so we consider only
     * publications from the last 10 complete years.
     * */
    int currentYear = Calendar.getInstance().get(Calendar.YEAR) - 1;
    int shortSparkMinYear = currentYear - VisConstants.MINIMUM_YEARS_CONSIDERED_FOR_SPARKLINE + 1;

    /*
     * This is required because, when deciding the range of years over which the
     * vis is rendered, we don't want to be influenced by the
     * "DEFAULT_PUBLICATION_YEAR" placeholder.
     * */
    Set<String> publishedYears = new HashSet<String>(yearToUniqueCoauthors.keySet());
    publishedYears.remove(VOConstants.DEFAULT_PUBLICATION_YEAR);

    /*
     * We default minPublishedYear to 10 years before the current year (which is
     * represented by shortSparkMinYear), in case we run into an invalid set of
     * published years.
     * */
    int minPublishedYear = shortSparkMinYear;

    String visContainerID = null;

    if (yearToUniqueCoauthors.size() > 0) {
        try {
            minPublishedYear = Integer.parseInt(Collections.min(publishedYears));
        } catch (NoSuchElementException e1) {
            log.debug("vis: " + e1.getMessage() + " error occurred for " + yearToUniqueCoauthors.toString());
        } catch (NumberFormatException e2) {
            log.debug("vis: " + e2.getMessage() + " error occurred for " + yearToUniqueCoauthors.toString());
        }
    }

    int minPubYearConsidered = 0;

    /*
     * The author might have made their first publication within the last 10 years,
     * but we want the sparkline to represent at least the last 10 years, so we set
     * minPubYearConsidered to "currentYear - 10", which is also given by
     * "shortSparkMinYear".
     * */
    if (minPublishedYear > shortSparkMinYear) {
        minPubYearConsidered = shortSparkMinYear;
    } else {
        minPubYearConsidered = minPublishedYear;
    }

    numOfYearsToBeRendered = currentYear - minPubYearConsidered + 1;

    sparklineData.setNumOfYearsToBeRendered(numOfYearsToBeRendered);

    int uniqueCoAuthorCounter = 0;
    Set<Collaborator> allCoAuthorsWithKnownAuthorshipYears = new HashSet<Collaborator>();
    List<YearToEntityCountDataElement> yearToUniqueCoauthorsCountDataTable = new ArrayList<YearToEntityCountDataElement>();

    for (int publicationYear = minPubYearConsidered; publicationYear <= currentYear; publicationYear++) {

        String publicationYearAsString = String.valueOf(publicationYear);
        Set<Collaborator> currentCoAuthors = yearToUniqueCoauthors.get(publicationYearAsString);

        Integer currentUniqueCoAuthors = null;

        if (currentCoAuthors != null) {
            currentUniqueCoAuthors = currentCoAuthors.size();
            allCoAuthorsWithKnownAuthorshipYears.addAll(currentCoAuthors);
        } else {
            currentUniqueCoAuthors = 0;
        }

        yearToUniqueCoauthorsCountDataTable.add(new YearToEntityCountDataElement(uniqueCoAuthorCounter,
                publicationYearAsString, currentUniqueCoAuthors));
        uniqueCoAuthorCounter++;
    }

    /*
     * For the purpose of this visualization we use the term "sparks", which
     * essentially means data points. Sparks rendered in full mode are always
     * the ones that have a known year associated with them.
     * */
    sparklineData.setRenderedSparks(allCoAuthorsWithKnownAuthorshipYears.size());

    sparklineData.setYearToEntityCountDataTable(yearToUniqueCoauthorsCountDataTable);

    /*
     * This is required only for sparklines that convey collaborations, such as
     * co-investigatorships and co-authorships. There are edge cases where a
     * collaborator appears in collaborations with both known & unknown years; we
     * do not want to count that collaborator twice when presenting the total in
     * the front-end.
     * */
    Set<Collaborator> totalUniqueCoInvestigators = new HashSet<Collaborator>(
            allCoAuthorsWithKnownAuthorshipYears);

    /*
     * Total publications also include publications that have no year associated
     * with them.
     * */
    Integer unknownYearCoauthors = 0;
    if (yearToUniqueCoauthors.get(VOConstants.DEFAULT_PUBLICATION_YEAR) != null) {
        unknownYearCoauthors = yearToUniqueCoauthors.get(VOConstants.DEFAULT_PUBLICATION_YEAR).size();

        totalUniqueCoInvestigators.addAll(yearToUniqueCoauthors.get(VOConstants.DEFAULT_PUBLICATION_YEAR));
    }

    sparklineData.setUnknownYearPublications(unknownYearCoauthors);

    sparklineData.setTotalCollaborationshipCount(totalUniqueCoInvestigators.size());

    if (providedVisContainerID != null) {
        visContainerID = providedVisContainerID;
    } else {
        visContainerID = DEFAULT_VISCONTAINER_DIV_ID;
    }

    sparklineData.setVisContainerDivID(visContainerID);

    /*
     * By default these represent the range of the rendered sparks. Only in
     * "short" sparkline mode do we set the earliest rendered publication year
     * to "currentYear - 10".
     * */
    sparklineData.setEarliestYearConsidered(minPubYearConsidered);
    sparklineData.setEarliestRenderedPublicationYear(minPublishedYear);
    sparklineData.setLatestRenderedPublicationYear(currentYear);

    /*
     * The full sparkline is rendered by default. The short sparkline is rendered
     * only if the URL explicitly mentions SHORT_SPARKLINE_MODE_KEY.
     * */
    if (VisualizationFrameworkConstants.SHORT_SPARKLINE_VIS_MODE.equalsIgnoreCase(visMode)) {

        sparklineData.setEarliestRenderedPublicationYear(shortSparkMinYear);
        sparklineData.setShortVisMode(true);

    } else {
        sparklineData.setShortVisMode(false);
    }

    if (yearToUniqueCoauthors.size() > 0) {

        sparklineData.setFullTimelineNetworkLink(UtilityFunctions.getCollaboratorshipNetworkLink(individualURI,
                VisualizationFrameworkConstants.PERSON_LEVEL_VIS,
                VisualizationFrameworkConstants.COAUTHOR_VIS_MODE));

        sparklineData.setDownloadDataLink(UtilityFunctions.getCSVDownloadURL(individualURI,
                VisualizationFrameworkConstants.COAUTHORSHIP_VIS,
                VisualizationFrameworkConstants.COAUTHORS_COUNT_PER_YEAR_VIS_MODE));

        Map<String, Integer> yearToUniqueCoauthorsCount = new HashMap<String, Integer>();

        for (Map.Entry<String, Set<Collaborator>> currentYearToCoAuthors : yearToUniqueCoauthors.entrySet()) {
            yearToUniqueCoauthorsCount.put(currentYearToCoAuthors.getKey(),
                    currentYearToCoAuthors.getValue().size());
        }

        sparklineData.setYearToActivityCount(yearToUniqueCoauthorsCount);
    }

    return sparklineData;
}

From source file:net.groupbuy.controller.shop.CartController.java

/**
 * Removes a cart item from the current cart.
 */
@RequestMapping(value = "/delete", method = RequestMethod.POST)
public @ResponseBody Map<String, Object> delete(Long id) {
    Map<String, Object> data = new HashMap<String, Object>();
    Cart cart = cartService.getCurrent();
    if (cart == null || cart.isEmpty()) {
        data.put("message", Message.error("shop.cart.notEmpty"));
        return data;
    }
    CartItem cartItem = cartItemService.find(id);
    Set<CartItem> cartItems = cart.getCartItems();
    if (cartItem == null || cartItems == null || !cartItems.contains(cartItem)) {
        data.put("message", Message.error("shop.cart.cartItemNotExsit"));
        return data;
    }
    cartItems.remove(cartItem);
    cartItemService.delete(cartItem);

    data.put("message", SUCCESS_MESSAGE);
    data.put("quantity", cart.getQuantity());
    data.put("effectivePoint", cart.getEffectivePoint());
    data.put("effectivePrice", cart.getEffectivePrice());
    data.put("promotions", cart.getPromotions());
    data.put("isLowStock", cart.getIsLowStock());
    return data;
}

From source file:mitm.common.postfix.PostfixMainConfigBuilder.java

public void setMatchSubdomains(boolean match) {
    Set<String> matchingDomains = splitMultivalue(getValue(PARENT_DOMAIN_MATCHES_SUBDOMAINS, ""));

    if (match) {
        matchingDomains.add("relay_domains");
    } else {
        matchingDomains.remove("relay_domains");
    }

    setValue(PARENT_DOMAIN_MATCHES_SUBDOMAINS, StringUtils.join(matchingDomains, ", "));
}

From source file:org.craftercms.profile.services.TenantServiceIT.java

@Test
public void testRemoveRoles() throws Exception {
    tenantService.createTenant(getCorporateTenant());
    try {
        Tenant tenant = tenantService.removeRoles(CORPORATE_TENANT_NAME, Arrays.asList(ADMIN_ROLE));

        Set<String> expectedRoles = new HashSet<>(CORPORATE_ROLES);
        expectedRoles.remove(ADMIN_ROLE);

        assertNotNull(tenant);
        assertEquals(expectedRoles, tenant.getAvailableRoles());
    } finally {
        tenantService.deleteTenant(CORPORATE_TENANT_NAME);
    }
}

From source file:net.brtly.monkeyboard.plugin.core.PluginManager.java

/**
 * Return a set of plugin IDs that map to available plugins matching a list of filters.
 * If any given filter DOES NOT apply to a plugin, it is removed from the set.
 * @param filters the filters to apply; a plugin failing any filter is excluded
 * @return an unmodifiable set of plugin IDs matching all given filters
 */
public Set<String> getPluginIDs(IPluginFilter... filters) {
    if (filters == null || filters.length == 0) {
        return Collections.unmodifiableSet(_plugins.keySet());
    }

    Set<String> rv;
    synchronized (_plugins) {
        rv = new HashSet<String>(_plugins.keySet());

        for (String id : _plugins.keySet()) {
            PluginLoader loader = getPluginLoader(id);
            if (loader == null) {
                rv.remove(id);
            } else {
                for (IPluginFilter filter : filters) {
                    if (!filter.appliesTo(loader)) {
                        rv.remove(id);
                    }
                }
            }
        }
    }
    return Collections.unmodifiableSet(rv);
}

From source file:org.cyberjos.jcconf2014.node.HazelcastHelper.java

/**
 * Creates and returns a new producer.
 *
 * @param cloudNode the cloud node which runs this new producer
 * @return a new producer
 */
private Runnable createProducer(final CloudNode cloudNode) {
    return () -> {
        logger.info("Producer thread started.");
        while (this.isMaster(cloudNode)) {
            try {
                final Set<String> nodes = new HashSet<>(HazelcastHelper.this.getActiveNodes());
                nodes.remove(cloudNode.getName());
                if (nodes.size() > 0) {
                    final IAtomicLong serialNumber = getAtomicLong(TASK_NUMBER);
                    final String taskName = String.format("TASK-%d-%d", serialNumber.incrementAndGet(),
                            System.currentTimeMillis());
                    HazelcastHelper.this.getTaskQueue().put(taskName);
                    logger.info("Added task {}", taskName);
                }
                Thread.sleep(RandomUtils.nextInt(1500, 4000));
            } catch (final Exception ex) {
                logger.error("Exception occurred!", ex);
            }
        }
    };
}

From source file:com.devicehive.websockets.handlers.CommandHandlers.java

private Set<String> prepareActualList(Set<String> deviceIdSet, final String deviceId) {
    if (deviceId == null && deviceIdSet == null) {
        return null;
    }
    if (deviceIdSet != null && deviceId == null) {
        deviceIdSet.remove(null);
        return deviceIdSet;
    }
    if (deviceIdSet == null) {
        return new HashSet<String>() {
            {
                add(deviceId);
            }

            private static final long serialVersionUID = -8657632518613033661L;
        };
    }
    throw new HiveException(Messages.INVALID_REQUEST_PARAMETERS, SC_BAD_REQUEST);
}

From source file:com.ushahidi.swiftriver.core.api.dao.impl.JpaLinkDao.java

/**
 * Populate the droplet links table.
 * 
 * @param drops
 */
private void insertDropletLinks(List<Drop> drops) {

    // List of drop IDs in the drops list
    List<Long> dropIds = new ArrayList<Long>();
    // Map of each drop ID to the set of link IDs in that drop
    Map<Long, Set<Long>> dropletLinksMap = new HashMap<Long, Set<Long>>();
    // List of drops and the link that is their original url
    final List<long[]> originalUrls = new ArrayList<long[]>();
    for (Drop drop : drops) {

        if (drop.getLinks() == null)
            continue;

        dropIds.add(drop.getId());

        for (Link link : drop.getLinks()) {
            Set<Long> links = null;
            if (dropletLinksMap.containsKey(drop.getId())) {
                links = dropletLinksMap.get(drop.getId());
            } else {
                links = new HashSet<Long>();
                dropletLinksMap.put(drop.getId(), links);
            }

            // Is this link the original url?
            if (drop.getOriginalUrl() != null && link.getUrl().equals(drop.getOriginalUrl().getUrl())) {
                long[] originalUrl = { drop.getId(), link.getId() };
                originalUrls.add(originalUrl);
            }

            links.add(link.getId());
        }
    }

    // Find droplet links that already exist in the db
    String sql = "SELECT droplet_id, link_id FROM droplets_links WHERE droplet_id in (:ids)";

    MapSqlParameterSource params = new MapSqlParameterSource();
    params.addValue("ids", dropIds);

    List<Map<String, Object>> results = this.namedJdbcTemplate.queryForList(sql, params);

    // Remove already existing droplet_links from our Set
    for (Map<String, Object> result : results) {
        long dropletId = ((Number) result.get("droplet_id")).longValue();
        long linkId = ((Number) result.get("link_id")).longValue();

        Set<Long> linkSet = dropletLinksMap.get(dropletId);
        if (linkSet != null) {
            linkSet.remove(linkId);
        }
    }

    // Insert the remaining items in the set into the db
    sql = "INSERT INTO droplets_links (droplet_id, link_id) VALUES (?,?)";

    final List<long[]> dropletLinksList = new ArrayList<long[]>();
    for (Long dropletId : dropletLinksMap.keySet()) {
        for (Long linkId : dropletLinksMap.get(dropletId)) {
            long[] dropletLink = { dropletId, linkId };
            dropletLinksList.add(dropletLink);
        }
    }
    jdbcTemplate.batchUpdate(sql, new BatchPreparedStatementSetter() {
        public void setValues(PreparedStatement ps, int i) throws SQLException {
            long[] dropletLink = dropletLinksList.get(i);
            ps.setLong(1, dropletLink[0]);
            ps.setLong(2, dropletLink[1]);
        }

        public int getBatchSize() {
            return dropletLinksList.size();
        }
    });

    if (originalUrls.size() > 0) {
        sql = "UPDATE droplets SET original_url = ? WHERE id = ?";
        jdbcTemplate.batchUpdate(sql, new BatchPreparedStatementSetter() {
            public void setValues(PreparedStatement ps, int i) throws SQLException {
                long[] update = originalUrls.get(i);
                ps.setLong(1, update[1]);
                ps.setLong(2, update[0]);
            }

            public int getBatchSize() {
                return originalUrls.size();
            }
        });
    }
}

From source file:de.tudarmstadt.ukp.experiments.argumentation.sequence.evaluation.ArgumentSequenceLabelingEvaluation.java

public Map<String, Object> createDimReaders(DocumentDomain documentDomain, boolean inDomain,
        String corpusFilePathTrain, Set<DocumentRegister> trainingRegister,
        Set<DocumentRegister> testRegister) {
    Map<String, Object> result = new HashMap<>();

    // we take all documents regardless of domain
    if (documentDomain == null) {
        // normal CV
        if (trainingRegister == null && testRegister == null) {
            result.put(Constants.DIM_READER_TRAIN, ArgumentSequenceSentenceLevelReader.class);
            result.put(Constants.DIM_READER_TRAIN_PARAMS,
                    Arrays.asList(ArgumentSequenceSentenceLevelReader.PARAM_SOURCE_LOCATION,
                            corpusFilePathTrain, ArgumentSequenceSentenceLevelReader.PARAM_PATTERNS,
                            ArgumentSequenceSentenceLevelReader.INCLUDE_PREFIX + "*.xmi",
                            ArgumentSequenceSentenceLevelReader.PARAM_LENIENT, false));
        } else {
            // we have cross-register train-test
            result.put(Constants.DIM_READER_TRAIN, ArgumentSequenceSentenceLevelReader.class);
            result.put(Constants.DIM_READER_TRAIN_PARAMS,
                    Arrays.asList(ArgumentSequenceSentenceLevelReader.PARAM_SOURCE_LOCATION,
                            corpusFilePathTrain, ArgumentSequenceSentenceLevelReader.PARAM_PATTERNS,
                            ArgumentSequenceSentenceLevelReader.INCLUDE_PREFIX + "*.xmi",
                            ArgumentSequenceSentenceLevelReader.PARAM_DOCUMENT_REGISTER,
                            StringUtils.join(trainingRegister, " ")));
            result.put(Constants.DIM_READER_TEST, ArgumentSequenceSentenceLevelReader.class);
            result.put(Constants.DIM_READER_TEST_PARAMS,
                    Arrays.asList(ArgumentSequenceSentenceLevelReader.PARAM_SOURCE_LOCATION,
                            corpusFilePathTrain, ArgumentSequenceSentenceLevelReader.PARAM_PATTERNS,
                            ArgumentSequenceSentenceLevelReader.INCLUDE_PREFIX + "*.xmi",
                            ArgumentSequenceSentenceLevelReader.PARAM_LENIENT, false,
                            ArgumentSequenceSentenceLevelReader.PARAM_DOCUMENT_REGISTER,
                            StringUtils.join(testRegister, " ")));
        }
    } else {
        if (inDomain) {
            // in domain cross validation
            result.put(Constants.DIM_READER_TRAIN, ArgumentSequenceSentenceLevelReader.class);
            result.put(Constants.DIM_READER_TRAIN_PARAMS,
                    Arrays.asList(ArgumentSequenceSentenceLevelReader.PARAM_SOURCE_LOCATION,
                            corpusFilePathTrain, ArgumentSequenceSentenceLevelReader.PARAM_PATTERNS,
                            ArgumentSequenceSentenceLevelReader.INCLUDE_PREFIX + "*.xmi",
                            ArgumentSequenceSentenceLevelReader.PARAM_LENIENT, false,
                            ArgumentSequenceSentenceLevelReader.PARAM_DOCUMENT_DOMAIN,
                            documentDomain.toString()));
        } else {
            // get all domains minus documentDomain
            Set<DocumentDomain> trainingDomains = new HashSet<>();
            trainingDomains.addAll(Arrays.asList(DocumentDomain.values()));
            trainingDomains.remove(documentDomain);
            String trainingDomainsAsParam = StringUtils.join(trainingDomains, " ");

            // we have cross-domain train-test (param documentDomain is the test domain)
            result.put(Constants.DIM_READER_TRAIN, ArgumentSequenceSentenceLevelReader.class);
            result.put(Constants.DIM_READER_TRAIN_PARAMS,
                    Arrays.asList(ArgumentSequenceSentenceLevelReader.PARAM_SOURCE_LOCATION,
                            corpusFilePathTrain, ArgumentSequenceSentenceLevelReader.PARAM_PATTERNS,
                            ArgumentSequenceSentenceLevelReader.INCLUDE_PREFIX + "*.xmi",
                            ArgumentSequenceSentenceLevelReader.PARAM_LENIENT, false,
                            ArgumentSequenceSentenceLevelReader.PARAM_DOCUMENT_DOMAIN, trainingDomainsAsParam));

            // the test reader gets only the held-out documentDomain
            result.put(Constants.DIM_READER_TEST, ArgumentSequenceSentenceLevelReader.class);
            result.put(Constants.DIM_READER_TEST_PARAMS,
                    Arrays.asList(ArgumentSequenceSentenceLevelReader.PARAM_SOURCE_LOCATION,
                            corpusFilePathTrain, ArgumentSequenceSentenceLevelReader.PARAM_PATTERNS,
                            ArgumentSequenceSentenceLevelReader.INCLUDE_PREFIX + "*.xmi",
                            ArgumentSequenceSentenceLevelReader.PARAM_LENIENT, false,
                            ArgumentSequenceSentenceLevelReader.PARAM_DOCUMENT_DOMAIN,
                            documentDomain.toString()));
        }
    }

    return result;
}