Example usage for java.util.Set.contains

List of usage examples for java.util.Set.contains

Introduction

On this page you can find example usage for java.util.Set.contains.

Prototype

boolean contains(Object o);

Document

Returns true if this set contains the specified element.
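
A minimal, runnable sketch with illustrative values (not taken from any of the sources below):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class SetContainsExample {
    public static void main(String[] args) {
        Set<String> tags = new HashSet<>(Arrays.asList("alpha", "beta"));
        System.out.println(tags.contains("alpha")); // true
        System.out.println(tags.contains("gamma")); // false
    }
}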

Usage

From source file:eu.stratosphere.nephele.jobmanager.scheduler.RecoveryLogic.java

private static void findVerticesToRestart(final ExecutionVertex failedVertex,
        final Set<ExecutionVertex> verticesToBeCanceled) {

    final Queue<ExecutionVertex> verticesToTest = new ArrayDeque<ExecutionVertex>();
    final Set<ExecutionVertex> visited = new HashSet<ExecutionVertex>();
    verticesToTest.add(failedVertex);

    while (!verticesToTest.isEmpty()) {

        final ExecutionVertex vertex = verticesToTest.poll();

        // Predecessors must be either checkpoints or need to be restarted, too
        for (int j = 0; j < vertex.getNumberOfPredecessors(); j++) {
            final ExecutionVertex predecessor = vertex.getPredecessor(j);

            if (hasInstanceAssigned(predecessor)) {
                verticesToBeCanceled.add(predecessor);
            }

            if (!visited.contains(predecessor)) {
                verticesToTest.add(predecessor);
            }
        }
        visited.add(vertex);
    }
}

From source file:de.xwic.sandbox.base.model.StringUtil.java

/**
 * @param emailsString
 * @return list of emails computed from the emailsString parameter
 */
public static List<String> parseEmails(String emailsString) {
    // preserve order
    Set<String> emails = new LinkedHashSet<String>();

    if (emailsString != null && !emailsString.trim().isEmpty()) {
        String[] arr = emailsString.split(";");

        for (String str : arr) {
            str = str.trim();
            if (!str.isEmpty() && !emails.contains(str)) {
                //   if (isValidEmailAddress(str)) {
                emails.add(str);
                //      } else {
                //         log.warn("Found bad address in '".concat(emailsString).concat("' : ").concat(str));
                //      }
            }
        }
    }

    return new ArrayList<String>(emails);
}
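
A hypothetical call, assuming the method above is available; the LinkedHashSet keeps only the first occurrence of each address and preserves input order:

List<String> emails = StringUtil.parseEmails("a@example.com; b@example.com ; a@example.com");
// -> [a@example.com, b@example.com]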

From source file:com.hmsinc.epicenter.classifier.util.ClassifierUtils.java

public static CharSequence filter(final CharSequence complaint, final Set<String> stopwords) {

    String ret = "";
    if (complaint != null) {

        // Lowercase, alphabetic only, remove extra spaces..
        final String cleaned = StringUtils.trimToNull(complaint.toString().toLowerCase(Locale.getDefault())
                .replaceAll("h/a", "headache").replaceAll("n/v", "nausea vomiting").replaceAll("[/,]", " ")
                .replaceAll("[^a-z\\s]", " "));

        if (cleaned != null) {

            final StringBuilder buf = new StringBuilder();

            final String[] sp = cleaned.split("\\s");

            for (int i = 0; i < sp.length; i++) {
                if (sp[i] != null && sp[i].length() > 1 && !stopwords.contains(sp[i])) {
                    if (buf.length() > 0) {
                        buf.append(" ");
                    }
                    buf.append(sp[i]);
                }
            }
            if (buf.length() > 0) {
                ret = buf.toString();
            }
        }
    }
    return ret;
}

From source file:de.xwic.sandbox.server.installer.StringUtil.java

/**
 * @param emailsString
 * @return list of emails computed from the emailsString parameter
 */
public static List<String> parseEmails(String emailsString) {
    // preserve order
    Set<String> emails = new LinkedHashSet<String>();

    if (emailsString != null && !emailsString.trim().isEmpty()) {
        String[] arr = emailsString.split(";");

        for (String str : arr) {
            str = str.trim();
            if (!str.isEmpty() && !emails.contains(str)) {
                if (isValidEmailAddress(str)) {
                    emails.add(str);
                } else {
                    log.warn("Found bad address in '".concat(emailsString).concat("' : ").concat(str));
                }
            }
        }
    }

    return new ArrayList<String>(emails);
}

From source file:com.hmsinc.epicenter.classifier.util.ClassifierUtils.java

public static CharSequence filterAllowNumbers(final CharSequence complaint, final Set<String> stopwords) {
    String ret = "";
    if (complaint != null) {

        // Lowercase, alphabetic only, remove extra spaces..
        final String cleaned = StringUtils.trimToNull(complaint.toString().toLowerCase(Locale.getDefault())
                .replaceAll("h/a", "headache").replaceAll("n/v", "nausea vomiting").replaceAll("[/,]", " ")
                .replaceAll("[^a-z\\s\\d]", " "));

        if (cleaned != null) {

            final StringBuilder buf = new StringBuilder();

            final String[] sp = cleaned.split("\\s");

            for (int i = 0; i < sp.length; i++) {
                if (sp[i] != null && sp[i].length() > 1 && !stopwords.contains(sp[i])) {
                    if (buf.length() > 0) {
                        buf.append(" ");
                    }
                    buf.append(sp[i]);
                }
            }
            if (buf.length() > 0) {
                ret = buf.toString();
            }
        }
    }
    return ret;
}

From source file:com.ikanow.aleph2.analytics.spark.utils.SparkTechnologyUtils.java

/** Builds objects for all the aleph2 inputs and provides a method to use them in context-dependent ways 
 * @param context
 * @param bucket
 * @param job
 * @param config
 * @param per_input_action - user lambda that determines how they are used
 */
public static final void buildAleph2Inputs(final IAnalyticsContext context, final DataBucketBean bucket,
        final AnalyticThreadJobBean job, final Optional<ProcessingTestSpecBean> maybe_test_spec,
        final Configuration config, final Set<String> exclude_names,
        BiConsumer<AnalyticThreadJobInputBean, Job> per_input_action) {
    transformInputBean(Optionals.ofNullable(job.inputs()).stream(), maybe_test_spec)
            .filter(input -> !exclude_names.contains(input.name()))
            .forEach(Lambdas.wrap_consumer_u(input_with_test_settings -> {

                final Optional<IBucketLogger> a2_logger = Optional
                        .ofNullable(context.getLogger(Optional.of(bucket)));

                final List<String> paths = context.getInputPaths(Optional.empty(), job,
                        input_with_test_settings);

                if (!paths.isEmpty()) {

                    _logger.info(ErrorUtils.get("Adding storage paths for bucket {0}: {1}", bucket.full_name(),
                            paths.stream().collect(Collectors.joining(";"))));

                    a2_logger.ifPresent(l -> l.log(Level.INFO, true,
                            () -> ErrorUtils.get("Adding storage paths for bucket {0}: {1}", bucket.full_name(),
                                    paths.stream().collect(Collectors.joining(";"))),
                            () -> SparkTechnologyService.class.getSimpleName() + "."
                                    + Optional.ofNullable(job.name()).orElse("no_name"),
                            () -> "startAnalyticJobOrTest"));

                    //DEBUG
                    //System.out.println(ErrorUtils.get("Adding storage paths for bucket {0}: {1}", bucket.full_name(), paths.stream().collect(Collectors.joining(";"))));   

                    final Job input_job = Job.getInstance(config);
                    input_job.setInputFormatClass(BeFileInputFormat_Pure.class);
                    paths.stream().forEach(Lambdas
                            .wrap_consumer_u(path -> FileInputFormat.addInputPath(input_job, new Path(path))));
                    // (Add the input config in)
                    input_job.getConfiguration().set(HadoopBatchEnrichmentUtils.BE_BUCKET_INPUT_CONFIG,
                            BeanTemplateUtils.toJson(input_with_test_settings).toString());
                    per_input_action.accept(input_with_test_settings, input_job);
                } else { // not easily available in HDFS directory format, try getting from the context

                    Optional<HadoopBatchEnrichmentUtils.HadoopAccessContext> input_format_info = context
                            .getServiceInput(HadoopBatchEnrichmentUtils.HadoopAccessContext.class,
                                    Optional.empty(), job, input_with_test_settings);
                    if (!input_format_info.isPresent()) {
                        _logger.warn(ErrorUtils.get("Tried but failed to get input format from {0}",
                                BeanTemplateUtils.toJson(input_with_test_settings)));

                        a2_logger.ifPresent(l -> l.log(Level.WARN, true,
                                () -> ErrorUtils.get("Tried but failed to get input format from {0}",
                                        BeanTemplateUtils.toJson(input_with_test_settings)),
                                () -> SparkTechnologyService.class.getSimpleName() + "."
                                        + Optional.ofNullable(job.name()).orElse("no_name"),
                                () -> "startAnalyticJobOrTest"));

                        //DEBUG
                        //System.out.println(ErrorUtils.get("Tried but failed to get input format from {0}", BeanTemplateUtils.toJson(input_with_test_settings)));
                    } else {
                        _logger.info(ErrorUtils.get("Adding data service path for bucket {0}: {1}",
                                bucket.full_name(), input_format_info.get().describe()));

                        a2_logger.ifPresent(l -> l.log(Level.INFO, true,
                                () -> ErrorUtils.get("Adding data service path for bucket {0}: {1}",
                                        bucket.full_name(), input_format_info.get().describe()),
                                () -> SparkTechnologyService.class.getSimpleName() + "."
                                        + Optional.ofNullable(job.name()).orElse("no_name"),
                                () -> "startAnalyticJobOrTest"));

                        //DEBUG
                        //System.out.println(ErrorUtils.get("Adding data service path for bucket {0}: {1}", bucket.full_name(),input_format_info.get().describe()));

                        final Job input_job = Job.getInstance(config);
                        input_job.setInputFormatClass(
                                input_format_info.get().getAccessService().either(l -> l.getClass(), r -> r));
                        input_format_info.get().getAccessConfig().ifPresent(map -> {
                            map.entrySet().forEach(kv -> input_job.getConfiguration().set(kv.getKey(),
                                    kv.getValue().toString()));
                        });
                        per_input_action.accept(input_with_test_settings, input_job);
                    }
                }
            }));
}

From source file:edu.brown.utils.CollectionUtil.java

@SuppressWarnings("unchecked")
public static <E extends Enum<?>> Set<E> getAllExcluding(E elements[], E... excluding) {
    Set<E> exclude_set = new HashSet<E>();
    for (E e : excluding)
        exclude_set.add(e);

    Set<E> elements_set = new HashSet<E>();
    for (int i = 0; i < elements.length; i++) {
        if (!exclude_set.contains(elements[i]))
            elements_set.add(elements[i]);
    } // FOR
    return (elements_set);
    // Crappy java....
    // Object new_elements[] = new Object[elements_set.size()];
    // elements_set.toArray(new_elements);
    // return ((E[])new_elements);
}
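
A hypothetical call, assuming the method above and a small enum Color { RED, GREEN, BLUE }; every element passed in the varargs is filtered out of the returned set:

Set<Color> remaining = CollectionUtil.getAllExcluding(Color.values(), Color.RED);
// -> [GREEN, BLUE] (order not guaranteed; the result is a HashSet)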

From source file:com.linkedin.pinot.common.utils.request.RequestUtils.java

/**
 * Return whether the query is fit for star tree index.
 * <p>The query is fit for star tree index if the following conditions are met:
 * <ul>
 *   <li>Segment contains star tree</li>
 *   <li>BrokerRequest debug options have not explicitly disabled use of star tree</li>
 *   <li>Query is aggregation/group-by with all aggregation functions in {@link #STAR_TREE_AGGREGATION_FUNCTIONS}</li>
 *   <li>The aggregations must apply on metric column</li>
 *   <li>All predicate columns and group-by columns are materialized dimensions</li>
 *   <li>All predicates are conjoined by AND</li>
 * </ul>
 */
public static boolean isFitForStarTreeIndex(SegmentMetadata segmentMetadata, BrokerRequest brokerRequest,
        FilterQueryTree rootFilterNode) {
    // Check whether segment contains star tree
    if (!segmentMetadata.hasStarTree()) {
        return false;
    }

    // Check whether star tree is disabled explicitly in BrokerRequest
    Map<String, String> debugOptions = brokerRequest.getDebugOptions();
    if (debugOptions != null
            && StringUtils.compareIgnoreCase(debugOptions.get(USE_STAR_TREE_KEY), "false") == 0) {
        return false;
    }

    // Get all metrics
    // NOTE: we treat all non-metric columns as dimensions in star tree
    Set<String> metrics = new HashSet<>(segmentMetadata.getSchema().getMetricNames());

    // Check whether all aggregation functions are supported and apply on metric column
    List<AggregationInfo> aggregationsInfo = brokerRequest.getAggregationsInfo();
    if (aggregationsInfo == null) {
        return false;
    }
    for (AggregationInfo aggregationInfo : aggregationsInfo) {
        if (!STAR_TREE_AGGREGATION_FUNCTIONS.contains(aggregationInfo.getAggregationType().toLowerCase())) {
            return false;
        }
        if (!metrics.contains(aggregationInfo.getAggregationParams().get("column").trim())) {
            return false;
        }
    }

    // Get all un-materialized dimensions
    StarTreeMetadata starTreeMetadata = segmentMetadata.getStarTreeMetadata();
    Preconditions.checkNotNull(starTreeMetadata);
    Set<String> unMaterializedDimensions = new HashSet<>(
            starTreeMetadata.getSkipMaterializationForDimensions());

    // Check whether all group-by columns are materialized dimensions
    GroupBy groupBy = brokerRequest.getGroupBy();
    if (groupBy != null) {
        Set<String> groupByColumns = getAllGroupByColumns(groupBy);
        for (String groupByColumn : groupByColumns) {
            if (metrics.contains(groupByColumn) || unMaterializedDimensions.contains(groupByColumn)) {
                return false;
            }
        }
    }

    // Check whether all predicate columns are materialized dimensions, and all predicates are conjoined by AND
    return rootFilterNode == null
            || checkPredicatesForStarTree(rootFilterNode, metrics, unMaterializedDimensions);
}

From source file:com.neutti.webframe.util.StringUtil.java

/**
 * Replaces all HTML-sensitive characters with their entity equivalents in
 * all string values in the specified model. Some model values are left
 * untouched (their names are contained in the exceptions set).
 * @param model Map of pairs <code>variable -&gt; value</code>.
 * @param exceptions Set of key names whose values are copied through unencoded.
 * @return Map with encoded string values
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
public static Map htmlEncodeModelWithExceptions(Map model, Set exceptions) {
    Map result = new HashMap(model.size());
    for (Iterator i = model.entrySet().iterator(); i.hasNext();) {
        Map.Entry entry = (Map.Entry) i.next();
        String key = (String) entry.getKey();
        Object value = (Object) entry.getValue();
        if (exceptions.contains(key)) {
            result.put(key, value);
        } else {
            if (value instanceof String) {
                result.put(key, htmlEncode((String) value));
            } else {
                result.put(key, value);
            }
        }
    }
    return result;
}
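
A hypothetical call, assuming the method above and that the htmlEncode helper in the same class performs standard entity escaping; keys listed in the exceptions set are copied through unchanged:

Map model = new HashMap();
model.put("title", "<b>Hello</b>");
model.put("rawHtml", "<b>Hello</b>");
Map encoded = StringUtil.htmlEncodeModelWithExceptions(model, Collections.singleton("rawHtml"));
// "title"   -> "&lt;b&gt;Hello&lt;/b&gt;" (encoded)
// "rawHtml" -> "<b>Hello</b>"             (skipped, listed in exceptions)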

From source file:com.amazon.carbonado.repo.replicated.ReplicatedRepository.java

/**
 * Utility method to select the natural ordering of a storage, by looking
 * for a clustered index on the primary key. Returns null if no clustered
 * index was found.
 *
 * TODO: Try to incorporate this into standard storage interface somehow.
 */
private static String[] selectNaturalOrder(Repository repo, Class<? extends Storable> type)
        throws RepositoryException {
    IndexInfoCapability capability = repo.getCapability(IndexInfoCapability.class);
    if (capability == null) {
        return null;
    }
    IndexInfo info = null;
    for (IndexInfo candidate : capability.getIndexInfo(type)) {
        if (candidate.isClustered()) {
            info = candidate;
            break;
        }
    }
    if (info == null) {
        return null;
    }

    // Verify index is part of primary key.
    Set<String> pkSet = StorableIntrospector.examine(type).getPrimaryKeyProperties().keySet();

    String[] propNames = info.getPropertyNames();
    for (String prop : propNames) {
        if (!pkSet.contains(prop)) {
            return null;
        }
    }

    String[] orderBy = new String[pkSet.size()];

    Direction[] directions = info.getPropertyDirections();

    // Clone to remove elements.
    pkSet = new LinkedHashSet<String>(pkSet);

    int i;
    for (i = 0; i < propNames.length; i++) {
        orderBy[i] = ((directions[i] == Direction.DESCENDING) ? "-" : "+") + propNames[i];
        pkSet.remove(propNames[i]);
    }

    // Append any remaining pk properties, to ensure complete ordering.
    if (pkSet.size() > 0) {
        for (String prop : pkSet) {
            orderBy[i++] = prop;
        }
    }

    return orderBy;
}