Example usage for java.util LinkedHashSet isEmpty

List of usage examples for java.util LinkedHashSet isEmpty

Introduction

On this page you can find example usage for java.util LinkedHashSet isEmpty.

Prototype

boolean isEmpty();

Source Link

Document

Returns true if this set contains no elements.
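A minimal, hypothetical sketch (not taken from the examples below) illustrating this behaviour: isEmpty() returns true for a newly created LinkedHashSet, false once an element has been added, and true again after clear().

import java.util.LinkedHashSet;

public class IsEmptyDemo {
    public static void main(String[] args) {
        LinkedHashSet<String> names = new LinkedHashSet<String>();
        System.out.println(names.isEmpty()); // true: the set contains no elements

        names.add("first");
        System.out.println(names.isEmpty()); // false: the set now contains one element

        names.clear();
        System.out.println(names.isEmpty()); // true again once all elements are removed
    }
}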

Usage

From source file:solidbase.core.UpgradeProcessor.java

/**
 * Perform upgrade to the given target version. The target version can end with an '*', indicating whichever tip
 * version matches the target prefix.
 *
 * @param target The target requested.
 * @param downgradeable Indicates that downgrade paths are allowed to reach the given target.
 * @throws SQLExecutionException When the execution of a command throws an {@link SQLException}.
 */
protected void upgrade(String target, boolean downgradeable) throws SQLExecutionException {
    setupControlTables();

    String version = this.dbVersion.getVersion();

    if (target == null) {
        LinkedHashSet<String> targets = getTargets(true, null, downgradeable);
        if (targets.size() > 1)
            throw new FatalException("More than one possible target found, you should specify a target.");
        Assert.notEmpty(targets);

        target = targets.iterator().next();
    } else if (target.endsWith("*")) {
        String targetPrefix = target.substring(0, target.length() - 1);
        LinkedHashSet<String> targets = getTargets(true, targetPrefix, downgradeable);
        if (targets.size() > 1)
            throw new FatalException("More than one possible target found for " + target);
        if (targets.isEmpty())
            throw new FatalException("Target " + target + " is not reachable from version "
                    + StringUtils.defaultString(version, "<no version>"));

        target = targets.iterator().next();
    } else {
        LinkedHashSet<String> targets = getTargets(false, null, downgradeable);
        Assert.notEmpty(targets);

        // TODO Refactor this, put this in getTargets()
        boolean found = false;
        for (String t : targets)
            if (ObjectUtils.equals(t, target)) {
                found = true;
                break;
            }

        if (!found)
            throw new FatalException("Target " + target + " is not reachable from version "
                    + StringUtils.defaultString(version, "<no version>"));
    }

    if (ObjectUtils.equals(target, version)) {
        this.progress.noUpgradeNeeded();
        return;
    }

    upgrade(version, target, downgradeable);
}

From source file:lv.semti.morphology.webservice.VerbResource.java

private String tagChunk(LinkedList<Word> tokens) {
     LinkedHashSet<String> tags = new LinkedHashSet<String>();
     // various experiments; this will only work properly with syntax analysis
     //tags.add(String.valueOf(tokens.size()));
     //tags.add(tokens.get(0).getToken());
     //tags.add(tokens.get(0).getPartOfSpeech());
     if (tokens.size() > 1 && tokens.get(0).isRecognized()
             && tokens.get(0).hasAttribute(AttributeNames.i_PartOfSpeech, AttributeNames.v_Preposition)) {
         // if the phrase starts with a preposition
         for (Wordform wf : tokens.get(0).wordforms) {
             //tags.add(wf.getDescription());
             if (wf.isMatchingStrong(AttributeNames.i_PartOfSpeech, AttributeNames.v_Preposition)) {
                 String ncase = wf.getValue(AttributeNames.i_Rekcija);
                 if (ncase != null)
                     tags.add(wf.getToken() + caseCode(ncase));
             }
         }
     }

     // if it starts with a conjunction, it could be a subordinate clause
     if (tokens.size() > 1 && tokens.get(0).isRecognized()
             && tokens.get(0).hasAttribute(AttributeNames.i_PartOfSpeech, AttributeNames.v_Conjunction)) {
         tags.add("S");
     }

     if (tags.isEmpty())
         return tagWord(tokens.getLast(), false); // If we could not interpret it, return the analysis of the last word - Gunta said that is supposedly more plausible

     return formatJSON(tags);
 }

From source file:org.sonatype.plugin.nexus.testenvironment.AbstractEnvironmentMojo.java

@SuppressWarnings("unchecked")
private Collection<Artifact> getNonTransitivePlugins(Set<Artifact> projectArtifacts)
        throws MojoExecutionException {
    Collection<Artifact> deps = new LinkedHashSet<Artifact>();

    for (Artifact artifact : projectArtifacts) {
        Artifact pomArtifact = artifactFactory.createArtifact(artifact.getGroupId(), artifact.getArtifactId(),
                artifact.getVersion(), artifact.getClassifier(), "pom");
        Set<Artifact> result;
        try {
            MavenProject pomProject = mavenProjectBuilder.buildFromRepository(pomArtifact, remoteRepositories,
                    localRepository);

            Set<Artifact> artifacts = pomProject.createArtifacts(artifactFactory, null, null);
            artifacts = filterOutSystemDependencies(artifacts);
            ArtifactResolutionResult arr = resolver.resolveTransitively(artifacts, pomArtifact, localRepository,
                    remoteRepositories, artifactMetadataSource, null);
            result = arr.getArtifacts();
        } catch (Exception e) {
            throw new MojoExecutionException("Failed to resolve non-transitive deps " + e.getMessage(), e);
        }

        LinkedHashSet<Artifact> plugins = new LinkedHashSet<Artifact>();
        plugins.addAll(filtterArtifacts(result, getFilters(null, null, "nexus-plugin", null)));
        plugins.addAll(filtterArtifacts(result, getFilters(null, null, "zip", "bundle")));

        plugins.addAll(getNonTransitivePlugins(plugins));

        if (!plugins.isEmpty()) {
            getLog().debug("Adding non-transitive dependencies for: " + artifact + " -\n"
                    + plugins.toString().replace(',', '\n'));
        }

        deps.addAll(plugins);
    }

    return deps;
}

From source file:com.ge.predix.acs.service.policy.evaluation.PolicyEvaluationServiceImpl.java

@Override
public PolicyEvaluationResult evalPolicy(final PolicyEvaluationRequestV1 request) {
    ZoneEntity zone = this.zoneResolver.getZoneEntityOrFail();
    String uri = request.getResourceIdentifier();
    String subjectIdentifier = request.getSubjectIdentifier();
    String action = request.getAction();
    LinkedHashSet<String> policySetsEvaluationOrder = request.getPolicySetsEvaluationOrder();

    if (uri == null || subjectIdentifier == null || action == null) {
        LOGGER.error(String.format(
                "Policy evaluation request is missing required input parameters: "
                        + "resourceURI='%s' subjectIdentifier='%s' action='%s'",
                uri, subjectIdentifier, action));

        throw new IllegalArgumentException("Policy evaluation request is missing required input parameters. "
                + "Please review and resubmit the request.");
    }

    List<PolicySet> allPolicySets = this.policyService.getAllPolicySets();

    if (allPolicySets.isEmpty()) {
        return new PolicyEvaluationResult(Effect.NOT_APPLICABLE);
    }

    LinkedHashSet<PolicySet> filteredPolicySets = filterPolicySetsByPriority(subjectIdentifier, uri,
            allPolicySets, policySetsEvaluationOrder);

    // At this point empty evaluation order means we have only one policy set.
    // Fixing policy evaluation order so we could build a cache key.
    PolicyEvaluationRequestCacheKey key;
    if (policySetsEvaluationOrder.isEmpty()) {
        key = new Builder().zoneId(zone.getName())
                .policySetIds(Stream.of(filteredPolicySets.iterator().next().getName())
                        .collect(Collectors.toCollection(LinkedHashSet::new)))
                .request(request).build();
    } else {
        key = new Builder().zoneId(zone.getName()).request(request).build();
    }

    PolicyEvaluationResult result = this.cache.get(key);
    if (null == result) {
        result = new PolicyEvaluationResult(Effect.NOT_APPLICABLE);

        HashSet<Attribute> supplementalResourceAttributes;
        if (null == request.getResourceAttributes()) {
            supplementalResourceAttributes = new HashSet<>();
        } else {
            supplementalResourceAttributes = new HashSet<>(request.getResourceAttributes());
        }
        HashSet<Attribute> supplementalSubjectAttributes;
        if (null == request.getSubjectAttributes()) {
            supplementalSubjectAttributes = new HashSet<>();
        } else {
            supplementalSubjectAttributes = new HashSet<>(request.getSubjectAttributes());
        }

        for (PolicySet policySet : filteredPolicySets) {
            result = evalPolicySet(policySet, subjectIdentifier, uri, action, supplementalResourceAttributes,
                    supplementalSubjectAttributes);
            if (result.getEffect() == Effect.NOT_APPLICABLE) {
                continue;
            } else {
                break;
            }
        }

        LOGGER.info(
                String.format(
                        "Processed Policy Evaluation for: "
                                + "resourceUri='%s', subjectIdentifier='%s', action='%s'," + " result='%s'",
                        uri, subjectIdentifier, action, result.getEffect()));
        this.cache.set(key, result);
    }
    return result;
}

From source file:org.mskcc.cbio.importer.converter.internal.ConverterImpl.java

/**
 * Generates case lists for the given portal.
 *
 * @param portal String
 * @throws Exception
 */
@Override
public void generateCaseLists(String portal) throws Exception {

    if (LOG.isInfoEnabled()) {
        LOG.info("generateCaseLists()");
    }

    // check args
    if (portal == null) {
        throw new IllegalArgumentException("portal must not be null");
    }

    // get portal metadata
    PortalMetadata portalMetadata = config.getPortalMetadata(portal).iterator().next();
    if (portalMetadata == null) {
        if (LOG.isInfoEnabled()) {
            LOG.info("convertData(), cannot find PortalMetadata, returning");
        }
        return;
    }

    // get CaseListMetadata
    Collection<CaseListMetadata> caseListMetadatas = config.getCaseListMetadata(Config.ALL);

    // iterate over all cancer studies
    for (CancerStudyMetadata cancerStudyMetadata : config.getCancerStudyMetadata(portalMetadata.getName())) {
        // iterate over case lists
        for (CaseListMetadata caseListMetadata : caseListMetadatas) {
            if (LOG.isInfoEnabled()) {
                LOG.info("generateCaseLists(), processing cancer study: " + cancerStudyMetadata
                        + ", case list: " + caseListMetadata.getCaseListFilename());
            }
            // how many staging files are we working with?
            String[] stagingFilenames = null;
            // setup union/intersection bools
            boolean unionCaseList = caseListMetadata.getStagingFilenames()
                    .contains(CaseListMetadata.CASE_LIST_UNION_DELIMITER);
            boolean intersectionCaseList = caseListMetadata.getStagingFilenames()
                    .contains(CaseListMetadata.CASE_LIST_INTERSECTION_DELIMITER);
            // union (like all cases)
            if (unionCaseList) {
                stagingFilenames = caseListMetadata.getStagingFilenames()
                        .split("\\" + CaseListMetadata.CASE_LIST_UNION_DELIMITER);
            }
            // intersection (like complete or cna-seq)
            else if (intersectionCaseList) {
                stagingFilenames = caseListMetadata.getStagingFilenames()
                        .split("\\" + CaseListMetadata.CASE_LIST_INTERSECTION_DELIMITER);
            }
            // just a single staging file
            else {
                stagingFilenames = new String[] { caseListMetadata.getStagingFilenames() };
            }
            if (LOG.isInfoEnabled()) {
                LOG.info("generateCaseLists(), stagingFilenames: "
                        + java.util.Arrays.toString(stagingFilenames));
            }
            // this is the set we will pass to writeCaseListFile
            LinkedHashSet<String> caseSet = new LinkedHashSet<String>();
            // this indicates the number of staging files processed -
            // used to verify that an intersection should be written
            int numStagingFilesProcessed = 0;
            for (String stagingFilename : stagingFilenames) {
                if (LOG.isInfoEnabled()) {
                    LOG.info("generateCaseLists(), processing stagingFile: " + stagingFilename);
                }
                // compute the case set
                List<String> caseList = fileUtils.getCaseListFromStagingFile(caseIDs, portalMetadata,
                        cancerStudyMetadata, stagingFilename);
                // we may not have this datatype in study
                if (caseList.size() == 0) {
                    if (LOG.isInfoEnabled()) {
                        LOG.info("generateCaseLists(), stagingFileHeader is empty: " + stagingFilename
                                + ", skipping...");
                    }
                    continue;
                }
                // intersection 
                if (intersectionCaseList) {
                    if (caseSet.isEmpty()) {
                        caseSet.addAll(caseList);
                    } else {
                        caseSet.retainAll(caseList);
                    }
                }
                // otherwise union or single staging (treat the same)
                else {
                    caseSet.addAll(caseList);
                }
                ++numStagingFilesProcessed;
            }
            // write the case list file (don't make empty case lists)
            if (caseSet.size() > 0) {
                if (LOG.isInfoEnabled()) {
                    LOG.info("generateCaseLists(), calling writeCaseListFile()...");
                }
                // do not write out complete cases file unless we've processed all the files required
                if (intersectionCaseList && (numStagingFilesProcessed != stagingFilenames.length)) {
                    if (LOG.isInfoEnabled()) {
                        LOG.info(
                                "generateCaseLists(), number of staging files processed != number staging files required for cases_complete.txt, skipping call to writeCaseListFile()...");
                    }
                    continue;
                }
                fileUtils.writeCaseListFile(portalMetadata, cancerStudyMetadata, caseListMetadata,
                        caseSet.toArray(new String[0]));
            } else if (LOG.isInfoEnabled()) {
                LOG.info("generateCaseLists(), caseSet.size() <= 0, skipping call to writeCaseListFile()...");
            }
            // if union, write out the cancer study metadata file
            if (caseSet.size() > 0 && caseListMetadata.getCaseListFilename().equals(ALL_CASES_FILENAME)) {
                if (LOG.isInfoEnabled()) {
                    LOG.info(
                            "generateCaseLists(), processed all cases list, we can now update cancerStudyMetadata file()...");
                }
                fileUtils.writeCancerStudyMetadataFile(portalMetadata, cancerStudyMetadata, caseSet.size());
            }
        }
    }

}

From source file:eionet.cr.web.action.HarvestSourcesActionBean.java

/**
 *
 * @return
 * @throws DAOException
 */
public Resolution delete() throws DAOException {

    if (isUserLoggedIn()) {
        if (sourceUrl != null && !sourceUrl.isEmpty()) {

            // An authenticated user can delete sources he own. An
            // administrator can delete any source.
            // A priority source can not be deleted. The administrator must
            // first change it to a non-priority source, then delete it.

            LinkedHashSet<String> sourcesToDelete = new LinkedHashSet<String>();
            LinkedHashSet<String> notOwnedSources = new LinkedHashSet<String>();
            LinkedHashSet<String> prioritySources = new LinkedHashSet<String>();
            LinkedHashSet<String> currentlyHarvested = new LinkedHashSet<String>();

            for (String url : sourceUrl) {

                HarvestSourceDTO source = factory.getDao(HarvestSourceDAO.class).getHarvestSourceByUrl(url);
                if (source != null) {

                    if (CurrentHarvests.contains(url)) {
                        currentlyHarvested.add(url);
                    } else {
                        if (userCanDelete(source)) {
                            sourcesToDelete.add(url);
                        } else if (source.isPrioritySource()) {
                            prioritySources.add(url);
                        } else if (!getUserName().equals(source.getOwner())) {
                            notOwnedSources.add(url);
                        }
                    }
                }
            }

            logger.debug("Deleting the following sources: " + sourcesToDelete);
            factory.getDao(HarvestSourceDAO.class).removeHarvestSources(sourcesToDelete);

            if (!sourcesToDelete.isEmpty()) {
                StringBuffer msg = new StringBuffer();
                msg.append("The following sources were successfully removed from the system: <ul>");
                for (String uri : sourcesToDelete) {
                    msg.append("<li>").append(uri).append("</li>");
                }
                msg.append("</ul>");
                addSystemMessage(msg.toString());
            }

            StringBuffer warnings = new StringBuffer();
            if (!prioritySources.isEmpty()) {
                warnings.append(
                        "The following sources could not be deleted, because they are priority sources: <ul>");
                for (String url : prioritySources) {
                    warnings.append("<li>").append(url).append("</li>");
                }
                warnings.append("</ul>");
            }
            if (!notOwnedSources.isEmpty()) {
                warnings.append(
                        "The following sources could not be deleted, because you are not their owner: <ul>");
                for (String url : notOwnedSources) {
                    warnings.append("<li>").append(url).append("</li>");
                }
                warnings.append("</ul>");
            }
            if (!currentlyHarvested.isEmpty()) {
                warnings.append(
                        "The following sources could not be deleted, because they are curently being harvested: <ul>");
                for (String url : currentlyHarvested) {
                    warnings.append("<li>").append(url).append("</li>");
                }
                warnings.append("</ul>");
            }

            if (warnings.length() > 0) {
                addWarningMessage(warnings.toString());
            }
        }
    } else {
        addWarningMessage(getBundle().getString("not.logged.in"));
    }
    return search();
}

From source file:org.ala.dao.FulltextSearchDaoImplSolr.java

/**
 * Applies a prefix and suffix to highlight the search terms in the
 * supplied list.
 *
 * NC: This is a workaround as I can not get SOLR highlighting to work for partial term matches.
 *
 * @param names
 * @param m
 * @return
 */
private List<String> getHighlightedNames(List<String> names, java.util.regex.Matcher m, String prefix,
        String suffix) {
    LinkedHashSet<String> hlnames = null;
    List<String> lnames = null;
    if (names != null) {
        hlnames = new LinkedHashSet<String>();
        for (String name : names) {
            String name1 = SolrUtils.concatName(name.trim());
            m.reset(name1);
            if (m.find()) {
                //insert <b> and </b> at the start and end index
                name = name.substring(0, m.start()) + prefix + name.substring(m.start(), m.end()) + suffix
                        + name.substring(m.end(), name.length());
                hlnames.add(name);
            }
        }
        if (!hlnames.isEmpty()) {
            lnames = new ArrayList<String>(hlnames);
            Collections.sort(lnames);
        } else {
            lnames = new ArrayList<String>();
        }
    }
    return lnames;
}

From source file:com.odoko.solrcli.actions.CrawlPostAction.java

/**
 * A very simple crawler, pulling URLs to fetch from a backlog and then
 * recurses N levels deep if recursive>0. Links are parsed from HTML
 * through first getting an XHTML version using SolrCell with extractOnly,
 * and followed if they are local. The crawler pauses for a default delay
 * of 10 seconds between each fetch; this can be configured via the delay
 * variable. This is only meant for test purposes, as it does not respect
 * robots.txt or anything else fancy :)
 * @param level which level to crawl
 * @param out output stream to write to
 * @return number of pages crawled on this level and below
 */
protected int webCrawl(int level, OutputStream out) {
  int numPages = 0;
  LinkedHashSet<URL> stack = backlog.get(level);
  int rawStackSize = stack.size();
  stack.removeAll(visited);
  int stackSize = stack.size();
  LinkedHashSet<URL> subStack = new LinkedHashSet<URL>();
  info("Entering crawl at level "+level+" ("+rawStackSize+" links total, "+stackSize+" new)");
  for(URL u : stack) {
    try {
      visited.add(u);
      PageFetcherResult result = pageFetcher.readPageFromUrl(u);
      if(result.httpStatus == 200) {
        u = (result.redirectUrl != null) ? result.redirectUrl : u;
        URL postUrl = new URL(appendParam(solrUrl.toString(), 
            "literal.id="+URLEncoder.encode(u.toString(),"UTF-8") +
            "&literal.url="+URLEncoder.encode(u.toString(),"UTF-8")));
        boolean success = postData(new ByteArrayInputStream(result.content), null, out, result.contentType, postUrl);
        if (success) {
          info("POSTed web resource "+u+" (depth: "+level+")");
          Thread.sleep(delay * 1000);
          numPages++;
          // Pull links from HTML pages only
          if(recursive > level && result.contentType.equals("text/html")) {
            Set<URL> children = pageFetcher.getLinksFromWebPage(u, new ByteArrayInputStream(result.content), result.contentType, postUrl);
            subStack.addAll(children);
          }
        } else {
          warn("An error occurred while posting "+u);
        }
      } else {
        warn("The URL "+u+" returned a HTTP result status of "+result.httpStatus);
      }
    } catch (IOException e) {
      warn("Caught exception when trying to open connection to "+u+": "+e.getMessage());
    } catch (InterruptedException e) {
      throw new RuntimeException();
    }
  }
  if(!subStack.isEmpty()) {
    backlog.add(subStack);
    numPages += webCrawl(level+1, out);
  }
  return numPages;    
}
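The crawler above relies on the insertion order and de-duplication of LinkedHashSet: each depth level gets its own set of URLs, already-visited links are removed, and the isEmpty() check on subStack decides whether another level of recursion is needed. Below is a simplified, hypothetical sketch of that pattern; LevelCrawlSketch, fetchLinks and the seed URL are illustrative placeholders and are not part of the original source.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

public class LevelCrawlSketch {

    // One LinkedHashSet per depth level: duplicates are dropped, discovery order is kept.
    private final List<LinkedHashSet<String>> backlog = new ArrayList<LinkedHashSet<String>>();
    private final Set<String> visited = new HashSet<String>();

    // Placeholder for real link extraction; a real crawler would fetch and parse the page.
    private Set<String> fetchLinks(String url) {
        return new LinkedHashSet<String>();
    }

    int crawl(int level) {
        int pages = 0;
        LinkedHashSet<String> stack = backlog.get(level);
        stack.removeAll(visited);
        LinkedHashSet<String> subStack = new LinkedHashSet<String>();
        for (String url : stack) {
            visited.add(url);
            pages++;
            subStack.addAll(fetchLinks(url));
        }
        // Only recurse when the next level actually contains new URLs.
        if (!subStack.isEmpty()) {
            backlog.add(subStack);
            pages += crawl(level + 1);
        }
        return pages;
    }

    public static void main(String[] args) {
        LevelCrawlSketch sketch = new LevelCrawlSketch();
        LinkedHashSet<String> seeds = new LinkedHashSet<String>();
        seeds.add("http://example.com/"); // hypothetical seed URL
        sketch.backlog.add(seeds);
        System.out.println("Pages crawled: " + sketch.crawl(0));
    }
}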

From source file:org.ala.dao.FulltextSearchDaoImplSolr.java

/**
 * If word highlighting is enabled then do an exact match, otherwise do the concatenated-name match.
 *
 * @param names
 * @param term
 * @param prefix
 * @param suffix
 * @return
 */
private List<String> getHighlightedNames(List<String> names, String term, String prefix, String suffix) {
    LinkedHashSet<String> hlnames = null;
    List<String> lnames = null;
    String value = null;
    boolean isHighlight = false;

    //have word highlight
    if (prefix != null && suffix != null && prefix.trim().length() > 0 && suffix.trim().length() > 0
            && term != null) {
        value = SolrUtils.cleanName(term.trim());
        isHighlight = true;
    } else {
        value = SolrUtils.concatName(term);
    }
    Pattern p = Pattern.compile(value, Pattern.CASE_INSENSITIVE);
    java.util.regex.Matcher m = p.matcher(value);
    if (names != null) {
        hlnames = new LinkedHashSet<String>();
        for (String name : names) {
            String name1 = null;
            name = name.trim();
            if (isHighlight) {
                name1 = name;
            } else {
                name1 = SolrUtils.concatName(name);
            }
            m.reset(name1);
            if (m.find()) {
                //insert <b> and </b> at the start and end index
                name = name.substring(0, m.start()) + prefix + name.substring(m.start(), m.end()) + suffix
                        + name.substring(m.end(), name.length());
                hlnames.add(name);
            }
        }
        if (!hlnames.isEmpty()) {
            lnames = new ArrayList<String>(hlnames);
            Collections.sort(lnames);
        } else {
            lnames = new ArrayList<String>();
        }
    }
    return lnames;
}