List of usage examples for java.util SortedSet isEmpty
boolean isEmpty();
From source file:io.wcm.config.core.management.impl.ParameterResolverImpl.java
/** * Apply configured values for given configuration id (except those for which the parameter names are locked on a * higher configuration level).//w w w . j av a 2 s . c o m * @param resolver Resource resolver * @param configurationId Configuration id * @param parameterValues Parameter values * @param ancestorLockedParameterNames Set of locked parameter names on the configuration levels above. * @return Set of locked parameter names on this configuration level combined with the from the levels above. */ private SortedSet<String> applyConfiguredValues(ResourceResolver resolver, String configurationId, Map<String, Object> parameterValues, SortedSet<String> ancestorLockedParameterNames) { // get data from persistence ParameterPersistenceData data = parameterPersistence.getData(resolver, configurationId); // ensure the types provided by persistence are valid Map<String, Object> configuredValues = ensureValidValueTypes(data.getValues()); // put parameter values to map (respect locked parameter names that may be defined on ancestor level) if (!ancestorLockedParameterNames.isEmpty()) { for (Map.Entry<String, Object> entry : configuredValues.entrySet()) { if (!ancestorLockedParameterNames.contains(entry.getKey())) { parameterValues.put(entry.getKey(), entry.getValue()); } } } else { parameterValues.putAll(configuredValues); } // aggregate set of locked parameter names from ancestor levels and this level SortedSet<String> lockedParameterNames = ancestorLockedParameterNames; if (!data.getLockedParameterNames().isEmpty()) { lockedParameterNames = new TreeSet<>(); lockedParameterNames.addAll(ancestorLockedParameterNames); lockedParameterNames.addAll(data.getLockedParameterNames()); } return lockedParameterNames; }
From source file:net.sf.jabref.gui.ContentSelectorDialog2.java
/** * Set the contents of the field selector list. * *///from w w w . j a va 2 s .c o m private void setupFieldSelector() { fieldListModel.clear(); SortedSet<String> contents = new TreeSet<>(); for (String s : metaData) { if (s.startsWith(MetaData.SELECTOR_META_PREFIX)) { contents.add(s.substring(MetaData.SELECTOR_META_PREFIX.length())); } } if (contents.isEmpty()) { // if nothing was added, put the default fields (as described in the help) fieldListModel.addElement(FieldName.AUTHOR); fieldListModel.addElement(FieldName.JOURNAL); fieldListModel.addElement(FieldName.KEYWORDS); fieldListModel.addElement(FieldName.PUBLISHER); } else { for (String s : contents) { fieldListModel.addElement(s); } } if (currentField == null) { // if dialog is created for the whole database, // select the first field to avoid confusions in GUI usage fieldList.setSelectedIndex(0); } else { // a specific field has been chosen at the constructor // select this field int i = fieldListModel.indexOf(currentField); if (i != -1) { // field has been found in list, select it fieldList.setSelectedIndex(i); } } }
From source file:edu.harvard.med.screensaver.model.screenresults.ScreenResult.java
/**
 * Returns the last {@link AdministrativeActivityType#SCREEN_RESULT_DATA_LOADING}
 * activity recorded on this ScreenResult's screen (full or incremental data
 * loading), or {@code null} when no such activity exists.
 */
@Transient
public AdministrativeActivity getLastDataLoadingActivity() {
    // sorted set: last() is the most recent data loading activity
    SortedSet<AdministrativeActivity> dataLoadings = Sets.newTreeSet(
            Iterables.filter(getScreen().getUpdateActivities(),
                    AdministrativeActivityType.SCREEN_RESULT_DATA_LOADING.isValuePredicate()));
    return dataLoadings.isEmpty() ? null : dataLoadings.last();
}
From source file:com.spotify.heroic.filter.AndFilter.java
static Filter optimize(final SortedSet<Filter> filters) { final SortedSet<Filter> result = new TreeSet<>(); for (final Filter f : filters) { if (f instanceof NotFilter) { // Optimize away expressions which are always false. // Example: foo = bar and !(foo = bar) if (filters.contains(((NotFilter) f).getFilter())) { return FalseFilter.get(); }//from w w w .j a va2s. c o m } else if (f instanceof StartsWithFilter) { // Optimize away prefixes which encompass each other. // Example: foo ^ hello and foo ^ helloworld -> foo ^ helloworld if (FilterUtils.containsPrefixedWith(filters, (StartsWithFilter) f, (inner, outer) -> FilterUtils.prefixedWith(inner.getValue(), outer.getValue()))) { continue; } } else if (f instanceof MatchTagFilter) { // Optimize matchTag expressions which are always false. // Example: foo = bar and foo = baz if (FilterUtils.containsConflictingMatchTag(filters, (MatchTagFilter) f)) { return FalseFilter.get(); } } else if (f instanceof MatchKeyFilter) { // Optimize matchTag expressions which are always false. // Example: $key = bar and $key = baz if (FilterUtils.containsConflictingMatchKey(filters, (MatchKeyFilter) f)) { return FalseFilter.get(); } } result.add(f); } if (result.isEmpty()) { return FalseFilter.get(); } if (result.size() == 1) { return result.iterator().next(); } return new AndFilter(ImmutableList.copyOf(result)); }
From source file:mvm.rya.indexing.accumulo.freetext.AccumuloFreeTextIndexer.java
private void storeStatement(Statement statement) throws IOException { // if the predicate list is empty, accept all predicates. // Otherwise, make sure the predicate is on the "valid" list boolean isValidPredicate = validPredicates.isEmpty() || validPredicates.contains(statement.getPredicate()); if (isValidPredicate && (statement.getObject() instanceof Literal)) { // Get the tokens String text = statement.getObject().stringValue().toLowerCase(); SortedSet<String> tokens = tokenizer.tokenize(text); if (!tokens.isEmpty()) { // Get Document Data String docContent = StatementSerializer.writeStatement(statement); String docId = Md5Hash.md5Base64(docContent); // Setup partition Text partition = genPartition(docContent.hashCode(), docTableNumPartitions); Mutation docTableMut = new Mutation(partition); List<Mutation> termTableMutations = new ArrayList<Mutation>(); Text docIdText = new Text(docId); // Store the Document Data docTableMut.put(ColumnPrefixes.DOCS_CF_PREFIX, docIdText, new Value(docContent.getBytes(Charsets.UTF_8))); // index the statement parts docTableMut.put(ColumnPrefixes.getSubjColFam(statement), docIdText, EMPTY_VALUE); docTableMut.put(ColumnPrefixes.getPredColFam(statement), docIdText, EMPTY_VALUE); docTableMut.put(ColumnPrefixes.getObjColFam(statement), docIdText, EMPTY_VALUE); docTableMut.put(ColumnPrefixes.getContextColFam(statement), docIdText, EMPTY_VALUE); // index the statement terms for (String token : tokens) { // tie the token to the document docTableMut.put(ColumnPrefixes.getTermColFam(token), docIdText, EMPTY_VALUE); // store the term in the term table (useful for wildcard searches) termTableMutations.add(createEmptyPutMutation(ColumnPrefixes.getTermListColFam(token))); termTableMutations.add(createEmptyPutMutation(ColumnPrefixes.getRevTermListColFam(token))); }//from w w w . ja v a2 s . 
co m // write the mutations try { docTableBw.addMutation(docTableMut); termTableBw.addMutations(termTableMutations); } catch (MutationsRejectedException e) { logger.error("error adding mutation", e); throw new IOException(e); } } } }
From source file:mvm.rya.indexing.accumulo.freetext.AccumuloFreeTextIndexer.java
private void deleteStatement(Statement statement) throws IOException { // if the predicate list is empty, accept all predicates. // Otherwise, make sure the predicate is on the "valid" list boolean isValidPredicate = validPredicates.isEmpty() || validPredicates.contains(statement.getPredicate()); if (isValidPredicate && (statement.getObject() instanceof Literal)) { // Get the tokens String text = statement.getObject().stringValue().toLowerCase(); SortedSet<String> tokens = tokenizer.tokenize(text); if (!tokens.isEmpty()) { // Get Document Data String docContent = StatementSerializer.writeStatement(statement); String docId = Md5Hash.md5Base64(docContent); // Setup partition Text partition = genPartition(docContent.hashCode(), docTableNumPartitions); Mutation docTableMut = new Mutation(partition); List<Mutation> termTableMutations = new ArrayList<Mutation>(); Text docIdText = new Text(docId); // Delete the Document Data docTableMut.putDelete(ColumnPrefixes.DOCS_CF_PREFIX, docIdText); // Delete the statement parts in index docTableMut.putDelete(ColumnPrefixes.getSubjColFam(statement), docIdText); docTableMut.putDelete(ColumnPrefixes.getPredColFam(statement), docIdText); docTableMut.putDelete(ColumnPrefixes.getObjColFam(statement), docIdText); docTableMut.putDelete(ColumnPrefixes.getContextColFam(statement), docIdText); // Delete the statement terms in index for (String token : tokens) { if (IS_TERM_TABLE_TOKEN_DELETION_ENABLED) { int rowId = Integer.parseInt(partition.toString()); boolean doesTermExistInOtherDocs = doesTermExistInOtherDocs(token, rowId, docIdText); // Only delete the term from the term table if it doesn't appear in other docs if (!doesTermExistInOtherDocs) { // Delete the term in the term table termTableMutations .add(createEmptyPutDeleteMutation(ColumnPrefixes.getTermListColFam(token))); termTableMutations .add(createEmptyPutDeleteMutation(ColumnPrefixes.getRevTermListColFam(token))); }/*from w w w .j a v a 2s .com*/ } // Un-tie the token to the 
document docTableMut.putDelete(ColumnPrefixes.getTermColFam(token), docIdText); } // write the mutations try { docTableBw.addMutation(docTableMut); termTableBw.addMutations(termTableMutations); } catch (MutationsRejectedException e) { logger.error("error adding mutation", e); throw new IOException(e); } } } }
From source file:com.tdclighthouse.prototype.maven.PrototypeSupperClassHandler.java
/**
 * Resolves the bean class to extend: maps each supertype to a known bean class
 * and picks the most specific one according to the extension comparator.
 * Returns {@code null} when none of the supertypes maps to a known class.
 */
@SuppressWarnings("unchecked")
private ClassReference extendsExistingBeans(List<String> supertypes) {
    // sorted so that last() yields the most specific class per the comparator
    SortedSet<Class<? extends HippoBean>> superClasses =
            new TreeSet<Class<? extends HippoBean>>(classExtensionComparator);
    for (String superType : supertypes) {
        if (Constants.NodeType.HIPPO_COMPOUND.equals(superType)) {
            superClasses.add(TdcDocument.class);
        } else if (getBeansOnClassPath().containsKey(superType)) {
            HippoBeanClass hippoBeanClass = getBeansOnClassPath().get(superType);
            superClasses.add((Class<? extends HippoBean>) getClass(hippoBeanClass));
        }
    }
    return superClasses.isEmpty() ? null : new ClassReference(superClasses.last());
}
From source file:net.sourceforge.mavenhippo.gen.DefaultSupperClassHandler.java
/**
 * Resolves the bean class to extend: maps each supertype to a known bean class
 * and picks the most specific one according to the extension comparator.
 * Returns {@code null} when none of the supertypes maps to a known class.
 */
@SuppressWarnings("unchecked")
private ClassReference extendsExistingBeans(List<String> supertypes) {
    // sorted so that last() yields the most specific class per the comparator
    SortedSet<Class<? extends HippoBean>> superClasses =
            new TreeSet<Class<? extends HippoBean>>(classExtensionComparator);
    for (String superType : supertypes) {
        if (Constants.NodeType.HIPPO_COMPOUND.equals(superType)) {
            superClasses.add(HippoCompound.class);
        } else if (getBeansOnClassPath().containsKey(superType)) {
            HippoBeanClass hippoBeanClass = getBeansOnClassPath().get(superType);
            superClasses.add((Class<? extends HippoBean>) getClass(hippoBeanClass));
        }
    }
    return superClasses.isEmpty() ? null : new ClassReference(superClasses.last());
}
From source file:eu.ggnet.dwoss.misc.op.listings.SalesListingProducerOperation.java
/** * Create a filejacket from a collection of lines that are filtered by configuration parameters. * Lines are filtered by brand and group. * <p>/*from ww w . j ava2 s .co m*/ * @param config configuration for filtering and file creation * @param all lines to be considered * @return a filejacket from a collection of lines that are filtered by configuration parameters. */ private FileJacket createListing(ListingConfiguration config, Collection<StackedLine> all) { try { SortedSet<StackedLine> filtered = all.stream() .filter(line -> (config.getAllBrands().contains(line.getBrand()) && config.getGroups().contains(line.getGroup()))) .collect(Collectors.toCollection(TreeSet::new)); if (filtered.isEmpty()) return null; L.info("Creating listing {} with {} lines", config.getName(), filtered.size()); JRDataSource datasource = new JRBeanCollectionDataSource(filtered); JasperPrint jasperPrint = JasperFillManager.fillReport(config.getJasperTemplateFile(), config.toReportParamters(), datasource); byte[] pdfContend = JasperExportManager.exportReportToPdf(jasperPrint); return new FileJacket(config.getFilePrefix() + config.getName(), ".pdf", pdfContend); } catch (JRException ex) { throw new RuntimeException(ex); } }
From source file:org.wrml.runtime.DefaultModel.java
/**
 * Collects the key value for the given schema (a single slot value, or a
 * CompositeKey over all declared key slots) into the builder, recurses into
 * every base schema so inherited keys are collected too, and returns the
 * built Keys.
 */
private final Keys buildKeys(final URI schemaUri, final Map<String, Object> readOnlySlotMap,
        final KeysBuilder keysBuilder) {
    final Context context = getContext();
    final SchemaLoader schemaLoader = context.getSchemaLoader();
    final Prototype prototype = schemaLoader.getPrototype(schemaUri);

    final SortedSet<String> keySlotNames = prototype.getDeclaredKeySlotNames();
    if (keySlotNames != null && !keySlotNames.isEmpty()) {
        final Object keyValue;
        if (keySlotNames.size() == 1) {
            // single-slot key: use the slot's value, or null when the slot is absent
            final String keySlotName = keySlotNames.first();
            keyValue = readOnlySlotMap.containsKey(keySlotName)
                    ? readOnlySlotMap.get(keySlotName)
                    : null;
        } else {
            // multi-slot key: combine every key slot value into a composite key
            final SortedMap<String, Object> keySlots = new TreeMap<String, Object>();
            for (final String keySlotName : keySlotNames) {
                keySlots.put(keySlotName, readOnlySlotMap.get(keySlotName));
            }
            keyValue = new CompositeKey(keySlots);
        }
        if (keyValue != null) {
            keysBuilder.addKey(schemaUri, keyValue);
        }
    }

    // recurse into the base schemas so keys declared on ancestors are included
    final Set<URI> baseSchemaUris = prototype.getAllBaseSchemaUris();
    if (baseSchemaUris != null && !baseSchemaUris.isEmpty()) {
        for (final URI baseSchemaUri : baseSchemaUris) {
            buildKeys(baseSchemaUri, readOnlySlotMap, keysBuilder);
        }
    }
    return keysBuilder.toKeys();
}