List of usage examples for java.util.SortedSet.addAll
boolean addAll(Collection<? extends E> c);
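SortedSet.addAll copies every element of the given collection into the set: the set keeps its sort order (natural ordering, or the Comparator supplied at construction), duplicates are silently dropped, and the call returns true only if the set actually changed. A minimal, self-contained sketch (the element values are arbitrary):

import java.util.Arrays;
import java.util.SortedSet;
import java.util.TreeSet;

public class SortedSetAddAllDemo {
    public static void main(String[] args) {
        SortedSet<String> fruit = new TreeSet<String>();
        fruit.add("mango");

        // addAll inserts every element of the argument collection; the TreeSet keeps
        // the result sorted and silently drops the duplicate "mango".
        boolean changed = fruit.addAll(Arrays.asList("banana", "apple", "mango"));

        System.out.println(changed); // true: "apple" and "banana" were new
        System.out.println(fruit);   // [apple, banana, mango]
    }
}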
From source file: org.apache.fop.fonts.truetype.OpenFont.java

/**
 * Returns the order in which the tables in a TrueType font should be written to file.
 * @param directoryTabs the map that is to be sorted.
 * @return TTFTablesNames[] an array of table names sorted in the order they should appear in
 * the TTF file.
 */
SortedSet<Map.Entry<OFTableName, OFDirTabEntry>> sortDirTabMap(
        Map<OFTableName, OFDirTabEntry> directoryTabs) {
    SortedSet<Map.Entry<OFTableName, OFDirTabEntry>> sortedSet =
            new TreeSet<Map.Entry<OFTableName, OFDirTabEntry>>(
                    new Comparator<Map.Entry<OFTableName, OFDirTabEntry>>() {
                        public int compare(Entry<OFTableName, OFDirTabEntry> o1,
                                Entry<OFTableName, OFDirTabEntry> o2) {
                            return (int) (o1.getValue().getOffset() - o2.getValue().getOffset());
                        }
                    });
    // @SuppressFBWarnings("DMI_ENTRY_SETS_MAY_REUSE_ENTRY_OBJECTS")
    sortedSet.addAll(directoryTabs.entrySet());
    return sortedSet;
}
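The FOP helper above sorts a font's directory tables by file offset by copying the map's entry set into a TreeSet built around a custom Comparator. Below is a self-contained sketch of the same pattern with hypothetical placeholder types and data (plain String/Long keys and offsets instead of OFTableName/OFDirTabEntry); Long.compare sidesteps the (int) cast in the original comparator, which can truncate for very large offset differences:

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.SortedSet;
import java.util.TreeSet;

public class EntrySetSortDemo {
    public static void main(String[] args) {
        // Hypothetical table offsets keyed by table name, standing in for the FOP types above.
        Map<String, Long> directoryTabs = new LinkedHashMap<String, Long>();
        directoryTabs.put("glyf", 204L);
        directoryTabs.put("head", 12L);
        directoryTabs.put("cmap", 108L);

        // Same pattern as sortDirTabMap: a TreeSet ordered by a value comparator, filled in one
        // call via addAll(entrySet()). Note that entries whose values compare equal would be
        // treated as duplicates and dropped by the TreeSet.
        SortedSet<Map.Entry<String, Long>> sortedByOffset = new TreeSet<Map.Entry<String, Long>>(
                (e1, e2) -> Long.compare(e1.getValue(), e2.getValue()));
        sortedByOffset.addAll(directoryTabs.entrySet());

        for (Map.Entry<String, Long> entry : sortedByOffset) {
            System.out.println(entry.getKey() + " @ " + entry.getValue());
        }
        // head @ 12
        // cmap @ 108
        // glyf @ 204
    }
}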
From source file: org.dllearner.reasoning.SPARQLReasoner.java

public SortedSet<OWLIndividual> getOWLIndividuals() {
    ResultSet rs;
    if (!laxMode) {
        rs = executeSelectQuery(SPARQLQueryUtils.SELECT_INDIVIDUALS_QUERY);
    } else {
        rs = executeSelectQuery(SPARQLQueryUtils.SELECT_INDIVIDUALS_QUERY_ALT);
    }
    SortedSet<OWLIndividual> individuals = new TreeSet<>();
    individuals.addAll(asOWLEntities(EntityType.NAMED_INDIVIDUAL, rs, "var1"));
    return individuals;
}
From source file: net.sourceforge.fenixedu.domain.DegreeCurricularPlan.java

public SortedSet<DegreeModuleScope> getDegreeModuleScopes() {
    final SortedSet<DegreeModuleScope> degreeModuleScopes = new TreeSet<DegreeModuleScope>(
            DegreeModuleScope.COMPARATOR_BY_CURRICULAR_YEAR_AND_SEMESTER_AND_CURRICULAR_COURSE_NAME);
    for (final CurricularCourse curricularCourse : this.getCurricularCoursesSet()) {
        degreeModuleScopes.addAll(curricularCourse.getDegreeModuleScopes());
    }
    return degreeModuleScopes;
}
From source file: edu.ku.brc.specify.tasks.QueryTask.java

/**
 * @param parent
 * @param parentTT
 *
 * Recursively constructs tableTree defined by "querybuilder.xml" schema.
 */
protected void processForTables(final Element parent, final TableTree parentTT) {
    String tableName = XMLHelper.getAttr(parent, "name", null);
    DBTableInfo tableInfo = DBTableIdMgr.getInstance().getByShortClassName(tableName);
    if (!tableInfo.isHidden() && (!AppContextMgr.isSecurityOn() || tableInfo.getPermissions().canView())) {
        String fieldName = XMLHelper.getAttr(parent, "field", null);
        if (StringUtils.isEmpty(fieldName)) {
            fieldName = tableName.substring(0, 1).toLowerCase() + tableName.substring(1);
        }
        String abbrev = XMLHelper.getAttr(parent, "abbrev", null);
        TableTree newTreeNode = parentTT.addKid(new TableTree(tableName, fieldName, abbrev, tableInfo));
        if (Treeable.class.isAssignableFrom(tableInfo.getClassObj())) {
            try {
                TreeDefIface<?, ?, ?> treeDef = getTreeDefForTreeLevelQRI(fieldName, parentTT, tableInfo);
                SortedSet<TreeDefItemIface<?, ?, ?>> defItems = new TreeSet<TreeDefItemIface<?, ?, ?>>(
                        new Comparator<TreeDefItemIface<?, ?, ?>>() {
                            public int compare(TreeDefItemIface<?, ?, ?> o1, TreeDefItemIface<?, ?, ?> o2) {
                                Integer r1 = o1.getRankId();
                                Integer r2 = o2.getRankId();
                                return r1.compareTo(r2);
                            }
                        });
                defItems.addAll(treeDef.getTreeDefItems());
                for (TreeDefItemIface<?, ?, ?> defItem : defItems) {
                    if (defItem.getRankId() > 0) { //skip root, just because.
                        try {
                            //newTreeNode.getTableQRI().addField(
                            //        new TreeLevelQRI(newTreeNode.getTableQRI(), null, defItem
                            //                .getRankId()));
                            newTreeNode.getTableQRI().addField(new TreeLevelQRI(newTreeNode.getTableQRI(),
                                    null, defItem.getRankId(), "name", treeDef));
                            if (defItem instanceof TaxonTreeDefItem) {
                                DBFieldInfo fi = DBTableIdMgr.getInstance().getInfoById(Taxon.getClassTableId())
                                        .getFieldByName("author");
                                if (fi != null && !fi.isHidden()) {
                                    newTreeNode.getTableQRI().addField(new TreeLevelQRI(newTreeNode.getTableQRI(),
                                            null, defItem.getRankId(), "author", treeDef));
                                }
                                fi = DBTableIdMgr.getInstance().getInfoById(Taxon.getClassTableId())
                                        .getFieldByName("groupNumber");
                                if (fi != null && !fi.isHidden()) {
                                    newTreeNode.getTableQRI().addField(new TreeLevelQRI(newTreeNode.getTableQRI(),
                                            null, defItem.getRankId(), "groupNumber", treeDef));
                                }
                            }
                        } catch (Exception ex) {
                            // if there is no TreeDefItem for the rank then just skip it.
                            if (ex instanceof TreeLevelQRI.NoTreeDefItemException) {
                                log.error(ex);
                            }
                            // else something is really messed up
                            else {
                                UsageTracker.incrHandledUsageCount();
                                edu.ku.brc.exceptions.ExceptionTracker.getInstance().capture(QueryTask.class, ex);
                                ex.printStackTrace();
                            }
                        }
                    }
                }
            } catch (Exception ex) {
                UsageTracker.incrHandledUsageCount();
                edu.ku.brc.exceptions.ExceptionTracker.getInstance().capture(QueryTask.class, ex);
                ex.printStackTrace();
            }
        }
        for (Object kidObj : parent.selectNodes("table")) {
            Element kidElement = (Element) kidObj;
            processForTables(kidElement, newTreeNode);
        }
        for (Object obj : parent.selectNodes("alias")) {
            Element kidElement = (Element) obj;
            String kidClassName = XMLHelper.getAttr(kidElement, "name", null);
            tableInfo = DBTableIdMgr.getInstance().getByShortClassName(kidClassName);
            if (!tableInfo.isHidden() && (!AppContextMgr.isSecurityOn() || tableInfo.getPermissions().canView())) {
                tableName = XMLHelper.getAttr(kidElement, "name", null);
                fieldName = XMLHelper.getAttr(kidElement, "field", null);
                if (StringUtils.isEmpty(fieldName)) {
                    fieldName = tableName.substring(0, 1).toLowerCase() + tableName.substring(1);
                }
                newTreeNode.addKid(new TableTree(kidClassName, fieldName, true));
            }
        }
    }
}
From source file: org.dllearner.reasoning.SPARQLReasoner.java

public SortedSet<OWLClassExpression> getSubClasses(OWLClassExpression description, boolean direct) {
    if (description.isAnonymous()) {
        throw new IllegalArgumentException("Only named classes are supported.");
    }
    SortedSet<OWLClassExpression> subClasses = new TreeSet<>();
    String query;
    if (description.isOWLThing()) {
        query = SPARQLQueryUtils.SELECT_TOP_LEVEL_OWL_CLASSES;
    } else {
        query = String.format(SPARQLQueryUtils.SELECT_SUBCLASS_OF_QUERY,
                description.asOWLClass().toStringID());
        if (direct) {
        } else {
        }
    }
    ResultSet rs = executeSelectQuery(query);
    subClasses.addAll(asOWLEntities(EntityType.CLASS, rs, "var1"));
    subClasses.remove(description);
    subClasses.remove(df.getOWLNothing());
    // System.out.println("Sub(" + description + "):" + subClasses);
    return new TreeSet<>(subClasses);
}
From source file: org.wrml.runtime.schema.generator.SchemaGenerator.java

/**
 * Generate a {@link JavaBytecodeClass} from the specified {@link Schema}.
 * <p/>
 * Like a *big* regex (regular expression), we can compile all of the
 * WRML schema metadata (as if it is a single *big* String input) into a
 * loaded Java class so that the rest of the WRML *runtime* can use
 * (Prototype-optimized) reflection to access WRML's type system.
 *
 * @param schema The Schema to represent as a Java class.
 * @return The Java class representation of the specified schema.
 */
public JavaBytecodeClass generateSchemaInterface(final Schema schema) {
    /*
     * Create the simple POJO that will return the transformation
     * information. By the end of this method, this will be full of Java
     * bytecode-oriented information gleaned from this method's schema
     * parameter.
     */
    final JavaBytecodeClass javaBytecodeClass = new JavaBytecodeClass();
    final JavaBytecodeAnnotation wrmlAnnotation = new JavaBytecodeAnnotation(
            SchemaGenerator.ANNOTATION_INTERNAL_NAME_WRML);
    wrmlAnnotation.setAttributeValue(AnnotationParameterName.uniqueName.name(),
            schema.getUniqueName().getFullName());
    javaBytecodeClass.getAnnotations().add(wrmlAnnotation);

    final SortedSet<String> keySlotNameSet = new TreeSet<>();

    /*
     * If the schema declares any key slots, note them with a
     * class-level annotation.
     */
    final List<String> keySlotNames = schema.getKeySlotNames();
    if (keySlotNames != null && keySlotNames.size() > 0) {
        keySlotNameSet.addAll(keySlotNames);
    }

    if (!keySlotNameSet.isEmpty()) {
        final String[] keySlotNamesArray = new String[keySlotNameSet.size()];
        wrmlAnnotation.setAttributeValue(AnnotationParameterName.keySlotNames.name(),
                keySlotNameSet.toArray(keySlotNamesArray));
    }

    /*
     * If the schema declares any comparable slots, note them with a
     * class-level annotation.
     */
    final List<String> comparableSlotNames = schema.getComparableSlotNames();
    if (comparableSlotNames != null && comparableSlotNames.size() > 0) {
        final String[] comparableSlotNamesArray = new String[comparableSlotNames.size()];
        wrmlAnnotation.setAttributeValue(AnnotationParameterName.comparableSlotNames.name(),
                comparableSlotNames.toArray(comparableSlotNamesArray));
    }

    wrmlAnnotation.setAttributeValue(AnnotationParameterName.titleSlotName.name(), schema.getTitleSlotName());

    /*
     * In Java, all interfaces extend java.lang.Object, so this can
     * remain constant for Schema too.
     */
    javaBytecodeClass.setSuperName(SchemaGenerator.OBJECT_INTERNAL_NAME);

    /*
     * Go from schema id (URI) to internal Java class name. This is a
     * simple matter of stripping the leading forward slash from the
     * URI's path. Internally (in the bytecode), Java's class names use
     * forward slash (/) instead of full stop dots (.).
     */
    final URI schemaUri = schema.getUri();
    final String interfaceInternalName = uriToInternalTypeName(schemaUri);

    // if (schema.getUniqueName() == null)
    // {
    //     schema.setUniqueName(new UniqueName(schemaUri.getPath()));
    // }

    javaBytecodeClass.setInternalName(interfaceInternalName);

    /*
     * Add the class-level Description annotation to capture the
     * schema's description.
     */
    final String schemaDescription = schema.getDescription();
    if (schemaDescription != null && !schemaDescription.trim().isEmpty()) {
        final JavaBytecodeAnnotation schemaDescriptionAnnotation = new JavaBytecodeAnnotation(
                SchemaGenerator.ANNOTATION_INTERNAL_NAME_DESCRIPTION);
        schemaDescriptionAnnotation.setAttributeValue(AnnotationParameterName.value.name(), schemaDescription);
        javaBytecodeClass.getAnnotations().add(schemaDescriptionAnnotation);
    }

    String schemaTitle = schema.getTitle();
    if (schemaTitle == null || schemaTitle.trim().isEmpty()) {
        schemaTitle = schema.getUniqueName().getLocalName();
    }

    final JavaBytecodeAnnotation schemaTitleAnnotation = new JavaBytecodeAnnotation(
            SchemaGenerator.ANNOTATION_INTERNAL_NAME_TITLE);
    schemaTitleAnnotation.setAttributeValue(AnnotationParameterName.value.name(), schemaTitle);
    javaBytecodeClass.getAnnotations().add(schemaTitleAnnotation);

    final URI schemaThumbnailImageLocation = schema.getThumbnailLocation();
    if (schemaThumbnailImageLocation != null) {
        final JavaBytecodeAnnotation schemaThumbnailImageAnnotation = new JavaBytecodeAnnotation(
                SchemaGenerator.ANNOTATION_INTERNAL_NAME_THUMBNAIL_IMAGE);
        schemaThumbnailImageAnnotation.setAttributeValue(AnnotationParameterName.value.name(),
                schemaThumbnailImageLocation.toString());
        javaBytecodeClass.getAnnotations().add(schemaThumbnailImageAnnotation);
    }

    boolean isAggregate = false;

    /*
     * Turn the schema's base schema list into our Java class's base
     * (aka extended) interfaces.
     */
    final List<URI> baseSchemaUris = schema.getBaseSchemaUris();
    for (final URI baseSchemaUri : baseSchemaUris) {
        final String baseSchemaInternalName = uriToInternalTypeName(baseSchemaUri);
        javaBytecodeClass.getInterfaces().add(baseSchemaInternalName);

        if (!isAggregate && getSchemaLoader().getAggregateDocumentSchemaUri().equals(baseSchemaUri)) {
            isAggregate = true;

            final List<Slot> slots = schema.getSlots();
            for (final Slot slot : slots) {
                final Value value = slot.getValue();
                if (!(value instanceof LinkValue || value instanceof ModelValue
                        || value instanceof MultiSelectValue)) {
                    keySlotNameSet.add(slot.getName());
                }
            }
        }
    }

    // Add the Model base interface
    javaBytecodeClass.getInterfaces().add(SchemaGenerator.MODEL_INTERFACE_INTERNAL_NAME);

    /*
     * Add the class-level Tags annotation to capture the schema's tags.
     */
    final List<String> schemaTags = schema.getTags();
    if (schemaTags != null && schemaTags.size() > 0) {
        final JavaBytecodeAnnotation tagsAnnotation = new JavaBytecodeAnnotation(
                SchemaGenerator.ANNOTATION_INTERNAL_NAME_TAGS);
        final String[] tagsArray = new String[schemaTags.size()];
        tagsAnnotation.setAttributeValue(AnnotationParameterName.value.name(), schemaTags.toArray(tagsArray));
        javaBytecodeClass.getAnnotations().add(tagsAnnotation);
    }

    final Long schemaVersion = schema.getVersion();
    if (schemaVersion != null) {
        final JavaBytecodeAnnotation versionAnnotation = new JavaBytecodeAnnotation(
                SchemaGenerator.ANNOTATION_INTERNAL_NAME_VERSION);
        versionAnnotation.setAttributeValue(AnnotationParameterName.value.name(), schemaVersion);
        javaBytecodeClass.getAnnotations().add(versionAnnotation);
    }

    final Boolean maybeReadOnly = schema.isReadOnly();
    if (maybeReadOnly != null && maybeReadOnly) {
        final JavaBytecodeAnnotation readOnlyAnnotation = new JavaBytecodeAnnotation(
                SchemaGenerator.ANNOTATION_INTERNAL_NAME_READ_ONLY);
        javaBytecodeClass.getAnnotations().add(readOnlyAnnotation);
    }

    /*
     * Generate the interface method signatures.
     */
    generateSchemaInterfaceMethods(schema, javaBytecodeClass, isAggregate);

    // TODO: "Open slots" with signatures. Track the open slots via
    // the JavaBytecode types.
    //
    // TODO: The signature will need to be changed for generics:
    // Example:
    //
    // Java: public interface Test<T extends List<?>> extends List<T>
    // Class File: public abstract interface org.wrml.schema.Test
    // extends java.util.List
    // Signature:
    // <T::Ljava/util/List<*>;>Ljava/lang/Object;Ljava/util/List<TT;>;
    //
    // javaBytecodeClass.setSignature(null);

    generateSchemaInterfaceBytecode(javaBytecodeClass);

    return javaBytecodeClass;
}
From source file: net.sourceforge.fenixedu.domain.ExecutionCourse.java

public SortedSet<Shift> getShiftsOrderedByLessons() {
    final SortedSet<Shift> shifts = new TreeSet<Shift>(Shift.SHIFT_COMPARATOR_BY_TYPE_AND_ORDERED_LESSONS);
    shifts.addAll(getAssociatedShifts());
    return shifts;
}
From source file: net.sourceforge.fenixedu.domain.ExecutionCourse.java

public SortedSet<Professorship> getProfessorshipsSortedAlphabetically() {
    final SortedSet<Professorship> professorhips = new TreeSet<Professorship>(
            Professorship.COMPARATOR_BY_PERSON_NAME);
    professorhips.addAll(getProfessorshipsSet());
    return professorhips;
}
From source file: net.sourceforge.fenixedu.domain.ExecutionCourse.java

public SortedSet<CurricularCourse> getCurricularCoursesSortedByDegreeAndCurricularCourseName() {
    final SortedSet<CurricularCourse> curricularCourses = new TreeSet<CurricularCourse>(
            CurricularCourse.CURRICULAR_COURSE_COMPARATOR_BY_DEGREE_AND_NAME);
    curricularCourses.addAll(getAssociatedCurricularCoursesSet());
    return curricularCourses;
}
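The three ExecutionCourse getters above share one idiom: build a TreeSet around a domain-specific Comparator and copy the unordered backing collection into it with addAll, so callers receive an independent, sorted view. A self-contained sketch of that idiom, using a hypothetical Person type in place of Shift/Professorship/CurricularCourse:

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.SortedSet;
import java.util.TreeSet;

public class SortedCopyDemo {

    // Hypothetical domain type standing in for Shift / Professorship / CurricularCourse.
    static class Person {
        final String name;
        Person(String name) { this.name = name; }
        String getName() { return name; }
        @Override public String toString() { return name; }
    }

    static final Comparator<Person> COMPARATOR_BY_NAME = Comparator.comparing(Person::getName);

    // Same idiom as the ExecutionCourse getters: copy an unordered collection into a
    // comparator-backed TreeSet so callers get an independent, sorted view.
    static SortedSet<Person> getPeopleSortedByName(List<Person> people) {
        final SortedSet<Person> sorted = new TreeSet<Person>(COMPARATOR_BY_NAME);
        sorted.addAll(people);
        return sorted;
    }

    public static void main(String[] args) {
        List<Person> people = Arrays.asList(new Person("Rui"), new Person("Ana"), new Person("Marta"));
        System.out.println(getPeopleSortedByName(people)); // [Ana, Marta, Rui]
    }
}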
From source file: org.apache.cassandra.db.ColumnFamilyStore.java

public List<Row> scan(IndexClause clause, AbstractBounds range, IFilter dataFilter) {
    // Start with the most-restrictive indexed clause, then apply remaining clauses
    // to each row matching that clause.
    // TODO: allow merge join instead of just one index + loop
    IndexExpression primary = highestSelectivityPredicate(clause);
    ColumnFamilyStore indexCFS = getIndexedColumnFamilyStore(primary.column_name);
    if (logger.isDebugEnabled())
        logger.debug("Primary scan clause is " + getComparator().getString(primary.column_name));
    assert indexCFS != null;
    DecoratedKey indexKey = indexCFS.partitioner.decorateKey(primary.value);

    // if the slicepredicate doesn't contain all the columns for which we have expressions to evaluate,
    // it needs to be expanded to include those too
    IFilter firstFilter = dataFilter;
    NamesQueryFilter extraFilter = null;
    if (clause.expressions.size() > 1) {
        if (dataFilter instanceof SliceQueryFilter) {
            // if we have a high chance of getting all the columns in a single index slice, do that.
            // otherwise, create an extraFilter to fetch by name the columns referenced by the additional expressions.
            if (getMaxRowSize() < DatabaseDescriptor.getColumnIndexSize()) {
                logger.debug("Expanding slice filter to entire row to cover additional expressions");
                firstFilter = new SliceQueryFilter(ByteBufferUtil.EMPTY_BYTE_BUFFER,
                        ByteBufferUtil.EMPTY_BYTE_BUFFER, ((SliceQueryFilter) dataFilter).reversed,
                        Integer.MAX_VALUE);
            } else {
                logger.debug("adding extraFilter to cover additional expressions");
                SortedSet<ByteBuffer> columns = new TreeSet<ByteBuffer>(getComparator());
                for (IndexExpression expr : clause.expressions) {
                    if (expr == primary)
                        continue;
                    columns.add(expr.column_name);
                }
                extraFilter = new NamesQueryFilter(columns);
            }
        } else {
            logger.debug("adding columns to firstFilter to cover additional expressions");
            // just add in columns that are not part of the resultset
            assert dataFilter instanceof NamesQueryFilter;
            SortedSet<ByteBuffer> columns = new TreeSet<ByteBuffer>(getComparator());
            for (IndexExpression expr : clause.expressions) {
                if (expr == primary || ((NamesQueryFilter) dataFilter).columns.contains(expr.column_name))
                    continue;
                columns.add(expr.column_name);
            }
            if (columns.size() > 0) {
                columns.addAll(((NamesQueryFilter) dataFilter).columns);
                firstFilter = new NamesQueryFilter(columns);
            }
        }
    }

    List<Row> rows = new ArrayList<Row>();
    ByteBuffer startKey = clause.start_key;
    QueryPath path = new QueryPath(columnFamily);

    // we need to store last data key accessed to avoid duplicate results
    // because in the while loop new iteration we can access the same column if start_key was not set
    ByteBuffer lastDataKey = null;

    // fetch row keys matching the primary expression, fetch the slice predicate for each
    // and filter by remaining expressions. repeat until finished w/ assigned range or index row is exhausted.
    outer: while (true) {
        /* we don't have a way to get the key back from the DK -- we just have a token --
         * so, we need to loop after starting with start_key, until we get to keys in the given `range`.
         * But, if the calling StorageProxy is doing a good job estimating data from each range, the range
         * should be pretty close to `start_key`. */
        if (logger.isDebugEnabled())
            logger.debug(String.format("Scanning index %s starting with %s", expressionString(primary),
                    indexCFS.getComparator().getString(startKey)));

        // We shouldn't fetch only 1 row as this provides buggy paging in case the first row doesn't satisfy all clauses
        int count = Math.max(clause.count, 2);
        QueryFilter indexFilter = QueryFilter.getSliceFilter(indexKey,
                new QueryPath(indexCFS.getColumnFamilyName()), startKey, ByteBufferUtil.EMPTY_BYTE_BUFFER,
                false, count);
        ColumnFamily indexRow = indexCFS.getColumnFamily(indexFilter);
        logger.debug("fetched {}", indexRow);
        if (indexRow == null)
            break;

        ByteBuffer dataKey = null;
        int n = 0;
        for (IColumn column : indexRow.getSortedColumns()) {
            if (column.isMarkedForDelete())
                continue;
            dataKey = column.name();
            n++;
            DecoratedKey dk = partitioner.decorateKey(dataKey);
            if (!range.right.equals(partitioner.getMinimumToken()) && range.right.compareTo(dk.token) < 0)
                break outer;
            if (!range.contains(dk.token) || dataKey.equals(lastDataKey))
                continue;

            // get the row columns requested, and additional columns for the expressions if necessary
            ColumnFamily data = getColumnFamily(new QueryFilter(dk, path, firstFilter));
            assert data != null : String.format(
                    "No data found for %s in %s:%s (original filter %s) from expression %s", firstFilter, dk,
                    path, dataFilter, expressionString(primary));
            logger.debug("fetched data row {}", data);
            if (extraFilter != null) {
                // we might have gotten the expression columns in with the main data slice, but
                // we can't know for sure until that slice is done. So, we'll do the extra query
                // if we go through and any expression columns are not present.
                for (IndexExpression expr : clause.expressions) {
                    if (expr != primary && data.getColumn(expr.column_name) == null) {
                        data.addAll(getColumnFamily(new QueryFilter(dk, path, extraFilter)));
                        break;
                    }
                }
            }

            if (satisfies(data, clause, primary)) {
                logger.debug("row {} satisfies all clauses", data);
                // cut the resultset back to what was requested, if necessary
                if (firstFilter != dataFilter) {
                    ColumnFamily expandedData = data;
                    data = expandedData.cloneMeShallow();
                    IColumnIterator iter = dataFilter.getMemtableColumnIterator(expandedData, dk, getComparator());
                    new QueryFilter(dk, path, dataFilter).collectCollatedColumns(data, iter, gcBefore());
                }
                rows.add(new Row(dk, data));
            }

            if (rows.size() == clause.count)
                break outer;
        }
        if (n < clause.count || startKey.equals(dataKey))
            break;
        lastDataKey = startKey = dataKey;
    }
    return rows;
}