List of usage examples for java.util.Set.size()
int size();
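Returns the number of elements in the set (its cardinality). Before the real-world examples below, a minimal standalone sketch of the contract: because a Set holds no duplicates, re-adding an existing element does not change size().

import java.util.HashSet;
import java.util.Set;

public class SetSizeDemo {
    public static void main(String[] args) {
        Set<String> names = new HashSet<>();
        names.add("alice");
        names.add("bob");
        names.add("alice"); // duplicate; the set keeps only one copy
        System.out.println(names.size()); // prints 2
    }
}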
From source file:com.act.lcms.db.analysis.Utils.java
/**
 * Finds all chemical targets for a set of LCMS wells. Throws an IllegalArgumentException if more than one target
 * is shared by the wells.
 * @param db The DB to query for information about the wells/targets.
 * @param wells A set of wells whose targets to scan.
 * @return The single shared target of all the wells, or null.
 * @throws SQLException
 * @throws IllegalArgumentException Thrown when the wells share more than one target chemical.
 */
public static CuratedChemical requireOneTarget(DB db, List<LCMSWell> wells)
        throws SQLException, IllegalArgumentException {
    Set<CuratedChemical> chemicals = extractTargetsForWells(db, wells);
    if (chemicals.size() > 1) {
        // TODO: is there a foreach approach that we can use here that won't break backwards compatibility?
        List<String> chemicalNames = new ArrayList<>(chemicals.size());
        for (CuratedChemical chemical : chemicals) {
            chemicalNames.add(chemical.getName());
        }
        throw new IllegalArgumentException(
                String.format("Found multiple target chemicals where one required: %s",
                        StringUtils.join(chemicalNames, ", ")));
    } else if (chemicals.size() < 1) {
        return null;
    }
    return chemicals.iterator().next();
}
From source file:CollectionsX.java
/** Tests whether two sets have any intersection. */
public static final boolean isIntersected(Set a, Set b) {
    final int sza = a != null ? a.size() : 0;
    final int szb = b != null ? b.size() : 0;
    if (sza == 0 || szb == 0)
        return false;
    final Set large, small;
    if (sza > szb) {
        large = a;
        small = b;
    } else {
        large = b;
        small = a;
    }
    for (final Iterator it = small.iterator(); it.hasNext();)
        if (large.contains(it.next()))
            return true;
    return false;
}
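The size() comparison above makes the loop run over the smaller set, minimizing the number of contains() probes. If null inputs are not a concern, the JDK's own java.util.Collections.disjoint performs an equivalent test; a small sketch:

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class IntersectionDemo {
    public static void main(String[] args) {
        Set<Integer> a = new HashSet<>(Arrays.asList(1, 2, 3));
        Set<Integer> b = new HashSet<>(Arrays.asList(3, 4));
        // disjoint() returns true when the collections share no elements,
        // so its negation is an intersection test.
        System.out.println(!Collections.disjoint(a, b)); // prints true
    }
}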
From source file:com.amazon.janusgraph.example.MarvelGraphFactory.java
public static void load(final JanusGraph graph, final int rowsToLoad, final boolean report) throws Exception {
    JanusGraphManagement mgmt = graph.openManagement();
    if (mgmt.getGraphIndex(CHARACTER) == null) {
        final PropertyKey characterKey = mgmt.makePropertyKey(CHARACTER).dataType(String.class).make();
        mgmt.buildIndex(CHARACTER, Vertex.class).addKey(characterKey).unique().buildCompositeIndex();
    }
    if (mgmt.getGraphIndex(COMIC_BOOK) == null) {
        final PropertyKey comicBookKey = mgmt.makePropertyKey(COMIC_BOOK).dataType(String.class).make();
        mgmt.buildIndex(COMIC_BOOK, Vertex.class).addKey(comicBookKey).unique().buildCompositeIndex();
        mgmt.makePropertyKey(WEAPON).dataType(String.class).make();
        mgmt.makeEdgeLabel(APPEARED).multiplicity(Multiplicity.MULTI).make();
    }
    mgmt.commit();

    ClassLoader classLoader = MarvelGraphFactory.class.getClassLoader();
    URL resource = classLoader.getResource("META-INF/marvel.csv");
    int line = 0;
    Map<String, Set<String>> comicToCharacter = new HashMap<>();
    Map<String, Set<String>> characterToComic = new HashMap<>();
    Set<String> characters = new HashSet<>();
    BlockingQueue<Runnable> creationQueue = new LinkedBlockingQueue<>();
    try (CSVReader reader = new CSVReader(new InputStreamReader(resource.openStream()))) {
        String[] nextLine;
        while ((nextLine = reader.readNext()) != null && line < rowsToLoad) {
            line++;
            String comicBook = nextLine[1];
            String[] characterNames = nextLine[0].split("/");
            if (!comicToCharacter.containsKey(comicBook)) {
                comicToCharacter.put(comicBook, new HashSet<String>());
            }
            List<String> comicCharacters = Arrays.asList(characterNames);
            comicToCharacter.get(comicBook).addAll(comicCharacters);
            characters.addAll(comicCharacters);
        }
    }

    for (String character : characters) {
        creationQueue.add(new CharacterCreationCommand(character, graph));
    }

    BlockingQueue<Runnable> appearedQueue = new LinkedBlockingQueue<>();
    for (String comicBook : comicToCharacter.keySet()) {
        creationQueue.add(new ComicBookCreationCommand(comicBook, graph));
        Set<String> comicCharacters = comicToCharacter.get(comicBook);
        for (String character : comicCharacters) {
            AppearedCommand lineCommand = new AppearedCommand(graph, new Appeared(character, comicBook));
            appearedQueue.add(lineCommand);
            if (!characterToComic.containsKey(character)) {
                characterToComic.put(character, new HashSet<String>());
            }
            characterToComic.get(character).add(comicBook);
        }
        REGISTRY.histogram("histogram.comic-to-character").update(comicCharacters.size());
    }

    int maxAppearances = 0;
    String maxCharacter = "";
    for (String character : characterToComic.keySet()) {
        Set<String> comicBookSet = characterToComic.get(character);
        int numberOfAppearances = comicBookSet.size();
        REGISTRY.histogram("histogram.character-to-comic").update(numberOfAppearances);
        if (numberOfAppearances > maxAppearances) {
            maxCharacter = character;
            maxAppearances = numberOfAppearances;
        }
    }
    LOG.info("Character {} has most appearances at {}", maxCharacter, maxAppearances);

    ExecutorService executor = Executors.newFixedThreadPool(POOL_SIZE);
    for (int i = 0; i < POOL_SIZE; i++) {
        executor.execute(new BatchCommand(graph, creationQueue));
    }
    executor.shutdown();
    while (!executor.awaitTermination(60, TimeUnit.SECONDS)) {
        LOG.info("Awaiting:" + creationQueue.size());
        if (report) {
            REPORTER.report();
        }
    }

    executor = Executors.newSingleThreadExecutor();
    executor.execute(new BatchCommand(graph, appearedQueue));
    executor.shutdown();
    while (!executor.awaitTermination(60, TimeUnit.SECONDS)) {
        LOG.info("Awaiting:" + appearedQueue.size());
        if (report) {
            REPORTER.report();
        }
    }
    LOG.info("MarvelGraphFactory.load complete");
}
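The containsKey/put dance used above to build comicToCharacter and characterToComic is a hand-rolled multimap. On Java 8+, Map.computeIfAbsent collapses it to one line; a small sketch (the character and comic names are illustrative, not from the dataset):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class MultimapDemo {
    public static void main(String[] args) {
        Map<String, Set<String>> characterToComic = new HashMap<>();
        // Creates the HashSet only on the first call for a given key.
        characterToComic.computeIfAbsent("SPIDER-MAN", k -> new HashSet<>()).add("ASM 1");
        characterToComic.computeIfAbsent("SPIDER-MAN", k -> new HashSet<>()).add("ASM 2");
        System.out.println(characterToComic.get("SPIDER-MAN").size()); // prints 2
    }
}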
From source file:juicebox.data.HiCFileTools.java
/**
 * Set intersection
 * http://stackoverflow.com/questions/7574311/efficiently-compute-intersection-of-two-sets-in-java
 *
 * @param set1
 * @param set2
 * @return intersection of set1 and set2
 */
public static Set<Chromosome> getSetIntersection(Set<Chromosome> set1, Set<Chromosome> set2) {
    boolean set1IsLarger = set1.size() > set2.size();
    Set<Chromosome> cloneSet = new HashSet<Chromosome>(set1IsLarger ? set2 : set1);
    cloneSet.retainAll(set1IsLarger ? set1 : set2);
    return cloneSet;
}
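The same copy-then-retainAll pattern works for any element type. A generic sketch (the helper name intersection is mine, not from the source); the copy is essential because retainAll mutates its receiver:

import java.util.HashSet;
import java.util.Set;

public final class SetOps {
    // Copy the smaller set, then retain only the elements also present
    // in the larger one; neither input set is modified.
    public static <T> Set<T> intersection(Set<T> s1, Set<T> s2) {
        boolean s1IsLarger = s1.size() > s2.size();
        Set<T> result = new HashSet<>(s1IsLarger ? s2 : s1);
        result.retainAll(s1IsLarger ? s1 : s2);
        return result;
    }
}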
From source file:com.google.gwt.dev.javac.CachedCompilationUnit.java
public static void save(SourceFileCompilationUnit unit, OutputStream outputStream) throws Exception {
    DataOutputStream dos = null;
    try {
        dos = new DataOutputStream(new BufferedOutputStream(outputStream));
        // version
        dos.writeLong(CompilationUnitDiskCache.CACHE_VERSION);
        // simple stuff
        dos.writeLong(unit.getLastModified());
        dos.writeUTF(unit.getDisplayLocation());
        dos.writeUTF(unit.getTypeName());
        dos.writeUTF(unit.getContentId().get());
        dos.writeBoolean(unit.isSuperSource());
        // compiled classes
        {
            Collection<CompiledClass> compiledClasses = unit.getCompiledClasses();
            int size = compiledClasses.size();
            dos.writeInt(size);
            if (size > 0) {
                // sort in enclosing order to be able to restore enclosing classes by name
                CompiledClass[] compiledClassesArray =
                        compiledClasses.toArray(new CompiledClass[compiledClasses.size()]);
                Arrays.sort(compiledClassesArray, new Comparator<CompiledClass>() {
                    public int compare(CompiledClass o1, CompiledClass o2) {
                        int o1count = countMatches(o1.getInternalName(), Signature.C_DOLLAR);
                        int o2count = countMatches(o2.getInternalName(), Signature.C_DOLLAR);
                        return o1count - o2count;
                    }
                });
                // store
                for (CompiledClass compiledClass : compiledClassesArray) {
                    // internal name
                    dos.writeUTF(compiledClass.getInternalName());
                    // is local
                    dos.writeBoolean(compiledClass.isLocal());
                    // bytes
                    byte[] bytes = compiledClass.getBytes();
                    dos.writeInt(bytes.length);
                    dos.write(bytes);
                    // enclosing class, write the name only
                    CompiledClass enclosingClass = compiledClass.getEnclosingClass();
                    String enclosingClassName =
                            enclosingClass != null ? enclosingClass.getInternalName() : "";
                    dos.writeUTF(enclosingClassName);
                }
            }
        }
        // dependencies
        {
            Set<ContentId> dependencies = unit.getDependencies();
            int size = dependencies.size();
            dos.writeInt(size);
            if (size > 0) {
                for (ContentId contentId : dependencies) {
                    dos.writeUTF(contentId.get());
                }
            }
        }
        // JSNI methods
        {
            List<JsniMethod> jsniMethods = unit.getJsniMethods();
            int size = jsniMethods.size();
            dos.writeInt(size);
            if (size > 0) {
                for (JsniMethod jsniMethod : jsniMethods) {
                    dos.writeUTF(jsniMethod.name());
                    JsFunction function = jsniMethod.function();
                    SourceInfo sourceInfo = function.getSourceInfo();
                    dos.writeInt(sourceInfo.getStartPos());
                    dos.writeInt(sourceInfo.getEndPos());
                    dos.writeInt(sourceInfo.getStartLine());
                    dos.writeUTF(function.toSource());
                }
            }
        }
        // Method lookup
        {
            MethodArgNamesLookup methodArgs = unit.getMethodArgs();
            MethodArgNamesLookup.save(methodArgs, dos);
        }
    } finally {
        IOUtils.closeQuietly(dos);
    }
}
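The recurring serialization pattern here is writing size() as a length prefix followed by each element, so the reader knows how many entries to expect. A minimal round-trip sketch of just that idiom, independent of the GWT types above:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.LinkedHashSet;
import java.util.Set;

public class SizePrefixDemo {
    public static void main(String[] args) throws IOException {
        Set<String> deps = new LinkedHashSet<>();
        deps.add("a");
        deps.add("b");

        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(buffer)) {
            out.writeInt(deps.size()); // length prefix
            for (String dep : deps) {
                out.writeUTF(dep);
            }
        }

        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(buffer.toByteArray()))) {
            int size = in.readInt();
            Set<String> read = new LinkedHashSet<>();
            for (int i = 0; i < size; i++) {
                read.add(in.readUTF());
            }
            System.out.println(read.equals(deps)); // prints true
        }
    }
}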
From source file:de.hpi.fgis.hdrs.client.TripleOutputStream.java
static InetSocketAddress[] getAddresses(Set<Peer> peers) { InetSocketAddress[] addr = new InetSocketAddress[peers.size()]; int i = 0;//from w w w . j a v a 2 s. c o m for (Peer peer : peers) { addr[i++] = peer.getConnectAddress(); } return addr; }
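On Java 8+, the same set-to-array conversion can be expressed with a stream; a sketch assuming the same Peer type and getConnectAddress() accessor as above:

import java.net.InetSocketAddress;
import java.util.Set;

static InetSocketAddress[] getAddresses(Set<Peer> peers) {
    // The array generator sizes the result from the stream's element count,
    // so there is no manual index bookkeeping.
    return peers.stream()
            .map(Peer::getConnectAddress)
            .toArray(InetSocketAddress[]::new);
}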
From source file:com.kibana.multitenancy.plugin.kibana.KibanaSeed.java
public static void setDashboards(String user, Set<String> projects, Set<String> roles, Client esClient,
        String kibanaIndex, String kibanaVersion) {

    //GET .../.kibana/index-pattern/_search?pretty=true&fields=
    // compare results to projects; handle any deltas (create, delete?)

    //check projects for default and remove
    for (String project : BLACKLIST_PROJECTS)
        if (projects.contains(project)) {
            logger.debug("Black-listed project '{}' found. Not adding as an index pattern", project);
            projects.remove(project);
        }

    Set<String> indexPatterns = getIndexPatterns(user, esClient, kibanaIndex);
    logger.debug("Found '{}' Index patterns for user", indexPatterns.size());

    // Check roles here, if user is a cluster-admin we should add .operations to their project? -- correct way to do this?
    logger.debug("Checking for '{}' in users roles '{}'", OPERATIONS_ROLES, roles);
    /*for ( String role : OPERATIONS_ROLES )
        if ( roles.contains(role) ) {
            logger.debug("{} is an admin user", user);
            projects.add(OPERATIONS_PROJECT);
            break;
        }*/

    List<String> sortedProjects = new ArrayList<String>(projects);
    Collections.sort(sortedProjects);

    if (sortedProjects.isEmpty())
        sortedProjects.add(BLANK_PROJECT);

    logger.debug("Setting dashboards given user '{}' and projects '{}'", user, projects);

    // If none have been set yet
    if (indexPatterns.isEmpty()) {
        create(user, sortedProjects, true, esClient, kibanaIndex, kibanaVersion);
        //TODO : Currently it is generating wrong search properties when integrated with ES 2.1
        //createSearchProperties(user, esClient, kibanaIndex);
    } else {
        List<String> common = new ArrayList<String>(indexPatterns);

        // Get a list of all projects that are common
        common.retainAll(sortedProjects);

        sortedProjects.removeAll(common);
        indexPatterns.removeAll(common);

        // for any to create (remaining in projects) call createIndices, createSearchmapping?, create dashboard
        create(user, sortedProjects, false, esClient, kibanaIndex, kibanaVersion);

        // cull any that are in ES but not in OS (remaining in indexPatterns)
        remove(user, indexPatterns, esClient, kibanaIndex);

        common.addAll(sortedProjects);
        Collections.sort(common);

        // Set default index to first index in common if we removed the default
        String defaultIndex = getDefaultIndex(user, esClient, kibanaIndex, kibanaVersion);

        logger.debug("Checking if '{}' contains '{}'", indexPatterns, defaultIndex);

        if (indexPatterns.contains(defaultIndex) || StringUtils.isEmpty(defaultIndex)) {
            logger.debug("'{}' does contain '{}' and common size is {}", indexPatterns, defaultIndex,
                    common.size());
            if (common.size() > 0)
                setDefaultIndex(user, common.get(0), esClient, kibanaIndex, kibanaVersion);
        }
    }
}
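The core of the reconciliation above is a three-way diff built from retainAll and removeAll: keep what both sides share, create what only the desired side has, delete what only the existing side has. A reduced sketch of just that bookkeeping, with illustrative values:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class ThreeWayDiffDemo {
    public static void main(String[] args) {
        Set<String> existing = new HashSet<>(Arrays.asList("a", "b", "c"));
        List<String> desired = new ArrayList<>(Arrays.asList("b", "c", "d"));

        List<String> common = new ArrayList<>(existing);
        common.retainAll(desired);  // present on both sides: b and c
        desired.removeAll(common);  // left to create: d
        existing.removeAll(common); // left to delete: a

        System.out.println(common + " " + desired + " " + existing);
    }
}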
From source file:com.mirth.connect.util.MirthSSLUtil.java
public static String[] getEnabledHttpsProtocols(String[] requestedProtocols) {
    logger.debug("Requested SSL protocols: " + Arrays.toString(requestedProtocols));
    SSLContext sslContext = SSLContexts.createDefault();
    String[] supportedProtocols = sslContext.getSupportedSSLParameters().getProtocols();
    Set<String> enabledProtocols = new LinkedHashSet<String>();

    for (String protocol : requestedProtocols) {
        if (ArrayUtils.contains(supportedProtocols, protocol)) {
            enabledProtocols.add(protocol);
        }
    }

    logger.debug("Enabled SSL protocols: " + String.valueOf(enabledProtocols));
    return enabledProtocols.toArray(new String[enabledProtocols.size()]);
}
From source file:org.bremersee.common.spring.autoconfigure.JaxbAutoConfiguration.java
public static Jaxb2Marshaller createJaxbMarshaller(final Collection<String> packages) {
    final Set<String> packageSet = createPackageSet(packages);
    Jaxb2Marshaller m = new Jaxb2Marshaller();
    Map<String, Object> marshallerProperties = new HashMap<>();
    marshallerProperties.put(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
    m.setMarshallerProperties(marshallerProperties);
    m.setContextPaths(packageSet.toArray(new String[packageSet.size()]));
    return m;
}
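This example and the previous one both use the presized-array idiom set.toArray(new String[set.size()]). Passing an empty array is equally correct, and on modern JVMs typically no slower, since the collection allocates a right-sized array itself; a sketch (the package name is illustrative):

import java.util.LinkedHashSet;
import java.util.Set;

public class ToArrayDemo {
    public static void main(String[] args) {
        Set<String> packageSet = new LinkedHashSet<>();
        packageSet.add("com.example.model");
        // Equivalent to toArray(new String[packageSet.size()]).
        String[] contextPaths = packageSet.toArray(new String[0]);
        System.out.println(contextPaths.length); // prints 1
    }
}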
From source file:ai.grakn.graph.internal.computer.GraknSparkExecutor.java
public static <M> JavaPairRDD<Object, ViewIncomingPayload<M>> executeVertexProgramIteration(
        final JavaPairRDD<Object, VertexWritable> graphRDD,
        final JavaPairRDD<Object, ViewIncomingPayload<M>> viewIncomingRDD,
        final GraknSparkMemory memory,
        final Configuration apacheConfiguration) {

    // the graphRDD and the viewRDD must have the same partitioner
    if (null != viewIncomingRDD)
        assert graphRDD.partitioner().get().equals(viewIncomingRDD.partitioner().get());

    final JavaPairRDD<Object, ViewOutgoingPayload<M>> viewOutgoingRDD =
            (((null == viewIncomingRDD)
                    ? graphRDD.mapValues(vertexWritable ->
                            new Tuple2<>(vertexWritable, Optional.<ViewIncomingPayload<M>>absent())) // first iteration will not have any views or messages
                    : graphRDD.leftOuterJoin(viewIncomingRDD)) // every other iteration may have views and messages
            // for each partition of vertices emit a view and their outgoing messages
            .mapPartitionsToPair(partitionIterator -> {
                HadoopPools.initialize(apacheConfiguration);
                final VertexProgram<M> workerVertexProgram = VertexProgram.<VertexProgram<M>>createVertexProgram(
                        HadoopGraph.open(apacheConfiguration),
                        apacheConfiguration); // each partition(Spark)/worker(TP3) has a local copy of the vertex program (a worker's task)
                final Set<String> elementComputeKeys = workerVertexProgram.getElementComputeKeys(); // the compute keys as a set
                final String[] elementComputeKeysArray = elementComputeKeys.size() == 0
                        ? EMPTY_ARRAY
                        : elementComputeKeys.toArray(new String[elementComputeKeys.size()]); // the compute keys as an array
                final SparkMessenger<M> messenger = new SparkMessenger<>();
                workerVertexProgram.workerIterationStart(memory.asImmutable()); // start the worker
                return () -> IteratorUtils.map(partitionIterator, vertexViewIncoming -> {
                    final StarGraph.StarVertex vertex = vertexViewIncoming._2()._1().get(); // get the vertex from the vertex writable
                    synchronized (vertex) {
                        // drop any computed properties that are cached in memory
                        if (elementComputeKeysArray.length > 0) {
                            vertex.dropVertexProperties(elementComputeKeysArray);
                        }
                        final boolean hasViewAndMessages = vertexViewIncoming._2()._2().isPresent(); // if this is the first iteration, then there are no views or messages
                        final List<DetachedVertexProperty<Object>> previousView = hasViewAndMessages
                                ? vertexViewIncoming._2()._2().get().getView()
                                : Collections.emptyList();
                        final List<M> incomingMessages = hasViewAndMessages
                                ? vertexViewIncoming._2()._2().get().getIncomingMessages()
                                : Collections.emptyList();
                        previousView.forEach(property -> property.attach(Attachable.Method.create(vertex))); // attach the view to the vertex
                        // previousView.clear(); // no longer needed so kill it from memory
                        messenger.setVertexAndIncomingMessages(vertex, incomingMessages); // set the messenger with the incoming messages
                        workerVertexProgram.execute(ComputerGraph.vertexProgram(vertex, workerVertexProgram),
                                messenger, memory); // execute the vertex program on this vertex for this iteration
                        // incomingMessages.clear(); // no longer needed so kill it from memory
                        final List<DetachedVertexProperty<Object>> nextView = elementComputeKeysArray.length == 0
                                ? Collections.emptyList() // not all vertex programs have compute keys
                                : IteratorUtils.list(IteratorUtils.map(
                                        vertex.properties(elementComputeKeysArray),
                                        property -> DetachedFactory.detach(property, true)));
                        final List<Tuple2<Object, M>> outgoingMessages = messenger.getOutgoingMessages(); // get the outgoing messages
                        // if no more vertices in the partition, end the worker's iteration
                        if (!partitionIterator.hasNext()) {
                            workerVertexProgram.workerIterationEnd(memory.asImmutable());
                        }
                        return new Tuple2<>(vertex.id(), new ViewOutgoingPayload<>(nextView, outgoingMessages));
                    }
                });
            }, true)); // true means that the partition is preserved

    // the graphRDD and the viewRDD must have the same partitioner
    assert graphRDD.partitioner().get().equals(viewOutgoingRDD.partitioner().get());

    // "message pass" by reducing on the vertex object id of the view and message payloads
    final MessageCombiner<M> messageCombiner = VertexProgram
            .<VertexProgram<M>>createVertexProgram(HadoopGraph.open(apacheConfiguration), apacheConfiguration)
            .getMessageCombiner().orElse(null);
    final JavaPairRDD<Object, ViewIncomingPayload<M>> newViewIncomingRDD = viewOutgoingRDD
            .flatMapToPair(tuple -> () -> IteratorUtils.<Tuple2<Object, Payload>>concat(
                    IteratorUtils.of(new Tuple2<>(tuple._1(), tuple._2().getView())), // emit the view payload
                    IteratorUtils.map(tuple._2().getOutgoingMessages().iterator(),
                            message -> new Tuple2<>(message._1(), new MessagePayload<>(message._2()))))) // emit the outgoing message payloads one by one
            .reduceByKey(graphRDD.partitioner().get(), (a, b) -> {
                // reduce the view and outgoing messages into a single payload object representing the new view and incoming messages for a vertex
                if (a instanceof ViewIncomingPayload) {
                    ((ViewIncomingPayload<M>) a).mergePayload(b, messageCombiner);
                    return a;
                } else if (b instanceof ViewIncomingPayload) {
                    ((ViewIncomingPayload<M>) b).mergePayload(a, messageCombiner);
                    return b;
                } else {
                    final ViewIncomingPayload<M> c = new ViewIncomingPayload<>(messageCombiner);
                    c.mergePayload(a, messageCombiner);
                    c.mergePayload(b, messageCombiner);
                    return c;
                }
            })
            .filter(payload -> !(payload._2() instanceof MessagePayload)) // this happens if there is a message to a vertex that does not exist
            .filter(payload -> !((payload._2() instanceof ViewIncomingPayload)
                    && !((ViewIncomingPayload<M>) payload._2()).hasView())) // this happens if there are many messages to a vertex that does not exist
            .mapValues(payload -> payload instanceof ViewIncomingPayload
                    ? (ViewIncomingPayload<M>) payload // this happens if there is a vertex with incoming messages
                    : new ViewIncomingPayload<>((ViewPayload) payload)); // this happens if there is a vertex with no incoming messages

    // the graphRDD and the viewRDD must have the same partitioner
    assert graphRDD.partitioner().get().equals(newViewIncomingRDD.partitioner().get());
    newViewIncomingRDD.foreachPartition(partitionIterator -> {
        HadoopPools.initialize(apacheConfiguration);
    }); // need to complete a task so its BSP and the memory for this iteration is updated
    return newViewIncomingRDD;
}
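One small readability note on the pattern above: elementComputeKeys.size() == 0 is equivalent to elementComputeKeys.isEmpty(), which states the intent directly and, for some implementations such as ConcurrentSkipListSet, avoids a traversal that size() requires. A hedged rewrite of just that assignment:

// Equivalent emptiness check, clearer about intent:
final String[] elementComputeKeysArray = elementComputeKeys.isEmpty()
        ? EMPTY_ARRAY
        : elementComputeKeys.toArray(new String[elementComputeKeys.size()]);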