List of usage examples for com.google.common.collect Multimap isEmpty
boolean isEmpty();
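Returns true if the multimap contains no key-value pairs (equivalent to size() == 0). Before the project examples below, here is a minimal self-contained sketch of the method's contract; the class and variable names are illustrative only, not taken from any of the projects that follow.

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

public class MultimapIsEmptyDemo {
    public static void main(String[] args) {
        // A freshly created multimap holds no key-value pairs, so isEmpty() is true.
        Multimap<String, Integer> scores = ArrayListMultimap.create();
        System.out.println(scores.isEmpty()); // true

        // Adding any entry makes it non-empty.
        scores.put("alice", 10);
        System.out.println(scores.isEmpty()); // false

        // Removing the last remaining entry makes it empty again.
        scores.remove("alice", 10);
        System.out.println(scores.isEmpty()); // true
    }
}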
From source file:com.palantir.atlasdb.keyvalue.cassandra.CassandraClientPool.java
private void sanityCheckRingConsistency() {
    Multimap<Set<TokenRange>, InetSocketAddress> tokenRangesToHost = HashMultimap.create();
    for (InetSocketAddress host : currentPools.keySet()) {
        Cassandra.Client client = null;
        try {
            client = CassandraClientFactory.getClientInternal(host, config.ssl(),
                    config.socketTimeoutMillis(), config.socketQueryTimeoutMillis());
            try {
                client.describe_keyspace(config.keyspace());
            } catch (NotFoundException e) {
                return; // don't care to check for ring consistency when we're not even fully initialized
            }
            tokenRangesToHost.put(ImmutableSet.copyOf(client.describe_ring(config.keyspace())), host);
        } catch (Exception e) {
            log.warn("failed to get ring info from host: {}", host, e);
        } finally {
            if (client != null) {
                client.getOutputProtocol().getTransport().close();
            }
        }
    }

    if (tokenRangesToHost.isEmpty()) {
        log.warn("Failed to get ring info for entire Cassandra cluster ({}); ring could not be checked for consistency.",
                config.keyspace());
        return;
    }

    if (tokenRangesToHost.keySet().size() == 1) { // all nodes agree on a consistent view of the cluster. Good.
        return;
    }

    RuntimeException e = new IllegalStateException(
            "Hosts have differing ring descriptions. This can lead to inconsistent reads and lost data. ");
    log.error("QA-86204 " + e.getMessage() + tokenRangesToHost, e);

    // provide some easier to grok logging for the two most common cases
    if (tokenRangesToHost.size() > 2) {
        for (Map.Entry<Set<TokenRange>, Collection<InetSocketAddress>> entry : tokenRangesToHost.asMap().entrySet()) {
            if (entry.getValue().size() == 1) {
                log.error("Host: " + entry.getValue().iterator().next()
                        + " disagrees with the other nodes about the ring state.");
            }
        }
    }
    if (tokenRangesToHost.keySet().size() == 2) {
        ImmutableList<Set<TokenRange>> sets = ImmutableList.copyOf(tokenRangesToHost.keySet());
        Set<TokenRange> set1 = sets.get(0);
        Set<TokenRange> set2 = sets.get(1);
        log.error("Hosts are split. group1: " + tokenRangesToHost.get(set1)
                + " group2: " + tokenRangesToHost.get(set2));
    }

    CassandraVerifier.logErrorOrThrow(e.getMessage(), config.safetyDisabled());
}
From source file:ai.grakn.graql.internal.reasoner.atom.binary.RelationAtom.java
/**
 * Infer relation types that this relation atom can potentially have.
 * NB: entity types and role types are treated separately as they behave differently:
 * entity types only play the explicitly defined roles (not the relevant part of the hierarchy of the specified role)
 * @return list of relation types this atom can have, ordered by the number of compatible role types
 */
public List<RelationType> inferPossibleRelationTypes(Answer sub) {
    if (getPredicate() != null) return Collections.singletonList(getOntologyConcept().asRelationType());

    // look at available role types
    Multimap<RelationType, Role> compatibleTypesFromRoles = getCompatibleRelationTypesWithRoles(
            getExplicitRoleTypes(), new RoleTypeConverter());

    // look at entity types
    Map<Var, OntologyConcept> varTypeMap = getParentQuery().getVarOntologyConceptMap();

    // explicit types
    Set<OntologyConcept> types = getRolePlayers().stream().filter(varTypeMap::containsKey).map(varTypeMap::get)
            .collect(toSet());

    // types deduced from substitution
    inferEntityTypes(sub).forEach(types::add);

    Multimap<RelationType, Role> compatibleTypesFromTypes = getCompatibleRelationTypesWithRoles(types,
            new OntologyConceptConverterImpl());

    Multimap<RelationType, Role> compatibleTypes;
    // intersect relation types from roles and types
    if (compatibleTypesFromRoles.isEmpty()) {
        compatibleTypes = compatibleTypesFromTypes;
    } else if (!compatibleTypesFromTypes.isEmpty()) {
        compatibleTypes = multimapIntersection(compatibleTypesFromTypes, compatibleTypesFromRoles);
    } else {
        compatibleTypes = compatibleTypesFromRoles;
    }

    return compatibleTypes.asMap().entrySet().stream()
            .sorted(Comparator.comparing(e -> -e.getValue().size()))
            .map(Map.Entry::getKey)
            .filter(t -> Sets.intersection(getSupers(t), compatibleTypes.keySet()).isEmpty())
            .collect(Collectors.toList());
}
From source file:com.foundationdb.server.store.OnlineHelper.java
private void buildIndexesInternal(Session session, QueryContext context) {
    Collection<ChangeSet> changeSets = schemaManager.getOnlineChangeSets(session);
    ChangeLevel changeLevel = commonChangeLevel(changeSets);
    assert (changeLevel == ChangeLevel.INDEX || changeLevel == ChangeLevel.INDEX_CONSTRAINT) : changeSets;
    TransformCache transformCache = getTransformCache(session, null);
    Multimap<Group, RowType> tableIndexes = HashMultimap.create();
    Set<GroupIndex> groupIndexes = new HashSet<>();
    for (ChangeSet cs : changeSets) {
        TableTransform transform = transformCache.get(cs.getTableId());
        tableIndexes.put(transform.rowType.table().getGroup(), transform.rowType);
        groupIndexes.addAll(transform.groupIndexes);
    }
    StoreAdapter adapter = store.createAdapter(session);
    if (!tableIndexes.isEmpty()) {
        buildTableIndexes(session, context, adapter, transformCache, tableIndexes);
    }
    if (!groupIndexes.isEmpty()) {
        if (changeLevel == ChangeLevel.INDEX_CONSTRAINT) {
            throw new IllegalStateException("Constraint and group indexes");
        }
        buildGroupIndexes(session, context, adapter, groupIndexes);
    }
}
From source file:grakn.core.graql.executor.WriteExecutor.java
/**
 * Produce a valid ordering of the properties by using the given dependency information.
 * This method uses a topological sort (Kahn's algorithm) in order to find a valid ordering.
 */
private ImmutableList<Writer> sortedWriters() {
    ImmutableList.Builder<Writer> sorted = ImmutableList.builder();

    // invertedDependencies is intended to just be a 'view' on dependencies, so when dependencies is modified
    // we should always also modify invertedDependencies (and vice-versa).
    Multimap<Writer, Writer> dependencies = HashMultimap.create(this.dependencies);
    Multimap<Writer, Writer> invertedDependencies = HashMultimap.create();
    Multimaps.invertFrom(dependencies, invertedDependencies);

    Queue<Writer> writerWithoutDependencies = new ArrayDeque<>(
            Sets.filter(writers, property -> dependencies.get(property).isEmpty()));

    Writer property;

    // Retrieve the next property without any dependencies
    while ((property = writerWithoutDependencies.poll()) != null) {
        sorted.add(property);

        // We copy this into a new list because the underlying collection gets modified during iteration
        Collection<Writer> dependents = Lists.newArrayList(invertedDependencies.get(property));

        for (Writer dependent : dependents) {
            // Because the property has been removed, the dependent no longer needs to depend on it
            dependencies.remove(dependent, property);
            invertedDependencies.remove(property, dependent);

            boolean hasNoDependencies = dependencies.get(dependent).isEmpty();

            if (hasNoDependencies) {
                writerWithoutDependencies.add(dependent);
            }
        }
    }

    if (!dependencies.isEmpty()) {
        // This means there must have been a loop. Pick an arbitrary remaining var to display
        Variable var = dependencies.keys().iterator().next().var();
        throw GraqlSemanticException.insertRecursive(printableRepresentation(var));
    }

    return sorted.build();
}
From source file:io.takari.maven.plugins.compile.AbstractCompileMojo.java
private Proc getEffectiveProc(List<File> classpath) {
    Proc proc = this.proc;
    if (proc == null) {
        Multimap<File, String> processors = TreeMultimap.create();
        for (File jar : classpath) {
            if (jar.isFile()) {
                try (ZipFile zip = new ZipFile(jar)) {
                    ZipEntry entry = zip.getEntry("META-INF/services/javax.annotation.processing.Processor");
                    if (entry != null) {
                        try (Reader r = new InputStreamReader(zip.getInputStream(entry), Charsets.UTF_8)) {
                            processors.putAll(jar, CharStreams.readLines(r));
                        }
                    }
                } catch (IOException e) {
                    // ignore, compiler won't be able to use this jar either
                }
            } else if (jar.isDirectory()) {
                try {
                    processors.putAll(jar, Files.readLines(
                            new File(jar, "META-INF/services/javax.annotation.processing.Processor"),
                            Charsets.UTF_8));
                } catch (IOException e) {
                    // ignore, compiler won't be able to use this jar either
                }
            }
        }
        if (!processors.isEmpty()) {
            StringBuilder msg = new StringBuilder(
                    "<proc> must be one of 'none', 'only' or 'proc'. Processors found: ");
            for (File jar : processors.keySet()) {
                msg.append("\n   ").append(jar).append(" ").append(processors.get(jar));
            }
            throw new IllegalArgumentException(msg.toString());
        }
        proc = Proc.none;
    }
    return proc;
}
From source file:org.apache.cassandra.locator.AbstractReplicationStrategy.java
/**
 * returns <tt>Multimap</tt> of {live destination: ultimate targets}, where if target is not the same
 * as the destination, it is a "hinted" write, and will need to be sent to
 * the ultimate target when it becomes alive again.
 */
public Multimap<InetAddress, InetAddress> getHintedEndpoints(Collection<InetAddress> targets) {
    Multimap<InetAddress, InetAddress> map = HashMultimap.create(targets.size(), 1);

    // first, add the live endpoints
    for (InetAddress ep : targets) {
        if (FailureDetector.instance.isAlive(ep))
            map.put(ep, ep);
    }

    // if everything was alive or we're not doing HH on this keyspace, stop with just the live nodes
    if (map.size() == targets.size() || !StorageProxy.isHintedHandoffEnabled())
        return map;

    // assign dead endpoints to be hinted to the closest live one, or to the local node
    // (since it is trivially the closest) if none are alive. This way, the cost of doing
    // a hint is only adding the hint header, rather than doing a full extra write, if any
    // destination nodes are alive.
    //
    // we do a 2nd pass on targets instead of using temporary storage,
    // to optimize for the common case (everything was alive).
    InetAddress localAddress = FBUtilities.getLocalAddress();
    for (InetAddress ep : targets) {
        if (map.containsKey(ep))
            continue;

        if (!StorageProxy.shouldHint(ep)) {
            if (logger.isDebugEnabled())
                logger.debug("not hinting " + ep + " which has been down "
                        + Gossiper.instance.getEndpointDowntime(ep) + "ms");
            continue;
        }

        // hint destination
        InetAddress destination = map.isEmpty()
                ? localAddress
                : snitch.getSortedListByProximity(localAddress, map.keySet()).get(0);
        map.put(destination, ep);
    }

    return map;
}
From source file:gtu._work.ui.CheckJavaClassPathUI.java
private void executeBtnActionPerformed(ActionEvent evt) {
    try {
        // NOTE: the user-facing message strings in this method were mojibake ("??") in the
        // original listing; they are rendered here as plain-English equivalents inferred from context.
        Validate.notBlank(srcPathText.getText(), "java source path is required");
        Validate.isTrue(classPathList.getModel().getSize() != 0, "classPathList must not be empty");
        logArea.setText("");
        PrintStream out = JCommonUtil.getNewPrintStream2JTextArea(logArea, 0, false);

        Pattern importPattern = Pattern.compile("import\\s+([\\w\\.]+)\\;");
        Pattern classNamePattern = Pattern.compile("(class|interface)\\s+\\w+[\\w\\s]*\\{");

        Set<String> classNameSet = new HashSet<String>();
        DefaultListModel model = (DefaultListModel) classPathList.getModel();
        for (int ii = 0; ii < model.getSize(); ii++) {
            String className = (String) model.getElementAt(ii);
            classNameSet.add(className);
        }

        File srcFile = JCommonUtil.filePathCheck(srcPathText.getText(), "java source", true);

        List<File> fileList = new ArrayList<File>();
        FileUtil.searchFilefind(srcFile, ".*\\.[jJ][aA][vV][aA]", fileList);

        Multimap<File, String> javaContainMap = ArrayListMultimap.create();

        Matcher matcher = null;
        for (File file : fileList) {
            BufferedReader reader = new BufferedReader(new InputStreamReader(new FileInputStream(file), "utf8"));
            for (String line = null; (line = reader.readLine()) != null;) {
                if (classNamePattern.matcher(line).find()) {
                    break;
                }
                matcher = importPattern.matcher(line);
                if (matcher.find()) {
                    String importClass = matcher.group(1);
                    for (String ptn : classNameSet) {
                        Pattern pt2 = null;
                        try {
                            pt2 = Pattern.compile(ptn);
                        } catch (Exception ex) {
                            // ignore: the configured entry is not a valid regex
                        }
                        if (StringUtils.equals(importClass, ptn)) {
                            javaContainMap.put(file, importClass);
                        } else if (importClass.contains(ptn)) {
                            javaContainMap.put(file, importClass);
                        } else if (pt2 != null && pt2.matcher(importClass).find()) {
                            // null check added: pt2 stays null when the pattern fails to compile,
                            // which would have thrown a NullPointerException here in the original
                            javaContainMap.put(file, importClass);
                        }
                    }
                }
            }
            reader.close();
        }

        if (javaContainMap.isEmpty()) {
            out.println("no matching import classes found in the java sources");
        } else {
            out.println("matching import classes found:");
            for (File errorFile : javaContainMap.keySet()) {
                out.println(errorFile);
                for (String clzName : javaContainMap.get(errorFile)) {
                    out.println("\t" + clzName);
                }
            }
        }
        out.println("java files scanned: " + fileList.size());

        configBean.getConfigProp().put(SRCPATHTEXT_KEY, srcFile.getAbsolutePath());
        for (String clzName : classNameSet) {
            if (configBean.getConfigProp().containsValue(clzName)) {
                continue;
            }
            int availableIndex = -1;
            for (int ii = 0;; ii++) {
                if (!configBean.getConfigProp().containsKey(CLASSNAME_KEY + ii)) {
                    availableIndex = ii;
                    break;
                }
            }
            configBean.getConfigProp().put(CLASSNAME_KEY + availableIndex, clzName);
        }
        configBean.store();

        JCommonUtil._jOptionPane_showMessageDialog_info("done!");
        // C:/workspace/workspace_farEastStone/estore/fet_estore_search_engie_revamp/revamp_source_code
    } catch (Exception ex) {
        JCommonUtil.handleException(ex);
    }
}
From source file:org.artifactory.build.BuildServiceImpl.java
@Override
public Set<ArtifactoryBuildArtifact> getBuildArtifactsFileInfos(Build build, boolean useFallBack,
        String sourceRepo) {
    AqlBase.AndClause and = and();
    log.debug("Executing Artifacts search for build {}:{}", build.getName(), build.getNumber());
    if (StringUtils.isNotBlank(sourceRepo)) {
        log.debug("Search limited to repo: {}", sourceRepo);
        and.append(AqlApiItem.repo().equal(sourceRepo));
    }
    and.append(AqlApiItem.property().property("build.name", AqlComparatorEnum.equals, build.getName()));
    and.append(AqlApiItem.property().property("build.number", AqlComparatorEnum.equals, build.getNumber()));
    AqlBase buildArtifactsQuery = AqlApiItem.create().filter(and);

    AqlEagerResult<AqlBaseFullRowImpl> aqlResult = aqlService.executeQueryEager(buildArtifactsQuery);
    log.debug("Search returned {} artifacts", aqlResult.getSize());

    Multimap<String, Artifact> buildArtifacts = BuildServiceUtils.getBuildArtifacts(build);
    log.debug("This build contains {} artifacts (taken from build info)", buildArtifacts.size());

    List<String> virtualRepoKeys = getVirtualRepoKeys();
    Set<ArtifactoryBuildArtifact> matchedArtifacts = matchArtifactsToFileInfos(aqlResult.getResults(),
            buildArtifacts, virtualRepoKeys);
    log.debug("Matched {} build artifacts to actual paths returned by search", matchedArtifacts.size());

    // buildArtifacts contains all remaining artifacts that weren't matched - match them with the weak search
    // only if indicated and if such remaining unmatched artifacts still exist in the map.
    if (useFallBack && !buildArtifacts.isEmpty()) {
        log.debug("Unmatched artifacts exist and 'use weak match fallback' flag is lit - executing weak match");
        Set<ArtifactoryBuildArtifact> weaklyMatchedArtifacts = matchUnmatchedArtifactsNonStrict(build,
                sourceRepo, buildArtifacts, virtualRepoKeys);
        log.debug("Weak match has matched {} additional artifacts", weaklyMatchedArtifacts);
        matchedArtifacts.addAll(weaklyMatchedArtifacts);
    }

    // Lastly, populate matchedArtifacts with all remaining unmatched artifacts with null values to help users of
    // this function know if all build artifacts were found.
    log.debug("{} artifacts were not matched to actual paths", buildArtifacts.size());
    for (Artifact artifact : buildArtifacts.values()) {
        matchedArtifacts.add(new ArtifactoryBuildArtifact(artifact, null));
    }
    return matchedArtifacts;
}
From source file:ai.grakn.graql.internal.query.QueryOperationExecutor.java
/**
 * Produce a valid ordering of the properties by using the given dependency information.
 *
 * <p>
 * This method uses a topological sort (Kahn's algorithm) in order to find a valid ordering.
 * </p>
 */
private ImmutableList<VarAndProperty> sortProperties() {
    ImmutableList.Builder<VarAndProperty> sorted = ImmutableList.builder();

    // invertedDependencies is intended to just be a 'view' on dependencies, so when dependencies is modified
    // we should always also modify invertedDependencies (and vice-versa).
    Multimap<VarAndProperty, VarAndProperty> dependencies = HashMultimap.create(this.dependencies);
    Multimap<VarAndProperty, VarAndProperty> invertedDependencies = HashMultimap.create();
    Multimaps.invertFrom(dependencies, invertedDependencies);

    Queue<VarAndProperty> propertiesWithoutDependencies = new ArrayDeque<>(
            Sets.filter(properties, property -> dependencies.get(property).isEmpty()));

    VarAndProperty property;

    // Retrieve the next property without any dependencies
    while ((property = propertiesWithoutDependencies.poll()) != null) {
        sorted.add(property);

        // We copy this into a new list because the underlying collection gets modified during iteration
        Collection<VarAndProperty> dependents = Lists.newArrayList(invertedDependencies.get(property));

        for (VarAndProperty dependent : dependents) {
            // Because the property has been removed, the dependent no longer needs to depend on it
            dependencies.remove(dependent, property);
            invertedDependencies.remove(property, dependent);

            boolean hasNoDependencies = dependencies.get(dependent).isEmpty();

            if (hasNoDependencies) {
                propertiesWithoutDependencies.add(dependent);
            }
        }
    }

    if (!dependencies.isEmpty()) {
        // This means there must have been a loop. Pick an arbitrary remaining var to display
        Var var = dependencies.keys().iterator().next().var();
        throw GraqlQueryException.insertRecursive(printableRepresentation(var));
    }

    return sorted.build();
}
From source file:com.trebogeer.jcql.JCQLMain.java
/**
 * Generates java model (pojos) from existing cassandra CQL schema.
 * // TODO - segregate mappers from pojos and make them separately configurable via options. The whole stack of generated code might not always be needed.
 *
 * @param beans         udt definitions
 * @param tables        table definitions
 * @param partitionKeys partition keys from table metadata
 */
private void generateModelCode(Multimap<String, Pair<String, DataType>> beans,
        Multimap<String, Pair<String, ColumnMetadata>> tables,
        ArrayListMultimap<String, String> partitionKeys) {
    JDefinedClass rowMapper;
    JDefinedClass toUDTMapper = null;
    JDefinedClass binder = null;
    String commonsPackage = (cfg.cpackage != null && !"".equals(cfg.cpackage)) ? cfg.cpackage : cfg.jpackage;
    try {
        rowMapper = model._class(PUBLIC, commonsPackage + ".RowMapper", INTERFACE);
        rowMapper._extends(model.ref(Serializable.class));
        JTypeVar jtv = rowMapper.generify("T");
        JTypeVar jtvRow = rowMapper.generify("R").bound(model.ref(com.datastax.driver.core.GettableData.class));
        rowMapper.method(NONE, jtv, "map").param(jtvRow, "data");
    } catch (Exception e) {
        throw new RuntimeException("Failed to generate mapper interface.", e);
    }
    if (tables != null && !tables.isEmpty()) {
        try {
            binder = model._class(PUBLIC, commonsPackage + ".TableBindMapper", INTERFACE);
            binder._extends(model.ref(Serializable.class));
            JTypeVar jtv = binder.generify("T");
            JMethod jm = binder.method(NONE, model.VOID, "bind");
            jm.param(jtv, "data");
            jm.param(model.ref(BoundStatement.class), "st");
            jm.param(model.ref(Session.class), "session");
        } catch (Exception e) {
            throw new RuntimeException("Failed to generate table bind interface.", e);
        }
    }
    if (beans != null && beans.size() > 0) {
        try {
            toUDTMapper = model._class(PUBLIC, commonsPackage + ".BeanToUDTMapper", INTERFACE);
            toUDTMapper._extends(model.ref(Serializable.class));
            JTypeVar jtv = toUDTMapper.generify("T");
            JMethod toUDT = toUDTMapper.method(NONE, model.ref(UDTValue.class), "toUDT");
            JVar toUDTArg0 = toUDT.param(jtv, "data");
            JVar toUDTArg1 = toUDT.param(Session.class, "session");
        } catch (JClassAlreadyExistsException e) {
            throw new RuntimeException("Failed to generate UDT mapper interface.", e);
        }
    }
    if (beans != null) {
        for (String cl : beans.keySet()) {
            try {
                String camName = camelize(cl);
                JDefinedClass clazz = JCQLUtils.getBeanClass(cfg.jpackage, camName, model);
                clazz.field(PRIVATE | STATIC | FINAL, model.LONG, "serialVersionUID",
                        JExpr.lit((long) ((cfg.jpackage + "." + camName).hashCode())));
                // row mapper
                rowMapperCode(clazz, rowMapper, beans.get(cl), model.ref(com.datastax.driver.core.GettableData.class));
                // pojo to UDT mapper
                toUDTMapperCode(clazz, toUDTMapper, beans.get(cl), cl);
                // fields/getters/setters/annotations
                clazz.annotate(UDT.class).param("keyspace", cfg.keysapce).param("name", cl);
                // JExpr.newArray(codeModel.ref(String.class)).add(ID).add(CODE).add(NAME)
                for (Pair<String, DataType> field : beans.get(cl)) {
                    javaBeanFieldWithGetterSetter(clazz, field.getValue1(), field.getValue0(), -1,
                            com.datastax.driver.mapping.annotations.Field.class);
                }
            } catch (JClassAlreadyExistsException e) {
                logger.warn("Class '{}' already exists for UDT, skipping ", cl);
            }
        }
    }
    if (tables != null && !tables.isEmpty()) {
        for (String table : tables.keySet()) {
            try {
                String camName = camelize(table);
                JDefinedClass clazz = JCQLUtils.getBeanClass(cfg.jpackage, camName, model);
                clazz.field(PRIVATE | STATIC | FINAL, model.LONG, "serialVersionUID",
                        JExpr.lit((long) ((cfg.jpackage + "." + camName).hashCode())));
                Collection<Pair<String, DataType>> dataTypes = Collections2.filter(
                        Collections2.transform(tables.get(table),
                                new Function<Pair<String, ColumnMetadata>, Pair<String, DataType>>() {
                                    @Override
                                    public Pair<String, DataType> apply(Pair<String, ColumnMetadata> input) {
                                        return Pair.with(input.getValue0(), input.getValue1().getType());
                                    }
                                }),
                        input -> input != null && !"solr_query".equalsIgnoreCase(input.getValue0()));
                // row mapper
                rowMapperCode(clazz, rowMapper, dataTypes, model.ref(com.datastax.driver.core.Row.class));
                // bind to statement code
                binderToStatemet(clazz, binder, dataTypes);
                // fields/getters/setters/annotations
                clazz.annotate(Table.class).param("keyspace", cfg.keysapce).param("name", table);
                List<String> pkList = partitionKeys.get(table);
                Set<String> pks = new HashSet<>(pkList);
                for (Pair<String, ColumnMetadata> field : tables.get(table)) {
                    String fieldName = field.getValue0();
                    int order = -1;
                    if (pks.contains(fieldName)) {
                        order = 0;
                        if (pks.size() > 1) {
                            order = pkList.indexOf(field.getValue0());
                        }
                    }
                    javaBeanFieldWithGetterSetter(clazz, field.getValue1().getType(), fieldName, order, Column.class);
                }
            } catch (JClassAlreadyExistsException ex) {
                logger.warn("Class '{}' already exists for table, skipping ", table);
            }
        }
    }
}