Usage examples for `com.google.common.collect.Iterables#tryFind`, which returns the first element of an iterable satisfying a predicate, wrapped in a Guava `Optional`.
public static <T> Optional<T> tryFind(Iterable<T> iterable, Predicate<? super T> predicate)
From source file:clocker.mesos.entity.MesosClusterImpl.java
/**
 * Registers a child {@link MesosFramework} entity for every framework reported by the
 * Mesos master that is not already a member of the {@code MESOS_FRAMEWORKS} group.
 *
 * @param frameworks JSON array of framework descriptors from the Mesos master API
 * @return the names of all frameworks present in the input array
 */
public List<String> scanFrameworks(JsonArray frameworks) {
    List<String> frameworkNames = MutableList.<String>of();
    for (int idx = 0; idx < frameworks.size(); idx++) {
        JsonObject framework = frameworks.get(idx).getAsJsonObject();
        String id = framework.get("id").getAsString();
        // "pid" may be absent or JSON null; treat both as no pid.
        JsonElement pidElement = framework.get("pid");
        String pid = (pidElement == null || pidElement.isJsonNull()) ? null : pidElement.getAsString();
        String name = framework.get("name").getAsString();
        String url = framework.get("webui_url").getAsString();
        frameworkNames.add(name);

        // Skip frameworks that are already modelled as group members.
        Optional<Entity> existing = Iterables.tryFind(
                sensors().get(MESOS_FRAMEWORKS).getMembers(),
                Predicates.compose(Predicates.equalTo(id),
                        EntityFunctions.attribute(MesosFramework.FRAMEWORK_ID)));
        if (existing.isPresent()) {
            continue;
        }

        // Use the framework-specific spec when one is registered, else the generic type.
        EntitySpec<? extends MesosFramework> frameworkSpec = EntitySpec
                .create(FRAMEWORKS.containsKey(name) ? FRAMEWORKS.get(name)
                        : EntitySpec.create(MesosFramework.class))
                .configure(MesosFramework.FRAMEWORK_ID, id)
                .configure(MesosFramework.FRAMEWORK_PID, pid)
                .configure(MesosFramework.FRAMEWORK_NAME, name)
                .configure(MesosFramework.FRAMEWORK_URL, url)
                .configure(MesosFramework.MESOS_CLUSTER, this)
                .displayName(String.format("%s Framework", Strings.toInitialCapOnly(name)));
        MesosFramework added = sensors().get(MESOS_FRAMEWORKS).addMemberChild(frameworkSpec);
        added.start(ImmutableList.<Location>of());
    }
    return frameworkNames;
}
From source file:com.google.jenkins.plugins.storage.AbstractUpload.java
private static List<ObjectAccessControl> addPublicReadAccess(List<ObjectAccessControl> defaultAcl) { List<ObjectAccessControl> acl = Lists.newArrayList(defaultAcl); final String publicEntity = "allUsers"; boolean alreadyShared = Iterables.tryFind(acl, new Predicate<ObjectAccessControl>() { @Override//from w w w. j a v a 2s . c om public boolean apply(ObjectAccessControl access) { return Objects.equal(access.getEntity(), publicEntity); } }).isPresent(); /* If the entity 'allUsers' didn't already has READER or OWNER access, grant READER. This is to avoid having both an OWNER record and a READER record for that same entity */ if (!alreadyShared) { acl.add(new ObjectAccessControl().setEntity("allUsers").setRole("READER")); } return acl; }
From source file:net.shibboleth.idp.saml.profile.impl.PopulateBindingAndEndpointContexts.java
/** {@inheritDoc} */
@Override
protected void doExecute(@Nonnull final ProfileRequestContext profileRequestContext) {
    // Synchronous (back-channel) responses need no endpoint resolution; bail out early.
    if (handleSynchronousRequest(profileRequestContext)) {
        return;
    } else if (endpointType == null) {
        log.error("Front-channel binding used, but no endpoint type set");
        ActionSupport.buildEvent(profileRequestContext, SAMLEventIds.ENDPOINT_RESOLUTION_FAILED);
        return;
    }

    log.debug("{} Attempting to resolve endpoint of type {} for outbound message", getLogPrefix(),
            endpointType);

    // Compile binding list: keep only descriptors whose predicate accepts this request,
    // preserving the configured order.
    final List<String> bindings = new ArrayList<>(bindingDescriptors.size());
    for (final BindingDescriptor bindingDescriptor : bindingDescriptors) {
        if (bindingDescriptor.apply(profileRequestContext)) {
            bindings.add(bindingDescriptor.getId());
        }
    }
    if (bindings.isEmpty()) {
        log.warn("{} No outbound bindings are eligible for use", getLogPrefix());
        ActionSupport.buildEvent(profileRequestContext, SAMLEventIds.ENDPOINT_RESOLUTION_FAILED);
        return;
    }

    log.trace("{} Candidate outbound bindings: {}", getLogPrefix(), bindings);

    // Build criteria for the resolver; the first eligible binding seeds the endpoint criterion.
    final CriteriaSet criteria = new CriteriaSet(new BindingCriterion(bindings),
            buildEndpointCriterion(bindings.get(0)));
    if (mdContext != null && mdContext.getRoleDescriptor() != null) {
        criteria.add(new RoleDescriptorCriterion(mdContext.getRoleDescriptor()));
    } else {
        log.debug("{} No metadata available for endpoint resolution", getLogPrefix());
    }

    // Attempt resolution. A ResolverException is logged and treated the same as "not found".
    Endpoint resolvedEndpoint = null;
    try {
        resolvedEndpoint = endpointResolver.resolveSingle(criteria);
    } catch (final ResolverException e) {
        log.error("{} Error resolving outbound message endpoint", getLogPrefix(), e);
    }
    if (resolvedEndpoint == null) {
        log.warn("{} Unable to resolve outbound message endpoint", getLogPrefix());
        ActionSupport.buildEvent(profileRequestContext, SAMLEventIds.ENDPOINT_RESOLUTION_FAILED);
        return;
    }

    final String bindingURI = resolvedEndpoint.getBinding();
    log.debug("{} Resolved endpoint at location {} using binding {}",
            new Object[] { getLogPrefix(), resolvedEndpoint.getLocation(), bindingURI, });

    // Transfer results to contexts: endpoint first, then binding (carrying relay state).
    final SAMLEndpointContext endpointContext = endpointContextLookupStrategy.apply(profileRequestContext);
    endpointContext.setEndpoint(resolvedEndpoint);

    final SAMLBindingContext bindingCtx = bindingContextLookupStrategy.apply(profileRequestContext);
    bindingCtx
            .setRelayState(SAMLBindingSupport.getRelayState(profileRequestContext.getInboundMessageContext()));

    // Prefer the configured descriptor matching the resolved binding URI; fall back to
    // recording just the URI when no descriptor is configured for it.
    final Optional<BindingDescriptor> bindingDescriptor = Iterables.tryFind(bindingDescriptors,
            new Predicate<BindingDescriptor>() {
                public boolean apply(BindingDescriptor input) {
                    return input.getId().equals(bindingURI);
                }
            });
    if (bindingDescriptor.isPresent()) {
        bindingCtx.setBindingDescriptor(bindingDescriptor.get());
    } else {
        bindingCtx.setBindingUri(resolvedEndpoint.getBinding());
    }

    // Handle artifact details when the chosen binding is artifact-based.
    if (bindingDescriptor.isPresent() && bindingDescriptor.get().isArtifact()) {
        if (artifactConfiguration != null) {
            final SAMLArtifactContext artifactCtx = artifactContextLookupStrategy.apply(profileRequestContext);
            artifactCtx.setArtifactType(artifactConfiguration.getArtifactType());
            artifactCtx.setSourceArtifactResolutionServiceEndpointURL(
                    artifactConfiguration.getArtifactResolutionServiceURL());
            artifactCtx.setSourceArtifactResolutionServiceEndpointIndex(
                    artifactConfiguration.getArtifactResolutionServiceIndex());
        }
        if (artifactImpliesSecureChannel) {
            log.debug("{} Use of artifact binding implies the channel will be secure, "
                    + "overriding MessageChannelSecurityContext flags", getLogPrefix());
            final MessageChannelSecurityContext channelCtx = profileRequestContext
                    .getSubcontext(MessageChannelSecurityContext.class, true);
            channelCtx.setIntegrityActive(true);
            channelCtx.setConfidentialityActive(true);
        }
    }
}
From source file:org.killbill.billing.invoice.dao.InvoiceDaoHelper.java
/**
 * Marks as written-off each invoice in {@code invoices} that is referenced by a
 * written-off tag in {@code invoicesTags}. Matching invoices are mutated in place;
 * tags pointing at invoices outside the input set are ignored.
 *
 * @param invoices     candidate invoices to flag
 * @param invoicesTags all tags attached to those invoices
 */
private void setInvoicesWrittenOff(final Iterable<InvoiceModelDao> invoices, final List<Tag> invoicesTags) {
    for (final Tag writtenOffTag : filterForWrittenOff(invoicesTags)) {
        // Locate the invoice this tag points at, if present in the input set.
        final InvoiceModelDao taggedInvoice = Iterables.tryFind(invoices, new Predicate<InvoiceModelDao>() {
            @Override
            public boolean apply(final InvoiceModelDao input) {
                return input.getId().equals(writtenOffTag.getObjectId());
            }
        }).orNull();
        if (taggedInvoice != null) {
            taggedInvoice.setIsWrittenOff(true);
        }
    }
}
From source file:org.apache.brooklyn.entity.database.mysql.InitSlaveTaskBody.java
private boolean isReplicationInfoValid(ReplicationSnapshot replicationSnapshot) { MySqlNode master = getMaster();//from ww w . jav a 2s .c o m String dataDir = Strings.nullToEmpty(master.getConfig(MySqlNode.DATA_DIR)); if (!checkFileExistsOnEntity(master, Os.mergePathsUnix(dataDir, replicationSnapshot.getBinLogName()))) { return false; } if (replicationSnapshot.getEntityId() != null) { Optional<Entity> snapshotSlave = Iterables.tryFind(cluster.getChildren(), EntityPredicates.idEqualTo(replicationSnapshot.getEntityId())); if (!snapshotSlave.isPresent()) { log.info("MySql cluster " + cluster + " missing node " + replicationSnapshot.getEntityId() + " with last snapshot " + replicationSnapshot.getSnapshotPath() + ". Will generate new snapshot."); return false; } if (!checkFileExistsOnEntity(snapshotSlave.get(), replicationSnapshot.getSnapshotPath())) { log.info("MySql cluster " + cluster + ", node " + snapshotSlave.get() + " missing replication snapshot " + replicationSnapshot.getSnapshotPath() + ". Will generate new snapshot."); return false; } } return true; }
From source file:brooklyn.entity.nosql.cassandra.CassandraFabricImpl.java
@Override public void update() { synchronized (mutex) { for (CassandraDatacenter member : Iterables.filter(getMembers(), CassandraDatacenter.class)) { member.update();/*from ww w.jav a 2 s. c o m*/ } calculateServiceUp(); // Choose the first available location to set host and port (and compute one-up) Optional<Entity> upNode = Iterables.tryFind(getMembers(), EntityPredicates.attributeEqualTo(SERVICE_UP, Boolean.TRUE)); if (upNode.isPresent()) { setAttribute(HOSTNAME, upNode.get().getAttribute(Attributes.HOSTNAME)); setAttribute(THRIFT_PORT, upNode.get().getAttribute(CassandraNode.THRIFT_PORT)); } } }
From source file:org.eclipse.emf.compare.merge.AbstractMerger.java
/**
 * Even within 'equivalent' differences, there might be one that we need to consider as the "master", one
 * part of the equivalence that should take precedence over the others when merging.
 * <p>
 * There are four main cases in which this happens :
 * <ol>
 * <li>Equivalent differences regarding two "eOpposite" sides, with one side being a single-valued
 * reference while the other side is a multi-valued reference (one-to-many). In such a case, we need the
 * 'many' side of that equivalence to be merged over the 'single' side, so as to avoid potential ordering
 * issues. Additionally, to avoid losing information, equivalent differences with
 * {@link DifferenceKind#ADD} instead of {@link DifferenceKind#REMOVE} must be merged first.</li>
 * <li>Equivalent differences regarding two "eOpposite" sides, with both sides being a single-valued
 * reference (one-to-one). In such a case, we need to merge the difference that results in setting a
 * feature value over the difference unsetting a feature. This is needed to prevent information loss.</li>
 * <li>Equivalent differences with conflicts: basically, if one of the diffs of an equivalence relation is
 * in conflict while the others are not, then none of the equivalent differences can be automatically
 * merged. We need to consider the conflict to be taking precedence over the others to make sure that the
 * conflict is resolved before even trying to merge anything.</li>
 * <li>Equivalent {@link ReferenceChange} and {@link FeatureMapChange} differences: in this case the
 * {@link FeatureMapChange} difference will take precedence over the {@link ReferenceChange} when the
 * resulting operation actively modifies a FeatureMap. The {@link ReferenceChange} will take precedence
 * when a FeatureMap is only modified implicitly. This happens in order to prevent special cases in which
 * the {@link ReferenceChangeMerger} cannot ensure the correct order of the feature map attribute.</li>
 * </ol>
 * </p>
 *
 * @param diff
 *            The diff we need to check the equivalence for a 'master' difference.
 * @param mergeRightToLeft
 *            Direction of the merge operation.
 * @return The master difference of this equivalence relation. May be <code>null</code> if there are none.
 */
private Diff findMasterEquivalence(Diff diff, boolean mergeRightToLeft) {
    final List<Diff> equivalentDiffs = diff.getEquivalence().getDifferences();
    // Any REAL conflict among the equivalents (cases 3 above).
    final Optional<Diff> firstConflicting = Iterables.tryFind(equivalentDiffs,
            hasConflict(ConflictKind.REAL));

    // Cases 1, 2 and 4: type-specific selection of the "ideal" master diff.
    final Diff idealMasterDiff;
    if (diff instanceof ReferenceChange) {
        final ReferenceChange referenceChange = (ReferenceChange) diff;
        idealMasterDiff = getMasterEquivalenceForReferenceChange(referenceChange, mergeRightToLeft);
    } else if (diff instanceof FeatureMapChange) {
        final FeatureMapChange featureMapChange = (FeatureMapChange) diff;
        idealMasterDiff = getMasterEquivalenceForFeatureMapChange(featureMapChange, mergeRightToLeft);
    } else {
        idealMasterDiff = null;
    }

    final Diff masterDiff;
    // conflicting equivalents take precedence over the ideal master equivalence
    if (firstConflicting.isPresent() && !hasRealConflict(idealMasterDiff)) {
        if (hasRealConflict(diff)) {
            // diff itself conflicts: no usable master at all.
            masterDiff = null;
        } else {
            masterDiff = firstConflicting.get();
        }
    } else {
        masterDiff = idealMasterDiff;
    }
    return masterDiff;
}
From source file:org.apache.brooklyn.entity.nosql.cassandra.CassandraFabricImpl.java
@Override public void update() { synchronized (mutex) { for (CassandraDatacenter member : Iterables.filter(getMembers(), CassandraDatacenter.class)) { member.update();/* w w w.j a v a 2s.c om*/ } calculateServiceUp(); // Choose the first available location to set host and port (and compute one-up) Optional<Entity> upNode = Iterables.tryFind(getMembers(), EntityPredicates.attributeEqualTo(SERVICE_UP, Boolean.TRUE)); if (upNode.isPresent()) { sensors().set(HOSTNAME, upNode.get().getAttribute(Attributes.HOSTNAME)); sensors().set(THRIFT_PORT, upNode.get().getAttribute(CassandraNode.THRIFT_PORT)); } } }
From source file:clocker.mesos.entity.MesosClusterImpl.java
/**
 * Synchronises the {@code MESOS_SLAVES} group with the slave list reported by the Mesos
 * master: updates the active flag on known slaves, detaches or stops inactive ones, and
 * creates entities (with an SSH machine location, port forwarding, and a subnet tier)
 * for newly seen active slaves.
 *
 * @param slaves JSON array of slave descriptors from the Mesos master API
 * @return the ids of all slaves currently reported as active
 * @throws UnknownHostException if a slave hostname cannot be resolved to an address
 */
public List<String> scanSlaves(JsonArray slaves) throws UnknownHostException {
    List<String> slaveIds = MutableList.<String>of();
    for (int i = 0; i < slaves.size(); i++) {
        JsonObject slave = slaves.get(i).getAsJsonObject();
        boolean active = slave.get("active").getAsBoolean();
        String id = slave.get("id").getAsString();
        String hostname = slave.get("hostname").getAsString();
        Double registered = slave.get("registered_time").getAsDouble();
        Group group = sensors().get(MESOS_SLAVES);
        // Look for an existing member entity carrying this slave id.
        Optional<Entity> entity = Iterables.tryFind(group.getMembers(), Predicates
                .compose(Predicates.equalTo(id), EntityFunctions.attribute(MesosSlave.MESOS_SLAVE_ID)));
        if (entity.isPresent()) {
            Entity found = entity.get();
            found.sensors().set(MesosSlave.SLAVE_ACTIVE, active);
            if (!active) {
                Lifecycle state = found.sensors().get(Attributes.SERVICE_STATE_ACTUAL);
                if (Lifecycle.ON_FIRE.equals(state) || Lifecycle.STARTING.equals(state)) {
                    // Transitional/failed state: leave the entity alone for now.
                    continue;
                } else if (Lifecycle.STOPPING.equals(state) || Lifecycle.STOPPED.equals(state)) {
                    // Already shutting down: detach from the group and unmanage.
                    group.removeMember(found);
                    group.removeChild(found);
                    Entities.unmanage(found);
                } else {
                    // Otherwise record that the entity is expected to stop.
                    ServiceStateLogic.setExpectedState(found, Lifecycle.STOPPING);
                }
            }
        } else if (active) {
            // New active slave: build an SSH machine location for its host.
            LocationSpec<SshMachineLocation> spec = LocationSpec.create(SshMachineLocation.class)
                    .configure(SshMachineLocation.SSH_HOST, hostname)
                    .configure("address", InetAddress.getByName(hostname)).displayName(hostname);
            if (config().get(MESOS_SLAVE_ACCESSIBLE)) {
                // Slave reachable over SSH: wait for SSH-ability and pass through credentials.
                spec.configure(CloudLocationConfig.WAIT_FOR_SSHABLE, "true")
                        .configure(SshMachineLocation.DETECT_MACHINE_DETAILS, true)
                        .configure(SshMachineLocation.SSH_PORT, config().get(MesosSlave.SLAVE_SSH_PORT))
                        .configure(LocationConfigKeys.USER, config().get(MesosSlave.SLAVE_SSH_USER))
                        .configure(LocationConfigKeys.PASSWORD, config().get(MesosSlave.SLAVE_SSH_PASSWORD))
                        .configure(SshTool.PROP_PASSWORD, config().get(MesosSlave.SLAVE_SSH_PASSWORD))
                        .configure(SshTool.PROP_PORT, config().get(MesosSlave.SLAVE_SSH_PORT))
                        .configure(LocationConfigKeys.PRIVATE_KEY_DATA,
                                config().get(MesosSlave.SLAVE_SSH_PRIVATE_KEY_DATA))
                        .configure(LocationConfigKeys.PRIVATE_KEY_FILE,
                                config().get(MesosSlave.SLAVE_SSH_PRIVATE_KEY_FILE));
            } else {
                spec.configure(CloudLocationConfig.WAIT_FOR_SSHABLE, "false")
                        .configure(SshMachineLocation.DETECT_MACHINE_DETAILS, false);
            }
            SshMachineLocation machine = getManagementContext().getLocationManager().createLocation(spec);

            // Setup port forwarding
            MarathonPortForwarder portForwarder = new MarathonPortForwarder();
            portForwarder.setManagementContext(getManagementContext());

            // Create and start the slave entity on the new machine location.
            EntitySpec<MesosSlave> slaveSpec = EntitySpec.create(MesosSlave.class)
                    .configure(MesosSlave.MESOS_SLAVE_ID, id)
                    .configure(MesosSlave.REGISTERED_AT, registered.longValue())
                    .configure(MesosSlave.MESOS_CLUSTER, this).displayName("Mesos Slave (" + hostname + ")");
            MesosSlave added = sensors().get(MESOS_SLAVES).addMemberChild(slaveSpec);
            added.sensors().set(MesosSlave.SLAVE_ACTIVE, active);
            added.sensors().set(MesosSlave.HOSTNAME, hostname);
            added.sensors().set(MesosSlave.ADDRESS, hostname);
            added.start(ImmutableList.of(machine));
            portForwarder.init(hostname, this);

            // Setup subnet tier
            SubnetTier subnetTier = added.addChild(
                    EntitySpec.create(SubnetTier.class).configure(SubnetTier.PORT_FORWARDER, portForwarder)
                            .configure(SubnetTier.SUBNET_CIDR, Cidr.UNIVERSAL));
            Entities.start(subnetTier, ImmutableList.of(machine));
            added.sensors().set(MesosSlave.SUBNET_TIER, subnetTier);
        }
        if (active)
            slaveIds.add(id);
    }
    return slaveIds;
}
From source file:brooklyn.entity.nosql.cassandra.CassandraDatacenterImpl.java
@Override public void update() { synchronized (mutex) { // Update our seeds, as necessary seedTracker.refreshSeeds();// w w w.ja v a2 s. c o m // Choose the first available cluster member to set host and port (and compute one-up) Optional<Entity> upNode = Iterables.tryFind(getMembers(), EntityPredicates.attributeEqualTo(SERVICE_UP, Boolean.TRUE)); if (upNode.isPresent()) { setAttribute(HOSTNAME, upNode.get().getAttribute(Attributes.HOSTNAME)); setAttribute(THRIFT_PORT, upNode.get().getAttribute(CassandraNode.THRIFT_PORT)); List<String> currentNodes = getAttribute(CASSANDRA_CLUSTER_NODES); Set<String> oldNodes = (currentNodes != null) ? ImmutableSet.copyOf(currentNodes) : ImmutableSet.<String>of(); Set<String> newNodes = MutableSet.<String>of(); for (Entity member : getMembers()) { if (member instanceof CassandraNode && Boolean.TRUE.equals(member.getAttribute(SERVICE_UP))) { String hostname = member.getAttribute(Attributes.HOSTNAME); Integer thriftPort = member.getAttribute(CassandraNode.THRIFT_PORT); if (hostname != null && thriftPort != null) { newNodes.add(HostAndPort.fromParts(hostname, thriftPort).toString()); } } } if (Sets.symmetricDifference(oldNodes, newNodes).size() > 0) { setAttribute(CASSANDRA_CLUSTER_NODES, MutableList.copyOf(newNodes)); } } else { setAttribute(HOSTNAME, null); setAttribute(THRIFT_PORT, null); setAttribute(CASSANDRA_CLUSTER_NODES, Collections.<String>emptyList()); } ServiceNotUpLogic.updateNotUpIndicatorRequiringNonEmptyList(this, CASSANDRA_CLUSTER_NODES); } }