Example usage for com.google.common.collect Iterables contains

List of usage examples for com.google.common.collect Iterables contains

Introduction

This page collects example usages of Iterables.contains from the com.google.common.collect package, drawn from open-source projects.

Prototype

public static boolean contains(Iterable<?> iterable, @Nullable Object element) 

Source Link

Document

Returns true if iterable contains any element o for which Objects.equals(o, element) is true.
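
The snippet below is a minimal, hedged sketch of that contract (the class name and sample data are illustrative, not taken from the projects listed under Usage). It shows the equals-based membership test and the tolerance for a null element implied by the @Nullable annotation in the prototype:

import com.google.common.collect.Iterables;

import java.util.Arrays;
import java.util.List;

public class IterablesContainsSketch {
    public static void main(String[] args) {
        List<String> names = Arrays.asList("alpha", null, "beta");

        // Equality-based lookup, analogous to Collection#contains.
        System.out.println(Iterables.contains(names, "beta"));  // true

        // The element may be null; a null entry in the iterable matches it.
        System.out.println(Iterables.contains(names, null));    // true

        // No equal element found.
        System.out.println(Iterables.contains(names, "gamma")); // false
    }
}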

Usage

From source file:org.lamport.tla.toolbox.jcloud.CloudDistributedTLCJob.java

@Override
protected IStatus run(final IProgressMonitor monitor) {
    monitor.beginTask("Starting TLC model checker in the cloud", 90 + (nodes > 1 ? 20 : 0));
    // Validate credentials and fail fast if null or syntactically incorrect
    if (!params.validateCredentials().equals(Status.OK_STATUS)) {
        return params.validateCredentials();
    }

    ComputeServiceContext context = null;
    try {
        // Tweak tla2tools in a background thread. It takes a couple of seconds to run
        // pack200 to shrink the file size, but we can look up or launch a cloud instance
        // in the meantime.
        monitor.subTask("Tweaking tla2tools.jar to contain the spec & model (in background)");
        final ExecutorService executor = Executors.newSingleThreadExecutor();
        final Future<Payload> future = executor.submit(() -> {
            return PayloadHelper.appendModel2Jar(modelPath, props.getProperty(TLCJobFactory.MAIN_CLASS), props,
                    monitor);
        });
        executor.shutdown();
        // The user may have canceled the job, so stop it here (there are more
        // cancellation checks below).
        if (monitor.isCanceled()) {
            return Status.CANCEL_STATUS;
        }

        // example of specific properties, in this case optimizing image
        // list to only amazon supplied
        final Properties properties = new Properties();
        params.mungeProperties(properties);

        // Create compute environment in the cloud and inject an ssh
        // implementation. ssh is our means of communicating with the node.
        final Iterable<AbstractModule> modules = ImmutableSet.<AbstractModule>of(new SshjSshClientModule(),
                isCLI ? new ConsoleLoggingModule() : new SLF4JLoggingModule());

        final ContextBuilder builder = ContextBuilder.newBuilder(params.getCloudProvider())
                .credentials(params.getIdentity(), params.getCredentials()).modules(modules)
                .overrides(properties);
        params.mungeBuilder(builder);

        monitor.subTask("Initializing " + builder.getApiMetadata().getName());
        context = builder.buildView(ComputeServiceContext.class);
        final ComputeService compute = context.getComputeService();
        monitor.worked(10);
        if (monitor.isCanceled()) {
            return Status.CANCEL_STATUS;
        }

        //TODO Support instance reuse with Cloud distributed TLC.
        monitor.subTask("Looking for resusable nodes to quick-start model checking");
        final Set<NodeMetadata> createNodesInGroup = nodes > 1 ? new HashSet<>()
                : findReusableNodes(compute, monitor);
        monitor.worked(5);
        if (monitor.isCanceled()) {
            return Status.CANCEL_STATUS;
        } else if (createNodesInGroup.isEmpty()) {
            createNodesInGroup.addAll(provisionNodes(compute, monitor));
            if (monitor.isCanceled()) {
                return Status.CANCEL_STATUS;
            }
        } else {
            // skipped provisionNodes(...) which takes 35 steps.
            monitor.worked(35);
        }

        // Choose one of the nodes to be the master and create an
        // identifying predicate.
        final NodeMetadata master = Iterables.getLast(createNodesInGroup);
        final String hostname = Iterables.getOnlyElement(master.getPublicAddresses()); // master.getHostname() only returns internal name

        // Copy tla2tools.jar to _one_ remote host (do not exhaust the upload
        // bandwidth of the machine running the Toolbox).
        // TODO Share the tla2tools.jar with the worker nodes by making it
        // available on the master's webserver for the clients to download.
        // On the other hand this means we are making the spec
        // world-readable. It is cloud-readable already through the RMI api.
        monitor.subTask("Copying tla2tools.jar to master node at " + hostname);
        SshClient sshClient = context.utils().sshForNode().apply(master);
        sshClient.put("/tmp/tla2tools.pack.gz", future.get());
        monitor.worked(10);
        if (monitor.isCanceled()) {
            return Status.CANCEL_STATUS;
        }

        final String tlcMasterCommand = " shutdown -c && rm -rf /mnt/tlc/* && " // Cancel and remove any pending shutdown and leftovers from previous runs.
                + "cd /mnt/tlc/ && "
                // Decompress tla2tools.pack.gz
                + "unpack200 /tmp/tla2tools.pack.gz /tmp/tla2tools.jar" + " && "
                // Execute TLC (java) process inside screen
                // and shutdown on TLC's completion. But
                // detach from screen directly. Name screen 
                // session "tlc".
                // (see http://stackoverflow.com/a/10126799)
                + (isCLI ? "" : "screen -dm -S tlc bash -c \" ")
                // This requires a modified version where all parameters and
                // all spec modules are stored in files in a model/ folder
                // inside of the jar.
                // This is done in anticipation of other cloud providers
                // where one cannot easily pass in parameters on the command
                // line because there is no command line.
                + "java " + params.getJavaVMArgs() + " " + (doJfr ? params.getFlightRecording() + " " : "")
                // Write all tmp files to the ephemeral instance
                // storage which is expected to have a higher IOPS
                // compared to non-local storage.
                + "-Djava.io.tmpdir=/mnt/tlc/ "
                // These properties cannot be "baked" into
                // the payload jar as java itself does not 
                // support this.
                // It might be able to read the properties from 
                // the config file with 'com.sun.management.config.file=path',
                // but I haven't tried if the path can point into the jar.
                + "-Dcom.sun.management.jmxremote " + "-Dcom.sun.management.jmxremote.port=5400 "
                + "-Dcom.sun.management.jmxremote.ssl=false "
                + "-Dcom.sun.management.jmxremote.authenticate=false "
                // TLC tuning options
                + params.getJavaSystemProperties() + " " + "-jar /tmp/tla2tools.jar "
                + params.getTLCParameters() + " " + (isCLI ? "|& tee /mnt/tlc/MC.out " : "") + "&& "
                // Run any cloud specific cleanup tasks.
                // When CloudDistributedTLCJob runs in synchronous CLI mode (isCLI), it will destroy
                // the VMs (nodes) via the jclouds API. No need to deallocate nodes
                // via special logic.
                + (isCLI ? "/bin/true" : params.getCloudAPIShutdown()) + " && "
                // Let the machine power down immediately after
                // finishing model checking to cut costs. However,
                // do not shut down (hence "&&") when TLC finished
                // with an error.
                // It uses "sudo" because the script is explicitly
                // run as a user. No need to run the TLC process as
                // root.
                + "sudo shutdown -h +" + SHUTDOWN_AFTER + (isCLI ? "" : "\""); // closing opening '"' of screen/bash -c
        if (isCLI) {
            monitor.subTask("Starting TLC model checker process");
            // Execute command via ssh instead of as a script to get access to the TLC
            // processes' stdout and stderr.
            //TODO Better handle error case.
            ExecChannel channel = sshClient.execChannel(tlcMasterCommand);
            // Send remote TLC's stdout to local stdout (this throws a TransportException
            // unless shutdown is postponed by a few minutes above).
            ByteStreams.copy(channel.getOutput(), System.out);
            if (doJfr) {
                // Get the Java Flight Recording from the remote machine and save it to a local
                // file in the current working directory. We call "cat" because sftclient#get
                // fails with the old net.schmizz.sshj and an update to the newer
                // com.hierynomus seems like an awful lot of work.
                channel = sshClient.execChannel("cat /mnt/tlc/tlc.jfr");
                final InputStream output = channel.getOutput();
                final String cwd = Paths.get(".").toAbsolutePath().normalize().toString() + File.separator;
                final File jfr = new File(cwd + "tlc.jfr");
                ByteStreams.copy(output, new FileOutputStream(jfr));
                if (jfr.length() == 0) {
                    System.err.println("Received empty Java Flight recording. Not creating tlc.jfr file");
                    jfr.delete();
                }
            }
            // Finally close the ssh connection.
            sshClient.disconnect();
            monitor.subTask("TLC model checker process finished");
            // Eagerly destroy the instance after we pulled the tlc.jfr file from it. No
            // point in waiting for shutdown -h +10 to shutdown the instance.
            destroyNodes(context, groupNameUUID);
        } else {
            sshClient.disconnect();

            // Run model checker master on master
            monitor.subTask("Starting TLC model checker process on the master node (in background)");
            final ExecResponse response = compute.runScriptOnNode(master.getId(), exec(tlcMasterCommand),
                    new TemplateOptions().overrideLoginCredentials(master.getCredentials()).runAsRoot(false)
                            .wrapInInitScript(true).blockOnComplete(false).blockUntilRunning(false));
            throwExceptionOnErrorResponse(master, response,
                    "Starting TLC model checker process on the master node");
            monitor.worked(5);

            if (nodes > 1) {
                // The predicate will be applied to ALL instances owned by the
                // cloud account (i.e. AWS), even ones in different regions
                // completely unrelated to TLC.
                final Predicate<NodeMetadata> isMaster = new Predicate<NodeMetadata>() {
                    private final String masterHostname = master.getHostname();

                    public boolean apply(NodeMetadata nodeMetadata) {
                        // hostname can be null if instance is terminated.
                        final String hostname = nodeMetadata.getHostname();
                        return masterHostname.equals(hostname);
                    };
                };
                // copy the tla2tools.jar to the root of the master's webserver
                // to make it available to workers. However, strip the spec
                // (*.tla/*.cfg/...) from the jar file to not share the spec
                // with the world.
                monitor.subTask("Make TLC code available to all worker node(s)");
                Map<? extends NodeMetadata, ExecResponse> execResponse = compute.runScriptOnNodesMatching(
                        isMaster,
                        exec("cp /tmp/tla2tools.jar /var/www/html/tla2tools.jar && "
                                + "zip -d /var/www/html/tla2tools.jar model/*.tla model/*.cfg model/generated.properties"),
                        new TemplateOptions().runAsRoot(true).wrapInInitScript(false));
                throwExceptionOnErrorResponse(execResponse, "Make TLC code available to all worker node");
                monitor.worked(10);
                if (monitor.isCanceled()) {
                    return Status.CANCEL_STATUS;
                }

                // The predicate will be applied to ALL instances owned by the
                // AWS account, even the ones in different regions completely
                // unrelated to TLC.
                final Predicate<NodeMetadata> onWorkers = new Predicate<NodeMetadata>() {
                    // Remove the master from the set of our nodes.
                    private final Iterable<? extends NodeMetadata> workers = Iterables
                            .filter(createNodesInGroup, new Predicate<NodeMetadata>() {
                                private final String masterHostname = master.getHostname();

                                public boolean apply(NodeMetadata nodeMetadata) {
                                    // nodeMetadata.getHostname is null for terminated hosts.
                                    return !masterHostname.equals(nodeMetadata.getHostname());
                                };
                            });

                    public boolean apply(NodeMetadata nodeMetadata) {
                        return Iterables.contains(workers, nodeMetadata);
                    };
                };

                // see master startup for comments
                monitor.subTask("Starting TLC workers on the remaining node(s) (in background)");
                final String privateHostname = Iterables.getOnlyElement(master.getPrivateAddresses());
                execResponse = compute.runScriptOnNodesMatching(onWorkers,
                        exec("cd /mnt/tlc/ && " + "wget http://" + privateHostname + "/tla2tools.jar && "
                                + "screen -dm -S tlc bash -c \" " + "java " + params.getJavaWorkerVMArgs() + " "
                                + "-Djava.io.tmpdir=/mnt/tlc/ " + "-Dcom.sun.management.jmxremote "
                                + "-Dcom.sun.management.jmxremote.port=5400 "
                                + "-Dcom.sun.management.jmxremote.ssl=false "
                                + "-Dcom.sun.management.jmxremote.authenticate=false "
                                + params.getJavaWorkerSystemProperties() + " " + "-cp /mnt/tlc/tla2tools.jar "
                                + params.getTLCWorkerParameters() + " " + privateHostname + " " // Use host's internal ip due to firewall reasons.
                                + "&& "
                                // Terminate regardless of TLCWorker process
                                // exit value. E.g. TLCWorker can terminate due
                                // to a NoRouteToHostException when the master
                                // shut down caused by a violation among the
                                // init states.
                                // Run any cloud specific cleanup tasks.
                                + params.getCloudAPIShutdown() + " && " + "sudo shutdown -h now" + "\""),
                        new TemplateOptions().runAsRoot(false).wrapInInitScript(true).blockOnComplete(false)
                                .blockUntilRunning(false));
                throwExceptionOnErrorResponse(execResponse, "Starting TLC workers");
                monitor.worked(10);
            }

        }

        // Get the output from the remote instance and attach the corresponding
        // InputStream to the CloudStatus. A UI can then read the InputStream and show
        // the output of the TLC process to a user. The SSH connection automatically
        // terminates when the TLC process finishes.
        // https://askubuntu.com/questions/509881/tail-reading-an-entire-file-and-then-following         
        ExecChannel execChannel = null;
        if (!isCLI) {
            execChannel = sshClient
                    .execChannel("tail -q -f -n +1 /mnt/tlc/MC.out --pid $(pgrep -f tla2tools.jar)");
        }

        // Communicate result to user
        monitor.done();
        return new CloudStatus(Status.OK, "org.lamport.tla.toolbox.jcloud", Status.OK,
                String.format(
                        "TLC is model checking at host %s. "
                                + "Expect to receive an email at %s with the model checking result eventually.",
                        hostname, props.get("result.mail.address")),
                null, new URL("http://" + hostname + "/munin/"),
                execChannel == null ? null : execChannel.getOutput(), sshClient);
    } catch (ExecutionException | InterruptedException | RunNodesException | IOException
            | RunScriptOnNodesException | NoSuchElementException | AuthorizationException | SshException e) {
        e.printStackTrace();
        if (context != null) {
            destroyNodes(context, groupNameUUID);
        }
        // signal error to caller
        return new Status(Status.ERROR, "org.lamport.tla.toolbox.jcloud", e.getMessage(), e);
    } catch (ScriptException e) {
        if (context != null) {
            destroyNodes(context, groupNameUUID);
        }
        // signal error to caller
        return new Status(Status.ERROR, "org.lamport.tla.toolbox.jcloud", e.getTitle(), e);
    } finally {
        if (context != null) {
            // The user has canceled the Toolbox job, take this as a request
            // to destroy all nodes this job has created.
            if (monitor.isCanceled()) {
                destroyNodes(context, groupNameUUID);
            }
            context.close();
        }
    }
}
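
A note on the onWorkers predicate above: it wraps Iterables.contains around a lazily filtered view of createNodesInGroup. As a hedged alternative sketch (reusing the isMaster and createNodesInGroup names from the example, and assuming the worker nodes can be materialized into a collection up front), Guava's Predicates.in performs the same equals-based membership test:

final Set<NodeMetadata> workers = ImmutableSet
        .copyOf(Iterables.filter(createNodesInGroup, Predicates.not(isMaster)));
// Predicates.in delegates to Set#contains, i.e. the same equality semantics
// as Iterables.contains, without re-scanning the iterable on every call.
final Predicate<NodeMetadata> onWorkers = Predicates.in(workers);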

From source file:edu.mit.streamjit.util.bytecode.MethodUnresolver.java

private void emit(CallInst i, InsnList insns) {
    Method m = i.getMethod();
    boolean callingSuperCtor = false;
    if (m.isConstructor()) {
        //If we're calling super(), load this.
        //TODO: this will get confused if we call a superclass constructor
        //for any reason other than our own initialization!
        if (method.isConstructor() && method.getParent().getSuperclass().equals(m.getParent())) {
            load(method.arguments().get(0), insns);
            callingSuperCtor = true;
        } else {
            insns.add(new TypeInsnNode(Opcodes.NEW, internalName(m.getType().getReturnType().getKlass())));
            insns.add(new InsnNode(Opcodes.DUP));
        }
    }
    int opcode;
    if (m.modifiers().contains(Modifier.STATIC))
        opcode = Opcodes.INVOKESTATIC;
    else if (m.isConstructor() || m.getAccess().equals(Access.PRIVATE) ||
    //We're calling a superclass method we've overridden.
            (Iterables.contains(method.getParent().superclasses(), m.getParent()))
                    && method.getParent().getMethodByVirtual(m.getName(), m.getType()) != m)
        opcode = Opcodes.INVOKESPECIAL;
    else if (m.getParent().modifiers().contains(Modifier.INTERFACE))
        //TODO: may not be correct?
        opcode = Opcodes.INVOKEINTERFACE;
    else
        opcode = Opcodes.INVOKEVIRTUAL;

    String owner = internalName(m.getParent());
    //hack to make cloning arrays work
    if (opcode == Opcodes.INVOKESPECIAL && m.getName().equals("clone")
            && i.getArgument(0).getType() instanceof ArrayType) {
        opcode = Opcodes.INVOKEVIRTUAL;
        owner = internalName(((ArrayType) i.getArgument(0).getType()).getKlass());
    }

    for (Value v : i.arguments())
        load(v, insns);
    insns.add(new MethodInsnNode(opcode, owner, m.getName(), i.callDescriptor()));

    if (!(i.getType() instanceof VoidType) && !callingSuperCtor)
        store(i, insns);
}

From source file:com.eucalyptus.cluster.callback.CloudWatchHelper.java

public List<PutMetricDataType> collectMetricData(DescribeSensorsResponse msg) throws Exception {
    ArrayList<PutMetricDataType> putMetricDataList = new ArrayList<PutMetricDataType>();
    final Iterable<String> uuidList = instanceInfoProvider.getRunningInstanceUUIDList();

    // cloudwatch metric caches
    final ConcurrentMap<String, DiskReadWriteMetricTypeCache> metricCacheMap = Maps.newConcurrentMap();

    final EC2DiskMetricCache ec2DiskMetricCache = new EC2DiskMetricCache();

    for (final SensorsResourceType sensorData : msg.getSensorsResources()) {
        if (!RESOURCE_TYPE_INSTANCE.equals(sensorData.getResourceType())
                || !Iterables.contains(uuidList, sensorData.getResourceUuid()))
            continue;

        for (final MetricsResourceType metricType : sensorData.getMetrics()) {
            for (final MetricCounterType counterType : metricType.getCounters()) {
                for (final MetricDimensionsType dimensionType : counterType.getDimensions()) {
                    // find and fire most recent value for metric/dimension
                    final List<MetricDimensionsValuesType> values = Lists
                            .newArrayList(stripMilliseconds(dimensionType.getValues()));

                    //CloudWatch use case of metric data
                    // best to enter older data first...
                    Collections.sort(values, Ordering.natural().onResultOf(GetTimestamp.INSTANCE));
                    if (!values.isEmpty()) {

                        for (MetricDimensionsValuesType value : values) {
                            LOG.trace("ResourceUUID: " + sensorData.getResourceUuid());
                            LOG.trace("ResourceName: " + sensorData.getResourceName());
                            LOG.trace("Metric: " + metricType.getMetricName());
                            LOG.trace("Dimension: " + dimensionType.getDimensionName());
                            LOG.trace("Timestamp: " + value.getTimestamp());
                            LOG.trace("Value: " + value.getValue());
                            final Long currentTimeStamp = value.getTimestamp().getTime();
                            final Double currentValue = value.getValue();
                            if (currentValue == null) {
                                LOG.debug("Event received with null 'value', skipping for cloudwatch");
                                continue;
                            }
                            boolean hasEc2DiskMetricName = EC2_DISK_METRICS
                                    .contains(metricType.getMetricName().replace("Volume", "Disk"));
                            // Let's try only creating "zero" points for timestamps from disks
                            if (hasEc2DiskMetricName) {
                                ec2DiskMetricCache.initializeMetrics(sensorData.getResourceUuid(),
                                        sensorData.getResourceName(), currentTimeStamp); // Put a placeholder in, in case we don't have any non-EBS volumes
                            }
                            boolean isEbsMetric = dimensionType.getDimensionName().startsWith("vol-");
                            boolean isEc2DiskMetric = !isEbsMetric && hasEc2DiskMetricName;

                            if (isEbsMetric || !isEc2DiskMetric) {
                                addToPutMetricDataList(putMetricDataList, new Supplier<InstanceUsageEvent>() {
                                    @Override
                                    public InstanceUsageEvent get() {
                                        return new InstanceUsageEvent(sensorData.getResourceUuid(),
                                                sensorData.getResourceName(), metricType.getMetricName(),
                                                dimensionType.getSequenceNum(),
                                                dimensionType.getDimensionName(), currentValue,
                                                currentTimeStamp);
                                    }
                                });

                                if (isEbsMetric) {
                                    // special case to calculate VolumeConsumedReadWriteOps
                                    // As it is (VolumeThroughputPercentage / 100) * (VolumeReadOps + VolumeWriteOps), and we are hard coding
                                    // VolumeThroughputPercentage as 100%, we will just use VolumeReadOps + VolumeWriteOps

                                    // And just in case VolumeReadOps is called DiskReadOps we do both cases...
                                    addToPutMetricDataList(putMetricDataList,
                                            combineReadWriteDiskMetric("DiskReadOps", "DiskWriteOps",
                                                    metricCacheMap, "DiskConsumedReadWriteOps", metricType,
                                                    sensorData, dimensionType, value));
                                    addToPutMetricDataList(putMetricDataList,
                                            combineReadWriteDiskMetric("VolumeReadOps", "VolumeWriteOps",
                                                    metricCacheMap, "VolumeConsumedReadWriteOps", metricType,
                                                    sensorData, dimensionType, value));

                                    // Also need VolumeTotalReadWriteTime to compute VolumeIdleTime
                                    addToPutMetricDataList(putMetricDataList,
                                            combineReadWriteDiskMetric("VolumeTotalReadTime",
                                                    "VolumeTotalWriteTime", metricCacheMap,
                                                    "VolumeTotalReadWriteTime", metricType, sensorData,
                                                    dimensionType, value));
                                }
                            } else {
                                // see if it is a volume metric
                                String metricName = metricType.getMetricName().replace("Volume", "Disk");
                                ec2DiskMetricCache.addToMetric(sensorData.getResourceUuid(),
                                        sensorData.getResourceName(), metricName, currentValue,
                                        currentTimeStamp);
                            }
                        }
                    }
                }
            }
        }
    }
    Collection<Supplier<InstanceUsageEvent>> ec2DiskMetrics = ec2DiskMetricCache.getMetrics();
    List<Supplier<InstanceUsageEvent>> ec2DiskMetricsSorted = Lists.newArrayList(ec2DiskMetrics);
    Collections.sort(ec2DiskMetricsSorted,
            Ordering.natural().onResultOf(new Function<Supplier<InstanceUsageEvent>, Long>() {
                @Override
                @Nullable
                public Long apply(@Nullable Supplier<InstanceUsageEvent> supplier) {
                    return supplier.get().getValueTimestamp();
                }
            }));
    for (Supplier<InstanceUsageEvent> ec2DiskMetric : ec2DiskMetricsSorted) {
        try {
            addToPutMetricDataList(putMetricDataList, ec2DiskMetric);
        } catch (Exception ex) {
            LOG.debug("Unable to add system metric " + ec2DiskMetric, ex);
        }
    }
    return putMetricDataList;
}
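
A hedged performance note on the uuidList check above: Iterables.contains runs once per sensor resource, and unless getRunningInstanceUUIDList() returns a Collection (Guava then delegates to Collection#contains), each call is a linear scan. A minimal sketch of the usual alternative when many lookups hit the same iterable (names reused from the example; assumes the UUID list fits in memory and is stable while the message is processed):

final Set<String> uuids = ImmutableSet.copyOf(instanceInfoProvider.getRunningInstanceUUIDList());
// ... then, inside the loop over msg.getSensorsResources():
if (!RESOURCE_TYPE_INSTANCE.equals(sensorData.getResourceType())
        || !uuids.contains(sensorData.getResourceUuid()))
    continue;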

From source file:com.google.devtools.build.lib.rules.objc.ObjcProvider.java

/**
 * Indicates whether {@code flag} is set on this provider.
 */
public boolean is(Flag flag) {
    return Iterables.contains(get(FLAG), flag);
}

From source file:com.google.devtools.build.lib.rules.objc.LegacyCompilationSupport.java

private boolean isDynamicLib(CommandLine commandLine) {
    return Iterables.contains(commandLine.arguments(), "-dynamiclib");
}

From source file:org.apache.brooklyn.location.jclouds.networking.JcloudsLocationSecurityGroupCustomizer.java

/**
 * Creates a security group with rules to:
 * <ul>//  w  ww.j  a v  a2 s .  co m
 *     <li>Allow SSH access on port 22 from the world</li>
 *     <li>Allow TCP, UDP and ICMP communication between machines in the same group</li>
 * </ul>
 *
 * It needs to consider the location ID because port ranges and group IDs are
 * cloud-provider-dependent: e.g. openstack-nova wants ports from 1-65535 while
 * aws-ec2 accepts from 0-65535.
 *
 * @param groupName The name of the security group to create
 * @param location The location in which the security group will be created
 * @param securityApi The API to use to create the security group
 *
 * @return the created security group
 */
private SecurityGroup createBaseSecurityGroupInLocation(String groupName, Location location,
        SecurityGroupExtension securityApi) {
    SecurityGroup group = addSecurityGroupInLocation(groupName, location, securityApi);

    Set<String> openstackNovaIds = getJcloudsLocationIds("openstack-nova");

    String groupId = group.getProviderId();
    int fromPort = 0;
    if (location.getParent() != null && Iterables.contains(openstackNovaIds, location.getParent().getId())) {
        groupId = group.getId();
        fromPort = 1;
    }
    // Note: For groupName to work with GCE we also need to tag the machines with the same ID.
    // See sourceTags section at https://developers.google.com/compute/docs/networking#firewalls
    IpPermission.Builder allWithinGroup = IpPermission.builder().groupId(groupId).fromPort(fromPort)
            .toPort(65535);
    addPermission(allWithinGroup.ipProtocol(IpProtocol.TCP).build(), group, securityApi);
    addPermission(allWithinGroup.ipProtocol(IpProtocol.UDP).build(), group, securityApi);
    addPermission(allWithinGroup.ipProtocol(IpProtocol.ICMP).fromPort(-1).toPort(-1).build(), group,
            securityApi);

    IpPermission sshPermission = IpPermission.builder().fromPort(22).toPort(22).ipProtocol(IpProtocol.TCP)
            .cidrBlock(getBrooklynCidrBlock()).build();
    addPermission(sshPermission, group, securityApi);

    return group;
}

From source file:org.eclipse.qvtd.compiler.internal.scheduler.AbstractNode.java

@Override
public final void removeIncomingConnection(@NonNull Connection connection) {
    assert Iterables.contains(connection.getTargets(), this);
    //      assert edge.getRegion() == getRegion();
    List<Connection> incomingConnections2 = incomingConnections;
    assert incomingConnections2 != null;
    boolean wasRemoved = incomingConnections2.remove(connection);
    assert wasRemoved;
}

From source file:edu.mit.streamjit.impl.compiler.Compiler.java

/**
 * Make the work method for the given worker.  We actually make two methods
 * here: first we make a copy with a dummy receiver argument, just to have a
 * copy to work with.  After remapping every use of that receiver (remapping
 * field accesses to the worker's static fields, remapping JIT-hooks to
 * their implementations, and remapping utility methods in the worker class
 * recursively), we then create the actual work method without the receiver
 * argument.
 * @param worker
 */
private void makeWorkMethod(Worker<?, ?> worker) {
    StreamNode node = streamNodes.get(worker);
    int id = Workers.getIdentifier(worker);
    int numInputs = getNumInputs(worker);
    int numOutputs = getNumOutputs(worker);
    Klass workerKlass = module.getKlass(worker.getClass());
    Method oldWork = workerKlass.getMethodByVirtual("work",
            module.types().getMethodType(void.class, worker.getClass()));
    oldWork.resolve();

    //Add a dummy receiver argument so we can clone the user's work method.
    MethodType rworkMethodType = workMethodType.prependArgument(module.types().getRegularType(workerKlass));
    Method newWork = new Method("rwork" + id, rworkMethodType, EnumSet.of(Modifier.PRIVATE, Modifier.STATIC),
            blobKlass);
    newWork.arguments().get(0).setName("dummyReceiver");
    newWork.arguments().get(1).setName("ichannels");
    newWork.arguments().get(2).setName("ioffsets");
    newWork.arguments().get(3).setName("iincrements");
    newWork.arguments().get(4).setName("ochannels");
    newWork.arguments().get(5).setName("ooffsets");
    newWork.arguments().get(6).setName("oincrements");

    Map<Value, Value> vmap = new IdentityHashMap<>();
    vmap.put(oldWork.arguments().get(0), newWork.arguments().get(0));
    Cloning.cloneMethod(oldWork, newWork, vmap);

    BasicBlock entryBlock = new BasicBlock(module, "entry");
    newWork.basicBlocks().add(0, entryBlock);

    //We make copies of the offset arrays.  (int[].clone() returns Object,
    //so we have to cast.)
    //Actually, we don't!  We need the updates to carry over to further
    //iterations within the nodework.  My thinking was that we could
    //precompute these to avoid repeated allocations, or something.
    //      Method clone = Iterables.getOnlyElement(module.getKlass(Object.class).getMethods("clone"));
    //      CallInst ioffsetCloneCall = new CallInst(clone, newWork.arguments().get(2));
    //      entryBlock.instructions().add(ioffsetCloneCall);
    //      CastInst ioffsetCast = new CastInst(module.types().getArrayType(int[].class), ioffsetCloneCall);
    //      entryBlock.instructions().add(ioffsetCast);
    Argument ioffsetCast = newWork.arguments().get(2);
    LocalVariable ioffsetCopy = new LocalVariable((RegularType) ioffsetCast.getType(), "ioffsetCopy", newWork);
    StoreInst popCountInit = new StoreInst(ioffsetCopy, ioffsetCast);
    popCountInit.setName("ioffsetInit");
    entryBlock.instructions().add(popCountInit);

    //      CallInst ooffsetCloneCall = new CallInst(clone, newWork.arguments().get(5));
    //      entryBlock.instructions().add(ooffsetCloneCall);
    //      CastInst ooffsetCast = new CastInst(module.types().getArrayType(int[].class), ooffsetCloneCall);
    //      entryBlock.instructions().add(ooffsetCast);
    Argument ooffsetCast = newWork.arguments().get(5);
    LocalVariable ooffsetCopy = new LocalVariable((RegularType) ooffsetCast.getType(), "ooffsetCopy", newWork);
    StoreInst pushCountInit = new StoreInst(ooffsetCopy, ooffsetCast);
    pushCountInit.setName("ooffsetInit");
    entryBlock.instructions().add(pushCountInit);

    entryBlock.instructions().add(new JumpInst(newWork.basicBlocks().get(1)));

    //Remap stuff in rwork.
    for (BasicBlock b : newWork.basicBlocks())
        for (Instruction i : ImmutableList.copyOf(b.instructions()))
            if (Iterables.contains(i.operands(), newWork.arguments().get(0)))
                remapEliminiatingReceiver(i, worker);

    //At this point, we've replaced all uses of the dummy receiver argument.
    assert newWork.arguments().get(0).uses().isEmpty();
    Method trueWork = new Method("work" + id, workMethodType, EnumSet.of(Modifier.PRIVATE, Modifier.STATIC),
            blobKlass);
    vmap.clear();
    vmap.put(newWork.arguments().get(0), null);
    for (int i = 1; i < newWork.arguments().size(); ++i)
        vmap.put(newWork.arguments().get(i), trueWork.arguments().get(i - 1));
    Cloning.cloneMethod(newWork, trueWork, vmap);
    workerWorkMethods.put(worker, trueWork);
    newWork.eraseFromParent();
}

From source file:org.eclipse.qvtd.compiler.internal.scheduler.AbstractNode.java

@Override
public final void removeOutgoingConnection(@NonNull Connection connection) {
    assert Iterables.contains(connection.getSources(), this);
    //      assert edge.getRegion() == getRegion();
    List<Connection> outgoingConnections2 = outgoingConnections;
    assert outgoingConnections2 != null;
    boolean wasRemoved = outgoingConnections2.remove(connection);
    assert wasRemoved;
}

From source file:org.trancecode.xproc.step.Step.java

public Step addLog(final String port, final String href) {
    LOG.trace("{@method} step = {} ; port = {} ; href = {}", name, port, href);
    final Log log = new Log(port, href);
    assert !Iterables.contains(logs, log) : name + " / " + logs + " / " + log;
    return new Step(node, type, name, internalName, location, stepProcessor, compoundStep, variables,
            parameters, ports, steps, TcLists.immutableList(logs, log));
}