List of usage examples for com.google.common.collect Iterables toString
public static String toString(Iterable<?> iterable)
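Iterables.toString returns a string representation of the elements of the given iterable, in iteration order, using the same bracketed, comma-separated format as Collection.toString (for example "[a, b, c]"). As the examples below show, it is most often used to render an arbitrary Iterable inside a log or error message. The following is a minimal, self-contained sketch of that usage; the class name and sample values are illustrative only.

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;

import java.util.List;

public class IterablesToStringExample {
    public static void main(String[] args) {
        // Any Iterable works, not just a Collection (sample values are made up).
        List<String> endpoints = ImmutableList.of("10.0.0.1", "10.0.0.2", "10.0.0.3");

        // Prints "[10.0.0.1, 10.0.0.2, 10.0.0.3]" -- convenient for log and error messages.
        System.out.println(Iterables.toString(endpoints));
    }
}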
From source file:org.eclipse.emf.compare.ide.ui.internal.logical.ProjectModelResolver.java
/**
 * When executing local comparisons, we resolve the full logical model of both (or "all three of") the
 * compared files.
 * <p>
 * If there is one resource in the scope that references all of these starting points, then we'll have
 * perfectly identical logical models for all comparison sides. Because of that, we need to constrain the
 * logical model of each starting point to only parts that are not accessible from other starting points.
 * This might cause coherence issues as merging could thus "break" references from other files to our
 * compared ones.
 * </p>
 * <p>
 * This method will be used to browse the files that are removed from the logical model, and log a warning
 * for the files that are removed even though they are "parents" of one of the starting points.
 * </p>
 *
 * @param startingPoints
 *            Starting points of the comparison.
 * @param removedFromModel
 *            All files that have been removed from the comparison scope.
 */
private void logCoherenceThreats(Set<IFile> startingPoints, Set<IStorage> removedFromModel) {
    final Set<URI> coherenceThreats = new LinkedHashSet<URI>();
    for (IStorage start : startingPoints) {
        final URI startURI = createURIFor(start);
        for (IStorage removed : removedFromModel) {
            final URI removedURI = createURIFor(removed);
            if (dependencyGraph.hasChild(removedURI, startURI)) {
                coherenceThreats.add(removedURI);
            }
        }
    }

    if (!coherenceThreats.isEmpty()) {
        final String message = EMFCompareIDEUIMessages.getString("ModelResolver.coherenceWarning"); //$NON-NLS-1$
        final String details = Iterables.toString(coherenceThreats);
        EMFCompareIDEUIPlugin.getDefault().getLog()
                .log(new Status(IStatus.WARNING, EMFCompareIDEUIPlugin.PLUGIN_ID, message + '\n' + details));
    }
}
From source file:ezbake.deployer.publishers.EzOpenShiftPublisher.java
@Override
public void unpublish(DeploymentArtifact artifact, EzSecurityToken callerToken) throws DeploymentException {
    final String openShiftAppName = getOpenShiftAppName(artifact);
    final String openShiftDomainName = getOpenShiftDomainName(artifact);
    maybeUnRegisterAllReverseProxy(artifact);
    List<RhcApplication> currentInstances = findAllApplicationInstances(openShiftAppName, openShiftDomainName);
    for (RhcApplication appInstance : currentInstances) {
        log.info("Removing {} from openshift! ", appInstance.getApplicationInstance().getName());
        appInstance.delete();
    }
    String deleted = Iterables
            .toString(Iterables.transform(currentInstances, new Function<RhcApplication, String>() {
                @Override
                public String apply(RhcApplication input) {
                    return input.getApplicationName();
                }
            }));
    log.info("Unpublished " + deleted + " from openShift.");
}
From source file:org.jclouds.docker.compute.strategy.DockerComputeServiceAdapter.java
@SuppressWarnings({ "rawtypes", "unchecked" }) @Override/*from w w w. j a v a2s. c o m*/ public NodeAndInitialCredentials<Container> createNodeWithGroupEncodedIntoName(String group, String name, Template template) { checkNotNull(template, "template was null"); TemplateOptions options = template.getOptions(); checkNotNull(options, "template options was null"); String imageId = checkNotNull(template.getImage().getId(), "template image id must not be null"); String loginUser = template.getImage().getDefaultCredentials().getUser(); String loginUserPassword = template.getImage().getDefaultCredentials().getOptionalPassword().or("password"); DockerTemplateOptions templateOptions = DockerTemplateOptions.class.cast(options); Config containerConfig = null; Config.Builder containerConfigBuilder = templateOptions.getConfigBuilder(); if (containerConfigBuilder == null) { containerConfigBuilder = Config.builder().image(imageId); containerConfigBuilder.entrypoint(templateOptions.getEntrypoint()); containerConfigBuilder.cmd(templateOptions.getCommands()); containerConfigBuilder.memory(templateOptions.getMemory()); containerConfigBuilder.hostname(templateOptions.getHostname()); containerConfigBuilder.cpuShares(templateOptions.getCpuShares()); containerConfigBuilder.openStdin(templateOptions.getOpenStdin()); containerConfigBuilder.env(templateOptions.getEnv()); if (!templateOptions.getVolumes().isEmpty()) { Map<String, Object> volumes = Maps.newLinkedHashMap(); for (String containerDir : templateOptions.getVolumes().values()) { volumes.put(containerDir, Maps.newHashMap()); } containerConfigBuilder.volumes(volumes); } HostConfig.Builder hostConfigBuilder = HostConfig.builder().publishAllPorts(true) .privileged(templateOptions.getPrivileged()); if (!templateOptions.getPortBindings().isEmpty()) { Map<String, List<Map<String, String>>> portBindings = Maps.newHashMap(); for (Map.Entry<Integer, Integer> entry : templateOptions.getPortBindings().entrySet()) { portBindings.put(entry.getValue() + "/tcp", Lists.<Map<String, String>>newArrayList( ImmutableMap.of("HostIp", "0.0.0.0", "HostPort", Integer.toString(entry.getKey())))); } hostConfigBuilder.portBindings(portBindings); } if (!templateOptions.getDns().isEmpty()) { hostConfigBuilder.dns(templateOptions.getDns()); } if (!templateOptions.getExtraHosts().isEmpty()) { List<String> extraHosts = Lists.newArrayList(); for (Map.Entry<String, String> entry : templateOptions.getExtraHosts().entrySet()) { extraHosts.add(entry.getKey() + ":" + entry.getValue()); } hostConfigBuilder.extraHosts(extraHosts); } if (!templateOptions.getVolumes().isEmpty()) { for (Map.Entry<String, String> entry : templateOptions.getVolumes().entrySet()) { hostConfigBuilder.binds(ImmutableList.of(entry.getKey() + ":" + entry.getValue())); } } if (!templateOptions.getVolumesFrom().isEmpty()) { hostConfigBuilder.volumesFrom(templateOptions.getVolumesFrom()); } hostConfigBuilder.networkMode(templateOptions.getNetworkMode()); containerConfigBuilder.hostConfig(hostConfigBuilder.build()); // add the inbound ports into exposed ports map containerConfig = containerConfigBuilder.build(); Map<String, Object> exposedPorts = Maps.newHashMap(); if (containerConfig.exposedPorts() == null) { exposedPorts.putAll(containerConfig.exposedPorts()); } for (int inboundPort : templateOptions.getInboundPorts()) { String portKey = inboundPort + "/tcp"; if (!exposedPorts.containsKey(portKey)) { exposedPorts.put(portKey, Maps.newHashMap()); } } containerConfigBuilder.exposedPorts(exposedPorts); // build once more after 
setting inboundPorts containerConfig = containerConfigBuilder.build(); // finally update port bindings Map<String, List<Map<String, String>>> portBindings = Maps.newHashMap(); Map<String, List<Map<String, String>>> existingBindings = containerConfig.hostConfig().portBindings(); if (existingBindings != null) { portBindings.putAll(existingBindings); } for (String exposedPort : containerConfig.exposedPorts().keySet()) { if (!portBindings.containsKey(exposedPort)) { portBindings.put(exposedPort, Lists.<Map<String, String>>newArrayList(ImmutableMap.of("HostIp", "0.0.0.0"))); } } hostConfigBuilder = HostConfig.builder().fromHostConfig(containerConfig.hostConfig()); hostConfigBuilder.portBindings(portBindings); containerConfigBuilder.hostConfig(hostConfigBuilder.build()); } else { containerConfigBuilder.image(imageId); } containerConfig = containerConfigBuilder.build(); logger.debug(">> creating new container with containerConfig(%s)", containerConfig); Container container = api.getContainerApi().createContainer(name, containerConfig); logger.trace("<< container(%s)", container.id()); if (templateOptions.getNetworks() != null) { logger.debug(">> connecting container(%s) to networks(%s)", container.id(), Iterables.toString(templateOptions.getNetworks())); for (String networkIdOrName : templateOptions.getNetworks()) { api.getNetworkApi().connectContainerToNetwork(networkIdOrName, container.id()); } logger.trace("<< connected(%s)", container.id()); } HostConfig hostConfig = containerConfig.hostConfig(); logger.debug(">> starting container(%s) with hostConfig(%s)", container.id(), hostConfig); api.getContainerApi().startContainer(container.id(), hostConfig); logger.trace("<< started(%s)", container.id()); container = api.getContainerApi().inspectContainer(container.id()); return new NodeAndInitialCredentials(container, container.id(), LoginCredentials.builder().user(loginUser).password(loginUserPassword).build()); }
From source file:org.apache.cassandra.db.ConsistencyLevel.java
public void assureSufficientLiveNodes(Keyspace keyspace, Iterable<InetAddress> liveEndpoints)
        throws UnavailableException {
    int blockFor = blockFor(keyspace);
    switch (this) {
    case ANY:
        // local hint is acceptable, and local node is always live
        break;
    case LOCAL_ONE:
        if (countLocalEndpoints(liveEndpoints) == 0)
            throw new UnavailableException(this, 1, 0);
        break;
    case LOCAL_QUORUM:
        int localLive = countLocalEndpoints(liveEndpoints);
        if (localLive < blockFor) {
            if (logger.isTraceEnabled()) {
                StringBuilder builder = new StringBuilder("Local replicas [");
                for (InetAddress endpoint : liveEndpoints) {
                    if (isLocal(endpoint))
                        builder.append(endpoint).append(",");
                }
                builder.append("] are insufficient to satisfy LOCAL_QUORUM requirement of ").append(blockFor)
                        .append(" live nodes in '").append(DatabaseDescriptor.getLocalDataCenter()).append("'");
                logger.trace(builder.toString());
            }
            throw new UnavailableException(this, blockFor, localLive);
        }
        break;
    case EACH_QUORUM:
        if (keyspace.getReplicationStrategy() instanceof NetworkTopologyStrategy) {
            for (Map.Entry<String, Integer> entry : countPerDCEndpoints(keyspace, liveEndpoints).entrySet()) {
                int dcBlockFor = localQuorumFor(keyspace, entry.getKey());
                int dcLive = entry.getValue();
                if (dcLive < dcBlockFor)
                    throw new UnavailableException(this, dcBlockFor, dcLive);
            }
            break;
        }
        // Fallthrough on purpose for SimpleStrategy
    default:
        int live = Iterables.size(liveEndpoints);
        if (live < blockFor) {
            logger.trace("Live nodes {} do not satisfy ConsistencyLevel ({} required)",
                    Iterables.toString(liveEndpoints), blockFor);
            throw new UnavailableException(this, blockFor, live);
        }
        break;
    }
}
From source file:org.jclouds.vcloud.director.v1_5.domain.Checks.java
public static void checkType(String type, Collection<String> validTypes) {
    assertTrue(validTypes.contains(type),
            String.format(REQUIRED_VALUE_FMT, "Type", type, Iterables.toString(validTypes)));
}
From source file:org.jclouds.vcloud.director.v1_5.domain.Checks.java
public static void checkLink(Link link) {
    // Check required fields
    assertNotNull(link.getRel(), String.format(NOT_NULL_OBJ_FIELD_FMT, "Rel", "Link"));
    assertTrue(Link.Rel.ALL.contains(link.getRel()), String.format(REQUIRED_VALUE_OBJECT_FMT, "Rel", "Link",
            link.getRel(), Iterables.toString(Link.Rel.ALL)));

    // Check parent type
    checkReferenceType(link);
}
From source file:org.eclipse.emf.compare.ide.ui.internal.logical.resolver.LocalModelsResolution.java
/**
 * When executing local comparisons, we resolve the full logical model of both (or "all three of") the
 * compared files.
 * <p>
 * If there is one resource in the scope that references all of these starting points, then we'll have
 * perfectly identical logical models for all comparison sides. Because of that, we need to constrain the
 * logical model of each starting point to only parts that are not accessible from other starting points.
 * This might cause coherence issues as merging could thus "break" references from other files to our
 * compared ones.
 * </p>
 * <p>
 * This method will be used to browse the files that are removed from the logical model, and log a warning
 * for the files that are removed even though they are "parents" of one of the starting points.
 * </p>
 *
 * @param startingPoints
 *            Starting points of the comparison.
 * @param removedFromModel
 *            All files that have been removed from the comparison scope.
 */
private void logCoherenceThreats(Iterable<URI> startingPoints, Iterable<URI> removedFromModel) {
    final Set<URI> coherenceThreats = new LinkedHashSet<URI>();
    for (URI start : startingPoints) {
        for (URI removed : removedFromModel) {
            if (context.getDependencyProvider().hasChild(removed, start)) {
                coherenceThreats.add(removed);
            }
        }
    }

    if (!coherenceThreats.isEmpty()) {
        // FIXME: should be added to diagnostic instead
        final String message = EMFCompareIDEUIMessages.getString("ModelResolver.coherenceWarning"); //$NON-NLS-1$
        final String details = Iterables.toString(coherenceThreats);
        EMFCompareIDEUIPlugin.getDefault().getLog()
                .log(new Status(IStatus.WARNING, EMFCompareIDEUIPlugin.PLUGIN_ID, message + '\n' + details));
    }
}
From source file:org.jclouds.vcloud.director.v1_5.domain.Checks.java
public static void checkTask(Task task) {
    // Check required fields
    assertNotNull(task.getStatus(), String.format(NOT_NULL_OBJ_FIELD_FMT, "Status", "Task"));
    assertTrue(Task.Status.ALL.contains(task.getStatus()), String.format(REQUIRED_VALUE_OBJECT_FMT, "Status",
            "Task", task.getStatus(), Iterables.toString(Task.Status.ALL)));

    // Check optional fields
    // NOTE operation cannot be checked
    // NOTE operationName cannot be checked
    // NOTE startTime cannot be checked
    // NOTE endTime cannot be checked
    // NOTE expiryTime cannot be checked
    Reference owner = task.getOwner();
    if (owner != null)
        checkReferenceType(owner);
    Error error = task.getError();
    if (error != null)
        checkError(error);
    Reference user = task.getUser();
    if (user != null)
        checkReferenceType(user);
    Reference org = task.get();
    if (org != null)
        checkReferenceType(org);
    Integer progress = task.getProgress();
    if (progress != null)
        checkProgress(progress);
    // NOTE params cannot be checked

    // Check parent type
    checkEntityType(task);
}
From source file:eu.numberfour.n4js.external.NpmPackageToProjectAdapter.java
/**
 * Add type definitions (N4JSDs) to the npm package. Types are added only if matching version is found.
 *
 * This method suppresses any potential issues as adding type definitions to some npm package does not
 * affect overall npm usage. Still, errors are {@link #LOGGER logged} to help troubleshooting potential
 * issues and returns with an {@link IStatus status} instance that represents the problem if any.
 *
 * @param packageRoot
 *            npm package folder.
 * @param packageJson
 *            {@link PackageJson package.json} of that package.
 * @param manifest
 *            file that will be adjusted according to manifest fragments.
 * @param definitionsFolder
 *            root folder for npm type definitions.
 *
 * @return a status representing the outcome of the performed operation.
 */
/* default */ IStatus addTypeDefinitions(File packageRoot, PackageJson packageJson, File manifest,
        File definitionsFolder) {
    String packageName = packageRoot.getName();
    File packageN4JSDsRoot = new File(definitionsFolder, packageName);
    if (!(packageN4JSDsRoot.exists() && packageN4JSDsRoot.isDirectory())) {
        LOGGER.info("No type definitions found for '" + packageName + "' npm package.");
        return statusHelper.OK();
    }
    String packageJsonVersion = packageJson.getVersion();
    Version packageVersion = Version.createFromString(packageJsonVersion);
    String[] list = packageN4JSDsRoot.list();
    Set<Version> availableTypeDefinitionsVersions = new HashSet<>();
    for (int i = 0; i < list.length; i++) {
        String version = list[i];
        Version availableTypeDefinitionsVersion = Version.createFromString(version);
        if (!Version.MISSING.equals(availableTypeDefinitionsVersion)) {
            availableTypeDefinitionsVersions.add(availableTypeDefinitionsVersion);
        }
    }
    Version closestMatchingVersion = Version.findClosestMatching(availableTypeDefinitionsVersions,
            packageVersion);

    if (Version.MISSING.equals(closestMatchingVersion)) {
        LOGGER.info("No proper versions can be found for '" + packageName + "' npm package.");
        LOGGER.info("Desired version was: " + packageVersion + ".");
        if (availableTypeDefinitionsVersions.isEmpty()) {
            LOGGER.info("No versions were available.");
        } else if (1 == availableTypeDefinitionsVersions.size()) {
            final Version head = availableTypeDefinitionsVersions.iterator().next();
            LOGGER.info("The following version was available for '" + packageName + "': " + head + ".");
        } else {
            final String versions = Iterables.toString(availableTypeDefinitionsVersions);
            LOGGER.info("The following versions were available for '" + packageName + "': " + versions + ".");
        }
        return statusHelper.OK();
    }

    File packageVersionedN4JSD = new File(packageN4JSDsRoot, closestMatchingVersion.toString());
    if (!(definitionsFolder.exists() && definitionsFolder.isDirectory())) {
        final String message = "Cannot find type definitions folder for '" + packageName
                + "' npm package for version '" + closestMatchingVersion + "'.";
        LOGGER.error(message);
        return statusHelper.createError(message);
    }

    try {
        FileCopier.copy(packageVersionedN4JSD.toPath(), packageRoot.toPath());
    } catch (IOException e) {
        final String message = "Error while trying to update type definitions content for '" + packageName
                + "' npm package.";
        LOGGER.error(message);
        return statusHelper.createError(message, e);
    }

    // adjust manifest according to type definitions manifest fragments
    File[] manifestFragments = packageRoot.listFiles(ONLY_MANIFEST_FRAGMENTS);
    return adjustManifest(manifest, manifestFragments);
}
From source file:clocker.mesos.entity.MesosClusterImpl.java
/**
 * De-register our {@link MesosLocation} and its children.
 */
@Override
public void stop() {
    disconnectSensors();

    sensors().set(SERVICE_UP, Boolean.FALSE);

    deleteLocation();

    Duration timeout = config().get(SHUTDOWN_TIMEOUT);

    // Find all applications and stop, blocking for up to five minutes until ended
    try {
        Iterable<Entity> entities = Iterables.filter(getManagementContext().getEntityManager().getEntities(),
                Predicates.and(MesosUtils.sameCluster(this),
                        Predicates.not(EntityPredicates.applicationIdEqualTo(getApplicationId()))));
        Set<Application> applications = ImmutableSet
                .copyOf(Iterables.transform(entities, new Function<Entity, Application>() {
                    @Override
                    public Application apply(Entity input) {
                        return input.getApplication();
                    }
                }));
        LOG.debug("Stopping applications: {}", Iterables.toString(applications));
        Entities.invokeEffectorList(this, applications, Startable.STOP).get(timeout);
    } catch (Exception e) {
        LOG.warn("Error stopping applications", e);
    }

    // Stop all framework tasks in parallel
    try {
        Group frameworks = sensors().get(MESOS_FRAMEWORKS);
        LOG.debug("Stopping framework tasks in: {}", Iterables.toString(frameworks.getMembers()));
        Entities.invokeEffectorList(this, frameworks.getMembers(), Startable.STOP).get(timeout);
    } catch (Exception e) {
        LOG.warn("Error stopping frameworks", e);
    }

    // Stop anything else left over
    // TODO Stop slave entities
    try {
        super.stop();
    } catch (Exception e) {
        LOG.warn("Error stopping children", e);
    }
}