Example usage for com.google.common.collect Iterables all

List of usage examples for com.google.common.collect Iterables all

Introduction

On this page you can find usage examples for com.google.common.collect Iterables all.

Prototype

public static <T> boolean all(Iterable<T> iterable, Predicate<? super T> predicate) 

Document

Returns true if every element in iterable satisfies the predicate.
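
Before the real-world usages, here is a minimal, self-contained sketch of how Iterables.all applies a predicate to every element (the class name, list contents, and variable names are illustrative and not taken from any of the examples below):

import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;

import java.util.List;

public class IterablesAllSketch {
    public static void main(String[] args) {
        List<Integer> numbers = ImmutableList.of(2, 4, 6);

        // Every element is even, so this is true.
        boolean allEven = Iterables.all(numbers, n -> n % 2 == 0);

        // Predicates.instanceOf builds a type-check predicate; the Trace Compass
        // example below uses the same idiom. All elements are Integers, so true.
        boolean allIntegers = Iterables.all(numbers, Predicates.instanceOf(Integer.class));

        // 5 fails the predicate, so this is false.
        boolean withOdd = Iterables.all(ImmutableList.of(2, 5), n -> n % 2 == 0);

        System.out.println(allEven + " " + allIntegers + " " + withOdd);
    }
}

Note that Iterables.all returns true for an empty iterable, which is worth keeping in mind when the input may be empty; the CDAP example below guards with Iterables.isEmpty before acting on the result.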

Usage

From source file:org.eclipse.tracecompass.internal.analysis.os.linux.ui.views.cpuusage.CpuUsageView.java

/**
 * Signal handler for when a cpu is selected
 *
 * @param signal
 *            the cpu being selected
 * @since 2.0
 */
@TmfSignalHandler
public void cpuSelect(TmfCpuSelectedSignal signal) {
    final @Nullable CpuUsageXYViewer xyViewer = fXYViewer;
    final @Nullable CpuUsageComposite treeViewer = fTreeViewer;
    if (xyViewer != null && treeViewer != null) {
        Object data = getData(CPU_USAGE_FOLLOW_CPU);
        if (data == null) {
            data = new TreeSet<Integer>();
        }
        if (data instanceof Set<?>) {
            Set<?> set = (Set<?>) data;
            int core = signal.getCore();
            if (core >= 0) {
                xyViewer.addCpu(core);
                treeViewer.addCpu(core);
                if (Iterables.all(set, Predicates.instanceOf(Integer.class))) {
                    @SuppressWarnings("unchecked")
                    Set<Integer> intSet = (Set<Integer>) set;
                    intSet.add(core);
                }
            } else {
                xyViewer.clearCpu();
                treeViewer.clearCpu();
                ((Set<?>) data).clear();
            }
            saveData(CPU_USAGE_FOLLOW_CPU, data);
        } else {
            Activator.getDefault().logError("The followed cores should have been stored in a Set"); //$NON-NLS-1$
        }
    }
}

From source file:net.conquiris.api.index.IndexInfo.java

private IndexInfo(String checkpoint, String targetCheckpoint, int documents, long timestamp, long sequence,
        Map<String, String> properties) {
    checkArgument(documents >= 0);
    checkArgument(timestamp >= 0);
    checkArgument(sequence >= 0);
    checkNotNull(properties);
    checkArgument(Iterables.all(properties.entrySet(), isUserProperty()));
    this.checkpoint = checkpoint;
    this.targetCheckpoint = targetCheckpoint;
    this.documents = documents;
    this.timestamp = timestamp;
    this.sequence = sequence;
    this.properties = ImmutableMap.copyOf(properties);
}

From source file:co.cask.cdap.data2.transaction.stream.AbstractStreamFileConsumerFactory.java

private MultiLiveStreamFileReader createReader(final StreamConfig streamConfig,
        StreamConsumerState consumerState) throws IOException {
    Location streamLocation = streamConfig.getLocation();
    Preconditions.checkNotNull(streamLocation, "Stream location is null for %s", streamConfig.getStreamId());

    // Look for the latest stream generation
    final int generation = StreamUtils.getGeneration(streamConfig);
    streamLocation = StreamUtils.createGenerationLocation(streamLocation, generation);

    final long currentTime = System.currentTimeMillis();

    if (!Iterables.isEmpty(consumerState.getState())) {
        // See if any offset has a different generation or is expired. If so, don't use the old states.
        boolean useStoredStates = Iterables.all(consumerState.getState(), new Predicate<StreamFileOffset>() {
            @Override
            public boolean apply(StreamFileOffset input) {
                boolean isExpired = input.getPartitionEnd() < currentTime - streamConfig.getTTL();
                boolean sameGeneration = generation == input.getGeneration();
                return !isExpired && sameGeneration;
            }
        });

        if (useStoredStates) {
            LOG.info("Create file reader with consumer state: {}", consumerState);
            // Has existing offsets, just resume from there.
            MultiLiveStreamFileReader reader = new MultiLiveStreamFileReader(streamConfig,
                    consumerState.getState());
            reader.initialize();
            return reader;
        }
    }

    // TODO: Support starting from some time rather than from the beginning.
    // Otherwise, search for files with the smallest partition start time
    // If no partition exists for the stream, start with one partition earlier than current time to make sure
    // no event will be lost if events start flowing in about the same time.
    long startTime = StreamUtils.getPartitionStartTime(currentTime - streamConfig.getPartitionDuration(),
            streamConfig.getPartitionDuration());
    long earliestNonExpiredTime = StreamUtils.getPartitionStartTime(currentTime - streamConfig.getTTL(),
            streamConfig.getPartitionDuration());

    for (Location partitionLocation : streamLocation.list()) {
        if (!partitionLocation.isDirectory() || !StreamUtils.isPartition(partitionLocation.getName())) {
            // Partition should be a directory
            continue;
        }

        long partitionStartTime = StreamUtils.getPartitionStartTime(partitionLocation.getName());
        boolean isPartitionExpired = partitionStartTime < earliestNonExpiredTime;
        if (!isPartitionExpired && partitionStartTime < startTime) {
            startTime = partitionStartTime;
        }
    }

    // Create file offsets
    // TODO: Be able to support dynamic name of stream writer instances.
    // Maybe it's done through MultiLiveStreamHandler to alter list of file offsets dynamically
    Location partitionLocation = StreamUtils.createPartitionLocation(streamLocation, startTime,
            streamConfig.getPartitionDuration());
    List<StreamFileOffset> fileOffsets = Lists.newArrayList();
    getFileOffsets(partitionLocation, fileOffsets, generation);

    LOG.info("Empty consumer state. Create file reader with file offsets: groupId={}, instanceId={} states={}",
            consumerState.getGroupId(), consumerState.getInstanceId(), fileOffsets);

    MultiLiveStreamFileReader reader = new MultiLiveStreamFileReader(streamConfig, fileOffsets);
    reader.initialize();
    return reader;
}

From source file:com.google.gerrit.pgm.RebuildNoteDb.java

@Override
public int run() throws Exception {
    mustHaveValidSite();
    dbInjector = createDbInjector(MULTI_USER);
    threads = ThreadLimiter.limitThreads(dbInjector, threads);

    LifecycleManager dbManager = new LifecycleManager();
    dbManager.add(dbInjector);
    dbManager.start();

    sysInjector = createSysInjector();
    sysInjector.injectMembers(this);
    if (!notesMigration.enabled()) {
        throw die("NoteDb is not enabled.");
    }
    LifecycleManager sysManager = new LifecycleManager();
    sysManager.add(sysInjector);
    sysManager.start();

    ListeningExecutorService executor = newExecutor();
    System.out.println("Rebuilding the NoteDb");

    ImmutableListMultimap<Project.NameKey, Change.Id> changesByProject = getChangesByProject();
    boolean ok;
    Stopwatch sw = Stopwatch.createStarted();
    try (Repository allUsersRepo = repoManager.openRepository(allUsersName)) {
        deleteRefs(RefNames.REFS_DRAFT_COMMENTS, allUsersRepo);

        List<ListenableFuture<Boolean>> futures = new ArrayList<>();
        List<Project.NameKey> projectNames = Ordering.usingToString().sortedCopy(changesByProject.keySet());
        for (Project.NameKey project : projectNames) {
            ListenableFuture<Boolean> future = executor.submit(() -> {
                try (ReviewDb db = unwrapDb(schemaFactory.open())) {
                    return rebuildProject(db, changesByProject, project, allUsersRepo);
                } catch (Exception e) {
                    log.error("Error rebuilding project " + project, e);
                    return false;
                }
            });
            futures.add(future);
        }

        try {
            ok = Iterables.all(Futures.allAsList(futures).get(), Predicates.equalTo(true));
        } catch (InterruptedException | ExecutionException e) {
            log.error("Error rebuilding projects", e);
            ok = false;
        }
    }

    double t = sw.elapsed(TimeUnit.MILLISECONDS) / 1000d;
    System.out.format("Rebuild %d changes in %.01fs (%.01f/s)\n", changesByProject.size(), t,
            changesByProject.size() / t);
    return ok ? 0 : 1;
}

From source file:com.isotrol.impe3.pms.core.support.Mappers.java

/**
 * Copy the default and localized names from a DTO to the model.
 * @param value Value.
 * @param dto DTO.
 * @throws NullPointerException if any of the names is null.
 * @throws IllegalArgumentException if any of the locales is invalid.
 */
public static void dto2localizedName(WithLocalizedNameDTO dto, WithLocalizedName value) {
    // Validation
    final Map<String, NameDTO> dtoMap = dto.getLocalizedNames();
    Preconditions.checkArgument(Iterables.all(dtoMap.keySet(), MoreLocales.VALID));
    // Copy
    value.setName(DTO2NAME.apply(dto.getDefaultName()));
    Map<String, NameValue> map = value.getLocalizedNames();
    map.clear();
    map.putAll(Maps.transformValues(dtoMap, DTO2NAME));
}

From source file:de.cau.cs.kieler.klay.layered.p5edges.SplineEdgeRouter.java

/**
 * {@inheritDoc}
 */
public void process(final LGraph layeredGraph, final IKielerProgressMonitor monitor) {
    monitor.begin("Spline edge routing", 1);
    double spacing = layeredGraph.getProperty(Properties.OBJ_SPACING);
    float edgeSpaceFac = layeredGraph.getProperty(Properties.EDGE_SPACING_FACTOR);

    double xpos = 0.0, layerSpacing = 0.0;
    for (Layer layer : layeredGraph) {
        boolean externalLayer = Iterables.all(layer, PolylineEdgeRouter.PRED_EXTERNAL_PORT);
        // the rightmost layer is not given any node spacing
        if (externalLayer && xpos > 0) {
            xpos -= spacing;
        }

        // set horizontal coordinates for all nodes of the layer
        layer.placeNodes(xpos);

        double maxVertDiff = 0;
        for (LNode node : layer) {
            // count the maximal vertical difference of output edges
            for (LPort port : node.getPorts(PortType.OUTPUT)) {
                double sourcePos = port.getNode().getPosition().y + port.getPosition().y + port.getAnchor().y;
                for (LPort targetPort : port.getSuccessorPorts()) {
                    if (targetPort.getNode().getLayer() != node.getLayer()) {
                        double targetPos = targetPort.getNode().getPosition().y + targetPort.getPosition().y
                                + targetPort.getAnchor().y;
                        maxVertDiff = KielerMath.maxd(maxVertDiff, targetPos - sourcePos,
                                sourcePos - targetPos);
                    }
                }
            }
        }

        // Determine placement of next layer based on the maximal vertical difference (as the
        // maximum vertical difference edges span grows, the layer grows wider to allow enough
        // space for such sloped edges to avoid too harsh angles)
        layerSpacing = LAYER_SPACE_FAC * edgeSpaceFac * maxVertDiff + 1;
        if (!externalLayer) {
            layerSpacing += spacing;
        }
        xpos += layer.getSize().x + layerSpacing;
    }
    layeredGraph.getSize().x = xpos - layerSpacing;

    // process all edges
    for (Layer layer : layeredGraph) {
        for (LNode node : layer) {
            NodeType sourceNodeType = node.getProperty(Properties.NODE_TYPE);

            if (sourceNodeType != NodeType.LONG_EDGE && sourceNodeType != NodeType.LABEL) {

                for (LPort port : node.getPorts()) {
                    for (LEdge edge : port.getOutgoingEdges()) {
                        NodeType targetNodeType = edge.getTarget().getNode().getProperty(Properties.NODE_TYPE);

                        if (targetNodeType == NodeType.LONG_EDGE || targetNodeType == NodeType.LABEL) {

                            processLongEdge(edge);
                        } else {
                            processShortEdge(edge);
                        }
                    }
                }
            }
        }
    }

    monitor.done();
}

From source file:kr.debop4j.timeperiod.calendars.CalendarPeriodCollector.java

@Override
protected boolean onVisitYear(YearRange year, final CalendarPeriodCollectorContext context) {
    if (isTraceEnabled)
        log.trace("visit year... year=[{}]", year.getYear());

    if (context.getScope() != CalendarPeriodCollectorContext.CollectKind.Month)
        return true;

    if (getFilter().getCollectingMonths().size() == 0) {
        for (MonthRange month : year.getMonths()) {
            if (isMatchingMonth(month, context) && checkLimits(month)) {
                periods.add(month);
            }
        }
    } else {
        for (MonthRangeInYear m : getFilter().getCollectingMonths()) {
            if (m.isSingleMonth()) {
                MonthRange month = new MonthRange(year.getYear(), m.getStartMonthOfYear(),
                        year.getTimeCalendar());
                if (isMatchingMonth(month, context) && checkLimits(month)) {
                    periods.add(month);
                }
            } else {
                MonthRangeCollection months = new MonthRangeCollection(year.getYear(), m.getStartMonthOfYear(),
                        m.getEndMonthOfYear() - m.getStartMonthOfYear(), year.getTimeCalendar());
                boolean isMatching = Iterables.all(months.getMonths(), new Predicate<MonthRange>() {
                    @Override
                    public boolean apply(@Nullable MonthRange input) {
                        return isMatchingMonth(input, context);
                    }
                });
                if (isMatching && checkLimits(months))
                    periods.addAll(months.getMonths());
            }
        }
    }
    return false;
}

From source file:org.splevo.jamopp.refactoring.java.caslicensehandler.cheatsheet.actions.MappingDialog.java

@Override
public boolean close() {
    if (getReturnCode() == Window.OK && !Iterables.all(variants, new Predicate<Variant>() {
        @Override
        public boolean apply(Variant input) {
            return getLicenseConstantName(input.getId()) != null;
        }
    })) {
        setReturnCode(DATA_INCOMPLETE);
    }
    return super.close();
}

From source file:com.google.api.explorer.client.parameter.schema.FieldsEditor.java

/**
 * Returns this field's checked value, or if it has children, whether all its
 * children are checked.
 */
@Override
public Boolean getValue() {
    if (children.isEmpty()) {
        return root.getValue();
    }

    return Iterables.all(children.entrySet(), new Predicate<Map.Entry<String, HasValue<Boolean>>>() {
        @Override
        public boolean apply(Entry<String, HasValue<Boolean>> input) {
            return input.getValue().getValue();
        }
    });
}

From source file:org.grouplens.lenskit.core.RecommenderInstantiator.java

/**
 * Prune the graph, returning the set of nodes for shareable objects
 * (objects that will be replaced with instance satisfactions in the
 * final graph).
 *
 * @param graph The graph to analyze. The graph is not modified.
 * @return The set of root nodes - nodes that need to be instantiated and
 *         removed. These nodes are in topologically sorted order.
 */
private LinkedHashSet<Node> getShareableNodes(Graph graph) {
    LinkedHashSet<Node> shared = new LinkedHashSet<Node>();

    List<Node> nodes = graph.sort(graph.getNode(null));
    for (Node node : nodes) {
        if (!GraphtUtils.isShareable(node)) {
            continue;
        }

        // see if we depend on any non-shared nodes
        // since nodes are sorted, all shared nodes will have been seen
        Set<Edge> intransient = GraphtUtils.removeTransient(graph.getOutgoingEdges(node));
        boolean isShared = Iterables.all(Iterables.transform(intransient, GraphtUtils.edgeTail()),
                Predicates.in(shared));
        if (isShared) {
            shared.add(node);
        }
    }

    return shared;
}