Example usage for com.google.common.collect Sets newSetFromMap

Introduction

On this page you can find example usages of com.google.common.collect.Sets.newSetFromMap, collected from open-source projects.

Prototype

@Deprecated
public static <E> Set<E> newSetFromMap(Map<E, Boolean> map) 

Document

Returns a set backed by the specified map. The method is marked @Deprecated: java.util.Collections.newSetFromMap (available since Java 6) provides the same functionality and is Guava's recommended replacement.
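
A minimal, self-contained sketch of the basic pattern (class and variable names are illustrative, not from any of the projects quoted below): the set stores Boolean.TRUE against each element in the backing map, so it inherits the map's concurrency behaviour, and the map must be empty when it is handed over.

import java.util.Collections;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

import com.google.common.collect.Sets;

public class NewSetFromMapExample {
    public static void main(String[] args) {
        // The backing map must be empty; the set writes Boolean.TRUE
        // as the value for every element it holds.
        Map<String, Boolean> backing = new ConcurrentHashMap<String, Boolean>();
        Set<String> concurrentSet = Sets.newSetFromMap(backing);

        concurrentSet.add("alpha");
        System.out.println(concurrentSet.contains("alpha")); // true
        System.out.println(backing); // {alpha=true}

        // Non-deprecated equivalents:
        Set<String> viaJdk = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
        Set<String> viaGuava = Sets.newConcurrentHashSet();
    }
}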

Usage

From source file:co.cask.tigon.sql.internal.HealthInspector.java

/**
 * Constructor for HealthInspector, which ensures the liveness of the SQL Compiler processes.
 * If the class fails to detect a heartbeat from every registered process within the last
 * two seconds, it invokes the {@link ProcessMonitor#notifyFailure(java.util.Set)} defined
 * in the {@link ProcessMonitor} object.
 *
 * @param processMonitor The reference of the class that implements
 * {@link ProcessMonitor#notifyFailure(java.util.Set)}
 */
public HealthInspector(ProcessMonitor processMonitor) {
    serviceFailed = processMonitor;
    Map<String, Boolean> heartbeatMap = Maps.newConcurrentMap();
    Map<String, Boolean> modelMap = Maps.newConcurrentMap();
    heartbeatCounter = Sets.newSetFromMap(heartbeatMap);
    modelCounter = Sets.newSetFromMap(modelMap);
}
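
Since Guava 15.0, the map-then-wrap steps above can be collapsed into a single call; Sets.newConcurrentHashSet() does exactly this internally. A sketch reusing the field names from the quoted constructor:

heartbeatCounter = Sets.newConcurrentHashSet();
modelCounter = Sets.newConcurrentHashSet();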

From source file:com.opengamma.engine.view.compilation.ViewCompilationContext.java

ViewCompilationContext(final ViewDefinition viewDefinition,
        final ViewCompilationServices compilationServices, final Instant valuationTime,
        final VersionCorrection resolverVersionCorrection,
        final ConcurrentMap<ComputationTargetReference, UniqueId> resolutions) {
    _viewDefinition = viewDefinition;
    _services = compilationServices;
    _builders = new LinkedList<DependencyGraphBuilder>();
    _expiredResolutions = Sets.newSetFromMap(new ConcurrentHashMap<UniqueId, Boolean>());
    _rules = compilationServices.getFunctionResolver().compile(valuationTime).getAllResolutionRules();
    _targetResolver = TargetResolutionLogger.of(compilationServices.getFunctionCompilationContext()
            .getRawComputationTargetResolver().atVersionCorrection(resolverVersionCorrection), resolutions,
            _expiredResolutions);
    for (final ViewCalculationConfiguration calcConfig : viewDefinition.getAllCalculationConfigurations()) {
        _builders.add(createBuilder(calcConfig));
    }
    _resolverVersionCorrection = resolverVersionCorrection;
    _graphs = new ArrayList<DependencyGraph>(_builders.size());
    _activeResolutions = resolutions;
}

From source file:com.spotify.hdfs2cass.cassandra.thrift.ProgressIndicator.java

public void handleStreamEvent(StreamEvent event) {
    if (event.eventType == StreamEvent.Type.STREAM_PREPARED) {
        SessionInfo session = ((StreamEvent.SessionPreparedEvent) event).session;
        sessionsByHost.put(session.peer, session);
    } else if (event.eventType == StreamEvent.Type.FILE_PROGRESS) {
        ProgressInfo progressInfo = ((StreamEvent.ProgressEvent) event).progress;

        // update progress
        Set<ProgressInfo> progresses = progressByHost.get(progressInfo.peer);
        if (progresses == null) {
            progresses = Sets.newSetFromMap(Maps.<ProgressInfo, Boolean>newConcurrentMap());
            progressByHost.put(progressInfo.peer, progresses);
        }
        if (progresses.contains(progressInfo))
            progresses.remove(progressInfo);
        progresses.add(progressInfo);

        StringBuilder sb = new StringBuilder();
        sb.append("\rprogress: ");

        long totalProgress = 0;
        long totalSize = 0;
        for (Map.Entry<InetAddress, Set<ProgressInfo>> entry : progressByHost.entrySet()) {
            SessionInfo session = sessionsByHost.get(entry.getKey());

            long size = session.getTotalSizeToSend();
            long current = 0;
            int completed = 0;
            for (ProgressInfo progress : entry.getValue()) {
                if (progress.currentBytes == progress.totalBytes)
                    completed++;
                current += progress.currentBytes;
            }
            totalProgress += current;
            totalSize += size;
            sb.append("[").append(entry.getKey());
            sb.append(" ").append(completed).append("/").append(session.getTotalFilesToSend());
            sb.append(" (").append(size == 0 ? 100L : current * 100L / size).append("%)] ");
        }
        long time = System.nanoTime();
        long deltaTime = TimeUnit.NANOSECONDS.toMillis(time - lastTime);
        lastTime = time;
        long deltaProgress = totalProgress - lastProgress;
        lastProgress = totalProgress;

        sb.append("[total: ").append(totalSize == 0 ? 100L : totalProgress * 100L / totalSize).append("% - ");
        sb.append(mbPerSec(deltaProgress, deltaTime)).append("MB/s");
        sb.append(" (avg: ").append(mbPerSec(totalProgress, TimeUnit.NANOSECONDS.toMillis(time - start)))
                .append("MB/s)]");

        System.out.print(sb.toString());
    }
}
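
If handleStreamEvent can be invoked from more than one thread, the get/null-check/put sequence on progressByHost can lose a freshly created set to a race. A sketch of an atomic alternative, assuming Java 8+ and that progressByHost is a ConcurrentMap (its declaration is not shown above):

Set<ProgressInfo> progresses = progressByHost.computeIfAbsent(progressInfo.peer,
        peer -> Sets.newSetFromMap(Maps.<ProgressInfo, Boolean>newConcurrentMap()));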

From source file:com.comphenix.xp.extra.ServiceProvider.java

/**
 * Copy everything from the given provider.
 * @param other - the given provider.
 */
public ServiceProvider(ServiceProvider<TService> other) {
    this.defaultName = other.defaultName;
    this.nameLookup = new ConcurrentHashMap<String, TService>(other.nameLookup);
    this.disabledLookup = Sets.newSetFromMap(new ConcurrentHashMap<String, Boolean>());

    // Insert all disabled elements
    for (String disabled : other.disabledLookup) {
        this.disabledLookup.add(disabled);
    }
}
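
The element-by-element loop is equivalent to a bulk copy; assuming nothing beyond Set.add is needed per element, the constructor body could end with:

this.disabledLookup.addAll(other.disabledLookup);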

From source file:org.jboss.weld.bootstrap.ConcurrentValidator.java

@Override
public void validateBeans(Collection<? extends Bean<?>> beans, final BeanManagerImpl manager) {
    final List<RuntimeException> problems = new CopyOnWriteArrayList<RuntimeException>();
    final Set<CommonBean<?>> specializedBeans = Sets
            .newSetFromMap(new ConcurrentHashMap<CommonBean<?>, Boolean>());

    executor.invokeAllAndCheckForExceptions(new IterativeWorkerTaskFactory<Bean<?>>(beans) {
        protected void doWork(Bean<?> bean) {
            validateBean(bean, specializedBeans, manager, problems);
        }
    });

    if (!problems.isEmpty()) {
        if (problems.size() == 1) {
            throw problems.get(0);
        } else {
            throw new DeploymentException(problems);
        }
    }
}

From source file:org.apache.hadoop.util.curator.ChildReaper.java

/**
 * Creates a thread-safe set backed by a hash map. The set is backed by a
 * {@link ConcurrentHashMap} instance, and thus carries the same concurrency
 * guarantees.
 *
 * <p>Unlike {@code HashSet}, this class does NOT allow {@code null} to be
 * used as an element. The set is serializable.
 *
 * @return a new, empty thread-safe {@code Set}
 * @since 15.0
 */
public static <E> Set<E> newConcurrentHashSet() {
    return Sets.newSetFromMap(new ConcurrentHashMap<E, Boolean>());
}
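
This helper mirrors Guava's own Sets.newConcurrentHashSet. A usage sketch (the call site is illustrative; ChildReaper is the class quoted above):

Set<String> reapedPaths = ChildReaper.newConcurrentHashSet();
reapedPaths.add("/locks/lock-0001"); // safe to call from multiple threads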

From source file:com.leacox.dagger.servlet.ManagedFilterPipeline.java

@Override
public synchronized void initPipeline(ServletContext servletContext) throws ServletException {
    //double-checked lock, prevents duplicate initialization
    if (initialized)
        return;

    // Used to prevent duplicate initialization.
    Set<Filter> initializedSoFar = Sets.newSetFromMap(Maps.<Filter, Boolean>newIdentityHashMap());

    for (FilterDefinition filterDefinition : filterDefinitions) {
        filterDefinition.init(servletContext, objectGraph, initializedSoFar);
    }

    //next, initialize servlets...
    servletPipeline.init(servletContext, objectGraph);

    //everything was ok...
    initialized = true;
}
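
Backing the set with an IdentityHashMap means elements are deduplicated by reference rather than by equals, which is the right semantics when the same Filter instance can appear under several definitions. A small illustrative sketch (filterA and filterACopy are hypothetical variables):

Set<Filter> seen = Sets.newSetFromMap(Maps.<Filter, Boolean>newIdentityHashMap());
seen.add(filterA);
seen.add(filterA);     // no effect: same instance
seen.add(filterACopy); // kept, even if filterACopy.equals(filterA)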

From source file:com.volumetricpixels.rockyplugin.chunk.ChunkCache.java

/**
 * Gets the cache for the given player, creating a new entry if the player
 * does not have one yet.
 *
 * @param player
 *            the name of the player
 * @return the chunk cache structure
 */
public synchronized Set<Long> getPlayerCache(String player) {
    if (!playerCache.containsKey(player)) {
        playerCache.put(player, Sets.newSetFromMap(new ConcurrentHashMap<Long, Boolean>()));
    }
    return playerCache.get(player);
}
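
If playerCache were declared as a ConcurrentMap, the synchronized check-then-put could be replaced by a single atomic call and the method could drop its lock entirely (a sketch under that assumption, Java 8+):

public Set<Long> getPlayerCache(String player) {
    return playerCache.computeIfAbsent(player,
            p -> Sets.newSetFromMap(new ConcurrentHashMap<Long, Boolean>()));
}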

From source file:com.leacox.dagger.servlet.ManagedServletPipeline.java

public void destroy() {
    Set<HttpServlet> destroyedSoFar = Sets.newSetFromMap(Maps.<HttpServlet, Boolean>newIdentityHashMap());
    for (ServletDefinition servletDefinition : servletDefinitions) {
        servletDefinition.destroy(destroyedSoFar);
    }
}

From source file:uk.ac.cam.cl.dtg.picky.engine.Engine.java

public Engine(CachedReadingStrategy readingStrategy, Plan plan) {
    this.readingStrategy = readingStrategy;
    this.repository = new ReadingRepository(readingStrategy);
    this.plan = plan;
    this.chunksToDownload = Sets.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
    this.chunksToDownload
            .addAll(plan.getChunksToDownload().stream().map(Chunk::getBlobId).collect(Collectors.toList()));
    this.chunksById = plan.getChunksToDownload().stream().collect(Collectors.toMap(Chunk::getBlobId, (c) -> c));

    int availableProcessors = Runtime.getRuntime().availableProcessors();

    this.sequentialExecutor = new PausableExecutor(1, new WorkerThreadFactory("engine_sequential_worker"));
    this.parallelExecutor = new PausableExecutor(availableProcessors,
            new WorkerThreadFactory("engine_parallel_worker"));

    this.totalActionNumber = new ImmutableMap.Builder<Action, Integer>()
            .put(Action.DELETE_DIR, plan.getDeleteDirActions().size())
            .put(Action.DELETE_FILE, plan.getDeleteFileActions().size())
            .put(Action.DOWNLOAD_CHUNK, plan.getChunksToDownload().size())
            .put(Action.INSTALL_FILE, plan.getInstallFileActions().size())
            .put(Action.MAKE_DIR, plan.getMakeDirActions().size())
            .put(Action.UPDATE_FILE, plan.getUpdateFileActions().size()).build();
}
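
Guava also offers an overload, Sets.newConcurrentHashSet(Iterable), available since 15.0, which would create and populate chunksToDownload in one step (a sketch reusing the expressions from the quoted constructor):

this.chunksToDownload = Sets.newConcurrentHashSet(
        plan.getChunksToDownload().stream().map(Chunk::getBlobId).collect(Collectors.toList()));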