Example usage for java.util Collections newSetFromMap

Introduction

On this page you can find example usages of java.util.Collections.newSetFromMap.

Prototype

public static <E> Set<E> newSetFromMap(Map<E, Boolean> map) 

Document

Returns a set backed by the specified map.
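
A minimal sketch (not taken from the examples below) of the two properties the usages on this page rely on: the backing map supplies the set's semantics, so a ConcurrentHashMap yields a thread-safe set and a WeakHashMap yields a set with weakly referenced elements. The class name NewSetFromMapDemo is illustrative only.

import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class NewSetFromMapDemo {
    public static void main(String[] args) {
        // The map must be empty when passed in and must not be accessed
        // directly afterwards; all access goes through the returned set.
        Set<String> ids = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
        ids.add("msg-1");
        ids.add("msg-1"); // duplicate; add() returns false and the set is unchanged
        System.out.println(ids.size()); // prints 1
        // Because the backing map is a ConcurrentHashMap, the returned set is
        // safe for concurrent use, unlike a plain HashSet.
    }
}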

Usage

From source file:org.opendaylight.groupbasedpolicy.renderer.opflex.PolicyManagerTest.java

@Test
public void testGroupEndpointUpdated() throws Exception {
    EgKey sepgKey = mock(EgKey.class);
    EgKey depgKey = mock(EgKey.class);
    Policy mockPolicy = mock(Policy.class);
    TenantId tId = mock(TenantId.class);
    EndpointGroupId epgId = mock(EndpointGroupId.class);

    Set<EgKey> egKeySet = Collections.newSetFromMap(new ConcurrentHashMap<EgKey, Boolean>());
    egKeySet.add(depgKey);

    when(mockResolver.getCurrentPolicy()).thenReturn(mockPolicyInfo);
    when(mockConnService.getOpflexAgent(anyString())).thenReturn(mockAgent);
    when(mockPolicyInfo.getPeers(sepgKey)).thenReturn(egKeySet);
    when(mockPolicyInfo.getPolicy(sepgKey, depgKey)).thenReturn(mockPolicy);
    when(mockAgent.getEndpoint()).thenReturn(dummyEndpoint);
    when(sepgKey.getTenantId()).thenReturn(tId);
    when(sepgKey.getEgId()).thenReturn(epgId);

    /*
     * Add some EPGs to enable messaging
     */
    //policyManager.dirty.get().addEndpointGroup(sepgKey);
    //policyManager.dirty.get().addEndpointGroup(depgKey);

    /*
     * Add a single agent
     */
    //policyManager.dirty.get().addAgent(TEST_AGENT_ID);

    //policyManager.groupEndpointUpdated(sepgKey, epKey);

    verify(mockAgent, timeout(TEST_TIMEOUT)).getEndpoint();

}

From source file:org.jboss.aerogear.sync.server.ServerSyncEngine.java

/**
 * Removes the specified {@link Subscriber}.
 *
 * @param subscriber the {@link Subscriber} to remove
 * @param documentId the document id that the subscriber subscribes to
 */
public void removeSubscriber(final Subscriber<?> subscriber, final String documentId) {
    while (true) {
        final Set<Subscriber<?>> currentClients = subscribers.get(documentId);
        if (currentClients == null || currentClients.isEmpty()) {
            break;
        }
        final Set<Subscriber<?>> newClients = Collections
                .newSetFromMap(new ConcurrentHashMap<Subscriber<?>, Boolean>());
        newClients.addAll(currentClients);
        final boolean removed = newClients.remove(subscriber);
        if (!removed) {
            // The subscriber was not in the snapshot: nothing to swap, so exit
            // rather than spinning forever on an unchanged set.
            break;
        }
        if (subscribers.replace(documentId, currentClients, newClients)) {
            break;
        }
    }
}
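
The copy-then-swap pattern above is worth noting: rather than mutating the published set in place, the method clones it, removes the subscriber from the copy, and installs the copy with ConcurrentMap.replace(documentId, currentClients, newClients), which succeeds only if no other thread has swapped the entry in the meantime; on failure the loop retries against a fresh snapshot. Readers iterating over the current set therefore never observe it being mutated underneath them.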

From source file:io.reign.zk.ResilientZkClient.java

/**
 * getChildren() sets child watches.
 * 
 * @param path
 * @param watcher
 */
void trackChildWatch(String path, Watcher watcher) {
    Set<Watcher> watcherSet = childWatchesMap.get(path);
    if (watcherSet == null) {
        Set<Watcher> newWatcherSet = Collections
                .newSetFromMap(new ConcurrentHashMap<Watcher, Boolean>(4, 0.9f, 1));
        watcherSet = childWatchesMap.putIfAbsent(path, newWatcherSet);
        if (watcherSet == null) {
            watcherSet = newWatcherSet;
        }
    }
    watcherSet.add(watcher);
}
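
The putIfAbsent dance above targets pre-Java 8 APIs (note the explicit initial-capacity, load-factor, and concurrency-level constructor arguments). On Java 8 and later the same lazy initialization collapses into a single atomic call; a sketch, assuming childWatchesMap is a ConcurrentMap<String, Set<Watcher>>:

void trackChildWatch(String path, Watcher watcher) {
    // Atomically create the per-path set on first use, then add the watcher.
    childWatchesMap.computeIfAbsent(path,
            k -> Collections.newSetFromMap(new ConcurrentHashMap<Watcher, Boolean>()))
            .add(watcher);
}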

From source file:org.apache.hadoop.yarn.server.nodemanager.DirectoryCollection.java

/**
 * Create collection for the directories specified. Users must specify the
 * maximum percentage of disk utilization allowed and the minimum amount of
 * free space that must be available for the dir to be used. If either check
 * fails the dir is removed from the good dirs list.
 *
 * @param dirs
 *          directories to be monitored
 * @param utilizationPercentageCutOffHigh
 *          percentage of disk that can be used before the dir is taken out of
 *          the good dirs list
 * @param utilizationPercentageCutOffLow
 *          percentage of disk that can be used when the dir is moved from
 *          the bad dirs list to the good dirs list
 * @param utilizationSpaceCutOff
 *          minimum space, in MB, that must be available on the disk for the
 *          dir to be marked as good
 * 
 */
public DirectoryCollection(String[] dirs, float utilizationPercentageCutOffHigh,
        float utilizationPercentageCutOffLow, long utilizationSpaceCutOff) {
    localDirs = new CopyOnWriteArrayList<>(dirs);
    errorDirs = new CopyOnWriteArrayList<>();
    fullDirs = new CopyOnWriteArrayList<>();

    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    this.readLock = lock.readLock();
    this.writeLock = lock.writeLock();

    diskUtilizationPercentageCutoffHigh = Math.max(0.0F, Math.min(100.0F, utilizationPercentageCutOffHigh));
    diskUtilizationPercentageCutoffLow = Math.max(0.0F,
            Math.min(diskUtilizationPercentageCutoffHigh, utilizationPercentageCutOffLow));
    diskUtilizationSpaceCutoff = utilizationSpaceCutOff < 0 ? 0 : utilizationSpaceCutOff;

    dirsChangeListeners = Collections.newSetFromMap(new ConcurrentHashMap<DirsChangeListener, Boolean>());
}

From source file:org.apache.helix.messaging.handling.HelixTaskExecutor.java

public HelixTaskExecutor(ParticipantStatusMonitor participantStatusMonitor,
        MessageQueueMonitor messageQueueMonitor) {
    _monitor = participantStatusMonitor;
    _messageQueueMonitor = messageQueueMonitor;

    _taskMap = new ConcurrentHashMap<>();

    _hdlrFtyRegistry = new ConcurrentHashMap<>();
    _executorMap = new ConcurrentHashMap<>();
    _messageTaskMap = new ConcurrentHashMap<>();
    _knownMessageIds = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
    _batchMessageExecutorService = Executors.newCachedThreadPool();
    _monitor.createExecutorMonitor("BatchMessageExecutor", _batchMessageExecutorService);

    _resourcesThreadpoolChecked = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
    _transitionTypeThreadpoolChecked = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());

    _lock = new Object();
    _statusUpdateUtil = new StatusUpdateUtil();

    _timer = new Timer(true); // created as a daemon timer thread to handle task timeout

    _isShuttingDown = false;

    startMonitorThread();
}

From source file:org.apache.tez.runtime.library.broadcast.input.BroadcastShuffleManager.java

public BroadcastShuffleManager(TezInputContext inputContext, Configuration conf, int numInputs)
        throws IOException {
    this.inputContext = inputContext;
    this.conf = conf;
    this.numInputs = numInputs;

    if (ConfigUtils.isIntermediateInputCompressed(conf)) {
        Class<? extends CompressionCodec> codecClass = ConfigUtils.getIntermediateInputCompressorClass(conf,
                DefaultCodec.class);
        codec = ReflectionUtils.newInstance(codecClass, conf);
    } else {
        codec = null;
    }

    this.ifileReadAhead = conf.getBoolean(TezJobConfig.TEZ_RUNTIME_IFILE_READAHEAD,
            TezJobConfig.TEZ_RUNTIME_IFILE_READAHEAD_DEFAULT);
    if (this.ifileReadAhead) {
        this.ifileReadAheadLength = conf.getInt(TezJobConfig.TEZ_RUNTIME_IFILE_READAHEAD_BYTES,
                TezJobConfig.TEZ_RUNTIME_IFILE_READAHEAD_BYTES_DEFAULT);
    } else {
        this.ifileReadAheadLength = 0;
    }
    this.ifileBufferSize = conf.getInt("io.file.buffer.size",
            TezJobConfig.TEZ_RUNTIME_IFILE_BUFFER_SIZE_DEFAULT);

    this.inputManager = new BroadcastInputManager(inputContext.getUniqueIdentifier(), conf);
    this.inputEventHandler = new BroadcastShuffleInputEventHandler(inputContext, this, this.inputManager, codec,
            ifileReadAhead, ifileReadAheadLength);

    completedInputSet = Collections.newSetFromMap(new ConcurrentHashMap<InputIdentifier, Boolean>(numInputs));
    completedInputs = new LinkedBlockingQueue<FetchedInput>(numInputs);
    knownSrcHosts = new ConcurrentHashMap<String, InputHost>();
    pendingHosts = new LinkedBlockingQueue<InputHost>();
    obsoletedInputs = Collections.newSetFromMap(new ConcurrentHashMap<InputAttemptIdentifier, Boolean>());

    int maxConfiguredFetchers = conf.getInt(TezJobConfig.TEZ_RUNTIME_SHUFFLE_PARALLEL_COPIES,
            TezJobConfig.DEFAULT_TEZ_RUNTIME_SHUFFLE_PARALLEL_COPIES);

    this.numFetchers = Math.min(maxConfiguredFetchers, numInputs);

    this.fetcherRawExecutor = Executors.newFixedThreadPool(numFetchers, new ThreadFactoryBuilder()
            .setDaemon(true).setNameFormat("Fetcher [" + inputContext.getUniqueIdentifier() + "] #%d").build());
    this.fetcherExecutor = MoreExecutors.listeningDecorator(fetcherRawExecutor);

    this.schedulerRawExecutor = Executors.newFixedThreadPool(1, new ThreadFactoryBuilder().setDaemon(true)
            .setNameFormat("ShuffleRunner [" + inputContext.getUniqueIdentifier() + "]").build());
    this.schedulerExecutor = MoreExecutors.listeningDecorator(schedulerRawExecutor);

    this.startTime = System.currentTimeMillis();
    this.lastProgressTime = startTime;

    this.shuffleSecret = ShuffleUtils.getJobTokenSecretFromTokenBytes(
            inputContext.getServiceConsumerMetaData(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID));

    this.connectionTimeout = conf.getInt(TezJobConfig.TEZ_RUNTIME_SHUFFLE_CONNECT_TIMEOUT,
            TezJobConfig.DEFAULT_TEZ_RUNTIME_SHUFFLE_STALLED_COPY_TIMEOUT);
    this.readTimeout = conf.getInt(TezJobConfig.TEZ_RUNTIME_SHUFFLE_READ_TIMEOUT,
            TezJobConfig.DEFAULT_TEZ_RUNTIME_SHUFFLE_READ_TIMEOUT);

    LOG.info("BroadcastShuffleManager -> numInputs: " + numInputs + " compressionCodec: "
            + (codec == null ? "NoCompressionCodec" : codec.getClass().getName()) + ", numFetchers: "
            + numFetchers);
}
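
Note the sizing hint: the backing map for completedInputSet is constructed with numInputs as its initial capacity, since the set holds at most one entry per input, which avoids rehashing as inputs complete.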

From source file:mobi.tattu.utils.image.ImageCache.java

@SuppressLint("NewApi")
public void getReusableBitmaps() {
    // If we're running on Honeycomb or newer, create a set of reusable bitmaps
    // that can be populated into the inBitmap field of BitmapFactory.Options.
    if (Utils.hasHoneycomb()) {
        mReusableBitmaps = Collections.newSetFromMap(new ConcurrentHashMap<SoftReference<Bitmap>, Boolean>());
    }
}
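
This appears to follow the standard Android bitmap-caching pattern: on Honeycomb (API 11) and newer, soft references to bitmaps are collected in a concurrent set so they can later be offered to BitmapFactory.Options.inBitmap for reuse. newSetFromMap over a ConcurrentHashMap makes add/remove thread-safe, while the SoftReference wrappers still let the garbage collector reclaim the bitmaps under memory pressure.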

From source file:edu.umass.cs.gnsclient.client.testing.ThroughputAsynchMultiClientTest.java

/**
 * Creates a ThroughputAsynchMultiClientTest instance with the given arguments.
 *
 * @param alias - the alias to use to create the account guid. null uses "boo@hoo.com".
 */
public ThroughputAsynchMultiClientTest(String alias) {
    InetSocketAddress address;
    if (alias != null) {
        accountAlias = alias;
    }

    clients = new GNSClientCommands[numberOfClients];
    subGuids = new String[numberOfGuids];
    commmandPackets = new CommandPacket[numberOfGuids][numberOfClients];
    execPool = Executors.newFixedThreadPool(numberOfClients);
    chosen = Collections.newSetFromMap(new ConcurrentHashMap<Integer, Boolean>());
    try {
        for (int i = 0; i < numberOfClients; i++) {
            clients[i] = new GNSClientCommands(null);
        }
    } catch (IOException e) {
        System.out.println("Unable to create client: " + e);
        e.printStackTrace();
        System.exit(1);
    }
    try {
        masterGuid = GuidUtils.lookupOrCreateAccountGuid(clients[0], accountAlias, "password", true);
    } catch (Exception e) {
        System.out.println("Exception when we were not expecting it: " + e);
        e.printStackTrace();
        System.exit(1);
    }
}

From source file:com.mirth.connect.connectors.ws.WebServiceDispatcher.java

@Override
public void onStart() throws ConnectorTaskException {
    if (timeout == 0) {
        executor = Executors.newCachedThreadPool();
        dispatchTasks = Collections.newSetFromMap(new ConcurrentHashMap<DispatchTask<SOAPMessage>, Boolean>());
    }
}

From source file:org.springframework.integration.redis.util.RedisLockRegistry.java

/**
 * Weakly referenced locks; a lock object is kept here while the actual lock
 * has NOT been acquired. Used for obtaining the same lock object for the same
 * thread and key. To avoid memory leaks, lock objects that do not hold the
 * actual lock are kept only as weak references. Once the actual lock is
 * acquired, the lock object moves from a weak reference to a hard reference,
 * and vice versa.
 */
private Collection<RedisLock> getWeakThreadLocks() {
    Set<RedisLock> locks = this.weakThreadLocks.get();
    if (locks == null) {
        locks = Collections.newSetFromMap(new WeakHashMap<RedisLock, Boolean>());
        this.weakThreadLocks.set(locks);
    }
    return locks;
}