Example usage for java.util Collections newSetFromMap

Introduction

This page collects example usages of java.util Collections newSetFromMap, drawn from open-source projects.

Prototype

public static <E> Set<E> newSetFromMap(Map<E, Boolean> map) 

Document

Returns a set backed by the specified map. The resulting set displays the same ordering, concurrency, and performance characteristics as the backing map. The specified map must be empty at the time this method is invoked and should not be accessed directly after this method returns.
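
Because the JDK provides no ConcurrentHashSet class, the most common use of newSetFromMap is to build a thread-safe Set on top of a ConcurrentHashMap, a pattern most of the examples below follow. A minimal, self-contained sketch (class and variable names are illustrative):

import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class NewSetFromMapExample {
    public static void main(String[] args) {
        // The backing map must be empty when handed over; the set
        // inherits the map's concurrency and performance characteristics.
        Set<String> concurrentSet = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
        concurrentSet.add("alpha");
        concurrentSet.add("beta");
        concurrentSet.add("alpha"); // duplicate, silently ignored
        System.out.println(concurrentSet.size()); // prints 2
    }
}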

Usage

From source file:org.apache.tajo.master.DefaultFragmentScheduleAlgorithm.java

private void addFragment(String host, Integer diskId, FragmentPair fragmentPair) {
    // update the fragment maps per host
    String normalizeHost = NetUtils.normalizeHost(host);
    Map<Integer, FragmentsPerDisk> diskFragmentMap;
    if (fragmentHostMapping.containsKey(normalizeHost)) {
        diskFragmentMap = fragmentHostMapping.get(normalizeHost);
    } else {
        diskFragmentMap = new HashMap<Integer, FragmentsPerDisk>();
        fragmentHostMapping.put(normalizeHost, diskFragmentMap);
    }
    FragmentsPerDisk fragmentsPerDisk;
    if (diskFragmentMap.containsKey(diskId)) {
        fragmentsPerDisk = diskFragmentMap.get(diskId);
    } else {
        fragmentsPerDisk = new FragmentsPerDisk(diskId);
        diskFragmentMap.put(diskId, fragmentsPerDisk);
    }
    fragmentsPerDisk.addFragmentPair(fragmentPair);

    // update the fragment maps per rack
    String rack = RackResolver.resolve(normalizeHost).getNetworkLocation();
    Set<FragmentPair> fragmentPairList;
    if (rackFragmentMapping.containsKey(rack)) {
        fragmentPairList = rackFragmentMapping.get(rack);
    } else {
        fragmentPairList = Collections.newSetFromMap(new HashMap<FragmentPair, Boolean>());
        rackFragmentMapping.put(rack, fragmentPairList);
    }
    fragmentPairList.add(fragmentPair);
}
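
Note that the rack-level set above is backed by a plain HashMap, which makes it behaviorally equivalent to a HashSet; newSetFromMap earns its keep with map implementations that have no Set counterpart, such as ConcurrentHashMap, WeakHashMap, or IdentityHashMap.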

From source file:knowledgeMiner.InformationDripBootstrapping.java

/**
 * Run the experiment by starting with a seed concept/article and rippling
 * outwards to other linked concepts/articles. When the maximum ripple is
 * reached, the process repeats for the configured number of repeats.
 */
private void run() {
    ResourceAccess.newInstance();
    IOManager.newInstance();
    KnowledgeMiner.readInOntologyMappings(initialRunNumber_);
    Executor executor = Executors.newFixedThreadPool(KnowledgeMiner.getNumThreads());
    pool_ = new ExecutorCompletionService<Collection<ConceptModule>>(executor);
    for (int i = 0; i < repeats_; i++) {
        KnowledgeMiner.runID_ = initialRunNumber_ + i;

        // Set up completed collections
        Set<OntologyConcept> completedConcepts = Collections
                .newSetFromMap(new ConcurrentHashMap<OntologyConcept, Boolean>());
        Set<Integer> completedArticles = Collections.newSetFromMap(new ConcurrentHashMap<Integer, Boolean>());

        // Add the initial
        Collection<ConceptModule> rippleLayer = new HashSet<>();
        rippleLayer.add(initial_);

        int maxRipples = (maxRipple_ == -1) ? Integer.MAX_VALUE : maxRipple_;
        for (int r = 0; r <= maxRipples; r++) {
            System.out.println("\nRipple " + r + ": " + rippleLayer.size() + " tasks to process.\n");
            int count = 0;

            // Simultaneously process every concept in the ripple layer
            System.out.print(count++ + ": ");
            for (ConceptModule cm : rippleLayer) {
                pool_.submit(new RippleTask(cm, r != maxRipples, completedArticles, completedConcepts));
            }

            // Wait for the tasks to finish and store results
            Collection<ConceptModule> nextLayer = new HashSet<>();
            for (int j = 0; j < rippleLayer.size(); j++) {
                try {
                    // Get the results and process them.
                    Collection<ConceptModule> result = pool_.take().get();
                    if (count <= rippleLayer.size())
                        System.out.print(count++ + ": ");
                    if (r == maxRipples)
                        continue;

                    // Add the articles/concepts to the next ripple layer
                    for (ConceptModule cm : result) {
                        if (cm.getConcept() != null && !completedConcepts.contains(cm.getConcept()))
                            nextLayer.add(cm);
                        else if (cm.getArticle() != -1 && !completedArticles.contains(cm.getArticle()))
                            nextLayer.add(cm);
                    }
                } catch (InterruptedException e) {
                    e.printStackTrace();
                } catch (ExecutionException e) {
                    e.printStackTrace();
                }
            }
            rippleLayer = nextLayer;

            // TODO Record details of this run

            // Clear preprocessed data
            KnowledgeMinerPreprocessor.getInstance().writeHeuristics();

            if (rippleLayer.isEmpty())
                break;
        }
    }
}
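
Both completed sets are shared with the RippleTask instances submitted to the thread pool, so they must be thread-safe; wrapping a ConcurrentHashMap with newSetFromMap, as here, was the standard way to obtain a concurrent hash set before Java 8. On Java 8 and later an equivalent one-liner is available (a minimal sketch):

Set<OntologyConcept> completedConcepts = ConcurrentHashMap.newKeySet();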

From source file:org.jenkinsci.plugins.mesos.JenkinsScheduler.java

public JenkinsScheduler(String jenkinsMaster, MesosCloud mesosCloud) {
    LOGGER.info("JenkinsScheduler instantiated with jenkins " + jenkinsMaster + " and mesos "
            + mesosCloud.getMaster());

    this.jenkinsMaster = jenkinsMaster;
    this.mesosCloud = mesosCloud;

    requests = new LinkedList<Request>();
    results = new HashMap<TaskID, Result>();
    finishedTasks = Collections.newSetFromMap(new ConcurrentHashMap<TaskID, Boolean>());
}
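
Only finishedTasks receives a concurrent backing here; requests and results are plain collections, which suggests the scheduler guards them with external synchronization.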

From source file:org.opendaylight.groupbasedpolicy.renderer.opflex.PolicyManagerTest.java

public void testPolicyUpdated() throws Exception {
    EgKey sepgKey = mock(EgKey.class);
    EgKey depgKey = mock(EgKey.class);
    Policy mockPolicy = mock(Policy.class);

    Set<EgKey> degKeySet = Collections.newSetFromMap(new ConcurrentHashMap<EgKey, Boolean>());
    degKeySet.add(depgKey);
    Set<EgKey> segKeySet = Collections.newSetFromMap(new ConcurrentHashMap<EgKey, Boolean>());
    segKeySet.add(sepgKey);

    when(mockResolver.getCurrentPolicy()).thenReturn(mockPolicyInfo);
    when(mockConnService.getOpflexAgent(anyString())).thenReturn(mockAgent);
    when(mockPolicyInfo.getPeers(sepgKey)).thenReturn(degKeySet);
    when(mockPolicyInfo.getPolicy(sepgKey, depgKey)).thenReturn(mockPolicy);
    when(mockAgent.getEndpoint()).thenReturn(dummyEndpoint);

    /*
     * Add some EPGs to enable messaging
     */
    //policyManager.dirty.get().addEndpointGroup(sepgKey);
    //policyManager.dirty.get().addEndpointGroup(depgKey);

    /*
     * Add a single agent
     */
    //policyManager.dirty.get().addAgent(TEST_AGENT_ID);

    policyManager.policyUpdated(segKeySet);

    verify(mockAgent, timeout(TEST_TIMEOUT)).getEndpoint();

}

From source file:org.jboss.aerogear.sync.server.ServerSyncEngine.java

/**
 * Connects a subscriber to an already existing document.
 *
 * @param subscriber the {@link Subscriber} to add
 * @param documentId the id of the document that the subscriber wants to subscribe.
 */
public void connectSubscriber(final Subscriber<?> subscriber, final String documentId) {
    final Set<Subscriber<?>> newSub = Collections
            .newSetFromMap(new ConcurrentHashMap<Subscriber<?>, Boolean>());
    newSub.add(subscriber);
    while (true) {
        final Set<Subscriber<?>> currentClients = subscribers.get(documentId);
        if (currentClients == null) {
            final Set<Subscriber<?>> previous = subscribers.putIfAbsent(documentId, newSub);
            if (previous != null) {
                newSub.addAll(previous);
                if (subscribers.replace(documentId, previous, newSub)) {
                    break;
                }
            }
        } else {
            newSub.addAll(currentClients);
            if (subscribers.replace(documentId, currentClients, newSub)) {
                break;
            }
        }
    }
}
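
The loop implements a copy-on-write update: each attempt builds a fresh set containing the new subscriber plus the current ones, then publishes it atomically via putIfAbsent or replace, retrying until a swap succeeds. On Java 8+, because the sets themselves are concurrent, the subscribe step can be written more directly; a minimal sketch, assuming subscribers is a ConcurrentMap<String, Set<Subscriber<?>>> as the loop's use of putIfAbsent and replace implies:

subscribers.computeIfAbsent(documentId,
        id -> Collections.newSetFromMap(new ConcurrentHashMap<Subscriber<?>, Boolean>()))
        .add(subscriber);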

From source file:elasticsearch.ElasticSearchPlugin.java

/**
 * This method is called when the application starts; it will start the ES instance.
 *
 * @see play.PlayPlugin#onApplicationStart()
 */
@Override
public void onApplicationStart() {
    // (re-)set caches
    mappers = new ConcurrentHashMap<Class<?>, ModelMapper<?>>();
    modelLookup = new ConcurrentHashMap<String, Class<?>>();
    indicesStarted = Collections.newSetFromMap(new ConcurrentHashMap<Class<?>, Boolean>());
    ReflectionUtil.clearCache();

    // Make sure it doesn't get started more than once
    if ((client != null) || started) {
        Logger.debug("Elastic Search Started Already!");
        return;
    }

    // Start Node Builder
    final Builder settings = ImmutableSettings.settingsBuilder();
    // settings.put("client.transport.sniff", true);

    // Import anything from play configuration that starts with elasticsearch.native.
    final Enumeration<Object> keys = Play.configuration.keys();
    while (keys.hasMoreElements()) {
        final String key = (String) keys.nextElement();
        if (key.startsWith("elasticsearch.native.")) {
            final String nativeKey = key.replaceFirst("elasticsearch.native.", "");
            Logger.error("Adding native [" + nativeKey + "," + Play.configuration.getProperty(key) + "]");
            settings.put(nativeKey, Play.configuration.getProperty(key));
        }
    }

    settings.build();

    // Check Model
    if (this.isLocalMode()) {
        Logger.info("Starting Elastic Search for Play! in Local Mode");
        final NodeBuilder nb = nodeBuilder().settings(settings).local(true).client(false).data(true);
        final Node node = nb.node();
        client = node.client();

    } else {
        Logger.info("Connecting Play! to Elastic Search in Client Mode");
        final TransportClient c = new TransportClient(settings);
        if (Play.configuration.getProperty("elasticsearch.client") == null) {
            throw new RuntimeException(
                    "Configuration required - elasticsearch.client when local model is disabled!");
        }
        final String[] hosts = getHosts().trim().split(",");
        boolean done = false;
        for (final String host : hosts) {
            final String[] parts = host.split(":");
            if (parts.length != 2) {
                throw new RuntimeException("Invalid Host: " + host);
            }
            Logger.info("Transport Client - Host: %s Port: %s", parts[0], parts[1]);
            if (Integer.valueOf(parts[1]) == 9200)
                Logger.info(
                        "Note: Port 9200 is usually used by the HTTP Transport. You might want to use 9300 instead.");
            c.addTransportAddress(new InetSocketTransportAddress(parts[0], Integer.valueOf(parts[1])));
            done = true;
        }
        if (done == false) {
            throw new RuntimeException("No Hosts Provided for Elastic Search!");
        }
        client = c;
    }

    // Configure current delivery mode
    setDeliveryMode(getDeliveryModeFromConfiguration());

    // Bind Admin
    Router.addRoute("GET", "/es-admin", "elasticsearch.ElasticSearchAdmin.index");

    // Check Client
    if (client == null) {
        throw new RuntimeException(
                "Elastic Search Client cannot be null - please check the configuration provided and the health of your Elastic Search instances.");
    }
}

From source file:org.apereo.lap.services.BaseInputHandlerService.java

public void init() {
    logger.info("INIT");
    loadedInputCollections = new ConcurrentHashMap<>();
    //noinspection unchecked
    loadedInputTypes = Collections.newSetFromMap(new ConcurrentHashMap());
}
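
The backing map is a raw ConcurrentHashMap, which is what makes the unchecked-warning suppression necessary; on Java 7+ the diamond form, Collections.newSetFromMap(new ConcurrentHashMap<>()), produces the same set without the warning.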

From source file:org.geowebcache.storage.blobstore.memory.guava.GuavaCacheProvider.java

public GuavaCacheProvider(CacheConfiguration config) {
    // Initialization of the Layer set and of the Atomic parameters
    layers = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
    configured = new AtomicBoolean(false);
    actualOperations = new AtomicLong(0);
    configure(config);
}

From source file:io.reign.zk.ResilientZkClient.java

/**
 * getData() and exists() set data watches.
 *
 * @param path
 * @param watcher
 */
void trackDataWatch(String path, Watcher watcher) {
    Set<Watcher> watcherSet = dataWatchesMap.get(path);
    if (watcherSet == null) {
        Set<Watcher> newWatcherSet = Collections
                .newSetFromMap(new ConcurrentHashMap<Watcher, Boolean>(4, 0.9f, 1));
        watcherSet = dataWatchesMap.putIfAbsent(path, newWatcherSet);
        if (watcherSet == null) {
            watcherSet = newWatcherSet;
        }
    }
    watcherSet.add(watcher);
}
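
The null check on putIfAbsent's return value settles the race to create the first watcher set for a path: null means this thread's set was installed, while a non-null result is the set another thread installed first, which is then used instead. The (4, 0.9f, 1) constructor arguments tune the backing ConcurrentHashMap's initial capacity, load factor, and concurrency level for small, lightly contended sets.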

From source file:com.amazon.sqs.javamessaging.SQSSession.java

SQSSession(SQSConnection parentSQSConnection, AcknowledgeMode acknowledgeMode) throws JMSException {
    this(parentSQSConnection, acknowledgeMode,
            Collections.newSetFromMap(new ConcurrentHashMap<SQSMessageConsumer, Boolean>()),
            Collections.newSetFromMap(new ConcurrentHashMap<SQSMessageProducer, Boolean>()));
}