Example usage for java.util Collections newSetFromMap

Introduction

This page collects example usages of java.util.Collections.newSetFromMap from open-source projects.

Prototype

public static <E> Set<E> newSetFromMap(Map<E, Boolean> map) 

Document

Returns a set backed by the specified map. The resulting set displays the same ordering, concurrency, and performance characteristics as the backing map, which must be empty at the time this method is invoked and should not be accessed directly afterwards.
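
A minimal, self-contained sketch of that contract (the class name and values are illustrative, not taken from any of the projects below):

import java.util.Collections;
import java.util.Set;
import java.util.WeakHashMap;
import java.util.concurrent.ConcurrentHashMap;

public class NewSetFromMapDemo {
    public static void main(String[] args) {
        // The JDK has no ConcurrentHashSet class; this is the standard way to get one.
        Set<String> concurrentSet = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
        concurrentSet.add("a");
        concurrentSet.add("a"); // duplicate, silently ignored
        System.out.println(concurrentSet.size()); // prints 1

        // A weak set: entries vanish once no strong references to them remain.
        Set<Object> weakSet = Collections.newSetFromMap(new WeakHashMap<Object, Boolean>());
        weakSet.add(new Object());

        // Passing a non-empty map throws IllegalArgumentException.
    }
}

Most of the examples below use the ConcurrentHashMap-backed variant to obtain a thread-safe set.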

Usage

From source file: org.onosproject.store.hz.SMap.java

private Set<byte[]> serializeKeySet(Set<K> keys) {
    Set<byte[]> sk = Collections.newSetFromMap(new IdentityHashMap<byte[], Boolean>(keys.size()));
    for (K key : keys) {
        sk.add(serializeKey(key));
    }
    return sk;
}
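
A note on the backing map chosen above: Java arrays inherit Object's reference-equality equals and hashCode, so a set of byte[] keys is identity-based no matter which map backs it; the IdentityHashMap simply makes that explicit. A small illustration (the class name is hypothetical):

import java.util.Collections;
import java.util.IdentityHashMap;
import java.util.Set;

public class IdentitySetDemo {
    public static void main(String[] args) {
        Set<byte[]> set = Collections.newSetFromMap(new IdentityHashMap<byte[], Boolean>());
        byte[] a = { 1, 2, 3 };
        byte[] b = { 1, 2, 3 }; // equal content, distinct instance
        set.add(a);
        set.add(b);
        // Prints 2: arrays never compare by content, so both instances are kept.
        // A HashMap backing would behave the same way here.
        System.out.println(set.size());
    }
}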

From source file: org.alfresco.repo.activities.feed.cleanup.FeedCleaner.java

public void beforeDeleteNodePerson(NodeRef personNodeRef) {
    String userId = (String) nodeService.getProperty(personNodeRef, ContentModel.PROP_USERNAME);
    //MNT-9104 If the username contains uppercase letters, the action of joining a site will not be displayed in "My activities"
    if (!userNamesAreCaseSensitive) {
        userId = userId.toLowerCase();
    }
    Set<String> deletedUserIds = (Set<String>) AlfrescoTransactionSupport.getResource(KEY_DELETED_USER_IDS);
    if (deletedUserIds == null) {
        deletedUserIds = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>()); // Java 6
        AlfrescoTransactionSupport.bindResource(KEY_DELETED_USER_IDS, deletedUserIds);
    }

    deletedUserIds.add(userId);

    AlfrescoTransactionSupport.bindListener(deletePersonTransactionListener);
}

From source file: org.alfresco.repo.activities.feed.cleanup.FeedCleaner.java

public void beforeDeleteNodeSite(NodeRef siteNodeRef) {
    String siteId = (String) nodeService.getProperty(siteNodeRef, ContentModel.PROP_NAME);

    Set<String> deletedSiteIds = (Set<String>) AlfrescoTransactionSupport.getResource(KEY_DELETED_SITE_IDS);
    if (deletedSiteIds == null) {
        deletedSiteIds = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>()); // Java 6
        AlfrescoTransactionSupport.bindResource(KEY_DELETED_SITE_IDS, deletedSiteIds);
    }

    deletedSiteIds.add(siteId);

    AlfrescoTransactionSupport.bindListener(deleteSiteTransactionListener);
}

From source file: net.dempsy.container.TestContainer.java

@Test
public void testInvokeOutput() throws Exception {
    outputMessages = Collections.newSetFromMap(new ConcurrentHashMap<>());
    cache = new ConcurrentHashMap<>();

    final TestAdaptor adaptor = context.getBean(TestAdaptor.class);
    assertNotNull(adaptor.dispatcher);
    adaptor.dispatcher.dispatchAnnotated(new ContainerTestMessage("foo"));
    adaptor.dispatcher.dispatchAnnotated(new ContainerTestMessage("bar"));

    assertTrue(poll(container, c -> c.getProcessorCount() > 1));
    Thread.sleep(100);

    assertEquals("number of MP instances", 2, container.getProcessorCount());

    try (NodeManager nman = addOutputCatchStage()) {

        final TestProcessor mp = cache.get("foo");
        assertTrue(poll(mp, m -> mp.invocationCount > 0));
        Thread.sleep(100);
        assertEquals("invocation count, 1st message", 1, mp.invocationCount);

        // because the sessionFactory is shared and the appname is the same, we should be in the same app
        container.outputPass();

        assertTrue(poll(outputMessages, o -> o.size() > 1));
        Thread.sleep(100);
        assertEquals(2, outputMessages.size());

        // no new mps created in the first one
        assertEquals("did not create MP", 2, container.getProcessorCount());

        // but the invocation count should have increased since the output cycle feeds messages back to this cluster
        assertTrue(poll(mp, m -> mp.invocationCount > 1));
        Thread.sleep(100);
        assertEquals("invocation count, 1st message", 2, mp.invocationCount);

        // order of messages is not guaranteed, so we need to aggregate keys
        final HashSet<String> messageKeys = new HashSet<String>();

        final Iterator<OutputMessage> iter = outputMessages.iterator();
        messageKeys.add(iter.next().getKey());
        messageKeys.add(iter.next().getKey());
        assertTrue("first MP sent output", messageKeys.contains("foo"));
        assertTrue("second MP sent output", messageKeys.contains("bar"));
    }
}

From source file: org.apache.hadoop.hbase.client.AsyncRequestFutureImpl.java

public AsyncRequestFutureImpl(TableName tableName, List<Action<Row>> actions, long nonceGroup,
        ExecutorService pool, boolean needResults, Object[] results, Batch.Callback<CResult> callback,
        CancellableRegionServerCallable callable, int operationTimeout, int rpcTimeout,
        AsyncProcess asyncProcess) {
    this.pool = pool;
    this.callback = callback;
    this.nonceGroup = nonceGroup;
    this.tableName = tableName;
    this.actionsInProgress.set(actions.size());
    if (results != null) {
        assert needResults;
        if (results.length != actions.size()) {
            throw new AssertionError("results.length");
        }
        this.results = results;
        for (int i = 0; i != this.results.length; ++i) {
            results[i] = null;
        }
    } else {
        this.results = needResults ? new Object[actions.size()] : null;
    }
    List<Integer> replicaGetIndices = null;
    boolean hasAnyReplicaGets = false;
    if (needResults) {
        // Check to see if any requests might require replica calls.
        // We expect that many requests will consist of all or no multi-replica gets; in such
        // cases we would just use a boolean (hasAnyReplicaGets). If there's a mix, we will
        // store the list of action indexes for which replica gets are possible, and set
        // hasAnyReplicaGets to true.
        boolean hasAnyNonReplicaReqs = false;
        int posInList = 0;
        for (Action<Row> action : actions) {
            boolean isReplicaGet = AsyncProcess.isReplicaGet(action.getAction());
            if (isReplicaGet) {
                hasAnyReplicaGets = true;
                if (hasAnyNonReplicaReqs) { // Mixed case
                    if (replicaGetIndices == null) {
                        replicaGetIndices = new ArrayList<Integer>(actions.size() - 1);
                    }
                    replicaGetIndices.add(posInList);
                }
            } else if (!hasAnyNonReplicaReqs) {
                // The first non-multi-replica request in the action list.
                hasAnyNonReplicaReqs = true;
                if (posInList > 0) {
                    // Add all the previous requests to the index lists. We know they are all
                    // replica-gets because this is the first non-multi-replica request in the list.
                    replicaGetIndices = new ArrayList<Integer>(actions.size() - 1);
                    for (int i = 0; i < posInList; ++i) {
                        replicaGetIndices.add(i);
                    }
                }
            }
            ++posInList;
        }
    }
    this.hasAnyReplicaGets = hasAnyReplicaGets;
    if (replicaGetIndices != null) {
        this.replicaGetIndices = new int[replicaGetIndices.size()];
        int i = 0;
        for (Integer el : replicaGetIndices) {
            this.replicaGetIndices[i++] = el;
        }
    } else {
        this.replicaGetIndices = null;
    }
    this.callsInProgress = !hasAnyReplicaGets ? null
            : Collections.newSetFromMap(new ConcurrentHashMap<CancellableRegionServerCallable, Boolean>());
    this.asyncProcess = asyncProcess;
    this.errorsByServer = createServerErrorTracker();
    this.errors = (asyncProcess.globalErrors != null) ? asyncProcess.globalErrors : new BatchErrors();
    this.operationTimeout = operationTimeout;
    this.rpcTimeout = rpcTimeout;
    this.currentCallable = callable;
    if (callable == null) {
        tracker = new RetryingTimeTracker().start();
    }
}

From source file: net.dempsy.container.TestContainer.java

@Test
public void testMtInvokeOutput() throws Exception {
    outputMessages = Collections.newSetFromMap(new ConcurrentHashMap<>());
    final int numInstances = 20;
    final int concurrency = 5;

    container.setOutputConcurrency(concurrency);

    final TestAdaptor adaptor = context.getBean(TestAdaptor.class);
    assertNotNull(adaptor.dispatcher);
    for (int i = 0; i < numInstances; i++)
        adaptor.dispatcher.dispatchAnnotated(new ContainerTestMessage("foo" + i));

    assertTrue(poll(container, c -> c.getProcessorCount() > 19));
    Thread.sleep(100);
    assertEquals("number of MP instances", 20, container.getProcessorCount());

    try (NodeManager nman = addOutputCatchStage()) {
        container.outputPass();
        assertTrue(poll(outputMessages, o -> o.size() > 19));
        Thread.sleep(100);
        assertEquals(20, outputMessages.size());
    }
}

From source file: org.runbuddy.tomahawk.services.PlaybackService.java

private void fillStation(final StationPlaylist stationPlaylist) {
    Promise<List<Query>, Throwable, Void> promise = stationPlaylist.fillPlaylist(10);
    if (promise != null) {
        Log.d(TAG, "filling " + stationPlaylist);
        promise.done(new DoneCallback<List<Query>>() {
            @Override
            public void onDone(List<Query> result) {
                Log.d(TAG, "found " + result.size() + " candidates to fill " + stationPlaylist);
                for (Query query : result) {
                    mCorrespondingQueries.add(query);
                    if (!mStationQueries.containsKey(stationPlaylist)) {
                        Set<Query> querySet = Collections
                                .newSetFromMap(new ConcurrentHashMap<Query, Boolean>());
                        mStationQueries.put(stationPlaylist, querySet);
                    }
                    mStationQueries.get(stationPlaylist).add(query);
                    PipeLine.get().resolve(query);
                }
            }
        });
        promise.fail(new FailCallback<Throwable>() {
            @Override
            public void onFail(final Throwable result) {
                new Handler(Looper.getMainLooper()).post(new Runnable() {
                    @Override
                    public void run() {
                        Toast.makeText(TomahawkApp.getContext(), result.getMessage(), Toast.LENGTH_LONG).show();
                    }
                });
            }
        });
    }
}

From source file: org.onosproject.segmentrouting.grouphandler.DefaultGroupHandler.java

protected void addNeighborAtPort(DeviceId neighborId, PortNumber portToNeighbor) {
    // Update DeviceToPort database
    log.debug("Device {} addNeighborAtPort: neighbor {} at port {}", deviceId, neighborId, portToNeighbor);
    Set<PortNumber> ports = Collections.newSetFromMap(new ConcurrentHashMap<PortNumber, Boolean>());
    ports.add(portToNeighbor);
    Set<PortNumber> portnums = devicePortMap.putIfAbsent(neighborId, ports);
    if (portnums != null) {
        portnums.add(portToNeighbor);
    }

    // Update portToDevice database
    DeviceId prev = portDeviceMap.putIfAbsent(portToNeighbor, neighborId);
    if (prev != null) {
        log.warn("Device: {} port: {} has neighbor: {}. NOT updating " + "to neighbor: {}", deviceId,
                portToNeighbor, prev, neighborId);
    }
}
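
The create-then-putIfAbsent sequence above is the pre-Java-8 way to populate a concurrent multimap without losing entries to a race. On Java 8 and later, Map.computeIfAbsent expresses the same idea more compactly. A hedged sketch with hypothetical stand-in types, not the ONOS code itself:

import java.util.Collections;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class NeighborPortsDemo {
    // String and Integer stand in for ONOS's DeviceId and PortNumber types here.
    private final Map<String, Set<Integer>> devicePortMap = new ConcurrentHashMap<>();

    public void addNeighborAtPort(String neighborId, int portToNeighbor) {
        // On a ConcurrentHashMap, computeIfAbsent runs the factory atomically when
        // the key is absent and returns the winning set either way, so no port
        // additions are lost when two threads discover the same neighbor at once.
        devicePortMap
                .computeIfAbsent(neighborId,
                        k -> Collections.newSetFromMap(new ConcurrentHashMap<Integer, Boolean>()))
                .add(portToNeighbor);
    }
}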

From source file: net.floodlightcontroller.devicemanager.internal.DeviceManagerImpl.java

@Override
public void init(FloodlightModuleContext fmc) {
    Device.deviceManager = this;
    secondaryIndexMap = null; //ColumnTable_.getTable(new ColumnProxy((int) Thread.currentThread().getId()), "SECONDARY",Serializer.LONG ,AnnotatedColumnObject.newAnnotatedColumnObject(Device.class));
    //XXX - clean up request.

    PropertiesConfiguration config = null;
    // create and load default properties
    try {
        config = new PropertiesConfiguration("datastore.config");
    } catch (ConfigurationException e) {
        System.err.println("Could not read configuration file");
        System.exit(-1);
    }

    if (config.getBoolean("benchmark")) {
        RequestLogger.startRequestLogger(config.getString("benchmark.output"));
    }

    deviceMap = new ColumnWorkloadLogger<Long, Device>("DEVICES", RequestLogger.getRequestLogger(),
            Serializer.LONG, AnnotatedColumnObject.newAnnotatedColumnObject(Device.class));
    //        deviceMap = ColumnTable_.getTable(new ColumnProxy((int) Thread.currentThread().getId()), "DEVICES",Serializer.LONG ,AnnotatedColumnObject.newAnnotatedColumnObject(Device.class)); 

    classStateMap = new ConcurrentHashMap<String, ClassState>();

    apComparator = new AttachmentPointComparator();

    perClassIndices = Maps.newConcurrentMap();
    addIndex(true, EnumSet.of(DeviceField.IPV4));

    this.deviceListeners = new ListenerDispatcher<String, IDeviceListener>();
    this.suppressAPs = Collections.newSetFromMap(new ConcurrentHashMap<SwitchPort, Boolean>());

    this.floodlightProvider = fmc.getServiceImpl(IFloodlightProviderService.class);
    this.storageSource = fmc.getServiceImpl(IStorageSourceService.class);
    this.topology = fmc.getServiceImpl(ITopologyService.class);
    this.restApi = fmc.getServiceImpl(IRestApiService.class);
    this.threadPool = fmc.getServiceImpl(IThreadPoolService.class);
    this.flowReconcileMgr = fmc.getServiceImpl(IFlowReconcileService.class);
    this.entityClassifier = fmc.getServiceImpl(IEntityClassifierService.class);

}

From source file: org.wso2.andes.kernel.slot.SlotManagerClusterMode.java

/**
 * Recover any messages that are persisted but not notified to the slot coordinator from killed nodes.
 * <p>
 * For instance, if a node gets killed after persisting messages but before submitting slots,
 * a new slot will not be created until another message is published to one of the remaining nodes.
 * Hence these messages will not get delivered until another message is published.
 * <p>
 * The recovery mechanism here schedules a task for each queue so that, if no message is received within
 * the given time period, the slot manager for that queue will create a slot and capture those messages itself.
 *
 * @param deletedNodeId node id of the deleted node
 */
public void deletePublisherNode(final String deletedNodeId) {

    int threadPoolCount = 1; // A single thread suffices for this task
    ThreadFactory namedThreadFactory = new ThreadFactoryBuilder().setNameFormat("RecoverSlotsThreadPool")
            .build();
    ScheduledExecutorService recoverSlotScheduler = Executors.newScheduledThreadPool(threadPoolCount,
            namedThreadFactory);

    // This is accessed from another thread, therefore we use a set that supports concurrency

    Set<String> concurrentSet;

    try {
        concurrentSet = Collections
                .newSetFromMap(new ConcurrentHashMap<String, Boolean>(slotAgent.getAllQueues().size()));
        concurrentSet.addAll(slotAgent.getAllQueues());
        queuesToRecover = concurrentSet;
    } catch (AndesException ex) {
        log.error("Failed to get all queue names", ex);
    }

    recoverSlotScheduler.schedule(new Runnable() {
        @Override
        public void run() {

            try {
                long lastId = SlotMessageCounter.getInstance().getCurrentNodeSafeZoneId();
                //TODO: Delete if the queue has not progressed
                for (String queueName : queuesToRecover) {
                    // Trigger a submit slot for each queue so that new slots are created
                    // for queues that have not published any messages after a node crash
                    try {
                        updateMessageID(queueName, deletedNodeId, lastId - 1, lastId, lastId);
                    } catch (AndesException ex) {
                        log.error("Failed to update message id", ex);
                    }
                }
                slotRecoveryScheduled.set(false);
                try {
                    if (log.isDebugEnabled()) {
                        log.debug("Removing " + deletedNodeId + " from safe zone calculation.");
                    }
                    slotAgent.removePublisherNode(deletedNodeId);
                } catch (AndesException e) {
                    log.error("Failed to remove publisher node ID from safe zone calculation", e);
                }

            } catch (Throwable e) {
                log.error("Error occurred while trying to run recover slot scheduler", e);
            }
        }
    }, SlotMessageCounter.getInstance().SLOT_SUBMIT_TIMEOUT, TimeUnit.MILLISECONDS);

    slotRecoveryScheduled.set(true);

}