Example usage for java.util.Collections.emptySet()

Introduction

This page presents usage examples for java.util.Collections.emptySet(), drawn from real-world source files.

Prototype

@SuppressWarnings("unchecked")
public static final <T> Set<T> emptySet() 
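
In the reference implementation, emptySet() returns one shared immutable constant cast to Set&lt;T&gt;, which is why the @SuppressWarnings("unchecked") annotation appears; the unchecked cast is safe only because the set can never contain an element. A minimal sketch of how the type parameter is supplied at the call site (the class name EmptySetInference is illustrative):

import java.util.Collections;
import java.util.Set;

public class EmptySetInference {
    public static void main(String[] args) {
        // The element type T is inferred from the assignment target.
        Set<String> names = Collections.emptySet();

        // Where no target type is available, an explicit type witness works.
        Set<Integer> ids = Collections.<Integer>emptySet();

        System.out.println(names.isEmpty()); // true
        System.out.println(ids.isEmpty());   // true
    }
}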

Document

Returns an empty set (immutable).
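
Per that contract, the returned set rejects all mutation, and the Javadoc adds that implementations need not create a separate Set object for each invocation. A small sketch of this behavior (the class name EmptySetContract is illustrative):

import java.util.Collections;
import java.util.Set;

public class EmptySetContract {
    public static void main(String[] args) {
        Set<String> empty = Collections.emptySet();

        // Mutator methods throw UnsupportedOperationException.
        try {
            empty.add("x");
        } catch (UnsupportedOperationException e) {
            System.out.println("immutable: add() was rejected");
        }

        // The reference implementation returns one shared constant,
        // so repeated calls allocate nothing.
        System.out.println(Collections.emptySet() == Collections.emptySet()); // true here
    }
}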

Usage

From source file:com.xpn.xwiki.plugin.workspacesmanager.apps.DefaultWorkspaceApplicationManager.java

/**
 * @param context the XWiki context at the moment this method is called.
 * @return a set of all available applications names that can be installed in a workspace.
 */
private Set<String> getAllWorkspacesApps(XWikiContext context) {
    if (allWorkspacesApps == null) {
        allWorkspacesApps = new HashSet<String>();
        try {
            List<XWikiApplication> allApps = getXWikiApplicationManagerApi(context)
                    .getApplicationDocumentList();
            for (XWikiApplication app : allApps) {
                if (app.getObject(WORKSPACE_APPLICATION_CLASS) != null) {
                    allWorkspacesApps.add(app.getAppName());
                }
            }
        } catch (XWikiException e) {
            allWorkspacesApps = null;
            return Collections.emptySet();
        }
    }
    return allWorkspacesApps;
}

From source file:org.jboss.aerogear.simplepush.server.datastore.CouchDBDataStore.java

@Override
public Set<String> getChannelIds(final String uaid) {
    final ViewResult viewResult = db.queryView(query(Views.UAID.viewName(), uaid));
    final List<Row> rows = viewResult.getRows();
    if (rows.isEmpty()) {
        return Collections.emptySet();
    }
    final Set<String> channelIds = new HashSet<String>(rows.size());
    for (Row row : rows) {
        channelIds.add(row.getValueAsNode().get(DOC_FIELD).get(CHID_FIELD).asText());
    }
    return channelIds;
}

From source file:com.amalto.core.storage.inmemory.InMemoryStorage.java

@Override
public Set<String> getFullTextSuggestion(String keyword, FullTextSuggestion mode, int suggestionSize) {
    return Collections.emptySet();
}

From source file:de.uniba.wiai.kinf.pw.projects.lillytab.reasoner.tbox.AssertedRBox.java

@Override
public Collection<R> getRoles() {
    final Collection<R> value = _roleTypeMap.keySet();
    if (value != null) {
        return Collections.unmodifiableCollection(value);
    } else {
        return Collections.emptySet();
    }
}

From source file:com.azaptree.services.security.dao.HashedCredentialDAO.java

@Override
public Set<HashedCredential> findBySubjectId(final UUID subjectId) throws DAOException {
    Assert.notNull(subjectId, "subjectId is required");
    final String sql = "select * from t_hashed_credential where subject_id = ?";
    final Object[] args = { subjectId };
    final List<HashedCredential> rs = jdbc.query(sql, args, rowMapper);
    if (rs.isEmpty()) {
        return Collections.emptySet();
    }
    return ImmutableSet.<HashedCredential>builder().addAll(rs).build();
}

From source file:com.spotify.heroic.test.AbstractSuggestBackendIT.java

@Test
public void keySuggestNoIdx() throws Exception {
    final Set<String> result = getKeySuggest(keySuggestReq);

    assertEquals(Collections.emptySet(), result);
}

From source file:com.amalto.core.storage.inmemory.InMemoryJoinResults.java

private static Set<Object> _evaluateConditions(Storage storage, InMemoryJoinNode node) {
    if (node.expression != null) {
        Set<Object> expressionIds = new HashSet<Object>();
        StorageResults results = storage.fetch(node.expression); // Expects an active transaction here
        try {
            for (DataRecord result : results) {
                for (FieldMetadata field : result.getSetFields()) {
                    expressionIds.add(result.get(field));
                }
            }
        } finally {
            results.close();
        }
        return expressionIds;
    } else {
        Executor executor = getExecutor(node);
        Set<Object> ids = new HashSet<Object>();
        switch (node.merge) {
        case UNION:
        case NONE:
            for (InMemoryJoinNode child : node.children.keySet()) {
                ids.addAll(executor.execute(storage, child));
            }
            break;
        case INTERSECTION:
            for (InMemoryJoinNode child : node.children.keySet()) {
                if (ids.isEmpty()) {
                    ids.addAll(executor.execute(storage, child));
                } else {
                    ids.retainAll(executor.execute(storage, child));
                }
            }
            break;
        default:
            throw new NotImplementedException("No support for '" + node.merge + "'.");
        }
        //
        Set<Object> returnIds = new HashSet<Object>();
        if (node.childProperty != null) {
            if (ids.isEmpty()) {
                return Collections.emptySet();
            }
            long execTime = System.currentTimeMillis();
            {
                UserQueryBuilder qb = from(node.type).selectId(node.type)
                        .where(buildConditionFromValues(null, node.childProperty, ids));
                node.expression = qb.getSelect();
                StorageResults results = storage.fetch(qb.getSelect()); // Expects an active transaction here
                try {
                    for (DataRecord result : results) {
                        for (FieldMetadata field : result.getSetFields()) {
                            returnIds.add(result.get(field));
                        }
                    }
                } finally {
                    results.close();
                }
            }
            node.execTime = System.currentTimeMillis() - execTime;
        }
        return returnIds;
    }
}

From source file:com.googlecode.icegem.cacheutils.regioncomparator.CompareTool.java

public void execute(String[] args, boolean debugEnabled, boolean quiet) {
    AdminDistributedSystem adminDs = AdminDistributedSystemFactory
            .getDistributedSystem(AdminDistributedSystemFactory.defineDistributedSystem());
    adminDs.connect();

    parseCommandLineArguments(args);

    List<Pool> poolList = new ArrayList<Pool>();
    if (serversOption != null && serversOption.length() > 0)
        for (String serverOption : serversOption.split(",")) {
            String serverHost = serverOption.substring(0, serverOption.indexOf("["));
            String serverPort = serverOption.substring(serverOption.indexOf("[") + 1,
                    serverOption.indexOf("]"));
            poolList.add(PoolManager.createFactory().addServer(serverHost, Integer.parseInt(serverPort))
                    .create("poolTo" + serverHost + serverPort));
        }
    if (locatorsProperties != null && !locatorsProperties.isEmpty())
        for (Object poolOption : locatorsProperties.keySet()) {
            String locator = (String) locatorsProperties.get(poolOption);
            String serverHost = locator.substring(0, locator.indexOf("["));
            String serverPort = locator.substring(locator.indexOf("[") + 1, locator.indexOf("]"));
            poolList.add(PoolManager.createFactory().addLocator(serverHost, Integer.parseInt(serverPort)) // todo: handle two identical locator options: creating a second pool with the same name throws an exception
                    .create("poolTo" + serverHost + serverPort));
        }

    //todo: insert a check that each cluster contains the region and that the region types match (Partitioned, Replicated)

    boolean partitioned = false; //todo: expose via CLI + throw an exception if the real region has a different type

    List<ServerLocation> serverFromPool = new ArrayList<ServerLocation>();
    List<Pool> emptyPools = new ArrayList<Pool>(); //contains pool with no available servers
    for (Pool pool : poolList) {
        List<ServerLocation> allServers = null;
        if (!pool.getLocators().isEmpty())
            allServers = ((AutoConnectionSourceImpl) ((PoolImpl) pool).getConnectionSource()).findAllServers(); //todo: ConnectionError if locator doesn't exist
        else if (!pool.getServers().isEmpty())
            allServers = Arrays
                    .asList((((PoolImpl) pool).getConnectionSource()).findServer(Collections.emptySet()));

        if (allServers != null)
            serverFromPool.addAll(allServers);
        else {
            log.info("not found servers on locator {}", pool);
            emptyPools.add(pool);
        }
    }
    poolList.removeAll(emptyPools);

    if (serverFromPool.size() == 0) {
        log.info("no servers available");
        return;
    }

    printServerLocationDetails(serverFromPool);

    //source for comparison //todo: it is a problem if this node doesn't contain the region
    Pool sourcePool;
    if (!partitioned) {
        int randomServerLocation = new Random().nextInt(serverFromPool.size());
        sourcePool = PoolManager.createFactory()
                .addServer(serverFromPool.get(randomServerLocation).getHostName(),
                        serverFromPool.get(randomServerLocation).getPort())
                .create("target");
    } else {
        sourcePool = poolList.get(0);
        poolList.remove(0);
    }

    FunctionService.registerFunction(new RegionInfoFunction());
    ResultCollector regionInfoResult = FunctionService.onServers(sourcePool).withArgs(regionName)
            .execute(new RegionInfoFunction());

    Map regionInfo = (HashMap) ((ArrayList) regionInfoResult.getResult()).get(0);
    System.out.println("region info: " + regionInfo);

    int totalNumBuckets = (Integer) regionInfo.get("totalNumBuckets");
    //log.debug("total number of key batches is {}", totalNumBuckets);
    System.out.println("total number of key batches is " + totalNumBuckets);
    KeyExtractor keyExtractor = new KeyExtractor(regionName, sourcePool, partitioned, totalNumBuckets);

    Map<String, Map<String, Set>> clusterDifference = new HashMap<String, Map<String, Set>>(); //key: memeberId list: absent keys, diff values

    List<PoolResult> taskResults = new ArrayList<PoolResult>();
    List<Future<PoolResult>> collectTasks = new ArrayList<Future<PoolResult>>(poolList.size());
    ExecutorService executorService = Executors.newFixedThreadPool(poolList.size());
    while (keyExtractor.hasKeys()) {
        Set keys = keyExtractor.getNextKeysBatch();
        System.out.println("keys to check: " + keys);
        for (Pool nextPool : poolList)
            collectTasks.add(executorService.submit(new CollectorTask(keys, nextPool, regionName)));
        System.out.println("active tasks: " + collectTasks.size());
        try {
            //for (Future<ResultCollector> futureTask : collectTasks) {
            for (Future<PoolResult> futureTask : collectTasks) {
                taskResults.add(futureTask.get());
            }
        } catch (InterruptedException ie) {
            ie.printStackTrace();
        } catch (ExecutionException ee) {
            ee.printStackTrace();
        }
        collectTasks.clear();

        System.out.println("compare contents..");
        //getting source contents
        Map sourceData = new HashMap();

        //getting source map
        FutureTask<PoolResult> ft = new FutureTask<PoolResult>(new CollectorTask(keys, sourcePool, regionName));
        ft.run();
        try {
            PoolResult rc = ft.get();
            List poolResult = (List) rc.getResultCollector().getResult();
            for (Object singleResult : poolResult) {
                sourceData.putAll((Map) ((HashMap) singleResult).get("map"));
            }
        } catch (Exception e) {
            throw new RuntimeException("error getting key-hash from pool: " + sourcePool, e);
        }
        //todo: aggregate members' data from one cluster

        System.out.println("source data is: " + sourceData);
        //for (ResultCollector taskResultFromPool : taskResults) {
        for (PoolResult taskResultFromPool : taskResults) {
            List poolResult = (ArrayList) taskResultFromPool.getResultCollector().getResult();
            if (!partitioned) {
                for (Object resultFromMember : poolResult) {
                    Map result = (HashMap) resultFromMember;
                    String memberId = (String) result.get("memberId");
                    if (regionInfo.get("id").equals(result.get("memberId"))) //for replicated region
                        continue;
                    Map<String, Set> aggregationInfo = compareAndAggregate(sourceData,
                            (HashMap) result.get("map"));
                    System.out.println("result of comparing is: " + aggregationInfo);
                    if (!clusterDifference.containsKey(memberId)) {
                        aggregationInfo.put("absentKeys", new HashSet());
                        clusterDifference.put(memberId, aggregationInfo);
                    } else {
                        Map<String, Set> difference = clusterDifference.get(memberId);
                        difference.get("absentKeys").addAll((Set) result.get("absentKeys"));
                        difference.get("diffValues").addAll(aggregationInfo.get("diffValues"));
                        clusterDifference.put(memberId, difference);
                    }
                }
            } else {
                Map targetData = new HashMap();
                Set absentKeysFromPool = new HashSet();

                //aggregate data from different members with partition region
                for (Object resultFromMember : poolResult) {
                    targetData.putAll((Map) ((HashMap) resultFromMember).get("map"));
                    absentKeysFromPool.addAll((Set) ((HashMap) resultFromMember).get("absentKeys"));
                }

                Map<String, Set> aggregationInfo = compareAndAggregate(sourceData, targetData);
                System.out.println("result of comparing is: " + aggregationInfo);
                String keyForPartitionRegionType = taskResultFromPool.getPool().toString();
                if (!clusterDifference.containsKey(keyForPartitionRegionType)) {
                    clusterDifference.put(keyForPartitionRegionType, aggregationInfo);
                } else {
                    Map<String, Set> difference = clusterDifference.get(keyForPartitionRegionType);
                    difference.get("absentKeys").addAll(aggregationInfo.get("absentKeys"));
                    difference.get("diffValues").addAll(aggregationInfo.get("diffValues"));
                    clusterDifference.put(keyForPartitionRegionType, difference);
                }
            }
        }

        taskResults.clear();
    }

    System.out.println("____________________________");
    System.out.println("difference: ");
    System.out.println(clusterDifference);
    executorService.shutdown();
    adminDs.disconnect();
}

From source file:nu.yona.server.subscriptions.rest.BuddyController.java

private Set<GoalDto> getGoals(UUID buddyId) {
    return buddyService.getBuddy(buddyId).getGoals().orElse(Collections.emptySet());
}

From source file:com.flipkart.flux.resource.StateMachineResource.java

private FsmGraph getGraphData(Long fsmId) throws IOException {
    StateMachine stateMachine = stateMachinesDAO.findById(fsmId);

    if (stateMachine == null) {
        throw new WebApplicationException(Response.Status.NOT_FOUND);
    }
    final FsmGraph fsmGraph = new FsmGraph();

    Map<String, Event> stateMachineEvents = eventsDAO.findBySMInstanceId(fsmId).stream()
            .collect(Collectors.<Event, String, Event>toMap(Event::getName, (event -> event)));
    Set<String> allOutputEventNames = new HashSet<>();

    final RAMContext ramContext = new RAMContext(System.currentTimeMillis(), null, stateMachine);
    /* After this operation, we'll have nodes for each state and its corresponding output event along with the output event's dependencies mapped out*/
    for (State state : stateMachine.getStates()) {
        if (state.getOutputEvent() != null) {
            EventDefinition eventDefinition = objectMapper.readValue(state.getOutputEvent(),
                    EventDefinition.class);
            final Event outputEvent = stateMachineEvents.get(eventDefinition.getName());
            final FsmGraphVertex vertex = new FsmGraphVertex(state.getId(), getDisplayName(state.getName()));
            fsmGraph.addVertex(vertex, new FsmGraphEdge(getDisplayName(outputEvent.getName()),
                    outputEvent.getStatus().name(), outputEvent.getEventSource()));
            final Set<State> dependantStates = ramContext.getDependantStates(outputEvent.getName());
            dependantStates.forEach((aState) -> fsmGraph.addOutgoingEdge(vertex, aState.getId()));
            allOutputEventNames.add(outputEvent.getName()); // we collect all output event names and use them below.
        } else {
            fsmGraph.addVertex(new FsmGraphVertex(state.getId(), this.getDisplayName(state.getName())), null);
        }
    }

    /* Handle states with no dependencies, i.e the states that can be triggered as soon as we execute the state machine */
    final Set<State> initialStates = ramContext.getInitialStates(Collections.emptySet());// hackety hack.  We're fooling the context to give us only events that depend on nothing
    if (!initialStates.isEmpty()) {
        final FsmGraphEdge initEdge = new FsmGraphEdge(TRIGGER, Event.EventStatus.triggered.name(), TRIGGER);
        initialStates.forEach((state) -> {
            initEdge.addOutgoingVertex(state.getId());
        });
        fsmGraph.addInitStateEdge(initEdge);
    }
    /* Now we handle events that were not "output-ed" by any state, which means that they were given to the workflow at the time of invocation or supplied externally*/
    final HashSet<String> eventsGivenOnWorkflowTrigger = new HashSet<>(stateMachineEvents.keySet());
    eventsGivenOnWorkflowTrigger.removeAll(allOutputEventNames);
    eventsGivenOnWorkflowTrigger.forEach((workflowTriggeredEventName) -> {
        final Event correspondingEvent = stateMachineEvents.get(workflowTriggeredEventName);
        final FsmGraphEdge initEdge = new FsmGraphEdge(this.getDisplayName(workflowTriggeredEventName),
                correspondingEvent.getStatus().name(), correspondingEvent.getEventSource());
        final Set<State> dependantStates = ramContext.getDependantStates(workflowTriggeredEventName);
        dependantStates.forEach((state) -> initEdge.addOutgoingVertex(state.getId()));
        fsmGraph.addInitStateEdge(initEdge);
    });
    return fsmGraph;
}