List of usage examples for java.util.Map getOrDefault
default V getOrDefault(Object key, V defaultValue)
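A note on the contract before the examples: the default is used only when the key has no mapping at all; a key explicitly mapped to null returns null, not the default. A minimal self-contained sketch (class name and map contents are invented):

import java.util.HashMap;
import java.util.Map;

public class GetOrDefaultDemo {
    public static void main(String[] args) {
        Map<String, Integer> counts = new HashMap<>();
        counts.put("a", 1);
        counts.put("b", null);

        System.out.println(counts.getOrDefault("a", 0)); // 1    (key present)
        System.out.println(counts.getOrDefault("c", 0)); // 0    (key absent, default used)
        System.out.println(counts.getOrDefault("b", 0)); // null (key present, mapped to null)
    }
}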
From source file:org.codice.alliance.plugin.nitf.NitfPostIngestPlugin.java
private boolean shouldGenerateContentItems(Metacard metacard, Map<String, Serializable> properties) {
    Attribute type = metacard.getAttribute(Media.TYPE);
    // getOrDefault supplies Boolean.FALSE when the processing flag is absent,
    // so the unboxing cast below cannot fail for a missing key.
    return type != null
            && IMAGE_NITF.equals(type.getValue())
            && !(boolean) properties.getOrDefault(NITF_PROCESSING_KEY, false);
}
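A related subtlety this example leans on: getOrDefault returns the map's value type, so the result still needs the cast-and-unbox, and a value explicitly stored as null would defeat the default and make the unboxing throw. A small sketch with an invented key name:

import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;

public class UnboxDemo {
    public static void main(String[] args) {
        Map<String, Serializable> properties = new HashMap<>();

        // Absent key: the default Boolean.FALSE is returned and unboxes safely.
        boolean processed = (boolean) properties.getOrDefault("nitf.processed", false);
        System.out.println(processed); // false

        // Present-but-null key: getOrDefault returns null and the unboxing throws.
        properties.put("nitf.processed", null);
        try {
            boolean broken = (boolean) properties.getOrDefault("nitf.processed", false);
        } catch (NullPointerException expected) {
            System.out.println("a null value defeats the default");
        }
    }
}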
From source file:cc.kave.commons.pointsto.evaluation.ProjectTrainValidateEvaluation.java
private List<List<ProjectIdentifier>> createProjectFolds(Set<ProjectIdentifier> projects, ICoReTypeName type,
        List<ProjectUsageStore> usageStores) throws IOException {
    Map<ProjectIdentifier, Double> numberOfUsages = new HashMap<>(projects.size());
    for (ProjectUsageStore store : usageStores) {
        Map<ProjectIdentifier, Integer> numberOfStoreUsages = store.getNumberOfUsagesPerProject(type);
        store.flush();
        for (Map.Entry<ProjectIdentifier, Integer> entry : numberOfStoreUsages.entrySet()) {
            double currentAverage = numberOfUsages.getOrDefault(entry.getKey(), 0.0);
            numberOfUsages.put(entry.getKey(),
                    currentAverage + (1.0 / usageStores.size()) * entry.getValue());
        }
    }

    List<ProjectIdentifier> sortedProjects = new ArrayList<>(projects);
    sortedProjects.sort(new Comparator<ProjectIdentifier>() {
        @Override
        public int compare(ProjectIdentifier o1, ProjectIdentifier o2) {
            double avg1 = numberOfUsages.get(o1);
            double avg2 = numberOfUsages.get(o2);
            return Double.compare(avg1, avg2);
        }
    });

    return foldBuilder.createFolds(sortedProjects, numberOfUsages);
}
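The getOrDefault-then-put accumulation in this example can also be written with Map.merge, which folds the read, combine, and write into one call. A minimal sketch of the same weighted-sum update (class, key, and weight are invented stand-ins, not the KaVE code):

import java.util.HashMap;
import java.util.Map;

public class MergeDemo {
    public static void main(String[] args) {
        Map<String, Double> totals = new HashMap<>();
        double[] samples = {2.0, 3.0, 5.0};
        int storeCount = 2; // hypothetical weight, mirroring usageStores.size() above

        for (double sample : samples) {
            // merge() inserts the weighted value when the key is absent,
            // otherwise applies Double::sum to combine it with the existing value.
            totals.merge("project-a", (1.0 / storeCount) * sample, Double::sum);
        }

        System.out.println(totals.get("project-a")); // 5.0
    }
}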
From source file:org.apache.samza.container.grouper.task.GroupByContainerIds.java
/**
 * {@inheritDoc}
 *
 * When there are `t` tasks and `p` processors, where t <= p, a fair task distribution should ideally assign
 * (t / p) tasks to each processor. In addition to guaranteeing a fair distribution, this {@link TaskNameGrouper}
 * implementation generates a locationId aware task assignment to processors where it makes best efforts in
 * assigning the tasks to processors with the same locality.
 *
 * Task assignment to processors is accomplished through the following two phases:
 *
 * 1. In the first phase, each task(T) is assigned to a processor(P) that satisfies the following constraints:
 *    A. The processor(P) should have the same locality as the task(T).
 *    B. The number of tasks already assigned to the processor should be less than (number of tasks / number of processors).
 *
 * 2. Each task left unassigned by phase 1 is then mapped to any processor with a task count less than
 *    (number of tasks / number of processors). When no such processor exists, the unassigned task is
 *    mapped to any of the available processors in a round-robin fashion.
 */
@Override
public Set<ContainerModel> group(Set<TaskModel> taskModels, GrouperMetadata grouperMetadata) {
    Map<TaskName, LocationId> taskLocality = grouperMetadata.getTaskLocality();

    // Validate that the task models are not empty.
    Preconditions.checkArgument(!taskModels.isEmpty(),
            "No tasks found. Likely due to no input partitions. Can't run a job with no tasks.");

    // Invoke the default grouper when the processor locality does not exist.
    if (MapUtils.isEmpty(grouperMetadata.getProcessorLocality())) {
        LOG.info("ProcessorLocality is empty. Generating with the default group method.");
        return group(taskModels, new ArrayList<>());
    }

    Map<String, LocationId> processorLocality = new TreeMap<>(grouperMetadata.getProcessorLocality());
    /*
     * When there are more processors than task models, choose the lexicographically least `x`
     * processors (where x = tasks.size()).
     */
    if (processorLocality.size() > taskModels.size()) {
        processorLocality = processorLocality.entrySet().stream().limit(taskModels.size())
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    }

    Map<LocationId, List<String>> locationIdToProcessors = new HashMap<>();
    Map<String, TaskGroup> processorIdToTaskGroup = new HashMap<>();

    // Generate the LocationId to processors mapping and the processorId to TaskGroup mapping.
    processorLocality.forEach((processorId, locationId) -> {
        List<String> processorIds = locationIdToProcessors.getOrDefault(locationId, new ArrayList<>());
        processorIds.add(processorId);
        locationIdToProcessors.put(locationId, processorIds);
        processorIdToTaskGroup.put(processorId, new TaskGroup(processorId, new ArrayList<>()));
    });

    int numTasksPerProcessor = taskModels.size() / processorLocality.size();
    Set<TaskName> assignedTasks = new HashSet<>();

    /*
     * A processor is considered under-assigned when the number of tasks assigned to it is less than
     * (number of tasks / number of processors).
     * Map the tasks to the under-assigned processors with the same locality.
     */
    for (TaskModel taskModel : taskModels) {
        LocationId taskLocationId = taskLocality.get(taskModel.getTaskName());
        if (taskLocationId != null) {
            List<String> processorIds = locationIdToProcessors.getOrDefault(taskLocationId, new ArrayList<>());
            for (String processorId : processorIds) {
                TaskGroup taskGroup = processorIdToTaskGroup.get(processorId);
                if (taskGroup.size() < numTasksPerProcessor) {
                    taskGroup.addTaskName(taskModel.getTaskName().getTaskName());
                    assignedTasks.add(taskModel.getTaskName());
                    break;
                }
            }
        }
    }

    /*
     * In some scenarios, a task either might not have any previous locality or might not have any
     * processor that maps to its previous locality. This cyclic iterator over processorIds helps
     * assign processorIds to such tasks in a round-robin fashion.
     */
    Iterator<String> processorIdsCyclicIterator = Iterators.cycle(processorLocality.keySet());

    // Order the task groups so that a group is chosen deterministically for unassigned tasks.
    List<TaskGroup> taskGroups = new ArrayList<>(processorIdToTaskGroup.values());
    taskGroups.sort(Comparator.comparing(TaskGroup::getContainerId));

    /*
     * Map the tasks left over from the previous stage to any under-assigned processor.
     * When no under-assigned processor exists, map them to any of the available processors
     * in a round-robin manner.
     */
    for (TaskModel taskModel : taskModels) {
        if (!assignedTasks.contains(taskModel.getTaskName())) {
            Optional<TaskGroup> underAssignedTaskGroup = taskGroups.stream()
                    .filter(taskGroup -> taskGroup.size() < numTasksPerProcessor).findFirst();
            if (underAssignedTaskGroup.isPresent()) {
                underAssignedTaskGroup.get().addTaskName(taskModel.getTaskName().getTaskName());
            } else {
                TaskGroup taskGroup = processorIdToTaskGroup.get(processorIdsCyclicIterator.next());
                taskGroup.addTaskName(taskModel.getTaskName().getTaskName());
            }
            assignedTasks.add(taskModel.getTaskName());
        }
    }

    return TaskGroup.buildContainerModels(taskModels, taskGroups);
}
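Both multimap-building loops in this example use getOrDefault(key, new ArrayList<>()) followed by put, which allocates a throwaway list whenever the key already exists. Map.computeIfAbsent builds the same grouping without the spare allocation or the explicit put. A minimal sketch with invented names, not Samza's:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class GroupingDemo {
    public static void main(String[] args) {
        Map<String, List<String>> locationToProcessors = new HashMap<>();
        String[][] pairs = {{"rack-1", "p0"}, {"rack-2", "p1"}, {"rack-1", "p2"}};

        for (String[] pair : pairs) {
            // computeIfAbsent creates the list only on the first sighting of the key
            // and returns the existing list afterwards, so no explicit put is needed.
            locationToProcessors.computeIfAbsent(pair[0], k -> new ArrayList<>()).add(pair[1]);
        }

        System.out.println(locationToProcessors); // e.g. {rack-1=[p0, p2], rack-2=[p1]} (HashMap order not guaranteed)
    }
}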
From source file:org.lightjason.examples.pokemon.CConfiguration.java
/**
 * creates the moving agent based on the configuration
 *
 * @param p_agentconfiguration subsection for agent configuration
 * @param p_elements element list
 * @param p_agentprint disables / enables agent printing
 * @throws IOException thrown on ASL reading error
 */
@SuppressWarnings("unchecked")
private void createAgent(final Map<String, Object> p_agentconfiguration, final List<IAgent> p_elements,
        final boolean p_agentprint) throws IOException {
    final Map<String, IAgentGenerator<IAgent>> l_agentgenerator = new HashMap<>();
    final Set<IAction> l_action = Collections.unmodifiableSet(Stream
            .concat(p_agentprint ? Stream.of() : Stream.of(new CEmptyPrint()),
                    Stream.concat(org.lightjason.agentspeak.common.CCommon.actionsFromPackage(),
                            org.lightjason.agentspeak.common.CCommon.actionsFromAgentClass(CPokemon.class)))
            .collect(Collectors.toSet()));

    p_agentconfiguration.entrySet().forEach(i -> {
        final Map<String, Object> l_parameter = (Map<String, Object>) i.getValue();

        // read the ASL item from the configuration and build the path relative to the configuration
        final String l_asl = m_configurationpath + ((String) l_parameter.getOrDefault("asl", "")).trim();

        try (
                // open filestream of the ASL content
                final InputStream l_stream = new URL(l_asl).openStream()) {
            // get the existing agent generator or create a new one based on the ASL,
            // and push it back if the generator does not exist yet
            final IAgentGenerator<IAgent> l_generator = l_agentgenerator.getOrDefault(l_asl,
                    new CPokemonGenerator(m_environment, l_stream, l_action, IAggregation.EMPTY));
            l_agentgenerator.putIfAbsent(l_asl, l_generator);

            // generate agents and add them to the element list
            l_generator.generatemultiple((int) l_parameter.getOrDefault("number", 0),
                    //EForceFactory.valueOf( ( (String) l_parameter.getOrDefault( "force", "" ) ).trim().toUpperCase() ).get(),
                    (String) l_parameter.getOrDefault("pokemon", "")).sequential().forEach(p_elements::add);
        } catch (final Exception l_exception) {
            System.err.println(MessageFormat.format("error on agent generation: {0}", l_exception));
        }
    });
}
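Worth noting in this example: getOrDefault evaluates its default argument eagerly, so a new CPokemonGenerator is constructed even when a cached generator already exists for the ASL path. When the default is expensive, computeIfAbsent defers construction to a factory that only runs on a miss. A small sketch with an invented cache:

import java.util.HashMap;
import java.util.Map;

public class LazyDefaultDemo {
    static int constructions = 0;

    // stand-in for an expensive object such as the agent generator above
    static String expensiveBuild(String key) {
        constructions++;
        return "generator-for-" + key;
    }

    public static void main(String[] args) {
        Map<String, String> cache = new HashMap<>();
        cache.put("a.asl", "generator-for-a.asl");

        // getOrDefault: expensiveBuild runs even though the key is present.
        String viaGetOrDefault = cache.getOrDefault("a.asl", expensiveBuild("a.asl"));

        // computeIfAbsent: the factory is only invoked on a miss, so it never runs here.
        String viaCompute = cache.computeIfAbsent("a.asl", LazyDefaultDemo::expensiveBuild);

        System.out.println(constructions); // 1 -- only the getOrDefault call paid the cost
    }
}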
From source file:org.apache.storm.utils.Utils.java
/**
 * Get a map of version to worker main from the conf Config.SUPERVISOR_WORKER_VERSION_MAIN_MAP.
 * @param conf what to read it out of
 * @return the map
 */
public static NavigableMap<SimpleVersion, String> getConfiguredWorkerMainVersions(Map<String, Object> conf) {
    TreeMap<SimpleVersion, String> ret = new TreeMap<>();
    Map<String, String> fromConf = (Map<String, String>) conf
            .getOrDefault(Config.SUPERVISOR_WORKER_VERSION_MAIN_MAP, Collections.emptyMap());
    for (Map.Entry<String, String> entry : fromConf.entrySet()) {
        ret.put(new SimpleVersion(entry.getKey()), entry.getValue());
    }
    ret.put(VersionInfo.OUR_VERSION, "org.apache.storm.daemon.worker.Worker");
    return ret;
}
From source file:org.apache.storm.utils.Utils.java
/**
 * Get a map of version to worker log writer from the conf Config.SUPERVISOR_WORKER_VERSION_LOGWRITER_MAP.
 * @param conf what to read it out of
 * @return the map
 */
public static NavigableMap<SimpleVersion, String> getConfiguredWorkerLogWriterVersions(
        Map<String, Object> conf) {
    TreeMap<SimpleVersion, String> ret = new TreeMap<>();
    Map<String, String> fromConf = (Map<String, String>) conf
            .getOrDefault(Config.SUPERVISOR_WORKER_VERSION_LOGWRITER_MAP, Collections.emptyMap());
    for (Map.Entry<String, String> entry : fromConf.entrySet()) {
        ret.put(new SimpleVersion(entry.getKey()), entry.getValue());
    }
    ret.put(VersionInfo.OUR_VERSION, "org.apache.storm.LogWriter");
    return ret;
}
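Both Storm helpers use getOrDefault(key, Collections.emptyMap()) so the loop that follows never needs a null check; the unchecked cast is the caller's assertion about the config's shape. A minimal sketch of the same defensive read (the config key string here is illustrative, not Storm's actual constant):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class ConfigReadDemo {
    @SuppressWarnings("unchecked")
    public static void main(String[] args) {
        Map<String, Object> conf = new HashMap<>(); // key deliberately unset

        // An absent key yields an empty map instead of null, so iteration is safe.
        Map<String, String> versions = (Map<String, String>) conf
                .getOrDefault("supervisor.worker.version.map", Collections.emptyMap());

        versions.forEach((version, main) -> System.out.println(version + " -> " + main));
        System.out.println("entries: " + versions.size()); // entries: 0
    }
}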
From source file:com.homeadvisor.kafdrop.config.ServiceDiscoveryConfiguration.java
public Map<String, Object> serviceDetails(Integer serverPort) {
    Map<String, Object> details = new LinkedHashMap<>();

    Optional.ofNullable(infoEndpoint.invoke())
            .ifPresent(infoMap -> Optional.ofNullable((Map<String, Object>) infoMap.get("build"))
                    .ifPresent(buildInfo -> {
                        details.put("serviceName", buildInfo.get("artifact"));
                        details.put("serviceDescription", buildInfo.get("description"));
                        details.put("serviceVersion", buildInfo.get("version"));
                    }));

    // Fall back to "kafdrop" when the build info above did not supply a service name.
    final String name = (String) details.getOrDefault("serviceName", "kafdrop");

    String host;
    try {
        host = InetAddress.getLocalHost().getHostName();
    } catch (UnknownHostException e) {
        host = "<unknown>";
    }

    details.put("id", Stream.of(name, host, UUID.randomUUID().toString()).collect(Collectors.joining("_")));
    details.put("name", name);
    details.put("host", host);
    details.put("jmxPort", JmxUtils.getJmxPort(environment));
    details.put("jmxHealthMBean", jmxDomain + ":name=" + healthCheckBeanName() + ",type="
            + ClassUtils.getShortName(HealthCheckConfiguration.HealthCheck.class));
    details.put("port", serverPort);

    return details;
}
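This example mixes two fallback styles: Optional.ofNullable for the nested build-info map and getOrDefault for the flat lookup. For a plain map read with a constant fallback, getOrDefault is the lighter option, though the two differ on present-but-null values. A side-by-side sketch:

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

public class FallbackDemo {
    public static void main(String[] args) {
        Map<String, Object> details = new HashMap<>();

        // Equivalent fallbacks for an absent "serviceName" key.
        // Note: for a key mapped to null, getOrDefault would return null,
        // while the Optional chain would still fall back to "kafdrop".
        String viaGetOrDefault = (String) details.getOrDefault("serviceName", "kafdrop");
        String viaOptional = Optional.ofNullable((String) details.get("serviceName")).orElse("kafdrop");

        System.out.println(viaGetOrDefault + " " + viaOptional); // kafdrop kafdrop
    }
}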
From source file:org.zanata.rest.search.service.SearchService.java
@GET @Path("/projects") public Response searchProjects(@QueryParam("q") @DefaultValue("") String query, @DefaultValue("1") @QueryParam("page") int page, @DefaultValue("20") @QueryParam("sizePerPage") int sizePerPage, @DefaultValue("false") @QueryParam("includeVersion") boolean includeVersion) { int offset = (validatePage(page) - 1) * validatePageSize(sizePerPage); try {/*from w w w . ja va2 s. co m*/ int totalCount; List<HProject> projects; if (StringUtils.isEmpty(query)) { totalCount = projectDAO.getFilterProjectSize(false, false, true); projects = projectDAO.getOffsetList(offset, validatePageSize(sizePerPage), false, false, true); } else { totalCount = projectDAO.getQueryProjectSize(query, false); projects = projectDAO.searchProjects(query, validatePageSize(sizePerPage), offset, false); } Map<String, List<HProjectIteration>> projectSlugToVersions = Maps.newHashMap(); if (includeVersion && !projects.isEmpty()) { List<HProjectIteration> versions = projectIterationDAO.searchByProjectsExcludeObsolete(projects); versions.forEach(ver -> { String projectSlug = ver.getProject().getSlug(); List<HProjectIteration> iterations = projectSlugToVersions.getOrDefault(projectSlug, Lists.newLinkedList()); iterations.add(ver); projectSlugToVersions.put(projectSlug, iterations); }); } List<SearchResult> results = projects.stream().map(p -> { ProjectSearchResult result = new ProjectSearchResult(); result.setId(p.getSlug()); result.setStatus(p.getStatus()); result.setTitle(p.getName()); result.setDescription(p.getDescription()); if (includeVersion) { List<HProjectIteration> iterations = projectSlugToVersions.get(p.getSlug()); result.setVersions(iterations == null ? null : iterations.stream() .map(iteration -> new ProjectVersionSearchResult(iteration.getSlug(), iteration.getStatus())) .collect(Collectors.toList())); } // TODO: include contributor count when data is available return result; }).collect(Collectors.toList()); SearchResults searchResults = new SearchResults(totalCount, results, SearchResult.SearchResultType.Project); return Response.ok(searchResults).build(); } catch (ParseException e) { return Response.status(Response.Status.INTERNAL_SERVER_ERROR).build(); } }
From source file:com.netflix.spinnaker.halyard.deploy.deployment.v1.DistributedDeployer.java
private <T extends Account> void reapOrcaServerGroups(AccountDeploymentDetails<T> details,
        SpinnakerRuntimeSettings runtimeSettings, DistributedService<Orca, T> orcaService) {
    Orca orca = orcaService.connectToPrimaryService(details, runtimeSettings);
    Map<String, ActiveExecutions> executions = orca.getActiveExecutions();
    ServiceSettings orcaSettings = runtimeSettings.getServiceSettings(orcaService.getService());
    RunningServiceDetails orcaDetails = orcaService.getRunningServiceDetails(details, runtimeSettings);

    Map<String, Integer> executionsByInstance = new HashMap<>();
    executions.forEach((s, e) -> {
        String instanceId = s.split("@")[1];
        executionsByInstance.put(instanceId, e.getCount());
    });

    Map<Integer, Integer> executionsByServerGroupVersion = new HashMap<>();
    orcaDetails.getInstances().forEach((s, is) -> {
        // Instances with no reported executions count as 0 via getOrDefault.
        int count = is.stream().reduce(0, (c, i) -> c + executionsByInstance.getOrDefault(i.getId(), 0),
                (a, b) -> a + b);
        executionsByServerGroupVersion.put(s, count);
    });

    // Omit the last deployed orcas from being deleted, since they are kept around for rollbacks.
    List<Integer> allOrcas = new ArrayList<>(executionsByServerGroupVersion.keySet());
    allOrcas.sort(Integer::compareTo);

    int orcaCount = allOrcas.size();
    if (orcaCount <= MAX_REMAINING_SERVER_GROUPS) {
        return;
    }

    allOrcas = allOrcas.subList(0, orcaCount - MAX_REMAINING_SERVER_GROUPS);
    for (Integer orcaVersion : allOrcas) {
        // TODO(lwander) consult clouddriver to ensure this orca isn't enabled
        if (executionsByServerGroupVersion.get(orcaVersion) == 0) {
            DaemonTaskHandler.message("Reaping old orca instance " + orcaVersion);
            orcaService.deleteVersion(details, orcaSettings, orcaVersion);
        }
    }
}
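The three-argument reduce in this example is just an integer sum over instance ids; mapToInt(...).sum() states that more directly while keeping the getOrDefault treatment of unreported instances as zero. A small sketch with invented ids:

import java.util.HashMap;
import java.util.Map;
import java.util.stream.Stream;

public class SumDemo {
    public static void main(String[] args) {
        Map<String, Integer> executionsByInstance = new HashMap<>();
        executionsByInstance.put("i-1", 3);
        executionsByInstance.put("i-2", 4);

        // "i-3" has no entry and contributes 0 thanks to getOrDefault.
        int count = Stream.of("i-1", "i-2", "i-3")
                .mapToInt(id -> executionsByInstance.getOrDefault(id, 0))
                .sum();

        System.out.println(count); // 7
    }
}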
From source file:org.codice.ddf.catalog.transformer.zip.ZipCompression.java
/**
 * Transforms a SourceResponse with a list of {@link Metacard}s into a {@link BinaryContent} item
 * with an {@link InputStream}. This transformation expects a key-value pair
 * "fileName"-zipFileName to be present.
 *
 * @param sourceResponse - a SourceResponse with a list of {@link Metacard}s to compress
 * @param arguments - a map of arguments to use for processing. This method expects "fileName" to
 *     be set
 * @return - a {@link BinaryContent} item with the {@link InputStream} for the Zip file
 * @throws CatalogTransformerException when the transformation fails
 */
@Override
public BinaryContent transform(SourceResponse sourceResponse, Map<String, Serializable> arguments)
        throws CatalogTransformerException {
    if (sourceResponse == null || CollectionUtils.isEmpty(sourceResponse.getResults())) {
        throw new CatalogTransformerException(
                "The source response does not contain any metacards to transform.");
    }

    if (arguments.get(TRANSFORMER_ID) == null) {
        throw new CatalogTransformerException("Transformer ID cannot be null");
    }

    // The null check above already guarantees a value; getOrDefault here simply
    // keeps the toString() call NPE-safe when read in isolation.
    String transformerId = arguments.getOrDefault(TRANSFORMER_ID, "").toString();
    if (StringUtils.isBlank(transformerId)) {
        throw new CatalogTransformerException("A valid transformer ID must be provided.");
    }

    InputStream inputStream = createZip(sourceResponse, transformerId);
    return new BinaryContentImpl(inputStream, mimeType);
}