Example usage for java.util.stream Collectors toMap

List of usage examples for java.util.stream Collectors toMap

Introduction

This page collects usage examples for java.util.stream Collectors.toMap.

Prototype

public static <T, K, U> Collector<T, ?, Map<K, U>> toMap(Function<? super T, ? extends K> keyMapper,
        Function<? super T, ? extends U> valueMapper) 

Document

Returns a Collector that accumulates elements into a Map whose keys and values are the result of applying the provided mapping functions to the input elements.
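
A minimal, self-contained sketch of this two-argument overload, using hypothetical data rather than code from any of the projects below: it keys each word by itself and maps it to its length. Keep in mind that this overload throws an IllegalStateException if two elements produce the same key; the three-argument overload takes a merge function for that case (sketched further down, after the Halyard example).

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class ToMapBasics {
    public static void main(String[] args) {
        List<String> words = List.of("alpha", "beta", "gamma");

        // Key mapper: the element itself; value mapper: its length.
        Map<String, Integer> lengthByWord = words.stream()
                .collect(Collectors.toMap(w -> w, String::length));

        // Prints something like {alpha=5, beta=4, gamma=5}; toMap gives no guarantee
        // about the Map implementation or its iteration order.
        System.out.println(lengthByWord);
    }
}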

Usage

From source file:org.ligoj.app.plugin.prov.aws.in.ProvAwsPriceImportResource.java

/**
 * Install the AWS EFS prices from the CSV file.
 *
 * @param context
 *            The update context.
 */
private void installEfsPrices(final UpdateContext context) throws IOException, URISyntaxException {
    log.info("AWS EFS prices ...");
    importCatalogResource.nextStep(context.getNode().getId(), t -> t.setPhase("efs"));

    // Track the created instance to cache partial costs
    final ProvStorageType efs = stRepository
            .findAllBy(BY_NODE, context.getNode().getId(), new String[] { "name" }, "efs").get(0);
    final Map<ProvLocation, ProvStoragePrice> previous = spRepository.findAllBy("type", efs).stream()
            .collect(Collectors.toMap(ProvStoragePrice::getLocation, Function.identity()));

    int priceCounter = 0;
    // Get the remote prices stream
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(
            new URI(configuration.get(CONF_URL_EFS_PRICES, EFS_PRICES)).toURL().openStream()))) {
        // Pipe to the CSV reader
        final CsvForBeanEfs csvReader = new CsvForBeanEfs(reader);

        // Build the AWS instance prices from the CSV
        AwsCsvPrice csv = null;
        do {
            // Read the next one
            csv = csvReader.read();
            if (csv == null) {
                // EOF
                break;
            }
            final ProvLocation location = getRegionByHumanName(context, csv.getLocation());
            if (location != null) {
                // Supported location
                instalEfsPrice(efs, previous, csv, location);
                priceCounter++;
            }
        } while (true);
    } finally {
        // Report
        log.info("AWS EFS finished : {} prices", priceCounter);
        nextStep(context, null, 1);
    }
}
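
The previous map above shows a common toMap idiom: indexing existing entities by one of their properties, with Function.identity() as the value mapper. Below is a minimal sketch of that idiom with a hypothetical Price record standing in for the JPA entity (not code from the plugin).

import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

public class IndexByKey {
    // Hypothetical stand-in for an entity such as ProvStoragePrice.
    record Price(String location, double cost) {}

    public static void main(String[] args) {
        List<Price> prices = List.of(new Price("eu-west-1", 0.33), new Price("us-east-1", 0.30));

        // Index each entity by its location; Function.identity() keeps the entity itself as the value.
        Map<String, Price> byLocation = prices.stream()
                .collect(Collectors.toMap(Price::location, Function.identity()));

        System.out.println(byLocation.get("eu-west-1")); // Price[location=eu-west-1, cost=0.33]
    }
}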

From source file:com.epam.catgenome.manager.vcf.VcfManager.java

/**
 * Creates a feature index for {@link VcfFile}. If an index already exists, it will be deleted and created
 * from scratch.
 * @param vcfFileId an ID of VCF file to reindex.
 * @throws FeatureIndexException if an error occurred while writing index
 */
public VcfFile reindexVcfFile(long vcfFileId) throws FeatureIndexException {
    VcfFile vcfFile = vcfFileManager.loadVcfFile(vcfFileId);
    Reference reference = referenceGenomeManager.loadReferenceGenome(vcfFile.getReferenceId());
    Map<String, Chromosome> chromosomeMap = reference.getChromosomes().stream()
            .collect(Collectors.toMap(BaseEntity::getName, chromosome -> chromosome));
    List<GeneFile> geneFiles = reference.getGeneFile() != null
            ? Collections.singletonList(reference.getGeneFile())
            : Collections.emptyList();

    try {
        fileManager.deleteFileFeatureIndex(vcfFile);

        try (FeatureReader<VariantContext> reader = AbstractFeatureReader.getFeatureReader(vcfFile.getPath(),
                new VCFCodec(), false)) {
            VcfFilterInfo info = getFiltersInfo(reader);
            featureIndexManager.makeIndexForVcfReader(vcfFile, reader, geneFiles, chromosomeMap, info);
        }
    } catch (IOException e) {
        throw new FeatureIndexException(vcfFile, e);
    }

    return vcfFile;
}

From source file:com.haulmont.cuba.web.gui.components.WebCalendar.java

@Override
public Map<Month, String> getMonthNames() {
    List<String> months = Arrays.asList(component.getMonthNamesShort());

    return months.stream().collect(Collectors.toMap((String m) -> Month.of(months.indexOf(m) + 1), m -> m));
}
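
A side note on the example above: months.indexOf(m) performs a linear scan for every element. An index-based stream expresses the same month-to-name mapping without those lookups; the sketch below is a hypothetical rewrite with hard-coded short names, not code from the CUBA source.

import java.time.Month;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class MonthNames {
    public static void main(String[] args) {
        List<String> shortNames = List.of("Jan", "Feb", "Mar", "Apr", "May", "Jun",
                "Jul", "Aug", "Sep", "Oct", "Nov", "Dec");

        // Iterate the indexes once instead of calling indexOf for every element.
        Map<Month, String> byMonth = IntStream.range(0, shortNames.size())
                .boxed()
                .collect(Collectors.toMap(i -> Month.of(i + 1), shortNames::get));

        System.out.println(byMonth.get(Month.MARCH)); // Mar
    }
}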

From source file:com.spankingrpgs.model.GameState.java

public Map<String, Gender> getCharacterGender() {
    return characters.entrySet().stream()
            .filter(nameCharacter -> nameCharacter.getValue() != getPlayerCharacter()).collect(
                    Collectors.toMap(Map.Entry::getKey, nameCharacter -> nameCharacter.getValue().getGender()));
}
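
Streaming entrySet() and collecting with Map.Entry::getKey plus a value-transforming lambda, as above, is the usual way to derive one map from another. A minimal sketch with hypothetical types:

import java.util.Map;
import java.util.stream.Collectors;

public class RemapValues {
    enum Gender { FEMALE, MALE }
    record GameCharacter(String name, Gender gender) {}

    public static void main(String[] args) {
        Map<String, GameCharacter> characters = Map.of(
                "alice", new GameCharacter("alice", Gender.FEMALE),
                "bob", new GameCharacter("bob", Gender.MALE));

        // Keep the keys, replace each value with a derived property.
        Map<String, Gender> genderByName = characters.entrySet().stream()
                .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().gender()));

        System.out.println(genderByName.get("bob")); // MALE
    }
}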

From source file:com.netflix.spinnaker.halyard.deploy.spinnaker.v1.service.distributed.kubernetes.v2.KubernetesV2Service.java

default List<ConfigSource> stageConfig(AccountDeploymentDetails<KubernetesAccount> details,
        GenerateService.ResolvedConfiguration resolvedConfiguration) {
    Map<String, Profile> profiles = resolvedConfiguration.getProfilesForService(getService().getType());
    String stagingPath = getSpinnakerStagingPath(details.getDeploymentName());
    SpinnakerRuntimeSettings runtimeSettings = resolvedConfiguration.getRuntimeSettings();

    Map<String, Set<Profile>> profilesByDirectory = new HashMap<>();
    List<String> requiredFiles = new ArrayList<>();
    List<ConfigSource> configSources = new ArrayList<>();
    String secretNamePrefix = getServiceName() + "-files";
    String namespace = getNamespace(resolvedConfiguration.getServiceSettings(getService()));
    KubernetesAccount account = details.getAccount();

    for (SidecarService sidecarService : getSidecars(runtimeSettings)) {
        for (Profile profile : sidecarService.getSidecarProfiles(resolvedConfiguration, getService())) {
            if (profile == null) {
                throw new HalException(Problem.Severity.FATAL,
                        "Service " + sidecarService.getService().getCanonicalName()
                                + " is required but was not supplied for deployment.");
            }

            profiles.put(profile.getName(), profile);
            requiredFiles.addAll(profile.getRequiredFiles());
        }
    }

    for (Entry<String, Profile> entry : profiles.entrySet()) {
        Profile profile = entry.getValue();
        String outputFile = profile.getOutputFile();
        String mountPoint = Paths.get(outputFile).getParent().toString();

        Set<Profile> profilesInDirectory = profilesByDirectory.getOrDefault(mountPoint, new HashSet<>());
        profilesInDirectory.add(profile);

        requiredFiles.addAll(profile.getRequiredFiles());
        profilesByDirectory.put(mountPoint, profilesInDirectory);
    }

    for (Entry<String, Set<Profile>> entry : profilesByDirectory.entrySet()) {
        Set<Profile> profilesInDirectory = entry.getValue();
        String mountPath = entry.getKey();
        List<SecretMountPair> files = profilesInDirectory.stream().map(p -> {
            File input = new File(p.getStagedFile(stagingPath));
            File output = new File(p.getOutputFile());
            return new SecretMountPair(input, output);
        }).collect(Collectors.toList());

        Map<String, String> env = profilesInDirectory.stream().map(Profile::getEnv).map(Map::entrySet)
                .flatMap(Collection::stream).collect(Collectors.toMap(Entry::getKey, Entry::getValue));

        String name = KubernetesV2Utils.createSecret(account, namespace, getService().getCanonicalName(),
                secretNamePrefix, files);
        configSources.add(new ConfigSource().setId(name).setMountPath(mountPath).setEnv(env));
    }

    if (!requiredFiles.isEmpty()) {
        List<SecretMountPair> files = requiredFiles.stream().map(File::new).map(SecretMountPair::new)
                .collect(Collectors.toList());

        String name = KubernetesV2Utils.createSecret(account, namespace, getService().getCanonicalName(),
                secretNamePrefix, files);
        configSources.add(new ConfigSource().setId(name).setMountPath(files.get(0).getContents().getParent()));
    }

    return configSources;
}
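
One caveat about the env map built above: the two-argument toMap throws an IllegalStateException when two profiles contribute the same environment variable name. The three-argument overload takes a merge function for exactly that case. The sketch below uses hypothetical entries and is not Halyard's actual behaviour, just an illustration of the overload.

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class MergeDuplicateKeys {
    public static void main(String[] args) {
        // Hypothetical env entries gathered from two profiles; JAVA_OPTS collides.
        List<Map.Entry<String, String>> entries = List.of(
                Map.entry("JAVA_OPTS", "-Xmx512m"),
                Map.entry("PROFILE", "a"),
                Map.entry("JAVA_OPTS", "-Xmx1g"));

        // The third argument resolves key collisions; here the later value wins.
        Map<String, String> merged = entries.stream()
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue,
                        (first, second) -> second));

        System.out.println(merged.get("JAVA_OPTS")); // -Xmx1g
    }
}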

From source file:com.ikanow.aleph2.search_service.elasticsearch.utils.TestElasticsearchIndexUtils.java

@Test
public void test_columnarMapping_standalone() throws JsonProcessingException, IOException {
    final String both = Resources.toString(
            Resources.getResource(
                    "com/ikanow/aleph2/search_service/elasticsearch/utils/full_mapping_test.json"),
            Charsets.UTF_8);
    final JsonNode both_json = _mapper.readTree(both);

    final LinkedHashMap<Either<String, Tuple2<String, String>>, JsonNode> field_lookups = ElasticsearchIndexUtils
            .parseDefaultMapping(both_json, Optional.empty(), Optional.empty(), Optional.empty(),
                    _config.search_technology_override(), _mapper);

    //DEBUG
    //      System.out.println("(Field lookups = " + field_lookups + ")");
    //      System.out.println("(Analyzed default = " + _config.columnar_technology_override().default_field_data_analyzed() + ")");
    //      System.out.println("(NotAnalyzed default = " + _config.columnar_technology_override().default_field_data_notanalyzed() + ")");

    // 1) Mappings - field name specified (include)
    {
        final Stream<String> test_stream1 = Stream.of("@version", "field_not_present", "@timestamp");

        final Stream<Tuple2<Either<String, Tuple2<String, String>>, JsonNode>> test_stream_result_1 = ElasticsearchIndexUtils
                .createFieldIncludeLookups(test_stream1, fn -> Either.left(fn), field_lookups,
                        _mapper.convertValue(
                                _config.columnar_technology_override().enabled_field_data_notanalyzed(),
                                JsonNode.class),
                        _mapper.convertValue(
                                _config.columnar_technology_override().enabled_field_data_analyzed(),
                                JsonNode.class),
                        false, _config.search_technology_override(), Collections.emptyMap(), _mapper,
                        "_default_");

        final Map<Either<String, Tuple2<String, String>>, JsonNode> test_map_result_1 = test_stream_result_1
                .collect(Collectors.toMap(t2 -> t2._1(), t2 -> t2._2()));
        final String test_map_expected_1 = "{Left(@timestamp)={'type':'date','fielddata':{}}, Right((field_not_present,*))={'mapping':{'index':'not_analyzed','type':'{dynamic_type}','fielddata':{'format':'doc_values'}},'path_match':'field_not_present','match_mapping_type':'*'}, Left(@version)={'type':'string','index':'analyzed','fielddata':{'format':'paged_bytes'}}}";
        assertEquals(test_map_expected_1, strip(test_map_result_1.toString()));

        //DEBUG
        //System.out.println("(Field column lookups = " + test_map_result_1 + ")");
    }

    // 2) Mappings - field pattern specified (include)
    {
        final Stream<String> test_stream1 = Stream.of("*", "test*");

        final Stream<Tuple2<Either<String, Tuple2<String, String>>, JsonNode>> test_stream_result_1 = ElasticsearchIndexUtils
                .createFieldIncludeLookups(test_stream1, fn -> Either.right(Tuples._2T(fn, "*")), field_lookups,
                        _mapper.convertValue(
                                _config.columnar_technology_override().enabled_field_data_notanalyzed(),
                                JsonNode.class),
                        _mapper.convertValue(
                                _config.columnar_technology_override().enabled_field_data_analyzed(),
                                JsonNode.class),
                        true, _config.search_technology_override(), Collections.emptyMap(), _mapper,
                        "_default_");

        final Map<Either<String, Tuple2<String, String>>, JsonNode> test_map_result_1 = test_stream_result_1
                .collect(Collectors.toMap(t2 -> t2._1(), t2 -> t2._2()));

        final String test_map_expected_1 = "{Right((test*,*))={'mapping':{'type':'string','index':'analyzed','omit_norms':true,'fields':{'raw':{'type':'string','index':'not_analyzed','ignore_above':256,'fielddata':{'format':'doc_values'}}},'fielddata':{'format':'paged_bytes'}},'path_match':'test*','match_mapping_type':'*'}, Right((*,*))={'mapping':{'index':'not_analyzed','type':'{dynamic_type}','fielddata':{'format':'doc_values'}},'path_match':'*','match_mapping_type':'*'}}";
        assertEquals(test_map_expected_1, strip(test_map_result_1.toString()));

        //DEBUG
        //System.out.println("(Field column lookups = " + test_map_result_1 + ")");         
    }

    // 3) Mappings - field name specified (exclude)
    {
        final Stream<String> test_stream1 = Stream.of("@version", "field_not_present", "@timestamp");

        final Stream<Tuple2<Either<String, Tuple2<String, String>>, JsonNode>> test_stream_result_1 = ElasticsearchIndexUtils
                .createFieldExcludeLookups(test_stream1, fn -> Either.left(fn), field_lookups,
                        _config.search_technology_override(), Collections.emptyMap(), _mapper, "_default_");

        final Map<Either<String, Tuple2<String, String>>, JsonNode> test_map_result_1 = test_stream_result_1
                .collect(Collectors.toMap(t2 -> t2._1(), t2 -> t2._2()));
        final String test_map_expected_1 = "{Left(@timestamp)={'type':'date','fielddata':{'format':'disabled'}}, Right((field_not_present,*))={'mapping':{'index':'not_analyzed','type':'{dynamic_type}','fielddata':{'format':'disabled'}},'path_match':'field_not_present','match_mapping_type':'*'}, Left(@version)={'type':'string','index':'analyzed','fielddata':{'format':'disabled'}}}";
        assertEquals(test_map_expected_1, strip(test_map_result_1.toString()));

        //DEBUG
        //System.out.println("(Field column lookups = " + test_map_result_1 + ")");
    }

    // 4) Mappings - field type specified (exclude)
    {
        final Stream<String> test_stream1 = Stream.of("*", "test*");

        final Stream<Tuple2<Either<String, Tuple2<String, String>>, JsonNode>> test_stream_result_1 = ElasticsearchIndexUtils
                .createFieldExcludeLookups(test_stream1, fn -> Either.right(Tuples._2T(fn, "*")), field_lookups,
                        _config.search_technology_override(), Collections.emptyMap(), _mapper, "_default_");

        final Map<Either<String, Tuple2<String, String>>, JsonNode> test_map_result_1 = test_stream_result_1
                .collect(Collectors.toMap(t2 -> t2._1(), t2 -> t2._2()));

        final String test_map_expected_1 = "{Right((test*,*))={'mapping':{'type':'string','index':'analyzed','omit_norms':true,'fields':{'raw':{'type':'string','index':'not_analyzed','ignore_above':256,'fielddata':{'format':'disabled'}}},'fielddata':{'format':'disabled'}},'path_match':'test*','match_mapping_type':'*'}, Right((*,*))={'mapping':{'index':'not_analyzed','type':'{dynamic_type}','fielddata':{'format':'disabled'}},'path_match':'*','match_mapping_type':'*'}}";
        assertEquals(test_map_expected_1, strip(test_map_result_1.toString()));

        //DEBUG
        //System.out.println("(Field column lookups = " + test_map_result_1 + ")");         

    }

    // 5) Check with type specific fielddata formats
    {
        assertEquals(2, _config.columnar_technology_override().enabled_field_data_analyzed().size());
        assertEquals(2, _config.columnar_technology_override().enabled_field_data_notanalyzed().size());
        assertTrue("Did override settings", _config.columnar_technology_override().enabled_field_data_analyzed()
                .containsKey("test_type_123"));
        assertTrue("Did override settings", _config.columnar_technology_override()
                .enabled_field_data_notanalyzed().containsKey("test_type_123"));

        final Stream<String> test_stream1 = Stream.of("test_type_123");

        final Stream<Tuple2<Either<String, Tuple2<String, String>>, JsonNode>> test_stream_result_1 = ElasticsearchIndexUtils
                .createFieldIncludeLookups(test_stream1, fn -> Either.left(fn), field_lookups,
                        _mapper.convertValue(
                                _config.columnar_technology_override().enabled_field_data_notanalyzed(),
                                JsonNode.class),
                        _mapper.convertValue(
                                _config.columnar_technology_override().enabled_field_data_analyzed(),
                                JsonNode.class),
                        false, _config.search_technology_override(), Collections.emptyMap(), _mapper,
                        "test_type_123");

        final Map<Either<String, Tuple2<String, String>>, JsonNode> test_map_result_1 = test_stream_result_1
                .collect(Collectors.toMap(t2 -> t2._1(), t2 -> t2._2()));

        final String test_map_expected_1 = "{Right((test_type_123,*))={'mapping':{'index':'not_analyzed','type':'{dynamic_type}','fielddata':{'format':'test2'}},'path_match':'test_type_123','match_mapping_type':'*'}}";
        assertEquals(test_map_expected_1, strip(test_map_result_1.toString()));

    }
}

From source file:org.obiba.mica.micaConfig.rest.MicaConfigResource.java

@GET
@Path("/languages")
@Timed
@RequiresAuthentication
public Map<String, String> getAvailableLanguages(@QueryParam("locale") @DefaultValue("en") String languageTag) {
    Locale locale = Locale.forLanguageTag(languageTag);
    return Arrays.stream(Locale.getISOLanguages())
            .collect(Collectors.toMap(lang -> lang, lang -> new Locale(lang).getDisplayLanguage(locale)));
}
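
The key mapper lang -> lang above is the "key is the element itself" case, which Function.identity() expresses directly. A minimal sketch reduced to a few hard-coded language codes (hypothetical, not the Mica resource):

import java.util.Locale;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class DisplayLanguages {
    public static void main(String[] args) {
        Locale display = Locale.FRENCH;

        // Each ISO code maps to its display name in the requested locale.
        // new Locale(String) mirrors the original code; newer JDKs also offer Locale.of.
        Map<String, String> names = Stream.of("en", "de", "fr")
                .collect(Collectors.toMap(Function.identity(),
                        code -> new Locale(code).getDisplayLanguage(display)));

        System.out.println(names.get("de")); // "allemand" with standard JDK locale data
    }
}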

From source file:eu.itesla_project.modules.validation.OfflineValidationTool.java

@Override
public void run(CommandLine line) throws Exception {
    OfflineConfig config = OfflineConfig.load();
    String rulesDbName = line.hasOption("rules-db-name") ? line.getOptionValue("rules-db-name")
            : OfflineConfig.DEFAULT_RULES_DB_NAME;
    String workflowId = line.getOptionValue("workflow");
    Path outputDir = Paths.get(line.getOptionValue("output-dir"));
    double purityThreshold = line.hasOption("purity-threshold")
            ? Double.parseDouble(line.getOptionValue("purity-threshold"))
            : DEFAULT_PURITY_THRESHOLD;
    Set<Country> countries = Arrays.stream(line.getOptionValue("base-case-countries").split(","))
            .map(Country::valueOf).collect(Collectors.toSet());
    Interval histoInterval = Interval.parse(line.getOptionValue("history-interval"));
    boolean mergeOptimized = line.hasOption("merge-optimized");
    CaseType caseType = CaseType.valueOf(line.getOptionValue("case-type"));

    CaseRepositoryFactory caseRepositoryFactory = config.getCaseRepositoryFactoryClass().newInstance();
    RulesDbClientFactory rulesDbClientFactory = config.getRulesDbClientFactoryClass().newInstance();
    ContingenciesAndActionsDatabaseClient contingencyDb = config.getContingencyDbClientFactoryClass()
            .newInstance().create();
    SimulatorFactory simulatorFactory = config.getSimulatorFactoryClass().newInstance();
    LoadFlowFactory loadFlowFactory = config.getLoadFlowFactoryClass().newInstance();
    MergeOptimizerFactory mergeOptimizerFactory = config.getMergeOptimizerFactoryClass().newInstance();

    SimulationParameters simulationParameters = SimulationParameters.load();

    try (ComputationManager computationManager = new LocalComputationManager();
            RulesDbClient rulesDb = rulesDbClientFactory.create(rulesDbName);
            CsvMetricsDb metricsDb = new CsvMetricsDb(outputDir, true, "metrics")) {

        CaseRepository caseRepository = caseRepositoryFactory.create(computationManager);

        Queue<DateTime> dates = Queues.synchronizedDeque(
                new ArrayDeque<>(caseRepository.dataAvailable(caseType, countries, histoInterval)));

        Map<String, Map<RuleId, ValidationStatus>> statusPerRulePerCase = Collections
                .synchronizedMap(new TreeMap<>());
        Map<String, Map<RuleId, Map<HistoDbAttributeId, Object>>> valuesPerRulePerCase = Collections
                .synchronizedMap(new TreeMap<>());

        int cores = Runtime.getRuntime().availableProcessors();
        ExecutorService executorService = Executors.newFixedThreadPool(cores);
        try {
            List<Future<?>> tasks = new ArrayList<>(cores);
            for (int i = 0; i < cores; i++) {
                tasks.add(executorService.submit((Runnable) () -> {
                    while (dates.size() > 0) {
                        DateTime date = dates.poll();

                        try {
                            Network network = MergeUtil.merge(caseRepository, date, caseType, countries,
                                    loadFlowFactory, 0, mergeOptimizerFactory, computationManager,
                                    mergeOptimized);

                            System.out.println("case " + network.getId() + " loaded");

                            System.out.println("running simulation on " + network.getId() + "...");

                            network.getStateManager().allowStateMultiThreadAccess(true);
                            String baseStateId = network.getId();
                            network.getStateManager().cloneState(StateManager.INITIAL_STATE_ID, baseStateId);
                            network.getStateManager().setWorkingState(baseStateId);

                            Map<RuleId, ValidationStatus> statusPerRule = new HashMap<>();
                            Map<RuleId, Map<HistoDbAttributeId, Object>> valuesPerRule = new HashMap<>();

                            LoadFlow loadFlow = loadFlowFactory.create(network, computationManager, 0);
                            LoadFlowResult loadFlowResult = loadFlow.run();

                            System.err.println("load flow terminated (" + loadFlowResult.isOk() + ") on "
                                    + network.getId());

                            if (loadFlowResult.isOk()) {
                                Stabilization stabilization = simulatorFactory.createStabilization(network,
                                        computationManager, 0);
                                ImpactAnalysis impactAnalysis = simulatorFactory.createImpactAnalysis(network,
                                        computationManager, 0, contingencyDb);
                                Map<String, Object> context = new HashMap<>();
                                stabilization.init(simulationParameters, context);
                                impactAnalysis.init(simulationParameters, context);
                                StabilizationResult stabilizationResult = stabilization.run();

                                System.err.println("stabilization terminated ("
                                        + stabilizationResult.getStatus() + ") on " + network.getId());

                                metricsDb.store(workflowId, network.getId(), "STABILIZATION",
                                        stabilizationResult.getMetrics());

                                if (stabilizationResult.getStatus() == StabilizationStatus.COMPLETED) {
                                    ImpactAnalysisResult impactAnalysisResult = impactAnalysis
                                            .run(stabilizationResult.getState());

                                    System.err.println("impact analysis terminated on " + network.getId());

                                    metricsDb.store(workflowId, network.getId(), "IMPACT_ANALYSIS",
                                            impactAnalysisResult.getMetrics());

                                    System.out.println("checking rules on " + network.getId() + "...");

                                    for (SecurityIndex securityIndex : impactAnalysisResult
                                            .getSecurityIndexes()) {
                                        for (RuleAttributeSet attributeSet : RuleAttributeSet.values()) {
                                            statusPerRule.put(new RuleId(attributeSet, securityIndex.getId()),
                                                    new ValidationStatus(null, securityIndex.isOk()));
                                        }
                                    }
                                }
                            }

                            Map<HistoDbAttributeId, Object> values = IIDM2DB
                                    .extractCimValues(network, new IIDM2DB.Config(null, false))
                                    .getSingleValueMap();
                            for (RuleAttributeSet attributeSet : RuleAttributeSet.values()) {
                                for (Contingency contingency : contingencyDb.getContingencies(network)) {
                                    List<SecurityRule> securityRules = rulesDb.getRules(workflowId,
                                            attributeSet, contingency.getId(), null);
                                    for (SecurityRule securityRule : securityRules) {
                                        SecurityRuleExpression securityRuleExpression = securityRule
                                                .toExpression(purityThreshold);
                                        SecurityRuleCheckReport checkReport = securityRuleExpression
                                                .check(values);

                                        valuesPerRule.put(securityRule.getId(),
                                                ExpressionAttributeList
                                                        .list(securityRuleExpression.getCondition()).stream()
                                                        .collect(Collectors.toMap(attributeId -> attributeId,
                                                                new Function<HistoDbAttributeId, Object>() {
                                                                    @Override
                                                                    public Object apply(
                                                                            HistoDbAttributeId attributeId) {
                                                                        Object value = values.get(attributeId);
                                                                        return value != null ? value
                                                                                : Float.NaN;
                                                                    }
                                                                })));

                                        ValidationStatus status = statusPerRule.get(securityRule.getId());
                                        if (status == null) {
                                            status = new ValidationStatus(null, null);
                                            statusPerRule.put(securityRule.getId(), status);
                                        }
                                        if (checkReport.getMissingAttributes().isEmpty()) {
                                            status.setRuleOk(checkReport.isSafe());
                                        }
                                    }
                                }
                            }

                            statusPerRulePerCase.put(network.getId(), statusPerRule);
                            valuesPerRulePerCase.put(network.getId(), valuesPerRule);
                        } catch (Exception e) {
                            LOGGER.error(e.toString(), e);
                        }
                    }
                }));
            }
            for (Future<?> task : tasks) {
                task.get();
            }
        } finally {
            executorService.shutdown();
            executorService.awaitTermination(1, TimeUnit.MINUTES);
        }

        writeCsv(statusPerRulePerCase, valuesPerRulePerCase, outputDir);
    }
}
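
The anonymous Function passed to toMap in the valuesPerRule block above can also be written as a lambda. Below is a minimal sketch of the same "identity key, defaulted value" pattern with hypothetical attribute ids; the NaN fallback matters because the standard toMap implementation rejects null values (it accumulates through Map.merge).

import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

public class DefaultedLookup {
    public static void main(String[] args) {
        Map<String, Object> values = Map.of("P_line1", 42.0f);
        List<String> attributeIds = List.of("P_line1", "Q_line1");

        // Identity key mapper; attributes missing from the value map fall back to NaN.
        Map<String, Object> byAttribute = attributeIds.stream()
                .collect(Collectors.toMap(Function.identity(),
                        id -> values.getOrDefault(id, Float.NaN)));

        System.out.println(byAttribute.get("Q_line1")); // NaN
    }
}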

From source file:com.uber.hoodie.table.HoodieCopyOnWriteTable.java

private List<HoodieCleanStat> cleanPartitionPaths(List<String> partitionsToClean, JavaSparkContext jsc) {
    int cleanerParallelism = Math.min(partitionsToClean.size(), config.getCleanerParallelism());
    logger.info("Using cleanerParallelism: " + cleanerParallelism);
    List<Tuple2<String, PartitionCleanStat>> partitionCleanStats = jsc
            .parallelize(partitionsToClean, cleanerParallelism)
            .flatMapToPair(getFilesToDeleteFunc(this, config)).repartition(cleanerParallelism) // repartition to remove skews
            .mapPartitionsToPair(deleteFilesFunc(this)).reduceByKey(
                    // merge partition level clean stats below
                    (Function2<PartitionCleanStat, PartitionCleanStat, PartitionCleanStat>) (e1, e2) -> e1
                            .merge(e2))
            .collect();

    Map<String, PartitionCleanStat> partitionCleanStatsMap = partitionCleanStats.stream()
            .collect(Collectors.toMap(e -> e._1(), e -> e._2()));

    HoodieCleanHelper cleaner = new HoodieCleanHelper(this, config);
    // Return PartitionCleanStat for each partition passed.
    return partitionsToClean.stream().map(partitionPath -> {
        PartitionCleanStat partitionCleanStat = (partitionCleanStatsMap.containsKey(partitionPath))
                ? partitionCleanStatsMap.get(partitionPath)
                : new PartitionCleanStat(partitionPath);
        return HoodieCleanStat.newBuilder().withPolicy(config.getCleanerPolicy())
                .withPartitionPath(partitionPath)
                .withEarliestCommitRetained(cleaner.getEarliestCommitToRetain())
                .withDeletePathPattern(partitionCleanStat.deletePathPatterns)
                .withSuccessfulDeletes(partitionCleanStat.successDeleteFiles)
                .withFailedDeletes(partitionCleanStat.failedDeleteFiles).build();
    }).collect(Collectors.toList());
}
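
Collecting a list of pairs back into a map, as done here with the Spark Tuple2 results, has the same shape whatever the pair type. A minimal sketch with a plain record standing in for scala.Tuple2:

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class PairsToMap {
    // Hypothetical stand-in for the Tuple2 pairs collected from Spark.
    record Pair<A, B>(A _1, B _2) {}

    public static void main(String[] args) {
        List<Pair<String, Long>> stats = List.of(
                new Pair<>("2019/03/01", 12L),
                new Pair<>("2019/03/02", 7L));

        // Re-key the pairs into a lookup map, mirroring e -> e._1(), e -> e._2().
        Map<String, Long> byPartition = stats.stream()
                .collect(Collectors.toMap(Pair::_1, Pair::_2));

        System.out.println(byPartition.get("2019/03/02")); // 7
    }
}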

From source file:org.ligoj.app.plugin.vm.azure.VmAzurePluginResource.java

/**
 * Return the available Azure sizes.
 *
 * @param azSub
 *            The related Azure subscription identifier. It seems to duplicate the one inside the given
 *            parameters, but is required for the cache key.
 * @param location
 *            The target location, required by Azure web service
 * @param parameters
 *            The credentials parameters
 */
@CacheResult(cacheName = "azure-sizes")
public Map<String, VmSize> getInstanceSizes(@CacheKey final String azSub, @CacheKey final String location,
        final Map<String, String> parameters) throws IOException {
    final String jsonSizes = getAzureResource(parameters,
            SIZES_URL.replace("{subscriptionId}", azSub).replace("{location}", location));
    return objectMapper.readValue(StringUtils.defaultString(jsonSizes, "{\"value\":[]}"), VmSizes.class)
            .getValue().stream().collect(Collectors.toMap(VmSize::getName, Function.identity()));
}