Example usage for java.util LinkedHashMap values

Introduction

This page collects example usages of java.util.LinkedHashMap#values() from open-source projects.

Prototype

public Collection<V> values() 

Document

Returns a Collection view of the values contained in this map.
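
The returned collection is a view backed by the map: iteration follows the map's insertion order (or access order, if the map was constructed that way), and removing an element from the view removes the matching entry from the map. A minimal sketch of these semantics:

import java.util.LinkedHashMap;

public class ValuesDemo {
    public static void main(String[] args) {
        LinkedHashMap<String, Integer> map = new LinkedHashMap<>();
        map.put("one", 1);
        map.put("two", 2);
        map.put("three", 3);

        // Iterates in insertion order: 1, 2, 3
        for (int v : map.values()) {
            System.out.println(v);
        }

        // values() is a live view: removing from it also removes the entry from the map
        map.values().remove(2);
        System.out.println(map.containsKey("two")); // false
    }
}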

Usage

From source file:ffx.ui.KeywordPanel.java

/**
 * <p>
 * Saves the given keywords, grouped by keyword group, followed by any comments, to a key file.</p>
 *
 * @param keyFile a {@link java.io.File} object.
 * @param keywordHashMap a {@link java.util.LinkedHashMap} object.
 * @param comments a {@link java.lang.StringBuilder} object.
 * @return true if the file was written; false if an I/O error occurred.
 */
public boolean saveKeywords(File keyFile, LinkedHashMap<String, KeywordComponent> keywordHashMap,
        StringBuilder comments) {
    synchronized (this) {
        FileWriter fw = null;
        BufferedWriter bw = null;
        try {
            fw = new FileWriter(keyFile);
            bw = new BufferedWriter(fw);
            boolean writegroup = false;
            String pgroup = null;
            // Write out keywords in groups
            for (KeywordComponent keyword : keywordHashMap.values()) {
                String group = keyword.getKeywordGroup();
                if (pgroup == null || !group.equalsIgnoreCase(pgroup)) {
                    writegroup = true;
                    pgroup = group;
                }
                String line = keyword.toString();
                if (line != null) {
                    if (writegroup) {
                        bw.newLine();
                        bw.write("# " + group);
                        bw.newLine();
                        writegroup = false;
                    }
                    bw.write(line);
                    bw.newLine();
                }
            }
            bw.newLine();
            String s = comments.toString();
            if (!s.trim().isEmpty()) {
                bw.write(s.trim());
            }
            bw.newLine();
            bw.flush();
            KeywordComponent.setKeywordModified(false);
        } catch (FileNotFoundException e) {
            logger.warning(e.toString());
            return false;
        } catch (IOException e) {
            logger.warning(e.toString());
            return false;
        } finally {
            try {
                if (bw != null) {
                    bw.close();
                }
                if (fw != null) {
                    fw.close();
                }
            } catch (Exception e) {
                logger.warning(e.toString());
            }
        }
        return true;
    }
}
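
A possible modernization of the same values() loop, sketched with try-with-resources (Java 7+) so the writer closes itself and the FileNotFoundException case folds into the IOException catch. This assumes the same surrounding class members (logger, KeywordComponent) and is not the project's actual code:

public boolean saveKeywords(File keyFile, LinkedHashMap<String, KeywordComponent> keywordHashMap,
        StringBuilder comments) {
    synchronized (this) {
        // try-with-resources closes the BufferedWriter (and the wrapped FileWriter) automatically
        try (BufferedWriter bw = new BufferedWriter(new FileWriter(keyFile))) {
            boolean writegroup = false;
            String pgroup = null;
            // Write out keywords in groups, in the map's insertion order
            for (KeywordComponent keyword : keywordHashMap.values()) {
                String group = keyword.getKeywordGroup();
                if (pgroup == null || !group.equalsIgnoreCase(pgroup)) {
                    writegroup = true;
                    pgroup = group;
                }
                String line = keyword.toString();
                if (line != null) {
                    if (writegroup) {
                        bw.newLine();
                        bw.write("# " + group);
                        bw.newLine();
                        writegroup = false;
                    }
                    bw.write(line);
                    bw.newLine();
                }
            }
            bw.newLine();
            String s = comments.toString().trim();
            if (!s.isEmpty()) {
                bw.write(s);
            }
            bw.newLine();
            KeywordComponent.setKeywordModified(false);
            return true;
        } catch (IOException e) { // FileNotFoundException is an IOException
            logger.warning(e.toString());
            return false;
        }
    }
}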

From source file:com.uber.hoodie.TestHoodieClient.java

@Test
public void testUpserts() throws Exception {
    HoodieWriteConfig cfg = getConfig();
    HoodieWriteClient client = new HoodieWriteClient(jsc, cfg);
    HoodieIndex index = HoodieIndex.createIndex(cfg, jsc);
    FileSystem fs = FSUtils.getFs();

    /**
     * Write 1 (only inserts)
     */
    String newCommitTime = "001";
    List<HoodieRecord> records = dataGen.generateInserts(newCommitTime, 200);
    JavaRDD<HoodieRecord> writeRecords = jsc.parallelize(records, 1);

    List<WriteStatus> statuses = client.upsert(writeRecords, newCommitTime).collect();
    assertNoWriteErrors(statuses);

    // check the partition metadata is written out
    assertPartitionMetadata(HoodieTestDataGenerator.DEFAULT_PARTITION_PATHS, fs);

    // verify that there is a commit
    HoodieReadClient readClient = new HoodieReadClient(jsc, basePath, sqlContext);
    assertEquals("Expecting a single commit.", readClient.listCommitsSince("000").size(), 1);
    assertEquals("Latest commit should be 001", readClient.latestCommit(), newCommitTime);
    assertEquals("Must contain 200 records", readClient.readCommit(newCommitTime).count(), records.size());
    // Should have 200 records in table (check using Index), all in locations marked at commit 001
    HoodieTableMetaClient metaClient = new HoodieTableMetaClient(fs, basePath);
    HoodieTable table = HoodieTable.getHoodieTable(metaClient, getConfig());

    List<HoodieRecord> taggedRecords = index.tagLocation(jsc.parallelize(records, 1), table).collect();
    checkTaggedRecords(taggedRecords, "001");

    /**
     * Write 2 (updates)
     */
    newCommitTime = "004";
    records = dataGen.generateUpdates(newCommitTime, 100);
    LinkedHashMap<HoodieKey, HoodieRecord> recordsMap = new LinkedHashMap<>();
    for (HoodieRecord rec : records) {
        if (!recordsMap.containsKey(rec.getKey())) {
            recordsMap.put(rec.getKey(), rec);
        }
    }
    List<HoodieRecord> dedupedRecords = new ArrayList<>(recordsMap.values());

    statuses = client.upsert(jsc.parallelize(records, 1), newCommitTime).collect();
    // Verify there are no errors
    assertNoWriteErrors(statuses);

    // verify there are now 2 commits
    readClient = new HoodieReadClient(jsc, basePath, sqlContext);
    assertEquals("Expecting two commits.", readClient.listCommitsSince("000").size(), 2);
    assertEquals("Latest commit should be 004", readClient.latestCommit(), newCommitTime);

    metaClient = new HoodieTableMetaClient(fs, basePath);
    table = HoodieTable.getHoodieTable(metaClient, getConfig());

    // Index should be able to locate all updates in correct locations.
    taggedRecords = index.tagLocation(jsc.parallelize(dedupedRecords, 1), table).collect();
    checkTaggedRecords(taggedRecords, "004");

    // Check the entire dataset still has 200 records
    String[] fullPartitionPaths = new String[dataGen.getPartitionPaths().length];
    for (int i = 0; i < fullPartitionPaths.length; i++) {
        fullPartitionPaths[i] = String.format("%s/%s/*", basePath, dataGen.getPartitionPaths()[i]);
    }
    assertEquals("Must contain 200 records", readClient.read(fullPartitionPaths).count(), 200);

    // Check incremental consumption: reads since 002 and 001 should both return exactly the records of commit 004
    assertEquals("Incremental consumption from time 002, should give all records in commit 004",
            readClient.readCommit(newCommitTime).count(), readClient.readSince("002").count());
    assertEquals("Incremental consumption from time 001, should give all records in commit 004",
            readClient.readCommit(newCommitTime).count(), readClient.readSince("001").count());
}
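
The LinkedHashMap in Write 2 is a common idiom: de-duplicate by key while keeping first-seen order, then read the survivors back via values(). A generic, self-contained sketch of the same idiom (the dedupByKey helper is hypothetical):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.function.Function;

public class Dedup {
    // Keeps the first element seen per key; values() preserves first-seen order
    static <T, K> List<T> dedupByKey(List<T> items, Function<T, K> keyFn) {
        LinkedHashMap<K, T> seen = new LinkedHashMap<>();
        for (T item : items) {
            seen.putIfAbsent(keyFn.apply(item), item);
        }
        return new ArrayList<>(seen.values());
    }

    public static void main(String[] args) {
        List<String> words = Arrays.asList("apple", "avocado", "banana", "cherry");
        // Dedup by first letter -> [apple, banana, cherry]
        System.out.println(dedupByKey(words, w -> w.charAt(0)));
    }
}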

From source file:com.alibaba.wasp.fserver.EntityGroup.java

private boolean prepareDeleteEntity(DeleteAction action, Transaction transaction)
        throws IOException, StorageTableNotFoundException {
    long before = EnvironmentEdgeManager.currentTimeMillis();
    RowBuilder factory = RowBuilder.build();
    String entityTableName = StorageTableNameBuilder.buildEntityTableName(action.getFTableName());

    // fetch entity data
    Get get = new Get(
            factory.buildEntityRowKey(this.conf, action.getFTableName(), action.getCombinedPrimaryKey()));
    Result result = storageServices.getRowBeforeDelete(action, entityTableName, get);

    if (result == null || result.size() == 0) {
        return false;
    }

    // entity delete
    Delete entityDelete = new Delete(get.getRow());
    transaction.addEntity(ProtobufUtil.toMutate(MutateType.DELETE, entityDelete, entityTableName));

    // index delete
    NavigableMap<byte[], NavigableMap<byte[], byte[]>> oldValues = result.getNoVersionMap();

    TableSchemaCacheReader metaReader = TableSchemaCacheReader.getInstance(this.conf);
    LinkedHashMap<String, Index> indexs = metaReader.getSchema(action.getFTableName()).getIndex();
    if (indexs != null) {
        for (Index index : indexs.values()) {
            Pair<byte[], String> delete = factory.buildIndexKey(index, oldValues, get.getRow());
            if (delete != null) {
                transaction.addEntity(ProtobufUtil.toMutate(MutateType.DELETE, new Delete(delete.getFirst()),
                        delete.getSecond()));
            }
        }
    }
    if (this.metricsEntityGroup != null) {
        this.metricsEntityGroup.updatePrepareDeleteEntity(EnvironmentEdgeManager.currentTimeMillis() - before);
    }
    return true;
}

From source file:org.cerberus.servlet.crud.testexecution.ReadTestCaseExecution.java

private AnswerItem findExecutionColumns(ApplicationContext appContext, HttpServletRequest request, String Tag)
        throws CerberusException, ParseException, JSONException {
    AnswerItem answer = new AnswerItem(new MessageEvent(MessageEventEnum.DATA_OPERATION_OK));
    JSONObject jsonResponse = new JSONObject();

    AnswerList testCaseExecutionList = new AnswerList();
    AnswerList testCaseExecutionListInQueue = new AnswerList();

    testCaseExecutionService = appContext.getBean(ITestCaseExecutionService.class);
    testCaseExecutionInQueueService = appContext.getBean(ITestCaseExecutionInQueueService.class);

    /**
     * Get list of execution by tag, env, country, browser
     */
    testCaseExecutionList = testCaseExecutionService.readDistinctEnvCoutnryBrowserByTag(Tag);
    List<TestCaseExecution> testCaseExecutions = testCaseExecutionList.getDataList();

    /**
     * Get list of Execution in Queue by Tag
     */
    testCaseExecutionListInQueue = testCaseExecutionInQueueService.readDistinctEnvCoutnryBrowserByTag(Tag);
    List<TestCaseExecutionInQueue> testCaseExecutionsInQueue = testCaseExecutionListInQueue.getDataList();

    /**
     * Feed hash map with execution from the two list (to get only one by
     * test,testcase,country,env,browser)
     */
    LinkedHashMap<String, TestCaseExecution> testCaseExecutionsList = new LinkedHashMap<>();

    for (TestCaseExecution testCaseWithExecution : testCaseExecutions) {
        String key = testCaseWithExecution.getBrowser() + "_" + testCaseWithExecution.getCountry() + "_"
                + testCaseWithExecution.getEnvironment() + "_" + testCaseWithExecution.getControlStatus();
        testCaseExecutionsList.put(key, testCaseWithExecution);
    }
    for (TestCaseExecutionInQueue testCaseWithExecutionInQueue : testCaseExecutionsInQueue) {
        TestCaseExecution testCaseExecution = testCaseExecutionInQueueService
                .convertToTestCaseExecution(testCaseWithExecutionInQueue);
        String key = testCaseExecution.getBrowser() + "_" + testCaseExecution.getCountry() + "_"
                + testCaseExecution.getEnvironment() + "_" + testCaseExecution.getControlStatus();
        testCaseExecutionsList.put(key, testCaseExecution);
    }

    testCaseExecutions = new ArrayList<TestCaseExecution>(testCaseExecutionsList.values());

    JSONObject statusFilter = getStatusList(request);
    JSONObject countryFilter = getCountryList(request, appContext);
    LinkedHashMap<String, JSONObject> columnMap = new LinkedHashMap<String, JSONObject>();

    for (TestCaseExecution testCaseWithExecution : testCaseExecutions) {
        String controlStatus = testCaseWithExecution.getControlStatus();
        if (statusFilter.get(controlStatus).equals("on")
                && countryFilter.get(testCaseWithExecution.getCountry()).equals("on")) {
            JSONObject column = new JSONObject();
            column.put("country", testCaseWithExecution.getCountry());
            column.put("environment", testCaseWithExecution.getEnvironment());
            column.put("browser", testCaseWithExecution.getBrowser());
            columnMap.put(testCaseWithExecution.getBrowser() + "_" + testCaseWithExecution.getCountry() + "_"
                    + testCaseWithExecution.getEnvironment(), column);
        }
    }

    jsonResponse.put("Columns", columnMap.values());
    answer.setItem(jsonResponse);
    answer.setResultMessage(new MessageEvent(MessageEventEnum.DATA_OPERATION_OK));
    return answer;
}
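
The merge above leans on LinkedHashMap's overwrite behavior: put() on an existing key replaces the value but keeps the entry's original insertion position, so a queued execution supersedes a finished one under the same key without disturbing column order. A minimal illustration:

import java.util.LinkedHashMap;

public class OverwriteOrder {
    public static void main(String[] args) {
        LinkedHashMap<String, String> m = new LinkedHashMap<>();
        m.put("a", "first");
        m.put("b", "second");
        m.put("a", "updated");          // value replaced; "a" keeps its original position
        System.out.println(m.values()); // prints [updated, second]
    }
}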

From source file:org.orcid.frontend.web.controllers.PublicProfileController.java

@RequestMapping(value = "/{orcid:(?:\\d{4}-){3,}\\d{3}[\\dX]}")
public ModelAndView publicPreview(HttpServletRequest request,
        @RequestParam(value = "page", defaultValue = "1") int pageNo,
        @RequestParam(value = "v", defaultValue = "0") int v,
        @RequestParam(value = "maxResults", defaultValue = "15") int maxResults,
        @PathVariable("orcid") String orcid) {

    OrcidProfile profile = orcidProfileCacheManager.retrievePublic(orcid);

    if (profile == null) {
        return new ModelAndView("error-404");
    }

    ModelAndView mav = new ModelAndView("public_profile_v3");
    mav.addObject("isPublicProfile", true);

    boolean isProfileEmpty = true;

    request.getSession().removeAttribute(PUBLIC_WORKS_RESULTS_ATTRIBUTE);

    mav.addObject("profile", profile);

    String countryName = getCountryName(profile, true);
    if (!StringUtil.isBlank(countryName))
        mav.addObject("countryName", countryName);

    LinkedHashMap<Long, WorkForm> minimizedWorksMap = new LinkedHashMap<>();
    LinkedHashMap<Long, Affiliation> affiliationMap = new LinkedHashMap<>();
    LinkedHashMap<Long, Funding> fundingMap = new LinkedHashMap<>();
    LinkedHashMap<Long, PeerReview> peerReviewMap = new LinkedHashMap<>();

    if (profile.getOrcidBio() != null && profile.getOrcidBio().getBiography() != null
            && StringUtils.isNotBlank(profile.getOrcidBio().getBiography().getContent())) {
        isProfileEmpty = false;
    }

    if (profile.isLocked()) {
        mav.addObject("locked", true);
    } else if (profile.getOrcidDeprecated() != null) {
        String primaryRecord = profile.getOrcidDeprecated().getPrimaryRecord().getOrcidIdentifier().getPath();
        mav.addObject("deprecated", true);
        mav.addObject("primaryRecord", primaryRecord);
    } else {
        minimizedWorksMap = minimizedWorksMap(orcid);
        if (minimizedWorksMap.size() > 0) {
            mav.addObject("works", minimizedWorksMap.values());
            isProfileEmpty = false;
        } else {
            mav.addObject("worksEmpty", true);
        }

        affiliationMap = affiliationMap(orcid);
        if (affiliationMap.size() > 0) {
            mav.addObject("affilations", affiliationMap.values());
            isProfileEmpty = false;
        } else {
            mav.addObject("affiliationsEmpty", true);
        }

        fundingMap = fundingMap(orcid);
        if (fundingMap.size() > 0)
            isProfileEmpty = false;
        else {
            mav.addObject("fundingEmpty", true);
        }

        peerReviewMap = peerReviewMap(orcid);
        if (peerReviewMap.size() > 0) {
            mav.addObject("peerReviews", peerReviewMap.values());
            isProfileEmpty = false;
        } else {
            mav.addObject("peerReviewsEmpty", true);
        }

    }
    ObjectMapper mapper = new ObjectMapper();

    try {
        String worksIdsJson = mapper.writeValueAsString(minimizedWorksMap.keySet());
        String affiliationIdsJson = mapper.writeValueAsString(affiliationMap.keySet());
        String fundingIdsJson = mapper.writeValueAsString(fundingMap.keySet());
        String peerReviewIdsJson = mapper.writeValueAsString(peerReviewMap.keySet());
        mav.addObject("workIdsJson", StringEscapeUtils.escapeEcmaScript(worksIdsJson));
        mav.addObject("affiliationIdsJson", StringEscapeUtils.escapeEcmaScript(affiliationIdsJson));
        mav.addObject("fundingIdsJson", StringEscapeUtils.escapeEcmaScript(fundingIdsJson));
        mav.addObject("peerReviewIdsJson", StringEscapeUtils.escapeEcmaScript(peerReviewIdsJson));
        mav.addObject("isProfileEmpty", isProfileEmtpy);

        String creditName = "";
        if (profile.getOrcidBio() != null && profile.getOrcidBio().getPersonalDetails() != null) {
            PersonalDetails personalDetails = profile.getOrcidBio().getPersonalDetails();
            if (personalDetails.getCreditName() != null
                    && !PojoUtil.isEmpty(personalDetails.getCreditName().getContent()))
                creditName = profile.getOrcidBio().getPersonalDetails().getCreditName().getContent();
            else {
                if (personalDetails.getGivenNames() != null
                        && !PojoUtil.isEmpty(personalDetails.getGivenNames().getContent()))
                    creditName += personalDetails.getGivenNames().getContent();
                if (personalDetails.getFamilyName() != null
                        && !PojoUtil.isEmpty(personalDetails.getFamilyName().getContent()))
                    creditName += " " + personalDetails.getFamilyName().getContent();
            }
        }
        if (!PojoUtil.isEmpty(creditName)) {
            // <Published Name> (<ORCID iD>) - ORCID | Connecting Research
            // and Researchers
            mav.addObject("title", getMessage("layout.public-layout.title", creditName.trim(), orcid));
        }

    } catch (IOException e) {
        // JsonGenerationException and JsonMappingException extend IOException,
        // so one catch block covers all three original cases
        e.printStackTrace();
    }

    if (!profile.isReviewed()) {
        if (isProfileValidForIndex(profile)) {
            if (profile.isLocked() || profile.getCountTokens() == 0
                    || (!CreationMethod.WEBSITE.equals(profile.getOrcidHistory().getCreationMethod())
                            && !CreationMethod.DIRECT.equals(profile.getOrcidHistory().getCreationMethod()))) {
                mav.addObject("noIndex", true);
            }
        } else {
            mav.addObject("noIndex", true);
        }
    }

    return mav;
}

From source file:de.xwic.appkit.core.trace.impl.TraceDataManager.java

/**
 * Store statistic data.
 *
 * @param sts the statistic bean to populate
 * @param interval the sampling interval, in milliseconds
 * @param history the trace contexts recorded during the interval
 */
protected void populateSystemTraceStatistic(ISystemTraceStatistic sts, long interval,
        List<ITraceContext> history) {

    Runtime rt = Runtime.getRuntime();
    long used = (rt.totalMemory() - rt.freeMemory()) >> 20; // used heap, in MiB

    sts.setMemoryUsed(used);
    sts.setHost(hostName);
    sts.setInstanceId(instanceId);

    if (!history.isEmpty()) {

        long max = System.currentTimeMillis() - (interval + 10); // make an "overlap of 10ms"
        long hisStart = history.get(0).getStartTime();
        long from = Math.max(hisStart, max);
        long to = history.get(history.size() - 1).getStartTime();

        sts.setFromDate(new Date(from));
        if (to >= from) {
            sts.setToDate(new Date(to));
        } else {
            sts.setToDate(null);
        }

        int count = 0;
        long total = 0;

        int daoOps = 0;
        long daoDuration = 0;

        List<TraceStats> traceStats = new ArrayList<ISystemTraceStatistic.TraceStats>();

        for (String catName : systemTraceLogCategories) {
            TraceStats ts = new TraceStats();
            ts.setName(catName);
            traceStats.add(ts);
        }

        LinkedHashMap<Integer, TraceStats> traceIntervals = new LinkedHashMap<Integer, ISystemTraceStatistic.TraceStats>(
                traceIntervalBuckets.length);
        for (int x : traceIntervalBuckets) {
            TraceStats ts = new TraceStats();
            ts.setName("Duration-" + x);
            traceIntervals.put(x, ts);
        }

        for (ITraceContext tx : history) {
            if (tx.getStartTime() >= from) { // skip older entries
                count++;
                total += tx.getDuration();
                putInIntervalBucket(tx, traceIntervals);
                ITraceCategory daoCategory = tx.getTraceCategory(DAO.TRACE_CAT);
                if (daoCategory != null) {
                    daoOps += daoCategory.getCount();
                    daoDuration += daoCategory.getTotalDuration();
                }

                for (TraceStats ts : traceStats) {
                    countCat(ts, tx);
                }
            }
        }

        sts.setResponseCount(count);
        sts.setTotalResponseTime(total);
        sts.setAverageResponseTime(count != 0 ? (double) total / count : 0d); // cast avoids integer division

        sts.setTotalDAODuration(daoDuration);
        sts.setTotalDAOops(daoOps);

        traceStats.addAll(traceIntervals.values());
        sts.setTraceStats(traceStats);
    }
}
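
A side note on ordering: traceIntervals is keyed in the order the buckets appear in traceIntervalBuckets, so traceIntervals.values() appends the Duration-x stats to the list in that same declared order.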

From source file:com.alibaba.wasp.fserver.EntityGroup.java

/**
 * Add an insert action to the transaction.
 *
 * @param action
 *          insert action
 * @param transaction
 *          transaction
 * @throws java.io.IOException
 * @throws com.alibaba.wasp.storage.StorageTableNotFoundException
 */
private void prepareInsertEntity(InsertAction action, Transaction transaction)
        throws IOException, StorageTableNotFoundException {
    long before = EnvironmentEdgeManager.currentTimeMillis();
    RowBuilder builder = RowBuilder.build();
    TableSchemaCacheReader metaReader = TableSchemaCacheReader.getInstance(this.conf);
    LinkedHashMap<String, Index> indexs = metaReader.getSchema(action.getFTableName()).getIndex();
    if (LOG.isDebugEnabled()) {
        LOG.debug("prepareInsertEntity indexs:" + indexs.values());
    }

    NavigableMap<byte[], NavigableMap<byte[], byte[]>> set = new TreeMap<byte[], NavigableMap<byte[], byte[]>>(
            Bytes.BYTES_COMPARATOR);
    for (ColumnStruct col : action.getColumns()) {
        byte[] family = Bytes.toBytes(col.getFamilyName());
        NavigableMap<byte[], byte[]> cols = set.get(family);
        if (cols == null) {
            cols = new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
        }
        set.put(family, cols);
        cols.put(Bytes.toBytes(col.getColumnName()), col.getValue());
    }

    String entityTableName = StorageTableNameBuilder.buildEntityTableName(action.getFTableName());
    // entity put
    Put entityPut = builder.buildPut(action);
    transaction.addEntity(ProtobufUtil.toMutate(MutateType.PUT, entityPut, entityTableName));
    storageServices.checkRowExistsBeforeInsert(action, entityTableName, entityPut);

    // index put
    if (indexs != null) {
        for (Index index : indexs.values()) {
            Pair<byte[], String> indexPut = builder.buildIndexKey(index, set, entityPut.getRow());
            if (indexPut != null) {
                Put put = new Put(indexPut.getFirst());
                put.add(FConstants.INDEX_STORING_FAMILY_BYTES, FConstants.INDEX_STORE_ROW_QUALIFIER,
                        entityPut.getRow());
                for (Entry<String, Field> entry : index.getStoring().entrySet()) {
                    ColumnStruct storing = action.getName2Column().get(entry.getKey());
                    if (storing != null) {
                        put.add(FConstants.INDEX_STORING_FAMILY_BYTES, Bytes.toBytes(entry.getKey()),
                                storing.getValue());
                    }
                }
                transaction.addEntity(ProtobufUtil.toMutate(MutateType.PUT, put, indexPut.getSecond()));
            }
        }
    }
    if (this.metricsEntityGroup != null) {
        this.metricsEntityGroup.updatePrepareInsertEntity(EnvironmentEdgeManager.currentTimeMillis() - before);
    }
}

From source file:com.evolveum.midpoint.wf.impl.processors.primary.policy.ProcessSpecifications.java

static ProcessSpecifications createFromRules(List<EvaluatedPolicyRule> rules, PrismContext prismContext)
        throws ObjectNotFoundException {
    // Step 1: plain list of approval actions -> map: process-spec -> list of related actions/rules ("collected")
    LinkedHashMap<WfProcessSpecificationType, List<Pair<ApprovalPolicyActionType, EvaluatedPolicyRule>>> collectedSpecifications = new LinkedHashMap<>();
    for (EvaluatedPolicyRule rule : rules) {
        for (ApprovalPolicyActionType approvalAction : rule.getEnabledActions(ApprovalPolicyActionType.class)) {
            WfProcessSpecificationType spec = approvalAction.getProcessSpecification();
            collectedSpecifications.computeIfAbsent(spec, s -> new ArrayList<>())
                    .add(new ImmutablePair<>(approvalAction, rule));
        }
    }
    // Step 2: resolve references
    for (WfProcessSpecificationType spec : new HashSet<>(collectedSpecifications.keySet())) { // cloned to avoid concurrent modification exception
        if (spec != null && spec.getRef() != null) {
            List<Map.Entry<WfProcessSpecificationType, List<Pair<ApprovalPolicyActionType, EvaluatedPolicyRule>>>> matching = collectedSpecifications
                    .entrySet().stream()
                    .filter(e -> e.getKey() != null && spec.getRef().equals(e.getKey().getName()))
                    .collect(Collectors.toList());
            if (matching.isEmpty()) {
                throw new IllegalStateException("Process specification named '" + spec.getRef()
                        + "' referenced from an approval action couldn't be found");
            } else if (matching.size() > 1) {
                throw new IllegalStateException("More than one process specification named '" + spec.getRef()
                        + "' referenced from an approval action: " + matching);
            } else {
                // move all actions/rules to the referenced process specification
                List<Pair<ApprovalPolicyActionType, EvaluatedPolicyRule>> referencedSpecActions = matching
                        .get(0).getValue();
                referencedSpecActions.addAll(collectedSpecifications.get(spec));
                collectedSpecifications.remove(spec);
            }
        }
    }

    Map<String, Pair<ApprovalPolicyActionType, EvaluatedPolicyRule>> actionsMap = null;

    // Step 3: include other actions
    for (Map.Entry<WfProcessSpecificationType, List<Pair<ApprovalPolicyActionType, EvaluatedPolicyRule>>> processSpecificationEntry : collectedSpecifications
            .entrySet()) {
        WfProcessSpecificationType spec = processSpecificationEntry.getKey();
        if (spec == null || (spec.getIncludeAction().isEmpty() && spec.getIncludeActionIfPresent().isEmpty())) {
            continue;
        }
        if (actionsMap == null) {
            actionsMap = createActionsMap(collectedSpecifications.values());
        }
        for (String actionToInclude : spec.getIncludeAction()) {
            processActionToInclude(actionToInclude, actionsMap, processSpecificationEntry, true);
        }
        for (String actionToInclude : spec.getIncludeActionIfPresent()) {
            processActionToInclude(actionToInclude, actionsMap, processSpecificationEntry, false);
        }
    }

    // Step 4: sort process specifications and wrap them into ProcessSpecification objects
    ProcessSpecifications rv = new ProcessSpecifications(prismContext);
    collectedSpecifications.entrySet().stream().sorted((ps1, ps2) -> {
        WfProcessSpecificationType key1 = ps1.getKey();
        WfProcessSpecificationType key2 = ps2.getKey();
        if (key1 == null) {
            return key2 == null ? 0 : 1; // non-empty (key2) records first
        } else if (key2 == null) {
            return -1; // non-empty (key1) record first
        }
        int order1 = defaultIfNull(key1.getOrder(), Integer.MAX_VALUE);
        int order2 = defaultIfNull(key2.getOrder(), Integer.MAX_VALUE);
        return Integer.compare(order1, order2);
    }).forEach(e -> rv.specifications.add(rv.new ProcessSpecification(e)));
    return rv;
}
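
Worth noting: collectedSpecifications is a LinkedHashMap, so its entries stream in rule-encounter order, and because Stream.sorted() is stable, specifications with equal (or absent) order values keep that relative order in the final list.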

From source file:com.indeed.imhotep.web.ImhotepMetadataCache.java

@Scheduled(fixedRate = 60000)
public void updateDatasets() {
    Map<String, DatasetInfo> datasetToShardList = imhotepClient.getDatasetToShardList();
    List<String> datasetNames = new ArrayList<String>(datasetToShardList.keySet());
    Collections.sort(datasetNames);

    if (datasetNames.isEmpty()) { // if we get no data, just keep what we already have
        log.warn("Imhotep returns no datasets");
        return;
    }

    // First make empty DatasetMetadata instances
    final LinkedHashMap<String, DatasetMetadata> newDatasets = Maps.newLinkedHashMap();
    for (String datasetName : datasetNames) {
        final DatasetMetadata datasetMetadata = new DatasetMetadata(datasetName);
        newDatasets.put(datasetName, datasetMetadata);
    }

    // Now pre-fill the metadata with fields from Imhotep
    for (DatasetInfo datasetInfo : datasetToShardList.values()) {
        List<String> dsIntFields = Lists.newArrayList(datasetInfo.getIntFields());
        List<String> dsStringFields = Lists.newArrayList(datasetInfo.getStringFields());
        removeDisabledFields(dsIntFields);
        removeDisabledFields(dsStringFields);
        Collections.sort(dsIntFields);
        Collections.sort(dsStringFields);

        final String datasetName = datasetInfo.getDataset();
        final DatasetMetadata datasetMetadata = newDatasets.get(datasetName);
        final LinkedHashMap<String, FieldMetadata> fieldMetadatas = datasetMetadata.getFields();

        for (String intField : dsIntFields) {
            fieldMetadatas.put(intField, new FieldMetadata(intField, FieldType.Integer));
        }
        for (String stringField : dsStringFields) {
            fieldMetadatas.put(stringField, new FieldMetadata(stringField, FieldType.String));
        }
    }

    // now load the metadata from files
    loadMetadataFromFiles(newDatasets);
    for (final DatasetMetadata datasetMetadata : newDatasets.values()) {
        addStandardAliases(datasetMetadata);

        datasetMetadata.finishLoading();
    }

    // new metadata instance is ready for use
    datasets = newDatasets;
}

From source file:com.alibaba.wasp.client.WaspAdmin.java

/**
 * List all indexes of the given table.
 *
 * @param tableName the name of the table whose indexes are listed
 * @return an array of Index
 * @throws java.io.IOException
 *           if a remote or network exception occurs
 */
public Index[] listIndexes(final String tableName) throws IOException {
    FTable ftable = getTableDescriptor(Bytes.toBytes(tableName));
    LinkedHashMap<String, Index> indexMap = ftable.getIndex();
    Collection<Index> indexes = indexMap.values();
    return indexes.toArray(new Index[0]);
}