List of usage examples for org.apache.commons.lang3.tuple.ImmutablePair
public ImmutablePair(final L left, final R right)
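Before the project examples below, a minimal self-contained sketch of this constructor and the accessors it pairs with. The class and variable names here are illustrative, not taken from any of the projects that follow; only the ImmutablePair/Pair API itself is from commons-lang3.

import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;

public class ImmutablePairExample {
    public static void main(String[] args) {
        // Construct directly with the (left, right) constructor...
        Pair<String, Integer> direct = new ImmutablePair<>("answer", 42);
        // ...or with the equivalent static factory method.
        Pair<String, Integer> viaFactory = ImmutablePair.of("answer", 42);

        System.out.println(direct.getLeft());          // answer
        System.out.println(direct.getRight());         // 42
        System.out.println(direct.equals(viaFactory)); // true: equality is element-based

        // Pair implements Map.Entry, but ImmutablePair rejects mutation.
        try {
            direct.setValue(7);
        } catch (UnsupportedOperationException expected) {
            System.out.println("setValue is unsupported on ImmutablePair");
        }
    }
}

As the examples below show, the class is most often used to return two related values from a single method without defining a dedicated result type.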
From source file:hu.ppke.itk.nlpg.purepos.decoder.AbstractDecoder.java
private Map<NGram<Integer>, Map<Integer, Pair<Double, Double>>> getNextForSeenToken(
        final Set<NGram<Integer>> prevTagsSet, IProbabilityModel<Integer, String> wordProbModel,
        String wordForm, boolean isSpec, Collection<Integer> tags, Collection<Integer> anals) {
    Collection<Integer> tagset = filterTagsWithMorphology(tags, anals, wordProbModel.getContextMapper());
    Map<NGram<Integer>, Map<Integer, Pair<Double, Double>>> ret =
            new HashMap<NGram<Integer>, Map<Integer, Pair<Double, Double>>>();
    for (NGram<Integer> prevTags : prevTagsSet) {
        Map<Integer, Pair<Double, Double>> tagProbs = new HashMap<Integer, Pair<Double, Double>>();
        for (Integer tag : tagset) {
            Double tagProb = model.getTagTransitionModel().getLogProb(prevTags.toList(), tag);
            List<Integer> actTags = new ArrayList<Integer>(prevTags.toList());
            actTags.add(tag);
            Double emissionProb = wordProbModel.getLogProb(actTags, wordForm);
            // Substitute fixed unknown-tag weights when either model assigns zero probability.
            if (tagProb == Double.NEGATIVE_INFINITY)
                tagProb = UNKOWN_TAG_TRANSITION;
            if (emissionProb == Double.NEGATIVE_INFINITY)
                emissionProb = UNKNOWN_TAG_WEIGHT;
            // Pair each candidate tag with (transition log-prob, emission log-prob).
            tagProbs.put(tag, new ImmutablePair<Double, Double>(tagProb, emissionProb));
        }
        ret.put(prevTags, tagProbs);
    }
    return ret;
}
From source file:ee.ria.xroad.common.hashchain.HashChainVerifier.java
/** Retrieve hash step based on the URI. */
private Pair<HashStepType, HashChainType> fetchHashStep(String uri, HashChainType currentChain)
        throws Exception {
    // Find the fragment separator.
    int hashIndex = uri.indexOf('#');
    if (hashIndex < 0) {
        throw new CodedException(X_MALFORMED_HASH_CHAIN, "Invalid hash step URI: %s", uri);
    }
    String baseUri = uri.substring(0, hashIndex);
    String fragment = uri.substring(hashIndex + 1);
    if (fragment.isEmpty()) {
        // Hash step must be indicated by a fragment in a hash chain.
        throw new CodedException(X_MALFORMED_HASH_CHAIN, "Invalid hash step URI: %s", uri);
    }
    HashChainType hashChain;
    if (baseUri.isEmpty()) {
        hashChain = currentChain;
    } else {
        hashChain = getHashChain(baseUri);
    }
    // Found the hash chain. Look for a step with given ID.
    for (HashStepType step : hashChain.getHashStep()) {
        if (fragment.equals(step.getId())) {
            return new ImmutablePair<>(step, hashChain);
        }
    }
    // No hash step with given fragment ID found.
    throw new CodedException(X_MALFORMED_HASH_CHAIN, "Invalid hash step URI: %s", uri);
}
From source file:com.pinterest.terrapin.TerrapinUtil.java
public static List<Pair<Path, Long>> getS3FileList(AWSCredentials credentials, String s3Bucket,
        String s3KeyPrefix) {
    List<Pair<Path, Long>> fileSizePairList =
            Lists.newArrayListWithCapacity(Constants.MAX_ALLOWED_SHARDS);
    AmazonS3Client s3Client = new AmazonS3Client(credentials);
    // List files and build the path using the s3n: prefix.
    // Note that only keys greater than the marker are retrieved, where ordering is lexicographic.
    String prefix = s3KeyPrefix;
    String marker = prefix;
    while (true) {
        boolean reachedEnd = false;
        ObjectListing listing = s3Client
                .listObjects(new ListObjectsRequest().withBucketName(s3Bucket).withMarker(marker));
        List<S3ObjectSummary> summaries = listing.getObjectSummaries();
        if (summaries.isEmpty()) {
            break;
        }
        for (S3ObjectSummary summary : summaries) {
            if (summary.getKey().startsWith(prefix)) {
                fileSizePairList.add(new ImmutablePair<>(
                        new Path("s3n", s3Bucket, "/" + summary.getKey()), summary.getSize()));
                if (fileSizePairList.size() > Constants.MAX_ALLOWED_SHARDS) {
                    throw new RuntimeException("Too many files " + fileSizePairList.size());
                }
            } else {
                // We found a key which does not match the prefix, stop.
                reachedEnd = true;
                break;
            }
        }
        if (reachedEnd) {
            break;
        }
        marker = summaries.get(summaries.size() - 1).getKey();
    }
    return fileSizePairList;
}
From source file:android.databinding.tool.util.XmlEditor.java
private static ImmutablePair<Position, Position> findTerminalPositions(XMLParser.ElementContext node,
        ArrayList<String> lines) {
    Position endPosition = toEndPosition(node.getStop());
    Position startPosition = toPosition(node.getStop());
    int index;
    do {
        index = lines.get(startPosition.line).lastIndexOf("</");
        startPosition.line--;
    } while (index < 0);
    startPosition.line++;
    startPosition.charIndex = index;
    //noinspection unchecked
    return new ImmutablePair<>(startPosition, endPosition);
}
From source file:com.netflix.spinnaker.halyard.deploy.spinnaker.v1.service.distributed.kubernetes.v1.KubernetesV1DistributedService.java
default List<ConfigSource> stageProfiles(AccountDeploymentDetails<KubernetesAccount> details,
        GenerateService.ResolvedConfiguration resolvedConfiguration) {
    SpinnakerService thisService = getService();
    ServiceSettings thisServiceSettings = resolvedConfiguration.getServiceSettings(thisService);
    SpinnakerRuntimeSettings runtimeSettings = resolvedConfiguration.getRuntimeSettings();
    Integer version = getRunningServiceDetails(details, runtimeSettings).getLatestEnabledVersion();
    if (version == null) {
        version = 0;
    } else {
        version++;
    }

    String namespace = getNamespace(thisServiceSettings);
    KubernetesV1ProviderUtils.createNamespace(details, namespace);
    String name = getServiceName();
    Map<String, String> env = new HashMap<>();
    List<ConfigSource> configSources = new ArrayList<>();
    Map<String, Profile> serviceProfiles = resolvedConfiguration.getProfilesForService(thisService.getType());
    Set<String> requiredFiles = new HashSet<>();

    // Collect sidecar profiles; every sidecar must supply its profiles or deployment fails.
    for (SidecarService sidecarService : getSidecars(runtimeSettings)) {
        for (Profile profile : sidecarService.getSidecarProfiles(resolvedConfiguration, thisService)) {
            if (profile == null) {
                throw new HalException(Problem.Severity.FATAL,
                        "Service " + sidecarService.getService().getCanonicalName()
                                + " is required but was not supplied for deployment.");
            }
            serviceProfiles.put(profile.getName(), profile);
            requiredFiles.addAll(profile.getRequiredFiles());
        }
    }

    // Group profiles by the directory they will be mounted into.
    Map<String, Set<Profile>> collapseByDirectory = new HashMap<>();
    for (Map.Entry<String, Profile> entry : serviceProfiles.entrySet()) {
        Profile profile = entry.getValue();
        String mountPoint = Paths.get(profile.getOutputFile()).getParent().toString();
        Set<Profile> profiles = collapseByDirectory.getOrDefault(mountPoint, new HashSet<>());
        profiles.add(profile);
        requiredFiles.addAll(profile.getRequiredFiles());
        collapseByDirectory.put(mountPoint, profiles);
    }

    String stagingPath = getSpinnakerStagingPath(details.getDeploymentName());
    if (!requiredFiles.isEmpty()) {
        String secretName = KubernetesV1ProviderUtils.componentDependencies(name, version);
        String mountPoint = null;
        // All required files must share a single parent directory; the assert enforces this.
        for (String file : requiredFiles) {
            String nextMountPoint = Paths.get(file).getParent().toString();
            if (mountPoint == null) {
                mountPoint = nextMountPoint;
            }
            assert (mountPoint.equals(nextMountPoint));
        }
        // Each pair maps a local file to its name inside the secret.
        Set<Pair<File, String>> pairs = requiredFiles.stream()
                .map(f -> new ImmutablePair<>(new File(f), new File(f).getName()))
                .collect(Collectors.toSet());
        KubernetesV1ProviderUtils.upsertSecret(details, pairs, secretName, namespace);
        configSources.add(new ConfigSource().setId(secretName).setMountPath(mountPoint));
    }

    int ind = 0;
    for (Map.Entry<String, Set<Profile>> entry : collapseByDirectory.entrySet()) {
        env.clear();
        String mountPoint = entry.getKey();
        Set<Profile> profiles = entry.getValue();
        env.putAll(profiles.stream().reduce(new HashMap<>(), (acc, profile) -> {
            acc.putAll(profile.getEnv());
            return acc;
        }, (a, b) -> {
            a.putAll(b);
            return a;
        }));
        String secretName = KubernetesV1ProviderUtils.componentSecret(name + ind, version);
        ind += 1;
        Set<Pair<File, String>> pairs = profiles.stream()
                .map(p -> new ImmutablePair<>(new File(stagingPath, p.getName()),
                        new File(p.getOutputFile()).getName()))
                .collect(Collectors.toSet());
        KubernetesV1ProviderUtils.upsertSecret(details, pairs, secretName, namespace);
        configSources.add(new ConfigSource().setId(secretName).setMountPath(mountPoint).setEnv(env));
    }
    return configSources;
}
From source file:com.streamsets.pipeline.stage.it.DriftIT.java
@Test
public void testAddColumnToNonPartitionedTableInternal() throws Exception {
    HiveMetadataProcessor processor = new HiveMetadataProcessorBuilder().table("tbl_no_partition")
            .partitions(new PartitionConfigBuilder().build()).build();
    HiveMetastoreTarget hiveTarget = new HiveMetastoreTargetBuilder().build();

    List<Record> records = new LinkedList<>();
    Map<String, Field> map = new LinkedHashMap<>();
    map.put("city", Field.create("San Francisco"));
    map.put("state", Field.create("California"));
    Record record = RecordCreator.create();
    record.set(Field.create(map));
    records.add(record);

    processRecords(processor, hiveTarget, records);

    assertQueryResult("select * from tbl_no_partition", new QueryValidator() {
        @Override
        public void validateResultSet(ResultSet rs) throws Exception {
            assertResultSetStructure(rs,
                    new ImmutablePair("tbl_no_partition.city", Types.VARCHAR),
                    new ImmutablePair("tbl_no_partition.state", Types.VARCHAR));
            Assert.assertTrue("Table tbl_no_partition doesn't contain any rows", rs.next());
            Assert.assertEquals("San Francisco", rs.getString(1));
            Assert.assertEquals("California", rs.getString(2));
            Assert.assertFalse("Unexpected number of rows", rs.next());
        }
    });
}
From source file:io.lavagna.web.api.CardControllerTest.java
@Test
public void createWithFiles() {
    CardData cardData = new CardData();
    cardData.setName("name");
    List<CardController.NewCardFile> files = new ArrayList<>();
    CardController.NewCardFile file1 = new CardController.NewCardFile();
    file1.setName("file.txt");
    file1.setDigest("1234");
    files.add(file1);
    cardData.setFiles(files);

    ImmutablePair<Boolean, io.lavagna.model.CardData> result = new ImmutablePair<>(true,
            new io.lavagna.model.CardData(1, cardId, null, CardType.FILE, null, 0));

    Map<Permission, Permission> permissions = new HashMap<>();
    permissions.put(Permission.CREATE_FILE, Permission.CREATE_FILE);
    when(user.getBasePermissions()).thenReturn(permissions);
    when(cardService.createCard(eq("name"), eq(columnId), any(Date.class), eq(user))).thenReturn(card);
    when(cardDataService.assignFileToCard(eq("file.txt"), eq("1234"), eq(cardId), eq(user), any(Date.class)))
            .thenReturn(result);

    cardController.create(columnId, cardData, user);

    verify(cardService).createCard(eq("name"), eq(columnId), any(Date.class), eq(user));
    verify(eventEmitter).emitCreateCard(project.getShortName(), board.getShortName(), boardColumn.getId(),
            card, user);
    verify(cardDataService).assignFileToCard(eq("file.txt"), eq("1234"), eq(cardId), eq(user), any(Date.class));
    verify(cardDataService, never()).updateDescription(eq(cardId), anyString(), any(Date.class), eq(userId));
    verify(bulkOperationService, never()).addUserLabel(eq(projectShortName), anyInt(),
            any(CardLabelValue.LabelValue.class), ArgumentMatchers.<Integer>anyList(), eq(user));
    verify(bulkOperationService, never()).setDueDate(eq(projectShortName), ArgumentMatchers.<Integer>anyList(),
            any(CardLabelValue.LabelValue.class), eq(user));
    verify(bulkOperationService, never()).setMilestone(eq(projectShortName),
            ArgumentMatchers.<Integer>anyList(), any(CardLabelValue.LabelValue.class), eq(user));
    verify(bulkOperationService, never()).assign(eq(projectShortName), ArgumentMatchers.<Integer>anyList(),
            any(CardLabelValue.LabelValue.class), eq(user));
}
From source file:io.cloudslang.intellij.lang.annotator.ExecutableAnnotator.java
private List<Pair<PsiElement, String>> getElementNamePairs(YAMLDocument yamlDocument, String elementName) {
    final PsiElement psiElement = findChildRecursively(yamlDocument, new String[] { elementName });
    if (psiElement == null) {
        return Collections.emptyList();
    }
    List<Pair<PsiElement, String>> elementStringPairs = new ArrayList<>();
    try (BufferedReader reader = new BufferedReader(new StringReader(psiElement.getText()))) {
        for (String line; (line = reader.readLine()) != null;) {
            final Matcher matcher = keyInListPattern.matcher(line);
            if (matcher.find()) {
                String elementNameGroup = matcher.group(1);
                YAMLPsiElement childElement = findChildRecursively((YAMLPsiElement) psiElement,
                        new String[] { elementNameGroup });
                PsiElement elementToHighlight = (childElement != null) ? childElement
                        : ((psiElement instanceof YAMLKeyValue) ? ((YAMLKeyValue) psiElement).getKey()
                                : psiElement);
                elementStringPairs.add(new ImmutablePair<>(elementToHighlight, elementNameGroup));
            }
        }
    } catch (IOException ignore) {
        // This code is never reached because the reader reads from memory.
    }
    return elementStringPairs;
}
From source file:com.epam.catgenome.manager.protein.ProteinSequenceManager.java
private List<List<ImmutablePair<Gene, List<Sequence>>>> combineData(
        final Map<Gene, List<List<Sequence>>> data, final Comparator<Gene> comparator) {
    List<List<ImmutablePair<Gene, List<Sequence>>>> source = data.entrySet().stream()
            .sorted((e1, e2) -> comparator.compare(e1.getKey(), e2.getKey()))
            .map(e -> e.getValue().stream()
                    .map(s -> new ImmutablePair<>(e.getKey(), s)).collect(Collectors.toList()))
            .collect(Collectors.toList());
    if (CollectionUtils.isEmpty(source)) {
        return Collections.emptyList();
    }
    List<List<ImmutablePair<Gene, List<Sequence>>>> start = new ArrayList<>();
    for (ImmutablePair<Gene, List<Sequence>> p : source.remove(0)) {
        List<ImmutablePair<Gene, List<Sequence>>> ll = new ArrayList<>();
        ll.add(p);
        start.add(ll);
    }
    return recursiveCombine(start, source);
}
From source file:io.pravega.controller.task.Stream.StreamTransactionMetadataTasks.java
/**
 * Creates txn on the specified stream.
 *
 * Post-condition:
 * 1. If txn creation succeeds, then
 *      (a) txn node is created in the store,
 *      (b) txn segments are successfully created on respective segment stores,
 *      (c) txn is present in the host-txn index of current host,
 *      (d) txn's timeout is being tracked in timeout service.
 *
 * 2. If process fails after creating txn node, but before responding to the client, then since txn is
 * present in the host-txn index, some other controller process shall abort the txn after maxLeaseValue.
 *
 * 3. If timeout service tracks timeout of specified txn,
 * then txn is also present in the host-txn index of current process.
 *
 * Invariant:
 * The following invariants are maintained throughout the execution of createTxn, pingTxn and sealTxn methods.
 * 1. If timeout service tracks timeout of a txn, then txn is also present in the host-txn index of current process.
 * 2. If txn znode is updated, then txn is also present in the host-txn index of current process.
 *
 * @param scope              scope name.
 * @param stream             stream name.
 * @param lease              txn lease.
 * @param maxExecutionPeriod maximum amount of time for which txn may remain open.
 * @param scaleGracePeriod   amount of time for which txn may remain open after scale operation is initiated.
 * @param ctx                context.
 * @return identifier of the created txn.
 */
CompletableFuture<Pair<VersionedTransactionData, List<Segment>>> createTxnBody(final String scope,
        final String stream, final long lease, final long maxExecutionPeriod, final long scaleGracePeriod,
        final OperationContext ctx) {
    // Step 1. Validate parameters.
    CompletableFuture<Void> validate = validate(lease, maxExecutionPeriod, scaleGracePeriod);

    UUID txnId = UUID.randomUUID();
    TxnResource resource = new TxnResource(scope, stream, txnId);

    // Step 2. Add txn to host-transaction index.
    CompletableFuture<Void> addIndex = validate
            .thenComposeAsync(ignore -> streamMetadataStore.addTxnToIndex(hostId, resource, 0), executor)
            .whenComplete((v, e) -> {
                if (e != null) {
                    log.debug("Txn={}, failed adding txn to host-txn index of host={}", txnId, hostId);
                } else {
                    log.debug("Txn={}, added txn to host-txn index of host={}", txnId, hostId);
                }
            });

    // Step 3. Create txn node in the store.
    CompletableFuture<VersionedTransactionData> txnFuture = addIndex
            .thenComposeAsync(ignore -> streamMetadataStore.createTransaction(scope, stream, txnId, lease,
                    maxExecutionPeriod, scaleGracePeriod, ctx, executor), executor)
            .whenComplete((v, e) -> {
                if (e != null) {
                    log.debug("Txn={}, failed creating txn in store", txnId);
                } else {
                    log.debug("Txn={}, created in store", txnId);
                }
            });

    // Step 4. Notify segment stores about new txn.
    CompletableFuture<List<Segment>> segmentsFuture = txnFuture.thenComposeAsync(
            txnData -> streamMetadataStore.getActiveSegments(scope, stream, txnData.getEpoch(), ctx, executor),
            executor);

    CompletableFuture<Void> notify = segmentsFuture
            .thenComposeAsync(activeSegments -> notifyTxnCreation(scope, stream, activeSegments, txnId),
                    executor)
            .whenComplete((v, e) ->
                    // Method notifyTxnCreation ensures that notification completes
                    // even in the presence of n/w or segment store failures.
                    log.debug("Txn={}, notified segments stores", txnId));

    // Step 5. Start tracking txn in timeout service.
    return notify.thenApplyAsync(y -> {
        int version = txnFuture.join().getVersion();
        long executionExpiryTime = txnFuture.join().getMaxExecutionExpiryTime();
        timeoutService.addTxn(scope, stream, txnId, version, lease, executionExpiryTime, scaleGracePeriod);
        log.debug("Txn={}, added to timeout service on host={}", txnId, hostId);
        return null;
    }, executor).thenApplyAsync(v -> new ImmutablePair<>(txnFuture.join(), segmentsFuture.join()), executor);
}