List of usage examples for com.google.common.collect.Maps.filterKeys
@CheckReturnValue public static <K, V> Map<K, V> filterKeys(Map<K, V> unfiltered, final Predicate<? super K> keyPredicate)
@CheckReturnValue public static <K, V> BiMap<K, V> filterKeys(BiMap<K, V> unfiltered, final Predicate<? super K> keyPredicate)

Returns a map containing the mappings of unfiltered whose keys satisfy keyPredicate. The returned map is a live view: changes to the underlying map are reflected in the view, and vice versa. Guava also provides overloads for SortedMap and NavigableMap arguments; all of the examples below use the Map overload.
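Before the project examples below, a minimal self-contained sketch of the basic call shape (the map contents are illustrative; Guava's Predicate is a functional interface, so a lambda works on Java 8+):

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;

import java.util.Map;

public class FilterKeysBasics {
    public static void main(String[] args) {
        Map<String, Integer> ages = ImmutableMap.of("alice", 30, "bob", 25, "anna", 41);
        // Keep only the entries whose key starts with "a".
        Map<String, Integer> startsWithA = Maps.filterKeys(ages, key -> key.startsWith("a"));
        System.out.println(startsWithA); // {alice=30, anna=41}
    }
}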
From source file:io.prestosql.orc.StripeReader.java
public Stripe readStripe(StripeInformation stripe, AggregatedMemoryContext systemMemoryUsage)
        throws IOException {
    // read the stripe footer
    StripeFooter stripeFooter = readStripeFooter(stripe, systemMemoryUsage);
    List<ColumnEncoding> columnEncodings = stripeFooter.getColumnEncodings();

    // get streams for selected columns
    Map<StreamId, Stream> streams = new HashMap<>();
    boolean hasRowGroupDictionary = false;
    for (Stream stream : stripeFooter.getStreams()) {
        if (includedOrcColumns.contains(stream.getColumn())) {
            streams.put(new StreamId(stream), stream);
            if (stream.getStreamKind() == StreamKind.IN_DICTIONARY) {
                ColumnEncoding columnEncoding = columnEncodings.get(stream.getColumn());
                if (columnEncoding.getColumnEncodingKind() == DICTIONARY) {
                    hasRowGroupDictionary = true;
                }
                Optional<List<DwrfSequenceEncoding>> additionalSequenceEncodings = columnEncoding
                        .getAdditionalSequenceEncodings();
                if (additionalSequenceEncodings.isPresent()
                        && additionalSequenceEncodings.get().stream()
                                .map(DwrfSequenceEncoding::getValueEncoding)
                                .anyMatch(encoding -> encoding.getColumnEncodingKind() == DICTIONARY)) {
                    hasRowGroupDictionary = true;
                }
            }
        }
    }

    // handle stripes with more than one row group or a dictionary
    boolean invalidCheckPoint = false;
    if ((stripe.getNumberOfRows() > rowsInRowGroup) || hasRowGroupDictionary) {
        // determine ranges of the stripe to read
        Map<StreamId, DiskRange> diskRanges = getDiskRanges(stripeFooter.getStreams());
        diskRanges = Maps.filterKeys(diskRanges, Predicates.in(streams.keySet()));

        // read the file regions
        Map<StreamId, OrcInputStream> streamsData = readDiskRanges(stripe.getOffset(), diskRanges,
                systemMemoryUsage);

        // read the bloom filter for each column
        Map<StreamId, List<HiveBloomFilter>> bloomFilterIndexes = readBloomFilterIndexes(streams, streamsData);

        // read the row index for each column
        Map<StreamId, List<RowGroupIndex>> columnIndexes = readColumnIndexes(streams, streamsData,
                bloomFilterIndexes);
        if (writeValidation.isPresent()) {
            writeValidation.get().validateRowGroupStatistics(orcDataSource.getId(), stripe.getOffset(),
                    columnIndexes);
        }

        // select the row groups matching the tuple domain
        Set<Integer> selectedRowGroups = selectRowGroups(stripe, columnIndexes);

        // if all row groups are skipped, return null
        if (selectedRowGroups.isEmpty()) {
            // set accounted memory usage to zero
            systemMemoryUsage.close();
            return null;
        }

        // value streams
        Map<StreamId, ValueInputStream<?>> valueStreams = createValueStreams(streams, streamsData,
                columnEncodings);

        // build the dictionary streams
        InputStreamSources dictionaryStreamSources = createDictionaryStreamSources(streams, valueStreams,
                columnEncodings);

        // build the row groups
        try {
            List<RowGroup> rowGroups = createRowGroups(stripe.getNumberOfRows(), streams, valueStreams,
                    columnIndexes, selectedRowGroups, columnEncodings);
            return new Stripe(stripe.getNumberOfRows(), columnEncodings, rowGroups, dictionaryStreamSources);
        } catch (InvalidCheckpointException e) {
            // The ORC file contains a corrupt checkpoint stream
            // If the file does not have a row group dictionary, treat the stripe as a single row group. Otherwise,
            // we must fail because the length of the row group dictionary is contained in the checkpoint stream.
            if (hasRowGroupDictionary) {
                throw new OrcCorruptionException(e, orcDataSource.getId(), "Checkpoints are corrupt");
            }
            invalidCheckPoint = true;
        }
    }

    // stripe only has one row group and no dictionary
    ImmutableMap.Builder<StreamId, DiskRange> diskRangesBuilder = ImmutableMap.builder();
    for (Entry<StreamId, DiskRange> entry : getDiskRanges(stripeFooter.getStreams()).entrySet()) {
        StreamId streamId = entry.getKey();
        if (streams.keySet().contains(streamId)) {
            diskRangesBuilder.put(entry);
        }
    }
    ImmutableMap<StreamId, DiskRange> diskRanges = diskRangesBuilder.build();

    // read the file regions
    Map<StreamId, OrcInputStream> streamsData = readDiskRanges(stripe.getOffset(), diskRanges,
            systemMemoryUsage);

    long minAverageRowBytes = 0;
    for (Entry<StreamId, Stream> entry : streams.entrySet()) {
        if (entry.getKey().getStreamKind() == ROW_INDEX) {
            List<RowGroupIndex> rowGroupIndexes = metadataReader.readRowIndexes(hiveWriterVersion,
                    streamsData.get(entry.getKey()));
            checkState(rowGroupIndexes.size() == 1 || invalidCheckPoint,
                    "expect a single row group or an invalid check point");
            long totalBytes = 0;
            long totalRows = 0;
            for (RowGroupIndex rowGroupIndex : rowGroupIndexes) {
                ColumnStatistics columnStatistics = rowGroupIndex.getColumnStatistics();
                if (columnStatistics.hasMinAverageValueSizeInBytes()) {
                    totalBytes += columnStatistics.getMinAverageValueSizeInBytes()
                            * columnStatistics.getNumberOfValues();
                    totalRows += columnStatistics.getNumberOfValues();
                }
            }
            if (totalRows > 0) {
                minAverageRowBytes += totalBytes / totalRows;
            }
        }
    }

    // value streams
    Map<StreamId, ValueInputStream<?>> valueStreams = createValueStreams(streams, streamsData, columnEncodings);

    // build the dictionary streams
    InputStreamSources dictionaryStreamSources = createDictionaryStreamSources(streams, valueStreams,
            columnEncodings);

    // build the row group
    ImmutableMap.Builder<StreamId, InputStreamSource<?>> builder = ImmutableMap.builder();
    for (Entry<StreamId, ValueInputStream<?>> entry : valueStreams.entrySet()) {
        builder.put(entry.getKey(), new ValueInputStreamSource<>(entry.getValue()));
    }
    RowGroup rowGroup = new RowGroup(0, 0, stripe.getNumberOfRows(), minAverageRowBytes,
            new InputStreamSources(builder.build()));
    return new Stripe(stripe.getNumberOfRows(), columnEncodings, ImmutableList.of(rowGroup),
            dictionaryStreamSources);
}
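The filterKeys call above restricts the disk ranges to the streams that were actually selected. A minimal sketch of that membership-filter pattern with Predicates.in, using string keys as stand-ins for StreamIds:

import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Maps;

import java.util.Map;
import java.util.Set;

public class MembershipFilter {
    public static void main(String[] args) {
        Map<String, Long> diskRanges = ImmutableMap.of("stream-1", 100L, "stream-2", 200L, "stream-3", 300L);
        Set<String> selected = ImmutableSet.of("stream-1", "stream-3");
        // Keep only the ranges whose stream id is in the selected set; the result is a live view.
        Map<String, Long> filtered = Maps.filterKeys(diskRanges, Predicates.in(selected));
        System.out.println(filtered); // {stream-1=100, stream-3=300}
    }
}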
From source file:com.siemens.sw360.importer.ComponentImportUtils.java
public static RequestSummary writeAttachmentsToDatabase(
        FluentIterable<ComponentAttachmentCSVRecord> compCSVRecords, User user,
        ComponentService.Iface componentClient, AttachmentService.Iface attachmentClient) throws TException {
    final List<Component> componentDetailedSummaryForExport = componentClient
            .getComponentDetailedSummaryForExport();
    final ImmutableMap<String, Component> componentsByName = getComponentsByName(
            componentDetailedSummaryForExport);
    final Map<String, Release> releasesByIdentifier = getReleasesByIdentifier(
            componentDetailedSummaryForExport);
    final Set<String> usedAttachmentContentIds = componentClient.getUsedAttachmentContentIds();

    final Set<String> releaseIdentifiersToUpdate = new HashSet<>();
    final Set<String> componentsToUpdate = new HashSet<>();
    final Set<Attachment> attachmentStubsToDelete = new HashSet<>();

    for (ComponentAttachmentCSVRecord compCSVRecord : compCSVRecords) {
        if (compCSVRecord.isSaveableAttachment()) {
            final Attachment attachment = compCSVRecord.getAttachment();
            if (usedAttachmentContentIds.contains(attachment.getAttachmentContentId()))
                continue;
            if (compCSVRecord.isForComponent()) {
                final Component component = componentsByName.get(compCSVRecord.getComponentName());
                if (component != null) {
                    component.addToAttachments(attachment);
                    componentsToUpdate.add(component.getName());
                }
            } else if (compCSVRecord.isForRelease()) {
                final Release release = releasesByIdentifier.get(compCSVRecord.getReleaseIdentifier());
                if (release != null) {
                    attachmentStubsToDelete
                            .addAll(removeAutogeneratedAttachments(attachmentClient, attachment, release));
                    release.addToAttachments(attachment);
                    releaseIdentifiersToUpdate.add(compCSVRecord.getReleaseIdentifier());
                }
            }
        }
    }

    final HashSet<Release> updatedReleases = getUpdatedReleases(releasesByIdentifier, releaseIdentifiersToUpdate);
    final RequestSummary releaseRequestSummary = componentClient.updateReleases(updatedReleases, user);

    final HashSet<Component> updatedComponents = Sets
            .newHashSet(Maps.filterKeys(componentsByName, new Predicate<String>() {
                @Override
                public boolean apply(String input) {
                    return componentsToUpdate.contains(input);
                }
            }).values());
    final RequestSummary componentRequestSummary = componentClient.updateComponents(updatedComponents, user);

    RequestSummary attachmentSummary = null;
    if (!attachmentStubsToDelete.isEmpty()) {
        attachmentSummary = attachmentClient.bulkDelete(Lists
                .transform(Lists.newArrayList(attachmentStubsToDelete), new Function<Attachment, String>() {
                    @Override
                    public String apply(Attachment input) {
                        return input.getAttachmentContentId();
                    }
                }));
    }

    RequestSummary requestSummary = CommonUtils.addRequestSummaries(releaseRequestSummary, "release",
            componentRequestSummary, "component");
    if (attachmentSummary != null) {
        requestSummary = CommonUtils.addToMessage(requestSummary, attachmentSummary, "attachment deletion");
    }
    return requestSummary;
}
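Here filterKeys narrows componentsByName to the components that were touched, and .values() of the filtered view is copied into a new HashSet. A sketch of that pattern, using a method reference where the original uses an anonymous Predicate (the map contents are illustrative):

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class FilterThenCollectValues {
    public static void main(String[] args) {
        Map<String, String> componentsByName = ImmutableMap.of("zlib", "c1", "openssl", "c2", "curl", "c3");
        Set<String> componentsToUpdate = ImmutableSet.of("zlib", "curl");
        // Keep only the entries whose name was marked for update, then copy the values out of the view.
        HashSet<String> updated = Sets.newHashSet(
                Maps.filterKeys(componentsByName, componentsToUpdate::contains).values());
        System.out.println(updated); // e.g. [c1, c3] (HashSet iteration order is unspecified)
    }
}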
From source file:gobblin.broker.DefaultBrokerCache.java
/**
 * Invalidate all objects at scopes which are descendants of the input scope. Any such invalidated object that is a
 * {@link Closeable} will be closed, and any such object which is a {@link Service} will be shut down.
 * @throws IOException
 */
public void close(ScopeWrapper<S> scope) throws IOException {
    List<Service> awaitShutdown = Lists.newArrayList();
    for (Map.Entry<RawJobBrokerKey, Object> entry : Maps
            .filterKeys(this.sharedResourceCache.asMap(), new ScopeIsAncestorFilter(scope)).entrySet()) {
        this.sharedResourceCache.invalidate(entry.getKey());
        if (entry.getValue() instanceof ResourceInstance) {
            Object obj = ((ResourceInstance) entry.getValue()).getResource();
            SharedResourcesBrokerUtils.shutdownObject(obj, log);
            if (obj instanceof Service) {
                awaitShutdown.add((Service) obj);
            }
        }
    }
    for (Service service : awaitShutdown) {
        try {
            service.awaitTerminated(10, TimeUnit.SECONDS);
        } catch (TimeoutException te) {
            log.error("Failed to shutdown {}.", service);
        }
    }
}
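The pattern above filters the live asMap() view of a Guava cache with a custom Predicate and invalidates every matching key. A compact sketch under simplified types, with the project's ScopeIsAncestorFilter replaced by a plain prefix check:

import com.google.common.base.Predicate;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.collect.Maps;

import java.util.Map;

public class CacheEviction {
    public static void main(String[] args) {
        Cache<String, Object> cache = CacheBuilder.newBuilder().build();
        cache.put("job1.task1", new Object());
        cache.put("job1.task2", new Object());
        cache.put("job2.task1", new Object());

        // Stand-in for ScopeIsAncestorFilter: match every key under the "job1" scope.
        Predicate<String> inScope = key -> key.startsWith("job1.");
        // asMap() is a ConcurrentMap view, so invalidating while iterating is safe.
        for (Map.Entry<String, Object> entry : Maps.filterKeys(cache.asMap(), inScope).entrySet()) {
            cache.invalidate(entry.getKey());
            // The original also closes/shuts down the evicted resource here.
        }
        System.out.println(cache.asMap().keySet()); // [job2.task1]
    }
}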
From source file:com.mgmtp.jfunk.web.ScopingWebDriverModule.java
@Provides
protected Map<String, CredentialsProvider> provideHtmlUnitCredentialsProviderMap(final Configuration config) {
    Map<String, CredentialsProvider> result = newHashMapWithExpectedSize(1);

    // extract sorted credential keys
    Set<String> credentialKeys = newTreeSet(Maps
            .filterKeys(config,
                    (Predicate<String>) input -> input.startsWith(WebConstants.HTMLUNIT_CREDENTIALS_PREFIX))
            .keySet());

    for (Iterator<String> it = credentialKeys.iterator(); it.hasNext();) {
        String key = it.next();
        Matcher matcher = HOST_EXTRACTION_PATTERN.matcher(key);
        checkState(matcher.find(), "Could not extract host from property: " + key);
        String host = matcher.group();
        String password = config.get(key);
        // as the set is sorted, the next key is the username key for the current host
        String username = config.get(it.next());

        DefaultCredentialsProvider credentialsProvider = new DefaultCredentialsProvider();
        credentialsProvider.addCredentials(username, password);
        result.put(host, credentialsProvider);
    }
    return result;
}
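filterKeys is used here to pull every property whose key starts with a known prefix out of the configuration map, and the surviving keys are sorted into a TreeSet. A reduced sketch of that prefix-scan pattern (the property names are made up):

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;

import java.util.Map;
import java.util.Set;

public class PrefixScan {
    private static final String PREFIX = "htmlunit.credentials.";

    public static void main(String[] args) {
        Map<String, String> config = ImmutableMap.of(
                PREFIX + "example.com.password", "secret",
                PREFIX + "example.com.username", "alice",
                "browser.timeout", "30");
        // Sorted key set of only the credential properties.
        Set<String> credentialKeys = Sets.newTreeSet(
                Maps.filterKeys(config, key -> key.startsWith(PREFIX)).keySet());
        System.out.println(credentialKeys);
        // [htmlunit.credentials.example.com.password, htmlunit.credentials.example.com.username]
    }
}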
From source file:io.sarl.lang.scoping.batch.SARLMapExtensions.java
/** Replies the elements of the given map except the pairs with the given keys.
 *
 * <p>The replied map is a view on the given map. It means that any change
 * in the original map is reflected to the result of this operation.
 *
 * @param <K> - type of the map keys.
 * @param <V> - type of the map values.
 * @param map - the map to update.
 * @param keys - the keys of the pairs to remove.
 * @return the map with the content of the map except the pairs.
 */
@Pure
public static <K, V> Map<K, V> operator_minus(Map<K, V> map, final Iterable<?> keys) {
    return Maps.filterKeys(map, new Predicate<K>() {
        @Override
        public boolean apply(K input) {
            return !Iterables.contains(keys, input);
        }
    });
}
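Since the javadoc stresses that the returned map is a view, a short demonstration of that semantics: mutating the backing map changes what the filtered result reports. This is standard Guava behavior, shown with illustrative data:

import com.google.common.collect.Iterables;
import com.google.common.collect.Maps;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class LiveViewDemo {
    public static void main(String[] args) {
        Map<String, Integer> map = new HashMap<>();
        map.put("a", 1);
        map.put("b", 2);

        List<String> removedKeys = List.of("b");
        Map<String, Integer> view = Maps.filterKeys(map, key -> !Iterables.contains(removedKeys, key));
        System.out.println(view); // {a=1}

        map.put("c", 3); // change the backing map...
        System.out.println(view); // {a=1, c=3} -- ...and the view reflects it
    }
}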
From source file:org.killbill.billing.plugin.adyen.api.mapping.PaymentInfoMappingService.java
private static void set3DSecureFields(final PaymentInfo paymentInfo, final Iterable<PluginProperty> properties) {
    final String ccUserAgent = PluginProperties.findPluginPropertyValue(PROPERTY_USER_AGENT, properties);
    paymentInfo.setUserAgent(ccUserAgent);

    final String ccAcceptHeader = PluginProperties.findPluginPropertyValue(PROPERTY_ACCEPT_HEADER, properties);
    paymentInfo.setAcceptHeader(ccAcceptHeader);

    final String md = PluginProperties.findPluginPropertyValue(PROPERTY_MD, properties);
    if (md != null) {
        paymentInfo.setMd(decode(md));
    }

    final String paRes = PluginProperties.findPluginPropertyValue(PROPERTY_PA_RES, properties);
    if (paRes != null) {
        paymentInfo.setPaRes(decode(paRes));
    }

    final String threeDThreshold = PluginProperties.findPluginPropertyValue(PROPERTY_THREE_D_THRESHOLD,
            properties);
    if (!Strings.isNullOrEmpty(threeDThreshold)) {
        // Expected in minor units
        paymentInfo.setThreeDThreshold(Long.valueOf(threeDThreshold));
    }

    final String mpiDataDirectoryResponse = PluginProperties
            .findPluginPropertyValue(PROPERTY_MPI_DATA_DIRECTORY_RESPONSE, properties);
    paymentInfo.setMpiDataDirectoryResponse(mpiDataDirectoryResponse);

    final String mpiDataAuthenticationResponse = PluginProperties
            .findPluginPropertyValue(PROPERTY_MPI_DATA_AUTHENTICATION_RESPONSE, properties);
    paymentInfo.setMpiDataAuthenticationResponse(mpiDataAuthenticationResponse);

    final String mpiDataCavv = PluginProperties.findPluginPropertyValue(PROPERTY_MPI_DATA_CAVV, properties);
    paymentInfo.setMpiDataCavv(mpiDataCavv);

    final String mpiDataCavvAlgorithm = PluginProperties
            .findPluginPropertyValue(PROPERTY_MPI_DATA_CAVV_ALGORITHM, properties);
    paymentInfo.setMpiDataCavvAlgorithm(mpiDataCavvAlgorithm);

    final String mpiDataXid = PluginProperties.findPluginPropertyValue(PROPERTY_MPI_DATA_XID, properties);
    paymentInfo.setMpiDataXid(mpiDataXid);

    final String mpiDataEci = PluginProperties.findPluginPropertyValue(PROPERTY_MPI_DATA_ECI, properties);
    paymentInfo.setMpiDataEci(mpiDataEci);

    final String mpiImplementationType = PluginProperties
            .findPluginPropertyValue(PROPERTY_MPI_IMPLEMENTATION_TYPE, properties);
    paymentInfo.setMpiImplementationType(mpiImplementationType);
    if (mpiImplementationType != null) {
        paymentInfo.setMpiImplementationTypeValues(Maps.filterKeys(PluginProperties.toStringMap(properties),
                Predicates.containsPattern(mpiImplementationType + ".")));
    }

    final String termUrl = PluginProperties.findPluginPropertyValue(PROPERTY_TERM_URL, properties);
    paymentInfo.setTermUrl(termUrl);
}
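The filterKeys call above keeps the plugin properties whose keys match a regular expression built from the MPI implementation type. A sketch of Predicates.containsPattern on string keys; note the argument is a regex, so a literal '.' matches any character unless quoted (the property names here are illustrative):

import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;

import java.util.Map;
import java.util.regex.Pattern;

public class RegexKeyFilter {
    public static void main(String[] args) {
        Map<String, String> properties = ImmutableMap.of(
                "mpi.cavv", "abc",
                "mpi.eci", "05",
                "termUrl", "https://example.com/return");
        // Keep keys containing "mpi." (Pattern.quote makes the '.' literal).
        Map<String, String> mpiValues = Maps.filterKeys(properties,
                Predicates.containsPattern(Pattern.quote("mpi.")));
        System.out.println(mpiValues); // {mpi.cavv=abc, mpi.eci=05}
    }
}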
From source file:org.apache.isis.core.runtime.services.publish.PublishingServiceInternalDefault.java
private void publishObjectsToPublisherServices(
        final Map<ObjectAdapter, ChangeKind> changeKindByEnlistedAdapter) {
    final Map<ObjectAdapter, ChangeKind> changeKindByPublishedAdapter = Maps
            .filterKeys(changeKindByEnlistedAdapter, PublishedObjectFacet.Predicates.isPublished());
    if (changeKindByPublishedAdapter.isEmpty()) {
        return;
    }
    final int numberLoaded = metricsService.numberObjectsLoaded();
    final int numberObjectPropertiesModified = changedObjectsServiceInternal.numberObjectPropertiesModified();
    final PublishedObjects publishedObjects = newPublishedObjects(numberLoaded, numberObjectPropertiesModified,
            changeKindByPublishedAdapter);
    for (PublisherService publisherService : publisherServices) {
        publisherService.publish(publishedObjects);
    }
}
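Here the predicate inspects the key objects themselves (whether the adapter is marked as published), rather than comparing keys against a set. A toy sketch of filterKeys with object keys tested on one of their attributes; the Entity record and its published flag are invented for illustration, not Isis types:

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;

import java.util.Map;

public class FilterByKeyAttribute {
    // Invented stand-in for ObjectAdapter + PublishedObjectFacet.
    record Entity(String name, boolean published) {}

    public static void main(String[] args) {
        Entity order = new Entity("Order", true);
        Entity draft = new Entity("Draft", false);
        Map<Entity, String> changeKindByAdapter = ImmutableMap.of(order, "UPDATE", draft, "CREATE");
        // Keep only changes to entities marked as published.
        Map<Entity, String> published = Maps.filterKeys(changeKindByAdapter, Entity::published);
        if (published.isEmpty()) {
            return; // nothing to publish, mirroring the early exit above
        }
        System.out.println(published); // {Entity[name=Order, published=true]=UPDATE}
    }
}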
From source file:co.mitro.core.servlets.MutateOrganization.java
@Override
protected MitroRPC processCommand(MitroRequestContext context)
        throws IOException, SQLException, MitroServletException {
    try {
        RPC.MutateOrganizationRequest in = gson.fromJson(context.jsonRequest,
                RPC.MutateOrganizationRequest.class);
        in.promotedMemberEncryptedKeys = MitroServlet.createMapIfNull(in.promotedMemberEncryptedKeys);
        in.newMemberGroupKeys = MitroServlet.createMapIfNull(in.newMemberGroupKeys);
        in.adminsToDemote = MitroServlet.uniquifyCollection(in.adminsToDemote);
        in.membersToRemove = MitroServlet.uniquifyCollection(in.membersToRemove);

        @SuppressWarnings("deprecation")
        AuthenticatedDB userDb = AuthenticatedDB.deprecatedNew(context.manager, context.requestor);
        DBGroup org = userDb.getOrganizationAsAdmin(in.orgId);

        Set<String> adminsToDemote = Sets.newHashSet(in.adminsToDemote);
        Collection<DBAcl> aclsToRemove = new HashSet<>();
        Set<Integer> existingAdmins = new HashSet<>();
        for (DBAcl acl : org.getAcls()) {
            DBIdentity u = acl.loadMemberIdentity(context.manager.identityDao);
            assert (u != null); // toplevel groups should not have group members.
            if (adminsToDemote.contains(u.getName())) {
                aclsToRemove.add(acl);
                adminsToDemote.remove(u.getName());
            } else {
                existingAdmins.add(u.getId());
            }
        }

        // check for an attempt to promote members who are already admins.
        Set<Integer> duplicateAdmins = Sets.intersection(existingAdmins,
                DBIdentity.getUserIdsFromNames(context.manager, in.promotedMemberEncryptedKeys.keySet()));
        if (!duplicateAdmins.isEmpty()) {
            throw new MitroServletException(
                    "Operation would create duplicate admins: " + COMMA_JOINER.join(duplicateAdmins));
        }
        if (!adminsToDemote.isEmpty()) {
            throw new MitroServletException("The following users are not admins and could not be deleted:"
                    + COMMA_JOINER.join(adminsToDemote));
        }
        if (existingAdmins.isEmpty() && in.promotedMemberEncryptedKeys.isEmpty()) {
            throw new UserVisibleException("You cannot remove all admins from an organization");
        }

        // delete ACLs for the admin user on the group. This maybe should be using common code?
        context.manager.aclDao.delete(aclsToRemove);

        Map<Integer, Integer> currentMemberIdsToGroupIds = getMemberIdsAndPrivateGroupIdsForOrg(context.manager,
                org);

        // Promoted members (new admins) must be members after all changes
        Set<String> currentMembers = DBIdentity.getUserNamesFromIds(context.manager,
                currentMemberIdsToGroupIds.keySet());
        Set<String> membersAfterChanges = Sets.difference(
                Sets.union(currentMembers, in.newMemberGroupKeys.keySet()),
                new HashSet<>(in.membersToRemove));
        Set<String> nonMemberAdmins = Sets.difference(in.promotedMemberEncryptedKeys.keySet(),
                membersAfterChanges);
        if (!nonMemberAdmins.isEmpty()) {
            throw new MitroServletException(
                    "Cannot add admins without them being members: " + COMMA_JOINER.join(nonMemberAdmins));
        }

        // check for duplicate users
        Set<Integer> duplicateMembers = Sets.intersection(currentMemberIdsToGroupIds.keySet(),
                DBIdentity.getUserIdsFromNames(context.manager, in.newMemberGroupKeys.keySet()));
        if (!duplicateMembers.isEmpty()) {
            throw new MitroServletException(
                    "Operation would create duplicate members: " + COMMA_JOINER.join(duplicateMembers));
        }

        // delete all the private groups. This might orphan secrets, which is the intended result.
        Set<Integer> memberIdsToRemove = DBIdentity.getUserIdsFromNames(context.manager, in.membersToRemove);
        if (memberIdsToRemove.size() != in.membersToRemove.size()) {
            throw new MitroServletException("Invalid members to remove.");
        }
        Set<Integer> illegalRemovals = Sets.intersection(existingAdmins, memberIdsToRemove);
        if (!illegalRemovals.isEmpty()) {
            throw new MitroServletException(
                    "Cannot remove members who are admins:" + COMMA_JOINER.join(illegalRemovals));
        }
        Set<Integer> nonOrgUsers = Sets.difference(memberIdsToRemove, currentMemberIdsToGroupIds.keySet());
        if (!nonOrgUsers.isEmpty()) {
            throw new MitroServletException("The following users are not members and cannot be removed:"
                    + COMMA_JOINER.join(nonOrgUsers));
        }
        Set<Integer> deleteGroupIds = Sets.newHashSet(
                Maps.filterKeys(currentMemberIdsToGroupIds, Predicates.in(memberIdsToRemove)).values());
        if (!deleteGroupIds.isEmpty()) {
            context.manager.groupDao.deleteIds(deleteGroupIds);

            DeleteBuilder<DBAcl, Integer> deleter = context.manager.aclDao.deleteBuilder();
            deleter.where().in(DBAcl.GROUP_ID_FIELD_NAME, deleteGroupIds);
            deleter.delete();

            DeleteBuilder<DBGroupSecret, Integer> gsDeleter = context.manager.groupSecretDao.deleteBuilder();
            gsDeleter.where().in(DBGroupSecret.GROUP_ID_NAME, deleteGroupIds);
            gsDeleter.delete();
        }

        // Remove the user from all org-owned groups to which he belongs.
        // Note: if the user has access to an org-owned secret via a non-org-owned group,
        // he will retain access.
        Set<Integer> allOrgGroupIds = Sets.newHashSet();
        for (DBGroup g : org.getAllOrgGroups(context.manager)) {
            allOrgGroupIds.add(g.getId());
        }
        if (!memberIdsToRemove.isEmpty()) {
            if (!allOrgGroupIds.isEmpty()) {
                // Remove users from organization-owned groups (named or otherwise)
                DeleteBuilder<DBAcl, Integer> deleter = context.manager.aclDao.deleteBuilder();
                deleter.where().in(DBAcl.MEMBER_IDENTITY_FIELD_NAME, memberIdsToRemove).and()
                        .in(DBAcl.GROUP_ID_FIELD_NAME, allOrgGroupIds);
                deleter.delete();
            }

            // Remove users from any org-owned secrets (e.g. via non-org private or named groups)
            HashMap<Integer, SecretToPath> orgSecretsToPath = new HashMap<>();
            ListMySecretsAndGroupKeys.getSecretInfo(context, AdminAccess.FORCE_ACCESS_VIA_TOPLEVEL_GROUPS,
                    orgSecretsToPath, ImmutableSet.of(org.getId()), null, IncludeAuditLogInfo.NO_AUDIT_LOG_INFO);
            if (orgSecretsToPath.size() > 0) {
                // Delete any group secret giving these users access to org secrets
                // strange side effect: personal teams may be mysteriously removed from org secrets
                // TODO: Potential bug: removing the last personal team will "orphan" the secret
                String groupSecretDelete = String.format(
                        "DELETE FROM group_secret WHERE id IN ("
                                + "SELECT group_secret.id FROM group_secret, acl WHERE "
                                + " group_secret.\"serverVisibleSecret_id\" IN (%s) AND "
                                + " group_secret.group_id = acl.group_id AND acl.member_identity IN (%s))",
                        COMMA_JOINER.join(orgSecretsToPath.keySet()), COMMA_JOINER.join(memberIdsToRemove));
                context.manager.groupSecretDao.executeRaw(groupSecretDelete);
            }
        }

        List<DBAcl> organizationAcls = CreateOrganization.makeAdminAclsForOrganization(userDb, org,
                in.promotedMemberEncryptedKeys);
        // TODO: move to authdb?
        for (DBAcl acl : organizationAcls) {
            context.manager.aclDao.create(acl);
        }

        // create private groups for each new member
        CreateOrganization.addMembersToOrganization(userDb, org, in.newMemberGroupKeys, context.manager);

        context.manager.addAuditLog(DBAudit.ACTION.MUTATE_ORGANIZATION, null, null, org, null, "");

        MutateOrganizationResponse out = new RPC.MutateOrganizationResponse();
        // TODO: validate the group?
        return out;
    } catch (CyclicGroupError e) {
        throw new MitroServletException(e);
    }
}
From source file:com.facebook.buck.json.PythonDslProjectBuildFileParser.java
/** Initialize the parser, starting buck.py. */
private void init() throws IOException {
    try (SimplePerfEvent.Scope scope = SimplePerfEvent.scope(buckEventBus, PerfEventId.of("ParserInit"))) {
        ImmutableMap.Builder<String, String> pythonEnvironmentBuilder = ImmutableMap.builder();
        // Strip out PYTHONPATH. buck.py manually sets this to include only nailgun. We don't want
        // to inject nailgun into the parser's PYTHONPATH, so strip that value out.
        // If we wanted to pass on some environmental PYTHONPATH, we would have to do some actual
        // merging of this and the BuckConfig's python module search path.
        pythonEnvironmentBuilder.putAll(Maps.filterKeys(environment, k -> !PYTHONPATH_ENV_VAR_NAME.equals(k)));

        if (options.getPythonModuleSearchPath().isPresent()) {
            pythonEnvironmentBuilder.put(PYTHONPATH_ENV_VAR_NAME, options.getPythonModuleSearchPath().get());
        }

        ImmutableMap<String, String> pythonEnvironment = pythonEnvironmentBuilder.build();
        ProcessExecutorParams params = ProcessExecutorParams.builder().setCommand(buildArgs())
                .setEnvironment(pythonEnvironment).build();
        LOG.debug("Starting buck.py command: %s environment: %s", params.getCommand(), params.getEnvironment());
        buckPyProcess = processExecutor.launchProcess(params);
        LOG.debug("Started process %s successfully", buckPyProcess);
        buckPyProcessInput = new CountingInputStream(buckPyProcess.getInputStream());
        buckPyProcessJsonGenerator = ObjectMappers.createGenerator(buckPyProcess.getOutputStream());
        // We have to wait to create the JsonParser until after we write our
        // first request, because Jackson "helpfully" synchronously reads
        // from the InputStream trying to detect whether the encoding is
        // UTF-8 or UTF-16 as soon as you create a JsonParser:
        //
        // https://git.io/vSgnA
        //
        // Since buck.py doesn't write any data until after it receives
        // a query, creating the JsonParser here would hang indefinitely.
        InputStream stderr = buckPyProcess.getErrorStream();

        AtomicInteger numberOfLines = new AtomicInteger(0);
        AtomicReference<Path> lastPath = new AtomicReference<Path>();
        InputStreamConsumer stderrConsumer = new InputStreamConsumer(stderr,
                (InputStreamConsumer.Handler) line -> {
                    Path path = currentBuildFile.get();
                    if (!Objects.equals(path, lastPath.get())) {
                        numberOfLines.set(0);
                        lastPath.set(path);
                    }
                    int count = numberOfLines.getAndIncrement();
                    if (count == 0) {
                        buckEventBus.post(ConsoleEvent.warning("WARNING: Output when parsing %s:", path));
                    }
                    buckEventBus.post(ConsoleEvent.warning("| %s", line));
                });
        stderrConsumerTerminationFuture = new FutureTask<>(stderrConsumer);
        stderrConsumerThread = Threads.namedThread(PythonDslProjectBuildFileParser.class.getSimpleName(),
                stderrConsumerTerminationFuture);
        stderrConsumerThread.start();
    }
}
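The environment-scrubbing step above drops a single variable before launching a subprocess. A standalone sketch of that negative filter feeding an ImmutableMap builder (the environment contents are illustrative):

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;

import java.util.Map;

public class EnvScrub {
    private static final String PYTHONPATH = "PYTHONPATH";

    public static void main(String[] args) {
        Map<String, String> environment = ImmutableMap.of(
                "PATH", "/usr/bin", PYTHONPATH, "/nailgun", "LANG", "en_US.UTF-8");
        // Copy everything except PYTHONPATH into the child process environment.
        ImmutableMap.Builder<String, String> builder = ImmutableMap.builder();
        builder.putAll(Maps.filterKeys(environment, k -> !PYTHONPATH.equals(k)));
        System.out.println(builder.build()); // {PATH=/usr/bin, LANG=en_US.UTF-8}
    }
}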
From source file:com.google.devtools.build.lib.skyframe.RecursiveDirectoryTraversalFunction.java
/**
 * Looks in the directory specified by {@code recursivePkgKey} for a package, does some work as
 * specified by {@link Visitor} if such a package exists, then recursively does work in each
 * non-excluded subdirectory as specified by {@link #getSkyKeyForSubdirectory}, and finally
 * aggregates the {@link Visitor} value along with values from each subdirectory as specified by
 * {@link #aggregateWithSubdirectorySkyValues}, and returns that aggregation.
 *
 * <p>Returns null if {@code env.valuesMissing()} is true, checked after each call to one of
 * {@link RecursiveDirectoryTraversalFunction}'s abstract methods that were given {@code env}.
 * (And after each of {@code visitDirectory}'s own uses of {@code env}, of course.)
 */
TReturn visitDirectory(RecursivePkgKey recursivePkgKey, Environment env) throws InterruptedException {
    RootedPath rootedPath = recursivePkgKey.getRootedPath();
    ProcessPackageDirectoryResult packageExistenceAndSubdirDeps = processPackageDirectory
            .getPackageExistenceAndSubdirDeps(rootedPath, recursivePkgKey.getRepository(), env,
                    recursivePkgKey.getExcludedPaths());
    if (env.valuesMissing()) {
        return null;
    }
    Iterable<SkyKey> childDeps = packageExistenceAndSubdirDeps.getChildDeps();

    TVisitor visitor = getInitialVisitor();
    Map<SkyKey, SkyValue> subdirectorySkyValues;
    if (packageExistenceAndSubdirDeps.packageExists()) {
        PathFragment rootRelativePath = rootedPath.getRelativePath();
        SkyKey packageKey = PackageValue
                .key(PackageIdentifier.create(recursivePkgKey.getRepository(), rootRelativePath));
        Map<SkyKey, ValueOrException<NoSuchPackageException>> dependentSkyValues = env.getValuesOrThrow(
                Iterables.concat(childDeps, ImmutableList.of(packageKey)), NoSuchPackageException.class);
        if (env.valuesMissing()) {
            return null;
        }
        Package pkg = null;
        try {
            PackageValue pkgValue = (PackageValue) dependentSkyValues.get(packageKey).get();
            if (pkgValue == null) {
                return null;
            }
            pkg = pkgValue.getPackage();
            if (pkg.containsErrors()) {
                env.getListener()
                        .handle(Event.error("package contains errors: " + rootRelativePath.getPathString()));
            }
        } catch (NoSuchPackageException e) {
            // The package had errors, but don't fail-fast as there might be subpackages below the
            // current directory.
            env.getListener()
                    .handle(Event.error("package contains errors: " + rootRelativePath.getPathString()));
        }
        if (pkg != null) {
            visitor.visitPackageValue(pkg, env);
            if (env.valuesMissing()) {
                return null;
            }
        }
        ImmutableMap.Builder<SkyKey, SkyValue> subdirectoryBuilder = ImmutableMap.builder();
        for (Map.Entry<SkyKey, ValueOrException<NoSuchPackageException>> entry : Maps
                .filterKeys(dependentSkyValues, Predicates.not(Predicates.equalTo(packageKey))).entrySet()) {
            try {
                subdirectoryBuilder.put(entry.getKey(), entry.getValue().get());
            } catch (NoSuchPackageException e) {
                // ignored.
            }
        }
        subdirectorySkyValues = subdirectoryBuilder.build();
    } else {
        subdirectorySkyValues = env.getValues(childDeps);
    }
    if (env.valuesMissing()) {
        return null;
    }
    return aggregateWithSubdirectorySkyValues(visitor, subdirectorySkyValues);
}
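Here filterKeys excludes exactly one key (the package key) from the dependency map so that only the subdirectory values remain. A minimal sketch of Predicates.not(Predicates.equalTo(...)) doing that single-key exclusion, with illustrative string keys:

import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;

import java.util.Map;

public class ExcludeOneKey {
    public static void main(String[] args) {
        Map<String, String> values = ImmutableMap.of("pkg", "package-value", "subdirA", "x", "subdirB", "y");
        // Everything except the entry for "pkg".
        Map<String, String> subdirectories = Maps.filterKeys(values,
                Predicates.not(Predicates.equalTo("pkg")));
        System.out.println(subdirectories); // {subdirA=x, subdirB=y}
    }
}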