List of usage examples for com.google.common.collect ImmutableMap.Builder putAll
public ImmutableMap.Builder<K, V> putAll(Map<? extends K, ? extends V> map)
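putAll copies every entry of the supplied map into the map being built and returns the builder for chaining; duplicate keys are rejected when the map is built. A minimal, self-contained sketch of typical usage (the keys, values, and variable names are illustrative and not taken from the examples below):

    import com.google.common.collect.ImmutableMap;
    import java.util.HashMap;
    import java.util.Map;

    public class PutAllExample {
        public static void main(String[] args) {
            // An ordinary mutable map whose entries should be carried over.
            Map<String, String> defaults = new HashMap<>();
            defaults.put("region", "us-east1");
            defaults.put("zone", "us-east1-b");

            // Copy all entries from the existing map, then add individual entries.
            ImmutableMap<String, String> labels = ImmutableMap.<String, String>builder()
                    .putAll(defaults)
                    .put("service", "compute")
                    .build();

            System.out.println(labels);
        }
    }

As the examples on this page show, putAll is commonly used to merge several source maps (or a base map plus extra put calls) into a single ImmutableMap.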
From source file:io.imaravic.log4j.logging.GoogleCloudLoggingManager.java
@VisibleForTesting
GoogleCloudLoggingManager(final String name,
                          final HttpTransport transport,
                          final GoogleCloudMetadata googleCloudMetadata,
                          final GoogleCloudCredentials googleCloudCredentials,
                          final String googleCloudProjectId,
                          final String googleCloudZone,
                          String googleCloudLogName,
                          final String virtualMachineId,
                          final int maxRetryTimeMillis) throws GeneralSecurityException, IOException {
    super(name);
    this.googleCloudProjectId = getGoogleCloudProjectId(googleCloudProjectId, googleCloudMetadata);
    this.googleCloudZone = getGoogleCloudZone(googleCloudZone, googleCloudMetadata);
    if (!googleCloudCredentials.usingComputeCredentials()) {
        serviceName = COMPUTE_SERVICE_NAME;
        commonLabels = getComputeServiceCommonLabels(
                getVirtualMachineId(virtualMachineId, googleCloudMetadata));
    } else {
        final List<String> machineAttributes = getMachineAttributes(googleCloudMetadata);
        final ImmutableMap.Builder<String, String> commonLabelsBuilder = ImmutableMap.builder();
        if (machineAttributes.contains("gae_backend_name")
                && machineAttributes.contains("gae_backend_version")) {
            serviceName = APPENGINE_SERVICE_NAME;
            // Add a prefix to Appengine logs to prevent namespace collisions
            googleCloudLogName = APPENGINE_SERVICE_NAME + "/" + googleCloudLogName;
            commonLabelsBuilder.putAll(getAppengineServiceCommonLabels(googleCloudMetadata));
        } else if (machineAttributes.contains("job_id")) {
            serviceName = DATAFLOW_SERVICE_NAME;
            commonLabelsBuilder.putAll(getDataflowServiceCommonLabels(googleCloudMetadata));
        } else {
            serviceName = COMPUTE_SERVICE_NAME;
        }
        if (!serviceName.equals(DATAFLOW_SERVICE_NAME)) {
            commonLabelsBuilder.putAll(
                    getComputeServiceCommonLabels(getVirtualMachineId(virtualMachineId, googleCloudMetadata)));
        }
        commonLabels = commonLabelsBuilder.build();
    }
    this.googleCloudLogName = URLEncoder.encode(googleCloudLogName, "UTF-8");
    this.googleCloudCredentials = googleCloudCredentials;
    this.loggingClient = createLoggingClient(transport, googleCloudCredentials, maxRetryTimeMillis);
}
From source file:com.arpnetworking.metrics.mad.parsers.JsonToRecordParser.java
com.arpnetworking.metrics.mad.model.Record parseV2fStenoLogLine(final JsonNode jsonNode)
        throws JsonProcessingException {
    final Version2fSteno model = OBJECT_MAPPER.treeToValue(jsonNode, Version2fSteno.class);
    final Version2fSteno.Data data = model.getData();
    final Version2fSteno.Context context = model.getContext();
    final Version2fSteno.Annotations annotations = data.getAnnotations();

    final ImmutableMap.Builder<String, Metric> variables = ImmutableMap.builder();
    putVariablesVersion2fSteno(data.getTimers(), MetricType.TIMER, variables);
    putVariablesVersion2fSteno(data.getCounters(), MetricType.COUNTER, variables);
    putVariablesVersion2fSteno(data.getGauges(), MetricType.GAUGE, variables);

    final ImmutableMap.Builder<String, String> annotationsBuilder = ImmutableMap.builder();
    annotationsBuilder.putAll(annotations.getOtherAnnotations());
    annotationsBuilder.put(PREFIXED_HOST_KEY, context.getHost());
    final ImmutableMap<String, String> flatAnnotations = annotationsBuilder.build();

    return new DefaultRecord.Builder()
            .setMetrics(variables.build())
            .setTime(annotations.getEnd())
            .setId(model.getId())
            .setAnnotations(flatAnnotations)
            .setDimensions(extractLegacyDimensions(flatAnnotations))
            .build();
}
From source file:com.spectralogic.dsbrowser.gui.services.tasks.Ds3PutJob.java
@Override
public void executeJob() throws Exception {
    final boolean metadata = settings.getFilePropertiesSettings().isFilePropertiesEnabled();
    final boolean hasPriority = !Guard.isStringNullOrEmpty(jobPriority);
    final Instant jobStartInstant = Instant.now();
    final String startJobDate = dateTimeUtils.nowAsString();
    final String jobInitiateTitleMessage = buildJobInitiatedTitleMessage(startJobDate, ds3Client);
    final String transferringMessage = buildTransferringMessage(resourceBundle);
    LOG.info(resourceBundle.getString("putJobStarted"));
    updateTitle(resourceBundle.getString("blackPearlHealth"));
    if (!CheckNetwork.isReachable(ds3Client)) {
        hostNotAvailable();
        return;
    }
    updateTitle(jobInitiateTitleMessage);
    updateMessage(transferringMessage);
    final ImmutableMap.Builder<String, Path> fileMapBuilder = ImmutableMap.builder();
    final ImmutableMap.Builder<String, Path> folderMapBuilder = ImmutableMap.builder();
    final ImmutableMap.Builder<String, Path> fileMapperBuilder = ImmutableMap.builder();
    files.forEach(
            pair -> buildMaps(fileMapBuilder, folderMapBuilder, pair, loggingService, targetDir, delimiter));
    final ImmutableMap<String, Path> fileMap = fileMapBuilder.build();
    final ImmutableMap<String, Path> folderMap = folderMapBuilder.build();
    fileMapperBuilder.putAll(fileMap);
    fileMapperBuilder.putAll(folderMap);
    final ImmutableMap<String, Path> fileMapper = fileMapperBuilder.build();
    final ImmutableList<Ds3Object> objects = fileMapper.entrySet().stream().map(this::buildDs3Object)
            .filter(Optional::isPresent).map(Optional::get).collect(GuavaCollectors.immutableList());
    if (objects.isEmpty()) {
        loggingService.logMessage("Job was empty, not sending", LogType.INFO);
        return;
    }
    this.job = Ds3ClientHelpers.wrap(ds3Client).startWriteJob(bucket, objects);
    final long totalJobSize = getTotalJobSize();
    job.withMaxParallelRequests(maximumNumberOfParallelThreads);
    ParseJobInterruptionMap.saveValuesToFiles(jobInterruptionStore, fileMap, folderMap,
            ds3Client.getConnectionDetails().getEndpoint(), this.getJobId(), totalJobSize, targetDir,
            dateTimeUtils, PUT, bucket);
    updateMessage(StringBuilderUtil
            .transferringTotalJobString(FileSizeFormat.getFileSizeType(totalJobSize), targetDirectory)
            .toString());
    if (hasPriority) {
        ds3Client.modifyJobSpectraS3(
                new ModifyJobSpectraS3Request(job.getJobId()).withPriority(Priority.valueOf(jobPriority)));
    }
    if (metadata) {
        LOG.info("Registering metadata access Implementation");
        job.withMetadata(new MetadataAccessImpl(fileMapper));
    }
    addWaitingForChunkListener(totalJobSize, bucket + StringConstants.DOUBLE_SLASH + targetDir);
    final AtomicLong totalSent = addDataTransferListener(totalJobSize);
    job.attachObjectCompletedListener(
            obj -> updateGuiOnComplete(startJobDate, jobStartInstant, totalJobSize, totalSent, obj));
    job.transfer(file -> FileChannel.open(PathUtil.resolveForSymbolic(fileMapper.get(file)),
            StandardOpenOption.READ));
    final Disposable d = waitForPermanentStorageTransfer(totalJobSize).observeOn(JavaFxScheduler.platform())
            .doOnComplete(() -> {
                LOG.info("Job transferred to permanent storage location");
                final String newDate = dateTimeUtils.nowAsString();
                loggingService.logMessage(StringBuilderUtil
                        .jobSuccessfullyTransferredString(PUT, FileSizeFormat.getFileSizeType(totalJobSize),
                                bucket + "\\" + targetDir, newDate,
                                resourceBundle.getString("permanentStorageLocation"), false)
                        .toString(), LogType.SUCCESS);
                Ds3PanelService.throttledRefresh(remoteDestination);
                ParseJobInterruptionMap.removeJobID(jobInterruptionStore, this.getJobId().toString(),
                        ds3Client.getConnectionDetails().getEndpoint(), deepStorageBrowserPresenter,
                        loggingService);
            }).subscribe();
    while (!d.isDisposed()) {
        Thread.sleep(1000);
    }
}
From source file:com.facebook.buck.thrift.ThriftLibraryDescription.java
/**
 * Create the build rules which compile the input thrift sources into their respective
 * language specific sources.
 */
@VisibleForTesting
protected ImmutableMap<String, ThriftCompiler> createThriftCompilerBuildRules(BuildRuleParams params,
        BuildRuleResolver resolver, CompilerType compilerType, ImmutableList<String> flags, String language,
        ImmutableSet<String> options, ImmutableMap<String, SourcePath> srcs,
        ImmutableSortedSet<ThriftLibrary> deps,
        ImmutableMap<String, ImmutableSortedSet<String>> generatedSources) {
    SourcePathRuleFinder ruleFinder = new SourcePathRuleFinder(resolver);
    Tool compiler = thriftBuckConfig.getCompiler(compilerType, resolver);

    // Build up the include roots to find thrift file deps and also the build rules that
    // generate them.
    ImmutableMap.Builder<Path, SourcePath> includesBuilder = ImmutableMap.builder();
    ImmutableSortedSet.Builder<HeaderSymlinkTree> includeTreeRulesBuilder = ImmutableSortedSet.naturalOrder();
    ImmutableList.Builder<Path> includeRootsBuilder = ImmutableList.builder();
    ImmutableSet.Builder<Path> headerMapsBuilder = ImmutableSet.builder();
    for (ThriftLibrary dep : deps) {
        includesBuilder.putAll(dep.getIncludes());
        includeTreeRulesBuilder.add(dep.getIncludeTreeRule());
        includeRootsBuilder.add(dep.getIncludeTreeRule().getIncludePath());
        headerMapsBuilder.addAll(OptionalCompat.asSet(dep.getIncludeTreeRule().getHeaderMap()));
    }
    ImmutableMap<Path, SourcePath> includes = includesBuilder.build();
    ImmutableSortedSet<HeaderSymlinkTree> includeTreeRules = includeTreeRulesBuilder.build();
    ImmutableList<Path> includeRoots = includeRootsBuilder.build();
    ImmutableSet<Path> headerMaps = headerMapsBuilder.build();

    // For each thrift source, add a thrift compile rule to generate its sources.
    ImmutableMap.Builder<String, ThriftCompiler> compileRules = ImmutableMap.builder();
    for (ImmutableMap.Entry<String, SourcePath> ent : srcs.entrySet()) {
        String name = ent.getKey();
        SourcePath source = ent.getValue();
        ImmutableSortedSet<String> genSrcs = Preconditions.checkNotNull(generatedSources.get(name));
        BuildTarget target = createThriftCompilerBuildTarget(params.getBuildTarget(), name);
        Path outputDir = getThriftCompilerOutputDir(params.getProjectFilesystem(), params.getBuildTarget(),
                name);
        compileRules.put(name,
                new ThriftCompiler(
                        params.copyWithChanges(target,
                                Suppliers.ofInstance(ImmutableSortedSet.<BuildRule>naturalOrder()
                                        .addAll(compiler.getDeps(ruleFinder))
                                        .addAll(ruleFinder.filterBuildRuleInputs(ImmutableList.<SourcePath>builder()
                                                .add(source).addAll(includes.values()).build()))
                                        .addAll(includeTreeRules).build()),
                                Suppliers.ofInstance(ImmutableSortedSet.of())),
                        compiler, flags, outputDir, source, language, options, includeRoots, headerMaps,
                        includes, genSrcs));
    }
    return compileRules.build();
}
From source file:com.facebook.presto.sql.planner.QueryPlanner.java
private PlanBuilder explicitCoercionSymbols(PlanBuilder subPlan, Iterable<Symbol> alreadyCoerced,
        Iterable<? extends Expression> uncoerced) {
    TranslationMap translations = new TranslationMap(subPlan.getRelationPlan(), analysis);
    translations.copyMappingsFrom(subPlan.getTranslations());

    ImmutableMap.Builder<Symbol, Expression> projections = ImmutableMap.builder();
    projections.putAll(coerce(uncoerced, subPlan, translations));

    for (Symbol symbol : alreadyCoerced) {
        projections.put(symbol, new QualifiedNameReference(symbol.toQualifiedName()));
    }

    return new PlanBuilder(translations,
            new ProjectNode(idAllocator.getNextId(), subPlan.getRoot(), projections.build()),
            subPlan.getSampleWeight());
}
From source file:org.apache.aurora.scheduler.state.CronJobManager.java
/**
 * Triggers execution of a cron job, depending on the cron collision policy for the job.
 *
 * @param cronJob The job to be triggered.
 */
private void cronTriggered(SanitizedCronJob cronJob) {
    SanitizedConfiguration config = cronJob.config;
    IJobConfiguration job = config.getJobConfig();
    LOG.info(String.format("Cron triggered for %s at %s with policy %s", JobKeys.toPath(job), new Date(),
            job.getCronCollisionPolicy()));
    cronJobsTriggered.incrementAndGet();

    ImmutableMap.Builder<Integer, ITaskConfig> builder = ImmutableMap.builder();
    final Query.Builder activeQuery = Query.jobScoped(job.getKey()).active();
    Set<IScheduledTask> activeTasks = Storage.Util.consistentFetchTasks(storage, activeQuery);

    if (activeTasks.isEmpty()) {
        builder.putAll(config.getTaskConfigs());
    } else {
        // Assign a default collision policy.
        CronCollisionPolicy collisionPolicy = orDefault(job.getCronCollisionPolicy());

        switch (collisionPolicy) {
        case KILL_EXISTING:
            try {
                schedulerCore.killTasks(activeQuery, CRON_USER);
                // Check immediately if the tasks are gone. This could happen if the existing tasks
                // were pending.
                if (!hasTasks(activeQuery)) {
                    builder.putAll(config.getTaskConfigs());
                } else {
                    delayedRun(activeQuery, config);
                }
            } catch (ScheduleException e) {
                LOG.log(Level.SEVERE, "Failed to kill job.", e);
            }
            break;
        case CANCEL_NEW:
            break;
        case RUN_OVERLAP:
            LOG.severe("Ignoring trigger for job " + JobKeys.toPath(job)
                    + " with deprecated collision policy RUN_OVERLAP due to unterminated active tasks.");
            break;
        default:
            LOG.severe("Unrecognized cron collision policy: " + job.getCronCollisionPolicy());
        }
    }

    Map<Integer, ITaskConfig> newTasks = builder.build();
    if (!newTasks.isEmpty()) {
        stateManager.insertPendingTasks(newTasks);
    }
}
From source file:com.facebook.presto.sql.planner.QueryPlanner.java
private PlanBuilder explicitCoercionFields(PlanBuilder subPlan, Iterable<FieldOrExpression> alreadyCoerced,
        Iterable<? extends Expression> uncoerced) {
    TranslationMap translations = new TranslationMap(subPlan.getRelationPlan(), analysis);

    ImmutableMap.Builder<Symbol, Expression> projections = ImmutableMap.builder();
    projections.putAll(coerce(uncoerced, subPlan, translations));

    for (FieldOrExpression fieldOrExpression : alreadyCoerced) {
        Symbol symbol;
        if (fieldOrExpression.isFieldReference()) {
            Field field = subPlan.getRelationPlan().getDescriptor()
                    .getFieldByIndex(fieldOrExpression.getFieldIndex());
            symbol = symbolAllocator.newSymbol(field);
        } else {
            symbol = symbolAllocator.newSymbol(fieldOrExpression.getExpression(),
                    analysis.getType(fieldOrExpression.getExpression()));
        }
        Expression rewritten = subPlan.rewrite(fieldOrExpression);
        projections.put(symbol, rewritten);
        translations.put(fieldOrExpression, symbol);
    }

    return new PlanBuilder(translations,
            new ProjectNode(idAllocator.getNextId(), subPlan.getRoot(), projections.build()),
            subPlan.getSampleWeight());
}
From source file:org.janusgraph.diskstorage.Backend.java
/**
 * Get information about all registered {@link IndexProvider}s.
 *
 * @return an immutable copy of the registered index information, keyed by index name
 */
public Map<String, IndexInformation> getIndexInformation() {
    ImmutableMap.Builder<String, IndexInformation> copy = ImmutableMap.builder();
    copy.putAll(indexes);
    return copy.build();
}
From source file:com.github.rinde.datgen.pdptw.DatasetGenerator.java
static void writePropertiesFile(Scenario scen, GeneratorSettings settings, double actualDyn, long seed,
        String fileName) {
    final DateTimeFormatter formatter = ISODateTimeFormat.dateHourMinuteSecondMillis();
    final VanLon15ProblemClass pc = (VanLon15ProblemClass) scen.getProblemClass();

    final ImmutableMap.Builder<String, Object> properties = ImmutableMap.<String, Object>builder()
            .put("problem_class", pc.getId())
            .put("id", scen.getProblemInstanceId())
            .put("dynamism_bin", pc.getDynamism())
            .put("dynamism_actual", actualDyn)
            .put("urgency", pc.getUrgency())
            .put("scale", pc.getScale())
            .put("random_seed", seed)
            .put("creation_date", formatter.print(System.currentTimeMillis()))
            .put("creator", System.getProperty("user.name"))
            .put("day_length", settings.getDayLength())
            .put("office_opening_hours", settings.getOfficeHours());
    properties.putAll(settings.getProperties());

    final ImmutableMultiset<Class<?>> eventTypes = Metrics.getEventTypeCounts(scen);
    for (final Multiset.Entry<Class<?>> en : eventTypes.entrySet()) {
        properties.put(en.getElement().getSimpleName(), en.getCount());
    }

    try {
        Files.write(Paths.get(fileName + ".properties"),
                asList(Joiner.on("\n").withKeyValueSeparator(" = ").join(properties.build())),
                Charsets.UTF_8);
    } catch (final IOException e) {
        throw new IllegalStateException(e);
    }
}
From source file:com.twitter.aurora.scheduler.state.CronJobManager.java
/**
 * Triggers execution of a cron job, depending on the cron collision policy for the job.
 *
 * @param config The config of the job to be triggered.
 */
@VisibleForTesting
void cronTriggered(SanitizedConfiguration config) {
    IJobConfiguration job = config.getJobConfig();
    LOG.info(String.format("Cron triggered for %s at %s with policy %s", JobKeys.toPath(job), new Date(),
            job.getCronCollisionPolicy()));
    cronJobsTriggered.incrementAndGet();

    ImmutableMap.Builder<Integer, ITaskConfig> builder = ImmutableMap.builder();
    final Query.Builder activeQuery = Query.jobScoped(job.getKey()).active();
    Set<IScheduledTask> activeTasks = Storage.Util.consistentFetchTasks(storage, activeQuery);

    if (activeTasks.isEmpty()) {
        builder.putAll(config.getTaskConfigs());
    } else {
        // Assign a default collision policy.
        CronCollisionPolicy collisionPolicy = orDefault(job.getCronCollisionPolicy());

        switch (collisionPolicy) {
        case KILL_EXISTING:
            try {
                schedulerCore.killTasks(activeQuery, CRON_USER);
                // Check immediately if the tasks are gone. This could happen if the existing tasks
                // were pending.
                if (!hasTasks(activeQuery)) {
                    builder.putAll(config.getTaskConfigs());
                } else {
                    delayedRun(activeQuery, config);
                }
            } catch (ScheduleException e) {
                LOG.log(Level.SEVERE, "Failed to kill job.", e);
            }
            break;
        case CANCEL_NEW:
            break;
        case RUN_OVERLAP:
            Map<Integer, IScheduledTask> byInstance = Maps.uniqueIndex(activeTasks,
                    Tasks.SCHEDULED_TO_INSTANCE_ID);
            Map<Integer, ScheduleStatus> existingTasks = Maps.transformValues(byInstance, Tasks.GET_STATUS);
            if (existingTasks.isEmpty()) {
                builder.putAll(config.getTaskConfigs());
            } else if (Iterables.any(existingTasks.values(), Predicates.equalTo(PENDING))) {
                LOG.info("Job " + JobKeys.toPath(job) + " has pending tasks, suppressing run.");
            } else {
                // To safely overlap this run, we need to adjust the instance IDs of the overlapping
                // run (maintaining the role/job/instance UUID invariant).
                int instanceOffset = Ordering.natural().max(existingTasks.keySet()) + 1;
                LOG.info("Adjusting instance IDs of " + JobKeys.toPath(job) + " by " + instanceOffset
                        + " for overlapping cron run.");
                for (Map.Entry<Integer, ITaskConfig> entry : config.getTaskConfigs().entrySet()) {
                    builder.put(entry.getKey() + instanceOffset, entry.getValue());
                }
            }
            break;
        default:
            LOG.severe("Unrecognized cron collision policy: " + job.getCronCollisionPolicy());
        }
    }

    Map<Integer, ITaskConfig> newTasks = builder.build();
    if (!newTasks.isEmpty()) {
        stateManager.insertPendingTasks(newTasks);
    }
}