List of usage examples for com.google.common.base.Optional#or

Optional.or has three overloads; the examples below exercise each of them:

public abstract T or(T defaultValue);
public abstract Optional<T> or(Optional<? extends T> secondChoice);
@Beta public abstract T or(Supplier<? extends T> supplier);
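A minimal, self-contained sketch of each overload's behavior before the real-world examples. The class name OptionalOrDemo and the literal values are illustrative only, not drawn from the projects below:

import com.google.common.base.Optional;
import com.google.common.base.Supplier;

public class OptionalOrDemo {
    public static void main(String[] args) {
        Optional<String> present = Optional.of("value");
        Optional<String> absent = Optional.absent();

        // or(T): returns the contained value, or the given default.
        // The default must be non-null; use orNull() for a nullable result.
        System.out.println(present.or("default"));             // prints: value
        System.out.println(absent.or("default"));              // prints: default

        // or(Optional): returns this Optional if present, otherwise the second choice.
        System.out.println(absent.or(Optional.of("second")));  // prints: Optional.of(second)

        // or(Supplier): the default is computed lazily, only when this Optional is absent.
        System.out.println(absent.or(new Supplier<String>() {
            @Override
            public String get() {
                return "supplied";
            }
        }));                                                   // prints: supplied
    }
}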
From source file:org.attribyte.relay.AsyncPublisher.java
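This constructor uses or(T) to fall back to a ConsoleLogger when no logger is supplied.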
/**
 * Creates a publisher with a specified notification queue size.
 * @param numProcessors The number of threads processing the notification queue.
 * @param maxQueueSize The maximum queue size. If < 1, notification queue is unbounded.
 * @param notificationTimeoutSeconds The notification send timeout.
 * @param acceptCodes The set of HTTP response codes that map to 'Accept'.
 * @param logger An optional logger.
 * @throws Exception on initialization error.
 */
public AsyncPublisher(final int numProcessors, final int maxQueueSize, final int notificationTimeoutSeconds,
        final Set<Integer> acceptCodes, final Optional<Logger> logger) throws Exception {
    final BlockingQueue<Runnable> notifications;
    assert (numProcessors > 0);
    if (maxQueueSize > 0) {
        notifications = new ArrayBlockingQueue<>(maxQueueSize);
    } else {
        notifications = new LinkedBlockingQueue<>();
    }
    ThreadPoolExecutor executor = new ThreadPoolExecutor(numProcessors, numProcessors, 0L,
            TimeUnit.MILLISECONDS, notifications,
            new ThreadFactoryBuilder().setNameFormat("async-publisher-%d").build(),
            new ThreadPoolExecutor.AbortPolicy());
    executor.prestartAllCoreThreads();
    this.notificationExecutor = MoreExecutors.listeningDecorator(executor);
    this.notificationQueueSize = new CachedGauge<Integer>(15L, TimeUnit.SECONDS) {
        protected Integer loadValue() {
            return notifications.size();
        }
    };
    SslContextFactory sslContextFactory = new SslContextFactory();
    this.httpClient = new HttpClient(sslContextFactory);
    this.httpClient.setFollowRedirects(false);
    this.httpClient.setConnectTimeout(notificationTimeoutSeconds * 1000L);
    this.httpClient.setCookieStore(new HttpCookieStore.Empty());
    this.notificationTimeoutSeconds = notificationTimeoutSeconds;
    this.acceptCodes = ImmutableSet.copyOf(acceptCodes);
    this.logger = logger.or(new ConsoleLogger());
}
From source file:org.apache.james.imap.processor.SearchProcessor.java
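This search handler uses or(T) to substitute MessageUid.MIN_VALUE/MAX_VALUE bounds when the selected mailbox has no first or last UID.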
/**
 * Create a {@link Criterion} for the given sequence-sets.
 * This includes special handling which is needed for SEARCH to not return a BAD response on an invalid message-set.
 * See IMAP-292 for more details.
 */
private Criterion sequence(IdRange[] sequenceNumbers, ImapSession session) throws MessageRangeException {
    final SelectedMailbox selected = session.getSelected();

    // First check if we have any messages in the mailbox;
    // if not we don't need to go through all of this
    final List<SearchQuery.UidRange> ranges = new ArrayList<SearchQuery.UidRange>();
    if (selected.existsCount() > 0) {
        for (IdRange range : sequenceNumbers) {
            long lowVal = range.getLowVal();
            long highVal = range.getHighVal();
            // Take care of "*" and "*:*" values by returning the last
            // message in the mailbox. See IMAP-289
            if (lowVal == Long.MAX_VALUE && highVal == Long.MAX_VALUE) {
                MessageUid highUid = selected.getLastUid().or(MessageUid.MIN_VALUE);
                ranges.add(new SearchQuery.UidRange(highUid));
            } else {
                Optional<MessageUid> lowUid;
                if (lowVal != Long.MIN_VALUE) {
                    lowUid = selected.uid((int) lowVal);
                } else {
                    lowUid = selected.getFirstUid();
                }

                // The lowVal should never be SelectedMailbox.NO_SUCH_MESSAGE
                // but we check for it just to be safe
                if (lowUid.isPresent()) {
                    Optional<MessageUid> highUid = Optional.absent();
                    if (highVal != Long.MAX_VALUE) {
                        highUid = selected.uid((int) highVal);
                        if (!highUid.isPresent()) {
                            // we requested a message with a MSN higher than
                            // the current msg count. So just use the
                            // highest uid as max
                            highUid = selected.getLastUid();
                        }
                    } else {
                        highUid = selected.getLastUid();
                    }
                    ranges.add(new SearchQuery.UidRange(lowUid.or(MessageUid.MIN_VALUE),
                            highUid.or(MessageUid.MAX_VALUE)));
                }
            }
        }
    }

    return SearchQuery.uid(ranges.toArray(new SearchQuery.UidRange[0]));
}
From source file:com.twitter.common.zookeeper.ZooKeeperClient.java
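This constructor uses chrootPath.or("") to append an optional chroot path to the ZK connect string.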
/**
 * Creates an unconnected client that will lazily attempt to connect on the first call to
 * {@link #get}. All successful connections will be authenticated with the given
 * {@code credentials}.
 *
 * @param sessionTimeout the ZK session timeout
 * @param credentials the credentials to authenticate with
 * @param chrootPath an optional chroot path
 * @param zooKeeperServers the set of servers forming the ZK cluster
 */
public ZooKeeperClient(Amount<Integer, Time> sessionTimeout, Credentials credentials,
        Optional<String> chrootPath, Iterable<InetSocketAddress> zooKeeperServers) {
    this.sessionTimeoutMs = Preconditions.checkNotNull(sessionTimeout).as(Time.MILLISECONDS);
    this.credentials = Preconditions.checkNotNull(credentials);

    if (chrootPath.isPresent()) {
        PathUtils.validatePath(chrootPath.get());
    }

    Preconditions.checkNotNull(zooKeeperServers);
    Preconditions.checkArgument(!Iterables.isEmpty(zooKeeperServers), "Must present at least 1 ZK server");

    Thread watcherProcessor = new Thread("ZookeeperClient-watcherProcessor") {
        @Override
        public void run() {
            while (true) {
                try {
                    WatchedEvent event = eventQueue.take();
                    for (Watcher watcher : watchers) {
                        watcher.process(event);
                    }
                } catch (InterruptedException e) {
                    /* ignore */
                }
            }
        }
    };
    watcherProcessor.setDaemon(true);
    watcherProcessor.start();

    Iterable<String> servers = Iterables.transform(ImmutableSet.copyOf(zooKeeperServers),
            InetSocketAddressHelper.INET_TO_STR);
    this.zooKeeperServers = Joiner.on(',').join(servers).concat(chrootPath.or(""));
}
From source file:com.github.nethad.clustermeister.provisioning.ec2.AmazonInstanceManager.java
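This method uses userMetadata.or(...) to default the optional metadata map to a fresh empty map before populating it.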
private void setTemplateOptions(Template template, AmazonNodeConfiguration nodeConfiguration,
        Optional<Map<String, String>> userMetadata) {
    //TODO: ports may not be correct
    template.getOptions().inboundPorts(AmazonNodeManager.DEFAULT_SSH_PORT, JPPFConstants.DEFAULT_SERVER_PORT,
            JPPFConstants.DEFAULT_MANAGEMENT_PORT, JPPFConstants.DEFAULT_MANAGEMENT_PORT + 1,
            JPPFConstants.DEFAULT_MANAGEMENT_RMI_PORT);

    AWSInstanceProfile nodeProfile = nodeConfiguration.getProfile();
    Optional<Float> spotPrice = nodeProfile.getSpotPrice();
    Optional<String> spotRequestType = nodeProfile.getSpotRequestType();
    Optional<Date> validFrom = nodeProfile.getSpotRequestValidFrom();
    Optional<Date> validTo = nodeProfile.getSpotRequestValidTo();
    Optional<String> placementGroup = nodeProfile.getPlacementGroup();

    AWSEC2TemplateOptions awsEC2Options = template.getOptions().as(AWSEC2TemplateOptions.class);

    if (placementGroup.isPresent()) {
        PlacementGroup placementGroupDesc = ec2Facade.getPlacementGroupDescription(nodeProfile.getRegion(),
                placementGroup.get());
        if (placementGroupDesc == null || (placementGroupDesc.getState() != PlacementGroup.State.AVAILABLE
                && placementGroupDesc.getState() != PlacementGroup.State.PENDING)) {
            ec2Facade.createPlavementGroupInRegion(nodeProfile.getRegion(), placementGroup.get());
        }
        awsEC2Options.placementGroup(placementGroup.get());
    }

    Map<String, String> metadataMap = userMetadata.or(Maps.<String, String>newHashMap());
    if (spotPrice.isPresent()) {
        awsEC2Options.spotPrice(spotPrice.get());
        metadataMap.put(AmazonConfigurationLoader.SPOT_PRICE, String.valueOf(spotPrice.get()));
        RequestSpotInstancesOptions spotOptions = awsEC2Options.getSpotOptions();
        SpotInstanceRequest.Type type;
        if (spotRequestType.isPresent()) {
            type = SpotInstanceRequest.Type.valueOf(spotRequestType.get().trim().toUpperCase());
        } else {
            type = SpotInstanceRequest.Type.ONE_TIME;
        }
        spotOptions.type(type);
        // Record the resolved type; calling spotRequestType.get() here unconditionally
        // would throw when the optional is absent, so fall back to the computed default.
        metadataMap.put(AmazonConfigurationLoader.SPOT_REQUEST_TYPE, spotRequestType.or(type.toString()));
        if (validFrom.isPresent()) {
            spotOptions.validFrom(validFrom.get());
            metadataMap.put(AmazonConfigurationLoader.SPOT_REQUEST_VALID_FROM, validFrom.get().toString());
        }
        if (validTo.isPresent()) {
            spotOptions.validUntil(validTo.get());
            metadataMap.put(AmazonConfigurationLoader.SPOT_REQUEST_VALID_TO, validTo.get().toString());
        }
    }
    template.getOptions().userMetadata(metadataMap);

    setLoginCredentials(template, nodeConfiguration);
}
From source file:com.twitter.finagle.common.zookeeper.ZooKeeperClient.java
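As in the com.twitter.common variant above, chrootPath.or("") appends an optional chroot path to the connect string.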
/**
 * Creates an unconnected client that will lazily attempt to connect on the first call to
 * {@link #get}. All successful connections will be authenticated with the given
 * {@code credentials}.
 *
 * @param sessionTimeout the ZK session timeout
 * @param credentials the credentials to authenticate with
 * @param chrootPath an optional chroot path
 * @param zooKeeperServers the set of servers forming the ZK cluster
 */
public ZooKeeperClient(Duration sessionTimeout, Credentials credentials, Optional<String> chrootPath,
        Iterable<InetSocketAddress> zooKeeperServers) {
    this.sessionTimeoutMs = (int) Preconditions.checkNotNull(sessionTimeout).inMillis();
    this.credentials = Preconditions.checkNotNull(credentials);

    if (chrootPath.isPresent()) {
        PathUtils.validatePath(chrootPath.get());
    }

    Preconditions.checkNotNull(zooKeeperServers);
    Preconditions.checkArgument(!Iterables.isEmpty(zooKeeperServers), "Must present at least 1 ZK server");

    Thread watcherProcessor = new Thread("ZookeeperClient-watcherProcessor") {
        @Override
        public void run() {
            while (true) {
                try {
                    WatchedEvent event = eventQueue.take();
                    for (Watcher watcher : watchers) {
                        watcher.process(event);
                    }
                } catch (InterruptedException e) {
                    /* ignore */
                }
            }
        }
    };
    watcherProcessor.setDaemon(true);
    watcherProcessor.start();

    Iterable<String> servers = Iterables.transform(ImmutableSet.copyOf(zooKeeperServers),
            new Function<InetSocketAddress, String>() {
                @Override
                public String apply(InetSocketAddress addr) {
                    return addr.getHostName() + ":" + addr.getPort();
                }
            });
    this.zooKeeperServers = Joiner.on(',').join(servers);
    this.connectString = this.zooKeeperServers.concat(chrootPath.or(""));
}
From source file:com.facebook.buck.apple.ProjectGenerator.java
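This generator uses script.or("") to default the shell script body to the empty string when the script file cannot be read.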
private Optional<PBXTarget> generateHalideLibraryTarget(PBXProject project,
        TargetNode<HalideLibraryDescription.Arg> targetNode) throws IOException {
    final BuildTarget buildTarget = targetNode.getBuildTarget();
    String productName = getProductNameForBuildTarget(buildTarget);
    Path outputPath = getHalideOutputPath(buildTarget);

    Path scriptPath = halideBuckConfig.getXcodeCompileScriptPath();
    Optional<String> script = projectFilesystem.readFileIfItExists(scriptPath);
    PBXShellScriptBuildPhase scriptPhase = new PBXShellScriptBuildPhase();
    scriptPhase.setShellScript(script.or(""));

    NewNativeTargetProjectMutator mutator = new NewNativeTargetProjectMutator(pathRelativizer,
            sourcePathResolver);
    mutator.setTargetName(getXcodeTargetName(buildTarget))
            .setProduct(ProductType.STATIC_LIBRARY, productName, outputPath)
            .setPreBuildRunScriptPhases(ImmutableList.of(scriptPhase));

    NewNativeTargetProjectMutator.Result targetBuilderResult;
    try {
        targetBuilderResult = mutator.buildTargetAndAddToProject(project);
    } catch (NoSuchBuildTargetException e) {
        throw new HumanReadableException(e);
    }

    BuildTarget compilerTarget = HalideLibraryDescription.createHalideCompilerBuildTarget(buildTarget);
    Path compilerPath = BuildTargets.getGenPath(compilerTarget, "%s");
    ImmutableMap<String, String> appendedConfig = ImmutableMap.<String, String>of();
    ImmutableMap<String, String> extraSettings = ImmutableMap.<String, String>of();
    ImmutableMap.Builder<String, String> defaultSettingsBuilder = ImmutableMap.builder();
    defaultSettingsBuilder.put("REPO_ROOT",
            projectFilesystem.getRootPath().toAbsolutePath().normalize().toString());
    defaultSettingsBuilder.put("HALIDE_COMPILER_PATH", compilerPath.toString());
    defaultSettingsBuilder.put("HALIDE_OUTPUT_PATH", outputPath.toString());
    defaultSettingsBuilder.put("HALIDE_FUNC_NAME", buildTarget.getShortName());
    defaultSettingsBuilder.put(PRODUCT_NAME, productName);

    Optional<ImmutableSortedMap<String, ImmutableMap<String, String>>> configs =
            getXcodeBuildConfigurationsForTargetNode(targetNode, appendedConfig);
    PBXNativeTarget target = targetBuilderResult.target;
    setTargetBuildConfigurations(getConfigurationNameToXcconfigPath(buildTarget), target,
            project.getMainGroup(), configs.get(), extraSettings, defaultSettingsBuilder.build(),
            appendedConfig);
    return Optional.<PBXTarget>of(target);
}
From source file:com.treasuredata.client.TDHttpClient.java
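This request builder uses the or(Optional) overload in apiKeyCache.or(config.apiKey) to prefer a cached API key over the configured one, and or(T) to default the optional port suffix to the empty string.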
public Request prepareRequest(TDApiRequest apiRequest, Optional<String> apiKeyCache) {
    String queryStr = "";
    String portStr = config.port.transform(new Function<Integer, String>() {
        @Override
        public String apply(Integer input) {
            return ":" + input.toString();
        }
    }).or("");
    String requestUri = String.format("%s://%s%s%s", config.useSSL ? "https" : "http", config.endpoint,
            portStr, apiRequest.getPath());

    if (!apiRequest.getQueryParams().isEmpty()) {
        List<String> queryParamList = new ArrayList<String>(apiRequest.getQueryParams().size());
        for (Map.Entry<String, String> queryParam : apiRequest.getQueryParams().entrySet()) {
            queryParamList.add(
                    String.format("%s=%s", urlEncode(queryParam.getKey()), urlEncode(queryParam.getValue())));
        }
        queryStr = Joiner.on("&").join(queryParamList);
        if (apiRequest.getMethod() == HttpMethod.GET
                || (apiRequest.getMethod() == HttpMethod.POST && apiRequest.getPostJson().isPresent())) {
            requestUri += "?" + queryStr;
        }
    }

    logger.debug("Sending API request to {}", requestUri);
    String dateHeader = RFC2822_FORMAT.get().format(new Date());
    Request request = httpClient.newRequest(requestUri).agent(getClientName())
            .scheme(config.useSSL ? "https" : "http").method(apiRequest.getMethod())
            .header(HttpHeader.DATE, dateHeader).timeout(config.requestTimeoutMillis, TimeUnit.MILLISECONDS);
    request = setTDAuthHeaders(request, dateHeader);

    // Set API key
    Optional<String> apiKey = apiKeyCache.or(config.apiKey);
    if (apiKey.isPresent()) {
        String auth;
        if (isNakedTD1Key(apiKey.get())) {
            auth = "TD1 " + apiKey.get();
        } else {
            auth = apiKey.get();
        }
        request.header(HttpHeader.AUTHORIZATION, auth);
    }

    // Set other headers
    for (Map.Entry<String, String> entry : headers.entries()) {
        request.header(entry.getKey(), entry.getValue());
    }
    for (Map.Entry<String, String> entry : apiRequest.getHeaderParams().entries()) {
        request.header(entry.getKey(), entry.getValue());
    }

    // Submit method specific headers
    switch (apiRequest.getMethod()) {
    case POST:
        if (apiRequest.getPostJson().isPresent()) {
            request.content(new StringContentProvider(apiRequest.getPostJson().get()), "application/json");
        } else if (queryStr.length() > 0) {
            request.content(new StringContentProvider(queryStr), "application/x-www-form-urlencoded");
        } else {
            // We should set content-length explicitly for an empty post
            request.header("Content-Length", "0");
        }
        break;
    case PUT:
        if (apiRequest.getPutFile().isPresent()) {
            try {
                request.file(apiRequest.getPutFile().get().toPath(), "application/octet-stream");
            } catch (IOException e) {
                throw new TDClientException(TDClientException.ErrorType.INVALID_INPUT,
                        "Failed to read input file: " + apiRequest.getPutFile().get());
            }
        }
        break;
    }

    // Configure redirect (302) following
    Optional<Boolean> followRedirects = apiRequest.getFollowRedirects();
    if (followRedirects.isPresent()) {
        request.followRedirects(followRedirects.get());
    }
    return request;
}
From source file:com.ibm.common.activitystreams.ASObject.java
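This accessor uses op.or(or) to return a caller-supplied default enum constant, falling back to orNull() when no default is given.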
/**
 * Method getEnum.
 * @param key String
 * @param _enumClass Class<E>
 * @param or E
 * @return E
 */
public <E extends Enum<E>> E getEnum(String key, Class<E> _enumClass, E or) {
    String val = getString(key);
    Optional<E> op = getIfPresent(_enumClass, val);
    return or != null ? op.or(or) : op.orNull();
}
From source file:org.apache.gobblin.data.management.copy.publisher.CopyDataPublisher.java
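This publisher uses fileSetRoot.or("Unknown") to record a readable placeholder when no dataset output path was found.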
/**
 * Publish data for a {@link CopyableDataset}.
 */
private void publishFileSet(CopyEntity.DatasetAndPartition datasetAndPartition,
        Collection<WorkUnitState> datasetWorkUnitStates) throws IOException {
    Map<String, String> additionalMetadata = Maps.newHashMap();

    Preconditions.checkArgument(!datasetWorkUnitStates.isEmpty(),
            "publishFileSet received an empty collection of work units. This is an error in code.");

    CopyableDatasetMetadata metadata = CopyableDatasetMetadata.deserialize(
            datasetWorkUnitStates.iterator().next().getProp(CopySource.SERIALIZED_COPYABLE_DATASET));
    Path datasetWriterOutputPath = new Path(this.writerOutputDir, datasetAndPartition.identifier());

    log.info(String.format("[%s] Publishing fileSet from %s for dataset %s", datasetAndPartition.identifier(),
            datasetWriterOutputPath, metadata.getDatasetURN()));

    List<CommitStep> prePublish = getCommitSequence(datasetWorkUnitStates, PrePublishStep.class);
    List<CommitStep> postPublish = getCommitSequence(datasetWorkUnitStates, PostPublishStep.class);
    log.info(String.format("[%s] Found %d prePublish steps and %d postPublish steps.",
            datasetAndPartition.identifier(), prePublish.size(), postPublish.size()));

    executeCommitSequence(prePublish);

    if (hasCopyableFiles(datasetWorkUnitStates)) {
        // Targets are always absolute, so we start moving from root (will skip any existing directories).
        HadoopUtils.renameRecursively(this.fs, datasetWriterOutputPath, new Path("/"));
    } else {
        log.info(String.format("[%s] No copyable files in dataset. Proceeding to postpublish steps.",
                datasetAndPartition.identifier()));
    }

    executeCommitSequence(postPublish);

    this.fs.delete(datasetWriterOutputPath, true);

    long datasetOriginTimestamp = Long.MAX_VALUE;
    long datasetUpstreamTimestamp = Long.MAX_VALUE;
    Optional<String> fileSetRoot = Optional.<String>absent();

    for (WorkUnitState wus : datasetWorkUnitStates) {
        if (wus.getWorkingState() == WorkingState.SUCCESSFUL) {
            wus.setWorkingState(WorkUnitState.WorkingState.COMMITTED);
        }

        CopyEntity copyEntity = CopySource.deserializeCopyEntity(wus);
        if (copyEntity instanceof CopyableFile) {
            CopyableFile copyableFile = (CopyableFile) copyEntity;
            if (wus.getWorkingState() == WorkingState.COMMITTED) {
                CopyEventSubmitterHelper.submitSuccessfulFilePublish(this.eventSubmitter, copyableFile, wus);
                // Dataset output path is injected in each copyableFile.
                // This can be optimized by having a dataset-level equivalent class for copyable entities
                // and storing dataset related information, e.g. dataset output path, there.
                // Currently datasetOutputPath is only present for hive datasets.
                if (!fileSetRoot.isPresent() && copyableFile.getDatasetOutputPath() != null) {
                    fileSetRoot = Optional.of(copyableFile.getDatasetOutputPath());
                }
                if (lineageInfo.isPresent()) {
                    lineageInfo.get().putDestination(copyableFile.getDestinationDataset(), 0, wus);
                }
            }
            if (datasetOriginTimestamp > copyableFile.getOriginTimestamp()) {
                datasetOriginTimestamp = copyableFile.getOriginTimestamp();
            }
            if (datasetUpstreamTimestamp > copyableFile.getUpstreamTimestamp()) {
                datasetUpstreamTimestamp = copyableFile.getUpstreamTimestamp();
            }
        }
    }

    // If there are no valid values for datasetOriginTimestamp and datasetUpstreamTimestamp, use
    // something more readable
    if (Long.MAX_VALUE == datasetOriginTimestamp) {
        datasetOriginTimestamp = 0;
    }
    if (Long.MAX_VALUE == datasetUpstreamTimestamp) {
        datasetUpstreamTimestamp = 0;
    }

    additionalMetadata.put(SlaEventKeys.SOURCE_URI, this.state.getProp(SlaEventKeys.SOURCE_URI));
    additionalMetadata.put(SlaEventKeys.DESTINATION_URI, this.state.getProp(SlaEventKeys.DESTINATION_URI));
    additionalMetadata.put(SlaEventKeys.DATASET_OUTPUT_PATH, fileSetRoot.or("Unknown"));

    CopyEventSubmitterHelper.submitSuccessfulDatasetPublish(this.eventSubmitter, datasetAndPartition,
            Long.toString(datasetOriginTimestamp), Long.toString(datasetUpstreamTimestamp),
            additionalMetadata);
}
From source file:com.eucalyptus.vm.VmInstance.java
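This metadata handler uses or(T) twice: groupOption.or(MetadataGroup.Core) to default the metadata group, and Optional.fromNullable(...).or(...) to default to an empty map.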
public String getByKey(final String pathArg) {
    final String path = Objects.firstNonNull(pathArg, "");
    final String pathNoSlash;
    LOG.debug("Servicing metadata request:" + path);
    if (path.endsWith("/")) {
        pathNoSlash = path.substring(0, path.length() - 1);
    } else {
        pathNoSlash = path;
    }
    Optional<MetadataGroup> groupOption = Optional.absent();
    for (final MetadataGroup metadataGroup : MetadataGroup.values()) {
        if (metadataGroup.providesPath(pathNoSlash) || metadataGroup.providesPath(path)) {
            groupOption = Optional.of(metadataGroup);
        }
    }
    final MetadataGroup group = groupOption.or(MetadataGroup.Core);
    final Map<String, String> metadataMap = Optional.fromNullable(group.apply(this))
            .or(Collections.<String, String>emptyMap());
    final String value = metadataMap.get(path);
    return value == null ? metadataMap.get(pathNoSlash) : value;
}