Example usage for com.google.common.collect ImmutableMap entrySet

List of usage examples for com.google.common.collect ImmutableMap entrySet

Introduction

On this page you can find example usage for com.google.common.collect ImmutableMap entrySet.

Prototype

public final ImmutableSet<Entry<K, V>> entrySet() 
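
A minimal, self-contained sketch of the call before the project examples below (map contents are illustrative). entrySet() returns an ImmutableSet of entries that preserves the map's insertion order:

import com.google.common.collect.ImmutableMap;
import java.util.Map;

public class EntrySetExample {
    public static void main(String[] args) {
        ImmutableMap<String, Integer> map = ImmutableMap.of("a", 1, "b", 2);
        // Iterate the entries; iteration follows the order in which keys were put.
        for (Map.Entry<String, Integer> entry : map.entrySet()) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }
    }
}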

Usage

From source file: com.facebook.buck.rules.CachingBuildRuleBuilder.java

private boolean verifyRecordedPathHashes(BuildTarget target, ProjectFilesystem filesystem,
        ImmutableMap<String, String> recordedPathHashes) throws IOException {

    // Create a new `DefaultFileHashCache` to prevent caching from interfering with verification.
    ProjectFileHashCache fileHashCache = DefaultFileHashCache.createDefaultFileHashCache(filesystem,
            fileHashCacheMode);

    // Verify each path from the recorded path hashes entry matches the actual on-disk version.
    for (Map.Entry<String, String> ent : recordedPathHashes.entrySet()) {
        Path path = filesystem.getPath(ent.getKey());
        HashCode cachedHashCode = HashCode.fromString(ent.getValue());
        HashCode realHashCode = fileHashCache.get(path);
        if (!realHashCode.equals(cachedHashCode)) {
            LOG.debug("%s: recorded hash for \"%s\" doesn't match actual hash: %s (cached) != %s (real).",
                    target, path, cachedHashCode, realHashCode);
            return false;
        }
    }

    return true;
}

From source file: com.facebook.buck.core.cell.AbstractCellConfig.java

/**
 * Translates the 'cell name'->override map into a 'Path'->override map.
 *
 * @param pathMapping a map containing paths to all of the cells we want to query.
 * @return 'Path'->override map
 */
public ImmutableMap<Path, RawConfig> getOverridesByPath(ImmutableMap<CellName, Path> pathMapping)
        throws InvalidCellOverrideException {

    ImmutableSet<CellName> relativeNamesOfCellsWithOverrides = FluentIterable.from(getValues().keySet())
            .filter(Predicates.not(CellName.ALL_CELLS_SPECIAL_NAME::equals)).toSet();
    ImmutableSet.Builder<Path> pathsWithOverrides = ImmutableSet.builder();
    for (CellName cellWithOverride : relativeNamesOfCellsWithOverrides) {
        if (!pathMapping.containsKey(cellWithOverride)) {
            throw new InvalidCellOverrideException(
                    String.format("Trying to override settings for unknown cell %s", cellWithOverride));
        }
        pathsWithOverrides.add(pathMapping.get(cellWithOverride));
    }

    ImmutableMultimap<Path, CellName> pathToRelativeName = Multimaps.index(pathMapping.keySet(),
            Functions.forMap(pathMapping));

    for (Path pathWithOverrides : pathsWithOverrides.build()) {
        ImmutableList<CellName> namesForPath = RichStream.from(pathToRelativeName.get(pathWithOverrides))
                .filter(name -> name.getLegacyName().isPresent()).toImmutableList();
        if (namesForPath.size() > 1) {
            throw new InvalidCellOverrideException(
                    String.format("Configuration override is ambiguous: cell rooted at %s is reachable "
                            + "as [%s]. Please override the config by placing a .buckconfig.local file in the "
                            + "cell's root folder.", pathWithOverrides, Joiner.on(',').join(namesForPath)));
        }
    }

    Map<Path, RawConfig> overridesByPath = new HashMap<>();
    for (Map.Entry<CellName, Path> entry : pathMapping.entrySet()) {
        CellName cellRelativeName = entry.getKey();
        Path cellPath = entry.getValue();
        RawConfig configFromOtherRelativeName = overridesByPath.get(cellPath);
        RawConfig config = getForCell(cellRelativeName);
        if (configFromOtherRelativeName != null) {
            // Merge configs
            RawConfig mergedConfig = RawConfig.builder().putAll(configFromOtherRelativeName).putAll(config)
                    .build();
            overridesByPath.put(cellPath, mergedConfig);
        } else {
            overridesByPath.put(cellPath, config);
        }
    }

    return ImmutableMap.copyOf(overridesByPath);
}
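
The example above merges configs for duplicate paths by hand before calling ImmutableMap.copyOf. With Guava 21+ the same merge can be expressed inline with the three-argument toImmutableMap collector; a sketch with illustrative string values standing in for RawConfig:

import com.google.common.collect.ImmutableMap;
import java.util.Map;
import java.util.stream.Stream;

public class MergeByKeyExample {
    public static void main(String[] args) {
        ImmutableMap<String, String> merged = Stream.of(
                        Map.entry("p", "a"), Map.entry("p", "b"), Map.entry("q", "c"))
                .collect(ImmutableMap.toImmutableMap(
                        Map.Entry::getKey, Map.Entry::getValue,
                        // Merge values for duplicate keys by concatenation.
                        (first, second) -> first + second));
        System.out.println(merged); // {p=ab, q=c}
    }
}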

From source file: org.elasticsearch.snapshots.SnapshotsService.java

private ImmutableMap<ShardId, ShardSnapshotStatus> processWaitingShards(
        ImmutableMap<ShardId, ShardSnapshotStatus> snapshotShards, RoutingTable routingTable) {
    boolean snapshotChanged = false;
    ImmutableMap.Builder<ShardId, ShardSnapshotStatus> shards = ImmutableMap.builder();
    for (ImmutableMap.Entry<ShardId, ShardSnapshotStatus> shardEntry : snapshotShards.entrySet()) {
        ShardSnapshotStatus shardStatus = shardEntry.getValue();
        if (shardStatus.state() == State.WAITING) {
            ShardId shardId = shardEntry.getKey();
            IndexRoutingTable indexShardRoutingTable = routingTable.index(shardId.getIndex());
            if (indexShardRoutingTable != null) {
                IndexShardRoutingTable shardRouting = indexShardRoutingTable.shard(shardId.id());
                if (shardRouting != null && shardRouting.primaryShard() != null) {
                    if (shardRouting.primaryShard().started()) {
                        // Shard that we were waiting for has started on a node, let's process it
                        snapshotChanged = true;
                        logger.trace("starting shard that we were waiting for [{}] on node [{}]",
                                shardEntry.getKey(), shardStatus.nodeId());
                        shards.put(shardEntry.getKey(),
                                new ShardSnapshotStatus(shardRouting.primaryShard().currentNodeId()));
                        continue;
                    } else if (shardRouting.primaryShard().initializing()
                            || shardRouting.primaryShard().relocating()) {
                        // Shard that we were waiting for hasn't started yet or is still relocating - will continue to wait
                        shards.put(shardEntry);
                        continue;
                    }
                }
            }
            // Shard that we were waiting for went into unassigned state or disappeared - giving up
            snapshotChanged = true;
            logger.warn("failing snapshot of shard [{}] on unassigned shard [{}]", shardEntry.getKey(),
                    shardStatus.nodeId());
            shards.put(shardEntry.getKey(),
                    new ShardSnapshotStatus(shardStatus.nodeId(), State.FAILED, "shard is unassigned"));
        } else {
            shards.put(shardEntry);
        }
    }
    if (snapshotChanged) {
        return shards.build();
    } else {
        return null;
    }
}
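
Note how the example above passes each Map.Entry straight to the builder with put(shardEntry). A minimal sketch of that Builder.put(Entry) overload (map contents are illustrative):

import com.google.common.collect.ImmutableMap;
import java.util.Map;

public class PutEntryExample {
    public static void main(String[] args) {
        ImmutableMap<String, Integer> source = ImmutableMap.of("x", 1, "y", 2);
        ImmutableMap.Builder<String, Integer> builder = ImmutableMap.builder();
        for (Map.Entry<String, Integer> entry : source.entrySet()) {
            builder.put(entry); // copies the entry's key and value
        }
        System.out.println(builder.build()); // {x=1, y=2}
    }
}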

From source file: edu.mit.streamjit.impl.compiler.Schedule.java

private static <T> Schedule<T> schedule(ImmutableSet<T> things,
        ImmutableSet<ExecutionConstraint<T>> executionConstraints,
        ImmutableSet<BufferingConstraint<T>> bufferingConstraints, int multiplier, int fireCost,
        int excessBufferCost) {
    ILPSolver solver = new ILPSolver();
    //There's one variable for each thing, which represents the number of
    //times it fires.  This uses the default bounds.  (TODO: perhaps a bound
    //at 1 if we're steady-state scheduling, maybe by marking things as
    //must-fire and marking the bottommost thing?)
    ImmutableMap.Builder<T, ILPSolver.Variable> variablesBuilder = ImmutableMap.builder();
    for (T thing : things)
        variablesBuilder.put(thing, solver.newVariable(thing.toString()));
    ImmutableMap<T, ILPSolver.Variable> variables = variablesBuilder.build();

    for (ExecutionConstraint<T> constraint : executionConstraints)
        solver.constrainAtLeast(variables.get(constraint.thing).asLinearExpr(1), constraint.minExecutions);

    HashMap<ILPSolver.Variable, Integer> sumOfConstraints = new HashMap<>();
    for (ILPSolver.Variable v : variables.values())
        sumOfConstraints.put(v, 0);
    for (BufferingConstraint<T> constraint : bufferingConstraints) {
        ILPSolver.Variable upstreamVar = variables.get(constraint.upstream),
                downstreamVar = variables.get(constraint.downstream);
        ILPSolver.LinearExpr expr = upstreamVar.asLinearExpr(constraint.pushRate).minus(constraint.popRate,
                downstreamVar);
        switch (constraint.condition) {
        case LESS_THAN_EQUAL:
            solver.constrainAtMost(expr, constraint.bufferDelta);
            break;
        case EQUAL:
            solver.constrainEquals(expr, constraint.bufferDelta);
            break;
        case GREATER_THAN_EQUAL:
            solver.constrainAtLeast(expr, constraint.bufferDelta);
            break;
        default:
            throw new AssertionError(constraint.condition);
        }

        sumOfConstraints.put(upstreamVar, sumOfConstraints.get(upstreamVar) + constraint.pushRate);
        sumOfConstraints.put(downstreamVar, sumOfConstraints.get(downstreamVar) - constraint.popRate);
    }

    //Add a special constraint to ensure at least one filter fires.
    //TODO: in init schedules we might not always need this...
    Iterator<ILPSolver.Variable> variablesIter = variables.values().iterator();
    ILPSolver.LinearExpr totalFirings = variablesIter.next().asLinearExpr(1);
    while (variablesIter.hasNext())
        totalFirings = totalFirings.plus(1, variablesIter.next());
    solver.constrainAtLeast(totalFirings, 1);

    for (ILPSolver.Variable v : variables.values())
        sumOfConstraints.put(v, sumOfConstraints.get(v) * excessBufferCost + fireCost);
    ILPSolver.ObjectiveFunction objFn = solver.minimize(
            solver.newLinearExpr(Maps.filterValues(sumOfConstraints, Predicates.not(Predicates.equalTo(0)))));

    try {
        solver.solve();
    } catch (SolverException ex) {
        throw new ScheduleException(ex);
    }

    ImmutableMap.Builder<T, Integer> schedule = ImmutableMap.builder();
    for (Map.Entry<T, ILPSolver.Variable> e : variables.entrySet())
        schedule.put(e.getKey(), e.getValue().value() * multiplier);
    return new Schedule<>(things, bufferingConstraints, schedule.build());
}
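
The closing loop above rebuilds a map by transforming each entry's value. The same pattern can be written as a stream over entrySet() with the toImmutableMap collector (Guava 21+); a sketch with illustrative values:

import com.google.common.collect.ImmutableMap;
import java.util.Map;

public class ScaleValuesExample {
    public static void main(String[] args) {
        ImmutableMap<String, Integer> firings = ImmutableMap.of("a", 1, "b", 3);
        int multiplier = 2;
        ImmutableMap<String, Integer> scaled = firings.entrySet().stream()
                .collect(ImmutableMap.toImmutableMap(
                        Map.Entry::getKey, e -> e.getValue() * multiplier));
        System.out.println(scaled); // {a=2, b=6}
    }
}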

From source file: com.facebook.buck.io.ProjectFilesystem.java

/**
 * Similar to {@link #createZip(Collection, Path)}, but also takes a list of additional files to
 * write in the zip, including their contents, as a map. It's assumed only paths that should not
 * be ignored are passed to this method.
 */
public void createZip(Collection<Path> pathsToIncludeInZip, Path out,
        ImmutableMap<Path, String> additionalFileContents) throws IOException {
    try (CustomZipOutputStream zip = ZipOutputStreams.newOutputStream(out)) {
        for (Path path : pathsToIncludeInZip) {
            boolean isDirectory = isDirectory(path);
            CustomZipEntry entry = new CustomZipEntry(path, isDirectory);

            // We want deterministic ZIPs, so avoid mtimes.
            entry.setFakeTime();

            entry.setExternalAttributes(getFileAttributesForZipEntry(path));

            zip.putNextEntry(entry);
            if (!isDirectory) {
                try (InputStream input = newFileInputStream(path)) {
                    ByteStreams.copy(input, zip);
                }
            }
            zip.closeEntry();
        }

        for (Map.Entry<Path, String> fileContentsEntry : additionalFileContents.entrySet()) {
            CustomZipEntry entry = new CustomZipEntry(fileContentsEntry.getKey());
            // We want deterministic ZIPs, so avoid mtimes.
            entry.setFakeTime();
            zip.putNextEntry(entry);
            try (InputStream stream = new ByteArrayInputStream(
                    fileContentsEntry.getValue().getBytes(Charsets.UTF_8))) {
                ByteStreams.copy(stream, zip);
            }
            zip.closeEntry();
        }
    }
}
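
A stripped-down sketch of the additionalFileContents loop above, using the JDK's ZipOutputStream in place of Buck's CustomZipOutputStream (the output path and map contents are illustrative):

import com.google.common.collect.ImmutableMap;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;

public class ZipFromMapExample {
    public static void main(String[] args) throws IOException {
        ImmutableMap<String, String> contents = ImmutableMap.of("a.txt", "hello", "b.txt", "world");
        try (ZipOutputStream zip = new ZipOutputStream(new FileOutputStream("out.zip"))) {
            for (Map.Entry<String, String> entry : contents.entrySet()) {
                zip.putNextEntry(new ZipEntry(entry.getKey()));
                zip.write(entry.getValue().getBytes(StandardCharsets.UTF_8));
                zip.closeEntry();
            }
        }
    }
}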

From source file: org.basepom.mojo.duplicatefinder.DuplicateFinderMojo.java

private void writeResultFile(File resultFile,
        ImmutableMap<String, Entry<ResultCollector, ClasspathDescriptor>> results)
        throws MojoExecutionException, InvalidVersionSpecificationException, OverConstrainedVersionException {
    File parent = resultFile.getParentFile();
    if (!parent.exists()) {
        if (!parent.mkdirs()) {
            throw new MojoExecutionException("Could not create parent folders for " + parent.getAbsolutePath());
        }
    }
    if (!parent.isDirectory() || !parent.canWrite()) {
        throw new MojoExecutionException("Can not create result file in " + parent.getAbsolutePath());
    }

    try {
        SMOutputFactory factory = new SMOutputFactory(XMLOutputFactory2.newFactory());
        SMOutputDocument resultDocument = factory.createOutputDocument(resultFile);
        resultDocument.setIndentation("\n" + Strings.repeat(" ", 64), 1, 4);

        SMOutputElement rootElement = resultDocument.addElement("duplicate-finder-result");
        XMLWriterUtils.addAttribute(rootElement, "version", SAVE_FILE_VERSION);

        XMLWriterUtils.addProjectInformation(rootElement, project);

        addConfiguration(rootElement);

        SMOutputElement resultsElement = rootElement.addElement("results");
        for (Map.Entry<String, Entry<ResultCollector, ClasspathDescriptor>> entry : results.entrySet()) {
            SMOutputElement resultElement = resultsElement.addElement("result");
            XMLWriterUtils.addAttribute(resultElement, "name", entry.getKey());
            XMLWriterUtils.addResultCollector(resultElement, entry.getValue().getKey());
            XMLWriterUtils.addClasspathDescriptor(resultElement, resultFileMinClasspathCount,
                    entry.getValue().getValue());
        }

        resultDocument.closeRootAndWriter();
    } catch (XMLStreamException e) {
        throw new MojoExecutionException("While writing result file", e);
    }
}

From source file: com.facebook.buck.thrift.ThriftLibraryDescription.java

/**
 * Create the build rules which compile the input thrift sources into their respective
 * language-specific sources.
 */
@VisibleForTesting
protected ImmutableMap<String, ThriftCompiler> createThriftCompilerBuildRules(BuildRuleParams params,
        BuildRuleResolver resolver, CompilerType compilerType, ImmutableList<String> flags, String language,
        ImmutableSet<String> options, ImmutableMap<String, SourcePath> srcs,
        ImmutableSortedSet<ThriftLibrary> deps,
        ImmutableMap<String, ImmutableSortedSet<String>> generatedSources) {

    SourcePathRuleFinder ruleFinder = new SourcePathRuleFinder(resolver);
    Tool compiler = thriftBuckConfig.getCompiler(compilerType, resolver);

    // Build up the include roots to find thrift file deps and also the build rules that
    // generate them.
    ImmutableMap.Builder<Path, SourcePath> includesBuilder = ImmutableMap.builder();
    ImmutableSortedSet.Builder<HeaderSymlinkTree> includeTreeRulesBuilder = ImmutableSortedSet.naturalOrder();
    ImmutableList.Builder<Path> includeRootsBuilder = ImmutableList.builder();
    ImmutableSet.Builder<Path> headerMapsBuilder = ImmutableSet.builder();
    for (ThriftLibrary dep : deps) {
        includesBuilder.putAll(dep.getIncludes());
        includeTreeRulesBuilder.add(dep.getIncludeTreeRule());
        includeRootsBuilder.add(dep.getIncludeTreeRule().getIncludePath());
        headerMapsBuilder.addAll(OptionalCompat.asSet(dep.getIncludeTreeRule().getHeaderMap()));
    }
    ImmutableMap<Path, SourcePath> includes = includesBuilder.build();
    ImmutableSortedSet<HeaderSymlinkTree> includeTreeRules = includeTreeRulesBuilder.build();
    ImmutableList<Path> includeRoots = includeRootsBuilder.build();
    ImmutableSet<Path> headerMaps = headerMapsBuilder.build();

    // For each thrift source, add a thrift compile rule to generate its sources.
    ImmutableMap.Builder<String, ThriftCompiler> compileRules = ImmutableMap.builder();
    for (ImmutableMap.Entry<String, SourcePath> ent : srcs.entrySet()) {
        String name = ent.getKey();
        SourcePath source = ent.getValue();
        ImmutableSortedSet<String> genSrcs = Preconditions.checkNotNull(generatedSources.get(name));

        BuildTarget target = createThriftCompilerBuildTarget(params.getBuildTarget(), name);
        Path outputDir = getThriftCompilerOutputDir(params.getProjectFilesystem(), params.getBuildTarget(),
                name);

        compileRules.put(name, new ThriftCompiler(
                params.copyWithChanges(target, Suppliers.ofInstance(ImmutableSortedSet.<BuildRule>naturalOrder()
                        .addAll(compiler.getDeps(ruleFinder))
                        .addAll(ruleFinder.filterBuildRuleInputs(ImmutableList.<SourcePath>builder().add(source)
                                .addAll(includes.values()).build()))
                        .addAll(includeTreeRules).build()), Suppliers.ofInstance(ImmutableSortedSet.of())),
                compiler, flags, outputDir, source, language, options, includeRoots, headerMaps, includes,
                genSrcs));
    }

    return compileRules.build();
}

From source file: org.kiji.scoring.impl.InternalFreshKijiTableReader.java

/** {@inheritDoc} */
@Override
public KijiRowData get(final EntityId entityId, final KijiDataRequest dataRequest,
        final FreshRequestOptions options) throws IOException {
    requireState(LifecycleState.OPEN);
    // Get the start time for the request.
    final long startTime = System.nanoTime();

    final String id = String.format("%s#%s", mReaderUID, mUniqueIdGenerator.getNextUniqueId());
    LOG.debug("{} starting with EntityId: {} data request: {} request options: {}", id, entityId, dataRequest,
            options);

    final KijiTableReader requestReader = ScoringUtils.getPooledReader(mReaderPool);
    try {
        final ImmutableList<KijiColumnName> requestColumns = removeDisabledColumns(
                getColumnsFromRequest(dataRequest), options.getDisabledColumns());

        final ImmutableMap<KijiColumnName, Freshener> fresheners;
        final ImmutableMap<KijiColumnName, KijiFreshenerRecord> records;
        // Get a retained snapshot of the rereadable state.
        final RereadableState rereadableState = getRereadableState();
        try {
            // Collect the Fresheners and Records applicable to this request.
            fresheners = filterFresheners(requestColumns, rereadableState.mFresheners);
            records = filterRecords(rereadableState.mFreshenerRecords, requestColumns);
            // If there are no Fresheners attached to the requested columns, return the requested data.
            if (fresheners.isEmpty()) {
                return requestReader.get(entityId, dataRequest);
            } else {
                // Retain the Fresheners so that they cannot be cleaned up while in use.
                for (Map.Entry<KijiColumnName, Freshener> freshenerEntry : fresheners.entrySet()) {
                    freshenerEntry.getValue().retain();
                }
            }
        } finally {
            rereadableState.release();
        }

        LOG.debug("{} will run Fresheners: {}", id, fresheners.values());

        final Future<KijiRowData> clientDataFuture = ScoringUtils.getFuture(mExecutorService,
                new TableReadCallable(mReaderPool, entityId, dataRequest));

        final FresheningRequestContext requestContext = new FresheningRequestContext(id, startTime, fresheners,
                options.getParameters(), records, mReaderPool, entityId, dataRequest, clientDataFuture,
                mBufferedWriter, mAllowPartial, mStatisticGatheringMode, mStatisticsQueue, mExecutorService);

        final ImmutableList<Future<Boolean>> futures = requestContext.getFuturesForFresheners();

        final Future<List<Boolean>> superFuture = ScoringUtils.getFuture(mExecutorService,
                new FutureAggregatingCallable<Boolean>(futures));

        // If the options specify a timeout of -1, use the configured timeout.
        final long timeout = (-1 == options.getTimeout()) ? mTimeout : options.getTimeout();
        try {
            if (ScoringUtils.getFromFuture(superFuture, timeout).contains(true)) {
                // If all Fresheners return in time and at least one has written a new value, read from
                // the table.
                LOG.debug("{} completed on time and data was written.", id);
                return requestReader.get(entityId, dataRequest);
            } else {
                // If all Fresheners return in time, but none have written new values, do not read from
                // the table.
                LOG.debug("{} completed on time and no data was written.", id);
                try {
                    return ScoringUtils.getFromFuture(clientDataFuture, 0L);
                } catch (TimeoutException te) {
                    // If client data is not immediately available, read from the table.
                    return requestReader.get(entityId, dataRequest);
                }
            }
        } catch (TimeoutException te) {
            requestContext.timeOut();
            // If superFuture times out, read partially freshened data from the table or return the
            // cached data based on whether partial freshness is allowed.
            LOG.debug("{} timed out, checking for partial writes.", id);
            return requestContext.checkAndRead();
        }
    } finally {
        // Return the reader to the pool.
        requestReader.close();
    }
}

From source file: org.grycap.gpf4med.DownloadService.java

/**
 * Uses a group of URIs to retrieve objects and writes them to the same number of files. This method will do
 * its best to handle the downloads optimally, opening a pool of connections to the servers and reusing
 * them as much as possible. It will also create several concurrent threads in the JVM in order to perform
 * simultaneous downloads.
 * @param requests a key-value map with the list of requests to handle. The source of the object is the key of
 *        the map, while the value is the destination file.
 * @param validator checks the file for correctness.
 * @param config download settings.
 * @param encryptionProvider an optional encryption provider that, when available, is used to encrypt the 
 *        files after download.
 * @param task an optional task that will be executed passing each individual file as parameter, when the download 
 *        of the file ends.
 * @return the requests that could not be served after exhausting the individual retries.
 * @throws IOException if an error occurs in the execution of the operation.
 */
public ImmutableMap<URI, File> download(final ImmutableMap<URI, File> requests,
        final @Nullable FileValidator validator, final DownloadConfiguration config,
        final @Nullable FileEncryptionProvider encryptionProvider, final @Nullable PostProcessTask<File> task)
        throws IOException {
    checkArgument(requests != null, "Uninitialized request");
    checkArgument(config != null, "Uninitialized configuration");
    ImmutableMap<URI, File> pending = ImmutableMap.copyOf(requests);
    final List<URI> cancelled = new ArrayList<URI>();
    try {
        for (int attempt = 0; attempt < config.getRetries() && !pending.isEmpty()
                && pending.size() > cancelled.size(); attempt++) {
            LOGGER.info("Attempt " + (attempt + 1) + " to download " + requests.size() + " files");
            // create connection manager
            final PoolingNHttpClientConnectionManager connectionManager = createConnectionManager();
            // create HTTP asynchronous client
            int eSoTimeoutMs = config.soToMs + (int) (config.soToMs * attempt
                    * (config.toIncPercent >= 0.0d && config.toIncPercent <= 1.0d ? config.toIncPercent
                            : 0.0d));
            int eConnTimeoutMs = config.connToMs + (int) (config.connToMs * attempt
                    * (config.toIncPercent >= 0.0d && config.toIncPercent <= 1.0d ? config.toIncPercent
                            : 0.0d));
            final RequestConfig requestConfig = RequestConfig.custom().setConnectTimeout(eConnTimeoutMs)
                    .setConnectionRequestTimeout(eConnTimeoutMs).setSocketTimeout(eSoTimeoutMs).build();
            final CloseableHttpAsyncClient httpclient = HttpAsyncClients.custom()
                    .setConnectionManager(connectionManager).setDefaultRequestConfig(requestConfig).build();
            httpclient.start();
            // attempt to perform download
            try {
                final CountDownLatch latch = new CountDownLatch(pending.size());
                for (final Map.Entry<URI, File> entry : pending.entrySet()) {
                    final URI uri = entry.getKey();
                    if (cancelled.contains(uri)) {
                        continue;
                    }
                    final File file = entry.getValue();
                    FileUtils.forceMkdir(file.getParentFile());
                    final HttpGet request = new HttpGet(uri);
                    final HttpAsyncRequestProducer producer = new BasicAsyncRequestProducer(
                            new HttpHost(uri.getHost(), uri.getPort(), uri.getScheme()), request);
                    final ZeroCopyConsumer<File> consumer = new ZeroCopyConsumer<File>(file) {
                        @Override
                        protected File process(final HttpResponse response, final File file,
                                final ContentType contentType) throws Exception {
                            releaseResources();
                            if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) {
                                FileUtils.deleteQuietly(file);
                                throw new ClientProtocolException(
                                        "Download failed: " + response.getStatusLine());
                            }
                            if (validator != null && !validator.isValid(file)) {
                                FileUtils.deleteQuietly(file);
                                cancelled.add(uri);
                                throw new IOException(
                                        file.getCanonicalPath() + " not recognised as a supported file format");
                            }
                            if (encryptionProvider != null) {
                                try {
                                    final File cipherFile = File
                                            .createTempFile(RandomStringUtils.random(8, true, true), ".tmp");
                                    encryptionProvider.encrypt(new FileInputStream(file),
                                            new FileOutputStream(cipherFile));
                                    FileUtils.deleteQuietly(file);
                                    FileUtils.moveFile(cipherFile, file);
                                    LOGGER.info("File encrypted: " + file.getCanonicalPath());
                                } catch (Exception e) {
                                    FileUtils.deleteQuietly(file);
                                    cancelled.add(uri);
                                    LOGGER.warn("Failed to encrypt: " + file.getCanonicalPath(), e);
                                    throw new IOException("File encryption failed");
                                }
                            }
                            LOGGER.info("Download succeed to file: " + file.getCanonicalPath());
                            return file;
                        }
                    };
                    httpclient.execute(producer, consumer, new FutureCallback<File>() {
                        @Override
                        public void completed(final File result) {
                            request.releaseConnection();
                            latch.countDown();
                            if (task != null) {
                                task.apply(result);
                            }
                            LOGGER.info("Request succeed: " + request.getRequestLine()
                                    + " => Response file length: " + result.length());
                        }

                        @Override
                        public void failed(final Exception ex) {
                            request.releaseConnection();
                            FileUtils.deleteQuietly(file);
                            latch.countDown();
                            LOGGER.error("Request failed: " + request.getRequestLine() + "=>" + ex);
                        }

                        @Override
                        public void cancelled() {
                            request.releaseConnection();
                            FileUtils.deleteQuietly(file);
                            latch.countDown();
                            LOGGER.error("Request cancelled: " + request.getRequestLine());
                        }
                    });
                }
                latch.await();
            } finally {
                try {
                    httpclient.close();
                } catch (Exception ignore) {
                }
                try {
                    shutdown(connectionManager, 0l);
                } catch (Exception ignore) {
                }
            }
            // populate the pending list with the files that do not exist
            final ImmutableMap.Builder<URI, File> builder = new ImmutableMap.Builder<URI, File>();
            for (final Map.Entry<URI, File> entry : requests.entrySet()) {
                if (!entry.getValue().exists()) {
                    builder.put(entry.getKey(), entry.getValue());
                }
            }
            pending = builder.build();
            if ((attempt + 1) < config.retries && !pending.isEmpty() && pending.size() > cancelled.size()) {
                final long waitingTime = (long) (config.soToMs * 0.1d);
                LOGGER.info("Waiting " + waitingTime + " ms before attempt " + (attempt + 2) + " to download "
                        + requests.size() + " pending files");
                Thread.sleep(waitingTime);
            }
        }
    } catch (IOException ioe) {
        throw ioe;
    } catch (Exception e) {
        throw new IOException("Download has failed", e);
    }
    return pending;
}
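
The retry loop above grows its socket and connection timeouts linearly with the attempt number. A minimal sketch of that computation in isolation (field names follow the example's DownloadConfiguration):

public class BackoffExample {
    // Effective timeout for a retry attempt: the base timeout plus a linear
    // increment of toIncPercent per attempt; values outside [0.0, 1.0] are treated as 0.
    static int effectiveTimeoutMs(int baseToMs, int attempt, double toIncPercent) {
        double pct = (toIncPercent >= 0.0d && toIncPercent <= 1.0d) ? toIncPercent : 0.0d;
        return baseToMs + (int) (baseToMs * attempt * pct);
    }

    public static void main(String[] args) {
        // baseToMs=1000, toIncPercent=0.5: attempts 0, 1, 2 -> 1000, 1500, 2000 ms.
        for (int attempt = 0; attempt < 3; attempt++) {
            System.out.println(effectiveTimeoutMs(1000, attempt, 0.5d));
        }
    }
}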

From source file: com.facebook.buck.apple.ProjectGenerator.java

/**
 * Create target level configuration entries.
 *
 * @param target      Xcode target for which the configurations will be set.
 * @param targetGroup Xcode group in which the configuration file references will be placed.
 * @param configurations  Configurations as extracted from the BUCK file.
 * @param overrideBuildSettings Build settings that will override ones defined elsewhere.
 * @param defaultBuildSettings  Target-inline level build settings that will be set if not already
 *                              defined.
 * @param appendBuildSettings   Target-inline level build settings that will incorporate the
 *                              existing value or values at a higher level.
 */
private void setTargetBuildConfigurations(Function<String, Path> configurationNameToXcconfigPath,
        PBXTarget target, PBXGroup targetGroup,
        ImmutableMap<String, ImmutableMap<String, String>> configurations,
        ImmutableMap<String, String> overrideBuildSettings, ImmutableMap<String, String> defaultBuildSettings,
        ImmutableMap<String, String> appendBuildSettings) throws IOException {

    for (Map.Entry<String, ImmutableMap<String, String>> configurationEntry : configurations.entrySet()) {
        targetConfigNamesBuilder.add(configurationEntry.getKey());

        ImmutableMap<String, String> targetLevelInlineSettings = configurationEntry.getValue();

        XCBuildConfiguration outputConfiguration = target.getBuildConfigurationList()
                .getBuildConfigurationsByName().getUnchecked(configurationEntry.getKey());

        HashMap<String, String> combinedOverrideConfigs = Maps.newHashMap(overrideBuildSettings);
        for (Map.Entry<String, String> entry : defaultBuildSettings.entrySet()) {
            String existingSetting = targetLevelInlineSettings.get(entry.getKey());
            if (existingSetting == null) {
                combinedOverrideConfigs.put(entry.getKey(), entry.getValue());
            }
        }

        for (Map.Entry<String, String> entry : appendBuildSettings.entrySet()) {
            String existingSetting = targetLevelInlineSettings.get(entry.getKey());
            String settingPrefix = existingSetting != null ? existingSetting : "$(inherited)";
            combinedOverrideConfigs.put(entry.getKey(), settingPrefix + " " + entry.getValue());
        }

        Iterable<Map.Entry<String, String>> entries = Iterables.concat(targetLevelInlineSettings.entrySet(),
                combinedOverrideConfigs.entrySet());

        Path xcconfigPath = configurationNameToXcconfigPath.apply(configurationEntry.getKey());
        projectFilesystem.mkdirs(Preconditions.checkNotNull(xcconfigPath).getParent());

        StringBuilder stringBuilder = new StringBuilder();
        for (Map.Entry<String, String> entry : entries) {
            stringBuilder.append(entry.getKey());
            stringBuilder.append(" = ");
            stringBuilder.append(entry.getValue());
            stringBuilder.append('\n');
        }
        String xcconfigContents = stringBuilder.toString();

        if (MorePaths.fileContentsDiffer(new ByteArrayInputStream(xcconfigContents.getBytes(Charsets.UTF_8)),
                xcconfigPath, projectFilesystem)) {
            if (shouldGenerateReadOnlyFiles()) {
                projectFilesystem.writeContentsToPath(xcconfigContents, xcconfigPath, READ_ONLY_FILE_ATTRIBUTE);
            } else {
                projectFilesystem.writeContentsToPath(xcconfigContents, xcconfigPath);
            }
        }

        PBXFileReference fileReference = getConfigurationFileReference(targetGroup, xcconfigPath);
        outputConfiguration.setBaseConfigurationReference(fileReference);
    }
}
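
The example above writes the concatenation of two entry iterables, placing the override entries after the target-level ones; in an xcconfig file the later assignment wins. A minimal sketch of that concat pattern (settings are illustrative):

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import java.util.Map;

public class ConcatEntriesExample {
    public static void main(String[] args) {
        ImmutableMap<String, String> targetLevel = ImmutableMap.of("FOO", "1", "BAR", "2");
        ImmutableMap<String, String> overrides = ImmutableMap.of("FOO", "3");
        // concat keeps both entries for a duplicate key; a consumer that reads
        // the lines in order (like xcconfig) effectively lets the later one win.
        for (Map.Entry<String, String> entry : Iterables.concat(
                targetLevel.entrySet(), overrides.entrySet())) {
            System.out.println(entry.getKey() + " = " + entry.getValue());
        }
    }
}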