Example usage for java.util Map forEach

List of usage examples for java.util Map forEach

Introduction

On this page you can find example usage for java.util Map forEach.

Prototype

default void forEach(BiConsumer<? super K, ? super V> action) 

Document

Performs the given action for each entry in this map until all entries have been processed or the action throws an exception.
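
Before the project examples below, here is a minimal, self-contained sketch of the basic pattern (not taken from any of the projects; the class name MapForEachExample is only for illustration): forEach hands each key/value pair of the map to the given BiConsumer.

import java.util.LinkedHashMap;
import java.util.Map;

public class MapForEachExample {
    public static void main(String[] args) {
        // LinkedHashMap keeps insertion order, so the printed order is predictable.
        Map<String, Integer> wordCounts = new LinkedHashMap<>();
        wordCounts.put("alpha", 1);
        wordCounts.put("beta", 2);
        wordCounts.put("gamma", 3);

        // The BiConsumer receives each key and value in turn.
        wordCounts.forEach((word, count) -> System.out.println(word + " -> " + count));

        // A common variant seen in the examples below: transforming entries into another map.
        Map<String, String> asText = new LinkedHashMap<>();
        wordCounts.forEach((word, count) -> asText.put(word, String.valueOf(count)));
        System.out.println(asText);
    }
}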

Usage

From source file:com.bouncestorage.swiftproxy.v1.ObjectResource.java

private Response.ResponseBuilder addObjectHeaders(Response.ResponseBuilder responseBuilder,
        BlobMetadata metaData, Optional<Map<String, Object>> overwrites) {
    Map<String, String> userMetadata = metaData.getUserMetadata();
    userMetadata.entrySet().stream().filter(entry -> !RESERVED_METADATA.contains(entry.getKey()))
            .forEach(entry -> responseBuilder.header(META_HEADER_PREFIX + entry.getKey(), entry.getValue()));
    if (userMetadata.containsKey(DYNAMIC_OBJECT_MANIFEST)) {
        responseBuilder.header(DYNAMIC_OBJECT_MANIFEST, userMetadata.get(DYNAMIC_OBJECT_MANIFEST));
    }

    String contentType = Strings.isNullOrEmpty(metaData.getContentMetadata().getContentType())
            ? MediaType.APPLICATION_OCTET_STREAM
            : metaData.getContentMetadata().getContentType();

    Map<String, Supplier<Object>> defaultHeaders = ImmutableMap.<String, Supplier<Object>>builder()
            .put(HttpHeaders.CONTENT_DISPOSITION, () -> metaData.getContentMetadata().getContentDisposition())
            .put(HttpHeaders.CONTENT_ENCODING, () -> metaData.getContentMetadata().getContentEncoding())
            .put(HttpHeaders.CONTENT_LENGTH, metaData::getSize)
            .put(HttpHeaders.LAST_MODIFIED, metaData::getLastModified).put(HttpHeaders.ETAG, metaData::getETag)
            .put(STATIC_OBJECT_MANIFEST, () -> userMetadata.containsKey(STATIC_OBJECT_MANIFEST))
            .put(HttpHeaders.DATE, Date::new).put(HttpHeaders.CONTENT_TYPE, () -> contentType).build();

    overwrites.ifPresent(headers -> headers.forEach((k, v) -> responseBuilder.header(k, v)));
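    // Apply each default header only when no overwrite for that header was supplied.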
    defaultHeaders.forEach((k, v) -> {
        if (!overwrites.isPresent() || !overwrites.get().containsKey(k)) {
            responseBuilder.header(k, v.get());
        }
    });

    return responseBuilder;
}

From source file:org.wso2.carbon.apimgt.core.dao.impl.ApiDAOImplIT.java

@Test
public void testAddApiAndResourceSpecificEndpointToApi() throws APIMgtDAOException {
    Endpoint apiSpecificEndpoint = new Endpoint.Builder(SampleTestObjectCreator.createMockEndpoint())
            .applicableLevel(APIMgtConstants.API_SPECIFIC_ENDPOINT).build();
    Endpoint urlSpecificEndpoint = new Endpoint.Builder(SampleTestObjectCreator.createMockEndpoint())
            .id(UUID.randomUUID().toString()).applicableLevel(APIMgtConstants.API_SPECIFIC_ENDPOINT)
            .name("URI1").build();
    Endpoint endpointToInsert = SampleTestObjectCreator.createAlternativeEndpoint();
    Endpoint globalEndpoint = new Endpoint.Builder().applicableLevel(APIMgtConstants.GLOBAL_ENDPOINT)
            .id(endpointToInsert.getId()).build();
    Map<String, Endpoint> apiEndpointMap = new HashMap<>();

    apiEndpointMap.put(APIMgtConstants.PRODUCTION_ENDPOINT, apiSpecificEndpoint);
    apiEndpointMap.put(APIMgtConstants.SANDBOX_ENDPOINT, globalEndpoint);
    Map<String, Endpoint> uriTemplateEndpointMap = new HashMap<>();
    uriTemplateEndpointMap.put(APIMgtConstants.PRODUCTION_ENDPOINT, urlSpecificEndpoint);
    Map<String, UriTemplate> uriTemplateMap = SampleTestObjectCreator.getMockUriTemplates();
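    // Rebuild each mock URI template so it carries the resource-specific endpoint map.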
    uriTemplateMap.forEach((k, v) -> {
        UriTemplate uriTemplate = new UriTemplate.UriTemplateBuilder(v).endpoint(uriTemplateEndpointMap)
                .build();
        uriTemplateMap.replace(k, uriTemplate);
    });
    ApiDAO apiDAO = DAOFactory.getApiDAO();
    API api = SampleTestObjectCreator.createDefaultAPI().apiDefinition(SampleTestObjectCreator.apiDefinition)
            .endpoint(apiEndpointMap).uriTemplates(uriTemplateMap).build();
    apiDAO.addEndpoint(endpointToInsert);
    apiDAO.addAPI(api);
    Map<String, Endpoint> retrievedApiEndpoint = apiDAO.getAPI(api.getId()).getEndpoint();
    Assert.assertTrue(apiDAO.isEndpointAssociated(globalEndpoint.getId()));
    Assert.assertEquals(apiEndpointMap, retrievedApiEndpoint);
    apiDAO.deleteAPI(api.getId());
    Endpoint retrievedGlobal = apiDAO.getEndpoint(globalEndpoint.getId());
    Assert.assertNotNull(retrievedGlobal);
    Assert.assertEquals(endpointToInsert, retrievedGlobal);
}

From source file:org.apereo.portal.io.xml.portlet.ExternalPortletDefinitionUnmarshaller.java

void unmarshallLifecycle(final Lifecycle lifecycle,
        final IPortletDefinition portletDefinition) {

    /*
     * If this is an existing portletDefinition, it may (probably does) already contain
     * lifecycle entries.  We need to remove those, because the lifecycle of a portlet after
     * import should reflect what the document says exactly.
     */
    portletDefinition.clearLifecycle();

    if (lifecycle == null) {
        /*
         * For backwards-compatibility, a complete absence of
         * lifecycle information means the portlet is published.
         */
        portletDefinition.updateLifecycleState(PortletLifecycleState.PUBLISHED, systemUser);
    } else if (lifecycle.getEntries().isEmpty()) {
        /*
         * According to the comments for 4.3, we're supposed
         * to leave the portlet in CREATED state.
         */
        portletDefinition.updateLifecycleState(PortletLifecycleState.CREATED, systemUser);
    } else {
        /*
         * Use a TreeMap because we need to be certain that the entries
         * get applied to the new portlet definition in a sane order...
         */
        Map<IPortletLifecycleEntry, IPerson> convertedEntries = new TreeMap<>();
        /*
         * Convert each LifecycleEntry (JAXB) to an IPortletLifecycleEntry (internal)
         */
        for (LifecycleEntry entry : lifecycle.getEntries()) {
            final IPerson user = StringUtils.isNotBlank(entry.getUser())
                    ? userIdentityStore.getPerson(entry.getUser(), true)
                    : systemUser; // default
            // We will support case insensitivity of entry/@name in the XML
            final PortletLifecycleState state = PortletLifecycleState.valueOf(entry.getName().toUpperCase());
            // Entries added by an upgrade transform will not have a date
            final Date date = entry.getValue().equals(useCurrentDatetimeSignal) ? new Date()
                    : entry.getValue().getTime();
            convertedEntries.put(new IPortletLifecycleEntry() {
                @Override
                public int getUserId() {
                    return user.getID();
                }

                @Override
                public PortletLifecycleState getLifecycleState() {
                    return state;
                }

                @Override
                public Date getDate() {
                    return date;
                }

                @Override
                public int compareTo(IPortletLifecycleEntry o) {
                    int rslt = date.compareTo(o.getDate());
                    if (rslt == 0) {
                        rslt = state.getOrder() - o.getLifecycleState().getOrder();
                    }
                    return rslt;
                }
            }, user);
        }
        /*
         * Apply them to the portlet definition
         */
        convertedEntries.forEach((k, v) -> {
            portletDefinition.updateLifecycleState(k.getLifecycleState(), v, k.getDate());
        });
    }
}

From source file:org.nanoframework.orm.jedis.sharded.RedisClientImpl.java

@Override
public boolean hmset(final String key, final Map<String, Object> map) {
    Assert.hasText(key);
    Assert.notEmpty(map);

    ShardedJedis jedis = null;
    try {
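        // Convert each value to its JSON string form so the whole map can be written with a single HMSET.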
        final Map<String, String> newMap = Maps.newHashMap();
        map.forEach((field, value) -> newMap.put(field, toJSONString(value)));
        jedis = POOL.getJedis(config.getRedisType());
        return isOK(jedis.hmset(key, newMap));
    } catch (final Throwable e) {
        throw new RedisClientException(e.getMessage(), e);
    } finally {
        POOL.close(jedis);
    }
}

From source file:org.apache.hadoop.hive.kafka.KafkaStorageHandler.java

@Override
public void commitInsertTable(Table table, boolean overwrite) throws MetaException {
    boolean isExactlyOnce = table.getParameters().get(KafkaTableProperties.WRITE_SEMANTIC_PROPERTY.getName())
            .equals(KafkaOutputFormat.WriteSemantic.EXACTLY_ONCE.name());
    String optimisticCommitVal = table.getParameters()
            .get(KafkaTableProperties.HIVE_KAFKA_OPTIMISTIC_COMMIT.getName());
    boolean isTwoPhaseCommit = !Boolean.parseBoolean(optimisticCommitVal);
    if (!isExactlyOnce || !isTwoPhaseCommit) {
        // If this is not a two-phase commit, there is no open transaction to handle.
        return;
    }

    final Path queryWorkingDir = getQueryWorkingDir(table);
    final Map<String, Pair<Long, Short>> transactionsMap;
    final int maxTries = Integer
            .parseInt(table.getParameters().get(KafkaTableProperties.MAX_RETRIES.getName()));
    // We have 4 stages ahead of us:
    // 1. Fetch the transaction state from HDFS.
    // 2. Build/init all the Kafka producers and perform a pre-commit call to check if we can go ahead with the commit.
    // 3. Commit the transactions one by one.
    // 4. Clean the working directory.

    // First stage: fetch the transaction states
    final RetryUtils.Task<Map<String, Pair<Long, Short>>> fetchTransactionStates = new RetryUtils.Task<Map<String, Pair<Long, Short>>>() {
        @Override
        public Map<String, Pair<Long, Short>> perform() throws Exception {
            return TransactionalKafkaWriter.getTransactionsState(FileSystem.get(getConf()), queryWorkingDir);
        }
    };

    try {
        transactionsMap = RetryUtils.retry(fetchTransactionStates, (error) -> (error instanceof IOException),
                maxTries);
    } catch (Exception e) {
        // Cannot go further
        LOG.error("Cannot fetch transaction states due to [{}]", e.getMessage());
        throw new MetaException(e.getMessage());
    }

    // Second stage: resume producers and pre-commit
    final Properties baseProducerProps = buildProducerProperties(table);
    final Map<String, HiveKafkaProducer> producersMap = new HashMap<>();
    final RetryUtils.Task<Void> buildProducersTask = new RetryUtils.Task<Void>() {
        @Override
        public Void perform() throws Exception {
            assert producersMap.size() == 0;
            transactionsMap.forEach((key, value) -> {
                // Base producer properties, missing the transactional id.
                baseProducerProps.setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, key);
                HiveKafkaProducer<byte[], byte[]> producer = new HiveKafkaProducer<>(baseProducerProps);
                producer.resumeTransaction(value.getLeft(), value.getRight());
                // This is a dummy RPC call to ensure that the producer is still resumable and to signal the pre-commit, as per:
                // https://cwiki.apache.org/confluence/display/KAFKA/Transactional+Messaging+in+Kafka#EndPhase
                producer.sendOffsetsToTransaction(ImmutableMap.of(), "__dry_run");
                producersMap.put(key, producer);
            });
            return null;
        }
    };

    RetryUtils.CleanupAfterFailure cleanUpTheMap = new RetryUtils.CleanupAfterFailure() {
        @Override
        public void cleanup() {
            producersMap.forEach((s, producer) -> producer.close(0, TimeUnit.MILLISECONDS));
            producersMap.clear();
        }
    };
    final Predicate<Throwable> isRetriable = (error) -> !KafkaUtils.exceptionIsFatal(error)
            && !(error instanceof ProducerFencedException);
    try {
        RetryUtils.retry(buildProducersTask, isRetriable, cleanUpTheMap, maxTries,
                "Error while building producers");
    } catch (Exception e) {
        // Cannot go further
        LOG.error("Cannot build producers due to [{}]", e.getMessage());
        throw new MetaException(e.getMessage());
    }

    // Third stage: commit the transactions. This part is the actual critical section.
    // The commit might be retried on error, but keep in mind that in some cases, e.g. when an open
    // transaction has expired after the 15-minute timeout, it is not possible to go further.
    final Set<String> committedTx = new HashSet<>();
    final RetryUtils.Task<Void> commitTask = new RetryUtils.Task() {
        @Override
        public Object perform() throws Exception {
            producersMap.forEach((key, producer) -> {
                if (!committedTx.contains(key)) {
                    producer.commitTransaction();
                    committedTx.add(key);
                    producer.close();
                    LOG.info("Committed Transaction [{}]", key);
                }
            });
            return null;
        }
    };

    try {
        RetryUtils.retry(commitTask, isRetriable, maxTries);
    } catch (Exception e) {
        // At this point we are in an inconsistent state if any commit already happened: close and log it.
        producersMap.forEach((key, producer) -> producer.close(0, TimeUnit.MILLISECONDS));
        LOG.error("Commit transaction failed", e);
        if (committedTx.size() > 0) {
            LOG.error("Partial data got committed; some actions need to be done");
            committedTx.stream().forEach(key -> LOG.error("Transaction [{}] is an orphan commit", key));
        }
        throw new MetaException(e.getMessage());
    }

    // Fourth stage: clean the query working directory
    final RetryUtils.Task<Void> cleanQueryDirTask = new RetryUtils.Task<Void>() {
        @Override
        public Void perform() throws Exception {
            cleanWorkingDirectory(queryWorkingDir);
            return null;
        }
    };
    try {
        RetryUtils.retry(cleanQueryDirTask, (error) -> error instanceof IOException, maxTries);
    } catch (Exception e) {
        // Just log it
        LOG.error("Failed to clean query working directory [{}] due to [{}]", queryWorkingDir, e.getMessage());
    }
}

From source file:org.nanoframework.orm.jedis.sharded.RedisClientImpl.java

@Override
public Map<String, Long> scard(final String... keys) {
    Assert.notEmpty(keys);
    ShardedJedis jedis = null;
    try {
        jedis = POOL.getJedis(config.getRedisType());
        final ShardedJedisPipeline pipeline = jedis.pipelined();
        final Map<String, Response<Long>> responses = Maps.newHashMap();
        for (String key : keys) {
            responses.put(key, pipeline.scard(key));
        }

        pipeline.sync();
        final Map<String, Long> values = Maps.newHashMap();
        responses.forEach((key, response) -> values.put(key, response.get()));
        return values;
    } catch (final Throwable e) {
        throw new RedisClientException(e.getMessage(), e);
    } finally {
        POOL.close(jedis);
    }
}

From source file:org.nanoframework.orm.jedis.sharded.RedisClientImpl.java

@Override
public Map<String, String> get(final String... keys) {
    Assert.notEmpty(keys);

    ShardedJedis jedis = null;
    try {
        jedis = POOL.getJedis(config.getRedisType());
        final ShardedJedisPipeline pipelined = jedis.pipelined();
        final Map<String, Response<String>> values = Maps.newHashMap();
        for (String key : keys) {
            values.put(key, pipelined.get(key));
        }

        pipelined.sync();
        final Map<String, String> valueMap = Maps.newHashMap();
        values.forEach((key, response) -> valueMap.put(key, response.get()));
        return valueMap;
    } catch (final Throwable e) {
        throw new RedisClientException(e.getMessage(), e);
    } finally {
        POOL.close(jedis);
    }
}

From source file:org.nanoframework.orm.jedis.sharded.RedisClientImpl.java

@Override
public Map<String, Boolean> set(final Map<String, Object> map) {
    Assert.notEmpty(map);

    ShardedJedis jedis = null;
    try {
        jedis = POOL.getJedis(config.getRedisType());
        final ShardedJedisPipeline pipelined = jedis.pipelined();
        final Map<String, Response<String>> responses = Maps.newHashMap();
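        // The first forEach queues one SET per entry on the pipeline; after sync(), the second unwraps each Response into an OK flag.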
        map.forEach((key, value) -> responses.put(key, pipelined.set(key, toJSONString(value))));
        pipelined.sync();

        final Map<String, Boolean> values = Maps.newHashMap();
        responses.forEach((key, response) -> values.put(key, isOK(response.get())));
        return values;
    } catch (final Throwable e) {
        throw new RedisClientException(e.getMessage(), e);
    } finally {
        POOL.close(jedis);
    }
}

From source file:jp.co.iidev.subartifact1.divider1.mojo.ArtifactDividerMojo.java

public void execute() throws MojoExecutionException {

    Artifact projArt = project.getArtifact();
    Map<Dependency, Artifact> artifactsForDep = Maps.newHashMap();

    projArt = project.getArtifact();

    {
        List<Dependency> dep = project.getDependencies();
        Set<Artifact> arts = project.getDependencyArtifacts();

        for (Dependency dx : dep) {
            String grpid = dx.getGroupId();
            String artid = dx.getArtifactId();
            String clsf = dx.getClassifier();

            for (Artifact art : arts) {
                boolean a = StringUtils.equals(art.getArtifactId(), artid);
                boolean g = StringUtils.equals(art.getGroupId(), grpid);
                boolean c = StringUtils.equals(art.getClassifier(), clsf);

                if (a && g && c) {
                    artifactsForDep.put(dx, art);
                }
            }
        }
    }

    {
        String version = project.getVersion();
        String groupId = project.getGroupId();

        LinkedHashMap<File, Dependency> compiletimeClasspath = Maps.newLinkedHashMap();

        File rtjar = Paths.get(System.getProperty("java.home"), "lib", "rt.jar").toFile();
        Dependency rtjar_dummyDep = new Dependency();
        {
            rtjar_dummyDep.setGroupId(SystemUtils.JAVA_VENDOR.replace(" ", "."));
            rtjar_dummyDep.setVersion(SystemUtils.JAVA_RUNTIME_VERSION);
            rtjar_dummyDep.setArtifactId(SystemUtils.JAVA_RUNTIME_NAME);
        }

        File targetJar = project.getArtifact().getFile();
        Dependency targetJarDep = new Dependency();
        {
            targetJarDep.setArtifactId(project.getArtifactId());
            targetJarDep.setGroupId(project.getGroupId());
            targetJarDep.setVersion(project.getVersion());
            targetJarDep.setClassifier(projArt.getClassifier());
        }

        compiletimeClasspath.put(rtjar, rtjar_dummyDep);
        compiletimeClasspath.put(targetJar, targetJarDep);
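        // Register every resolved dependency artifact's jar on the compile-time classpath, keyed by its file.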
        artifactsForDep.forEach((d, a) -> {
            compiletimeClasspath.put(a.getFile(), d);
        });

        LoggableFactory lf = new LoggableFactory() {
            @Override
            public Loggable createLoggable(Class cx) {
                return new Loggable() {
                    Logger l = LoggerFactory.getLogger(cx);

                    @Override
                    public void warn(String text) {
                        l.warn(text);
                    }

                    @Override
                    public void info(String text) {
                        l.info(text);
                    }

                    @Override
                    public void error(String text) {
                        l.error(text);
                    }

                    @Override
                    public void debug(String text) {
                        l.debug(text);
                    }
                };
            }
        };
        try {
            LinkedHashMap<SubArtifactDefinition, SubArtifactDeployment> buildPlan = new DivisionExecutor(
                    lf.createLoggable(DivisionExecutor.class)).planDivision(targetJar, rootSubArtifactId,
                            Arrays.asList(subartifacts == null ? new SubArtifact[0] : subartifacts),
                            compiletimeClasspath, not(in(ImmutableSet.of(rtjar, targetJar))),
                            defaultRootTransitivePropagations, defaultRootSourceReferencePropagations,
                            defaultSubartifactSourceReferencePropagations, globalReferencePropagations, lf);

            Set<File> usableJar = Sets.newLinkedHashSet(compiletimeClasspath.keySet());
            usableJar.remove(targetJar);
            usableJar.remove(rtjar);

            int ix = 0;
            for (SubArtifact s : subartifacts) {
                for (Dependency d : s.getExtraDependencies()) {
                    buildPlan.get(s).getJarDeps().put(new File("x_xx_xyx_duMmy" + (ix++) + ".jar"), d);
                }
            }

            new PomSetGenerator(project.getBasedir().toPath().resolve("pom.xml"), outputDirectory.toPath(),
                    templateOutputDirectory.toPath(), lf).generate(groupId, version,
                            this.subArtifactsParentArtifactId, buildPlan);
        } catch (RuntimeException e) {
            throw e;
        } catch (Exception e) {
            throw new MojoExecutionException("division process error", e);
        }
    }

}

From source file:com.netflix.spinnaker.orca.libdiffs.LibraryDiffTool.java

public LibraryDiffs calculateLibraryDiffs(List<Library> sourceLibs, List<Library> targetLibs) {
    LibraryDiffs libraryDiffs = new LibraryDiffs();
    libraryDiffs.setTotalLibraries(targetLibs != null ? targetLibs.size() : 0);

    BiFunction<Library, String, Diff> buildDiff = (Library library, String display) -> {
        Diff diff = new Diff();
        diff.setLibrary(includeLibraryDetails ? library : null);
        diff.setDisplayDiff(display);
        return diff;
    };

    try {
        if (!targetLibs.isEmpty() && !sourceLibs.isEmpty()) {
            Set<Library> uniqueCurrentList = new HashSet<>(targetLibs);
            Map<String, List<Library>> duplicatesMap = filterValues(
                    targetLibs.stream().collect(groupingBy(Library::getName)), it -> it.size() > 1);
            sourceLibs.forEach((Library oldLib) -> {
                if (!duplicatesMap.keySet().contains(oldLib.getName())) {
                    Library currentLib = uniqueCurrentList.stream()
                            .filter(it -> it.getName().equals(oldLib.getName())).findFirst().orElse(null);
                    if (currentLib != null) {
                        if (isEmpty(currentLib.getVersion()) || isEmpty(oldLib.getVersion())) {
                            libraryDiffs.getUnknown().add(buildDiff.apply(oldLib, oldLib.getName()));
                        } else if (currentLib.getVersion() != null && oldLib.getVersion() != null) {
                            int comparison = comparableLooseVersion.compare(currentLib.getVersion(),
                                    oldLib.getVersion());
                            if (comparison == 1) {
                                libraryDiffs.getUpgraded().add(buildDiff.apply(oldLib, format("%s: %s -> %s",
                                        oldLib.getName(), oldLib.getVersion(), currentLib.getVersion())));
                            }
                            if (comparison == -1) {
                                libraryDiffs.getDowngraded().add(buildDiff.apply(oldLib, format("%s: %s -> %s",
                                        oldLib.getName(), oldLib.getVersion(), currentLib.getVersion())));
                            }
                        }
                    } else {
                        libraryDiffs.getRemoved().add(buildDiff.apply(oldLib,
                                format("%s: %s", oldLib.getName(), oldLib.getVersion())));
                    }
                }
            });

            uniqueCurrentList.stream().filter(it -> !sourceLibs.contains(it))
                    .forEach(newLib -> libraryDiffs.getAdded().add(
                            buildDiff.apply(newLib, format("%s: %s", newLib.getName(), newLib.getVersion()))));

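            // For each library name that appears more than once in the target, record a duplicate diff when the versions actually differ.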
            duplicatesMap.forEach((key, value) -> {
                Library currentLib = targetLibs.stream().filter(it -> it.getName().equals(key)).findFirst()
                        .orElse(null);
                if (currentLib != null) {
                    boolean valid = value.stream().map(Library::getVersion).filter(Objects::nonNull)
                            .collect(groupingBy(Function.identity())).keySet().size() > 1;
                    if (valid) {
                        String displayDiff = format("%s: %s", currentLib.getName(),
                                value.stream().map(Library::getVersion).collect(joining(", ")));
                        libraryDiffs.getDuplicates().add(buildDiff.apply(currentLib, displayDiff));
                    }
                }
            });

            libraryDiffs
                    .setHasDiff(!libraryDiffs.getDowngraded().isEmpty() || !libraryDiffs.getUpgraded().isEmpty()
                            || !libraryDiffs.getAdded().isEmpty() || !libraryDiffs.getRemoved().isEmpty());
        }

        return libraryDiffs;
    } catch (Exception e) {
        throw new RuntimeException("Exception occurred while calculating library diffs", e);
    }
}