List of usage examples for java.util.Set forEach
default void forEach(Consumer<? super T> action)

Performs the given action for each element of the set until all elements have been processed or the action throws an exception. Set inherits this method from java.lang.Iterable.
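A minimal, self-contained example of the call; the class name and sample elements below are illustrative:

import java.util.Set;

public class SetForEachExample {
    public static void main(String[] args) {
        Set<String> names = Set.of("alice", "bob", "carol");
        // The action runs once per element; iteration order of an
        // unordered Set such as this one is unspecified.
        names.forEach(name -> System.out.println("hello, " + name));
    }
}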
From source file: spoon.IncrementalLauncher.java
/**
 * Creates a {@link Launcher} for incremental build.
 * @param inputResources Resources to be parsed to build the spoon model.
 * @param sourceClasspath Source classpath of the spoon model.
 * @param cacheDirectory The directory to store all incremental information. If it's empty, a full rebuild will be performed.
 * @param forceRebuild Force a full rebuild, ignoring the incremental cache.
 * @throws IllegalArgumentException
 * @throws SpoonException
 */
public IncrementalLauncher(Set<File> inputResources, Set<String> sourceClasspath, File cacheDirectory,
        boolean forceRebuild) {
    if (cacheDirectory == null) {
        throw new IllegalArgumentException("unable to create incremental launcher with null cache directory");
    }
    mInputSources = getAllJavaFiles(inputResources);
    mSourceClasspath = new HashSet<>(sourceClasspath);
    mIncrementalCacheDirectory = cacheDirectory;
    mModelFile = new File(cacheDirectory, "model");
    mCacheInfoFile = new File(cacheDirectory, "cache-info");
    mClassFilesDir = new File(cacheDirectory, "class-files");

    if (!mIncrementalCacheDirectory.exists() || !mModelFile.exists() || !mCacheInfoFile.exists()
            || !mClassFilesDir.exists()) {
        forceRebuild = true;
    } else {
        try {
            mCacheInfo = loadCacheInfo(mCacheInfoFile);
        } catch (InvalidClassException | SpoonException e) {
            // Incompatible cache version or unable to load cache, so force a rebuild.
            forceRebuild = true;
        }
    }

    if (!mIncrementalCacheDirectory.exists() && !mIncrementalCacheDirectory.mkdirs()) {
        throw new SpoonException("unable to create cache directory");
    }
    if (!mClassFilesDir.exists() && !mClassFilesDir.mkdirs()) {
        throw new SpoonException("unable to create class files directory");
    }

    if (forceRebuild) {
        // Build model from scratch.
        factory = createFactory();
        processArguments();
        mInputSources.forEach(f -> addInputResource(f.getPath()));
        mChangesPresent = true;
        setBinaryOutputDirectory(mClassFilesDir);
    } else {
        // Load model from cache.
        Factory oldFactory = loadFactory(mModelFile);
        oldFactory.getModel().setBuildModelIsFinished(false);

        // Build model incrementally.
        mRemovedSources = new HashSet<>(
                CollectionUtils.subtract(mCacheInfo.inputSourcesMap.keySet(), mInputSources));
        mAddedSources = new HashSet<>(
                CollectionUtils.subtract(mInputSources, mCacheInfo.inputSourcesMap.keySet()));
        mCommonSources = new HashSet<>(
                CollectionUtils.intersection(mCacheInfo.inputSourcesMap.keySet(), mInputSources));

        Set<File> incrementalSources = new HashSet<>(mAddedSources);
        for (File e : mCommonSources) {
            if (e.lastModified() >= mCacheInfo.lastBuildTime) {
                incrementalSources.add(e);
            }
        }

        List<CtType<?>> oldTypes = oldFactory.Type().getAll();
        Set<CtType<?>> changedTypes = new HashSet<>();
        for (CtType<?> type : oldTypes) {
            File typeFile = type.getPosition().getFile();
            if (incrementalSources.contains(typeFile)) {
                changedTypes.add(type);
            }
        }

        for (CtType<?> type : oldTypes) {
            File typeFile = type.getPosition().getFile();
            if (mRemovedSources.contains(typeFile)) {
                type.delete();
                continue;
            }
            for (CtType<?> changedType : changedTypes) {
                // We should also rebuild types that refer to changed types.
                if (type.getReferencedTypes().contains(changedType.getReference())) {
                    incrementalSources.add(typeFile);
                    type.delete();
                }
            }
        }

        try {
            mSourceClasspath.add(mClassFilesDir.getCanonicalPath());
        } catch (IOException e2) {
            throw new SpoonException("unable to locate class files dir: " + mClassFilesDir);
        }

        Collection<CtPackage> oldPackages = oldFactory.Package().getAll();
        for (CtPackage pkg : oldPackages) {
            if (pkg.getTypes().isEmpty() && pkg.getPackages().isEmpty() && !pkg.isUnnamedPackage()) {
                pkg.delete();
            }
        }

        factory = oldFactory;
        processArguments();
        incrementalSources.forEach(f -> addInputResource(f.getPath()));
        mChangesPresent = !mRemovedSources.isEmpty() || !mAddedSources.isEmpty()
                || !incrementalSources.isEmpty();
        setBinaryOutputDirectory(mClassFilesDir);
    }
    getEnvironment().setSourceClasspath(mSourceClasspath.toArray(new String[0]));
}
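Both forEach calls in this constructor do the same job: feed every java.io.File in a Set into the launcher's addInputResource. A standalone sketch of that pattern, assuming a hypothetical registration method rather than the real Spoon API:

import java.io.File;
import java.util.HashSet;
import java.util.Set;

public class InputRegistrationSketch {
    private final Set<String> registeredPaths = new HashSet<>();

    // Hypothetical stand-in for Launcher#addInputResource(String).
    void addInputResource(String path) {
        registeredPaths.add(path);
    }

    void registerAll(Set<File> inputSources) {
        // Same shape as the Spoon example: one registration per file.
        inputSources.forEach(f -> addInputResource(f.getPath()));
    }
}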
From source file: org.apache.hadoop.hbase.backup.impl.BackupSystemTable.java
/**
 * Add tables to global incremental backup set
 * @param tables set of tables
 * @param backupRoot root directory path to backup
 * @throws IOException exception
 */
public void addIncrementalBackupTableSet(Set<TableName> tables, String backupRoot) throws IOException {
    if (LOG.isTraceEnabled()) {
        LOG.trace("Add incremental backup table set to backup system table. ROOT=" + backupRoot
                + " tables [" + StringUtils.join(tables, " ") + "]");
    }
    if (LOG.isDebugEnabled()) {
        tables.forEach(table -> LOG.debug(Objects.toString(table)));
    }
    try (Table table = connection.getTable(tableName)) {
        Put put = createPutForIncrBackupTableSet(tables, backupRoot);
        table.put(put);
    }
}
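The level check around the forEach is the usual guard: skip the per-element work entirely unless debug output is wanted. A standalone sketch using java.util.logging (the logger setup and table names are illustrative):

import java.util.Objects;
import java.util.Set;
import java.util.logging.Level;
import java.util.logging.Logger;

public class GuardedLoggingSketch {
    private static final Logger LOG = Logger.getLogger(GuardedLoggingSketch.class.getName());

    static void logTables(Set<String> tables) {
        // Test the level once, then emit one record per element.
        if (LOG.isLoggable(Level.FINE)) {
            tables.forEach(t -> LOG.fine(Objects.toString(t)));
        }
    }
}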
From source file: ai.grakn.engine.tasks.storage.TaskStateGraphStore.java
@Override
public Boolean updateState(TaskState task) {
    // Existing resource relations to remove
    final Set<TypeLabel> resourcesToDettach = new HashSet<>();

    // New resources to add
    Var resources = var(TASK_VAR);

    resourcesToDettach.add(SERIALISED_TASK);
    resources = resources.has(SERIALISED_TASK, var().val(serializeToString(task)));

    // TODO make sure all properties are being updated
    if (task.status() != null) {
        resourcesToDettach.add(STATUS);
        resourcesToDettach.add(STATUS_CHANGE_TIME);
        resources = resources.has(STATUS, var().val(task.status().toString()))
                .has(STATUS_CHANGE_TIME, var().val(new Date().getTime()));
    }
    if (task.engineID() != null) {
        resourcesToDettach.add(ENGINE_ID);
        resources = resources.has(ENGINE_ID, var().val(task.engineID().value()));
    } else {
        resourcesToDettach.add(ENGINE_ID);
    }
    if (task.exception() != null) {
        resourcesToDettach.add(TASK_EXCEPTION);
        resourcesToDettach.add(STACK_TRACE);
        resources = resources.has(TASK_EXCEPTION, var().val(task.exception()));
        if (task.stackTrace() != null) {
            resources = resources.has(STACK_TRACE, var().val(task.stackTrace()));
        }
    }
    if (task.checkpoint() != null) {
        resourcesToDettach.add(TASK_CHECKPOINT);
        resources = resources.has(TASK_CHECKPOINT, var().val(task.checkpoint()));
    }
    if (task.configuration() != null) {
        resourcesToDettach.add(TASK_CONFIGURATION);
        resources = resources.has(TASK_CONFIGURATION, var().val(task.configuration().toString()));
    }

    Var finalResources = resources;
    Optional<Boolean> result = attemptCommitToSystemGraph((graph) -> {
        Instance taskConcept = graph.getResourcesByValue(task.getId().getValue()).iterator().next().owner();
        // Remove relations to any resources we want to currently update
        resourcesToDettach.forEach(typeLabel -> {
            RoleType roleType = graph.getType(Schema.ImplicitType.HAS_OWNER.getLabel(typeLabel));
            taskConcept.relations(roleType).forEach(Concept::delete);
        });
        // Insert new resources with new values
        graph.graql().insert(finalResources.id(taskConcept.getId())).execute();
        return true;
    }, true);

    return result.isPresent();
}
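The interesting move above is the nested forEach: the outer pass walks the set of labels to detach, the inner pass deletes everything attached under each label. A minimal standalone sketch with a plain Map standing in for the graph (all names and values here are illustrative):

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class NestedCleanupSketch {
    public static void main(String[] args) {
        // label -> attached values, a stand-in for relations in the graph
        Map<String, Set<String>> relations = new HashMap<>();
        relations.put("STATUS", Set.of("RUNNING"));
        relations.put("ENGINE_ID", Set.of("engine-1", "engine-2"));
        relations.put("CHECKPOINT", Set.of("cp-7"));

        Set<String> toDetach = Set.of("STATUS", "ENGINE_ID");
        // Outer forEach selects each label; the inner one handles its attachments.
        toDetach.forEach(label -> relations.getOrDefault(label, Set.of())
                .forEach(value -> System.out.println("deleting " + label + " -> " + value)));
        toDetach.forEach(relations::remove);

        System.out.println("remaining: " + relations.keySet()); // [CHECKPOINT]
    }
}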
From source file: org.apereo.portal.security.oauth.IdTokenFactory.java
@PostConstruct
public void init() {
    // Mappings for Standard Claims
    final Set<ClaimMapping> set = new HashSet<>();
    set.add(new ClaimMapping("name", nameAttr, DataTypeConverter.STRING));
    set.add(new ClaimMapping("given_name", givenNameAttr, DataTypeConverter.STRING));
    set.add(new ClaimMapping("family_name", familyNameAttr, DataTypeConverter.STRING));
    set.add(new ClaimMapping("middle_name", middleNameAttr, DataTypeConverter.STRING));
    set.add(new ClaimMapping("nickname", nicknameAttr, DataTypeConverter.STRING));
    set.add(new ClaimMapping("preferred_username", preferredUsernameAttr, DataTypeConverter.STRING));
    set.add(new ClaimMapping("profile", profileAttr, DataTypeConverter.STRING));
    set.add(new ClaimMapping("picture", pictureAttr, DataTypeConverter.STRING));
    set.add(new ClaimMapping("website", websiteAttr, DataTypeConverter.STRING));
    set.add(new ClaimMapping("email", emailAttr, DataTypeConverter.STRING));
    set.add(new ClaimMapping("email_verified", emailVerifiedAttr, DataTypeConverter.BOOLEAN));
    set.add(new ClaimMapping("gender", genderAttr, DataTypeConverter.STRING));
    set.add(new ClaimMapping("birthdate", birthdateAttr, DataTypeConverter.STRING));
    set.add(new ClaimMapping("zoneinfo", zoneinfoAttr, DataTypeConverter.STRING));
    set.add(new ClaimMapping("locale", localeAttr, DataTypeConverter.STRING));
    set.add(new ClaimMapping("phone_number", phoneNumberAttr, DataTypeConverter.STRING));
    set.add(new ClaimMapping("phone_number_verified", phoneNumberVerifiedAttr, DataTypeConverter.BOOLEAN));
    set.add(new ClaimMapping("updated_at", updatedAtAttributeName, DataTypeConverter.NUMBER));
    mappings = Collections.unmodifiableSet(set);

    if (logger.isInfoEnabled()) {
        final StringBuilder msg = new StringBuilder();
        msg.append("Using the following mappings for OIDC Standard Claims:");
        set.forEach(mapping -> msg.append("\n\t").append(mapping));
        logger.info(msg.toString());
    }

    // Portal Groups ('groups' custom claim)
    groupsWhitelist = Collections.unmodifiableSet(Arrays.stream(groupsWhitelistProperty.split(LIST_SEPARATOR))
            .map(String::trim).filter(item -> item.length() != 0).collect(Collectors.toSet()));
    logger.info("Using the following portal groups to build the custom 'groups' claim: {}", groupsWhitelist);

    // Other Custom Claims (a.k.a. user attributes)
    customClaims = Collections.unmodifiableSet(Arrays.stream(customClaimsProperty.split(LIST_SEPARATOR))
            .map(String::trim).filter(item -> item.length() != 0).collect(Collectors.toSet()));
    logger.info("Using the following custom claims: {}", customClaims);
}
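Appending into a captured StringBuilder, as the init method does, turns N log lines into a single multi-line record. A standalone sketch of that idiom (the class name and sample data are illustrative):

import java.util.Set;

public class MessageAggregationSketch {
    static String describeMappings(Set<String> mappings) {
        StringBuilder msg = new StringBuilder("Using the following mappings:");
        // The lambda captures the effectively final builder and appends one line per element.
        mappings.forEach(m -> msg.append("\n\t").append(m));
        return msg.toString();
    }

    public static void main(String[] args) {
        System.out.println(describeMappings(Set.of("name", "email", "locale")));
    }
}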
From source file: act.installer.metacyc.OrganismCompositionMongoWriter.java
/**
 * Extracts organism names from a BP element at some sub path, submits them to the DB, and returns a mapping of
 * their names to DB ids. **Does not do anything with NCBI ids at this time**.
 * @param rootElement The root path from which to search.
 * @param path The sub path to search for organisms.
 * @return A map from organism name to organism name DB id.
 */
private Map<String, Long> extractOrganismsAtPath(BPElement rootElement, List<NXT> path) {
    Set<String> organismNames = new HashSet<>();
    for (BPElement biosrc : this.src.traverse(rootElement, path)) {
        if (biosrc == null) {
            System.err.format("WARNING: got null organism for %s\n", rootElement.getID());
            continue;
        }
        if (biosrc instanceof BioSource) {
            BioSource bs = (BioSource) biosrc;
            if (bs.getName().size() != 1) {
                // Assume only one name per BioSource entity.
                System.err.format("WARNING: found a BioSource with multiple names (%s): %s\n", bs.getID(),
                        StringUtils.join(bs.getName(), ", "));
            }
            organismNames.addAll(bs.getName());
        } else {
            System.err.format("WARNING: found a non-BioSource organism (%s) for %s, using anyway\n",
                    biosrc.getID(), rootElement.getID());
            organismNames.addAll(biosrc.getName());
        }
        // Ignore NCBI Taxonomy x-refs for now, as we don't have any use for them in our current model.
    }
    Map<String, Long> results = new HashMap<>();
    organismNames.forEach(name -> results.put(name, this.getOrganismNameIdByNameFromDB(name)));
    return results;
}
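Populating a Map from a Set via forEach, as the last two lines do, is equivalent to a stream collect. A minimal sketch of both forms (the lookup function is an illustrative stand-in for the DB call):

import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public class SetToMapSketch {
    static long lookupId(String name) {
        return name.hashCode(); // stand-in for a DB lookup
    }

    public static void main(String[] args) {
        Set<String> names = Set.of("E. coli", "S. cerevisiae");

        // forEach into a pre-built map, as in the example above.
        Map<String, Long> viaForEach = new HashMap<>();
        names.forEach(n -> viaForEach.put(n, lookupId(n)));

        // Equivalent stream form; keys must be unique, which a Set guarantees.
        Map<String, Long> viaStream = names.stream()
                .collect(Collectors.toMap(n -> n, SetToMapSketch::lookupId));

        System.out.println(viaForEach.equals(viaStream)); // true
    }
}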
From source file: dhbw.clippinggorilla.objects.user.UserUtils.java
/**
 * Creates a new user profile and saves it to the DB.
 *
 * @param u The user of the new interest profile
 * @param name The name of the interest profile
 * @param sources The selected sources
 * @param tags The selected tags
 * @param categories The selected categories
 * @return the profile, or null if something went wrong
 */
public static InterestProfile createNewProfile(User u, String name, Map<Source, Boolean> sources,
        Set<String> tags, Map<Category, Boolean> categories) {
    // TODO: profile validation here!
    // Validation: bail out if a profile with this name already exists.
    if (!InterestProfileUtils.checkNameUnique(u, name)) {
        return null;
    }
    try {
        // Insert profile data into the user_profile table
        String sql = "INSERT INTO " + Tables.USER_PROFILE + " (" + Columns.USER_ID + ", " + Columns.NAME
                + ") VALUES (?, ?)";
        PreparedStatement statement = Database.getConnection().prepareStatement(sql);
        statement.setInt(1, u.getId());
        statement.setString(2, name);
        statement.executeUpdate();

        // Get the profile ID from the table
        sql = "SELECT " + Columns.ID + " FROM " + Tables.USER_PROFILE + " WHERE " + Columns.USER_ID
                + " = ? AND " + Columns.NAME + " = ?";
        statement = Database.getConnection().prepareStatement(sql);
        statement.setInt(1, u.getId());
        statement.setString(2, name);
        ResultSet result = statement.executeQuery();
        result.next();
        int profileId = result.getInt(Columns.ID);

        // Insert sources
        sources.forEach((lambda_source, bool) -> {
            if (bool) {
                String sourceId = lambda_source.getId();
                try {
                    String lambda_sql = "INSERT INTO " + Tables.USER_PROFILE_SOURCE + " (" + Columns.PROFILE_ID
                            + ", " + Columns.SOURCE + ") VALUES (?, ?)";
                    PreparedStatement lambda_statement = Database.getConnection().prepareStatement(lambda_sql);
                    lambda_statement.setInt(1, profileId);
                    lambda_statement.setString(2, sourceId);
                    lambda_statement.executeUpdate();
                } catch (SQLException ex) {
                    Log.warning("Profile source insertion failed", ex);
                }
            }
        });

        // Insert tags
        tags.forEach((lambda_tag) -> {
            try {
                String lambda_sql = "INSERT INTO " + Tables.USER_PROFILE_TAG + " (" + Columns.PROFILE_ID + ", "
                        + Columns.TAG + ") VALUES (?, ?)";
                PreparedStatement lambda_statement = Database.getConnection().prepareStatement(lambda_sql);
                lambda_statement.setInt(1, profileId);
                lambda_statement.setString(2, lambda_tag);
                lambda_statement.executeUpdate();
            } catch (SQLException ex) {
                Log.warning("Profile tag insertion failed", ex);
            }
        });

        // Insert categories
        categories.forEach((category, bool) -> {
            if (bool) {
                String categoryId = category.getId();
                try {
                    String lambda_sql = "INSERT INTO " + Tables.USER_PROFILE_CATEGORY + " ("
                            + Columns.PROFILE_ID + ", " + Columns.CATEGORY + ") VALUES (?, ?)";
                    PreparedStatement lambda_statement = Database.getConnection().prepareStatement(lambda_sql);
                    lambda_statement.setInt(1, profileId);
                    lambda_statement.setString(2, categoryId);
                    lambda_statement.executeUpdate();
                } catch (SQLException ex) {
                    Log.warning("Profile category insertion failed", ex);
                }
            }
        });

        return InterestProfileUtils.getInterestProfile(profileId);
    } catch (SQLException ex) {
        Log.warning("Insertion of new profile failed", ex);
        return null;
    }
}
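Note the two different forEach overloads in play above: Set.forEach (inherited from Iterable) takes a Consumer of elements, while Map.forEach takes a BiConsumer of key/value pairs. A minimal sketch of the distinction (sample data is illustrative):

import java.util.Map;
import java.util.Set;

public class ForEachVariantsSketch {
    public static void main(String[] args) {
        Set<String> tags = Set.of("sports", "politics");
        Map<String, Boolean> categories = Map.of("news", true, "satire", false);

        // Iterable#forEach(Consumer<? super T>): one argument per call.
        tags.forEach(tag -> System.out.println("tag: " + tag));

        // Map#forEach(BiConsumer<? super K, ? super V>): key and value per call.
        categories.forEach((name, enabled) -> {
            if (enabled) {
                System.out.println("enabled category: " + name);
            }
        });
    }
}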
From source file: org.hyperledger.fabric.sdk.ServiceDiscovery.java
SDNetwork networkDiscovery(TransactionContext ltransactionContext, boolean force) {
    logger.trace(format("Network discovery force: %b", force));
    ArrayList<Peer> speers = new ArrayList<>(serviceDiscoveryPeers);
    Collections.shuffle(speers);
    SDNetwork ret = sdNetwork;
    if (!force && null != ret
            && ret.discoveryTime + SERVICE_DISCOVER_FREQ_SECONDS * 1000 > System.currentTimeMillis()) {
        return ret;
    }
    ret = null;

    for (Peer serviceDiscoveryPeer : speers) {
        try {
            SDNetwork lsdNetwork = new SDNetwork();
            final byte[] clientTLSCertificateDigest = serviceDiscoveryPeer.getClientTLSCertificateDigest();
            logger.info(format("Channel %s doing discovery with peer: %s", channelName,
                    serviceDiscoveryPeer.toString()));
            if (null == clientTLSCertificateDigest) {
                throw new RuntimeException(format("Channel %s, peer %s requires mutual tls for service discovery.",
                        channelName, serviceDiscoveryPeer));
            }

            ByteString clientIdent = ltransactionContext.getIdentity().toByteString();
            ByteString tlshash = ByteString.copyFrom(clientTLSCertificateDigest);
            Protocol.AuthInfo authentication = Protocol.AuthInfo.newBuilder().setClientIdentity(clientIdent)
                    .setClientTlsCertHash(tlshash).build();

            List<Protocol.Query> fq = new ArrayList<>(2);
            fq.add(Protocol.Query.newBuilder().setChannel(channelName)
                    .setConfigQuery(Protocol.ConfigQuery.newBuilder().build()).build());
            fq.add(Protocol.Query.newBuilder().setChannel(channelName)
                    .setPeerQuery(Protocol.PeerMembershipQuery.newBuilder().build()).build());

            Protocol.Request request = Protocol.Request.newBuilder().addAllQueries(fq)
                    .setAuthentication(authentication).build();
            ByteString payloadBytes = request.toByteString();
            ByteString signatureBytes = ltransactionContext.signByteStrings(payloadBytes);
            Protocol.SignedRequest sr = Protocol.SignedRequest.newBuilder().setPayload(payloadBytes)
                    .setSignature(signatureBytes).build();

            if (IS_TRACE_LEVEL && null != diagnosticFileDumper) { // dump protobuf we sent
                logger.trace(format("Service discovery channel %s %s service chaincode query sent %s",
                        channelName, serviceDiscoveryPeer,
                        diagnosticFileDumper.createDiagnosticProtobufFile(sr.toByteArray())));
            }

            final Protocol.Response response = serviceDiscoveryPeer.sendDiscoveryRequestAsync(sr)
                    .get(SERVICE_DISCOVERY_WAITTIME, TimeUnit.MILLISECONDS);

            if (IS_TRACE_LEVEL && null != diagnosticFileDumper) { // dump protobuf we get
                logger.trace(format("Service discovery channel %s %s service discovery returned %s",
                        channelName, serviceDiscoveryPeer,
                        diagnosticFileDumper.createDiagnosticProtobufFile(response.toByteArray())));
            }

            serviceDiscoveryPeer.hasConnected();

            final List<Protocol.QueryResult> resultsList = response.getResultsList();
            Protocol.QueryResult queryResult = resultsList.get(0); // config query
            if (queryResult.getResultCase().getNumber() == Protocol.QueryResult.ERROR_FIELD_NUMBER) {
                logger.warn(format("Channel %s peer: %s error during service discovery %s", channelName,
                        serviceDiscoveryPeer.toString(), queryResult.getError().getContent()));
                continue;
            }
            Protocol.QueryResult queryResult2 = resultsList.get(1); // peer membership query
            if (queryResult2.getResultCase().getNumber() == Protocol.QueryResult.ERROR_FIELD_NUMBER) {
                logger.warn(format("Channel %s peer %s service discovery error %s", channelName,
                        serviceDiscoveryPeer.toString(), queryResult2.getError().getContent()));
                continue;
            }

            Protocol.ConfigResult configResult = queryResult.getConfigResult();
            Map<String, MspConfig.FabricMSPConfig> msps = configResult.getMspsMap();
            Set<ByteString> cbbs = new HashSet<>(msps.size() * 4);

            for (Map.Entry<String, MspConfig.FabricMSPConfig> i : msps.entrySet()) {
                final MspConfig.FabricMSPConfig value = i.getValue();
                final String mspid = value.getName();
                cbbs.addAll(value.getRootCertsList());
                cbbs.addAll(value.getIntermediateCertsList());
                value.getTlsRootCertsList().forEach(bytes -> lsdNetwork.addTlsCert(mspid, bytes.toByteArray()));
                value.getTlsIntermediateCertsList()
                        .forEach(bytes -> lsdNetwork.addTlsIntermCert(mspid, bytes.toByteArray()));
            }

            List<byte[]> toaddCerts = new LinkedList<>();
            synchronized (certs) {
                cbbs.forEach(bytes -> {
                    if (certs.add(bytes)) {
                        toaddCerts.add(bytes.toByteArray());
                    }
                });
            }
            if (!toaddCerts.isEmpty()) { // add them to crypto store.
                channel.client.getCryptoSuite().loadCACertificatesAsBytes(toaddCerts);
            }

            Map<String, SDOrderer> ordererEndpoints = new HashMap<>();
            Map<String, Protocol.Endpoints> orderersMap = configResult.getOrderersMap();
            for (Map.Entry<String, Protocol.Endpoints> i : orderersMap.entrySet()) {
                final String mspid = i.getKey();
                Protocol.Endpoints value = i.getValue();
                for (Protocol.Endpoint l : value.getEndpointList()) {
                    logger.trace(format("Channel %s discovered orderer MSPID: %s, endpoint: %s:%s", channelName,
                            mspid, l.getHost(), l.getPort()));
                    String endpoint = (l.getHost() + ":" + l.getPort()).trim().toLowerCase();
                    final SDOrderer sdOrderer = new SDOrderer(mspid, endpoint, lsdNetwork.getTlsCerts(mspid),
                            lsdNetwork.getTlsIntermediateCerts(mspid));
                    ordererEndpoints.put(sdOrderer.getEndPoint(), sdOrderer);
                }
            }
            lsdNetwork.ordererEndpoints = ordererEndpoints;

            Protocol.PeerMembershipResult membership = queryResult2.getMembers();
            lsdNetwork.endorsers = new HashMap<>();
            for (Map.Entry<String, Protocol.Peers> peers : membership.getPeersByOrgMap().entrySet()) {
                final String mspId = peers.getKey();
                Protocol.Peers peer = peers.getValue();
                for (Protocol.Peer pp : peer.getPeersList()) {
                    SDEndorser ppp = new SDEndorser(pp, lsdNetwork.getTlsCerts(mspId),
                            lsdNetwork.getTlsIntermediateCerts(mspId));
                    logger.trace(format("Channel %s discovered peer MSPID: %s, endpoint: %s", channelName, mspId,
                            ppp.getEndpoint()));
                    lsdNetwork.endorsers.put(ppp.getEndpoint(), ppp);
                }
            }

            lsdNetwork.discoveryTime = System.currentTimeMillis();
            sdNetwork = lsdNetwork;
            ret = lsdNetwork;
            break;
        } catch (Exception e) {
            logger.warn(format("Channel %s peer %s service discovery error %s", channelName, serviceDiscoveryPeer,
                    e.getMessage()));
        }
    }
    logger.debug(format("Channel %s service discovery completed: %b", channelName, ret != null));
    return ret;
}
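One detail worth noting: the forEach that filters certificates mutates the shared certs set, so it runs inside a synchronized block, because forEach itself provides no synchronization. A standalone sketch of that dedupe-under-lock pattern (all names are illustrative):

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class DedupeUnderLockSketch {
    // Shared across threads; guarded by its own monitor, as in the example above.
    private final Set<String> seenCerts = new HashSet<>();

    List<String> collectNewCerts(Set<String> incoming) {
        List<String> fresh = new ArrayList<>();
        synchronized (seenCerts) {
            // Set#add returns true only for elements not already present,
            // so one pass both dedupes and records.
            incoming.forEach(cert -> {
                if (seenCerts.add(cert)) {
                    fresh.add(cert);
                }
            });
        }
        return fresh;
    }
}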
From source file: org.roda.wui.api.controllers.BrowserHelper.java
public static String retrieveDescriptiveMetadataPreview(SupportedMetadataTypeBundle bundle)
        throws GenericException {
    String rawTemplate = bundle.getTemplate();
    String result;
    if (StringUtils.isNotBlank(rawTemplate)) {
        Map<String, String> data = new HashMap<>();
        Set<MetadataValue> values = bundle.getValues();
        if (values != null) {
            values.forEach(metadataValue -> {
                String val = metadataValue.get("value");
                if (val != null) {
                    // The whitespace-stripped copy is only used for the emptiness test;
                    // the original value is what gets stored.
                    val = val.replaceAll("\\s", "");
                    if (!"".equals(val)) {
                        data.put(metadataValue.get("name"), metadataValue.get("value"));
                    }
                }
            });
        }
        result = HandlebarsUtility.executeHandlebars(rawTemplate, data);
        // result = RodaUtils.indentXML(result);
    } else {
        result = rawTemplate;
    }
    return result;
}
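The filter-then-put shape above can also be written against a stream, which keeps the filtering out of the lambda body. A minimal sketch, with Map entries standing in for the MetadataValue objects (all sample data is illustrative):

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class FilteredCopySketch {
    public static void main(String[] args) {
        // Stand-ins for MetadataValue entries: name -> value.
        Set<Map.Entry<String, String>> values = Set.of(
                Map.entry("title", "My Document"),
                Map.entry("notes", "   "));

        Map<String, String> data = new HashMap<>();
        values.stream()
                .filter(e -> e.getValue() != null && !e.getValue().replaceAll("\\s", "").isEmpty())
                .forEach(e -> data.put(e.getKey(), e.getValue()));

        System.out.println(data); // {title=My Document}
    }
}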
From source file: org.apache.zookeeper.MockZooKeeper.java
@Override
public void delete(final String path, int version, final VoidCallback cb, final Object ctx) {
    mutex.lock();
    if (executor.isShutdown()) {
        mutex.unlock();
        cb.processResult(KeeperException.Code.SESSIONEXPIRED.intValue(), path, ctx);
        return;
    }

    final Set<Watcher> toNotifyDelete = Sets.newHashSet();
    toNotifyDelete.addAll(watchers.get(path));

    final Set<Watcher> toNotifyParent = Sets.newHashSet();
    final String parent = path.substring(0, path.lastIndexOf("/"));
    if (!parent.isEmpty()) {
        toNotifyParent.addAll(watchers.get(parent));
    }

    executor.execute(() -> {
        mutex.lock();
        if (getProgrammedFailStatus()) {
            mutex.unlock();
            cb.processResult(failReturnCode.intValue(), path, ctx);
        } else if (stopped) {
            mutex.unlock();
            cb.processResult(KeeperException.Code.CONNECTIONLOSS.intValue(), path, ctx);
        } else if (!tree.containsKey(path)) {
            mutex.unlock();
            cb.processResult(KeeperException.Code.NONODE.intValue(), path, ctx);
        } else if (hasChildren(path)) {
            mutex.unlock();
            cb.processResult(KeeperException.Code.NOTEMPTY.intValue(), path, ctx);
        } else {
            if (version != -1) {
                int currentVersion = tree.get(path).getRight();
                if (version != currentVersion) {
                    // Release the lock before failing the callback.
                    mutex.unlock();
                    cb.processResult(KeeperException.Code.BADVERSION.intValue(), path, ctx);
                    return;
                }
            }
            tree.remove(path);
            mutex.unlock();
            cb.processResult(0, path, ctx);
            toNotifyDelete.forEach(watcher -> watcher
                    .process(new WatchedEvent(EventType.NodeDeleted, KeeperState.SyncConnected, path)));
            toNotifyParent.forEach(watcher -> watcher.process(
                    new WatchedEvent(EventType.NodeChildrenChanged, KeeperState.SyncConnected, parent)));
        }
    });

    watchers.removeAll(path);
    mutex.unlock();
}
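The snapshot-then-notify shape here is the key concurrency idea: copy the watcher sets while holding the lock, release the lock, then forEach over the stable snapshots so callbacks never run under the mutex. A standalone sketch of the same pattern (the listener type and names are illustrative):

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Consumer;

public class SnapshotNotifySketch {
    private final ReentrantLock mutex = new ReentrantLock();
    private final Set<Consumer<String>> listeners = new HashSet<>();

    void addListener(Consumer<String> l) {
        mutex.lock();
        try {
            listeners.add(l);
        } finally {
            mutex.unlock();
        }
    }

    void fire(String event) {
        // Copy under the lock so concurrent add/remove cannot disturb iteration.
        Set<Consumer<String>> snapshot;
        mutex.lock();
        try {
            snapshot = new HashSet<>(listeners);
        } finally {
            mutex.unlock();
        }
        // Notify outside the lock; a slow or re-entrant listener cannot deadlock us.
        snapshot.forEach(l -> l.accept(event));
    }
}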