List of usage examples for java.util.Set#toString()
public String toString()
From source file:com.simpligility.maven.plugins.androidndk.phase05compile.NdkBuildMojo.java
/**
 * Runs an NDK build (ndk-build) for the given compile command.
 *
 * <p>Resolves the native library dependencies, generates a plugin makefile from them,
 * exports it (plus a "capture" temp file and any configured system properties) as
 * environment variables for the build, assembles the ndk-build command line, executes
 * it, and finally attaches the compiled libraries unless that is disabled.
 *
 * @param compileCommand the command describing architectures and native dependencies
 * @throws MojoExecutionException if any step of the build fails (original cause chained)
 */
private void compile(CompileCommand compileCommand) throws MojoExecutionException {
    MakefileHelper.MakefileResponse makefileResponse = null;
    try {
        // Start setting up the command line to be executed
        final CommandExecutor executor = CommandExecutor.Factory.createDefaultCommmandExecutor();
        // Add an error listener to the build - this allows the build to conditionally fail
        // depending on a) the output of the build b) whether or not build errors (output on
        // stderr) should be ignored and c) whether the pattern matches or not
        executor.setErrorListener(getNdkErrorListener());
        final Set<Artifact> nativeLibraryArtifacts = compileCommand.getNativeLibraryDepedencies();
        // If there are any static libraries the code needs to link to, include those in the make file
        final Set<Artifact> resolvedNativeLibraryArtifacts = getArtifactResolverHelper()
                .resolveArtifacts(nativeLibraryArtifacts);
        getLog().debug("resolveArtifacts found " + resolvedNativeLibraryArtifacts.size() + ": "
                + resolvedNativeLibraryArtifacts.toString());
        final File buildFolder = new File(buildDirectory, "makefile");
        buildFolder.mkdirs();
        final File androidMavenMakefile = new File(buildFolder, "android_maven_plugin_makefile.mk");
        final MakefileHelper makefileHelper = new MakefileHelper(project, getLog(), getArtifactResolverHelper(),
                harArtifactHandler, unpackedLibsFolder, buildDirectory);
        // Build the request describing what the generated makefile must cover.
        MakefileHelper.MakefileRequest makefileRequest = new MakefileHelper.MakefileRequest();
        makefileRequest.artifacts = resolvedNativeLibraryArtifacts;
        makefileRequest.defaultNDKArchitecture = "armeabi";
        makefileRequest.useHeaderArchives = useHeaderArchives;
        makefileRequest.ignoreHeaderFilesArchives = ignoreHeaderFilesArchives;
        makefileRequest.leaveTemporaryBuildArtifacts = leaveTemporaryBuildArtifacts;
        makefileRequest.architectures = compileCommand.getResolvedArchitectures();
        makefileResponse = makefileHelper.createMakefileFromArtifacts(makefileRequest);
        // Write the generated makefile to disk; close the stream even on copy failure.
        final FileOutputStream output = new FileOutputStream(androidMavenMakefile);
        try {
            IOUtil.copy(makefileResponse.getMakeFile(), output);
        } finally {
            output.close();
        }
        // Add the path to the generated makefile - this is picked up by the build
        // (by an include from the user)
        executor.addEnvironment("ANDROID_MAVEN_PLUGIN_MAKEFILE", androidMavenMakefile.getAbsolutePath());
        setupNativeLibraryEnvironment(executor, makefileResponse);
        // Adds the location of the Makefile capturer file - this file will after the build include
        // things like header files, flags etc. It is processed after the build to retrieve the
        // headers and also capture flags etc ...
        final File makefileCaptureFile = File.createTempFile("android_maven_plugin_makefile_captures", ".tmp",
                buildDirectory);
        if (!leaveTemporaryBuildArtifacts) {
            makefileCaptureFile.deleteOnExit();
        }
        executor.addEnvironment(MakefileHelper.MAKEFILE_CAPTURE_FILE, makefileCaptureFile.getAbsolutePath());
        // Add any defined system properties
        if (systemProperties != null && !systemProperties.isEmpty()) {
            for (Map.Entry<String, String> entry : systemProperties.entrySet()) {
                executor.addEnvironment(entry.getKey(), entry.getValue());
            }
        }
        executor.setLogger(this.getLog());
        // Setup the command line for the make
        final List<String> commands = new ArrayList<String>();
        configureArchitectures(commands, compileCommand.getResolvedArchitectures());
        configureBuildDirectory(compileCommand, commands);
        configureMakefile(commands);
        configureApplicationMakefile(commands);
        configureMaxJobs(commands);
        // Only allow configuration of the toolchain if the architecture being built is a single one!
        if (compileCommand.getResolvedArchitectures().length == 1) {
            configureNdkToolchain(compileCommand.getResolvedArchitectures()[0], commands);
        }
        configureAdditionalCommands(commands);
        // If a build target is specified, tag that onto the command line as the very last
        // of the parameters; default make target is "all".
        commands.add(target != null ? target : "all");
        final String ndkBuildPath = resolveNdkBuildExecutable();
        getLog().debug(ndkBuildPath + " " + commands.toString());
        getLog().info("Executing NDK make at : " + buildDirectory);
        executor.setCaptureStdOut(true);
        executor.executeCommand(ndkBuildPath, commands, buildDirectory, true);
        getLog().debug("Executed NDK make at : " + buildDirectory);
        if (attachLibrariesArtifacts) {
            // Attempt to attach the native libraries (shared only)
            for (int i = 0; i < compileCommand.getResolvedArchitectures().length; i++) {
                String architecture = compileCommand.getResolvedArchitectures()[i];
                processCompiledArtifacts(compileCommand, architecture, makefileCaptureFile);
            }
        } else {
            getLog().info("Will skip attaching compiled libraries as per configuration");
        }
    } catch (Exception e) {
        throw new MojoExecutionException("Failure during build: " + e.getMessage(), e);
    } finally {
        // Always clean up temporary build artifacts, even on failure.
        cleanupAfterBuild(makefileResponse);
    }
}
From source file:org.apache.ambari.server.controller.AmbariCustomCommandExecutionHelper.java
/**
 * Processes a decommission command. Modifies the host components as needed and then
 * calls into the implementation of a custom command.
 *
 * <p>Validates the request (no explicit target hosts, no host in both include and
 * exclude lists, valid service/master component, supported master→slave mapping),
 * filters out hosts in Maintenance state, marks slave components as
 * DECOMMISSIONED/INSERVICE (optionally aligning their maintenance state), and finally
 * schedules the custom command against the master host(s).
 *
 * @param actionExecutionContext context carrying cluster name, action name and parameters
 * @param resourceFilter         identifies the service/master component; must not carry hosts
 * @param stage                  stage to which the custom command action is added
 * @param retryAllowed           whether the scheduled command may be retried
 * @throws AmbariException on any validation failure
 */
private void addDecommissionAction(final ActionExecutionContext actionExecutionContext,
        final RequestResourceFilter resourceFilter, Stage stage, boolean retryAllowed) throws AmbariException {
    String clusterName = actionExecutionContext.getClusterName();
    final Cluster cluster = clusters.getCluster(clusterName);
    final String serviceName = resourceFilter.getServiceName();
    String masterCompType = resourceFilter.getComponentName();
    List<String> hosts = resourceFilter.getHostNames();
    // Decommission targets are derived from the include/exclude parameters, never from
    // explicit hosts on the filter.
    if (hosts != null && !hosts.isEmpty()) {
        throw new AmbariException("Decommission command cannot be issued with " + "target host(s) specified.");
    }
    // Get all hosts to be added and removed
    Set<String> excludedHosts = getHostList(actionExecutionContext.getParameters(), DECOM_EXCLUDED_HOSTS);
    Set<String> includedHosts = getHostList(actionExecutionContext.getParameters(), DECOM_INCLUDED_HOSTS);
    // A host may appear in at most one of the two lists.
    Set<String> cloneSet = new HashSet<String>(excludedHosts);
    cloneSet.retainAll(includedHosts);
    if (cloneSet.size() > 0) {
        throw new AmbariException("Same host cannot be specified for inclusion "
                + "as well as exclusion. Hosts: " + cloneSet.toString());
    }
    Service service = cluster.getService(serviceName);
    if (service == null) {
        throw new AmbariException("Specified service " + serviceName + " is not a valid/deployed service.");
    }
    Map<String, ServiceComponent> svcComponents = service.getServiceComponents();
    if (!svcComponents.containsKey(masterCompType)) {
        throw new AmbariException(
                "Specified component " + masterCompType + " does not belong to service " + serviceName + ".");
    }
    ServiceComponent masterComponent = svcComponents.get(masterCompType);
    if (!masterComponent.isMasterComponent()) {
        throw new AmbariException(
                "Specified component " + masterCompType + " is not a MASTER for service " + serviceName + ".");
    }
    if (!masterToSlaveMappingForDecom.containsKey(masterCompType)) {
        throw new AmbariException("Decommissioning is not supported for " + masterCompType);
    }
    // Find the slave component: either explicitly requested (must match the mapping)
    // or derived from the master component.
    String slaveCompStr = actionExecutionContext.getParameters().get(DECOM_SLAVE_COMPONENT);
    final String slaveCompType;
    if (slaveCompStr == null || slaveCompStr.equals("")) {
        slaveCompType = masterToSlaveMappingForDecom.get(masterCompType);
    } else {
        slaveCompType = slaveCompStr;
        if (!masterToSlaveMappingForDecom.get(masterCompType).equals(slaveCompType)) {
            throw new AmbariException("Component " + slaveCompType + " is not supported for decommissioning.");
        }
    }
    // The "draining only" flag is only meaningful for HBase region servers.
    String isDrainOnlyRequest = actionExecutionContext.getParameters().get(HBASE_MARK_DRAINING_ONLY);
    if (isDrainOnlyRequest != null && !slaveCompType.equals(Role.HBASE_REGIONSERVER.name())) {
        throw new AmbariException(HBASE_MARK_DRAINING_ONLY + " is not a valid parameter for " + masterCompType);
    }
    // Filtering hosts based on Maintenance State
    MaintenanceStateHelper.HostPredicate hostPredicate = new MaintenanceStateHelper.HostPredicate() {
        @Override
        public boolean shouldHostBeRemoved(final String hostname) throws AmbariException {
            // Get UPDATE_EXCLUDE_FILE_ONLY parameter as string
            String upd_excl_file_only_str = actionExecutionContext.getParameters()
                    .get(UPDATE_EXCLUDE_FILE_ONLY);
            String decom_incl_hosts_str = actionExecutionContext.getParameters().get(DECOM_INCLUDED_HOSTS);
            if ((upd_excl_file_only_str != null && !upd_excl_file_only_str.trim().equals(""))) {
                upd_excl_file_only_str = upd_excl_file_only_str.trim();
            }
            boolean upd_excl_file_only = false;
            // Parse of possible forms of value: true, "true", 'true'
            if (upd_excl_file_only_str != null && !upd_excl_file_only_str.equals("")
                    && (upd_excl_file_only_str.equals("\"true\"") || upd_excl_file_only_str.equals("'true'")
                            || upd_excl_file_only_str.equals("true"))) {
                upd_excl_file_only = true;
            }
            // If we just clear *.exclude and component have been already removed
            // we will skip the maintenance-state check
            if (upd_excl_file_only && decom_incl_hosts_str != null && !decom_incl_hosts_str.trim().equals("")) {
                return upd_excl_file_only;
            } else {
                return !maintenanceStateHelper.isOperationAllowed(cluster,
                        actionExecutionContext.getOperationLevel(), resourceFilter, serviceName, slaveCompType,
                        hostname);
            }
        }
    };
    // Filter excluded hosts
    Set<String> filteredExcludedHosts = new HashSet<String>(excludedHosts);
    Set<String> ignoredHosts = maintenanceStateHelper.filterHostsInMaintenanceState(filteredExcludedHosts,
            hostPredicate);
    if (!ignoredHosts.isEmpty()) {
        String message = String.format("Some hosts (%s) from host exclude list " + "have been ignored "
                + "because components on them are in Maintenance state.", ignoredHosts);
        LOG.debug(message);
    }
    // Filter included hosts
    Set<String> filteredIncludedHosts = new HashSet<String>(includedHosts);
    ignoredHosts = maintenanceStateHelper.filterHostsInMaintenanceState(filteredIncludedHosts, hostPredicate);
    if (!ignoredHosts.isEmpty()) {
        String message = String.format("Some hosts (%s) from host include list " + "have been ignored "
                + "because components on them are in Maintenance state.", ignoredHosts);
        LOG.debug(message);
    }
    // Decommission only if the sch is in state STARTED or INSTALLED
    for (ServiceComponentHost sch : svcComponents.get(slaveCompType).getServiceComponentHosts().values()) {
        if (filteredExcludedHosts.contains(sch.getHostName()) && !"true".equals(isDrainOnlyRequest)
                && sch.getState() != State.STARTED) {
            throw new AmbariException("Component " + slaveCompType + " on host " + sch.getHostName()
                    + " cannot be " + "decommissioned as its not in STARTED state. Aborting the whole request.");
        }
    }
    String alignMtnStateStr = actionExecutionContext.getParameters().get(ALIGN_MAINTENANCE_STATE);
    boolean alignMtnState = "true".equals(alignMtnStateStr);
    // Set/reset decommissioned flag on all components
    List<String> listOfExcludedHosts = new ArrayList<String>();
    for (ServiceComponentHost sch : svcComponents.get(slaveCompType).getServiceComponentHosts().values()) {
        if (filteredExcludedHosts.contains(sch.getHostName())) {
            sch.setComponentAdminState(HostComponentAdminState.DECOMMISSIONED);
            listOfExcludedHosts.add(sch.getHostName());
            if (alignMtnState) {
                sch.setMaintenanceState(MaintenanceState.ON);
            }
            LOG.info(
                    "Decommissioning " + slaveCompType + " and marking Maintenance=ON on " + sch.getHostName());
        }
        if (filteredIncludedHosts.contains(sch.getHostName())) {
            sch.setComponentAdminState(HostComponentAdminState.INSERVICE);
            if (alignMtnState) {
                sch.setMaintenanceState(MaintenanceState.OFF);
            }
            LOG.info("Recommissioning " + slaveCompType + " and marking Maintenance=OFF on " + sch.getHostName());
        }
    }
    // In the event there are more than one master host the following logic is applied
    // -- HDFS/DN, MR1/TT, YARN/NM call refresh node on both
    // -- HBASE/RS call only on one host
    // Ensure host is active: the last STARTED master wins as primary candidate.
    Map<String, ServiceComponentHost> masterSchs = masterComponent.getServiceComponentHosts();
    String primaryCandidate = null;
    for (String hostName : masterSchs.keySet()) {
        if (primaryCandidate == null) {
            primaryCandidate = hostName;
        } else {
            ServiceComponentHost sch = masterSchs.get(hostName);
            if (sch.getState() == State.STARTED) {
                primaryCandidate = hostName;
            }
        }
    }
    StringBuilder commandDetail = getReadableDecommissionCommandDetail(actionExecutionContext,
            filteredIncludedHosts, listOfExcludedHosts);
    // Schedule the custom command once per master host (HBase: only on the primary).
    for (String hostName : masterSchs.keySet()) {
        RequestResourceFilter commandFilter = new RequestResourceFilter(serviceName, masterComponent.getName(),
                Collections.singletonList(hostName));
        List<RequestResourceFilter> resourceFilters = new ArrayList<RequestResourceFilter>();
        resourceFilters.add(commandFilter);
        ActionExecutionContext commandContext = new ActionExecutionContext(clusterName,
                actionExecutionContext.getActionName(), resourceFilters);
        String clusterHostInfoJson = StageUtils.getGson().toJson(
                StageUtils.getClusterHostInfo(clusters.getHostsForCluster(cluster.getClusterName()), cluster));
        // Reset cluster host info as it has changed
        stage.setClusterHostInfo(clusterHostInfoJson);
        Map<String, String> commandParams = new HashMap<String, String>();
        if (serviceName.equals(Service.Type.HBASE.name())) {
            commandParams.put(DECOM_EXCLUDED_HOSTS, StringUtils.join(listOfExcludedHosts, ','));
            if ((isDrainOnlyRequest != null) && isDrainOnlyRequest.equals("true")) {
                commandParams.put(HBASE_MARK_DRAINING_ONLY, isDrainOnlyRequest);
            } else {
                commandParams.put(HBASE_MARK_DRAINING_ONLY, "false");
            }
        }
        if (!serviceName.equals(Service.Type.HBASE.name()) || hostName.equals(primaryCandidate)) {
            commandParams.put(UPDATE_EXCLUDE_FILE_ONLY, "false");
            addCustomCommandAction(commandContext, commandFilter, stage, commandParams, commandDetail.toString(),
                    retryAllowed);
        }
    }
}
From source file:ai.grakn.graql.internal.reasoner.AtomicTest.java
@Test public void testUnification_MatchAllParentAtom() { GraknGraph graph = unificationTestSet.graph(); String parentString = "{$r($a, $x);}"; RelationAtom parent = (RelationAtom) ReasonerQueries.atomic(conjunction(parentString, graph), graph) .getAtom();//from ww w. j av a2 s . c o m PatternAdmin body = graph.graql().parsePattern("(role1: $z, role2: $b) isa relation1").admin(); PatternAdmin head = graph.graql().parsePattern("(role1: $z, role2: $b) isa relation1").admin(); InferenceRule rule = new InferenceRule(graph.admin().getMetaRuleInference().putRule(body, head), graph); Unifier unifier = rule.getUnifier(parent); Set<Var> vars = rule.getHead().getAtom().getVarNames(); Set<Var> correctVars = Sets.newHashSet(var("r"), var("a"), var("x")); assertTrue(!vars.contains(var(""))); assertTrue("Variables not in subset relation:\n" + correctVars.toString() + "\n" + vars.toString(), unifier.values().containsAll(correctVars)); }
From source file:br.com.bluesoft.pronto.controller.TicketController.java
private void definirDesenvolvedores(final Ticket ticket, final String[] desenvolvedor) throws SegurancaException { final Set<Usuario> desenvolvedoresAntigos = new TreeSet<Usuario>( ticketDao.listarDesenvolvedoresDoTicket(ticket.getTicketKey())); if (desenvolvedor != null && desenvolvedor.length > 0) { ticket.setDesenvolvedores(new TreeSet<Usuario>()); for (final String username : desenvolvedor) { ticket.addDesenvolvedor((Usuario) sessionFactory.getCurrentSession().get(Usuario.class, username)); }// ww w . ja va 2s . c om } final String desenvolvedoresAntigosStr = desenvolvedoresAntigos == null || desenvolvedoresAntigos.size() == 0 ? "nenhum" : desenvolvedoresAntigos.toString(); final String desenvolvedoresNovosStr = ticket.getDesenvolvedores() == null || ticket.getDesenvolvedores().size() == 0 ? "nenhum" : ticket.getDesenvolvedores().toString(); if (!desenvolvedoresAntigosStr.equals(desenvolvedoresNovosStr)) { ticket.addLogDeAlteracao("desenvolvedores", desenvolvedoresAntigosStr, desenvolvedoresNovosStr); } }
From source file:org.cloudfoundry.identity.uaa.oauth.ClientAdminEndpoints.java
/**
 * Validates (and normalizes) the given client registration.
 *
 * <p>Checks reserved client ids on create, grant types against {@code VALID_GRANTS},
 * and — for non-admin callers — restricts grant types, scopes (must be prefixed by
 * the caller's or client's id, or be in the caller's scope) and authorities. Also
 * normalizes the client: adds {@code refresh_token} where implied, defaults
 * authorities/scope to {@code uaa.none}, pins resource ids to {@code none}, and
 * enforces secret rules for implicit/create cases.
 *
 * @param prototype the submitted client details (copied, not mutated)
 * @param create    whether this is a create (stricter checks) or an update
 * @return the validated, normalized client
 * @throws InvalidClientDetailsException if any rule is violated
 */
private ClientDetails validateClient(ClientDetails prototype, boolean create) {
    // Work on a copy so the caller's prototype is never mutated.
    BaseClientDetails client = new BaseClientDetails(prototype);
    client.setAdditionalInformation(prototype.getAdditionalInformation());
    String clientId = client.getClientId();
    if (create && reservedClientIds.contains(clientId)) {
        throw new InvalidClientDetailsException("Not allowed: " + clientId + " is a reserved client_id");
    }
    Set<String> requestedGrantTypes = client.getAuthorizedGrantTypes();
    if (requestedGrantTypes.isEmpty()) {
        throw new InvalidClientDetailsException(
                "An authorized grant type must be provided. Must be one of: " + VALID_GRANTS.toString());
    }
    for (String grant : requestedGrantTypes) {
        if (!VALID_GRANTS.contains(grant)) {
            throw new InvalidClientDetailsException(
                    grant + " is not an allowed grant type. Must be one of: " + VALID_GRANTS.toString());
        }
    }
    // authorization_code and password implicitly need refresh_token; add it if missing.
    if ((requestedGrantTypes.contains("authorization_code") || requestedGrantTypes.contains("password"))
            && !requestedGrantTypes.contains("refresh_token")) {
        logger.debug("requested grant type missing refresh_token: " + clientId);
        requestedGrantTypes.add("refresh_token");
    }
    if (!securityContextAccessor.isAdmin()) {
        // Not admin, so be strict with grant types and scopes
        for (String grant : requestedGrantTypes) {
            if (NON_ADMIN_INVALID_GRANTS.contains(grant)) {
                throw new InvalidClientDetailsException(
                        grant + " is not an allowed grant type for non-admin caller.");
            }
        }
        if (requestedGrantTypes.contains("implicit") && requestedGrantTypes.contains("authorization_code")) {
            throw new InvalidClientDetailsException(
                    "Not allowed: implicit grant type is not allowed together with authorization_code");
        }
        String callerId = securityContextAccessor.getClientId();
        if (callerId != null) {
            // New scopes are allowed if they are for the caller or the new client.
            String callerPrefix = callerId + ".";
            String clientPrefix = clientId + ".";
            ClientDetails caller = clientDetailsService.retrieve(callerId);
            Set<String> validScope = caller.getScope();
            for (String scope : client.getScope()) {
                if (scope.startsWith(callerPrefix) || scope.startsWith(clientPrefix)) {
                    // Allowed
                    continue;
                }
                if (!validScope.contains(scope)) {
                    throw new InvalidClientDetailsException(scope + " is not an allowed scope for caller="
                            + callerId + ". Must have prefix in [" + callerPrefix + "," + clientPrefix
                            + "] or be one of: " + validScope.toString());
                }
            }
        } else {
            // No client caller. Shouldn't happen in practice, but let's be defensive.
            // New scopes are allowed if they are for the caller or the new client.
            String clientPrefix = clientId + ".";
            for (String scope : client.getScope()) {
                if (!scope.startsWith(clientPrefix)) {
                    throw new InvalidClientDetailsException(
                            scope + " is not an allowed scope for null caller and client_id=" + clientId
                                    + ". Must start with '" + clientPrefix + "'");
                }
            }
        }
        Set<String> validAuthorities = new HashSet<String>(NON_ADMIN_VALID_AUTHORITIES);
        if (requestedGrantTypes.contains("client_credentials")) {
            // If client_credentials is used then the client might be a resource server
            validAuthorities.add("uaa.resource");
        }
        for (String authority : AuthorityUtils.authorityListToSet(client.getAuthorities())) {
            if (!validAuthorities.contains(authority)) {
                throw new InvalidClientDetailsException(authority + " is not an allowed authority for caller="
                        + callerId + ". Must be one of: " + validAuthorities.toString());
            }
        }
    }
    if (client.getAuthorities().isEmpty()) {
        client.setAuthorities(AuthorityUtils.commaSeparatedStringToAuthorityList("uaa.none"));
    }
    // The UAA does not allow or require resource ids to be registered
    // because they are determined dynamically
    client.setResourceIds(Collections.singleton("none"));
    if (client.getScope().isEmpty()) {
        client.setScope(Collections.singleton("uaa.none"));
    }
    if (requestedGrantTypes.contains("implicit")) {
        if (StringUtils.hasText(client.getClientSecret())) {
            throw new InvalidClientDetailsException("Implicit grant should not have a client_secret");
        }
    }
    if (create) {
        // Only check for missing secret if client is being created.
        if ((requestedGrantTypes.contains("client_credentials")
                || requestedGrantTypes.contains("authorization_code"))
                && !StringUtils.hasText(client.getClientSecret())) {
            throw new InvalidClientDetailsException(
                    "Client secret is required for client_credentials and authorization_code grant types");
        }
    }
    return client;
}
From source file:com.redhat.rhn.frontend.xmlrpc.user.UserHandler.java
/**
 * Validates that the selected role is among the ones the given user may assign.
 *
 * @param role the role label the caller wants to assign or revoke
 * @param user the logged-in user attempting the role change
 * @throws NoSuchRoleException if the role is not assignable/revokable by this user
 */
private void validateRoleInputs(String role, User user) {
    Set<String> assignableRoles = getAssignableRoles(user);
    if (!assignableRoles.contains(role)) {
        String msg = "Role with the label [%s] cannot be "
                + "assigned/revoked from the user."
                + " Possible Roles assignable/revokable by this user %s";
        // String.format applies String.valueOf (i.e. toString) to %s arguments,
        // so passing the set directly is equivalent to the former explicit
        // assignableRoles.toString() call.
        throw new NoSuchRoleException(String.format(msg, role, assignableRoles));
    }
}
From source file:org.talend.dataprofiler.core.ui.dialog.MatchRuleElementTreeSelectionDialog.java
/**
 * Installs the selection validator for this dialog.
 *
 * <p>The validator rejects multi/empty selections, empty rules (no block keys and no
 * match rules), and rules whose keys duplicate the current analysis' keys; it warns
 * when no input column matches a rule key.
 */
private void addValidator() {
    setValidator(new ISelectionStatusValidator() {

        /**
         * Validates the selected rule: exactly one selection, not empty, no
         * duplicated keys; emits a warning status when keys match no input column.
         */
        public IStatus validate(Object[] selection) {
            IStatus status = new Status(IStatus.OK, CorePlugin.PLUGIN_ID, StringUtils.EMPTY);
            if (selection == null || (selection != null && selection.length > 1)) {
                // Exactly one rule must be selected.
                status = new Status(IStatus.ERROR, CorePlugin.PLUGIN_ID,
                        DefaultMessagesImpl.getString("MatchRuleCheckedTreeSelectionDialog.validate")); //$NON-NLS-1$
                return status;
            } else {
                // when the selected rule has no match & block keys, not validate
                // (has block, no match, can validate)
                for (Object selectObject : selection) {
                    MatchRuleDefinition matchRuleDef = null;
                    if (selectObject instanceof RuleRepNode) {
                        RuleRepNode node = (RuleRepNode) selectObject;
                        matchRuleDef = (MatchRuleDefinition) node.getRule();
                    }
                    if (matchRuleDef != null) {
                        if (isEmptyRule(matchRuleDef)) {
                            status = new Status(IStatus.ERROR, CorePlugin.PLUGIN_ID, DefaultMessagesImpl
                                    .getString("MatchRuleCheckedTreeSelectionDialog.emptyRule")); //$NON-NLS-1$
                            return status;
                        }
                        // check if exist duplicated Match Keys
                        Set<String> duplicatedKeys = hasDuplicatedKeys(matchRuleDef);
                        if (!duplicatedKeys.isEmpty()) {
                            status = new Status(IStatus.ERROR, CorePlugin.PLUGIN_ID,
                                    DefaultMessagesImpl.getString(
                                            "MatchRuleCheckedTreeSelectionDialog.duplicatedMatchKey", //$NON-NLS-1$
                                            duplicatedKeys.toString()));
                            return status;
                        }
                        // For components tMatchGroup and tRecordMatching, importing rules whose
                        // algorithm is "T_Swoosh" used to be blocked. NOTE: the code below is a
                        // temporary solution kept for reference; t-swoosh import support for
                        // tMatchGroup and tRecordMatching is planned.
                        // if ((dialogType == MATCHGROUP_TYPE || dialogType == RECORD_MATCHING_TYPE)
                        // && T_SWOOSH_ALGORITHM.equals(matchRuleDef.getRecordLinkageAlgorithm())) {
                        // status = new Status(IStatus.ERROR, CorePlugin.PLUGIN_ID,
                        // DefaultMessagesImpl
                        // .getString("The algorithm of this Match Rule is t-swoosh, can't import it now!")); //$NON-NLS-1$
                        // return status;
                        // }
                        // ~~~~~~~~~~
                        if (isNeedColumnWarning(matchRuleDef)) {
                            // Warning only: selection is still valid, but no input column
                            // matches at least one key.
                            String warningMsg = DefaultMessagesImpl
                                    .getString("MatchRuleCheckedTreeSelectionDialog.noColumnMatchWarning"); //$NON-NLS-1$
                            status = new Status(IStatus.WARNING, CorePlugin.PLUGIN_ID, warningMsg);
                        }
                    }
                }
            }
            return status;
        }

        /**
         * Checks every block key and match key of the rule; returns true when at
         * least one key has no matching input column (dialog-type dependent).
         *
         * @param matchRuleDef the selected rule definition
         * @return true if a column-match warning should be shown
         */
        private boolean isNeedColumnWarning(MatchRuleDefinition matchRuleDef) {
            boolean needColumnWarning = false;
            // Block keys are only relevant outside match-group/record-matching dialogs.
            if (dialogType != MATCHGROUP_TYPE && dialogType != RECORD_MATCHING_TYPE) {
                for (BlockKeyDefinition bkd : matchRuleDef.getBlockKeys()) {
                    if (!hasColumnMatchTheKey(bkd)) {
                        needColumnWarning = true;
                        break;
                    }
                }
            }
            // Match keys are only relevant outside the gen-key dialog.
            if (dialogType != GENKEY_TYPE) {
                for (MatchRule rule : matchRuleDef.getMatchRules()) {
                    EList<MatchKeyDefinition> matchKeys = rule.getMatchKeys();
                    for (MatchKeyDefinition mkd : matchKeys) {
                        if (!hasColumnMatchTheKey(mkd)) {
                            needColumnWarning = true;
                            break;
                        }
                    }
                    if (needColumnWarning) {
                        break;
                    }
                }
            }
            return needColumnWarning;
        }

        /**
         * Checks whether any input column name equals the given key's name.
         *
         * @param bkd the key definition to check
         * @return true if some input column matches the key
         */
        private boolean hasColumnMatchTheKey(KeyDefinition bkd) {
            for (String column : inputColumnNames) {
                if (isColumnNameEqualsWithKey(bkd, column)) {
                    return true;
                }
            }
            return false;
        }

        /**
         * Collects rule keys that duplicate the current analysis' keys. If the user
         * chose "overwrite" there is nothing to check. Block keys are only checked
         * for the VSR algorithm; match keys are always checked.
         *
         * @param matchRuleDef the selected rule definition
         * @return the (possibly empty) set of duplicated key names
         */
        private Set<String> hasDuplicatedKeys(MatchRuleDefinition matchRuleDef) {
            Set<String> duplicatedKeys = new HashSet<String>();
            if (isOverwrite || currentAnaMatchKeys == null) {
                return duplicatedKeys;
            }
            // check block key first --only for VSR
            if (RecordMatcherType.simpleVSRMatcher.name().equals(matchRuleDef.getRecordLinkageAlgorithm())
                    && matchRuleDef.getBlockKeys() != null && currentAnaBlockKeys != null) {
                for (BlockKeyDefinition blockKey : matchRuleDef.getBlockKeys()) {
                    if (blockKey != null && currentAnaBlockKeys.contains(blockKey.getName())) {
                        duplicatedKeys.add(blockKey.getName());
                    }
                }
            }
            // check match keys
            for (MatchRule rule : matchRuleDef.getMatchRules()) {
                EList<MatchKeyDefinition> matchKeys = rule.getMatchKeys();
                for (MatchKeyDefinition mkd : matchKeys) {
                    if (mkd != null && currentAnaMatchKeys.contains(mkd.getName())) {
                        duplicatedKeys.add(mkd.getName());
                    }
                }
            }
            return duplicatedKeys;
        }

        /**
         * A rule is empty when it has neither block keys nor match rules.
         *
         * @param matchRuleDef the selected rule definition
         * @return true if the rule carries no keys at all
         */
        private boolean isEmptyRule(MatchRuleDefinition matchRuleDef) {
            return (matchRuleDef.getBlockKeys() == null || matchRuleDef.getBlockKeys().size() < 1)
                    && (matchRuleDef.getMatchRules() == null || matchRuleDef.getMatchRules().size() < 1);
        }
    });
}
From source file:com.verigreen.collector.systemtest.SystemTestBase.java
/**
 * Polls the collector until the commit reaches a final verification state or the
 * timeout elapses, then asserts the resulting status is one of the expected ones.
 *
 * @param vgBranchname         the verigreen branch name
 * @param protectedBranch      the protected branch the commit targets
 * @param timeoutForTestInMilis maximum time to wait, in milliseconds
 * @param commitId             the commit under verification
 * @param expectedStatuses     the set of acceptable final statuses
 * @throws InterruptedException if the polling sleep is interrupted
 */
private void waitForVerificationComplete(String vgBranchname, String protectedBranch, long timeoutForTestInMilis,
        String commitId, Set<VerificationStatus> expectedStatuses) throws InterruptedException {
    final RestClient client = new RestClientImpl();
    final CommitItemRequest request = CollectorApi.getCommitItemRequest(vgBranchname, protectedBranch, commitId);
    Assert.assertNotNull(request);

    // Poll every 10 seconds until a final state is reached or the deadline passes.
    final long deadline = System.currentTimeMillis() + timeoutForTestInMilis;
    boolean reachedFinalState = checkIfFinalState(request, client);
    while (!reachedFinalState && System.currentTimeMillis() < deadline) {
        Thread.sleep(1000 * 10);
        reachedFinalState = checkIfFinalState(request, client);
    }

    Assert.assertTrue(String.format("Timeout occurred... Checking commit id: %s, in branch: %s", commitId,
            protectedBranch), reachedFinalState);

    // Fetch the final status once and verify it is acceptable.
    final VerificationStatus resultStatus = client.get(request)
            .getEntity(CommitItemPresentation.class).getStatus();
    Assert.assertTrue(String.format("Expected statuses: %s, result status: %s", expectedStatuses.toString(),
            resultStatus), expectedStatuses.contains(resultStatus));
}
From source file:org.apache.lens.cube.parse.join.AutoJoinContext.java
/**
 * Finds the minimum-cost join clause covering the queried dimensions and returns the
 * extra (optional) dimension tables that clause pulls in.
 *
 * <p>Also records the chosen clause (per storage candidate or as the global min-cost
 * clause) and prunes candidate dim-tables that lack the clause's join-chain columns.
 *
 * @param sc     context providing the storage candidate being considered
 * @param qdims  dimensions referenced by the query; null yields an empty result
 * @param cubeql the cube query context (candidate dim tables, pruning messages)
 * @return dimensions from the min-cost join path that were not explicitly queried
 * @throws LensException if no join path exists, or a dimension loses all candidates
 */
public Set<Dimension> pickOptionalTables(final DimHQLContext sc, Set<Dimension> qdims, CubeQueryContext cubeql)
        throws LensException {
    // Find the min cost join clause and add dimensions in the clause as optional dimensions
    Set<Dimension> joiningOptionalTables = new HashSet<>();
    if (qdims == null) {
        return joiningOptionalTables;
    }
    // find least cost path
    Iterator<JoinClause> itr = getJoinClausesForAllPaths(sc.getStorageCandidate(), qdims, cubeql);
    JoinClause minCostClause = null;
    while (itr.hasNext()) {
        JoinClause clause = itr.next();
        if (minCostClause == null || minCostClause.getCost() > clause.getCost()) {
            minCostClause = clause;
        }
    }
    if (minCostClause == null) {
        // No join path at all connects the queried dimensions to the join target.
        throw new LensException(LensCubeErrorCode.NO_JOIN_PATH.getLensErrorInfo(), qdims.toString(),
                autoJoinTarget.getName());
    }
    log.info("Fact: {} minCostClause:{}", sc, minCostClause);
    // Remember the chosen clause, keyed by storage candidate when available.
    if (sc.getStorageCandidate() != null) {
        getFactClauses().put(sc.getStorageCandidate(), minCostClause);
    } else {
        setMinCostClause(minCostClause);
    }
    // Dimensions on the path that were not explicitly queried become optional joins.
    for (Dimension dim : minCostClause.getDimsInPath()) {
        if (!qdims.contains(dim)) {
            joiningOptionalTables.add(dim);
        }
    }
    minCostClause.initChainColumns();
    // prune candidate dims of joiningOptionalTables wrt joining columns
    for (Dimension dim : joiningOptionalTables) {
        for (Iterator<CandidateDim> i = cubeql.getCandidateDimTables().get(dim).iterator(); i.hasNext();) {
            CandidateDim cDim = i.next();
            if (!cDim.getColumns().containsAll(minCostClause.chainColumns.get(dim))) {
                // Candidate cannot supply the chain columns: drop it and record why.
                i.remove();
                log.info("Not considering dimTable:{} as its columns are not part of any join paths. Join columns:{}",
                        cDim.getTable(), minCostClause.chainColumns.get(dim));
                cubeql.addDimPruningMsgs(dim, cDim.getTable(),
                        CandidateTablePruneCause.noColumnPartOfAJoinPath(minCostClause.chainColumns.get(dim)));
            }
        }
        if (cubeql.getCandidateDimTables().get(dim).size() == 0) {
            // Every candidate for this dimension was pruned — the query cannot be answered.
            throw new LensException(LensCubeErrorCode.NO_DIM_HAS_COLUMN.getLensErrorInfo(), dim.getName(),
                    minCostClause.chainColumns.get(dim).toString());
        }
    }
    return joiningOptionalTables;
}