Usage examples for org.apache.commons.lang StringUtils.difference
public static String difference(String str1, String str2)
Compares two Strings and returns the portion where they differ; more precisely, it returns the remainder of the second String, starting from where it first differs from the first String. If the two Strings are equal, an empty String is returned.
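A minimal standalone sketch of that behaviour (the sample values mirror the Commons Lang Javadoc; the class name DifferenceDemo is only for illustration):

import org.apache.commons.lang.StringUtils;

public class DifferenceDemo {
    public static void main(String[] args) {
        // Returns the remainder of the second String from the first point of difference.
        System.out.println(StringUtils.difference("i am a machine", "i am a robot")); // robot
        // Equal inputs yield an empty String.
        System.out.println(StringUtils.difference("abc", "abc"));                     // (empty)
        // If only the first argument is null, the second String is returned as-is.
        System.out.println(StringUtils.difference(null, "abc"));                      // abc
    }
}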
From source file:com.sillelien.dollar.DollarOperatorsRegressionTest.java
public void diff(String desc, String lhs, String rhs) {
    final String difference = StringUtils.difference(lhs, rhs);
    if (!difference.isEmpty()) {
        fail("Difference for " + desc + " is " + difference + "\nCompare previous: " + lhs
                + "\nWith current " + rhs);
    }
}
From source file:hudson.plugins.clearcase.ucm.UcmMakeBaselineComposite.java
/**
 * Retrieve all Clearcase UCM component (with pvob suffix) for a stream
 *
 * @param stream the stream name like 'P_EngDesk_Product_3.2_int@\P_ORC'
 * @param clearTool the clearcase launcher
 * @return component list attached to the stream like ['DocGen_PapeeteDoc@\P_ORC','DocMgt_Modulo@\P_ORC']
 * @throws IOException
 * @throws InterruptedException
 */
private List<String> getComponentList(ClearTool clearTool, String stream)
        throws IOException, InterruptedException {
    String output = clearTool.lsstream(stream, null, "\"%[components]XCp\"");
    String comp[] = output.split(",\\s");
    List<String> result = new ArrayList<String>();
    final String prefix = "component:";
    for (String c : comp) {
        if (StringUtils.startsWith(c, prefix)) {
            result.add(StringUtils.difference(prefix, c));
        } else {
            throw new IOException(
                    "Invalid format for component in output. Must starts with 'component:' : " + c);
        }
    }
    return result;
}
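This example (like several below) passes a fixed prefix as the first argument of difference to strip that prefix from a value, after confirming with startsWith that the value really begins with it. A small standalone sketch of the idiom, using a hypothetical cleartool-style value rather than real lsstream output:

import org.apache.commons.lang.StringUtils;

public class PrefixStripDemo {
    public static void main(String[] args) {
        // Hypothetical value; in the plugin this comes from clearTool.lsstream(...).
        String c = "component:DocGen_PapeeteDoc@\\P_ORC";
        final String prefix = "component:";
        if (StringUtils.startsWith(c, prefix)) {
            // Because c starts with prefix, difference() returns everything after the prefix.
            System.out.println(StringUtils.difference(prefix, c)); // DocGen_PapeeteDoc@\P_ORC
        }
    }
}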
From source file:com.hangum.tadpole.rdb.core.dialog.dbconnect.composite.AbstractLoginComposite.java
/**
 * Check the database connection. (Original comment was Korean and mis-encoded; intent inferred from the method.)
 *
 * @param userDB
 * @param isTest
 * @return
 */
private boolean checkDatabase(final UserDBDAO userDB, boolean isTest) {
    try {
        if (userDB.getDBDefine() == DBDefine.MONGODB_DEFAULT) {
            MongoConnectionManager.getInstance(userDB);
        } else if (userDB.getDBDefine() == DBDefine.TAJO_DEFAULT) {
            new TajoConnectionManager().connectionCheck(userDB);
        } else if (userDB.getDBDefine() == DBDefine.SQLite_DEFAULT) {
            String strFileLoc = StringUtils.difference(
                    StringUtils.remove(userDB.getDBDefine().getDB_URL_INFO(), "%s"), userDB.getUrl());
            File fileDB = new File(strFileLoc);
            if (fileDB.exists()) {
                List<String> strArr = FileUtils.readLines(fileDB);
                if (!StringUtils.contains(strArr.get(0), "SQLite format")) {
                    throw new SQLException("Doesn't SQLite files.");
                }
            }
        } else {
            SqlMapClient sqlClient = TadpoleSQLManager.getInstance(userDB);
            sqlClient.queryForList("connectionCheck", userDB.getDb()); //$NON-NLS-1$
        }
        return true;
    } catch (Exception e) {
        String errMsg = e.getMessage();

        // Check whether the JDBC driver is missing.
        try {
            Throwable cause = e.getCause().getCause();
            if (cause instanceof ClassNotFoundException) {
                errMsg = String.format(Messages.get().TadpoleTableComposite_driverMsg,
                        userDB.getDbms_type(), e.getMessage());
            }
        } catch (Exception ee) {
            // ignore exception
        }
        logger.error("DB Connecting... [url]" + userDB.getUrl(), e); //$NON-NLS-1$

        // If UserDBDao is not invalid, remove UserDBDao at internal cache
        TadpoleSQLManager.removeInstance(userDB);

        // On failure, ask the user whether to proceed anyway.
        // https://github.com/hangum/TadpoleForDBTools/issues/512
        if (!isTest) { // && loginInfo.getDBDefine() != DBDefine.MSSQL_DEFAULT
            TDBYesNoErroDialog dialog = new TDBYesNoErroDialog(getShell(), userDB.getDb() + " Test",
                    String.format(Messages.get().AbstractLoginComposite_3, errMsg));
            if (dialog.open() == IDialogConstants.OK_ID)
                return true;
        } else {
            TDBInfoDialog dialog = new TDBInfoDialog(getShell(), userDB.getDb() + " Test", errMsg);
            dialog.open();
        }
        return false;
    }
}
From source file:hudson.plugins.clearcase.ucm.UcmMakeBaseline.java
/**
 * Retrieve the read/write component list with PVOB
 *
 * @param clearToolLauncher
 * @param filePath
 * @return the read/write component like 'DeskCore@\P_ORC DeskShared@\P_ORC build_Product@\P_ORC'
 * @throws IOException
 * @throws InterruptedException
 * @throws Exception
 */
private List<String> getReadWriteComponent(ClearTool clearTool, String viewTag)
        throws IOException, InterruptedException {
    String output = clearTool.lsproject(viewTag, "%[mod_comps]Xp");
    final String prefix = "component:";
    if (StringUtils.startsWith(output, prefix)) {
        List<String> componentNames = new ArrayList<String>();
        String[] componentNamesSplit = output.split(" ");
        for (String componentName : componentNamesSplit) {
            String componentNameTrimmed = StringUtils.difference(prefix, componentName).trim();
            if (StringUtils.isNotEmpty(componentNameTrimmed)) {
                componentNames.add(componentNameTrimmed);
            }
        }
        return componentNames;
    }
    throw new IOException(output);
}
From source file:hudson.plugins.clearcase.ucm.UcmMakeBaseline.java
/**
 * Get the component binding to the baseline
 *
 * @param clearToolLauncher
 * @param baselineName the baseline name like 'deskCore_3.2-146_2008-11-14_18-07-22.3543@\P_ORC'
 * @return the component name like 'Desk_Core@\P_ORC'
 * @throws InterruptedException
 * @throws IOException
 */
private String getComponentforBaseline(ClearTool clearTool, String baselineName)
        throws InterruptedException, IOException {
    String output = clearTool.lsbl(baselineName, "%[component]Xp");
    String prefix = "component:";
    if (StringUtils.startsWith(output, prefix)) {
        return StringUtils.difference(prefix, output);
    }
    throw new IOException("Incorrect output. Received " + output);
}
From source file:com.hangum.tadpole.rdb.core.viewers.connections.ManagerViewer.java
/**
 * SQLite file download
 */
public void download(final UserDBDAO userDB) {
    try {
        String strFileLoc = StringUtils.difference(
                StringUtils.remove(userDB.getDBDefine().getDB_URL_INFO(), "%s"), userDB.getUrl());
        File dbFile = new File(strFileLoc);
        byte[] arrayData = FileUtils.readFileToByteArray(dbFile);

        downloadServiceHandler.setContentType("");
        downloadServiceHandler.setName(dbFile.getName()); //$NON-NLS-1$
        downloadServiceHandler.setByteContent(arrayData);

        DownloadUtils.provideDownload(compositeMainComposite, downloadServiceHandler.getId());
    } catch (Exception e) {
        logger.error("SQLite file Download exception", e); //$NON-NLS-1$
        Status errStatus = new Status(IStatus.ERROR, Activator.PLUGIN_ID, e.getMessage(), e); //$NON-NLS-1$
        ExceptionDetailsErrorDialog.openError(null, Messages.get().Error, "DB Download Exception", errStatus); //$NON-NLS-1$ //$NON-NLS-2$
    }
}
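Both Tadpole examples (checkDatabase above and this download method) recover the SQLite file path by removing the "%s" placeholder from the driver's URL template and taking the difference against the concrete JDBC URL. A self-contained sketch with hypothetical template and URL values:

import org.apache.commons.lang.StringUtils;

public class SqlitePathDemo {
    public static void main(String[] args) {
        // Hypothetical values; Tadpole reads these from DBDefine/UserDBDAO.
        String urlTemplate = "jdbc:sqlite:%s";                  // assumed template shape
        String url = "jdbc:sqlite:/home/user/data/sample.db";   // assumed concrete URL

        String prefix = StringUtils.remove(urlTemplate, "%s");  // "jdbc:sqlite:"
        // difference() returns the part of url after the shared prefix, i.e. the file path.
        String fileLoc = StringUtils.difference(prefix, url);
        System.out.println(fileLoc); // /home/user/data/sample.db
    }
}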
From source file:com.baasbox.controllers.Admin.java
public static Result following(String username) {
    if (!UserService.exists(username)) {
        return notFound("User " + username + " does not exists");
    }
    OUser user = UserService.getOUserByUsername(username);
    Set<ORole> roles = user.getRoles();
    List<String> usernames = new ArrayList<String>();
    for (ORole oRole : roles) {
        if (oRole.getName().startsWith(RoleDao.FRIENDS_OF_ROLE)) {
            usernames.add(StringUtils.difference(RoleDao.FRIENDS_OF_ROLE, oRole.getName()));
        }
    }
    if (usernames.isEmpty()) {
        return ok(User.prepareResponseToJson(new ArrayList<ODocument>()));
    } else {
        List<ODocument> followers;
        try {
            followers = UserService.getUserProfilebyUsernames(usernames);
            return ok(User.prepareResponseToJson(followers));
        } catch (Exception e) {
            BaasBoxLogger.error(ExceptionUtils.getMessage(e));
            return internalServerError(ExceptionUtils.getMessage(e));
        }
    }
}
From source file:org.apache.hive.hcatalog.mapreduce.FileOutputCommitterContainer.java
private void registerPartitions(JobContext context) throws IOException {
    if (dynamicPartitioningUsed) {
        discoverPartitions(context);
    }
    OutputJobInfo jobInfo = HCatOutputFormat.getJobInfo(context.getConfiguration());
    Configuration conf = context.getConfiguration();
    Table table = new Table(jobInfo.getTableInfo().getTable());
    Path tblPath = new Path(table.getTTable().getSd().getLocation());
    FileSystem fs = tblPath.getFileSystem(conf);
    IMetaStoreClient client = null;
    HCatTableInfo tableInfo = jobInfo.getTableInfo();
    List<Partition> partitionsAdded = new ArrayList<Partition>();
    try {
        HiveConf hiveConf = HCatUtil.getHiveConf(conf);
        client = HCatUtil.getHiveMetastoreClient(hiveConf);
        if (table.getPartitionKeys().size() == 0) {
            // Move data from temp directory the actual table directory
            // No metastore operation required.
            Path src = new Path(jobInfo.getLocation());
            moveTaskOutputs(fs, src, src, tblPath, false, table.isImmutable());
            if (!src.equals(tblPath)) {
                fs.delete(src, true);
            }
            if (table.getParameters() != null
                    && table.getParameters().containsKey(StatsSetupConst.COLUMN_STATS_ACCURATE)) {
                table.getParameters().remove(StatsSetupConst.COLUMN_STATS_ACCURATE);
                client.alter_table(table.getDbName(), table.getTableName(), table.getTTable());
            }
            return;
        }

        StorerInfo storer = InternalUtil.extractStorerInfo(table.getTTable().getSd(), table.getParameters());
        FileStatus tblStat = fs.getFileStatus(tblPath);
        String grpName = tblStat.getGroup();
        FsPermission perms = tblStat.getPermission();

        List<Partition> partitionsToAdd = new ArrayList<Partition>();
        if (!dynamicPartitioningUsed) {
            partitionsToAdd.add(constructPartition(context, jobInfo, tblPath.toString(), null,
                    jobInfo.getPartitionValues(), jobInfo.getOutputSchema(), getStorerParameterMap(storer),
                    table, fs, grpName, perms));
        } else {
            for (Entry<String, Map<String, String>> entry : partitionsDiscoveredByPath.entrySet()) {
                partitionsToAdd.add(constructPartition(context, jobInfo,
                        getPartitionRootLocation(entry.getKey(), entry.getValue().size()), entry.getKey(),
                        entry.getValue(), jobInfo.getOutputSchema(), getStorerParameterMap(storer),
                        table, fs, grpName, perms));
            }
        }

        ArrayList<Map<String, String>> ptnInfos = new ArrayList<Map<String, String>>();
        for (Partition ptn : partitionsToAdd) {
            ptnInfos.add(InternalUtil.createPtnKeyValueMap(new Table(tableInfo.getTable()), ptn));
        }

        /**
         * Dynamic partitioning & Append incompatibility note:
         *
         * Currently, we do not support mixing dynamic partitioning and append in the
         * same job. One reason is that we need exhaustive testing of corner cases
         * for that, and a second reason is the behaviour of add_partitions. To support
         * dynamic partitioning with append, we'd have to have a add_partitions_if_not_exist
         * call, rather than an add_partitions call. Thus far, we've tried to keep the
         * implementation of append jobtype-agnostic, but here, in code, we assume that
         * a table is considered immutable if dynamic partitioning is enabled on the job.
         *
         * This does not mean that we can check before the job begins that this is going
         * to be a dynamic partition job on an immutable table and thus fail the job, since
         * it is quite possible to have a dynamic partitioning job run on an unpopulated
         * immutable table. It simply means that at the end of the job, as far as copying
         * in data is concerned, we will pretend that the table is immutable irrespective
         * of what table.isImmutable() tells us.
         */

        // Publish the new partition(s)
        if (dynamicPartitioningUsed && harProcessor.isEnabled() && (!partitionsToAdd.isEmpty())) {
            if (!customDynamicLocationUsed) {
                Path src = new Path(ptnRootLocation);
                // check here for each dir we're copying out, to see if it
                // already exists, error out if so.
                // Also, treat dyn-writes as writes to immutable tables.
                moveTaskOutputs(fs, src, src, tblPath, true, true); // dryRun = true, immutable = true
                moveTaskOutputs(fs, src, src, tblPath, false, true);
                if (!src.equals(tblPath)) {
                    fs.delete(src, true);
                }
            } else {
                moveCustomLocationTaskOutputs(fs, table, hiveConf);
            }
            try {
                updateTableSchema(client, table, jobInfo.getOutputSchema());
                LOG.info("HAR is being used. The table {} has new partitions {}.",
                        table.getTableName(), ptnInfos);
                client.add_partitions(partitionsToAdd);
                partitionsAdded = partitionsToAdd;
            } catch (Exception e) {
                // There was an error adding partitions : rollback fs copy and rethrow
                for (Partition p : partitionsToAdd) {
                    Path ptnPath = new Path(harProcessor.getParentFSPath(new Path(p.getSd().getLocation())));
                    if (fs.exists(ptnPath)) {
                        fs.delete(ptnPath, true);
                    }
                }
                throw e;
            }
        } else {
            // no harProcessor, regular operation
            updateTableSchema(client, table, jobInfo.getOutputSchema());
            LOG.info("HAR not is not being used. The table {} has new partitions {}.",
                    table.getTableName(), ptnInfos);
            if (partitionsToAdd.size() > 0) {
                if (!dynamicPartitioningUsed) {
                    // regular single-partition write into a partitioned table.
                    // Move data from temp directory the actual table directory
                    if (partitionsToAdd.size() > 1) {
                        throw new HCatException(ErrorType.ERROR_PUBLISHING_PARTITION,
                                "More than one partition to publish in non-dynamic partitioning job");
                    }
                    Partition p = partitionsToAdd.get(0);
                    Path src = new Path(jobInfo.getLocation());
                    Path dest = new Path(p.getSd().getLocation());
                    moveTaskOutputs(fs, src, src, dest, true, table.isImmutable());
                    moveTaskOutputs(fs, src, src, dest, false, table.isImmutable());
                    if (!src.equals(dest)) {
                        if (src.toString()
                                .matches(".*" + Path.SEPARATOR + SCRATCH_DIR_NAME + "\\d\\.?\\d+.*")) {
                            // src is scratch directory, need to trim the part key value pairs from path
                            String diff = StringUtils.difference(src.toString(), dest.toString());
                            fs.delete(new Path(StringUtils.substringBefore(src.toString(), diff)), true);
                        } else {
                            fs.delete(src, true);
                        }
                    }

                    // Now, we check if the partition already exists. If not, we go ahead.
                    // If so, we error out if immutable, and if mutable, check that the partition's IF
                    // matches our current job's IF (table's IF) to check for compatibility. If compatible, we
                    // ignore and do not add. If incompatible, we error out again.
                    boolean publishRequired = false;
                    try {
                        Partition existingP = client.getPartition(p.getDbName(), p.getTableName(), p.getValues());
                        if (existingP != null) {
                            if (table.isImmutable()) {
                                throw new HCatException(ErrorType.ERROR_DUPLICATE_PARTITION,
                                        "Attempted duplicate partition publish on to immutable table");
                            } else {
                                if (!existingP.getSd().getInputFormat()
                                        .equals(table.getInputFormatClass().getName())) {
                                    throw new HCatException(ErrorType.ERROR_PUBLISHING_PARTITION,
                                            "Attempted partition append, where old partition format was "
                                                    + existingP.getSd().getInputFormat()
                                                    + " and table format was "
                                                    + table.getInputFormatClass().getName());
                                }
                            }
                        } else {
                            publishRequired = true;
                        }
                    } catch (NoSuchObjectException e) {
                        // All good, no such partition exists, move on.
                        publishRequired = true;
                    }
                    if (publishRequired) {
                        client.add_partitions(partitionsToAdd);
                        partitionsAdded = partitionsToAdd;
                    }
                } else {
                    // Dynamic partitioning usecase
                    if (!customDynamicLocationUsed) {
                        Path src = new Path(ptnRootLocation);
                        moveTaskOutputs(fs, src, src, tblPath, true, true); // dryRun = true, immutable = true
                        moveTaskOutputs(fs, src, src, tblPath, false, true);
                        if (!src.equals(tblPath)) {
                            fs.delete(src, true);
                        }
                    } else {
                        moveCustomLocationTaskOutputs(fs, table, hiveConf);
                    }
                    client.add_partitions(partitionsToAdd);
                    partitionsAdded = partitionsToAdd;
                }
            }

            // Set permissions appropriately for each of the partitions we just created
            // so as to have their permissions mimic the table permissions
            for (Partition p : partitionsAdded) {
                applyGroupAndPerms(fs, new Path(p.getSd().getLocation()), tblStat.getPermission(),
                        tblStat.getGroup(), true);
            }
        }
    } catch (Exception e) {
        if (partitionsAdded.size() > 0) {
            try {
                // baseCommitter.cleanupJob failed, try to clean up the
                // metastore
                for (Partition p : partitionsAdded) {
                    client.dropPartition(tableInfo.getDatabaseName(), tableInfo.getTableName(),
                            p.getValues(), true);
                }
            } catch (Exception te) {
                // Keep cause as the original exception
                throw new HCatException(ErrorType.ERROR_PUBLISHING_PARTITION, e);
            }
        }
        if (e instanceof HCatException) {
            throw (HCatException) e;
        } else {
            throw new HCatException(ErrorType.ERROR_PUBLISHING_PARTITION, e);
        }
    } finally {
        HCatUtil.closeHiveClientQuietly(client);
    }
}
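In the scratch-directory branch above, difference combined with substringBefore locates the point where the source and destination paths diverge, so the scratch directory can be deleted without its partition sub-path. A standalone sketch with hypothetical paths (the real ones come from the job and partition locations):

import org.apache.commons.lang.StringUtils;

public class PathDivergenceDemo {
    public static void main(String[] args) {
        // Hypothetical scratch and destination paths.
        String src  = "/warehouse/tbl/_SCRATCH0.123/part=1";
        String dest = "/warehouse/tbl/part=1";
        // diff is the tail of dest starting where it first differs from src: "part=1"
        String diff = StringUtils.difference(src, dest);
        // Trimming src at the first occurrence of diff yields the scratch directory to delete:
        // "/warehouse/tbl/_SCRATCH0.123/"
        System.out.println(StringUtils.substringBefore(src, diff));
    }
}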
From source file:org.apache.maven.plugins.enforcer.MatchArtifactIdAndBaseDir.java
@Override
public void execute(final EnforcerRuleHelper helper) throws EnforcerRuleException {
    MavenProject mavenProject = getMavenProject(helper);
    String baseDir = mavenProject.getBasedir().getName();
    String artifactId = mavenProject.getArtifactId();

    String difference = StringUtils.difference(baseDir, artifactId);
    if (!difference.isEmpty()) {
        String template = "Artifact id: [%s] is not the same with base dir: [%s]. Difference is started at: [%s]";
        throw new EnforcerRuleException(String.format(template, artifactId, baseDir, difference));
    }
}
From source file:org.apache.wookie.tests.conformance.PackagingAndConfiguration.java
private String getLocalIconPath(Element widget, Element iconElem) {
    String id = widget.getAttributeValue("identifier");
    id = WidgetPackageUtils.convertIdToFolderName(id);
    String baseUrl = "http://localhost:8080/wookie/wservices/" + id + "/";
    if (iconElem == null)
        return null;
    String iconUrl = iconElem.getText();
    String icon = StringUtils.difference(baseUrl, iconUrl);
    return icon;
}