List of usage examples for java.lang StringBuilder lastIndexOf
@Override public int lastIndexOf(String str)
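Before the project examples below, a minimal standalone sketch of the method's contract (the class name and values here are illustrative only, not taken from any of the listed projects): lastIndexOf(String) returns the index of the rightmost occurrence of the substring, lastIndexOf(String, int) searches backwards starting at the given index, and both return -1 when the substring is absent.

// Illustrative contract demo for StringBuilder.lastIndexOf (not project code).
public class LastIndexOfDemo {
    public static void main(String[] args) {
        StringBuilder sb = new StringBuilder("a,b,c,");
        System.out.println(sb.lastIndexOf(","));    // 5: index of the rightmost comma
        System.out.println(sb.lastIndexOf(",", 4)); // 3: backward search starts at index 4
        System.out.println(sb.lastIndexOf(";"));    // -1: substring not present
    }
}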
From source file:net.duckling.ddl.service.resource.dao.ResourceDAOImpl.java
private String buildSphinxIdsSQL(List<Long> page_ids) {
    StringBuilder sb = new StringBuilder();
    sb.append("(");
    for (Long l : page_ids) {
        sb.append(l + ",");
    }
    sb.replace(sb.lastIndexOf(","), sb.length(), ")");
    return sb.toString();
}
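A side note on the example above (this guarded variant is a sketch, not part of ResourceDAOImpl): when page_ids is empty, sb.lastIndexOf(",") returns -1 and sb.replace(-1, ...) throws StringIndexOutOfBoundsException. A defensive version could check the index first; the method name below is hypothetical.

// Sketch of a guarded variant; buildSphinxIdsSQLSafe is a hypothetical name.
private String buildSphinxIdsSQLSafe(List<Long> pageIds) {
    StringBuilder sb = new StringBuilder("(");
    for (Long l : pageIds) {
        sb.append(l).append(",");
    }
    int lastComma = sb.lastIndexOf(",");
    if (lastComma >= 0) {
        sb.replace(lastComma, sb.length(), ")"); // swap the trailing comma for the closing paren
    } else {
        sb.append(")"); // empty list: lastIndexOf returned -1, just close the parentheses
    }
    return sb.toString();
}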
From source file:com.doctor.other.concurrent_hash_map_based_table.ConcurrentHashMapBasedTable.java
@Override
public String toString() {
    final String padding = "-----------";
    StringBuilder stringBuilder = new StringBuilder(256);
    table.forEach((row, value) -> {
        stringBuilder.append("row: ").append(row).append(", ");
        value.forEach((colum, value2) -> {
            stringBuilder.append("colum: ").append(colum).append(", ");
            value2.forEach((timesplice, value3) -> {
                stringBuilder.append("timesplice: [").append(timesplice).append("] ");
                stringBuilder.append(" values: ").append(value3).append("; ");
            });
            stringBuilder.append("\n").append(padding);
        });
        int index = stringBuilder.lastIndexOf(padding);
        index = index == -1 ? 0 : index;
        stringBuilder.delete(index, stringBuilder.length());
        stringBuilder.append("\n");
    });
    return stringBuilder.toString();
}
From source file:com.gmail.at.faint545.fragments.QueueFragment.java
private String collectSelectedJobs() {
    StringBuilder selectedJobs = new StringBuilder();
    /* Create a string of jobs to delete, separated by commas i.e: job1,job2,job3 */
    for (int position : mSelectedPositions) {
        JSONObject job = mJobs.get(position);
        try {
            String id = job.getString(SabnzbdConstants.NZOID);
            selectedJobs.append(id).append(",");
        } catch (JSONException e) {
            e.printStackTrace();
        }
    }
    return selectedJobs.substring(0, selectedJobs.lastIndexOf(",")); // Chop off last comma
}
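An aside, not taken from QueueFragment: if mSelectedPositions is empty, selectedJobs.lastIndexOf(",") returns -1 and substring(0, -1) throws StringIndexOutOfBoundsException. When the pieces are plain strings, String.join builds the comma-separated list without the trailing-comma chop at all; the class name and values below are illustrative only.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

// Illustrative only: supply the delimiter up front instead of appending ","
// after every element and chopping the last one off with lastIndexOf.
public class JoinDemo {
    public static void main(String[] args) {
        List<String> jobIds = Arrays.asList("job1", "job2", "job3");
        System.out.println(String.join(",", jobIds));                          // job1,job2,job3
        System.out.println(String.join(",", Collections.<String>emptyList())); // "" instead of an exception
    }
}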
From source file:DatasetCreation.DatasetCSVBuilder.java
/**
 * Return CSV string which represents the dataset
 *
 * @param elements ArrayList of elements to build dataset from
 * @param featureExtractor a Feature Extractor object
 * @param selectedFeatures the top selected features to build the dataset with
 * @param classification the classification of all the records in the dataset
 * @param addElementIDColumn add prefix column identifying the record
 * @param addClassificationColumn add suffix column identifying the class of the record
 * @return CSV string which represents the dataset
 */
public StringBuilder BuildDatabaseCSV(ArrayList<T> elements, IFeatureExtractor<T> featureExtractor,
        ArrayList<Pair<String, Integer>> selectedFeatures, int totalElementsNum,
        FeatureRepresentation featureRepresentation, Classification classification,
        boolean addElementIDColumn, boolean addClassificationColumn) {
    StringBuilder datasetCSV = new StringBuilder();
    StringBuilder elementFeaturesVectorCSV;
    for (T element : elements) {
        elementFeaturesVectorCSV = GetFeaturesVectorCSV(element, featureExtractor, selectedFeatures,
                totalElementsNum, featureRepresentation, classification, addElementIDColumn,
                addClassificationColumn);
        if (elementFeaturesVectorCSV != null) {
            datasetCSV.append(elementFeaturesVectorCSV);
            datasetCSV.append("\n");
        }
    }
    datasetCSV.deleteCharAt(datasetCSV.lastIndexOf("\n"));
    return datasetCSV;
}
From source file:org.dspace.app.xmlui.aspect.administrative.authorization.EditPolicyForm.java
/**
 * Search for groups to add to this group.
 */
private void addGroupSearch(Division div, Group sourceGroup, DSpaceObject dso, String query, int page)
        throws WingException, SQLException {
    Group[] groups = Group.search(context, query, page * RESULTS_PER_PAGE, (page + 1) * RESULTS_PER_PAGE);
    int totalResults = Group.searchResultCount(context, query);
    ArrayList<ResourcePolicy> otherPolicies = (ArrayList<ResourcePolicy>) AuthorizeManager.getPolicies(context, dso);

    if (totalResults > RESULTS_PER_PAGE) {
        int firstIndex = page * RESULTS_PER_PAGE + 1;
        int lastIndex = page * RESULTS_PER_PAGE + groups.length;
        String baseURL = contextPath + "/admin/authorize?administrative-continue=" + knot.getId();
        String nextURL = null, prevURL = null;
        if (page < ((totalResults - 1) / RESULTS_PER_PAGE)) {
            nextURL = baseURL + "&page=" + (page + 1);
        }
        if (page > 0) {
            prevURL = baseURL + "&page=" + (page - 1);
        }
        div.setSimplePagination(totalResults, firstIndex, lastIndex, prevURL, nextURL);
    }

    Table table = div.addTable("policy-edit-search-group", groups.length + 1, 1);
    Row header = table.addRow(Row.ROLE_HEADER);

    // Add the header row
    header = table.addRow(Row.ROLE_HEADER);
    header.addCell().addContent(T_groups_column1);
    header.addCell().addContent(T_groups_column2);
    header.addCell().addContent(T_groups_column3);
    header.addCell().addContent(T_groups_column4);

    // The rows of search results
    for (Group group : groups) {
        String groupID = String.valueOf(group.getID());
        String name = group.getName();
        url = contextPath + "/admin/groups?administrative-continue=" + knot.getId()
                + "&submit_edit_group&group_id=" + groupID;

        Row row = table.addRow();
        row.addCell().addContent(groupID);
        row.addCell().addXref(url, name);

        // Iterate over other policies of our parent resource to see if any match the currently selected group
        StringBuilder otherAuthorizations = new StringBuilder();
        int groupsMatched = 0;
        for (ResourcePolicy otherPolicy : otherPolicies) {
            if (otherPolicy.getGroup() == group) {
                otherAuthorizations.append(otherPolicy.getActionText()).append(", ");
                groupsMatched++;
            }
        }
        if (groupsMatched > 0) {
            row.addCell().addContent(otherAuthorizations.substring(0, otherAuthorizations.lastIndexOf(", ")));
        } else {
            row.addCell().addContent("-");
        }

        if (group != sourceGroup) {
            row.addCell().addButton("submit_group_id_" + groupID).setValue(T_set_group);
        } else {
            row.addCell().addContent(T_current_group);
        }
    }

    if (groups.length <= 0) {
        table.addRow().addCell(1, 4).addContent(T_no_results);
    }
}
From source file:org.deeplearning4j.util.StringGrid.java
public List<String> toLines() {
    List<String> lines = new ArrayList<>();
    for (List<String> list : this) {
        StringBuilder sb = new StringBuilder();
        for (String s : list) {
            sb.append(s.replaceAll(sep, " "));
            sb.append(sep);
        }
        lines.add(sb.toString().substring(0, sb.lastIndexOf(sep)));
    }
    return lines;
}
From source file:org.icefaces.samples.showcase.util.SourceCodeLoaderConnection.java
/**
 * Returns formatted source located in the cache, or adds to the cache and
 * returns the source found at sourceCodePath, removing oldest cached files
 * if MAX_CACHE_SIZE exceeded.
 *
 * Implementing the map interface method `get` to allow parameter passing
 * in EL versions prior to 2.0
 *
 * @param sourceCodePathObj The String location of the source file relative
 *            to the web application root.
 * @return The XHTML formatted source code or a stack trace if the URL
 *         could not be UTF-8 encoded.
 */
public String get(Object sourceCodePathObj) {
    if (SOURCE_SERVLET_URL == null || MAX_CACHE_SIZE == null || !(sourceCodePathObj instanceof String)) {
        return null;
    }

    String sourceCodePath;
    // Try encoding sourceCodePathObj parameter, return a stack trace
    // instead of source if it fails.
    try {
        sourceCodePath = URLEncoder.encode((String) sourceCodePathObj, "UTF-8");
    } catch (UnsupportedEncodingException e) {
        logger.severe("UTF-8 is not supported by this platform.");
        return "";
    }

    CachedSource cs;
    // Only allow the cache to be accessed by a single user when being used
    // within one of these blocks.
    synchronized (cache) {
        // Search the cache for formatted source code from this path. If it is
        // found, update the formatted source code with the current timestamp
        // and return the cached source.
        if ((cs = (CachedSource) cache.get(sourceCodePath)) != null) {
            logger.finer("Source Cache Hit.");
            logger.finest("Hit: " + sourceCodePath);
            cs.timestamp = System.currentTimeMillis() / 1000;
            cache.put(cs.path, cs);
            return cs.source;
        }
    }

    logger.finer("Source Cache Miss.");
    logger.finest("Miss: " + sourceCodePath);

    URL servletUrl = null;
    InputStream inputStream = null;
    InputStreamReader inputReader = null;
    try {
        if (!IS_SECURE) {
            servletUrl = new URL(SOURCE_SERVLET_URL + sourceCodePath);
            inputStream = (InputStream) servletUrl.getContent();
        } else {
            // don't use a connection, just access methods directly
            inputStream = SourceCodeLoaderServlet.getServlet().getSource((String) sourceCodePathObj);
        }
        brokenUrl = false;
    } catch (Exception e) {
        e.printStackTrace();
        logger.severe("Broken URL for the source code loader (" + SOURCE_SERVLET_URL + "), check your web.xml.");
        brokenUrl = true;
    }

    if (!brokenUrl) {
        try {
            // Set up streams and buffers for source servlet reading
            inputReader = new InputStreamReader(inputStream, "UTF-8");
            StringBuilder buf = new StringBuilder(16384);

            // Read into stringBuilder until EOF
            int readChar;
            while ((readChar = inputReader.read()) != -1) {
                buf.append((char) readChar);
            }

            // Extract page content from <body> tag and fix nbsp for valid XHTML
            String ret = buf.indexOf("&nbsp;") != -1
                    ? buf.substring(buf.indexOf("<body>") + 6, buf.lastIndexOf("</body>")).replace("&nbsp;", "&#160;")
                    : buf.toString();

            synchronized (cache) {
                // If cache is full, remove files until the newly loaded string
                // will fit and add it to the cache
                while ((ret.length() * 16) + cacheSize > MAX_CACHE_SIZE) {
                    OrderedBidiMap iCache = cache.inverseOrderedBidiMap();
                    CachedSource c = (CachedSource) iCache.firstKey();
                    cache.remove(c.path);
                    cacheSize -= c.source.length() * 16;
                    logger.finer("Cache Oversized. Removing oldest file.");
                    logger.finest("Removed: " + c.path);
                }
                cache.put(sourceCodePath, new CachedSource(sourceCodePath, ret));
                cacheSize += ret.length() * 16;
            }

            // Return newly loaded and cached source
            return ret;
        } catch (MalformedURLException e) {
            logger.severe("Attempted to connect to malformed URL.");
            logger.severe("Likely either EL param or web.xml SOURCE_SERVLET_URL param is incorrectly set");
            e.printStackTrace();
        } catch (UnsupportedEncodingException e) {
            logger.severe("UTF-8 is not supported by this platform.");
            e.printStackTrace();
        } catch (IOException e) {
            logger.severe("IOException raised while reading characters from Servlet response stream.");
            e.printStackTrace();
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            if (inputStream != null) {
                try {
                    inputStream.close();
                } catch (Exception ignoredClose) {
                }
            }
            if (inputReader != null) {
                try {
                    inputReader.close();
                } catch (Exception ignoredClose) {
                }
            }
        }
    }
    return "";
}
From source file:org.bibsonomy.bibtex.util.BibtexParserUtils.java
/**
 * format a person field (author or editor) of a given BibtexEntry according to
 * FIRSTNAME LASTNAME and FIRSTNAME LASTNAME and ...
 * and do some consistency checks
 *
 * @param entry a BibTexEntry
 * @param field a field name (author or editor)
 * @return the formatted person string, or null if no author was given
 */
private static String getFormattedPersonString(final BibtexEntry entry, final personField field) {
    final BibtexAbstractValue fieldValue = entry.getFieldValue(field.getLabel());
    log.debug("fieldValue: " + fieldValue);
    if (fieldValue instanceof BibtexPersonList) {
        final BibtexPersonList personsString = (BibtexPersonList) fieldValue;
        final StringBuilder personBuffer = new StringBuilder();
        log.debug("personsString: " + personsString);
        if (personsString != null) {
            @SuppressWarnings("unchecked")
            // BibtexPersonList.getList specified to return a list of BibtexPersons
            final List<BibtexPerson> personList = personsString.getList();
            log.debug("personList: " + personList);
            for (final BibtexPerson person : personList) {
                // build one person
                final StringBuilder personString = new StringBuilder();
                final String first = person.getFirst();
                if (present(first)) {
                    personString.append(first);
                }
                final String preLast = person.getPreLast();
                if (present(preLast)) {
                    personString.append(" ").append(preLast);
                }
                final String last = person.getLast();
                if (present(last)) {
                    personString.append(" ").append(last);
                }
                if (person.isOthers()) {
                    personString.append("others");
                }
                personBuffer.append(personString).append(PersonNameUtils.PERSON_NAME_DELIMITER);
                log.debug("personString: " + personString);
            }
            /* remove last " and " */
            if (!personList.isEmpty()) {
                return (personBuffer.substring(0, personBuffer.lastIndexOf(PersonNameUtils.PERSON_NAME_DELIMITER)));
            }
            // this means there was an error when trying to format this person
            log.error(BIBTEX_IS_INVALID_MSG + "Error while trying to format person list: " + personsString);
            throw new ValidationException(
                    BIBTEX_IS_INVALID_MSG + "Error while trying to format person list: " + personsString);
        }
    } else if (fieldValue instanceof BibtexString) {
        log.error(BIBTEX_IS_INVALID_MSG + "Error while trying to format person list: " + fieldValue);
        throw new ValidationException(
                BIBTEX_IS_INVALID_MSG + "Error while trying to format person list: " + fieldValue);
    }
    // this means no author was given
    return null;
}
From source file:org.jclouds.atmosonline.saas.filters.SignRequest.java
private void appendCanonicalizedHeaders(HttpRequest request, StringBuilder toSign) {
    // TreeSet == Sort the headers alphabetically.
    Set<String> headers = new TreeSet<String>(request.getHeaders().keySet());
    for (String header : headers) {
        if (header.startsWith("x-emc-")) {
            // Convert all header names to lowercase.
            toSign.append(header.toLowerCase()).append(":");
            // For headers with values that span multiple lines, convert them into one line by replacing any
            // newline characters and extra embedded white spaces in the value.
            for (String value : request.getHeaders().get(header))
                toSign.append(value.replaceAll("\r?\n", "").replaceAll(" ", " ")).append(" ");
            toSign.deleteCharAt(toSign.lastIndexOf(" "));
            // Concatenate all headers together, using newlines (\n) separating each header from the next one.
            toSign.append("\n");
        }
    }
    // There should be no terminating newline character at the end of the last header.
    if (toSign.charAt(toSign.length() - 1) == '\n')
        toSign.deleteCharAt(toSign.length() - 1);
}
From source file:net.acesinc.nifi.processors.security.ConvertSecurityMarkingAndAttrListIntoJson.java
public String getBuiltRegexForClassification(FlowAttrSecurityConfig secConfig) {
    final ComponentLog logger = getLogger();
    String dlm = secConfig.getDelim();
    StringBuilder regexPrefixForClass = new StringBuilder();
    regexPrefixForClass.append("(");
    for (String abbrev : secConfig.getAbbreviatedLevelsCanReceive()) {
        regexPrefixForClass.append(abbrev);
        regexPrefixForClass.append("|");
    }
    for (String classPart : secConfig.getLevelsCanReceive()) {
        regexPrefixForClass.append(classPart);
        regexPrefixForClass.append("|");
    }
    // this is awkward, but does the trick
    int lastIndexOfPipe = regexPrefixForClass.lastIndexOf("|");
    regexPrefixForClass = regexPrefixForClass.replace(lastIndexOfPipe, regexPrefixForClass.length(), "");
    regexPrefixForClass.append(")");
    String regexSuffixForClass = "[A-Za-z" + dlm + "]+";
    regexPrefixForClass.append(regexSuffixForClass);
    String regexForClass = regexPrefixForClass.toString();
    logger.debug("built regex for classification part:=" + regexForClass);
    return regexForClass;
}
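The source's own comment calls the trailing-pipe removal awkward. As a hedged alternative (not what the NiFi processor does), java.util.StringJoiner with "(" and ")" as prefix and suffix builds the same alternation group without the lastIndexOf/replace step; the level list and delimiter below are made-up placeholders, not values from the processor's configuration.

import java.util.Arrays;
import java.util.List;
import java.util.StringJoiner;

// Sketch only: the joiner inserts "|" between entries and wraps the result
// in "(" ... ")", so no trailing delimiter ever needs to be trimmed.
public class RegexJoinDemo {
    public static void main(String[] args) {
        List<String> levels = Arrays.asList("U", "C", "S", "UNCLASSIFIED", "SECRET"); // placeholder levels
        String dlm = "/";                                                             // placeholder delimiter
        StringJoiner alternation = new StringJoiner("|", "(", ")");
        for (String level : levels) {
            alternation.add(level);
        }
        String regexForClass = alternation.toString() + "[A-Za-z" + dlm + "]+";
        System.out.println(regexForClass); // (U|C|S|UNCLASSIFIED|SECRET)[A-Za-z/]+
    }
}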