List of usage examples for java.io StringWriter write
public void write(String str)
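As a quick standalone illustration before the project examples: StringWriter collects everything written to it in an internal StringBuffer, so write(String) never throws, nothing needs to be flushed, close() has no effect, and the accumulated text is read back with toString(). A minimal sketch:

StringWriter out = new StringWriter();
out.write("Hello, ");                 // write(String) appends to the internal buffer
out.write("world");
out.write("!", 0, 1);                 // write(String, off, len) appends a substring
System.out.println(out.toString());   // prints: Hello, world!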
From source file: de.innovationgate.utils.WGUtils.java
/**
 * Replaces occurrences of a substring inside a string with another substring.
 * This variant of the method takes a replace processor object that can be
 * used to further specify the replacing mechanism.
 *
 * @param strText
 *            The text to search for occurrences of the substring
 * @param strFrom
 *            The substring to search for
 * @param proc
 *            The processor that will make the replacement and tell the
 *            method where to continue searching.
 * @param bMultiple
 *            Specify true if multiple occurrences should be replaced.
 *            Specify false if only the first occurrence should be replaced.
 * @param exactCase
 *            Determines if strings should be compared with exact case.
 *            If false, strings are matched case insensitively.
 * @return The string with occurrences of the substring replaced.
 */
public static String strReplace(String strText, String strFrom, ReplaceProcessor proc, boolean bMultiple,
        boolean exactCase) {
    if (strText == null || strFrom == null) {
        return "";
    }
    if (proc == null) {
        proc = new DefaultReplaceProcessor("");
    }

    int iFromLength = strFrom.length();
    String strLCText = (exactCase ? strText : strText.toLowerCase());
    String strLCFrom = (exactCase ? strFrom : strFrom.toLowerCase());
    int iOccurs = strLCText.indexOf(strLCFrom, 0);
    int iStartWith = 0;
    StringWriter out = new StringWriter();

    try {
        while (iOccurs != -1) {
            out.write(strText.substring(iStartWith, iOccurs));
            int iTo = iOccurs + iFromLength;
            iStartWith = proc.replace(strText, iOccurs, iTo, out);
            if (bMultiple) {
                // strLCText is already case-prepared above; lower-casing it again
                // here would break matching when exactCase is true
                iOccurs = strLCText.indexOf(strLCFrom, iStartWith);
            } else {
                iOccurs = -1;
            }
        }
        if (iStartWith < strLCText.length()) {
            out.write(strText.substring(iStartWith));
        }
        return out.toString();
    } catch (IOException e) {
        e.printStackTrace();
        return strText;
    }
}
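For context, a hypothetical call to the method above. DefaultReplaceProcessor belongs to the same WGUtils API and is not shown here; the sketch assumes its constructor argument is the replacement text it writes for each match:

// hypothetical usage; assumes DefaultReplaceProcessor("-") writes "-" for each match
String result = WGUtils.strReplace("A B C", " ", new DefaultReplaceProcessor("-"), true, false);
// under that assumption, result would be "A-B-C"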
From source file: org.nuxeo.launcher.config.ConfigurationGenerator.java
private void writeConfiguration() throws ConfigurationException {
    final MessageDigest newContentDigest = DigestUtils.getMd5Digest();
    StringWriter newContent = new StringWriter() {
        @Override
        public void write(String str) {
            if (str != null) {
                newContentDigest.update(str.getBytes());
            }
            super.write(str);
        }
    };
    // Copy back file content
    newContent.append(readConfiguration());
    // Write changed parameters
    newContent.write(BOUNDARY_BEGIN + System.getProperty("line.separator"));
    for (Object o : new TreeSet<>(userConfig.keySet())) {
        String key = (String) o;
        // Ignore parameters already stored in newContent
        if (PARAM_FORCE_GENERATION.equals(key) || PARAM_WIZARD_DONE.equals(key)
                || PARAM_TEMPLATES_NAME.equals(key)) {
            continue;
        }
        String oldValue = storedConfig.getProperty(key, "");
        String newValue = userConfig.getRawProperty(key, "");
        if (!newValue.equals(oldValue)) {
            newContent.write("#" + key + "=" + oldValue + System.getProperty("line.separator"));
            newContent.write(key + "=" + newValue + System.getProperty("line.separator"));
        }
    }
    newContent.write(BOUNDARY_END + System.getProperty("line.separator"));
    // Write file only if content has changed
    if (!Hex.encodeHexString(newContentDigest.digest()).equals(currentConfigurationDigest)) {
        try (Writer writer = new FileWriter(nuxeoConf, false)) {
            writer.append(newContent.getBuffer());
        } catch (IOException e) {
            throw new ConfigurationException("Error writing in " + nuxeoConf, e);
        }
    }
}
From source file: com.xmlcalabash.library.HttpRequest.java
private void doPutOrPostMultipart(EntityEnclosingMethod method, XdmNode multipart) {
    // The Apache HTTP libraries just don't handle this case...we treat it as a "single part"
    // and build the body ourselves, using the boundaries etc.

    // Provide a custom retry handler if necessary
    method.getParams().setParameter(HttpMethodParams.RETRY_HANDLER,
            new DefaultHttpMethodRetryHandler(3, false));

    // Check for consistency of content-type
    contentType = multipart.getAttributeValue(_content_type);
    if (contentType == null) {
        contentType = "multipart/mixed";
    }

    if (headerContentType != null && !headerContentType.equals(contentType.toLowerCase())) {
        throw XProcException.stepError(20);
    }

    if (!contentType.startsWith("multipart/")) {
        throw new UnsupportedOperationException("Multipart content-type must be multipart/...");
    }

    for (Header header : headers) {
        method.addRequestHeader(header);
    }

    String boundary = multipart.getAttributeValue(_boundary);

    if (boundary == null) {
        throw new XProcException(step.getNode(), "A boundary value must be specified on c:multipart");
    }

    if (boundary.startsWith("--")) {
        throw XProcException.stepError(2);
    }

    String q = "\"";
    if (boundary.contains(q)) {
        q = "'";
    }
    if (boundary.contains(q)) {
        q = "";
    }

    String multipartContentType = contentType + "; boundary=" + q + boundary + q;

    // FIXME: This sucks rocks. I want to write the data to be posted, not provide some way to read it
    MessageBytes byteContent = new MessageBytes();
    byteContent.append("This is a multipart message.\r\n");

    for (XdmNode body : new RelevantNodes(runtime, multipart, Axis.CHILD)) {
        if (!XProcConstants.c_body.equals(body.getNodeName())) {
            throw new XProcException(step.getNode(), "A c:multipart may only contain c:body elements.");
        }

        String bodyContentType = body.getAttributeValue(_content_type);
        if (bodyContentType == null) {
            throw new XProcException(step.getNode(), "Content-type on c:body is required.");
        }

        String bodyId = body.getAttributeValue(_id);
        String bodyDescription = body.getAttributeValue(_description);
        String bodyDisposition = body.getAttributeValue(_disposition);

        String bodyCharset = HttpUtils.getCharset(bodyContentType);

        if (bodyContentType.contains(";")) {
            int pos = bodyContentType.indexOf(";");
            bodyContentType = bodyContentType.substring(0, pos);
        }

        String bodyEncoding = body.getAttributeValue(_encoding);
        if (bodyEncoding != null && !"base64".equals(bodyEncoding)) {
            throw new UnsupportedOperationException("The '" + bodyEncoding + "' encoding is not supported");
        }

        if (bodyCharset != null) {
            bodyContentType += "; charset=" + bodyCharset;
        } else {
            // Is utf-8 the right default? What about the image/ case?
            bodyContentType += "; charset=utf-8";
        }

        byteContent.append("--" + boundary + "\r\n");
        byteContent.append("Content-Type: " + bodyContentType + "\r\n");

        if (bodyDescription != null) {
            byteContent.append("Content-Description: " + bodyDescription + "\r\n");
        }
        if (bodyId != null) {
            byteContent.append("Content-ID: " + bodyId + "\r\n");
        }
        if (bodyDisposition != null) {
            byteContent.append("Content-Disposition: " + bodyDisposition + "\r\n");
        }
        if (bodyEncoding != null) {
            if (encodeBinary) {
                byteContent.append("Content-Transfer-Encoding: " + bodyEncoding + "\r\n");
            }
        }
        byteContent.append("\r\n");

        try {
            if (xmlContentType(bodyContentType)) {
                Serializer serializer = makeSerializer();

                Vector<XdmNode> content = new Vector<XdmNode>();
                XdmSequenceIterator iter = body.axisIterator(Axis.CHILD);
                while (iter.hasNext()) {
                    XdmNode node = (XdmNode) iter.next();
                    content.add(node);
                }

                // FIXME: set serializer properties appropriately!
                StringWriter writer = new StringWriter();
                serializer.setOutputWriter(writer);
                S9apiUtils.serialize(runtime, content, serializer);
                writer.close();
                byteContent.append(writer.toString());
            } else if (jsonContentType(contentType)) {
                byteContent.append(XMLtoJSON.convert(body));
            } else if (!encodeBinary && "base64".equals(bodyEncoding)) {
                byte[] decoded = Base64.decode(body.getStringValue());
                byteContent.append(decoded, decoded.length);
            } else {
                StringWriter writer = new StringWriter();
                XdmSequenceIterator iter = body.axisIterator(Axis.CHILD);
                while (iter.hasNext()) {
                    XdmNode node = (XdmNode) iter.next();
                    if (node.getNodeKind() != XdmNodeKind.TEXT) {
                        throw XProcException.stepError(28);
                    }
                    writer.write(node.getStringValue());
                }
                writer.close();
                byteContent.append(writer.toString());
            }

            byteContent.append("\r\n");
        } catch (IOException ioe) {
            throw new XProcException(ioe);
        } catch (SaxonApiException sae) {
            throw new XProcException(sae);
        }
    }

    byteContent.append("--" + boundary + "--\r\n");

    ByteArrayRequestEntity requestEntity =
            new ByteArrayRequestEntity(byteContent.content(), multipartContentType);
    method.setRequestEntity(requestEntity);
}
From source file: com.cyberway.issue.crawler.admin.CrawlJob.java
public Object invoke(String operationName, Object[] params, String[] signature) throws ReflectionException {
    if (operationName == null) {
        throw new RuntimeOperationsException(new IllegalArgumentException("Operation name cannot be null"),
                "Cannot call invoke with null operation name");
    }

    controller.installThreadContextSettingsHandler();

    if (this.bdbjeOperationsNameList.contains(operationName)) {
        try {
            Object o = this.bdbjeMBeanHelper.invoke(this.controller.getBdbEnvironment(), operationName,
                    params, signature);
            // If OP_DB_STAT, return String version of result.
            if (operationName.equals(OP_DB_STAT)) {
                return o.toString();
            }
            return o;
        } catch (MBeanException e) {
            throw new RuntimeOperationsException(new RuntimeException(e));
        }
    }

    // TODO: Exploit passed signature.

    // The pattern below is to match an operation and, when found, return out
    // of the if clause. Doing it this way, we fall through to the
    // NoSuchMethodException for the case where we have an attribute but no handler.
    if (operationName.equals(IMPORT_URI_OPER)) {
        JmxUtils.checkParamsCount(IMPORT_URI_OPER, params, 3);
        mustBeCrawling();
        try {
            importUri((String) params[0], ((Boolean) params[1]).booleanValue(),
                    ((Boolean) params[2]).booleanValue());
        } catch (URIException e) {
            throw new RuntimeOperationsException(new RuntimeException(e));
        }
        return null;
    }

    if (operationName.equals(IMPORT_URIS_OPER)) {
        JmxUtils.checkParamsCount(IMPORT_URIS_OPER, params, 4);
        mustBeCrawling();
        return importUris((String) params[0], ((String) params[1]).toString(),
                ((Boolean) params[2]).booleanValue(), ((Boolean) params[3]).booleanValue());
    }

    if (operationName.equals(DUMP_URIS_OPER)) {
        JmxUtils.checkParamsCount(DUMP_URIS_OPER, params, 4);
        mustBeCrawling();
        if (!this.controller.isPaused()) {
            throw new RuntimeOperationsException(new IllegalArgumentException("Must " + "be paused"),
                    "Cannot dump URI's from running job.");
        }
        dumpUris((String) params[0], (String) params[1], ((Integer) params[2]).intValue(),
                ((Boolean) params[3]).booleanValue());
    }

    if (operationName.equals(PAUSE_OPER)) {
        JmxUtils.checkParamsCount(PAUSE_OPER, params, 0);
        mustBeCrawling();
        pause();
        return null;
    }

    if (operationName.equals(RESUME_OPER)) {
        JmxUtils.checkParamsCount(RESUME_OPER, params, 0);
        mustBeCrawling();
        resume();
        return null;
    }

    if (operationName.equals(FRONTIER_REPORT_OPER)) {
        JmxUtils.checkParamsCount(FRONTIER_REPORT_OPER, params, 1);
        mustBeCrawling();
        return getFrontierReport((String) params[0]);
    }

    if (operationName.equals(THREADS_REPORT_OPER)) {
        JmxUtils.checkParamsCount(THREADS_REPORT_OPER, params, 0);
        mustBeCrawling();
        return getThreadsReport();
    }

    if (operationName.equals(SEEDS_REPORT_OPER)) {
        JmxUtils.checkParamsCount(SEEDS_REPORT_OPER, params, 0);
        mustBeCrawling();
        StringWriter sw = new StringWriter();
        if (getStatisticsTracking() != null && getStatisticsTracking() instanceof StatisticsTracker) {
            ((StatisticsTracker) getStatisticsTracking()).writeSeedsReportTo(new PrintWriter(sw));
        } else {
            sw.write("Unsupported");
        }
        return sw.toString();
    }

    if (operationName.equals(CHECKPOINT_OPER)) {
        JmxUtils.checkParamsCount(CHECKPOINT_OPER, params, 0);
        mustBeCrawling();
        try {
            checkpoint();
        } catch (IllegalStateException e) {
            throw new RuntimeOperationsException(e);
        }
        return null;
    }

    if (operationName.equals(PROGRESS_STATISTICS_OPER)) {
        JmxUtils.checkParamsCount(PROGRESS_STATISTICS_OPER, params, 0);
        mustBeCrawling();
        return getStatisticsTracking().getProgressStatisticsLine();
    }

    if (operationName.equals(PROGRESS_STATISTICS_LEGEND_OPER)) {
        JmxUtils.checkParamsCount(PROGRESS_STATISTICS_LEGEND_OPER, params, 0);
        return getStatisticsTracking().progressStatisticsLegend();
    }

    throw new ReflectionException(new NoSuchMethodException(operationName),
            "Cannot find the operation " + operationName);
}
From source file: hudson.scm.CVSSCM.java
/**
 * Computes the changelog into an XML file.
 *
 * <p>
 * When we update the workspace, we'll compute the changelog by using its output to
 * make it faster. In the general case, we'll fall back to the slower approach where
 * we check all files in the workspace.
 *
 * @param changedFiles
 *      Files whose changelog should be checked for updates.
 *      This is provided if the previous operation is update, otherwise null,
 *      which means we have to fall back to the default slow computation.
 */
private boolean calcChangeLog(AbstractBuild build, FilePath ws, final List<String> changedFiles,
        File changelogFile, final BuildListener listener) throws InterruptedException {
    if (build.getPreviousBuild() == null || (changedFiles != null && changedFiles.isEmpty())) {
        // nothing to compare against, or no changes
        // (note that changedFiles==null means fallback, so we have to run cvs log.)
        listener.getLogger().println("$ no changes detected");
        return createEmptyChangeLog(changelogFile, listener, "changelog");
    }
    if (skipChangeLog) {
        listener.getLogger().println("Skipping changelog computation");
        return createEmptyChangeLog(changelogFile, listener, "changelog");
    }

    listener.getLogger().println("$ computing changelog");

    final String cvspassFile = getDescriptor().getCvspassFile();
    final String cvsExe = getDescriptor().getCvsExeOrDefault();

    OutputStream o = null;
    try {
        // range of time for detecting changes
        final Date startTime = build.getPreviousBuild().getTimestamp().getTime();
        final Date endTime = build.getTimestamp().getTime();
        final OutputStream out = o = new RemoteOutputStream(new FileOutputStream(changelogFile));

        ChangeLogResult result = ws.act(new FileCallable<ChangeLogResult>() {
            public ChangeLogResult invoke(File ws, VirtualChannel channel) throws IOException {
                final StringWriter errorOutput = new StringWriter();
                final boolean[] hadError = new boolean[1];

                ChangeLogTask task = new ChangeLogTask() {
                    public void log(String msg, int msgLevel) {
                        if (msgLevel == org.apache.tools.ant.Project.MSG_ERR)
                            hadError[0] = true;
                        // send error to listener. This seems like the route in which the changelog task
                        // sends output.
                        // Also in ChangeLogTask.getExecuteStreamHandler, we send stderr from CVS
                        // at WARN level.
                        if (msgLevel <= org.apache.tools.ant.Project.MSG_WARN) {
                            errorOutput.write(msg);
                            errorOutput.write('\n');
                            return;
                        }
                        if (debug) {
                            listener.getLogger().println(msg);
                        }
                    }
                };
                task.setProject(new org.apache.tools.ant.Project());
                task.setCvsExe(cvsExe);
                task.setDir(ws);
                if (cvspassFile.length() != 0)
                    task.setPassfile(new File(cvspassFile));

                if (canUseUpdate && cvsroot.startsWith("/")) {
                    // cvs log of built source trees unreliable in local access method:
                    // https://savannah.nongnu.org/bugs/index.php?15223
                    task.setCvsRoot(":fork:" + cvsroot);
                } else if (canUseUpdate && cvsroot.startsWith(":local:")) {
                    task.setCvsRoot(":fork:" + cvsroot.substring(7));
                } else {
                    task.setCvsRoot(cvsroot);
                }
                task.setCvsRsh(cvsRsh);
                task.setFailOnError(true);
                BufferedOutputStream bufferedOutput = new BufferedOutputStream(out);
                task.setDeststream(bufferedOutput);
                task.setTag(isTag() ? ":" + branch : branch);
                task.setStart(startTime);
                task.setEnd(endTime);
                if (changedFiles != null) {
                    // we can optimize the processing if we know what files have changed.
                    // but also try not to make the command line too long so as not to hit
                    // the system call limit on the command line length (see issue #389)
                    // the choice of the number is arbitrary, but normally we don't really
                    // expect continuous builds to have too many changes, so this should be OK.
                    if (changedFiles.size() < 100 || !Hudson.isWindows()) {
                        // if the directory doesn't exist, cvs changelog will die, so filter them out.
                        // this means we'll lose the log of those changes
                        for (String filePath : changedFiles) {
                            if (new File(ws, filePath).getParentFile().exists())
                                task.addFile(filePath);
                        }
                    }
                } else {
                    // fallback
                    if (!flatten)
                        task.setPackage(getAllModulesNormalized());
                }

                try {
                    task.execute();
                } catch (BuildException e) {
                    throw new BuildExceptionWithLog(e, errorOutput.toString());
                } finally {
                    bufferedOutput.close();
                }

                return new ChangeLogResult(hadError[0], errorOutput.toString());
            }
        });

        if (result.hadError) {
            // a non-fatal error must have occurred, such as a cvs changelog parsing error.
            listener.getLogger().print(result.errorOutput);
        }
        return true;
    } catch (BuildExceptionWithLog e) {
        // capture output from the task for diagnosis
        listener.getLogger().print(e.errorOutput);
        // then report an error
        BuildException x = (BuildException) e.getCause();
        PrintWriter w = listener.error(x.getMessage());
        w.println("Working directory is " + ws);
        x.printStackTrace(w);
        return false;
    } catch (RuntimeException e) {
        // a user reported an NPE inside the changeLog task.
        // we don't want a bug in Ant to prevent a build.
        e.printStackTrace(listener.error(e.getMessage()));
        return true; // so record the message but continue
    } catch (IOException e) {
        e.printStackTrace(listener.error("Failed to detect changelog"));
        return true;
    } finally {
        IOUtils.closeQuietly(o);
    }
}
From source file: org.kepler.objectmanager.ActorMetadata.java
/**
 * Try to locate and parse a MoML file as a class.
 */
protected ComponentEntity parseMoMLFile(String className) throws Exception {
    if (isDebugging)
        log.debug("parseMoMLFile(" + className + ")");

    JarFile jarFile = null;
    InputStream xmlStream = null;
    try {
        // first we need to find the file and read it
        File classFile = searchClasspath(className);
        StringWriter sw = new StringWriter();

        if (classFile.getName().endsWith(".jar")) {
            jarFile = new JarFile(classFile);
            ZipEntry entry = jarFile.getEntry(className.replace('.', '/') + ".xml");
            xmlStream = jarFile.getInputStream(entry);
        } else {
            xmlStream = new FileInputStream(classFile);
        }

        byte[] b = new byte[1024];
        int numread = xmlStream.read(b, 0, 1024);
        while (numread != -1) {
            String s = new String(b, 0, numread);
            sw.write(s);
            numread = xmlStream.read(b, 0, 1024);
        }
        sw.flush();

        // get the moml document
        String xmlDoc = sw.toString();
        sw.close();

        if (isDebugging)
            log.debug("**** MoMLParser ****");

        // use the moml parser to parse the doc
        MoMLParser parser = new MoMLParser();
        parser.reset();
        // System.out.println("processing " + className);
        NamedObj obj = parser.parse(xmlDoc);
        return (ComponentEntity) obj;
    } finally {
        if (jarFile != null) {
            jarFile.close();
        }
        if (xmlStream != null) {
            xmlStream.close();
        }
    }
}
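One caveat about the copy loop above: new String(b, 0, numread) decodes each chunk with the platform default charset and can split a multi-byte character that falls on a buffer boundary. A minimal alternative sketch, assuming the MoML files are UTF-8, reads characters through java.io.InputStreamReader with java.nio.charset.StandardCharsets instead of raw bytes:

Reader reader = new InputStreamReader(xmlStream, StandardCharsets.UTF_8);
StringWriter sw = new StringWriter();
char[] buf = new char[1024];
int n;
while ((n = reader.read(buf)) != -1) {
    // write(char[], int, int) appends only the characters read in this chunk
    sw.write(buf, 0, n);
}
String xmlDoc = sw.toString();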
From source file: com.github.gekoh.yagen.ddl.CreateDDL.java
private void addAuditTrigger(Dialect dialect, StringBuffer buf, String nameLC, Set<String> columns) {
    TableConfig tableConfig = tblNameToConfig.get(nameLC);

    String templateName = "AuditTrigger";

    if (!columns.containsAll(AUDIT_COLUMNS)) {
        if (tableConfig != null && tableConfig.getTableAnnotationOfType(Auditable.class) != null
                && columns.contains(AuditInfo.LAST_MODIFIED_AT)) {
            templateName += "SingleTimestamp";
        } else {
            return;
        }
    }

    if (isPostgreSql(dialect)) {
        writePostgreSqlAuditTrigger(dialect, buf, nameLC);
        return;
    }

    StringWriter wr = new StringWriter();
    VelocityContext context = new VelocityContext();
    context.put("liveTableName", nameLC);
    context.put("created_at", AuditInfo.CREATED_AT);
    context.put("created_by", AuditInfo.CREATED_BY);
    context.put("last_modified_at", AuditInfo.LAST_MODIFIED_AT);
    context.put("last_modified_by", AuditInfo.LAST_MODIFIED_BY);

    if (isOracle(dialect)) {
        writeOracleAuditTrigger(dialect, buf, context, nameLC, templateName + ".vm.pl.sql");
    } else {
        try {
            templateName += "SingleOperation.vm.pl.sql";
            wr.append(STATEMENT_SEPARATOR);
            writeTriggerSingleOperation(dialect, wr, templateName, context, nameLC, "_at", "I");
            wr.write("\n/\n");
            wr.append(STATEMENT_SEPARATOR);
            writeTriggerSingleOperation(dialect, wr, templateName, context, nameLC, "_at", "U");
            wr.write("\n/\n");
            buf.append(wr.toString());
        } catch (IOException e) {
            LOG.error("error writing audit triggers", e);
        }
    }
}
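The method above buffers Velocity output in a StringWriter before appending it to the DDL buffer; writeTriggerSingleOperation and the template names are the project's own helpers. As a generic sketch of that pattern only, using the standard Velocity API with a hypothetical template name and context key, and assuming a resource loader is configured so the template can be found:

// minimal sketch: render a Velocity template into a StringWriter
VelocityEngine engine = new VelocityEngine();
engine.init();

VelocityContext context = new VelocityContext();
context.put("liveTableName", "customer");                          // hypothetical key/value

StringWriter wr = new StringWriter();
Template template = engine.getTemplate("AuditTrigger.vm.pl.sql");  // hypothetical template path
template.merge(context, wr);   // Velocity writes the rendered text into the StringWriter

String ddl = wr.toString();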
From source file: org.kawanfw.sql.jdbc.ResultSetHttp.java
/**
 * Retrieves the value of the designated column in the current row of this
 * <code>ResultSet</code> object as a stream of ASCII characters. The value
 * can then be read in chunks from the stream. This method is particularly
 * suitable for retrieving large <code>LONGVARCHAR</code> values. The JDBC
 * driver will do any necessary conversion from the database format into
 * ASCII.
 *
 * <P>
 * <B>Note:</B> All the data in the returned stream must be read prior to
 * getting the value of any other column. The next call to a getter method
 * implicitly closes the stream. Also, a stream may return <code>0</code>
 * when the method <code>InputStream.available</code> is called whether
 * there is data available or not.
 *
 * @param columnIndex
 *            the first column is 1, the second is 2, ...
 * @return a Java input stream that delivers the database column value as a
 *         stream of one-byte ASCII characters; if the value is SQL
 *         <code>NULL</code>, the value returned is <code>null</code>
 * @exception SQLException
 *                if a database access error occurs
 */
public java.io.InputStream getAsciiStream(int columnIndex) throws SQLException {
    testIfClosed();

    String value = getValueOfList(columnIndex, true);

    if (value == null) {
        // Not sure what to do
        throw new SQLException("Column Index is out of bound: " + columnIndex);
    }

    InputStream in = null;

    // Check if we must get the byte array from an input stream
    if (value.startsWith(TransportConverter.KAWANFW_BYTES_STREAM_FILE)) {
        String remoteFile = StringUtils.substringAfter(value, TransportConverter.KAWANFW_BYTES_STREAM_FILE);

        // HACK
        if (!remoteFile.startsWith("/")) {
            remoteFile = "/" + remoteFile;
        }

        in = getAsciiInputStreamFromRemoteFile(remoteFile);
    } else {
        String stringValue = getString(columnIndex);
        debug("AsciiStream in String!");

        try {
            // Put back clean CR_LF
            BufferedReader bufferedReader = new BufferedReader(new StringReader(stringValue));
            StringWriter stringWriter = new StringWriter();

            String line = null;
            while ((line = bufferedReader.readLine()) != null) {
                stringWriter.write(line + CR_LF);
            }

            String cleaned = stringWriter.toString();
            byte[] bytes = cleaned.getBytes();
            in = new ByteArrayInputStream(bytes);
        } catch (Exception e) {
            throw new SQLException(e.getMessage(), e);
        }
    }

    return in;
}
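The non-stream branch above is a general StringWriter idiom: read a string line by line, write it back with a uniform line separator, then expose the bytes as a stream. A standalone sketch of just that idiom, using only java.io and java.nio.charset classes (CR_LF is the driver's own constant and is assumed here to be "\r\n"; US-ASCII is assumed for the byte conversion):

String stringValue = "line1\nline2\rline3\r\n";            // hypothetical column value
BufferedReader bufferedReader = new BufferedReader(new StringReader(stringValue));
StringWriter stringWriter = new StringWriter();

String line;
while ((line = bufferedReader.readLine()) != null) {
    stringWriter.write(line);      // write(String) appends the line to the internal buffer
    stringWriter.write("\r\n");    // assumed CR_LF value
}

InputStream in = new ByteArrayInputStream(
        stringWriter.toString().getBytes(StandardCharsets.US_ASCII));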