List of usage examples for java.io.SequenceInputStream

public SequenceInputStream(InputStream s1, InputStream s2)

Initializes a newly created SequenceInputStream by remembering the two arguments, which will be read in order, first s1 and then s2, to provide the bytes to be read from this SequenceInputStream.
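As a quick illustration before the real-world examples, here is a minimal, self-contained sketch of the two-argument constructor: the joined stream yields every byte of the first stream, then every byte of the second. The class name and string contents are arbitrary.

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.SequenceInputStream;
    import java.nio.charset.StandardCharsets;

    public class SequenceInputStreamDemo {
        public static void main(String[] args) throws IOException {
            InputStream first = new ByteArrayInputStream("Hello, ".getBytes(StandardCharsets.UTF_8));
            InputStream second = new ByteArrayInputStream("world!".getBytes(StandardCharsets.UTF_8));
            // Reads "Hello, " to exhaustion, then transparently continues with "world!"
            try (InputStream joined = new SequenceInputStream(first, second)) {
                int b;
                while ((b = joined.read()) != -1) {
                    System.out.print((char) b);
                }
            }
            // prints: Hello, world!
        }
    }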
From source file: pl.otros.logview.importer.DetectOnTheFlyLogImporter.java

@Override
public void importLogs(InputStream in, LogDataCollector dataCollector, ParsingContext parsingContext) {
    HashMap<String, Object> customContextProperties = parsingContext.getCustomConextProperties();
    if (customContextProperties.containsKey(PROPERTY_LOG_IMPORTER)) {
        // Log importer detected, use it
        LogImporter logImporter = (LogImporter) customContextProperties.get(PROPERTY_LOG_IMPORTER);
        LOGGER.debug(String.format("Have log importer detected (%s), will use it", logImporter.getName()));
        logImporter.importLogs(in, dataCollector, parsingContext);
    } else {
        try {
            byte[] buff = new byte[16 * 1024];
            int read = 0;
            while ((read = in.read(buff)) > 0) {
                try (ByteArrayOutputStream byteArrayOutputStream = (ByteArrayOutputStream) customContextProperties
                        .get(PROPERTY_BYTE_BUFFER)) {
                    int totalRead = byteArrayOutputStream.size();
                    totalRead += read;
                    if (totalRead < detectTryMinimum) {
                        LOGGER.debug(String.format("Too small amount of data to detect log importer [%db]", totalRead));
                        byteArrayOutputStream.write(buff, 0, read);
                    } else if (totalRead > detectTryMaximum) {
                        // stop parsing, protect against loading unlimited data
                        parsingContext.setParsingInProgress(false);
                        LOGGER.warn("Reached maximum size of log importer detection buffer, will not load more data");
                    } else {
                        // try to detect log
                        byteArrayOutputStream.write(buff, 0, read);
                        LOGGER.debug("Trying to detect log importer");
                        Optional<LogImporter> maybeLogImporter = Utils.detectLogImporter(logImporters,
                                byteArrayOutputStream.toByteArray());
                        if (maybeLogImporter.isPresent()) {
                            final LogImporter detectLogImporter = maybeLogImporter.get();
                            LOGGER.debug(String.format("Log importer detected (%s), this log importer will be used",
                                    detectLogImporter.getName()));
                            detectLogImporter.initParsingContext(parsingContext);
                            customContextProperties.put(PROPERTY_LOG_IMPORTER, detectLogImporter);
                            // Splice the buffered bytes back in front of the unread remainder
                            // and hand the reconstructed stream to the detected importer
                            byte[] buf = byteArrayOutputStream.toByteArray();
                            try (SequenceInputStream sequenceInputStream = new SequenceInputStream(
                                    new ByteArrayInputStream(buf), in)) {
                                detectLogImporter.importLogs(sequenceInputStream, dataCollector, parsingContext);
                                return;
                            }
                        }
                    }
                }
            }
        } catch (IOException e) {
            e.printStackTrace();
            LOGGER.warn("IOException reading log file " + parsingContext.getLogSource());
        } finally {
            IOUtils.closeQuietly(in);
        }
    }
}
From source file: nya.miku.wishmaster.chans.nullchan.AbstractInstant0chan.java

@Override
protected WakabaReader getKusabaReader(InputStream stream, UrlPageModel urlModel) {
    if (urlModel != null && urlModel.chanName != null && urlModel.chanName.equals("expand")) {
        // Prepend a synthetic opening tag so the reader sees a well-formed form element
        stream = new SequenceInputStream(new ByteArrayInputStream("<form id=\"delform\">".getBytes()), stream);
    }
    return new Instant0chanReader(stream, canCloudflare());
}
From source file: org.ligoj.app.plugin.id.resource.batch.AbstractBatchResource.java

protected <T extends AbstractBatchTask<B>> long batchInternal(final InputStream uploadedFile,
        final String[] columns, final String encoding, final String[] defaultColumns, final Class<B> batchType,
        final Class<T> taskType, final Boolean quiet) throws IOException {

    // Public identifier is based on system date
    final long id = System.currentTimeMillis();

    // Check column names' validity
    final String[] sanitizeColumns = ArrayUtils.isEmpty(columns) ? defaultColumns : columns;
    checkHeaders(defaultColumns, sanitizeColumns);

    // Build CSV header from array
    final String csvHeaders = StringUtils.chop(ArrayUtils.toString(sanitizeColumns)).substring(1).replace(',', ';') + "\n";

    // Build entries with the CSV header prepended to the uploaded content
    final String encSafe = ObjectUtils.defaultIfNull(encoding, StandardCharsets.UTF_8.name());
    final ByteArrayInputStream input = new ByteArrayInputStream(csvHeaders.getBytes(encSafe));
    final List<B> entries = csvForBean.toBean(batchType,
            new InputStreamReader(new SequenceInputStream(input, uploadedFile), encSafe));
    entries.removeIf(Objects::isNull);

    // Validate them
    validator.validateCheck(entries);

    // Clone the context for the asynchronous import
    final BatchTaskVo<B> importTask = new BatchTaskVo<>();
    importTask.setEntries(entries);
    importTask.setPrincipal(SecurityContextHolder.getContext().getAuthentication().getName());
    importTask.setId(id);
    importTask.setQuiet(BooleanUtils.isTrue(quiet));

    // Schedule the import
    final T task = SpringUtils.getBean(taskType);
    task.configure(importTask);
    executor.execute(task);

    // Also clean up the previous tasks
    cleanup();

    // Expose the task with an internal identifier, based on the current user plus the public identifier
    imports.put(importTask.getPrincipal() + "-" + importTask.getId(), importTask);

    // Return the private task identifier
    return id;
}
From source file: org.lockss.filter.ZipFilterInputStream.java

/**
 * <p>
 * Before reading bytes, ensures that the stream has not been closed, that
 * end of file has not been reached on the underlying Zip input stream, and
 * that the previous read did not exhaust an entry; if so, it opens the next
 * entry's input stream and prepends it with the entry's normalized name.
 * </p>
 *
 * @throws IOException
 */
private void ensureInput() throws IOException {
    if (zipInputStream == null) {
        throw new IOException("stream closed");
    }
    while (!eof && currentInputStream == null) {
        ZipEntry ze = zipInputStream.getNextEntry();
        if (ze == null) {
            eof = true;
            return;
        }
        String zipEntryName = ze.getName();
        String normalizedZipEntryName = zipEntryName;
        if (normalizedZipEntryName.startsWith("./")) {
            normalizedZipEntryName = normalizedZipEntryName.substring(2);
        }
        if (keepZipEntry(ze, normalizedZipEntryName)) {
            // Prepend the normalized entry name to the entry's content; the close shield
            // keeps the SequenceInputStream from closing the shared ZipInputStream
            currentInputStream = new SequenceInputStream(
                    new ByteArrayInputStream(normalizedZipEntryName.getBytes(Constants.ENCODING_UTF_8)),
                    new CloseShieldInputStream(zipInputStream));
        }
    }
}
From source file: org.dcm4che.tool.hl72xml.HL72Xml.java

public void parse(InputStream is) throws IOException, TransformerConfigurationException, SAXException {
    byte[] buf = new byte[256];
    int len = is.read(buf);
    HL7Segment msh = HL7Segment.parseMSH(buf, buf.length);
    String charsetName = HL7Charset.toCharsetName(msh.getField(17, charset));
    // Push the already-read MSH bytes back in front of the remaining stream,
    // now that the charset sniffed from MSH is known
    Reader reader = new InputStreamReader(new SequenceInputStream(new ByteArrayInputStream(buf, 0, len), is),
            charsetName);
    TransformerHandler th = getTransformerHandler();
    th.getTransformer().setOutputProperty(OutputKeys.INDENT, indent ? "yes" : "no");
    th.setResult(new StreamResult(System.out));
    HL7Parser hl7Parser = new HL7Parser(th);
    hl7Parser.setIncludeNamespaceDeclaration(includeNamespaceDeclaration);
    hl7Parser.parse(reader);
}
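Several of the examples above (AbstractInstant0chan, AbstractBatchResource, HL72Xml) share one idiom: consume or synthesize a prefix, then use SequenceInputStream to splice it back in front of the unread remainder so downstream code sees a single continuous stream. Here is a distilled sketch of that idiom; StreamPeek, Peeked, and peek are hypothetical names for illustration, not from any of the projects above.

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.SequenceInputStream;
    import java.util.Arrays;

    public final class StreamPeek {

        /** Holds the inspected prefix and a stream that replays it plus the remainder. */
        public static final class Peeked {
            public final byte[] prefix;
            public final InputStream restored;
            Peeked(byte[] prefix, InputStream restored) {
                this.prefix = prefix;
                this.restored = restored;
            }
        }

        /**
         * Reads up to maxPrefix bytes for inspection, then splices them back in
         * front of the unread remainder with SequenceInputStream.
         */
        public static Peeked peek(InputStream in, int maxPrefix) throws IOException {
            byte[] buf = new byte[maxPrefix];
            int offset = 0;
            int read;
            // read() may return fewer bytes than requested, so loop until full or EOF
            while (offset < maxPrefix && (read = in.read(buf, offset, maxPrefix - offset)) != -1) {
                offset += read;
            }
            byte[] prefix = Arrays.copyOf(buf, offset);
            return new Peeked(prefix, new SequenceInputStream(new ByteArrayInputStream(prefix), in));
        }
    }

A caller can inspect peeked.prefix to pick a parser or charset, then read peeked.restored as if nothing had been consumed.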
From source file: de.spqrinfo.cups4j.operations.IppOperation.java

/**
 * Sends a request to the provided URL.
 *
 * @param url
 * @param ippBuf
 * @param documentStream
 * @return result
 * @throws Exception
 */
private IppResult sendRequest(URL url, ByteBuffer ippBuf, InputStream documentStream) throws Exception {
    IppResult ippResult = null;
    if (ippBuf == null) {
        return null;
    }
    if (url == null) {
        return null;
    }

    HttpClient client = new DefaultHttpClient();

    // will not work with older versions of CUPS!
    client.getParams().setParameter("http.protocol.version", HttpVersion.HTTP_1_1);
    client.getParams().setParameter("http.socket.timeout", new Integer(10000));
    client.getParams().setParameter("http.connection.timeout", new Integer(10000));
    client.getParams().setParameter("http.protocol.content-charset", "UTF-8");
    client.getParams().setParameter("http.method.response.buffer.warnlimit", new Integer(8092));
    // probably not working with older CUPS versions
    client.getParams().setParameter("http.protocol.expect-continue", Boolean.valueOf(true));

    HttpPost httpPost = new HttpPost(new URI("http://" + url.getHost() + ":" + ippPort) + url.getPath());
    httpPost.getParams().setParameter("http.socket.timeout", new Integer(10000));

    byte[] bytes = new byte[ippBuf.limit()];
    ippBuf.get(bytes);
    ByteArrayInputStream headerStream = new ByteArrayInputStream(bytes);

    // If we need to send a document, concatenate InputStreams
    InputStream inputStream = headerStream;
    if (documentStream != null) {
        inputStream = new SequenceInputStream(headerStream, documentStream);
    }

    // set length to -1 to advise the entity to read until EOF
    InputStreamEntity requestEntity = new InputStreamEntity(inputStream, -1);
    requestEntity.setContentType(IPP_MIME_TYPE);
    httpPost.setEntity(requestEntity);
    httpStatusLine = null;

    ResponseHandler<byte[]> handler = new ResponseHandler<byte[]>() {
        public byte[] handleResponse(HttpResponse response) throws ClientProtocolException, IOException {
            HttpEntity entity = response.getEntity();
            httpStatusLine = response.getStatusLine().toString();
            if (entity != null) {
                return EntityUtils.toByteArray(entity);
            } else {
                return null;
            }
        }
    };

    byte[] result = client.execute(httpPost, handler);

    IppResponse ippResponse = new IppResponse();
    ippResult = ippResponse.getResponse(ByteBuffer.wrap(result));
    ippResult.setHttpStatusResponse(httpStatusLine);
    // IppResultPrinter.print(ippResult);

    client.getConnectionManager().shutdown();
    return ippResult;
}
From source file: org.deviceconnect.message.http.impl.factory.AbstractHttpMessageFactory.java

/**
 * Parses the HTTP message body into a dConnect message.
 *
 * @param dmessage the dConnect message to fill
 * @param message the HTTP message to parse
 */
protected void parseHttpBody(final DConnectMessage dmessage, final M message) {
    mLogger.entering(getClass().getName(), "newDConnectMessage", new Object[] { dmessage, message });
    HttpEntity entity = getHttpEntity(message);
    if (entity != null) {
        MimeStreamParser parser = new MimeStreamParser(new MimeEntityConfig());
        MultipartContentHandler handler = new MultipartContentHandler(dmessage);
        parser.setContentHandler(handler);

        // Rebuild the raw header block so the MIME parser sees headers followed by the body
        StringBuilder headerBuffer = new StringBuilder();
        for (Header header : message.getAllHeaders()) {
            headerBuffer.append(header.getName());
            headerBuffer.append(": ");
            headerBuffer.append(header.getValue());
            headerBuffer.append(Character.toChars(HTTP.CR));
            headerBuffer.append(Character.toChars(HTTP.LF));
            mLogger.fine("header: " + header.getName() + ":" + header.getValue());
        }
        headerBuffer.append(Character.toChars(HTTP.CR));
        headerBuffer.append(Character.toChars(HTTP.LF));

        try {
            parser.parse(new SequenceInputStream(
                    new ByteArrayInputStream(headerBuffer.toString().getBytes("US-ASCII")),
                    entity.getContent()));
        } catch (IllegalStateException e) {
            mLogger.log(Level.FINE, e.toString(), e);
            mLogger.warning(e.toString());
        } catch (MimeException e) {
            mLogger.log(Level.FINE, e.toString(), e);
            mLogger.warning(e.toString());
        } catch (IOException e) {
            mLogger.log(Level.FINE, e.toString(), e);
            mLogger.warning(e.toString());
        }
    }
    mLogger.exiting(getClass().getName(), "newDConnectMessage");
}
From source file: org.apache.james.mailbox.hbase.mail.HBaseMailboxMessage.java

@Override
public InputStream getFullContent() throws IOException {
    return new SequenceInputStream(getHeaderContent(), getBodyContent());
}
From source file: org.apache.james.mailbox.store.ImmutableMailboxMessage.java

@Override
public InputStream getFullContent() {
    return new SequenceInputStream(new ByteArrayInputStream(headerContent),
            new ByteArrayInputStream(bodyContent));
}
From source file: com.linuxbox.enkive.docstore.AbstractDocStoreService.java

@Override
public StoreRequestResult store(Document document) throws DocStoreException {
    StoreRequestResult storeResult = null;
    MessageDigest messageDigest = null;
    try {
        messageDigest = MessageDigest.getInstance(HASH_ALGORITHM);
    } catch (NoSuchAlgorithmException e) {
        throw new DocStoreException(e);
    }

    final long startTime = System.currentTimeMillis();

    // begin the hash calculation using the mime type, file extension, and
    // binary encoding, so if the same data comes in but is claimed to be
    // different in any of those aspects, it will be stored separately; we
    // don't expect this to happen often if at all, but doing so makes
    // everything else easier
    messageDigest.update(getFileTypeEncodingDigestPrime(document));

    byte[] inMemoryBuffer = new byte[inMemoryLimit];
    try {
        InputStream originalInputStream = document.getEncodedContentStream();

        // keep calling read until we either fill the in-memory buffer or we hit EOF
        int offset = 0;
        int result;
        do {
            result = originalInputStream.read(inMemoryBuffer, offset, inMemoryLimit - offset);
            if (result > 0) {
                offset += result;
            }
        } while (result >= 0 && offset < inMemoryLimit);

        if (result < 0) {
            // was able to read the whole thing in, and offset indicates length
            messageDigest.update(inMemoryBuffer, 0, offset);
            final byte[] hashBytes = messageDigest.digest();
            storeResult = storeKnownHash(document, hashBytes, inMemoryBuffer, offset);
        } else {
            // could not read the whole thing into a fixed-size buffer, so store
            // the document, determine its name after the fact, and rename it

            // we first need to do some input stream magic; we've already
            // read some of the data into our buffer, so convert it into an
            // input stream and then combine it and the original input
            // stream as a sequence input stream to then create a hashing
            // input stream
            ByteArrayInputStream alreadyReadStream = new ByteArrayInputStream(inMemoryBuffer, 0, offset);
            SequenceInputStream combinedStream = new SequenceInputStream(alreadyReadStream, originalInputStream);
            HashingInputStream hashingInputStream = new HashingInputStream(messageDigest, combinedStream);
            storeResult = storeAndDetermineHash(document, hashingInputStream);
        }

        if (!storeResult.isAlreadyStored()) {
            indexerQueueService.enqueue(storeResult.getIdentifier(), storeResult.getShardKey(),
                    DocStoreConstants.QUEUE_ENTRY_INDEX_DOCUMENT);
        }
        return storeResult;
    } catch (IOException e) {
        throw new DocStoreException(e);
    } catch (QueueServiceException e) {
        throw new DocStoreException("could not add index event to queue");
    } finally {
        if (LOGGER.isTraceEnabled()) {
            final long endTime = System.currentTimeMillis();
            LOGGER.trace("TIMING: " + (endTime - startTime) + " ms to "
                    + (storeResult.isAlreadyStored() ? "determine already stored document " : "store document ")
                    + storeResult.getIdentifier());
        }
    }
}
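The AbstractDocStoreService example above combines the splice with hashing: fill a fixed buffer, and only if the stream overflows it, replay the prefix and hash the remainder on the fly. Here is a hedged sketch of that buffer-then-splice idea using the JDK's java.security.DigestInputStream; HashOrSplice and hashingStream are hypothetical names, and unlike the original this sketch leaves storage and renaming to the caller.

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.SequenceInputStream;
    import java.security.DigestInputStream;
    import java.security.MessageDigest;
    import java.util.Arrays;

    public final class HashOrSplice {

        /**
         * Tries to read the whole stream into a limit-sized buffer. If it fits,
         * the digest is updated immediately and the content is replayed from
         * memory; otherwise the already-read bytes are spliced back in front of
         * the remainder so a DigestInputStream can hash while the caller reads.
         */
        public static InputStream hashingStream(InputStream in, int limit, MessageDigest md)
                throws IOException {
            byte[] buffer = new byte[limit];
            int offset = 0;
            int read;
            while (offset < limit && (read = in.read(buffer, offset, limit - offset)) != -1) {
                offset += read;
            }
            if (offset < limit) {
                // EOF reached: the whole stream fit in memory, hash it now
                md.update(buffer, 0, offset);
                return new ByteArrayInputStream(buffer, 0, offset);
            }
            // Buffer filled: splice prefix + remainder and hash lazily; the digest
            // is complete only once the returned stream has been fully consumed
            InputStream combined = new SequenceInputStream(
                    new ByteArrayInputStream(Arrays.copyOf(buffer, offset)), in);
            return new DigestInputStream(combined, md);
        }
    }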