List of usage examples for java.io.PushbackInputStream.read()
public int read() throws IOException
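Before the project examples, here is a minimal self-contained sketch (not taken from any of the sources below) of the core pattern they all share: read a byte to peek at the stream, then unread() it so downstream consumers see the untouched data. The class name PeekExample is made up for illustration.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.PushbackInputStream;
import java.nio.charset.StandardCharsets;

public class PeekExample {
    public static void main(String[] args) throws IOException {
        byte[] payload = "hello".getBytes(StandardCharsets.US_ASCII);
        // Single-argument constructor: pushback buffer of exactly one byte
        PushbackInputStream in = new PushbackInputStream(new ByteArrayInputStream(payload));

        int first = in.read();            // consume one byte to inspect it
        System.out.println((char) first); // h
        in.unread(first);                 // push it back; it will be re-read next

        int again = in.read();            // the same byte comes back
        System.out.println((char) again); // h
        // read() returns -1 at end of stream; never unread(-1), since the
        // int is narrowed to a byte and would come back as 0xFF, not EOF.
    }
}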
From source file: org.apache.tika.parser.rtf.TextExtractor.java

private void parseControlWord(int firstChar, PushbackInputStream in) throws IOException, SAXException, TikaException {
    addControl(firstChar);
    int b = in.read();
    while (isAlpha(b)) {
        addControl(b);
        b = in.read();
    }
    boolean hasParam = false;
    boolean negParam = false;
    if (b == '-') {
        negParam = true;
        hasParam = true;
        b = in.read();
    }
    int param = 0;
    while (isDigit(b)) {
        param *= 10;
        param += (b - '0');
        hasParam = true;
        b = in.read();
    }
    // space is consumed as part of the control word,
    // but is not added to the control word
    if (b != ' ') {
        in.unread(b);
    }
    if (hasParam) {
        if (negParam) {
            param = -param;
        }
        processControlWord(param, in);
    } else {
        processControlWord();
    }
    pendingControlCount = 0;
}
From source file: org.apache.tika.parser.rtf.TextExtractor.java

private void processGroupStart(PushbackInputStream in) throws IOException {
    ansiSkip = 0;
    // Push current groupState onto the stack
    groupStates.add(groupState);
    // Make new GroupState
    groupState = new GroupState(groupState);
    assert groupStates.size() == groupState.depth : "size=" + groupStates.size() + " depth=" + groupState.depth;
    if (uprState == 0) {
        uprState = 1;
        groupState.ignore = true;
    }
    // Check for ignorable groups. Note that
    // sometimes we un-ignore within this group, eg
    // when handling upr escape.
    int b2 = in.read();
    if (b2 == '\\') {
        int b3 = in.read();
        if (b3 == '*') {
            groupState.ignore = true;
        }
        // Both bytes may be pushed back, so the stream passed in must
        // have a pushback buffer of at least 2 bytes.
        in.unread(b3);
    }
    in.unread(b2);
}
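The Tika method above calls unread() twice in a row, which only succeeds because the stream it receives was constructed with a pushback buffer of at least two bytes; the single-argument constructor allows exactly one. A minimal sketch of that two-byte-lookahead setup, with hypothetical names (TwoBytePeek, nextIsStarredGroup) that are not part of Tika:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.PushbackInputStream;
import java.nio.charset.StandardCharsets;

public class TwoBytePeek {
    // Peek two bytes ahead without consuming them. The stream must have
    // been constructed with a pushback capacity of at least 2, or the
    // second unread() throws IOException.
    static boolean nextIsStarredGroup(PushbackInputStream in) throws IOException {
        int b1 = in.read();
        int b2 = in.read();
        if (b2 != -1) in.unread(b2); // unread in reverse order of reading
        if (b1 != -1) in.unread(b1);
        return b1 == '\\' && b2 == '*';
    }

    public static void main(String[] args) throws IOException {
        byte[] rtf = "\\*\\generator".getBytes(StandardCharsets.US_ASCII);
        // The two-argument constructor sets the pushback buffer size.
        PushbackInputStream in = new PushbackInputStream(new ByteArrayInputStream(rtf), 2);
        System.out.println(nextIsStarredGroup(in)); // true
        System.out.println((char) in.read());       // '\' -- both bytes were restored
    }
}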
From source file: org.apache.usergrid.services.assets.data.AWSBinaryStore.java

@Override
public void write(final UUID appId, final Entity entity, InputStream inputStream) throws Exception {
    String uploadFileName = AssetUtils.buildAssetKey(appId, entity);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    long written = IOUtils.copyLarge(inputStream, baos, 0, FIVE_MB);
    byte[] data = baos.toByteArray();
    InputStream awsInputStream = new ByteArrayInputStream(data);
    final Map<String, Object> fileMetadata = AssetUtils.getFileMetadata(entity);
    fileMetadata.put(AssetUtils.LAST_MODIFIED, System.currentTimeMillis());
    String mimeType = AssetMimeHandler.get().getMimeType(entity, data);
    Boolean overSizeLimit = false;
    EntityManager em = emf.getEntityManager(appId);
    if (written < FIVE_MB) { // total smaller than 5mb
        ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(written);
        om.setContentType(mimeType);
        PutObjectResult result = null;
        result = getS3Client().putObject(bucketName, uploadFileName, awsInputStream, om);
        String md5sum = Hex.encodeHexString(Base64.decodeBase64(result.getContentMd5()));
        String eTag = result.getETag();
        fileMetadata.put(AssetUtils.CONTENT_LENGTH, written);
        if (md5sum != null)
            fileMetadata.put(AssetUtils.CHECKSUM, md5sum);
        fileMetadata.put(AssetUtils.E_TAG, eTag);
        em.update(entity);
    } else { // bigger than 5mb... dump 5mb tmp files and upload from them
        written = 0; // reset written to 0; we haven't actually written anything yet
        int partNumber = 1;
        int firstByte = 0;
        Boolean isFirstChunck = true;
        List<PartETag> partETags = new ArrayList<PartETag>();
        // get the s3 client in order to initialize the multipart request
        getS3Client();
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, uploadFileName);
        InitiateMultipartUploadResult initResponse = getS3Client().initiateMultipartUpload(initRequest);
        InputStream firstChunck = new ByteArrayInputStream(data);
        PushbackInputStream chunckableInputStream = new PushbackInputStream(inputStream, 1);
        // determine max file size allowed, default to 50mb
        long maxSizeBytes = 50 * FileUtils.ONE_MB;
        String maxSizeMbString = properties.getProperty("usergrid.binary.max-size-mb", "50");
        if (StringUtils.isNumeric(maxSizeMbString)) {
            maxSizeBytes = Long.parseLong(maxSizeMbString) * FileUtils.ONE_MB;
        }
        // always allow files up to 5mb
        if (maxSizeBytes < 5 * FileUtils.ONE_MB) {
            maxSizeBytes = 5 * FileUtils.ONE_MB;
        }
        // peek one byte to see whether any data remains, then push it back
        while (-1 != (firstByte = chunckableInputStream.read())) {
            long partSize = 0;
            chunckableInputStream.unread(firstByte);
            File tempFile = File.createTempFile(
                    entity.getUuid().toString().concat("-part").concat(String.valueOf(partNumber)), "tmp");
            tempFile.deleteOnExit();
            OutputStream os = null;
            try {
                os = new BufferedOutputStream(new FileOutputStream(tempFile.getAbsolutePath()));
                if (isFirstChunck == true) {
                    partSize = IOUtils.copyLarge(firstChunck, os, 0, (FIVE_MB));
                    isFirstChunck = false;
                } else {
                    partSize = IOUtils.copyLarge(chunckableInputStream, os, 0, (FIVE_MB));
                }
                written += partSize;
                if (written > maxSizeBytes) {
                    overSizeLimit = true;
                    logger.error("OVERSIZED FILE ({}). STARTING ABORT", written);
                    break; // set flag here and break out of loop to run abort
                }
            } finally {
                IOUtils.closeQuietly(os);
            }
            FileInputStream chunk = new FileInputStream(tempFile);
            Boolean isLastPart = -1 == (firstByte = chunckableInputStream.read());
            if (!isLastPart)
                chunckableInputStream.unread(firstByte);
            UploadPartRequest uploadRequest = new UploadPartRequest().withUploadId(initResponse.getUploadId())
                    .withBucketName(bucketName).withKey(uploadFileName).withInputStream(chunk)
                    .withPartNumber(partNumber).withPartSize(partSize).withLastPart(isLastPart);
            partETags.add(getS3Client().uploadPart(uploadRequest).getPartETag());
            partNumber++;
        }
        // check for flag here then abort.
        if (overSizeLimit) {
            AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(bucketName,
                    uploadFileName, initResponse.getUploadId());
            ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(bucketName);
            MultipartUploadListing listResult = getS3Client().listMultipartUploads(listRequest);
            // update the entity with the error.
            try {
                logger.error("starting update of entity due to oversized asset");
                fileMetadata.put("error", "Asset size is larger than max size of " + maxSizeBytes);
                em.update(entity);
            } catch (Exception e) {
                logger.error("Error updating entity with error message", e);
            }
            int timesIterated = 20;
            // loop and abort all the multipart uploads
            while (listResult.getMultipartUploads().size() != 0 && timesIterated > 0) {
                getS3Client().abortMultipartUpload(abortRequest);
                Thread.sleep(1000);
                timesIterated--;
                listResult = getS3Client().listMultipartUploads(listRequest);
                if (logger.isDebugEnabled()) {
                    logger.debug("Files that haven't been aborted are: {}",
                            listResult.getMultipartUploads().listIterator().toString());
                }
            }
            if (timesIterated == 0) {
                logger.error("File parts that couldn't be aborted in 20 seconds are:");
                Iterator<MultipartUpload> multipartUploadIterator = listResult.getMultipartUploads().iterator();
                while (multipartUploadIterator.hasNext()) {
                    logger.error(multipartUploadIterator.next().getKey());
                }
            }
        } else {
            CompleteMultipartUploadRequest request = new CompleteMultipartUploadRequest(bucketName,
                    uploadFileName, initResponse.getUploadId(), partETags);
            CompleteMultipartUploadResult amazonResult = getS3Client().completeMultipartUpload(request);
            fileMetadata.put(AssetUtils.CONTENT_LENGTH, written);
            fileMetadata.put(AssetUtils.E_TAG, amazonResult.getETag());
            em.update(entity);
        }
    }
}
From source file: org.gaul.s3proxy.S3ProxyHandler.java

private static void handleSetContainerAcl(HttpServletRequest request, HttpServletResponse response,
        InputStream is, BlobStore blobStore, String containerName) throws IOException, S3Exception {
    ContainerAccess access;
    String cannedAcl = request.getHeader("x-amz-acl");
    if (cannedAcl == null || "private".equalsIgnoreCase(cannedAcl)) {
        access = ContainerAccess.PRIVATE;
    } else if ("public-read".equalsIgnoreCase(cannedAcl)) {
        access = ContainerAccess.PUBLIC_READ;
    } else if (CANNED_ACLS.contains(cannedAcl)) {
        throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);
    } else {
        response.sendError(HttpServletResponse.SC_BAD_REQUEST);
        return;
    }
    PushbackInputStream pis = new PushbackInputStream(is);
    int ch = pis.read();
    if (ch != -1) {
        pis.unread(ch);
        AccessControlPolicy policy = new XmlMapper().readValue(pis, AccessControlPolicy.class);
        String accessString = mapXmlAclsToCannedPolicy(policy);
        if (accessString.equals("private")) {
            access = ContainerAccess.PRIVATE;
        } else if (accessString.equals("public-read")) {
            access = ContainerAccess.PUBLIC_READ;
        } else {
            throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);
        }
    }
    blobStore.setContainerAccess(containerName, access);
}
From source file: org.gaul.s3proxy.S3ProxyHandler.java

private static void handleSetBlobAcl(HttpServletRequest request, HttpServletResponse response,
        InputStream is, BlobStore blobStore, String containerName, String blobName)
        throws IOException, S3Exception {
    BlobAccess access;
    String cannedAcl = request.getHeader("x-amz-acl");
    if (cannedAcl == null || "private".equalsIgnoreCase(cannedAcl)) {
        access = BlobAccess.PRIVATE;
    } else if ("public-read".equalsIgnoreCase(cannedAcl)) {
        access = BlobAccess.PUBLIC_READ;
    } else if (CANNED_ACLS.contains(cannedAcl)) {
        throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);
    } else {
        response.sendError(HttpServletResponse.SC_BAD_REQUEST);
        return;
    }
    PushbackInputStream pis = new PushbackInputStream(is);
    int ch = pis.read();
    if (ch != -1) {
        pis.unread(ch);
        AccessControlPolicy policy = new XmlMapper().readValue(pis, AccessControlPolicy.class);
        String accessString = mapXmlAclsToCannedPolicy(policy);
        if (accessString.equals("private")) {
            access = BlobAccess.PRIVATE;
        } else if (accessString.equals("public-read")) {
            access = BlobAccess.PUBLIC_READ;
        } else {
            throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);
        }
    }
    blobStore.setBlobAccess(containerName, blobName, access);
}
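The two S3Proxy handlers above share the same idiom: read a single byte to find out whether the request body is empty, and push it back before handing the stream to the XML parser. A minimal self-contained sketch of that check follows; the class and helper names (EmptyStreamCheck, nonEmptyOrNull) are hypothetical, not part of S3Proxy. Note that callers must keep reading from the returned PushbackInputStream, since the peeked byte lives in its pushback buffer, not in the underlying stream.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.PushbackInputStream;
import java.nio.charset.StandardCharsets;

public class EmptyStreamCheck {
    // Returns a stream that still contains every byte of the input,
    // or null if the input was already at end-of-stream.
    static PushbackInputStream nonEmptyOrNull(InputStream is) throws IOException {
        PushbackInputStream pis = new PushbackInputStream(is); // capacity 1 is enough
        int ch = pis.read();
        if (ch == -1) {
            return null; // empty body
        }
        pis.unread(ch); // restore the byte we peeked at
        return pis;
    }

    public static void main(String[] args) throws IOException {
        System.out.println(nonEmptyOrNull(new ByteArrayInputStream(new byte[0]))); // null
        PushbackInputStream pis =
                nonEmptyOrNull(new ByteArrayInputStream("<xml/>".getBytes(StandardCharsets.UTF_8)));
        System.out.println((char) pis.read()); // '<' -- nothing was lost
    }
}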
From source file: org.jcodec.codecs.mjpeg.JpegParser.java

public CodedImage parse(PushbackInputStream is, CountingInputStream counter) throws IOException {
    CodedImage image = new CodedImage();
    int curQTable = 0;
    while (true) {
        int marker = is.read();
        if (marker == -1)
            return image;
        if (marker == 0)
            continue;
        if (marker != 0xFF)
            throw new RuntimeException("@" + Long.toHexString(counter.getByteCount())
                    + " Marker expected: 0x" + Integer.toHexString(marker));
        int b = is.read();
        Debug.trace("%s", Markers.toString(b));
        switch (b) {
        case Markers.SOF0:
            image.frame = FrameHeader.read(is);
            Debug.trace(" %s", image.frame);
            break;
        case Markers.DHT:
            int len1 = readShort(is);
            CountingInputStream cis = new CountingInputStream(is);
            while (cis.getCount() < len1 - 2) {
                readHuffmanTable(cis, image);
            }
            break;
        case Markers.DQT:
            int len4 = readShort(is);
            CountingInputStream cis1 = new CountingInputStream(is);
            while (cis1.getCount() < len4 - 2) {
                QuantTable quantTable = readQuantTable(cis1);
                if (curQTable == 0)
                    image.setQuantLum(quantTable);
                else
                    image.setQuantChrom(quantTable);
                curQTable++;
            }
            break;
        case Markers.SOS:
            if (image.scan != null) {
                throw new IllegalStateException("unhandled - more than one scan header");
            }
            image.scan = ScanHeader.read(is);
            Debug.trace(" %s", image.scan);
            image.setData(readData(is));
            break;
        case Markers.SOI:
            break;
        case Markers.EOI:
            return image;
        case Markers.APP0:
            // The JFIF header parsing below is disabled; APP0 falls
            // through and is skipped like the other APPn segments.
            // int len10 = readShort(is);
            // byte[] id = new byte[4];
            // is.read(id);
            // if (!Arrays.equals(JFIF, id))
            //     throw new RuntimeException("Not a JFIF file");
            // is.skip(1);
            //
            // is.skip(2);
            // int units = is.read();
            // int dx = readShort(is);
            // int dy = readShort(is);
            // int tx = is.read();
            // int ty = is.read();
            // is.skip(tx * ty * 3);
            // break;
        case Markers.APP1:
        case Markers.APP2:
        case Markers.APP3:
        case Markers.APP4:
        case Markers.APP5:
        case Markers.APP6:
        case Markers.APP7:
        case Markers.APP8:
        case Markers.APP9:
        case Markers.APPA:
        case Markers.APPB:
        case Markers.APPC:
        case Markers.APPD:
        case Markers.APPE:
        case Markers.APPF:
            int len3 = readShort(is);
            StringReader.sureSkip(is, len3 - 2);
            break;
        case Markers.DRI:
            // Lr: Define restart interval segment length. Specifies the
            // length of the parameters in the DRI segment shown in
            // Figure B.9 (see B.1.1.4).
            int lr = readShort(is);
            // Ri: Restart interval. Specifies the number of MCU in the
            // restart interval.
            int ri = readShort(is);
            Debug.trace("DRI Lr: %d Ri: %d", lr, ri);
            // A DRI marker segment with Ri equal to zero shall disable
            // restart intervals for the following scans.
            Asserts.assertEquals(0, ri);
            break;
        default: {
            throw new IllegalStateException("unhandled marker " + Markers.toString(b));
        }
        }
    }
}
From source file: org.mule.transport.stdio.StdioMessageReceiver.java

@Override
public void poll() {
    String encoding = endpoint.getEncoding();
    try {
        if (sendStream) {
            PushbackInputStream in = new PushbackInputStream(inputStream);
            // Block until we have some data
            int i = in.read();
            // Roll back our read (note: this does not guard against
            // end-of-stream; unread(-1) pushes back the byte 0xFF)
            in.unread(i);
            MuleMessage message = createMuleMessage(in, encoding);
            routeMessage(message);
        } else {
            byte[] inputBuffer = new byte[bufferSize];
            int len = inputStream.read(inputBuffer);
            if (len == -1) {
                return;
            }
            StringBuffer fullBuffer = new StringBuffer(bufferSize);
            while (len > 0) {
                fullBuffer.append(new String(inputBuffer, 0, len));
                len = 0; // mark as read
                if (inputStream.available() > 0) {
                    len = inputStream.read(inputBuffer);
                }
            }
            // Each line is a separate message
            String[] lines = fullBuffer.toString().split(SystemUtils.LINE_SEPARATOR);
            for (int i = 0; i < lines.length; ++i) {
                MuleMessage message = createMuleMessage(lines[i], encoding);
                routeMessage(message);
            }
        }
        doConnect();
    } catch (Exception e) {
        getConnector().getMuleContext().getExceptionListener().handleException(e);
    }
}
From source file: org.openhealthtools.openatna.syslog.bsd.BsdMessageFactory.java

public SyslogMessage read(InputStream in) throws SyslogException {
    try {
        PushbackInputStream pin = new PushbackInputStream(in, 5);
        int priority = readPriority(pin);
        int facility;
        int severity;
        byte c;
        int spaces = 4;
        int count = 0;
        boolean spaceBefore = false;
        ByteBuffer buff = ByteBuffer.wrap(new byte[256]);
        String timestamp;
        String month = null;
        String date = null;
        String time = null;
        String host = "";
        int max = 256;
        int curr = 0;
        while (count < spaces && curr < max) {
            c = (byte) pin.read();
            curr++;
            if (c == ' ') {
                if (!spaceBefore) {
                    count++;
                    String currHeader = new String(buff.array(), 0, buff.position(), Constants.ENC_UTF8);
                    buff.clear();
                    switch (count) {
                    case 1:
                        month = currHeader;
                        break;
                    case 2:
                        date = currHeader;
                        break;
                    case 3:
                        time = currHeader;
                        break;
                    case 4:
                        host = currHeader;
                        break;
                    }
                }
                spaceBefore = true;
            } else {
                spaceBefore = false;
                buff.put(c);
            }
        }
        if (month == null || date == null || time == null) {
            timestamp = createDate(new Date());
        } else {
            String gap = " ";
            if (date.length() == 1) {
                gap = "  "; // single-digit days are padded so the BSD timestamp stays aligned
            }
            timestamp = (month + gap + date + " " + time);
            try {
                formatDate(timestamp);
            } catch (Exception e) {
                timestamp = createDate(new Date());
            }
        }
        String tag = null;
        int tagLen = 32;
        buff.clear();
        for (int i = 0; i < tagLen; i++) {
            c = (byte) pin.read();
            curr++;
            if (!Character.isLetterOrDigit((char) (c & 0xff))) {
                // first non-alphanumeric byte belongs to the message body,
                // so push it back before handing the stream on
                pin.unread(c);
                break;
            }
            buff.put(c);
        }
        if (buff.position() > 0) {
            tag = new String(buff.array(), 0, buff.position(), Constants.ENC_UTF8);
        }
        LogMessage logMessage = getLogMessage(tag);
        String encoding = readBom(pin, logMessage.getExpectedEncoding());
        logMessage.read(pin, encoding);
        facility = priority / 8;
        severity = priority % 8;
        return new BsdMessage(facility, severity, timestamp, host, logMessage, tag);
    } catch (IOException e) {
        e.printStackTrace();
        throw new SyslogException(e);
    }
}
From source file: org.openhealthtools.openatna.syslog.protocol.ProtocolMessageFactory.java

/**
 * Reads up to 256 characters of headers (excluding SDs). This limit is arbitrary.
 * It is imposed to reduce the risk of badly formed or malicious messages
 * consuming too many resources.
 *
 * @param in
 * @return
 * @throws SyslogException
 */
public SyslogMessage read(InputStream in) throws SyslogException {
    try {
        PushbackInputStream pin = new PushbackInputStream(in, 5);
        int priority = readPriority(pin);
        int facility;
        int severity;
        byte c;
        int spaces = 5;
        int count = 0;
        ByteBuffer buff = ByteBuffer.wrap(new byte[256]);
        String timestamp = null;
        String host = "-";
        String app = "-";
        String proc = "-";
        String mid = "-";
        int max = 256;
        int curr = 0;
        while (count < spaces && curr < max) {
            c = (byte) pin.read();
            curr++;
            if (c == ' ') {
                count++;
                String currHeader = new String(buff.array(), 0, buff.position(), Constants.ENC_UTF8);
                buff.clear();
                switch (count) {
                case 1:
                    timestamp = currHeader;
                    break;
                case 2:
                    host = currHeader;
                    break;
                case 3:
                    app = currHeader;
                    break;
                case 4:
                    proc = currHeader;
                    break;
                case 5:
                    mid = currHeader;
                    break;
                }
            } else {
                buff.put(c);
            }
        }
        if (timestamp == null) {
            throw new SyslogException("no timestamp defined");
        }
        c = (byte) pin.read();
        List<StructuredElement> els = new ArrayList<StructuredElement>();
        if (c == '-') {
            c = (byte) pin.read();
            if (c != ' ') {
                throw new SyslogException("not a space");
            }
        } else if (c == '[') {
            // push back the opening bracket so the structured-data
            // parser sees the complete element
            pin.unread(c);
            els = StructuredElement.parse(pin);
        } else {
            throw new SyslogException("Illegal Structured data");
        }
        LogMessage logMessage = getLogMessage(mid);
        String encoding = readBom(pin, logMessage.getExpectedEncoding());
        logMessage.read(pin, encoding);
        facility = priority / 8;
        severity = priority % 8;
        ProtocolMessage sm = new ProtocolMessage(facility, severity, timestamp, host, logMessage, app, mid, proc);
        for (StructuredElement el : els) {
            sm.addStructuredElement(el);
        }
        return sm;
    } catch (IOException e) {
        e.printStackTrace();
        throw new SyslogException(e);
    }
}
From source file: org.openymsg.network.HTTPConnectionHandler.java

private String readLine(PushbackInputStream pbis) throws IOException {
    int c = pbis.read();
    StringBuffer sb = new StringBuffer();
    // Stop at end-of-stream as well as at a line terminator
    while (c != -1 && c != '\n' && c != '\r') {
        sb.append((char) c);
        c = pbis.read();
    }
    // Check the next character: push it back unless it completes a
    // CR/LF (or LF/CR) pair. Never unread EOF (-1), since it would be
    // narrowed to the byte 0xFF.
    int c2 = pbis.read();
    if (c2 != -1 && ((c == '\n' && c2 != '\r') || (c == '\r' && c2 != '\n')))
        pbis.unread(c2);
    return sb.toString();
}