Example usage for java.io ByteArrayInputStream reset

List of usage examples for java.io ByteArrayInputStream reset

Introduction

In this page you can find the example usage for java.io ByteArrayInputStream reset.

Prototype

public synchronized void reset() 

Source Link

Document

Resets the buffer to the marked position.

Usage

From source file:org.apache.solr.search.CursorMark.java

/**
 * Parses the serialized version of a CursorMark from a client 
 * (which must conform to the existing sortSpec) and populates this object.
 *
 * @see #getSerializedTotem/* w  w  w  .j a va2 s  . c o m*/
 */
public void parseSerializedTotem(final String serialized) {
    if (CURSOR_MARK_START.equals(serialized)) {
        values = null;
        return;
    }
    final SortField[] sortFields = sortSpec.getSort().getSort();
    final List<SchemaField> schemaFields = sortSpec.getSchemaFields();

    List<Object> pieces = null;
    try {
        final byte[] rawData = Base64.base64ToByteArray(serialized);
        ByteArrayInputStream in = new ByteArrayInputStream(rawData);
        try {
            pieces = (List<Object>) codec.unmarshal(in);
            boolean b = false;
            for (Object o : pieces) {
                if (o instanceof BytesRefBuilder || o instanceof BytesRef || o instanceof String) {
                    b = true;
                    break;
                }
            }
            if (b) {
                in.reset();
                pieces = (List<Object>) codec.unmarshal(in);
            }
        } finally {
            in.close();
        }
    } catch (Exception ex) {
        throw new SolrException(ErrorCode.BAD_REQUEST,
                "Unable to parse '" + CURSOR_MARK_PARAM + "' after totem: " + "value must either be '"
                        + CURSOR_MARK_START + "' or the " + "'" + CURSOR_MARK_NEXT
                        + "' returned by a previous search: " + serialized,
                ex);
    }
    assert null != pieces : "pieces wasn't parsed?";

    if (sortFields.length != pieces.size()) {
        throw new SolrException(ErrorCode.BAD_REQUEST,
                CURSOR_MARK_PARAM + " does not work with current sort (wrong size): " + serialized);
    }

    this.values = new ArrayList<>(sortFields.length);

    final BytesRef tmpBytes = new BytesRef();
    for (int i = 0; i < sortFields.length; i++) {

        SortField curSort = sortFields[i];
        SchemaField curField = schemaFields.get(i);
        Object rawValue = pieces.get(i);

        if (null != curField) {
            FieldType curType = curField.getType();
            rawValue = curType.unmarshalSortValue(rawValue);
        }

        this.values.add(rawValue);
    }
}

From source file:org.archive.io.arc.ARCRecord.java

/**
 * Read http header if present. Technique borrowed from HttpClient HttpParse
 * class. Records recoverable problems in {@code this.errors}.
 *
 * @return ByteArrayInputStream with the http header in it or null if no
 *         http header.
 * @throws IOException on unrecoverable read failures
 */
private InputStream readHttpHeader() throws IOException {

    // this can be helpful when simply iterating over records, 
    // looking for problems.
    Logger logger = Logger.getLogger(this.getClass().getName());
    ArchiveRecordHeader h = this.getHeader();

    // If judged a record that doesn't have an http header, return
    // immediately.
    String url = getHeader().getUrl();
    if (!url.startsWith("http") || getHeader().getLength() <= MIN_HTTP_HEADER_LENGTH) {
        return null;
    }

    String statusLine;
    byte[] statusBytes;
    int eolCharCount = 0;
    int errOffset = 0;

    // Read status line, skipping any errant http headers found before it.
    // This allows a larger number of 'corrupt' arcs -- where headers were
    // accidentally inserted before the status line -- to be readable.
    while (true) {
        statusBytes = LaxHttpParser.readRawLine(getIn());
        eolCharCount = getEolCharsCount(statusBytes);
        if (eolCharCount <= 0) {
            throw new RecoverableIOException("Failed to read http status where one was expected: "
                    + ((statusBytes == null) ? "" : new String(statusBytes)));
        }

        statusLine = EncodingUtil.getString(statusBytes, 0, statusBytes.length - eolCharCount,
                ARCConstants.DEFAULT_ENCODING);

        // If a null or DELETED break immediately
        if ((statusLine == null) || statusLine.startsWith("DELETED")) {
            break;
        }

        // If it's actually the status line, break, otherwise continue skipping any
        // previous header values
        if (!statusLine.contains(":") && StatusLine.startsWithHTTP(statusLine)) {
            break;
        }

        // Add bytes read to error "offset" to add to position
        errOffset += statusBytes.length;
    }

    if (errOffset > 0) {
        this.incrementPosition(errOffset);
    }

    if ((statusLine == null) || !StatusLine.startsWithHTTP(statusLine)) {
        // FIX: the read loop above can legitimately break with a null
        // statusLine; the original code called statusLine.startsWith here
        // and would have thrown a NullPointerException in that case.
        if (statusLine != null && statusLine.startsWith("DELETED")) {
            // Some old ARCs have deleted records like following:
            // http://vireo.gatech.edu:80/ebt-bin/nph-dweb/dynaweb/SGI_Developer/SGITCL_PG/@Generic__BookTocView/11108%3Btd%3D2 130.207.168.42 19991010131803 text/html 29202
            // DELETED_TIME=20000425001133_DELETER=Kurt_REASON=alexalist
            // (follows ~29K spaces)
            // For now, throw a RecoverableIOException so if iterating over
            // records, we keep going.  TODO: Later make a legitimate
            // ARCRecord from the deleted record rather than throw
            // exception.
            throw new DeletedARCRecordIOException(statusLine);
        } else {
            this.errors.add(ArcRecordErrors.HTTP_STATUS_LINE_INVALID);
        }
    }

    try {
        this.httpStatus = new StatusLine(statusLine);
    } catch (IOException e) {
        logger.warning(e.getMessage() + " at offset: " + h.getOffset());
        this.errors.add(ArcRecordErrors.HTTP_STATUS_LINE_EXCEPTION);
    }

    // Save off all bytes read.  Keep them as bytes rather than
    // convert to strings so we don't have to worry about encodings
    // though this should never be a problem doing http headers since
    // its all supposed to be ascii.
    ByteArrayOutputStream baos = new ByteArrayOutputStream(statusBytes.length + 4 * 1024);
    baos.write(statusBytes);

    // Now read rest of the header lines looking for the separation
    // between header and body.
    for (byte[] lineBytes = null; true;) {
        lineBytes = LaxHttpParser.readRawLine(getIn());
        eolCharCount = getEolCharsCount(lineBytes);
        if (eolCharCount <= 0) {
            if (getIn().available() == 0) {
                // Record truncated mid-header: note it and stop reading.
                // NOTE(review): this adds the status line's length (not the
                // truncated line's) to httpHeaderBytesRead -- confirm intended.
                httpHeaderBytesRead += statusBytes.length;
                logger.warning("HTTP header truncated at offset: " + h.getOffset());
                this.errors.add(ArcRecordErrors.HTTP_HEADER_TRUNCATED);
                this.setEor(true);
                break;
            } else {
                throw new IOException(
                        "Failed reading http headers: " + ((lineBytes != null) ? new String(lineBytes) : null));
            }
        } else {
            httpHeaderBytesRead += lineBytes.length;
        }
        // Save the bytes read.
        baos.write(lineBytes);
        if ((lineBytes.length - eolCharCount) <= 0) {
            // We've finished reading the http header.
            break;
        }
    }

    byte[] headerBytes = baos.toByteArray();
    // Save off where body starts.
    this.getMetaData().setContentBegin(headerBytes.length);
    ByteArrayInputStream bais = new ByteArrayInputStream(headerBytes);
    if (!bais.markSupported()) {
        throw new IOException("ByteArrayInputStream does not support mark");
    }
    bais.mark(headerBytes.length);
    // Read the status line.  Don't let it into the parseHeaders function.
    // It doesn't know what to do with it.  Return value ignored: bais is
    // backed by headerBytes, whose first statusBytes.length bytes are the
    // status line written above.
    bais.read(statusBytes, 0, statusBytes.length);
    this.httpHeaders = LaxHttpParser.parseHeaders(bais, ARCConstants.DEFAULT_ENCODING);
    this.getMetaData().setStatusCode(Integer.toString(getStatusCode()));
    // Rewind to the mark (position 0) so the caller sees the full header.
    bais.reset();
    return bais;
}

From source file:org.archive.io.HeaderedArchiveRecord.java

/**
 * Read header if present. Technique borrowed from HttpClient HttpParse
 * class. Using http parser code for now. Later move to more generic header
 * parsing code if there proves a need./*www  .  ja v a 2s  .c o  m*/
 * 
 * @return ByteArrayInputStream with the http header in it or null if no
 *         http header.
 * @throws IOException
 */
private InputStream readContentHeaders() throws IOException {
    // If judged a record that doesn't have an http header, return
    // immediately.
    if (!hasContentHeaders()) {
        return null;
    }
    byte[] statusBytes = LaxHttpParser.readRawLine(getIn());
    int eolCharCount = getEolCharsCount(statusBytes);
    if (eolCharCount <= 0) {
        throw new IOException(
                "Failed to read raw lie where one " + " was expected: " + new String(statusBytes));
    }
    String statusLine = EncodingUtil.getString(statusBytes, 0, statusBytes.length - eolCharCount,
            ARCConstants.DEFAULT_ENCODING);
    if (statusLine == null) {
        throw new NullPointerException("Expected status line is null");
    }
    // TODO: Tighten up this test.
    boolean isHttpResponse = StatusLine.startsWithHTTP(statusLine);
    boolean isHttpRequest = false;
    if (!isHttpResponse) {
        isHttpRequest = statusLine.toUpperCase().startsWith("GET")
                || !statusLine.toUpperCase().startsWith("POST");
    }
    if (!isHttpResponse && !isHttpRequest) {
        throw new UnexpectedStartLineIOException("Failed parse of " + "status line: " + statusLine);
    }
    this.statusCode = isHttpResponse ? (new StatusLine(statusLine)).getStatusCode() : -1;

    // Save off all bytes read.  Keep them as bytes rather than
    // convert to strings so we don't have to worry about encodings
    // though this should never be a problem doing http headers since
    // its all supposed to be ascii.
    ByteArrayOutputStream baos = new ByteArrayOutputStream(statusBytes.length + 4 * 1024);
    baos.write(statusBytes);

    // Now read rest of the header lines looking for the separation
    // between header and body.
    for (byte[] lineBytes = null; true;) {
        lineBytes = LaxHttpParser.readRawLine(getIn());
        eolCharCount = getEolCharsCount(lineBytes);
        if (eolCharCount <= 0) {
            throw new IOException(
                    "Failed reading headers: " + ((lineBytes != null) ? new String(lineBytes) : null));
        }
        // Save the bytes read.
        baos.write(lineBytes);
        if ((lineBytes.length - eolCharCount) <= 0) {
            // We've finished reading the http header.
            break;
        }
    }

    byte[] headerBytes = baos.toByteArray();
    // Save off where content body, post content headers, starts.
    this.contentHeadersLength = headerBytes.length;
    ByteArrayInputStream bais = new ByteArrayInputStream(headerBytes);
    if (!bais.markSupported()) {
        throw new IOException("ByteArrayInputStream does not support mark");
    }
    bais.mark(headerBytes.length);
    // Read the status line.  Don't let it into the parseHeaders function.
    // It doesn't know what to do with it.
    bais.read(statusBytes, 0, statusBytes.length);
    this.contentHeaders = LaxHttpParser.parseHeaders(bais, ARCConstants.DEFAULT_ENCODING);
    bais.reset();
    return bais;
}

From source file:org.eclipse.hawkbit.artifact.repository.ArtifactStoreTest.java

@Test
@Description("Ensures that artifact content can be read through InputStream.")
public void getInputStreamFromArtifact() throws IOException {
    final int contentLength = 128;
    final String name = "testfile.json";
    final String mimeType = "application/json";

    // Store generated content, then look the artifact up again by its id.
    final ByteArrayInputStream source = generateInputStream(contentLength);
    final DbArtifact artifact = artifactStoreUnderTest
            .getArtifactById(artifactStoreUnderTest.store(source, name, mimeType).getArtifactId());

    // Rewind the source stream (reset() returns to position 0, the default
    // mark) so its bytes can be compared against what the store returns.
    source.reset();

    final byte[] expectedContent = new byte[contentLength];
    final byte[] storedContent = new byte[contentLength];
    IOUtils.readFully(source, expectedContent);
    IOUtils.readFully(artifact.getFileInputStream(), storedContent);

    assertThat(expectedContent).isEqualTo(storedContent);
}

From source file:org.eclipse.lyo.oslc.am.resource.ResourceService.java

/**
 * Accepts a POSTed resource (RDF, PowerPoint deck, multipart file upload, or
 * opaque binary), stores it via the {@link RioStore}, and answers 201 Created
 * with Location/Last-Modified/ETag headers taken from the stored resource.
 *
 * @param request  the servlet request carrying the resource content
 * @param response the servlet response; status and headers are set here
 * @throws ServletException per servlet contract (not thrown directly here)
 * @throws IOException      if reading the request body fails
 */
protected void doPost(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {

    boolean isFileUpload = ServletFileUpload.isMultipartContent(request);
    String contentType = request.getContentType();

    // Only multipart uploads or recognized RDF content types are accepted.
    if (!isFileUpload && (RioStore.rdfFormatFromContentType(contentType) == null)) {
        throw new RioServiceException(IConstants.SC_UNSUPPORTED_MEDIA_TYPE);
    }

    InputStream content = request.getInputStream();

    if (isFileUpload) {
        // being uploaded from a web page
        try {
            FileItemFactory factory = new DiskFileItemFactory();
            ServletFileUpload upload = new ServletFileUpload(factory);
            @SuppressWarnings("unchecked")
            List<FileItem> items = upload.parseRequest(request);

            // find the first (and only) file resource in the post
            Iterator<FileItem> iter = items.iterator();
            while (iter.hasNext()) {
                FileItem item = iter.next();
                if (item.isFormField()) {
                    // this is a form field, maybe we can accept a title or descr?
                } else {
                    // NOTE(review): if several file items are posted, the
                    // last one wins -- confirm that is acceptable.
                    content = item.getInputStream();
                    contentType = item.getContentType();
                }
            }

        } catch (Exception e) {
            throw new RioServiceException(e);
        }
    }

    RioStore store = this.getStore();
    if (RioStore.rdfFormatFromContentType(contentType) != null) {
        // RDF content: parse into statements and store them as one resource.
        try {
            String resUri = store.nextAvailableUri(IAmConstants.SERVICE_RESOURCE);
            Resource resource = new Resource(resUri);
            List<RioStatement> statements = store.parse(resUri, content, contentType);
            resource.addStatements(statements);
            String userUri = getUserUri(request.getRemoteUser());

            // if it parsed, then add it to the store.
            store.update(resource, userUri);

            // now get it back, to find creation date and ETag for the headers
            OslcResource returnedResource = store.getOslcResource(resource.getUri());
            Date created = returnedResource.getCreated();
            String eTag = returnedResource.getETag();

            response.setStatus(IConstants.SC_CREATED);
            response.setHeader(IConstants.HDR_LOCATION, resource.getUri());
            response.setHeader(IConstants.HDR_LAST_MODIFIED, StringUtils.rfc2822(created));
            response.setHeader(IConstants.HDR_ETAG, eTag);

        } catch (RioServerException e) {
            throw new RioServiceException(IConstants.SC_BAD, e);
        }
    } else if (IAmConstants.CT_APP_X_VND_MSPPT.equals(contentType) || isFileUpload) {
        // PowerPoint deck (or any file upload): store the deck itself, then
        // render each slide to PNG and store those as linked resources.
        try {

            // Buffer the whole body in memory so it can be read twice:
            // once to persist the raw deck, once to parse it as a SlideShow.
            ByteArrayInputStream bais = isToBais(content);

            String uri = store.nextAvailableUri(IAmConstants.SERVICE_RESOURCE);
            Resource resource = new Resource(uri);
            resource.addRdfType(IAmConstants.OSLC_AM_TYPE_RESOURCE);
            resource.addRdfType(IAmConstants.RIO_AM_PPT_DECK);
            String id = resource.getIdentifier();
            String deckTitle = "PPT Deck " + id;
            resource.setTitle(deckTitle);
            resource.setDescription("A Power Point Deck");
            String sourceUri = getBaseUrl() + '/' + IAmConstants.SERVICE_SOURCE + '/' + id;
            resource.setSource(sourceUri);
            resource.setSourceContentType(contentType);
            String userUri = getUserUri(request.getRemoteUser());

            store.storeBinaryResource(bais, id);
            // reset() rewinds the in-memory copy to position 0 (no mark was
            // set) so SlideShow can re-read the bytes just persisted.
            bais.reset();

            SlideShow ppt = new SlideShow(bais);
            Dimension pgsize = ppt.getPageSize();

            Slide[] slide = ppt.getSlides();
            for (int i = 0; i < slide.length; i++) {
                String slideTitle = extractTitle(slide[i]);
                String slideUri = store.nextAvailableUri(IAmConstants.SERVICE_RESOURCE);
                Resource slideResource = new Resource(slideUri);
                slideResource.addRdfType(IAmConstants.OSLC_AM_TYPE_RESOURCE);
                slideResource.addRdfType(IAmConstants.RIO_AM_PPT_SLIDE);
                String slideId = slideResource.getIdentifier();
                slideResource.setTitle(slideTitle);
                sourceUri = getBaseUrl() + '/' + IAmConstants.SERVICE_SOURCE + '/' + slideId;
                slideResource.setSource(sourceUri);
                slideResource.setSourceContentType(IConstants.CT_IMAGE_PNG);
                store.update(slideResource, userUri);

                // Render the slide onto a white canvas and persist it as PNG.
                BufferedImage img = new BufferedImage(pgsize.width, pgsize.height, BufferedImage.TYPE_INT_RGB);
                Graphics2D graphics = img.createGraphics();
                graphics.setPaint(Color.white);
                graphics.fill(new Rectangle2D.Float(0, 0, pgsize.width, pgsize.height));
                slide[i].draw(graphics);
                ByteArrayOutputStream out = new ByteArrayOutputStream();
                javax.imageio.ImageIO.write(img, "png", out);
                ByteArrayInputStream is = new ByteArrayInputStream(out.toByteArray());
                store.storeBinaryResource(is, slideId);
                out.close();
                is.close();
                try {
                    // Link the slide into the deck's ordered "slides" sequence.
                    RioValue v = new RioValue(RioValueType.URI, slideResource.getUri());
                    resource.appendToSeq(IConstants.RIO_NAMESPACE + "slides", v);
                } catch (UnrecognizedValueTypeException e) {
                    // log this?  don't want to throw away everything, since this should never happen
                }
            }

            store.update(resource, userUri);

            // now get it back, to find eTag and creator stuff
            OslcResource returnedResource = store.getOslcResource(resource.getUri());
            Date created = returnedResource.getCreated();
            String eTag = returnedResource.getETag();

            response.setStatus(IConstants.SC_CREATED);
            response.setHeader(IConstants.HDR_LOCATION, resource.getUri());
            response.setHeader(IConstants.HDR_LAST_MODIFIED, StringUtils.rfc2822(created));
            response.setHeader(IConstants.HDR_ETAG, eTag);

        } catch (RioServerException e) {
            throw new RioServiceException(IConstants.SC_BAD, e);
        }

    } else {
        // must be a binary or unknown format, treat as black box
        // normally a service provider will understand this and parse it appropriately
        // however this server will accept any blank box resource

        try {
            String uri = store.nextAvailableUri(IAmConstants.SERVICE_RESOURCE);
            Resource resource = new Resource(uri);
            String id = resource.getIdentifier();
            resource.setTitle("Resource " + id);
            resource.setDescription("A binary resource");
            String sourceUri = getBaseUrl() + IAmConstants.SERVICE_SOURCE + '/' + id;
            resource.setSource(sourceUri);
            resource.setSourceContentType(contentType);
            String userUri = getUserUri(request.getRemoteUser());
            store.update(resource, userUri);

            store.storeBinaryResource(content, id);

            // now get it back, to find eTag and creator stuff
            OslcResource returnedResource = store.getOslcResource(resource.getUri());
            Date created = returnedResource.getCreated();
            String eTag = returnedResource.getETag();

            response.setStatus(IConstants.SC_CREATED);
            response.setHeader(IConstants.HDR_LOCATION, resource.getUri());
            response.setHeader(IConstants.HDR_LAST_MODIFIED, StringUtils.rfc2822(created));
            response.setHeader(IConstants.HDR_ETAG, eTag);

        } catch (RioServerException e) {
            throw new RioServiceException(IConstants.SC_BAD, e);
        }
    }
}

From source file:org.eclipse.lyo.samples.sharepoint.adapter.ResourceService.java

/**
 * Accepts a POSTed resource (RDF/XML, PowerPoint deck, multipart file upload,
 * or opaque binary), stores it via the {@link ShareStore}, and answers 201
 * Created with Location/Last-Modified/ETag headers from the stored resource.
 *
 * @param request  the servlet request carrying the resource content
 * @param response the servlet response; status and headers are set here
 * @throws ServletException per servlet contract (not thrown directly here)
 * @throws IOException      if reading the request body fails
 */
protected void doPost(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {
    System.out.println("entered do Post for /resource");
    boolean isFileUpload = ServletFileUpload.isMultipartContent(request);
    String contentType = request.getContentType();

    // Only multipart uploads or RDF/XML are accepted.
    if (!isFileUpload && !IConstants.CT_RDF_XML.equals(contentType)) {
        throw new ShareServiceException(IConstants.SC_UNSUPPORTED_MEDIA_TYPE);
    }

    InputStream content = request.getInputStream();

    if (isFileUpload) {
        // being uploaded from a web page
        try {
            FileItemFactory factory = new DiskFileItemFactory();
            ServletFileUpload upload = new ServletFileUpload(factory);
            @SuppressWarnings("unchecked")
            List<FileItem> items = upload.parseRequest(request);

            // find the first (and only) file resource in the post
            Iterator<FileItem> iter = items.iterator();
            while (iter.hasNext()) {
                FileItem item = iter.next();
                if (item.isFormField()) {
                    // this is a form field, maybe we can accept a title or descr?
                } else {
                    // NOTE(review): if several file items are posted, the
                    // last one wins -- confirm that is acceptable.
                    content = item.getInputStream();
                    contentType = item.getContentType();
                }
            }

        } catch (Exception e) {
            throw new ShareServiceException(e);
        }
    }

    ShareStore store = this.getStore();
    if (ShareStore.rdfFormatFromContentType(contentType) != null) {
        // RDF content: parse into statements and store them as one resource.
        try {
            String resUri = store.nextAvailableUri(IAmConstants.SERVICE_RESOURCE);
            SharepointResource resource = new SharepointResource(resUri);
            List<ShareStatement> statements = store.parse(resUri, content, contentType);
            resource.addStatements(statements);
            String userUri = getUserUri(request.getRemoteUser());

            // if it parsed, then add it to the store.
            store.update(resource, userUri);

            // now get it back, to find creation date and ETag for the headers
            OslcResource returnedResource = store.getOslcResource(resource.getUri());
            Date created = returnedResource.getCreated();
            String eTag = returnedResource.getETag();

            response.setStatus(IConstants.SC_CREATED);
            response.setHeader(IConstants.HDR_LOCATION, resource.getUri());
            response.setHeader(IConstants.HDR_LAST_MODIFIED, StringUtils.rfc2822(created));
            response.setHeader(IConstants.HDR_ETAG, eTag);

        } catch (ShareServerException e) {
            throw new ShareServiceException(IConstants.SC_BAD, e);
        }
    } else if (IAmConstants.CT_APP_X_VND_MSPPT.equals(contentType) || isFileUpload) {
        // PowerPoint deck (or any file upload): store the deck itself, then
        // render each slide to PNG and store those as linked resources.
        try {

            // Buffer the whole body in memory so it can be read twice:
            // once to persist the raw deck, once to parse it as a SlideShow.
            ByteArrayInputStream bais = isToBais(content);

            String uri = store.nextAvailableUri(IAmConstants.SERVICE_RESOURCE);
            SharepointResource resource = new SharepointResource(uri);
            resource.addRdfType(IAmConstants.OSLC_AM_TYPE_RESOURCE);
            resource.addRdfType(IAmConstants.RIO_AM_PPT_DECK);
            String id = resource.getIdentifier();
            String deckTitle = "PPT Deck " + id;
            resource.setTitle(deckTitle);
            resource.setDescription("A Power Point Deck");
            String sourceUri = getBaseUrl() + '/' + IAmConstants.SERVICE_SOURCE + '/' + id;
            resource.setSource(sourceUri);
            resource.setSourceContentType(contentType);
            String userUri = getUserUri(request.getRemoteUser());

            store.storeBinaryResource(bais, id);
            // reset() rewinds the in-memory copy to position 0 (no mark was
            // set) so SlideShow can re-read the bytes just persisted.
            bais.reset();

            SlideShow ppt = new SlideShow(bais);
            Dimension pgsize = ppt.getPageSize();

            Slide[] slide = ppt.getSlides();
            for (int i = 0; i < slide.length; i++) {
                String slideTitle = extractTitle(slide[i]);
                String slideUri = store.nextAvailableUri(IAmConstants.SERVICE_RESOURCE);
                SharepointResource slideResource = new SharepointResource(slideUri);
                slideResource.addRdfType(IAmConstants.OSLC_AM_TYPE_RESOURCE);
                slideResource.addRdfType(IAmConstants.RIO_AM_PPT_SLIDE);
                String slideId = slideResource.getIdentifier();
                slideResource.setTitle(slideTitle);
                sourceUri = getBaseUrl() + '/' + IAmConstants.SERVICE_SOURCE + '/' + slideId;
                slideResource.setSource(sourceUri);
                slideResource.setSourceContentType(IConstants.CT_IMAGE_PNG);
                store.update(slideResource, userUri);

                // Render the slide onto a white canvas and persist it as PNG.
                BufferedImage img = new BufferedImage(pgsize.width, pgsize.height, BufferedImage.TYPE_INT_RGB);
                Graphics2D graphics = img.createGraphics();
                graphics.setPaint(Color.white);
                graphics.fill(new Rectangle2D.Float(0, 0, pgsize.width, pgsize.height));
                slide[i].draw(graphics);
                ByteArrayOutputStream out = new ByteArrayOutputStream();
                javax.imageio.ImageIO.write(img, "png", out);
                ByteArrayInputStream is = new ByteArrayInputStream(out.toByteArray());
                store.storeBinaryResource(is, slideId);
                out.close();
                is.close();
                try {
                    // Link the slide into the deck's ordered "slides" sequence.
                    ShareValue v = new ShareValue(ShareValueType.URI, slideResource.getUri());
                    resource.appendToSeq(IConstants.SHARE_NAMESPACE + "slides", v);
                } catch (UnrecognizedValueTypeException e) {
                    // log this?  don't want to throw away everything, since this should never happen
                }
            }

            store.update(resource, userUri);

            // now get it back, to find eTag and creator stuff
            OslcResource returnedResource = store.getOslcResource(resource.getUri());
            Date created = returnedResource.getCreated();
            String eTag = returnedResource.getETag();

            response.setStatus(IConstants.SC_CREATED);
            response.setHeader(IConstants.HDR_LOCATION, resource.getUri());
            response.setHeader(IConstants.HDR_LAST_MODIFIED, StringUtils.rfc2822(created));
            response.setHeader(IConstants.HDR_ETAG, eTag);

        } catch (ShareServerException e) {
            throw new ShareServiceException(IConstants.SC_BAD, e);
        }

    } else {
        // must be a binary or unknown format, treat as black box
        // normally a service provider will understand this and parse it appropriately
        // however this server will accept any blank box resource

        try {
            String uri = store.nextAvailableUri(IAmConstants.SERVICE_RESOURCE);
            SharepointResource resource = new SharepointResource(uri);
            String id = resource.getIdentifier();
            resource.setTitle("Resource " + id);
            resource.setDescription("A binary resource");
            String sourceUri = getBaseUrl() + IAmConstants.SERVICE_SOURCE + '/' + id;
            resource.setSource(sourceUri);
            resource.setSourceContentType(contentType);
            String userUri = getUserUri(request.getRemoteUser());
            store.update(resource, userUri);

            store.storeBinaryResource(content, id);

            // now get it back, to find eTag and creator stuff
            OslcResource returnedResource = store.getOslcResource(resource.getUri());
            Date created = returnedResource.getCreated();
            String eTag = returnedResource.getETag();

            response.setStatus(IConstants.SC_CREATED);
            response.setHeader(IConstants.HDR_LOCATION, resource.getUri());
            response.setHeader(IConstants.HDR_LAST_MODIFIED, StringUtils.rfc2822(created));
            response.setHeader(IConstants.HDR_ETAG, eTag);

        } catch (ShareServerException e) {
            throw new ShareServiceException(IConstants.SC_BAD, e);
        }
    }
}

From source file:org.gbif.harvest.tapir.TapirMetadataHandler.java

/**
 * Get the most prioritised content namespace.
 * In the event the capabilities response cannot be parsed,
 * the default content namespace is used
 *
 * @param inputStream capabilities response as ByteArrayInputStream
 * @param directory   as String/*from w w  w  .  j  a  va  2s .c o m*/
 *
 * @return most prioritized conetent namespace
 *
 * @throws HarvesterException thrown if method fails
 */
private String getNamespace(ByteArrayInputStream inputStream, String directory) throws HarvesterException {
    log.info("tapirmetadatahandler.start.getNamespace");

    // Initially, set the namespace to the default
    String newestNamespace = DEFAULT_CONTENT_NAMESPACE;

    // reste stream as we're reading it a second time
    if (inputStream != null) {
        inputStream.reset();
    }

    // retrieve the list of supported namespaces
    try {
        // namespaces = returnNamespace(fis, NAMESPACE_RESPONSE_XPATH_ELEMENT);
        Set<String> namespaces = digesterUtils.xmlToListOfAttributeValuesForSingleElement(inputStream,
                TapirMetadataHandler.namespaceResponseXPathElement,
                TapirMetadataHandler.supportedNamespaceAttributeName);

        // Iterate through the ordered list of available namespaces and
        // determine what the newest one from amongst the set of supported
        // namespaces retrieved is
        // Set the default namespace
        for (String supportedNamespace : supported_namespaces) {
            if (namespaces.contains(supportedNamespace)) {
                newestNamespace = supportedNamespace;
                log.debug("tapirmetadatahandler.getNamespace.chooseNamespace", newestNamespace);
                log.info("tapirmetadatahandler.end.getNamespace");
                return newestNamespace;
            }
        }
        // if not found, alert operator
        log.error("tapirmetadatahandler.default.conceptualMappingNotFound", namespaces.toString());
        // and write GBIF Log Message
        gbifLogger.openAndWriteToGbifLogMessageFile(directory,
                CommonGBIFLogEvent.COMMON_MESSAGES_UNKNOWN_SCHEMA_LOCATION.getName(),
                CommonGBIFLogEvent.COMMON_MESSAGES_UNKNOWN_SCHEMA_LOCATION.getValue(), Level.ERROR_INT,
                "None of the namespace(s) " + namespaces.toString()
                        + " was not found in the TAPIR conceptualMapping.properties file. Please update this file with valid namespace(s) and try again. Defaulting to namespace http://rs.tdwg.org/dwc/dwcore/",
                1, false);

    } catch (IOException e) {
        log.error("tapirmetadatahandler.error.getNamespace.parsing", e.getMessage(), e);
        log.debug("tapirmetadatahandler.default.getNamespace.chooseNamespace", newestNamespace);
        // throw new HarvesterException(e.getMessage(), e);
    } catch (SAXException e) {
        log.error("tapirmetadatahandler.error.getNamespace.parsing", e.getMessage(), e);
        log.debug("tapirmetadatahandler.default.getNamespace.chooseNamespace", newestNamespace);
        // throw new HarvesterException(e.getMessage(), e);
    }

    // close inputStream
    try {
        if (inputStream != null) {
            inputStream.close();
        }
    } catch (Exception e) {
        // do nothing
    }

    log.info("tapirmetadatahandler.end.getNamespace");
    return newestNamespace;
}

From source file:org.gbif.harvest.tapir.TapirMetadataHandler.java

/**
 * Updates a BioDatasource's target count and its other metadata.
 * <p>
 * The target count is obtained by issuing a search request and parsing the
 * response; the remaining metadata (resource name, contacts, …) comes from a
 * separate metadata request. A failed metadata request is logged but does not
 * abort the update: the BioDatasource is saved with whatever was gathered.
 *
 * @param id               Biodatasource id
 * @param url              access point URL
 * @param datasetTitle     dataset title
 * @param datasetTitlePath dataset title path
 *
 * @throws HarvesterException thrown if method fails
 */
public void updateMetadata(Long id, String url, String datasetTitle, String datasetTitlePath)
        throws HarvesterException {
    log.debug("start.updateMetadata");

    // retrieve the BioDatasource
    BioDatasource bioDatasource = bioDatasourceManager.get(id);

    // retrieve the BioDatasource's directory and protocol from its JSON params
    Map<String, Object> params = JSONUtils.mapFromJSON(bioDatasource.getParametersAsJSON());
    String bioDatasourceDirectory = Constants.BASE_DIR.concat(File.separator)
            .concat((String) params.get("directory"));
    String protocol = (String) params.get("protocol");

    // determine the outputModel from the appropriate mapping file
    String contentNamespace = params.get("contentNamespace").toString();
    String outputModel = getOutputModel(contentNamespace);

    // get the count 'String'
    String resource_count = null;
    try {
        // send search request and get response as ByteArrayInputStream
        ByteArrayInputStream searchResponse = getSearch(url, bioDatasourceDirectory, outputModel, datasetTitle,
                datasetTitlePath, protocol);

        // parse the response for the count information
        resource_count = getCount(searchResponse);
    } catch (HarvesterException e) {
        log.error("error.gettingCount", bioDatasource.getName(), e);
    }

    // check count is proper integer value, then set it as targetCount
    if (StringUtils.trimToNull(resource_count) != null) {
        int targetCount = 0;
        try {
            // trim before parsing: the guard above accepts padded values, but
            // Integer.parseInt would reject them and we'd lose a valid count
            targetCount = Integer.parseInt(resource_count.trim());
        } catch (NumberFormatException e) {
            log.warn("Problem occurred converting resource count: " + resource_count);
        } finally {
            // on parse failure the count deliberately defaults to 0
            params.put("targetCount", String.valueOf(targetCount));
            // update the BioDatasource's target count attribute
            bioDatasource.setTargetCount(targetCount);
            log.info("updateCount", String.valueOf(targetCount));
        }
    }
    // update the BioDatasource's params with the default count
    else {
        params.put("targetCount", "0");
        bioDatasource.setTargetCount(0);
    }

    ByteArrayInputStream metadataResponse = metadataRequest(url, bioDatasourceDirectory, protocol);

    Map<String, String> processed = null;
    Map<String, String> processedDataResourceNames;
    try {
        // for all parameters with no alternative languages
        processed = processMetadata(metadataResponse);

        // for all parameters with alternative languages
        // 1. data resource title
        String dataResourceNameXPath = metadataElementsOfInterest.get(resourceNameKeyName);
        // remember to reset inputStream first: processMetadata consumed it
        metadataResponse.reset();
        processedDataResourceNames = processAllLanguageAlternativesForAParticularElementOfInterest(
                metadataResponse, dataResourceNameXPath);

        // prefer the English dataResourceName when one exists
        String englishDataResourceName = retrieveValueForEnlishEntry(processedDataResourceNames);
        if (StringUtils.isNotBlank(englishDataResourceName)) {
            processed.put(resourceNameKeyName, englishDataResourceName);
            processed.put(resourceDisplayNameKeyName, englishDataResourceName);
        }

        // for all contact related metadata
        // remember to reset inputStream first
        metadataResponse.reset();
        processMetadataForContacts(metadataResponse, bioDatasourceDirectory);
    } catch (HarvesterException e) {
        // intentionally swallowed: the failure is reported via the error log
        // in the else-branch below ('processed' stays null)
    }

    // update other metadata
    if (processed != null && processed.size() > 0) {
        if (processed.containsKey(resourceNameKeyName)) {
            String dataResourceName = StringUtils.trimToNull(processed.get(resourceNameKeyName));
            if (StringUtils.isBlank(dataResourceName)
                    || StringUtils.equalsIgnoreCase(dataResourceName, "NULL")) {
                // fall back to the datasource's own name when the metadata
                // response carried no usable resource name
                log.error("tapirmetadatahandler.error.updateMetadata.dataResourceName",
                        bioDatasource.getName());
                params.put(resourceNameKeyName, bioDatasource.getName());
                params.put(resourceDisplayNameKeyName, bioDatasource.getName());
            }
        }
        // add all metadata to params
        // (moved inside the null-guard: previously this ran unconditionally
        // and threw NullPointerException when the metadata request failed)
        params.putAll(processed);
    } else {
        log.error("tapirmetadatahandler.error.updateMetadata.metadataRequest", bioDatasource.getName());
    }

    // re save params
    bioDatasource.setParametersAsJSON(JSONUtils.jsonFromMap(params));

    // save the BioDatasource
    bioDatasourceManager.save(bioDatasource);
    log.debug("end.updateMetadata");
}

From source file:org.openmrs.util.OpenmrsUtilTest.java

/**
 * @throws IOException/*from w  w  w  .  j ava  2  s.  com*/
 * @see OpenmrsUtil#copyFile(InputStream, OutputStream)
 */
@Test
public void copyFile_shouldCopyInputstreamToOutputstreamAndCloseTheOutputstream() throws IOException {

    String exampleInputStreamString = "ExampleInputStream";
    ByteArrayInputStream expectedByteArrayInputStream = new ByteArrayInputStream(
            exampleInputStreamString.getBytes());

    ByteArrayOutputStream output = spy(new ByteArrayOutputStream());
    OpenmrsUtil.copyFile(expectedByteArrayInputStream, output);

    expectedByteArrayInputStream.reset();
    ByteArrayInputStream byteArrayInputStreamFromOutputStream = new ByteArrayInputStream(output.toByteArray());

    assertTrue(IOUtils.contentEquals(expectedByteArrayInputStream, byteArrayInputStreamFromOutputStream));
    verify(output, times(1)).close();
}

From source file:org.pdfsam.console.business.pdf.handlers.SplitCmdExecutor.java

/**
 * Executes the split of a pdf document when the split type is S_BLEVEL:
 * the bookmark tree is exported to XML and queried with XPath to find every
 * bookmark at the requested depth, and the document is split just before each
 * destination page (optionally filtered by a bookmark-matching regexp).
 *
 * @param inputCommand parsed command carrying the input file, the bookmarks
 *        level and an optional bookmark regexp
 * @throws Exception if the level is not valid, is deeper than the bookmark
 *         tree, or no matching destination pages are found
 */
private void executeBookmarksSplit(SplitParsedCommand inputCommand) throws Exception {
    pdfReader = PdfUtility.readerFor(inputCommand.getInputFile());
    int bLevel = inputCommand.getBookmarksLevel().intValue();
    // destination page number -> bookmark title; handed to executeSplit below
    Hashtable bookmarksTable = new Hashtable();
    if (bLevel > 0) {
        pdfReader.removeUnusedObjects();
        pdfReader.consolidateNamedDestinations();
        // export the bookmark tree to in-memory XML so it can be XPath-queried
        List bookmarks = SimpleBookmark.getBookmark(pdfReader);
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        SimpleBookmark.exportToXML(bookmarks, out, "UTF-8", false);
        ByteArrayInputStream input = new ByteArrayInputStream(out.toByteArray());
        int maxDepth = PdfUtility.getMaxBookmarksDepth(input);
        // getMaxBookmarksDepth consumed the stream; rewind before re-parsing
        input.reset();
        if (bLevel <= maxDepth) {
            // NOTE(review): if reader.read throws, 'input' is never closed —
            // harmless for a ByteArrayInputStream, but worth confirming.
            SAXReader reader = new SAXReader();
            org.dom4j.Document document = reader.read(input);
            // head node
            String headBookmarkXQuery = "/Bookmark/Title[@Action=\"GoTo\"]";
            Node headNode = document.selectSingleNode(headBookmarkXQuery);
            if (headNode != null && headNode.getText() != null && headNode.getText().trim().length() > 0) {
                // the document's first bookmark labels the first split chunk
                bookmarksTable.put(new Integer(1), headNode.getText().trim());
            }
            // bLevel nodes: one /Title step per nesting level
            StringBuffer buffer = new StringBuffer("/Bookmark");
            for (int i = 0; i < bLevel; i++) {
                buffer.append("/Title[@Action=\"GoTo\"]");
            }
            String xQuery = buffer.toString();
            List nodes = document.selectNodes(xQuery);
            // XML fully parsed into 'document'; the stream is no longer needed
            input.close();
            input = null;
            if (nodes != null && nodes.size() > 0) {
                // LinkedHashSet: dedupes pages while keeping document order
                LinkedHashSet pageSet = new LinkedHashSet(nodes.size());
                for (Iterator nodeIter = nodes.iterator(); nodeIter.hasNext();) {
                    Node currentNode = (Node) nodeIter.next();
                    Node pageAttribute = currentNode.selectSingleNode("@Page");
                    if (pageAttribute != null && pageAttribute.getText().length() > 0) {
                        String attribute = pageAttribute.getText();
                        // @Page looks like "N ..." — keep only the leading number
                        int blankIndex = attribute.indexOf(' ');
                        if (blankIndex > 0) {
                            Integer currentNumber = new Integer(attribute.substring(0, blankIndex));
                            String bookmarkText = currentNode.getText().trim();
                            // fix #2789963: skip bookmarks with no real destination page
                            if (currentNumber.intValue() > 0) {
                                // bookmarks regexp matching if any
                                if (StringUtils.isBlank(inputCommand.getBookmarkRegexp())
                                        || bookmarkText.matches(inputCommand.getBookmarkRegexp())) {
                                    // to split just before the given page
                                    if ((currentNumber.intValue()) > 1) {
                                        pageSet.add(new Integer(currentNumber.intValue() - 1));
                                    }
                                    if (StringUtils.isNotBlank(bookmarkText)) {
                                        bookmarksTable.put(currentNumber, bookmarkText.trim());
                                    }
                                }
                            }
                        }
                    }
                }
                if (pageSet.size() > 0) {
                    if (StringUtils.isBlank(inputCommand.getBookmarkRegexp())) {
                        LOG.debug("Found " + pageSet.size() + " destination pages at level " + bLevel);
                    } else {
                        LOG.debug("Found " + pageSet.size() + " destination pages at level " + bLevel
                                + " matching '" + inputCommand.getBookmarkRegexp() + "'");
                    }
                    inputCommand.setSplitPageNumbers((Integer[]) pageSet.toArray(new Integer[pageSet.size()]));
                } else {
                    // bookmarks existed at this level but none had a usable destination
                    throw new SplitException(SplitException.ERR_BLEVEL_NO_DEST, new String[] { "" + bLevel });
                }
            } else {
                throw new SplitException(SplitException.ERR_BLEVEL, new String[] { "" + bLevel });
            }
        } else {
            // requested level is deeper than the bookmark tree
            input.close();
            pdfReader.close();
            throw new SplitException(SplitException.ERR_BLEVEL_OUTOFBOUNDS,
                    new String[] { "" + bLevel, "" + maxDepth });

        }
    } else {
        pdfReader.close();
        throw new SplitException(SplitException.ERR_NOT_VALID_BLEVEL, new String[] { "" + bLevel });
    }
    pdfReader.close();
    // delegate the actual page-range split, labelling chunks from bookmarksTable
    executeSplit(inputCommand, bookmarksTable);
}