Example usage for java.io ByteArrayOutputStream reset

List of usage examples for java.io ByteArrayOutputStream reset

Introduction

On this page you can find example usages of java.io.ByteArrayOutputStream.reset().

Prototype

public synchronized void reset() 

Document

Resets the count field of this ByteArrayOutputStream to zero, so that all currently accumulated output in the output stream is discarded.
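
Before the examples, a minimal, self-contained sketch of the behavior described above (not taken from any of the sources below): reset() sets the internal count back to zero and keeps the already-allocated buffer, so the same stream can be reused without reallocating.

import java.io.ByteArrayOutputStream;

public class ResetDemo {
    public static void main(String[] args) {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        byte[] first = "first".getBytes();
        baos.write(first, 0, first.length);
        System.out.println(baos.size()); // prints 5

        baos.reset(); // discard everything accumulated so far; the buffer is kept
        System.out.println(baos.size()); // prints 0

        byte[] second = "second".getBytes();
        baos.write(second, 0, second.length);
        System.out.println(baos.toString()); // prints "second" -- the first write is gone
    }
}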

Usage

From source file:org.apache.accumulo.test.replication.merkle.cli.GenerateHashes.java

public void run(final Connector conn, final String inputTableName, final String outputTableName,
        final String digestName, int numThreads, final boolean iteratorPushdown, final Collection<Range> ranges)
        throws TableNotFoundException, AccumuloSecurityException, AccumuloException, NoSuchAlgorithmException {
    if (!conn.tableOperations().exists(outputTableName)) {
        throw new IllegalArgumentException(outputTableName + " does not exist, please create it");
    }

    // Get some parallelism
    ExecutorService svc = Executors.newFixedThreadPool(numThreads);
    final BatchWriter bw = conn.createBatchWriter(outputTableName, new BatchWriterConfig());

    try {
        for (final Range range : ranges) {
            final MessageDigest digest = getDigestAlgorithm(digestName);

            svc.execute(new Runnable() {

                @Override
                public void run() {
                    Scanner s;
                    try {
                        s = conn.createScanner(inputTableName, Authorizations.EMPTY);
                    } catch (Exception e) {
                        log.error("Could not get scanner for " + inputTableName, e);
                        throw new RuntimeException(e);
                    }

                    s.setRange(range);

                    Value v = null;
                    Mutation m = null;
                    if (iteratorPushdown) {
                        IteratorSetting cfg = new IteratorSetting(50, DigestIterator.class);
                        cfg.addOption(DigestIterator.HASH_NAME_KEY, digestName);
                        s.addScanIterator(cfg);

                        // The scanner should only ever return a single Key-Value pair; otherwise this approach won't work
                        Entry<Key, Value> entry = Iterables.getOnlyElement(s);

                        v = entry.getValue();
                        m = RangeSerialization.toMutation(range, v);
                    } else {
                        ByteArrayOutputStream baos = new ByteArrayOutputStream();
                        for (Entry<Key, Value> entry : s) {
                            DataOutputStream out = new DataOutputStream(baos);
                            try {
                                entry.getKey().write(out);
                                entry.getValue().write(out);
                            } catch (Exception e) {
                                log.error("Error writing {}", entry, e);
                                throw new RuntimeException(e);
                            }

                            digest.update(baos.toByteArray());
                            baos.reset();
                        }

                        v = new Value(digest.digest());
                        m = RangeSerialization.toMutation(range, v);
                    }

                    // Log some progress
                    log.info("{} computed digest for {} of {}", Thread.currentThread().getName(), range,
                            Hex.encodeHexString(v.get()));

                    try {
                        bw.addMutation(m);
                    } catch (MutationsRejectedException e) {
                        log.error("Could not write mutation", e);
                        throw new RuntimeException(e);
                    }
                }
            });
        }

        svc.shutdown();

        // Wait indefinitely for the scans to complete
        while (!svc.isTerminated()) {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                log.error("Interrupted while waiting for executor service to gracefully complete. Exiting now");
                svc.shutdownNow();
                return;
            }
        }
    } finally {
        // We can only safely close this when we're exiting or we've completed all tasks
        bw.close();
    }
}
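
The reset() pattern above, distilled (a sketch, not part of the original source): each entry is serialized into one shared buffer, the bytes are fed to the digest, and reset() empties the buffer for the next entry instead of allocating a new stream per entry.

MessageDigest digest = MessageDigest.getInstance(digestName);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream out = new DataOutputStream(baos); // hoisted out of the loop; the per-entry wrapper in the original is unnecessary
for (Entry<Key, Value> entry : scanner) {
    entry.getKey().write(out);   // Key and Value serialize to a DataOutput,
    entry.getValue().write(out); // which is why the intermediate buffer exists at all
    digest.update(baos.toByteArray());
    baos.reset(); // start the next entry from an empty buffer, reusing the allocation
}
Value rangeHash = new Value(digest.digest());

The toByteArray()-plus-reset() round trip could also be avoided entirely by wrapping the digest in a java.security.DigestOutputStream, at the cost of a slightly less obvious data flow.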

From source file:com.ririjin.adminmobile.fragment.UpdateImageFragment.java

private Bitmap comp(Bitmap image) {

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    image.compress(Bitmap.CompressFormat.JPEG, 100, baos);
    if (baos.toByteArray().length / 1024 > 1024) { // over 1 MB: recompress to avoid an OOM in BitmapFactory.decodeStream
        baos.reset(); // empty baos so the next compress() does not append to the first pass
        image.compress(Bitmap.CompressFormat.JPEG, 50, baos); // compress at 50% quality into the now-empty baos
    }
    ByteArrayInputStream isBm = new ByteArrayInputStream(baos.toByteArray());
    BitmapFactory.Options newOpts = new BitmapFactory.Options();
    // first decode pass with options.inJustDecodeBounds = true reads the dimensions only
    newOpts.inJustDecodeBounds = true;
    Bitmap bitmap = BitmapFactory.decodeStream(isBm, null, newOpts);
    newOpts.inJustDecodeBounds = false;
    int w = newOpts.outWidth;
    int h = newOpts.outHeight;
    // target roughly an 800x480 screen
    float hh = 800f; // target height
    float ww = 480f; // target width
    // compute the sampling ratio; since the aspect ratio is fixed, one dimension suffices
    int be = 1; // be = 1 means no downsampling
    if (w > h && w > ww) { // wider than tall: scale by width
        be = (int) (newOpts.outWidth / ww);
    } else if (w < h && h > hh) { // taller than wide: scale by height
        be = (int) (newOpts.outHeight / hh);
    }
    if (be <= 0)
        be = 1;
    newOpts.inSampleSize = be; // set the sampling ratio
    // decode again, this time with options.inJustDecodeBounds = false to get actual pixels
    isBm = new ByteArrayInputStream(baos.toByteArray());
    bitmap = BitmapFactory.decodeStream(isBm, null, newOpts);
    return compressImage(bitmap); // after size scaling, apply quality compression as well
}
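
The reset() in this example is load-bearing: Bitmap.compress appends to whatever the stream already holds, so without it the buffer would contain the full-quality JPEG followed by the 50%-quality one. Distilled to just that step:

ByteArrayOutputStream baos = new ByteArrayOutputStream();
image.compress(Bitmap.CompressFormat.JPEG, 100, baos); // first pass at full quality
baos.reset(); // discard the oversized first pass
image.compress(Bitmap.CompressFormat.JPEG, 50, baos);  // the buffer now holds only the 50% pass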

From source file:org.wso2.carbon.sequences.services.SequenceAdmin.java

public void addDynamicSequence(String key, OMElement sequence) throws SequenceEditorException {
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    stream.reset();
    try {
        XMLPrettyPrinter.prettify(sequence, stream);
    } catch (Exception e) {
        handleException("Unable to pretty print configuration", e);
    }
    try {
        org.wso2.carbon.registry.core.Registry registry;
        if (key.startsWith("conf:")) {
            registry = getConfigSystemRegistry();
            key = key.replace("conf:", "");
        } else {
            registry = getGovernanceRegistry();
            key = key.replace("gov:", "");
        }
        if (registry.resourceExists(key)) {
            handleException("Resource is already exists");
        }
        Resource resource = registry.newResource();
        resource.setMediaType(WSO2_SEQUENCE_MEDIA_TYPE);
        resource.setContent(new String(stream.toByteArray()).trim());
        registry.put(key, resource);
    } catch (RegistryException e) {
        handleException("WSO2 Registry Exception", e);
    }
}
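
Worth noting: the stream.reset() in this example is redundant, since a freshly constructed ByteArrayOutputStream is already empty; reset() only pays off when an existing buffer is being reused.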

From source file:org.firstopen.singularity.devicemgr.interrogator.IPSADC_IPICO_IO.java

/**
 * Find ASCII-encoded messages in the output stream. A valid message begins
 * with "aa" and ends with "<CR><LF>". There may be one or more messages in
 * the buffer. The buffer is reset, and any partial message is added
 * back to the buffer for processing on the next serial event.
 *
 * @param message
 * @return messageList
 */
protected ArrayList<byte[]> findMessage(ByteArrayOutputStream message) {

    ArrayList<byte[]> messageList = new ArrayList<byte[]>();

    synchronized (message) {

        // log.debug("message is " + message + " record length = " +
        // RECORD_LENGTH);

        byte[] buffer = message.toByteArray();
        byte[] messageBuf = new byte[RECORD_LENGTH];

        message.reset();
        for (int i = 0; i < buffer.length - 1; i++) {
            if (buffer[i] == 97 && buffer[i + 1] == 97) {
                if (i + RECORD_LENGTH - 1 > buffer.length) {

                    message.write(buffer, i, buffer.length - i);
                    break;
                }
                if (buffer[i + RECORD_LENGTH - 2] == 13 && buffer[i + RECORD_LENGTH - 1] == 10) {
                    messageBuf = new byte[RECORD_LENGTH];
                    System.arraycopy(buffer, i, messageBuf, 0, RECORD_LENGTH);
                    messageList.add(messageBuf);
                    i = i + RECORD_LENGTH - 1; // index to end of message
                }

            } // end if start message found
        }
    }
    return messageList;

}
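
A hedged usage sketch (RECORD_LENGTH is internal to the class; the value 8 below is an assumption for illustration): one complete record comes back in the list, while the partial tail survives the reset() because findMessage writes it back into the stream.

ByteArrayOutputStream stream = new ByteArrayOutputStream();
byte[] input = "aa1234\r\naa56".getBytes(); // one complete 8-byte record plus a partial one
stream.write(input, 0, input.length);

ArrayList<byte[]> messages = findMessage(stream);
// messages holds the single complete record "aa1234\r\n"; the stream now
// contains only "aa56", to be completed by the next serial event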

From source file:org.apache.hadoop.hive.service.JDBCExecuteThread.java

public void run() {
    SessionState session = new SessionState(new HiveConf(SessionState.class));
    SessionState.start(session);
    session.in = null;
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    ByteArrayOutputStream err = new ByteArrayOutputStream();
    session.out = new PrintStream(out);
    session.err = new PrintStream(err);

    SessionState ss = SessionState.get();
    if (ss == null || driver == null)
        return;

    ss.setiscli(true);
    ss.setUserName(user);
    ss.setDbName(dbName);

    driver.setProcessState(Driver.SQLProcessState.PROCESSING);
    int exeRet = driver.run(cmd);
    if (exeRet == 0)
        driver.setProcessState(Driver.SQLProcessState.COMPLETE);
    else
        driver.setProcessState(Driver.SQLProcessState.ERROR);

    String errorMessage = null;
    if (ss != null) {
        ss.get().out.flush();
        ss.get().err.flush();
    }
    if (exeRet != 0) {
        errorMessage = err.toString();
    }
    out.reset();
    err.reset();

    if (exeRet != 0) {
        driver.setErrorMsg("Query w/ errno: " + exeRet + " " + errorMessage);
    }
    if (exeRet == 0)
        driver.setProcessState(Driver.SQLProcessState.COMPLETE);
    else
        driver.setProcessState(Driver.SQLProcessState.ERROR);

    LOG.error("SQL execute end");
}
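
The reset() calls mark the end of the capture cycle here: session output is flushed into the two buffers, the error text is extracted with err.toString(), and both capture buffers are then emptied so any further writes through session.out and session.err start from a clean slate.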

From source file:org.apache.nifi.processors.standard.TestPostHTTP.java

@Test
public void testSendAsFlowFile() throws Exception {
    setup(null);
    runner.setProperty(PostHTTP.URL, server.getUrl());
    runner.setProperty(PostHTTP.SEND_AS_FLOWFILE, "true");

    final Map<String, String> attrs = new HashMap<>();
    attrs.put("abc", "cba");

    runner.enqueue("Hello".getBytes(), attrs);
    attrs.put("abc", "abc");
    attrs.put("filename", "xyz.txt");
    runner.enqueue("World".getBytes(), attrs);

    runner.run(1);
    runner.assertAllFlowFilesTransferred(PostHTTP.REL_SUCCESS);

    final byte[] lastPost = servlet.getLastPost();
    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    final ByteArrayInputStream bais = new ByteArrayInputStream(lastPost);

    FlowFileUnpackagerV3 unpacker = new FlowFileUnpackagerV3();

    // unpack first flowfile received
    Map<String, String> receivedAttrs = unpacker.unpackageFlowFile(bais, baos);
    byte[] contentReceived = baos.toByteArray();
    assertEquals("Hello", new String(contentReceived));
    assertEquals("cba", receivedAttrs.get("abc"));

    assertTrue(unpacker.hasMoreData());

    baos.reset();
    receivedAttrs = unpacker.unpackageFlowFile(bais, baos);
    contentReceived = baos.toByteArray();

    assertEquals("World", new String(contentReceived));
    assertEquals("abc", receivedAttrs.get("abc"));
    assertEquals("xyz.txt", receivedAttrs.get("filename"));
    Assert.assertNull(receivedAttrs.get("Content-Length"));
}
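
The baos.reset() between the two unpackageFlowFile calls is what isolates the assertions: without it, the second toByteArray() would return "HelloWorld" rather than "World". (The same pattern appears in the secure variant of this test below.)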

From source file:org.cloudfoundry.client.lib.io.DynamicZipInputStreamTest.java

@Test
public void shouldCreateValidZipContent() throws Exception {

    byte[] f1 = newRandomBytes(10000);
    byte[] f2 = newRandomBytes(10000);

    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    ZipOutputStream zipOutputStream = new ZipOutputStream(bos);
    zipOutputStream.putNextEntry(new ZipEntry("a/b/c"));
    zipOutputStream.write(f1);
    zipOutputStream.closeEntry();
    zipOutputStream.putNextEntry(new ZipEntry("a/b/c/d/"));
    zipOutputStream.closeEntry();
    zipOutputStream.putNextEntry(new ZipEntry("d/e/f"));
    zipOutputStream.write(f2);
    zipOutputStream.closeEntry();
    zipOutputStream.flush();
    zipOutputStream.close();
    byte[] expected = bos.toByteArray();

    List<DynamicZipInputStream.Entry> entries = new ArrayList<DynamicZipInputStream.Entry>();
    entries.add(newEntry("a/b/c", f1));
    entries.add(newEntry("a/b/c/d/", null));
    entries.add(newEntry("d/e/f", f2));
    DynamicZipInputStream inputStream = new DynamicZipInputStream(entries);
    bos.reset();
    FileCopyUtils.copy(inputStream, bos);
    byte[] actual = bos.toByteArray();

    assertThat(actual, is(equalTo(expected)));
}
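
One buffer does double duty in this test: toByteArray() snapshots the expected zip bytes (it returns a copy, so expected is unaffected by what follows), then reset() empties the stream so FileCopyUtils.copy can collect the actual bytes into the same allocation.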

From source file:com.linkedin.pinot.integration.tests.BaseClusterIntegrationTest.java

public static void pushAvroIntoKafka(List<File> avroFiles, String kafkaBroker, String kafkaTopic,
        final byte[] header) {
    Properties properties = new Properties();
    properties.put("metadata.broker.list", kafkaBroker);
    properties.put("serializer.class", "kafka.serializer.DefaultEncoder");
    properties.put("request.required.acks", "1");

    ProducerConfig producerConfig = new ProducerConfig(properties);
    Producer<byte[], byte[]> producer = new Producer<byte[], byte[]>(producerConfig);
    for (File avroFile : avroFiles) {
        try {
            ByteArrayOutputStream outputStream = new ByteArrayOutputStream(65536);
            DataFileStream<GenericRecord> reader = AvroUtils.getAvroReader(avroFile);
            BinaryEncoder binaryEncoder = new EncoderFactory().directBinaryEncoder(outputStream, null);
            GenericDatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<GenericRecord>(
                    reader.getSchema());
            int recordCount = 0;
            List<KeyedMessage<byte[], byte[]>> messagesToWrite = new ArrayList<KeyedMessage<byte[], byte[]>>(
                    10000);
            int messagesInThisBatch = 0;
            for (GenericRecord genericRecord : reader) {
                outputStream.reset();
                if (header != null && 0 < header.length) {
                    outputStream.write(header);
                }
                datumWriter.write(genericRecord, binaryEncoder);
                binaryEncoder.flush();

                byte[] bytes = outputStream.toByteArray();
                KeyedMessage<byte[], byte[]> data = new KeyedMessage<byte[], byte[]>(kafkaTopic,
                        Longs.toByteArray(System.currentTimeMillis()), bytes);

                if (BATCH_KAFKA_MESSAGES) {
                    messagesToWrite.add(data);
                    messagesInThisBatch++;
                    if (MAX_MESSAGES_PER_BATCH <= messagesInThisBatch) {
                        LOGGER.debug("Sending a batch of {} records to Kafka", messagesInThisBatch);
                        messagesInThisBatch = 0;
                        producer.send(messagesToWrite);
                        messagesToWrite.clear();
                    }
                } else {
                    producer.send(data);
                }
                recordCount += 1;
            }

            if (BATCH_KAFKA_MESSAGES) {
                LOGGER.info("Sending last match of {} records to Kafka", messagesToWrite.size());
                producer.send(messagesToWrite);
            }

            outputStream.close();
            reader.close();
            LOGGER.info("Finished writing " + recordCount + " records from " + avroFile.getName()
                    + " into Kafka topic " + kafkaTopic + " from file " + avroFile.getName());
            int totalRecordCount = totalAvroRecordWrittenCount.addAndGet(recordCount);
            LOGGER.info("Total records written so far " + totalRecordCount);
        } catch (Exception e) {
            e.printStackTrace();
            throw new RuntimeException(e);
        }
    }
}
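
Calling outputStream.reset() at the top of the loop, rather than after the send, guarantees that every Kafka message starts from an empty buffer and carries exactly one serialized record plus the optional header, even if a previous iteration aborted partway through writing.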

From source file:org.apache.nifi.processors.standard.TestPostHTTP.java

@Test
public void testSendAsFlowFileSecure() throws Exception {
    final Map<String, String> sslProps = new HashMap<>();
    sslProps.put(StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/localhost-ks.jks");
    sslProps.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "localtest");
    sslProps.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), "JKS");
    sslProps.put(StandardSSLContextService.TRUSTSTORE.getName(), "src/test/resources/localhost-ts.jks");
    sslProps.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "localtest");
    sslProps.put(StandardSSLContextService.TRUSTSTORE_TYPE.getName(), "JKS");
    sslProps.put(TestServer.NEED_CLIENT_AUTH, "true");
    setup(sslProps);

    final SSLContextService sslContextService = new StandardSSLContextService();
    runner.addControllerService("ssl-context", sslContextService);
    runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE,
            "src/test/resources/localhost-ts.jks");
    runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_PASSWORD, "localtest");
    runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_TYPE, "JKS");
    runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE,
            "src/test/resources/localhost-ks.jks");
    runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_PASSWORD, "localtest");
    runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_TYPE, "JKS");
    runner.enableControllerService(sslContextService);

    runner.setProperty(PostHTTP.URL, server.getSecureUrl());
    runner.setProperty(PostHTTP.SEND_AS_FLOWFILE, "true");
    runner.setProperty(PostHTTP.SSL_CONTEXT_SERVICE, "ssl-context");

    final Map<String, String> attrs = new HashMap<>();
    attrs.put("abc", "cba");

    runner.enqueue("Hello".getBytes(), attrs);
    attrs.put("abc", "abc");
    attrs.put("filename", "xyz.txt");
    runner.enqueue("World".getBytes(), attrs);

    runner.run(1);
    runner.assertAllFlowFilesTransferred(PostHTTP.REL_SUCCESS);

    final byte[] lastPost = servlet.getLastPost();
    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    final ByteArrayInputStream bais = new ByteArrayInputStream(lastPost);

    FlowFileUnpackagerV3 unpacker = new FlowFileUnpackagerV3();

    // unpack first flowfile received
    Map<String, String> receivedAttrs = unpacker.unpackageFlowFile(bais, baos);
    byte[] contentReceived = baos.toByteArray();
    assertEquals("Hello", new String(contentReceived));
    assertEquals("cba", receivedAttrs.get("abc"));

    assertTrue(unpacker.hasMoreData());

    baos.reset();
    receivedAttrs = unpacker.unpackageFlowFile(bais, baos);
    contentReceived = baos.toByteArray();

    assertEquals("World", new String(contentReceived));
    assertEquals("abc", receivedAttrs.get("abc"));
    assertEquals("xyz.txt", receivedAttrs.get("filename"));
}

From source file:gov.nih.nci.caarray.domain.MultiPartBlob.java

/**
 * Method that takes an input stream and breaks it up into multiple blobs. Note that this method loads each chunk
 * into a byte[]; while this is not ideal, the MySQL driver does the same thing anyway, so we are not adding a new
 * inefficiency.
 * 
 * @param data the input stream to store.
 * @param compress true to compress the data, false to leave it uncompressed
 * @param blobPartSize the maximum size of a single blob
 * @throws IOException on error reading from the stream.
 */
public void writeData(InputStream data, boolean compress, int blobPartSize) throws IOException {
    final ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
    OutputStream writeStream;
    if (compress) {
        writeStream = new GZIPOutputStream(byteStream);
    } else {
        writeStream = byteStream;
    }
    byte[] unwritten = new byte[0];
    final byte[] uncompressed = new byte[blobPartSize];
    int len = 0;
    while ((len = data.read(uncompressed)) > 0) {
        uncompressedSize += len;
        writeStream.write(uncompressed, 0, len);
        if (byteStream.size() + unwritten.length >= blobPartSize) {
            compressedSize += byteStream.size();
            unwritten = writeData(ArrayUtils.addAll(unwritten, byteStream.toByteArray()), blobPartSize, false);
            byteStream.reset();
        }
    }
    IOUtils.closeQuietly(writeStream);
    compressedSize += byteStream.size();
    writeData(ArrayUtils.addAll(unwritten, byteStream.toByteArray()), blobPartSize, true);
}