Example usage for java.io EOFException getMessage

Introduction

On this page you can find example usage of java.io.EOFException.getMessage().

Prototype

public String getMessage() 

Document

Returns the detail message string of this throwable.
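
A minimal, self-contained sketch (not taken from the examples below) of where getMessage typically comes into play: DataInputStream.readFully throws EOFException when the stream ends early, and the handler reads the detail message. Note that getMessage may return null, since EOFException is often constructed without a message.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;

public class EofMessageDemo {
    public static void main(String[] args) throws IOException {
        byte[] data = new byte[] { 1, 2, 3 }; // fewer bytes than we will ask for
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(data))) {
            in.readFully(new byte[8]); // asks for 8 bytes, only 3 are available
        } catch (EOFException e) {
            // getMessage() may return null; guard before using it
            String msg = e.getMessage();
            System.out.println("EOF reached: " + (msg != null ? msg : "<no detail message>"));
        }
    }
}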

Usage

From source file:org.jared.synodroid.ds.server.SimpleSynoServer.java

/**
 * Send a request to the server.
 * 
 * @param uriP
 *            The part of the URI ie: /webman/doit.cgi
 * @param requestP
 *            The query in the form 'param1=foo&param2=yes'
 * @param methodP
 *            The method to send this request
 * @return A JSONObject containing the response of the server
 * @throws DSMException
 */
public JSONObject sendJSONRequest(String uriP, String requestP, String methodP, boolean log, int MAX_RETRY)
        throws Exception {
    HttpURLConnection con = null;
    OutputStreamWriter wr = null;
    BufferedReader br = null;
    StringBuffer sb = null;
    Exception last_exception = null;
    try {

        // For some reason in Gingerbread I often get a response code of -1.
        // Here we retry up to MAX_RETRY times; the request usually succeeds on the second try.
        int retry = 0;
        while (retry <= MAX_RETRY) {
            try {
                // Create the connection
                con = createConnection(uriP, requestP, methodP, log);
                // Add the parameters
                wr = new OutputStreamWriter(con.getOutputStream());
                wr.write(requestP);
                // Send the request
                wr.flush();
                wr.close();

                // Try to retrieve the session cookie
                String newCookie = con.getHeaderField("set-cookie");
                if (newCookie != null) {
                    synchronized (this) {
                        setCookie(newCookie);
                    }
                    if (DEBUG)
                        Log.v(Synodroid.DS_TAG, "Retrieved cookies: " + cookies);
                }

                // Now read the response and build a string from it
                br = new BufferedReader(new InputStreamReader(con.getInputStream()));
                sb = new StringBuffer();
                String line;
                while ((line = br.readLine()) != null) {
                    sb.append(line);
                }
                br.close();
                // Verify the response code is not -1; if it is, retry the request
                if (con.getResponseCode() == -1) {
                    retry++;
                    last_exception = null;
                    if (DEBUG)
                        Log.w(Synodroid.DS_TAG, "Response code is -1 (retry: " + retry + ")");
                } else {
                    if (DEBUG)
                        Log.d(Synodroid.DS_TAG, "Response is: " + sb.toString());
                    JSONObject respJSO = null;
                    try {
                        respJSO = new JSONObject(sb.toString());
                    } catch (JSONException je) {
                        respJSO = new JSONObject();
                    }
                    return respJSO;
                }
            } catch (EOFException e) {
                if (DEBUG)
                    Log.w(Synodroid.DS_TAG, "Caught EOFException while contacting the server, retrying...");
                retry++;
                last_exception = e;
            } catch (SocketException e) {
                if (DEBUG)
                    Log.e(Synodroid.DS_TAG, "Caught SocketException while contacting the server, stopping...");
                throw e;
            } catch (SSLHandshakeException e) {
                if (DEBUG)
                    Log.e(Synodroid.DS_TAG,
                            "Caught SSLHandshakeException while contacting the server, stopping...");
                throw e;
            } catch (FileNotFoundException e) {
                String msg = e.getMessage();
                if (DEBUG)
                    Log.e(Synodroid.DS_TAG,
                            "Could not find file " + msg + "\nProbably wrong DSM version, stopping...");
                throw e;
            } catch (Exception e) {
                if (DEBUG)
                    Log.e(Synodroid.DS_TAG, "Caught exception while contacting the server, retrying...", e);
                retry++;
                last_exception = e;
            } finally {
                // createConnection may have thrown before con was assigned
                if (con != null) {
                    con.disconnect();
                }
            }

        }
        if (last_exception != null)
            throw last_exception;
        throw new GenericException();
    }
    // Finally close everything; the streams may still be open if an exception
    // escaped the retry loop before the close calls were reached
    finally {
        if (wr != null) {
            try { wr.close(); } catch (Exception ignored) { }
        }
        if (br != null) {
            try { br.close(); } catch (Exception ignored) { }
        }
    }
}
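
The method above treats EOFException as a transient failure worth retrying, while SocketException and SSLHandshakeException abort immediately. A self-contained sketch of that retry pattern, distilled from the code above (the withRetry and Call names are illustrative, not part of the Synodroid sources):

import java.io.EOFException;
import java.net.SocketException;

public class RetryPattern {
    interface Call<T> { T run() throws Exception; }

    // Retry transient failures (EOFException here) up to maxRetry times;
    // rethrow fatal ones immediately, and surface the last transient error.
    static <T> T withRetry(Call<T> call, int maxRetry) throws Exception {
        Exception last = null;
        for (int attempt = 0; attempt <= maxRetry; attempt++) {
            try {
                return call.run();
            } catch (EOFException e) {
                last = e; // transient: the connection died mid-response, try again
            } catch (SocketException e) {
                throw e;  // fatal: do not retry
            }
        }
        throw last != null ? last : new IllegalStateException("no attempts made");
    }
}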

From source file:org.apache.shindig.gadgets.http.BasicHttpFetcher.java

/**
 * This method is a safe replica of org.apache.http.util.EntityUtils.toByteArray.
 * The try block wrapping 'instream.read' has a corresponding catch block for
 * 'EOFException' (which is ignored); all other IOExceptions are allowed to propagate.
 *
 * @param entity the HTTP entity whose content is read
 * @return byte array containing the entity content. May be empty/null.
 * @throws IOException if an error occurs reading the input stream
 */
public byte[] toByteArraySafe(final HttpEntity entity) throws IOException {
    if (entity == null) {
        return null;
    }

    InputStream instream = entity.getContent();
    if (instream == null) {
        return new byte[] {};
    }
    Preconditions.checkArgument(entity.getContentLength() < Integer.MAX_VALUE,
            "HTTP entity too large to be buffered in memory");

    // The raw data stream (inside the JDK) is read in a buffer of size 512. The
    // original code, org.apache.http.util.EntityUtils.toByteArray, reads the
    // unzipped data in a buffer of 4096 bytes. For any data stream with a
    // compression ratio smaller than 1/8, this may result in a buffer/array
    // overflow. Increasing the buffer size to 16384: it is highly unlikely to
    // see compression ratios smaller than 1/32 (3%).
    final int bufferLength = 16384;
    int i = (int) entity.getContentLength();
    if (i < 0) {
        i = bufferLength;
    }
    ByteArrayBuffer buffer = new ByteArrayBuffer(i);
    try {
        byte[] tmp = new byte[bufferLength];
        int l;
        while ((l = instream.read(tmp)) != -1) {
            buffer.append(tmp, 0, l);
        }
    } catch (EOFException eofe) {
        /**
         * Ref: http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4040920
         * Due to a bug in the JDK ZLIB code (InflaterInputStream), an unexpected
         * EOF error can occur. In such cases, even if the input stream has finished
         * reading, the 'Inflater.finished()' call erroneously returns 'false' and
         * 'java.util.zip.InflaterInputStream.fill' throws the 'EOFException'.
         * For such cases, ignore the exception if its message is
         * 'Unexpected end of ZLIB input stream'.
         *
         * Also ignore the exception when it has no message, which is the case
         * where {@link GZIPInputStream#readUByte} throws EOFException with an
         * empty message. A bug has been filed with Sun and will be mentioned
         * here once it is accepted.
         */
        if (instream.available() == 0 && (eofe.getMessage() == null
                || eofe.getMessage().equals("Unexpected end of ZLIB input stream"))) {
            LOG.log(Level.FINE, "EOFException: ", eofe);
        } else {
            throw eofe;
        }
    } finally {
        instream.close();
    }
    return buffer.toByteArray();
}
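
The whitelisted EOF cases above can be reproduced with a truncated GZIP stream. A standalone sketch (the payload and the truncation amount are illustrative): chopping into the 8-byte GZIP trailer makes the JDK throw exactly the kind of EOFException whose message the code inspects.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.EOFException;
import java.io.IOException;
import java.util.Arrays;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

public class GzipEofDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (GZIPOutputStream gz = new GZIPOutputStream(bos)) {
            gz.write("hello world".getBytes("UTF-8"));
        }
        byte[] full = bos.toByteArray();
        byte[] truncated = Arrays.copyOf(full, full.length - 4); // chop into the trailer
        try (GZIPInputStream in = new GZIPInputStream(new ByteArrayInputStream(truncated))) {
            byte[] buf = new byte[4096];
            while (in.read(buf) != -1) {
                // drain the stream; the EOFException surfaces here
            }
        } catch (EOFException e) {
            // These are the cases toByteArraySafe tolerates: the message may be
            // null or "Unexpected end of ZLIB input stream".
            System.out.println("message = " + e.getMessage());
        }
    }
}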

From source file:eu.stratosphere.pact.runtime.hash.CompactingHashTable.java

public final void insert(T record) throws IOException {
    final int hashCode = hash(this.buildSideComparator.hash(record));
    final int posHashCode = hashCode % this.numBuckets;

    // get the bucket for the given hash code
    final int bucketArrayPos = posHashCode >>> this.bucketsPerSegmentBits;
    final int bucketInSegmentPos = (posHashCode & this.bucketsPerSegmentMask) << NUM_INTRA_BUCKET_BITS;
    final MemorySegment bucket = this.buckets[bucketArrayPos];

    // get the basic characteristics of the bucket
    final int partitionNumber = bucket.get(bucketInSegmentPos + HEADER_PARTITION_OFFSET);
    final InMemoryPartition<T> p = this.partitions.get(partitionNumber);

    long pointer;
    try {
        pointer = p.appendRecord(record);
        if ((pointer >> this.pageSizeInBits) > this.compactionMemory.getBlockCount()) {
            this.compactionMemory.allocateSegments((int) (pointer >> this.pageSizeInBits));
        }
    } catch (EOFException e) {
        try {
            compactPartition(partitionNumber);
            // retry append
            pointer = this.partitions.get(partitionNumber).appendRecord(record);
        } catch (EOFException ex) {
            throw new RuntimeException(
                    "Memory ran out. Compaction failed. numPartitions: " + this.partitions.size()
                            + " minPartition: " + getMinPartition() + " maxPartition: " + getMaxPartition()
                            + " bucketSize: " + this.buckets.length + " Message: " + ex.getMessage());
        } catch (IndexOutOfBoundsException ex) {
            throw new RuntimeException(
                    "Memory ran out. Compaction failed. numPartitions: " + this.partitions.size()
                            + " minPartition: " + getMinPartition() + " maxPartition: " + getMaxPartition()
                            + " bucketSize: " + this.buckets.length + " Message: " + ex.getMessage());
        }
    } catch (IndexOutOfBoundsException e1) {
        try {
            compactPartition(partitionNumber);
            // retry append
            pointer = this.partitions.get(partitionNumber).appendRecord(record);
        } catch (EOFException ex) {
            throw new RuntimeException(
                    "Memory ran out. Compaction failed. numPartitions: " + this.partitions.size()
                            + " minPartition: " + getMinPartition() + " maxPartition: " + getMaxPartition()
                            + " bucketSize: " + this.buckets.length + " Message: " + ex.getMessage());
        } catch (IndexOutOfBoundsException ex) {
            throw new RuntimeException(
                    "Memory ran out. Compaction failed. numPartitions: " + this.partitions.size()
                            + " minPartition: " + getMinPartition() + " maxPartition: " + getMaxPartition()
                            + " bucketSize: " + this.buckets.length + " Message: " + ex.getMessage());
        }
    }
    insertBucketEntryFromStart(p, bucket, bucketInSegmentPos, hashCode, pointer);
}
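
The bucket addressing above is plain bit arithmetic: the low bits of posHashCode select a bucket inside a memory segment and the high bits select the segment. A standalone sketch with assumed constants (the values are illustrative, not Stratosphere's actual configuration):

public class BucketAddressing {
    // Illustrative layout: 16 buckets per segment, 128-byte buckets.
    static final int BUCKETS_PER_SEGMENT_BITS = 4;
    static final int BUCKETS_PER_SEGMENT_MASK = (1 << BUCKETS_PER_SEGMENT_BITS) - 1;
    static final int NUM_INTRA_BUCKET_BITS = 7; // 2^7 = 128 bytes per bucket

    public static void main(String[] args) {
        int posHashCode = 37; // bucket index within the whole table
        int segment = posHashCode >>> BUCKETS_PER_SEGMENT_BITS;           // 37 / 16 = 2
        int byteOffset = (posHashCode & BUCKETS_PER_SEGMENT_MASK)
                << NUM_INTRA_BUCKET_BITS;                                 // (37 % 16) * 128 = 640
        System.out.println("segment " + segment + ", byte offset " + byteOffset);
    }
}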

From source file:org.apache.flink.runtime.operators.hash.CompactingHashTable.java

public final void insert(T record) throws IOException {
    if (this.closed.get()) {
        return;
    }
    final int hashCode = hash(this.buildSideComparator.hash(record));
    final int posHashCode = hashCode % this.numBuckets;

    // get the bucket for the given hash code
    final int bucketArrayPos = posHashCode >>> this.bucketsPerSegmentBits;
    final int bucketInSegmentPos = (posHashCode & this.bucketsPerSegmentMask) << NUM_INTRA_BUCKET_BITS;
    final MemorySegment bucket = this.buckets[bucketArrayPos];

    // get the basic characteristics of the bucket
    final int partitionNumber = bucket.get(bucketInSegmentPos + HEADER_PARTITION_OFFSET);
    InMemoryPartition<T> partition = this.partitions.get(partitionNumber);

    long pointer;
    try {
        pointer = partition.appendRecord(record);
        if ((pointer >> this.pageSizeInBits) > this.compactionMemory.getBlockCount()) {
            this.compactionMemory.allocateSegments((int) (pointer >> this.pageSizeInBits));
        }
    } catch (EOFException e) {
        try {
            compactPartition(partitionNumber);
            // retry append
            partition = this.partitions.get(partitionNumber); // compaction invalidates reference
            pointer = partition.appendRecord(record);
        } catch (EOFException ex) {
            throw new RuntimeException("Memory ran out. Compaction failed. " + getMemoryConsumptionString()
                    + " Message: " + ex.getMessage());
        } catch (IndexOutOfBoundsException ex) {
            throw new RuntimeException("Memory ran out. Compaction failed. " + getMemoryConsumptionString()
                    + " Message: " + ex.getMessage());
        }
    } catch (IndexOutOfBoundsException e1) {
        try {
            compactPartition(partitionNumber);
            // retry append
            partition = this.partitions.get(partitionNumber); // compaction invalidates reference
            pointer = partition.appendRecord(record);
        } catch (EOFException ex) {
            throw new RuntimeException("Memory ran out. Compaction failed. " + getMemoryConsumptionString()
                    + " Message: " + ex.getMessage());
        } catch (IndexOutOfBoundsException ex) {
            throw new RuntimeException("Memory ran out. Compaction failed. " + getMemoryConsumptionString()
                    + " Message: " + ex.getMessage());
        }
    }
    insertBucketEntryFromStart(partition, bucket, bucketInSegmentPos, hashCode, pointer);
}

From source file:eu.stratosphere.pact.runtime.hash.CompactingHashTable.java

/**
 * Replaces record in hash table if record already present or append record if not.
 * May trigger an expensive compaction.
 * 
 * @param record record to insert or replace
 * @param tempHolder instance of T that will be overwritten
 * @throws IOException
 */
public void insertOrReplaceRecord(T record, T tempHolder) throws IOException {
    final int searchHashCode = hash(this.buildSideComparator.hash(record));
    final int posHashCode = searchHashCode % this.numBuckets;

    // get the bucket for the given hash code
    MemorySegment originalBucket = this.buckets[posHashCode >> this.bucketsPerSegmentBits];
    int originalBucketOffset = (posHashCode & this.bucketsPerSegmentMask) << NUM_INTRA_BUCKET_BITS;
    MemorySegment bucket = originalBucket;
    int bucketInSegmentOffset = originalBucketOffset;

    // get the basic characteristics of the bucket
    final int partitionNumber = bucket.get(bucketInSegmentOffset + HEADER_PARTITION_OFFSET);
    final InMemoryPartition<T> partition = this.partitions.get(partitionNumber);
    final MemorySegment[] overflowSegments = partition.overflowSegments;

    this.buildSideComparator.setReference(record);

    int countInSegment = bucket.getInt(bucketInSegmentOffset + HEADER_COUNT_OFFSET);
    int numInSegment = 0;
    int posInSegment = bucketInSegmentOffset + BUCKET_HEADER_LENGTH;

    long currentForwardPointer = BUCKET_FORWARD_POINTER_NOT_SET;

    // loop over all segments that are involved in the bucket (original bucket plus overflow buckets)
    while (true) {

        while (numInSegment < countInSegment) {

            final int thisCode = bucket.getInt(posInSegment);
            posInSegment += HASH_CODE_LEN;

            // check if the hash code matches
            if (thisCode == searchHashCode) {
                // get the pointer to the pair
                final int pointerOffset = bucketInSegmentOffset + BUCKET_POINTER_START_OFFSET
                        + (numInSegment * POINTER_LEN);
                final long pointer = bucket.getLong(pointerOffset);
                numInSegment++;

                // deserialize the key to check whether it is really equal, or whether we had only a hash collision
                try {
                    partition.readRecordAt(pointer, tempHolder);
                    if (this.buildSideComparator.equalToReference(tempHolder)) {
                        long newPointer = partition.appendRecord(record);
                        bucket.putLong(pointerOffset, newPointer);
                        partition.setCompaction(false);
                        if ((newPointer >> this.pageSizeInBits) > this.compactionMemory.getBlockCount()) {
                            this.compactionMemory.allocateSegments((int) (newPointer >> this.pageSizeInBits));
                        }
                        return;
                    }
                } catch (EOFException e) {
                    // system is out of memory so we attempt to reclaim memory with a copy compact run
                    long newPointer;
                    try {
                        compactPartition(partition.getPartitionNumber());
                        // retry append
                        newPointer = this.partitions.get(partitionNumber).appendRecord(record);
                    } catch (EOFException ex) {
                        throw new RuntimeException("Memory ran out. Compaction failed. numPartitions: "
                                + this.partitions.size() + " minPartition: " + getMinPartition()
                                + " maxPartition: " + getMaxPartition() + " bucketSize: " + this.buckets.length
                                + " Message: " + ex.getMessage());
                    } catch (IndexOutOfBoundsException ex) {
                        throw new RuntimeException("Memory ran out. Compaction failed. numPartitions: "
                                + this.partitions.size() + " minPartition: " + getMinPartition()
                                + " maxPartition: " + getMaxPartition() + " bucketSize: " + this.buckets.length
                                + " Message: " + ex.getMessage());
                    }
                    bucket.putLong(pointerOffset, newPointer);
                    return;
                } catch (IndexOutOfBoundsException e) {
                    // system is out of memory so we attempt to reclaim memory with a copy compact run
                    long newPointer;
                    try {
                        compactPartition(partition.getPartitionNumber());
                        // retry append
                        newPointer = this.partitions.get(partitionNumber).appendRecord(record);
                    } catch (EOFException ex) {
                        throw new RuntimeException("Memory ran out. Compaction failed. numPartitions: "
                                + this.partitions.size() + " minPartition: " + getMinPartition()
                                + " maxPartition: " + getMaxPartition() + " bucketSize: " + this.buckets.length
                                + " Message: " + ex.getMessage());
                    } catch (IndexOutOfBoundsException ex) {
                        throw new RuntimeException("Memory ran out. Compaction failed. numPartitions: "
                                + this.partitions.size() + " minPartition: " + getMinPartition()
                                + " maxPartition: " + getMaxPartition() + " bucketSize: " + this.buckets.length
                                + " Message: " + ex.getMessage());
                    }
                    bucket.putLong(pointerOffset, newPointer);
                    return;
                } catch (IOException e) {
                    throw new RuntimeException(
                            "Error deserializing record from the hashtable: " + e.getMessage(), e);
                }
            } else {
                numInSegment++;
            }
        }

        // this segment is done. check if there is another chained bucket
        long newForwardPointer = bucket.getLong(bucketInSegmentOffset + HEADER_FORWARD_OFFSET);
        if (newForwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
            // nothing found. append and insert
            long pointer = partition.appendRecord(record);
            insertBucketEntryFromSearch(partition, originalBucket, bucket, originalBucketOffset,
                    bucketInSegmentOffset, countInSegment, currentForwardPointer, searchHashCode, pointer);
            if ((pointer >> this.pageSizeInBits) > this.compactionMemory.getBlockCount()) {
                this.compactionMemory.allocateSegments((int) (pointer >> this.pageSizeInBits));
            }
            return;
        }

        final int overflowSegNum = (int) (newForwardPointer >>> 32);
        bucket = overflowSegments[overflowSegNum];
        bucketInSegmentOffset = (int) (newForwardPointer & 0xffffffff);
        countInSegment = bucket.getInt(bucketInSegmentOffset + HEADER_COUNT_OFFSET);
        posInSegment = bucketInSegmentOffset + BUCKET_HEADER_LENGTH;
        numInSegment = 0;
        currentForwardPointer = newForwardPointer;
    }
}
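
The chained-bucket forward pointer decoded at the bottom of the loop packs an overflow segment number into the high 32 bits of a long and a byte offset into the low 32 bits. A standalone round-trip sketch (the pack helper is hypothetical, added for illustration):

public class ForwardPointer {
    // Pack an overflow-segment number and a byte offset into one long, mirroring
    // how insertOrReplaceRecord decodes the forward pointer above.
    static long pack(int overflowSegNum, int offset) {
        return ((long) overflowSegNum << 32) | (offset & 0xffffffffL);
    }

    public static void main(String[] args) {
        long forwardPointer = pack(3, 640);
        int overflowSegNum = (int) (forwardPointer >>> 32);        // 3
        int bucketOffset = (int) (forwardPointer & 0xffffffff);    // 640
        System.out.println(overflowSegNum + " / " + bucketOffset);
    }
}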

From source file:org.apache.flink.runtime.operators.hash.CompactingHashTable.java

/**
 * Replaces record in hash table if record already present or append record if not.
 * May trigger an expensive compaction.
 * 
 * @param record record to insert or replace
 * @param tempHolder instance of T that will be overwritten
 * @throws IOException
 */
public void insertOrReplaceRecord(T record, T tempHolder) throws IOException {
    if (this.closed.get()) {
        return;
    }

    final int searchHashCode = hash(this.buildSideComparator.hash(record));
    final int posHashCode = searchHashCode % this.numBuckets;

    // get the bucket for the given hash code
    MemorySegment originalBucket = this.buckets[posHashCode >> this.bucketsPerSegmentBits];
    int originalBucketOffset = (posHashCode & this.bucketsPerSegmentMask) << NUM_INTRA_BUCKET_BITS;
    MemorySegment bucket = originalBucket;
    int bucketInSegmentOffset = originalBucketOffset;

    // get the basic characteristics of the bucket
    final int partitionNumber = bucket.get(bucketInSegmentOffset + HEADER_PARTITION_OFFSET);
    InMemoryPartition<T> partition = this.partitions.get(partitionNumber);
    final MemorySegment[] overflowSegments = partition.overflowSegments;

    this.buildSideComparator.setReference(record);

    int countInSegment = bucket.getInt(bucketInSegmentOffset + HEADER_COUNT_OFFSET);
    int numInSegment = 0;
    int posInSegment = bucketInSegmentOffset + BUCKET_HEADER_LENGTH;

    long currentForwardPointer = BUCKET_FORWARD_POINTER_NOT_SET;

    // loop over all segments that are involved in the bucket (original bucket plus overflow buckets)
    while (true) {

        while (numInSegment < countInSegment) {

            final int thisCode = bucket.getInt(posInSegment);
            posInSegment += HASH_CODE_LEN;

            // check if the hash code matches
            if (thisCode == searchHashCode) {
                // get the pointer to the pair
                final int pointerOffset = bucketInSegmentOffset + BUCKET_POINTER_START_OFFSET
                        + (numInSegment * POINTER_LEN);
                final long pointer = bucket.getLong(pointerOffset);
                numInSegment++;

                // deserialize the key to check whether it is really equal, or whether we had only a hash collision
                try {
                    partition.readRecordAt(pointer, tempHolder);
                    if (this.buildSideComparator.equalToReference(tempHolder)) {
                        long newPointer = partition.appendRecord(record);
                        bucket.putLong(pointerOffset, newPointer);
                        partition.setCompaction(false);
                        if ((newPointer >> this.pageSizeInBits) > this.compactionMemory.getBlockCount()) {
                            this.compactionMemory.allocateSegments((int) (newPointer >> this.pageSizeInBits));
                        }
                        return;
                    }
                } catch (EOFException e) {
                    // system is out of memory so we attempt to reclaim memory with a copy compact run
                    long newPointer;
                    try {
                        compactPartition(partition.getPartitionNumber());
                        // retry append
                        partition = this.partitions.get(partitionNumber); // compaction invalidates reference
                        newPointer = partition.appendRecord(record);
                    } catch (EOFException ex) {
                        throw new RuntimeException("Memory ran out. Compaction failed. "
                                + getMemoryConsumptionString() + " Message: " + ex.getMessage());
                    } catch (IndexOutOfBoundsException ex) {
                        throw new RuntimeException("Memory ran out. Compaction failed. "
                                + getMemoryConsumptionString() + " Message: " + ex.getMessage());
                    }
                    bucket.putLong(pointerOffset, newPointer);
                    return;
                } catch (IndexOutOfBoundsException e) {
                    // system is out of memory so we attempt to reclaim memory with a copy compact run
                    long newPointer;
                    try {
                        compactPartition(partition.getPartitionNumber());
                        // retry append
                        partition = this.partitions.get(partitionNumber); // compaction invalidates reference
                        newPointer = partition.appendRecord(record);
                    } catch (EOFException ex) {
                        throw new RuntimeException("Memory ran out. Compaction failed. "
                                + getMemoryConsumptionString() + " Message: " + ex.getMessage());
                    } catch (IndexOutOfBoundsException ex) {
                        throw new RuntimeException("Memory ran out. Compaction failed. "
                                + getMemoryConsumptionString() + " Message: " + ex.getMessage());
                    }
                    bucket.putLong(pointerOffset, newPointer);
                    return;
                } catch (IOException e) {
                    throw new RuntimeException(
                            "Error deserializing record from the hashtable: " + e.getMessage(), e);
                }
            } else {
                numInSegment++;
            }
        }

        // this segment is done. check if there is another chained bucket
        long newForwardPointer = bucket.getLong(bucketInSegmentOffset + HEADER_FORWARD_OFFSET);
        if (newForwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
            // nothing found. append and insert
            long pointer = partition.appendRecord(record);
            //insertBucketEntryFromStart(partition, originalBucket, originalBucketOffset, searchHashCode, pointer);
            insertBucketEntryFromSearch(partition, originalBucket, bucket, originalBucketOffset,
                    bucketInSegmentOffset, countInSegment, currentForwardPointer, searchHashCode, pointer);
            if ((pointer >> this.pageSizeInBits) > this.compactionMemory.getBlockCount()) {
                this.compactionMemory.allocateSegments((int) (pointer >> this.pageSizeInBits));
            }
            return;
        }

        final int overflowSegNum = (int) (newForwardPointer >>> 32);
        bucket = overflowSegments[overflowSegNum];
        bucketInSegmentOffset = (int) (newForwardPointer & 0xffffffff);
        countInSegment = bucket.getInt(bucketInSegmentOffset + HEADER_COUNT_OFFSET);
        posInSegment = bucketInSegmentOffset + BUCKET_HEADER_LENGTH;
        numInSegment = 0;
        currentForwardPointer = newForwardPointer;
    }
}

From source file:com.android.tools.idea.sdk.remote.internal.archives.ArchiveInstaller.java

/**
 * Unzips a zip file into the given destination directory.
 * <p/>
 * The archive file MUST have a unique "root" folder.
 * This root folder is skipped when unarchiving.
 */
@SuppressWarnings("unchecked")
@VisibleForTesting(visibility = Visibility.PRIVATE)
protected boolean unzipFolder(ArchiveReplacement archiveInfo, File archiveFile, File unzipDestFolder,
        ITaskMonitor monitor) {

    Archive newArchive = archiveInfo.getNewArchive();
    RemotePkgInfo pkg = newArchive.getParentPackage();
    String pkgName = pkg.getShortDescription();
    long compressedSize = newArchive.getSize();

    ZipFile zipFile = null;
    try {
        zipFile = new ZipFile(archiveFile);

        // To advance the percent and the progress bar we would need the number of
        // items left to unzip, which we don't know. However, we do know the size
        // of the archive and the size of each uncompressed item. The zip file
        // format overhead is negligible, so that's a good approximation.
        long incStep = compressedSize / NUM_MONITOR_INC;
        long incTotal = 0;
        long incCurr = 0;
        int lastPercent = 0;

        byte[] buf = new byte[65536];

        Enumeration<ZipArchiveEntry> entries = zipFile.getEntries();
        while (entries.hasMoreElements()) {
            ZipArchiveEntry entry = entries.nextElement();

            String name = entry.getName();

            // ZipFile entries should have forward slashes, but not all Zip
            // implementations can be expected to do that.
            name = name.replace('\\', '/');

            // Zip entries are always packaged in a top-level directory (e.g. docs/index.html).
            int pos = name.indexOf('/');
            if (pos == -1) {
                // All zip entries should have a root folder.
                // This zip entry seems located at the root of the zip.
                // Rather than ignore the file, just place it at the root.
            } else if (pos == name.length() - 1) {
                // This is a zip *directory* entry in the form dir/, so essentially
                // it's the root directory of the SDK. It's safe to ignore that one
                // since we want to use our own root directory and we'll recreate
                // root directories as needed.
                // A direct consequence is that if a malformed archive has multiple
                // root directories, their content will all be merged together.
                continue;
            } else {
                // This is the expected behavior: the zip entry is in the form root/file
                // or root/dir/. We want to use our top-level directory so we drop the
                // first segment of the path name.
                name = name.substring(pos + 1);
            }

            File destFile = new File(unzipDestFolder, name);

            if (name.endsWith("/")) { //$NON-NLS-1$
                // Create directory if it doesn't exist yet. This allows us to create
                // empty directories.
                if (!mFileOp.isDirectory(destFile) && !mFileOp.mkdirs(destFile)) {
                    monitor.logError("Failed to create directory %1$s", destFile.getPath());
                    return false;
                }
                continue;
            } else if (name.indexOf('/') != -1) {
                // Otherwise it's a file in a sub-directory.

                // Sanity check: since we're always unzipping in a fresh temp folder
                // the destination file shouldn't already exist.
                if (mFileOp.exists(destFile)) {
                    monitor.logVerbose("Duplicate file found:  %1$s", name);
                }

                // Make sure the parent directory has been created.
                File parentDir = destFile.getParentFile();
                if (!mFileOp.isDirectory(parentDir)) {
                    if (!mFileOp.mkdirs(parentDir)) {
                        monitor.logError("Failed to create directory %1$s", parentDir.getPath());
                        return false;
                    }
                }
            }

            FileOutputStream fos = null;
            long remains = entry.getSize();
            try {
                fos = new FileOutputStream(destFile);

                // Java bug 4040920: do not rely on the input stream EOF and don't
                // try to read more than the entry's size.
                InputStream entryContent = zipFile.getInputStream(entry);
                int n;
                while (remains > 0
                        && (n = entryContent.read(buf, 0, (int) Math.min(remains, buf.length))) != -1) {
                    remains -= n;
                    if (n > 0) {
                        fos.write(buf, 0, n);
                    }
                }
            } catch (EOFException e) {
                monitor.logError("Error uncompressing file %s. Size: %d bytes, Unwritten: %d bytes.",
                        entry.getName(), entry.getSize(), remains);
                throw e;
            } finally {
                if (fos != null) {
                    fos.close();
                }
            }

            pkg.postUnzipFileHook(newArchive, monitor, mFileOp, destFile, entry);

            // Increment progress bar to match. We update only between files.
            for (incTotal += entry.getCompressedSize(); incCurr < incTotal; incCurr += incStep) {
                monitor.incProgress(1);
            }

            int percent = (int) (100 * incTotal / compressedSize);
            if (percent != lastPercent) {
                monitor.setDescription("Unzipping %1$s (%2$d%%)", pkgName, percent);
                lastPercent = percent;
            }

            if (monitor.isCancelRequested()) {
                return false;
            }
        }

        return true;

    } catch (IOException e) {
        monitor.logError("Unzip failed: %1$s", e.getMessage());

    } finally {
        if (zipFile != null) {
            try {
                zipFile.close();
            } catch (IOException e) {
                // pass
            }
        }
    }

    return false;
}
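
The root-folder stripping above reduces to a single substring past the first slash. A worked example with an illustrative entry name:

public class RootStripDemo {
    public static void main(String[] args) {
        String name = "android-sdk/docs/index.html"; // illustrative zip entry name
        int pos = name.indexOf('/');
        // Keep root-level files as-is, skip pure directory entries elsewhere,
        // otherwise drop the first path segment, as unzipFolder does above.
        String stripped = (pos == -1 || pos == name.length() - 1) ? name : name.substring(pos + 1);
        System.out.println(stripped); // prints: docs/index.html
    }
}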

From source file:com.android.sdklib.internal.repository.archives.ArchiveInstaller.java

/**
 * Unzips a zip file into the given destination directory.
 *
 * The archive file MUST have a unique "root" folder.
 * This root folder is skipped when unarchiving.
 */
@SuppressWarnings("unchecked")
@VisibleForTesting(visibility = Visibility.PRIVATE)
protected boolean unzipFolder(ArchiveReplacement archiveInfo, File archiveFile, File unzipDestFolder,
        ITaskMonitor monitor) {

    Archive newArchive = archiveInfo.getNewArchive();
    Package pkg = newArchive.getParentPackage();
    String pkgName = pkg.getShortDescription();
    long compressedSize = newArchive.getSize();

    ZipFile zipFile = null;
    try {
        zipFile = new ZipFile(archiveFile);

        // To advance the percent and the progress bar we would need the number of
        // items left to unzip, which we don't know. However, we do know the size
        // of the archive and the size of each uncompressed item. The zip file
        // format overhead is negligible, so that's a good approximation.
        long incStep = compressedSize / NUM_MONITOR_INC;
        long incTotal = 0;
        long incCurr = 0;
        int lastPercent = 0;

        byte[] buf = new byte[65536];

        Enumeration<ZipArchiveEntry> entries = zipFile.getEntries();
        while (entries.hasMoreElements()) {
            ZipArchiveEntry entry = entries.nextElement();

            String name = entry.getName();

            // ZipFile entries should have forward slashes, but not all Zip
            // implementations can be expected to do that.
            name = name.replace('\\', '/');

            // Zip entries are always packaged in a top-level directory (e.g. docs/index.html).
            int pos = name.indexOf('/');
            if (pos == -1) {
                // All zip entries should have a root folder.
                // This zip entry seems located at the root of the zip.
                // Rather than ignore the file, just place it at the root.
            } else if (pos == name.length() - 1) {
                // This is a zip *directory* entry in the form dir/, so essentially
                // it's the root directory of the SDK. It's safe to ignore that one
                // since we want to use our own root directory and we'll recreate
                // root directories as needed.
                // A direct consequence is that if a malformed archive has multiple
                // root directories, their content will all be merged together.
                continue;
            } else {
                // This is the expected behavior: the zip entry is in the form root/file
                // or root/dir/. We want to use our top-level directory so we drop the
                // first segment of the path name.
                name = name.substring(pos + 1);
            }

            File destFile = new File(unzipDestFolder, name);

            if (name.endsWith("/")) { //$NON-NLS-1$
                // Create directory if it doesn't exist yet. This allows us to create
                // empty directories.
                if (!mFileOp.isDirectory(destFile) && !mFileOp.mkdirs(destFile)) {
                    monitor.logError("Failed to create directory %1$s", destFile.getPath());
                    return false;
                }
                continue;
            } else if (name.indexOf('/') != -1) {
                // Otherwise it's a file in a sub-directory.

                // Sanity check: since we're always unzipping in a fresh temp folder
                // the destination file shouldn't already exist.
                if (mFileOp.exists(destFile)) {
                    monitor.logVerbose("Duplicate file found:  %1$s", name);
                }

                // Make sure the parent directory has been created.
                File parentDir = destFile.getParentFile();
                if (!mFileOp.isDirectory(parentDir)) {
                    if (!mFileOp.mkdirs(parentDir)) {
                        monitor.logError("Failed to create directory %1$s", parentDir.getPath());
                        return false;
                    }
                }
            }

            FileOutputStream fos = null;
            long remains = entry.getSize();
            try {
                fos = new FileOutputStream(destFile);

                // Java bug 4040920: do not rely on the input stream EOF and don't
                // try to read more than the entry's size.
                InputStream entryContent = zipFile.getInputStream(entry);
                int n;
                while (remains > 0
                        && (n = entryContent.read(buf, 0, (int) Math.min(remains, buf.length))) != -1) {
                    remains -= n;
                    if (n > 0) {
                        fos.write(buf, 0, n);
                    }
                }
            } catch (EOFException e) {
                monitor.logError("Error uncompressing file %s. Size: %d bytes, Unwritten: %d bytes.",
                        entry.getName(), entry.getSize(), remains);
                throw e;
            } finally {
                if (fos != null) {
                    fos.close();
                }
            }

            pkg.postUnzipFileHook(newArchive, monitor, mFileOp, destFile, entry);

            // Increment progress bar to match. We update only between files.
            for (incTotal += entry.getCompressedSize(); incCurr < incTotal; incCurr += incStep) {
                monitor.incProgress(1);
            }

            int percent = (int) (100 * incTotal / compressedSize);
            if (percent != lastPercent) {
                monitor.setDescription("Unzipping %1$s (%2$d%%)", pkgName, percent);
                lastPercent = percent;
            }

            if (monitor.isCancelRequested()) {
                return false;
            }
        }

        return true;

    } catch (IOException e) {
        monitor.logError("Unzip failed: %1$s", e.getMessage());

    } finally {
        if (zipFile != null) {
            try {
                zipFile.close();
            } catch (IOException e) {
                // pass
            }
        }
    }

    return false;
}

From source file:com.cloud.server.ConfigurationServerImpl.java

@Override
@DB
public void updateKeyPairs() {
    // Grab the SSH key pair and insert it into the database, if it is not present

    String username = System.getProperty("user.name");
    Boolean devel = Boolean.valueOf(_configDao.getValue("developer"));
    if (!username.equalsIgnoreCase("cloud") && !devel) {
        s_logger.warn(
                "Systemvm keypairs could not be set. Management server should be run as cloud user, or in development mode.");
        return;
    }
    String already = _configDao.getValue("ssh.privatekey");
    String homeDir = Script.runSimpleBashScript("echo ~" + username);
    if (homeDir == null) {
        throw new CloudRuntimeException("Cannot get home directory for account: " + username);
    }

    if (s_logger.isInfoEnabled()) {
        s_logger.info("Processing updateKeyPairs");
    }

    if (homeDir.startsWith("~")) {
        s_logger.error("No home directory was detected for the user '" + username
                + "'. Please check the profile of this user.");
        throw new CloudRuntimeException("No home directory was detected for the user '" + username
                + "'. Please check the profile of this user.");
    }

    File privkeyfile = new File(homeDir + "/.ssh/id_rsa");
    File pubkeyfile = new File(homeDir + "/.ssh/id_rsa.pub");

    if (already == null || already.isEmpty()) {
        if (s_logger.isInfoEnabled()) {
            s_logger.info("Systemvm keypairs not found in database. Need to store them in the database");
        }
        // FIXME: take a global database lock here for safety.
        Script.runSimpleBashScript("if [ -f " + privkeyfile + " ]; then rm -f " + privkeyfile
                + "; fi; ssh-keygen -t rsa -N '' -f " + privkeyfile + " -q");

        byte[] arr1 = new byte[4094]; // configuration table column value size
        try {
            new DataInputStream(new FileInputStream(privkeyfile)).readFully(arr1);
        } catch (EOFException e) {
            // Expected when the key file is shorter than the buffer; the bytes read so far remain in arr1.
        } catch (Exception e) {
            s_logger.error("Cannot read the private key file", e);
            throw new CloudRuntimeException("Cannot read the private key file");
        }
        String privateKey = new String(arr1).trim();
        byte[] arr2 = new byte[4094]; // configuration table column value size
        try {
            new DataInputStream(new FileInputStream(pubkeyfile)).readFully(arr2);
        } catch (EOFException e) {
            // Expected when the key file is shorter than the buffer; the bytes read so far remain in arr2.
        } catch (Exception e) {
            s_logger.warn("Cannot read the public key file", e);
            throw new CloudRuntimeException("Cannot read the public key file");
        }
        String publicKey = new String(arr2).trim();

        String insertSql1 = "INSERT INTO `cloud`.`configuration` (category, instance, component, name, value, description) "
                + "VALUES ('Hidden','DEFAULT', 'management-server','ssh.privatekey', '"
                + DBEncryptionUtil.encrypt(privateKey) + "','Private key for the entire CloudStack')";
        String insertSql2 = "INSERT INTO `cloud`.`configuration` (category, instance, component, name, value, description) "
                + "VALUES ('Hidden','DEFAULT', 'management-server','ssh.publickey', '"
                + DBEncryptionUtil.encrypt(publicKey) + "','Public key for the entire CloudStack')";

        Transaction txn = Transaction.currentTxn();
        try {
            PreparedStatement stmt1 = txn.prepareAutoCloseStatement(insertSql1);
            stmt1.executeUpdate();
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Private key inserted into database");
            }
        } catch (SQLException ex) {
            s_logger.error("SQL of the private key failed", ex);
            throw new CloudRuntimeException("SQL of the private key failed");
        }

        try {
            PreparedStatement stmt2 = txn.prepareAutoCloseStatement(insertSql2);
            stmt2.executeUpdate();
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Public key inserted into database");
            }
        } catch (SQLException ex) {
            s_logger.error("SQL of the public key failed", ex);
            throw new CloudRuntimeException("SQL of the public key failed");
        }

    } else {
        s_logger.info("Keypairs already in database");
        if (username.equalsIgnoreCase("cloud")) {
            s_logger.info("Keypairs already in database, updating local copy");
            updateKeyPairsOnDisk(homeDir);
        } else {
            s_logger.info("Keypairs already in database, skip updating local copy (not running as cloud user)");
        }
    }
    s_logger.info("Going to update systemvm iso with generated keypairs if needed");
    try {
        injectSshKeysIntoSystemVmIsoPatch(pubkeyfile.getAbsolutePath(), privkeyfile.getAbsolutePath());
    } catch (CloudRuntimeException e) {
        if (!devel) {
            throw new CloudRuntimeException(e.getMessage());
        }
    }
}
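
The readFully-into-a-fixed-buffer idiom above deliberately swallows EOFException: a key file shorter than 4094 bytes throws, but the bytes read so far are already in the array, and trim() strips the trailing NUL padding. A standalone sketch of the same pattern (the path and length are illustrative):

import java.io.DataInputStream;
import java.io.EOFException;
import java.io.FileInputStream;
import java.io.IOException;

public class ReadAtMost {
    // Read up to maxLen bytes; an EOFException just means the file was shorter.
    static String readAtMost(String path, int maxLen) throws IOException {
        byte[] buf = new byte[maxLen];
        try (DataInputStream in = new DataInputStream(new FileInputStream(path))) {
            in.readFully(buf);
        } catch (EOFException e) {
            // Expected for short files; buf already holds everything that was read.
        }
        return new String(buf).trim(); // trim() drops the trailing NUL bytes
    }

    public static void main(String[] args) throws IOException {
        System.out.println(readAtMost("/tmp/id_rsa.pub", 4094)); // illustrative path
    }
}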