Example usage for java.io DataInputStream readLong

Introduction

On this page you can find example usage for java.io.DataInputStream.readLong().

Prototype

public final long readLong() throws IOException 

Document

See the general contract of the readLong method of DataInput.
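
readLong() reads eight bytes from the stream and returns them as a long, high byte first (big-endian); it is the inverse of DataOutput.writeLong and throws EOFException if the stream ends before eight bytes are read. A minimal round-trip sketch (class and file names here are arbitrary):

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;

public class ReadLongDemo {
    public static void main(String[] args) throws IOException {
        // write one long, big-endian, in the format DataInput expects
        try (DataOutputStream out = new DataOutputStream(new FileOutputStream("demo.bin"))) {
            out.writeLong(1234567890123L);
        }
        // read it back; readLong throws EOFException if fewer than 8 bytes remain
        try (DataInputStream in = new DataInputStream(new FileInputStream("demo.bin"))) {
            System.out.println(in.readLong()); // prints 1234567890123
        }
    }
}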

Usage

From source file: org.hyperic.hq.agent.db.DiskList.java

/**
 * A quick routine, which simply zips through the index file,
 * pulling out information about which records are free.
 *
 * We open up the file separately here, so we can use the
 * buffered input stream, which makes our initial startup much
 * faster, if there is a lot of data sitting in the list.
 */
private void genFreeList(File idxFile) throws IOException {
    BufferedInputStream bIs;
    FileInputStream fIs = null;
    DataInputStream dIs;

    this.firstRec = -1;
    this.lastRec = -1;

    // TreeSet is used here to ensure a natural ordering of
    // the elements.
    this.freeList = new TreeSet();

    try {
        fIs = new FileInputStream(idxFile);

        bIs = new BufferedInputStream(fIs);
        dIs = new DataInputStream(bIs);

        for (long idx = 0;; idx++) {
            boolean used;
            long prev, next;

            try {
                used = dIs.readBoolean();
            } catch (EOFException exc) {
                break;
            }

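            // each index record is a 1-byte used flag followed by two
            // 8-byte longs holding the previous and next record indices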
            prev = dIs.readLong();
            next = dIs.readLong();

            if (!used) {
                this.freeList.add(Long.valueOf(idx));
            } else {
                if (prev == -1) {
                    this.firstRec = idx;
                }

                if (next == -1) {
                    this.lastRec = idx;
                }
            }
        }
    } catch (FileNotFoundException exc) {
        return;
    } finally {
        try {
            if (fIs != null) {
                fIs.close();
            }
        } catch (IOException exc) {
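            // best-effort close; a failure here can safely be ignored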
        }
    }
}
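
The explicit stream bookkeeping above predates try-with-resources. On Java 7 or later, a reduced sketch of the same index scan (hypothetical; it collects only the free list and assumes the usual java.io imports) could read:

private void genFreeList(File idxFile) throws IOException {
    this.freeList = new TreeSet<Long>();
    try (DataInputStream dIs = new DataInputStream(
            new BufferedInputStream(new FileInputStream(idxFile)))) {
        for (long idx = 0;; idx++) {
            boolean used;
            try {
                used = dIs.readBoolean();
            } catch (EOFException exc) {
                break; // clean end of the index file
            }
            long prev = dIs.readLong(); // previous record index (unused here)
            long next = dIs.readLong(); // next record index (unused here)
            if (!used) {
                this.freeList.add(idx);
            }
        }
    } catch (FileNotFoundException exc) {
        // no index file yet; leave the free list empty
    }
}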

From source file: bobs.is.compress.sevenzip.SevenZFile.java

private StartHeader readStartHeader(final long startHeaderCrc) throws IOException {
    final StartHeader startHeader = new StartHeader();
    DataInputStream dataInputStream = null;
    try {
        dataInputStream = new DataInputStream(new CRC32VerifyingInputStream(
                new BoundedRandomAccessFileInputStream(file, 20), 20, startHeaderCrc));
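        // the 7z start header stores these fields little-endian, but readLong
        // is big-endian, so each value is byte-swapped after reading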
        startHeader.nextHeaderOffset = Long.reverseBytes(dataInputStream.readLong());
        startHeader.nextHeaderSize = Long.reverseBytes(dataInputStream.readLong());
        startHeader.nextHeaderCrc = 0xffffFFFFL & Integer.reverseBytes(dataInputStream.readInt());
        return startHeader;
    } finally {
        if (dataInputStream != null) {
            dataInputStream.close();
        }
    }
}

From source file: org.apache.giraph.graph.BspServiceWorker.java

@Override
public void loadCheckpoint(long superstep) {
    // Algorithm:
    // Examine all the partition owners and load the ones
    // that match my hostname and id from the master designated checkpoint
    // prefixes.
    long startPos = 0;
    int loadedPartitions = 0;
    for (PartitionOwner partitionOwner : workerGraphPartitioner.getPartitionOwners()) {
        if (partitionOwner.getWorkerInfo().equals(getWorkerInfo())) {
            String metadataFile = partitionOwner.getCheckpointFilesPrefix() + CHECKPOINT_METADATA_POSTFIX;
            String partitionsFile = partitionOwner.getCheckpointFilesPrefix() + CHECKPOINT_VERTICES_POSTFIX;
            try {
                int partitionId = -1;
                DataInputStream metadataStream = getFs().open(new Path(metadataFile));
                int partitions = metadataStream.readInt();
                for (int i = 0; i < partitions; ++i) {
                    startPos = metadataStream.readLong();
                    partitionId = metadataStream.readInt();
                    if (partitionId == partitionOwner.getPartitionId()) {
                        break;
                    }
                }
                if (partitionId != partitionOwner.getPartitionId()) {
                    throw new IllegalStateException("loadCheckpoint: " + partitionOwner + " not found!");
                }
                metadataStream.close();
                Partition<I, V, E, M> partition = new Partition<I, V, E, M>(getConfiguration(), partitionId);
                DataInputStream partitionsStream = getFs().open(new Path(partitionsFile));
                if (partitionsStream.skip(startPos) != startPos) {
                    throw new IllegalStateException(
                            "loadCheckpoint: Failed to skip " + startPos + " on " + partitionsFile);
                }
                partition.readFields(partitionsStream);
                partitionsStream.close();
                if (LOG.isInfoEnabled()) {
                    LOG.info("loadCheckpoint: Loaded partition " + partition);
                }
                if (getPartitionMap().put(partitionId, partition) != null) {
                    throw new IllegalStateException(
                            "loadCheckpoint: Already has partition owner " + partitionOwner);
                }
                ++loadedPartitions;
            } catch (IOException e) {
                throw new RuntimeException("loadCheckpoing: Failed to get partition owner " + partitionOwner,
                        e);
            }
        }
    }
    if (LOG.isInfoEnabled()) {
        LOG.info("loadCheckpoint: Loaded " + loadedPartitions + " partitions of out "
                + workerGraphPartitioner.getPartitionOwners().size() + " total.");
    }
    // Communication service needs to setup the connections prior to
    // processing vertices
    commService.setup();
}

From source file: com.sky.drovik.player.media.DiskCache.java

private void loadIndex() {
    final String indexFilePath = getIndexFilePath();
    try {
        // Open the input stream.
        final FileInputStream fileInput = new FileInputStream(indexFilePath);
        final BufferedInputStream bufferedInput = new BufferedInputStream(fileInput, 1024);
        final DataInputStream dataInput = new DataInputStream(bufferedInput);

        // Read the header.
        final int magic = dataInput.readInt();
        final int version = dataInput.readInt();
        boolean valid = true;
        if (magic != INDEX_HEADER_MAGIC) {
            Log.e(TAG, "Index file appears to be corrupt (" + magic + " != " + INDEX_HEADER_MAGIC + "), "
                    + indexFilePath);
            valid = false;
        }
        if (valid && version != INDEX_HEADER_VERSION) {
            // Future versions can implement upgrade in this case.
            Log.e(TAG, "Index file version " + version + " not supported");
            valid = false;
        }
        if (valid) {
            mTailChunk = dataInput.readShort();
        }

        // Read the entries.
        if (valid) {
            // Parse the index file body into the in-memory map.
            final int numEntries = dataInput.readInt();
            mIndexMap = new LongSparseArray<Record>(numEntries);
            synchronized (mIndexMap) {
                for (int i = 0; i < numEntries; ++i) {
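                    // each entry is fixed-size: key (8 bytes), chunk (2),
                    // offset (4), size (4), sizeOnDisk (4), timestamp (8)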
                    final long key = dataInput.readLong();
                    final int chunk = dataInput.readShort();
                    final int offset = dataInput.readInt();
                    final int size = dataInput.readInt();
                    final int sizeOnDisk = dataInput.readInt();
                    final long timestamp = dataInput.readLong();
                    mIndexMap.append(key, new Record(chunk, offset, size, sizeOnDisk, timestamp));
                }
            }
        }

        dataInput.close();
        if (!valid) {
            deleteAll();
        }

    } catch (FileNotFoundException e) {
        // If the file does not exist the cache is empty, so just continue.
    } catch (IOException e) {
        Log.e(TAG, "Unable to read the index file " + indexFilePath);
    } finally {
        if (mIndexMap == null) {
            mIndexMap = new LongSparseArray<Record>();
        }
    }
}

From source file: org.apache.giraph.graph.BspServiceMaster.java

/**
 * Read the finalized checkpoint file and associated metadata files for the
 * checkpoint.  Modifies the {@link PartitionOwner} objects to get the
 * checkpoint prefixes.  It is an optimization to prevent all workers from
 * searching all the files.  Also read in the aggregator data from the
 * finalized checkpoint file and setting it.
 *
 * @param superstep Checkpoint set to examine.
 * @param partitionOwners Partition owners to modify with checkpoint
 *        prefixes
 * @throws IOException
 * @throws InterruptedException
 * @throws KeeperException
 */
private void prepareCheckpointRestart(long superstep, Collection<PartitionOwner> partitionOwners)
        throws IOException, KeeperException, InterruptedException {
    FileSystem fs = getFs();
    List<Path> validMetadataPathList = new ArrayList<Path>();
    String finalizedCheckpointPath = getCheckpointBasePath(superstep) + CHECKPOINT_FINALIZED_POSTFIX;
    DataInputStream finalizedStream = fs.open(new Path(finalizedCheckpointPath));
    int prefixFileCount = finalizedStream.readInt();
    for (int i = 0; i < prefixFileCount; ++i) {
        String metadataFilePath = finalizedStream.readUTF() + CHECKPOINT_METADATA_POSTFIX;
        validMetadataPathList.add(new Path(metadataFilePath));
    }

    // Set the merged aggregator data if it exists.
    int aggregatorDataSize = finalizedStream.readInt();
    if (aggregatorDataSize > 0) {
        byte[] aggregatorZkData = new byte[aggregatorDataSize];
        int actualDataRead = finalizedStream.read(aggregatorZkData, 0, aggregatorDataSize);
        if (actualDataRead != aggregatorDataSize) {
            throw new RuntimeException("prepareCheckpointRestart: Only read " + actualDataRead + " of "
                    + aggregatorDataSize + " aggregator bytes from " + finalizedCheckpointPath);
        }
        String mergedAggregatorPath = getMergedAggregatorPath(getApplicationAttempt(), superstep - 1);
        if (LOG.isInfoEnabled()) {
            LOG.info("prepareCheckpointRestart: Reloading merged " + "aggregator " + "data '"
                    + Arrays.toString(aggregatorZkData) + "' to previous checkpoint in path "
                    + mergedAggregatorPath);
        }
        if (getZkExt().exists(mergedAggregatorPath, false) == null) {
            getZkExt().createExt(mergedAggregatorPath, aggregatorZkData, Ids.OPEN_ACL_UNSAFE,
                    CreateMode.PERSISTENT, true);
        } else {
            getZkExt().setData(mergedAggregatorPath, aggregatorZkData, -1);
        }
    }
    masterCompute.readFields(finalizedStream);
    finalizedStream.close();

    Map<Integer, PartitionOwner> idOwnerMap = new HashMap<Integer, PartitionOwner>();
    for (PartitionOwner partitionOwner : partitionOwners) {
        if (idOwnerMap.put(partitionOwner.getPartitionId(), partitionOwner) != null) {
            throw new IllegalStateException("prepareCheckpointRestart: Duplicate partition " + partitionOwner);
        }
    }
    // Reading the metadata files.  Simply assign each partition owner
    // the correct file prefix based on the partition id.
    for (Path metadataPath : validMetadataPathList) {
        String checkpointFilePrefix = metadataPath.toString();
        checkpointFilePrefix = checkpointFilePrefix.substring(0,
                checkpointFilePrefix.length() - CHECKPOINT_METADATA_POSTFIX.length());
        DataInputStream metadataStream = fs.open(metadataPath);
        long partitions = metadataStream.readInt();
        for (long i = 0; i < partitions; ++i) {
            long dataPos = metadataStream.readLong();
            int partitionId = metadataStream.readInt();
            PartitionOwner partitionOwner = idOwnerMap.get(partitionId);
            if (LOG.isInfoEnabled()) {
                LOG.info("prepareSuperstepRestart: File " + metadataPath + " with position " + dataPos
                        + ", partition id = " + partitionId + " assigned to " + partitionOwner);
            }
            partitionOwner.setCheckpointFilesPrefix(checkpointFilePrefix);
        }
        metadataStream.close();
    }
}

From source file: com.splout.db.dnode.HttpFileExchanger.java

@Override
public void handle(HttpExchange exchange) throws IOException {
    DataInputStream iS = null;
    FileOutputStream writer = null;
    File dest = null;

    String tablespace = null;
    Integer partition = null;
    Long version = null;

    try {
        iS = new DataInputStream(new GZIPInputStream(exchange.getRequestBody()));
        String fileName = exchange.getRequestHeaders().getFirst("filename");
        tablespace = exchange.getRequestHeaders().getFirst("tablespace");
        partition = Integer.valueOf(exchange.getRequestHeaders().getFirst("partition"));
        version = Long.valueOf(exchange.getRequestHeaders().getFirst("version"));

        dest = new File(
                new File(tempDir,
                        DNodeHandler.getLocalStoragePartitionRelativePath(tablespace, partition, version)),
                fileName);

        // just in case, avoid copying the same file concurrently
        // (but we also shouldn't avoid this in other levels of the app)
        synchronized (currentTransfersMonitor) {
            if (currentTransfers.containsKey(dest.toString())) {
                throw new IOException("Incoming file already being transferred - " + dest);
            }
            currentTransfers.put(dest.toString(), new Object());
        }

        if (!dest.getParentFile().exists()) {
            dest.getParentFile().mkdirs();
        }
        if (dest.exists()) {
            dest.delete();
        }

        writer = new FileOutputStream(dest);
        byte[] buffer = new byte[config.getInt(FetcherProperties.DOWNLOAD_BUFFER)];

        Checksum checkSum = new CRC32();

        // 1- Read file size
        long fileSize = iS.readLong();
        log.debug("Going to read file [" + fileName + "] of size: " + fileSize);
        // 2- Read file contents
        long readSoFar = 0;

        do {
            long missingBytes = fileSize - readSoFar;
            int bytesToRead = (int) Math.min(missingBytes, buffer.length);
            int read = iS.read(buffer, 0, bytesToRead);
            if (read < 0) {
                throw new EOFException("Stream ended after " + readSoFar + " of " + fileSize + " bytes");
            }
            checkSum.update(buffer, 0, read);
            writer.write(buffer, 0, read);
            readSoFar += read;
            callback.onProgress(tablespace, partition, version, dest, fileSize, readSoFar);
        } while (readSoFar < fileSize);

        // 3- Read CRC
        long expectedCrc = iS.readLong();
        if (expectedCrc == checkSum.getValue()) {
            log.info("File [" + dest.getAbsolutePath() + "] received -> Checksum -- " + checkSum.getValue()
                    + " matches expected CRC [OK]");
            callback.onFileReceived(tablespace, partition, version, dest);
        } else {
            log.error("File received [" + dest.getAbsolutePath() + "] -> Checksum -- " + checkSum.getValue()
                    + " doesn't match expected CRC: " + expectedCrc);
            callback.onBadCRC(tablespace, partition, version, dest);
            dest.delete();
        }
    } catch (Throwable t) {
        log.error(t);
        callback.onError(t, tablespace, partition, version, dest);
        if (dest != null && dest.exists()
                && !t.getMessage().contains("Incoming file already being transferred")) {
            dest.delete();
        }
    } finally {
        if (writer != null) {
            writer.close();
        }
        if (iS != null) {
            iS.close();
        }
        if (dest != null) {
            currentTransfers.remove(dest.toString());
        }
    }
}

From source file: org.commoncrawl.service.listcrawler.CacheManager.java

/**
 * loadCacheItemFromDisk - load a single cache item from disk 
 *
 * @param file
 * @param optTargetURL
 * @param location
 * @return
 * @throws IOException
 */
private CacheItem loadCacheItemFromDisk(FileInputStream file, String optTargetURL, long location)
        throws IOException {

    long timeStart = System.currentTimeMillis();

    // and read out the Item Header ...  
    CacheItemHeader itemHeader = new CacheItemHeader();
    itemHeader.readHeader(new DataInputStream(file));
    // see if it is valid ... 
    if (!Arrays.equals(itemHeader._sync, _header._sync)) {
        LOG.error("### Item Lookup for URL:" + optTargetURL + " Record at:" + location
                + " failed - corrupt sync bytes detected!!!");
    } else {
        CRC32 crc32 = new CRC32();
        // ok deserialize the bytes ... 
        CacheItem item = new CacheItem();
        CheckedInputStream checkedStream = new CheckedInputStream(file, crc32);
        DataInputStream itemStream = new DataInputStream(checkedStream);
        item.readFields(itemStream);
        // read the content buffer length 
        int contentBufferLen = itemStream.readInt();
        if (contentBufferLen != 0) {
            byte data[] = new byte[contentBufferLen];
            itemStream.readFully(data); // read() may return fewer bytes; readFully fills the whole buffer
            item.setContent(new Buffer(data));
        }

        // cache crc 
        long crcValueComputed = crc32.getValue();
        // read disk crc 
        long crcValueOnDisk = itemStream.readLong();
        // validate 
        if (crcValueComputed == crcValueOnDisk) {
            String canonicalURL = URLUtils.canonicalizeURL(item.getUrl(), true);
            if (optTargetURL.length() == 0 || optTargetURL.equals(canonicalURL)) {
                if (isValidCacheItem(item)) {
                    LOG.info("### Item Lookup for URL:" + optTargetURL + " Record at:" + location
                            + " completed in:" + (System.currentTimeMillis() - timeStart));
                    return item;
                } else {
                    LOG.info("### Item Lookup for URL:" + optTargetURL + " Record at:" + location
                            + " failed with invalid result code");
                }

            } else {
                LOG.info("### Item Lookup for URL:" + optTargetURL + " Record at:" + location
                        + " failed with url mismatch. record url:" + item.getUrl());
            }
        } else {
            LOG.error("### Item Lookup for URL:" + optTargetURL + " Record at:" + location
                    + " failed - crc mismatch!!!");
        }
    }
    return null;
}

From source file: mp.teardrop.PlaybackService.java

/**
 * Initializes the service state, loading songs saved from the disk into the
 * song timeline.
 *
 * @return The loaded value for mState.
 */
public int loadState() {
    int state = 0;

    try {
        DataInputStream in = new DataInputStream(openFileInput(STATE_FILE));

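        // the state file opens with an 8-byte magic value and a 4-byte version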
        if (in.readLong() == STATE_FILE_MAGIC && in.readInt() == STATE_VERSION) {
            mPendingSeek = in.readInt();
            mPendingSeekSong = in.readLong();
            mTimeline.readState(getSharedPreferences(PREFS_SAVED_SONGS, 0));
            state |= mTimeline.getShuffleMode() << SHIFT_SHUFFLE;
            state |= mTimeline.getFinishAction() << SHIFT_FINISH;
        }

        in.close();
    } catch (EOFException e) {
        Log.w("OrchidMP", "Failed to load state", e);
    } catch (IOException e) {
        Log.w("OrchidMP", "Failed to load state", e);
    } catch (JSONException e) {
        Log.w("OrchidMP", "Failed to load state", e);
    }

    return state;
}

From source file: org.hyperic.hq.agent.server.AgentDListProvider.java

/**
 * DList info string is a series of properties separated by '|'.
 * Five properties are expected:
 *
 * Directory to place the data files
 * Size unit, 'm' (MB) or 'k' (KB), applied to the sizes below
 * Maximum storage size
 * Size at which to start checking for unused blocks
 * Maximum percentage of free blocks allowed
 */
public void init(String info) throws AgentStorageException {
    BufferedInputStream bIs;
    FileInputStream fIs = null;
    DataInputStream dIs;
    long nEnts;

    // Parse out configuration
    StringTokenizer st = new StringTokenizer(info, "|");
    if (st.countTokens() != 5) {
        throw new AgentStorageException(info + " is an invalid agent storage provider configuration");
    }

    keyVals = new HashMap<EncVal, EncVal>();
    lists = new HashMap<String, DiskList>();
    overloads = new HashMap<String, ListInfo>();
    String dir = st.nextToken();
    this.writeDir = new File(dir);
    this.keyValFile = new File(writeDir, "keyvals");
    this.keyValFileBackup = new File(writeDir, "keyvals.backup");

    String s = st.nextToken().trim();
    long factor;
    if ("m".equalsIgnoreCase(s)) {
        factor = 1024 * 1024;
    } else if ("k".equalsIgnoreCase(s)) {
        factor = 1024;
    } else {
        throw new AgentStorageException(info + " is an invalid agent storage provider configuration");
    }
    try {
        maxSize = Long.parseLong(st.nextToken().trim()) * factor;
        chkSize = Long.parseLong(st.nextToken().trim()) * factor;
        chkPerc = Integer.parseInt(st.nextToken().trim());
    } catch (NumberFormatException e) {
        throw new AgentStorageException("Invalid agent storage provider " + "configuration: " + e);
    }

    if (!this.writeDir.exists()) {
        // Try to create it
        this.writeDir.mkdir();
    }

    if (!this.writeDir.isDirectory()) {
        throw new AgentStorageException(dir + " is not a directory");
    }

    try {
        fIs = new FileInputStream(this.keyValFile);
        bIs = new BufferedInputStream(fIs);
        dIs = new DataInputStream(bIs);
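        // the keyvals file starts with an 8-byte count of the entries that follow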
        nEnts = dIs.readLong();
        while (nEnts-- != 0) {
            String encKey = dIs.readUTF();
            String encVal = dIs.readUTF();
            String key = SecurityUtil.isMarkedEncrypted(encKey)
                    ? SecurityUtil.decryptRecursiveUnmark(encryptor, encKey)
                    : encKey;
            String val = SecurityUtil.isMarkedEncrypted(encVal)
                    ? SecurityUtil.decryptRecursiveUnmark(encryptor, encVal)
                    : encVal;
            this.keyVals.put(new EncVal(encryptor, key, encKey), new EncVal(encryptor, val, encVal));
        }
    } catch (FileNotFoundException exc) {
        // Normal when it doesn't exist
        log.debug("file not found (this is ok): " + exc);
    } catch (IOException exc) {
        log.error("Error reading " + this.keyValFile + " loading " + "last known good version");
        // Close old stream
        close(fIs);
        // Fall back to last known good keyvals file
        try {
            fIs = new FileInputStream(this.keyValFileBackup);
            bIs = new BufferedInputStream(fIs);
            dIs = new DataInputStream(bIs);
            nEnts = dIs.readLong();
            while (nEnts-- != 0) {
                String encKey = dIs.readUTF();
                String encVal = dIs.readUTF();
                String key = SecurityUtil.encrypt(this.encryptor, encKey);
                String val = SecurityUtil.encrypt(this.encryptor, encVal);
                this.keyVals.put(new EncVal(encryptor, key, encKey), new EncVal(encryptor, val, encVal));
            }
        } catch (FileNotFoundException e) {
            log.warn(e);
            log.debug(e, e);
        } catch (IOException e) {
            AgentStorageException toThrow = new AgentStorageException(
                    "Error reading " + this.keyValFile + ": " + e);
            toThrow.initCause(e);
            throw toThrow;
        }
    } finally {
        close(fIs);
    }
}