Example usage for com.google.common.primitives Longs fromByteArray

Introduction

This page collects usage examples for com.google.common.primitives.Longs.fromByteArray drawn from open-source projects.

Prototype

public static long fromByteArray(byte[] bytes) 

Document

Returns the long value whose big-endian representation is stored in the first 8 bytes of bytes; equivalent to ByteBuffer.wrap(bytes).getLong().
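
A minimal, self-contained sketch of the round trip together with its companion Longs.toByteArray (the class and variable names here are illustrative):

import com.google.common.primitives.Longs;
import java.util.Arrays;

public class LongsFromByteArrayDemo {
    public static void main(String[] args) {
        long original = 0x0102030405060708L;
        byte[] bytes = Longs.toByteArray(original); // 8 bytes, big-endian
        long decoded = Longs.fromByteArray(bytes);  // reads the first 8 bytes
        System.out.println(decoded == original);    // true

        // Bytes beyond the first 8 are ignored; an array with fewer than 8
        // bytes throws IllegalArgumentException.
        byte[] padded = Arrays.copyOf(bytes, 12);
        System.out.println(Longs.fromByteArray(padded) == original); // true
    }
}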

Usage

From source file:org.voltdb.SnapshotSaveAPI.java

private void createSetup(String file_path, String file_nonce, SnapshotFormat format, long txnId, String data,
        SystemProcedureExecutionContext context, String hostname, final VoltTable result) {
    {
        SiteTracker tracker = context.getSiteTrackerForSnapshot();
        final int numLocalSites = (tracker.getLocalSites().length - recoveringSiteCount.get());

        // non-null if targeting only one site (used for rejoin)
        // set later from the "data" JSON string
        Long targetHSid = null;

        MessageDigest digest;
        try {
            digest = MessageDigest.getInstance("SHA-1");
        } catch (NoSuchAlgorithmException e) {
            throw new AssertionError(e);
        }

        /*
         * List of partitions to include if this snapshot is
         * going to be deduped. Attempts to break up the work
         * by seeding an RNG selecting
         * a random replica to do the work. Will not work in failure
         * cases, but we don't use dedupe when we want durability.
         *
         * Originally used the partition id as the seed, but it turns out
         * that nextInt(2) returns a 1 for seeds 0-4095. Now use SHA-1
         * on the txnid + partition id.
         */
        List<Integer> partitionsToInclude = new ArrayList<Integer>();
        List<Long> sitesToInclude = new ArrayList<Long>();
        for (long localSite : tracker.getLocalSites()) {
            final int partitionId = tracker.getPartitionForSite(localSite);
            List<Long> sites = new ArrayList<Long>(
                    tracker.getSitesForPartition(tracker.getPartitionForSite(localSite)));
            Collections.sort(sites);

            digest.update(Longs.toByteArray(txnId));
            final long seed = Longs
                    .fromByteArray(Arrays.copyOf(digest.digest(Ints.toByteArray(partitionId)), 8));

            int siteIndex = new java.util.Random(seed).nextInt(sites.size());
            if (localSite == sites.get(siteIndex)) {
                partitionsToInclude.add(partitionId);
                sitesToInclude.add(localSite);
            }
        }

        assert (partitionsToInclude.size() == sitesToInclude.size());

        /*
         * Used to close targets on failure
         */
        final ArrayList<SnapshotDataTarget> targets = new ArrayList<SnapshotDataTarget>();
        try {
            final ArrayDeque<SnapshotTableTask> partitionedSnapshotTasks = new ArrayDeque<SnapshotTableTask>();
            final ArrayList<SnapshotTableTask> replicatedSnapshotTasks = new ArrayList<SnapshotTableTask>();
            assert (SnapshotSiteProcessor.ExecutionSitesCurrentlySnapshotting.get() == -1);

            final List<Table> tables = SnapshotUtil.getTablesToSave(context.getDatabase());

            if (format.isFileBased()) {
                Runnable completionTask = SnapshotUtil.writeSnapshotDigest(txnId, context.getCatalogCRC(),
                        file_path, file_nonce, tables, context.getHostId(),
                        SnapshotSiteProcessor.getExportSequenceNumbers());
                if (completionTask != null) {
                    SnapshotSiteProcessor.m_tasksOnSnapshotCompletion.offer(completionTask);
                }
                completionTask = SnapshotUtil.writeSnapshotCatalog(file_path, file_nonce);
                if (completionTask != null) {
                    SnapshotSiteProcessor.m_tasksOnSnapshotCompletion.offer(completionTask);
                }
            }

            final AtomicInteger numTables = new AtomicInteger(tables.size());
            final SnapshotRegistry.Snapshot snapshotRecord = SnapshotRegistry.startSnapshot(txnId,
                    context.getHostId(), file_path, file_nonce, format, tables.toArray(new Table[0]));

            SnapshotDataTarget sdt = null;
            if (!format.isTableBased()) {
                // table schemas for all the tables we'll snapshot on this partition
                Map<Integer, byte[]> schemas = new HashMap<Integer, byte[]>();
                for (final Table table : SnapshotUtil.getTablesToSave(context.getDatabase())) {
                    VoltTable schemaTable = CatalogUtil.getVoltTable(table);
                    schemas.put(table.getRelativeIndex(), schemaTable.getSchemaBytes());
                }

                if (format == SnapshotFormat.STREAM && data != null) {
                    JSONObject jsObj = new JSONObject(data);
                    long hsId = jsObj.getLong("hsId");

                    // if a target_hsid exists, set it for filtering a snapshot for a specific site
                    try {
                        targetHSid = jsObj.getLong("target_hsid");
                    } catch (JSONException e) {
                    } // leave value as null on exception

                    // if this snapshot targets a specific site...
                    if (targetHSid != null) {
                        // get the list of sites on this node
                        List<Long> localHSids = tracker.getSitesForHost(context.getHostId());
                        // if the target site is local to this node...
                        if (localHSids.contains(targetHSid)) {
                            sdt = new StreamSnapshotDataTarget(hsId, schemas);
                        } else {
                            sdt = new DevNullSnapshotTarget();
                        }
                    }
                }
            }

            for (final Table table : SnapshotUtil.getTablesToSave(context.getDatabase())) {
                /*
                 * For a deduped csv snapshot, only produce the replicated tables on the "leader"
                 * host.
                 */
                if (format == SnapshotFormat.CSV && table.getIsreplicated() && !tracker.isFirstHost()) {
                    snapshotRecord.removeTable(table.getTypeName());
                    continue;
                }
                String canSnapshot = "SUCCESS";
                String err_msg = "";

                File saveFilePath = null;
                if (format.isFileBased()) {
                    saveFilePath = SnapshotUtil.constructFileForTable(table, file_path, file_nonce, format,
                            context.getHostId());
                }

                try {
                    if (format == SnapshotFormat.CSV) {
                        sdt = new SimpleFileSnapshotDataTarget(saveFilePath);
                    } else if (format == SnapshotFormat.NATIVE) {
                        sdt = constructSnapshotDataTargetForTable(context, saveFilePath, table,
                                context.getHostId(), tracker.m_numberOfPartitions, txnId);
                    }

                    if (sdt == null) {
                        throw new IOException("Unable to create snapshot target");
                    }

                    targets.add(sdt);
                    final SnapshotDataTarget sdtFinal = sdt;
                    final Runnable onClose = new Runnable() {
                        @SuppressWarnings("synthetic-access")
                        @Override
                        public void run() {
                            snapshotRecord.updateTable(table.getTypeName(),
                                    new SnapshotRegistry.Snapshot.TableUpdater() {
                                        @Override
                                        public SnapshotRegistry.Snapshot.Table update(
                                                SnapshotRegistry.Snapshot.Table registryTable) {
                                            return snapshotRecord.new Table(registryTable,
                                                    sdtFinal.getBytesWritten(),
                                                    sdtFinal.getLastWriteException());
                                        }
                                    });
                            int tablesLeft = numTables.decrementAndGet();
                            if (tablesLeft == 0) {
                                final SnapshotRegistry.Snapshot completed = SnapshotRegistry
                                        .finishSnapshot(snapshotRecord);
                                final double duration = (completed.timeFinished
                                        - org.voltdb.TransactionIdManager
                                                .getTimestampFromTransactionId(completed.txnId))
                                        / 1000.0;
                                HOST_LOG.info("Snapshot " + snapshotRecord.nonce + " finished at "
                                        + completed.timeFinished + " and took " + duration + " seconds ");
                            }
                        }
                    };

                    sdt.setOnCloseHandler(onClose);

                    List<SnapshotDataFilter> filters = new ArrayList<SnapshotDataFilter>();
                    if (format == SnapshotFormat.CSV) {
                        /*
                         * Don't need to do filtering on a replicated table.
                         */
                        if (!table.getIsreplicated()) {
                            filters.add(new PartitionProjectionSnapshotFilter(Ints.toArray(partitionsToInclude),
                                    0));
                        }
                        filters.add(new CSVSnapshotFilter(CatalogUtil.getVoltTable(table), ',', null));
                    }

                    // if this snapshot targets a specific site...
                    if (targetHSid != null) {
                        // get the list of sites on this node
                        List<Long> localHSids = tracker.getSitesForHost(context.getHostId());
                        // if the target site is local to this node...
                        if (localHSids.contains(targetHSid)) {
                            // ...get its partition id...
                            int partitionId = tracker.getPartitionForSite(targetHSid);
                            // ...and build a filter to only get that partition
                            filters.add(new PartitionProjectionSnapshotFilter(new int[] { partitionId },
                                    sdt.getHeaderSize()));
                        } else {
                            // filter EVERYTHING because the site we want isn't local
                            filters.add(new PartitionProjectionSnapshotFilter(new int[0], sdt.getHeaderSize()));
                        }
                    }

                    final SnapshotTableTask task = new SnapshotTableTask(table.getRelativeIndex(), sdt,
                            filters.toArray(new SnapshotDataFilter[filters.size()]), table.getIsreplicated(),
                            table.getTypeName());

                    if (table.getIsreplicated()) {
                        replicatedSnapshotTasks.add(task);
                    } else {
                        partitionedSnapshotTasks.offer(task);
                    }
                } catch (IOException ex) {
                    /*
                     * Creation of this specific target failed. Close it if it was created.
                     * Continue attempting the snapshot anyways so that at least some of the data
                     * can be retrieved.
                     */
                    try {
                        if (sdt != null) {
                            targets.remove(sdt);
                            sdt.close();
                        }
                    } catch (Exception e) {
                        HOST_LOG.error(e);
                    }

                    StringWriter sw = new StringWriter();
                    PrintWriter pw = new PrintWriter(sw);
                    ex.printStackTrace(pw);
                    pw.flush();
                    canSnapshot = "FAILURE";
                    err_msg = "SNAPSHOT INITIATION OF " + file_nonce + "RESULTED IN IOException: \n"
                            + sw.toString();
                }

                result.addRow(context.getHostId(), hostname, table.getTypeName(), canSnapshot, err_msg);
            }

            synchronized (SnapshotSiteProcessor.m_taskListsForSites) {
                boolean aborted = false;
                if (!partitionedSnapshotTasks.isEmpty() || !replicatedSnapshotTasks.isEmpty()) {
                    SnapshotSiteProcessor.ExecutionSitesCurrentlySnapshotting.set(numLocalSites);
                    for (int ii = 0; ii < numLocalSites; ii++) {
                        SnapshotSiteProcessor.m_taskListsForSites.add(new ArrayDeque<SnapshotTableTask>());
                    }
                } else {
                    SnapshotRegistry.discardSnapshot(snapshotRecord);
                    aborted = true;
                }

                /**
                 * Distribute the writing of replicated tables to exactly one partition.
                 */
                for (int ii = 0; ii < numLocalSites && !partitionedSnapshotTasks.isEmpty(); ii++) {
                    SnapshotSiteProcessor.m_taskListsForSites.get(ii).addAll(partitionedSnapshotTasks);
                    if (!format.isTableBased()) {
                        SnapshotSiteProcessor.m_taskListsForSites.get(ii).addAll(replicatedSnapshotTasks);
                    }
                }

                if (format.isTableBased()) {
                    int siteIndex = 0;
                    for (SnapshotTableTask t : replicatedSnapshotTasks) {
                        SnapshotSiteProcessor.m_taskListsForSites.get(siteIndex++ % numLocalSites).offer(t);
                    }
                }
                if (!aborted) {
                    logSnapshotStartToZK(txnId, context, file_nonce);
                }
            }
        } catch (Exception ex) {
            /*
             * Close all the targets to release the threads. Don't let sites get any tasks.
             */
            SnapshotSiteProcessor.m_taskListsForSites.clear();
            for (SnapshotDataTarget sdt : targets) {
                try {
                    sdt.close();
                } catch (Exception e) {
                    HOST_LOG.error(e);
                }
            }

            StringWriter sw = new StringWriter();
            PrintWriter pw = new PrintWriter(sw);
            ex.printStackTrace(pw);
            pw.flush();
            result.addRow(context.getHostId(), hostname, "", "FAILURE", "SNAPSHOT INITIATION OF " + file_path
                    + file_nonce + "RESULTED IN Exception: \n" + sw.toString());
            HOST_LOG.error(ex);
        } finally {
            SnapshotSiteProcessor.m_snapshotPermits.release(numLocalSites);
        }

    }
}
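
The block comment inside createSetup explains the trick: seeding java.util.Random with the partition id alone biases nextInt(2), so the seed is instead taken from the first 8 bytes of a SHA-1 digest over the transaction id and partition id, decoded with Longs.fromByteArray. A standalone sketch of that idiom (the pickReplica helper and replicaCount parameter are illustrative, not VoltDB code):

import com.google.common.primitives.Ints;
import com.google.common.primitives.Longs;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;

// Illustrative helper: deterministically pick one replica index from a
// well-mixed seed derived from (txnId, partitionId).
static int pickReplica(long txnId, int partitionId, int replicaCount) {
    MessageDigest digest;
    try {
        digest = MessageDigest.getInstance("SHA-1");
    } catch (NoSuchAlgorithmException e) {
        throw new AssertionError(e); // SHA-1 ships with every JVM
    }
    digest.update(Longs.toByteArray(txnId));
    byte[] hash = digest.digest(Ints.toByteArray(partitionId)); // 20-byte digest
    long seed = Longs.fromByteArray(Arrays.copyOf(hash, 8));    // first 8 bytes
    return new java.util.Random(seed).nextInt(replicaCount);
}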

From source file:co.paralleluniverse.galaxy.berkeleydb.BerkeleyDB.java

public void printOwners(java.io.PrintStream ps) {
    ps.println("OWNERS");
    ps.println("======");
    final DatabaseEntry key = new DatabaseEntry();
    final DatabaseEntry value = new DatabaseEntry();
    try (Cursor cursor = ownerDirectory.openCursor(null, CursorConfig.DEFAULT)) {
        while (cursor.getNext(key, value, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
            long id = Longs.fromByteArray(key.getData());
            short owner = Shorts.fromByteArray(value.getData());
            ps.println("Id : " + hex(id) + " owner: " + owner + "");
        }
    }
}
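
The cursor loop above can only decode these entries because the write path stored both key and value as big-endian byte arrays. A hedged sketch of what that write side could look like (the putOwner helper is illustrative; Database.put and the Guava encoders are real API):

import com.google.common.primitives.Longs;
import com.google.common.primitives.Shorts;
import com.sleepycat.je.Database;
import com.sleepycat.je.DatabaseEntry;

// Illustrative: store an (id -> owner) pair in the form that the
// printOwners() loop above decodes with fromByteArray.
static void putOwner(Database ownerDirectory, long id, short owner) {
    DatabaseEntry key = new DatabaseEntry(Longs.toByteArray(id));
    DatabaseEntry value = new DatabaseEntry(Shorts.toByteArray(owner));
    ownerDirectory.put(null, key, value); // null = no explicit transaction
}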

From source file:org.opendedup.collections.LongByteArrayMap.java

@Override
public void put(long pos, SparseDataChunk data) throws IOException, FileClosedException {
    long fpos = 0;
    fpos = this.getMapFilePosition(pos);

    Lock l = this.hashlock.readLock();
    l.lock();
    try {

        if (this.isClosed()) {
            throw new FileClosedException("hashtable [" + this.filePath + "] is closed");
        }
        /*
         * if (data.length != arrayLength) throw new
         * IOException("data length " + data.length + " does not equal " +
         * arrayLength);
         */
        if (Main.refCount) {
            SparseDataChunk ck = this.get(pos);
            if (ck != null) {
                for (HashLocPair p : ck.getFingers()) {
                    DedupFileStore.removeRef(p.hash, Longs.fromByteArray(p.hashloc));
                }
            }
            for (HashLocPair p : data.getFingers()) {
                DedupFileStore.addRef(p.hash, Longs.fromByteArray(p.hashloc));
            }
        }
        // rf.seek(fpos);
        // rf.write(data);
        pbdb.write(ByteBuffer.wrap(data.getBytes()), fpos);
    } finally {
        l.unlock();
    }
}

From source file:co.paralleluniverse.galaxy.berkeleydb.BerkeleyDB.java

public void printMainStore(java.io.PrintStream ps) {
    ps.println("MAIN STORE");
    ps.println("==========");
    final DatabaseEntry key = new DatabaseEntry();
    final DatabaseEntry value = new DatabaseEntry();
    try (Cursor cursor = mainStore.openCursor(null, CursorConfig.DEFAULT)) {
        while (cursor.getNext(key, value, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
            long id = Longs.fromByteArray(key.getData());
            final MainMemoryEntry entry = entryBinding.entryToObject(value);
            ps.println("Id : " + hex(id) + " version: " + entry.version + " data: (" + entry.data.length
                    + " bytes).");
        }
    }
}

From source file:co.paralleluniverse.galaxy.berkeleydb.BerkeleyDB.java

public void printOwnerIndex(java.io.PrintStream ps) {
    ps.println("OWNER INDEX");
    ps.println("===========");
    final DatabaseEntry sKey = new DatabaseEntry();
    final DatabaseEntry pKey = new DatabaseEntry();
    final DatabaseEntry value = new DatabaseEntry();
    try (SecondaryCursor cursor = ownerIndex.openCursor(null, CursorConfig.DEFAULT)) {
        while (cursor.getNext(sKey, pKey, value, LockMode.DEFAULT) == OperationStatus.SUCCESS) {
            long id = Longs.fromByteArray(pKey.getData());
            short owner = Shorts.fromByteArray(sKey.getData());
            ps.println("Owner: " + owner + " id : " + hex(id));
        }
    }
}

From source file:org.apache.beam.runners.dataflow.worker.OrderedCode.java

/**
 * Returns the next long item (encoded via {@code writeSignedNumIncreasing(long)}) from its
 * internal encoded byte array store and removes the item from the store.
 *
 * @see #writeSignedNumIncreasing(long)
 */
public long readSignedNumIncreasing() {
    if ((encodedArrays == null) || (encodedArrays.isEmpty())
            || ((encodedArrays.get(0)).length - firstArrayPosition < 1)) {
        throw new IllegalArgumentException("Invalid encoded byte array");
    }

    byte[] store = encodedArrays.get(0);

    long xorMask = ((store[firstArrayPosition] & 0x80) == 0) ? ~0L : 0L;
    // Store first byte as an int rather than a (signed) byte -- to avoid
    // accidental byte-to-int promotion later, which would extend the byte's
    // sign bit (if any).
    int firstByte = (store[firstArrayPosition] & 0xff) ^ (int) (xorMask & 0xff);

    // Now calculate and test length, and set x to raw (unmasked) result.
    int len;
    long x;
    if (firstByte != 0xff) {
        len = 7 - log2Floor(firstByte ^ 0xff);
        if (store.length - firstArrayPosition < len) {
            throw new IllegalArgumentException("Invalid encoded byte array");
        }
        x = xorMask; // Sign extend using xorMask.
        for (int i = firstArrayPosition; i < firstArrayPosition + len; i++) {
            x = (x << 8) | (store[i] & 0xff);
        }
    } else {
        len = 8;
        if (store.length - firstArrayPosition < len) {
            throw new IllegalArgumentException("Invalid encoded byte array");
        }
        int secondByte = (store[firstArrayPosition + 1] & 0xff) ^ (int) (xorMask & 0xff);
        if (secondByte >= 0x80) {
            if (secondByte < 0xc0) {
                len = 9;
            } else {
                int thirdByte = (store[firstArrayPosition + 2] & 0xff) ^ (int) (xorMask & 0xff);
                if (secondByte == 0xc0 && thirdByte < 0x80) {
                    len = 10;
                } else {
                    // Either len > 10 or len == 10 and #bits > 63.
                    throw new IllegalArgumentException("Invalid encoded byte array");
                }
            }
            if (store.length - firstArrayPosition < len) {
                throw new IllegalArgumentException("Invalid encoded byte array");
            }
        }
        x = Longs.fromByteArray(
                Arrays.copyOfRange(store, firstArrayPosition + len - 8, firstArrayPosition + len));
    }

    x ^= LENGTH_TO_MASK[len]; // Remove spurious header bits.

    if (len != getSignedEncodingLength(x)) {
        throw new IllegalArgumentException("Invalid encoded byte array");
    }

    if ((store.length - firstArrayPosition - len) == 0) {
        // We are done with the first array.
        encodedArrays.remove(0);
        firstArrayPosition = 0;
    } else {
        firstArrayPosition = firstArrayPosition + len;
    }

    return x;
}
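
In the firstByte == 0xff branch above, the decoder hands Longs.fromByteArray exactly the last 8 of the len encoded bytes via Arrays.copyOfRange. A minimal demonstration of that slicing idiom in isolation (the buffer contents are made up):

import com.google.common.primitives.Longs;
import java.util.Arrays;

public class TrailingBytesDemo {
    public static void main(String[] args) {
        // 10-byte buffer whose trailing 8 bytes encode the value 42.
        byte[] store = { (byte) 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 42 };
        int end = store.length; // plays the role of firstArrayPosition + len
        long x = Longs.fromByteArray(Arrays.copyOfRange(store, end - 8, end));
        System.out.println(x); // 42
    }
}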

From source file:com.google.cloud.dataflow.sdk.runners.worker.OrderedCode.java

/**
 * Returns the next long item (encoded via
 * {@code writeSignedNumIncreasing(long)}) from its internal encoded byte
 * array store and removes the item from the store.
 *
 * @see #writeSignedNumIncreasing(long)
 */
public long readSignedNumIncreasing() {
    if ((encodedArrays == null) || (encodedArrays.size() == 0)
            || ((encodedArrays.get(0)).length - firstArrayPosition < 1)) {
        throw new IllegalArgumentException("Invalid encoded byte array");
    }

    byte[] store = encodedArrays.get(0);

    long xorMask = ((store[firstArrayPosition] & 0x80) == 0) ? ~0L : 0L;
    // Store first byte as an int rather than a (signed) byte -- to avoid
    // accidental byte-to-int promotion later, which would extend the byte's
    // sign bit (if any).
    int firstByte = (store[firstArrayPosition] & 0xff) ^ (int) (xorMask & 0xff);

    // Now calculate and test length, and set x to raw (unmasked) result.
    int len;
    long x;
    if (firstByte != 0xff) {
        len = 7 - log2Floor(firstByte ^ 0xff);
        if (store.length - firstArrayPosition < len) {
            throw new IllegalArgumentException("Invalid encoded byte array");
        }
        x = xorMask; // Sign extend using xorMask.
        for (int i = firstArrayPosition; i < firstArrayPosition + len; i++) {
            x = (x << 8) | (store[i] & 0xff);
        }
    } else {
        len = 8;
        if (store.length - firstArrayPosition < len) {
            throw new IllegalArgumentException("Invalid encoded byte array");
        }
        int secondByte = (store[firstArrayPosition + 1] & 0xff) ^ (int) (xorMask & 0xff);
        if (secondByte >= 0x80) {
            if (secondByte < 0xc0) {
                len = 9;
            } else {
                int thirdByte = (store[firstArrayPosition + 2] & 0xff) ^ (int) (xorMask & 0xff);
                if (secondByte == 0xc0 && thirdByte < 0x80) {
                    len = 10;
                } else {
                    // Either len > 10 or len == 10 and #bits > 63.
                    throw new IllegalArgumentException("Invalid encoded byte array");
                }
            }
            if (store.length - firstArrayPosition < len) {
                throw new IllegalArgumentException("Invalid encoded byte array");
            }
        }
        x = Longs.fromByteArray(
                Arrays.copyOfRange(store, firstArrayPosition + len - 8, firstArrayPosition + len));
    }

    x ^= LENGTH_TO_MASK[len]; // Remove spurious header bits.

    if (len != getSignedEncodingLength(x)) {
        throw new IllegalArgumentException("Invalid encoded byte array");
    }

    if ((store.length - firstArrayPosition - len) == 0) {
        // We are done with the first array.
        encodedArrays.remove(0);
        firstArrayPosition = 0;
    } else {
        firstArrayPosition = firstArrayPosition + len;
    }

    return x;
}

From source file:org.apache.apex.malhar.lib.wal.FSWindowDataManager.java

private Object retrieve(FSWindowReplayWAL wal, long windowId) throws IOException {
    if (windowId > largestCompletedWindow || wal.walEndPointerAfterRecovery == null) {
        return null;
    }

    FileSystemWAL.FileSystemWALReader reader = wal.getReader();

    while (reader.getCurrentPointer() == null
            || reader.getCurrentPointer().compareTo(wal.walEndPointerAfterRecovery) < 0) {
        long currentWindow;

        if (wal.retrievedWindow == null) {
            wal.retrievedWindow = readNext(reader);
            Preconditions.checkNotNull(wal.retrievedWindow);
        }
        currentWindow = Longs.fromByteArray(wal.retrievedWindow.toByteArray());

        if (windowId == currentWindow) {
            Slice data = readNext(reader);
            Preconditions.checkNotNull(data, "data is null");

            wal.windowWalParts.put(currentWindow, reader.getCurrentPointer().getPartNum());
            wal.retrievedWindow = readNext(reader); //null or next window

            return fromSlice(data);
        } else if (windowId < currentWindow) {
            // no artifact was saved for this window, so there is nothing to read
            return null;
        } else {
            //windowId > current window so we skip the data
            skipNext(reader);
            wal.windowWalParts.put(currentWindow, reader.getCurrentPointer().getPartNum());

            wal.retrievedWindow = readNext(reader); //null or next window
            if (wal.retrievedWindow == null) {
                //nothing else to read
                return null;
            }
        }
    }
    return null;
}
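
retrieve() can decode each window id only because the WAL writer serialized it as 8 big-endian bytes in the first place; the matching encode step is a single Longs.toByteArray call (shown in isolation as an assumption about the write path, which is not reproduced here):

import com.google.common.primitives.Longs;

// Illustrative: the window-id framing that retrieve(...) expects at the
// start of each WAL entry pair.
static byte[] encodeWindowId(long windowId) {
    return Longs.toByteArray(windowId); // 8 bytes, big-endian
}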

From source file:org.opendedup.collections.LongByteArrayMap.java

@Override
public synchronized void trim(long pos, int len) throws FileClosedException {
    WriteLock l = this.hashlock.writeLock();
    l.lock();
    try {
        double spos = Math.ceil(((double) pos / (double) Main.CHUNK_LENGTH));
        long ep = pos + len;
        double epos = Math.floor(((double) ep / (double) Main.CHUNK_LENGTH));
        long ls = ((long) spos * (long) FREE.length) + (long) this.offset;
        long es = ((long) epos * (long) FREE.length) + (long) this.offset;
        if (es <= ls)
            return;
        else {
            if (SDFSLogger.isDebug())
                SDFSLogger.getLog().debug("will trim from " + ls + " to " + es);
            FileChannel _bdb = null;
            ByteBuffer buff = ByteBuffer.wrap(this.FREE);
            try {
                _bdb = (FileChannel) Files.newByteChannel(bdbf, StandardOpenOption.CREATE,
                        StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.SPARSE);
                _bdb.position(ls);
                long _pos = ls;
                while (_bdb.position() < es) {
                    _pos = _bdb.position();
                    if (Main.refCount) {
                        byte[] val = new byte[arrayLength];
                        ByteBuffer _bz = ByteBuffer.wrap(val);
                        _bdb.read(_bz);
                        if (!Arrays.equals(val, FREE)) {
                            SparseDataChunk ck = new SparseDataChunk(val, this.version);
                            for (HashLocPair p : ck.getFingers()) {
                                DedupFileStore.removeRef(p.hash, Longs.fromByteArray(p.hashloc));
                            }
                        }
                    }
                    buff.position(0);
                    _bdb.position(_pos);
                    _bdb.write(buff);
                }
                if (SDFSLogger.isDebug())
                    SDFSLogger.getLog().debug("trimed from " + ls + " to " + _bdb.position());
            }

            catch (Exception e) {
                SDFSLogger.getLog().error("error while trim from " + ls + " to " + es, e);
            } finally {
                try {
                    _bdb.close();
                } catch (Exception e) {
                }
            }

        }
    } finally {
        l.unlock();
    }
}

From source file:org.opendedup.collections.LongByteArrayMap.java

@Override
public void truncate(long length) throws IOException {
    WriteLock l = this.hashlock.writeLock();
    l.lock();
    long fpos = 0;
    FileChannel _bdb = null;
    try {
        fpos = this.getMapFilePosition(length);
        if (Main.refCount) {
            this.iterInit();
            LongKeyValue kv = this.nextKeyValue(false);
            while (kv != null && kv.getKey() < fpos) {
                SparseDataChunk ck = kv.getValue();
                for (HashLocPair p : ck.getFingers()) {
                    DedupFileStore.removeRef(p.hash, Longs.fromByteArray(p.hashloc));
                }
                kv = this.nextKeyValue(false);
            }
        }
        _bdb = (FileChannel) Files.newByteChannel(bdbf, StandardOpenOption.CREATE, StandardOpenOption.WRITE,
                StandardOpenOption.READ, StandardOpenOption.SPARSE);
        _bdb.truncate(fpos);
    } catch (Exception e) {
        // System.exit(-1);
        throw new IOException(e);
    } finally {
        try {
            _bdb.close();
        } catch (Exception e) {
        }
        l.unlock();
    }

}