Example usage for java.io DataOutputStream writeLong

List of usage examples for java.io DataOutputStream writeLong

Introduction

On this page you can find example usage of java.io.DataOutputStream.writeLong.

Prototype

public final void writeLong(long v) throws IOException 

Source Link

Document

Writes a long to the underlying output stream as eight bytes, high byte first.

Usage

From source file:com.csipsimple.backup.SipProfilesHelper.java

/**
 * Backs up the SIP profiles database, but only when it has changed since the
 * previous backup pass. The local backup state is a single {@code long}: the
 * database file's last-modified time at the time of the last backup.
 */
@Override
public void performBackup(ParcelFileDescriptor oldState, BackupDataOutput data, ParcelFileDescriptor newState) {
    // No previous state means we have nothing to compare against: back up.
    boolean forceBackup = (oldState == null);

    long fileModified = databaseFile.lastModified();
    try {
        if (!forceBackup) {
            DataInputStream in = new DataInputStream(new FileInputStream(oldState.getFileDescriptor()));
            try {
                // Previous state holds the DB mtime recorded at the last backup.
                long lastModified = in.readLong();
                if (lastModified < fileModified) {
                    forceBackup = true;
                }
            } finally {
                // Close even when readLong() throws (the original leaked here).
                in.close();
            }
        }
    } catch (IOException e) {
        Log.e(THIS_FILE, "Cannot manage previous local backup state", e);
        // Unreadable old state: err on the side of performing the backup.
        forceBackup = true;
    }

    Log.d(THIS_FILE, "Will backup profiles ? " + forceBackup);
    if (forceBackup) {
        JSONArray accountsSaved = SipProfileJson.serializeSipProfiles(mContext);
        try {
            writeData(data, accountsSaved.toString());
        } catch (IOException e) {
            Log.e(THIS_FILE, "Cannot manage remote backup", e);
        }
    }

    // Persist the current mtime as the new local state for the next pass.
    try {
        DataOutputStream out = new DataOutputStream(new FileOutputStream(newState.getFileDescriptor()));
        try {
            out.writeLong(fileModified);
        } finally {
            // Close even when writeLong() throws (the original leaked here).
            out.close();
        }
    } catch (IOException e) {
        Log.e(THIS_FILE, "Cannot manage final local backup state", e);
    }
}

From source file:com.exzogeni.dk.http.cache.DiscCacheStore.java

/**
 * Atomically persists a cache entry's metadata: an absolute expiry timestamp
 * followed by the response headers (name, value count, then each value).
 * On any I/O failure the partial write is rolled back and the entry deleted,
 * so a torn write can never leave a corrupt meta file behind.
 *
 * @param metaFile    destination file for the serialized metadata
 * @param metaHeaders header name to values map to serialize
 * @param maxAge      entry lifetime in milliseconds, relative to now
 * @throws IOException if the write fails; the meta file is removed first
 */
private void saveMetaFile(@NonNull File metaFile, @NonNull Map<String, List<String>> metaHeaders, long maxAge)
        throws IOException {
    final AtomicFile af = new AtomicFile(metaFile);
    final FileOutputStream fos = af.startWrite();
    try { // commit on success; roll back and drop the entry on IO failure
        final DataOutputStream dat = new DataOutputStream(new BufferPoolOutputStream(fos));
        // Expiry is stored as an absolute wall-clock deadline, not a relative max-age.
        dat.writeLong(System.currentTimeMillis() + maxAge);
        dat.writeInt(metaHeaders.size());
        for (final Map.Entry<String, List<String>> header : metaHeaders.entrySet()) {
            dat.writeUTF(header.getKey());
            dat.writeInt(header.getValue().size());
            for (final String value : header.getValue()) {
                dat.writeUTF(value);
            }
        }
        // Flushes the buffering wrapper before the atomic commit below.
        IOUtils.closeQuietly(dat);
        af.finishWrite(fos);
    } catch (IOException e) {
        af.failWrite(fos);
        af.delete();
        throw e;
    }
}

From source file:org.alfresco.encryption.MACUtils.java

/**
 * Encodes {@code l} as its 8-byte big-endian (high byte first) representation,
 * the same wire format produced by {@link java.io.DataOutputStream#writeLong}.
 *
 * @param l the value to encode
 * @return an 8-byte big-endian encoding of {@code l}
 * @throws IOException never thrown; kept for interface compatibility
 */
protected byte[] longToByteArray(long l) throws IOException {
    // ByteBuffer defaults to big-endian order, matching DataOutputStream.
    return java.nio.ByteBuffer.allocate(8).putLong(l).array();
}

From source file:com.csipsimple.backup.SipSharedPreferencesHelper.java

/**
 * Backs up the SIP shared preferences file, but only when it has changed since
 * the previous backup pass. The local backup state is a single {@code long}:
 * the preferences file's last-modified time at the time of the last backup.
 */
@Override
public void performBackup(ParcelFileDescriptor oldState, BackupDataOutput data, ParcelFileDescriptor newState) {
    // No previous state means we have nothing to compare against: back up.
    boolean forceBackup = (oldState == null);

    // Default of 1 keeps the mtime comparison meaningful when the prefs
    // file reference is missing.
    long fileModified = 1;
    if (prefsFiles != null) {
        fileModified = prefsFiles.lastModified();
    }
    try {
        if (!forceBackup) {
            DataInputStream in = new DataInputStream(new FileInputStream(oldState.getFileDescriptor()));
            try {
                // Previous state holds the mtime recorded at the last backup.
                long lastModified = in.readLong();
                if (lastModified < fileModified) {
                    forceBackup = true;
                }
            } finally {
                // Close even when readLong() throws (the original leaked here).
                in.close();
            }
        }
    } catch (IOException e) {
        Log.e(THIS_FILE, "Cannot manage previous local backup state", e);
        // Unreadable old state: err on the side of performing the backup.
        forceBackup = true;
    }

    Log.d(THIS_FILE, "Will backup profiles ? " + forceBackup);
    if (forceBackup) {
        JSONObject settings = SipProfileJson.serializeSipSettings(mContext);
        try {
            writeData(data, settings.toString());
        } catch (IOException e) {
            Log.e(THIS_FILE, "Cannot manage remote backup", e);
        }
    }

    // Persist the current mtime as the new local state for the next pass.
    try {
        DataOutputStream out = new DataOutputStream(new FileOutputStream(newState.getFileDescriptor()));
        try {
            out.writeLong(fileModified);
        } finally {
            // Close even when writeLong() throws (the original leaked here).
            out.close();
        }
    } catch (IOException e) {
        Log.e(THIS_FILE, "Cannot manage final local backup state", e);
    }
}

From source file:com.codefollower.lealone.omid.tso.TimestampOracle.java

/**
 * Must be called holding an exclusive lock
 * /*from w ww .  j a v  a 2s  . c o  m*/
 * return the next timestamp
 */
public long next(DataOutputStream toWal) throws IOException {
    last++;
    if (last == maxTimestamp) {
        maxTimestamp += TIMESTAMP_BATCH;
        toWal.writeByte(LoggerProtocol.TIMESTAMP_ORACLE);
        toWal.writeLong(maxTimestamp);
    }

    return last;
}

From source file:be.fedict.eid.idp.protocol.openid.StatelessServerAssociationStore.java

/**
 * Re-creates the association with a stateless, self-contained handle.
 * The handle encodes [type marker | MAC key | expiry] encrypted under
 * {@code secretKeySpec} with a random IV, optionally prefixed with an
 * HMAC-SHA256 signature, then Base64-URL encoded.
 *
 * @param association the association whose key/type/expiry to encode
 * @return a new association of the same type carrying the stateless handle
 * @throws AssociationException on unknown association type or oversized handle
 */
private Association setHandle(Association association) throws AssociationException, IOException,
        NoSuchAlgorithmException, NoSuchPaddingException, InvalidKeyException, IllegalBlockSizeException,
        BadPaddingException, InvalidAlgorithmParameterException, NoSuchProviderException {
    ByteArrayOutputStream encodedAssociation = new ByteArrayOutputStream();
    String type = association.getType();
    // Compare by content, not reference: the original used ==, which only
    // works when getType() returns the interned constant instance.
    if (Association.TYPE_HMAC_SHA1.equals(type)) {
        encodedAssociation.write(1); // type marker: SHA1
    } else if (Association.TYPE_HMAC_SHA256.equals(type)) {
        encodedAssociation.write(2); // type marker: SHA256
    } else {
        throw new AssociationException("unknown type: " + type);
    }
    SecretKey macKey = association.getMacKey();
    byte[] macKeyBytes = macKey.getEncoded();
    encodedAssociation.write(macKeyBytes);
    Date expiry = association.getExpiry();
    Long time = expiry.getTime();
    // Expiry appended as 8 big-endian bytes.
    DataOutputStream dos = new DataOutputStream(encodedAssociation);
    dos.writeLong(time);
    dos.flush();
    Cipher cipher = Cipher.getInstance(CIPHER_ALGO);
    byte[] iv = new byte[16];
    this.secureRandom.nextBytes(iv);
    IvParameterSpec ivParameterSpec = new IvParameterSpec(iv);
    cipher.init(Cipher.ENCRYPT_MODE, this.secretKeySpec, ivParameterSpec);
    byte[] handleValue = cipher.doFinal(encodedAssociation.toByteArray());
    // Handle layout: [signature?] | IV | ciphertext.
    ByteArrayOutputStream result = new ByteArrayOutputStream();
    result.write(iv);
    result.write(handleValue);
    if (null != this.macSecretKeySpec) {
        // Sign IV + ciphertext and prepend the signature.
        Mac mac = Mac.getInstance("HmacSHA256");
        mac.init(this.macSecretKeySpec);
        byte[] toBeSigned = result.toByteArray();
        byte[] signature = mac.doFinal(toBeSigned);
        result = new ByteArrayOutputStream();
        result.write(signature);
        result.write(iv);
        result.write(handleValue);
    }
    String handle = Base64.encodeBase64URLSafeString(result.toByteArray());
    // setSeed() supplements (never replaces) the PRNG state, so feeding it
    // our own output is safe entropy mixing.
    this.secureRandom.setSeed(result.toByteArray());
    // OpenID association handles are limited to 255 characters.
    if (handle.getBytes().length > 255) {
        throw new AssociationException("handle size > 255");
    }
    if (Association.TYPE_HMAC_SHA1.equals(type)) {
        return Association.createHmacSha1(handle, macKeyBytes, expiry);
    } else if (Association.TYPE_HMAC_SHA256.equals(type)) {
        return Association.createHmacSha256(handle, macKeyBytes, expiry);
    }
    throw new AssociationException("unknown type: " + type);
}

From source file:com.codefollower.lealone.omid.tso.persistence.BookKeeperStateLogger.java

/**
 * Initializes this logger object to add records. Implements the initialize
 * method of the StateLogger interface.
 *
 * Asynchronously creates a BookKeeper ledger; on success, persists the new
 * ledger id to ZooKeeper so it can be found on recovery. Completion (success
 * or failure) is reported through {@code cb.loggerInitComplete}.
 *
 * @param cb  callback invoked once initialization completes or fails
 * @param ctx opaque caller context, threaded through every async callback
 */
@Override
public void initialize(final LoggerInitCallback cb, Object ctx) throws LoggerException {
    TSOServerConfig config = ((BookKeeperStateBuilder.Context) ctx).config;

    bk.asyncCreateLedger(config.getEnsembleSize(), config.getQuorumSize(), BookKeeper.DigestType.CRC32,
            LEDGER_PASSWORD, new CreateCallback() {
                @Override
                public void createComplete(int rc, LedgerHandle lh, Object ctx) {
                    if (rc == BKException.Code.OK) {
                        try {
                            // Keep the handle for subsequent record writes.
                            BookKeeperStateLogger.this.lh = lh;

                            // Serialize the ledger id as 8 big-endian bytes.
                            ByteArrayOutputStream bos = new ByteArrayOutputStream();
                            DataOutputStream dos = new DataOutputStream(bos);
                            dos.writeLong(lh.getId());

                            // Record the ledger id in ZooKeeper; the final
                            // init callback fires from LedgerIdCreateCallback.
                            zk.create(LoggerConstants.OMID_LEDGER_ID_PATH, bos.toByteArray(),
                                    Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT,
                                    new LedgerIdCreateCallback(cb, bos.toByteArray()), ctx);
                        } catch (IOException e) {
                            LOG.error("Failed to write to zookeeper. ", e);
                            cb.loggerInitComplete(Code.BKOPFAILED, BookKeeperStateLogger.this, ctx);
                        }
                    } else {
                        LOG.error("Failed to create ledger. " + BKException.getMessage(rc));
                        cb.loggerInitComplete(Code.BKOPFAILED, BookKeeperStateLogger.this, ctx);
                    }
                }
            }, ctx);
}

From source file:org.apache.hadoop.hive.serde2.lazy.LazyUtils.java

/**
 * Write out a binary representation of a PrimitiveObject to a byte stream.
 *
 * @param out ByteStream.Output, an unsynchronized version of ByteArrayOutputStream, used as a
 *            backing buffer for the DataOutputStream
 * @param o the PrimitiveObject to serialize
 * @param oi the PrimitiveObjectInspector used to extract the primitive value from {@code o}
 * @throws IOException on error during the write operation
 */
public static void writePrimitive(OutputStream out, Object o, PrimitiveObjectInspector oi) throws IOException {

    // DataOutputStream provides the fixed-width big-endian encodings; the
    // actual bytes land in the caller-supplied backing buffer.
    DataOutputStream dos = new DataOutputStream(out);

    try {
        switch (oi.getPrimitiveCategory()) {
        case BOOLEAN:
            boolean b = ((BooleanObjectInspector) oi).get(o);
            dos.writeBoolean(b);
            break;

        case BYTE:
            byte bt = ((ByteObjectInspector) oi).get(o);
            dos.writeByte(bt);
            break;

        case SHORT:
            short s = ((ShortObjectInspector) oi).get(o);
            dos.writeShort(s);
            break;

        case INT:
            int i = ((IntObjectInspector) oi).get(o);
            dos.writeInt(i);
            break;

        case LONG:
            long l = ((LongObjectInspector) oi).get(o);
            dos.writeLong(l);
            break;

        case FLOAT:
            float f = ((FloatObjectInspector) oi).get(o);
            dos.writeFloat(f);
            break;

        case DOUBLE:
            double d = ((DoubleObjectInspector) oi).get(o);
            dos.writeDouble(d);
            break;

        case BINARY: {
            // BINARY bypasses DataOutputStream: raw bytes, no length prefix.
            BytesWritable bw = ((BinaryObjectInspector) oi).getPrimitiveWritableObject(o);
            out.write(bw.getBytes(), 0, bw.getLength());
            break;
        }

        default:
            // Unreachable unless a new primitive category is added upstream.
            throw new RuntimeException("Hive internal error.");
        }
    } finally {
        // closing the underlying ByteStream should have no effect, the data should still be
        // accessible
        dos.close();
    }
}

From source file:com.yahoo.omid.tso.TimestampOracle.java

/**
 * Returns the next timestamp. Must be called holding an exclusive lock.
 * When the pre-allocated batch is exhausted, extends the reservation by
 * {@code TIMESTAMP_BATCH} and records the new upper bound in the write-ahead
 * log before handing the timestamp out.
 *
 * @param toWal write-ahead log stream used to persist the new maximum
 * @return the next timestamp
 * @throws IOException if writing to the WAL fails
 */
public long next(DataOutputStream toWal) throws IOException {
    final long allocated = ++last;
    if (allocated == maxTimestamp) {
        // Batch boundary reached: reserve the next batch and log its bound.
        maxTimestamp += TIMESTAMP_BATCH;
        toWal.writeByte(LoggerProtocol.TIMESTAMPORACLE);
        toWal.writeLong(maxTimestamp);
        if (LOG.isTraceEnabled()) {
            LOG.trace("Logging TimestampOracle " + maxTimestamp);
        }
    }
    if (LOG.isTraceEnabled()) {
        LOG.trace("Next timestamp: " + allocated);
    }
    return allocated;
}

From source file:org.apache.hadoop.hdfs.server.namenode.TestNameNodeCorruptionRecovery.java

/**
 * Tests that a cluster's image is not damaged if checkpoint fails after
 * writing checkpoint time to the image directory but before writing checkpoint
 * time to the edits directory.  This is a very rare failure scenario that can
 * only occur if the namenode is configured with separate directories for image
 * and edits.  This test simulates the failure by forcing the fstime file for
 * edits to contain 0, so that it appears the checkpoint time for edits is less
 * than the checkpoint time for image.
 */
@Test
public void testEditsFsTimeLessThanImageFsTime() throws Exception {
    // Create a cluster with separate directories for image and edits.
    Configuration conf = new Configuration();
    File testDir = new File(System.getProperty("test.build.data", "build/test/data"), "dfs/");
    conf.set("dfs.name.dir", new File(testDir, "name").getPath());
    conf.set("dfs.name.edits.dir", new File(testDir, "edits").getPath());
    cluster = new MiniDFSCluster(0, conf, 1, true, false, true, null, null, null, null);
    cluster.waitActive();

    // Create several files to generate some edits.
    createFile("one");
    createFile("two");
    createFile("three");
    assertTrue(checkFileExists("one"));
    assertTrue(checkFileExists("two"));
    assertTrue(checkFileExists("three"));

    // Restart to force a checkpoint.
    cluster.restartNameNode();

    // Shutdown so that we can safely modify the fstime file.
    File[] editsFsTime = cluster.getNameNode().getFSImage().getFileNames(NameNodeFile.TIME,
            NameNodeDirType.EDITS);
    assertTrue("expected exactly one edits directory containing fstime file", editsFsTime.length == 1);
    cluster.shutdown();

    // Write 0 into the fstime file for the edits directory, simulating the
    // partially-completed checkpoint this test exists to cover.
    FileOutputStream fos = null;
    DataOutputStream dos = null;
    try {
        fos = new FileOutputStream(editsFsTime[0]);
        dos = new DataOutputStream(fos);
        dos.writeLong(0);
    } finally {
        IOUtils.cleanup(LOG, dos, fos);
    }

    // Restart to force another checkpoint, which should discard the old edits.
    // (Second arg tuple differs from the first construction: format=false so
    // the on-disk state doctored above is actually loaded.)
    cluster = new MiniDFSCluster(0, conf, 1, false, false, true, null, null, null, null);
    cluster.waitActive();

    // Restart one more time.  If all of the prior checkpoints worked correctly,
    // then we expect to load the image successfully and find the files.
    cluster.restartNameNode();
    assertTrue(checkFileExists("one"));
    assertTrue(checkFileExists("two"));
    assertTrue(checkFileExists("three"));
}