List of usage examples for java.io ObjectOutputStream writeLong
public void writeLong(long val) throws IOException
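writeLong writes the eight bytes of a primitive long to the stream, high byte first as specified by DataOutput, and pairs with ObjectInputStream.readLong() on the reading side. A minimal round-trip sketch; the file name and values are illustrative:

import java.io.*;

public class WriteLongDemo {
    public static void main(String[] args) throws IOException {
        File f = new File("longs.bin"); // illustrative file name
        // Write two longs; the order of writes fixes the order of reads.
        try (ObjectOutputStream out = new ObjectOutputStream(new FileOutputStream(f))) {
            out.writeLong(42L);
            out.writeLong(System.currentTimeMillis());
        }
        // Read them back in the same order with the matching readLong() calls.
        try (ObjectInputStream in = new ObjectInputStream(new FileInputStream(f))) {
            System.out.println(in.readLong()); // 42
            System.out.println(in.readLong()); // the timestamp
        }
    }
}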
From source file:LongArrayList.java
private void writeObject(ObjectOutputStream out) throws IOException {
    out.defaultWriteObject();
    out.writeInt(array.length);
    for (int i = 0; i < size; i++) {
        out.writeLong(array[i]);
    }
}
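A custom writeObject is only half of the contract: each value must be read back by a matching readLong, in the same order, during deserialization. A hedged sketch of the corresponding readObject for this class, assuming the array and size fields shown above:

// Sketch of the matching deserializer; field names assumed from writeObject above.
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
    in.defaultReadObject();          // restores non-transient fields such as size
    int capacity = in.readInt();     // the array length written by writeObject
    array = new long[capacity];
    for (int i = 0; i < size; i++) { // read back exactly the longs that were written
        array[i] = in.readLong();
    }
}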
From source file:it.unimi.di.big.mg4j.document.TRECDocumentCollection.java
private void writeObject(final ObjectOutputStream s) throws IOException {
    s.defaultWriteObject();
    s.writeLong(descriptors.size64());
    for (TRECDocumentDescriptor descriptor : descriptors) {
        s.writeInt(descriptor.fileIndex);
        s.writeLong(descriptor.startMarker);
        s.writeInt(descriptor.intermediateMarkerDiff);
        s.writeInt(descriptor.stopMarkerDiff);
    }
}
From source file:gr.wavesoft.webng.io.cache.DiskCacheStorage.java
private byte[] serialize(HttpCacheEntry o) {
    ObjectOutputStream oos = null;
    try {
        // Set up streams
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        // Dump object
        oos = new ObjectOutputStream(bos);
        oos.writeLong(o.getRequestDate().getTime());
        oos.writeLong(o.getResponseDate().getTime());
        oos.writeObject(o.getStatusLine());
        oos.writeObject(o.getAllHeaders());
        // Complete stream
        oos.close();
        // Get byte array
        return bos.toByteArray();
    } catch (IOException ex) {
        systemLogger.except(ex);
    } finally {
        try {
            // Guard against the ObjectOutputStream constructor having thrown,
            // which would leave oos null and make an unguarded close() NPE.
            if (oos != null) {
                oos.close();
            }
        } catch (IOException ex) {
            systemLogger.error("Error closing serialization buffer ", ex);
        }
    }
    return null;
}
From source file:jfs.sync.encryption.JFSEncryptedStream.java
private void internalClose() throws IOException {
    delegate.close();
    byte[] bytes = delegate.toByteArray();
    final byte[] originalBytes = bytes;
    long l = bytes.length;
    byte marker = COMPRESSION_NONE;
    if (log.isDebugEnabled()) {
        log.debug("close() checking for compressions for");
    } // if
    CompressionThread dt = new CompressionThread(originalBytes) {
        @Override
        public void run() {
            try {
                ByteArrayOutputStream deflaterStream = new ByteArrayOutputStream();
                Deflater deflater = new Deflater(Deflater.BEST_COMPRESSION, true);
                OutputStream dos = new DeflaterOutputStream(deflaterStream, deflater, COMPRESSION_BUFFER_SIZE);
                dos.write(originalBytes);
                dos.close();
                compressedValue = deflaterStream.toByteArray();
            } catch (Exception e) {
                log.error("run()", e);
            } // try/catch
        } // run()
    };
    CompressionThread bt = new CompressionThread(originalBytes) {
        @Override
        public void run() {
            try {
                if (originalBytes.length > BZIP_MAX_LENGTH) {
                    compressedValue = originalBytes;
                } else {
                    ByteArrayOutputStream bzipStream = new ByteArrayOutputStream();
                    OutputStream bos = new BZip2CompressorOutputStream(bzipStream);
                    bos.write(originalBytes);
                    bos.close();
                    compressedValue = bzipStream.toByteArray();
                } // if
            } catch (Exception e) {
                log.error("run()", e);
            } // try/catch
        } // run()
    };
    CompressionThread lt = new CompressionThread(originalBytes) {
        /*
         * -a{N}:  set compression mode - [0, 1], default: 1 (max)
         * -d{N}:  set dictionary - [0, 28], default: 23 (8MB)
         * -fb{N}: set number of fast bytes - [5, 273], default: 128
         * -lc{N}: set number of literal context bits - [0, 8], default: 3
         * -lp{N}: set number of literal pos bits - [0, 4], default: 0
         * -pb{N}: set number of pos bits - [0, 4], default: 2
         * -mf{MF_ID}: set Match Finder: [bt2, bt4], default: bt4
         * -eos:   write End Of Stream marker
         */
        private int dictionarySize = 1 << 23;
        private int lc = 3;
        private int lp = 0;
        private int pb = 2;
        private int fb = 128;
        public int algorithm = 2;
        public int matchFinderIndex = 1; // 0, 1, 2

        @Override
        public void run() {
            try {
                Encoder encoder = new Encoder();
                encoder.SetEndMarkerMode(false);
                encoder.SetAlgorithm(algorithm); // Whatever that means
                encoder.SetDictionarySize(dictionarySize);
                encoder.SetNumFastBytes(fb);
                encoder.SetMatchFinder(matchFinderIndex);
                encoder.SetLcLpPb(lc, lp, pb);
                ByteArrayOutputStream lzmaStream = new ByteArrayOutputStream();
                ByteArrayInputStream inStream = new ByteArrayInputStream(originalBytes);
                encoder.WriteCoderProperties(lzmaStream);
                encoder.Code(inStream, lzmaStream, -1, -1, null);
                compressedValue = lzmaStream.toByteArray();
            } catch (Exception e) {
                log.error("run()", e);
            } // try/catch
        } // run()
    };
    dt.start();
    bt.start();
    lt.start();
    try {
        dt.join();
        bt.join();
        lt.join();
    } catch (InterruptedException e) {
        log.error("run()", e);
    } // try/catch
    if (dt.compressedValue.length < l) {
        marker = COMPRESSION_DEFLATE;
        bytes = dt.compressedValue;
        l = bytes.length;
    } // if
    if (lt.compressedValue.length < l) {
        marker = COMPRESSION_LZMA;
        bytes = lt.compressedValue;
        l = bytes.length;
    } // if
    if (bt.compressedValue.length < l) {
        marker = COMPRESSION_BZIP2;
        bytes = bt.compressedValue;
        if (log.isWarnEnabled()) {
            log.warn("close() using bzip2 and saving " + (l - bytes.length) + " bytes.");
        } // if
        l = bytes.length;
    } // if
    if (log.isInfoEnabled()) {
        if (marker == COMPRESSION_NONE) {
            log.info("close() using no compression");
        } // if
        if (marker == COMPRESSION_LZMA) {
            log.info("close() using lzma");
        } // if
    } // if
    ObjectOutputStream oos = new ObjectOutputStream(baseOutputStream);
    oos.writeByte(marker);
    oos.writeLong(originalBytes.length);
    oos.flush();
    OutputStream out = baseOutputStream;
    if (cipher != null) {
        out = new CipherOutputStream(out, cipher);
    } // if
    out.write(bytes);
    out.close();
    delegate = null;
    baseOutputStream = null;
}
From source file:com.projity.pm.criticalpath.TaskSchedule.java
public void serialize(ObjectOutputStream s) throws IOException {
    s.writeDouble(percentComplete);
    s.writeLong(rawDuration);
    s.writeLong(start);
    s.writeLong(finish);
}
From source file:com.ecyrd.jspwiki.ReferenceManager.java
/**
 * Serializes hashmaps to disk. The format is private, don't touch it.
 */
private synchronized void serializeToDisk() {
    ObjectOutputStream out = null;
    try {
        StopWatch sw = new StopWatch();
        sw.start();

        File f = new File(m_engine.getWorkDir(), SERIALIZATION_FILE);

        out = new ObjectOutputStream(new BufferedOutputStream(new FileOutputStream(f)));

        out.writeLong(serialVersionUID);
        out.writeLong(System.currentTimeMillis()); // Timestamp
        out.writeObject(m_refersTo);
        out.writeObject(m_referredBy);

        out.close();

        sw.stop();
        log.debug("serialization done - took " + sw);
    } catch (IOException e) {
        log.error("Unable to serialize!");
        try {
            if (out != null) out.close();
        } catch (IOException ex) {
        }
    }
}
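On reload, the two leading longs written here let the reader reject an incompatible or stale file before deserializing the maps. A hedged sketch of the corresponding check; the names mirror the fields above, and the actual JSPWiki deserializer may differ:

// Sketch: verify the header written by serializeToDisk() before trusting the payload.
private synchronized void unserializeFromDisk() throws IOException, ClassNotFoundException {
    File f = new File(m_engine.getWorkDir(), SERIALIZATION_FILE);
    try (ObjectInputStream in = new ObjectInputStream(new BufferedInputStream(new FileInputStream(f)))) {
        long ver = in.readLong();
        if (ver != serialVersionUID) {
            throw new IOException("Unsupported serialization version: " + ver);
        }
        long saved = in.readLong(); // the timestamp; usable for staleness checks
        m_refersTo = (Map) in.readObject();
        m_referredBy = (Map) in.readObject();
    }
}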
From source file:com.ecyrd.jspwiki.ReferenceManager.java
/**
 * Serializes hashmaps to disk. The format is private, don't touch it.
 */
private synchronized void serializeAttrsToDisk(WikiPage p) {
    ObjectOutputStream out = null;
    StopWatch sw = new StopWatch();
    sw.start();

    try {
        File f = new File(m_engine.getWorkDir(), SERIALIZATION_DIR);
        if (!f.exists())
            f.mkdirs();

        //
        // Create a digest for the name
        //
        f = new File(f, getHashFileName(p.getName()));

        // FIXME: There is a concurrency issue here...
        Set entries = p.getAttributes().entrySet();

        if (entries.size() == 0) {
            // Nothing to serialize, therefore we will just simply remove the
            // serialization file so that the next time we boot, we don't
            // deserialize old data.
            f.delete();
            return;
        }

        out = new ObjectOutputStream(new BufferedOutputStream(new FileOutputStream(f)));

        out.writeLong(serialVersionUID);
        out.writeLong(System.currentTimeMillis()); // Timestamp
        out.writeUTF(p.getName());
        // Note: this count includes entries whose values are not Serializable,
        // even though such entries are skipped in the loop below.
        out.writeLong(entries.size());

        for (Iterator i = entries.iterator(); i.hasNext();) {
            Map.Entry e = (Map.Entry) i.next();

            if (e.getValue() instanceof Serializable) {
                out.writeUTF((String) e.getKey());
                out.writeObject(e.getValue());
            }
        }

        out.close();
    } catch (IOException e) {
        log.error("Unable to serialize!");
        try {
            if (out != null) out.close();
        } catch (IOException ex) {
        }
    } catch (NoSuchAlgorithmException e) {
        log.fatal("No MD5 algorithm!?!");
    } finally {
        sw.stop();
        log.debug("serialization for " + p.getName() + " done - took " + sw);
    }
}
From source file:org.aksw.dice.eaglet.uri.impl.FileBasedCachingUriCheckerManager.java
/**
 * Writes the cache to the {@link #tempCacheFile}. After that the
 * {@link #cacheFile} is deleted and the {@link #tempCacheFile} is renamed.
 * <b>NOTE</b> that this method should only be called if the
 * {@link #cacheWriteMutex} has been acquired.
 *
 * @throws IOException
 */
private void performCacheStorage() throws IOException {
    eraseOldEntries();
    FileOutputStream fout = null;
    ObjectOutputStream oout = null;
    try {
        fout = new FileOutputStream(tempCacheFile);
        oout = new ObjectOutputStream(fout);
        // first, serialize the number of URIs
        oout.writeInt(cache.assigned);
        // go over the mapping and serialize all existing pairs
        for (int i = 0; i < cache.allocated.length; ++i) {
            if (cache.allocated[i]) {
                oout.writeObject(((Object[]) cache.keys)[i]);
                oout.writeLong(cache.values[i]);
            }
        }
    } finally {
        IOUtils.closeQuietly(oout);
        IOUtils.closeQuietly(fout);
    }
    if (cacheFile.exists() && !cacheFile.delete()) {
        LOGGER.error("Cache file couldn't be deleted. Aborting.");
        return;
    }
    if (!tempCacheFile.renameTo(cacheFile)) {
        LOGGER.error("Temporary cache file couldn't be renamed. Aborting.");
        return;
    }
    cacheChanges = 0;
}
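Loading reverses the same layout: one int count, then one (key, long) pair per occupied slot. A hedged sketch of the read-back loop; the String key type and the map's put signature are assumptions:

// Sketch: read back what performCacheStorage() wrote; types are assumptions.
private void performCacheLoad() throws IOException, ClassNotFoundException {
    try (ObjectInputStream oin = new ObjectInputStream(new FileInputStream(cacheFile))) {
        int count = oin.readInt();            // number of URIs, written first
        for (int i = 0; i < count; ++i) {
            String key = (String) oin.readObject();
            long value = oin.readLong();
            cache.put(key, value);            // assumed put(String, long) on the map
        }
    }
}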
From source file:org.apache.flink.cep.nfa.SharedBuffer.java
private void writeObject(ObjectOutputStream oos) throws IOException {
    DataOutputViewStreamWrapper target = new DataOutputViewStreamWrapper(oos);
    Map<SharedBufferEntry<K, V>, Integer> entryIDs = new HashMap<>();
    int totalEdges = 0;
    int entryCounter = 0;

    oos.defaultWriteObject();

    // number of pages
    oos.writeInt(pages.size());

    for (Map.Entry<K, SharedBufferPage<K, V>> pageEntry : pages.entrySet()) {
        SharedBufferPage<K, V> page = pageEntry.getValue();

        // key for the current page
        oos.writeObject(page.getKey());

        // number of page entries
        oos.writeInt(page.entries.size());

        for (Map.Entry<ValueTimeWrapper<V>, SharedBufferEntry<K, V>> sharedBufferEntry : page.entries.entrySet()) {
            // serialize the sharedBufferEntry
            SharedBufferEntry<K, V> sharedBuffer = sharedBufferEntry.getValue();

            // assign an id to the sharedBufferEntry for the later serialization of the
            // previous relation
            entryIDs.put(sharedBuffer, entryCounter++);

            ValueTimeWrapper<V> valueTimeWrapper = sharedBuffer.getValueTime();

            valueSerializer.serialize(valueTimeWrapper.value, target);
            oos.writeLong(valueTimeWrapper.getTimestamp());

            int edges = sharedBuffer.edges.size();
            totalEdges += edges;

            oos.writeInt(sharedBuffer.referenceCounter);
        }
    }

    // write the edges between the shared buffer entries
    oos.writeInt(totalEdges);

    for (Map.Entry<K, SharedBufferPage<K, V>> pageEntry : pages.entrySet()) {
        SharedBufferPage<K, V> page = pageEntry.getValue();

        for (Map.Entry<ValueTimeWrapper<V>, SharedBufferEntry<K, V>> sharedBufferEntry : page.entries.entrySet()) {
            SharedBufferEntry<K, V> sharedBuffer = sharedBufferEntry.getValue();

            if (!entryIDs.containsKey(sharedBuffer)) {
                throw new RuntimeException("Could not find id for entry: " + sharedBuffer);
            } else {
                int id = entryIDs.get(sharedBuffer);

                for (SharedBufferEdge<K, V> edge : sharedBuffer.edges) {
                    // to serialize the previous relation we simply serialize the ids
                    // of the source and target SharedBufferEntry
                    if (edge.target != null) {
                        if (!entryIDs.containsKey(edge.getTarget())) {
                            throw new RuntimeException("Could not find id for entry: " + edge.getTarget());
                        } else {
                            int targetId = entryIDs.get(edge.getTarget());

                            oos.writeInt(id);
                            oos.writeInt(targetId);
                            oos.writeObject(edge.version);
                        }
                    } else {
                        oos.writeInt(id);
                        oos.writeInt(-1);
                        oos.writeObject(edge.version);
                    }
                }
            }
        }
    }
}
From source file:org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.java
private void persistToFile() throws IOException {
    assert !cacheEnabled;
    FileOutputStream fos = null;
    ObjectOutputStream oos = null;
    try {
        if (!ioEngine.isPersistent())
            throw new IOException("Attempt to persist non-persistent cache mappings!");
        fos = new FileOutputStream(persistencePath, false);
        oos = new ObjectOutputStream(fos);
        oos.writeLong(cacheCapacity);
        oos.writeUTF(ioEngine.getClass().getName());
        oos.writeUTF(backingMap.getClass().getName());
        oos.writeObject(deserialiserMap);
        oos.writeObject(backingMap);
    } finally {
        if (oos != null) oos.close();
        if (fos != null) fos.close();
    }
}
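Retrieval mirrors this exact order: the capacity long first, then the two class names as a compatibility check, then the two maps. A hedged sketch; the field types and validation details are assumptions, and HBase's real retrieval code performs additional checking:

// Sketch only: mirrors persistToFile(); casts and checks are assumptions.
@SuppressWarnings("unchecked")
private void retrieveFromFile() throws IOException, ClassNotFoundException {
    FileInputStream fis = null;
    ObjectInputStream ois = null;
    try {
        fis = new FileInputStream(persistencePath);
        ois = new ObjectInputStream(fis);
        long capacity = ois.readLong();
        if (capacity != cacheCapacity) {
            throw new IOException("Mismatched cache capacity: " + capacity);
        }
        if (!ois.readUTF().equals(ioEngine.getClass().getName())
                || !ois.readUTF().equals(backingMap.getClass().getName())) {
            throw new IOException("Persisted cache written by an incompatible configuration");
        }
        deserialiserMap = (UniqueIndexMap<Integer>) ois.readObject();                   // field type assumed
        backingMap = (ConcurrentHashMap<BlockCacheKey, BucketEntry>) ois.readObject(); // field type assumed
    } finally {
        if (ois != null) ois.close();
        if (fis != null) fis.close();
    }
}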