List of usage examples for java.io.ObjectOutputStream.defaultWriteObject()
public void defaultWriteObject() throws IOException
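defaultWriteObject() writes the non-static, non-transient fields of the current class to the stream, and may only be called from within a class's own writeObject method (otherwise it throws NotActiveException). The common pattern in all of the examples below is to call it first and then append any custom data, which the matching readObject must read back in the same order. A minimal self-contained sketch of that pattern (the Point class and its cached field are hypothetical):

import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;

public class Point implements Serializable {
    private static final long serialVersionUID = 1L;

    private int x;
    private int y;
    private transient int hashCache; // skipped by defaultWriteObject()

    private void writeObject(ObjectOutputStream out) throws IOException {
        out.defaultWriteObject(); // writes x and y
        out.writeInt(hashCache);  // custom data appended after the default fields
    }

    private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
        in.defaultReadObject();   // reads x and y
        hashCache = in.readInt(); // custom data read back in the same order
    }
}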
From source file: ArrayDeque.java

/**
 * Serialize this deque.
 *
 * @serialData The current size (<tt>int</tt>) of the deque,
 *             followed by all of its elements (each an object reference) in
 *             first-to-last order.
 */
private void writeObject(ObjectOutputStream s) throws IOException {
    s.defaultWriteObject();

    // Write out size
    int size = size();
    s.writeInt(size);

    // Write out elements in order.
    int i = head;
    int mask = elements.length - 1;
    for (int j = 0; j < size; j++) {
        s.writeObject(elements[i]);
        i = (i + 1) & mask;
    }
}
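The matching readObject has to consume exactly the same data in the same order. A minimal sketch of the read side, assuming (as in the JDK class) that the elements array, head, and tail are transient and rebuilt here, and that allocateElements is the helper that sizes the array to a power of two:

private void readObject(ObjectInputStream s) throws IOException, ClassNotFoundException {
    s.defaultReadObject();

    // Read in size and allocate array
    int size = s.readInt();
    allocateElements(size);
    head = 0;
    tail = size;

    // Read in all elements in first-to-last order
    for (int i = 0; i < size; i++) {
        elements[i] = s.readObject();
    }
}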
From source file: org.objectweb.proactive.core.remoteobject.RemoteObjectSet.java

private void writeObject(ObjectOutputStream out) throws IOException {
    // About the same speed as UniqueID.getCurrentVMID(), but more readable
    this.vmid = ProActiveRuntimeImpl.getProActiveRuntime().getVMInformation().getVMID();
    out.defaultWriteObject();
    out.writeInt(rros.size());

    // Write the default protocol
    writeProtocol(out, defaultURI, defaultRO);

    // Write all other protocols
    for (URI uri : initialorder) {
        if (!uri.equals(defaultURI)) {
            writeProtocol(out, uri, rros.get(uri));
        }
    }
}
From source file: er.directtoweb.pages.ERD2WListPage.java

private void writeObject(ObjectOutputStream out) throws IOException {
    out.defaultWriteObject();
    NSNotificationCenter.defaultCenter().removeObserver(this,
            EOEditingContext.EditingContextDidSaveChangesNotification, null);
}
From source file: ArraySet.java

/**
 * Save the state of this <tt>ArraySet</tt> instance to a stream (that is,
 * serialize this set).
 *
 * @serialData The size of the set (the number of elements it contains)
 *             (int), followed by all of its elements (each an Object) in no
 *             particular order.
 */
private void writeObject(java.io.ObjectOutputStream s) throws java.io.IOException {
    s.defaultWriteObject();
    s.writeInt(map.size());
    for (Iterator<E> i = map.keySet().iterator(); i.hasNext();) {
        s.writeObject(i.next());
    }
}
From source file: IntHashMap.java

/**
 * Save the state of the <tt>IntHashMap</tt> instance to a stream (i.e.,
 * serialize it).
 *
 * @serialData The <i>capacity</i> of the IntHashMap (the length of the
 *             bucket array) is emitted (int), followed by the <i>size</i>
 *             of the IntHashMap (the number of key-value mappings),
 *             followed by the key (int) and value (Object) for each
 *             key-value mapping represented by the IntHashMap. The
 *             key-value mappings are emitted in no particular order.
 */
private void writeObject(java.io.ObjectOutputStream s) throws IOException {
    // Write out the threshold, loadfactor, and any hidden stuff
    s.defaultWriteObject();

    // Write out number of buckets
    s.writeInt(table.length);

    // Write out size (number of Mappings)
    s.writeInt(count);

    // Write out keys and values (alternating)
    for (int index = table.length - 1; index >= 0; index--) {
        Entry entry = table[index];
        while (entry != null) {
            s.writeInt(entry.key);
            s.writeObject(entry.value);
            entry = entry.next;
        }
    }
}
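The read side mirrors this layout: restore the default fields, rebuild the bucket array from the stored capacity, then re-insert each mapping. A sketch under the assumption that the class exposes a put(int key, Object value) method:

private void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException {
    // Read in the threshold, loadfactor, and any hidden stuff
    s.defaultReadObject();

    // Read in number of buckets and allocate the bucket array
    int numBuckets = s.readInt();
    table = new Entry[numBuckets];

    // Read in size (number of Mappings)
    int size = s.readInt();

    // Read the keys and values, and put the mappings back in the table
    for (int i = 0; i < size; i++) {
        int key = s.readInt();
        Object value = s.readObject();
        put(key, value);
    }
}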
From source file: org.sakaiproject.tool.assessment.qti.util.XmlStringBuffer.java

/**
 * Synchronizes object prior to serialization
 *
 * @param out ObjectOutputStream
 *
 * @throws IOException
 */
private void writeObject(java.io.ObjectOutputStream out) throws IOException {
    if (log.isDebugEnabled()) {
        log.debug("writeObject(ObjectOutputStream " + out + ")");
    }

    this.xml = new StringBuffer(this.stringValue());
    out.defaultWriteObject();
}
From source file: org.proteosuite.FastScatterPlot.java

/**
 * Provides serialisation support.
 *
 * @param stream the output stream.
 *
 * @throws IOException if there is an I/O error.
 */
private void writeObject(ObjectOutputStream stream) throws IOException {
    stream.defaultWriteObject();
    SerialUtilities.writePaint(this.paint, stream);
    SerialUtilities.writeStroke(this.domainGridlineStroke, stream);
    SerialUtilities.writePaint(this.domainGridlinePaint, stream);
    SerialUtilities.writeStroke(this.rangeGridlineStroke, stream);
    SerialUtilities.writePaint(this.rangeGridlinePaint, stream);
}
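Paint and Stroke are not Serializable, which is why they are routed through JFreeChart's SerialUtilities write helpers rather than left to defaultWriteObject(). The corresponding readObject restores them with the matching read helpers, in the same order; a sketch using the field names from the write method above:

private void readObject(ObjectInputStream stream) throws IOException, ClassNotFoundException {
    stream.defaultReadObject();
    this.paint = SerialUtilities.readPaint(stream);
    this.domainGridlineStroke = SerialUtilities.readStroke(stream);
    this.domainGridlinePaint = SerialUtilities.readPaint(stream);
    this.rangeGridlineStroke = SerialUtilities.readStroke(stream);
    this.rangeGridlinePaint = SerialUtilities.readPaint(stream);
}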
From source file: org.apache.wicket.util.upload.DiskFileItem.java

/**
 * Writes the state of this object during serialization.
 *
 * @param out
 *            The stream to which the state should be written.
 *
 * @throws IOException
 *             if an error occurs.
 */
private void writeObject(final ObjectOutputStream out) throws IOException {
    // Cache the data (or record the backing file) so it travels with the object
    if (dfos.isInMemory()) {
        cachedContent = get();
    } else {
        cachedContent = null;
        dfosFile = dfos.getFile();
    }

    // write out values
    out.defaultWriteObject();
}
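Deserialization has to reverse this: after defaultReadObject() the cached bytes (or the recorded temporary file) are copied back into a fresh deferred output stream. A rough sketch, assuming a getOutputStream() helper that recreates dfos, as the real class provides:

private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException {
    // read values
    in.defaultReadObject();

    OutputStream output = getOutputStream();
    if (cachedContent != null) {
        // small upload: the bytes travelled inside the serialized object
        output.write(cachedContent);
    } else {
        // large upload: copy the recorded temp file back into the new store
        InputStream input = new FileInputStream(dfosFile);
        try {
            byte[] buffer = new byte[8192];
            int n;
            while ((n = input.read(buffer)) != -1) {
                output.write(buffer, 0, n);
            }
        } finally {
            input.close();
        }
        dfosFile.delete();
        dfosFile = null;
    }
    output.close();
    cachedContent = null;
}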
From source file: com.healthmarketscience.rmiio.DirectRemoteInputStream.java

/**
 * Serializes this object and all of the underlying stream's data directly
 * to the given ObjectOutputStream.
 *
 * @serialData the compression status of the stream, followed by the default
 *             chunk size for the serialized stream data (int), followed by
 *             chunks of the underlying stream. Each chunk has a chunk code
 *             which indicates how to handle its length (either default,
 *             explicit as int, or EOF), and then the specified number of
 *             bytes if not EOF.
 */
private void writeObject(ObjectOutputStream out) throws IOException {
    switch (_consumptionState) {
    case NONE:
        // this is the required state
        break;
    case LOCAL:
    case SERIAL:
        throw new NotSerializableException(getClass().getName()
                + " (underlying stream has already been consumed, type: " + _consumptionState + ")");
    default:
        throw new RuntimeException("unknown state " + _consumptionState);
    }

    out.defaultWriteObject();

    // once we start consuming the inputstream, we can't rewrite it
    _consumptionState = ConsumptionState.SERIAL;

    final int defaultChunkSize = RemoteInputStreamServer.DEFAULT_CHUNK_SIZE;

    // note, we create RemoteInputStreamServer instances, but we do not
    // actually export them.
    RemoteInputStreamServer server = null;
    try {
        if (_compress && (_tmpFile == null)) {
            // this is the first time the data is being read, and we need to
            // compress it as we read it.
            server = new GZIPRemoteInputStream(_in, _monitor, defaultChunkSize);
        } else {
            // we are re-serializing a previously serialized stream, so the data
            // is already compressed (if compression was desired)
            server = new SimpleRemoteInputStream(_in, _monitor, defaultChunkSize);
        }

        // record the default chunk size
        out.writeInt(defaultChunkSize);

        int packetId = RemoteStreamServer.INITIAL_VALID_SEQUENCE_ID;
        while (true) {
            byte[] packet = server.readPacket(packetId++);
            if (packet != null) {
                if (packet.length > 0) {
                    // we have a packet with data, write it to the output stream. if
                    // the packet is a different length, record the length.
                    if (packet.length == defaultChunkSize) {
                        out.write(DEFAULT_CHUNK_CODE);
                    } else {
                        out.write(CUSTOM_CHUNK_CODE);
                        out.writeInt(packet.length);
                    }
                    out.write(packet);
                }
            } else {
                // reached end of stream, indicate this
                out.write(EOF_CODE);
                break;
            }
        }

        // local stream is exhausted
        _gotEOF = true;

        // indicate successful read
        try {
            server.close(true);
        } catch (IOException e) {
            // log, but ignore failures here
            if (LOG.isDebugEnabled()) {
                LOG.debug("Failed closing server", e);
            }
        }
    } finally {
        RmiioUtil.closeQuietly(server);
        RmiioUtil.closeQuietly(this);
    }
}
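The chunked format described in the @serialData note can be consumed symmetrically: read the default fields, then the default chunk size, then one chunk per chunk code until EOF_CODE. The real class spools the chunks to a temporary file; the simplified sketch below just collects them in memory, using the same chunk-code constants as the write method:

private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
    in.defaultReadObject();

    int defaultChunkSize = in.readInt();
    ByteArrayOutputStream data = new ByteArrayOutputStream();

    while (true) {
        int chunkCode = in.read();
        if (chunkCode == EOF_CODE) {
            // end of the serialized stream data
            break;
        }
        int chunkLength = (chunkCode == DEFAULT_CHUNK_CODE) ? defaultChunkSize : in.readInt();
        byte[] chunk = new byte[chunkLength];
        in.readFully(chunk);
        data.write(chunk);
    }
    // data now holds the (possibly compressed) contents of the stream
}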
From source file: IntHashMap.java

/**
 * Save the state of the <code>IntHashMap</code> instance to a stream (i.e.,
 * serialize it).
 * <p>
 * The <i>capacity</i> of the IntHashMap (the length of the bucket array) is
 * emitted (int), followed by the <i>size</i> of the IntHashMap (the number
 * of key-value mappings), followed by the key (int) and value (Object) for
 * each key-value mapping represented by the IntHashMap. The key-value
 * mappings are emitted in no particular order.
 *
 * @exception IOException
 */
private void writeObject(java.io.ObjectOutputStream s) throws IOException {
    // write out the threshold, loadfactor, and any hidden stuff
    s.defaultWriteObject();

    // write out number of buckets
    s.writeInt(table.length);

    // write out size (number of Mappings)
    s.writeInt(count);

    // write out keys and values (alternating)
    for (int index = table.length - 1; index >= 0; index--) {
        Entry entry = table[index];
        while (entry != null) {
            s.writeInt(entry.key);
            s.writeObject(entry.value);
            entry = entry.next;
        }
    }
}