List of usage examples for java.io.DataOutputStream.writeInt

public final void writeInt(int v) throws IOException

Writes an int to the underlying output stream as four bytes, high byte first.
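Before the source-file excerpts, a minimal standalone round-trip sketch shows the contract: writeInt emits exactly four bytes in big-endian order, and DataInputStream.readInt reverses it. The class name WriteIntDemo is ours, not from any of the files below.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Standalone round-trip demo for writeInt/readInt.
public class WriteIntDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(buf)) {
            out.writeInt(0xCAFEBABE);
        }

        byte[] bytes = buf.toByteArray();
        // Exactly four bytes, high byte first:
        System.out.printf("%02X %02X %02X %02X%n",
                bytes[0] & 0xFF, bytes[1] & 0xFF, bytes[2] & 0xFF, bytes[3] & 0xFF); // CA FE BA BE

        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes))) {
            System.out.printf("0x%08X%n", in.readInt()); // 0xCAFEBABE
        }
    }
}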
From source file: ClassFile.java

public void write(DataOutputStream dos, ConstantPoolInfo pool[]) throws IOException, Exception {
    // One tag byte identifying the constant-pool entry, then a type-specific payload.
    dos.write(type);
    switch (type) {
    case CLASS:
    case STRING:
        dos.writeShort(indexOf(arg1, pool));
        break;
    case FIELDREF:
    case METHODREF:
    case INTERFACE:
    case NAMEANDTYPE:
        dos.writeShort(indexOf(arg1, pool));
        dos.writeShort(indexOf(arg2, pool));
        break;
    case INTEGER:
        dos.writeInt(intValue);
        break;
    case FLOAT:
        dos.writeFloat(floatValue);
        break;
    case LONG:
        dos.writeLong(longValue);
        break;
    case DOUBLE:
        dos.writeDouble(doubleValue);
        break;
    case ASCIZ:
    case UNICODE:
        dos.writeShort(strValue.length());
        dos.writeBytes(strValue);
        break;
    default:
        throw new Exception("ConstantPoolInfo::write() - bad type.");
    }
}
From source file: org.apache.accumulo.tserver.replication.AccumuloReplicaSystem.java

/**
 * Wrapper around {@link LogFileValue#write(java.io.DataOutput)} which does not serialize
 * {@link Mutation}s that do not need to be replicated to the given {@link ReplicationTarget}
 */
protected long writeValueAvoidingReplicationCycles(DataOutputStream out, LogFileValue value,
        ReplicationTarget target) throws IOException {
    // TODO This works like LogFileValue, and needs to be parsable by it, which makes this
    // serialization brittle. See matching TODO in BatchWriterReplicationReplayer
    int mutationsToSend = 0;
    for (Mutation m : value.mutations) {
        if (!m.getReplicationSources().contains(target.getPeerName())) {
            mutationsToSend++;
        }
    }

    int mutationsRemoved = value.mutations.size() - mutationsToSend;
    if (mutationsRemoved > 0) {
        log.debug("Removing {} mutations from WAL entry as they have already been replicated to {}",
                mutationsRemoved, target.getPeerName());
    }

    // Add our name, and send it
    final String name = conf.get(Property.REPLICATION_NAME);
    if (StringUtils.isBlank(name)) {
        throw new IllegalArgumentException("Local system has no replication name configured");
    }

    out.writeInt(mutationsToSend);
    for (Mutation m : value.mutations) {
        // If we haven't yet replicated to this peer
        if (!m.getReplicationSources().contains(target.getPeerName())) {
            m.addReplicationSource(name);
            m.write(out);
        }
    }

    return mutationsToSend;
}
From source file: org.apache.jxtadoop.hdfs.server.datanode.DataXceiver.java

/**
 * Get block checksum (MD5 of CRC32).
 * @param in
 */
void getBlockChecksum(DataInputStream in) throws IOException {
    LOG.debug("Method called : getBlockChecksum()");
    final Block block = new Block(in.readLong(), 0, in.readLong());
    DataOutputStream out = null;
    final MetaDataInputStream metadataIn = datanode.data.getMetaDataInputStream(block);
    final DataInputStream checksumIn = new DataInputStream(new BufferedInputStream(metadataIn, BUFFER_SIZE));

    try {
        // read metadata file
        final BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
        final DataChecksum checksum = header.getChecksum();
        final int bytesPerCRC = checksum.getBytesPerChecksum();
        final long crcPerBlock = (metadataIn.getLength() - BlockMetadataHeader.getHeaderSize())
                / checksum.getChecksumSize();

        // compute block checksum
        final MD5Hash md5 = MD5Hash.digest(checksumIn);

        if (LOG.isDebugEnabled()) {
            LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC + ", crcPerBlock=" + crcPerBlock
                    + ", md5=" + md5);
        }

        // write reply
        // out = new DataOutputStream(
        //     NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
        out = new DataOutputStream(s.getOutputStream());
        out.writeShort(DataTransferProtocol.OP_STATUS_SUCCESS);
        out.writeInt(bytesPerCRC);
        out.writeLong(crcPerBlock);
        md5.write(out);
        out.flush();
    } finally {
        LOG.debug("Finalizing : getBlockChecksum()");
        IOUtils.closeStream(out);
        IOUtils.closeStream(checksumIn);
        IOUtils.closeStream(metadataIn);
    }
}
From source file: com.android.leanlauncher.LauncherTransitionable.java

private static void writeConfiguration(Context context, LocaleConfiguration configuration) {
    DataOutputStream out = null;
    try {
        out = new DataOutputStream(context.openFileOutput(LauncherFiles.LAUNCHER_PREFERENCES, MODE_PRIVATE));
        out.writeUTF(configuration.locale);
        out.writeInt(configuration.mcc);
        out.writeInt(configuration.mnc);
        out.flush();
    } catch (FileNotFoundException e) {
        // Ignore
    } catch (IOException e) {
        //noinspection ResultOfMethodCallIgnored
        context.getFileStreamPath(LauncherFiles.LAUNCHER_PREFERENCES).delete();
    } finally {
        if (out != null) {
            try {
                out.close();
            } catch (IOException e) {
                // Ignore
            }
        }
    }
}
From source file: mp.teardrop.PlaybackService.java

/**
 * Save the service state to disk.
 *
 * @param pendingSeek The pendingSeek to store. Should be the current
 *        MediaPlayer position or 0.
 */
public void saveState(int pendingSeek) {
    try {
        DataOutputStream out = new DataOutputStream(openFileOutput(STATE_FILE, 0));
        Song song = mCurrentSong;
        out.writeLong(STATE_FILE_MAGIC);
        out.writeInt(STATE_VERSION);
        out.writeInt(pendingSeek);
        out.writeLong(song == null ? -1 : song.id);
        mTimeline.writeState(getSharedPreferences(PREFS_SAVED_SONGS, 0));
        out.close();
    } catch (IOException e) {
        Log.w("OrchidMP", "Failed to save state", e);
    } catch (JSONException e) {
        Log.w("OrchidMP", "Failed to save state", e);
    }
}
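The write order above fixes the state-file layout: an eight-byte magic value, the state version, the pending seek position, and the current song id (or -1). A matching reader might look like the sketch below; restoreState is a hypothetical name, not a method of PlaybackService, and imports are elided as in the excerpts.

// Hypothetical counterpart to saveState(); reads fields in the order written.
private void restoreState() {
    try (DataInputStream in = new DataInputStream(openFileInput(STATE_FILE))) {
        if (in.readLong() != STATE_FILE_MAGIC)
            return; // not a state file we wrote
        if (in.readInt() != STATE_VERSION)
            return; // layout changed; ignore stale state
        int pendingSeek = in.readInt(); // written with writeInt above
        long songId = in.readLong();    // -1 means no current song
        // ... resume playback from pendingSeek and songId ...
    } catch (IOException e) {
        Log.w("OrchidMP", "Failed to restore state", e);
    }
}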
From source file: se.llbit.chunky.renderer.scene.Scene.java

private synchronized void saveDump(RenderContext context, ProgressListener progressListener) {
    String fileName = name + ".dump";
    DataOutputStream out = null;
    try {
        String task = "Saving render dump";
        progressListener.setProgress(task, 1, 0, 2);
        Log.info("Saving render dump " + fileName);
        out = new DataOutputStream(new GZIPOutputStream(context.getSceneFileOutputStream(fileName)));
        out.writeInt(width);
        out.writeInt(height);
        out.writeInt(spp);
        out.writeLong(renderTime);
        for (int x = 0; x < width; ++x) {
            progressListener.setProgress(task, x + 1, 0, width);
            for (int y = 0; y < height; ++y) {
                out.writeDouble(samples[(y * width + x) * 3 + 0]);
                out.writeDouble(samples[(y * width + x) * 3 + 1]);
                out.writeDouble(samples[(y * width + x) * 3 + 2]);
            }
        }
        Log.info("Render dump saved");
    } catch (IOException e) {
        Log.warn("IO exception while saving render dump!", e);
    } finally {
        if (out != null) {
            try {
                out.close();
            } catch (IOException e) {
            }
        }
    }
}
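Because the dump is wrapped in a GZIPOutputStream, a reader has to wrap the file in a GZIPInputStream before the DataInputStream; the header fields then come back in write order. A minimal sketch follows; loadDumpHeader is our name, not Chunky's, and imports are elided as in the excerpts.

// Hypothetical reader for the dump header written by saveDump().
private void loadDumpHeader(File dumpFile) throws IOException {
    try (DataInputStream in = new DataInputStream(
            new GZIPInputStream(new FileInputStream(dumpFile)))) {
        int width = in.readInt();   // matches out.writeInt(width)
        int height = in.readInt();
        int spp = in.readInt();     // samples per pixel
        long renderTime = in.readLong();
        // width * height * 3 doubles follow, x-major, as written by saveDump()
        System.out.println(width + "x" + height + " at " + spp + " spp");
    }
}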
From source file: org.apache.hadoop.hdfs.server.datanode.DataXceiver.java

/**
 * Get block checksum (MD5 of CRC32).
 * @param in
 */
void getBlockChecksum(DataInputStream in) throws IOException {
    final Block block = new Block(in.readLong(), 0, in.readLong());
    Token<BlockTokenIdentifier> accessToken = new Token<BlockTokenIdentifier>();
    accessToken.readFields(in);
    DataOutputStream out = new DataOutputStream(NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
    if (datanode.isBlockTokenEnabled) {
        try {
            datanode.blockTokenSecretManager.checkAccess(accessToken, null, block,
                    BlockTokenSecretManager.AccessMode.READ);
        } catch (InvalidToken e) {
            try {
                out.writeShort(DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN);
                out.flush();
                throw new IOException("Access token verification failed, for client " + remoteAddress
                        + " for OP_BLOCK_CHECKSUM for block " + block);
            } finally {
                IOUtils.closeStream(out);
            }
        }
    }

    final MetaDataInputStream metadataIn = datanode.data.getMetaDataInputStream(block);
    final DataInputStream checksumIn = new DataInputStream(new BufferedInputStream(metadataIn, BUFFER_SIZE));

    try {
        // read metadata file
        final BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
        final DataChecksum checksum = header.getChecksum();
        final int bytesPerCRC = checksum.getBytesPerChecksum();
        final long crcPerBlock = (metadataIn.getLength() - BlockMetadataHeader.getHeaderSize())
                / checksum.getChecksumSize();

        // compute block checksum
        final MD5Hash md5 = MD5Hash.digest(checksumIn);

        if (LOG.isDebugEnabled()) {
            LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC + ", crcPerBlock=" + crcPerBlock
                    + ", md5=" + md5);
        }

        // write reply
        out.writeShort(DataTransferProtocol.OP_STATUS_SUCCESS);
        out.writeInt(bytesPerCRC);
        out.writeLong(crcPerBlock);
        md5.write(out);
        out.flush();
    } finally {
        IOUtils.closeStream(out);
        IOUtils.closeStream(checksumIn);
        IOUtils.closeStream(metadataIn);
    }
}
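Note the fixed reply layout in both DataXceiver variants: a two-byte status code (writeShort), bytesPerCRC as a four-byte int (writeInt), crcPerBlock as an eight-byte long (writeLong), and finally the 16-byte MD5 digest. A client must read the fields back in exactly that order.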
From source file: org.commoncrawl.service.listcrawler.CrawlHistoryManager.java

/**
 * append a ProxyCrawlHistoryItem to the active log
 *
 * @param item
 * @throws IOException
 */
void appendItemToLog(ProxyCrawlHistoryItem item) throws IOException {
    try {
        // open the log file ...
        DataOutputStream logStream = new DataOutputStream(new FileOutputStream(getActiveLogFilePath(), true));
        try {
            // reset crc calculator (single thread so no worries on synchronization)
            _crc16Out.reset();
            // reset output stream
            _outputBuffer.reset();
            // create checked stream
            CheckedOutputStream checkedStream = new CheckedOutputStream(_outputBuffer, _crc16Out);
            DataOutputStream dataOutputStream = new DataOutputStream(checkedStream);
            // write out item
            item.serialize(dataOutputStream, new BinaryProtocol());
            dataOutputStream.flush();
            // ok now write out sync, crc, length, then data
            logStream.write(getLocalLogSyncBytes());
            logStream.writeInt((int) checkedStream.getChecksum().getValue());
            logStream.writeShort((short) _outputBuffer.getLength());
            logStream.write(_outputBuffer.getData(), 0, _outputBuffer.getLength());
            logStream.flush();
            logStream.close();
            logStream = null;
            // now we need to update the file header
            updateLogFileHeader(getActiveLogFilePath(), 1, LOG_ITEM_HEADER_SIZE + _outputBuffer.getLength());

            URLFP fingerprint = URLUtils.getURLFPFromURL(item.getOriginalURL(), true);
            // update local log
            synchronized (_localLogItems) {
                if (fingerprint != null) {
                    _localLogItems.put(fingerprint, item);
                }
            }

            ImmutableSet<CrawlList> lists = null;
            // and now walk lists updating them as necessary
            synchronized (_crawlLists) {
                lists = new ImmutableSet.Builder<CrawlList>().addAll(_crawlLists.values()).build();
            }
            for (CrawlList list : lists) {
                try {
                    list.updateItemState(fingerprint, item);
                } catch (Exception e) {
                    // ok, IF an error occurs updating the list metadata.. we need to continue along.
                    // it is critical for this thread to not die in such a circumstance
                    LOG.fatal("Error Updating List(" + list.getListId() + "):"
                            + CCStringUtils.stringifyException(e));
                    System.out.println("Exception in List Update(" + list.getListId() + "):"
                            + CCStringUtils.stringifyException(e));
                }
            }
        } finally {
            if (logStream != null) {
                logStream.close();
            }
        }
    } finally {
    }
}
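The record framing written above is: sync marker, payload checksum as a four-byte int, payload length as a two-byte short, then the payload. A hedged reader sketch follows; readOneRecord is a hypothetical helper, it assumes the caller supplies the writer's sync bytes and the same Checksum implementation, and it assumes ProxyCrawlHistoryItem exposes a deserialize counterpart to the serialize call used by the writer.

// Hypothetical reader for one record framed by appendItemToLog().
ProxyCrawlHistoryItem readOneRecord(DataInputStream in, byte[] syncBytes, Checksum crc) throws IOException {
    byte[] sync = new byte[syncBytes.length];
    in.readFully(sync); // sync marker comes first
    if (!java.util.Arrays.equals(sync, syncBytes)) {
        throw new IOException("lost sync");
    }
    int expectedCrc = in.readInt();       // written with writeInt above
    int length = in.readShort() & 0xFFFF; // length was written as a short
    byte[] payload = new byte[length];
    in.readFully(payload);
    crc.reset();
    crc.update(payload, 0, length);
    if ((int) crc.getValue() != expectedCrc) {
        throw new IOException("bad crc");
    }
    // Assumption: deserialize() mirrors the serialize() call in the writer.
    ProxyCrawlHistoryItem item = new ProxyCrawlHistoryItem();
    item.deserialize(new DataInputStream(new java.io.ByteArrayInputStream(payload)), new BinaryProtocol());
    return item;
}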
From source file: org.apache.geode.internal.cache.tier.sockets.HandShake.java

public void accept(OutputStream out, InputStream in, byte epType, int qSize,
        CommunicationMode communicationMode, Principal principal) throws IOException {
    DataOutputStream dos = new DataOutputStream(out);
    DataInputStream dis;
    if (clientVersion.compareTo(Version.CURRENT) < 0) {
        dis = new VersionedDataInputStream(in, clientVersion);
        dos = new VersionedDataOutputStream(dos, clientVersion);
    } else {
        dis = new DataInputStream(in);
    }
    // Write ok reply
    if (communicationMode.isWAN() && principal != null) {
        dos.writeByte(REPLY_WAN_CREDENTIALS);
    } else {
        dos.writeByte(REPLY_OK); // byte 59
    }

    // an additional byte for the WAN site needs to be sent for Gateway BC
    if (communicationMode.isWAN()) {
        Version.writeOrdinal(dos, ServerHandShakeProcessor.currentServerVersion.ordinal(), true);
    }

    dos.writeByte(epType);
    dos.writeInt(qSize);

    // Write the server's member
    DistributedMember member = this.system.getDistributedMember();
    ServerHandShakeProcessor.writeServerMember(member, dos);

    // Write no message
    dos.writeUTF("");

    // Write delta-propagation property value if this is not WAN.
    if (!communicationMode.isWAN() && this.clientVersion.compareTo(Version.GFE_61) >= 0) {
        dos.writeBoolean(((InternalDistributedSystem) this.system).getConfig().getDeltaPropagation());
    }

    // Neeraj: Now if the communication mode is GATEWAY_TO_GATEWAY
    // and principal not equal to null then send the credentials also
    if (communicationMode.isWAN() && principal != null) {
        sendCredentialsForWan(dos, dis);
    }

    // Write the distributed system id if this is a 6.6 or greater client
    // on the remote side of the gateway
    if (communicationMode.isWAN() && this.clientVersion.compareTo(Version.GFE_66) >= 0
            && ServerHandShakeProcessor.currentServerVersion.compareTo(Version.GFE_66) >= 0) {
        dos.writeByte(
                ((InternalDistributedSystem) this.system).getDistributionManager().getDistributedSystemId());
    }

    if ((communicationMode.isWAN()) && this.clientVersion.compareTo(Version.GFE_80) >= 0
            && ServerHandShakeProcessor.currentServerVersion.compareTo(Version.GFE_80) >= 0) {
        int pdxSize = PeerTypeRegistration.getPdxRegistrySize();
        dos.writeInt(pdxSize);
    }

    // Flush
    dos.flush();
}
From source file: org.apache.jxtadoop.hdfs.server.datanode.BlockSender.java

/**
 * sendBlock() is used to read a block and its metadata and stream the data to
 * either a client or to another datanode.
 *
 * @param out stream to which the block is written
 * @param baseStream optional. if non-null, <code>out</code> is assumed to
 *        be a wrapper over this stream. This enables optimizations for
 *        sending the data, e.g.
 *        {@link SocketOutputStream#transferToFully(FileChannel, long, int)}.
 * @param throttler for sending data.
 * @return total bytes read, including crc.
 */
long sendBlock(DataOutputStream out, OutputStream baseStream, BlockTransferThrottler throttler)
        throws IOException {
    if (out == null) {
        throw new IOException("out stream is null");
    }
    this.throttler = throttler;

    long initialOffset = offset;
    long totalRead = 0;
    OutputStream streamForSendChunks = out;

    try {
        try {
            checksum.writeHeader(out);
            if (chunkOffsetOK) {
                out.writeLong(offset);
            }
            out.flush();
        } catch (IOException e) { // socket error
            throw ioeToSocketException(e);
        }

        int maxChunksPerPacket;
        int pktSize = DataNode.PKT_HEADER_LEN + SIZE_OF_INTEGER;

        if (transferToAllowed && !verifyChecksum && baseStream instanceof SocketOutputStream
                && blockIn instanceof FileInputStream) {
            FileChannel fileChannel = ((FileInputStream) blockIn).getChannel();

            // blockInPosition also indicates sendChunks() uses transferTo.
            blockInPosition = fileChannel.position();
            streamForSendChunks = baseStream;

            // assure a minimum buffer size.
            maxChunksPerPacket = (Math.max(BUFFER_SIZE, MIN_BUFFER_WITH_TRANSFERTO) + bytesPerChecksum - 1)
                    / bytesPerChecksum;

            // allocate smaller buffer while using transferTo().
            pktSize += checksumSize * maxChunksPerPacket;
        } else {
            maxChunksPerPacket = Math.max(1, (BUFFER_SIZE + bytesPerChecksum - 1) / bytesPerChecksum);
            pktSize += (bytesPerChecksum + checksumSize) * maxChunksPerPacket;
        }

        ByteBuffer pktBuf = ByteBuffer.allocate(pktSize);

        while (endOffset > offset) {
            long len = sendChunks(pktBuf, maxChunksPerPacket, streamForSendChunks);
            offset += len;
            totalRead += len + ((len + bytesPerChecksum - 1) / bytesPerChecksum * checksumSize);
            seqno++;
        }
        try {
            out.writeInt(0); // mark the end of block
            out.flush();
        } catch (IOException e) { // socket error
            throw ioeToSocketException(e);
        }
    } finally {
        if (clientTraceFmt != null) {
            ClientTraceLog.info(String.format(clientTraceFmt, totalRead));
        }
        close();
    }

    blockReadFully = (initialOffset == 0 && offset >= blockLength);

    return totalRead;
}