Example usage for java.io DataOutputStream writeInt

Introduction

On this page you can find example usages of java.io DataOutputStream writeInt.

Prototype

public final void writeInt(int v) throws IOException 

Document

Writes an int to the underlying output stream as four bytes, high byte first.
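
For example, writeInt(0x12345678) emits the bytes 0x12, 0x34, 0x56, 0x78 in that order (big-endian). A minimal, self-contained demonstration:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class WriteIntDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buffer);

        out.writeInt(0x12345678);

        // writeInt always produces exactly four bytes, high byte first
        for (byte b : buffer.toByteArray()) {
            System.out.printf("%02X ", b); // prints: 12 34 56 78
        }
    }
}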

Usage

From source file:org.apache.giraph.ooc.DiskBackedPartitionStore.java

/**
 * Spill message buffers of a particular type of message (current or incoming
 * buffer) for a partition to disk.
 *
 * @param partitionId Id of the partition to spill the messages for
 * @param pendingMessages The map to get the message buffers from
 * @param superstep Superstep of which we want to offload messages. This is
 *                  equal to current superstep number if we want to offload
 *                  buffers for currentMessageStore, and is equal to next
 *                  superstep number if we want to offload buffer for
 *                  incomingMessageStore
 * @throws IOException
 */
private void spillMessages(Integer partitionId,
        ConcurrentMap<Integer, Pair<Integer, List<VertexIdMessages<I, Writable>>>> pendingMessages,
        long superstep) throws IOException {
    Pair<Integer, List<VertexIdMessages<I, Writable>>> entry;
    messageBufferRWLock.writeLock().lock();
    entry = pendingMessages.remove(partitionId);
    if (entry != null && entry.getLeft() < minBuffSize) {
        pendingMessages.put(partitionId, entry);
        entry = null;
    }
    messageBufferRWLock.writeLock().unlock();

    if (entry == null) {
        return;
    }

    // Sanity check
    checkState(!entry.getRight().isEmpty(),
            "spillMessages: the message buffer that is supposed to be flushed to " + "disk does not exist.");

    File file = new File(getPendingMessagesBufferPath(partitionId, superstep));

    FileOutputStream fos = new FileOutputStream(file, true);
    BufferedOutputStream bos = new BufferedOutputStream(fos);
    DataOutputStream dos = new DataOutputStream(bos);
    for (VertexIdMessages<I, Writable> messages : entry.getRight()) {
        SerializedMessageClass messageClass;
        if (messages instanceof ByteArrayVertexIdMessages) {
            messageClass = SerializedMessageClass.BYTE_ARRAY_VERTEX_ID_MESSAGES;
        } else if (messages instanceof ByteArrayOneMessageToManyIds) {
            messageClass = SerializedMessageClass.BYTE_ARRAY_ONE_MESSAGE_TO_MANY_IDS;
        } else {
            throw new IllegalStateException("spillMessages: serialized message " + "type is not supported");
        }
        dos.writeInt(messageClass.ordinal());
        messages.write(dos);
    }
    dos.close();
}
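
The int written here is a type tag: each buffer is prefixed with its SerializedMessageClass ordinal so a reader can reconstruct the right message type. A minimal sketch of the matching read path (a hypothetical helper, not part of the original class), assuming the same file and enum:

DataInputStream dis = new DataInputStream(new BufferedInputStream(new FileInputStream(file)));
try {
    while (true) {
        int ordinal;
        try {
            ordinal = dis.readInt(); // the tag written by dos.writeInt above
        } catch (EOFException e) {
            break; // no more buffers in the spill file
        }
        SerializedMessageClass messageClass = SerializedMessageClass.values()[ordinal];
        // ... instantiate the matching VertexIdMessages subtype and deserialize it from dis
    }
} finally {
    dis.close();
}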

From source file:org.commoncrawl.service.listcrawler.CacheWriterThread.java

@Override
public void run() {

    boolean shutdown = false;

    while (!shutdown) {
        try {
            final CacheWriteRequest request = _writeRequestQueue.take();

            switch (request._requestType) {

            case ExitThreadRequest: {
                // shutdown condition ... 
                CacheManager.LOG.info("Disk Writer Thread Received Shutdown. Exiting!");
                shutdown = true;
            }
                break;

            case WriteRequest: {

                long timeStart = System.currentTimeMillis();

                try {
                    // reset crc calculator (single thread so no worries on synchronization)
                    _crc32Out.reset();

                    // figure out if we need to compress the item ... 
                    if ((request._item.getFlags() & CacheItem.Flags.Flag_IsCompressed) == 0
                            && request._item.getContent().getCount() != 0) {
                        LOG.info("Incoming Cache Request Content for:" + request._item.getUrl()
                                + " is not compressed. Compressing...");
                        ByteStream compressedBytesOut = new ByteStream(request._item.getContent().getCount());
                        ThriftyGZIPOutputStream gzipOutputStream = new ThriftyGZIPOutputStream(
                                compressedBytesOut);
                        gzipOutputStream.write(request._item.getContent().getReadOnlyBytes(), 0,
                                request._item.getContent().getCount());
                        gzipOutputStream.finish();
                        LOG.info("Finished Compressing Incoming Content for:" + request._item.getUrl()
                                + " BytesIn:" + request._item.getContent().getCount() + " BytesOut:"
                                + compressedBytesOut.size());
                        // replace buffer

                        request._item.setContent(
                                new FlexBuffer(compressedBytesOut.getBuffer(), 0, compressedBytesOut.size()));
                        request._item.setFlags((request._item.getFlags() | CacheItem.Flags.Flag_IsCompressed));
                    }

                    // create streams ...
                    ByteStream bufferOutputStream = new ByteStream(8192);

                    CheckedOutputStream checkedStream = new CheckedOutputStream(bufferOutputStream, _crc32Out);
                    DataOutputStream dataOutputStream = new DataOutputStream(checkedStream);

                    // remember if this item has content ... 
                    boolean hasContent = request._item.isFieldDirty(CacheItem.Field_CONTENT);
                    // now mark the content field as clean, so that it will not be serialized in our current serialization attempt ... 
                    request._item.setFieldClean(CacheItem.Field_CONTENT);
                    // and go ahead and write out the data to the intermediate buffer while also computing partial checksum 
                    request._item.write(dataOutputStream);

                    request._item.setFieldDirty(CacheItem.Field_CONTENT);

                    // ok, now ... write out file header ... 
                    CacheItemHeader itemHeader = new CacheItemHeader(_manager.getLocalLogSyncBytes());

                    itemHeader._status = CacheItemHeader.STATUS_ALIVE;
                    itemHeader._lastAccessTime = System.currentTimeMillis();
                    itemHeader._fingerprint = request._itemFingerprint;
                    // compute total length ... 

                    // first the header bytes in the cacheItem 
                    itemHeader._dataLength = bufferOutputStream.size();
                    // next the content length (encoded - as in size + bytes) ... 
                    itemHeader._dataLength += 4 + request._item.getContent().getCount();
                    // lastly the crc value itself ...
                    itemHeader._dataLength += 8;
                    // open the log file ... 
                    DataOutputBuffer logStream = new DataOutputBuffer();

                    // ok, go ahead and write the header 
                    itemHeader.writeHeader(logStream);
                    // ok now write out the item data minus content... 
                    logStream.write(bufferOutputStream.getBuffer(), 0, bufferOutputStream.size());
                    // now create a checked stream for the content ... 
                    CheckedOutputStream checkedStream2 = new CheckedOutputStream(logStream,
                            checkedStream.getChecksum());

                    dataOutputStream = new DataOutputStream(checkedStream2);

                    // content size 
                    dataOutputStream.writeInt(request._item.getContent().getCount());
                    // now write out the content (via checked stream so that we can calc checksum on content)
                    dataOutputStream.write(request._item.getContent().getReadOnlyBytes(), 0,
                            request._item.getContent().getCount());
                    // ok ... lastly write out the checksum bytes ... 
                    dataOutputStream.writeLong(checkedStream2.getChecksum().getValue());
                    // and FINALLY, write out the total item bytes (so that we can seek in reverse to read the last record in the log)
                    logStream.writeInt(CacheItemHeader.SIZE + itemHeader._dataLength);

                    // ok flush everything to the memory stream
                    dataOutputStream.flush();

                    //ok - time to acquire the log semaphore 
                    //LOG.info("Acquiring Local Log Semaphore");
                    _manager.getLocalLogAccessSemaphore().acquireUninterruptibly();

                    try {

                        // now time to acquire the write semaphore ... 
                        _manager.getLocalLogWriteAccessSemaphore().acquireUninterruptibly();

                        // get the current file position 
                        long recordOffset = _manager.getLocalLogFilePos();

                        try {

                            long ioTimeStart = System.currentTimeMillis();

                            RandomAccessFile logFile = new RandomAccessFile(_manager.getActiveLogFilePath(),
                                    "rw");

                            try {
                                // seek to our known record offset 
                                logFile.seek(recordOffset);
                                // write out the data
                                logFile.write(logStream.getData(), 0, logStream.getLength());
                            } finally {
                                logFile.close();
                            }
                            // now we need to update the file header 
                            _manager.updateLogFileHeader(_manager.getActiveLogFilePath(), 1,
                                    CacheItemHeader.SIZE + itemHeader._dataLength + 4 /*trailing bytes*/);

                            CacheManager.LOG
                                    .info("#### Wrote Cache Item in:" + (System.currentTimeMillis() - timeStart)
                                            + " iotime:" + (System.currentTimeMillis() - ioTimeStart)
                                            + " QueueSize:" + _writeRequestQueue.size());

                        } finally {
                            // release write semaphore quickly 
                            _manager.getLocalLogWriteAccessSemaphore().release();
                        }

                        // now inform the manager of the completed request ... 
                        _manager.writeRequestComplete(request, recordOffset);
                    } finally {
                        //LOG.info("Releasing Local Log Semaphore");
                        _manager.getLocalLogAccessSemaphore().release();
                    }
                } catch (IOException e) {
                    CacheManager.LOG.error("### FUC# BATMAN! - GONNA LOSE THIS REQUEST!!!!:"
                            + CCStringUtils.stringifyException(e));
                    _manager.writeRequestFailed(request, e);
                }
            }
                break;
            }
        } catch (InterruptedException e) {
            // interrupted while waiting on the queue; loop and keep draining
            // until an explicit shutdown request arrives
        }
    }
}
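
Note the trailing logStream.writeInt(...): it appends the total record size after the record itself, so a reader can seek to the end of the log and walk records backwards. A sketch of that reverse read, assuming the record layout above and a hypothetical logFilePath (the trailing int excludes its own four bytes):

RandomAccessFile log = new RandomAccessFile(logFilePath, "r");
try {
    long trailerPos = log.length() - 4;
    log.seek(trailerPos);
    int recordSize = log.readInt();    // total item bytes written above
    log.seek(trailerPos - recordSize); // start of the last record
    // ... read the CacheItemHeader and item data from here
} finally {
    log.close();
}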

From source file:au.org.ala.spatial.util.RecordsSmall.java

private void makeSmallFile(String filename) throws Exception {
    FileWriter outputSpecies = new FileWriter(filename + "records.csv.small.species");
    DataOutputStream outputPoints = new DataOutputStream(
            new BufferedOutputStream(new FileOutputStream(filename + "records.csv.small.points")));
    DataOutputStream outputPointsToSpecies = new DataOutputStream(
            new BufferedOutputStream(new FileOutputStream(filename + "records.csv.small.pointsToSpecies")));

    Map<String, Integer> lsidMap = new HashMap<String, Integer>(200000);
    byte start = 0;
    BufferedReader br = new BufferedReader(new FileReader(filename + "records.csv"));
    int[] header = new int[3];
    int row = start;
    int currentCount = 0;
    String[] line = new String[3];

    String rawline;
    while ((rawline = br.readLine()) != null) {
        currentCount++;
        int p1 = rawline.indexOf(44);
        int p2 = rawline.indexOf(44, p1 + 1);
        if (p1 >= 0 && p2 >= 0) {
            line[0] = rawline.substring(0, p1);
            line[1] = rawline.substring(p1 + 1, p2);
            line[2] = rawline.substring(p2 + 1, rawline.length());
            if (currentCount % 100000 == 0) {
                System.out.print("\rreading row: " + currentCount);
            }

            if (row == 0) {
                for (int e = 0; e < line.length; e++) {
                    if (line[e].equals("names_and_lsid")) {
                        header[0] = e;
                    }

                    if (line[e].equals("longitude")) {
                        header[1] = e;
                    }

                    if (line[e].equals("latitude")) {
                        header[2] = e;
                    }
                }

                logger.debug("header: " + header[0] + "," + header[1] + "," + header[2]);
            } else if (line.length >= 3) {
                try {
                    double lat = Double.parseDouble(line[header[2]]);
                    double lng = Double.parseDouble(line[header[1]]);

                    String species = line[header[0]];

                    Integer idx = lsidMap.get(species);
                    if (idx == null) {
                        idx = lsidMap.size();
                        lsidMap.put(species, idx);

                        outputSpecies.write(species);
                        outputSpecies.write("\n");
                    }

                    outputPoints.writeDouble(lat);
                    outputPoints.writeDouble(lng);

                    outputPointsToSpecies.writeInt(idx);
                } catch (Exception e) {
                    logger.error("failed to read records.csv row: " + row, e);
                }
            }

            row++;
        }
    }

    br.close();

    outputPointsToSpecies.flush();
    outputPointsToSpecies.close();
    outputPoints.flush();
    outputPoints.close();
    outputSpecies.flush();
    outputSpecies.close();

    FileUtils.writeStringToFile(new File(filename + "records.csv.small.speciesCount"),
            String.valueOf(lsidMap.size()));
}
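
Because every record is fixed-width (two writeDouble calls per point, one writeInt per species index), the files can be read back positionally with the matching DataInputStream calls. A sketch of a reader, assuming the same file names:

DataInputStream points = new DataInputStream(
        new BufferedInputStream(new FileInputStream(filename + "records.csv.small.points")));
DataInputStream pointsToSpecies = new DataInputStream(
        new BufferedInputStream(new FileInputStream(filename + "records.csv.small.pointsToSpecies")));
try {
    while (true) {
        double lat = points.readDouble();
        double lng = points.readDouble();
        int speciesIdx = pointsToSpecies.readInt(); // line number in the .species file
        // ... use lat, lng and speciesIdx
    }
} catch (EOFException e) {
    // end of data
} finally {
    points.close();
    pointsToSpecies.close();
}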

From source file:org.apache.hadoop.hdfs.server.datanode.BlockSender.java

/**
 * sendBlock() is used to read block and its metadata and stream the data to
 * either a client or to another datanode. 
 *
 * @param out  stream to which the block is written to
 * @param baseStream optional. if non-null, <code>out</code> is assumed to 
 *        be a wrapper over this stream. This enables optimizations for
 *        sending the data, e.g. 
 *        {@link SocketOutputStream#transferToFully(FileChannel, 
 *        long, int)}.
 * @param throttler for sending data.
 * @return total bytes read, including crc.
 */
long sendBlock(DataOutputStream out, OutputStream baseStream, BlockTransferThrottler throttler)
        throws IOException {
    if (out == null) {
        throw new IOException("out stream is null");
    }
    this.throttler = throttler;

    long initialOffset = offset;
    long totalRead = 0;
    OutputStream streamForSendChunks = out;

    final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
    try {
        try {
            checksum.writeHeader(out);
            if (chunkOffsetOK) {
                out.writeLong(offset);
            }
            out.flush();
        } catch (IOException e) { //socket error
            throw ioeToSocketException(e);
        }

        int maxChunksPerPacket;
        int pktSize = DataNode.PKT_HEADER_LEN + SIZE_OF_INTEGER;

        if (transferToAllowed && !verifyChecksum && baseStream instanceof SocketOutputStream
                && blockIn instanceof FileInputStream) {

            FileChannel fileChannel = ((FileInputStream) blockIn).getChannel();

            // blockInPosition also indicates sendChunks() uses transferTo.
            blockInPosition = fileChannel.position();
            streamForSendChunks = baseStream;

            // ensure a minimum buffer size.
            maxChunksPerPacket = (Math.max(BUFFER_SIZE, MIN_BUFFER_WITH_TRANSFERTO) + bytesPerChecksum - 1)
                    / bytesPerChecksum;

            // packet buffer has to be able to do a normal transfer in the case
            // of recomputing checksum
            pktSize += (bytesPerChecksum + checksumSize) * maxChunksPerPacket;
        } else {
            maxChunksPerPacket = Math.max(1, (BUFFER_SIZE + bytesPerChecksum - 1) / bytesPerChecksum);
            pktSize += (bytesPerChecksum + checksumSize) * maxChunksPerPacket;
        }

        ByteBuffer pktBuf = ByteBuffer.allocate(pktSize);

        while (endOffset > offset) {
            long len = sendChunks(pktBuf, maxChunksPerPacket, streamForSendChunks);
            offset += len;
            totalRead += len + ((len + bytesPerChecksum - 1) / bytesPerChecksum * checksumSize);
            seqno++;
        }
        try {
            out.writeInt(0); // mark the end of block        
            out.flush();
        } catch (IOException e) { //socket error
            throw ioeToSocketException(e);
        }
    } catch (RuntimeException e) {
        LOG.error("unexpected exception sending block", e);

        throw new IOException("unexpected runtime exception", e);
    } finally {
        if (clientTraceFmt != null) {
            final long endTime = System.nanoTime();
            ClientTraceLog.info(String.format(clientTraceFmt, totalRead, initialOffset, endTime - startTime));
        }
        close();
    }

    blockReadFully = (initialOffset == 0 && offset >= blockLength);

    return totalRead;
}
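
Here out.writeInt(0) is an in-band terminator: a zero-length packet tells the receiver that the block is complete. A simplified sketch of how a receiver could detect it, assuming each packet is prefixed with its length (the real HDFS packet header carries more fields, and socketInputStream is a stand-in for the connection):

DataInputStream in = new DataInputStream(socketInputStream);
while (true) {
    int packetLen = in.readInt();
    if (packetLen == 0) {
        break; // end-of-block marker written by out.writeInt(0)
    }
    byte[] packet = new byte[packetLen];
    in.readFully(packet);
    // ... verify checksums and consume the packet data
}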

From source file:com.github.hrpc.rpc.Server.java

/**
 * Setup response for the IPC Call.
 *
 * @param responseBuf buffer to serialize the response into
 * @param call {@link Call} to which we are setting up the response
 * @param status of the IPC call
 * @param erCode error code, if the call failed
 * @param rv return value for the IPC Call, if the call was successful
 * @param errorClass error class, if the call failed
 * @param error error message, if the call failed
 * @throws IOException
 */
private void setupResponse(ByteArrayOutputStream responseBuf, Call call, RpcStatusProto status,
        RpcErrorCodeProto erCode, Writable rv, String errorClass, String error) throws IOException {
    responseBuf.reset();
    DataOutputStream out = new DataOutputStream(responseBuf);
    RpcResponseHeaderProto.Builder headerBuilder = RpcResponseHeaderProto.newBuilder();
    headerBuilder.setClientId(ByteString.copyFrom(call.clientId));
    headerBuilder.setCallId(call.callId);
    headerBuilder.setRetryCount(call.retryCount);
    headerBuilder.setStatus(status);
    headerBuilder.setServerIpcVersionNum(CURRENT_VERSION);

    if (status == RpcStatusProto.SUCCESS) {
        RpcResponseHeaderProto header = headerBuilder.build();
        final int headerLen = header.getSerializedSize();
        int fullLength = CodedOutputStream.computeRawVarint32Size(headerLen) + headerLen;
        try {
            if (rv instanceof ProtobufRpcEngine.RpcWrapper) {
                ProtobufRpcEngine.RpcWrapper resWrapper = (ProtobufRpcEngine.RpcWrapper) rv;
                fullLength += resWrapper.getLength();
                out.writeInt(fullLength);
                header.writeDelimitedTo(out);
                rv.write(out);
            } else { // Have to serialize to buffer to get len
                final DataOutputBuffer buf = new DataOutputBuffer();
                rv.write(buf);
                byte[] data = buf.getData();
                fullLength += buf.getLength();
                out.writeInt(fullLength);
                header.writeDelimitedTo(out);
                out.write(data, 0, buf.getLength());
            }
        } catch (Throwable t) {
            LOG.warn("Error serializing call response for call " + call, t);
            // Call back to same function - this is OK since the
            // buffer is reset at the top, and since status is changed
            // to ERROR it won't infinite loop.
            setupResponse(responseBuf, call, RpcStatusProto.ERROR, RpcErrorCodeProto.ERROR_SERIALIZING_RESPONSE,
                    null, t.getClass().getName(), StringUtils.stringifyException(t));
            return;
        }
    } else { // Rpc Failure
        headerBuilder.setExceptionClassName(errorClass);
        headerBuilder.setErrorMsg(error);
        headerBuilder.setErrorDetail(erCode);
        RpcResponseHeaderProto header = headerBuilder.build();
        int headerLen = header.getSerializedSize();
        final int fullLength = CodedOutputStream.computeRawVarint32Size(headerLen) + headerLen;
        out.writeInt(fullLength);
        header.writeDelimitedTo(out);
    }
    call.setResponse(ByteBuffer.wrap(responseBuf.toByteArray()));
}
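
In both branches writeInt(fullLength) frames the response: the client reads a 4-byte length, then exactly that many bytes containing a varint-delimited header and, on success, the payload. A client-side sketch of the framing, with socketInputStream as a stand-in for the connection:

DataInputStream in = new DataInputStream(socketInputStream);
int fullLength = in.readInt(); // the length written by out.writeInt(fullLength)
byte[] frame = new byte[fullLength];
in.readFully(frame);
// The frame starts with a varint-delimited RpcResponseHeaderProto,
// optionally followed by the serialized response value.
RpcResponseHeaderProto header =
        RpcResponseHeaderProto.parseDelimitedFrom(new ByteArrayInputStream(frame));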

From source file:org.ramadda.repository.database.DatabaseManager.java

/**
 * Write a string to the stream as a length-prefixed byte sequence,
 * using -1 as the null sentinel.
 *
 * @param dos the stream to write to
 * @param s the string to write, may be null
 *
 * @throws Exception on write failure
 */
private void writeString(DataOutputStream dos, String s) throws Exception {
    if (s == null) {
        dos.writeInt(-1);
    } else {
        dos.writeInt(s.length());
        dos.writeBytes(s);
    }
}
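
The matching reader (a sketch, not part of the original class) inverts this: read the length, treat -1 as null, otherwise read that many bytes. Note that writeBytes discards the high byte of each char, so the round trip is only faithful for 8-bit text:

private String readString(DataInputStream dis) throws Exception {
    int length = dis.readInt();
    if (length == -1) {
        return null; // the null sentinel written by writeString
    }
    byte[] bytes = new byte[length];
    dis.readFully(bytes);
    return new String(bytes);
}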

From source file:org.apache.hadoop.hdfs.server.datanode.DataWriter.java

/**
 * Write a block to disk, reading it from the connection's input stream
 * and mirroring it to the downstream datanodes in the pipeline, if any.
 *
 * @throws IOException
 */
private void writeBlock() throws IOException {
    DatanodeInfo srcDataNode = null;
    LOG.debug("writeBlock receive buf size " + s.getReceiveBufferSize() + " tcp no delay " + s.getTcpNoDelay());
    //
    // Read in the header
    //
    long startTime = System.currentTimeMillis();
    int namespaceid = in.readInt();
    Block block = new Block(in.readLong(), dataXceiverServer.estimateBlockSize, in.readLong());
    LOG.info("Receiving block " + block + " src: " + remoteAddress + " dest: " + localAddress);
    int pipelineSize = in.readInt(); // num of datanodes in entire pipeline
    boolean isRecovery = in.readBoolean(); // is this part of recovery?
    String client = Text.readString(in); // working on behalf of this client
    boolean hasSrcDataNode = in.readBoolean(); // is src node info present
    if (hasSrcDataNode) {
        srcDataNode = new DatanodeInfo();
        srcDataNode.readFields(in);
    }
    int numTargets = in.readInt();
    if (numTargets < 0) {
        throw new IOException("Mislabelled incoming datastream.");
    }
    DatanodeInfo targets[] = new DatanodeInfo[numTargets];
    for (int i = 0; i < targets.length; i++) {
        DatanodeInfo tmp = new DatanodeInfo();
        tmp.readFields(in);
        targets[i] = tmp;
    }

    DataOutputStream mirrorOut = null; // stream to next target
    DataInputStream mirrorIn = null; // reply from next target
    DataOutputStream replyOut = null; // stream to prev target
    Socket mirrorSock = null; // socket to next target
    BlockReceiver blockReceiver = null; // responsible for data handling
    String mirrorNode = null; // the name:port of next target
    String firstBadLink = ""; // first datanode that failed in connection setup

    updateCurrentThreadName("receiving block " + block + " client=" + client);
    try {
        // open a block receiver and check if the block does not exist
        blockReceiver = new BlockReceiver(namespaceid, block, in, s.getRemoteSocketAddress().toString(),
                s.getLocalSocketAddress().toString(), isRecovery, client, srcDataNode, datanode);

        // get a connection back to the previous target
        replyOut = new DataOutputStream(new BufferedOutputStream(
                NetUtils.getOutputStream(s, datanode.socketWriteTimeout), SMALL_BUFFER_SIZE));

        //
        // Open network conn to backup machine, if 
        // appropriate
        //
        if (targets.length > 0) {
            InetSocketAddress mirrorTarget = null;
            // Connect to backup machine
            mirrorNode = targets[0].getName();
            mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
            mirrorSock = datanode.newSocket();
            try {
                int timeoutValue = datanode.socketTimeout + (datanode.socketReadExtentionTimeout * numTargets);
                int writeTimeout = datanode.socketWriteTimeout
                        + (datanode.socketWriteExtentionTimeout * numTargets);
                NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
                mirrorSock.setSoTimeout(timeoutValue);
                mirrorSock.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE);
                mirrorOut = new DataOutputStream(new BufferedOutputStream(
                        NetUtils.getOutputStream(mirrorSock, writeTimeout), SMALL_BUFFER_SIZE));
                mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock));

                // Write header: Copied from DFSClient.java!
                mirrorOut.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                mirrorOut.write(DataTransferProtocol.OP_WRITE_BLOCK);
                mirrorOut.writeInt(namespaceid);
                mirrorOut.writeLong(block.getBlockId());
                mirrorOut.writeLong(block.getGenerationStamp());
                mirrorOut.writeInt(pipelineSize);
                mirrorOut.writeBoolean(isRecovery);
                Text.writeString(mirrorOut, client);
                mirrorOut.writeBoolean(hasSrcDataNode);
                if (hasSrcDataNode) { // pass src node information
                    srcDataNode.write(mirrorOut);
                }
                mirrorOut.writeInt(targets.length - 1);
                for (int i = 1; i < targets.length; i++) {
                    targets[i].write(mirrorOut);
                }

                blockReceiver.writeChecksumHeader(mirrorOut);
                mirrorOut.flush();

                // read connect ack (only for clients, not for replication req)
                if (client.length() != 0) {
                    firstBadLink = Text.readString(mirrorIn);
                    if (LOG.isDebugEnabled() || firstBadLink.length() > 0) {
                        LOG.info("Datanode " + targets.length + " got response for connect ack "
                                + " from downstream datanode with firstbadlink as " + firstBadLink);
                    }
                }

            } catch (IOException e) {
                if (client.length() != 0) {
                    Text.writeString(replyOut, mirrorNode);
                    replyOut.flush();
                }
                IOUtils.closeStream(mirrorOut);
                mirrorOut = null;
                IOUtils.closeStream(mirrorIn);
                mirrorIn = null;
                IOUtils.closeSocket(mirrorSock);
                mirrorSock = null;
                if (client.length() > 0) {
                    throw e;
                } else {
                    LOG.info(datanode.getDatanodeInfo() + ":Exception transferring block " + block
                            + " to mirror " + mirrorNode + ". continuing without the mirror.\n"
                            + StringUtils.stringifyException(e));
                }
            }
        }

        // send connect ack back to source (only for clients)
        if (client.length() != 0) {
            if (LOG.isDebugEnabled() || firstBadLink.length() > 0) {
                LOG.info("Datanode " + targets.length + " forwarding connect ack to upstream firstbadlink is "
                        + firstBadLink);
            }
            Text.writeString(replyOut, firstBadLink);
            replyOut.flush();
        }

        // receive the block and mirror to the next target
        String mirrorAddr = (mirrorSock == null) ? null : mirrorNode;
        long totalReceiveSize = blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut, mirrorAddr, null,
                targets.length);

        // if this write is for a replication request (and not
        // from a client), then confirm block. For client-writes,
        // the block is finalized in the PacketResponder.
        if (client.length() == 0) {
            datanode.notifyNamenodeReceivedBlock(namespaceid, block, null);
            LOG.info("Received block " + block + " src: " + remoteAddress + " dest: " + localAddress
                    + " of size " + block.getNumBytes());
        } else {
            // Log the fact that the block has been received by this datanode and
            // has been written to the local disk on this datanode.
            LOG.info("Received Block " + block + " src: " + remoteAddress + " dest: " + localAddress
                    + " of size " + block.getNumBytes() + " and written to local disk");
        }

        if (datanode.blockScanner != null) {
            datanode.blockScanner.addBlock(namespaceid, block);
        }

        long writeDuration = System.currentTimeMillis() - startTime;
        datanode.myMetrics.bytesWrittenLatency.inc(writeDuration);
        if (totalReceiveSize > KB_RIGHT_SHIFT_MIN) {
            datanode.myMetrics.bytesWrittenRate.inc((int) (totalReceiveSize >> KB_RIGHT_SHIFT_BITS),
                    writeDuration);
        }

    } catch (IOException ioe) {
        LOG.info("writeBlock " + block + " received exception " + ioe);
        throw ioe;
    } finally {
        // close all opened streams
        IOUtils.closeStream(mirrorOut);
        IOUtils.closeStream(mirrorIn);
        IOUtils.closeStream(replyOut);
        IOUtils.closeSocket(mirrorSock);
        IOUtils.closeStream(blockReceiver);
    }
}

From source file:org.ramadda.repository.database.DatabaseManager.java

/**
 * Write an Integer to the stream, using -999999 as the null sentinel
 * (there is no Integer equivalent of Double.NaN to use instead).
 *
 * @param dos the stream to write to
 * @param i the value to write, may be null
 *
 * @throws Exception on write failure
 */
private void writeInteger(DataOutputStream dos, Integer i) throws Exception {
    if (i == null) {
        //            dos.writeInt(Integer.NaN);
        dos.writeInt(-999999);
    } else {
        dos.writeInt(i.intValue());
    }
}
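
Since writeInt cannot represent null and Integer has no NaN, -999999 serves as the null sentinel. A sketch of the matching reader:

private Integer readInteger(DataInputStream dis) throws Exception {
    int value = dis.readInt();
    return (value == -999999) ? null : Integer.valueOf(value); // map the sentinel back to null
}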

From source file:com.android.launcher2.Launcher.java

private static void writeConfiguration(Context context, LocaleConfiguration configuration) {
    DataOutputStream out = null;
    try {
        out = new DataOutputStream(context.openFileOutput(PREFERENCES, MODE_PRIVATE));
        out.writeUTF(configuration.locale);
        out.writeInt(configuration.mcc);
        out.writeInt(configuration.mnc);
        out.flush();
    } catch (FileNotFoundException e) {
        // Ignore
    } catch (IOException e) {
        //noinspection ResultOfMethodCallIgnored
        context.getFileStreamPath(PREFERENCES).delete();
    } finally {
        if (out != null) {
            try {
                out.close();
            } catch (IOException e) {
                // Ignore
            }
        }
    }
}
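
The reader must consume the fields in exactly the order they were written: one readUTF followed by two readInt calls. A sketch of the counterpart, assuming the same LocaleConfiguration fields:

private static void readConfiguration(Context context, LocaleConfiguration configuration) {
    DataInputStream in = null;
    try {
        in = new DataInputStream(context.openFileInput(PREFERENCES));
        configuration.locale = in.readUTF();
        configuration.mcc = in.readInt();
        configuration.mnc = in.readInt();
    } catch (IOException e) {
        // Ignore, keep defaults
    } finally {
        if (in != null) {
            try {
                in.close();
            } catch (IOException e) {
                // Ignore
            }
        }
    }
}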

From source file:com.symbian.driver.core.controller.tasks.TEFTask.java

/**
 * @param aArgs arguments for the TEST_EXECUTE process
 * @param aTask the task being run
 * @return true if the UCC run completed without errors.
 */
private boolean runUCC(List<String> aArgs, Task aTask) {
    boolean lReturn = true;
    Socket lUccSocket = null;
    DataOutputStream lSocketOut = null;
    DataInputStream lSocketIn = null;
    int lRunNumber = 0;
    int lUccPort = -1;
    String lUccAddress = null;
    IDeviceComms.ISymbianProcess lProcess = null;

    try {
        String[] lUccSplit = TDConfig.getInstance().getPreference(TDConfig.UCC_IP_ADDRESS).split(":");
        lRunNumber = TDConfig.getInstance().getPreferenceInteger(TDConfig.RUN_NUMBER);

        lUccAddress = lUccSplit[0];
        lUccPort = Integer.parseInt(lUccSplit[1]);
    } catch (ParseException lParseException) {
        LOGGER.log(Level.SEVERE, "Could not get configuration for UCC.", lParseException);
        iExceptions.put(lParseException, ESeverity.ERROR);
        lReturn = false;
    } catch (NumberFormatException lNumberFormatException) {
        LOGGER.log(Level.SEVERE, "Could not parse the port number for UCC.", lNumberFormatException);
        iExceptions.put(lNumberFormatException, ESeverity.ERROR);
        lReturn = false;
    }

    if (lUccAddress == null || lUccAddress.equals("") || lUccPort < 0) {
        iExceptions.put(
                new UnknownHostException("Please specify a valid UCC address for example 192.168.0.1:3000"),
                ESeverity.ERROR);
        return false;
    }

    // Run the test
    try {
        LOGGER.info("Running UCC with:\n\tAddress: " + lUccAddress + "\n\tUCC Port:" + lUccPort);

        lUccSocket = new Socket(lUccAddress, lUccPort);
        lSocketOut = new DataOutputStream(lUccSocket.getOutputStream());
        lSocketIn = new DataInputStream(lUccSocket.getInputStream());

        LOGGER.fine("Starting UCC while still polling");
        lProcess = iDeviceProxy.createSymbianProcess();
        if (lProcess != null) {
            // run and don't wait
            if (!lProcess.runCommand(TEST_EXECUTE, aArgs, aTask.getTimeout() * 1000, false)) {
                iExceptions.put(new Exception("Failed to run TEF for UCC."), ESeverity.ERROR);
                lReturn = false;
            }

            // Tell UCC that the test has started.
            LOGGER.fine("Writing to UCC socket: " + lRunNumber);
            lSocketOut.writeInt(lRunNumber);
            lSocketOut.flush();

            int lUCCReply = lSocketIn.readInt();
            LOGGER.fine("UCC Reply: " + lUCCReply);
        }

    } catch (UnknownHostException lUnknownHostException) {
        LOGGER.log(Level.SEVERE, "Could not find UCC host", lUnknownHostException);
        iExceptions.put(lUnknownHostException, ESeverity.ERROR);
        return false;
    } catch (IOException lIOException) {
        LOGGER.log(Level.SEVERE,
                "IO Exception during UCC testing: " + lIOException.getMessage() + (lUccSocket != null
                        ? "\nUcc Socket Connected: " + lUccSocket.isConnected() + "\nUcc Socket InputShutdown: "
                                + lUccSocket.isInputShutdown() + "\nUcc Socket OutputShutdown:"
                                + lUccSocket.isOutputShutdown() + "\nUcc Socket Bound: " + lUccSocket.isBound()
                        : "\nUcc Socket is NULL"),
                lIOException);
        iExceptions.put(lIOException, ESeverity.ERROR);
        return false;
    } finally {

        // Close UCC
        if (lSocketOut != null) {
            try {
                LOGGER.log(Level.FINE, "Closing Socket Out.");
                lUccSocket.shutdownInput();
                lUccSocket.shutdownOutput();
                lSocketOut.close();
            } catch (IOException lIOException) {
                LOGGER.log(Level.SEVERE, "Could not close UCC Out socket.", lIOException);
                iExceptions.put(lIOException, ESeverity.ERROR);
            }
        }
        if (lSocketIn != null) {
            try {
                LOGGER.log(Level.FINE, "Closing Socket In.");
                lSocketIn.close();
            } catch (IOException lIOException) {
                LOGGER.log(Level.SEVERE, "Could not close UCC In socket.", lIOException);
                iExceptions.put(lIOException, ESeverity.ERROR);
            }
        }
        if (lUccSocket != null) {
            try {
                LOGGER.log(Level.FINE, "Closing Socket UCC.");
                lUccSocket.close();
            } catch (IOException lIOException) {
                LOGGER.log(Level.SEVERE, "Could not close UCC socket.", lIOException);
                iExceptions.put(lIOException, ESeverity.ERROR);
            }
        }

        if (lUccSocket != null && !lUccSocket.isClosed()) {
            LOGGER.warning("Could not close the UCC sockets properly.");
        }

        lSocketOut = null;
        lSocketIn = null;
        lUccSocket = null;

        // Poll TEF Test
        if (lProcess == null || !lProcess.join()) {
            iExceptions.put(new Exception("Could not join UCC-TEF Process"), ESeverity.ERROR);
            lReturn = false;
        }

    }
    return lReturn;
}
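
The handshake with UCC is a single int in each direction: the driver sends the run number with writeInt and blocks on readInt for the reply. A sketch of what the UCC end of that exchange might look like (hypothetical, not part of the original driver):

ServerSocket server = new ServerSocket(3000); // the port configured in UCC_IP_ADDRESS
Socket client = server.accept();
DataInputStream in = new DataInputStream(client.getInputStream());
DataOutputStream out = new DataOutputStream(client.getOutputStream());

int runNumber = in.readInt(); // sent by lSocketOut.writeInt(lRunNumber)
out.writeInt(0);              // reply code consumed by lSocketIn.readInt()
out.flush();
client.close();
server.close();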