Example usage for java.io DataOutputStream writeShort

Introduction

On this page you can find usage examples for java.io DataOutputStream writeShort.

Prototype

public final void writeShort(int v) throws IOException 

Document

Writes a short to the underlying output stream as two bytes, high byte first.
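
Before the real-world examples below, a minimal, self-contained sketch of the round trip: writeShort stores the low 16 bits of its int argument, high byte first, and DataInputStream.readShort or readUnsignedShort reads the pair back.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class WriteShortRoundTrip {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        try (DataOutputStream dos = new DataOutputStream(baos)) {
            dos.writeShort(0x1234); // written as 0x12, 0x34 (big-endian)
            dos.writeShort(-1);     // written as 0xFF, 0xFF
        }

        DataInputStream dis = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
        short signed = dis.readShort();         // 0x1234
        int unsigned = dis.readUnsignedShort(); // 65535
        System.out.println(signed + " " + unsigned);
    }
}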

Usage

From source file: org.openymsg.network.HTTPConnectionHandler.java

/**
 * The only time Yahoo can actually send us any packets is when we send it some. Yahoo encodes its packets in a POST
 * body - the format is the same binary representation used for direct connections (with one or two extra codes).
 * After posting a packet, the connection will receive an HTTP response whose payload consists of four bytes followed
 * by zero or more packets. The first byte of the four is a count of packets encoded in the following body.
 * 
 * Each incoming packet is transferred to a queue, where receivePacket() takes them off - thus preserving the effect
 * that input and output packets are being received independently, as with other connection handlers. As
 * readPacket() can throw an exception, these are caught and transferred onto the queue too, then rethrown by
 * receivePacket().
 */
@Override
synchronized void sendPacket(PacketBodyBuffer body, ServiceType service, long status, long sessionID)
        throws IOException, IllegalStateException {
    if (!connected)
        throw new IllegalStateException("Not logged in");

    if (filterOutput(body, service))
        return;
    byte[] b = body.getBuffer();

    Socket soc = new Socket(proxyHost, proxyPort);
    PushbackInputStream pbis = new PushbackInputStream(soc.getInputStream());
    DataOutputStream dos = new DataOutputStream(soc.getOutputStream());

    // HTTP header
    dos.writeBytes(HTTP_HEADER_POST);
    dos.writeBytes("Content-length: " + (b.length + NetworkConstants.YMSG9_HEADER_SIZE) + NetworkConstants.END);
    dos.writeBytes(HTTP_HEADER_AGENT);
    dos.writeBytes(HTTP_HEADER_HOST);
    if (HTTP_HEADER_PROXY_AUTH != null)
        dos.writeBytes(HTTP_HEADER_PROXY_AUTH);
    if (cookie != null)
        dos.writeBytes("Cookie: " + cookie + NetworkConstants.END);
    dos.writeBytes(NetworkConstants.END);
    // YMSG9 header
    dos.write(NetworkConstants.MAGIC, 0, 4);
    dos.write(NetworkConstants.VERSION_HTTP, 0, 4);
    dos.writeShort(b.length & 0xffff);
    dos.writeShort(service.getValue() & 0xffff);
    dos.writeInt((int) (status & 0xffffffff));
    dos.writeInt((int) (sessionID & 0xffffffff));
    // YMSG9 body
    dos.write(b, 0, b.length);
    dos.flush();

    // HTTP response header
    String s = readLine(pbis);
    if (s == null || s.indexOf(" 200 ") < 0) // Not "HTTP/1.0 200 OK"
    {
        throw new IOException("HTTP request didn't return OK (200): " + s);
    }
    while (s != null && s.trim().length() > 0)
        // Read past header
        s = readLine(pbis);
    // Payload count
    byte[] code = new byte[4];
    int res = pbis.read(code, 0, 4); // Packet count (Little-Endian?)
    if (res < 4) {
        throw new IOException("Premature end of HTTP data");
    }
    int count = code[0];
    // Payload body
    YMSG9InputStream yip = new YMSG9InputStream(pbis);
    YMSG9Packet pkt;
    for (int i = 0; i < count; i++) {
        pkt = yip.readPacket();
        if (!filterInput(pkt)) {
            if (!packets.add(pkt)) {
                throw new IllegalArgumentException("Unable to add data to the packetQueue!");
            }
        }
    }

    soc.close();

    // Reset idle timeout
    lastFetch = System.currentTimeMillis();
}
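
A side note on the dos.writeShort(b.length & 0xffff) call above: writeShort already keeps only the low 16 bits of its int argument, so the mask documents intent rather than changing the bytes that are written. A minimal self-contained sketch (the class name is illustrative):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Arrays;

public class WriteShortMaskDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream masked = new ByteArrayOutputStream();
        ByteArrayOutputStream unmasked = new ByteArrayOutputStream();
        new DataOutputStream(masked).writeShort(0x12345 & 0xffff);
        new DataOutputStream(unmasked).writeShort(0x12345);
        // Both streams contain the same two bytes: 0x23, 0x45
        System.out.println(Arrays.equals(masked.toByteArray(), unmasked.toByteArray())); // true
    }
}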

From source file: org.apache.hadoop.hdfs.server.datanode.DataXceiver.java

/**
 * Read a block from the disk and then sends it to a destination.
 * @param in The stream to read from
 * @throws IOException
 */
private void copyBlock(DataInputStream in) throws IOException {
    // Read in the header
    long blockId = in.readLong(); // read block id
    Block block = new Block(blockId, 0, in.readLong());
    Token<BlockTokenIdentifier> accessToken = new Token<BlockTokenIdentifier>();
    accessToken.readFields(in);
    if (datanode.isBlockTokenEnabled) {
        try {
            datanode.blockTokenSecretManager.checkAccess(accessToken, null, block,
                    BlockTokenSecretManager.AccessMode.COPY);
        } catch (InvalidToken e) {
            LOG.warn("Invalid access token in request from " + remoteAddress + " for OP_COPY_BLOCK for block "
                    + block);
            sendResponse(s, (short) DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN,
                    datanode.socketWriteTimeout);
            return;
        }
    }

    if (!dataXceiverServer.balanceThrottler.acquire()) { // not able to start
        LOG.info("Not able to copy block " + blockId + " to " + s.getRemoteSocketAddress()
                + " because threads quota is exceeded.");
        sendResponse(s, (short) DataTransferProtocol.OP_STATUS_ERROR, datanode.socketWriteTimeout);
        return;
    }

    BlockSender blockSender = null;
    DataOutputStream reply = null;
    boolean isOpSuccess = true;

    try {
        // check if the block exists or not
        blockSender = new BlockSender(block, 0, -1, false, false, false, datanode);

        // set up response stream
        OutputStream baseStream = NetUtils.getOutputStream(s, datanode.socketWriteTimeout);
        reply = new DataOutputStream(new BufferedOutputStream(baseStream, SMALL_BUFFER_SIZE));

        // send status first
        reply.writeShort((short) DataTransferProtocol.OP_STATUS_SUCCESS);
        // send block content to the target
        long read = blockSender.sendBlock(reply, baseStream, dataXceiverServer.balanceThrottler);

        datanode.myMetrics.incrBytesRead((int) read);
        datanode.myMetrics.incrBlocksRead();

        LOG.info("Copied block " + block + " to " + s.getRemoteSocketAddress());
    } catch (IOException ioe) {
        isOpSuccess = false;
        throw ioe;
    } finally {
        dataXceiverServer.balanceThrottler.release();
        if (isOpSuccess) {
            try {
                // send one last byte to indicate that the resource is cleaned.
                reply.writeChar('d');
            } catch (IOException ignored) {
            }
        }
        IOUtils.closeStream(reply);
        IOUtils.closeStream(blockSender);
    }
}

From source file: org.opensc.pkcs15.token.impl.CardOSToken.java

@Override
public DF createDF(int path, long size, DFAcl acl) throws IOException {

    if (size < 0 || size > 65535L)
        throw new PKCS15Exception(
                "Illegal size [" + size + "] for DF ["
                        + PathHelper.formatPathAppend(this.currentFile.getPath(), path) + "].",
                PKCS15Exception.ERROR_INVALID_PARAMETER);

    ByteArrayOutputStream bos = new ByteArrayOutputStream(256);
    DataOutputStream dos = new DataOutputStream(bos);

    dos.write(0x62);
    // length of subsequent FCP data field, to be filled at end.
    dos.write(0x00);

    // fill in FCP data
    //  DF body size
    dos.write(0x81);
    dos.write(0x02);
    dos.writeShort((int) size);

    // File descriptor: 38h DF
    dos.write(0x82);
    dos.write(0x01);
    dos.write(0x38);

    // File ID
    dos.write(0x83);
    dos.write(0x02);
    dos.writeShort(path);

    // Default file status.
    dos.write(0x85);
    dos.write(0x01);
    dos.write(0x00);

    // ACL definitions
    dos.write(0x86);
    dos.write(0x08);
    dos.write(acl.getAcLifeCycle());
    dos.write(acl.getAcUpdate());
    dos.write(acl.getAcAppend());
    dos.write(acl.getAcDeactivate());
    dos.write(acl.getAcActivate());
    dos.write(acl.getAcDelete());
    dos.write(acl.getAcAdmin());
    dos.write(acl.getAcCreate());

    // get command data.
    dos.flush();
    byte[] data = bos.toByteArray();

    // fill in the length of the FCP data field that was left as 0x00 above.
    data[1] = (byte) (data.length - 2);

    // CREATE FILE, P1=0x00, P2=0x00, with the FCP data as the command payload.
    CommandAPDU cmd = new CommandAPDU(0x00, 0xE0, 0x00, 0x00, data, DEFAULT_LE);

    try {
        ResponseAPDU resp = this.channel.transmit(cmd);

        if (resp.getSW() != PKCS15Exception.ERROR_OK)
            throw new PKCS15Exception("CREATE FILE for DF ["
                    + PathHelper.formatPathAppend(this.currentFile.getPath(), path) + "] returned error",
                    resp.getSW());

    } catch (CardException e) {
        throw new PKCS15Exception("Error sending CREATE FILE for DF ["
                + PathHelper.formatPathAppend(this.currentFile.getPath(), path) + "]", e);
    }

    return new DF(new TokenPath(this.currentFile.getPath(), path), size, acl);
}

From source file: org.opensc.pkcs15.token.impl.CardOSToken.java

@Override
public EF createEF(int path, long size, EFAcl acl) throws IOException {

    if (size < 0 || size > 65535L)
        throw new PKCS15Exception(
                "Illegal size [" + size + "] for EF ["
                        + PathHelper.formatPathAppend(this.currentFile.getPath(), path) + "].",
                PKCS15Exception.ERROR_INVALID_PARAMETER);

    ByteArrayOutputStream bos = new ByteArrayOutputStream(256);
    DataOutputStream dos = new DataOutputStream(bos);

    dos.write(0x62);
    // length of subsequent FCP data field, to be filled at end.
    dos.write(0x00);

    // *** fill in FCP data
    //   Only EF:      Net size in bytes
    dos.write(0x80);
    dos.write(0x02);
    dos.writeShort((int) size);

    // File descriptor: 01h BINARY
    dos.write(0x82);
    dos.write(0x01);
    dos.write(0x01);

    // File ID
    dos.write(0x83);
    dos.write(0x02);
    dos.writeShort(path);

    // Default file status.
    dos.write(0x85);
    dos.write(0x01);
    dos.write(0x00);

    // ACL definitions
    dos.write(0x86);
    dos.write(0x09);
    dos.write(acl.getAcRead());
    dos.write(acl.getAcUpdate());
    dos.write(acl.getAcAppend());
    dos.write(acl.getAcDeactivate());
    dos.write(acl.getAcActivate());
    dos.write(acl.getAcDelete());
    dos.write(acl.getAcAdmin());
    dos.write(acl.getAcIncrease());
    dos.write(acl.getAcDecrease());

    // *** get command data.
    dos.flush();
    byte[] data = bos.toByteArray();

    // fill in the length of the FCP data field that was left as 0x00 above.
    data[1] = (byte) (data.length - 2);

    // CREATE FILE, P1=0x00, P2=0x00, with the FCP data as the command payload.
    CommandAPDU cmd = new CommandAPDU(0x00, 0xE0, 0x00, 0x00, data, DEFAULT_LE);

    try {
        ResponseAPDU resp = this.channel.transmit(cmd);

        if (resp.getSW() != PKCS15Exception.ERROR_OK)
            throw new PKCS15Exception("CREATE FILE for EF ["
                    + PathHelper.formatPathAppend(this.currentFile.getPath(), path) + "] returned error",
                    resp.getSW());

    } catch (CardException e) {
        throw new PKCS15Exception("Error sending CREATE FILE for EF ["
                + PathHelper.formatPathAppend(this.currentFile.getPath(), path) + "]", e);
    }

    return new EF(new TokenPath(this.currentFile.getPath(), path), size, acl);
}

From source file: org.apache.hadoop.hbase.security.visibility.ExpAsStringVisibilityLabelServiceImpl.java

private Tag createTag(ExpressionNode node) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(baos);
    List<String> labels = new ArrayList<String>();
    List<String> notLabels = new ArrayList<String>();
    extractLabels(node, labels, notLabels);
    Collections.sort(labels);
    Collections.sort(notLabels);
    // Write the NOT labels first, followed by the normal labels.
    // Each label is written as a short length followed by the label bytes;
    // for a NOT node the length is written as a negative value.
    for (String label : notLabels) {
        byte[] bLabel = Bytes.toBytes(label);
        short length = (short) bLabel.length;
        length = (short) (-1 * length);
        dos.writeShort(length);
        dos.write(bLabel);
    }
    for (String label : labels) {
        byte[] bLabel = Bytes.toBytes(label);
        dos.writeShort(bLabel.length);
        dos.write(bLabel);
    }
    return new Tag(VISIBILITY_TAG_TYPE, baos.toByteArray());
}
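
The method above only shows the encoding direction. A hypothetical decoder for the same layout, assuming the signed-short-length scheme described in the comments and UTF-8 labels (which is what HBase's Bytes.toBytes produces); the class and method names are made up for illustration:

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.List;

public class VisibilityTagDecoderSketch {
    // Each entry is a signed short length (negative for a NOT label)
    // followed by that many label bytes.
    static void parseLabels(byte[] payload, List<String> labels, List<String> notLabels) throws IOException {
        DataInputStream dis = new DataInputStream(new ByteArrayInputStream(payload));
        while (dis.available() > 0) {
            short length = dis.readShort();
            byte[] b = new byte[Math.abs(length)];
            dis.readFully(b);
            String label = new String(b, StandardCharsets.UTF_8);
            if (length < 0) {
                notLabels.add(label);
            } else {
                labels.add(label);
            }
        }
    }
}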

From source file: com.zpci.firstsignhairclipdemo.MainActivity.java

public void savewavefile(byte[] ra) {
    //prepend 44 byte wave header to data

    int sampleRate = 8000; // audio sample rate is 8000 SPS
    int samples = ra.length; // number of 8-bit samples in the recorded data
    short bitsPerSample = 8; // one byte per sample
    int filesize = samples + 44; // 44-byte header plus audio data
    int fmtChunkSize = 16; // size of 'fmt' chunk
    short channels = 1; // mono
    int byteRate = sampleRate * channels * bitsPerSample / 8; // will be 8K for us
    short format = 1; // 1 == uncompressed pcm
    short blockalign = (short) (channels * bitsPerSample / 8); // bytes per sample
    int audiolen = samples * channels * bitsPerSample / 8; // length of audio in bytes

    try {
        //OutputStream os = openFileOutput("diagaudio.wav", Context.MODE_PRIVATE);
        String state = Environment.getExternalStorageState();
        Log.d(TAG, "External storage state: " + state);
        if (Environment.MEDIA_MOUNTED.equals(state)) {

            //create firstsign directory
            File rootPath = new File(Environment.getExternalStorageDirectory(), "firstsign");
            if (!rootPath.exists()) {
                rootPath.mkdirs();
                Log.d(TAG, "mkdirs");
            }
            File file = new File(rootPath, "hairclipaudio.wav");
            file.createNewFile();
            OutputStream os = new FileOutputStream(file);
            BufferedOutputStream bos = new BufferedOutputStream(os);
            DataOutputStream wf = new DataOutputStream(bos);

            wf.write("RIFF".getBytes());
            wf.writeInt(Integer.reverseBytes(filesize - 8));
            wf.write("WAVE".getBytes());
            wf.write("fmt ".getBytes());
            wf.writeInt(Integer.reverseBytes(fmtChunkSize));
            wf.writeShort(Short.reverseBytes(format));
            wf.writeShort(Short.reverseBytes(channels));
            wf.writeInt(Integer.reverseBytes(sampleRate));
            wf.writeInt(Integer.reverseBytes(byteRate));
            wf.writeShort(Short.reverseBytes(blockalign));
            wf.writeShort(Short.reverseBytes(bitsPerSample));
            wf.write("data".getBytes());
            wf.writeInt(Integer.reverseBytes(audiolen));
            wf.write(ra);

            wf.close();
            bos.close();
            os.close();

            Log.d(TAG, "wavefile write complete");
        } else {
            Toast.makeText(this, "SDCard not mounted", Toast.LENGTH_LONG).show();
        } //what do i do?

    } catch (Exception e) {
        Log.e(TAG, "exception in savewavefile");
        e.printStackTrace();
    }

}
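
DataOutputStream always writes big-endian, while a RIFF/WAVE header is little-endian, which is why every writeShort and writeInt above goes through Short.reverseBytes or Integer.reverseBytes. A small sketch of hypothetical helpers that keep the byte swap in one place; an alternative is to assemble the header in a java.nio.ByteBuffer set to ByteOrder.LITTLE_ENDIAN and write the resulting array once.

import java.io.DataOutputStream;
import java.io.IOException;

// Hypothetical helpers: DataOutputStream has no little-endian mode, so the
// swap has to happen before writeShort/writeInt is called.
final class LittleEndianWrites {
    static void writeShortLE(DataOutputStream out, short v) throws IOException {
        out.writeShort(Short.reverseBytes(v));
    }

    static void writeIntLE(DataOutputStream out, int v) throws IOException {
        out.writeInt(Integer.reverseBytes(v));
    }
}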

From source file: com.jivesoftware.os.amza.service.replication.http.HttpRowsTaker.java

private void flushQueues(RingHost ringHost, Ackable ackable, long currentVersion) throws Exception {
    Map<VersionedPartitionName, RowsTakenPayload> rowsTaken;
    PongPayload pong;
    ackable.semaphore.acquire(Short.MAX_VALUE);
    try {
        rowsTaken = ackable.rowsTakenPayloads.getAndSet(Maps.newConcurrentMap());
        pong = ackable.pongPayloads.getAndSet(null);
    } finally {
        ackable.semaphore.release(Short.MAX_VALUE);
    }
    if (rowsTaken != null && !rowsTaken.isEmpty()) {
        LOG.inc("flush>rowsTaken>pow>" + UIO.chunkPower(rowsTaken.size(), 0));
    }

    if (rowsTaken != null && !rowsTaken.isEmpty() || pong != null) {
        flushExecutor.submit(() -> {
            try {
                String endpoint = "/amza/ackBatch";
                ringClient.call("",
                        new ConnectionDescriptorSelectiveStrategy(
                                new HostPort[] { new HostPort(ringHost.getHost(), ringHost.getPort()) }),
                        "ackBatch", httpClient -> {

                            HttpResponse response = httpClient.postStreamableRequest(endpoint, out -> {
                                try {
                                    DataOutputStream dos = new DataOutputStream(out);
                                    if (rowsTaken.isEmpty()) {
                                        dos.write((byte) 0); // hasMore for rowsTaken stream
                                    } else {
                                        for (Entry<VersionedPartitionName, RowsTakenPayload> e : rowsTaken
                                                .entrySet()) {
                                            dos.write((byte) 1); // hasMore for rowsTaken stream
                                            VersionedPartitionName versionedPartitionName = e.getKey();

                                            byte[] bytes = versionedPartitionName.toBytes();
                                            dos.writeShort(bytes.length);
                                            dos.write(bytes);

                                            RowsTakenPayload rowsTakenPayload = e.getValue();
                                            bytes = rowsTakenPayload.ringMember.toBytes();
                                            dos.writeShort(bytes.length);
                                            dos.write(bytes);

                                            dos.writeLong(rowsTakenPayload.takeSessionId);
                                            dos.writeLong(rowsTakenPayload.takeSharedKey);
                                            dos.writeLong(rowsTakenPayload.txId);
                                            dos.writeLong(rowsTakenPayload.leadershipToken);
                                        }
                                        dos.write((byte) 0); // EOS for rowsTaken stream
                                    }

                                    if (pong == null) {
                                        dos.write((byte) 0); // has pong
                                    } else {
                                        dos.write((byte) 1); // has pong
                                        byte[] bytes = pong.ringMember.toBytes();
                                        dos.writeShort(bytes.length);
                                        dos.write(bytes);

                                        dos.writeLong(pong.takeSessionId);
                                        dos.writeLong(pong.takeSharedKey);

                                    }
                                } catch (Exception x) {
                                    throw new RuntimeException("Failed while streaming ackBatch.", x);
                                } finally {
                                    out.flush();
                                    out.close();
                                }

                            }, null);

                            if (response.getStatusCode() < 200 || response.getStatusCode() >= 300) {
                                throw new NonSuccessStatusCodeException(response.getStatusCode(),
                                        response.getStatusReasonPhrase());
                            }
                            Boolean result = (Boolean) conf.asObject(response.getResponseBody());
                            return new ClientResponse<>(result, true);
                        });

            } catch (Exception x) {
                LOG.warn("Failed to deliver acks for remote:{}", new Object[] { ringHost }, x);
            } finally {
                ackable.running.set(false);
                LOG.inc("flush>version>consume>" + name);
                synchronized (flushVersion) {
                    if (currentVersion != flushVersion.get()) {
                        flushVersion.notify();
                    }
                }
            }
        });
    } else {
        ackable.running.set(false);
        LOG.inc("flush>version>consume>" + name);
        synchronized (flushVersion) {
            if (currentVersion != flushVersion.get()) {
                flushVersion.notify();
            }
        }
    }
}
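
Both writeShort calls above serve as length prefixes for variable-size byte arrays. A hypothetical reader for the rowsTaken portion of that stream (the real consumer sits behind the /amza/ackBatch endpoint; the names here are assumptions for illustration):

import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;

public class AckBatchReaderSketch {
    // Stream layout written above: a 1-byte hasMore flag, then per entry two
    // writeShort-length-prefixed byte arrays and four longs, terminated by a 0 flag.
    static void readRowsTaken(InputStream in) throws IOException {
        DataInputStream dis = new DataInputStream(in);
        while (dis.readByte() == 1) {
            byte[] versionedPartitionName = new byte[dis.readShort()];
            dis.readFully(versionedPartitionName);
            byte[] ringMember = new byte[dis.readShort()];
            dis.readFully(ringMember);
            long takeSessionId = dis.readLong();
            long takeSharedKey = dis.readLong();
            long txId = dis.readLong();
            long leadershipToken = dis.readLong();
            // ... hand one acknowledged partition to the caller ...
        }
    }
}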

From source file: org.apache.hadoop.hdfs.server.datanode.DataXceiver.java

/**
 * Write a block to disk.
 * 
 * @param in The stream to read from
 * @throws IOException
 */
private void writeBlock(DataInputStream in) throws IOException {
    DatanodeInfo srcDataNode = null;
    LOG.debug("writeBlock receive buf size " + s.getReceiveBufferSize() + " tcp no delay " + s.getTcpNoDelay());
    //
    // Read in the header
    //
    Block block = new Block(in.readLong(), dataXceiverServer.estimateBlockSize, in.readLong());
    LOG.info("Receiving block " + block + " src: " + remoteAddress + " dest: " + localAddress);
    int pipelineSize = in.readInt(); // num of datanodes in entire pipeline
    boolean isRecovery = in.readBoolean(); // is this part of recovery?
    String client = Text.readString(in); // working on behalf of this client
    boolean hasSrcDataNode = in.readBoolean(); // is src node info present
    if (hasSrcDataNode) {
        srcDataNode = new DatanodeInfo();
        srcDataNode.readFields(in);
    }
    int numTargets = in.readInt();
    if (numTargets < 0) {
        throw new IOException("Mislabelled incoming datastream.");
    }
    DatanodeInfo targets[] = new DatanodeInfo[numTargets];
    for (int i = 0; i < targets.length; i++) {
        DatanodeInfo tmp = new DatanodeInfo();
        tmp.readFields(in);
        targets[i] = tmp;
    }
    Token<BlockTokenIdentifier> accessToken = new Token<BlockTokenIdentifier>();
    accessToken.readFields(in);
    DataOutputStream replyOut = null; // stream to prev target
    replyOut = new DataOutputStream(NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
    if (datanode.isBlockTokenEnabled) {
        try {
            datanode.blockTokenSecretManager.checkAccess(accessToken, null, block,
                    BlockTokenSecretManager.AccessMode.WRITE);
        } catch (InvalidToken e) {
            try {
                if (client.length() != 0) {
                    replyOut.writeShort((short) DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN);
                    Text.writeString(replyOut, datanode.dnRegistration.getName());
                    replyOut.flush();
                }
                throw new IOException("Access token verification failed, for client " + remoteAddress
                        + " for OP_WRITE_BLOCK for block " + block);
            } finally {
                IOUtils.closeStream(replyOut);
            }
        }
    }

    DataOutputStream mirrorOut = null; // stream to next target
    DataInputStream mirrorIn = null; // reply from next target
    Socket mirrorSock = null; // socket to next target
    BlockReceiver blockReceiver = null; // responsible for data handling
    String mirrorNode = null; // the name:port of next target
    String firstBadLink = ""; // first datanode that failed in connection setup
    short mirrorInStatus = (short) DataTransferProtocol.OP_STATUS_SUCCESS;
    try {
        // open a block receiver and check if the block does not exist
        blockReceiver = new BlockReceiver(block, in, s.getRemoteSocketAddress().toString(),
                s.getLocalSocketAddress().toString(), isRecovery, client, srcDataNode, datanode);

        //
        // Open network conn to backup machine, if 
        // appropriate
        //
        if (targets.length > 0) {
            InetSocketAddress mirrorTarget = null;
            // Connect to backup machine
            mirrorNode = targets[0].getName();
            mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
            mirrorSock = datanode.newSocket();
            try {
                int timeoutValue = datanode.socketTimeout + (HdfsConstants.READ_TIMEOUT_EXTENSION * numTargets);
                int writeTimeout = datanode.socketWriteTimeout
                        + (HdfsConstants.WRITE_TIMEOUT_EXTENSION * numTargets);
                NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
                mirrorSock.setSoTimeout(timeoutValue);
                mirrorSock.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE);
                mirrorOut = new DataOutputStream(new BufferedOutputStream(
                        NetUtils.getOutputStream(mirrorSock, writeTimeout), SMALL_BUFFER_SIZE));
                mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock));

                // Write header: Copied from DFSClient.java!
                mirrorOut.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                mirrorOut.write(DataTransferProtocol.OP_WRITE_BLOCK);
                mirrorOut.writeLong(block.getBlockId());
                mirrorOut.writeLong(block.getGenerationStamp());
                mirrorOut.writeInt(pipelineSize);
                mirrorOut.writeBoolean(isRecovery);
                Text.writeString(mirrorOut, client);
                mirrorOut.writeBoolean(hasSrcDataNode);
                if (hasSrcDataNode) { // pass src node information
                    srcDataNode.write(mirrorOut);
                }
                mirrorOut.writeInt(targets.length - 1);
                for (int i = 1; i < targets.length; i++) {
                    targets[i].write(mirrorOut);
                }
                accessToken.write(mirrorOut);

                blockReceiver.writeChecksumHeader(mirrorOut);
                mirrorOut.flush();

                // read connect ack (only for clients, not for replication req)
                if (client.length() != 0) {
                    mirrorInStatus = mirrorIn.readShort();
                    firstBadLink = Text.readString(mirrorIn);
                    if (LOG.isDebugEnabled() || mirrorInStatus != DataTransferProtocol.OP_STATUS_SUCCESS) {
                        LOG.info("Datanode " + targets.length + " got response for connect ack "
                                + " from downstream datanode with firstbadlink as " + firstBadLink);
                    }
                }

            } catch (IOException e) {
                if (client.length() != 0) {
                    replyOut.writeShort((short) DataTransferProtocol.OP_STATUS_ERROR);
                    Text.writeString(replyOut, mirrorNode);
                    replyOut.flush();
                }
                IOUtils.closeStream(mirrorOut);
                mirrorOut = null;
                IOUtils.closeStream(mirrorIn);
                mirrorIn = null;
                IOUtils.closeSocket(mirrorSock);
                mirrorSock = null;
                if (client.length() > 0) {
                    throw e;
                } else {
                    LOG.info(datanode.dnRegistration + ":Exception transferring block " + block + " to mirror "
                            + mirrorNode + ". continuing without the mirror.\n"
                            + StringUtils.stringifyException(e));
                }
            }
        }

        // send connect ack back to source (only for clients)
        if (client.length() != 0) {
            if (LOG.isDebugEnabled() || mirrorInStatus != DataTransferProtocol.OP_STATUS_SUCCESS) {
                LOG.info("Datanode " + targets.length + " forwarding connect ack to upstream firstbadlink is "
                        + firstBadLink);
            }
            replyOut.writeShort(mirrorInStatus);
            Text.writeString(replyOut, firstBadLink);
            replyOut.flush();
        }

        // receive the block and mirror to the next target
        String mirrorAddr = (mirrorSock == null) ? null : mirrorNode;
        blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut, mirrorAddr, null, targets.length);

        // if this write is for a replication request (and not
        // from a client), then confirm block. For client-writes,
        // the block is finalized in the PacketResponder.
        if (client.length() == 0) {
            datanode.notifyNamenodeReceivedBlock(block, DataNode.EMPTY_DEL_HINT);
            LOG.info("Received block " + block + " src: " + remoteAddress + " dest: " + localAddress
                    + " of size " + block.getNumBytes());
        }

        if (datanode.blockScanner != null) {
            datanode.blockScanner.addBlock(block);
        }

    } catch (IOException ioe) {
        LOG.info("writeBlock " + block + " received exception " + ioe);
        throw ioe;
    } finally {
        // close all opened streams
        IOUtils.closeStream(mirrorOut);
        IOUtils.closeStream(mirrorIn);
        IOUtils.closeStream(replyOut);
        IOUtils.closeSocket(mirrorSock);
        IOUtils.closeStream(blockReceiver);
    }
}

From source file: org.echocat.jomon.net.dns.DnsServer.java

public void TCPclient(Socket s) {
    try {
        final int inLength;
        final DataInputStream dataIn;
        final DataOutputStream dataOut;
        final byte[] in;

        final InputStream is = s.getInputStream();
        dataIn = new DataInputStream(is);
        inLength = dataIn.readUnsignedShort();
        in = new byte[inLength];
        dataIn.readFully(in);

        final Message query;
        byte[] response;
        try {
            query = new Message(in);
            response = generateReply(query, in, in.length, s);
            if (response == null) {
                return;
            }
        } catch (final IOException ignored) {
            response = formerrMessage(in);
        }
        dataOut = new DataOutputStream(s.getOutputStream());
        dataOut.writeShort(response.length);
        dataOut.write(response);
    } catch (final IOException e) {
        LOG.warn("TCPclient(" + addrport(s.getLocalAddress(), s.getLocalPort()) + ").", e);
    } finally {
        try {
            s.close();
        } catch (final IOException ignored) {
        }
    }
}
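
DNS over TCP frames each message with a two-byte length prefix, which is exactly what the writeShort above produces and what readUnsignedShort consumed at the top of the method. A minimal sketch of reading one framed message (the class and method names are illustrative):

import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;

public class TcpLengthPrefixSketch {
    // writeShort(length) on one side pairs with readUnsignedShort() on the
    // other, so lengths up to 65535 survive the round trip without sign trouble.
    static byte[] readFramedMessage(InputStream in) throws IOException {
        DataInputStream dis = new DataInputStream(in);
        int length = dis.readUnsignedShort(); // two-byte length prefix
        byte[] message = new byte[length];
        dis.readFully(message);
        return message;
    }
}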

From source file: org.echocat.jomon.net.dns.DnsServer.java

byte[] doAXFR(Name name, Message query, TSIG tsig, TSIGRecord qtsig, Socket s) {
    final Zone zone = _znames.get(name);
    boolean first = true;
    if (zone == null) {
        return errorMessage(query, Rcode.REFUSED);
    }
    final Iterator<?> it = zone.AXFR();
    try {
        final DataOutputStream dataOut;
        dataOut = new DataOutputStream(s.getOutputStream());
        final int id = query.getHeader().getID();
        while (it.hasNext()) {
            final RRset rrset = (RRset) it.next();
            final Message response = new Message(id);
            final Header header = response.getHeader();
            header.setFlag(Flags.QR);
            header.setFlag(Flags.AA);
            addRRset(rrset.getName(), response, rrset, Section.ANSWER, FLAG_DNSSECOK);
            if (tsig != null) {
                tsig.applyStream(response, qtsig, first);
                qtsig = response.getTSIG();
            }
            first = false;
            final byte[] out = response.toWire();
            dataOut.writeShort(out.length);
            dataOut.write(out);
        }
    } catch (final IOException ignored) {
        LOG.info("AXFR failed");
    }
    closeQuietly(s);
    return null;
}