Example usage for java.io DataInputStream readLong

List of usage examples for java.io DataInputStream readLong

Introduction

On this page you can find example usage for java.io DataInputStream readLong.

Prototype

public final long readLong() throws IOException 

Source Link

Document

See the general contract of the readLong method of DataInput.
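Before the real-world examples below, here is a minimal, self-contained sketch (the class name and the timestamp value are illustrative only) showing the basic round trip: writeLong writes eight bytes, high byte first, and readLong reads them back as a big-endian long.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class ReadLongExample {
    public static void main(String[] args) throws IOException {
        // Write a long (eight bytes, big-endian) with DataOutputStream.writeLong ...
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeLong(System.currentTimeMillis()); // e.g. a timestamp, as in several examples below
        out.close();

        // ... and read it back with DataInputStream.readLong().
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        long value = in.readLong();
        System.out.println("Value read back: " + value);
        in.close();
    }
}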

Usage

From source file:org.openmrs.module.odkconnector.serialization.serializer.ListSerializerTest.java

@Test
public void serialize_shouldSerializePatientInformation() throws Exception {
    File file = File.createTempFile("PatientSerialization", "Example");
    GZIPOutputStream outputStream = new GZIPOutputStream(new BufferedOutputStream(new FileOutputStream(file)));

    log.info("Writing to: " + file.getAbsolutePath());

    Cohort cohort = new Cohort();
    cohort.addMember(6);
    cohort.addMember(7);
    cohort.addMember(8);

    List<Patient> patients = new ArrayList<Patient>();
    List<Obs> observations = new ArrayList<Obs>();
    List<Form> forms = new ArrayList<Form>();

    for (Integer patientId : cohort.getMemberIds()) {
        Patient patient = Context.getPatientService().getPatient(patientId);
        observations.addAll(Context.getObsService().getObservationsByPerson(patient));
        patients.add(patient);
    }

    Serializer serializer = HandlerUtil.getPreferredHandler(Serializer.class, List.class);
    serializer.write(outputStream, patients);
    serializer.write(outputStream, observations);
    serializer.write(outputStream, forms);

    outputStream.close();

    GZIPInputStream inputStream = new GZIPInputStream(new BufferedInputStream(new FileInputStream(file)));
    DataInputStream dataInputStream = new DataInputStream(inputStream);

    // total number of patients
    Integer patientCounter = dataInputStream.readInt();
    System.out.println("Patient Counter: " + patientCounter);
    for (int i = 0; i < patientCounter; i++) {
        System.out.println("=================Patient=====================");
        System.out.println("Patient Id: " + dataInputStream.readInt());
        System.out.println("Family Name: " + dataInputStream.readUTF());
        System.out.println("Middle Name: " + dataInputStream.readUTF());
        System.out.println("Last Name: " + dataInputStream.readUTF());
        System.out.println("Gender: " + dataInputStream.readUTF());
        System.out.println("Birth Date: " + dataInputStream.readLong());
        System.out.println("Identifier" + dataInputStream.readUTF());
    }

    Integer obsCounter = dataInputStream.readInt();
    for (int j = 0; j < obsCounter; j++) {
        System.out.println("==================Observation=================");
        System.out.println("Patient Id: " + dataInputStream.readInt());
        System.out.println("Concept Name: " + dataInputStream.readUTF());

        byte type = dataInputStream.readByte();
        if (type == ObsSerializer.TYPE_STRING)
            System.out.println("Value: " + dataInputStream.readUTF());
        else if (type == ObsSerializer.TYPE_INT)
            System.out.println("Value: " + dataInputStream.readInt());
        else if (type == ObsSerializer.TYPE_DOUBLE)
            System.out.println("Value: " + dataInputStream.readDouble());
        else if (type == ObsSerializer.TYPE_DATE)
            System.out.println("Value: " + dataInputStream.readLong());
        System.out.println("Time: " + dataInputStream.readLong());
    }

    Integer formCounter = dataInputStream.readInt();
    for (int j = 0; j < formCounter; j++) {
        System.out.println("==================Form=================");
        System.out.println("Form Id: " + dataInputStream.readInt());
    }

    System.out.println();

    inputStream.close();
}

From source file:org.apache.hadoop.hbase.migration.nineteen.regionserver.HStoreFile.java

/** 
 * Reads in an info file
 *
 * @param filesystem file system
 * @return The sequence id contained in the info file
 * @throws IOException
 */
public long loadInfo(final FileSystem filesystem) throws IOException {
    Path p = null;
    if (isReference()) {
        p = getInfoFilePath(reference.getEncodedRegionName(), this.reference.getFileId());
    } else {
        p = getInfoFilePath();
    }
    long length = filesystem.getFileStatus(p).getLen();
    boolean hasMoreThanSeqNum = length > (Byte.SIZE + Bytes.SIZEOF_LONG);
    DataInputStream in = new DataInputStream(filesystem.open(p));
    try {
        byte flag = in.readByte();
        if (flag == INFO_SEQ_NUM) {
            if (hasMoreThanSeqNum) {
                flag = in.readByte();
                if (flag == MAJOR_COMPACTION) {
                    this.majorCompaction = in.readBoolean();
                }
            }
            return in.readLong();
        }
        throw new IOException("Cannot process log file: " + p);
    } finally {
        in.close();
    }
}

From source file:com.jivesoftware.os.amza.service.replication.http.endpoints.AmzaReplicationRestEndpoints.java

@POST
@Consumes(MediaType.APPLICATION_OCTET_STREAM)
@Produces(MediaType.APPLICATION_OCTET_STREAM)
@Path("/ackBatch")
public Response ackBatch(InputStream is) {
    try {

        DataInputStream in = new DataInputStream(is);
        try {

            while (in.readByte() == 1) {

                int length = in.readShort();
                byte[] bytes = new byte[length];
                in.readFully(bytes);
                VersionedPartitionName versionedPartitionName = amzaInterner.internVersionedPartitionName(bytes,
                        0, length);

                length = in.readShort();
                bytes = new byte[length];
                in.readFully(bytes);
                RingMember ringMember = amzaInterner.internRingMember(bytes, 0, length);

                long takeSessionId = in.readLong();
                long takeSharedKey = in.readLong();
                long txId = in.readLong();
                long leadershipToken = in.readLong();

                amzaInstance.rowsTaken(ringMember, takeSessionId, takeSharedKey, versionedPartitionName, txId,
                        leadershipToken);

            }

            if (in.readByte() == 1) {
                int length = in.readShort();
                byte[] bytes = new byte[length];
                in.readFully(bytes);
                RingMember ringMember = amzaInterner.internRingMember(bytes, 0, length);

                long takeSessionId = in.readLong();
                long takeSharedKey = in.readLong();

                amzaInstance.pong(ringMember, takeSessionId, takeSharedKey);
            }

            return Response.ok(conf.asByteArray(Boolean.TRUE)).build();

        } finally {
            try {
                in.close();
            } catch (Exception x) {
                LOG.error("Failed to close input stream", x);
            }
        }

    } catch (Exception x) {
        LOG.warn("Failed ackBatch", x);
        return ResponseHelper.INSTANCE.errorResponse("Failed ackBatch.", x);
    } finally {
        amzaStats.pongsReceived.increment();
    }
}

From source file:it.unimi.dsi.sux4j.io.ChunkedHashStore.java

/** Returns the size of this store. Note that if you set up
 * a {@linkplain #filter(Predicate) filter}, the first call to
 * this method will require a scan of the whole store.
 *
 * @return the number of (possibly filtered) triples of this store.
 */

public long size() throws IOException {
    if (filter == null)
        return size;
    if (filteredSize == -1) {
        long c = 0;
        final long[] triple = new long[3];
        for (int i = 0; i < DISK_CHUNKS; i++) {
            if (filter == null)
                c += count[i];
            else {
                for (DataOutputStream d : dos)
                    d.flush();
                final DataInputStream dis = new DataInputStream(
                        new FastBufferedInputStream(new FileInputStream(file[i])));
                for (int j = 0; j < count[i]; j++) {
                    triple[0] = dis.readLong();
                    triple[1] = dis.readLong();
                    triple[2] = dis.readLong();
                    if (hashMask == 0)
                        dis.readLong();
                    if (filter.evaluate(triple))
                        c++;
                }
                dis.close();
            }
        }

        filteredSize = c;
    }
    return filteredSize;
}

From source file:org.apache.hadoop.hdfs.server.datanode.DataXceiver.java

/**
 * Write a block to disk.
 * 
 * @param in The stream to read from
 * @throws IOException
 */
private void writeBlock(DataInputStream in) throws IOException {
    DatanodeInfo srcDataNode = null;
    LOG.debug("writeBlock receive buf size " + s.getReceiveBufferSize() + " tcp no delay " + s.getTcpNoDelay());
    //
    // Read in the header
    //
    Block block = new Block(in.readLong(), dataXceiverServer.estimateBlockSize, in.readLong());
    LOG.info("Receiving block " + block + " src: " + remoteAddress + " dest: " + localAddress);
    int pipelineSize = in.readInt(); // num of datanodes in entire pipeline
    boolean isRecovery = in.readBoolean(); // is this part of recovery?
    String client = Text.readString(in); // working on behalf of this client
    boolean hasSrcDataNode = in.readBoolean(); // is src node info present
    if (hasSrcDataNode) {
        srcDataNode = new DatanodeInfo();
        srcDataNode.readFields(in);
    }
    int numTargets = in.readInt();
    if (numTargets < 0) {
        throw new IOException("Mislabelled incoming datastream.");
    }
    DatanodeInfo targets[] = new DatanodeInfo[numTargets];
    for (int i = 0; i < targets.length; i++) {
        DatanodeInfo tmp = new DatanodeInfo();
        tmp.readFields(in);
        targets[i] = tmp;
    }
    Token<BlockTokenIdentifier> accessToken = new Token<BlockTokenIdentifier>();
    accessToken.readFields(in);
    DataOutputStream replyOut = null; // stream to prev target
    replyOut = new DataOutputStream(NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
    if (datanode.isBlockTokenEnabled) {
        try {
            datanode.blockTokenSecretManager.checkAccess(accessToken, null, block,
                    BlockTokenSecretManager.AccessMode.WRITE);
        } catch (InvalidToken e) {
            try {
                if (client.length() != 0) {
                    replyOut.writeShort((short) DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN);
                    Text.writeString(replyOut, datanode.dnRegistration.getName());
                    replyOut.flush();
                }
                throw new IOException("Access token verification failed, for client " + remoteAddress
                        + " for OP_WRITE_BLOCK for block " + block);
            } finally {
                IOUtils.closeStream(replyOut);
            }
        }
    }

    DataOutputStream mirrorOut = null; // stream to next target
    DataInputStream mirrorIn = null; // reply from next target
    Socket mirrorSock = null; // socket to next target
    BlockReceiver blockReceiver = null; // responsible for data handling
    String mirrorNode = null; // the name:port of next target
    String firstBadLink = ""; // first datanode that failed in connection setup
    short mirrorInStatus = (short) DataTransferProtocol.OP_STATUS_SUCCESS;
    try {
        // open a block receiver and check if the block does not exist
        blockReceiver = new BlockReceiver(block, in, s.getRemoteSocketAddress().toString(),
                s.getLocalSocketAddress().toString(), isRecovery, client, srcDataNode, datanode);

        //
        // Open network conn to backup machine, if 
        // appropriate
        //
        if (targets.length > 0) {
            InetSocketAddress mirrorTarget = null;
            // Connect to backup machine
            mirrorNode = targets[0].getName();
            mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
            mirrorSock = datanode.newSocket();
            try {
                int timeoutValue = datanode.socketTimeout + (HdfsConstants.READ_TIMEOUT_EXTENSION * numTargets);
                int writeTimeout = datanode.socketWriteTimeout
                        + (HdfsConstants.WRITE_TIMEOUT_EXTENSION * numTargets);
                NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
                mirrorSock.setSoTimeout(timeoutValue);
                mirrorSock.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE);
                mirrorOut = new DataOutputStream(new BufferedOutputStream(
                        NetUtils.getOutputStream(mirrorSock, writeTimeout), SMALL_BUFFER_SIZE));
                mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock));

                // Write header: Copied from DFSClient.java!
                mirrorOut.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                mirrorOut.write(DataTransferProtocol.OP_WRITE_BLOCK);
                mirrorOut.writeLong(block.getBlockId());
                mirrorOut.writeLong(block.getGenerationStamp());
                mirrorOut.writeInt(pipelineSize);
                mirrorOut.writeBoolean(isRecovery);
                Text.writeString(mirrorOut, client);
                mirrorOut.writeBoolean(hasSrcDataNode);
                if (hasSrcDataNode) { // pass src node information
                    srcDataNode.write(mirrorOut);
                }
                mirrorOut.writeInt(targets.length - 1);
                for (int i = 1; i < targets.length; i++) {
                    targets[i].write(mirrorOut);
                }
                accessToken.write(mirrorOut);

                blockReceiver.writeChecksumHeader(mirrorOut);
                mirrorOut.flush();

                // read connect ack (only for clients, not for replication req)
                if (client.length() != 0) {
                    mirrorInStatus = mirrorIn.readShort();
                    firstBadLink = Text.readString(mirrorIn);
                    if (LOG.isDebugEnabled() || mirrorInStatus != DataTransferProtocol.OP_STATUS_SUCCESS) {
                        LOG.info("Datanode " + targets.length + " got response for connect ack "
                                + " from downstream datanode with firstbadlink as " + firstBadLink);
                    }
                }

            } catch (IOException e) {
                if (client.length() != 0) {
                    replyOut.writeShort((short) DataTransferProtocol.OP_STATUS_ERROR);
                    Text.writeString(replyOut, mirrorNode);
                    replyOut.flush();
                }
                IOUtils.closeStream(mirrorOut);
                mirrorOut = null;
                IOUtils.closeStream(mirrorIn);
                mirrorIn = null;
                IOUtils.closeSocket(mirrorSock);
                mirrorSock = null;
                if (client.length() > 0) {
                    throw e;
                } else {
                    LOG.info(datanode.dnRegistration + ":Exception transferring block " + block + " to mirror "
                            + mirrorNode + ". continuing without the mirror.\n"
                            + StringUtils.stringifyException(e));
                }
            }
        }

        // send connect ack back to source (only for clients)
        if (client.length() != 0) {
            if (LOG.isDebugEnabled() || mirrorInStatus != DataTransferProtocol.OP_STATUS_SUCCESS) {
                LOG.info("Datanode " + targets.length + " forwarding connect ack to upstream firstbadlink is "
                        + firstBadLink);
            }
            replyOut.writeShort(mirrorInStatus);
            Text.writeString(replyOut, firstBadLink);
            replyOut.flush();
        }

        // receive the block and mirror to the next target
        String mirrorAddr = (mirrorSock == null) ? null : mirrorNode;
        blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut, mirrorAddr, null, targets.length);

        // if this write is for a replication request (and not
        // from a client), then confirm block. For client-writes,
        // the block is finalized in the PacketResponder.
        if (client.length() == 0) {
            datanode.notifyNamenodeReceivedBlock(block, DataNode.EMPTY_DEL_HINT);
            LOG.info("Received block " + block + " src: " + remoteAddress + " dest: " + localAddress
                    + " of size " + block.getNumBytes());
        }

        if (datanode.blockScanner != null) {
            datanode.blockScanner.addBlock(block);
        }

    } catch (IOException ioe) {
        LOG.info("writeBlock " + block + " received exception " + ioe);
        throw ioe;
    } finally {
        // close all opened streams
        IOUtils.closeStream(mirrorOut);
        IOUtils.closeStream(mirrorIn);
        IOUtils.closeStream(replyOut);
        IOUtils.closeSocket(mirrorSock);
        IOUtils.closeStream(blockReceiver);
    }
}

From source file:net.mybox.mybox.ClientStatus.java

private boolean getLastSync() {

    try {
        FileInputStream fin = new FileInputStream(lastSyncFile);
        DataInputStream din = new DataInputStream(fin);
        lastSync = din.readLong();
        din.close();
    } catch (Exception e) {
        return false;
    }

    return true;
}

From source file:it.unimi.dsi.sux4j.io.ChunkedHashStore.java

/** Returns an iterator over the chunks of this chunked hash store.
 *
 * @return an iterator over the chunks of this chunked hash store.
 */

public Iterator<Chunk> iterator() {
    if (closed)
        throw new IllegalStateException("This " + getClass().getSimpleName() + " has been closed");
    for (DataOutputStream d : dos)
        try {
            d.flush();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }

    int m = 0;
    for (int i = 0; i < virtualDiskChunks; i++) {
        int s = 0;
        for (int j = 0; j < diskChunkStep; j++)
            s += count[i * diskChunkStep + j];
        if (s > m)
            m = s;
    }

    final int maxCount = m;

    return new AbstractObjectIterator<Chunk>() {
        private int chunk;
        private FastBufferedInputStream fbis;
        private int last;
        private int chunkSize;
        private final long[] buffer0 = new long[maxCount];
        private final long[] buffer1 = new long[maxCount];
        private final long[] buffer2 = new long[maxCount];
        private final long[] data = hashMask != 0 ? null : new long[maxCount];

        public boolean hasNext() {
            return chunk < chunks;
        }

        @SuppressWarnings("unchecked")
        public Chunk next() {
            if (!hasNext())
                throw new NoSuchElementException();
            final long[] buffer0 = this.buffer0;

            if (chunk % (chunks / virtualDiskChunks) == 0) {
                final int diskChunk = (int) (chunk / (chunks / virtualDiskChunks));
                final long[] buffer1 = this.buffer1, buffer2 = this.buffer2;

                chunkSize = 0;
                try {
                    if (diskChunkStep == 1) {
                        fbis = new FastBufferedInputStream(new FileInputStream(file[diskChunk]));
                        chunkSize = count[diskChunk];
                    } else {
                        final FileInputStream[] fis = new FileInputStream[diskChunkStep];
                        for (int i = 0; i < fis.length; i++) {
                            fis[i] = new FileInputStream(file[diskChunk * diskChunkStep + i]);
                            chunkSize += count[diskChunk * diskChunkStep + i];
                        }
                        fbis = new FastBufferedInputStream(new SequenceInputStream(
                                new IteratorEnumeration(Arrays.asList(fis).iterator())));
                    }
                    final DataInputStream dis = new DataInputStream(fbis);

                    final long triple[] = new long[3];
                    int count = 0;
                    for (int j = 0; j < chunkSize; j++) {
                        triple[0] = dis.readLong();
                        triple[1] = dis.readLong();
                        triple[2] = dis.readLong();

                        if (DEBUG)
                            System.err.println("From disk: " + Arrays.toString(triple));

                        if (filter == null || filter.evaluate(triple)) {
                            buffer0[count] = triple[0];
                            buffer1[count] = triple[1];
                            buffer2[count] = triple[2];
                            if (hashMask == 0)
                                data[count] = dis.readLong();
                            count++;
                        } else if (hashMask == 0)
                            dis.readLong(); // Discard data
                    }

                    chunkSize = count;
                    dis.close();
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }

                it.unimi.dsi.fastutil.Arrays.quickSort(0, chunkSize, new AbstractIntComparator() {
                    private static final long serialVersionUID = 0L;

                    public int compare(final int x, final int y) {
                        int t = Long.signum(buffer0[x] - buffer0[y]);
                        if (t != 0)
                            return t;
                        t = Long.signum(buffer1[x] - buffer1[y]);
                        if (t != 0)
                            return t;
                        return Long.signum(buffer2[x] - buffer2[y]);
                    }
                }, new Swapper() {
                    public void swap(final int x, final int y) {
                        final long e0 = buffer0[x], e1 = buffer1[x], e2 = buffer2[x];
                        buffer0[x] = buffer0[y];
                        buffer1[x] = buffer1[y];
                        buffer2[x] = buffer2[y];
                        buffer0[y] = e0;
                        buffer1[y] = e1;
                        buffer2[y] = e2;
                        if (hashMask == 0) {
                            final long v = data[x];
                            data[x] = data[y];
                            data[y] = v;
                        }
                    }
                });

                if (DEBUG) {
                    for (int i = 0; i < chunkSize; i++)
                        System.err.println(buffer0[i] + ", " + buffer1[i] + ", " + buffer2[i]);
                }

                if (!checkedForDuplicates && chunkSize > 1)
                    for (int i = chunkSize - 1; i-- != 0;)
                        if (buffer0[i] == buffer0[i + 1] && buffer1[i] == buffer1[i + 1]
                                && buffer2[i] == buffer2[i + 1])
                            throw new ChunkedHashStore.DuplicateException();
                if (chunk == chunks - 1)
                    checkedForDuplicates = true;
                last = 0;
            }

            final int start = last;
            while (last < chunkSize && (chunkShift == Long.SIZE ? 0 : buffer0[last] >>> chunkShift) == chunk)
                last++;
            chunk++;

            return new Chunk(buffer0, buffer1, buffer2, data, hashMask, start, last);
        }
    };
}

From source file:org.apache.hama.monitor.ZKCollector.java

@Override
public MetricsRecord harvest() throws Exception {
    final String path = this.reference.get().path;
    final ZooKeeper zk = this.reference.get().zk;
    LOG.debug("Searching " + path + " in zookeeper.");
    Stat stat = zk.exists(path, false);
    if (null == stat)
        return null; // no need to collect data.
    List<String> children = zk.getChildren(path, false);
    if (LOG.isDebugEnabled()) {
        LOG.debug("Leaves size is " + children.size() + " total znodes in list: " + children);
    }

    // TODO: metrics record contains multiple metrics (1 to many)
    // data is stored under zk e.g. /path/to/metrics/jvm/...
    // within jvm folder metrics is stored in a form of name, value pair
    final MetricsRecord record = reference.get().record;
    if (null != children) {
        for (String child : children) {
            LOG.info("metrics -> " + child);
            // <metricsName_d> indicates data type is double
            String dataType = suffix(child);
            byte[] dataInBytes = zk.getData(path + "/" + child, false, stat);
            if (LOG.isDebugEnabled()) {
                LOG.debug("Data length (in byte): " + dataInBytes.length);
            }
            DataInputStream input = null;
            try {
                String name = removeSuffix(child);
                input = new DataInputStream(new ByteArrayInputStream(dataInBytes));
                if ("d".equals(dataType)) {
                    double dv = input.readDouble();
                    LOG.info("metrics " + name + " value:" + dv);
                    record.add(new Metric<Double>(name, dv));
                } else if ("f".equals(dataType)) {
                    float fv = input.readFloat();
                    LOG.info("metrics " + name + " value:" + fv);
                    record.add(new Metric<Float>(name, fv));
                } else if ("i".equals(dataType)) {
                    int iv = input.readInt();
                    LOG.info("metrics " + name + " value:" + iv);
                    record.add(new Metric<Integer>(name, iv));
                } else if ("l".equals(dataType)) {
                    long lv = input.readLong();
                    LOG.info("metrics " + name + " value:" + lv);
                    record.add(new Metric<Long>(name, lv));
                } else if ("b".equals(dataType)) {
                    LOG.info("metrics" + name + " value:" + Arrays.toString(dataInBytes));
                    record.add(new Metric<byte[]>(name, dataInBytes));
                } else {
                    LOG.warn("Unkown data type for metrics name: " + child);
                }
            } finally {
                input.close();
            }
        }
    }
    return record;
}

From source file:tvbrowser.core.PluginLoader.java

/**
 * read the contents of a proxy file to get the necessary
 * information about the plugin managed by this proxy to recreate
 * the proxy without the plugin actually being loaded
 *
 * @param proxyFile
 * @return pluginProxy
 */
private JavaPluginProxy readPluginProxy(File proxyFile) {
    DataInputStream in = null;

    try {
        in = new DataInputStream(new BufferedInputStream(new FileInputStream(proxyFile)));

        String name = in.readUTF();
        String author = in.readUTF();
        String description = in.readUTF();
        String license = in.readUTF();

        DummyPlugin.setCurrentVersion(new Version(in));

        String pluginId = in.readUTF();
        in.readLong(); // file size is unused
        String lcFileName = in.readUTF();
        in.close();
        // check existence of plugin file
        File pluginFile = new File(lcFileName);
        if (!pluginFile.canRead()) {
            deletePluginProxy(proxyFile);
            return null;
        }

        // everything seems fine, create plugin proxy and plugin info
        PluginInfo info = new PluginInfo(DummyPlugin.class, name, description, author, license);
        // now get icon
        String iconFileName = getProxyIconFileName(proxyFile);
        return new JavaPluginProxy(info, lcFileName, pluginId, iconFileName);
    } catch (Exception e) {
        if (in != null) {
            try {
                in.close();
            } catch (IOException e1) {
                // ignore
            }
        }
        // delete proxy on read error, maybe the format has changed
        deletePluginProxy(proxyFile);
        return null;
    }
}

From source file:org.apache.jxtadoop.hdfs.server.datanode.DataXceiver.java

/**
 * Write a block to disk.
 * 
 * @param in The stream to read from
 * @throws IOException
 */
private void writeBlock(DataInputStream in) throws IOException {
    LOG.debug("Mathod called : writeBlock()");
    DatanodeInfo srcDataNode = null;
    LOG.debug("writeBlock receive buf size " + s.getReceiveBufferSize() + " tcp no delay " + s.getTcpNoDelay());
    //
    // Read in the header
    //
    Block block = new Block(in.readLong(), dataXceiverServer.estimateBlockSize, in.readLong());
    LOG.info("Receiving block " + block + " src: " + remoteAddress + " dest: " + localAddress);

    int pipelineSize = in.readInt(); // num of datanodes in entire pipeline
    boolean isRecovery = in.readBoolean(); // is this part of recovery?
    String client = Text.readString(in); // working on behalf of this client

    boolean hasSrcDataNode = in.readBoolean(); // is src node info present
    if (hasSrcDataNode) {
        srcDataNode = new DatanodeInfo();
        srcDataNode.readFields(in);
    }

    int numTargets = in.readInt();
    if (numTargets < 0) {
        throw new IOException("Mislabelled incoming datastream.");
    }

    DatanodeInfo targets[] = new DatanodeInfo[numTargets];
    for (int i = 0; i < targets.length; i++) {
        DatanodeInfo tmp = new DatanodeInfo();
        tmp.readFields(in);
        targets[i] = tmp;
    }

    DataOutputStream mirrorOut = null; // stream to next target
    DataInputStream mirrorIn = null; // reply from next target
    DataOutputStream replyOut = null; // stream to prev target
    JxtaSocket mirrorSock = null; // socket to next target
    BlockReceiver blockReceiver = null; // responsible for data handling
    String mirrorNode = null; // the name:port of next target
    String firstBadLink = ""; // first datanode that failed in connection setup

    try {
        // open a block receiver and check if the block does not exist
        /*blockReceiver = new BlockReceiver(block, in, 
            s.getRemoteSocketAddress().toString(),
            s.getLocalSocketAddress().toString(),
            isRecovery, client, srcDataNode, datanode);*/
        blockReceiver = new BlockReceiver(block, in,
                ((JxtaSocketAddress) s.getRemoteSocketAddress()).getPeerId().toString(),
                ((JxtaSocketAddress) s.getLocalSocketAddress()).getPeerId().toString(), isRecovery, client,
                srcDataNode, datanode);

        // get a connection back to the previous target
        //replyOut = new DataOutputStream(
        //     NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
        ReliableOutputStream replyOutRos = (ReliableOutputStream) s.getOutputStream();
        replyOut = new DataOutputStream(replyOutRos);

        //
        // Open network conn to backup machine, if 
        // appropriate
        //
        if (targets.length > 0) {
            // JxtaSocketAddress mirrorTarget = null;
            // Connect to backup machine
            mirrorNode = targets[0].getPeerId();
            // mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
            // mirrorSock = datanode.newSocket();

            try {
                //int timeoutValue = numTargets * datanode.socketTimeout;
                //int writeTimeout = datanode.socketWriteTimeout + 
                //                   (HdfsConstants.WRITE_TIMEOUT_EXTENSION * numTargets);
                // NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
                mirrorSock = datanode.getDnPeer().getInfoSocket(mirrorNode.toString());
                if (mirrorSock == null)
                    throw new IOException("Failed to get a mirror socket");
                //mirrorSock.setSoTimeout(timeoutValue);
                //mirrorSock.setTcpNoDelay(true);
                //mirrorSock.setSoTimeout(Integer.parseInt(datanode.getConf().get("hadoop.p2p.info.timeout")));
                //mirrorSock.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE);
                /*mirrorOut = new DataOutputStream(
                   new BufferedOutputStream(
                         NetUtils.getOutputStream(mirrorSock, writeTimeout),
                 SMALL_BUFFER_SIZE));
                mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock));
                */
                mirrorOut = new DataOutputStream((ReliableOutputStream) mirrorSock.getOutputStream());
                mirrorIn = new DataInputStream((ReliableInputStream) mirrorSock.getInputStream());

                // Write header: Copied from DFSClient.java!
                mirrorOut.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                mirrorOut.write(DataTransferProtocol.OP_WRITE_BLOCK);
                mirrorOut.writeLong(block.getBlockId());
                mirrorOut.writeLong(block.getGenerationStamp());
                mirrorOut.writeInt(pipelineSize);
                mirrorOut.writeBoolean(isRecovery);
                Text.writeString(mirrorOut, client);
                mirrorOut.writeBoolean(hasSrcDataNode);

                if (hasSrcDataNode) { // pass src node information
                    srcDataNode.write(mirrorOut);
                }

                mirrorOut.writeInt(targets.length - 1);
                for (int i = 1; i < targets.length; i++) {
                    targets[i].write(mirrorOut);
                }

                blockReceiver.writeChecksumHeader(mirrorOut);
                mirrorOut.flush();

                // read connect ack (only for clients, not for replication req)
                if (client.length() != 0) {
                    firstBadLink = Text.readString(mirrorIn);
                    if (LOG.isDebugEnabled() || firstBadLink.length() > 0) {
                        LOG.info("Datanode " + targets.length + " got response for connect ack "
                                + " from downstream datanode with firstbadlink as " + firstBadLink);
                    }
                }

            } catch (SocketTimeoutException ste) {
                LOG.debug("Time out while receiving data on DataXceiver");
                LOG.debug(ste);
                ste.printStackTrace();
            } catch (IOException e) {
                LOG.debug("IOException occurred : " + e.getMessage());
                if (client.length() != 0) {
                    Text.writeString(replyOut, mirrorNode);
                    replyOut.flush();
                }
                IOUtils.closeStream(mirrorOut);
                mirrorOut = null;
                IOUtils.closeStream(mirrorIn);
                mirrorIn = null;
                if (mirrorSock != null) {
                    IOUtils.closeSocket(mirrorSock);
                    mirrorSock = null;
                }
                if (client.length() > 0) {
                    throw e;
                } else {
                    LOG.info(datanode.dnRegistration + ":Exception transferring block " + block + " to mirror "
                            + mirrorNode + ". continuing without the mirror.\n"
                            + StringUtils.stringifyException(e));
                }
            }
        }

        // send connect ack back to source (only for clients)
        if (client.length() != 0) {
            if (LOG.isDebugEnabled() || firstBadLink.length() > 0) {
                LOG.info("Datanode " + targets.length + " forwarding connect ack to upstream firstbadlink is "
                        + firstBadLink);
            }
            Text.writeString(replyOut, firstBadLink);
            replyOut.flush();
        }

        // receive the block and mirror to the next target
        String mirrorAddr = (mirrorSock == null) ? null : mirrorNode;
        blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut, mirrorAddr, null, targets.length);

        // if this write is for a replication request (and not
        // from a client), then confirm block. For client-writes,
        // the block is finalized in the PacketResponder.
        if (client.length() == 0) {
            datanode.notifyNamenodeReceivedBlock(block, DataNode.EMPTY_DEL_HINT);
            LOG.info("Received block " + block + " src: " + remoteAddress + " dest: " + localAddress
                    + " of size " + block.getNumBytes());
        }

        if (datanode.blockScanner != null) {
            datanode.blockScanner.addBlock(block);
        }

    } catch (IOException ioe) {
        LOG.info("writeBlock " + block + " received exception " + ioe);
        throw ioe;
    } catch (Exception e) {
        LOG.warn("Exception occurred in writting block : " + e.getMessage());
    } finally {
        // close all opened streams

        LOG.debug("Finalizing : writeBlock()");
        IOUtils.closeStream(mirrorOut);
        IOUtils.closeStream(mirrorIn);
        IOUtils.closeStream(replyOut);
        IOUtils.closeSocket(mirrorSock);
        IOUtils.closeStream(blockReceiver);
    }
}