Example usage for java.io DataInputStream readShort

Introduction

On this page you can find example usages of java.io.DataInputStream.readShort().

Prototype

public final short readShort() throws IOException 

Document

See the general contract of the readShort method of DataInput: it reads two input bytes and returns the short value they encode, high byte first (big-endian).
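
For a quick illustration of that contract, here is a minimal, self-contained sketch (not taken from any of the projects below) that decodes two big-endian bytes into a short:

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

public class ReadShortDemo {
    public static void main(String[] args) throws IOException {
        // 0x01 0x2C encodes 300 in big-endian order: (0x01 << 8) | 0x2C
        byte[] bytes = { 0x01, 0x2C };
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes))) {
            short value = in.readShort();
            System.out.println(value); // prints 300
        }
    }
}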

Usage

From source file:org.apache.hadoop.hdfs.AvatarClient.java

/**
 * Get the checksum of a file.
 * @param src The file path
 * @return The checksum 
 */
public static MD5MD5CRC32FileChecksum getFileChecksum(String src, AvatarProtocol namenode,
        SocketFactory socketFactory, int socketTimeout) throws IOException {
    //get all block locations
    final List<LocatedBlock> locatedblocks = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE)
            .getLocatedBlocks();
    final DataOutputBuffer md5out = new DataOutputBuffer();
    int bytesPerCRC = 0;
    long crcPerBlock = 0;

    //get block checksum for each block
    for (int i = 0; i < locatedblocks.size(); i++) {
        LocatedBlock lb = locatedblocks.get(i);
        final Block block = lb.getBlock();
        final DatanodeInfo[] datanodes = lb.getLocations();

        //try each datanode location of the block
        final int timeout = 3000 * datanodes.length + socketTimeout;
        boolean done = false;
        for (int j = 0; !done && j < datanodes.length; j++) {
            //connect to a datanode
            final Socket sock = socketFactory.createSocket();
            NetUtils.connect(sock, NetUtils.createSocketAddr(datanodes[j].getName()), timeout);
            sock.setSoTimeout(timeout);

            DataOutputStream out = new DataOutputStream(
                    new BufferedOutputStream(NetUtils.getOutputStream(sock), DataNode.SMALL_BUFFER_SIZE));
            DataInputStream in = new DataInputStream(NetUtils.getInputStream(sock));

            // get block MD5
            try {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("write to " + datanodes[j].getName() + ": "
                            + DataTransferProtocol.OP_BLOCK_CHECKSUM + ", block=" + block);
                }
                out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                out.write(DataTransferProtocol.OP_BLOCK_CHECKSUM);
                out.writeLong(block.getBlockId());
                out.writeLong(block.getGenerationStamp());
                out.flush();

                final short reply = in.readShort();
                if (reply != DataTransferProtocol.OP_STATUS_SUCCESS) {
                    throw new IOException("Bad response " + reply + " for block " + block + " from datanode "
                            + datanodes[j].getName());
                }

                //read byte-per-checksum
                final int bpc = in.readInt();
                if (i == 0) { //first block
                    bytesPerCRC = bpc;
                } else if (bpc != bytesPerCRC) {
                    throw new IOException(
                            "Byte-per-checksum not matched: bpc=" + bpc + " but bytesPerCRC=" + bytesPerCRC);
                }

                //read crc-per-block
                final long cpb = in.readLong();
                if (locatedblocks.size() > 1 && i == 0) {
                    crcPerBlock = cpb;
                }

                //read md5
                final MD5Hash md5 = MD5Hash.read(in);
                md5.write(md5out);

                done = true;

                if (LOG.isDebugEnabled()) {
                    if (i == 0) {
                        LOG.debug("set bytesPerCRC=" + bytesPerCRC + ", crcPerBlock=" + crcPerBlock);
                    }
                    LOG.debug("got reply from " + datanodes[j].getName() + ": md5=" + md5);
                }
            } catch (IOException ie) {
                LOG.warn("src=" + src + ", datanodes[" + j + "].getName()=" + datanodes[j].getName(), ie);
            } finally {
                IOUtils.closeStream(in);
                IOUtils.closeStream(out);
                IOUtils.closeSocket(sock);
            }
        }

        if (!done) {
            throw new IOException("Fail to get block MD5 for " + block);
        }
    }

    //compute file MD5
    final MD5Hash fileMD5 = MD5Hash.digest(md5out.getData());
    return new MD5MD5CRC32FileChecksum(bytesPerCRC, crcPerBlock, fileMD5);
}
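
The handshake pattern above — the client sends the protocol version with writeShort, then validates the one-short status reply with readShort — is easy to try in isolation. Below is a minimal sketch, with a made-up STATUS_SUCCESS constant standing in for DataTransferProtocol.OP_STATUS_SUCCESS:

import java.io.*;

public class HandshakeDemo {
    static final short STATUS_SUCCESS = 0; // hypothetical status code

    public static void main(String[] args) throws IOException {
        // "Datanode" side: write the one-short reply header into a buffer.
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(buf)) {
            out.writeShort(STATUS_SUCCESS);
        }

        // "Client" side: read the status back and validate it, as getFileChecksum does.
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()))) {
            short reply = in.readShort();
            if (reply != STATUS_SUCCESS) {
                throw new IOException("Bad response " + reply);
            }
        }
    }
}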

From source file:org.apache.hadoop.hdfs.server.datanode.DataXceiver.java

/**
 * Read a block from the disk.
 * @param in The stream to read from
 * @throws IOException
 */
private void readBlock(DataInputStream in) throws IOException {
    //
    // Read in the header
    //
    long blockId = in.readLong();
    Block block = new Block(blockId, 0, in.readLong());

    long startOffset = in.readLong();
    long length = in.readLong();
    String clientName = Text.readString(in);
    Token<BlockTokenIdentifier> accessToken = new Token<BlockTokenIdentifier>();
    accessToken.readFields(in);
    OutputStream baseStream = NetUtils.getOutputStream(s, datanode.socketWriteTimeout);
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(baseStream, SMALL_BUFFER_SIZE));

    if (datanode.isBlockTokenEnabled) {
        try {
            datanode.blockTokenSecretManager.checkAccess(accessToken, null, block,
                    BlockTokenSecretManager.AccessMode.READ);
        } catch (InvalidToken e) {
            try {
                out.writeShort(DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN);
                out.flush();
                throw new IOException("Access token verification failed, for client " + remoteAddress
                        + " for OP_READ_BLOCK for block " + block);
            } finally {
                IOUtils.closeStream(out);
            }
        }
    }
    // send the block
    BlockSender blockSender = null;
    final String clientTraceFmt = clientName.length() > 0 && ClientTraceLog.isInfoEnabled()
            ? String.format(DN_CLIENTTRACE_FORMAT, localAddress, remoteAddress, "%d", "HDFS_READ", clientName,
                    "%d", datanode.dnRegistration.getStorageID(), block, "%d")
            : datanode.dnRegistration + " Served block " + block + " to " + s.getInetAddress();
    try {
        try {
            blockSender = new BlockSender(block, startOffset, length, true, true, false, datanode,
                    clientTraceFmt);
        } catch (IOException e) {
            out.writeShort(DataTransferProtocol.OP_STATUS_ERROR);
            throw e;
        }

        out.writeShort(DataTransferProtocol.OP_STATUS_SUCCESS); // send op status
        long read = blockSender.sendBlock(out, baseStream, null); // send data

        if (blockSender.isBlockReadFully()) {
            // See if client verification succeeded. 
            // This is an optional response from client.
            try {
                if (in.readShort() == DataTransferProtocol.OP_STATUS_CHECKSUM_OK
                        && datanode.blockScanner != null) {
                    datanode.blockScanner.verifiedByClient(block);
                }
            } catch (IOException ignored) {
            }
        }

        datanode.myMetrics.incrBytesRead((int) read);
        datanode.myMetrics.incrBlocksRead();
    } catch (SocketException ignored) {
        // It's OK for the remote side to close the connection at any time.
        datanode.myMetrics.incrBlocksRead();
    } catch (IOException ioe) {
        /* What exactly should we do here?
         * Earlier version shutdown() datanode if there is disk error.
         */
        LOG.warn(datanode.dnRegistration + ":Got exception while serving " + block + " to " + s.getInetAddress()
                + ":\n" + StringUtils.stringifyException(ioe));
        throw ioe;
    } finally {
        IOUtils.closeStream(out);
        IOUtils.closeStream(blockSender);
    }
}
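
The try/catch around the final readShort above matters: readShort needs exactly two bytes, so when the client closes the connection without sending the optional checksum-ok status, the read fails with an EOFException (a subclass of IOException). A standalone sketch of that behavior:

import java.io.*;

public class EofDemo {
    public static void main(String[] args) {
        byte[] oneByte = { 0x7F }; // only one byte available, but readShort needs two
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(oneByte))) {
            in.readShort();
        } catch (EOFException e) {
            System.out.println("stream ended before two bytes were read");
        } catch (IOException e) {
            // other I/O failures
        }
    }
}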

From source file:org.apache.jxtadoop.hdfs.DFSClient.java

/**
 * Get the checksum of a file.
 * @param src The file path
 * @return The checksum 
 */
public static MD5MD5CRC32FileChecksum getFileChecksum(String src, ClientProtocol namenode,
        SocketFactory socketFactory, int socketTimeout) throws IOException {
    //get all block locations
    final List<LocatedBlock> locatedblocks = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE)
            .getLocatedBlocks();
    final DataOutputBuffer md5out = new DataOutputBuffer();
    int bytesPerCRC = 0;
    long crcPerBlock = 0;

    //get block checksum for each block
    for (int i = 0; i < locatedblocks.size(); i++) {
        LocatedBlock lb = locatedblocks.get(i);
        final Block block = lb.getBlock();
        final DatanodeInfo[] datanodes = lb.getLocations();

        //try each datanode location of the block
        final int timeout = 3000 * datanodes.length + socketTimeout;
        boolean done = false;
        for (int j = 0; !done && j < datanodes.length; j++) {
            //connect to a datanode
            /*final Socket sock = socketFactory.createSocket();
            NetUtils.connect(sock, 
                 NetUtils.createSocketAddr(datanodes[j].getName()),
                 timeout);
            sock.setSoTimeout(timeout);*/
            JxtaSocket jsock = DFSClient.getDfsClient().getDfsClientPeer()
                    .getInfoSocket(datanodes[j].getName());
            // jsock.setSoTimeout(timeout);
            jsock.setSoTimeout(Integer.parseInt(conf.get("hadoop.p2p.info.timeout")));

            /*DataOutputStream out = new DataOutputStream(
                new BufferedOutputStream(NetUtils.getOutputStream(jsock), 
                             DataNode.SMALL_BUFFER_SIZE));
            DataInputStream in = new DataInputStream(NetUtils.getInputStream(jsock));*/
            DataOutputStream out = new DataOutputStream(new BufferedOutputStream(jsock.getOutputStream()));
            DataInputStream in = new DataInputStream(jsock.getInputStream());

            // get block MD5
            try {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("write to " + datanodes[j].getName() + ": "
                            + DataTransferProtocol.OP_BLOCK_CHECKSUM + ", block=" + block);
                }
                out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                out.write(DataTransferProtocol.OP_BLOCK_CHECKSUM);
                out.writeLong(block.getBlockId());
                out.writeLong(block.getGenerationStamp());
                out.flush();

                final short reply = in.readShort();
                if (reply != DataTransferProtocol.OP_STATUS_SUCCESS) {
                    throw new IOException("Bad response " + reply + " for block " + block + " from datanode "
                            + datanodes[j].getName());
                }

                //read byte-per-checksum
                final int bpc = in.readInt();
                if (i == 0) { //first block
                    bytesPerCRC = bpc;
                } else if (bpc != bytesPerCRC) {
                    throw new IOException(
                            "Byte-per-checksum not matched: bpc=" + bpc + " but bytesPerCRC=" + bytesPerCRC);
                }

                //read crc-per-block
                final long cpb = in.readLong();
                if (locatedblocks.size() > 1 && i == 0) {
                    crcPerBlock = cpb;
                }

                //read md5
                final MD5Hash md5 = MD5Hash.read(in);
                md5.write(md5out);

                done = true;

                if (LOG.isDebugEnabled()) {
                    if (i == 0) {
                        LOG.debug("set bytesPerCRC=" + bytesPerCRC + ", crcPerBlock=" + crcPerBlock);
                    }
                    LOG.debug("got reply from " + datanodes[j].getName() + ": md5=" + md5);
                }
            } catch (IOException ie) {
                LOG.warn("src=" + src + ", datanodes[" + j + "].getName()=" + datanodes[j].getName(), ie);
            } finally {
                IOUtils.closeStream(in);
                IOUtils.closeStream(out);
                IOUtils.closeSocket(jsock);
            }
        }

        if (!done) {
            throw new IOException("Fail to get block MD5 for " + block);
        }
    }

    //compute file MD5
    final MD5Hash fileMD5 = MD5Hash.digest(md5out.getData());
    return new MD5MD5CRC32FileChecksum(bytesPerCRC, crcPerBlock, fileMD5);
}

From source file:org.apache.hadoop.hdfs.server.datanode.DataXceiver.java

/**
 * Write a block to disk.
 * 
 * @param in The stream to read from
 * @throws IOException
 */
private void writeBlock(DataInputStream in) throws IOException {
    DatanodeInfo srcDataNode = null;
    LOG.debug("writeBlock receive buf size " + s.getReceiveBufferSize() + " tcp no delay " + s.getTcpNoDelay());
    //
    // Read in the header
    //
    Block block = new Block(in.readLong(), dataXceiverServer.estimateBlockSize, in.readLong());
    LOG.info("Receiving block " + block + " src: " + remoteAddress + " dest: " + localAddress);
    int pipelineSize = in.readInt(); // num of datanodes in entire pipeline
    boolean isRecovery = in.readBoolean(); // is this part of recovery?
    String client = Text.readString(in); // working on behalf of this client
    boolean hasSrcDataNode = in.readBoolean(); // is src node info present
    if (hasSrcDataNode) {
        srcDataNode = new DatanodeInfo();
        srcDataNode.readFields(in);
    }
    int numTargets = in.readInt();
    if (numTargets < 0) {
        throw new IOException("Mislabelled incoming datastream.");
    }
    DatanodeInfo targets[] = new DatanodeInfo[numTargets];
    for (int i = 0; i < targets.length; i++) {
        DatanodeInfo tmp = new DatanodeInfo();
        tmp.readFields(in);
        targets[i] = tmp;
    }
    Token<BlockTokenIdentifier> accessToken = new Token<BlockTokenIdentifier>();
    accessToken.readFields(in);
    DataOutputStream replyOut = null; // stream to prev target
    replyOut = new DataOutputStream(NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
    if (datanode.isBlockTokenEnabled) {
        try {
            datanode.blockTokenSecretManager.checkAccess(accessToken, null, block,
                    BlockTokenSecretManager.AccessMode.WRITE);
        } catch (InvalidToken e) {
            try {
                if (client.length() != 0) {
                    replyOut.writeShort((short) DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN);
                    Text.writeString(replyOut, datanode.dnRegistration.getName());
                    replyOut.flush();
                }
                throw new IOException("Access token verification failed, for client " + remoteAddress
                        + " for OP_WRITE_BLOCK for block " + block);
            } finally {
                IOUtils.closeStream(replyOut);
            }
        }
    }

    DataOutputStream mirrorOut = null; // stream to next target
    DataInputStream mirrorIn = null; // reply from next target
    Socket mirrorSock = null; // socket to next target
    BlockReceiver blockReceiver = null; // responsible for data handling
    String mirrorNode = null; // the name:port of next target
    String firstBadLink = ""; // first datanode that failed in connection setup
    short mirrorInStatus = (short) DataTransferProtocol.OP_STATUS_SUCCESS;
    try {
        // open a block receiver and check if the block does not exist
        blockReceiver = new BlockReceiver(block, in, s.getRemoteSocketAddress().toString(),
                s.getLocalSocketAddress().toString(), isRecovery, client, srcDataNode, datanode);

        //
        // Open network conn to backup machine, if 
        // appropriate
        //
        if (targets.length > 0) {
            InetSocketAddress mirrorTarget = null;
            // Connect to backup machine
            mirrorNode = targets[0].getName();
            mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
            mirrorSock = datanode.newSocket();
            try {
                int timeoutValue = datanode.socketTimeout + (HdfsConstants.READ_TIMEOUT_EXTENSION * numTargets);
                int writeTimeout = datanode.socketWriteTimeout
                        + (HdfsConstants.WRITE_TIMEOUT_EXTENSION * numTargets);
                NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
                mirrorSock.setSoTimeout(timeoutValue);
                mirrorSock.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE);
                mirrorOut = new DataOutputStream(new BufferedOutputStream(
                        NetUtils.getOutputStream(mirrorSock, writeTimeout), SMALL_BUFFER_SIZE));
                mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock));

                // Write header: Copied from DFSClient.java!
                mirrorOut.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                mirrorOut.write(DataTransferProtocol.OP_WRITE_BLOCK);
                mirrorOut.writeLong(block.getBlockId());
                mirrorOut.writeLong(block.getGenerationStamp());
                mirrorOut.writeInt(pipelineSize);
                mirrorOut.writeBoolean(isRecovery);
                Text.writeString(mirrorOut, client);
                mirrorOut.writeBoolean(hasSrcDataNode);
                if (hasSrcDataNode) { // pass src node information
                    srcDataNode.write(mirrorOut);
                }
                mirrorOut.writeInt(targets.length - 1);
                for (int i = 1; i < targets.length; i++) {
                    targets[i].write(mirrorOut);
                }
                accessToken.write(mirrorOut);

                blockReceiver.writeChecksumHeader(mirrorOut);
                mirrorOut.flush();

                // read connect ack (only for clients, not for replication req)
                if (client.length() != 0) {
                    mirrorInStatus = mirrorIn.readShort();
                    firstBadLink = Text.readString(mirrorIn);
                    if (LOG.isDebugEnabled() || mirrorInStatus != DataTransferProtocol.OP_STATUS_SUCCESS) {
                        LOG.info("Datanode " + targets.length + " got response for connect ack "
                                + " from downstream datanode with firstbadlink as " + firstBadLink);
                    }
                }

            } catch (IOException e) {
                if (client.length() != 0) {
                    replyOut.writeShort((short) DataTransferProtocol.OP_STATUS_ERROR);
                    Text.writeString(replyOut, mirrorNode);
                    replyOut.flush();
                }
                IOUtils.closeStream(mirrorOut);
                mirrorOut = null;
                IOUtils.closeStream(mirrorIn);
                mirrorIn = null;
                IOUtils.closeSocket(mirrorSock);
                mirrorSock = null;
                if (client.length() > 0) {
                    throw e;
                } else {
                    LOG.info(datanode.dnRegistration + ":Exception transferring block " + block + " to mirror "
                            + mirrorNode + ". continuing without the mirror.\n"
                            + StringUtils.stringifyException(e));
                }
            }
        }

        // send connect ack back to source (only for clients)
        if (client.length() != 0) {
            if (LOG.isDebugEnabled() || mirrorInStatus != DataTransferProtocol.OP_STATUS_SUCCESS) {
                LOG.info("Datanode " + targets.length + " forwarding connect ack to upstream firstbadlink is "
                        + firstBadLink);
            }
            replyOut.writeShort(mirrorInStatus);
            Text.writeString(replyOut, firstBadLink);
            replyOut.flush();
        }

        // receive the block and mirror to the next target
        String mirrorAddr = (mirrorSock == null) ? null : mirrorNode;
        blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut, mirrorAddr, null, targets.length);

        // if this write is for a replication request (and not
        // from a client), then confirm block. For client-writes,
        // the block is finalized in the PacketResponder.
        if (client.length() == 0) {
            datanode.notifyNamenodeReceivedBlock(block, DataNode.EMPTY_DEL_HINT);
            LOG.info("Received block " + block + " src: " + remoteAddress + " dest: " + localAddress
                    + " of size " + block.getNumBytes());
        }

        if (datanode.blockScanner != null) {
            datanode.blockScanner.addBlock(block);
        }

    } catch (IOException ioe) {
        LOG.info("writeBlock " + block + " received exception " + ioe);
        throw ioe;
    } finally {
        // close all opened streams
        IOUtils.closeStream(mirrorOut);
        IOUtils.closeStream(mirrorIn);
        IOUtils.closeStream(replyOut);
        IOUtils.closeSocket(mirrorSock);
        IOUtils.closeStream(blockReceiver);
    }
}
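
A side note on the (short) casts in the writeShort calls above: DataOutputStream.writeShort(int) takes an int and writes only its low-order 16 bits, so the casts are harmless but not required. The corresponding caveat on the reading side is that a value outside the short range silently wraps when it comes back through readShort. A small sketch, using only the JDK:

import java.io.*;

public class WrapDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(buf)) {
            out.writeShort(70000); // only the low 16 bits survive: 70000 - 65536 = 4464
        }
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()))) {
            System.out.println(in.readShort()); // prints 4464, not 70000
        }
    }
}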

From source file:org.apache.hadoop.hdfs.DFSClient.java

/**
 * Get the checksum of a file.
 * @param src The file path
 * @return The checksum 
 */
public static MD5MD5CRC32FileChecksum getFileChecksum(String src, ClientProtocol namenode,
        SocketFactory socketFactory, int socketTimeout) throws IOException {
    //get all block locations
    LocatedBlocks blockLocations = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE);
    if (null == blockLocations) {
        throw new FileNotFoundException("File does not exist: " + src);
    }
    List<LocatedBlock> locatedblocks = blockLocations.getLocatedBlocks();
    final DataOutputBuffer md5out = new DataOutputBuffer();
    int bytesPerCRC = 0;
    long crcPerBlock = 0;
    boolean refetchBlocks = false;
    int lastRetriedIndex = -1;

    //get block checksum for each block
    for (int i = 0; i < locatedblocks.size(); i++) {
        if (refetchBlocks) { // refetch to get fresh tokens
            blockLocations = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE);
            if (null == blockLocations) {
                throw new FileNotFoundException("File does not exist: " + src);
            }
            locatedblocks = blockLocations.getLocatedBlocks();
            refetchBlocks = false;
        }
        LocatedBlock lb = locatedblocks.get(i);
        final Block block = lb.getBlock();
        final DatanodeInfo[] datanodes = lb.getLocations();

        //try each datanode location of the block
        final int timeout = (socketTimeout > 0)
                ? (socketTimeout + HdfsConstants.READ_TIMEOUT_EXTENSION * datanodes.length)
                : 0;

        boolean done = false;
        for (int j = 0; !done && j < datanodes.length; j++) {
            Socket sock = null;
            DataOutputStream out = null;
            DataInputStream in = null;

            try {
                //connect to a datanode
                sock = socketFactory.createSocket();
                NetUtils.connect(sock, NetUtils.createSocketAddr(datanodes[j].getName()), timeout);
                sock.setSoTimeout(timeout);

                out = new DataOutputStream(
                        new BufferedOutputStream(NetUtils.getOutputStream(sock), DataNode.SMALL_BUFFER_SIZE));
                in = new DataInputStream(NetUtils.getInputStream(sock));

                if (LOG.isDebugEnabled()) {
                    LOG.debug("write to " + datanodes[j].getName() + ": "
                            + DataTransferProtocol.OP_BLOCK_CHECKSUM + ", block=" + block);
                }

                // get block MD5
                out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                out.write(DataTransferProtocol.OP_BLOCK_CHECKSUM);
                out.writeLong(block.getBlockId());
                out.writeLong(block.getGenerationStamp());
                lb.getBlockToken().write(out);
                out.flush();

                final short reply = in.readShort();
                if (reply != DataTransferProtocol.OP_STATUS_SUCCESS) {
                    if (reply == DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN && i > lastRetriedIndex) {
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM " + "for file "
                                    + src + " for block " + block + " from datanode " + datanodes[j].getName()
                                    + ". Will retry the block once.");
                        }
                        lastRetriedIndex = i;
                        done = true; // actually it's not done; but we'll retry
                        i--; // repeat at i-th block
                        refetchBlocks = true;
                        break;
                    } else {
                        throw new IOException("Bad response " + reply + " for block " + block
                                + " from datanode " + datanodes[j].getName());
                    }
                }

                //read byte-per-checksum
                final int bpc = in.readInt();
                if (i == 0) { //first block
                    bytesPerCRC = bpc;
                } else if (bpc != bytesPerCRC) {
                    throw new IOException(
                            "Byte-per-checksum not matched: bpc=" + bpc + " but bytesPerCRC=" + bytesPerCRC);
                }

                //read crc-per-block
                final long cpb = in.readLong();
                if (locatedblocks.size() > 1 && i == 0) {
                    crcPerBlock = cpb;
                }

                //read md5
                final MD5Hash md5 = MD5Hash.read(in);
                md5.write(md5out);

                done = true;

                if (LOG.isDebugEnabled()) {
                    if (i == 0) {
                        LOG.debug("set bytesPerCRC=" + bytesPerCRC + ", crcPerBlock=" + crcPerBlock);
                    }
                    LOG.debug("got reply from " + datanodes[j].getName() + ": md5=" + md5);
                }
            } catch (IOException ie) {
                LOG.warn("src=" + src + ", datanodes[" + j + "].getName()=" + datanodes[j].getName(), ie);
            } finally {
                IOUtils.closeStream(in);
                IOUtils.closeStream(out);
                IOUtils.closeSocket(sock);
            }
        }

        if (!done) {
            throw new IOException("Fail to get block MD5 for " + block);
        }
    }

    //compute file MD5
    final MD5Hash fileMD5 = MD5Hash.digest(md5out.getData());
    return new MD5MD5CRC32FileChecksum(bytesPerCRC, crcPerBlock, fileMD5);
}

From source file:VASSAL.tools.imports.adc2.MapBoard.java

@Override
protected void load(File f) throws IOException {
    super.load(f);
    DataInputStream in = null;

    try {
        in = new DataInputStream(new BufferedInputStream(new FileInputStream(f)));

        baseName = stripExtension(f.getName());
        path = f.getPath();
        int header = in.readByte();
        if (header != -3)
            throw new FileFormatException("Invalid Mapboard File Header");

        // don't know what these do.
        in.readFully(new byte[2]);

        // get the symbol set
        String s = readWindowsFileName(in);
        String symbolSetFileName = forceExtension(s, "set");
        set = new SymbolSet();
        File setFile = action.getCaseInsensitiveFile(new File(symbolSetFileName), f, true,
                new ExtensionFileFilter(ADC2Utils.SET_DESCRIPTION, new String[] { ADC2Utils.SET_EXTENSION }));
        if (setFile == null)
            throw new FileNotFoundException("Unable to find symbol set file.");
        set.importFile(action, setFile);

        in.readByte(); // ignored

        columns = ADC2Utils.readBase250Word(in);
        rows = ADC2Utils.readBase250Word(in);
        // presumably, they're all the same size (and they're square)
        int hexSize = set.getMapBoardSymbolSize();

        // each block read separately
        readHexDataBlock(in);
        readPlaceNameBlock(in);
        readHexSideBlock(in);
        readLineDefinitionBlock(in);
        readAttributeBlock(in);
        readMapSheetBlock(in);
        readHexLineBlock(in);
        readLineDrawPriorityBlock(in);
        // end of data blocks

        int orientation = in.read();
        switch (orientation) {
        case 0:
        case 1: // vertical hex orientation or grid offset column
            if (set.getMapBoardSymbolShape() == SymbolSet.Shape.SQUARE)
                layout = new GridOffsetColumnLayout(hexSize, columns, rows);
            else
                layout = new VerticalHexLayout(hexSize, columns, rows);
            break;
        case 2: // horizontal hex orientation or grid offset row
            if (set.getMapBoardSymbolShape() == SymbolSet.Shape.SQUARE)
                layout = new GridOffsetRowLayout(hexSize, columns, rows);
            else
                layout = new HorizontalHexLayout(hexSize, columns, rows);
            break;
        default: // square grid -- no offset
            layout = new GridLayout(hexSize, columns, rows);
        }

        /* int saveMapPosition = */ in.readByte();

        /* int mapViewingPosition = */ in.readShort(); // probably base-250

        /* int mapViewingZoomLevel = */ in.readShort();

        in.readByte(); // totally unknown

        // strangely, more blocks
        readTableColorBlock(in);
        readHexNumberingBlock(in);

        // TODO: default map item drawing order appears to be different for different maps.
        try { // optional blocks
            readMapBoardOverlaySymbolBlock(in);
            readVersionBlock(in);
            readMapItemDrawingOrderBlock(in);
            readMapItemDrawFlagBlock(in);
        } catch (ADC2Utils.NoMoreBlocksException e) {
        }

        in.close();
    } finally {
        IOUtils.closeQuietly(in);
    }
}
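
One caveat when parsing third-party binary formats like this ADC2 map file: DataInputStream always reads big-endian, with no API to change the byte order. If a format stores its 16-bit fields little-endian, the raw readShort result has to be byte-swapped. A hypothetical helper for that case:

import java.io.DataInputStream;
import java.io.IOException;

final class LittleEndian {
    // DataInputStream is big-endian only; swap bytes for little-endian fields.
    static short readShortLE(DataInputStream in) throws IOException {
        return Short.reverseBytes(in.readShort());
    }
}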

From source file:org.ramadda.repository.database.DatabaseManager.java

/**
 * Load an RDB dump file: recreate the tables it describes and insert its rows.
 *
 * @param file the dump file to read
 * @param doDrop whether to drop the existing tables before recreating them
 *
 * @throws Exception on any failure while reading the dump or executing the SQL
 */
public void loadRdbFile(String file, boolean doDrop) throws Exception {

    DataInputStream dis = new DataInputStream(new FileInputStream(file));
    XmlEncoder encoder = new XmlEncoder();
    String tableXml = readString(dis);
    List<TableInfo> tableInfos = (List<TableInfo>) encoder.toObject(tableXml);
    System.err.println("# table infos:" + tableInfos.size());
    Hashtable<String, TableInfo> tables = new Hashtable<String, TableInfo>();
    StringBuffer sql = new StringBuffer();
    StringBuffer drop = new StringBuffer();
    for (TableInfo tableInfo : tableInfos) {
        tables.put(tableInfo.getName(), tableInfo);
        drop.append("drop table " + tableInfo.getName() + ";\n");
        sql.append("CREATE TABLE " + tableInfo.getName() + "  (\n");
        for (int i = 0; i < tableInfo.getColumns().size(); i++) {
            ColumnInfo column = tableInfo.getColumns().get(i);
            if (i > 0) {
                sql.append(",\n");
            }
            sql.append(column.getName());
            sql.append(" ");
            int type = column.getType();

            if (type == ColumnInfo.TYPE_TIMESTAMP) {
                sql.append("ramadda.datetime");
            } else if (type == ColumnInfo.TYPE_VARCHAR) {
                sql.append("varchar(" + column.getSize() + ")");
            } else if (type == ColumnInfo.TYPE_INTEGER) {
                sql.append("int");
            } else if (type == ColumnInfo.TYPE_DOUBLE) {
                sql.append("ramadda.double");
            } else if (type == ColumnInfo.TYPE_BIGINT) {
                sql.append("ramadda.bigint");
            } else if (type == ColumnInfo.TYPE_SMALLINT) {
                sql.append("int");
            } else if (type == ColumnInfo.TYPE_CLOB) {
                sql.append(convertType("clob", column.getSize()));
            } else if (type == ColumnInfo.TYPE_BLOB) {
                sql.append(convertType("blob", column.getSize()));
            } else if (type == ColumnInfo.TYPE_UNKNOWN) {
                //                    sql.append(convertType("blob", column.getSize()));
            } else {
                throw new IllegalStateException("Unknown column type:" + type);
            }
        }
        sql.append(");\n");
        for (IndexInfo indexInfo : tableInfo.getIndices()) {
            sql.append("CREATE INDEX " + indexInfo.getName() + " ON " + tableInfo.getName() + " ("
                    + indexInfo.getColumnName() + ");\n");
        }
    }

    //        System.err.println(drop);
    //        System.err.println(sql);

    //TODO: 
    if (doDrop) {
        loadSql(drop.toString(), true, false);
    }
    loadSql(convertSql(sql.toString()), false, true);

    TableInfo tableInfo = null;
    int rows = 0;
    Connection connection = getConnection();
    try {
        while (true) {
            int what = dis.readInt();
            if (what == DUMPTAG_TABLE) {
                String tableName = readString(dis);
                tableInfo = tables.get(tableName);
                if (tableInfo == null) {
                    throw new IllegalArgumentException("No table:" + tableName);
                }
                if (tableInfo.statement == null) {
                    String insert = SqlUtil.makeInsert(tableInfo.getName(), tableInfo.getColumnNames());
                    tableInfo.statement = connection.prepareStatement(insert);
                }
                System.err.println("importing table:" + tableInfo.getName());

                continue;
            }
            if (what == DUMPTAG_END) {
                break;
            }
            if (what != DUMPTAG_ROW) {
                throw new IllegalArgumentException("Unkown tag:" + what);
            }

            rows++;
            if ((rows % 1000) == 0) {
                System.err.println("rows:" + rows);
            }

            Object[] values = new Object[tableInfo.getColumns().size()];
            int colCnt = 0;
            for (ColumnInfo columnInfo : tableInfo.getColumns()) {
                int type = columnInfo.getType();
                if (type == ColumnInfo.TYPE_TIMESTAMP) {
                    long dttm = dis.readLong();
                    values[colCnt++] = new Date(dttm);
                } else if (type == ColumnInfo.TYPE_VARCHAR) {
                    String s = readString(dis);
                    if ((s != null) && (s.length() > 5000)) {
                        //A hack for old dbs
                        if (tableInfo.getName().equals("metadata")) {
                            s = s.substring(0, 4999);
                            System.err.println("clipping: " + tableInfo.getName() + "." + columnInfo.getName());
                        }

                    }
                    values[colCnt++] = s;
                } else if (type == ColumnInfo.TYPE_INTEGER) {
                    values[colCnt++] = new Integer(dis.readInt());
                } else if (type == ColumnInfo.TYPE_DOUBLE) {
                    values[colCnt++] = new Double(dis.readDouble());
                } else if (type == ColumnInfo.TYPE_CLOB) {
                    values[colCnt++] = readString(dis);
                } else if (type == ColumnInfo.TYPE_BLOB) {
                    values[colCnt++] = readString(dis);
                } else if (type == ColumnInfo.TYPE_BIGINT) {
                    long v = dis.readLong();
                    values[colCnt++] = new Long(v);
                } else if (type == ColumnInfo.TYPE_SMALLINT) {
                    short v = dis.readShort();
                    values[colCnt++] = new Short(v);
                } else if (type == ColumnInfo.TYPE_UNKNOWN) {
                } else {
                    throw new IllegalArgumentException(
                            "Unknown type for table" + tableInfo.getName() + " " + type);
                }
            }
            setValues(tableInfo.statement, values);
            tableInfo.statement.addBatch();
            tableInfo.batchCnt++;
            if (tableInfo.batchCnt > 1000) {
                tableInfo.batchCnt = 0;
                tableInfo.statement.executeBatch();

            }
        }

        //Now finish up the batch
        for (TableInfo ti : tableInfos) {
            if (ti.batchCnt > 0) {
                ti.batchCnt = 0;
                ti.statement.executeBatch();
            }
        }
    } finally {
        IOUtil.close(dis);
        closeConnection(connection);
    }

    System.err.println("imported " + rows + " rows");

}
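
The TYPE_SMALLINT branch above only works because the dump side wrote the column with the matching writeShort call. A minimal sketch of that round trip, with hypothetical column values:

import java.io.*;

public class SmallintRoundTrip {
    public static void main(String[] args) throws IOException {
        short[] columns = { 1, -42, 999 }; // hypothetical SMALLINT column values

        // Dump side: one writeShort per value.
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(buf)) {
            for (short v : columns) {
                out.writeShort(v);
            }
        }

        // Restore side: the matching readShort calls, as in loadRdbFile.
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()))) {
            for (int i = 0; i < columns.length; i++) {
                System.out.println(in.readShort());
            }
        }
    }
}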