Example usage for java.io DataOutputStream writeInt

Introduction

On this page you can find example usage of java.io.DataOutputStream.writeInt, collected from real-world open-source projects.

Prototype

public final void writeInt(int v) throws IOException 

Document

Writes an int to the underlying output stream as four bytes, high byte first.
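
As a quick orientation, here is a minimal, self-contained sketch (plain JDK, no project dependencies) demonstrating that writeInt emits exactly four bytes, high byte first, and that DataInputStream.readInt reverses the encoding:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class WriteIntDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(buffer)) {
            out.writeInt(0x12345678);
        }

        // Four bytes, high byte first: 12 34 56 78
        for (byte b : buffer.toByteArray()) {
            System.out.printf("%02x ", b);
        }
        System.out.println();

        // readInt reassembles the same value from the four bytes
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(buffer.toByteArray()))) {
            System.out.println(Integer.toHexString(in.readInt())); // 12345678
        }
    }
}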

Usage

From source file:org.apache.hadoop.hdfs.server.datanode.DataXceiver.java

/**
 * Write a block to disk.
 * 
 * @param in The stream to read from
 * @throws IOException
 */
private void writeBlock(DataInputStream in) throws IOException {
    DatanodeInfo srcDataNode = null;
    LOG.debug("writeBlock receive buf size " + s.getReceiveBufferSize() + " tcp no delay " + s.getTcpNoDelay());
    //
    // Read in the header
    //
    Block block = new Block(in.readLong(), dataXceiverServer.estimateBlockSize, in.readLong());
    LOG.info("Receiving block " + block + " src: " + remoteAddress + " dest: " + localAddress);
    int pipelineSize = in.readInt(); // num of datanodes in entire pipeline
    boolean isRecovery = in.readBoolean(); // is this part of recovery?
    String client = Text.readString(in); // working on behalf of this client
    boolean hasSrcDataNode = in.readBoolean(); // is src node info present
    if (hasSrcDataNode) {
        srcDataNode = new DatanodeInfo();
        srcDataNode.readFields(in);
    }
    int numTargets = in.readInt();
    if (numTargets < 0) {
        throw new IOException("Mislabelled incoming datastream.");
    }
    DatanodeInfo targets[] = new DatanodeInfo[numTargets];
    for (int i = 0; i < targets.length; i++) {
        DatanodeInfo tmp = new DatanodeInfo();
        tmp.readFields(in);
        targets[i] = tmp;
    }
    Token<BlockTokenIdentifier> accessToken = new Token<BlockTokenIdentifier>();
    accessToken.readFields(in);
    DataOutputStream replyOut = null; // stream to prev target
    replyOut = new DataOutputStream(NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
    if (datanode.isBlockTokenEnabled) {
        try {
            datanode.blockTokenSecretManager.checkAccess(accessToken, null, block,
                    BlockTokenSecretManager.AccessMode.WRITE);
        } catch (InvalidToken e) {
            try {
                if (client.length() != 0) {
                    replyOut.writeShort((short) DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN);
                    Text.writeString(replyOut, datanode.dnRegistration.getName());
                    replyOut.flush();
                }
                throw new IOException("Access token verification failed, for client " + remoteAddress
                        + " for OP_WRITE_BLOCK for block " + block);
            } finally {
                IOUtils.closeStream(replyOut);
            }
        }
    }

    DataOutputStream mirrorOut = null; // stream to next target
    DataInputStream mirrorIn = null; // reply from next target
    Socket mirrorSock = null; // socket to next target
    BlockReceiver blockReceiver = null; // responsible for data handling
    String mirrorNode = null; // the name:port of next target
    String firstBadLink = ""; // first datanode that failed in connection setup
    short mirrorInStatus = (short) DataTransferProtocol.OP_STATUS_SUCCESS;
    try {
        // open a block receiver and check if the block does not exist
        blockReceiver = new BlockReceiver(block, in, s.getRemoteSocketAddress().toString(),
                s.getLocalSocketAddress().toString(), isRecovery, client, srcDataNode, datanode);

        //
        // Open network conn to backup machine, if 
        // appropriate
        //
        if (targets.length > 0) {
            InetSocketAddress mirrorTarget = null;
            // Connect to backup machine
            mirrorNode = targets[0].getName();
            mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
            mirrorSock = datanode.newSocket();
            try {
                int timeoutValue = datanode.socketTimeout + (HdfsConstants.READ_TIMEOUT_EXTENSION * numTargets);
                int writeTimeout = datanode.socketWriteTimeout
                        + (HdfsConstants.WRITE_TIMEOUT_EXTENSION * numTargets);
                NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
                mirrorSock.setSoTimeout(timeoutValue);
                mirrorSock.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE);
                mirrorOut = new DataOutputStream(new BufferedOutputStream(
                        NetUtils.getOutputStream(mirrorSock, writeTimeout), SMALL_BUFFER_SIZE));
                mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock));

                // Write header: Copied from DFSClient.java!
                mirrorOut.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                mirrorOut.write(DataTransferProtocol.OP_WRITE_BLOCK);
                mirrorOut.writeLong(block.getBlockId());
                mirrorOut.writeLong(block.getGenerationStamp());
                mirrorOut.writeInt(pipelineSize);
                mirrorOut.writeBoolean(isRecovery);
                Text.writeString(mirrorOut, client);
                mirrorOut.writeBoolean(hasSrcDataNode);
                if (hasSrcDataNode) { // pass src node information
                    srcDataNode.write(mirrorOut);
                }
                mirrorOut.writeInt(targets.length - 1);
                for (int i = 1; i < targets.length; i++) {
                    targets[i].write(mirrorOut);
                }
                accessToken.write(mirrorOut);

                blockReceiver.writeChecksumHeader(mirrorOut);
                mirrorOut.flush();

                // read connect ack (only for clients, not for replication req)
                if (client.length() != 0) {
                    mirrorInStatus = mirrorIn.readShort();
                    firstBadLink = Text.readString(mirrorIn);
                    if (LOG.isDebugEnabled() || mirrorInStatus != DataTransferProtocol.OP_STATUS_SUCCESS) {
                        LOG.info("Datanode " + targets.length + " got response for connect ack "
                                + " from downstream datanode with firstbadlink as " + firstBadLink);
                    }
                }

            } catch (IOException e) {
                if (client.length() != 0) {
                    replyOut.writeShort((short) DataTransferProtocol.OP_STATUS_ERROR);
                    Text.writeString(replyOut, mirrorNode);
                    replyOut.flush();
                }
                IOUtils.closeStream(mirrorOut);
                mirrorOut = null;
                IOUtils.closeStream(mirrorIn);
                mirrorIn = null;
                IOUtils.closeSocket(mirrorSock);
                mirrorSock = null;
                if (client.length() > 0) {
                    throw e;
                } else {
                    LOG.info(datanode.dnRegistration + ":Exception transferring block " + block + " to mirror "
                            + mirrorNode + ". continuing without the mirror.\n"
                            + StringUtils.stringifyException(e));
                }
            }
        }

        // send connect ack back to source (only for clients)
        if (client.length() != 0) {
            if (LOG.isDebugEnabled() || mirrorInStatus != DataTransferProtocol.OP_STATUS_SUCCESS) {
                LOG.info("Datanode " + targets.length + " forwarding connect ack to upstream firstbadlink is "
                        + firstBadLink);
            }
            replyOut.writeShort(mirrorInStatus);
            Text.writeString(replyOut, firstBadLink);
            replyOut.flush();
        }

        // receive the block and mirror to the next target
        String mirrorAddr = (mirrorSock == null) ? null : mirrorNode;
        blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut, mirrorAddr, null, targets.length);

        // if this write is for a replication request (and not
        // from a client), then confirm block. For client-writes,
        // the block is finalized in the PacketResponder.
        if (client.length() == 0) {
            datanode.notifyNamenodeReceivedBlock(block, DataNode.EMPTY_DEL_HINT);
            LOG.info("Received block " + block + " src: " + remoteAddress + " dest: " + localAddress
                    + " of size " + block.getNumBytes());
        }

        if (datanode.blockScanner != null) {
            datanode.blockScanner.addBlock(block);
        }

    } catch (IOException ioe) {
        LOG.info("writeBlock " + block + " received exception " + ioe);
        throw ioe;
    } finally {
        // close all opened streams
        IOUtils.closeStream(mirrorOut);
        IOUtils.closeStream(mirrorIn);
        IOUtils.closeStream(replyOut);
        IOUtils.closeSocket(mirrorSock);
        IOUtils.closeStream(blockReceiver);
    }
}
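
The writeInt calls in the header above carry the pipeline size and the number of remaining targets, so the next datanode knows how many DatanodeInfo records follow. A hedged sketch of that count-prefixed pattern in isolation (names here are illustrative, not Hadoop's):

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

// Illustrative only: length-prefix a list with writeInt so the reader knows
// how many records to expect, mirroring mirrorOut.writeInt(targets.length - 1).
public class CountPrefixedList {
    static void writeNames(DataOutputStream out, List<String> names) throws IOException {
        out.writeInt(names.size()); // count first, four bytes, high byte first
        for (String name : names) {
            out.writeUTF(name);     // then each record
        }
    }

    static List<String> readNames(DataInputStream in) throws IOException {
        int count = in.readInt();   // symmetric readInt on the receiving side
        List<String> names = new ArrayList<String>(count);
        for (int i = 0; i < count; i++) {
            names.add(in.readUTF());
        }
        return names;
    }
}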

From source file:com.yahoo.pulsar.testclient.LoadSimulationClient.java

private void handle(final byte command, final DataInputStream inputStream, final DataOutputStream outputStream)
        throws Exception {
    final TradeConfiguration tradeConf = new TradeConfiguration();
    tradeConf.command = command;
    switch (command) {
    case CHANGE_COMMAND:
        // Change the topic's settings if it exists. Report whether the
        // topic was found on this server.
        decodeProducerOptions(tradeConf, inputStream);
        if (topicsToTradeUnits.containsKey(tradeConf.topic)) {
            topicsToTradeUnits.get(tradeConf.topic).change(tradeConf);
            outputStream.write(FOUND_TOPIC);
        } else {
            outputStream.write(NO_SUCH_TOPIC);
        }
        break;
    case STOP_COMMAND:
        // Stop the topic if it exists. Report whether the topic was found,
        // and whether it was already stopped.
        tradeConf.topic = inputStream.readUTF();
        if (topicsToTradeUnits.containsKey(tradeConf.topic)) {
            final boolean wasStopped = topicsToTradeUnits.get(tradeConf.topic).stop.getAndSet(true);
            outputStream.write(wasStopped ? REDUNDANT_COMMAND : FOUND_TOPIC);
        } else {
            outputStream.write(NO_SUCH_TOPIC);
        }
        break;
    case TRADE_COMMAND:
        // Create the topic. It is assumed that the topic does not already
        // exist.
        decodeProducerOptions(tradeConf, inputStream);
        final TradeUnit tradeUnit = new TradeUnit(tradeConf, client, producerConf, consumerConf, payloadCache);
        topicsToTradeUnits.put(tradeConf.topic, tradeUnit);
        executor.submit(() -> {
            try {
                tradeUnit.start();
            } catch (Exception ex) {
                throw new RuntimeException(ex);
            }
        });
        // Tell controller topic creation is finished.
        outputStream.write(NO_SUCH_TOPIC);
        break;
    case CHANGE_GROUP_COMMAND:
        // Change the settings of all topics belonging to a group. Report
        // the number of topics changed.
        decodeGroupOptions(tradeConf, inputStream);
        tradeConf.size = inputStream.readInt();
        tradeConf.rate = inputStream.readDouble();
        // See if a topic belongs to this tenant and group using this regex.
        final String groupRegex = ".*://.*/" + tradeConf.tenant + "/" + tradeConf.group + "-.*/.*";
        int numFound = 0;
        for (Map.Entry<String, TradeUnit> entry : topicsToTradeUnits.entrySet()) {
            final String destination = entry.getKey();
            final TradeUnit unit = entry.getValue();
            if (destination.matches(groupRegex)) {
                ++numFound;
                unit.change(tradeConf);
            }
        }
        outputStream.writeInt(numFound);
        break;
    case STOP_GROUP_COMMAND:
        // Stop all topics belonging to a group. Report the number of topics
        // stopped.
        decodeGroupOptions(tradeConf, inputStream);
        // See if a topic belongs to this tenant and group using this regex.
        final String regex = ".*://.*/" + tradeConf.tenant + "/" + tradeConf.group + "-.*/.*";
        int numStopped = 0;
        for (Map.Entry<String, TradeUnit> entry : topicsToTradeUnits.entrySet()) {
            final String destination = entry.getKey();
            final TradeUnit unit = entry.getValue();
            if (destination.matches(regex) && !unit.stop.getAndSet(true)) {
                ++numStopped;
            }
        }
        outputStream.writeInt(numStopped);
        break;
    default:
        throw new IllegalArgumentException("Unrecognized command code received: " + command);
    }
    outputStream.flush();
}
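
Each int count written above with outputStream.writeInt is consumed by a matching readInt on the controller side. A minimal hedged sketch of that round trip (the socket setup and group-option encoding here are assumptions, not Pulsar's actual controller code):

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.Socket;

public class GroupCommandClient {
    // Illustrative: issue a stop-group command and read back the int that
    // handle() writes with outputStream.writeInt(numStopped).
    static int stopGroup(Socket socket, byte stopGroupCommand, String tenant, String group)
            throws IOException {
        DataOutputStream out = new DataOutputStream(socket.getOutputStream());
        DataInputStream in = new DataInputStream(socket.getInputStream());
        out.write(stopGroupCommand);
        out.writeUTF(tenant); // assumed encoding of the group options
        out.writeUTF(group);
        out.flush();
        return in.readInt();  // number of topics stopped
    }
}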

From source file:voldemort.store.readonly.disk.HadoopStoreWriter.java

@Override
public void write(BytesWritable key, Iterator<BytesWritable> iterator, Reporter reporter) throws IOException {

    // Read chunk id
    int chunkId = ReadOnlyUtils.chunk(key.getBytes(), getNumChunks());

    initFileStreams(chunkId);

    // Write key and position
    this.indexFileStream[chunkId].write(key.getBytes(), 0, key.getLength());
    this.indexFileSizeInBytes[chunkId] += key.getLength();
    this.indexFileStream[chunkId].writeInt(this.position[chunkId]);
    this.indexFileSizeInBytes[chunkId] += ByteUtils.SIZE_OF_INT;

    // Run key through checksum digest
    if (this.checkSumDigestIndex[chunkId] != null) {
        this.checkSumDigestIndex[chunkId].update(key.getBytes(), 0, key.getLength());
        this.checkSumDigestIndex[chunkId].update(this.position[chunkId]);
    }

    short numTuples = 0;
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    DataOutputStream valueStream = new DataOutputStream(stream);

    while (iterator.hasNext()) {
        BytesWritable writable = iterator.next();
        byte[] valueBytes = writable.getBytes();
        int offsetTillNow = 0;

        /**
         * Below, we read the node id, partition id and replica type of each record
         * coming in, and validate that it is consistent with the other IDs seen so
         * far. This is to catch potential regressions to the shuffling logic in:
         *
         * {@link AbstractStoreBuilderConfigurable#getPartition(byte[], byte[], int)}
         */

        // Read node Id
        int currentNodeId = ByteUtils.readInt(valueBytes, offsetTillNow);
        if (this.nodeId == -1) {
            this.nodeId = currentNodeId;
        } else if (this.nodeId != currentNodeId) {
            throw new IllegalArgumentException("Should not get various nodeId shuffled to us! "
                    + "First nodeId seen: " + this.nodeId + ", currentNodeId: " + currentNodeId);
        }
        offsetTillNow += ByteUtils.SIZE_OF_INT;

        // Read partition id
        int currentPartitionId = ByteUtils.readInt(valueBytes, offsetTillNow);
        if (this.partitionId == -1) {
            this.partitionId = currentPartitionId;
        } else if (this.partitionId != currentPartitionId) {
            throw new IllegalArgumentException(
                    "Should not get various partitionId shuffled to us! " + "First partitionId seen: "
                            + this.partitionId + ", currentPartitionId: " + currentPartitionId);
        }
        offsetTillNow += ByteUtils.SIZE_OF_INT;

        // Read replica type
        if (getSaveKeys()) {
            int currentReplicaType = (int) ByteUtils.readBytes(valueBytes, offsetTillNow,
                    ByteUtils.SIZE_OF_BYTE);
            if (this.replicaType == -1) {
                this.replicaType = currentReplicaType;
            } else if (this.replicaType != currentReplicaType) {
                throw new IllegalArgumentException(
                        "Should not get various replicaType shuffled to us! " + "First replicaType seen: "
                                + this.replicaType + ", currentReplicaType: " + currentReplicaType);
            }

            if (getBuildPrimaryReplicasOnly() && this.replicaType > 0) {
                throw new IllegalArgumentException("Should not get any replicaType > 0 shuffled to us"
                        + " when buildPrimaryReplicasOnly mode is enabled!");
            }
            offsetTillNow += ByteUtils.SIZE_OF_BYTE;
        }

        int valueLength = writable.getLength() - offsetTillNow;
        if (getSaveKeys()) {
            // Write ( key_length, value_length, key, value )
            valueStream.write(valueBytes, offsetTillNow, valueLength);
        } else {
            // Write (value_length + value)
            valueStream.writeInt(valueLength);
            valueStream.write(valueBytes, offsetTillNow, valueLength);
        }

        numTuples++;

        // If we have multiple values for this md5 that is a collision,
        // throw an exception--either the data itself has duplicates, there
        // are trillions of keys, or someone is attempting something
        // malicious ( We obviously expect collisions when we save keys )
        if (!getSaveKeys() && numTuples > 1)
            throw new VoldemortException("Duplicate keys detected for md5 sum "
                    + ByteUtils.toHexString(ByteUtils.copy(key.getBytes(), 0, key.getLength())));

    }

    if (numTuples < 0) {
        // Overflow
        throw new VoldemortException("Found too many collisions: chunk " + chunkId + " has exceeded "
                + Short.MAX_VALUE + " collisions.");
    } else if (numTuples > 1) {
        // Update number of collisions + max keys per collision
        reporter.incrCounter(CollisionCounter.NUM_COLLISIONS, 1);

        long numCollisions = reporter.getCounter(CollisionCounter.MAX_COLLISIONS).getCounter();
        if (numTuples > numCollisions) {
            reporter.incrCounter(CollisionCounter.MAX_COLLISIONS, numTuples - numCollisions);
        }
    }

    // Flush the value
    valueStream.flush();
    byte[] value = stream.toByteArray();

    // Start writing to file now
    // First, if save keys flag set the number of keys
    if (getSaveKeys()) {

        this.valueFileStream[chunkId].writeShort(numTuples);
        this.valueFileSizeInBytes[chunkId] += ByteUtils.SIZE_OF_SHORT;
        this.position[chunkId] += ByteUtils.SIZE_OF_SHORT;

        if (this.checkSumDigestValue[chunkId] != null) {
            this.checkSumDigestValue[chunkId].update(numTuples);
        }
    }

    this.valueFileStream[chunkId].write(value);
    this.valueFileSizeInBytes[chunkId] += value.length;
    this.position[chunkId] += value.length;

    if (this.checkSumDigestValue[chunkId] != null) {
        this.checkSumDigestValue[chunkId].update(value);
    }

    if (this.position[chunkId] < 0)
        throw new VoldemortException("Chunk overflow exception: chunk " + chunkId + " has exceeded "
                + Integer.MAX_VALUE + " bytes.");

}
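
In the non-saveKeys layout above, each record in the value stream is a writeInt length prefix followed by the raw bytes. A hedged sketch of reading one such record back (helper name is illustrative):

import java.io.DataInputStream;
import java.io.IOException;

public class ValueRecordReader {
    // Illustrative reader for the (value_length, value) layout written above.
    static byte[] readValueRecord(DataInputStream in) throws IOException {
        int valueLength = in.readInt(); // four-byte length prefix from writeInt
        byte[] value = new byte[valueLength];
        in.readFully(value);            // read exactly valueLength bytes
        return value;
    }
}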

From source file:org.alfresco.repo.search.impl.lucene.index.IndexInfo.java

/**
 * @param id String
 * @param toDelete Set<String>
 * @param fileName String
 * @throws IOException
 * @throws FileNotFoundException
 */
private void persistDeletions(String id, Set<String> toDelete, String fileName)
        throws IOException, FileNotFoundException {
    File location = new File(indexDirectory, id).getCanonicalFile();
    if (!location.exists()) {
        if (!location.mkdirs()) {
            throw new IndexerException("Failed to make index directory " + location);
        }
    }
    // Write deletions
    DataOutputStream os = new DataOutputStream(
            new BufferedOutputStream(new FileOutputStream(new File(location, fileName).getCanonicalFile())));
    os.writeInt(toDelete.size());
    for (String ref : toDelete) {
        os.writeUTF(ref);
    }
    os.flush();
    os.close();
}
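
The reader's job is the mirror image: readInt for the count, then that many readUTF calls. A hedged sketch in the same style (assumed, not Alfresco's actual code):

// Assumed counterpart of persistDeletions: read the writeInt count, then
// that many writeUTF entries.
private Set<String> readDeletions(String id, String fileName) throws IOException {
    File location = new File(indexDirectory, id).getCanonicalFile();
    DataInputStream is = new DataInputStream(
            new BufferedInputStream(new FileInputStream(new File(location, fileName).getCanonicalFile())));
    try {
        int size = is.readInt();
        Set<String> toDelete = new HashSet<String>(size);
        for (int i = 0; i < size; i++) {
            toDelete.add(is.readUTF());
        }
        return toDelete;
    } finally {
        is.close();
    }
}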

From source file:org.motechproject.mobile.web.OXDFormUploadServlet.java

/**
 * Processes requests for both HTTP <code>GET</code> and <code>POST</code>
 * methods.
 * 
 * @param request
 *            servlet request
 * @param response
 *            servlet response
 * @throws ServletException
 *             if a servlet-specific error occurs
 * @throws IOException
 *             if an I/O error occurs
 */
@RequestMapping(method = RequestMethod.POST)
public void processRequest(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {

    long startTime = System.currentTimeMillis();

    IMPService impService = (IMPService) appCtx.getBean("impService");
    StudyProcessor studyProcessor = (StudyProcessor) appCtx.getBean("studyProcessor");

    InputStream input = request.getInputStream();
    OutputStream output = response.getOutputStream();

    ZOutputStream zOutput = null; // Wrap the streams for compression

    // Wrap the streams for logical types
    DataInputStream dataInput = null;
    DataOutputStream dataOutput = null;

    // Set the MIME type so clients don't misinterpret
    response.setContentType("application/octet-stream");

    try {
        zOutput = new ZOutputStream(output, JZlib.Z_BEST_COMPRESSION);
        dataInput = new DataInputStream(input);
        dataOutput = new DataOutputStream(zOutput);

        if (rawUploadLog.isInfoEnabled()) {
            byte[] rawPayload = IOUtils.toByteArray(dataInput);
            String hexEncodedPayload = Hex.encodeHexString(rawPayload);
            rawUploadLog.info(hexEncodedPayload);
            // Replace the original input stream with one using read payload
            dataInput.close();
            dataInput = new DataInputStream(new ByteArrayInputStream(rawPayload));
        }

        String name = dataInput.readUTF();
        String password = dataInput.readUTF();
        String serializer = dataInput.readUTF();
        String locale = dataInput.readUTF();

        byte action = dataInput.readByte();

        // TODO Authentication of username and password. Possible M6
        // enhancement
        log.info("uploading: name=" + name + ", password=" + password + ", serializer=" + serializer
                + ", locale=" + locale + ", action=" + action);

        EpihandyXformSerializer serObj = new EpihandyXformSerializer();
        serObj.addDeserializationListener(studyProcessor);

        try {
            Map<Integer, String> formVersionMap = formService.getXForms();
            serObj.deserializeStudiesWithEvents(dataInput, formVersionMap);
        } catch (FormNotFoundException fne) {
            String msg = "failed to deserialize forms: ";
            log.error(msg + fne.getMessage());
            dataOutput.writeByte(ResponseHeader.STATUS_FORMS_STALE);
            response.setStatus(HttpServletResponse.SC_OK);
            return;
        } catch (Exception e) {
            String msg = "failed to deserialize forms";
            log.error(msg, e);
            dataOutput.writeByte(ResponseHeader.STATUS_ERROR);
            response.setStatus(HttpServletResponse.SC_OK);
            return;
        }

        String[][] studyForms = studyProcessor.getConvertedStudies();
        int numForms = studyProcessor.getNumForms();

        log.debug("upload contains: studies=" + studyForms.length + ", forms=" + numForms);

        // Starting processing here, only process until we run out of time
        int processedForms = 0;
        int faultyForms = 0;
        if (studyForms != null && numForms > 0) {
            formprocessing: for (int i = 0; i < studyForms.length; i++) {
                for (int j = 0; j < studyForms[i].length; j++, processedForms++) {

                    if (maxProcessingTime > 0 && System.currentTimeMillis() - startTime > maxProcessingTime)
                        break formprocessing;

                    try {
                        studyForms[i][j] = impService.processXForm(studyForms[i][j]);
                    } catch (Exception ex) {
                        log.error("processing form failed", ex);
                        studyForms[i][j] = ex.getMessage();
                    }
                    if (!impService.getFormProcessSuccess().equalsIgnoreCase(studyForms[i][j])) {
                        faultyForms++;
                    }
                }
            }
        }

        // Write out usual upload response
        dataOutput.writeByte(ResponseHeader.STATUS_SUCCESS);

        dataOutput.writeInt(processedForms);
        dataOutput.writeInt(faultyForms);

        for (int s = 0; s < studyForms.length; s++) {
            for (int f = 0; f < studyForms[s].length; f++) {
                if (!impService.getFormProcessSuccess().equalsIgnoreCase(studyForms[s][f])) {
                    dataOutput.writeByte((byte) s);
                    dataOutput.writeShort((short) f);
                    dataOutput.writeUTF(studyForms[s][f]);
                }
            }
        }

        response.setStatus(HttpServletResponse.SC_OK);
    } catch (Exception e) {
        log.error("failure during upload", e);
    } finally {
        if (dataOutput != null)
            dataOutput.flush();
        if (zOutput != null)
            zOutput.finish();
        response.flushBuffer();
    }
}
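
A client decodes this response in the order it was written: one status byte, two writeInt counts, then one (byte, short, UTF) triple per faulty form. A hedged sketch of that decoding (the client plumbing, including inflating the zlib-compressed stream, is assumed):

// Assumed client-side decoding of the upload response written above.
void readUploadResponse(DataInputStream in) throws IOException {
    byte status = in.readByte();
    if (status != ResponseHeader.STATUS_SUCCESS) {
        return; // STATUS_FORMS_STALE or STATUS_ERROR: no counts follow
    }
    int processedForms = in.readInt();
    int faultyForms = in.readInt();
    for (int i = 0; i < faultyForms; i++) {
        byte study = in.readByte();
        short form = in.readShort();
        String errorMessage = in.readUTF();
        // report the failure for studyForms[study][form] here
    }
}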

From source file:org.ramadda.repository.database.DatabaseManager.java

/**
 * _more_
 *
 * @param os _more_
 * @param all _more_
 * @param actionId _more_
 *
 * @throws Exception _more_
 */
public void makeDatabaseCopy(OutputStream os, boolean all, Object actionId) throws Exception {

    XmlEncoder encoder = new XmlEncoder();
    DataOutputStream dos = new DataOutputStream(os);
    Connection connection = getConnection();
    try {
        HashSet<String> skip = new HashSet<String>();
        skip.add(Tables.SESSIONS.NAME);

        List<TableInfo> tableInfos = getTableInfos(connection, false);
        String xml = encoder.toXml(tableInfos, false);
        writeString(dos, xml);

        int rowCnt = 0;
        System.err.println("Exporting database");
        for (TableInfo tableInfo : tableInfos) {
            if (tableInfo.getName().equalsIgnoreCase("base")) {
                continue;
            }
            if (tableInfo.getName().equalsIgnoreCase("agggregation")) {
                continue;
            }
            if (tableInfo.getName().equalsIgnoreCase("entry")) {
                continue;
            }
            System.err.println("Exporting table: " + tableInfo.getName());
            List<ColumnInfo> columns = tableInfo.getColumns();
            List valueList = new ArrayList();
            Statement statement = execute("select * from " + tableInfo.getName(), 10000000, 0);
            SqlUtil.Iterator iter = getIterator(statement);
            ResultSet results;
            dos.writeInt(DUMPTAG_TABLE);
            writeString(dos, tableInfo.getName());
            if (skip.contains(tableInfo.getName().toLowerCase())) {
                continue;
            }
            while ((results = iter.getNext()) != null) {
                dos.writeInt(DUMPTAG_ROW);
                rowCnt++;
                if ((rowCnt % 1000) == 0) {
                    if (actionId != null) {
                        getActionManager().setActionMessage(actionId, "Written " + rowCnt + " database rows");
                    }
                    System.err.println("rows:" + rowCnt);
                }
                for (int i = 1; i <= columns.size(); i++) {
                    ColumnInfo colInfo = columns.get(i - 1);
                    int type = colInfo.getType();
                    if (type == ColumnInfo.TYPE_TIMESTAMP) {
                        Timestamp ts = results.getTimestamp(i);
                        if (ts == null) {
                            dos.writeLong((long) -1);
                        } else {
                            dos.writeLong(ts.getTime());
                        }
                    } else if (type == ColumnInfo.TYPE_VARCHAR) {
                        writeString(dos, results.getString(i));
                    } else if (type == ColumnInfo.TYPE_TIME) {
                        //TODO: What is the format of a type time?
                        //                            writeString(dos, results.getString(i));
                    } else if (type == ColumnInfo.TYPE_INTEGER) {
                        writeInteger(dos, (Integer) results.getObject(i));
                    } else if (type == ColumnInfo.TYPE_DOUBLE) {
                        writeDouble(dos, (Double) results.getObject(i));
                    } else if (type == ColumnInfo.TYPE_CLOB) {
                        writeString(dos, results.getString(i));
                    } else if (type == ColumnInfo.TYPE_BLOB) {
                        writeString(dos, results.getString(i));
                    } else if (type == ColumnInfo.TYPE_BIGINT) {
                        writeLong(dos, results.getLong(i));
                    } else if (type == ColumnInfo.TYPE_SMALLINT) {
                        dos.writeShort(results.getShort(i));
                    } else if (type == ColumnInfo.TYPE_TINYINT) {
                        //TODO:
                        //dos.write(results.getChar(i));
                    } else {
                        Object object = results.getObject(i);

                        throw new IllegalArgumentException(
                                "Unknown type:" + type + "  c:" + object.getClass().getName());
                    }
                }
            }
        }
        System.err.println("Wrote " + rowCnt + " rows");
    } finally {
        closeConnection(connection);
    }
    //Write the end tag
    dos.writeInt(DUMPTAG_END);
    IOUtil.close(dos);

}
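
Restoring such a dump means reading the same writeInt tags back until DUMPTAG_END appears. A skeleton sketch of the read loop (readString is the assumed counterpart of the writeString helper used above):

// Assumed skeleton of the restore side: the stream alternates writeInt tags
// (DUMPTAG_TABLE, DUMPTAG_ROW, DUMPTAG_END) with their payloads.
void readDatabaseCopy(DataInputStream dis) throws Exception {
    String currentTable = null;
    while (true) {
        int tag = dis.readInt();
        if (tag == DUMPTAG_END) {
            break;
        } else if (tag == DUMPTAG_TABLE) {
            currentTable = readString(dis);
        } else if (tag == DUMPTAG_ROW) {
            // decode one row of currentTable here, using its ColumnInfo types
        } else {
            throw new IllegalArgumentException("Unknown dump tag:" + tag);
        }
    }
}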

From source file:org.apache.hadoop.dfs.FSNamesystem.java

/**
 * Serializes leases.
 */
void saveFilesUnderConstruction(DataOutputStream out) throws IOException {
    synchronized (leaseManager) {
        out.writeInt(leaseManager.countPath()); // write the size

        for (Lease lease : leaseManager.getSortedLeases()) {
            Collection<StringBytesWritable> files = lease.getPaths();
            for (Iterator<StringBytesWritable> i = files.iterator(); i.hasNext();) {
                String path = i.next().getString();

                // verify that path exists in namespace
                INode node = dir.getFileINode(path);
                if (node == null) {
                    throw new IOException(
                            "saveLeases found path " + path + " but no matching entry in namespace.");
                }
                if (!node.isUnderConstruction()) {
                    throw new IOException("saveLeases found path " + path + " but is not under construction.");
                }
                INodeFileUnderConstruction cons = (INodeFileUnderConstruction) node;
                FSImage.writeINodeUnderConstruction(out, cons, path);
            }
        }
    }
}
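
The writeInt size prefix lets the image loader know how many under-construction files follow. A hedged sketch of the corresponding read side (readINodeUnderConstruction is assumed to mirror FSImage.writeINodeUnderConstruction):

// Assumed load-side counterpart: consume the writeInt count, then one
// serialized INodeFileUnderConstruction per iteration.
void loadFilesUnderConstruction(DataInputStream in) throws IOException {
    int count = in.readInt(); // size written by saveFilesUnderConstruction
    for (int i = 0; i < count; i++) {
        INodeFileUnderConstruction cons = FSImage.readINodeUnderConstruction(in);
        // re-register the lease and namespace entry for cons here
    }
}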

From source file:org.openymsg.network.Session.java

/**
 * Transmit a FILETRANSFER packet, to send a binary file to a friend.
 */
protected void transmitFileTransfer(final String to, final String message, final File file)
        throws FileTransferFailedException, IOException {
    if (file == null)
        throw new IllegalArgumentException("Argument 'file' cannot be null.");

    if (!file.isFile())
        throw new IllegalArgumentException(
                "The provided file object does not denote a normal file (but possibly a directory).");

    if (file.length() == 0L)
        throw new FileTransferFailedException("File transfer: empty file");

    final String cookie = this.cookieY + "; " + this.cookieT;

    final byte[] marker = { '2', '9', (byte) 0xc0, (byte) 0x80 };

    // Create a Yahoo packet into 'packet'
    final PacketBodyBuffer body = new PacketBodyBuffer();
    body.addElement("0", this.primaryID.getId());
    body.addElement("5", to);
    body.addElement("28", Long.toString(file.length()));
    body.addElement("27", file.getName());
    body.addElement("14", message);
    byte[] packet = body.getBuffer();

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(baos);
    dos.write(NetworkConstants.MAGIC, 0, 4);
    dos.write(NetworkConstants.VERSION, 0, 4);
    dos.writeShort((packet.length + 4) & 0xFFFF);
    dos.writeShort(ServiceType.FILETRANSFER.getValue() & 0xFFFF);
    dos.writeInt((int) (this.status.getValue() & 0xFFFFFFFF));
    dos.writeInt((int) (this.sessionId & 0xFFFFFFFF));
    dos.write(packet, 0, packet.length);
    dos.write(marker, 0, 4); // Extra 4 bytes : marker before file data
    // (?)

    packet = baos.toByteArray();

    // Send to Yahoo using POST
    String ftHost = Util.fileTransferHost();
    String ftURL = "http://" + ftHost + NetworkConstants.FILE_TF_PORTPATH;
    HttpURLConnection uConn = (HttpURLConnection) (new URL(ftURL).openConnection());
    uConn.setRequestMethod("POST");
    uConn.setDoOutput(true); // POST, not GET
    Util.initURLConnection(uConn);
    uConn.setRequestProperty("Content-Length", Long.toString(file.length() + packet.length));
    uConn.setRequestProperty("User-Agent", NetworkConstants.USER_AGENT);
    // uConn.setRequestProperty("Host",ftHost);
    uConn.setRequestProperty("Cookie", cookie);
    uConn.connect();

    final BufferedOutputStream bos = new BufferedOutputStream(uConn.getOutputStream());
    try {
        bos.write(packet);
        bos.write(Util.getBytesFromFile(file));
        bos.flush();
    } finally {
        bos.close();
    }

    final int ret = uConn.getResponseCode();
    uConn.disconnect();

    if (ret != 200)
        throw new FileTransferFailedException("Server rejected upload");
}
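
The two writeInt calls above pack long-valued fields into their low 32 bits. Because Java ints are signed, recovering the unsigned value on the read side needs a mask back into a long; a small illustrative sketch:

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class Unsigned32 {
    // Write the low 32 bits of a long, as in dos.writeInt((int) (sessionId & 0xFFFFFFFF)).
    static void writeUnsigned32(DataOutputStream out, long value) throws IOException {
        out.writeInt((int) (value & 0xFFFFFFFFL));
    }

    // Read four bytes and widen without sign extension.
    static long readUnsigned32(DataInputStream in) throws IOException {
        return in.readInt() & 0xFFFFFFFFL;
    }
}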

From source file:org.apache.jxtadoop.hdfs.server.datanode.DataXceiver.java

/**
 * Write a block to disk.
 * 
 * @param in The stream to read from
 * @throws IOException
 */
private void writeBlock(DataInputStream in) throws IOException {
    LOG.debug("Mathod called : writeBlock()");
    DatanodeInfo srcDataNode = null;
    LOG.debug("writeBlock receive buf size " + s.getReceiveBufferSize() + " tcp no delay " + s.getTcpNoDelay());
    //
    // Read in the header
    //
    Block block = new Block(in.readLong(), dataXceiverServer.estimateBlockSize, in.readLong());
    LOG.info("Receiving block " + block + " src: " + remoteAddress + " dest: " + localAddress);

    int pipelineSize = in.readInt(); // num of datanodes in entire pipeline
    boolean isRecovery = in.readBoolean(); // is this part of recovery?
    String client = Text.readString(in); // working on behalf of this client

    boolean hasSrcDataNode = in.readBoolean(); // is src node info present
    if (hasSrcDataNode) {
        srcDataNode = new DatanodeInfo();
        srcDataNode.readFields(in);
    }

    int numTargets = in.readInt();
    if (numTargets < 0) {
        throw new IOException("Mislabelled incoming datastream.");
    }

    DatanodeInfo targets[] = new DatanodeInfo[numTargets];
    for (int i = 0; i < targets.length; i++) {
        DatanodeInfo tmp = new DatanodeInfo();
        tmp.readFields(in);
        targets[i] = tmp;
    }

    DataOutputStream mirrorOut = null; // stream to next target
    DataInputStream mirrorIn = null; // reply from next target
    DataOutputStream replyOut = null; // stream to prev target
    JxtaSocket mirrorSock = null; // socket to next target
    BlockReceiver blockReceiver = null; // responsible for data handling
    String mirrorNode = null; // the name:port of next target
    String firstBadLink = ""; // first datanode that failed in connection setup

    try {
        // open a block receiver and check if the block does not exist
        /*blockReceiver = new BlockReceiver(block, in, 
            s.getRemoteSocketAddress().toString(),
            s.getLocalSocketAddress().toString(),
            isRecovery, client, srcDataNode, datanode);*/
        blockReceiver = new BlockReceiver(block, in,
                ((JxtaSocketAddress) s.getRemoteSocketAddress()).getPeerId().toString(),
                ((JxtaSocketAddress) s.getLocalSocketAddress()).getPeerId().toString(), isRecovery, client,
                srcDataNode, datanode);

        // get a connection back to the previous target
        //replyOut = new DataOutputStream(
        //     NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
        ReliableOutputStream replyOutRos = (ReliableOutputStream) s.getOutputStream();
        replyOut = new DataOutputStream(replyOutRos);

        //
        // Open network conn to backup machine, if 
        // appropriate
        //
        if (targets.length > 0) {
            // JxtaSocketAddress mirrorTarget = null;
            // Connect to backup machine
            mirrorNode = targets[0].getPeerId();
            // mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
            // mirrorSock = datanode.newSocket();

            try {
                //int timeoutValue = numTargets * datanode.socketTimeout;
                //int writeTimeout = datanode.socketWriteTimeout + 
                //                   (HdfsConstants.WRITE_TIMEOUT_EXTENSION * numTargets);
                // NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
                mirrorSock = datanode.getDnPeer().getInfoSocket(mirrorNode.toString());
                if (mirrorSock == null)
                    throw new IOException("Failed to get a mirror socket");
                //mirrorSock.setSoTimeout(timeoutValue);
                //mirrorSock.setTcpNoDelay(true);
                //mirrorSock.setSoTimeout(Integer.parseInt(datanode.getConf().get("hadoop.p2p.info.timeout")));
                //mirrorSock.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE);
                /*mirrorOut = new DataOutputStream(
                   new BufferedOutputStream(
                         NetUtils.getOutputStream(mirrorSock, writeTimeout),
                 SMALL_BUFFER_SIZE));
                mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock));
                */
                mirrorOut = new DataOutputStream((ReliableOutputStream) mirrorSock.getOutputStream());
                mirrorIn = new DataInputStream((ReliableInputStream) mirrorSock.getInputStream());

                // Write header: Copied from DFSClient.java!
                mirrorOut.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                mirrorOut.write(DataTransferProtocol.OP_WRITE_BLOCK);
                mirrorOut.writeLong(block.getBlockId());
                mirrorOut.writeLong(block.getGenerationStamp());
                mirrorOut.writeInt(pipelineSize);
                mirrorOut.writeBoolean(isRecovery);
                Text.writeString(mirrorOut, client);
                mirrorOut.writeBoolean(hasSrcDataNode);

                if (hasSrcDataNode) { // pass src node information
                    srcDataNode.write(mirrorOut);
                }

                mirrorOut.writeInt(targets.length - 1);
                for (int i = 1; i < targets.length; i++) {
                    targets[i].write(mirrorOut);
                }

                blockReceiver.writeChecksumHeader(mirrorOut);
                mirrorOut.flush();

                // read connect ack (only for clients, not for replication req)
                if (client.length() != 0) {
                    firstBadLink = Text.readString(mirrorIn);
                    if (LOG.isDebugEnabled() || firstBadLink.length() > 0) {
                        LOG.info("Datanode " + targets.length + " got response for connect ack "
                                + " from downstream datanode with firstbadlink as " + firstBadLink);
                    }
                }

            } catch (SocketTimeoutException ste) {
                LOG.debug("Time out while receiving data on DataXceiver");
                LOG.debug(ste);
                ste.printStackTrace();
            } catch (IOException e) {
                LOG.debug("IOException occurred : " + e.getMessage());
                if (client.length() != 0) {
                    Text.writeString(replyOut, mirrorNode);
                    replyOut.flush();
                }
                IOUtils.closeStream(mirrorOut);
                mirrorOut = null;
                IOUtils.closeStream(mirrorIn);
                mirrorIn = null;
                if (mirrorSock != null) {
                    IOUtils.closeSocket(mirrorSock);
                    mirrorSock = null;
                }
                if (client.length() > 0) {
                    throw e;
                } else {
                    LOG.info(datanode.dnRegistration + ":Exception transferring block " + block + " to mirror "
                            + mirrorNode + ". continuing without the mirror.\n"
                            + StringUtils.stringifyException(e));
                }
            }
        }

        // send connect ack back to source (only for clients)
        if (client.length() != 0) {
            if (LOG.isDebugEnabled() || firstBadLink.length() > 0) {
                LOG.info("Datanode " + targets.length + " forwarding connect ack to upstream firstbadlink is "
                        + firstBadLink);
            }
            Text.writeString(replyOut, firstBadLink);
            replyOut.flush();
        }

        // receive the block and mirror to the next target
        String mirrorAddr = (mirrorSock == null) ? null : mirrorNode;
        blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut, mirrorAddr, null, targets.length);

        // if this write is for a replication request (and not
        // from a client), then confirm block. For client-writes,
        // the block is finalized in the PacketResponder.
        if (client.length() == 0) {
            datanode.notifyNamenodeReceivedBlock(block, DataNode.EMPTY_DEL_HINT);
            LOG.info("Received block " + block + " src: " + remoteAddress + " dest: " + localAddress
                    + " of size " + block.getNumBytes());
        }

        if (datanode.blockScanner != null) {
            datanode.blockScanner.addBlock(block);
        }

    } catch (IOException ioe) {
        LOG.info("writeBlock " + block + " received exception " + ioe);
        throw ioe;
    } catch (Exception e) {
        LOG.warn("Exception occurred in writting block : " + e.getMessage());
    } finally {
        // close all opened streams

        LOG.debug("Finalizing : writeBlock()");
        IOUtils.closeStream(mirrorOut);
        IOUtils.closeStream(mirrorIn);
        IOUtils.closeStream(replyOut);
        IOUtils.closeSocket(mirrorSock);
        IOUtils.closeStream(blockReceiver);
    }
}

From source file:com.cognizant.trumobi.PersonaLauncher.java

private static void writeConfiguration(Context context, LocaleConfiguration configuration) {
    DataOutputStream out = null;
    try {
        out = new DataOutputStream(context.openFileOutput(PREFERENCES, MODE_PRIVATE));
        out.writeUTF(configuration.locale);
        out.writeInt(configuration.mcc);
        out.writeInt(configuration.mnc);
        out.flush();
    } catch (FileNotFoundException e) {
        // Ignore
    } catch (IOException e) {
        // noinspection ResultOfMethodCallIgnored
        context.getFileStreamPath(PREFERENCES).delete();
    } finally {
        if (out != null) {
            try {
                out.close();
            } catch (IOException e) {
                // Ignore
            }
        }
    }
}
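
A matching reader pulls the fields back in the order they were written: readUTF for the locale, then two readInt calls for mcc and mnc. A hedged sketch in the same style (assumed, not necessarily the launcher's actual readConfiguration):

private static void readConfiguration(Context context, LocaleConfiguration configuration) {
    // Assumed counterpart of writeConfiguration: same field order, same file.
    DataInputStream in = null;
    try {
        in = new DataInputStream(context.openFileInput(PREFERENCES));
        configuration.locale = in.readUTF();
        configuration.mcc = in.readInt();
        configuration.mnc = in.readInt();
    } catch (FileNotFoundException e) {
        // Ignore
    } catch (IOException e) {
        // Ignore
    } finally {
        if (in != null) {
            try {
                in.close();
            } catch (IOException e) {
                // Ignore
            }
        }
    }
}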