List of usage examples for java.io.DataOutputStream.write
public synchronized void write(int b) throws IOException
Writes the specified byte (the low eight bits of the argument b) to the underlying output stream. If no exception is thrown, the counter written is incremented by 1.
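For quick orientation, here is a minimal, self-contained illustration of this overload; the class name, file name, and byte values are arbitrary choices for the example:

import java.io.DataOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;

public class WriteByteDemo {
    public static void main(String[] args) throws IOException {
        // try-with-resources closes the underlying stream even on failure
        try (DataOutputStream out = new DataOutputStream(new FileOutputStream("demo.bin"))) {
            out.write(0x2A);  // writes a single byte: 0x2A
            out.write(0x1FF); // only the low eight bits are kept: writes 0xFF
            // size() reports the number of bytes written so far
            System.out.println("bytes written: " + out.size()); // prints 2
        }
    }
}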
From source file: org.apache.hadoop.hbase.io.hfile.TestHFileBlockIndex.java
@Test
public void testSecondaryIndexBinarySearch() throws IOException {
    int numTotalKeys = 99;
    assertTrue(numTotalKeys % 2 == 1); // Ensure no one made this even.

    // We only add odd-index keys into the array that we will binary-search.
    int numSearchedKeys = (numTotalKeys - 1) / 2;

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(baos);

    dos.writeInt(numSearchedKeys);
    int curAllEntriesSize = 0;
    int numEntriesAdded = 0;

    // Only odd-index elements of this array are used to keep the secondary
    // index entries of the corresponding keys.
    int secondaryIndexEntries[] = new int[numTotalKeys];

    for (int i = 0; i < numTotalKeys; ++i) {
        byte[] k = TestHFileWriterV2.randomOrderedKey(rand, i * 2);
        KeyValue cell = new KeyValue(k, Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("val"));
        //KeyValue cell = new KeyValue.KeyOnlyKeyValue(k, 0, k.length);
        keys.add(cell.getKey());

        String msgPrefix = "Key #" + i + " (" + Bytes.toStringBinary(k) + "): ";
        StringBuilder padding = new StringBuilder();
        while (msgPrefix.length() + padding.length() < 70)
            padding.append(' ');
        msgPrefix += padding;
        if (i % 2 == 1) {
            dos.writeInt(curAllEntriesSize);
            secondaryIndexEntries[i] = curAllEntriesSize;
            LOG.info(msgPrefix + "secondary index entry #" + ((i - 1) / 2) + ", offset " + curAllEntriesSize);
            curAllEntriesSize += cell.getKey().length + HFileBlockIndex.SECONDARY_INDEX_ENTRY_OVERHEAD;
            ++numEntriesAdded;
        } else {
            secondaryIndexEntries[i] = -1;
            LOG.info(msgPrefix + "not in the searched array");
        }
    }

    // Make sure the keys are increasing.
    for (int i = 0; i < keys.size() - 1; ++i)
        assertTrue(KeyValue.COMPARATOR.compare(
                new KeyValue.KeyOnlyKeyValue(keys.get(i), 0, keys.get(i).length),
                new KeyValue.KeyOnlyKeyValue(keys.get(i + 1), 0, keys.get(i + 1).length)) < 0);

    dos.writeInt(curAllEntriesSize);
    assertEquals(numSearchedKeys, numEntriesAdded);
    int secondaryIndexOffset = dos.size();
    assertEquals(Bytes.SIZEOF_INT * (numSearchedKeys + 2), secondaryIndexOffset);

    for (int i = 1; i <= numTotalKeys - 1; i += 2) {
        assertEquals(dos.size(), secondaryIndexOffset + secondaryIndexEntries[i]);
        long dummyFileOffset = getDummyFileOffset(i);
        int dummyOnDiskSize = getDummyOnDiskSize(i);
        LOG.debug("Storing file offset=" + dummyFileOffset + " and onDiskSize=" + dummyOnDiskSize
                + " at offset " + dos.size());
        dos.writeLong(dummyFileOffset);
        dos.writeInt(dummyOnDiskSize);
        LOG.debug("Stored key " + ((i - 1) / 2) + " at offset " + dos.size());
        dos.write(keys.get(i));
    }

    dos.writeInt(curAllEntriesSize);

    ByteBuffer nonRootIndex = ByteBuffer.wrap(baos.toByteArray());
    for (int i = 0; i < numTotalKeys; ++i) {
        byte[] searchKey = keys.get(i);
        byte[] arrayHoldingKey = new byte[searchKey.length + searchKey.length / 2];

        // To make things a bit more interesting, store the key we are looking
        // for at a non-zero offset in a new array.
        System.arraycopy(searchKey, 0, arrayHoldingKey, searchKey.length / 2, searchKey.length);

        KeyValue.KeyOnlyKeyValue cell = new KeyValue.KeyOnlyKeyValue(arrayHoldingKey, searchKey.length / 2,
                searchKey.length);
        int searchResult = BlockIndexReader.binarySearchNonRootIndex(cell, nonRootIndex, KeyValue.COMPARATOR);
        String lookupFailureMsg = "Failed to look up key #" + i + " (" + Bytes.toStringBinary(searchKey) + ")";

        int expectedResult;
        int referenceItem;

        if (i % 2 == 1) {
            // This key is in the array we search as the element (i - 1) / 2. Make
            // sure we find it.
            expectedResult = (i - 1) / 2;
            referenceItem = i;
        } else {
            // This key is not in the array but between two elements on the array,
            // in the beginning, or in the end. The result should be the previous
            // key in the searched array, or -1 for i = 0.
            expectedResult = i / 2 - 1;
            referenceItem = i - 1;
        }

        assertEquals(lookupFailureMsg, expectedResult, searchResult);

        // Now test we can get the offset and the on-disk-size using a
        // higher-level API function.
        boolean locateBlockResult = (BlockIndexReader.locateNonRootIndexEntry(nonRootIndex, cell,
                KeyValue.COMPARATOR) != -1);

        if (i == 0) {
            assertFalse(locateBlockResult);
        } else {
            assertTrue(locateBlockResult);
            String errorMsg = "i=" + i + ", position=" + nonRootIndex.position();
            assertEquals(errorMsg, getDummyFileOffset(referenceItem), nonRootIndex.getLong());
            assertEquals(errorMsg, getDummyOnDiskSize(referenceItem), nonRootIndex.getInt());
        }
    }
}
From source file: edu.vu.isis.ammo.dash.provider.IncidentSyncAdaptor.java
public ArrayList<File> mediaSerialize(Cursor cursor) {
    logger.debug("::mediaSerialize");
    ArrayList<File> paths = new ArrayList<File>();
    if (1 > cursor.getCount())
        return paths;

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream eos = new DataOutputStream(baos);

    for (boolean more = cursor.moveToFirst(); more; more = cursor.moveToNext()) {
        MediaWrapper iw = new MediaWrapper();
        iw.setEventId(cursor.getString(cursor.getColumnIndex(MediaTableSchemaBase.EVENT_ID)));
        iw.setDataType(cursor.getString(cursor.getColumnIndex(MediaTableSchemaBase.DATA_TYPE)));
        iw.setData(cursor.getString(cursor.getColumnIndex(MediaTableSchemaBase.DATA)));
        iw.setCreatedDate(cursor.getLong(cursor.getColumnIndex(MediaTableSchemaBase.CREATED_DATE)));
        iw.setModifiedDate(cursor.getLong(cursor.getColumnIndex(MediaTableSchemaBase.MODIFIED_DATE)));
        iw.set_ReceivedDate(cursor.getLong(cursor.getColumnIndex(MediaTableSchemaBase._RECEIVED_DATE)));
        iw.set_Disposition(cursor.getInt(cursor.getColumnIndex(MediaTableSchemaBase._DISPOSITION)));

        Gson gson = new Gson();
        try {
            eos.writeBytes(gson.toJson(iw));
            eos.writeByte(0);
        } catch (IOException ex) {
            ex.printStackTrace();
        }

        // not a reference field name :event id eventId event_id
        try {
            String fileName = iw.getData();
            File dataFile = new File(fileName);
            int dataSize = (int) dataFile.length();
            byte[] buffData = new byte[dataSize];
            FileInputStream fileStream = new FileInputStream(dataFile);
            int ret = 0;
            for (int position = 0; (ret > -1 && dataSize > position); position += ret) {
                ret = fileStream.read(buffData, position, dataSize - position);
            }
            fileStream.close();

            eos.writeBytes("data");
            eos.writeByte(0);

            ByteBuffer dataSizeBuf = ByteBuffer.allocate(Integer.SIZE / Byte.SIZE);
            dataSizeBuf.order(ByteOrder.LITTLE_ENDIAN);
            dataSizeBuf.putInt(dataSize);

            // write the media back out
            eos.write(dataSizeBuf.array());
            eos.write(buffData);
            eos.write(dataSizeBuf.array());
        } catch (FileNotFoundException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }

        // not a reference field name :created date createdDate created_date
        // not a reference field name :modified date modifiedDate modified_date
        // MediaTableSchemaBase._DISPOSITION;
        // try {
        //     // TODO write to content provider using openFile
        //     if (!applCacheMediaDir.exists())
        //         applCacheMediaDir.mkdirs();
        //     File outfile = new File(applCacheMediaDir, Integer.toHexString((int) System.currentTimeMillis()));
        //     BufferedOutputStream bufferedOutput =
        //         new BufferedOutputStream(new FileOutputStream(outfile), 8192);
        //     bufferedOutput.write(baos.toByteArray());
        //     bufferedOutput.flush();
        //     bufferedOutput.close();
        // } catch (FileNotFoundException e) {
        //     e.printStackTrace();
        // } catch (IOException e) {
        //     e.printStackTrace();
        // }
    }
    return paths;
}
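The serializer above frames each binary payload the same way: a field name, a NUL byte, a little-endian length, the payload, and the length repeated. A stripped-down sketch of just that framing, with a hypothetical helper name and a made-up payload:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class FramingSketch {
    // hypothetical helper mirroring the framing used in mediaSerialize()
    static byte[] frame(String fieldName, byte[] payload) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        DataOutputStream eos = new DataOutputStream(baos);
        eos.writeBytes(fieldName);  // field name as raw bytes
        eos.writeByte(0);           // NUL terminator
        ByteBuffer sizeBuf = ByteBuffer.allocate(Integer.SIZE / Byte.SIZE);
        sizeBuf.order(ByteOrder.LITTLE_ENDIAN);
        sizeBuf.putInt(payload.length);
        eos.write(sizeBuf.array()); // length prefix (little-endian)
        eos.write(payload);         // the data itself
        eos.write(sizeBuf.array()); // trailing length, as in the source
        return baos.toByteArray();
    }

    public static void main(String[] args) throws IOException {
        byte[] framed = frame("data", new byte[] { 1, 2, 3 });
        System.out.println("framed length: " + framed.length); // 4 + 1 + 4 + 3 + 4 = 16
    }
}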
From source file: org.apache.fontbox.ttf.TTFSubsetter.java
private byte[] buildNameTable() throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bos);

    NamingTable name = ttf.getNaming();
    if (name == null || keepTables != null && !keepTables.contains("name")) {
        return null;
    }

    List<NameRecord> nameRecords = name.getNameRecords();
    int numRecords = 0;
    for (NameRecord record : nameRecords) {
        if (shouldCopyNameRecord(record)) {
            numRecords++;
        }
    }
    writeUint16(out, 0);
    writeUint16(out, numRecords);
    writeUint16(out, 2 * 3 + 2 * 6 * numRecords);

    if (numRecords == 0) {
        return null;
    }

    byte[][] names = new byte[numRecords][];
    int j = 0;
    for (NameRecord record : nameRecords) {
        if (shouldCopyNameRecord(record)) {
            int platform = record.getPlatformId();
            int encoding = record.getPlatformEncodingId();
            String charset = "ISO-8859-1";

            if (platform == CmapTable.PLATFORM_WINDOWS && encoding == CmapTable.ENCODING_WIN_UNICODE_BMP) {
                charset = "UTF-16BE";
            } else if (platform == 2) // ISO [deprecated]
            {
                if (encoding == 0) // 7-bit ASCII
                {
                    charset = "US-ASCII";
                } else if (encoding == 1) // ISO 10646
                {
                    // not sure if this is correct??
                    charset = "UTF-16BE";
                } else if (encoding == 2) // ISO 8859-1
                {
                    charset = "ISO-8859-1";
                }
            }
            String value = record.getString();
            if (record.getNameId() == 6 && prefix != null) {
                value = prefix + value;
            }
            names[j] = value.getBytes(charset);
            j++;
        }
    }

    int offset = 0;
    j = 0;
    for (NameRecord nr : nameRecords) {
        if (shouldCopyNameRecord(nr)) {
            writeUint16(out, nr.getPlatformId());
            writeUint16(out, nr.getPlatformEncodingId());
            writeUint16(out, nr.getLanguageId());
            writeUint16(out, nr.getNameId());
            writeUint16(out, names[j].length);
            writeUint16(out, offset);
            offset += names[j].length;
            j++;
        }
    }

    for (int i = 0; i < numRecords; i++) {
        out.write(names[i]);
    }

    out.flush();
    return bos.toByteArray();
}
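writeUint16 here is a private TTFSubsetter helper; with a bare DataOutputStream the equivalent is writeShort, which emits the low 16 bits of its int argument in big-endian order. A tiny demonstration (class name and values are arbitrary):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class Uint16Demo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bos);
        out.writeShort(0xFFFE); // low 16 bits, written big-endian
        byte[] b = bos.toByteArray();
        System.out.printf("%02X %02X%n", b[0] & 0xFF, b[1] & 0xFF); // FF FE
    }
}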
From source file: org.apache.hadoop.hive.ql.metadata.formatting.TextMetaDataFormatter.java
private void writeFileSystemStats(DataOutputStream outStream, HiveConf conf, List<Path> locations,
        Path tblPath, boolean partSpecified, int indent) throws IOException {
    long totalFileSize = 0;
    long maxFileSize = 0;
    long minFileSize = Long.MAX_VALUE;
    long lastAccessTime = 0;
    long lastUpdateTime = 0;
    int numOfFiles = 0;

    boolean unknown = false;
    FileSystem fs = tblPath.getFileSystem(conf);
    // in case all files in locations do not exist
    try {
        FileStatus tmpStatus = fs.getFileStatus(tblPath);
        lastAccessTime = tmpStatus.getAccessTime();
        lastUpdateTime = tmpStatus.getModificationTime();
        if (partSpecified) {
            // check whether the part exists or not in fs
            tmpStatus = fs.getFileStatus(locations.get(0));
        }
    } catch (IOException e) {
        LOG.warn("Cannot access File System. File System status will be unknown: ", e);
        unknown = true;
    }

    if (!unknown) {
        for (Path loc : locations) {
            try {
                FileStatus status = fs.getFileStatus(tblPath);
                FileStatus[] files = fs.listStatus(loc);
                long accessTime = status.getAccessTime();
                long updateTime = status.getModificationTime();
                // no matter loc is the table location or part location, it must be a
                // directory.
                if (!status.isDir()) {
                    continue;
                }
                if (accessTime > lastAccessTime) {
                    lastAccessTime = accessTime;
                }
                if (updateTime > lastUpdateTime) {
                    lastUpdateTime = updateTime;
                }
                for (FileStatus currentStatus : files) {
                    if (currentStatus.isDir()) {
                        continue;
                    }
                    numOfFiles++;
                    long fileLen = currentStatus.getLen();
                    totalFileSize += fileLen;
                    if (fileLen > maxFileSize) {
                        maxFileSize = fileLen;
                    }
                    if (fileLen < minFileSize) {
                        minFileSize = fileLen;
                    }
                    accessTime = currentStatus.getAccessTime();
                    updateTime = currentStatus.getModificationTime();
                    if (accessTime > lastAccessTime) {
                        lastAccessTime = accessTime;
                    }
                    if (updateTime > lastUpdateTime) {
                        lastUpdateTime = updateTime;
                    }
                }
            } catch (IOException e) {
                // ignore
            }
        }
    }

    String unknownString = "unknown";

    for (int k = 0; k < indent; k++) {
        outStream.write(Utilities.INDENT.getBytes("UTF-8"));
    }
    outStream.write("totalNumberFiles:".getBytes("UTF-8"));
    outStream.write((unknown ? unknownString : "" + numOfFiles).getBytes("UTF-8"));
    outStream.write(terminator);

    for (int k = 0; k < indent; k++) {
        outStream.write(Utilities.INDENT.getBytes("UTF-8"));
    }
    outStream.write("totalFileSize:".getBytes("UTF-8"));
    outStream.write((unknown ? unknownString : "" + totalFileSize).getBytes("UTF-8"));
    outStream.write(terminator);

    for (int k = 0; k < indent; k++) {
        outStream.write(Utilities.INDENT.getBytes("UTF-8"));
    }
    outStream.write("maxFileSize:".getBytes("UTF-8"));
    outStream.write((unknown ? unknownString : "" + maxFileSize).getBytes("UTF-8"));
    outStream.write(terminator);

    for (int k = 0; k < indent; k++) {
        outStream.write(Utilities.INDENT.getBytes("UTF-8"));
    }
    outStream.write("minFileSize:".getBytes("UTF-8"));
    if (numOfFiles > 0) {
        outStream.write((unknown ? unknownString : "" + minFileSize).getBytes("UTF-8"));
    } else {
        outStream.write((unknown ? unknownString : "" + 0).getBytes("UTF-8"));
    }
    outStream.write(terminator);

    for (int k = 0; k < indent; k++) {
        outStream.write(Utilities.INDENT.getBytes("UTF-8"));
    }
    outStream.write("lastAccessTime:".getBytes("UTF-8"));
    outStream.writeBytes((unknown || lastAccessTime < 0) ? unknownString : "" + lastAccessTime);
    outStream.write(terminator);

    for (int k = 0; k < indent; k++) {
        outStream.write(Utilities.INDENT.getBytes("UTF-8"));
    }
    outStream.write("lastUpdateTime:".getBytes("UTF-8"));
    outStream.write((unknown ? unknownString : "" + lastUpdateTime).getBytes("UTF-8"));
    outStream.write(terminator);
}
From source file: edu.vu.isis.ammo.dash.provider.IncidentSyncAdaptor.java
public ArrayList<File> categorySerialize(Cursor cursor) {
    logger.debug("::categorySerialize");
    ArrayList<File> paths = new ArrayList<File>();
    if (1 > cursor.getCount())
        return paths;

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream eos = new DataOutputStream(baos);

    for (boolean more = cursor.moveToFirst(); more; more = cursor.moveToNext()) {
        CategoryWrapper iw = new CategoryWrapper();
        iw.setMainCategory(cursor.getString(cursor.getColumnIndex(CategoryTableSchemaBase.MAIN_CATEGORY)));
        iw.setSubCategory(cursor.getString(cursor.getColumnIndex(CategoryTableSchemaBase.SUB_CATEGORY)));
        iw.setTigrId(cursor.getString(cursor.getColumnIndex(CategoryTableSchemaBase.TIGR_ID)));
        iw.setIconType(cursor.getString(cursor.getColumnIndex(CategoryTableSchemaBase.ICON_TYPE)));
        iw.setIcon(cursor.getString(cursor.getColumnIndex(CategoryTableSchemaBase.ICON)));
        iw.set_ReceivedDate(cursor.getLong(cursor.getColumnIndex(CategoryTableSchemaBase._RECEIVED_DATE)));
        iw.set_Disposition(cursor.getInt(cursor.getColumnIndex(CategoryTableSchemaBase._DISPOSITION)));

        Gson gson = new Gson();
        try {
            eos.writeBytes(gson.toJson(iw));
            eos.writeByte(0);
        } catch (IOException ex) {
            ex.printStackTrace();
        }

        // not a reference field name :main category mainCategory main_category
        // not a reference field name :sub category subCategory sub_category
        // not a reference field name :tigr id tigrId tigr_id
        try {
            String fileName = iw.getIcon();
            File dataFile = new File(fileName);
            int dataSize = (int) dataFile.length();
            byte[] buffData = new byte[dataSize];
            FileInputStream fileStream = new FileInputStream(dataFile);
            int ret = 0;
            for (int position = 0; (ret > -1 && dataSize > position); position += ret) {
                ret = fileStream.read(buffData, position, dataSize - position);
            }
            fileStream.close();

            eos.writeBytes("icon");
            eos.writeByte(0);

            ByteBuffer dataSizeBuf = ByteBuffer.allocate(Integer.SIZE / Byte.SIZE);
            dataSizeBuf.order(ByteOrder.LITTLE_ENDIAN);
            dataSizeBuf.putInt(dataSize);

            // write the category back out
            eos.write(dataSizeBuf.array());
            eos.write(buffData);
            eos.write(dataSizeBuf.array());
        } catch (FileNotFoundException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }

        // CategoryTableSchemaBase._DISPOSITION;
        // try {
        //     if (!applCacheCategoryDir.exists())
        //         applCacheCategoryDir.mkdirs();
        //
        //     File outfile = new File(applCacheCategoryDir, Integer.toHexString((int) System.currentTimeMillis()));
        //     BufferedOutputStream bufferedOutput =
        //         new BufferedOutputStream(new FileOutputStream(outfile), 8192);
        //     bufferedOutput.write(baos.toByteArray());
        //     bufferedOutput.flush();
        //     bufferedOutput.close();
        //
        //     paths.add(outfile);
        // } catch (FileNotFoundException e) {
        //     e.printStackTrace();
        // } catch (IOException e) {
        //     e.printStackTrace();
        // }
    }
    return paths;
}
From source file: org.apache.hadoop.hdfs.DFSClient.java
/**
 * Get the checksum of a file.
 * @param src The file path
 * @return The checksum
 */
public static MD5MD5CRC32FileChecksum getFileChecksum(String src, ClientProtocol namenode,
        SocketFactory socketFactory, int socketTimeout) throws IOException {
    //get all block locations
    LocatedBlocks blockLocations = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE);
    if (null == blockLocations) {
        throw new FileNotFoundException("File does not exist: " + src);
    }
    List<LocatedBlock> locatedblocks = blockLocations.getLocatedBlocks();
    final DataOutputBuffer md5out = new DataOutputBuffer();
    int bytesPerCRC = 0;
    long crcPerBlock = 0;
    boolean refetchBlocks = false;
    int lastRetriedIndex = -1;

    //get block checksum for each block
    for (int i = 0; i < locatedblocks.size(); i++) {
        if (refetchBlocks) { // refetch to get fresh tokens
            blockLocations = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE);
            if (null == blockLocations) {
                throw new FileNotFoundException("File does not exist: " + src);
            }
            locatedblocks = blockLocations.getLocatedBlocks();
            refetchBlocks = false;
        }
        LocatedBlock lb = locatedblocks.get(i);
        final Block block = lb.getBlock();
        final DatanodeInfo[] datanodes = lb.getLocations();

        //try each datanode location of the block
        final int timeout = (socketTimeout > 0)
                ? (socketTimeout + HdfsConstants.READ_TIMEOUT_EXTENSION * datanodes.length)
                : 0;
        boolean done = false;
        for (int j = 0; !done && j < datanodes.length; j++) {
            Socket sock = null;
            DataOutputStream out = null;
            DataInputStream in = null;
            try {
                //connect to a datanode
                sock = socketFactory.createSocket();
                NetUtils.connect(sock, NetUtils.createSocketAddr(datanodes[j].getName()), timeout);
                sock.setSoTimeout(timeout);

                out = new DataOutputStream(
                        new BufferedOutputStream(NetUtils.getOutputStream(sock), DataNode.SMALL_BUFFER_SIZE));
                in = new DataInputStream(NetUtils.getInputStream(sock));

                if (LOG.isDebugEnabled()) {
                    LOG.debug("write to " + datanodes[j].getName() + ": "
                            + DataTransferProtocol.OP_BLOCK_CHECKSUM + ", block=" + block);
                }

                // get block MD5
                out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                out.write(DataTransferProtocol.OP_BLOCK_CHECKSUM);
                out.writeLong(block.getBlockId());
                out.writeLong(block.getGenerationStamp());
                lb.getBlockToken().write(out);
                out.flush();

                final short reply = in.readShort();
                if (reply != DataTransferProtocol.OP_STATUS_SUCCESS) {
                    if (reply == DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN && i > lastRetriedIndex) {
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM "
                                    + "for file " + src + " for block " + block + " from datanode "
                                    + datanodes[j].getName() + ". Will retry the block once.");
                        }
                        lastRetriedIndex = i;
                        done = true; // actually it's not done; but we'll retry
                        i--; // repeat at i-th block
                        refetchBlocks = true;
                        break;
                    } else {
                        throw new IOException("Bad response " + reply + " for block " + block
                                + " from datanode " + datanodes[j].getName());
                    }
                }

                //read byte-per-checksum
                final int bpc = in.readInt();
                if (i == 0) { //first block
                    bytesPerCRC = bpc;
                } else if (bpc != bytesPerCRC) {
                    throw new IOException(
                            "Byte-per-checksum not matched: bpc=" + bpc + " but bytesPerCRC=" + bytesPerCRC);
                }

                //read crc-per-block
                final long cpb = in.readLong();
                if (locatedblocks.size() > 1 && i == 0) {
                    crcPerBlock = cpb;
                }

                //read md5
                final MD5Hash md5 = MD5Hash.read(in);
                md5.write(md5out);

                done = true;

                if (LOG.isDebugEnabled()) {
                    if (i == 0) {
                        LOG.debug("set bytesPerCRC=" + bytesPerCRC + ", crcPerBlock=" + crcPerBlock);
                    }
                    LOG.debug("got reply from " + datanodes[j].getName() + ": md5=" + md5);
                }
            } catch (IOException ie) {
                LOG.warn("src=" + src + ", datanodes[" + j + "].getName()=" + datanodes[j].getName(), ie);
            } finally {
                IOUtils.closeStream(in);
                IOUtils.closeStream(out);
                IOUtils.closeSocket(sock);
            }
        }

        if (!done) {
            throw new IOException("Fail to get block MD5 for " + block);
        }
    }

    //compute file MD5
    final MD5Hash fileMD5 = MD5Hash.digest(md5out.getData());
    return new MD5MD5CRC32FileChecksum(bytesPerCRC, crcPerBlock, fileMD5);
}
From source file: org.apache.hadoop.hive.ql.exec.DDLTask.java
public static void dumpLockInfo(DataOutputStream os, ShowLocksResponse rsp) throws IOException {
    // Write a header
    os.writeBytes("Lock ID");
    os.write(separator);
    os.writeBytes("Database");
    os.write(separator);
    os.writeBytes("Table");
    os.write(separator);
    os.writeBytes("Partition");
    os.write(separator);
    os.writeBytes("State");
    os.write(separator);
    os.writeBytes("Blocked By");
    os.write(separator);
    os.writeBytes("Type");
    os.write(separator);
    os.writeBytes("Transaction ID");
    os.write(separator);
    os.writeBytes("Last Heartbeat");
    os.write(separator);
    os.writeBytes("Acquired At");
    os.write(separator);
    os.writeBytes("User");
    os.write(separator);
    os.writeBytes("Hostname");
    os.write(separator);
    os.writeBytes("Agent Info");
    os.write(terminator);

    List<ShowLocksResponseElement> locks = rsp.getLocks();
    if (locks != null) {
        for (ShowLocksResponseElement lock : locks) {
            if (lock.isSetLockIdInternal()) {
                os.writeBytes(Long.toString(lock.getLockid()) + "." + Long.toString(lock.getLockIdInternal()));
            } else {
                os.writeBytes(Long.toString(lock.getLockid()));
            }
            os.write(separator);
            os.writeBytes(lock.getDbname());
            os.write(separator);
            os.writeBytes((lock.getTablename() == null) ? "NULL" : lock.getTablename());
            os.write(separator);
            os.writeBytes((lock.getPartname() == null) ? "NULL" : lock.getPartname());
            os.write(separator);
            os.writeBytes(lock.getState().toString());
            os.write(separator);
            if (lock.isSetBlockedByExtId()) { // both "blockedby" fields are either there or not
                os.writeBytes(Long.toString(lock.getBlockedByExtId()) + "."
                        + Long.toString(lock.getBlockedByIntId()));
            } else {
                os.writeBytes("            "); // 12 chars - try to keep cols aligned
            }
            os.write(separator);
            os.writeBytes(lock.getType().toString());
            os.write(separator);
            os.writeBytes((lock.getTxnid() == 0) ? "NULL" : Long.toString(lock.getTxnid()));
            os.write(separator);
            os.writeBytes(Long.toString(lock.getLastheartbeat()));
            os.write(separator);
            os.writeBytes((lock.getAcquiredat() == 0) ? "NULL" : Long.toString(lock.getAcquiredat()));
            os.write(separator);
            os.writeBytes(lock.getUser());
            os.write(separator);
            os.writeBytes(lock.getHostname());
            os.write(separator);
            os.writeBytes(lock.getAgentInfo() == null ? "NULL" : lock.getAgentInfo());
            os.write(separator);
            os.write(terminator);
        }
    }
}
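Note how the formatter mixes writeBytes(String) for cell text with the one-byte write(int) overload for the separator and terminator. A self-contained sketch of the same row-writing idea; the column values and the tab/newline constants are assumptions for the example, not Hive's actual constants:

import java.io.DataOutputStream;
import java.io.IOException;

public class LockTableSketch {
    private static final int separator = '\t'; // assumed; DDLTask defines its own
    private static final int terminator = '\n';

    // write one row: cells separated by the separator byte, row ended by the terminator byte
    static void writeRow(DataOutputStream os, String... cells) throws IOException {
        for (int i = 0; i < cells.length; i++) {
            os.writeBytes(cells[i] == null ? "NULL" : cells[i]);
            os.write(i < cells.length - 1 ? separator : terminator);
        }
    }

    public static void main(String[] args) throws IOException {
        DataOutputStream os = new DataOutputStream(System.out);
        writeRow(os, "Lock ID", "Database", "Table");
        writeRow(os, "42.1", "default", null);
        os.flush();
    }
}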
From source file: org.apache.geode.internal.cache.tier.sockets.HandShake.java
/**
 * client-to-server handshake. Nothing is sent to the server prior to invoking this method.
 */
private byte write(DataOutputStream dos, DataInputStream dis, CommunicationMode communicationMode,
        int replyCode, int readTimeout, List ports, Properties p_credentials, DistributedMember member,
        boolean isCallbackConnection) throws IOException {
    HeapDataOutputStream hdos = new HeapDataOutputStream(32, Version.CURRENT);
    byte acceptanceCode = -1;
    try {
        hdos.writeByte(communicationMode.getModeNumber());
        if (overrideClientVersion > 0) {
            // for testing
            Version.writeOrdinal(hdos, overrideClientVersion, true);
        } else {
            Version.writeOrdinal(hdos, currentClientVersion.ordinal(), true);
        }

        hdos.writeByte(replyCode);
        if (ports != null) {
            hdos.writeInt(ports.size());
            for (int i = 0; i < ports.size(); i++) {
                hdos.writeInt(Integer.parseInt((String) ports.get(i)));
            }
        } else {
            hdos.writeInt(readTimeout);
        }
        // we do not know the receiver's version at this point, but the on-wire
        // form of InternalDistributedMember changed in 9.0, so we must serialize
        // it using the previous version
        DataOutput idOut = new VersionedDataOutputStream(hdos, Version.GFE_82);
        DataSerializer.writeObject(this.id, idOut);

        if (currentClientVersion.compareTo(Version.GFE_603) >= 0) {
            for (int bytes = 0; bytes < this.overrides.length; bytes++) {
                hdos.writeByte(this.overrides[bytes]);
            }
        } else {
            // write the client conflation setting byte
            if (setClientConflationForTesting) {
                hdos.writeByte(clientConflationForTesting);
            } else {
                hdos.writeByte(this.clientConflation);
            }
        }

        if (isCallbackConnection || communicationMode.isWAN()) {
            if (isCallbackConnection && this.multiuserSecureMode && !communicationMode.isWAN()) {
                hdos.writeByte(SECURITY_MULTIUSER_NOTIFICATIONCHANNEL);
                hdos.flush();
                dos.write(hdos.toByteArray());
                dos.flush();
            } else {
                writeCredentials(dos, dis, p_credentials, ports != null, member, hdos);
            }
        } else {
            String authInitMethod = this.system.getProperties().getProperty(SECURITY_CLIENT_AUTH_INIT);
            acceptanceCode = writeCredential(dos, dis, authInitMethod, ports != null, member, hdos);
        }
    } finally {
        hdos.close();
    }
    return acceptanceCode;
}
From source file: GifEncoder.java
public void encode(BufferedImage bufferedimage, DataOutputStream dataoutputstream, Hashtable hashtable)
        throws Exception {
    try {
        a = bufferedimage.getWidth();
        g = bufferedimage.getHeight();
        e = bufferedimage.getRGB(0, 0, a, g, null, 0, a);
        int i4 = 0;
        b = hashtable.get("encoding").toString();
        if (b.equals("websafe")) {
            int ai[] = new int[256];
            i = new int[256];
            h = 8;
            int k1 = 0;
            int j;
            int j1 = j = 0;
            for (; j <= 255; j += 51) {
                for (int l = 0; l <= 255; l += 51) {
                    for (int i1 = 0; i1 <= 255;) {
                        i[j1] = (j << 16) + (l << 8) + i1;
                        ai[k1++] = j1;
                        i1 += 51;
                        j1++;
                    }
                }
            }
            if (f > 0) {
                int j4 = c[0];
                int l1 = ((c[0] >> 16 & 0xff) + 25) / 51;
                int k2 = ((c[0] >> 8 & 0xff) + 25) / 51;
                int j3 = ((c[0] & 0xff) + 25) / 51;
                i4 = l1 * 36 + k2 * 6 + j3;
                for (j = 1; j < f; j++) {
                    int i2 = ((c[j] >> 16 & 0xff) + 25) / 51;
                    int l2 = ((c[j] >> 8 & 0xff) + 25) / 51;
                    int k3 = ((c[j] & 0xff) + 25) / 51;
                    ai[i2 * 36 + l2 * 6 + k3] = i4;
                }
            }
            j = 0;
            try {
                do {
                    int i5 = e[j];
                    int j2 = ((i5 >> 16 & 0xff) + 25) / 51;
                    int i3 = ((i5 >> 8 & 0xff) + 25) / 51;
                    int l3 = ((i5 & 0xff) + 25) / 51;
                    e[j++] = ai[j2 * 36 + i3 * 6 + l3];
                } while (true);
            } catch (Exception exception1) {
            }
        }
        /*else if(b.equals("optimized")) {
            try {
                int k4 = Integer.parseInt(hashtable.get("colors").toString());
                for(h = 1; k4 - 1 >> h > 0; h++) {
                }
                i = new int[1 << h];
                CSelectiveQuant cselectivequant = new CSelectiveQuant();
                for(int j5 = 0; j5 < e.length; j5++) {
                    cselectivequant.addPixel(e[j5]);
                }
                boolean flag = f > 0;
                int k5 = flag ? 1 : 0;
                int ai1[] = cselectivequant.createPalette(k4 - k5);
                for(int l5 = 0; l5 < i.length; l5++) {
                    try {
                        i[l5] = ai1[l5 - k5];
                    } catch(ArrayIndexOutOfBoundsException arrayindexoutofboundsexception) {
                        i[l5] = 0;
                    }
                }
                if(flag) {
                    i4 = 0;
                    for(int i6 = 0; i6 < f; i6++) {
                        cselectivequant.setIndex(c[i6], -1);
                    }
                }
                for(int j6 = 0; j6 < e.length; j6++) {
                    e[j6] = cselectivequant.getIndex(e[j6]) + k5;
                }
            } catch(NumberFormatException numberformatexception) {
                CmsLogger.logInfo("Parameter: 'colors' is malformated...");
                return;
            }
        } */
        dataoutputstream.write("GIF89a".getBytes());
        dataoutputstream.writeByte(a);
        dataoutputstream.writeByte(a >> 8);
        dataoutputstream.writeByte(g);
        dataoutputstream.writeByte(g >> 8);
        dataoutputstream.writeByte(0xf0 | h - 1);
        dataoutputstream.writeByte(0);
        dataoutputstream.writeByte(0);
        int k = 0;
        try {
            do {
                int l4 = i[k++];
                dataoutputstream.writeByte(l4 >> 16 & 0xff);
                dataoutputstream.writeByte(l4 >> 8 & 0xff);
                dataoutputstream.writeByte(l4 & 0xff);
            } while (true);
        } catch (Exception exception) {
        }
        if (f > 0) {
            dataoutputstream.writeByte(33);
            dataoutputstream.writeByte(249);
            dataoutputstream.writeByte(4);
            dataoutputstream.writeByte(1);
            dataoutputstream.writeByte(0);
            dataoutputstream.writeByte(0);
            dataoutputstream.writeByte(i4);
            dataoutputstream.writeByte(0);
        }
        dataoutputstream.writeByte(44);
        dataoutputstream.writeByte(0);
        dataoutputstream.writeByte(0);
        dataoutputstream.writeByte(0);
        dataoutputstream.writeByte(0);
        dataoutputstream.writeByte(a);
        dataoutputstream.writeByte(a >> 8);
        dataoutputstream.writeByte(g);
        dataoutputstream.writeByte(g >> 8);
        dataoutputstream.writeByte(0);
        dataoutputstream.writeByte(h);
        a(e, h, dataoutputstream);
        dataoutputstream.writeByte(59);
        dataoutputstream.flush();
        return;
    } catch (Exception e) {
    }
}
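The encoder emits each 16-bit GIF field in little-endian order with two writeByte calls (low byte first), relying on writeByte truncating its int argument to the low eight bits. A hypothetical helper capturing that pattern:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class LittleEndianSketch {
    // hypothetical helper mirroring GifEncoder's paired writeByte calls
    static void writeUint16LE(DataOutputStream out, int v) throws IOException {
        out.writeByte(v);      // low eight bits (writeByte truncates the int)
        out.writeByte(v >> 8); // high eight bits
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        writeUint16LE(new DataOutputStream(bos), 320); // e.g. a logical screen width
        byte[] b = bos.toByteArray();
        System.out.printf("%02X %02X%n", b[0] & 0xFF, b[1] & 0xFF); // 40 01
    }
}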
From source file: org.apache.jxtadoop.hdfs.server.datanode.DataXceiver.java
/**
 * Write a block to disk.
 *
 * @param in The stream to read from
 * @throws IOException
 */
private void writeBlock(DataInputStream in) throws IOException {
    LOG.debug("Method called : writeBlock()");
    DatanodeInfo srcDataNode = null;
    LOG.debug("writeBlock receive buf size " + s.getReceiveBufferSize() + " tcp no delay "
            + s.getTcpNoDelay());
    //
    // Read in the header
    //
    Block block = new Block(in.readLong(), dataXceiverServer.estimateBlockSize, in.readLong());
    LOG.info("Receiving block " + block + " src: " + remoteAddress + " dest: " + localAddress);

    int pipelineSize = in.readInt(); // num of datanodes in entire pipeline
    boolean isRecovery = in.readBoolean(); // is this part of recovery?
    String client = Text.readString(in); // working on behalf of this client
    boolean hasSrcDataNode = in.readBoolean(); // is src node info present
    if (hasSrcDataNode) {
        srcDataNode = new DatanodeInfo();
        srcDataNode.readFields(in);
    }
    int numTargets = in.readInt();
    if (numTargets < 0) {
        throw new IOException("Mislabelled incoming datastream.");
    }
    DatanodeInfo targets[] = new DatanodeInfo[numTargets];
    for (int i = 0; i < targets.length; i++) {
        DatanodeInfo tmp = new DatanodeInfo();
        tmp.readFields(in);
        targets[i] = tmp;
    }

    DataOutputStream mirrorOut = null; // stream to next target
    DataInputStream mirrorIn = null; // reply from next target
    DataOutputStream replyOut = null; // stream to prev target
    JxtaSocket mirrorSock = null; // socket to next target
    BlockReceiver blockReceiver = null; // responsible for data handling
    String mirrorNode = null; // the name:port of next target
    String firstBadLink = ""; // first datanode that failed in connection setup

    try {
        // open a block receiver and check if the block does not exist
        /*blockReceiver = new BlockReceiver(block, in, s.getRemoteSocketAddress().toString(),
                s.getLocalSocketAddress().toString(), isRecovery, client, srcDataNode, datanode);*/
        blockReceiver = new BlockReceiver(block, in,
                ((JxtaSocketAddress) s.getRemoteSocketAddress()).getPeerId().toString(),
                ((JxtaSocketAddress) s.getLocalSocketAddress()).getPeerId().toString(), isRecovery, client,
                srcDataNode, datanode);

        // get a connection back to the previous target
        //replyOut = new DataOutputStream(
        //    NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
        ReliableOutputStream replyOutRos = (ReliableOutputStream) s.getOutputStream();
        replyOut = new DataOutputStream(replyOutRos);

        //
        // Open network conn to backup machine, if
        // appropriate
        //
        if (targets.length > 0) {
            // JxtaSocketAddress mirrorTarget = null;
            // Connect to backup machine
            mirrorNode = targets[0].getPeerId();
            // mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
            // mirrorSock = datanode.newSocket();
            try {
                //int timeoutValue = numTargets * datanode.socketTimeout;
                //int writeTimeout = datanode.socketWriteTimeout +
                //    (HdfsConstants.WRITE_TIMEOUT_EXTENSION * numTargets);
                // NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
                mirrorSock = datanode.getDnPeer().getInfoSocket(mirrorNode.toString());
                if (mirrorSock == null)
                    throw new IOException("Failed to get a mirror socket");
                //mirrorSock.setSoTimeout(timeoutValue);
                //mirrorSock.setTcpNoDelay(true);
                //mirrorSock.setSoTimeout(Integer.parseInt(datanode.getConf().get("hadoop.p2p.info.timeout")));
                //mirrorSock.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE);
                /*mirrorOut = new DataOutputStream(
                        new BufferedOutputStream(NetUtils.getOutputStream(mirrorSock, writeTimeout),
                                SMALL_BUFFER_SIZE));
                mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock)); */
                mirrorOut = new DataOutputStream((ReliableOutputStream) mirrorSock.getOutputStream());
                mirrorIn = new DataInputStream((ReliableInputStream) mirrorSock.getInputStream());

                // Write header: Copied from DFSClient.java!
                mirrorOut.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                mirrorOut.write(DataTransferProtocol.OP_WRITE_BLOCK);
                mirrorOut.writeLong(block.getBlockId());
                mirrorOut.writeLong(block.getGenerationStamp());
                mirrorOut.writeInt(pipelineSize);
                mirrorOut.writeBoolean(isRecovery);
                Text.writeString(mirrorOut, client);
                mirrorOut.writeBoolean(hasSrcDataNode);
                if (hasSrcDataNode) { // pass src node information
                    srcDataNode.write(mirrorOut);
                }
                mirrorOut.writeInt(targets.length - 1);
                for (int i = 1; i < targets.length; i++) {
                    targets[i].write(mirrorOut);
                }
                blockReceiver.writeChecksumHeader(mirrorOut);
                mirrorOut.flush();

                // read connect ack (only for clients, not for replication req)
                if (client.length() != 0) {
                    firstBadLink = Text.readString(mirrorIn);
                    if (LOG.isDebugEnabled() || firstBadLink.length() > 0) {
                        LOG.info("Datanode " + targets.length + " got response for connect ack "
                                + " from downstream datanode with firstbadlink as " + firstBadLink);
                    }
                }
            } catch (SocketTimeoutException ste) {
                LOG.debug("Time out while receiving data on DataXceiver");
                LOG.debug(ste);
                ste.printStackTrace();
            } catch (IOException e) {
                LOG.debug("IOException occurred : " + e.getMessage());
                if (client.length() != 0) {
                    Text.writeString(replyOut, mirrorNode);
                    replyOut.flush();
                }
                IOUtils.closeStream(mirrorOut);
                mirrorOut = null;
                IOUtils.closeStream(mirrorIn);
                mirrorIn = null;
                if (mirrorSock != null) {
                    IOUtils.closeSocket(mirrorSock);
                    mirrorSock = null;
                }
                if (client.length() > 0) {
                    throw e;
                } else {
                    LOG.info(datanode.dnRegistration + ":Exception transferring block " + block
                            + " to mirror " + mirrorNode + ". continuing without the mirror.\n"
                            + StringUtils.stringifyException(e));
                }
            }
        }

        // send connect ack back to source (only for clients)
        if (client.length() != 0) {
            if (LOG.isDebugEnabled() || firstBadLink.length() > 0) {
                LOG.info("Datanode " + targets.length + " forwarding connect ack to upstream firstbadlink is "
                        + firstBadLink);
            }
            Text.writeString(replyOut, firstBadLink);
            replyOut.flush();
        }

        // receive the block and mirror to the next target
        String mirrorAddr = (mirrorSock == null) ? null : mirrorNode;
        blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut, mirrorAddr, null, targets.length);

        // if this write is for a replication request (and not
        // from a client), then confirm block. For client-writes,
        // the block is finalized in the PacketResponder.
        if (client.length() == 0) {
            datanode.notifyNamenodeReceivedBlock(block, DataNode.EMPTY_DEL_HINT);
            LOG.info("Received block " + block + " src: " + remoteAddress + " dest: " + localAddress
                    + " of size " + block.getNumBytes());
        }

        if (datanode.blockScanner != null) {
            datanode.blockScanner.addBlock(block);
        }
    } catch (IOException ioe) {
        LOG.info("writeBlock " + block + " received exception " + ioe);
        throw ioe;
    } catch (Exception e) {
        LOG.warn("Exception occurred in writing block : " + e.getMessage());
    } finally {
        // close all opened streams
        LOG.debug("Finalizing : writeBlock()");
        IOUtils.closeStream(mirrorOut);
        IOUtils.closeStream(mirrorIn);
        IOUtils.closeStream(replyOut);
        IOUtils.closeSocket(mirrorSock);
        IOUtils.closeStream(blockReceiver);
    }
}