List of usage examples for org.apache.hadoop.io.Text.readString
public static String readString(DataInput in) throws IOException
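Each example below calls readString inside a Writable's readFields implementation. readString pairs with Text.writeString, which writes a vint-encoded length followed by the string's UTF-8 bytes. A minimal self-contained round trip (the class name TextReadStringDemo is made up for illustration):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.Text;

public class TextReadStringDemo {
    public static void main(String[] args) throws IOException {
        // Serialize a String with the companion method Text.writeString.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        Text.writeString(out, "hello, hadoop");
        out.flush();

        // Read it back with Text.readString.
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(Text.readString(in)); // prints: hello, hadoop
    }
}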
From source file: com.marklogic.mapreduce.LargeBinaryDocument.java
License: Apache License

@Override
public void readFields(DataInput in) throws IOException {
    super.readFields(in);
    path = new Path(Text.readString(in));
    offset = in.readLong();
    size = in.readLong();
    binaryOrigLen = in.readLong();
    conf = new Configuration();
    conf.readFields(in);
}
From source file: com.marklogic.mapreduce.MarkLogicInputSplit.java
License: Apache License

@Override
public void readFields(DataInput in) throws IOException {
    start = in.readLong();
    length = in.readLong();
    Text forestIdText = new Text();
    forestIdText.readFields(in);
    forestId = new BigInteger(forestIdText.getBytes());
    hostName = new String[1];
    hostName[0] = Text.readString(in);
    isLastSplit = in.readBoolean();
}
From source file: com.marklogic.mapreduce.MarkLogicNode.java
License: Apache License

public void readFields(DataInput in) throws IOException {
    int type = in.readInt();
    DocumentBuilder docBuilder = builderLocal.get();
    String val = Text.readString(in);
    try {
        if (type == Node.ATTRIBUTE_NODE) {
            AttributeImpl attr = new AttributeImpl(Text.readString(in), Text.readString(in));
            node = attr.asW3cNode(docBuilder);
        } else {
            node = DomUtil.readXml(IOHelper.newStream(val));
        }
    } catch (SAXException e) {
        LOG.error("error parsing input", e);
        throw new IOException(e);
    } catch (ParserConfigurationException e) {
        LOG.error("error parsing input", e);
        throw new IOException(e);
    }
}
From source file: com.marklogic.mapreduce.NodePath.java
License: Apache License

@Override
public void readFields(DataInput in) throws IOException {
    docUri = Text.readString(in);
    path = Text.readString(in);
}
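The NodePath example reads its two string fields back in a fixed order; the matching write method must emit them with Text.writeString in exactly that order. A minimal sketch of the full Writable convention, using a hypothetical class (NodePathLike is made up for illustration, not part of the MarkLogic source):

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

// Hypothetical Writable mirroring the NodePath pattern above:
// fields are written in the same order readFields consumes them.
public class NodePathLike implements Writable {
    private String docUri;
    private String path;

    @Override
    public void write(DataOutput out) throws IOException {
        Text.writeString(out, docUri);
        Text.writeString(out, path);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        docUri = Text.readString(in);
        path = Text.readString(in);
    }
}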
From source file: com.marklogic.mapreduce.StreamLocator.java
License: Apache License

@Override
public void readFields(DataInput in) throws IOException {
    path = new Path(Text.readString(in));
    codec = WritableUtils.readEnum(in, CompressionCodec.class);
}
From source file: com.marklogic.mapreduce.utilities.ForestInfo.java
License: Apache License

public void readFields(DataInput in) throws IOException {
    hostName = Text.readString(in);
    frangmentCount = in.readLong();
    updatable = in.readBoolean();
}
From source file: com.pivotal.hawq.mapreduce.ao.file.HAWQAOSplit.java
License: Apache License

@Override
public void readFields(DataInput in) throws IOException {
    super.readFields(in);
    checksum = in.readBoolean();
    compressType = Text.readString(in);
    blockSize = in.readInt();
}
From source file: com.scaleoutsoftware.soss.hserver.hadoop.SubmittedJob.java
License: Apache License

@SuppressWarnings("unchecked")
private static <T> T getSplitDetails(FSDataInputStream inFile, long offset, Configuration configuration)
        throws IOException {
    inFile.seek(offset);
    // The split file stores the split's class name first; read it back and
    // intern it, since the same class name recurs across many splits.
    String className = StringInterner.weakIntern(Text.readString(inFile));
    Class<T> cls;
    try {
        cls = (Class<T>) configuration.getClassByName(className);
    } catch (ClassNotFoundException ce) {
        IOException wrap = new IOException("Split class " + className + " not found");
        wrap.initCause(ce);
        throw wrap;
    }
    // Deserialize the split itself with whatever serialization framework
    // is registered for that class.
    SerializationFactory factory = new SerializationFactory(configuration);
    Deserializer<T> deserializer = (Deserializer<T>) factory.getDeserializer(cls);
    deserializer.open(inFile);
    T split = deserializer.deserialize(null);
    return split;
}
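The write side of this convention records the class name with Text.writeString before the serialized split bytes, which is what lets getSplitDetails recover the class with Text.readString first. A sketch of such a writer, assuming one split is written at the current stream position (writeSplitDetails and SplitWriteSketch are hypothetical names, not from the SubmittedJob source):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.io.serializer.Serializer;

public class SplitWriteSketch {
    @SuppressWarnings("unchecked")
    static <T> void writeSplitDetails(FSDataOutputStream outFile, T split, Configuration conf)
            throws IOException {
        // Write the class name first so the reader knows what to deserialize.
        Text.writeString(outFile, split.getClass().getName());
        SerializationFactory factory = new SerializationFactory(conf);
        Serializer<T> serializer = factory.getSerializer((Class<T>) split.getClass());
        serializer.open(outFile);
        serializer.serialize(split);
    }
}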
From source file: com.sensei.indexing.hadoop.keyvalueformat.Shard.java
License: Apache License

public void readFields(DataInput in) throws IOException {
    version = in.readLong();
    dir = Text.readString(in);
    gen = in.readLong();
}
From source file: com.sensei.indexing.hadoop.reduce.RAMDirectoryUtil.java
License: Apache License

/**
 * Read a number of files from a data input to a ram directory.
 * @param in the data input
 * @param dir the ram directory
 * @throws IOException
 */
public static void readRAMFiles(DataInput in, RAMDirectory dir) throws IOException {
    int numFiles = in.readInt();
    for (int i = 0; i < numFiles; i++) {
        String name = Text.readString(in);
        long length = in.readLong();
        if (length > 0) {
            // can we avoid the extra copy?
            IndexOutput output = null;
            try {
                output = dir.createOutput(name);
                int position = 0;
                byte[] buffer = new byte[BUFFER_SIZE];
                while (position < length) {
                    int len = position + BUFFER_SIZE <= length ? BUFFER_SIZE : (int) (length - position);
                    in.readFully(buffer, 0, len);
                    output.writeBytes(buffer, 0, len);
                    position += len;
                }
            } finally {
                if (output != null) {
                    output.close();
                }
            }
        }
    }
}