List of usage examples for org.apache.hadoop.io Text set
public void set(byte[] utf8, int start, int len)
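Before the per-project examples, a minimal self-contained sketch of the call itself (class and variable names below are illustrative, not taken from any of the projects listed): set(byte[] utf8, int start, int len) copies len bytes of UTF-8 data beginning at start into the Text object's own buffer, so the source array may be reused or modified afterwards. Note that the third argument is a length, not an end offset.

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.io.Text;

    public class TextSetExample {
        public static void main(String[] args) {
            byte[] utf8 = "hello world".getBytes(StandardCharsets.UTF_8);
            Text t = new Text();
            t.set(utf8, 6, 5);       // copy 5 bytes starting at offset 6: "world"
            System.out.println(t);   // prints "world"
            utf8[6] = 'X';           // the Text holds its own copy of the bytes,
            System.out.println(t);   // so this still prints "world"
        }
    }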
From source file:org.apache.accumulo.examples.wikisearch.iterator.FieldIndexIterator.java
License:Apache License
public boolean jump(Key jumpKey) throws IOException {
  if (log.isDebugEnabled()) {
    String pEndRow = "empty";
    if (parentEndRow != null) {
      pEndRow = parentEndRow.toString();
    }
    log.debug("jump, current range: " + range + " parentEndRow is: " + pEndRow);
  }

  if (parentEndRow != null && jumpKey.getRow().compareTo(parentEndRow) > 0) {
    // can't go there.
    if (log.isDebugEnabled()) {
      log.debug("jumpRow: " + jumpKey.getRow() + " is greater than my parentEndRow: " + parentEndRow);
    }
    return false;
  }

  int comp;
  if (!this.hasTop()) {
    if (log.isDebugEnabled()) {
      log.debug("current row: " + this.currentRow);
    }
    /*
     * if I don't have a top, then I should be out of my range for my current row. Need to check
     * parent range to see if I'm supposed to continue to next row or not. Current row can be null
     * because maybe I never found anything in this row.
     */
    if (parentEndRow != null) {
      // if jumpKey row is greater than parentEndRow, stop
      if (jumpKey.getRow().compareTo(parentEndRow) > 0) {
        if (log.isDebugEnabled()) {
          log.debug("jumpKey row is greater than my parentEndRow, done");
        }
        return false;
      }

      // if my current row is null, I must have hit the end of the tablet
      if (currentRow == null) {
        if (log.isDebugEnabled()) {
          log.debug("I have parentEndRow, but no current row, must have hit end of tablet, done");
        }
        return false;
      }

      // if my current row is greater than jump row stop, a seek will be
      // called to get me going again. If my row is equal, but i don't
      // have a topkey, i'm done
      if (currentRow.compareTo(jumpKey.getRow()) >= 0) {
        if (log.isDebugEnabled()) {
          log.debug("I have parentEndRow, but topKey, and my currentRow is >= jumpRow, done");
        }
        return false;
      }
    } else {
      // we're allowed to go to the end of the tablet
      // if my current row is null, I must have hit the end of the tablet
      if (currentRow == null) {
        if (log.isDebugEnabled()) {
          log.debug("no parentEndRow and current Row is null, must have hit end of tablet, done");
        }
        return false;
      }

      if (currentRow.compareTo(jumpKey.getRow()) >= 0) {
        // i'm past or equal to the jump point and have no top,
        // jumping's not going to help
        if (log.isDebugEnabled()) {
          log.debug("no parentEndRow, no topKey, and currentRow is >= jumpRow, done");
        }
        return false;
      }
    }

    // ok, jumpKey is ahead of me I'll mark it and allow the normal
    // flow to jump there and see if I have top.
    if (log.isDebugEnabled()) {
      log.debug("no topKey, but jumpRow is ahead and I'm allowed to go to it, marking");
    }
    comp = -1;
  } else {
    // I have a topKey, I can do the normal comparisons
    if (log.isDebugEnabled()) {
      log.debug("have top, can do normal comparisons");
    }
    comp = this.topKey.getRow().compareTo(jumpKey.getRow());
  }

  // ------------------
  // compare rows
  if (comp > 0) { // my row is ahead of jump key
    if (canBeInNextRow()) {
      if (log.isDebugEnabled()) {
        log.debug("I'm ahead of jump row & it's ok.");
        log.debug("jumpRow: " + jumpKey.getRow() + " myRow: " + topKey.getRow() + " parentEndRow: " + parentEndRow);
      }
      return true;
    } else {
      if (log.isDebugEnabled()) {
        log.debug("I'm ahead of jump row & can't be here, or at end of tablet.");
      }
      topKey = null;
      topValue = null;
      return false;
    }
  } else if (comp < 0) { // a row behind jump key, need to move forward
    if (log.isDebugEnabled()) {
      String myRow = "";
      if (hasTop()) {
        myRow = topKey.getRow().toString();
      } else if (currentRow != null) {
        myRow = currentRow.toString();
      }
      log.debug("My row " + myRow + " is less than jump row: " + jumpKey.getRow() + " seeking");
    }
    range = buildRange(jumpKey.getRow());
    // this.seek(range, EMPTY_COL_FAMS, false);
    boolean success = jumpSeek(range);
    if (log.isDebugEnabled() && success) {
      log.debug("uid forced jump, found topKey: " + topKey);
    }

    if (!this.hasTop()) {
      log.debug("seeked with new row and had no top");
      topKey = null;
      topValue = null;
      return false;
    } else if (parentEndRow != null && currentRow.compareTo(parentEndRow) > 0) {
      if (log.isDebugEnabled()) {
        log.debug("myRow: " + getTopKey().getRow() + " is past parentEndRow: " + parentEndRow);
      }
      topKey = null;
      topValue = null;
      return false;
    }

    if (log.isDebugEnabled()) {
      log.debug("jumped, valid top: " + getTopKey());
    }
    return true;
  } else { // rows are equal, check the uid!
    keyParser.parse(topKey);
    String myUid = keyParser.getUid();
    keyParser.parse(jumpKey);
    String jumpUid = keyParser.getUid();

    int ucomp = myUid.compareTo(jumpUid);
    if (log.isDebugEnabled()) {
      log.debug("topKeyUid: " + myUid + " jumpUid: " + jumpUid + " myUid.compareTo(jumpUid)->" + ucomp);
    }
    if (ucomp < 0) { // need to move up
      log.debug("my uid is less than jumpUid, topUid: " + myUid + " jumpUid: " + jumpUid);

      Text cq = jumpKey.getColumnQualifier();
      int index = cq.find(NULL_BYTE);
      if (0 <= index) {
        cq.set(cq.getBytes(), index + 1, cq.getLength() - index - 1);
      } else {
        log.error("Expected a NULL separator in the column qualifier");
        this.topKey = null;
        this.topValue = null;
        return false;
      }

      // note my internal range stays the same, I just need to move forward
      Key startKey = new Key(topKey.getRow(), fName, new Text(fValue + NULL_BYTE + cq));
      Key endKey = new Key(topKey.getRow(), fName, new Text(fValue + ONE_BYTE));
      range = new Range(startKey, true, endKey, false);
      log.debug("Using range: " + range + " to seek");
      // source.seek(range, EMPTY_COL_FAMS, false);
      boolean success = jumpSeek(range);
      if (log.isDebugEnabled() && success) {
        log.debug("uid forced jump, found topKey: " + topKey);
      }
      return success;
    } else { // else do nothing
      log.debug("my uid is greater than jumpUid, topKey: " + topKey + " jumpKey: " + jumpKey);
      log.debug("doing nothing");
    }
  }

  return hasTop();
}
From source file:org.apache.accumulo.master.replication.WorkMaker.java
License:Apache License
protected void addWorkRecord(Text file, Value v, Map<String,String> targets, String sourceTableId) {
  log.info("Adding work records for " + file + " to targets " + targets);
  try {
    Mutation m = new Mutation(file);

    ReplicationTarget target = new ReplicationTarget();
    DataOutputBuffer buffer = new DataOutputBuffer();
    Text t = new Text();
    for (Entry<String,String> entry : targets.entrySet()) {
      buffer.reset();

      // Set up the writable
      target.setPeerName(entry.getKey());
      target.setRemoteIdentifier(entry.getValue());
      target.setSourceTableId(sourceTableId);
      target.write(buffer);

      // Throw it in a text for the mutation
      t.set(buffer.getData(), 0, buffer.getLength());

      // Add it to the work section
      WorkSection.add(m, t, v);
    }

    try {
      writer.addMutation(m);
    } catch (MutationsRejectedException e) {
      log.warn("Failed to write work mutations for replication, will retry", e);
    }
  } catch (IOException e) {
    log.warn("Failed to serialize data to Text, will retry", e);
  } finally {
    try {
      writer.flush();
    } catch (MutationsRejectedException e) {
      log.warn("Failed to write work mutations for replication, will retry", e);
    }
  }
}
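The t.set(buffer.getData(), 0, buffer.getLength()) line above is the standard way to wrap a serialized Writable in a Text: DataOutputBuffer.getData() returns the whole backing array, of which only the first getLength() bytes are valid, so the explicit (bytes, start, len) overload is required. A minimal sketch of the same idiom, using VLongWritable purely as an illustrative Writable:

    import java.io.IOException;
    import org.apache.hadoop.io.DataOutputBuffer;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.io.VLongWritable;

    public class WritableToText {
        public static void main(String[] args) throws IOException {
            DataOutputBuffer buffer = new DataOutputBuffer();
            VLongWritable w = new VLongWritable(42L);
            buffer.reset();   // rewind for reuse; the backing array is kept
            w.write(buffer);
            Text t = new Text();
            // Copy only the valid prefix of the backing array into the Text.
            t.set(buffer.getData(), 0, buffer.getLength());
            System.out.println(t.getLength()); // serialized size of the writable
        }
    }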
From source file:org.apache.accumulo.server.tabletserver.Tablet.java
License:Apache License
private SplitRowSpec findSplitRow(Collection<FileRef> files) {

  // never split the root tablet
  // check if we already decided that we can never split
  // check to see if we're big enough to split

  long splitThreshold = acuTableConf.getMemoryInBytes(Property.TABLE_SPLIT_THRESHOLD);
  if (extent.isRootTablet() || estimateTabletSize() <= splitThreshold) {
    return null;
  }

  // have seen a big row before, do not bother checking unless a minor compaction or map file import has occurred.
  if (sawBigRow) {
    if (timeOfLastMinCWhenBigFreakinRowWasSeen != lastMinorCompactionFinishTime
        || timeOfLastImportWhenBigFreakinRowWasSeen != lastMapFileImportTime) {
      // a minor compaction or map file import has occurred... check again
      sawBigRow = false;
    } else {
      // nothing changed, do not split
      return null;
    }
  }

  SortedMap<Double,Key> keys = null;

  try {
    // we should make .25 below configurable
    keys = FileUtil.findMidPoint(fs, tabletServer.getSystemConfiguration(), extent.getPrevEndRow(),
        extent.getEndRow(), files, .25);
  } catch (IOException e) {
    log.error("Failed to find midpoint " + e.getMessage());
    return null;
  }

  // check to see if one row takes up most of the tablet, in which case we can not split
  try {
    Text lastRow;
    if (extent.getEndRow() == null) {
      Key lastKey = (Key) FileUtil.findLastKey(fs, tabletServer.getSystemConfiguration(), files);
      lastRow = lastKey.getRow();
    } else {
      lastRow = extent.getEndRow();
    }

    // check to see that the midPoint is not equal to the end key
    if (keys.get(.5).compareRow(lastRow) == 0) {
      if (keys.firstKey() < .5) {
        Key candidate = keys.get(keys.firstKey());
        if (candidate.compareRow(lastRow) != 0) {
          // we should use this ratio in split size estimations
          if (log.isTraceEnabled())
            log.trace(String.format("Splitting at %6.2f instead of .5, row at .5 is same as end row%n", keys.firstKey()));
          return new SplitRowSpec(keys.firstKey(), candidate.getRow());
        }
      }

      log.warn("Cannot split tablet " + extent + " it contains a big row : " + lastRow);
      sawBigRow = true;
      timeOfLastMinCWhenBigFreakinRowWasSeen = lastMinorCompactionFinishTime;
      timeOfLastImportWhenBigFreakinRowWasSeen = lastMapFileImportTime;
      return null;
    }

    Key mid = keys.get(.5);
    Text text = (mid == null) ? null : mid.getRow();
    SortedMap<Double,Key> firstHalf = keys.headMap(.5);
    if (firstHalf.size() > 0) {
      Text beforeMid = firstHalf.get(firstHalf.lastKey()).getRow();
      Text shorter = new Text();
      int trunc = longestCommonLength(text, beforeMid);
      shorter.set(text.getBytes(), 0, Math.min(text.getLength(), trunc + 1));
      text = shorter;
    }
    return new SplitRowSpec(.5, text);
  } catch (IOException e) {
    // don't split now, but check again later
    log.error("Failed to find lastkey " + e.getMessage());
    return null;
  }
}
From source file:org.apache.accumulo.server.test.continuous.ContinuousBatchWalker.java
License:Apache License
private static void addRow(int batchSize, Value v) {
  byte[] val = v.get();

  int offset = ContinuousWalk.getPrevRowOffset(val);
  if (offset > 1) {
    Text prevRow = new Text();
    // copy the fixed-width, 16-byte previous-row reference out of the value
    prevRow.set(val, offset, 16);
    if (rowsToQuery.size() < 3 * batchSize) {
      rowsToQuery.add(prevRow);
    }
  }
}
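A fresh Text is allocated per stored row on purpose: rowsToQuery keeps the object, and set(byte[], int, int) copies the byte range into each Text's own backing array, so every entry stays valid after val is reused. Reusing one instance would also break the set, since a Text's hashCode follows its contents. A minimal sketch of this pattern (buffer contents and names are illustrative):

    import java.nio.charset.StandardCharsets;
    import java.util.HashSet;
    import java.util.Set;
    import org.apache.hadoop.io.Text;

    public class FreshTextPerElement {
        public static void main(String[] args) {
            byte[] data = "aaaabbbbcccc".getBytes(StandardCharsets.UTF_8);
            Set<Text> rows = new HashSet<>();
            for (int off = 0; off < data.length; off += 4) {
                Text row = new Text();  // new instance per element: the set keeps it
                row.set(data, off, 4);  // set() copies these 4 bytes into row's buffer
                rows.add(row);
            }
            System.out.println(rows.size()); // 3 distinct rows: aaaa, bbbb, cccc
        }
    }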
From source file:org.apache.accumulo.tserver.tablet.Tablet.java
License:Apache License
private SplitRowSpec findSplitRow(Collection<FileRef> files) {

  // never split the root tablet
  // check if we already decided that we can never split
  // check to see if we're big enough to split

  long splitThreshold = tableConfiguration.getMemoryInBytes(Property.TABLE_SPLIT_THRESHOLD);
  long maxEndRow = tableConfiguration.getMemoryInBytes(Property.TABLE_MAX_END_ROW_SIZE);
  if (extent.isRootTablet() || estimateTabletSize() <= splitThreshold) {
    return null;
  }

  // have seen a big row before, do not bother checking unless a minor compaction or map file import has occurred.
  if (sawBigRow) {
    if (timeOfLastMinCWhenBigFreakinRowWasSeen != lastMinorCompactionFinishTime
        || timeOfLastImportWhenBigFreakinRowWasSeen != lastMapFileImportTime) {
      // a minor compaction or map file import has occurred... check again
      sawBigRow = false;
    } else {
      // nothing changed, do not split
      return null;
    }
  }

  SortedMap<Double,Key> keys = null;

  try {
    // we should make .25 below configurable
    keys = FileUtil.findMidPoint(getTabletServer().getFileSystem(), getTabletServer().getConfiguration(),
        extent.getPrevEndRow(), extent.getEndRow(), FileUtil.toPathStrings(files), .25);
  } catch (IOException e) {
    log.error("Failed to find midpoint " + e.getMessage());
    return null;
  }

  // check to see if one row takes up most of the tablet, in which case we can not split
  try {
    Text lastRow;
    if (extent.getEndRow() == null) {
      Key lastKey = (Key) FileUtil.findLastKey(getTabletServer().getFileSystem(),
          getTabletServer().getConfiguration(), files);
      lastRow = lastKey.getRow();
    } else {
      lastRow = extent.getEndRow();
    }

    // We expect to get a midPoint for this set of files. If we don't get one, we have a problem.
    final Key mid = keys.get(.5);
    if (null == mid) {
      throw new IllegalStateException("Could not determine midpoint for files");
    }

    // check to see that the midPoint is not equal to the end key
    if (mid.compareRow(lastRow) == 0) {
      if (keys.firstKey() < .5) {
        Key candidate = keys.get(keys.firstKey());
        if (candidate.getLength() > maxEndRow) {
          log.warn("Cannot split tablet " + extent + ", selected split point too long. Length : " + candidate.getLength());
          sawBigRow = true;
          timeOfLastMinCWhenBigFreakinRowWasSeen = lastMinorCompactionFinishTime;
          timeOfLastImportWhenBigFreakinRowWasSeen = lastMapFileImportTime;
          return null;
        }
        if (candidate.compareRow(lastRow) != 0) {
          // we should use this ratio in split size estimations
          if (log.isTraceEnabled())
            log.trace(String.format("Splitting at %6.2f instead of .5, row at .5 is same as end row%n", keys.firstKey()));
          return new SplitRowSpec(keys.firstKey(), candidate.getRow());
        }
      }

      log.warn("Cannot split tablet " + extent + " it contains a big row : " + lastRow);
      sawBigRow = true;
      timeOfLastMinCWhenBigFreakinRowWasSeen = lastMinorCompactionFinishTime;
      timeOfLastImportWhenBigFreakinRowWasSeen = lastMapFileImportTime;
      return null;
    }

    Text text = mid.getRow();
    SortedMap<Double,Key> firstHalf = keys.headMap(.5);
    if (firstHalf.size() > 0) {
      Text beforeMid = firstHalf.get(firstHalf.lastKey()).getRow();
      Text shorter = new Text();
      int trunc = longestCommonLength(text, beforeMid);
      shorter.set(text.getBytes(), 0, Math.min(text.getLength(), trunc + 1));
      text = shorter;
    }

    if (text.getLength() > maxEndRow) {
      log.warn("Cannot split tablet " + extent + ", selected split point too long. Length : " + text.getLength());
      sawBigRow = true;
      timeOfLastMinCWhenBigFreakinRowWasSeen = lastMinorCompactionFinishTime;
      timeOfLastImportWhenBigFreakinRowWasSeen = lastMapFileImportTime;
      return null;
    }

    return new SplitRowSpec(.5, text);
  } catch (IOException e) {
    // don't split now, but check again later
    log.error("Failed to find lastkey " + e.getMessage());
    return null;
  }
}
From source file:org.apache.accumulo.tserver.Tablet.java
License:Apache License
private SplitRowSpec findSplitRow(Collection<FileRef> files) {

  // never split the root tablet
  // check if we already decided that we can never split
  // check to see if we're big enough to split

  long splitThreshold = acuTableConf.getMemoryInBytes(Property.TABLE_SPLIT_THRESHOLD);
  if (extent.isRootTablet() || estimateTabletSize() <= splitThreshold) {
    return null;
  }

  // have seen a big row before, do not bother checking unless a minor compaction or map file import has occurred.
  if (sawBigRow) {
    if (timeOfLastMinCWhenBigFreakinRowWasSeen != lastMinorCompactionFinishTime
        || timeOfLastImportWhenBigFreakinRowWasSeen != lastMapFileImportTime) {
      // a minor compaction or map file import has occurred... check again
      sawBigRow = false;
    } else {
      // nothing changed, do not split
      return null;
    }
  }

  SortedMap<Double,Key> keys = null;

  try {
    // we should make .25 below configurable
    keys = FileUtil.findMidPoint(fs, tabletServer.getSystemConfiguration(), extent.getPrevEndRow(),
        extent.getEndRow(), FileUtil.toPathStrings(files), .25);
  } catch (IOException e) {
    log.error("Failed to find midpoint " + e.getMessage());
    return null;
  }

  // check to see if one row takes up most of the tablet, in which case we can not split
  try {
    Text lastRow;
    if (extent.getEndRow() == null) {
      Key lastKey = (Key) FileUtil.findLastKey(fs, tabletServer.getSystemConfiguration(), files);
      lastRow = lastKey.getRow();
    } else {
      lastRow = extent.getEndRow();
    }

    // check to see that the midPoint is not equal to the end key
    if (keys.get(.5).compareRow(lastRow) == 0) {
      if (keys.firstKey() < .5) {
        Key candidate = keys.get(keys.firstKey());
        if (candidate.compareRow(lastRow) != 0) {
          // we should use this ratio in split size estimations
          if (log.isTraceEnabled())
            log.trace(String.format("Splitting at %6.2f instead of .5, row at .5 is same as end row%n", keys.firstKey()));
          return new SplitRowSpec(keys.firstKey(), candidate.getRow());
        }
      }

      log.warn("Cannot split tablet " + extent + " it contains a big row : " + lastRow);
      sawBigRow = true;
      timeOfLastMinCWhenBigFreakinRowWasSeen = lastMinorCompactionFinishTime;
      timeOfLastImportWhenBigFreakinRowWasSeen = lastMapFileImportTime;
      return null;
    }

    Key mid = keys.get(.5);
    Text text = (mid == null) ? null : mid.getRow();
    SortedMap<Double,Key> firstHalf = keys.headMap(.5);
    if (firstHalf.size() > 0) {
      Text beforeMid = firstHalf.get(firstHalf.lastKey()).getRow();
      Text shorter = new Text();
      int trunc = longestCommonLength(text, beforeMid);
      shorter.set(text.getBytes(), 0, Math.min(text.getLength(), trunc + 1));
      text = shorter;
    }
    return new SplitRowSpec(.5, text);
  } catch (IOException e) {
    // don't split now, but check again later
    log.error("Failed to find lastkey " + e.getMessage());
    return null;
  }
}
From source file:org.apache.avro.mapred.AvroAsTextRecordReader.java
License:Apache License
public boolean next(Text key, Text ignore) throws IOException {
  if (!reader.hasNext() || reader.pastSync(end))
    return false;
  datum = reader.next(datum);
  if (datum instanceof ByteBuffer) {
    ByteBuffer b = (ByteBuffer) datum;
    if (b.hasArray()) {
      int offset = b.arrayOffset();
      int start = b.position();
      int length = b.remaining();
      // Text.set takes (bytes, start, len): the third argument is a byte count,
      // not an end offset, so pass the remaining length here.
      key.set(b.array(), offset + start, length);
    } else {
      byte[] bytes = new byte[b.remaining()];
      b.duplicate().get(bytes);
      key.set(bytes);
    }
  } else {
    key.set(datum.toString());
  }
  return true;
}
From source file:org.apache.blur.manager.writer.IndexImporter.java
License:Apache License
private void applyDeletes(Directory directory, IndexWriter indexWriter, String shard, boolean emitDeletes)
    throws IOException {
  DirectoryReader reader = DirectoryReader.open(directory);
  try {
    LOG.info("Applying deletes in reader [{0}]", reader);
    CompositeReaderContext compositeReaderContext = reader.getContext();
    List<AtomicReaderContext> leaves = compositeReaderContext.leaves();
    BlurPartitioner blurPartitioner = new BlurPartitioner();
    Text key = new Text();
    int numberOfShards = _shardContext.getTableContext().getDescriptor().getShardCount();
    int shardId = ShardUtil.getShardIndex(shard);
    for (AtomicReaderContext context : leaves) {
      AtomicReader atomicReader = context.reader();
      Fields fields = atomicReader.fields();
      Terms terms = fields.terms(BlurConstants.ROW_ID);
      if (terms != null) {
        TermsEnum termsEnum = terms.iterator(null);
        BytesRef ref = null;
        while ((ref = termsEnum.next()) != null) {
          key.set(ref.bytes, ref.offset, ref.length);
          int partition = blurPartitioner.getPartition(key, null, numberOfShards);
          if (shardId != partition) {
            throw new IOException("Index is corrupted, RowIds are found in wrong shard, partition [" + partition
                + "] does not match shard [" + shardId + "], this can happen when rows are not hashed correctly.");
          }
          if (emitDeletes) {
            indexWriter.deleteDocuments(new Term(BlurConstants.ROW_ID, BytesRef.deepCopyOf(ref)));
          }
        }
      }
    }
  } finally {
    reader.close();
  }
}
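This example shows the opposite allocation pattern from the ContinuousBatchWalker one above: a single Text is reused for every term, because set(byte[], int, int) overwrites the previous contents in place and the key is only consulted transiently by the partitioner, never stored. A minimal sketch of the reuse idiom (the byte ranges and consumer here are illustrative):

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.io.Text;

    public class ReusedTextKey {
        public static void main(String[] args) {
            byte[] terms = "alphabetagamma".getBytes(StandardCharsets.UTF_8);
            int[][] ranges = { {0, 5}, {5, 4}, {9, 5} }; // {offset, length} per term
            Text key = new Text();
            for (int[] r : ranges) {
                key.set(terms, r[0], r[1]); // overwrite in place: no per-term allocation
                System.out.println(key);    // consume immediately; do not retain 'key'
            }
        }
    }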
From source file:org.apache.hama.bsp.KeyValueLineRecordReader.java
License:Apache License
/** Read key/value pair in a line. */
public synchronized boolean next(Text key, Text value) throws IOException {
  Text tKey = key;
  Text tValue = value;
  byte[] line = null;
  int lineLen = -1;
  if (lineRecordReader.next(dummyKey, innerValue)) {
    line = innerValue.getBytes();
    lineLen = innerValue.getLength();
  } else {
    return false;
  }
  if (line == null)
    return false;
  int pos = findSeparator(line, 0, lineLen, this.separator);
  if (pos == -1) {
    tKey.set(line, 0, lineLen);
    tValue.set("");
  } else {
    int keyLen = pos;
    byte[] keyBytes = new byte[keyLen];
    System.arraycopy(line, 0, keyBytes, 0, keyLen);
    int valLen = lineLen - keyLen - 1;
    byte[] valBytes = new byte[valLen];
    System.arraycopy(line, pos + 1, valBytes, 0, valLen);
    tKey.set(keyBytes);
    tValue.set(valBytes);
  }
  return true;
}
From source file:org.apache.hawq.pxf.plugins.hive.HiveORCVectorizedResolver.java
License:Apache License
private void populatePrimitiveColumn(PrimitiveCategory primitiveCategory, ObjectInspector oi,
    VectorizedRowBatch vectorizedBatch, int columnIndex) {
  ColumnVector columnVector = vectorizedBatch.cols[columnIndex];
  Object fieldValue = null;
  DataType fieldType = null;

  switch (primitiveCategory) {
  case BOOLEAN: {
    fieldType = BOOLEAN;
    LongColumnVector lcv = (LongColumnVector) columnVector;
    for (int rowIndex = 0; rowIndex < vectorizedBatch.size; rowIndex++) {
      fieldValue = null;
      if (lcv != null) {
        int rowId = lcv.isRepeating ? 0 : rowIndex;
        if (!lcv.isNull[rowId]) {
          fieldValue = lcv.vector[rowId] == 1;
        }
      }
      addValueToColumn(columnIndex, rowIndex, new OneField(fieldType.getOID(), fieldValue));
    }
    break;
  }
  case SHORT: {
    fieldType = SMALLINT;
    LongColumnVector lcv = (LongColumnVector) columnVector;
    for (int rowIndex = 0; rowIndex < vectorizedBatch.size; rowIndex++) {
      fieldValue = null;
      if (lcv != null) {
        int rowId = lcv.isRepeating ? 0 : rowIndex;
        if (!lcv.isNull[rowId]) {
          fieldValue = (short) lcv.vector[rowId];
        }
      }
      addValueToColumn(columnIndex, rowIndex, new OneField(fieldType.getOID(), fieldValue));
    }
    break;
  }
  case INT: {
    fieldType = INTEGER;
    LongColumnVector lcv = (LongColumnVector) columnVector;
    for (int rowIndex = 0; rowIndex < vectorizedBatch.size; rowIndex++) {
      fieldValue = null;
      if (lcv != null) {
        int rowId = lcv.isRepeating ? 0 : rowIndex;
        if (!lcv.isNull[rowId]) {
          fieldValue = (int) lcv.vector[rowId];
        }
      }
      addValueToColumn(columnIndex, rowIndex, new OneField(fieldType.getOID(), fieldValue));
    }
    break;
  }
  case LONG: {
    fieldType = BIGINT;
    LongColumnVector lcv = (LongColumnVector) columnVector;
    for (int rowIndex = 0; rowIndex < vectorizedBatch.size; rowIndex++) {
      fieldValue = null;
      if (lcv != null) {
        int rowId = lcv.isRepeating ? 0 : rowIndex;
        if (!lcv.isNull[rowId]) {
          fieldValue = lcv.vector[rowId];
        }
      }
      addValueToColumn(columnIndex, rowIndex, new OneField(fieldType.getOID(), fieldValue));
    }
    break;
  }
  case FLOAT: {
    fieldType = REAL;
    DoubleColumnVector dcv = (DoubleColumnVector) columnVector;
    for (int rowIndex = 0; rowIndex < vectorizedBatch.size; rowIndex++) {
      fieldValue = null;
      if (dcv != null) {
        int rowId = dcv.isRepeating ? 0 : rowIndex;
        if (!dcv.isNull[rowId]) {
          fieldValue = (float) dcv.vector[rowId];
        }
      }
      addValueToColumn(columnIndex, rowIndex, new OneField(fieldType.getOID(), fieldValue));
    }
    break;
  }
  case DOUBLE: {
    fieldType = FLOAT8;
    DoubleColumnVector dcv = (DoubleColumnVector) columnVector;
    for (int rowIndex = 0; rowIndex < vectorizedBatch.size; rowIndex++) {
      fieldValue = null;
      if (dcv != null) {
        int rowId = dcv.isRepeating ? 0 : rowIndex;
        if (!dcv.isNull[rowId]) {
          fieldValue = dcv.vector[rowId];
        }
      }
      addValueToColumn(columnIndex, rowIndex, new OneField(fieldType.getOID(), fieldValue));
    }
    break;
  }
  case DECIMAL: {
    fieldType = NUMERIC;
    DecimalColumnVector dcv = (DecimalColumnVector) columnVector;
    for (int rowIndex = 0; rowIndex < vectorizedBatch.size; rowIndex++) {
      fieldValue = null;
      if (dcv != null) {
        int rowId = dcv.isRepeating ? 0 : rowIndex;
        if (!dcv.isNull[rowId]) {
          fieldValue = dcv.vector[rowId];
        }
      }
      addValueToColumn(columnIndex, rowIndex, new OneField(fieldType.getOID(), fieldValue));
    }
    break;
  }
  case VARCHAR: {
    fieldType = VARCHAR;
    BytesColumnVector bcv = (BytesColumnVector) columnVector;
    for (int rowIndex = 0; rowIndex < vectorizedBatch.size; rowIndex++) {
      fieldValue = null;
      if (columnVector != null) {
        int rowId = bcv.isRepeating ? 0 : rowIndex;
        if (!bcv.isNull[rowId]) {
          Text textValue = new Text();
          // index with rowId, not rowIndex, so repeating vectors read their single
          // entry at 0 (same fix applies to the CHAR, STRING, DATE, and BYTE cases)
          textValue.set(bcv.vector[rowId], bcv.start[rowId], bcv.length[rowId]);
          fieldValue = textValue;
        }
      }
      addValueToColumn(columnIndex, rowIndex, new OneField(fieldType.getOID(), fieldValue));
    }
    break;
  }
  case CHAR: {
    fieldType = BPCHAR;
    BytesColumnVector bcv = (BytesColumnVector) columnVector;
    for (int rowIndex = 0; rowIndex < vectorizedBatch.size; rowIndex++) {
      fieldValue = null;
      if (columnVector != null) {
        int rowId = bcv.isRepeating ? 0 : rowIndex;
        if (!bcv.isNull[rowId]) {
          Text textValue = new Text();
          textValue.set(bcv.vector[rowId], bcv.start[rowId], bcv.length[rowId]);
          fieldValue = textValue;
        }
      }
      addValueToColumn(columnIndex, rowIndex, new OneField(fieldType.getOID(), fieldValue));
    }
    break;
  }
  case STRING: {
    fieldType = TEXT;
    BytesColumnVector bcv = (BytesColumnVector) columnVector;
    for (int rowIndex = 0; rowIndex < vectorizedBatch.size; rowIndex++) {
      fieldValue = null;
      if (columnVector != null) {
        int rowId = bcv.isRepeating ? 0 : rowIndex;
        if (!bcv.isNull[rowId]) {
          Text textValue = new Text();
          textValue.set(bcv.vector[rowId], bcv.start[rowId], bcv.length[rowId]);
          fieldValue = textValue;
        }
      }
      addValueToColumn(columnIndex, rowIndex, new OneField(fieldType.getOID(), fieldValue));
    }
    break;
  }
  case BINARY: {
    fieldType = BYTEA;
    BytesColumnVector bcv = (BytesColumnVector) columnVector;
    for (int rowIndex = 0; rowIndex < vectorizedBatch.size; rowIndex++) {
      fieldValue = null;
      if (columnVector != null) {
        int rowId = bcv.isRepeating ? 0 : rowIndex;
        if (!bcv.isNull[rowId]) {
          fieldValue = new byte[bcv.length[rowId]];
          System.arraycopy(bcv.vector[rowId], bcv.start[rowId], fieldValue, 0, bcv.length[rowId]);
        }
      }
      addValueToColumn(columnIndex, rowIndex, new OneField(fieldType.getOID(), fieldValue));
    }
    break;
  }
  case DATE: {
    fieldType = DATE;
    LongColumnVector lcv = (LongColumnVector) columnVector;
    for (int rowIndex = 0; rowIndex < vectorizedBatch.size; rowIndex++) {
      fieldValue = null;
      if (lcv != null) {
        int rowId = lcv.isRepeating ? 0 : rowIndex;
        if (!lcv.isNull[rowId]) {
          fieldValue = new Date(DateWritable.daysToMillis((int) lcv.vector[rowId]));
        }
      }
      addValueToColumn(columnIndex, rowIndex, new OneField(fieldType.getOID(), fieldValue));
    }
    break;
  }
  case BYTE: {
    fieldType = SMALLINT;
    LongColumnVector lcv = (LongColumnVector) columnVector;
    for (int rowIndex = 0; rowIndex < vectorizedBatch.size; rowIndex++) {
      fieldValue = null;
      if (lcv != null) {
        int rowId = lcv.isRepeating ? 0 : rowIndex;
        if (!lcv.isNull[rowId]) {
          fieldValue = (short) lcv.vector[rowId];
        }
      }
      addValueToColumn(columnIndex, rowIndex, new OneField(fieldType.getOID(), fieldValue));
    }
    break;
  }
  default: {
    throw new UnsupportedTypeException(
        oi.getTypeName() + " conversion is not supported by " + getClass().getSimpleName());
  }
  }
}