List of usage examples for java.nio.ByteBuffer.compareTo(ByteBuffer)
public int compareTo(ByteBuffer otherBuffer)
From source file:Main.java
public static void main(String[] argv) throws Exception { ByteBuffer bbuf = ByteBuffer.allocate(10); int capacity = bbuf.capacity(); // 10 System.out.println(capacity); bbuf.putShort(2, (short) 123); ByteBuffer bb = bbuf.duplicate(); System.out.println(bb.compareTo(bb)); }
From source file:Main.java
public static int byteArrayCompare(byte[] byte1, byte[] byte2) { byte[] tByte1 = new byte[byte2.length]; ByteArrayInputStream input = new ByteArrayInputStream(byte1); try {/* ww w . ja v a 2s. c o m*/ input.read(tByte1); } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } ByteBuffer byteBuf1 = ByteBuffer.wrap(tByte1); ByteBuffer byteBuf2 = ByteBuffer.wrap(byte2); return byteBuf1.compareTo(byteBuf2); }
From source file:com.sequoiadb.hadoop.io.BSONWritableComparator.java
/**
 * Compares two BSON field values, dispatching on the runtime type of
 * {@code one}; {@code two} is cast to the same type.
 *
 * @param one left-hand value (its type selects the comparison strategy)
 * @param two right-hand value, assumed to share {@code one}'s type
 * @return negative, zero, or positive per the Comparable contract; 0 when
 *         the type of {@code one} is not one of the handled types
 */
private int compareValues(Object one, Object two) {
    if (one instanceof Number) {
        // NOTE(review): routing every Number through Double can lose
        // precision for longs beyond 2^53 — preserved from the original.
        return Double.valueOf(one.toString()).compareTo(Double.valueOf(two.toString()));
    }
    if (one instanceof String) {
        return ((String) one).compareTo((String) two);
    }
    if (one instanceof BSONObject) {
        // Nested documents delegate to the sibling compare(BSONObject, BSONObject).
        return compare((BSONObject) one, (BSONObject) two);
    }
    if (one instanceof Binary) {
        // Binary payloads compare lexicographically via ByteBuffer.
        return ByteBuffer.wrap(((Binary) one).getData()).compareTo(ByteBuffer.wrap(((Binary) two).getData()));
    }
    if (one instanceof byte[]) {
        return ByteBuffer.wrap((byte[]) one).compareTo(ByteBuffer.wrap((byte[]) two));
    }
    if (one instanceof ObjectId) {
        return ((ObjectId) one).compareTo((ObjectId) two);
    }
    if (one instanceof Boolean) {
        return ((Boolean) one).compareTo((Boolean) two);
    }
    if (one instanceof Date) {
        return ((Date) one).compareTo((Date) two);
    }
    if (one instanceof BSONTimestamp) {
        return compareBSONTimestamp((BSONTimestamp) one, (BSONTimestamp) two);
    }
    // Unrecognized types are treated as equal, as in the original dispatch.
    return 0;
}
From source file:org.alfresco.cacheserver.PatchServiceTest.java
/**
 * Asserts that the two files at the given paths exist and have byte-identical
 * contents.
 *
 * @param contentPath1 path of the first file
 * @param contentPath2 path of the second file
 * @throws IOException if either file cannot be read
 */
private void assertFileEquals(String contentPath1, String contentPath2) throws IOException {
    File file1 = new File(contentPath1);
    if (!file1.exists()) {
        fail();
    }
    File file2 = new File(contentPath2);
    if (!file2.exists()) {
        fail();
    }
    // BUG FIX: the original compared only the first (up to) 1024 bytes
    // returned by a single channel.read() call, so files longer than 1 KB —
    // or a short read — could differ beyond that prefix and still pass.
    // Read both files completely and compare every byte.
    ByteBuffer bb1 = ByteBuffer.wrap(Files.readAllBytes(file1.toPath()));
    ByteBuffer bb2 = ByteBuffer.wrap(Files.readAllBytes(file2.toPath()));
    assertEquals(0, bb1.compareTo(bb2));
}
From source file:com.dappervision.hbase.mapred.TypedBytesTableInputFormatBase.java
/**
 * Calculates the splits that will serve as input for the map tasks.
 *
 * <p>Splits are created in number equal to the smallest between numSplits and
 * the number of {@link HRegion}s in the table. If the number of splits is
 * smaller than the number of {@link HRegion}s then splits are spanned across
 * multiple {@link HRegion}s and are grouped the most evenly possible. In the
 * case splits are uneven the bigger splits are placed first in the
 * {@link InputSplit} array.
 *
 * @param job the map task {@link JobConf}
 * @param numSplits a hint to calculate the number of splits (mapred.map.tasks).
 * @return the input splits
 * @throws IOException if no table was provided, no region survives the
 *         start/stop-row filter, or no input columns were configured
 * @see org.apache.hadoop.mapred.InputFormat#getSplits(org.apache.hadoop.mapred.JobConf, int)
 */
public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
    if (this.table == null) {
        throw new IOException("No table was provided");
    }
    byte[][] startKeys = this.table.getStartKeys();
    // NOTE(brandyn): Here we remove regions that are entirely outside of our start/stop rows
    ByteBuffer emptyStartRow = ByteBuffer.wrap(HConstants.EMPTY_START_ROW);
    ArrayList<byte[]> startKeysList = new ArrayList<byte[]>();
    for (int i = 0; i < startKeys.length; i++) {
        ByteBuffer curStartKey = ByteBuffer.wrap(startKeys[i]);
        // A region's end key is the next region's start key; the last region's
        // end key is represented by the empty-start-row sentinel.
        ByteBuffer curEndKey = ByteBuffer
                .wrap(((i + 1) < startKeys.length) ? startKeys[i + 1] : HConstants.EMPTY_START_ROW);
        // Skip regions that end before the requested start row.
        // NOTE(review): strict '< 0' keeps a region whose end key equals
        // startRow — confirm that boundary is intended.
        if (startRow != null && curEndKey.compareTo(startRow) < 0 && curEndKey.compareTo(emptyStartRow) != 0) {
            LOG.info("Skipping split (< start)...");
            continue;
        }
        // Skip regions that begin after the requested stop row.
        if (stopRow != null && curStartKey.compareTo(stopRow) > 0) {
            LOG.info("Skipping split (> stop)...");
            continue;
        }
        startKeysList.add(startKeys[i]);
    }
    startKeys = startKeysList.toArray(new byte[startKeysList.size()][]);
    // toArray never returns null; the null test below is purely defensive.
    if (startKeys == null || startKeys.length == 0) {
        throw new IOException("Expecting at least one region");
    }
    if (this.inputColumns == null || this.inputColumns.length == 0) {
        throw new IOException("Expecting at least one column");
    }
    int realNumSplits = numSplits > startKeys.length ? startKeys.length : numSplits;
    InputSplit[] splits = new InputSplit[realNumSplits];
    int middle = startKeys.length / realNumSplits;
    int startPos = 0;
    int curSplit = 0;
    for (int i = 0; i < realNumSplits; i++) {
        int lastPos = startPos + middle;
        // Distribute the remainder: the first (length % realNumSplits) splits
        // each take one extra region, so the bigger splits come first.
        lastPos = startKeys.length % realNumSplits > i ? lastPos + 1 : lastPos;
        String regionLocation = table.getRegionLocation(startKeys[startPos]).getServerAddress().getHostname();
        ByteBuffer curStartKey = ByteBuffer.wrap(startKeys[startPos]);
        ByteBuffer curEndKey = ByteBuffer
                .wrap(((i + 1) < realNumSplits) ? startKeys[lastPos] : HConstants.EMPTY_START_ROW);
        startPos = lastPos;
        // NOTE(brandyn): Truncate splits that overlap start/end row
        if (startRow != null && curStartKey.compareTo(startRow) < 0) {
            LOG.info("Truncating split...");
            curStartKey = startRow;
        }
        if (stopRow != null && (curEndKey.compareTo(stopRow) > 0 || curEndKey.compareTo(emptyStartRow) == 0)) {
            LOG.info("Truncating split...");
            curEndKey = stopRow;
        }
        // NOTE(review): array() returns the full backing array ignoring
        // position/arrayOffset — correct for buffers wrapping whole arrays;
        // verify startRow/stopRow are wrapped without an offset.
        splits[curSplit] = new TableSplit(this.table.getTableName(), curStartKey.array(), curEndKey.array(),
                regionLocation);
        LOG.info("split: " + i + "->" + splits[curSplit]);
        curSplit += 1;
    }
    // curSplit always equals realNumSplits here (no skips in this loop);
    // the copy is defensive trimming only.
    return Arrays.copyOf(splits, curSplit);
}
From source file:org.alfresco.contentstore.ContentStoreTest.java
/**
 * Asserts that the file at the given path exists and its contents are
 * byte-identical to the expected byte array.
 *
 * @param bytes       expected file contents
 * @param contentPath path of the file to check
 * @throws IOException if the file cannot be read
 */
private void assertFileEquals(byte[] bytes, String contentPath) throws IOException {
    File file = new File(contentPath);
    if (!file.exists()) {
        fail();
    }
    // BUG FIX: the original compared only the first (up to) 1024 bytes from a
    // single channel.read() call; a file longer than 1 KB — or a short read —
    // could differ beyond that prefix and still pass. Read the whole file.
    ByteBuffer bb = ByteBuffer.wrap(Files.readAllBytes(file.toPath()));
    assertEquals(0, bb.compareTo(ByteBuffer.wrap(bytes)));
}
From source file:org.apache.cassandra.db.marshal.TimeUUIDType.java
public int compare(ByteBuffer o1, ByteBuffer o2) { if (o1.remaining() == 0) { return o2.remaining() == 0 ? 0 : -1; }//from w w w .j a v a 2 s . c o m if (o2.remaining() == 0) { return 1; } int res = compareTimestampBytes(o1, o2); if (res != 0) return res; return o1.compareTo(o2); }
From source file:org.apache.hadoop.hbase.io.hfile.TestHFile.java
/**
 * Verifies that meta blocks "HFileMeta0" .. "HFileMeta{n-1}" can be read back
 * from the reader and that each matches the payload written for it.
 *
 * @param reader HFile reader to query for meta blocks
 * @param n      number of meta blocks expected
 * @throws IOException if a meta block cannot be read
 */
private void readNumMetablocks(Reader reader, int n) throws IOException {
    int index = 0;
    while (index < n) {
        ByteBuffer stored = reader.getMetaBlock("HFileMeta" + index, false);
        byte[] payload = ("something to test" + index).getBytes();
        assertTrue("failed to match metadata", stored.compareTo(ByteBuffer.wrap(payload)) == 0);
        index++;
    }
}
From source file:org.apache.hadoop.hbase.regionserver.TestHStoreFile.java
/**
 * Splits a store file at its midkey into top and bottom halves and verifies
 * that half-readers cover exactly the expected key ranges, including the
 * degenerate cases where the split key lies before the first key (empty
 * bottom) or after the last key (empty top) of the file.
 */
private void checkHalfHFile(final HRegionFileSystem regionFs, final HStoreFile f) throws IOException {
    f.initReader();
    Cell midkey = f.getReader().midkey();
    KeyValue midKV = (KeyValue) midkey;
    byte[] midRow = CellUtil.cloneRow(midKV);
    // Create top split.
    HRegionInfo topHri = new HRegionInfo(regionFs.getRegionInfo().getTable(), null, midRow);
    Path topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, midRow, true);
    // Create bottom split.
    HRegionInfo bottomHri = new HRegionInfo(regionFs.getRegionInfo().getTable(), midRow, null);
    Path bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, midRow, false);
    // Make readers on top and bottom.
    HStoreFile topF = new HStoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE, true);
    topF.initReader();
    StoreFileReader top = topF.getReader();
    HStoreFile bottomF = new HStoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE, true);
    bottomF.initReader();
    StoreFileReader bottom = bottomF.getReader();
    ByteBuffer previous = null;
    LOG.info("Midkey: " + midKV.toString());
    ByteBuffer bbMidkeyBytes = ByteBuffer.wrap(midKV.getKey());
    try {
        // Now make two HalfMapFiles and assert they can read the full backing
        // file, one from the top and the other from the bottom.
        // Test bottom half first.
        // Now test reading from the top.
        boolean first = true;
        ByteBuffer key = null;
        HFileScanner topScanner = top.getScanner(false, false);
        while ((!topScanner.isSeeked() && topScanner.seekTo())
                || (topScanner.isSeeked() && topScanner.next())) {
            key = ByteBuffer.wrap(((KeyValue) topScanner.getKey()).getKey());
            // Every key in the top half must be >= the midkey.
            if ((topScanner.getReader().getComparator().compare(midKV, key.array(), key.arrayOffset(),
                    key.limit())) > 0) {
                fail("key=" + Bytes.toStringBinary(key) + " < midkey=" + midkey);
            }
            if (first) {
                first = false;
                LOG.info("First in top: " + Bytes.toString(Bytes.toBytes(key)));
            }
        }
        LOG.info("Last in top: " + Bytes.toString(Bytes.toBytes(key)));
        first = true;
        HFileScanner bottomScanner = bottom.getScanner(false, false);
        while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) || bottomScanner.next()) {
            previous = ByteBuffer.wrap(((KeyValue) bottomScanner.getKey()).getKey());
            key = ByteBuffer.wrap(((KeyValue) bottomScanner.getKey()).getKey());
            if (first) {
                first = false;
                LOG.info("First in bottom: " + Bytes.toString(Bytes.toBytes(previous)));
            }
            // Every key in the bottom half must sort strictly before the midkey.
            assertTrue(key.compareTo(bbMidkeyBytes) < 0);
        }
        if (previous != null) {
            LOG.info("Last in bottom: " + Bytes.toString(Bytes.toBytes(previous)));
        }
        // Remove references.
        regionFs.cleanupDaughterRegion(topHri);
        regionFs.cleanupDaughterRegion(bottomHri);
        // Next test using a midkey that does not exist in the file.
        // First, do a key that is < than first key. Ensure splits behave
        // properly.
        byte[] badmidkey = Bytes.toBytes(" .");
        assertTrue(fs.exists(f.getPath()));
        topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, badmidkey, true);
        bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false);
        // With a split key below the first key, the bottom half is empty.
        assertNull(bottomPath);
        topF = new HStoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE, true);
        topF.initReader();
        top = topF.getReader();
        // Now read from the top.
        first = true;
        topScanner = top.getScanner(false, false);
        KeyValue.KeyOnlyKeyValue keyOnlyKV = new KeyValue.KeyOnlyKeyValue();
        while ((!topScanner.isSeeked() && topScanner.seekTo()) || topScanner.next()) {
            key = ByteBuffer.wrap(((KeyValue) topScanner.getKey()).getKey());
            keyOnlyKV.setKey(key.array(), 0 + key.arrayOffset(), key.limit());
            assertTrue(topScanner.getReader().getComparator().compare(keyOnlyKV, badmidkey, 0,
                    badmidkey.length) >= 0);
            if (first) {
                first = false;
                KeyValue keyKV = KeyValueUtil.createKeyValueFromKey(key);
                LOG.info("First top when key < bottom: " + keyKV);
                String tmp = Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength());
                for (int i = 0; i < tmp.length(); i++) {
                    assertTrue(tmp.charAt(i) == 'a');
                }
            }
        }
        KeyValue keyKV = KeyValueUtil.createKeyValueFromKey(key);
        LOG.info("Last top when key < bottom: " + keyKV);
        String tmp = Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength());
        for (int i = 0; i < tmp.length(); i++) {
            assertTrue(tmp.charAt(i) == 'z');
        }
        // Remove references.
        regionFs.cleanupDaughterRegion(topHri);
        regionFs.cleanupDaughterRegion(bottomHri);
        // Test when badkey is > than last key in file ('||' > 'zz').
        badmidkey = Bytes.toBytes("|||");
        topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, badmidkey, true);
        bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false);
        // With a split key above the last key, the top half is empty.
        assertNull(topPath);
        bottomF = new HStoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE, true);
        bottomF.initReader();
        bottom = bottomF.getReader();
        first = true;
        bottomScanner = bottom.getScanner(false, false);
        while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) || bottomScanner.next()) {
            key = ByteBuffer.wrap(((KeyValue) bottomScanner.getKey()).getKey());
            if (first) {
                first = false;
                keyKV = KeyValueUtil.createKeyValueFromKey(key);
                LOG.info("First bottom when key > top: " + keyKV);
                tmp = Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength());
                for (int i = 0; i < tmp.length(); i++) {
                    assertTrue(tmp.charAt(i) == 'a');
                }
            }
        }
        keyKV = KeyValueUtil.createKeyValueFromKey(key);
        LOG.info("Last bottom when key > top: " + keyKV);
        for (int i = 0; i < tmp.length(); i++) {
            assertTrue(Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength())
                    .charAt(i) == 'z');
        }
    } finally {
        if (top != null) {
            top.close(true); // evict since we are about to delete the file
        }
        if (bottom != null) {
            bottom.close(true); // evict since we are about to delete the file
        }
        fs.delete(f.getPath(), true);
    }
}
From source file:org.apache.hadoop.hbase.regionserver.TestStoreFile.java
/**
 * Splits a store file at its midkey into top and bottom halves and verifies
 * that half-readers cover exactly the expected key ranges, including the
 * degenerate cases where the split key lies before the first key (empty
 * bottom) or after the last key (empty top) of the file.
 */
private void checkHalfHFile(final HRegionFileSystem regionFs, final StoreFile f) throws IOException {
    byte[] midkey = f.createReader().midkey();
    KeyValue midKV = KeyValue.createKeyValueFromKey(midkey);
    byte[] midRow = midKV.getRow();
    // Create top split.
    HRegionInfo topHri = new HRegionInfo(regionFs.getRegionInfo().getTable(), null, midRow);
    Path topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, midRow, true);
    // Create bottom split.
    HRegionInfo bottomHri = new HRegionInfo(regionFs.getRegionInfo().getTable(), midRow, null);
    Path bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, midRow, false);
    // Make readers on top and bottom.
    StoreFile.Reader top = new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE).createReader();
    StoreFile.Reader bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE)
            .createReader();
    ByteBuffer previous = null;
    LOG.info("Midkey: " + midKV.toString());
    ByteBuffer bbMidkeyBytes = ByteBuffer.wrap(midkey);
    try {
        // Now make two HalfMapFiles and assert they can read the full backing
        // file, one from the top and the other from the bottom.
        // Test bottom half first.
        // Now test reading from the top.
        boolean first = true;
        ByteBuffer key = null;
        HFileScanner topScanner = top.getScanner(false, false);
        while ((!topScanner.isSeeked() && topScanner.seekTo())
                || (topScanner.isSeeked() && topScanner.next())) {
            key = topScanner.getKey();
            // Every key in the top half must be >= the midkey.
            if (topScanner.getReader().getComparator().compareFlatKey(key.array(), key.arrayOffset(),
                    key.limit(), midkey, 0, midkey.length) < 0) {
                fail("key=" + Bytes.toStringBinary(key) + " < midkey=" + Bytes.toStringBinary(midkey));
            }
            if (first) {
                first = false;
                LOG.info("First in top: " + Bytes.toString(Bytes.toBytes(key)));
            }
        }
        LOG.info("Last in top: " + Bytes.toString(Bytes.toBytes(key)));
        first = true;
        HFileScanner bottomScanner = bottom.getScanner(false, false);
        while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) || bottomScanner.next()) {
            previous = bottomScanner.getKey();
            key = bottomScanner.getKey();
            if (first) {
                first = false;
                LOG.info("First in bottom: " + Bytes.toString(Bytes.toBytes(previous)));
            }
            // Every key in the bottom half must sort strictly before the midkey.
            assertTrue(key.compareTo(bbMidkeyBytes) < 0);
        }
        if (previous != null) {
            LOG.info("Last in bottom: " + Bytes.toString(Bytes.toBytes(previous)));
        }
        // Remove references.
        regionFs.cleanupDaughterRegion(topHri);
        regionFs.cleanupDaughterRegion(bottomHri);
        // Next test using a midkey that does not exist in the file.
        // First, do a key that is < than first key. Ensure splits behave
        // properly.
        byte[] badmidkey = Bytes.toBytes(" .");
        assertTrue(fs.exists(f.getPath()));
        topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, badmidkey, true);
        bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false);
        // With a split key below the first key, the bottom half is empty.
        assertNull(bottomPath);
        top = new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE).createReader();
        // Now read from the top.
        first = true;
        topScanner = top.getScanner(false, false);
        while ((!topScanner.isSeeked() && topScanner.seekTo()) || topScanner.next()) {
            key = topScanner.getKey();
            assertTrue(topScanner.getReader().getComparator().compareFlatKey(key.array(),
                    key.arrayOffset(), key.limit(), badmidkey, 0, badmidkey.length) >= 0);
            if (first) {
                first = false;
                KeyValue keyKV = KeyValue.createKeyValueFromKey(key);
                LOG.info("First top when key < bottom: " + keyKV);
                String tmp = Bytes.toString(keyKV.getRow());
                for (int i = 0; i < tmp.length(); i++) {
                    assertTrue(tmp.charAt(i) == 'a');
                }
            }
        }
        KeyValue keyKV = KeyValue.createKeyValueFromKey(key);
        LOG.info("Last top when key < bottom: " + keyKV);
        String tmp = Bytes.toString(keyKV.getRow());
        for (int i = 0; i < tmp.length(); i++) {
            assertTrue(tmp.charAt(i) == 'z');
        }
        // Remove references.
        regionFs.cleanupDaughterRegion(topHri);
        regionFs.cleanupDaughterRegion(bottomHri);
        // Test when badkey is > than last key in file ('||' > 'zz').
        badmidkey = Bytes.toBytes("|||");
        topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, badmidkey, true);
        bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false);
        // With a split key above the last key, the top half is empty.
        assertNull(topPath);
        bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE).createReader();
        first = true;
        bottomScanner = bottom.getScanner(false, false);
        while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) || bottomScanner.next()) {
            key = bottomScanner.getKey();
            if (first) {
                first = false;
                keyKV = KeyValue.createKeyValueFromKey(key);
                LOG.info("First bottom when key > top: " + keyKV);
                tmp = Bytes.toString(keyKV.getRow());
                for (int i = 0; i < tmp.length(); i++) {
                    assertTrue(tmp.charAt(i) == 'a');
                }
            }
        }
        keyKV = KeyValue.createKeyValueFromKey(key);
        LOG.info("Last bottom when key > top: " + keyKV);
        for (int i = 0; i < tmp.length(); i++) {
            assertTrue(Bytes.toString(keyKV.getRow()).charAt(i) == 'z');
        }
    } finally {
        if (top != null) {
            top.close(true); // evict since we are about to delete the file
        }
        if (bottom != null) {
            bottom.close(true); // evict since we are about to delete the file
        }
        fs.delete(f.getPath(), true);
    }
}