List of usage examples for java.lang.Byte.MAX_VALUE

public static final byte MAX_VALUE: a constant holding the maximum value a byte can have, 2^7 - 1 (127).
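Before the examples from real projects below, here is a minimal, self-contained sketch (not taken from any of the sources listed) showing the value of the constant and the wrap-around that occurs when it is exceeded:

public class ByteMaxValueDemo {
    public static void main(String[] args) {
        System.out.println(Byte.MAX_VALUE); // prints 127, i.e. 2^7 - 1

        byte b = Byte.MAX_VALUE;
        b++; // byte arithmetic wraps around on overflow
        System.out.println(b); // prints -128, i.e. Byte.MIN_VALUE
    }
}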
From source file: org.godhuli.rhipe.hbase.Util.java

public static Scan[] generateBytePrefixScans(Calendar startCal, Calendar endCal, String dateFormat,
        ArrayList<Pair<String, String>> columns, int caching, boolean cacheBlocks) {
    ArrayList<Scan> scans = new ArrayList<Scan>();
    SimpleDateFormat rowsdf = new SimpleDateFormat(dateFormat);
    long endTime = getEndTimeAtResolution(endCal.getTimeInMillis(), Calendar.DATE);
    byte[] temp = new byte[1];
    while (startCal.getTimeInMillis() < endTime) {
        // Create one scan per single-byte row-key prefix for the current day.
        for (byte b = Byte.MIN_VALUE; b < Byte.MAX_VALUE; b++) {
            int d = Integer.parseInt(rowsdf.format(startCal.getTime()));
            Scan s = new Scan();
            s.setCaching(caching);
            s.setCacheBlocks(cacheBlocks);
            // add columns
            for (Pair<String, String> pair : columns) {
                s.addColumn(pair.getFirst().getBytes(), pair.getSecond().getBytes());
            }
            temp[0] = b;
            s.setStartRow(Bytes.add(temp, Bytes.toBytes(String.format("%06d", d))));
            s.setStopRow(Bytes.add(temp, Bytes.toBytes(String.format("%06d", d + 1))));
            if (LOG.isDebugEnabled()) {
                LOG.debug("Adding start-stop range: " + temp + String.format("%06d", d) + " - " + temp
                        + String.format("%06d", d + 1));
            }
            scans.add(s);
        }
        startCal.add(Calendar.DATE, 1);
    }
    return scans.toArray(new Scan[scans.size()]);
}
From source file: it.unipmn.di.dcs.common.conversion.Base64.java

/**
 * Decodes Base64 data.
 *
 * Original method name is <em>decode</em>.
 */
public static byte[] Decode(String data) {
    char[] ibuf = new char[4];
    int ibufcount = 0;
    byte[] obuf = new byte[data.length() / 4 * 3 + 3];
    int obufcount = 0;
    for (int i = 0; i < data.length(); i++) {
        char ch = data.charAt(i);
        // A table entry of Byte.MAX_VALUE marks a character that is not part
        // of the Base64 alphabet; such characters are skipped.
        if (ch == S_BASE64PAD || ch < S_DECODETABLE.length && S_DECODETABLE[ch] != Byte.MAX_VALUE) {
            ibuf[ibufcount++] = ch;
            if (ibufcount == ibuf.length) {
                ibufcount = 0;
                obufcount += Decode0(ibuf, obuf, obufcount);
            }
        }
    }
    if (obufcount == obuf.length)
        return obuf;
    byte[] ret = new byte[obufcount];
    System.arraycopy(obuf, 0, ret, 0, obufcount);
    return ret;
}
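The S_DECODETABLE field referenced above is not shown in this listing. A common way to build such a table, given here only as a hypothetical sketch rather than the actual field from this source, is to fill it with Byte.MAX_VALUE as an "invalid character" sentinel and then assign six-bit values only to the 64 legal Base64 characters; that is exactly the property the S_DECODETABLE[ch] != Byte.MAX_VALUE test relies on:

private static final byte[] S_DECODETABLE = new byte[128];
static {
    // Byte.MAX_VALUE marks characters outside the Base64 alphabet.
    java.util.Arrays.fill(S_DECODETABLE, Byte.MAX_VALUE);
    String alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
    for (byte i = 0; i < alphabet.length(); i++) {
        S_DECODETABLE[alphabet.charAt(i)] = i; // six-bit value of each alphabet character
    }
}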
From source file: org.apache.hadoop.hive.ql.exec.ExecReducer.java

@Override
public void configure(JobConf job) {
    rowObjectInspector = new ObjectInspector[Byte.MAX_VALUE];
    ObjectInspector[] valueObjectInspector = new ObjectInspector[Byte.MAX_VALUE];
    ObjectInspector keyObjectInspector;

    // Allocate the bean at the beginning -
    memoryMXBean = ManagementFactory.getMemoryMXBean();
    l4j.info("maximum memory = " + memoryMXBean.getHeapMemoryUsage().getMax());

    isLogInfoEnabled = l4j.isInfoEnabled();

    try {
        l4j.info("conf classpath = " + Arrays.asList(((URLClassLoader) job.getClassLoader()).getURLs()));
        l4j.info("thread classpath = "
                + Arrays.asList(((URLClassLoader) Thread.currentThread().getContextClassLoader()).getURLs()));
    } catch (Exception e) {
        l4j.info("cannot get classpath: " + e.getMessage());
    }
    jc = job;
    MapredWork gWork = Utilities.getMapRedWork(job);
    reducer = gWork.getReducer();
    reducer.setParentOperators(null); // clear out any parents as reducer is the root
    isTagged = gWork.getNeedsTagging();
    try {
        keyTableDesc = gWork.getKeyDesc();
        inputKeyDeserializer = (SerDe) ReflectionUtils.newInstance(keyTableDesc.getDeserializerClass(), null);
        inputKeyDeserializer.initialize(null, keyTableDesc.getProperties());
        keyObjectInspector = inputKeyDeserializer.getObjectInspector();
        valueTableDesc = new TableDesc[gWork.getTagToValueDesc().size()];
        for (int tag = 0; tag < gWork.getTagToValueDesc().size(); tag++) {
            // We should initialize the SerDe with the TypeInfo when available.
            valueTableDesc[tag] = gWork.getTagToValueDesc().get(tag);
            inputValueDeserializer[tag] = (SerDe) ReflectionUtils
                    .newInstance(valueTableDesc[tag].getDeserializerClass(), null);
            inputValueDeserializer[tag].initialize(null, valueTableDesc[tag].getProperties());
            valueObjectInspector[tag] = inputValueDeserializer[tag].getObjectInspector();
            ArrayList<ObjectInspector> ois = new ArrayList<ObjectInspector>();
            ois.add(keyObjectInspector);
            ois.add(valueObjectInspector[tag]);
            ois.add(PrimitiveObjectInspectorFactory.writableByteObjectInspector);
            rowObjectInspector[tag] = ObjectInspectorFactory
                    .getStandardStructObjectInspector(Arrays.asList(fieldNames), ois);
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    // initialize reduce operator tree
    try {
        l4j.info(reducer.dump(0));
        reducer.initialize(jc, rowObjectInspector);
    } catch (Throwable e) {
        abort = true;
        if (e instanceof OutOfMemoryError) {
            // Don't create a new object if we are already out of memory
            throw (OutOfMemoryError) e;
        } else {
            throw new RuntimeException("Reduce operator initialization failed", e);
        }
    }
}
From source file: org.apache.hadoop.hive.accumulo.AccumuloTestSetup.java

protected void createAccumuloTable(Connector conn)
        throws TableExistsException, TableNotFoundException, AccumuloException, AccumuloSecurityException {
    TableOperations tops = conn.tableOperations();
    if (tops.exists(TABLE_NAME)) {
        tops.delete(TABLE_NAME);
    }
    tops.create(TABLE_NAME);

    boolean[] booleans = new boolean[] { true, false, true };
    byte[] bytes = new byte[] { Byte.MIN_VALUE, -1, Byte.MAX_VALUE };
    short[] shorts = new short[] { Short.MIN_VALUE, -1, Short.MAX_VALUE };
    int[] ints = new int[] { Integer.MIN_VALUE, -1, Integer.MAX_VALUE };
    long[] longs = new long[] { Long.MIN_VALUE, -1, Long.MAX_VALUE };
    String[] strings = new String[] { "Hadoop, Accumulo", "Hive", "Test Strings" };
    float[] floats = new float[] { Float.MIN_VALUE, -1.0F, Float.MAX_VALUE };
    double[] doubles = new double[] { Double.MIN_VALUE, -1.0, Double.MAX_VALUE };
    HiveDecimal[] decimals = new HiveDecimal[] { HiveDecimal.create("3.14159"), HiveDecimal.create("2.71828"),
            HiveDecimal.create("0.57721") };
    Date[] dates = new Date[] { Date.valueOf("2014-01-01"), Date.valueOf("2014-03-01"),
            Date.valueOf("2014-05-01") };
    Timestamp[] timestamps = new Timestamp[] { new Timestamp(50), new Timestamp(100), new Timestamp(150) };

    BatchWriter bw = conn.createBatchWriter(TABLE_NAME, new BatchWriterConfig());
    final String cf = "cf";
    try {
        for (int i = 0; i < 3; i++) {
            Mutation m = new Mutation("key-" + i);
            m.put(cf, "cq-boolean", Boolean.toString(booleans[i]));
            m.put(cf.getBytes(), "cq-byte".getBytes(), new byte[] { bytes[i] });
            m.put(cf, "cq-short", Short.toString(shorts[i]));
            m.put(cf, "cq-int", Integer.toString(ints[i]));
            m.put(cf, "cq-long", Long.toString(longs[i]));
            m.put(cf, "cq-string", strings[i]);
            m.put(cf, "cq-float", Float.toString(floats[i]));
            m.put(cf, "cq-double", Double.toString(doubles[i]));
            m.put(cf, "cq-decimal", decimals[i].toString());
            m.put(cf, "cq-date", dates[i].toString());
            m.put(cf, "cq-timestamp", timestamps[i].toString());
            bw.addMutation(m);
        }
    } finally {
        bw.close();
    }
}
From source file: org.apache.james.mime4j.codec.QuotedPrintableEncodeTest.java

public void testLetterEncoding() throws Exception {
    for (byte b = 0; b < Byte.MAX_VALUE; b++) {
        byte[] content = { b };
        checkRoundtrip(content);
    }
}
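Note the strict comparison b < Byte.MAX_VALUE: the test exercises bytes 0 through 126 and never round-trips 127 itself. The strict bound also keeps the loop finite, since with <= the increment past Byte.MAX_VALUE would wrap b around to Byte.MIN_VALUE and the loop would never terminate.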
From source file: org.jts.eclipse.conversion.cjsidl.ConversionUtil.java

/**
 * Converts a range of values to a JSIDL type. This is used where the CJSIDL
 * grammar does not include a type in the definition of a range.
 * @param lowerLim - the lower limit of the range
 * @param upperLim - the upper limit of the range
 * @return - a string representing a JSIDL data type that can hold the upper and
 *           lower limits.
 */
public static String rangeToJSIDLType(long lowerLim, long upperLim) {
    long range = upperLim - lowerLim;
    String type = "unsigned byte";
    if (range >= Byte.MIN_VALUE && range <= Byte.MAX_VALUE) {
        type = "unsigned byte";
    } else if (range >= Short.MIN_VALUE && range <= Short.MAX_VALUE) {
        type = "unsigned short integer";
    } else if (range >= Integer.MIN_VALUE && range <= Integer.MAX_VALUE) {
        type = "unsigned integer";
    } else {
        type = "unsigned long integer";
    }
    return type;
}
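Note that the method compares the span upperLim - lowerLim against the signed type bounds: a range of 0 to 200, for example, has a span of 200, which exceeds Byte.MAX_VALUE (127) and therefore maps to "unsigned short integer" even though it would fit in an unsigned byte.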
From source file: org.apache.james.mime4j.codec.QuotedPrintableTextEncodeTest.java

public void testLetterEncoding() throws Exception {
    for (byte b = 0; b < Byte.MAX_VALUE; b++) {
        byte[] content = { b };
        // White space is only escaped when followed by CRLF
        if (b != 32 && b != 9) {
            checkRoundtrip(content);
        }
    }
}
From source file: org.mule.util.NumberUtils.java

@SuppressWarnings("unchecked")
public static <T extends Number> T convertNumberToTargetClass(Number number, Class<T> targetClass)
        throws IllegalArgumentException {
    if (targetClass.isInstance(number)) {
        return (T) number;
    } else if (targetClass.equals(Byte.class)) {
        long value = number.longValue();
        if (value < Byte.MIN_VALUE || value > Byte.MAX_VALUE) {
            raiseOverflowException(number, targetClass);
        }
        return (T) new Byte(number.byteValue());
    } else if (targetClass.equals(Short.class)) {
        long value = number.longValue();
        if (value < Short.MIN_VALUE || value > Short.MAX_VALUE) {
            raiseOverflowException(number, targetClass);
        }
        return (T) new Short(number.shortValue());
    } else if (targetClass.equals(Integer.class)) {
        long value = number.longValue();
        if (value < Integer.MIN_VALUE || value > Integer.MAX_VALUE) {
            raiseOverflowException(number, targetClass);
        }
        return (T) new Integer(number.intValue());
    } else if (targetClass.equals(Long.class)) {
        return (T) new Long(number.longValue());
    } else if (targetClass.equals(BigInteger.class)) {
        if (number instanceof BigDecimal) {
            // do not lose precision - use BigDecimal's own conversion
            return (T) ((BigDecimal) number).toBigInteger();
        } else {
            // original value is not a Big* number - use standard long conversion
            return (T) BigInteger.valueOf(number.longValue());
        }
    } else if (targetClass.equals(Float.class)) {
        return (T) new Float(number.floatValue());
    } else if (targetClass.equals(Double.class)) {
        return (T) new Double(number.doubleValue());
    } else if (targetClass.equals(BigDecimal.class)) {
        // always use BigDecimal(String) here to avoid unpredictability of
        // BigDecimal(double) (see BigDecimal javadoc for details)
        return (T) new BigDecimal(number.toString());
    } else {
        throw new IllegalArgumentException("Could not convert number [" + number + "] of type ["
                + number.getClass().getName() + "] to unknown target class [" + targetClass.getName() + "]");
    }
}
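A short usage sketch of the Byte branch above (the values are illustrative, and raiseOverflowException is assumed to throw the IllegalArgumentException the method declares):

// 127 fits exactly, so the conversion succeeds.
Byte ok = NumberUtils.convertNumberToTargetClass(Integer.valueOf(127), Byte.class);

// 128 is greater than Byte.MAX_VALUE, so the range check fails and
// raiseOverflowException throws an IllegalArgumentException.
Byte bad = NumberUtils.convertNumberToTargetClass(Integer.valueOf(128), Byte.class);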
From source file: Base64Utils.java

public static void base64Decode(char[] data, int off, int len, OutputStream ostream) throws IOException {
    char[] ibuf = new char[4];
    int ibufcount = 0;
    byte[] obuf = new byte[3];
    for (int i = off; i < off + len; i++) {
        char ch = data[i];
        if (ch == S_BASE64PAD || ch < S_DECODETABLE.length && S_DECODETABLE[ch] != Byte.MAX_VALUE) {
            ibuf[ibufcount++] = ch;
            if (ibufcount == ibuf.length) {
                ibufcount = 0;
                int obufcount = base64Decode0(ibuf, obuf, 0);
                ostream.write(obuf, 0, obufcount);
            }
        }
    }
}
From source file: org.apache.hadoop.hive.ql.exec.mr.ExecReducer.java

@Override
public void configure(JobConf job) {
    rowObjectInspector = new ObjectInspector[Byte.MAX_VALUE];
    ObjectInspector[] valueObjectInspector = new ObjectInspector[Byte.MAX_VALUE];
    ObjectInspector keyObjectInspector;

    if (isInfoEnabled) {
        try {
            LOG.info("conf classpath = " + Arrays.asList(((URLClassLoader) job.getClassLoader()).getURLs()));
            LOG.info("thread classpath = " + Arrays
                    .asList(((URLClassLoader) Thread.currentThread().getContextClassLoader()).getURLs()));
        } catch (Exception e) {
            LOG.info("cannot get classpath: " + e.getMessage());
        }
    }
    jc = job;
    ReduceWork gWork = Utilities.getReduceWork(job);
    reducer = gWork.getReducer();
    reducer.setParentOperators(null); // clear out any parents as reducer is the root
    isTagged = gWork.getNeedsTagging();
    try {
        keyTableDesc = gWork.getKeyDesc();
        inputKeyDeserializer = ReflectionUtils.newInstance(keyTableDesc.getDeserializerClass(), null);
        SerDeUtils.initializeSerDe(inputKeyDeserializer, null, keyTableDesc.getProperties(), null);
        keyObjectInspector = inputKeyDeserializer.getObjectInspector();
        valueTableDesc = new TableDesc[gWork.getTagToValueDesc().size()];
        for (int tag = 0; tag < gWork.getTagToValueDesc().size(); tag++) {
            // We should initialize the SerDe with the TypeInfo when available.
            valueTableDesc[tag] = gWork.getTagToValueDesc().get(tag);
            inputValueDeserializer[tag] = ReflectionUtils
                    .newInstance(valueTableDesc[tag].getDeserializerClass(), null);
            SerDeUtils.initializeSerDe(inputValueDeserializer[tag], null, valueTableDesc[tag].getProperties(),
                    null);
            valueObjectInspector[tag] = inputValueDeserializer[tag].getObjectInspector();
            ArrayList<ObjectInspector> ois = new ArrayList<ObjectInspector>();
            ois.add(keyObjectInspector);
            ois.add(valueObjectInspector[tag]);
            rowObjectInspector[tag] = ObjectInspectorFactory
                    .getStandardStructObjectInspector(Utilities.reduceFieldNameList, ois);
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    MapredContext.init(false, new JobConf(jc));

    // initialize reduce operator tree
    try {
        LOG.info(reducer.dump(0));
        reducer.initialize(jc, rowObjectInspector);
    } catch (Throwable e) {
        abort = true;
        if (e instanceof OutOfMemoryError) {
            // Don't create a new object if we are already out of memory
            throw (OutOfMemoryError) e;
        } else {
            throw new RuntimeException("Reduce operator initialization failed", e);
        }
    }
}