List of usage examples for org.apache.hadoop.conf.Configuration.getInt

public int getInt(String name, int defaultValue)

Gets the value of the name property as an int. If no such property exists, the provided defaultValue is returned; if the configured value cannot be parsed as an int, a NumberFormatException is thrown.
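Before the real-world examples, a minimal sketch of the call pattern (the property name "my.app.threads" is made up for illustration; any hadoop-common on the classpath suffices to run it):

import org.apache.hadoop.conf.Configuration;

public class GetIntExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // "my.app.threads" is unset here, so getInt returns the default, 4.
        int threads = conf.getInt("my.app.threads", 4);
        System.out.println(threads);
    }
}

The examples below follow this pattern, most commonly reading MRJobConfig.NUM_MAPS with a default of 1.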
From source file: co.nubetech.apache.hadoop.BigDecimalSplitter.java
License: Apache License
public List<InputSplit> split(Configuration conf, ResultSet results, String colName) throws SQLException {
    BigDecimal minVal = results.getBigDecimal(1);
    BigDecimal maxVal = results.getBigDecimal(2);

    String lowClausePrefix = colName + " >= ";
    String highClausePrefix = colName + " < ";

    BigDecimal numSplits = new BigDecimal(conf.getInt(MRJobConfig.NUM_MAPS, 1));

    if (minVal == null && maxVal == null) {
        // Range is null to null. Return a null split accordingly.
        List<InputSplit> splits = new ArrayList<InputSplit>();
        splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(colName + " IS NULL",
                colName + " IS NULL"));
        return splits;
    }

    if (minVal == null || maxVal == null) {
        // Don't know what is a reasonable min/max value for interpolation. Fail.
        LOG.error("Cannot find a range for NUMERIC or DECIMAL fields with one end NULL.");
        return null;
    }

    // Get all the split points together.
    List<BigDecimal> splitPoints = split(numSplits, minVal, maxVal);
    List<InputSplit> splits = new ArrayList<InputSplit>();

    // Turn the split points into a set of intervals.
    BigDecimal start = splitPoints.get(0);
    for (int i = 1; i < splitPoints.size(); i++) {
        BigDecimal end = splitPoints.get(i);

        if (i == splitPoints.size() - 1) {
            // This is the last one; use a closed interval.
            splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                    lowClausePrefix + start.toString(),
                    colName + " <= " + end.toString()));
        } else {
            // Normal open-interval case.
            splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                    lowClausePrefix + start.toString(),
                    highClausePrefix + end.toString()));
        }

        start = end;
    }

    return splits;
}
From source file: co.nubetech.apache.hadoop.DateSplitter.java
License: Apache License
public List<InputSplit> split(Configuration conf, ResultSet results, String colName) throws SQLException {
    long minVal;
    long maxVal;

    int sqlDataType = results.getMetaData().getColumnType(1);
    minVal = resultSetColToLong(results, 1, sqlDataType);
    maxVal = resultSetColToLong(results, 2, sqlDataType);

    String lowClausePrefix = colName + " >= ";
    String highClausePrefix = colName + " < ";

    int numSplits = conf.getInt(MRJobConfig.NUM_MAPS, 1);
    if (numSplits < 1) {
        numSplits = 1;
    }

    if (minVal == Long.MIN_VALUE && maxVal == Long.MIN_VALUE) {
        // The range of acceptable dates is NULL to NULL. Just create a single split.
        List<InputSplit> splits = new ArrayList<InputSplit>();
        splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(colName + " IS NULL",
                colName + " IS NULL"));
        return splits;
    }

    // Gather the split point integers.
    List<Long> splitPoints = split(numSplits, minVal, maxVal);
    List<InputSplit> splits = new ArrayList<InputSplit>();

    // Turn the split points into a set of intervals.
    long start = splitPoints.get(0);
    Date startDate = longToDate(start, sqlDataType);
    if (sqlDataType == Types.TIMESTAMP) {
        // The lower bound's nanos value needs to match the actual lower-bound nanos.
        try {
            ((java.sql.Timestamp) startDate).setNanos(results.getTimestamp(1).getNanos());
        } catch (NullPointerException npe) {
            // If the lower bound was NULL, we'll get an NPE; just ignore it and don't set nanos.
        }
    }

    for (int i = 1; i < splitPoints.size(); i++) {
        long end = splitPoints.get(i);
        Date endDate = longToDate(end, sqlDataType);

        if (i == splitPoints.size() - 1) {
            if (sqlDataType == Types.TIMESTAMP) {
                // The upper bound's nanos value needs to match the actual upper-bound nanos.
                try {
                    ((java.sql.Timestamp) endDate).setNanos(results.getTimestamp(2).getNanos());
                } catch (NullPointerException npe) {
                    // If the upper bound was NULL, we'll get an NPE; just ignore it and don't set nanos.
                }
            }

            // This is the last one; use a closed interval.
            splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                    lowClausePrefix + dateToString(startDate),
                    colName + " <= " + dateToString(endDate)));
        } else {
            // Normal open-interval case.
            splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                    lowClausePrefix + dateToString(startDate),
                    highClausePrefix + dateToString(endDate)));
        }

        start = end;
        startDate = endDate;
    }

    if (minVal == Long.MIN_VALUE || maxVal == Long.MIN_VALUE) {
        // Add an extra split to handle the null case that we saw.
        splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(colName + " IS NULL",
                colName + " IS NULL"));
    }

    return splits;
}
From source file: co.nubetech.apache.hadoop.FloatSplitter.java
License: Apache License
public List<InputSplit> split(Configuration conf, ResultSet results, String colName) throws SQLException {
    LOG.warn("Generating splits for a floating-point index column. Due to the");
    LOG.warn("imprecise representation of floating-point values in Java, this");
    LOG.warn("may result in an incomplete import.");
    LOG.warn("You are strongly encouraged to choose an integral split column.");

    List<InputSplit> splits = new ArrayList<InputSplit>();

    if (results.getString(1) == null && results.getString(2) == null) {
        // Range is null to null. Return a null split accordingly.
        splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(colName + " IS NULL",
                colName + " IS NULL"));
        return splits;
    }

    double minVal = results.getDouble(1);
    double maxVal = results.getDouble(2);

    // Use this as a hint. May need an extra task if the size doesn't divide cleanly.
    int numSplits = conf.getInt(MRJobConfig.NUM_MAPS, 1);
    double splitSize = (maxVal - minVal) / (double) numSplits;

    if (splitSize < MIN_INCREMENT) {
        splitSize = MIN_INCREMENT;
    }

    String lowClausePrefix = colName + " >= ";
    String highClausePrefix = colName + " < ";

    double curLower = minVal;
    double curUpper = curLower + splitSize;

    while (curUpper < maxVal) {
        splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                lowClausePrefix + Double.toString(curLower),
                highClausePrefix + Double.toString(curUpper)));

        curLower = curUpper;
        curUpper += splitSize;
    }

    // Catch any overage and create the closed interval for the last split,
    // starting from curLower so the tail [curLower, maxVal] is covered.
    if (curLower <= maxVal || splits.size() == 1) {
        splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                lowClausePrefix + Double.toString(curLower),
                colName + " <= " + Double.toString(maxVal)));
    }

    if (results.getString(1) == null || results.getString(2) == null) {
        // At least one extrema is null; add a null split.
        splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(colName + " IS NULL",
                colName + " IS NULL"));
    }

    return splits;
}
From source file: co.nubetech.apache.hadoop.IntegerSplitter.java
License: Apache License
public List<InputSplit> split(Configuration conf, ResultSet results, String colName) throws SQLException {
    long minVal = results.getLong(1);
    long maxVal = results.getLong(2);

    String lowClausePrefix = colName + " >= ";
    String highClausePrefix = colName + " < ";

    int numSplits = conf.getInt(MRJobConfig.NUM_MAPS, 1);
    if (numSplits < 1) {
        numSplits = 1;
    }

    if (results.getString(1) == null && results.getString(2) == null) {
        // Range is null to null. Return a null split accordingly.
        List<InputSplit> splits = new ArrayList<InputSplit>();
        splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(colName + " IS NULL",
                colName + " IS NULL"));
        return splits;
    }

    // Get all the split points together.
    List<Long> splitPoints = split(numSplits, minVal, maxVal);
    List<InputSplit> splits = new ArrayList<InputSplit>();

    // Turn the split points into a set of intervals.
    long start = splitPoints.get(0);
    for (int i = 1; i < splitPoints.size(); i++) {
        long end = splitPoints.get(i);

        if (i == splitPoints.size() - 1) {
            // This is the last one; use a closed interval.
            splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                    lowClausePrefix + Long.toString(start),
                    colName + " <= " + Long.toString(end)));
        } else {
            // Normal open-interval case.
            splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                    lowClausePrefix + Long.toString(start),
                    highClausePrefix + Long.toString(end)));
        }

        start = end;
    }

    if (results.getString(1) == null || results.getString(2) == null) {
        // At least one extrema is null; add a null split.
        splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(colName + " IS NULL",
                colName + " IS NULL"));
    }

    return splits;
}
From source file: co.nubetech.apache.hadoop.mapred.DataDrivenDBInputFormat.java
License: Apache License
/** {@inheritDoc} */
public List<InputSplit> getSplits(Configuration job) throws IOException {
    int targetNumTasks = job.getInt(MRJobConfig.NUM_MAPS, 1);
    if (1 == targetNumTasks) {
        // There's no need to run a bounding vals query; just return a split
        // that separates nothing. This can be considerably more optimal for a
        // large table with no index.
        List<InputSplit> singletonSplit = new ArrayList<InputSplit>();
        singletonSplit.add(
                new org.apache.hadoop.mapreduce.lib.db.DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                        "1=1", "1=1"));
        return singletonSplit;
    }

    ResultSet results = null;
    Statement statement = null;
    Connection connection = getConnection();
    try {
        statement = connection.createStatement();

        results = statement.executeQuery(getBoundingValsQuery());
        results.next();

        // Based on the type of the results, use a different mechanism
        // for interpolating split points (i.e., numeric splits, text
        // splits, dates, etc.)
        int sqlDataType = results.getMetaData().getColumnType(1);
        DBSplitter splitter = getSplitter(sqlDataType);
        if (null == splitter) {
            throw new IOException("Unknown SQL data type: " + sqlDataType);
        }

        // return convertSplit(splitter.split(job, results, getDBConf().getInputOrderBy()));
        return splitter.split(job, results, getDBConf().getInputOrderBy());
    } catch (SQLException e) {
        throw new IOException(e.getMessage());
    } finally {
        // More-or-less ignore SQL exceptions here, but log in case we need it.
        try {
            if (null != results) {
                results.close();
            }
        } catch (SQLException se) {
            LOG.debug("SQLException closing resultset: " + se.toString());
        }

        try {
            if (null != statement) {
                statement.close();
            }
        } catch (SQLException se) {
            LOG.debug("SQLException closing statement: " + se.toString());
        }

        try {
            connection.commit();
            closeConnection();
        } catch (SQLException se) {
            LOG.debug("SQLException committing split transaction: " + se.toString());
        }
    }
}
From source file: co.nubetech.apache.hadoop.TextSplitter.java
License: Apache License
/**
 * This method needs to determine the splits between two user-provided
 * strings. In the case where the user's strings are 'A' and 'Z', this is
 * not hard; we could create two splits from ['A', 'M') and ['M', 'Z'], 26
 * splits for strings beginning with each letter, etc.
 *
 * If a user has provided us with the strings "Ham" and "Haze", however, we
 * need to create splits that differ in the third letter.
 *
 * The algorithm used is as follows: Since there are 2**16 unicode
 * characters, we interpret characters as digits in base 65536. Given a
 * string 's' containing characters s_0, s_1 .. s_n, we interpret the string
 * as the number: 0.s_0 s_1 s_2 .. s_n in base 65536. Having mapped the low
 * and high strings into floating-point values, we then use the
 * BigDecimalSplitter to establish the even split points, then map the
 * resulting floating point values back into strings.
 */
public List<InputSplit> split(Configuration conf, ResultSet results, String colName) throws SQLException {
    LOG.warn("Generating splits for a textual index column.");
    LOG.warn("If your database sorts in a case-insensitive order, "
            + "this may result in a partial import or duplicate records.");
    LOG.warn("You are strongly encouraged to choose an integral split column.");

    String minString = results.getString(1);
    String maxString = results.getString(2);

    boolean minIsNull = false;

    // If the min value is null, switch it to an empty string instead for purposes
    // of interpolation. Then add [null, null] as a special case split.
    if (null == minString) {
        minString = "";
        minIsNull = true;
    }

    if (null == maxString) {
        // If the max string is null, then the min string has to be null too.
        // Just return a special split for this case.
        List<InputSplit> splits = new ArrayList<InputSplit>();
        splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(colName + " IS NULL",
                colName + " IS NULL"));
        return splits;
    }

    // Use this as a hint. May need an extra task if the size doesn't divide cleanly.
    int numSplits = conf.getInt(MRJobConfig.NUM_MAPS, 1);

    String lowClausePrefix = colName + " >= '";
    String highClausePrefix = colName + " < '";

    // If there is a common prefix between minString and maxString, establish it
    // and pull it out of minString and maxString.
    int maxPrefixLen = Math.min(minString.length(), maxString.length());
    int sharedLen;
    for (sharedLen = 0; sharedLen < maxPrefixLen; sharedLen++) {
        char c1 = minString.charAt(sharedLen);
        char c2 = maxString.charAt(sharedLen);

        if (c1 != c2) {
            break;
        }
    }

    // The common prefix has length 'sharedLen'. Extract it from both.
    String commonPrefix = minString.substring(0, sharedLen);
    minString = minString.substring(sharedLen);
    maxString = maxString.substring(sharedLen);

    List<String> splitStrings = split(numSplits, minString, maxString, commonPrefix);
    List<InputSplit> splits = new ArrayList<InputSplit>();

    // Convert the list of split point strings into an actual set of InputSplits.
    String start = splitStrings.get(0);
    for (int i = 1; i < splitStrings.size(); i++) {
        String end = splitStrings.get(i);

        if (i == splitStrings.size() - 1) {
            // This is the last one; use a closed interval.
            splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                    lowClausePrefix + start + "'",
                    colName + " <= '" + end + "'"));
        } else {
            // Normal open-interval case.
            splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                    lowClausePrefix + start + "'",
                    highClausePrefix + end + "'"));
        }

        // Advance the lower bound; without this, every split would start at the
        // first split point.
        start = end;
    }

    if (minIsNull) {
        // Add the special null split at the end.
        splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(colName + " IS NULL",
                colName + " IS NULL"));
    }

    return splits;
}
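The base-65536 interpretation described in the Javadoc above can be illustrated in isolation. The following standalone helper is a sketch of the string-to-number mapping only, not the class's actual implementation:

import java.math.BigDecimal;

public class Base65536Sketch {
    // Interpret the characters of 's' as digits after the radix point in base
    // 65536: value(s) = sum over i of charAt(i) / 65536^(i+1). The divisions
    // are exact because 65536 is a power of two.
    static BigDecimal stringToFraction(String s) {
        BigDecimal base = BigDecimal.valueOf(65536);
        BigDecimal result = BigDecimal.ZERO;
        BigDecimal place = BigDecimal.ONE.divide(base);
        for (int i = 0; i < s.length(); i++) {
            result = result.add(place.multiply(BigDecimal.valueOf(s.charAt(i))));
            place = place.divide(base);
        }
        return result;
    }

    public static void main(String[] args) {
        // "Ham" < "Haze" maps to a pair of fractions in the same order, so
        // numeric split points between them translate back into strings.
        System.out.println(stringToFraction("Ham"));
        System.out.println(stringToFraction("Haze"));
    }
}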
From source file: co.nubetech.hiho.dedup.DelimitedLineRecordReader.java
License: Apache License
/**
 * @param delimiter
 * @param column
 */
@Override
public void initialize(InputSplit genericSplit, TaskAttemptContext context) throws IOException {
    FileSplit split = (FileSplit) genericSplit;
    Configuration job = context.getConfiguration();
    this.delimiter = job.get(DelimitedTextInputFormat.DELIMITER_CONF);
    this.column = job.getInt(DelimitedTextInputFormat.COLUMN_CONF, 0);
    this.maxLineLength = job.getInt("mapred.linerecordreader.maxlength", Integer.MAX_VALUE);
    start = split.getStart();
    end = start + split.getLength();
    final Path file = split.getPath();
    compressionCodecs = new CompressionCodecFactory(job);
    final CompressionCodec codec = compressionCodecs.getCodec(file);

    // open the file and seek to the start of the split
    FileSystem fs = file.getFileSystem(job);
    FSDataInputStream fileIn = fs.open(split.getPath());
    boolean skipFirstLine = false;
    if (codec != null) {
        in = new LineReader(codec.createInputStream(fileIn), job);
        end = Long.MAX_VALUE;
    } else {
        if (start != 0) {
            skipFirstLine = true;
            --start;
            fileIn.seek(start);
        }
        in = new LineReader(fileIn, job);
    }
    if (skipFirstLine) {
        // skip first line and re-establish "start".
        start += in.readLine(new Text(), 0, (int) Math.min((long) Integer.MAX_VALUE, end - start));
    }
    this.pos = start;
}
From source file: co.nubetech.hiho.job.DBQueryInputJob.java
License: Apache License
public void runJobs(Configuration conf, int jobCounter) throws IOException {

    try {
        checkMandatoryConfs(conf);
    } catch (HIHOException e1) {
        e1.printStackTrace();
        throw new IOException(e1);
    }

    Job job = new Job(conf);
    for (Entry<String, String> entry : conf) {
        logger.warn("key, value " + entry.getKey() + "=" + entry.getValue());
    }

    job.getConfiguration().setInt(MRJobConfig.NUM_MAPS, conf.getInt(HIHOConf.NUMBER_MAPPERS, 1));
    logger.warn("Number of maps " + conf.getInt(MRJobConfig.NUM_MAPS, 1));
    job.setJobName("Import job");
    job.setJarByClass(DBQueryInputJob.class);

    String strategy = conf.get(HIHOConf.INPUT_OUTPUT_STRATEGY);
    OutputStrategyEnum os = OutputStrategyEnum.value(strategy);
    if (os == null) {
        throw new IllegalArgumentException("Wrong value of output strategy. Please correct");
    }
    if (os != OutputStrategyEnum.AVRO) {
        switch (os) {
        case DUMP: {
            // job.setMapperClass(DBImportMapper.class);
            break;
        }
        case DELIMITED: {
            job.setMapperClass(DBInputDelimMapper.class);
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(Text.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(Text.class);
            job.setOutputFormatClass(NoKeyOnlyValueOutputFormat.class);
            NoKeyOnlyValueOutputFormat.setOutputPath(job, new Path(getConf().get(HIHOConf.INPUT_OUTPUT_PATH)));
            // falls through to the JSON case, which only breaks
        }
        case JSON: {
            // job.setMapperClass(DBImportJsonMapper.class);
            break;
        }
        default: {
            job.setMapperClass(DBInputDelimMapper.class);
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(Text.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(Text.class);
            job.setOutputFormatClass(NoKeyOnlyValueOutputFormat.class);
            NoKeyOnlyValueOutputFormat.setOutputPath(job, new Path(getConf().get(HIHOConf.INPUT_OUTPUT_PATH)));
            break;
        }
        }

        String inputQuery = conf.get(DBConfiguration.INPUT_QUERY);
        String inputBoundingQuery = conf.get(DBConfiguration.INPUT_BOUNDING_QUERY);
        logger.debug("About to set the params");
        DBQueryInputFormat.setInput(job, inputQuery, inputBoundingQuery, params);
        logger.debug("Set the params");

        job.setNumReduceTasks(0);

        try {
            logger.debug("OUTPUT format class is " + job.getOutputFormatClass());
            logger.debug("Class is " + ReflectionUtils
                    .newInstance(job.getOutputFormatClass(), job.getConfiguration()).getClass().getName());
            job.waitForCompletion(false);
            if (conf.get(HIHOConf.INPUT_OUTPUT_LOADTO) != null) {
                generateHiveScript(conf, job, jobCounter);
                generatePigScript(conf, job);
            }
        } catch (HIHOException e) {
            // the more specific exception must be caught before Exception
            e.printStackTrace();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    // avro to be handled differently, thanks to all the incompatibilities in the apis.
    else {
        String inputQuery = conf.get(DBConfiguration.INPUT_QUERY);
        String inputBoundingQuery = conf.get(DBConfiguration.INPUT_BOUNDING_QUERY);
        logger.debug("About to set the params");
        logger.debug("Set the params");
        JobConf jobConf = new JobConf(conf);

        try {
            GenericDBWritable queryWritable = getDBWritable(jobConf);
            Schema pair = DBMapper.getPairSchema(queryWritable.getColumns());
            AvroJob.setMapOutputSchema(jobConf, pair);
            GenericRecordAvroOutputFormat.setOutputPath(jobConf,
                    new Path(getConf().get(HIHOConf.INPUT_OUTPUT_PATH)));

            co.nubetech.apache.hadoop.mapred.DBQueryInputFormat.setInput(jobConf, inputQuery,
                    inputBoundingQuery, params);
            jobConf.setInputFormat(co.nubetech.apache.hadoop.mapred.DBQueryInputFormat.class);
            jobConf.setMapperClass(DBInputAvroMapper.class);
            jobConf.setMapOutputKeyClass(NullWritable.class);
            jobConf.setMapOutputValueClass(AvroValue.class);
            jobConf.setOutputKeyClass(NullWritable.class);
            jobConf.setOutputValueClass(Text.class);
            jobConf.setOutputFormat(GenericRecordAvroOutputFormat.class);
            jobConf.setJarByClass(DBQueryInputJob.class);
            jobConf.setStrings("io.serializations",
                    "org.apache.hadoop.io.serializer.JavaSerialization,"
                            + "org.apache.hadoop.io.serializer.WritableSerialization,"
                            + "org.apache.avro.mapred.AvroSerialization");
            jobConf.setNumReduceTasks(0);

            JobClient.runJob(jobConf);
        } catch (Throwable e) {
            e.printStackTrace();
        }
    }
}
From source file: co.nubetech.hiho.job.ExportToDB.java
License: Apache License
public int run(String[] args) throws Exception {
    Configuration conf = getConf();
    populateConfiguration(args, conf);
    try {
        checkMandatoryConfs(conf);
    } catch (HIHOException e1) {
        e1.printStackTrace();
        throw new Exception(e1);
    }
    Job job = new Job(conf);
    job.getConfiguration().setInt(MRJobConfig.NUM_MAPS, conf.getInt(HIHOConf.NUMBER_MAPPERS, 1));
    job.setJobName("HihoDBExport");

    job.setMapperClass(GenericDBLoadDataMapper.class);
    job.setJarByClass(ExportToDB.class);
    job.setNumReduceTasks(0);
    job.setInputFormatClass(TextInputFormat.class);
    TextInputFormat.addInputPath(job, new Path(inputPath));
    GenericDBOutputFormat.setOutput(job, tableName, columnNames);

    int ret = 0;
    try {
        ret = job.waitForCompletion(true) ? 0 : 1;
    } catch (Exception e) {
        e.printStackTrace();
    }
    return ret;
}
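The HIHO jobs above copy a HIHO-specific mapper count into the framework's MRJobConfig.NUM_MAPS key via a setInt/getInt round-trip. A small sketch of that round-trip on a bare Configuration, with made-up property names for illustration:

import org.apache.hadoop.conf.Configuration;

public class ConfRoundTrip {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setInt("hiho.job.mappers", 8);                       // stored internally as the string "8"
        System.out.println(conf.getInt("hiho.job.mappers", 1));   // prints 8; the default is ignored
        System.out.println(conf.getInt("hiho.job.reducers", 1));  // unset key: prints the default, 1
    }
}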
From source file: co.nubetech.hiho.job.ExportToOracleDb.java
License: Apache License
@Override
public int run(String[] args) throws IOException {
    Configuration conf = getConf();
    for (Entry<String, String> entry : conf) {
        logger.debug("key, value " + entry.getKey() + "=" + entry.getValue());
    }

    for (int i = 0; i < args.length; i++) {
        logger.debug("Remaining arguments are" + " " + args[i]);
    }
    populateConfiguration(args, conf);

    try {
        checkMandatoryConfs(conf);
    } catch (HIHOException e1) {
        e1.printStackTrace();
        throw new IOException(e1);
    }

    Job job = new Job(conf);
    job.setJobName("OracleLoading");
    job.setMapperClass(OracleLoadMapper.class);
    job.setJarByClass(ExportToOracleDb.class);
    job.getConfiguration().setInt(MRJobConfig.NUM_MAPS, conf.getInt(HIHOConf.NUMBER_MAPPERS, 1));

    try {
        // we first create the external table definition
        String query = conf.get(HIHOConf.EXTERNAL_TABLE_DML);
        // create table if user has specified
        if (query != null) {
            this.runQuery(query, conf);
        }
    } catch (HIHOException e1) {
        e1.printStackTrace();
    }

    // verify required properties are loaded
    job.setNumReduceTasks(0);
    job.setInputFormatClass(FileStreamInputFormat.class);
    FileStreamInputFormat.addInputPath(job, new Path(inputPath));
    job.setMapOutputKeyClass(NullWritable.class);
    job.setMapOutputValueClass(NullWritable.class);
    // job.setJarByClass(com.mysql.jdbc.Driver.class);
    job.setOutputFormatClass(NullOutputFormat.class);

    int ret = 0;
    try {
        ret = job.waitForCompletion(true) ? 0 : 1;
    } catch (Exception e) {
        e.printStackTrace();
    }

    // run alter table query and add locations
    try {
        this.runQuery(getAlterTableDML(new Path(inputPath), conf), conf);
    } catch (HIHOException e1) {
        e1.printStackTrace();
    }
    return ret;
}