List of usage examples for java.sql ResultSet getTimestamp

java.sql.Timestamp getTimestamp(String columnLabel) throws SQLException;

Retrieves the value of the designated column in the current row of this ResultSet object as a java.sql.Timestamp object in the Java programming language.
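Before the project examples below, here is a minimal, self-contained sketch of the call. The in-memory H2 URL and the events table with its id and created_at columns are assumptions made up for this sketch; any JDBC data source works the same way.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;

public class GetTimestampExample {
    public static void main(String[] args) throws SQLException {
        // Assumes the H2 driver is on the classpath; the URL and table are illustrative.
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo");
             PreparedStatement ps = conn.prepareStatement(
                     "SELECT created_at FROM events WHERE id = ?")) {
            ps.setLong(1, 42L);
            try (ResultSet rs = ps.executeQuery()) {
                if (rs.next()) {
                    // getTimestamp returns null when the column holds SQL NULL.
                    Timestamp created = rs.getTimestamp("created_at");
                    if (created != null) {
                        System.out.println("created at " + created);
                    }
                }
            }
        }
    }
}

Both overloads behave the same way: getTimestamp(String columnLabel) and getTimestamp(int columnIndex) return null for SQL NULL, which is why the sketch checks before using the value.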
From source file: co.nubetech.apache.hadoop.DateSplitter.java
public List<InputSplit> split(Configuration conf, ResultSet results, String colName) throws SQLException {
    long minVal;
    long maxVal;

    int sqlDataType = results.getMetaData().getColumnType(1);
    minVal = resultSetColToLong(results, 1, sqlDataType);
    maxVal = resultSetColToLong(results, 2, sqlDataType);

    String lowClausePrefix = colName + " >= ";
    String highClausePrefix = colName + " < ";

    int numSplits = conf.getInt(MRJobConfig.NUM_MAPS, 1);
    if (numSplits < 1) {
        numSplits = 1;
    }

    if (minVal == Long.MIN_VALUE && maxVal == Long.MIN_VALUE) {
        // The range of acceptable dates is NULL to NULL. Just create a single split.
        List<InputSplit> splits = new ArrayList<InputSplit>();
        splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                colName + " IS NULL", colName + " IS NULL"));
        return splits;
    }

    // Gather the split point integers
    List<Long> splitPoints = split(numSplits, minVal, maxVal);
    List<InputSplit> splits = new ArrayList<InputSplit>();

    // Turn the split points into a set of intervals.
    long start = splitPoints.get(0);
    Date startDate = longToDate(start, sqlDataType);
    if (sqlDataType == Types.TIMESTAMP) {
        // The lower bound's nanos value needs to match the actual lower-bound nanos.
        try {
            ((java.sql.Timestamp) startDate).setNanos(results.getTimestamp(1).getNanos());
        } catch (NullPointerException npe) {
            // If the lower bound was NULL, we'll get an NPE; just ignore it and don't set nanos.
        }
    }

    for (int i = 1; i < splitPoints.size(); i++) {
        long end = splitPoints.get(i);
        Date endDate = longToDate(end, sqlDataType);

        if (i == splitPoints.size() - 1) {
            if (sqlDataType == Types.TIMESTAMP) {
                // The upper bound's nanos value needs to match the actual upper-bound nanos.
                try {
                    ((java.sql.Timestamp) endDate).setNanos(results.getTimestamp(2).getNanos());
                } catch (NullPointerException npe) {
                    // If the upper bound was NULL, we'll get an NPE; just ignore it and don't set nanos.
                }
            }
            // This is the last one; use a closed interval.
            splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                    lowClausePrefix + dateToString(startDate),
                    colName + " <= " + dateToString(endDate)));
        } else {
            // Normal open-interval case.
            splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                    lowClausePrefix + dateToString(startDate),
                    highClausePrefix + dateToString(endDate)));
        }

        start = end;
        startDate = endDate;
    }

    if (minVal == Long.MIN_VALUE || maxVal == Long.MIN_VALUE) {
        // Add an extra split to handle the null case that we saw.
        splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                colName + " IS NULL", colName + " IS NULL"));
    }

    return splits;
}
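The setNanos calls above compensate for a precision gap: the splitter computes its boundaries as long millisecond values, while java.sql.Timestamp carries up to nanosecond precision, so the sub-millisecond part has to be copied back from the original ResultSet values. A small sketch of the underlying behavior (the epoch value and class name are made up for illustration):

import java.sql.Timestamp;

public class NanosDemo {
    public static void main(String[] args) {
        // A timestamp with sub-millisecond precision (123456789 ns = .123456789 s).
        Timestamp original = new Timestamp(1_370_000_000_000L);
        original.setNanos(123456789);

        // Round-tripping through getTime() keeps only whole milliseconds...
        Timestamp roundTripped = new Timestamp(original.getTime());
        System.out.println(roundTripped.getNanos()); // 123000000 -- the 456789 ns tail is gone

        // ...so the nanos must be copied back explicitly, as the splitter does.
        roundTripped.setNanos(original.getNanos());
        System.out.println(original.equals(roundTripped)); // true
    }
}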
From source file: com.sfs.whichdoctor.dao.AddressVerificationDAOImpl.java
/**
 * Load the address verification bean.
 *
 * @param rs the result set
 * @return the address verification bean
 * @throws SQLException the SQL exception
 */
private AddressVerificationBean loadAddressVerification(final ResultSet rs) throws SQLException {
    AddressVerificationBean addressVerification = new AddressVerificationBean();
    addressVerification.setAddressVerificationId(rs.getInt("AddressVerificationId"));
    addressVerification.setAddressGUID(rs.getInt("GUID"));
    addressVerification.setReferenceGUID(rs.getInt("ReferenceGUID"));
    addressVerification.setPersonIdentifier(rs.getInt("PersonIdentifier"));
    addressVerification.setPersonName(rs.getString("PersonName"));
    addressVerification.setOrganisationName(rs.getString("OrganisationName"));
    addressVerification.setProcessStatus(rs.getString("ProcessStatus"));
    addressVerification.setProcessingException(rs.getString("ProcessingException"));
    addressVerification.setReturnCode(rs.getString("ReturnCode"));
    addressVerification.setReturnCodeExtension(rs.getString("ReturnCodeExtension"));
    try {
        addressVerification.setCreatedDate(rs.getTimestamp("Created"));
    } catch (SQLException sqe) {
        dataLogger.debug("Error parsing CreatedDate: " + sqe.getMessage());
    }

    AddressBean inAddress = new AddressBean();
    inAddress.setGUID(rs.getInt("GUID"));
    inAddress.setActive(true);
    inAddress.setAddressField(rs.getString("IN_Address1"));
    inAddress.setAddressField(rs.getString("IN_Address2"));
    inAddress.setAddressField(rs.getString("IN_Address3"));
    inAddress.setAddressField(rs.getString("IN_Address4"));
    inAddress.setAddressField(rs.getString("IN_Address5"));
    inAddress.setAddressField(rs.getString("IN_Address6"));
    inAddress.setAddressField(rs.getString("IN_Suburb"));
    inAddress.setAddressField(rs.getString("IN_City"));
    String inState = rs.getString("IN_State");
    inAddress.setState(addressDAO.getStateFromAbbreviation(inState));
    inAddress.setStateAbbreviation(inState);
    String inCountry = rs.getString("IN_Country");
    inAddress.setCountry(addressDAO.getCountryFromAbbreviation(inCountry));
    inAddress.setCountryAbbreviation(inCountry);
    inAddress.setPostCode(rs.getString("IN_Postcode"));
    addressVerification.setInputAddress(inAddress);

    AddressBean outAddress = new AddressBean();
    outAddress.setGUID(rs.getInt("GUID"));
    outAddress.setActive(true);
    outAddress.setAddressField(capitalise(rs.getString("OUT_Address1")));
    outAddress.setAddressField(capitalise(rs.getString("OUT_Address2")));
    outAddress.setAddressField(capitalise(rs.getString("OUT_Address3")));
    outAddress.setAddressField(capitalise(rs.getString("OUT_Address4")));
    outAddress.setAddressField(capitalise(rs.getString("OUT_Address5")));
    outAddress.setAddressField(capitalise(rs.getString("OUT_Address6")));
    outAddress.setAddressField(capitalise(rs.getString("OUT_Suburb")));
    outAddress.setAddressField(capitalise(rs.getString("OUT_City")));
    String outState = rs.getString("OUT_State");
    outAddress.setState(addressDAO.getStateFromAbbreviation(outState));
    outAddress.setStateAbbreviation(outState);
    String outCountry = rs.getString("OUT_Country");
    outAddress.setCountry(addressDAO.getCountryFromAbbreviation(outCountry));
    outAddress.setCountryAbbreviation(outCountry);
    outAddress.setPostCode(rs.getString("OUT_Postcode"));
    addressVerification.setOutputAddress(outAddress);

    return addressVerification;
}
From source file: com.sfs.whichdoctor.dao.SupervisorDAOImpl.java
/**
 * Load supervisor.
 *
 * @param rs the result set
 * @param loadDetails the load details
 * @return the supervisor bean
 * @throws SQLException the SQL exception
 */
private SupervisorBean loadSupervisor(final ResultSet rs, final BuilderBean loadDetails) throws SQLException {
    SupervisorBean supervisor = new SupervisorBean();
    supervisor.setId(rs.getInt("SupervisorId"));
    supervisor.setGUID(rs.getInt("GUID"));
    supervisor.setReferenceGUID(rs.getInt("ReferenceGUID"));
    supervisor.setOrderId(rs.getInt("OrderId"));
    supervisor.setRelationshipClass(rs.getString("RelationshipClass"));
    supervisor.setRelationshipType(rs.getString("RelationshipType"));
    supervisor.setRelationshipAbbreviation(rs.getString("RelationshipAbbreviation"));
    supervisor.setSupervisorClass(rs.getString("SupervisorClass"));
    supervisor.setPersonGUID(rs.getInt("PersonGUID"));
    if (loadDetails.getBoolean("SUPERVISOR_PERSONOBJ")) {
        try {
            PersonBean person = this.personDAO.loadGUID(supervisor.getPersonGUID(), loadDetails);
            supervisor.setPerson(person);
        } catch (Exception e) {
            dataLogger.error("Error loading person for supervisor: " + e.getMessage());
        }
    }
    supervisor.setActive(rs.getBoolean("Active"));
    try {
        supervisor.setCreatedDate(rs.getTimestamp("CreatedDate"));
    } catch (SQLException sqe) {
        dataLogger.debug("Error setting CreatedDate: " + sqe.getMessage());
    }
    supervisor.setCreatedBy(rs.getString("CreatedBy"));
    try {
        supervisor.setModifiedDate(rs.getTimestamp("ModifiedDate"));
    } catch (SQLException sqe) {
        dataLogger.debug("Error setting ModifiedDate: " + sqe.getMessage());
    }
    supervisor.setModifiedBy(rs.getString("ModifiedBy"));
    return supervisor;
}
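Both DAO examples above wrap rs.getTimestamp in a try/catch that merely logs the SQLException. One plausible reason (an assumption, not stated in the source) is defending against driver errors on unrepresentable values, such as MySQL's zero date '0000-00-00 00:00:00', which raises an SQLException unless the connection sets zeroDateTimeBehavior to convert it to NULL. A hypothetical helper expressing the same defensive pattern:

import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;

// Hypothetical helper mirroring the defensive pattern used by both DAOs above.
public final class Timestamps {

    private Timestamps() {
    }

    // Returns the column value, or null if the column is SQL NULL or the
    // driver cannot represent the stored value as a Timestamp.
    public static Timestamp readOrNull(ResultSet rs, String columnLabel) {
        try {
            return rs.getTimestamp(columnLabel);
        } catch (SQLException e) {
            // e.g. MySQL's zero date '0000-00-00 00:00:00' raises an
            // SQLException unless zeroDateTimeBehavior=convertToNull is set.
            return null;
        }
    }
}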
From source file: com.cloudera.sqoop.mapreduce.db.DateSplitter.java
public List<InputSplit> split(Configuration conf, ResultSet results, String colName) throws SQLException {
    long minVal;
    long maxVal;

    int sqlDataType = results.getMetaData().getColumnType(1);
    minVal = resultSetColToLong(results, 1, sqlDataType);
    maxVal = resultSetColToLong(results, 2, sqlDataType);

    String lowClausePrefix = colName + " >= ";
    String highClausePrefix = colName + " < ";

    int numSplits = ConfigurationHelper.getConfNumMaps(conf);
    if (numSplits < 1) {
        numSplits = 1;
    }

    if (minVal == Long.MIN_VALUE && maxVal == Long.MIN_VALUE) {
        // The range of acceptable dates is NULL to NULL. Just create a single split.
        List<InputSplit> splits = new ArrayList<InputSplit>();
        splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                colName + " IS NULL", colName + " IS NULL"));
        return splits;
    }

    // Gather the split point integers
    List<Long> splitPoints = split(numSplits, minVal, maxVal);
    List<InputSplit> splits = new ArrayList<InputSplit>();

    // Turn the split points into a set of intervals.
    long start = splitPoints.get(0);
    Date startDate = longToDate(start, sqlDataType);
    if (sqlDataType == Types.TIMESTAMP) {
        // The lower bound's nanos value needs to match the actual lower-bound nanos.
        try {
            ((java.sql.Timestamp) startDate).setNanos(results.getTimestamp(1).getNanos());
        } catch (NullPointerException npe) {
            // If the lower bound was NULL, we'll get an NPE; just ignore it and don't set nanos.
        }
    }

    for (int i = 1; i < splitPoints.size(); i++) {
        long end = splitPoints.get(i);
        Date endDate = longToDate(end, sqlDataType);

        if (i == splitPoints.size() - 1) {
            if (sqlDataType == Types.TIMESTAMP) {
                // The upper bound's nanos value needs to match the actual upper-bound nanos.
                try {
                    ((java.sql.Timestamp) endDate).setNanos(results.getTimestamp(2).getNanos());
                } catch (NullPointerException npe) {
                    // If the upper bound was NULL, we'll get an NPE; just ignore it and don't set nanos.
                }
            }
            // This is the last one; use a closed interval.
            splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                    lowClausePrefix + dateToString(startDate),
                    colName + " <= " + dateToString(endDate)));
        } else {
            // Normal open-interval case.
            splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                    lowClausePrefix + dateToString(startDate),
                    highClausePrefix + dateToString(endDate)));
        }

        start = end;
        startDate = endDate;
    }

    if (minVal == Long.MIN_VALUE || maxVal == Long.MIN_VALUE) {
        // Add an extra split to handle the null case that we saw.
        splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                colName + " IS NULL", colName + " IS NULL"));
    }

    return splits;
}
From source file: se.technipelago.weather.chart.Generator.java
private void setCurrentData(Map<String, Object> map) {
    PreparedStatement stmt = null;
    ResultSet result = null;
    init();
    try {
        stmt = conn.prepareStatement("SELECT * FROM current");
        result = stmt.executeQuery();
        if (result.next()) {
            // id (primary key) is column index 1
            map.put("bar_trend", result.getInt(2));
            map.put("console_battery", result.getFloat(3));
            map.put("forecast_icons", result.getString(4));
            map.put("forecast_msg", result.getString(5));
            map.put("sunrise", result.getTimestamp(6));
            map.put("sunset", result.getTimestamp(7));
            //map.put("timestamp", result.getTimestamp(8));
            map.put("transmit_battery", result.getInt(9));
        } else {
            map.put("bar_trend", 0);
            map.put("console_battery", 4.5);
            map.put("forecast_icons", "none");
            map.put("forecast_msg", "NO CURRENT VALUES");
            map.put("sunrise", new Date());
            map.put("sunset", new Date());
        }
    } catch (SQLException ex) {
        log.log(Level.SEVERE, null, ex);
    } finally {
        if (result != null) {
            try {
                result.close();
            } catch (SQLException ex) {
                log.log(Level.WARNING, "Failed to close ResultSet", ex);
            }
        }
        if (stmt != null) {
            try {
                stmt.close();
            } catch (SQLException ex) {
                log.log(Level.WARNING, "Failed to close select statement", ex);
            }
        }
    }
}
From source file: org.mayocat.shop.marketplace.store.jdbi.AbstractEntityAndTenantMapper.java
@Override
public EntityAndTenant<Product> map(int index, ResultSet result, StatementContext ctx) throws SQLException {
    Product product = extractEntity(index, result, ctx);

    String slug = result.getString("tenant_entity_slug");
    String defaultHost = result.getString("tenant_entity_default_host");

    ObjectMapper mapper = new ObjectMapper();
    mapper.registerModule(new GuavaModule());

    Integer configurationVersion = result.getInt("tenant_entity_configuration_version");
    TenantConfiguration configuration;
    if (Strings.isNullOrEmpty(result.getString("tenant_entity_configuration"))) {
        configuration = new TenantConfiguration(configurationVersion,
                Collections.<String, Serializable>emptyMap());
    } else {
        try {
            Map<String, Serializable> data = mapper.readValue(result.getString("tenant_entity_configuration"),
                    new TypeReference<Map<String, Object>>() {
                    });
            configuration = new TenantConfiguration(configurationVersion, data);
        } catch (IOException e) {
            final Logger logger = LoggerFactory.getLogger(TenantMapper.class);
            logger.error("Failed to load configuration for tenant with slug [{}]", slug, e);
            configuration = new TenantConfiguration();
        }
    }

    Tenant tenant = new Tenant((UUID) result.getObject("tenant_entity_id"), slug, configuration);
    tenant.setFeaturedImageId((UUID) result.getObject("tenant_entity_featured_image_id"));
    tenant.setSlug(slug);
    tenant.setDefaultHost(defaultHost);
    tenant.setCreationDate(result.getTimestamp("tenant_entity_creation_date"));
    tenant.setName(result.getString("tenant_entity_name"));
    tenant.setDescription(result.getString("tenant_entity_description"));
    tenant.setContactEmail(result.getString("tenant_entity_contact_email"));
    return new EntityAndTenant<>(product, tenant);
}
From source file: se.technipelago.weather.chart.Generator.java
private HiLow getHighLow(Date from, Date to, String column, boolean max) {
    PreparedStatement stmt = null;
    ResultSet result = null;
    HiLow hilo = null;
    String func = max ? "DESC" : "ASC";
    init();
    try {
        stmt = conn.prepareStatement("SELECT ts, " + column
                + " FROM archive WHERE ts BETWEEN ? AND ? ORDER BY " + column + " " + func + ", ts ASC");
        stmt.setTimestamp(1, new java.sql.Timestamp(from.getTime()));
        stmt.setTimestamp(2, new java.sql.Timestamp(to.getTime()));
        result = stmt.executeQuery();
        if (result.next()) {
            hilo = new HiLow(result.getTimestamp(1), result.getFloat(2));
        }
    } catch (SQLException ex) {
        Logger.getLogger(Generator.class.getName()).log(Level.SEVERE, null, ex);
    } finally {
        if (result != null) {
            try {
                result.close();
            } catch (SQLException ex) {
                log.log(Level.WARNING, "Failed to close ResultSet", ex);
            }
        }
        if (stmt != null) {
            try {
                stmt.close();
            } catch (SQLException ex) {
                log.log(Level.WARNING, "Failed to close select statement", ex);
            }
        }
    }
    return hilo;
}
From source file: org.bytesoft.openjtcc.supports.logger.DbTransactionLoggerImpl.java
private Map<XidImpl, TransactionArchive> loadTransactionSet(Connection connection) {
    Map<XidImpl, TransactionArchive> metaMap = new HashMap<XidImpl, TransactionArchive>();
    PreparedStatement stmt = null;
    ResultSet rs = null;
    try {
        StringBuilder ber = new StringBuilder();
        ber.append("select global_tx_id, status, status_trace");
        ber.append(", coordinator, created_time ");
        ber.append("from tcc_transaction ");
        ber.append("where application = ? and endpoint = ? and deleted = ?");
        stmt = connection.prepareStatement(ber.toString());
        stmt.setString(1, this.instanceKey.getApplication());
        stmt.setString(2, this.instanceKey.getEndpoint());
        stmt.setBoolean(3, false);
        rs = stmt.executeQuery();
        while (rs.next()) {
            String globalTransactionId = rs.getString("global_tx_id");
            int state = rs.getInt("status");
            int trace = rs.getInt("status_trace");
            boolean coordinator = rs.getBoolean("coordinator");
            Timestamp createdTime = rs.getTimestamp("created_time");

            TransactionContext context = new TransactionContext();
            TerminalKey terminalKey = new TerminalKey();
            terminalKey.setApplication(this.instanceKey.getApplication());
            terminalKey.setEndpoint(this.instanceKey.getEndpoint());
            context.setTerminalKey(terminalKey);
            context.setRecovery(true);
            context.setCompensable(true);
            context.setCoordinator(coordinator);

            byte[] globalBytes = ByteUtils.stringToByteArray(globalTransactionId);
            XidImpl globalXid = this.xidFactory.createGlobalXid(globalBytes);
            // context.setGlobalXid(globalXid);
            context.setCreationXid(globalXid);
            context.setCurrentXid(globalXid);
            context.setCreatedTime(createdTime.getTime());

            TransactionStatus status = new TransactionStatus(state, trace);

            TransactionArchive meta = new TransactionArchive();
            meta.setTransactionStatus(status);
            meta.setTransactionContext(context);
            metaMap.put(globalXid, meta);
        }
    } catch (Exception ex) {
        ex.printStackTrace();
    } finally {
        closeResultSet(rs);
        closeStatement(stmt);
    }
    return metaMap;
}
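One caveat in the example above: rs.getTimestamp("created_time") returns null when the column is SQL NULL, so the later createdTime.getTime() call would throw a NullPointerException for a nullable column. A minimal null-safe sketch, reusing the created_time column name from the example and assuming Java 8+ for the java.time conversion (the class name and the 0L fallback are illustrative):

import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.time.Instant;

public final class CreatedTimeReader {
    // Null-safe variant of the created_time read above.
    public static long readCreatedMillis(ResultSet rs) throws SQLException {
        Timestamp createdTime = rs.getTimestamp("created_time");
        if (createdTime == null) {
            return 0L; // or throw, depending on whether NULL is legal here
        }
        Instant instant = createdTime.toInstant(); // keeps full nanosecond precision
        return instant.toEpochMilli();
    }
}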
From source file: com.splicemachine.derby.impl.load.HdfsImportIT.java
@Test
public void testImportISODateFormat() throws Exception {
    PreparedStatement ps = methodWatcher.prepareStatement(format("call SYSCS_UTIL.IMPORT_DATA(" +
            "'%s'," +  // schema name
            "'%s'," +  // table name
            "null," +  // insert column list
            "'%s'," +  // file path
            "','," +   // column delimiter
            "'%s'," +  // character delimiter
            "'yyyy-MM-dd''T''HH:mm:ss.SSS''Z'''," + // timestamp format
            "null," +  // date format
            "null," +  // time format
            "%d," +    // max bad records
            "'%s'," +  // bad record dir
            "null," +  // has one line records
            "null)",   // char set
            spliceSchemaWatcher.schemaName, TABLE_9, getResourceDirectory() + "iso_order_date.csv", "\"", 0,
            BADDIR.getCanonicalPath()));
    ps.execute();

    ResultSet rs = methodWatcher
            .executeQuery(format("select * from %s.%s", spliceSchemaWatcher.schemaName, TABLE_9));
    List<String> results = Lists.newArrayList();
    while (rs.next()) {
        Timestamp order_date = rs.getTimestamp(1);
        assertNotNull("order_date incorrect", order_date);
        Assert.assertEquals(order_date.toString(), "2013-06-06 15:02:48.0");
        results.add(String.format("order_date:%s", order_date));
    }
    Assert.assertTrue("import failed!", results.size() == 1);
}
From source file: com.splicemachine.derby.impl.load.HdfsImportIT.java
@Test
public void testImportCustomTimeFormatMicro() throws Exception {
    methodWatcher.executeUpdate("delete from " + spliceTableWatcher9);
    PreparedStatement ps = methodWatcher.prepareStatement(format("call SYSCS_UTIL.IMPORT_DATA(" +
            "'%s'," +  // schema name
            "'%s'," +  // table name
            "null," +  // insert column list
            "'%s'," +  // file path
            "null," +  // column delimiter
            "'%s'," +  // character delimiter
            "'yyyy-MM-dd HH:mm:ss.SSSSSS'," + // timestamp format
            "null," +  // date format
            "null," +  // time format
            "%d," +    // max bad records
            "'%s'," +  // bad record dir
            "null," +  // has one line records
            "null)",   // char set
            spliceSchemaWatcher.schemaName, TABLE_19, getResourceDirectory() + "tz_micro_order_date.csv", "\"",
            0, BADDIR.getCanonicalPath()));
    ps.execute();

    ResultSet rs = methodWatcher
            .executeQuery(format("select * from %s.%s", spliceSchemaWatcher.schemaName, TABLE_19));
    List<String> results = Lists.newArrayList();
    while (rs.next()) {
        Timestamp order_date = rs.getTimestamp(1);
        assertNotNull("order_date incorrect", order_date);
        // have to deal with differing time zones here
        Assert.assertEquals("2013-04-21 09:21:24.980034", order_date.toString());
        results.add(String.format("order_date:%s", order_date));
    }
    Assert.assertTrue("import failed!", results.size() == 1);
}
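The time-zone comment in this test points at a general getTimestamp detail: when the database value carries no zone information, the driver interprets it in the JVM's default time zone. JDBC's Calendar overload lets the caller pin the zone explicitly; a sketch (the class name and the choice of UTC are illustrative):

import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.Calendar;
import java.util.TimeZone;

public final class TimeZoneRead {
    // ResultSet.getTimestamp(int, Calendar): the Calendar tells the driver
    // which zone to assume when the stored value has no zone of its own.
    public static Timestamp readInUtc(ResultSet rs, int column) throws SQLException {
        Calendar utc = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
        return rs.getTimestamp(column, utc);
    }
}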