List of usage examples for java.sql.Types.TIMESTAMP
public static final int TIMESTAMP = 93
The constant in the Java programming language, sometimes referred to as a type code, that identifies the generic SQL type TIMESTAMP.
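As a quick orientation before the examples, here is a minimal sketch (not taken from any of the projects below) of the two most common uses of Types.TIMESTAMP: binding a nullable timestamp parameter and detecting timestamp columns at runtime. The events table and created_at column are hypothetical.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.sql.Types;

public class TimestampTypeExample {
    // Bind a timestamp parameter that may be null; setNull needs the
    // java.sql.Types code of the target column.
    static void insert(Connection conn, Timestamp createdAt) throws SQLException {
        try (PreparedStatement ps = conn.prepareStatement(
                "insert into events (created_at) values (?)")) {
            if (createdAt != null) {
                ps.setTimestamp(1, createdAt);
            } else {
                ps.setNull(1, Types.TIMESTAMP);
            }
            ps.executeUpdate();
        }
    }

    // Report which columns of an arbitrary result set are TIMESTAMP columns.
    static void printTimestampColumns(ResultSet rs) throws SQLException {
        ResultSetMetaData md = rs.getMetaData();
        for (int i = 1; i <= md.getColumnCount(); i++) {
            if (md.getColumnType(i) == Types.TIMESTAMP) {
                System.out.println(md.getColumnLabel(i) + " is a TIMESTAMP column");
            }
        }
    }
}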
From source file:org.pentaho.reporting.engine.classic.core.modules.misc.tablemodel.TypeMapper.java
private static Class mapSQLType(final int t) {
    switch (t) {
    case Types.ARRAY:
        return Object[].class;
    case Types.BIGINT:
        return Long.class;
    case Types.BINARY:
        return byteArrayClass;
    case Types.BIT:
        return Boolean.class;
    case Types.BLOB:
        return Blob.class;
    case Types.BOOLEAN: // Types.BOOLEAN was not part of JDK 1.2.2
        return Boolean.class;
    case Types.CHAR:
        return String.class;
    case Types.CLOB:
        return Clob.class;
    case Types.DATALINK: // Types.DATALINK was not part of JDK 1.2.2
        return URL.class;
    case Types.DATE:
        return java.sql.Date.class;
    case Types.DECIMAL:
        return java.math.BigDecimal.class;
    case Types.DISTINCT:
        return Object.class;
    case Types.DOUBLE:
        return Double.class;
    case Types.FLOAT:
        return Double.class;
    case Types.INTEGER:
        return Integer.class;
    case Types.JAVA_OBJECT:
        return Object.class;
    case Types.LONGVARBINARY:
        return byteArrayClass;
    case Types.LONGVARCHAR:
        return String.class;
    case Types.NCLOB:
        return NClob.class;
    case Types.NULL:
        return Object.class;
    case Types.NUMERIC:
        return java.math.BigDecimal.class;
    case Types.NCHAR:
    case Types.NVARCHAR:
    case Types.LONGNVARCHAR:
        return String.class;
    case Types.OTHER:
        return Object.class;
    case Types.REAL:
        return Float.class;
    case Types.REF:
        return Ref.class;
    case Types.ROWID:
        return RowId.class;
    case Types.SMALLINT:
        return Short.class;
    case Types.STRUCT:
        return Struct.class;
    case Types.SQLXML:
        return SQLXML.class;
    case Types.TIME:
        return Time.class;
    case Types.TIMESTAMP:
        return Timestamp.class;
    case Types.TINYINT:
        return Byte.class;
    case Types.VARBINARY:
        return byteArrayClass;
    case Types.VARCHAR:
        return String.class;
    default:
        return Object.class;
    }
}
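A mapping like this is typically driven by ResultSetMetaData. Here is a minimal, hypothetical caller sketch; the wrapper and the assumption that a mapSQLType(int) method like the one above is in scope are mine, not the project's.

import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;

// Hypothetical caller: resolve the Java class of each column in a result set.
static Class[] columnClasses(ResultSet rs) throws SQLException {
    ResultSetMetaData md = rs.getMetaData();
    Class[] classes = new Class[md.getColumnCount()];
    for (int i = 0; i < classes.length; i++) {
        // getColumnType returns a java.sql.Types code; Types.TIMESTAMP maps
        // to java.sql.Timestamp.class in the table above. JDBC columns are 1-based.
        classes[i] = mapSQLType(md.getColumnType(i + 1));
    }
    return classes;
}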
From source file:net.sourceforge.vulcan.spring.jdbc.BuildInserter.java
public BuildInserter(DataSource dataSource) {
    setDataSource(dataSource);
    setSql("insert into builds "
            + "(project_id, uuid, status, message_key,"
            + "message_arg_0, message_arg_1, message_arg_2, message_arg_3, "
            + "build_reason_key, "
            + "build_reason_arg_0, build_reason_arg_1, build_reason_arg_2, build_reason_arg_3, "
            + "start_date, completion_date, build_number, update_type,"
            + "work_dir, revision, revision_label, last_good_build_number,"
            + "tag_name, repository_url, status_changed, scheduled_build,"
            + "requested_by, revision_unavailable, broken_by_user_id, claimed_date, work_dir_vcs_clean) "
            + "values ((select id from project_names where name=?),"
            + " ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,"
            + " (select id from users where username=?), ?, ?)");
    // Parameters are declared in placeholder order. First the thirteen strings:
    // project name (resolved to project_id), uuid, status, message_key,
    // message_arg_0..3, build_reason_key, build_reason_arg_0..3.
    declareParameter(new SqlParameter(Types.VARCHAR));
    declareParameter(new SqlParameter(Types.VARCHAR));
    declareParameter(new SqlParameter(Types.VARCHAR));
    declareParameter(new SqlParameter(Types.VARCHAR));
    declareParameter(new SqlParameter(Types.VARCHAR));
    declareParameter(new SqlParameter(Types.VARCHAR));
    declareParameter(new SqlParameter(Types.VARCHAR));
    declareParameter(new SqlParameter(Types.VARCHAR));
    declareParameter(new SqlParameter(Types.VARCHAR));
    declareParameter(new SqlParameter(Types.VARCHAR));
    declareParameter(new SqlParameter(Types.VARCHAR));
    declareParameter(new SqlParameter(Types.VARCHAR));
    declareParameter(new SqlParameter(Types.VARCHAR));
    // start_date, completion_date
    declareParameter(new SqlParameter(Types.TIMESTAMP));
    declareParameter(new SqlParameter(Types.TIMESTAMP));
    // build_number
    declareParameter(new SqlParameter(Types.NUMERIC));
    // update_type, work_dir
    declareParameter(new SqlParameter(Types.VARCHAR));
    declareParameter(new SqlParameter(Types.VARCHAR));
    // revision
    declareParameter(new SqlParameter(Types.BIGINT));
    // revision_label
    declareParameter(new SqlParameter(Types.VARCHAR));
    // last_good_build_number
    declareParameter(new SqlParameter(Types.NUMERIC));
    // tag_name, repository_url
    declareParameter(new SqlParameter(Types.VARCHAR));
    declareParameter(new SqlParameter(Types.VARCHAR));
    // status_changed, scheduled_build
    declareParameter(new SqlParameter(Types.BOOLEAN));
    declareParameter(new SqlParameter(Types.BOOLEAN));
    // requested_by
    declareParameter(new SqlParameter(Types.VARCHAR));
    // revision_unavailable
    declareParameter(new SqlParameter(Types.BOOLEAN));
    // username (resolved to broken_by_user_id)
    declareParameter(new SqlParameter(Types.VARCHAR));
    // claimed_date
    declareParameter(new SqlParameter(Types.TIMESTAMP));
    // work_dir_vcs_clean
    declareParameter(new SqlParameter(Types.BOOLEAN));
    compile();
}
From source file:com.trackplus.ddl.GenericStringValueConverter.java
protected String extractColumnValue(ResultSet resultSet, int columnIdx, int jdbcType)
        throws SQLException, DDLException {
    String value = resultSet.getString(columnIdx);
    if (value != null) {
        switch (jdbcType) {
        case Types.NUMERIC:
        case Types.DECIMAL:
            break;
        case Types.BIT:
        case Types.BOOLEAN:
        case Types.TINYINT:
        case Types.SMALLINT:
        case Types.INTEGER:
        case Types.BIGINT:
        case Types.REAL:
        case Types.FLOAT:
        case Types.DOUBLE:
            break;
        case Types.CHAR:
        case Types.VARCHAR:
        case Types.LONGVARCHAR:
        case Types.BINARY:
        case Types.VARBINARY:
        case Types.TIME:
        case Types.CLOB:
        case Types.ARRAY:
        case Types.REF: {
            value = "'" + value.replaceAll("'", "''") + "'";
            break;
        }
        case Types.DATE:
        case Types.TIMESTAMP: {
            Date d = resultSet.getDate(columnIdx);
            Calendar cal = Calendar.getInstance();
            cal.setTime(d);
            int year = cal.get(Calendar.YEAR);
            if (year < 1900) {
                throw new DDLException("Invalid date:" + d);
            } else {
                value = "'" + value + "'";
            }
            break;
        }
        case Types.BLOB:
        case Types.LONGVARBINARY: {
            Blob blobValue = resultSet.getBlob(columnIdx);
            String str = new String(Base64.encodeBase64(blobValue.getBytes(1L, (int) blobValue.length())));
            value = "'" + str + "'";
            break;
        }
        default:
            break;
        }
    }
    return value;
}
From source file:net.sourceforge.vulcan.spring.jdbc.HistoryQueryBuilder.java
static void buildQuery(String selectClause, BuildOutcomeQueryDto dto, BuilderQuery query) {
    final Set<String> projectNames = dto.getProjectNames();
    if (projectNames == null || projectNames.isEmpty()) {
        throw new IllegalArgumentException("Must query for at least one project name");
    }

    final List<? super Object> params = new ArrayList<Object>();
    final StringBuilder sb = new StringBuilder();

    sb.append("where project_names.name");
    query.declareParameter(new SqlParameter(Types.VARCHAR));

    if (projectNames.size() == 1) {
        sb.append("=?");
        params.addAll(projectNames);
    } else {
        sb.append(" in (?");
        for (int i = 1; i < projectNames.size(); i++) {
            sb.append(",?");
            query.declareParameter(new SqlParameter(Types.VARCHAR));
        }
        sb.append(")");

        final List<String> sorted = new ArrayList<String>(projectNames);
        Collections.sort(sorted);
        params.addAll(sorted);
    }

    if (dto.getMinDate() != null) {
        sb.append(" and completion_date>=?");
        params.add(dto.getMinDate());
        query.declareParameter(new SqlParameter(Types.TIMESTAMP));
    }

    if (dto.getMaxDate() != null) {
        sb.append(" and completion_date<?");
        params.add(dto.getMaxDate());
        query.declareParameter(new SqlParameter(Types.TIMESTAMP));
    }

    if (dto.getMinBuildNumber() != null) {
        sb.append(" and build_number>=?");
        params.add(dto.getMinBuildNumber());
        query.declareParameter(new SqlParameter(Types.INTEGER));
    }

    if (dto.getMaxBuildNumber() != null) {
        sb.append(" and build_number<=?");
        params.add(dto.getMaxBuildNumber());
        query.declareParameter(new SqlParameter(Types.INTEGER));
    }

    final Set<Status> statuses = dto.getStatuses();
    if (statuses != null && !statuses.isEmpty()) {
        query.declareParameter(new SqlParameter(Types.VARCHAR));
        sb.append(" and status in (?");
        for (int i = 1; i < statuses.size(); i++) {
            query.declareParameter(new SqlParameter(Types.VARCHAR));
            sb.append(",?");
        }
        sb.append(")");
        params.addAll(statuses);
    }

    if (dto.getUpdateType() != null) {
        query.declareParameter(new SqlParameter(Types.VARCHAR));
        sb.append(" and update_type=?");
        params.add(dto.getUpdateType().name());
    }

    if (isNotBlank(dto.getRequestedBy())) {
        query.declareParameter(new SqlParameter(Types.VARCHAR));
        sb.append(" and requested_by=?");
        params.add(dto.getRequestedBy());
    }

    query.setParameterValues(params.toArray());
    query.setSql(selectClause + sb.toString());
}
From source file:com.cloudera.sqoop.mapreduce.db.DateSplitter.java
public List<InputSplit> split(Configuration conf, ResultSet results, String colName) throws SQLException {
    long minVal;
    long maxVal;

    int sqlDataType = results.getMetaData().getColumnType(1);
    minVal = resultSetColToLong(results, 1, sqlDataType);
    maxVal = resultSetColToLong(results, 2, sqlDataType);

    String lowClausePrefix = colName + " >= ";
    String highClausePrefix = colName + " < ";

    int numSplits = ConfigurationHelper.getConfNumMaps(conf);
    if (numSplits < 1) {
        numSplits = 1;
    }

    if (minVal == Long.MIN_VALUE && maxVal == Long.MIN_VALUE) {
        // The range of acceptable dates is NULL to NULL. Just create a single split.
        List<InputSplit> splits = new ArrayList<InputSplit>();
        splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                colName + " IS NULL", colName + " IS NULL"));
        return splits;
    }

    // Gather the split point integers
    List<Long> splitPoints = split(numSplits, minVal, maxVal);
    List<InputSplit> splits = new ArrayList<InputSplit>();

    // Turn the split points into a set of intervals.
    long start = splitPoints.get(0);
    Date startDate = longToDate(start, sqlDataType);
    if (sqlDataType == Types.TIMESTAMP) {
        // The lower bound's nanos value needs to match the actual lower-bound nanos.
        try {
            ((java.sql.Timestamp) startDate).setNanos(results.getTimestamp(1).getNanos());
        } catch (NullPointerException npe) {
            // If the lower bound was NULL, we'll get an NPE; just ignore it and don't set nanos.
        }
    }

    for (int i = 1; i < splitPoints.size(); i++) {
        long end = splitPoints.get(i);
        Date endDate = longToDate(end, sqlDataType);

        if (i == splitPoints.size() - 1) {
            if (sqlDataType == Types.TIMESTAMP) {
                // The upper bound's nanos value needs to match the actual upper-bound nanos.
                try {
                    ((java.sql.Timestamp) endDate).setNanos(results.getTimestamp(2).getNanos());
                } catch (NullPointerException npe) {
                    // If the upper bound was NULL, we'll get an NPE; just ignore it and don't set nanos.
                }
            }

            // This is the last one; use a closed interval.
            splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                    lowClausePrefix + dateToString(startDate),
                    colName + " <= " + dateToString(endDate)));
        } else {
            // Normal open-interval case.
            splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                    lowClausePrefix + dateToString(startDate),
                    highClausePrefix + dateToString(endDate)));
        }

        start = end;
        startDate = endDate;
    }

    if (minVal == Long.MIN_VALUE || maxVal == Long.MIN_VALUE) {
        // Add an extra split to handle the null case that we saw.
        splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                colName + " IS NULL", colName + " IS NULL"));
    }

    return splits;
}
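Worth noting in this splitter: longToDate rebuilds the interval endpoints from millisecond longs, so for Types.TIMESTAMP columns the code copies the original getNanos() values back onto the first and last endpoints. Without that fix-up, sub-millisecond precision would be silently dropped from the generated WHERE clauses and boundary rows could be missed or double-counted.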
From source file:co.nubetech.apache.hadoop.DateSplitter.java
public List<InputSplit> split(Configuration conf, ResultSet results, String colName) throws SQLException {
    long minVal;
    long maxVal;

    int sqlDataType = results.getMetaData().getColumnType(1);
    minVal = resultSetColToLong(results, 1, sqlDataType);
    maxVal = resultSetColToLong(results, 2, sqlDataType);

    String lowClausePrefix = colName + " >= ";
    String highClausePrefix = colName + " < ";

    int numSplits = conf.getInt(MRJobConfig.NUM_MAPS, 1);
    if (numSplits < 1) {
        numSplits = 1;
    }

    if (minVal == Long.MIN_VALUE && maxVal == Long.MIN_VALUE) {
        // The range of acceptable dates is NULL to NULL. Just create a single split.
        List<InputSplit> splits = new ArrayList<InputSplit>();
        splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                colName + " IS NULL", colName + " IS NULL"));
        return splits;
    }

    // Gather the split point integers
    List<Long> splitPoints = split(numSplits, minVal, maxVal);
    List<InputSplit> splits = new ArrayList<InputSplit>();

    // Turn the split points into a set of intervals.
    long start = splitPoints.get(0);
    Date startDate = longToDate(start, sqlDataType);
    if (sqlDataType == Types.TIMESTAMP) {
        // The lower bound's nanos value needs to match the actual lower-bound nanos.
        try {
            ((java.sql.Timestamp) startDate).setNanos(results.getTimestamp(1).getNanos());
        } catch (NullPointerException npe) {
            // If the lower bound was NULL, we'll get an NPE; just ignore it and don't set nanos.
        }
    }

    for (int i = 1; i < splitPoints.size(); i++) {
        long end = splitPoints.get(i);
        Date endDate = longToDate(end, sqlDataType);

        if (i == splitPoints.size() - 1) {
            if (sqlDataType == Types.TIMESTAMP) {
                // The upper bound's nanos value needs to match the actual upper-bound nanos.
                try {
                    ((java.sql.Timestamp) endDate).setNanos(results.getTimestamp(2).getNanos());
                } catch (NullPointerException npe) {
                    // If the upper bound was NULL, we'll get an NPE; just ignore it and don't set nanos.
                }
            }

            // This is the last one; use a closed interval.
            splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                    lowClausePrefix + dateToString(startDate),
                    colName + " <= " + dateToString(endDate)));
        } else {
            // Normal open-interval case.
            splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                    lowClausePrefix + dateToString(startDate),
                    highClausePrefix + dateToString(endDate)));
        }

        start = end;
        startDate = endDate;
    }

    if (minVal == Long.MIN_VALUE || maxVal == Long.MIN_VALUE) {
        // Add an extra split to handle the null case that we saw.
        splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                colName + " IS NULL", colName + " IS NULL"));
    }

    return splits;
}
From source file:co.nubetech.hiho.mapreduce.lib.db.apache.DateSplitter.java
public List<InputSplit> split(Configuration conf, ResultSet results, String colName) throws SQLException {
    long minVal;
    long maxVal;

    int sqlDataType = results.getMetaData().getColumnType(1);
    minVal = resultSetColToLong(results, 1, sqlDataType);
    maxVal = resultSetColToLong(results, 2, sqlDataType);

    String lowClausePrefix = colName + " >= ";
    String highClausePrefix = colName + " < ";

    int numSplits = conf.getInt(MRJobConfig.NUM_MAPS, 1);
    if (numSplits < 1) {
        numSplits = 1;
    }

    if (minVal == Long.MIN_VALUE && maxVal == Long.MIN_VALUE) {
        // The range of acceptable dates is NULL to NULL. Just create a single split.
        List<InputSplit> splits = new ArrayList<InputSplit>();
        splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                colName + " IS NULL", colName + " IS NULL"));
        return splits;
    }

    // Gather the split point integers
    List<Long> splitPoints = split(numSplits, minVal, maxVal);
    List<InputSplit> splits = new ArrayList<InputSplit>();

    // Turn the split points into a set of intervals.
    long start = splitPoints.get(0);
    Date startDate = longToDate(start, sqlDataType);
    if (sqlDataType == Types.TIMESTAMP) {
        // The lower bound's nanos value needs to match the actual lower-bound nanos.
        try {
            ((java.sql.Timestamp) startDate).setNanos(results.getTimestamp(1).getNanos());
        } catch (NullPointerException npe) {
            // If the lower bound was NULL, we'll get an NPE; just ignore it and don't set nanos.
        }
    }

    for (int i = 1; i < splitPoints.size(); i++) {
        long end = splitPoints.get(i);
        Date endDate = longToDate(end, sqlDataType);

        if (i == splitPoints.size() - 1) {
            if (sqlDataType == Types.TIMESTAMP) {
                // The upper bound's nanos value needs to match the actual upper-bound nanos.
                try {
                    ((java.sql.Timestamp) endDate).setNanos(results.getTimestamp(2).getNanos());
                } catch (NullPointerException npe) {
                    // If the upper bound was NULL, we'll get an NPE; just ignore it and don't set nanos.
                }
            }

            // This is the last one; use a closed interval.
            splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                    lowClausePrefix + dateToString(startDate),
                    colName + " <= " + dateToString(endDate)));
        } else {
            // Normal open-interval case.
            splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                    lowClausePrefix + dateToString(startDate),
                    highClausePrefix + dateToString(endDate)));
        }

        start = end;
        startDate = endDate;
    }

    if (minVal == Long.MIN_VALUE || maxVal == Long.MIN_VALUE) {
        // Add an extra split to handle the null case that we saw.
        splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                colName + " IS NULL", colName + " IS NULL"));
    }

    return splits;
}
From source file:org.apache.openjpa.jdbc.schema.Schemas.java
/**
 * Return the SQL type name for the given {@link Types} constant.
 */
public static String getJDBCName(int type) {
    switch (type) {
    case Types.ARRAY:
        return "array";
    case Types.BIGINT:
        return "bigint";
    case Types.BINARY:
        return "binary";
    case Types.BIT:
        return "bit";
    case Types.BLOB:
        return "blob";
    case Types.CHAR:
        return "char";
    case Types.CLOB:
        return "clob";
    case Types.DATE:
        return "date";
    case Types.DECIMAL:
        return "decimal";
    case Types.DISTINCT:
        return "distinct";
    case Types.DOUBLE:
        return "double";
    case Types.FLOAT:
        return "float";
    case Types.INTEGER:
        return "integer";
    case Types.JAVA_OBJECT:
        return "java_object";
    case Types.LONGVARBINARY:
        return "longvarbinary";
    case Types.LONGVARCHAR:
        return "longvarchar";
    case Types.NULL:
        return "null";
    case Types.NUMERIC:
        return "numeric";
    case Types.OTHER:
        return "other";
    case Types.REAL:
        return "real";
    case Types.REF:
        return "ref";
    case Types.SMALLINT:
        return "smallint";
    case Types.STRUCT:
        return "struct";
    case Types.TIME:
        return "time";
    case Types.TIMESTAMP:
        return "timestamp";
    case Types.TINYINT:
        return "tinyint";
    case Types.VARBINARY:
        return "varbinary";
    case Types.VARCHAR:
        return "varchar";
    default:
        return "unknown(" + type + ")";
    }
}
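A type-name helper like this pairs naturally with DatabaseMetaData. The following is a minimal, hypothetical usage sketch, assuming a getJDBCName(int) helper like the one above is in scope; the method and table name are illustrative only.

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;

// Hypothetical usage: print each column of a table with its JDBC type name.
static void describeTable(Connection conn, String tableName) throws SQLException {
    DatabaseMetaData meta = conn.getMetaData();
    try (ResultSet cols = meta.getColumns(null, null, tableName, "%")) {
        while (cols.next()) {
            int typeCode = cols.getInt("DATA_TYPE"); // a java.sql.Types value
            System.out.println(cols.getString("COLUMN_NAME") + ": " + getJDBCName(typeCode));
        }
    }
}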
From source file:org.apache.hadoop.mapreduce.lib.db.DateSplitter.java
public List<InputSplit> split(Configuration conf, ResultSet results, String colName) throws SQLException {
    long minVal;
    long maxVal;

    int sqlDataType = results.getMetaData().getColumnType(1);
    minVal = resultSetColToLong(results, 1, sqlDataType);
    maxVal = resultSetColToLong(results, 2, sqlDataType);

    String lowClausePrefix = colName + " >= ";
    String highClausePrefix = colName + " < ";

    int numSplits = conf.getInt("mapred.map.tasks", 1);
    if (numSplits < 1) {
        numSplits = 1;
    }

    if (minVal == Long.MIN_VALUE && maxVal == Long.MIN_VALUE) {
        // The range of acceptable dates is NULL to NULL. Just create a single split.
        List<InputSplit> splits = new ArrayList<InputSplit>();
        splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                colName + " IS NULL", colName + " IS NULL"));
        return splits;
    }

    // Gather the split point integers
    List<Long> splitPoints = split(numSplits, minVal, maxVal);
    List<InputSplit> splits = new ArrayList<InputSplit>();

    // Turn the split points into a set of intervals.
    long start = splitPoints.get(0);
    Date startDate = longToDate(start, sqlDataType);
    if (sqlDataType == Types.TIMESTAMP) {
        // The lower bound's nanos value needs to match the actual lower-bound nanos.
        try {
            ((java.sql.Timestamp) startDate).setNanos(results.getTimestamp(1).getNanos());
        } catch (NullPointerException npe) {
            // If the lower bound was NULL, we'll get an NPE; just ignore it and don't set nanos.
        }
    }

    for (int i = 1; i < splitPoints.size(); i++) {
        long end = splitPoints.get(i);
        Date endDate = longToDate(end, sqlDataType);

        if (i == splitPoints.size() - 1) {
            if (sqlDataType == Types.TIMESTAMP) {
                // The upper bound's nanos value needs to match the actual upper-bound nanos.
                try {
                    ((java.sql.Timestamp) endDate).setNanos(results.getTimestamp(2).getNanos());
                } catch (NullPointerException npe) {
                    // If the upper bound was NULL, we'll get an NPE; just ignore it and don't set nanos.
                }
            }

            // This is the last one; use a closed interval.
            splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                    lowClausePrefix + dateToString(startDate),
                    colName + " <= " + dateToString(endDate)));
        } else {
            // Normal open-interval case.
            splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                    lowClausePrefix + dateToString(startDate),
                    highClausePrefix + dateToString(endDate)));
        }

        start = end;
        startDate = endDate;
    }

    if (minVal == Long.MIN_VALUE || maxVal == Long.MIN_VALUE) {
        // Add an extra split to handle the null case that we saw.
        splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                colName + " IS NULL", colName + " IS NULL"));
    }

    return splits;
}
From source file:com.nabla.wapp.server.json.SqlColumn.java
public void write(final ResultSet rs, int column, final JSONObject record) throws SQLException {
    switch (type) {
    case Types.BIGINT:
    case Types.INTEGER:
    case Types.SMALLINT:
    case Types.TINYINT:
        record.put(label, rs.getInt(column));
        break;
    case Types.BOOLEAN:
    case Types.BIT:
        record.put(label, rs.getBoolean(column));
        break;
    case Types.DATE:
        final Date dt = rs.getDate(column);
        if (rs.wasNull())
            record.put(label, null);
        else
            record.put(label, new JSonDate(dt));
        return;
    case Types.TIMESTAMP:
        final Timestamp tm = rs.getTimestamp(column);
        if (rs.wasNull())
            record.put(label, null);
        else
            record.put(label, timeStampFormat.format(tm));
        return;
    case Types.DOUBLE:
        record.put(label, rs.getDouble(column));
        break;
    case Types.FLOAT:
        record.put(label, rs.getFloat(column));
        break;
    case Types.NULL:
        record.put(label, null);
        return;
    default:
        record.put(label, rs.getString(column));
        break;
    }
    if (rs.wasNull())
        record.put(label, null);
}
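One plausible way to drive this write method is a loop over a result set. The sketch below is an assumption, not part of the project: it presumes the SqlColumn class above, a column list pre-built from ResultSetMetaData, and only that the JSONObject type (whose exact library is not shown in this excerpt) has a put(String, Object) method.

import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;

// Hypothetical driver: serialize each row of a result set to one JSON record.
static List<JSONObject> toRecords(ResultSet rs, List<SqlColumn> columns) throws SQLException {
    final List<JSONObject> records = new ArrayList<JSONObject>();
    while (rs.next()) {
        final JSONObject record = new JSONObject();
        for (int i = 0; i < columns.size(); i++) {
            columns.get(i).write(rs, i + 1, record); // JDBC columns are 1-based
        }
        records.add(record);
    }
    return records;
}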