Usage examples for java.sql.ResultSetMetaData.getColumnType
int getColumnType(int column) throws SQLException;
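Before the examples from real projects below, here is a minimal sketch of typical usage. The class and method names (GetColumnTypeSketch, describeColumns) and the passed-in Connection and SQL are hypothetical; the point is that getColumnType returns a java.sql.Types constant (column indexes are 1-based) that can drive per-column handling.

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Types;

public class GetColumnTypeSketch {
    // Prints each column's label and a short description of its JDBC type.
    static void describeColumns(Connection con, String sql) throws SQLException {
        try (Statement stmt = con.createStatement();
             ResultSet rs = stmt.executeQuery(sql)) {
            ResultSetMetaData meta = rs.getMetaData();
            for (int i = 1; i <= meta.getColumnCount(); i++) {
                int type = meta.getColumnType(i); // a java.sql.Types constant
                String kind = (type == Types.VARCHAR) ? "text"
                        : (type == Types.INTEGER) ? "int"
                        : meta.getColumnTypeName(i); // fall back to the driver-specific name
                System.out.println(meta.getColumnLabel(i) + " -> " + kind);
            }
        }
    }
}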
From source file:org.apache.hive.jdbc.TestJdbcDriver2.java
/**
 * Validate the Metadata for the result set of a metadata getColumns call.
 */
@Test
public void testMetaDataGetColumnsMetaData() throws SQLException {
    ResultSet rs = con.getMetaData().getColumns(null, null, "testhivejdbcdriver\\_table", null);

    ResultSetMetaData rsmd = rs.getMetaData();

    assertEquals("TABLE_CAT", rsmd.getColumnName(1));
    assertEquals(Types.VARCHAR, rsmd.getColumnType(1));
    assertEquals(Integer.MAX_VALUE, rsmd.getColumnDisplaySize(1));

    assertEquals("ORDINAL_POSITION", rsmd.getColumnName(17));
    assertEquals(Types.INTEGER, rsmd.getColumnType(17));
    assertEquals(11, rsmd.getColumnDisplaySize(17));
}
From source file:org.fastcatsearch.datasource.reader.DBReader.java
@Override
public SchemaSetting getAutoGeneratedSchemaSetting() {
    Map<String, String> properties = singleSourceConfig.getProperties();
    String jdbcSourceId = properties.get("jdbcSourceId");
    String dataSQL = properties.get("dataSQL");
    IRService service = ServiceManager.getInstance().getService(IRService.class);
    Connection con = null;
    PreparedStatement pst = null;
    ResultSet res = null;
    ResultSetMetaData meta = null;
    try {
        JDBCSourceInfo jdbcInfo = service.getJDBCSourceInfo(jdbcSourceId);
        if (jdbcInfo != null) {
            con = getConnection(jdbcInfo);
        }
        logger.trace("get jdbc connection : {}", con);
        if (con != null) {
            logger.trace("executing sql :{}", dataSQL);
            pst = con.prepareStatement(dataSQL);
            pst.setFetchSize(1);
            pst.setMaxRows(1);
            res = pst.executeQuery();
            res.next();
            meta = res.getMetaData();

            SchemaSetting setting = new SchemaSetting();
            PrimaryKeySetting primaryKeySetting = new PrimaryKeySetting();
            List<FieldSetting> fieldSettingList = new ArrayList<FieldSetting>();
            List<AnalyzerSetting> analyzerSetting = new ArrayList<AnalyzerSetting>();
            List<GroupIndexSetting> groupIndexSetting = new ArrayList<GroupIndexSetting>();
            List<IndexSetting> indexSetting = new ArrayList<IndexSetting>();
            List<FieldIndexSetting> fieldIndexSetting = new ArrayList<FieldIndexSetting>();

            logger.trace("columnCount:{}", meta.getColumnCount());
            String tableName = null;
            for (int inx = 0; inx < meta.getColumnCount(); inx++) {
                if (tableName == null) {
                    tableName = meta.getTableName(inx + 1);
                }
                FieldSetting field = new FieldSetting();
                Type type = null;
                int size = 0;
                switch (meta.getColumnType(inx + 1)) {
                case Types.INTEGER:
                case Types.TINYINT:
                case Types.SMALLINT:
                case Types.NUMERIC:
                    type = Type.INT;
                    break;
                case Types.BIGINT:
                    type = Type.LONG;
                    break;
                case Types.FLOAT:
                    type = Type.FLOAT;
                    break;
                case Types.DOUBLE:
                    type = Type.DOUBLE;
                    break;
                case Types.DATE:
                case Types.TIME:
                case Types.TIMESTAMP:
                    type = Type.DATETIME;
                    break;
                case Types.CHAR:
                case Types.VARCHAR:
                case Types.LONGVARCHAR:
                    type = Type.STRING;
                    break;
                default:
                    type = Type.STRING;
                    break;
                }
                field.setId(meta.getColumnLabel(inx + 1));
                field.setName(field.getId());
                field.setType(type);
                field.setSize(size);
                logger.trace("field add {}", field);
                fieldSettingList.add(field);
            }

            setting.setFieldSettingList(fieldSettingList);
            setting.setPrimaryKeySetting(primaryKeySetting);
            setting.setFieldIndexSettingList(fieldIndexSetting);
            setting.setAnalyzerSettingList(analyzerSetting);
            setting.setGroupIndexSettingList(groupIndexSetting);
            setting.setIndexSettingList(indexSetting);
            return setting;
        }
    } catch (IRException e) {
        logger.error("", e);
    } catch (SQLException e) {
        logger.error("", e);
    } finally {
        if (res != null) try { res.close(); } catch (SQLException ignore) { }
        if (pst != null) try { pst.close(); } catch (SQLException ignore) { }
        if (con != null) try { con.close(); } catch (SQLException ignore) { }
    }
    return null;
}
From source file:org.apache.hive.jdbc.TestJdbcDriver2.java
@Test
public void testIntervalTypes() throws Exception {
    Statement stmt = con.createStatement();

    // Since interval types are not currently supported as table columns, we need
    // to create them as expressions.
    ResultSet res = stmt
            .executeQuery("select case when c17 is null then null else interval '1' year end as col1,"
                    + " c17 - c17 as col2 from " + dataTypeTableName + " order by col1");

    ResultSetMetaData meta = res.getMetaData();
    assertEquals("col1", meta.getColumnLabel(1));
    assertEquals(java.sql.Types.OTHER, meta.getColumnType(1));
    assertEquals("interval_year_month", meta.getColumnTypeName(1));
    assertEquals(11, meta.getColumnDisplaySize(1));
    assertEquals(11, meta.getPrecision(1));
    assertEquals(0, meta.getScale(1));
    assertEquals(HiveIntervalYearMonth.class.getName(), meta.getColumnClassName(1));

    assertEquals("col2", meta.getColumnLabel(2));
    assertEquals(java.sql.Types.OTHER, meta.getColumnType(2));
    assertEquals("interval_day_time", meta.getColumnTypeName(2));
    assertEquals(29, meta.getColumnDisplaySize(2));
    assertEquals(29, meta.getPrecision(2));
    assertEquals(0, meta.getScale(2));
    assertEquals(HiveIntervalDayTime.class.getName(), meta.getColumnClassName(2));

    // row 1 - results should be null
    assertTrue(res.next());
    // skip the last (partitioning) column since it is always non-null
    for (int i = 1; i < meta.getColumnCount(); i++) {
        assertNull("Column " + i + " should be null", res.getObject(i));
    }

    // row 2 - results should be null
    assertTrue(res.next());
    for (int i = 1; i < meta.getColumnCount(); i++) {
        assertNull("Column " + i + " should be null", res.getObject(i));
    }

    // row 3
    assertTrue(res.next());
    assertEquals("1-0", res.getString(1));
    assertEquals(1, ((HiveIntervalYearMonth) res.getObject(1)).getYears());
    assertEquals("0 00:00:00.000000000", res.getString(2));
    assertEquals(0, ((HiveIntervalDayTime) res.getObject(2)).getDays());
}
From source file:org.getobjects.eoaccess.EOAdaptorChannel.java
/**
 * Executes the SQL string and returns the results as a List of Maps.
 * <p>
 * If the SQL string is empty, an error is set and null is returned.
 *
 * @return null on error (check lastException), or the fetch results
 */
public List<Map<String, Object>> performSQL(final String _sql, final EOAttribute[] _optAttrs) {
    if (_sql == null || _sql.length() == 0) {
        log.error("performSQL caller gave us no SQL ...");
        this.lastException = new Exception("got no SQL to perform!");
        return null;
    }
    this.lastException = null;

    /* acquire DB resources */
    final Statement stmt = this._createStatement();
    if (stmt == null)
        return null;

    /* perform query */
    ArrayList<Map<String, Object>> records = null;
    ResultSet rs = null;
    try {
        if (sqllog.isInfoEnabled())
            sqllog.info(_sql);

        rs = stmt.executeQuery(_sql);

        SQLWarning warning = rs.getWarnings();
        if (warning != null) {
            // TBD: find out when this happens
            log.warn("detected SQL warning: " + warning);
        }

        /* Collect meta data; calling meta inside fetches is rather expensive,
         * even though the PG JDBC adaptor also has some cache. */
        final ResultSetMetaData meta = rs.getMetaData();
        final int columnCount = meta.getColumnCount();
        final String[] colNames = new String[columnCount];
        final int[] colHashes = new int[columnCount];
        final int[] colTypes = new int[columnCount];
        for (int i = 1; i <= columnCount; i++) {
            if (_optAttrs != null)
                colNames[i - 1] = _optAttrs[i - 1].columnName();
            else
                colNames[i - 1] = meta.getColumnName(i);

            colHashes[i - 1] = colNames[i - 1].hashCode();
            colTypes[i - 1] = meta.getColumnType(i);
        }

        /* loop over results and convert them to records */
        records = new ArrayList<Map<String, Object>>(128);
        while (rs.next()) {
            EORecordMap record = new EORecordMap(colNames, colHashes);

            boolean ok = this.fillRecordMapFromResultSet(record, rs, colNames, colTypes);
            if (ok)
                records.add(record);
        }
    } catch (SQLException e) {
        /*
         * SQLState:
         *   42601 - PostgreSQL for invalid SQL, like "SELECT *" or "IN ()"
         *   42804 - PostgreSQL for
         *           IN types character varying and integer cannot be matched
         *   42P01 - PostgreSQL: relation 'notes' does not exist
         *   42703 - PostgreSQL: column "lastname" does not exist
         */
        this.lastException = e;

        /* Note: if we already fetched records, we actually return them ... */
        if (records != null && records.size() == 0) {
            records = null;
            if (log.isInfoEnabled()) {
                log.info("could not execute SQL statement (state=" + e.getSQLState() + "): " + _sql, e);
            }
            // System.err.println("STATE: " + e.getSQLState());
        } else {
            log.warn("could not execute SQL statement (state=" + e.getSQLState() + "): " + _sql, e);
        }
    } finally {
        // TODO: we might also want to close our channel if the tear down was
        // not clean
        this._releaseResources(stmt, rs);
    }

    if (sqllog.isDebugEnabled())
        sqllog.debug("  GOT RESULTS: " + records);

    /* compact array */
    if (records != null)
        records.trimToSize();
    return records;
}
From source file:org.getobjects.eoaccess.EOAdaptorChannel.java
/**
 * A primary fetch method.
 * <p>
 * Creates a PreparedStatement from the statement and the bindings of the
 * EOSQLExpression.
 * <p>
 * @param _sqlexpr - the EOSQLExpression to execute
 * @return the fetch results as a List of Maps
 */
public List<Map<String, Object>> evaluateQueryExpression(final EOSQLExpression _sqlexpr,
        final EOAttribute[] _optAttrs) {
    this.lastException = null;

    // System.err.println("\nEXEC: " + _s.statement());

    if (_sqlexpr == null) {
        log.error("evaluateQueryExpression() caller gave us no SQL ...");
        return null;
    }

    final List<Map<String, Object>> binds = _sqlexpr.bindVariableDictionaries();
    if (binds == null || binds.size() == 0)
        /* expression has no binds, perform a plain SQL query */
        return this.performSQL(_sqlexpr.statement(), _optAttrs);

    /* otherwise, create a PreparedStatement */
    final PreparedStatement stmt = this._prepareStatementWithBinds(_sqlexpr.statement(), binds);
    if (stmt == null) {
        log.error("could not create prepared statement for expr: " + _sqlexpr);
        return null;
    }

    /* perform query */
    this.lastException = null;
    List<Map<String, Object>> records = null;
    ResultSet rs = null;
    try {
        if (sqllog.isInfoEnabled())
            sqllog.info(_sqlexpr.statement());

        rs = stmt.executeQuery();

        SQLWarning warning = rs.getWarnings();
        if (warning != null) {
            // TBD: find out when this happens
            log.warn("detected SQL warning: " + warning);
        }

        /* Collect meta data; calling meta inside fetches is rather expensive,
         * even though the PG JDBC adaptor also has some cache. */
        final ResultSetMetaData meta = rs.getMetaData();
        final int columnCount = meta.getColumnCount();
        final String[] colNames = new String[columnCount];
        final int[] colHashes = new int[columnCount];
        final int[] colTypes = new int[columnCount];
        for (int i = 1; i <= columnCount; i++) {
            if (_optAttrs != null)
                colNames[i - 1] = _optAttrs[i - 1].columnName();
            else
                colNames[i - 1] = meta.getColumnName(i);

            colHashes[i - 1] = colNames[i - 1].hashCode();
            colTypes[i - 1] = meta.getColumnType(i);
        }

        /* loop over results and convert them to records */
        records = new ArrayList<Map<String, Object>>(128);
        while (rs.next()) {
            final EORecordMap record = new EORecordMap(colNames, colHashes);

            boolean ok = this.fillRecordMapFromResultSet(record, rs, colNames, colTypes);
            if (ok)
                records.add(record);
        }
    } catch (SQLException e) {
        /*
         * getSQLState()
         *   08S01 MySQL network-connect issues during the processing of a query
         *   42601 PG syntax error
         *   42703 PG column "number" does not exist
         *   22023 PG No value specified for parameter 3 (eg multiple %andQual)
         */
        this.lastException = e;

        if (records != null && records.size() == 0) {
            records = null;
            if (log.isInfoEnabled()) {
                log.info("could not execute SQL expression " + e.getSQLState() + ":\n  "
                        + _sqlexpr.statement(), e);
            }
            // System.err.println("STATE: " + e.getSQLState());
        } else {
            log.warn("could not execute SQL expression " + e.getSQLState() + ":\n  "
                    + _sqlexpr.statement(), e);
        }
    } finally {
        // TODO: we might also want to close our channel if the tear down was
        // not clean
        this._releaseResources(stmt, rs);
    }
    return records;
}
From source file:org.ensembl.healthcheck.util.DBUtils.java
public static boolean compareResultSets(ResultSet rs1, ResultSet rs2, EnsTestCase testCase, String text,
        boolean reportErrors, boolean warnNull, String singleTableName, int[] columns,
        boolean comparingSchema) {

    // quick tests first

    // Check for object equality
    if (rs1.equals(rs2)) {
        return true;
    }

    try {
        // get some information about the ResultSets
        String name1 = getShortDatabaseName(rs1.getStatement().getConnection());
        String name2 = getShortDatabaseName(rs2.getStatement().getConnection());

        // Check for same column count, names and types
        ResultSetMetaData rsmd1 = rs1.getMetaData();
        ResultSetMetaData rsmd2 = rs2.getMetaData();

        if (rsmd1.getColumnCount() != rsmd2.getColumnCount() && columns == null) {
            if (reportErrors) {
                ReportManager.problem(testCase, name1,
                        "Column counts differ " + singleTableName + " " + name1 + ": "
                                + rsmd1.getColumnCount() + " " + name2 + ": " + rsmd2.getColumnCount());
            }
            return false; // Deliberate early return for performance reasons
        }

        if (columns == null) {
            columns = new int[rsmd1.getColumnCount()];
            for (int i = 0; i < columns.length; i++) {
                columns[i] = i + 1;
            }
        }

        for (int j = 0; j < columns.length; j++) {
            int i = columns[j]; // note columns indexed from 1

            if (!((rsmd1.getColumnName(i)).equals(rsmd2.getColumnName(i)))) {
                if (reportErrors) {
                    ReportManager.problem(testCase, name1,
                            "Column names differ for " + singleTableName + " column " + i + " - " + name1
                                    + ": " + rsmd1.getColumnName(i) + " " + name2 + ": "
                                    + rsmd2.getColumnName(i));
                }
                // Deliberate early return for performance reasons
                return false;
            }
            if (rsmd1.getColumnType(i) != rsmd2.getColumnType(i)) {
                if (reportErrors) {
                    ReportManager.problem(testCase, name1,
                            "Column types differ for " + singleTableName + " column " + i + " - " + name1
                                    + ": " + rsmd1.getColumnType(i) + " " + name2 + ": "
                                    + rsmd2.getColumnType(i));
                }
                return false; // Deliberate early return for performance reasons
            }
        } // for column

        // make sure both cursors are at the start of the ResultSet
        // (default is before the start)
        rs1.beforeFirst();
        rs2.beforeFirst();

        // if quick checks didn't cause return, try comparing row-wise
        int row = 1;
        while (rs1.next()) {
            if (rs2.next()) {
                String str = name1 + " and " + name2 + text + " " + singleTableName + " with columns ";
                for (int j = 0; j < columns.length; j++) {
                    int i = columns[j]; // note columns indexed from 1
                    str += rsmd1.getColumnName(i) + " " + Utils.truncate(rs1.getString(i), 250, true) + ", ";
                    if (!compareColumns(rs1, rs2, i, warnNull)) {
                        str += " differ for values " + Utils.truncate(rs1.getString(i), 250, true) + ", "
                                + Utils.truncate(rs2.getString(i), 250, true);
                        if (reportErrors) {
                            ReportManager.problem(testCase, name1, str);
                        }
                        return false;
                    }
                }
                row++;
            } else {
                // rs1 has more rows than rs2
                if (reportErrors) {
                    ReportManager.problem(testCase, name1,
                            singleTableName + " has more rows in " + name1 + " than in " + name2);
                }
                return false;
            }
        } // while rs1

        // if both ResultSets are the same, then we should be at the end of
        // both, i.e. .next() should return false
        String extra = comparingSchema ? ". This means that there are missing columns in the table, rectify!"
                : "";
        if (rs1.next()) {
            if (reportErrors) {
                ReportManager.problem(testCase, name1, name1 + " " + singleTableName
                        + " has additional rows that are not in " + name2 + extra);
            }
            return false;
        } else if (rs2.next()) {
            if (reportErrors) {
                ReportManager.problem(testCase, name1, name2 + " " + singleTableName
                        + " has additional rows that are not in " + name1 + extra);
            }
            return false;
        }
    } catch (SQLException se) {
        throw new SqlUncheckedException("Could not compare two result sets", se);
    }
    return true;
}
From source file:io.druid.indexing.jdbc.JDBCIndexTask.java
@Override
public TaskStatus run(final TaskToolbox toolbox) throws Exception {
    log.info("Starting up!");

    startTime = DateTime.now();
    mapper = toolbox.getObjectMapper();
    status = Status.STARTING;

    if (chatHandlerProvider.isPresent()) {
        log.info("Found chat handler of class[%s]", chatHandlerProvider.get().getClass().getName());
        chatHandlerProvider.get().register(getId(), this, false);
    } else {
        log.warn("No chat handler detected");
    }

    runThread = Thread.currentThread();

    // Set up FireDepartmentMetrics
    final FireDepartment fireDepartmentForMetrics = new FireDepartment(dataSchema,
            new RealtimeIOConfig(null, null, null), null);
    fireDepartmentMetrics = fireDepartmentForMetrics.getMetrics();
    toolbox.getMonitorScheduler()
            .addMonitor(new RealtimeMetricsMonitor(ImmutableList.of(fireDepartmentForMetrics),
                    ImmutableMap.of(DruidMetrics.TASK_ID, new String[] { getId() })));

    BasicDataSource dataSource = new BasicDataSource();
    dataSource.setUsername(ioConfig.getUser());
    dataSource.setPassword(ioConfig.getPassword());
    dataSource.setUrl(ioConfig.getConnectURI());
    dataSource.setDriverClassLoader(getClass().getClassLoader());

    final String table = ioConfig.getTableName();

    if (!StringUtils.isEmpty(ioConfig.getDriverClass())) {
        dataSource.setDriverClassName(ioConfig.getDriverClass());
    }

    final Handle handle = new DBI(dataSource).open();
    try (final Appenderator appenderator0 = newAppenderator(fireDepartmentMetrics, toolbox);
            final AppenderatorDriver driver = newDriver(appenderator0, toolbox, fireDepartmentMetrics)) {
        toolbox.getDataSegmentServerAnnouncer().announce();

        appenderator = appenderator0;

        // Start up, set up initial offsets.
        final Object restoredMetadata = driver.startJob();
        if (restoredMetadata == null) {
            nextOffsets.putAll(ioConfig.getJdbcOffsets().getOffsetMaps());
        } else {
            final Map<String, Object> restoredMetadataMap = (Map) restoredMetadata;
            final JDBCOffsets restoredNextPartitions = toolbox.getObjectMapper()
                    .convertValue(restoredMetadataMap.get(METADATA_NEXT_OFFSETS), JDBCOffsets.class);
            nextOffsets.putAll(restoredNextPartitions.getOffsetMaps());

            // Sanity checks.
            if (!restoredNextPartitions.getTable().equals(ioConfig.getTableName())) {
                throw new ISE("WTF?! Restored table[%s] but expected table[%s]",
                        restoredNextPartitions.getTable(), ioConfig.getTableName());
            }

            if (!nextOffsets.equals(ioConfig.getJdbcOffsets().getOffsetMaps())) {
                throw new ISE("WTF?! Restored partitions[%s] but expected partitions[%s]", nextOffsets,
                        ioConfig.getJdbcOffsets().getOffsetMaps());
            }
        }

        // Set up sequenceNames.
        final Map<Integer, String> sequenceNames = Maps.newHashMap();
        for (Integer partitionNum : nextOffsets.keySet()) {
            sequenceNames.put(partitionNum,
                    String.format("%s_%s", ioConfig.getBaseSequenceName(), partitionNum));
        }

        // Set up committer.
        final Supplier<Committer> committerSupplier = new Supplier<Committer>() {
            @Override
            public Committer get() {
                final Map<Integer, Long> snapshot = ImmutableMap.copyOf(nextOffsets);

                return new Committer() {
                    @Override
                    public Object getMetadata() {
                        return ImmutableMap.of(METADATA_NEXT_OFFSETS,
                                new JDBCOffsets(ioConfig.getJdbcOffsets().getTable(), snapshot));
                    }

                    @Override
                    public void run() {
                        // Do nothing.
                    }
                };
            }
        };

        // Set<Integer> assignment = assignPartitionsAndSeekToNext(handle);
        // boolean stillReading = !assignment.isEmpty();

        status = Status.READING;
        try {
            // while (stillReading) {
            //   if (possiblyPause(assignment)) {
            //     The partition assignments may have changed while paused by a call to
            //     setEndOffsets() so reassign partitions upon resuming. This is safe even
            //     if the end offsets have not been modified.
            //     assignment = assignPartitionsAndSeekToNext(handle);
            //     if (assignment.isEmpty()) {
            //       log.info("All partitions have been fully read");
            //       publishOnStop = true;
            //       stopRequested = true;
            //     }
            //   }
            //   if (stopRequested) {
            //     break;
            //   }

            final String query = (ioConfig.getQuery() != null) ? ioConfig.getQuery()
                    : makeQuery(ioConfig.getColumns(), ioConfig.getJdbcOffsets());
            org.skife.jdbi.v2.Query<Map<String, Object>> dbiQuery = handle.createQuery(query);

            final ResultIterator<InputRow> rowIterator = dbiQuery.map(new ResultSetMapper<InputRow>() {
                List<String> queryColumns = (ioConfig.getColumns() == null) ? Lists.<String>newArrayList()
                        : ioConfig.getColumns();
                List<Boolean> columnIsNumeric = Lists.newArrayList();

                @Override
                public InputRow map(final int index, final ResultSet r, final StatementContext ctx)
                        throws SQLException {
                    try {
                        if (queryColumns.size() == 0) {
                            ResultSetMetaData metadata = r.getMetaData();
                            for (int idx = 1; idx <= metadata.getColumnCount(); idx++) {
                                queryColumns.add(metadata.getColumnName(idx));
                            }
                            Preconditions.checkArgument(queryColumns.size() > 0,
                                    String.format("No column in table [%s]", table));
                            verifyParserSpec(parser.getParseSpec(), queryColumns);
                        }
                        if (columnIsNumeric.size() == 0) {
                            ResultSetMetaData metadata = r.getMetaData();
                            Preconditions.checkArgument(metadata.getColumnCount() >= queryColumns.size(),
                                    String.format(
                                            "number of column names [%d] exceeds the actual number of returning column values [%d]",
                                            queryColumns.size(), metadata.getColumnCount()));

                            columnIsNumeric.add(false); // dummy to make start index to 1

                            for (int idx = 1; idx <= metadata.getColumnCount(); idx++) {
                                boolean isNumeric = false;
                                int type = metadata.getColumnType(idx);
                                switch (type) {
                                case BIGINT:
                                case DECIMAL:
                                case DOUBLE:
                                case FLOAT:
                                case INTEGER:
                                case NUMERIC:
                                case SMALLINT:
                                case TINYINT:
                                    isNumeric = true;
                                    break;
                                }
                                columnIsNumeric.add(isNumeric);
                            }
                        }
                        final Map<String, Object> columnMap = Maps.newHashMap();
                        int columnIdx = 1;
                        for (String column : queryColumns) {
                            Object objToPut = null;
                            if (table != null) {
                                objToPut = r.getObject(column);
                            } else {
                                objToPut = r.getObject(columnIdx);
                            }
                            columnMap.put(column,
                                    objToPut == null ? columnIsNumeric.get(columnIdx) : objToPut);
                            columnIdx++;
                        }
                        return parser.parse(columnMap);
                    } catch (IllegalArgumentException e) {
                        throw new SQLException(e);
                    }
                }
            }).iterator();

            org.skife.jdbi.v2.Query<Map<String, Object>> maxItemQuery = handle
                    .createQuery(makeMaxQuery(ioConfig.getJdbcOffsets()));
            long currOffset = maxItemQuery != null ? (long) maxItemQuery.list(1).get(0).get("MAX") : 0;

            while (rowIterator.hasNext()) {
                InputRow row = rowIterator.next();
                try {
                    if (!ioConfig.getMinimumMessageTime().isPresent()
                            || !ioConfig.getMinimumMessageTime().get().isAfter(row.getTimestamp())) {

                        final String sequenceName = sequenceNames.get(nextOffsets.keySet().toArray()[0]); //TODO::: check data
                        final AppenderatorDriverAddResult addResult = driver.add(row, sequenceName,
                                committerSupplier);

                        if (addResult.isOk()) {
                            // If the number of rows in the segment exceeds the threshold after adding a row,
                            // move the segment out from the active segments of AppenderatorDriver to make a
                            // new segment.
                            if (addResult.getNumRowsInSegment() > tuningConfig.getMaxRowsPerSegment()) {
                                driver.moveSegmentOut(sequenceName,
                                        ImmutableList.of(addResult.getSegmentIdentifier()));
                            }
                        } else {
                            // Failure to allocate segment puts determinism at risk, bail out to be safe.
                            // May want configurable behavior here at some point.
                            // If we allow continuing, then consider blacklisting the interval for a while
                            // to avoid constant checks.
                            throw new ISE("Could not allocate segment for row with timestamp[%s]",
                                    row.getTimestamp());
                        }
                        fireDepartmentMetrics.incrementProcessed();
                    } else {
                        fireDepartmentMetrics.incrementThrownAway();
                    }
                } catch (ParseException e) {
                    if (tuningConfig.isReportParseExceptions()) {
                        throw e;
                    } else {
                        log.debug(e, "Dropping unparseable row[%s].", row);
                        fireDepartmentMetrics.incrementUnparseable();
                    }
                }
            }
            nextOffsets.put((int) ioConfig.getJdbcOffsets().getOffsetMaps().keySet().toArray()[0],
                    currOffset);

            // if (nextOffsets.get(record.partition()).equals(endOffsets.get(record.partition()))
            //     && assignment.remove(record.partition())) {
            //   log.info("Finished reading table[%s], partition[%,d].", record.topic(), record.partition());
            //   stillReading = ioConfig.isPauseAfterRead() || !assignment.isEmpty();
            // }
            // }
        } finally {
            driver.persist(committerSupplier.get()); // persist pending data
        }

        synchronized (statusLock) {
            if (stopRequested && !publishOnStop) {
                throw new InterruptedException("Stopping without publishing");
            }

            status = Status.PUBLISHING;
        }

        final TransactionalSegmentPublisher publisher = (segments, commitMetadata) -> {
            final JDBCOffsets finalOffsets = toolbox.getObjectMapper()
                    .convertValue(((Map) commitMetadata).get(METADATA_NEXT_OFFSETS), JDBCOffsets.class);

            // Sanity check, we should only be publishing things that match our desired end state.
            //TODO::: sanity check!
            // if (!endOffsets.equals(finalOffsets.getOffsetMaps())) {
            //   throw new ISE("WTF?! Driver attempted to publish invalid metadata[%s].", commitMetadata);
            // }

            final SegmentTransactionalInsertAction action;

            if (ioConfig.isUseTransaction()) {
                action = new SegmentTransactionalInsertAction(segments,
                        new JDBCDataSourceMetadata(ioConfig.getJdbcOffsets()),
                        new JDBCDataSourceMetadata(finalOffsets) //TODO::: Check Values
                );
            } else {
                action = new SegmentTransactionalInsertAction(segments, null, null);
            }

            log.info("Publishing with isTransaction[%s].", ioConfig.isUseTransaction());

            return toolbox.getTaskActionClient().submit(action).isSuccess();
        };

        // Supervised JDBC tasks are killed by JDBCSupervisor if they are stuck during publishing
        // segments or waiting for hand off. See JDBCSupervisorIOConfig.completionTimeout.
        final SegmentsAndMetadata published = driver
                .publish(publisher, committerSupplier.get(), sequenceNames.values()).get();

        final SegmentsAndMetadata handedOff;
        if (tuningConfig.getHandoffConditionTimeout() == 0) {
            handedOff = driver.registerHandoff(published).get();
        } else {
            handedOff = driver.registerHandoff(published).get(tuningConfig.getHandoffConditionTimeout(),
                    TimeUnit.MILLISECONDS);
        }

        if (handedOff == null) {
            throw new ISE("Transaction failure publishing segments, aborting");
        } else {
            log.info("Published segments[%s] with metadata[%s].",
                    Joiner.on(", ").join(
                            Iterables.transform(handedOff.getSegments(), new Function<DataSegment, String>() {
                                @Override
                                public String apply(DataSegment input) {
                                    return input.getIdentifier();
                                }
                            })),
                    handedOff.getCommitMetadata());
        }
    } catch (InterruptedException | RejectedExecutionException e) {
        // handle the InterruptedException that gets wrapped in a RejectedExecutionException
        if (e instanceof RejectedExecutionException
                && (e.getCause() == null || !(e.getCause() instanceof InterruptedException))) {
            throw e;
        }

        // if we were interrupted because we were asked to stop, handle the exception and return
        // success, else rethrow
        if (!stopRequested) {
            Thread.currentThread().interrupt();
            throw e;
        }

        log.info("The task was asked to stop before completing");
    } finally {
        if (chatHandlerProvider.isPresent()) {
            chatHandlerProvider.get().unregister(getId());
        }
        handle.close();
    }

    toolbox.getDataSegmentServerAnnouncer().unannounce();

    //TODO::implement
    return success();
}
From source file:org.alinous.plugin.derby.DerbyDataSource.java
private void fetchWithOffset(ResultSet rs, ResultSetMetaData metaData, List<Record> retList,
        LimitOffsetClause limit, PostContext context, VariableRepository provider, AdjustWhere adjWhere,
        TypeHelper helper) throws ExecutionException, SQLException {
    ISQLStatement limitStmt = limit.getLimit();
    ISQLStatement offsetStmt = limit.getOffset();

    int nLimit = 0, nOffset = 0;
    if (limitStmt != null && limitStmt.isReady(context, provider, adjWhere)) {
        String str = limitStmt.extract(context, provider, adjWhere, null, helper);
        nLimit = Integer.parseInt(str);
    }
    if (offsetStmt != null && offsetStmt.isReady(context, provider, adjWhere)) {
        String str = offsetStmt.extract(context, provider, adjWhere, null, helper);
        nOffset = Integer.parseInt(str);
    }

    if (offsetStmt != null) {
        rs.absolute(nOffset);
    }

    int count = 0;
    while (rs.next()) {
        if (count >= nLimit) {
            break;
        }
        count++;

        int cnt = metaData.getColumnCount();
        Record rec = new Record();

        for (int i = 0; i < cnt; i++) {
            String colName = metaData.getColumnName(i + 1).toUpperCase();
            String value = rs.getString(i + 1);
            int colType = metaData.getColumnType(i + 1);

            rec.addFieldValue(colName, value, colType);
        }

        retList.add(rec);
    }
}
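This example calls getColumnName and getColumnType on every row, whereas the EOAdaptorChannel examples above cache them once because, as their comment notes, calling metadata getters inside the fetch loop is expensive. A minimal sketch of that caching pattern follows; the class and method names (CachedMetaDataFetch, fetchAll) are hypothetical and not taken from any of the source files above.

import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class CachedMetaDataFetch {
    // Cache column names and java.sql.Types codes once, then reuse them per row.
    static List<Map<String, Object>> fetchAll(ResultSet rs) throws SQLException {
        ResultSetMetaData meta = rs.getMetaData();
        int n = meta.getColumnCount();
        String[] names = new String[n];
        int[] types = new int[n];
        for (int i = 0; i < n; i++) {
            names[i] = meta.getColumnName(i + 1);
            types[i] = meta.getColumnType(i + 1); // a java.sql.Types constant
        }
        List<Map<String, Object>> rows = new ArrayList<>();
        while (rs.next()) {
            Map<String, Object> row = new HashMap<>(n * 2);
            for (int i = 0; i < n; i++) {
                // types[i] could drive per-type extraction; getObject is the generic fallback
                row.put(names[i], rs.getObject(i + 1));
            }
            rows.add(row);
        }
        return rows;
    }
}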
From source file:com.glaf.core.jdbc.QueryHelper.java
/**
 * @param conn
 *            the database connection
 * @param sqlExecutor
 *            the SQL statement and its bind parameters
 * @param start
 *            the index of the first row to fetch, counted from 0
 * @param pageSize
 *            the number of rows per page
 * @return the result rows
 */
@SuppressWarnings("unchecked")
public List<Map<String, Object>> getResultList(Connection conn, SqlExecutor sqlExecutor, int start,
        int pageSize) {
    if (!DBUtils.isLegalQuerySql(sqlExecutor.getSql())) {
        throw new RuntimeException(" SQL statement illegal ");
    }
    List<Map<String, Object>> resultList = new ArrayList<Map<String, Object>>();
    String sql = sqlExecutor.getSql();
    PreparedStatement psmt = null;
    ResultSet rs = null;
    ResultSetMetaData rsmd = null;
    boolean supportsPhysicalPage = false;
    try {
        Dialect dialect = DBConfiguration.getDatabaseDialect(conn);
        if (dialect != null && dialect.supportsPhysicalPage()) {
            supportsPhysicalPage = true;
            sql = dialect.getLimitString(sql, start, pageSize);
            logger.debug("sql=" + sqlExecutor.getSql());
            logger.debug(">>sql=" + sql);
        }
        psmt = conn.prepareStatement(sql);
        if (sqlExecutor.getParameter() != null) {
            List<Object> values = (List<Object>) sqlExecutor.getParameter();
            JdbcUtils.fillStatement(psmt, values);
            logger.debug(">>values=" + values);
        }
        rs = psmt.executeQuery();
        if (conf.getBoolean("useMyBatisResultHandler", false)) {
            resultList = this.getResults(rs);
        } else {
            rsmd = rs.getMetaData();
            int count = rsmd.getColumnCount();
            List<ColumnDefinition> columns = new ArrayList<ColumnDefinition>();
            for (int i = 1; i <= count; i++) {
                int sqlType = rsmd.getColumnType(i);
                ColumnDefinition column = new ColumnDefinition();
                column.setIndex(i);
                column.setColumnName(rsmd.getColumnName(i));
                column.setColumnLabel(rsmd.getColumnLabel(i));
                column.setJavaType(FieldType.getJavaType(sqlType));
                column.setPrecision(rsmd.getPrecision(i));
                column.setScale(rsmd.getScale(i));
                if (column.getScale() == 0 && sqlType == Types.NUMERIC) {
                    column.setJavaType("Long");
                }
                column.setName(StringTools.camelStyle(column.getColumnLabel().toLowerCase()));
                columns.add(column);
            }

            if (!supportsPhysicalPage) {
                logger.debug("---------------------skipRows:" + start);
                this.skipRows(rs, start, pageSize);
            }

            logger.debug("---------------------columns:" + columns.size());
            logger.debug("---------------------start:" + start);
            logger.debug("---------------------pageSize:" + pageSize);

            // int index = 0;
            while (rs.next()) {
                // index++;
                // logger.debug("---------------------row index:" + index);
                Map<String, Object> rowMap = new HashMap<String, Object>();
                Iterator<ColumnDefinition> iterator = columns.iterator();
                while (iterator.hasNext()) {
                    ColumnDefinition column = iterator.next();
                    String columnLabel = column.getColumnLabel();
                    String columnName = column.getColumnName();
                    if (StringUtils.isEmpty(columnName)) {
                        columnName = column.getColumnLabel();
                    }
                    columnName = columnName.toLowerCase();
                    String javaType = column.getJavaType();
                    if ("String".equals(javaType)) {
                        String value = rs.getString(column.getIndex());
                        if (value != null) {
                            value = value.trim();
                            rowMap.put(columnName, value);
                            rowMap.put(columnLabel, rowMap.get(columnName));
                        }
                    } else if ("Integer".equals(javaType)) {
                        try {
                            Integer value = rs.getInt(column.getIndex());
                            rowMap.put(columnName, value);
                            rowMap.put(columnLabel, rowMap.get(columnName));
                        } catch (Exception e) {
                            // strip currency formatting, then parse the numeric value
                            String str = rs.getString(column.getIndex());
                            logger.error("integer:" + str);
                            str = StringTools.replace(str, "$", "");
                            str = StringTools.replace(str, "", "");
                            str = StringTools.replace(str, ",", "");
                            NumberFormat fmt = NumberFormat.getInstance();
                            Number num = fmt.parse(str);
                            rowMap.put(columnName, num.intValue());
                            rowMap.put(columnLabel, rowMap.get(columnName));
                            logger.debug("parsed value: " + num.intValue());
                        }
                    } else if ("Long".equals(javaType)) {
                        try {
                            Long value = rs.getLong(column.getIndex());
                            rowMap.put(columnName, value);
                            rowMap.put(columnLabel, rowMap.get(columnName));
                        } catch (Exception e) {
                            String str = rs.getString(column.getIndex());
                            logger.error("long:" + str);
                            str = StringTools.replace(str, "$", "");
                            str = StringTools.replace(str, "", "");
                            str = StringTools.replace(str, ",", "");
                            NumberFormat fmt = NumberFormat.getInstance();
                            Number num = fmt.parse(str);
                            rowMap.put(columnName, num.longValue());
                            rowMap.put(columnLabel, rowMap.get(columnName));
                            logger.debug("parsed value: " + num.longValue());
                        }
                    } else if ("Double".equals(javaType)) {
                        try {
                            Double d = rs.getDouble(column.getIndex());
                            rowMap.put(columnName, d);
                            rowMap.put(columnLabel, rowMap.get(columnName));
                        } catch (Exception e) {
                            String str = rs.getString(column.getIndex());
                            logger.error("double:" + str);
                            str = StringTools.replace(str, "$", "");
                            str = StringTools.replace(str, "", "");
                            str = StringTools.replace(str, ",", "");
                            NumberFormat fmt = NumberFormat.getInstance();
                            Number num = fmt.parse(str);
                            rowMap.put(columnName, num.doubleValue());
                            rowMap.put(columnLabel, rowMap.get(columnName));
                            logger.debug("parsed value: " + num.doubleValue());
                        }
                    } else if ("Boolean".equals(javaType)) {
                        rowMap.put(columnName, rs.getBoolean(column.getIndex()));
                        rowMap.put(columnLabel, rowMap.get(columnName));
                    } else if ("Date".equals(javaType)) {
                        rowMap.put(columnName, rs.getTimestamp(column.getIndex()));
                        rowMap.put(columnLabel, rowMap.get(columnName));
                    } else if ("Blob".equals(javaType)) {
                        // Blob columns are skipped
                    } else {
                        Object value = rs.getObject(column.getIndex());
                        if (value != null) {
                            if (value instanceof String) {
                                value = (String) value.toString().trim();
                            }
                            rowMap.put(columnName, value);
                            rowMap.put(columnLabel, rowMap.get(columnName));
                        }
                    }
                }
                resultList.add(rowMap);
            }
        }
        logger.debug(">resultList size = " + resultList.size());
        return resultList;
    } catch (Exception ex) {
        logger.error(ex);
        ex.printStackTrace();
        throw new RuntimeException(ex);
    } finally {
        JdbcUtils.close(psmt);
        JdbcUtils.close(rs);
    }
}
From source file:swp.bibjsf.persistence.Data.java
/**
 * Retrieves the column information from <code>table</code>.
 *
 * @param table
 *            the name of the table whose columns need to be known
 * @return descriptors for each table column
 * @throws DataSourceException
 *             thrown in case of problems with the data source
 */
private ColumnDescriptor[] getColumns(String table) throws DataSourceException {
    final String query = "SELECT * from " + table;
    try {
        Connection connection = dataSource.getConnection();
        try {
            logger.debug("getColumns " + query);
            Statement stmt = connection.createStatement();
            try {
                ResultSet set = stmt.executeQuery(query);
                try {
                    final int numberOfColumns = set.getMetaData().getColumnCount();
                    ColumnDescriptor[] result = new ColumnDescriptor[numberOfColumns];
                    {
                        // get columns
                        ResultSetMetaData metaData = set.getMetaData();
                        for (int column = 1; column <= numberOfColumns; column++) {
                            result[column - 1] = new ColumnDescriptor();
                            result[column - 1].type = metaData.getColumnType(column);
                            result[column - 1].label = metaData.getColumnLabel(column);
                        }
                    }
                    return result;
                } finally {
                    set.close();
                }
            } finally {
                stmt.close();
            }
        } finally {
            connection.close();
        }
    } catch (SQLException e) {
        throw new DataSourceException(e.getLocalizedMessage());
    }
}