Usage examples for java.sql.ResultSet.getStatement()
Statement getStatement() throws SQLException;
Returns the Statement object that produced this ResultSet object, or null if the result set was generated some other way.
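Before the project examples, a minimal standalone sketch of the typical use: once the rows have been consumed, getStatement() hands back the Statement that produced the ResultSet so both can be closed together. The EXAMPLE_TABLE query and the printNames method below are placeholders for illustration, not part of any of the projects listed on this page.

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class GetStatementExample {

    // Reads all rows, then closes the ResultSet and the Statement that produced it.
    // The Connection is assumed to come from your own DataSource or DriverManager setup.
    public static void printNames(Connection connection) throws SQLException {
        ResultSet rs = connection.createStatement().executeQuery("SELECT name FROM EXAMPLE_TABLE");
        try {
            while (rs.next()) {
                System.out.println(rs.getString("name"));
            }
        } finally {
            Statement statement = rs.getStatement(); // the Statement that produced this ResultSet
            rs.close();
            if (statement != null) {                 // null if the ResultSet was not produced by a Statement
                statement.close();
            }
        }
    }
}

Closing the ResultSet first and the Statement afterwards mirrors the cleanup blocks in the examples below; the null check matters because getStatement() returns null when the ResultSet was produced some other way, for instance by a DatabaseMetaData call.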
From source file:org.executequery.gui.resultset.ResultSetTableModel.java
public void createTable(ResultSet resultSet) {

    if (!isOpenAndValid(resultSet)) {
        clearData();
        return;
    }

    try {
        resetMetaData();
        ResultSetMetaData rsmd = resultSet.getMetaData();

        columnHeaders.clear();
        visibleColumnHeaders.clear();
        tableData.clear();

        int zeroBaseIndex = 0;
        int count = rsmd.getColumnCount();

        for (int i = 1; i <= count; i++) {
            zeroBaseIndex = i - 1;
            columnHeaders.add(new ResultSetColumnHeader(zeroBaseIndex,
                    rsmd.getColumnLabel(i),
                    rsmd.getColumnName(i),
                    rsmd.getColumnType(i),
                    rsmd.getColumnTypeName(i)));
        }

        int recordCount = 0;
        interrupted = false;

        if (holdMetaData) {
            setMetaDataVectors(rsmd);
        }

        List<RecordDataItem> rowData;
        long time = System.currentTimeMillis();

        while (resultSet.next()) {

            if (interrupted || Thread.interrupted()) {
                throw new InterruptedException();
            }

            recordCount++;
            rowData = new ArrayList<RecordDataItem>(count);

            for (int i = 1; i <= count; i++) {
                zeroBaseIndex = i - 1;

                ResultSetColumnHeader header = columnHeaders.get(zeroBaseIndex);
                RecordDataItem value = recordDataItemFactory.create(header);

                try {
                    int dataType = header.getDataType();
                    switch (dataType) {

                        // some drivers (informix for example)
                        // was noticed to return the hashcode from
                        // getObject for -1 data types (eg. longvarchar).
                        // force string for these - others stick with
                        // getObject() for default value formatting
                        case Types.CHAR:
                        case Types.VARCHAR:
                            value.setValue(resultSet.getString(i));
                            break;
                        case Types.DATE:
                            value.setValue(resultSet.getDate(i));
                            break;
                        case Types.TIME:
                            value.setValue(resultSet.getTime(i));
                            break;
                        case Types.TIMESTAMP:
                            value.setValue(resultSet.getTimestamp(i));
                            break;
                        case Types.LONGVARCHAR:
                        case Types.CLOB:
                            value.setValue(resultSet.getClob(i));
                            break;
                        case Types.LONGVARBINARY:
                        case Types.VARBINARY:
                        case Types.BINARY:
                            value.setValue(resultSet.getBytes(i));
                            break;
                        case Types.BLOB:
                            value.setValue(resultSet.getBlob(i));
                            break;
                        case Types.BIT:
                        case Types.TINYINT:
                        case Types.SMALLINT:
                        case Types.INTEGER:
                        case Types.BIGINT:
                        case Types.FLOAT:
                        case Types.REAL:
                        case Types.DOUBLE:
                        case Types.NUMERIC:
                        case Types.DECIMAL:
                        case Types.NULL:
                        case Types.OTHER:
                        case Types.JAVA_OBJECT:
                        case Types.DISTINCT:
                        case Types.STRUCT:
                        case Types.ARRAY:
                        case Types.REF:
                        case Types.DATALINK:
                        case Types.BOOLEAN:
                        case Types.ROWID:
                        case Types.NCHAR:
                        case Types.NVARCHAR:
                        case Types.LONGNVARCHAR:
                        case Types.NCLOB:
                        case Types.SQLXML:
                            // use getObject for all other known types
                            value.setValue(resultSet.getObject(i));
                            break;
                        default:
                            // otherwise try as string
                            asStringOrObject(value, resultSet, i);
                            break;
                    }

                } catch (Exception e) {
                    try {
                        // ... and on dump, resort to string
                        value.setValue(resultSet.getString(i));
                    } catch (SQLException sqlException) {
                        // catch-all SQLException - yes, this is hideous
                        // noticed with invalid date formatted values in mysql
                        value.setValue("<Error - " + sqlException.getMessage() + ">");
                    }
                }

                if (resultSet.wasNull()) {
                    value.setNull();
                }

                rowData.add(value);
            }

            tableData.add(rowData);

            if (recordCount == maxRecords) {
                break;
            }
        }

        if (Log.isTraceEnabled()) {
            Log.trace("Finished populating table model - " + recordCount + " rows - [ "
                    + MiscUtils.formatDuration(System.currentTimeMillis() - time) + "]");
        }

        fireTableStructureChanged();

    } catch (SQLException e) {
        System.err.println("SQL error populating table model at: " + e.getMessage());
        Log.debug("Table model error - " + e.getMessage(), e);

    } catch (Exception e) {
        if (e instanceof InterruptedException) {
            Log.debug("ResultSet generation interrupted.", e);
        } else {
            String message = e.getMessage();
            if (StringUtils.isBlank(message)) {
                System.err.println("Exception populating table model.");
            } else {
                System.err.println("Exception populating table model at: " + message);
            }
            Log.debug("Table model error - ", e);
        }

    } finally {
        if (resultSet != null) {
            try {
                resultSet.close();
                Statement statement = resultSet.getStatement();
                if (statement != null) {
                    statement.close();
                }
            } catch (SQLException e) {
            }
        }
    }
}
From source file:org.fao.geonet.arcgis.ArcSDEJdbcConnection.java
@Override
public Map<String, String> retrieveMetadata(AtomicBoolean cancelMonitor, String arcSDEVersion) throws Exception {
    Map<String, String> results = new HashMap<>();

    ArcSDEVersionFactory arcSDEVersionFactory = new ArcSDEVersionFactory();
    String metadataTable = arcSDEVersionFactory.getTableName(arcSDEVersion);
    String columnName = arcSDEVersionFactory.getMetadataColumnName(arcSDEVersion);

    String sqlQuery = "SELECT " + columnName + ", UUID FROM " + metadataTable;

    getJdbcTemplate().query(sqlQuery, new RowCallbackHandler() {
        @Override
        public void processRow(ResultSet rs) throws SQLException {
            // Cancel processing
            if (cancelMonitor.get()) {
                Log.warning(ARCSDE_LOG_MODULE_NAME, "Cancelling metadata retrieve using "
                        + "ArcSDE connection (via JDBC)");
                rs.getStatement().cancel();
                results.clear();
            }

            String document = "";
            int colId = rs.findColumn(columnName);
            int colIdUuid = rs.findColumn("UUID");

            // very simple type check:
            if (rs.getObject(colId) != null) {
                if (rs.getMetaData().getColumnType(colId) == Types.BLOB) {
                    Blob blob = rs.getBlob(columnName);
                    byte[] bdata = blob.getBytes(1, (int) blob.length());
                    document = new String(bdata);
                } else if (rs.getMetaData().getColumnType(colId) == Types.LONGVARBINARY) {
                    byte[] byteData = rs.getBytes(colId);
                    document = new String(byteData);
                } else if (rs.getMetaData().getColumnType(colId) == Types.LONGNVARCHAR
                        || rs.getMetaData().getColumnType(colId) == Types.LONGVARCHAR
                        || rs.getMetaData().getColumnType(colId) == Types.VARCHAR
                        || rs.getMetaData().getColumnType(colId) == Types.SQLXML) {
                    document = rs.getString(colId);
                } else {
                    throw new SQLException("Trying to harvest from a column with an invalid datatype: "
                            + rs.getMetaData().getColumnTypeName(colId));
                }

                String uuid = rs.getString(colIdUuid);
                results.put(uuid, document);
            }
        }
    });

    Log.info(ARCSDE_LOG_MODULE_NAME, "Finished retrieving metadata, found: #" + results.size()
            + " metadata records");

    return results;
}
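The cancellation idea in the example above can be reduced to plain JDBC: a consumer that only receives the ResultSet can still abort the running query through getStatement().cancel(). A minimal sketch under stated assumptions - the shouldCancel flag and the CancelWhileReading class are hypothetical, and the driver must support Statement.cancel():

import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.concurrent.atomic.AtomicBoolean;

public final class CancelWhileReading {

    // Hypothetical flag flipped by another thread (for example, a "Cancel" button handler).
    private final AtomicBoolean shouldCancel = new AtomicBoolean(false);

    // Processes rows until told to stop; the ResultSet is the only handle this method was given.
    int consume(ResultSet rs) throws SQLException {
        int rows = 0;
        while (rs.next()) {
            if (shouldCancel.get()) {
                rs.getStatement().cancel(); // ask the driver to abort the running statement
                break;
            }
            rows++;
        }
        return rows;
    }
}

Note that Statement.cancel() is a request: whether the query stops promptly depends on the driver and the database.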
From source file:org.forgerock.openidm.repo.jdbc.impl.GenericTableHandler.java
@Override
public void update(String fullId, String type, String localId, String rev, Map<String, Object> obj,
        Connection connection) throws SQLException, IOException, PreconditionFailedException,
        NotFoundException, InternalServerErrorException {
    logger.debug("Update with fullid {}", fullId);

    int revInt = Integer.parseInt(rev);
    ++revInt;
    String newRev = Integer.toString(revInt);
    obj.put("_rev", newRev); // Save the rev in the object, and return the changed rev from the create.

    ResultSet rs = null;
    PreparedStatement updateStatement = null;
    PreparedStatement deletePropStatement = null;
    try {
        rs = readForUpdate(fullId, type, localId, connection);
        String existingRev = rs.getString("rev");
        long dbId = rs.getLong("id");
        long objectTypeDbId = rs.getLong("objecttypes_id");
        logger.debug("Update existing object {} rev: {} db id: {}, object type db id: {}", fullId,
                existingRev, dbId, objectTypeDbId);

        if (!existingRev.equals(rev)) {
            throw new PreconditionFailedException("Update rejected as current Object revision " + existingRev
                    + " is different than expected by caller (" + rev
                    + "), the object has changed since retrieval.");
        }
        updateStatement = getPreparedStatement(connection, QueryDefinition.UPDATEQUERYSTR);
        deletePropStatement = getPreparedStatement(connection, QueryDefinition.PROPDELETEQUERYSTR);

        // Support changing object identifier
        String newLocalId = (String) obj.get("_id");
        if (newLocalId != null && !localId.equals(newLocalId)) {
            logger.debug("Object identifier is changing from " + localId + " to " + newLocalId);
        } else {
            newLocalId = localId; // If it hasn't changed, use the existing ID
            obj.put("_id", newLocalId); // Ensure the ID is saved in the object
        }

        String objString = mapper.writeValueAsString(obj);
        logger.trace("Populating prepared statement {} for {} {} {} {} {}", updateStatement, fullId,
                newLocalId, newRev, objString, dbId);
        updateStatement.setString(1, newLocalId);
        updateStatement.setString(2, newRev);
        updateStatement.setString(3, objString);
        updateStatement.setLong(4, dbId);

        logger.debug("Update statement: {}", updateStatement);
        int updateCount = updateStatement.executeUpdate();
        logger.trace("Updated rows: {} for {}", updateCount, fullId);
        if (updateCount != 1) {
            throw new InternalServerErrorException(
                    "Update execution did not result in updating 1 row as expected. Updated rows: "
                            + updateCount);
        }

        JsonValue jv = new JsonValue(obj);
        // TODO: only update what changed?
        logger.trace("Populating prepared statement {} for {} {} {}", deletePropStatement, fullId, type,
                localId);
        deletePropStatement.setString(1, type);
        deletePropStatement.setString(2, localId);
        logger.debug("Update properties del statement: {}", deletePropStatement);
        int deleteCount = deletePropStatement.executeUpdate();
        logger.trace("Deleted child rows: {} for: {}", deleteCount, fullId);

        writeValueProperties(fullId, dbId, localId, jv, connection);
    } finally {
        if (rs != null) {
            // Ensure associated statement also is closed
            Statement rsStatement = rs.getStatement();
            CleanupHelper.loggedClose(rs);
            CleanupHelper.loggedClose(rsStatement);
        }
        CleanupHelper.loggedClose(updateStatement);
        CleanupHelper.loggedClose(deletePropStatement);
    }
}
From source file:org.forgerock.openidm.repo.jdbc.impl.GenericTableHandler.java
/**
 * @see org.forgerock.openidm.repo.jdbc.impl.GenericTableHandler#delete(java.lang.String, java.lang.String,
 *      java.lang.String, java.lang.String, java.sql.Connection)
 */
@Override
public void delete(String fullId, String type, String localId, String rev, Connection connection)
        throws PreconditionFailedException, InternalServerErrorException, NotFoundException, SQLException,
        IOException {
    logger.debug("Delete with fullid {}", fullId);

    // First check if the revision matches and select it for UPDATE
    ResultSet existing = null;
    PreparedStatement deleteStatement = null;
    try {
        try {
            existing = readForUpdate(fullId, type, localId, connection);
        } catch (NotFoundException ex) {
            throw new NotFoundException("Object does not exist for delete on: " + fullId);
        }
        String existingRev = existing.getString("rev");
        if (!"*".equals(rev) && !rev.equals(existingRev)) {
            throw new PreconditionFailedException("Delete rejected as current Object revision " + existingRev
                    + " is different than " + "expected by caller " + rev
                    + ", the object has changed since retrieval.");
        }

        // Proceed with the valid delete
        deleteStatement = getPreparedStatement(connection, QueryDefinition.DELETEQUERYSTR);
        logger.trace("Populating prepared statement {} for {} {} {} {}", deleteStatement, fullId, type,
                localId, rev);

        // Rely on ON DELETE CASCADE for connected object properties to be deleted
        deleteStatement.setString(1, type);
        deleteStatement.setString(2, localId);
        deleteStatement.setString(3, rev);
        logger.debug("Delete statement: {}", deleteStatement);

        int deletedRows = deleteStatement.executeUpdate();
        logger.trace("Deleted {} rows for id: {}", deletedRows, localId);
        if (deletedRows < 1) {
            throw new InternalServerErrorException("Deleting object for " + fullId + " failed, DB reported "
                    + deletedRows + " rows deleted");
        } else {
            logger.debug("delete for id succeeded: {} revision: {}", localId, rev);
        }
    } finally {
        if (existing != null) {
            // Ensure associated statement also is closed
            Statement existingStatement = existing.getStatement();
            CleanupHelper.loggedClose(existing);
            CleanupHelper.loggedClose(existingStatement);
        }
        CleanupHelper.loggedClose(deleteStatement);
    }
}
From source file:org.forgerock.openidm.repo.jdbc.impl.MappedTableHandler.java
/**
 * @see org.forgerock.openidm.repo.jdbc.TableHandler#update(java.lang.String, java.lang.String,
 *      java.lang.String, java.lang.String, java.util.Map, java.sql.Connection)
 */
@Override
public void update(String fullId, String type, String localId, String rev, Map<String, Object> obj,
        Connection connection) throws SQLException, IOException, PreconditionFailedException,
        NotFoundException, InternalServerErrorException {
    logger.debug("Update with fullid {}", fullId);

    int revInt = Integer.parseInt(rev);
    ++revInt;
    String newRev = Integer.toString(revInt);
    obj.put("_rev", newRev); // Save the rev in the object, and return the changed rev from the create.

    ResultSet rs = null;
    PreparedStatement updateStatement = null;
    try {
        rs = readForUpdate(fullId, type, localId, connection);
        String existingRev = explicitMapping.getRev(rs);
        logger.debug("Update existing object {} rev: {} ", fullId, existingRev);

        if (!existingRev.equals(rev)) {
            throw new PreconditionFailedException("Update rejected as current Object revision " + existingRev
                    + " is different than expected by caller (" + rev
                    + "), the object has changed since retrieval.");
        }
        updateStatement = queries.getPreparedStatement(connection, updateQueryStr);

        // Support changing object identifier
        String newLocalId = (String) obj.get("_id");
        if (newLocalId != null && !localId.equals(newLocalId)) {
            logger.debug("Object identifier is changing from " + localId + " to " + newLocalId);
        } else {
            newLocalId = localId; // If it hasn't changed, use the existing ID
            obj.put("_id", newLocalId); // Ensure the ID is saved in the object
        }

        JsonValue objVal = new JsonValue(obj);
        logger.trace("Populating prepared statement {} for {} {} {}", updateStatement, fullId, newLocalId,
                newRev);
        int nextCol = populatePrepStatementColumns(updateStatement, objVal, tokenReplacementPropPointers);
        updateStatement.setString(nextCol, localId);

        logger.debug("Update statement: {}", updateStatement);
        int updateCount = updateStatement.executeUpdate();
        logger.trace("Updated rows: {} for {}", updateCount, fullId);
        if (updateCount != 1) {
            throw new InternalServerErrorException(
                    "Update execution did not result in updating 1 row as expected. Updated rows: "
                            + updateCount);
        }
    } finally {
        if (rs != null) {
            // Ensure associated statement also is closed
            Statement rsStatement = rs.getStatement();
            CleanupHelper.loggedClose(rs);
            CleanupHelper.loggedClose(rsStatement);
        }
        CleanupHelper.loggedClose(updateStatement);
    }
}
From source file:org.forgerock.openidm.repo.jdbc.impl.MappedTableHandler.java
/**
 * @see org.forgerock.openidm.repo.jdbc.TableHandler#delete(java.lang.String, java.lang.String,
 *      java.lang.String, java.lang.String, java.sql.Connection)
 */
@Override
public void delete(String fullId, String type, String localId, String rev, Connection connection)
        throws PreconditionFailedException, InternalServerErrorException, NotFoundException, SQLException,
        IOException {
    logger.debug("Delete with fullid {}", fullId);

    // First check if the revision matches and select it for UPDATE
    ResultSet existing = null;
    PreparedStatement deleteStatement = null;
    try {
        try {
            existing = readForUpdate(fullId, type, localId, connection);
        } catch (NotFoundException ex) {
            throw new NotFoundException("Object does not exist for delete on: " + fullId);
        }
        String existingRev = explicitMapping.getRev(existing);
        if (!"*".equals(rev) && !rev.equals(existingRev)) {
            throw new PreconditionFailedException("Delete rejected as current Object revision " + existingRev
                    + " is different than " + "expected by caller " + rev
                    + ", the object has changed since retrieval.");
        }

        // Proceed with the valid delete
        deleteStatement = queries.getPreparedStatement(connection, deleteQueryStr);
        logger.trace("Populating prepared statement {} for {} {} {} {}", deleteStatement, fullId, type,
                localId, rev);
        deleteStatement.setString(1, localId);
        deleteStatement.setString(2, rev);
        logger.debug("Delete statement: {}", deleteStatement);

        int deletedRows = deleteStatement.executeUpdate();
        logger.trace("Deleted {} rows for id: {}", deletedRows, localId);
        if (deletedRows < 1) {
            throw new InternalServerErrorException("Deleting object for " + fullId + " failed, DB reported "
                    + deletedRows + " rows deleted");
        } else {
            logger.debug("delete for id succeeded: {} revision: {}", localId, rev);
        }
    } finally {
        if (existing != null) {
            // Ensure associated statement also is closed
            Statement existingStatement = existing.getStatement();
            CleanupHelper.loggedClose(existing);
            CleanupHelper.loggedClose(existingStatement);
        }
        CleanupHelper.loggedClose(deleteStatement);
    }
}
From source file:org.forgerock.openidm.repo.jdbc.impl.MSSQLTableHandler.java
@Override
public void update(String fullId, String type, String localId, String rev, Map<String, Object> obj,
        Connection connection) throws SQLException, IOException,
        org.forgerock.json.resource.PreconditionFailedException,
        org.forgerock.json.resource.NotFoundException,
        org.forgerock.json.resource.InternalServerErrorException {
    logger.debug("Update with fullid {}", fullId);

    int revInt = Integer.parseInt(rev);
    ++revInt;
    String newRev = Integer.toString(revInt);
    obj.put("_rev", newRev); // Save the rev in the object, and return the changed rev from the create.

    ResultSet rs = null;
    PreparedStatement updateStatement = null;
    PreparedStatement deletePropStatement = null;
    try {
        rs = readForUpdate(fullId, type, localId, connection);
        String existingRev = rs.getString("rev");
        long dbId = rs.getLong("id");
        long objectTypeDbId = rs.getLong("objecttypes_id");
        logger.debug("Update existing object {} rev: {} db id: {}, object type db id: {}",
                new Object[] { fullId, existingRev, dbId, objectTypeDbId });

        if (!existingRev.equals(rev)) {
            throw new org.forgerock.json.resource.PreconditionFailedException(
                    "Update rejected as current Object revision " + existingRev
                            + " is different than expected by caller (" + rev
                            + "), the object has changed since retrieval.");
        }
        updateStatement = getPreparedStatement(connection, QueryDefinition.UPDATEQUERYSTR);
        deletePropStatement = getPreparedStatement(connection, QueryDefinition.PROPDELETEQUERYSTR);

        // Support changing object identifier
        String newLocalId = (String) obj.get("_id");
        if (newLocalId != null && !localId.equals(newLocalId)) {
            logger.debug("Object identifier is changing from " + localId + " to " + newLocalId);
        } else {
            newLocalId = localId; // If it hasn't changed, use the existing ID
            obj.put("_id", newLocalId); // Ensure the ID is saved in the object
        }

        String objString = mapper.writeValueAsString(obj);
        logger.trace("Populating prepared statement {} for {} {} {} {} {} {}",
                new Object[] { updateStatement, fullId, newLocalId, newRev, objString, dbId, existingRev });
        updateStatement.setString(1, newLocalId);
        updateStatement.setString(2, newRev);
        updateStatement.setString(3, objString);
        updateStatement.setLong(4, dbId);
        updateStatement.setString(5, existingRev);

        logger.debug("Update statement: {}", updateStatement);
        int updateCount = updateStatement.executeUpdate();
        logger.trace("Updated rows: {} for {}", updateCount, fullId);
        if (updateCount == 0) {
            throw new org.forgerock.json.resource.PreconditionFailedException(
                    "Update rejected as current Object revision " + existingRev
                            + ", has changed since retrieval.");
        } else if (updateCount > 1) {
            throw new org.forgerock.json.resource.InternalServerErrorException(
                    "Update execution did not result in updating 1 row as expected. Updated rows: "
                            + updateCount);
        }

        JsonValue jv = new JsonValue(obj);
        // TODO: only update what changed?
        logger.trace("Populating prepared statement {} for {} {} {}",
                new Object[] { deletePropStatement, fullId, type, localId });
        deletePropStatement.setString(1, type);
        deletePropStatement.setString(2, localId);
        logger.debug("Update properties del statement: {}", deletePropStatement);
        int deleteCount = deletePropStatement.executeUpdate();
        logger.trace("Deleted child rows: {} for: {}", deleteCount, fullId);

        writeValueProperties(fullId, dbId, localId, jv, connection);
    } finally {
        if (rs != null) {
            // Ensure associated statement also is closed
            Statement rsStatement = rs.getStatement();
            CleanupHelper.loggedClose(rs);
            CleanupHelper.loggedClose(rsStatement);
        }
        CleanupHelper.loggedClose(updateStatement);
        CleanupHelper.loggedClose(deletePropStatement);
    }
}
From source file:org.freebxml.omar.server.persistence.rdb.RegistryObjectDAO.java
/**
 * Get a HashMap with registry object id as key and owner id as value
 */
public HashMap getOwnersMap(List ids) throws RegistryException {
    Statement stmt = null;
    List resultSets = null;
    HashMap ownersMap = new HashMap();

    final String prefixPred = "SELECT ao.id, ae.user_ FROM AuditableEvent ae, AffectedObject ao WHERE ao.eventId = ae.id";
    final String suffixPred = " AND (ae.eventType = '" + BindingUtility.CANONICAL_EVENT_TYPE_ID_Created
            + "' OR ae.eventType = '" + BindingUtility.CANONICAL_EVENT_TYPE_ID_Versioned
            + "' OR ae.eventType = '" + BindingUtility.CANONICAL_EVENT_TYPE_ID_Relocated
            + "') ORDER BY ae.timeStamp_ ASC ";

    if (ids.size() == 0) {
        return ownersMap;
    }

    try {
        if (ids.size() == 1) {
            // Optimization for 1 term case
            stmt = context.getConnection().createStatement();

            StringBuffer query = new StringBuffer(prefixPred);
            query.append(" AND ao.id = '").append(ids.get(0)).append("'");
            query.append(suffixPred);
            log.trace("SQL = " + query.toString()); // HIEOS/BHT: (DEBUG)

            ResultSet rs = stmt.executeQuery(query.toString());
            while (rs.next()) {
                ownersMap.put(rs.getString(1), rs.getString(2));
            }
        } else {
            // This will handle unlimited terms using buffered Selects
            StringBuffer query = new StringBuffer(prefixPred);
            query.append(" AND ao.id IN ( $InClauseTerms ) ");
            query.append(suffixPred);

            resultSets = executeBufferedSelectWithINClause(query.toString(), ids, inClauseTermLimit);

            Iterator resultsSetsIter = resultSets.iterator();
            while (resultsSetsIter.hasNext()) {
                ResultSet rs = (ResultSet) resultsSetsIter.next();
                while (rs.next()) {
                    ownersMap.put(rs.getString(1), rs.getString(2));
                }
            }
        }

        return ownersMap;
    } catch (SQLException e) {
        log.error(ServerResourceBundle.getInstance().getString("message.CaughtException"), e);
        throw new RegistryException(e);
    } finally {
        if (stmt != null) {
            closeStatement(stmt);
        }
        if (resultSets != null) {
            Iterator resultsSetsIter = resultSets.iterator();
            while (resultsSetsIter.hasNext()) {
                try {
                    ResultSet rs = (ResultSet) resultsSetsIter.next();
                    Statement stmt2 = rs.getStatement();
                    closeStatement(stmt2);
                } catch (SQLException e) {
                    log.error(e, e);
                }
            }
        }
    }
}
From source file:org.kawanfw.sql.servlet.sql.PostgreSqlUtil.java
/**
 * Extract the Large Object Input Stream from PostgreSQL
 *
 * @param resultSet
 *            the Result Set to extract the blob from
 * @param columnIndex
 *            the index of column
 * @return the Large Object Input Stream from PostgreSQL
 * @throws SQLException
 */
public static InputStream getPostgreSqlnputStream(ResultSet resultSet, int columnIndex) throws SQLException {
    InputStream in;

    Statement statement = resultSet.getStatement();
    Connection conn = statement.getConnection();

    // Get the Large Object Manager to perform operations with
    LargeObjectManager lobj = ((org.postgresql.PGConnection) conn).getLargeObjectAPI();

    long oid = resultSet.getLong(columnIndex);
    LargeObject obj = lobj.open(oid, LargeObjectManager.READ);
    in = obj.getInputStream();
    return in;
}
From source file:org.kawanfw.sql.servlet.sql.ResultSetWriter.java
/**
 * Returns the database product name of the engine behind the result set
 *
 * @param resultSet
 *            the result set in use
 * @return the database product name, or "unknown" if the result set has no underlying statement
 * @throws SQLException
 */
private String getDatabaseProductName(ResultSet resultSet) throws SQLException {

    Statement statement = resultSet.getStatement();

    // happens on Metadata requests, we don't care about the result:
    if (statement == null) {
        return "unknown";
    } else {
        Connection connection = statement.getConnection();
        return new SqlUtil(connection).getDatabaseProductName();
    }
}