List of usage examples for java.sql.Statement.getResultSet()
ResultSet getResultSet() throws SQLException;
Retrieves the current result as a ResultSet object. This method should be called only once per result.
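Before the source-file examples, a minimal sketch of the usual pattern: Statement.execute() returns true when the first result is a ResultSet, and getResultSet() then retrieves it; otherwise getUpdateCount() reports the update count. The JDBC URL and query below are placeholders for illustration only, not taken from the examples that follow.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class GetResultSetExample {
    public static void main(String[] args) throws Exception {
        // "jdbc:h2:mem:test" is a placeholder URL; substitute your own driver and database.
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:test");
                Statement stmt = conn.createStatement()) {
            // execute() returns true when the first result is a ResultSet,
            // false when it is an update count or there are no results.
            boolean hasResultSet = stmt.execute("SELECT 1 AS n");
            if (hasResultSet) {
                try (ResultSet rs = stmt.getResultSet()) {
                    while (rs.next()) {
                        System.out.println(rs.getInt("n"));
                    }
                }
            } else {
                System.out.println("Update count: " + stmt.getUpdateCount());
            }
        }
    }
}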
From source file:org.hyperic.hq.plugin.sybase.SybaseMeasurementPlugin.java
private ResultSet getResultSet(Statement stmt, String col) throws SQLException {
    do {
        ResultSet rs = null;
        try {
            rs = stmt.getResultSet();
            if (rs == null)
                break;
            rs.findColumn(col);
            return rs;
        } catch (SQLException e) {
            // don't close the resultset!!!
        }
    } while (stmt.getMoreResults() == true && stmt.getUpdateCount() != -1);
    throw new SQLException();
}
From source file:com.linkedin.pinot.integration.tests.HybridClusterIntegrationTest.java
@BeforeClass
public void setUp() throws Exception {
    // Clean up
    ensureDirectoryExistsAndIsEmpty(_tmpDir);
    ensureDirectoryExistsAndIsEmpty(_segmentDir);
    ensureDirectoryExistsAndIsEmpty(_tarDir);

    // Start Zk, Kafka and Pinot
    startHybridCluster();

    // Unpack the Avro files
    TarGzCompressionUtils.unTar(
            new File(TestUtils.getFileFromResourceUrl(OfflineClusterIntegrationTest.class.getClassLoader()
                    .getResource("On_Time_On_Time_Performance_2014_100k_subset_nonulls.tar.gz"))),
            _tmpDir);
    _tmpDir.mkdirs();

    final List<File> avroFiles = getAllAvroFiles();

    File schemaFile = getSchemaFile();
    schema = Schema.fromFile(schemaFile);
    addSchema(schemaFile, schema.getSchemaName());

    final List<String> invertedIndexColumns = makeInvertedIndexColumns();
    final String sortedColumn = makeSortedColumn();

    // Create Pinot table
    addHybridTable("mytable", "DaysSinceEpoch", "daysSinceEpoch", KafkaStarterUtils.DEFAULT_ZK_STR, KAFKA_TOPIC,
            schema.getSchemaName(), TENANT_NAME, TENANT_NAME, avroFiles.get(0), sortedColumn,
            invertedIndexColumns, null);
    LOGGER.info("Running with Sorted column=" + sortedColumn + " and inverted index columns = "
            + invertedIndexColumns);

    // Create a subset of the first 8 segments (for offline) and the last 6 segments (for realtime)
    final List<File> offlineAvroFiles = getOfflineAvroFiles(avroFiles);
    final List<File> realtimeAvroFiles = getRealtimeAvroFiles(avroFiles);

    // Load data into H2
    ExecutorService executor = Executors.newCachedThreadPool();
    setupH2AndInsertAvro(avroFiles, executor);

    // Create segments from Avro data
    LOGGER.info("Creating offline segments from avro files " + offlineAvroFiles);
    buildSegmentsFromAvro(offlineAvroFiles, executor, 0, _segmentDir, _tarDir, "mytable", false, null);

    // Initialize query generator
    setupQueryGenerator(avroFiles, executor);

    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);

    // Set up a Helix spectator to count the number of segments that are uploaded and unlock the latch once 12 segments are online
    final CountDownLatch latch = new CountDownLatch(1);
    HelixManager manager = HelixManagerFactory.getZKHelixManager(getHelixClusterName(), "test_instance",
            InstanceType.SPECTATOR, ZkStarter.DEFAULT_ZK_STR);
    manager.connect();
    manager.addExternalViewChangeListener(new ExternalViewChangeListener() {
        @Override
        public void onExternalViewChange(List<ExternalView> externalViewList, NotificationContext changeContext) {
            for (ExternalView externalView : externalViewList) {
                if (externalView.getId().contains("mytable")) {
                    Set<String> partitionSet = externalView.getPartitionSet();
                    if (partitionSet.size() == offlineSegmentCount) {
                        int onlinePartitionCount = 0;
                        for (String partitionId : partitionSet) {
                            Map<String, String> partitionStateMap = externalView.getStateMap(partitionId);
                            if (partitionStateMap.containsValue("ONLINE")) {
                                onlinePartitionCount++;
                            }
                        }
                        if (onlinePartitionCount == offlineSegmentCount) {
                            System.out.println("Got " + offlineSegmentCount
                                    + " online tables, unlatching the main thread");
                            latch.countDown();
                        }
                    }
                }
            }
        }
    });

    // Upload the segments
    int i = 0;
    for (String segmentName : _tarDir.list()) {
        System.out.println("Uploading segment " + (i++) + " : " + segmentName);
        File file = new File(_tarDir, segmentName);
        FileUploadUtils.sendSegmentFile("localhost", "8998", segmentName, new FileInputStream(file),
                file.length());
    }

    // Wait for all offline segments to be online
    latch.await();

    // Load realtime data into Kafka
    LOGGER.info("Pushing data from realtime avro files " + realtimeAvroFiles);
    pushAvroIntoKafka(realtimeAvroFiles, KafkaStarterUtils.DEFAULT_KAFKA_BROKER, KAFKA_TOPIC);

    // Wait until the Pinot event count matches with the number of events in the Avro files
    int pinotRecordCount, h2RecordCount;
    long timeInFiveMinutes = System.currentTimeMillis() + 5 * 60 * 1000L;
    Statement statement = _connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
    statement.execute("select count(*) from mytable");
    ResultSet rs = statement.getResultSet();
    rs.first();
    h2RecordCount = rs.getInt(1);
    rs.close();
    waitForRecordCountToStabilizeToExpectedCount(h2RecordCount, timeInFiveMinutes);
}
From source file:org.hyperic.hq.product.JDBCMeasurementPlugin.java
private String getSqlRow(Statement stmt) throws SQLException {
    StringBuffer buf = new StringBuffer();
    do {
        ResultSet rs = stmt.getResultSet();
        if (stmt.getUpdateCount() != -1) {
            continue;
        }
        if (rs == null) {
            break;
        }
        setData(rs);
        buf.append(getOutput(rs.getMetaData()));
    } while (stmt.getMoreResults() == true);
    return buf.toString();
}
From source file:com.nextep.datadesigner.sqlgen.impl.generator.DataSetGenerator.java
private ISQLScript buildScript(IDataSet set, IDataSet valuesSet, DeltaType type) {
    final ISQLScript script = CorePlugin.getTypedObjectFactory().create(ISQLScript.class);
    script.setScriptType(ScriptType.DATA);
    script.setName(type.name().toLowerCase() + "." + set.getName()); //$NON-NLS-1$
    final StringBuilder buf = new StringBuilder(2000);
    final ISQLParser parser = SQLGenPlugin.getService(IGenerationService.class).getCurrentSQLParser();
    // final IDataService dataService = DbgmPlugin.getService(IDataService.class);
    final IStorageService storageService = DbgmPlugin.getService(IStorageService.class);
    final IStorageHandle handle = valuesSet.getStorageHandle();
    if (handle != null) {
        Connection conn = null;
        Statement stmt = null;
        ResultSet rset = null;
        try {
            conn = storageService.getLocalConnection();
            stmt = conn.createStatement();
            stmt.execute(handle.getSelectStatement());
            rset = stmt.getResultSet();
            final ResultSetMetaData md = rset.getMetaData();
            while (rset.next()) {
                final List<Object> values = new LinkedList<Object>();
                for (int i = 1; i <= md.getColumnCount(); i++) {
                    values.add(rset.getObject(i));
                }
                switch (type) {
                case INSERT:
                    buf.append(buildInsert(parser, set, values));
                    break;
                case UPDATE:
                    buf.append(buildUpdate(parser, set, values));
                    break;
                case DELETE:
                    buf.append(buildDelete(parser, set, values));
                    break;
                }
            }
        } catch (SQLException e) {
            throw new ErrorException("Data generation problem: " + e.getMessage(), e);
        } finally {
            safeClose(rset, stmt, conn);
        }
    }
    if (buf.length() == 0) {
        return null;
    } else {
        script.appendSQL(buf.toString());
        return script;
    }
}
From source file:org.apache.hadoop.chukwa.hicc.DatasetMapper.java
public void execute(String query, boolean groupBySecondColumn) {
    dataset.clear();
    try {
        // The newInstance() call is a work around for some
        // broken Java implementations
        Class.forName("com.mysql.jdbc.Driver").newInstance();
    } catch (Exception ex) {
        // handle the error
    }
    Connection conn = null;
    Statement stmt = null;
    ResultSet rs = null;
    int counter = 0;
    int size = 0;
    labels.clear();
    double max = 0.0;
    int labelsCount = 0;
    try {
        conn = DriverManager.getConnection(jdbc);
        stmt = conn.createStatement();
        //rs = stmt.executeQuery(query);
        if (stmt.execute(query)) {
            rs = stmt.getResultSet();
            ResultSetMetaData rmeta = rs.getMetaData();
            int col = rmeta.getColumnCount();
            int i = 0;
            java.util.ArrayList<Double> data = null;
            HashMap<String, Integer> xAxisMap = new HashMap<String, Integer>();
            while (rs.next()) {
                String label = rs.getString(1);
                if (!xAxisMap.containsKey(label)) {
                    xAxisMap.put(label, i);
                    labels.add(label);
                    i++;
                }
                if (groupBySecondColumn) {
                    String item = rs.getString(2);
                    // Get the data from the row using the series column
                    double current = rs.getDouble(3);
                    if (current > max) {
                        max = current;
                    }
                    data = dataset.get(item);
                    if (data == null) {
                        data = new java.util.ArrayList<Double>();
                    }
                    data.add(rs.getDouble(3));
                    dataset.put(item, data);
                } else {
                    for (int j = 2; j <= col; j++) {
                        String item = rmeta.getColumnName(j);
                        // Get the data from the row using the column name
                        double current = rs.getDouble(j);
                        if (current > max) {
                            max = current;
                        }
                        data = dataset.get(item);
                        if (data == null) {
                            data = new java.util.ArrayList<Double>();
                        }
                        data.add(rs.getDouble(j));
                        dataset.put(item, data);
                    }
                }
            }
            labelsCount = i;
        } else {
            log.error("query is not executed.");
        }
        // Now do something with the ResultSet ....
    } catch (SQLException ex) {
        // handle any errors
        log.error("SQLException: " + ex.getMessage());
        log.error("SQLState: " + ex.getSQLState());
        log.error("VendorError: " + ex.getErrorCode());
    } catch (Exception ex) {
    } finally {
        // it is a good idea to release
        // resources in a finally{} block
        // in reverse-order of their creation
        // if they are no-longer needed
        if (rs != null) {
            try {
                rs.close();
            } catch (SQLException sqlEx) {
                // ignore
            }
            rs = null;
        }
        if (stmt != null) {
            try {
                stmt.close();
            } catch (SQLException sqlEx) {
                // ignore
            }
            stmt = null;
        }
        if (conn != null) {
            try {
                conn.close();
            } catch (SQLException sqlEx) {
                // ignore
            }
            conn = null;
        }
    }
}
From source file:org.apache.camel.component.jdbc.JdbcProducer.java
/**
 * Execute sql of exchange and set results on output
 */
public void process(Exchange exchange) throws Exception {
    String sql = exchange.getIn().getBody(String.class);
    Connection conn = null;
    Statement stmt = null;
    ResultSet rs = null;
    try {
        conn = dataSource.getConnection();
        stmt = conn.createStatement();
        if (parameters != null && !parameters.isEmpty()) {
            IntrospectionSupport.setProperties(stmt, parameters);
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("Executing JDBC statement: " + sql);
        }
        if (stmt.execute(sql)) {
            rs = stmt.getResultSet();
            setResultSet(exchange, rs);
        } else {
            int updateCount = stmt.getUpdateCount();
            exchange.getOut().setHeader(JdbcConstants.JDBC_UPDATE_COUNT, updateCount);
        }
    } finally {
        try {
            if (rs != null) {
                rs.close();
            }
            if (stmt != null) {
                stmt.close();
            }
            if (conn != null) {
                conn.close();
            }
        } catch (SQLException e) {
            LOG.warn("Error closing JDBC resource: " + e, e);
        }
    }
    // populate headers
    exchange.getOut().getHeaders().putAll(exchange.getIn().getHeaders());
}
From source file:org.jumpmind.db.sql.JdbcSqlTransaction.java
public int execute(final String sql) {
    return executeCallback(new IConnectionCallback<Integer>() {
        public Integer execute(Connection con) throws SQLException {
            Statement stmt = null;
            ResultSet rs = null;
            try {
                logSql(sql, null);
                stmt = con.createStatement();
                if (stmt.execute(sql)) {
                    rs = stmt.getResultSet();
                    while (rs.next()) {
                    }
                }
                return stmt.getUpdateCount();
            } finally {
                JdbcSqlTemplate.close(rs);
                JdbcSqlTemplate.close(stmt);
            }
        }
    });
}
From source file:org.jumpmind.vaadin.ui.sqlexplorer.SqlRunner.java
protected String resultsAsText(Statement stmt, int maxResultSize) throws SQLException {
    ResultSet rs = null;
    try {
        rs = stmt.getResultSet();
        ResultSetMetaData meta = rs.getMetaData();
        int columns = meta.getColumnCount();
        int[] maxColumnSizes = new int[columns];
        for (int i = 1; i <= columns; i++) {
            String columnName = meta.getColumnName(i);
            maxColumnSizes[i - 1] = columnName.length();
        }
        int rowNumber = 1;
        List<Object[]> rows = new ArrayList<Object[]>();
        while (rs.next() && rowNumber <= maxResultSize) {
            Object[] row = new Object[columns];
            for (int i = 1; i <= columns; i++) {
                Object obj = CommonUiUtils.getObject(rs, i);
                row[i - 1] = obj;
                if (obj != null) {
                    int size = obj.toString().length();
                    if (maxColumnSizes[i - 1] < size) {
                        maxColumnSizes[i - 1] = size;
                    }
                }
            }
            rows.add(row);
            rowNumber++;
        }
        StringBuilder text = new StringBuilder();
        for (int i = 1; i <= columns; i++) {
            String columnName = meta.getColumnName(i);
            text.append(StringUtils.rightPad(columnName, maxColumnSizes[i - 1]));
            text.append(" ");
        }
        text.append("\n");
        for (int i = 1; i <= columns; i++) {
            text.append(StringUtils.rightPad("", maxColumnSizes[i - 1], "-"));
            text.append(" ");
        }
        text.append("\n");
        for (Object[] objects : rows) {
            for (int i = 0; i < objects.length; i++) {
                text.append(StringUtils.rightPad(objects[i] != null ? objects[i].toString() : "<null>",
                        maxColumnSizes[i]));
                text.append(" ");
            }
            text.append("\n");
        }
        return text.toString();
    } finally {
        JdbcSqlTemplate.close(rs);
    }
}
From source file:mom.trd.opentheso.bdd.helper.LanguageHelper.java
/**
 * Returns a Language object by identifier, or null if nothing is found
 *
 * @param ds the connection pool
 * @param idLang
 * @return Thesaurus class object
 */
public Languages_iso639 getThisLanguage(HikariDataSource ds, String idLang) {
    Connection conn;
    Statement stmt;
    ResultSet resultSet;
    Languages_iso639 language = null;
    try {
        // Get connection from pool
        conn = ds.getConnection();
        try {
            stmt = conn.createStatement();
            try {
                String query1 = "select * from languages_iso639" + " where iso639_1 = '" + idLang + "'";
                stmt.executeQuery(query1);
                resultSet = stmt.getResultSet();
                if (resultSet != null) {
                    resultSet.next();
                    language = new Languages_iso639();
                    language.setId_iso639_1(idLang);
                    language.setId_iso639_2(resultSet.getString("iso639_2"));
                    language.setFrench_name(resultSet.getString("french_name"));
                    language.setEnglish_name(resultSet.getString("english_name"));
                }
            } finally {
                stmt.close();
            }
        } finally {
            conn.close();
        }
    } catch (SQLException sqle) {
        // Log exception
        log.error("Error while adding element : " + language, sqle);
    }
    return language;
}
From source file:de.xwic.sandbox.server.installer.impl.SQLServerDatabaseHandler.java
public boolean renameScope(String oldScope, String newScope) {
    boolean exist = false;
    try {
        String sql = "SELECT NAME FROM CRM_SEC_SCOPE WHERE NAME = '" + newScope + "'";
        Statement stmt = connection.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
                ResultSet.CONCUR_UPDATABLE);
        try {
            if (stmt.execute(sql)) {
                ResultSet rs = stmt.getResultSet();
                if (rs.next()) {
                    exist = true;
                }
            }
        } finally {
            stmt.close();
        }
        // determine if the newScope does not exist?
        if (exist) {
            return false;
        }

        stmt = connection.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
        sql = "UPDATE CRM_SEC_SCOPE SET NAME = '" + newScope + "' WHERE NAME = '" + oldScope + "'";
        try {
            if (stmt.executeUpdate(sql) != 1) {
                log.warn("More than one entry with the name " + oldScope + " have been found.");
            }
        } finally {
            stmt.close();
        }

        // now update the SCOPE.propNAME scope types
        stmt = connection.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
        sql = "SELECT NAME FROM CRM_SEC_SCOPE WHERE NAME LIKE '" + oldScope + ".%'";
        try {
            if (stmt.execute(sql)) {
                ResultSet rs = stmt.getResultSet();
                while (rs.next()) {
                    String oldName = rs.getString("NAME");
                    String propertyName = oldName.substring(oldScope.length() + 1);
                    String newName = newScope + "." + propertyName;
                    //rs.updateString("NAME", newName);
                    //rs.updateRow();
                    Statement stmtUpdate = connection.createStatement();
                    try {
                        String sqlUpdate = "UPDATE CRM_SEC_SCOPE SET NAME = '" + newName + "' WHERE NAME='"
                                + oldName + "'";
                        int num = stmtUpdate.executeUpdate(sqlUpdate);
                        if (num == 0) {
                            log.warn("Warning: RENAME FAILED FROM " + oldName + " TO " + newName);
                        }
                    } finally {
                        stmtUpdate.close();
                    }
                }
            }
        } finally {
            stmt.close();
        }
        // else, rename the scope + all dependent scopes
        // that include the property names
        // oldScopeName + ".%"
        return true;
    } catch (Exception e) {
        // the update process should not be aborted when this happens, so
        // its just a warning
        log.warn("Error renaming '" + oldScope + "', into '" + newScope + "'", e);
        return false;
    }
}