List of usage examples for java.sql ResultSet CONCUR_READ_ONLY
int CONCUR_READ_ONLY
The constant indicating the concurrency mode for a ResultSet object that may NOT be updated.
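In practice, CONCUR_READ_ONLY is passed together with a result-set type when creating a Statement. A minimal, self-contained sketch (the JDBC URL, table, and column names here are hypothetical):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ReadOnlyQueryExample {
    public static void main(String[] args) throws Exception {
        try (Connection cnx = DriverManager.getConnection("jdbc:h2:mem:demo");
             Statement stmt = cnx.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
             ResultSet rs = stmt.executeQuery("SELECT id, name FROM example_table")) {
            while (rs.next()) {
                // Rows can be read, but this ResultSet cannot be used to update them.
                System.out.println(rs.getLong("id") + " " + rs.getString("name"));
            }
        }
    }
}

The examples below are real-world usages of this constant.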
From source file:net.sourceforge.msscodefactory.v1_10.MSSBamPg8.MSSBamPg8AddressTagTable.java

public MSSBamAddressTagBuff readBuff(MSSBamAuthorization Authorization, MSSBamAddressTagPKey PKey) {
    final String S_ProcName = "readBuff";
    if (!schema.isTransactionOpen()) {
        throw CFLib.getDefaultExceptionFactory().newUsageException(getClass(), S_ProcName,
                "Transaction not open");
    }
    try {
        Connection cnx = schema.getCnx();
        long AddressId = PKey.getRequiredAddressId();
        long TagId = PKey.getRequiredTagId();
        String sql = S_sqlSelectAddressTagBuff + "WHERE " + "adtg.AddressId = " + Long.toString(AddressId)
                + " " + "AND " + "adtg.TagId = " + Long.toString(TagId) + " ";
        Statement stmt = cnx.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
        ResultSet resultSet = stmt.executeQuery(sql);
        if (resultSet.next()) {
            MSSBamAddressTagBuff buff = unpackAddressTagResultSetToBuff(resultSet);
            if (resultSet.next()) {
                resultSet.last();
                throw CFLib.getDefaultExceptionFactory().newRuntimeException(getClass(), S_ProcName,
                        "Did not expect multi-buff response, " + resultSet.getRow() + " rows selected");
            }
            return (buff);
        } else {
            return (null);
        }
    } catch (SQLException e) {
        throw CFLib.getDefaultExceptionFactory().newDbException(getClass(), S_ProcName, e);
    }
}
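The lookup above splices key values directly into the SQL string. A hedged sketch of the same read-only query using bind parameters instead (S_sqlSelectAddressTagBuff, cnx, and the column names are reused from the example above; the rest is standard JDBC, not this project's API):

// A sketch, not the project's method: same lookup with bound parameters.
PreparedStatement stmt = cnx.prepareStatement(
        S_sqlSelectAddressTagBuff + "WHERE adtg.AddressId = ? AND adtg.TagId = ?",
        ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
stmt.setLong(1, AddressId); // values are bound, not concatenated into the SQL text
stmt.setLong(2, TagId);
ResultSet resultSet = stmt.executeQuery();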
From source file:com.linkedin.pinot.integration.tests.HybridClusterIntegrationTest.java
@BeforeClass
public void setUp() throws Exception {
    // Clean up
    ensureDirectoryExistsAndIsEmpty(_tmpDir);
    ensureDirectoryExistsAndIsEmpty(_segmentDir);
    ensureDirectoryExistsAndIsEmpty(_tarDir);

    // Start Zk, Kafka and Pinot
    startHybridCluster();

    // Unpack the Avro files
    TarGzCompressionUtils.unTar(
            new File(TestUtils.getFileFromResourceUrl(OfflineClusterIntegrationTest.class.getClassLoader()
                    .getResource("On_Time_On_Time_Performance_2014_100k_subset_nonulls.tar.gz"))),
            _tmpDir);
    _tmpDir.mkdirs();

    final List<File> avroFiles = getAllAvroFiles();
    File schemaFile = getSchemaFile();
    schema = Schema.fromFile(schemaFile);
    addSchema(schemaFile, schema.getSchemaName());
    final List<String> invertedIndexColumns = makeInvertedIndexColumns();
    final String sortedColumn = makeSortedColumn();

    // Create Pinot table
    addHybridTable("mytable", "DaysSinceEpoch", "daysSinceEpoch", KafkaStarterUtils.DEFAULT_ZK_STR,
            KAFKA_TOPIC, schema.getSchemaName(), TENANT_NAME, TENANT_NAME, avroFiles.get(0), sortedColumn,
            invertedIndexColumns, null);
    LOGGER.info("Running with Sorted column=" + sortedColumn + " and inverted index columns = "
            + invertedIndexColumns);

    // Create a subset of the first 8 segments (for offline) and the last 6 segments (for realtime)
    final List<File> offlineAvroFiles = getOfflineAvroFiles(avroFiles);
    final List<File> realtimeAvroFiles = getRealtimeAvroFiles(avroFiles);

    // Load data into H2
    ExecutorService executor = Executors.newCachedThreadPool();
    setupH2AndInsertAvro(avroFiles, executor);

    // Create segments from Avro data
    LOGGER.info("Creating offline segments from avro files " + offlineAvroFiles);
    buildSegmentsFromAvro(offlineAvroFiles, executor, 0, _segmentDir, _tarDir, "mytable", false, null);

    // Initialize query generator
    setupQueryGenerator(avroFiles, executor);
    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);

    // Set up a Helix spectator to count the number of segments that are uploaded and unlock the latch
    // once 12 segments are online
    final CountDownLatch latch = new CountDownLatch(1);
    HelixManager manager = HelixManagerFactory.getZKHelixManager(getHelixClusterName(), "test_instance",
            InstanceType.SPECTATOR, ZkStarter.DEFAULT_ZK_STR);
    manager.connect();
    manager.addExternalViewChangeListener(new ExternalViewChangeListener() {
        @Override
        public void onExternalViewChange(List<ExternalView> externalViewList,
                NotificationContext changeContext) {
            for (ExternalView externalView : externalViewList) {
                if (externalView.getId().contains("mytable")) {
                    Set<String> partitionSet = externalView.getPartitionSet();
                    if (partitionSet.size() == offlineSegmentCount) {
                        int onlinePartitionCount = 0;
                        for (String partitionId : partitionSet) {
                            Map<String, String> partitionStateMap = externalView.getStateMap(partitionId);
                            if (partitionStateMap.containsValue("ONLINE")) {
                                onlinePartitionCount++;
                            }
                        }
                        if (onlinePartitionCount == offlineSegmentCount) {
                            System.out.println("Got " + offlineSegmentCount
                                    + " online tables, unlatching the main thread");
                            latch.countDown();
                        }
                    }
                }
            }
        }
    });

    // Upload the segments
    int i = 0;
    for (String segmentName : _tarDir.list()) {
        System.out.println("Uploading segment " + (i++) + " : " + segmentName);
        File file = new File(_tarDir, segmentName);
        FileUploadUtils.sendSegmentFile("localhost", "8998", segmentName, new FileInputStream(file),
                file.length());
    }

    // Wait for all offline segments to be online
    latch.await();

    // Load realtime data into Kafka
    LOGGER.info("Pushing data from realtime avro files " + realtimeAvroFiles);
    pushAvroIntoKafka(realtimeAvroFiles, KafkaStarterUtils.DEFAULT_KAFKA_BROKER, KAFKA_TOPIC);

    // Wait until the Pinot event count matches with the number of events in the Avro files
    int pinotRecordCount, h2RecordCount;
    long timeInFiveMinutes = System.currentTimeMillis() + 5 * 60 * 1000L;
    Statement statement = _connection.createStatement(ResultSet.TYPE_FORWARD_ONLY,
            ResultSet.CONCUR_READ_ONLY);
    statement.execute("select count(*) from mytable");
    ResultSet rs = statement.getResultSet();
    rs.first();
    h2RecordCount = rs.getInt(1);
    rs.close();
    waitForRecordCountToStabilizeToExpectedCount(h2RecordCount, timeInFiveMinutes);
}
From source file:net.sourceforge.msscodefactory.v1_10.MSSBamPg8.MSSBamPg8AttachmentTagTable.java
public MSSBamAttachmentTagBuff readBuff(MSSBamAuthorization Authorization, MSSBamAttachmentTagPKey PKey) {
    final String S_ProcName = "readBuff";
    if (!schema.isTransactionOpen()) {
        throw CFLib.getDefaultExceptionFactory().newUsageException(getClass(), S_ProcName,
                "Transaction not open");
    }
    try {
        Connection cnx = schema.getCnx();
        long AttachmentId = PKey.getRequiredAttachmentId();
        long TagId = PKey.getRequiredTagId();
        String sql = S_sqlSelectAttachmentTagBuff + "WHERE " + "attg.AttachmentId = "
                + Long.toString(AttachmentId) + " " + "AND " + "attg.TagId = " + Long.toString(TagId) + " ";
        Statement stmt = cnx.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
        ResultSet resultSet = stmt.executeQuery(sql);
        if (resultSet.next()) {
            MSSBamAttachmentTagBuff buff = unpackAttachmentTagResultSetToBuff(resultSet);
            if (resultSet.next()) {
                resultSet.last();
                throw CFLib.getDefaultExceptionFactory().newRuntimeException(getClass(), S_ProcName,
                        "Did not expect multi-buff response, " + resultSet.getRow() + " rows selected");
            }
            return (buff);
        } else {
            return (null);
        }
    } catch (SQLException e) {
        throw CFLib.getDefaultExceptionFactory().newDbException(getClass(), S_ProcName, e);
    }
}
From source file:main.export.sql.DocBuilder.java
@SuppressWarnings("unchecked")
public Map<String, Object> getFields(Map<String, Object> firstRow, ResultSet rs, Entity entity,
        Map<String, Object> entityMap, Map<String, Object> rootEntityMap) throws SQLException {
    entityMap = new HashMap<String, Object>();
    if (entity.allAttributes.get(MULTI_VALUED) != null
            && entity.allAttributes.get(MULTI_VALUED).equalsIgnoreCase("true")) {
        List<Object> fieldArray = new ArrayList<Object>();
        rs.beforeFirst();
        while (rs.next()) {
            if (entity.fields.size() > 1) {
                Map<String, Object> entityFieldsMap = new HashMap<String, Object>();
                for (Iterator<Field> iterator = entity.fields.iterator(); iterator.hasNext();) {
                    Field field = (Field) iterator.next();
                    FieldType fieldType = FieldType.valueOf(field.allAttributes.get("type").toUpperCase());
                    entityFieldsMap.put(field.name,
                            convertFieldType(fieldType, rs.getObject(field.column)).get(0));
                }
                fieldArray.add(entityFieldsMap);
            } else if (entity.fields.size() == 1) {
                fieldArray.add(rs.getObject(entity.fields.get(0).column));
            }
        }
        rootEntityMap.put(entity.name, fieldArray);
    } else if (firstRow != null) {
        for (Iterator<Field> iterator = entity.fields.iterator(); iterator.hasNext();) {
            Field field = (Field) iterator.next();
            FieldType fieldType = FieldType.valueOf(field.allAttributes.get("type").toUpperCase());
            if (firstRow.get(field.column) != null) {
                if (entity.pk != null && entity.pk.equals(field.name)) {
                    if (importer.getDataStoreType().equals(DataStoreType.MONGO)) {
                        entityMap.put("_id", convertFieldType(fieldType, firstRow.get(field.column)).get(0));
                    } else if (importer.getDataStoreType().equals(DataStoreType.COUCH)) {
                        // couch db says document id must be string
                        entityMap.put("_id",
                                convertFieldType(FieldType.STRING, firstRow.get(field.column)).get(0));
                    }
                } else {
                    entityMap.put(field.getName(),
                            convertFieldType(fieldType, firstRow.get(field.column)).get(0));
                }
                params.put(entity.name + "." + field.name, firstRow.get(field.column).toString());
            }
        }
    }
    if (entity.entities != null) {
        Entity subEntity = null;
        String query = "", aparam = "";
        for (Iterator<Entity> iterator = entity.entities.iterator(); iterator.hasNext();) {
            subEntity = (Entity) iterator.next();
            subLevel = subConnection.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
                    ResultSet.CONCUR_READ_ONLY);
            query = subEntity.allAttributes.get("query");
            m = p.matcher(query);
            aparam = "";
            try {
                log.info("Parameter Map is: " + params);
                while (m.find()) {
                    aparam = query.substring(m.start() + 2, m.end() - 1);
                    query = query.replaceAll("(\\$\\{" + aparam + "\\})",
                            Matcher.quoteReplacement(StringEscapeUtils.escapeSql(params.get(aparam))));
                    m = p.matcher(query);
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
            resultSet = subLevel.executeQuery(query);
            if (resultSet.next()) {
                subEntityData = getFields(processor.toMap(resultSet), resultSet, subEntity, null, entityMap);
                if (subEntityData.size() > 0)
                    entityMap.put(subEntity.name, subEntityData);
            }
            resultSet.close();
            subLevel.close();
        }
    }
    return entityMap;
}
From source file:edu.ku.brc.specify.toycode.mexconabio.CopyPlantsFromGBIF.java
public void processNullKingdom() {
    PrintWriter pw = null;
    try {
        pw = new PrintWriter("gbif_plants_from_null.log");
    } catch (FileNotFoundException e) {
        e.printStackTrace();
    }
    System.out.println("----------------------- Searching NULL ----------------------- ");

    String gbifWhereStr = "FROM raw WHERE kingdom IS NULL";
    long startTime = System.currentTimeMillis();
    String cntGBIFSQL = "SELECT COUNT(*) " + gbifWhereStr; // + " LIMIT 0,1000";
    String gbifSQL = gbifSQLBase + gbifWhereStr;
    System.out.println(cntGBIFSQL);

    long totalRecs = BasicSQLUtils.getCount(srcConn, cntGBIFSQL);
    long procRecs = 0;
    int secsThreshold = 0;
    String msg = String.format("Query: %8.2f secs",
            (double) (System.currentTimeMillis() - startTime) / 1000.0);
    System.out.println(msg);
    pw.println(msg);
    pw.flush();
    startTime = System.currentTimeMillis();

    Statement gStmt = null;
    PreparedStatement pStmt = null;
    try {
        pw = new PrintWriter("gbif_plants_from_null.log");
        pStmt = dstConn.prepareStatement(pSQL);
        System.out.println("Total Records: " + totalRecs);
        pw.println("Total Records: " + totalRecs);

        gStmt = srcConn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
        gStmt.setFetchSize(Integer.MIN_VALUE);
        ResultSet rs = gStmt.executeQuery(gbifSQL);
        ResultSetMetaData rsmd = rs.getMetaData();
        while (rs.next()) {
            String genus = rs.getString(16);
            if (genus == null)
                continue;
            String species = rs.getString(17);
            if (isPlant(colStmtGN, colStmtGNSP, genus, species)
                    || isPlant(colDstStmtGN, colDstStmtGNSP, genus, species)) {
                for (int i = 1; i <= rsmd.getColumnCount(); i++) {
                    Object obj = rs.getObject(i);
                    pStmt.setObject(i, obj);
                }
                try {
                    pStmt.executeUpdate();
                } catch (Exception ex) {
                    System.err.println("For Old ID[" + rs.getObject(1) + "]");
                    ex.printStackTrace();
                    pw.print("For Old ID[" + rs.getObject(1) + "] " + ex.getMessage());
                    pw.flush();
                }
                procRecs++;
                if (procRecs % 10000 == 0) {
                    long endTime = System.currentTimeMillis();
                    long elapsedTime = endTime - startTime;
                    double avergeTime = (double) elapsedTime / (double) procRecs;
                    double hrsLeft = (((double) elapsedTime / (double) procRecs) * (double) totalRecs
                            - procRecs) / HRS;
                    int seconds = (int) (elapsedTime / 60000.0);
                    if (secsThreshold != seconds) {
                        secsThreshold = seconds;
                        msg = String.format(
                                "Elapsed %8.2f hr.mn Ave Time: %5.2f Percent: %6.3f Hours Left: %8.2f ",
                                ((double) (elapsedTime)) / HRS, avergeTime,
                                100.0 * ((double) procRecs / (double) totalRecs), hrsLeft);
                        System.out.println(msg);
                        pw.println(msg);
                        pw.flush();
                    }
                }
            }
        }
    } catch (Exception ex) {
        ex.printStackTrace();
    } finally {
        try {
            if (gStmt != null) {
                gStmt.close();
            }
            if (pStmt != null) {
                pStmt.close();
            }
            pw.close();
        } catch (Exception ex) {
        }
    }
    System.out.println("Done transferring.");
    pw.println("Done transferring.");
}
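The gStmt.setFetchSize(Integer.MIN_VALUE) call above is the MySQL Connector/J convention for streaming rows one at a time instead of buffering the entire result in memory; the driver only honors it on a TYPE_FORWARD_ONLY, CONCUR_READ_ONLY statement. A minimal sketch of the idiom on its own (the JDBC URL, credentials, and table name are hypothetical):

try (Connection conn = DriverManager.getConnection("jdbc:mysql://localhost/demo", "user", "pass");
     Statement stmt = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY)) {
    stmt.setFetchSize(Integer.MIN_VALUE); // ask Connector/J to stream rows
    try (ResultSet rs = stmt.executeQuery("SELECT * FROM large_table")) {
        while (rs.next()) {
            // process one row at a time; the full result never sits in the heap
        }
    }
}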
From source file:com.cloudera.sqoop.testutil.BaseSqoopTestCase.java
/**
 * Drop a table if it already exists in the database.
 * @param table the name of the table to drop.
 * @throws SQLException if something goes wrong.
 */
protected void dropTableIfExists(String table) throws SQLException {
    Connection conn = getManager().getConnection();
    PreparedStatement statement = conn.prepareStatement("DROP TABLE " + table + " IF EXISTS",
            ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
    try {
        statement.executeUpdate();
        conn.commit();
    } finally {
        statement.close();
    }
}
From source file:net.sourceforge.msscodefactory.v1_10.MSSBamPg8.MSSBamPg8AccessSecurityTable.java
public MSSBamAccessSecurityBuff[] readAllBuff(MSSBamAuthorization Authorization) {
    final String S_ProcName = "readAllBuff";
    if (!schema.isTransactionOpen()) {
        throw CFLib.getDefaultExceptionFactory().newUsageException(getClass(), S_ProcName,
                "Transaction not open");
    }
    try {
        Connection cnx = schema.getCnx();
        String sql = S_sqlSelectAccessSecurityBuff + "ORDER BY " + "asec.Id ASC";
        Statement stmt = cnx.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
        ResultSet resultSet = stmt.executeQuery(sql);
        List<MSSBamAccessSecurityBuff> buffList = new ArrayList<MSSBamAccessSecurityBuff>();
        while (resultSet.next()) {
            MSSBamAccessSecurityBuff buff = unpackAccessSecurityResultSetToBuff(resultSet);
            buffList.add(buff);
        }
        return (buffList.toArray(new MSSBamAccessSecurityBuff[0]));
    } catch (SQLException e) {
        throw CFLib.getDefaultExceptionFactory().newDbException(getClass(), S_ProcName, e);
    }
}
From source file:com.alibaba.wasp.jdbc.TestJdbcResultSet.java
public void testAbsolute() throws SQLException {
    stat = conn.createStatement();
    stat.execute("CREATE TABLE test(ID INT PRIMARY KEY)");
    // there was a problem when more than MAX_MEMORY_ROWS were in the result set
    stat.execute("INSERT INTO test SELECT X FROM SYSTEM_RANGE(1, 200)");
    Statement s2 = conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
    ResultSet rs = s2.executeQuery("SELECT * FROM test ORDER BY ID");
    for (int i = 100; i > 0; i--) {
        rs.absolute(i);
        assertEquals(i, rs.getInt(1));
    }
    stat.execute("DROP TABLE test");
}
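rs.absolute(i) only works here because the statement was created as TYPE_SCROLL_INSENSITIVE; on a TYPE_FORWARD_ONLY result set it would throw an SQLException. Whether a driver supports a given type/concurrency pair can be checked up front via DatabaseMetaData; a sketch, assuming an open Connection conn:

DatabaseMetaData meta = conn.getMetaData();
if (meta.supportsResultSetConcurrency(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY)) {
    // scrollable, read-only result sets are available; absolute()/last() are safe
    Statement s = conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
} else {
    // fall back to TYPE_FORWARD_ONLY and iterate with next() only
}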
From source file:org.apache.jena.jdbc.remote.connections.RemoteEndpointConnection.java
@Override
protected JenaPreparedStatement createPreparedStatementInternal(String sparql, int resultSetType,
        int resultSetConcurrency, int resultSetHoldability) throws SQLException {
    if (this.isClosed())
        throw new SQLException("Cannot create a statement after the connection was closed");
    if (resultSetType == ResultSet.TYPE_SCROLL_SENSITIVE)
        throw new SQLFeatureNotSupportedException(
                "Remote endpoint backed connection do not support scroll sensitive result sets");
    if (resultSetConcurrency != ResultSet.CONCUR_READ_ONLY)
        throw new SQLFeatureNotSupportedException(
                "Remote endpoint backed connections only support read-only result sets");
    return new RemoteEndpointPreparedStatement(sparql, this, this.client, resultSetType,
            ResultSet.FETCH_FORWARD, 0, resultSetHoldability);
}
From source file:com.tfm.utad.sqoopdata.SqoopVerticaDB.java
private static Long findMaxID(Connection conn) {
    Long id = (long) 0;
    Statement stmt = null;
    String query;
    try {
        stmt = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
        query = "SELECT MAX(id) AS id FROM s1.coordinates";
        LOG.info("Query execution: " + query);
        ResultSet rs = stmt.executeQuery(query);
        while (rs.next()) {
            id = (long) rs.getInt("id");
        }
    } catch (SQLException e) {
        LOG.error("SQLException error: " + e.toString());
    } finally {
        if (stmt != null) {
            try {
                stmt.close();
            } catch (SQLException ex) {
                LOG.error("Statement error: " + ex.toString());
            }
        }
        if (conn != null) {
            try {
                conn.close();
            } catch (SQLException ex) {
                LOG.error("Connection error: " + ex.toString());
            }
        }
    }
    return id;
}
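The explicit finally-block cleanup in this last example predates try-with-resources. A sketch of the same MAX(id) lookup with automatic resource management (query and LOG are reused from the example; note it deliberately leaves closing the Connection to the caller, which is a design choice, not the original author's code):

private static Long findMaxID(Connection conn) {
    String query = "SELECT MAX(id) AS id FROM s1.coordinates";
    try (Statement stmt = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
         ResultSet rs = stmt.executeQuery(query)) {
        // MAX() always returns one row; getLong reads SQL NULL as 0 on an empty table
        return rs.next() ? rs.getLong("id") : 0L;
    } catch (SQLException e) {
        LOG.error("SQLException error: " + e.toString());
        return 0L;
    }
}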