List of usage examples for java.sql SQLException toString
public String toString()
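SQLException does not override Throwable.toString(), so the call returns the exception class name followed by the detail message, for example "java.sql.SQLException: user lacks privilege or object not found: FOO" from HSQLDB. The SQLState and vendor error code are not part of that string and must be read separately. A minimal sketch (the HSQLDB in-memory URL is illustrative; java.sql.* imports assumed):

    try (Connection conn = DriverManager.getConnection("jdbc:hsqldb:mem:demo", "SA", "");
         Statement stmt = conn.createStatement()) {
        stmt.executeQuery("SELECT * FROM NO_SUCH_TABLE");
    } catch (SQLException e) {
        System.out.println(e.toString());                     // class name + detail message
        System.out.println("SQLState: " + e.getSQLState());   // not included in toString()
        System.out.println("Vendor code: " + e.getErrorCode());
    }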
From source file:co.nubetech.apache.hadoop.DataDrivenDBInputFormat.java
/** {@inheritDoc} */
public List<InputSplit> getSplits(JobContext job) throws IOException {
    int targetNumTasks = job.getConfiguration().getInt(MRJobConfig.NUM_MAPS, 1);
    if (1 == targetNumTasks) {
        // There's no need to run a bounding vals query; just return a split
        // that separates nothing. This can be considerably more efficient for
        // a large table with no index.
        List<InputSplit> singletonSplit = new ArrayList<InputSplit>();
        singletonSplit.add(new DataDrivenDBInputSplit("1=1", "1=1"));
        return singletonSplit;
    }

    ResultSet results = null;
    Statement statement = null;
    Connection connection = getConnection();
    try {
        statement = connection.createStatement();
        results = statement.executeQuery(getBoundingValsQuery());
        results.next();

        // Based on the type of the results, use a different mechanism
        // for interpolating split points (i.e., numeric splits, text
        // splits, dates, etc.)
        int sqlDataType = results.getMetaData().getColumnType(1);
        DBSplitter splitter = getSplitter(sqlDataType);
        if (null == splitter) {
            throw new IOException("Unknown SQL data type: " + sqlDataType);
        }

        return splitter.split(job.getConfiguration(), results, getDBConf().getInputOrderBy());
    } catch (SQLException e) {
        throw new IOException(e.getMessage());
    } finally {
        // More-or-less ignore SQL exceptions here, but log in case we need it.
        try {
            if (null != results) {
                results.close();
            }
        } catch (SQLException se) {
            LOG.debug("SQLException closing resultset: " + se.toString());
        }

        try {
            if (null != statement) {
                statement.close();
            }
        } catch (SQLException se) {
            LOG.debug("SQLException closing statement: " + se.toString());
        }

        try {
            connection.commit();
            closeConnection();
        } catch (SQLException se) {
            LOG.debug("SQLException committing split transaction: " + se.toString());
        }
    }
}
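The finally block above is the classic pre-Java-7 close-quietly pattern: each close-time SQLException is logged via toString() so it cannot mask the original failure. On Java 7+ the same resource handling can be written with try-with-resources; a minimal sketch, assuming a getConnection() helper and a boundingQuery string like the ones above (close-time exceptions are attached as suppressed exceptions instead of being logged one by one):

    try (Connection conn = getConnection();
         Statement stmt = conn.createStatement();
         ResultSet rs = stmt.executeQuery(boundingQuery)) {
        while (rs.next()) {
            // interpolate split points from the bounding values here
        }
    } catch (SQLException e) {
        // close-time failures are reachable via e.getSuppressed()
        throw new IOException(e.getMessage(), e);
    }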
From source file:muvis.Environment.java
/**
 * @return the databaseManager
 */
public synchronized MusicLibraryDatabaseManager getDatabaseManager() {
    if (databaseManager == null) {
        databaseManager = new MusicLibraryDatabaseManager();
        try {
            databaseManager.connect();
            databaseManager.initDatabase();
        } catch (SQLException ex) {
            System.out.println("Cannot init the database! " + ex.toString());
        }
    }
    return databaseManager;
}
From source file:BQJDBC.QueryResultTest.Timeouttest.java
@Test
public void QueryResultTest05() {
    final String sql = "SELECT word FROM publicdata:samples.shakespeare WHERE word=\"huzzah\"";
    final String description = "The word \"huzzah\" NOTE: It doesn't appear in any book, so it returns with a null #WHERE";
    this.logger.info("Test number: 05");
    this.logger.info("Running query:" + sql);

    java.sql.ResultSet Result = null;
    try {
        Result = Timeouttest.con.createStatement().executeQuery(sql);
        this.logger.debug(Result.getMetaData().getColumnCount());
    } catch (SQLException e) {
        this.logger.error("SQLexception" + e.toString());
        Assert.fail("SQLException" + e.toString());
    }
    Assert.assertNotNull(Result);

    this.logger.debug(description);
    try {
        if (Result.getType() != ResultSet.TYPE_FORWARD_ONLY)
            Assert.assertFalse(Result.first());
    } catch (SQLException e) {
        this.logger.error("SQLexception" + e.toString());
        Assert.fail(e.toString());
    }
}
From source file:org.apache.hadoop.metrics2.sink.SqlServerSink.java
public long getMetricRecordID(String recordTypeContext, String recordTypeName, String nodeName,
        String sourceIP, String clusterName, String serviceName, String tagPairs, long recordTimestamp) {
    CallableStatement cstmt = null;
    long result;

    if (recordTypeContext == null || recordTypeName == null || nodeName == null || sourceIP == null
            || tagPairs == null)
        return -1;

    int colid = 1;
    try {
        if (ensureConnection()) {
            cstmt = conn.prepareCall("{call dbo.uspGetMetricRecord(?, ?, ?, ?, ?, ?, ?, ?, ?)}");
            cstmt.setNString(colid++, recordTypeContext);
            cstmt.setNString(colid++, recordTypeName);
            cstmt.setNString(colid++, nodeName);
            cstmt.setNString(colid++, sourceIP);
            cstmt.setNString(colid++, clusterName);
            cstmt.setNString(colid++, serviceName);
            cstmt.setNString(colid++, tagPairs);
            cstmt.setLong(colid++, recordTimestamp);
            cstmt.registerOutParameter(colid, java.sql.Types.BIGINT);
            cstmt.execute();
            result = cstmt.getLong(colid);
            if (cstmt.wasNull())
                return -1;
            return result;
        }
    } catch (Exception e) {
        if (DEBUG)
            logger.info("Error during getMetricRecordID call sproc: " + e.toString());
        flush();
    } finally {
        if (cstmt != null) {
            try {
                cstmt.close();
            } catch (SQLException se) {
                if (DEBUG)
                    logger.info("Error during getMetricRecordID close cstmt: " + se.toString());
            }
            /*
             * We don't close the connection here because we are likely to be
             * writing metric values next and it is more efficient to share the
             * connection.
             */
        }
    }
    return -1;
}
From source file:org.apache.jcs.auxiliary.disk.jdbc.hsql.HSQLDiskCacheFactory.java
/**
 * Sets up the cache table.
 * <p>
 * @param cConn
 * @param tableName
 */
private void setupTABLE(Connection cConn, String tableName) {
    boolean newT = true;

    // TODO make the cached nature of the table configurable
    StringBuffer createSql = new StringBuffer();
    createSql.append("CREATE CACHED TABLE " + tableName);
    createSql.append("( ");
    createSql.append("CACHE_KEY VARCHAR(250) NOT NULL, ");
    createSql.append("REGION VARCHAR(250) NOT NULL, ");
    createSql.append("ELEMENT BINARY, ");
    createSql.append("CREATE_TIME DATE, ");
    createSql.append("CREATE_TIME_SECONDS BIGINT, ");
    createSql.append("MAX_LIFE_SECONDS BIGINT, ");
    createSql.append("SYSTEM_EXPIRE_TIME_SECONDS BIGINT, ");
    createSql.append("IS_ETERNAL CHAR(1), ");
    createSql.append("PRIMARY KEY (CACHE_KEY, REGION) ");
    createSql.append(");");

    Statement sStatement = null;
    try {
        sStatement = cConn.createStatement();
    } catch (SQLException e) {
        log.error("problem creating a statement.", e);
        return; // nothing more we can do without a statement
    }

    try {
        // DDL, so use executeUpdate rather than executeQuery
        sStatement.executeUpdate(createSql.toString());
    } catch (SQLException e) {
        if (e.toString().indexOf("already exists") != -1) {
            newT = false;
        } else {
            log.error("Problem creating table.", e);
        }
    }

    // TODO create an index on SYSTEM_EXPIRE_TIME_SECONDS
    String[] setupData = { "create index iKEY on " + tableName + " (CACHE_KEY, REGION)" };
    if (newT) {
        for (int i = 0; i < setupData.length; i++) { // start at 0 so the index is actually created
            try {
                sStatement.executeUpdate(setupData[i]);
            } catch (SQLException e) {
                log.error("Exception caught when creating index.", e);
            }
        }
    }

    try {
        sStatement.close();
    } catch (SQLException e) {
        log.error("Problem closing statement.", e);
    }
}
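Matching on e.toString().indexOf("already exists") works, but the message text is driver-specific and can change between releases. A driver-neutral alternative is to ask DatabaseMetaData whether the table exists before issuing the DDL; a sketch, with the caveat that identifier case handling varies by driver (HSQLDB stores unquoted names in upper case):

    private boolean tableExists(Connection conn, String tableName) throws SQLException {
        // getTables(catalog, schemaPattern, tableNamePattern, types)
        try (ResultSet rs = conn.getMetaData().getTables(null, null,
                tableName.toUpperCase(), new String[] { "TABLE" })) {
            return rs.next();
        }
    }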
From source file:org.apache.hadoop.sqoop.testutil.ImportJobTestCase.java
@Before
public void setUp() {
    incrementTableNum();

    if (!isLog4jConfigured) {
        BasicConfigurator.configure();
        isLog4jConfigured = true;
        LOG.info("Configured log4j with console appender.");
    }

    testServer = new HsqldbTestServer();
    try {
        testServer.resetServer();
    } catch (SQLException sqlE) {
        LOG.error("Got SQLException: " + sqlE.toString());
        fail("Got SQLException: " + sqlE.toString());
    } catch (ClassNotFoundException cnfe) {
        LOG.error("Could not find class for db driver: " + cnfe.toString());
        fail("Could not find class for db driver: " + cnfe.toString());
    }

    manager = testServer.getManager();
}
From source file:org.apache.hadoop.sqoop.manager.SqlManager.java
@Override
public String[] getColumnNames(String tableName) {
    String stmt = "SELECT t.* FROM " + tableName + " AS t WHERE 1 = 1";

    ResultSet results = execute(stmt);
    if (null == results) {
        return null;
    }

    try {
        int cols = results.getMetaData().getColumnCount();
        ArrayList<String> columns = new ArrayList<String>();
        ResultSetMetaData metadata = results.getMetaData();
        for (int i = 1; i < cols + 1; i++) {
            String colName = metadata.getColumnName(i);
            if (colName == null || colName.equals("")) {
                colName = metadata.getColumnLabel(i);
            }
            columns.add(colName);
        }
        return columns.toArray(new String[0]);
    } catch (SQLException sqlException) {
        LOG.error("Error reading from database: " + sqlException.toString());
        return null;
    }
}
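The probe query above ("WHERE 1 = 1") actually executes against the table just to reach its metadata. Many drivers also expose ResultSetMetaData from a prepared statement before execution, which avoids touching any rows; support is driver-dependent (getMetaData() may return null before execution), so treat this sketch, which assumes a Connection conn, as something to verify against your driver:

    try (PreparedStatement ps = conn.prepareStatement(
            "SELECT t.* FROM " + tableName + " AS t WHERE 1 = 0")) {
        ResultSetMetaData md = ps.getMetaData(); // may be null before execution
        if (md != null) {
            for (int i = 1; i <= md.getColumnCount(); i++) {
                System.out.println(md.getColumnName(i));
            }
        }
    } catch (SQLException e) {
        LOG.error("Error reading metadata: " + e.toString());
    }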
From source file:org.apache.hadoop.sqoop.manager.SqlManager.java
@Override
public String getPrimaryKey(String tableName) {
    try {
        DatabaseMetaData metaData = this.getConnection().getMetaData();
        ResultSet results = metaData.getPrimaryKeys(null, null, tableName);
        if (null == results) {
            return null;
        }

        if (results.next()) {
            return results.getString("COLUMN_NAME");
        }
    } catch (SQLException sqlException) {
        LOG.error("Error reading primary key metadata: " + sqlException.toString());
        return null;
    }
    return null;
}
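Note that getPrimaryKeys returns one row per key column, so returning on the first results.next() silently truncates a composite primary key to a single column, and the JDBC Javadoc orders the rows by COLUMN_NAME rather than key position. A sketch that gathers the full key in order (assuming a Connection conn and java.util imports):

    SortedMap<Short, String> keyColumns = new TreeMap<Short, String>();
    try (ResultSet rs = conn.getMetaData().getPrimaryKeys(null, null, tableName)) {
        while (rs.next()) {
            // KEY_SEQ gives the column's position within the key
            keyColumns.put(rs.getShort("KEY_SEQ"), rs.getString("COLUMN_NAME"));
        }
    }
    List<String> orderedKey = new ArrayList<String>(keyColumns.values());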
From source file:biz.source_code.miniConnectionPoolManager.TestMiniConnectionPoolManager.java
private void closeConnectionNoEx(PooledConnection pconn) {
    try {
        pconn.close();
    } catch (SQLException e) {
        log("Error while closing database connection: " + e.toString());
    }
}
From source file:BQJDBC.QueryResultTest.Timeouttest.java
@Test
public void QueryResultTest02() {
    final String sql = "SELECT corpus FROM publicdata:samples.shakespeare GROUP BY corpus ORDER BY corpus LIMIT 5";
    final String description = "The book names of shakespeare #GROUP_BY #ORDER_BY";
    String[][] expectation = new String[][] {
            { "1kinghenryiv", "1kinghenryvi", "2kinghenryiv", "2kinghenryvi", "3kinghenryvi" } };

    this.logger.info("Test number: 02");
    this.logger.info("Running query:" + sql);

    java.sql.ResultSet Result = null;
    try {
        Result = Timeouttest.con.createStatement().executeQuery(sql);
    } catch (SQLException e) {
        this.logger.error("SQLexception" + e.toString());
        Assert.fail("SQLException" + e.toString());
    }
    Assert.assertNotNull(Result);

    this.logger.debug(description);
    HelperFunctions.printer(expectation);
    try {
        Assert.assertTrue("Comparing failed in the String[][] array",
                this.comparer(expectation, BQSupportMethods.GetQueryResult(Result)));
    } catch (SQLException e) {
        this.logger.error("SQLexception" + e.toString());
        Assert.fail(e.toString());
    }
}