Example usage for java.sql SQLException toString

List of usage examples for java.sql SQLException toString

Introduction

On this page you can find example usages of java.sql.SQLException.toString().

Prototype

public String toString() 

Document

Returns a short description of this throwable.
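
Before the project examples below, here is a minimal, self-contained sketch of what toString() produces for a SQLException: the concrete exception class name followed by the detail message, as inherited from Throwable. The in-memory H2 URL is only an illustrative placeholder; with no driver on the classpath, DriverManager itself throws a SQLException, which the catch block prints the same way.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class ToStringDemo {
    public static void main(String[] args) {
        // Placeholder URL; substitute one for whatever JDBC driver you have.
        String url = "jdbc:h2:mem:demo";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement()) {
            stmt.executeQuery("SELECT * FROM no_such_table");
        } catch (SQLException e) {
            // toString() yields the exception's class name plus its detail
            // message, e.g. "<exception class>: Table ... not found".
            System.err.println("Query failed: " + e.toString());
        }
    }
}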

Usage

From source file:org.apache.sqoop.mapreduce.db.DataDrivenDBInputFormat.java

/** {@inheritDoc} */
@Override
public List<InputSplit> getSplits(JobContext job) throws IOException {

    int targetNumTasks = ConfigurationHelper.getJobNumMaps(job);
    String boundaryQuery = getDBConf().getInputBoundingQuery();

    // If the user has not forced us to use a boundary query, and we don't
    // need one because there is only one mapper, return a single split that
    // separates nothing. This can be considerably more optimal for a large
    // table with no index.
    if (1 == targetNumTasks && (boundaryQuery == null || boundaryQuery.isEmpty())) {
        List<InputSplit> singletonSplit = new ArrayList<InputSplit>();
        singletonSplit.add(new com.cloudera.sqoop.mapreduce.db.DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                "1=1", "1=1"));
        return singletonSplit;
    }

    ResultSet results = null;
    Statement statement = null;
    Connection connection = getConnection();
    try {
        statement = connection.createStatement();

        String query = getBoundingValsQuery();
        LOG.info("BoundingValsQuery: " + query);

        results = statement.executeQuery(query);
        results.next();

        // Based on the type of the results, use a different mechanism
        // for interpolating split points (i.e., numeric splits, text splits,
        // dates, etc.)
        int sqlDataType = results.getMetaData().getColumnType(1);
        boolean isSigned = results.getMetaData().isSigned(1);

        // MySQL's unsigned INTEGER exceeds the signed range, so widen it to BIGINT.
        if (sqlDataType == Types.INTEGER && !isSigned) {
            sqlDataType = Types.BIGINT;
        }

        DBSplitter splitter = getSplitter(sqlDataType);
        if (null == splitter) {
            throw new IOException("Unknown SQL data type: " + sqlDataType);
        }

        return splitter.split(job.getConfiguration(), results, getDBConf().getInputOrderBy());
    } catch (SQLException e) {
        throw new IOException(e);
    } finally {
        // More-or-less ignore SQL exceptions here, but log in case we need it.
        try {
            if (null != results) {
                results.close();
            }
        } catch (SQLException se) {
            LOG.debug("SQLException closing resultset: " + se.toString());
        }

        try {
            if (null != statement) {
                statement.close();
            }
        } catch (SQLException se) {
            LOG.debug("SQLException closing statement: " + se.toString());
        }

        try {
            connection.commit();
            closeConnection();
        } catch (SQLException se) {
            LOG.debug("SQLException committing split transaction: " + se.toString());
        }
    }
}
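
The finally block above closes each resource individually, logging any close-time SQLException via toString(). As a hedged sketch (the connection and bounding-values query are passed in by the caller, standing in for getConnection() and getBoundingValsQuery() above), try-with-resources expresses the same cleanup more compactly; exceptions thrown while closing then surface as suppressed exceptions on the primary one instead of needing a try/catch per resource:

import java.io.IOException;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class BoundingQuerySketch {
    // Sketch only: the caller supplies the connection and the bounding query.
    static void runBoundingQuery(Connection connection, String query) throws IOException {
        try (Statement statement = connection.createStatement();
             ResultSet results = statement.executeQuery(query)) {
            results.next();
            // ... interpolate split points from the result row, as above ...
        } catch (SQLException e) {
            // Close-time SQLExceptions are attached to e as suppressed
            // exceptions rather than logged one by one.
            throw new IOException(e);
        }
    }
}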

From source file:org.apache.hadoop.metrics2.sink.SqlSink.java

public void insertMetricValue(long metricRecordID, String metricName, String metricValue) {
    CallableStatement cstmt = null;
    if (metricRecordID < 0 || metricName == null || metricValue == null)
        return;
    try {
        logger.trace("Insert metricRecordId : " + metricRecordID + ", " + "metricName : " + metricName
                + ", metricValue : " + metricValue + ", " + "procedure = " + getInsertMetricsProcedureName());
        if (ensureConnection()) {
            String procedureCall = String.format("{call %s(?, ?, ?)}", getInsertMetricsProcedureName());
            cstmt = conn.prepareCall(procedureCall);
            cstmt.setLong(1, metricRecordID);
            cstmt.setNString(2, metricName);
            cstmt.setNString(3, metricValue);
            cstmt.execute();
        }
    } catch (Exception e) {
        if (DEBUG)
            logger.info("Error during insertMetricValue call sproc: " + e.toString());
        flush();
    } finally {
        if (cstmt != null) {
            try {
                cstmt.close();
            } catch (SQLException se) {
                if (DEBUG)
                    logger.info("Error during insertMetricValue close cstmt: " + se.toString());
            }
            /*
             * We don't close the connection here because we are likely to be
             * writing more metric values next, and it is more efficient to
             * share the connection.
             */
        }
    }
}
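
A detail worth knowing when logging with toString(): a driver can chain several SQLExceptions together via setNextException(). A minimal, self-contained sketch (the messages and SQLSTATE values here are made up) that walks the chain and prints each link's toString():

import java.sql.SQLException;

public class ChainedSqlExceptionDemo {
    public static void main(String[] args) {
        // Build a small chain by hand; drivers attach follow-on errors the same way.
        SQLException first = new SQLException("first failure", "42000", 1064);
        first.setNextException(new SQLException("second failure", "08S01", 0));

        // Each link's toString() is the class name plus its detail message.
        for (SQLException e = first; e != null; e = e.getNextException()) {
            System.out.println(e.toString());
        }
    }
}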

From source file:org.apache.sqoop.manager.sqlserver.SQLServerManagerManualTest.java

@Test
public void testReadTable() {
    ResultSet results = null;
    try {
        results = manager.readTable(getTableName(), MSSQLTestUtils.getColumns());

        assertNotNull("ResultSet from readTable() is null!", results);

        ResultSetMetaData metaData = results.getMetaData();
        assertNotNull("ResultSetMetadata is null in readTable()", metaData);

        // ensure that we get the correct number of columns back
        assertEquals("Number of returned columns was unexpected!", metaData.getColumnCount(), 16);

        // We should get back 4 rows:
        //   1 2
        //   3 4
        //   5 6
        //   7 8
        // While row order isn't guaranteed, the left column should sum to 16
        // and the right column to 20.
        int sumCol1 = 0, sumCol2 = 0, rowCount = 0;
        while (results.next()) {
            rowCount++;
            sumCol1 += results.getInt(1);
            sumCol2 += results.getInt(2);
        }

        assertEquals("Expected 4 rows back", EXPECTED_NUM_ROWS, rowCount);
        assertEquals("Expected left sum of 10", EXPECTED_COL1_SUM, sumCol1);
        assertEquals("Expected right sum of 14", EXPECTED_COL2_SUM, sumCol2);
    } catch (SQLException sqlException) {
        LOG.error(StringUtils.stringifyException(sqlException));
        fail("SQL Exception: " + sqlException.toString());
    } finally {
        if (null != results) {
            try {
                results.close();
            } catch (SQLException sqlE) {
                LOG.error(StringUtils.stringifyException(sqlE));
                fail("SQL Exception in ResultSet.close(): " + sqlE.toString());
            }
        }

        manager.release();
    }
}

From source file:org.apache.hadoop.sqoop.testutil.ImportJobTestCase.java

/**
 * Create a table with a set of columns and add a row of values.
 * @param colTypes the types of the columns to make
 * @param vals the SQL text for each value to insert
 */
protected void createTableWithColTypes(String[] colTypes, String[] vals) {
    Connection conn = null;
    try {
        conn = getTestServer().getConnection();
        PreparedStatement statement = conn.prepareStatement("DROP TABLE " + getTableName() + " IF EXISTS",
                ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
        statement.executeUpdate();
        statement.close();

        String columnDefStr = "";
        String columnListStr = "";
        String valueListStr = "";

        String[] myColNames = new String[colTypes.length];

        for (int i = 0; i < colTypes.length; i++) {
            String colName = BASE_COL_NAME + Integer.toString(i);
            columnDefStr += colName + " " + colTypes[i];
            columnListStr += colName;
            valueListStr += vals[i];
            myColNames[i] = colName;
            if (i < colTypes.length - 1) {
                columnDefStr += ", ";
                columnListStr += ", ";
                valueListStr += ", ";
            }
        }

        statement = conn.prepareStatement("CREATE TABLE " + getTableName() + "(" + columnDefStr + ")",
                ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
        statement.executeUpdate();
        statement.close();

        statement = conn.prepareStatement(
                "INSERT INTO " + getTableName() + "(" + columnListStr + ")" + " VALUES(" + valueListStr + ")",
                ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
        statement.executeUpdate();
        statement.close();
        conn.commit();
        this.colNames = myColNames;
    } catch (SQLException sqlException) {
        fail("Could not create table: " + sqlException.toString());
    } finally {
        if (null != conn) {
            try {
                conn.close();
            } catch (SQLException sqlE) {
                LOG.warn("Got SQLException during close: " + sqlE.toString());
            }
        }
    }
}

From source file:org.apache.sqoop.manager.sqlserver.SQLServerManagerManualTest.java

@Before
public void setUp() {
    MSSQLTestUtils utils = new MSSQLTestUtils();
    try {
        utils.createTableFromSQL(MSSQLTestUtils.CREATE_TALBE_LINEITEM);
        utils.populateLineItem();
    } catch (SQLException e) {
        LOG.error("Setup fail with SQLException: " + StringUtils.stringifyException(e));
        fail("Setup fail with SQLException: " + e.toString());
    }
    Configuration conf = getConf();
    SqoopOptions opts = getSqoopOptions(conf);
    String username = MSSQLTestUtils.getDBUserName();
    String password = MSSQLTestUtils.getDBPassWord();
    opts.setUsername(username);
    opts.setPassword(password);
    opts.setConnectString(getConnectString());
    ConnFactory f = new ConnFactory(conf);
    try {
        this.manager = f.getManager(new JobData(opts, new ImportTool()));
        System.out.println("Manger : " + this.manager);
    } catch (IOException ioe) {
        LOG.error("Setup fail with IOException: " + StringUtils.stringifyException(ioe));
        fail("IOException instantiating manager: " + StringUtils.stringifyException(ioe));
    }
}

From source file:org.apache.sqoop.manager.sqlserver.SQLServerManagerManualTest.java

@Test
public void testgetPrimaryKeyFromTable() {
    // first, create a table with a primary key
    Connection conn = null;
    try {
        conn = getManager().getConnection();
        dropTableIfExists(TABLE_WITH_KEY);
        PreparedStatement statement = conn.prepareStatement(
                "CREATE TABLE " + TABLE_WITH_KEY + "(" + KEY_FIELD_NAME + " INT NOT NULL PRIMARY KEY, foo INT)",
                ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
        statement.executeUpdate();
        statement.close();

        String primaryKey = getManager().getPrimaryKey(TABLE_WITH_KEY);
        assertEquals("Expected null pkey for table without key", primaryKey, KEY_FIELD_NAME);

    } catch (SQLException sqlException) {
        LOG.error(StringUtils.stringifyException(sqlException));
        fail("Could not create table with primary key: " + sqlException.toString());
    } finally {
        if (null != conn) {
            try {
                conn.close();
            } catch (SQLException sqlE) {
                LOG.warn("Got SQLException during close: " + sqlE.toString());
            }
        }
    }

}

From source file:BQJDBC.QueryResultTest.QueryResultTest.java

@Test
public void QueryResultTest11() {

    this.logger.info("Test number: 10");
    this.logger.info("Testing databesmetadata ... getSchemas() ");

    try {
        QueryResultTest.con.getMetaData().getSchemas();
    } catch (SQLException e) {
        this.logger.warn("SQLexception" + e.toString());
        Assert.fail("schema problem");
    }

}

From source file:org.apache.hadoop.metrics2.sink.SqlSink.java

public long getMetricRecordID(String recordTypeContext, String recordTypeName, String nodeName, String sourceIP,
        String clusterName, String serviceName, String tagPairs, long recordTimestamp) {
    CallableStatement cstmt = null;
    long result;
    logger.trace("Params: recordTypeContext = " + recordTypeContext + ", recordTypeName = " + recordTypeName
            + ", nodeName = " + nodeName + ", sourceIP = " + sourceIP + ", tagPairs = " + tagPairs
            + ", clusterName = " + clusterName + ", serviceName = " + serviceName + ", recordTimestamp = "
            + recordTimestamp);
    if (recordTypeContext == null || recordTypeName == null || nodeName == null || sourceIP == null
            || tagPairs == null)
        return -1;

    int colid = 1;
    try {
        if (ensureConnection()) {
            String procedureCall = String.format("{call %s(?, ?, ?, ?, ?, ?, ?, ?, ?)}",
                    getGetMetricsProcedureName());
            cstmt = conn.prepareCall(procedureCall);
            cstmt.setNString(colid++, recordTypeContext);
            cstmt.setNString(colid++, recordTypeName);
            cstmt.setNString(colid++, nodeName);
            cstmt.setNString(colid++, sourceIP);
            cstmt.setNString(colid++, clusterName);
            cstmt.setNString(colid++, serviceName);
            cstmt.setNString(colid++, tagPairs);
            cstmt.setLong(colid++, recordTimestamp);
            cstmt.registerOutParameter(colid, java.sql.Types.BIGINT);
            cstmt.execute();

            result = cstmt.getLong(colid);
            if (cstmt.wasNull())
                return -1;
            return result;
        }
    } catch (Exception e) {
        if (DEBUG)
            logger.info("Error during getMetricRecordID call sproc: " + e.toString());
        flush();
    } finally {
        if (cstmt != null) {
            try {
                cstmt.close();
            } catch (SQLException se) {
                if (DEBUG)
                    logger.info("Error during getMetricRecordID close cstmt: " + se.toString());
            }
            /*
             * We don't close the connection here because we are likely to be
             * writing metric values next, and it is more efficient to share
             * the connection.
             */
        }
    }
    return -1;
}

From source file:co.nubetech.apache.hadoop.mapred.DataDrivenDBInputFormat.java

/** {@inheritDoc} */
public List<InputSplit> getSplits(Configuration job) throws IOException {

    int targetNumTasks = job.getInt(MRJobConfig.NUM_MAPS, 1);
    if (1 == targetNumTasks) {
        // There's no need to run a bounding vals query; just return a split
        // that separates nothing. This can be considerably more optimal for
        // a large table with no index.
        List<InputSplit> singletonSplit = new ArrayList<InputSplit>();
        singletonSplit.add(
                new org.apache.hadoop.mapreduce.lib.db.DataDrivenDBInputFormat.DataDrivenDBInputSplit("1=1",
                        "1=1"));
        return singletonSplit;
    }

    ResultSet results = null;
    Statement statement = null;
    Connection connection = getConnection();
    try {
        statement = connection.createStatement();

        results = statement.executeQuery(getBoundingValsQuery());
        results.next();

        // Based on the type of the results, use a different mechanism
        // for interpolating split points (i.e., numeric splits, text splits,
        // dates, etc.)
        int sqlDataType = results.getMetaData().getColumnType(1);
        DBSplitter splitter = getSplitter(sqlDataType);
        if (null == splitter) {
            throw new IOException("Unknown SQL data type: " + sqlDataType);
        }

        //return convertSplit(splitter.split(job, results, getDBConf()
        //      .getInputOrderBy()));
        return splitter.split(job, results, getDBConf().getInputOrderBy());
    } catch (SQLException e) {
        throw new IOException(e); // keep the SQLException as the cause
    } finally {
        // More-or-less ignore SQL exceptions here, but log in case we need it.
        try {
            if (null != results) {
                results.close();
            }
        } catch (SQLException se) {
            LOG.debug("SQLException closing resultset: " + se.toString());
        }

        try {
            if (null != statement) {
                statement.close();
            }
        } catch (SQLException se) {
            LOG.debug("SQLException closing statement: " + se.toString());
        }

        try {
            connection.commit();
            closeConnection();
        } catch (SQLException se) {
            LOG.debug("SQLException committing split transaction: " + se.toString());
        }
    }
}

From source file:BQJDBC.QueryResultTest.QueryResultTest.java

@Test
public void QueryResultTest03() {
    final String sql = "SELECT COUNT(DISTINCT web100_log_entry.connection_spec.remote_ip) AS num_clients FROM [guid754187384106:m_lab.2010_01] "
            + "WHERE IS_EXPLICITLY_DEFINED(web100_log_entry.connection_spec.remote_ip) AND IS_EXPLICITLY_DEFINED(web100_log_entry.log_time) "
            + "AND web100_log_entry.log_time > 1262304000 AND web100_log_entry.log_time < 1262476800";
    final String description = "A sample query from Google, but we don't have access to the queried table #ERROR #accessDenied #403";

    this.logger.info("Test number: 03");
    this.logger.info("Running query:" + sql);
    this.logger.debug(description);
    java.sql.ResultSet result = null;
    try {
        Statement stmt = con.createStatement();
        //stmt.setQueryTimeout(60);
        result = stmt.executeQuery(sql);
    } catch (SQLException e) {
        this.logger.debug("SQLexception" + e.toString());
        //fail("SQLException" + e.toString());
        Assert.assertTrue(e.getCause().toString()
                .contains("Access Denied: Table measurement-lab:m_lab.2010_01: QUERY_TABLE"));
    }
    logger.info("QueryResult03 result is" + result.toString());
}