Example usage for java.sql SQLException printStackTrace

List of usage examples for java.sql SQLException printStackTrace

Introduction

On this page you can find example usage for java.sql SQLException printStackTrace.

Prototype

public void printStackTrace() 

Source Link

Document

Prints this throwable and its backtrace to the standard error stream.

Usage

From source file:database.HashTablesTools.java

/**
 * Counts rows in the failure table plus the number of distinct hashes
 * (column 1) in the main table, then shuts the database down.
 *
 * @param tableName        main (sequence) table name
 * @param tableFailureName failure table name
 * @return total count; partial if a query fails (errors are printed, not thrown)
 */
public static int countFilesInDB(String tableName, String tableFailureName) {

    Connection connection = HashTablesTools.getConnection(tableName, tableFailureName);

    int countOfFiles = 0;

    // Count every row of the failure table.
    // try-with-resources closes Statement/ResultSet (the originals leaked).
    try (Statement stmt = connection.createStatement();
            ResultSet resultFindEntry = stmt.executeQuery("SELECT * from " + tableFailureName)) {
        while (resultFindEntry.next()) {
            countOfFiles += 1;
        }
    } catch (SQLException e) {
        e.printStackTrace();
    }

    // Count distinct hashes (first column) of the main table.
    Set<String> uniqueHash = new HashSet<>();
    try (Statement stmt = connection.createStatement();
            ResultSet resultFindEntry = stmt.executeQuery("SELECT * from " + tableName)) {
        while (resultFindEntry.next()) {
            uniqueHash.add(resultFindEntry.getString(1));
        }
        countOfFiles += uniqueHash.size();
    } catch (SQLException e) {
        e.printStackTrace();
    }
    HashTablesTools.shutdown();
    return countOfFiles;
}

From source file:capture.MySQLDatabase.java

/**
 * Obtains a JDBC connection from the shared data source.
 *
 * @return a live connection, or {@code null} when none could be obtained
 *         (the failure is printed to stderr)
 */
public static Connection getConnection() {
    Connection connection = null;
    try {
        connection = dataSource.getConnection();
    } catch (SQLException e) {
        e.printStackTrace();
    }
    return connection;
}

From source file:org.apache.drill.test.framework.Utils.java

/**
 * Queries sys.drillbits and returns the number of active drillbits.
 *
 * @param connection open JDBC connection to Drill
 * @return drillbit count, or 0 if the query fails (error is logged)
 */
public static int getNumberOfDrillbits(Connection connection) {
    String query = "select count(*) from sys.drillbits";
    int numberOfDrillbits = 0;
    // Close the ResultSet when done (the original leaked it).
    // NOTE(review): the Statement created inside execSQL is still not closed
    // here — it is only released when the connection closes.
    try (ResultSet resultSet = execSQL(query, connection)) {
        resultSet.next();
        numberOfDrillbits = resultSet.getInt(1);
    } catch (SQLException e) {
        // Log with the throwable so the stack trace lands in the log,
        // instead of splitting it between the log and stderr.
        LOG.error("Failed to count drillbits", e);
    }
    return numberOfDrillbits;
}

From source file:database.HashTablesTools.java

/**
 * For every local mmCIF file, checks whether its hash is already present in
 * the failure table or, failing that, in the sequence table; returns how many
 * files were found in either.
 *
 * @param tableName            sequence table name
 * @param tableFailureName     failure table name
 * @param indexPDBFileInFolder local files grouped by key; only the hashes are used
 * @return count found in the failure table plus count found in the sequence table
 */
public static int countFilesWhichAreAlreadyIndexedInSequenceDB_notgood_either(String tableName,
        String tableFailureName, Map<String, List<MMcifFileInfos>> indexPDBFileInFolder) {

    Connection connection = HashTablesTools.getConnection(tableName, tableFailureName);

    int countOfHashFoundInFailureDB = 0;
    int countOfHashFoundInSequenceDB = 0;

    for (Map.Entry<String, List<MMcifFileInfos>> entry : indexPDBFileInFolder.entrySet()) {
        for (MMcifFileInfos fileInfos : entry.getValue()) {
            String hash = fileInfos.getHash();
            // Failure table wins; only fall through to the sequence table otherwise.
            if (hashPresentInTable(connection, tableFailureName, hash)) {
                countOfHashFoundInFailureDB += 1;
            } else if (hashPresentInTable(connection, tableName, hash)) {
                countOfHashFoundInSequenceDB += 1;
            }
        }
    }
    HashTablesTools.shutdown();
    return countOfHashFoundInFailureDB + countOfHashFoundInSequenceDB;
}

/** Returns true if {@code table} contains a row whose pdbfilehash equals {@code hash}. */
private static boolean hashPresentInTable(Connection connection, String table, String hash) {
    // NOTE(review): table name and hash are concatenated into the SQL text.
    // Safe only while both originate from internal code, never user input;
    // a PreparedStatement would be preferable.
    String findEntry = "SELECT * from " + table + " WHERE pdbfilehash = '" + hash + "'";
    // try-with-resources closes Statement/ResultSet (originals leaked one per iteration).
    try (Statement stmt = connection.createStatement();
            ResultSet rs = stmt.executeQuery(findEntry)) {
        return rs.next();
    } catch (SQLException e) {
        e.printStackTrace();
        return false;
    }
}

From source file:org.apache.drill.test.framework.Utils.java

/**
 * Executes the given query on the connection and returns its ResultSet.
 * On failure the connection is closed and the exception is rethrown.
 *
 * @param sql        query to execute
 * @param connection open JDBC connection; closed if the query fails
 * @return the query's ResultSet (caller is responsible for closing it)
 * @throws SQLException if the query fails
 */
public static ResultSet execSQL(String sql, Connection connection) throws SQLException {
    try {
        Statement statement = connection.createStatement();
        return statement.executeQuery(sql);
    } catch (SQLException e) {
        // Log with the throwable so the stack trace goes to the log, not stderr.
        LOG.error(e.getMessage(), e);
        try {
            connection.close();
        } catch (SQLException e1) {
            // BUG FIX: the original logged e.getMessage() (the outer exception)
            // here, hiding the close() failure's own message.
            LOG.error(e1.getMessage(), e1);
        }
        throw e;
    }
}

From source file:es.tekniker.framework.ktek.questionnaire.mng.db.QuestionnaireLoadFile.java

/**
 * Bulk-loads questionnaire reference data from semicolon-separated lines.
 * The first field of each line names the target table; the matching
 * getSQL4_* helper builds the INSERT for the remaining fields.
 *
 * @param data lines of the form "tablename;field1;field2;..."
 * @return false if a SQLException occurred while executing statements;
 *         NOTE(review): a PersistentException path still returns true — confirm intended
 */
public static boolean loadData(List<String> data) {

    boolean boolOK = true;

    PersistentSession session = null;
    PersistentTransaction tr = null;
    Statement st; // NOTE(review): never closed explicitly — released only via session.close()

    StringBuffer sql = null;
    String[] dataline = null;
    String tablename = null;
    try {

        session = KTEKPersistentManager.instance().getSession();
        tr = session.beginTransaction();

        try {
            st = session.connection().createStatement();
            System.out.println(data.size());
            for (int i = 0; i < data.size(); i++) {

                // Each input line is "tablename;col1;col2;..."
                dataline = data.get(i).split(";");

                log.debug("data by line " + data.get(i) + " num items " + dataline.length + " data line 0 "
                        + dataline[0]);

                tablename = dataline[0];

                tablename = tablename.trim();

                // Dispatch to the SQL builder matching the target table.
                // NOTE(review): only the first branch compares the trimmed name;
                // all others compare the raw dataline[0] — confirm input lines
                // never carry whitespace around the table name.
                sql = null;
                if (tablename.equals(TABLE_ktek_questionnaire))
                    sql = getSQL4_Table_Questionnaire(dataline);
                else if (dataline[0].equals(TABLE_ktek_formatquestionnaire))
                    sql = getSQL4_Table_Formatquestionnaire(dataline);
                else if (dataline[0].equals(TABLE_ktek_questionnairesection))
                    sql = getSQL4_Table_Questionnairesection(dataline);
                else if (dataline[0].equals(TABLE_ktek_questionnaireitem))
                    sql = getSQL4_Table_Questionnaireitem(dataline);
                else if (dataline[0].equals(TABLE_ktek_question))
                    sql = getSQL4_Table_Question(dataline);
                else if (dataline[0].equals(TABLE_ktek_answer))
                    sql = getSQL4_Table_Answer(dataline);
                else if (dataline[0].equals(TABLE_ktek_answerset))
                    sql = getSQL4_Table_Answerset(dataline);
                else if (dataline[0].equals(TABLE_ktek_answerset_answer))
                    sql = getSQL4_Table_Answerset_answer(dataline);
                else if (dataline[0].equals(TABLE_ktek_qitem_section))
                    sql = getSQL4_Table_Qitemsection(dataline);
                else if (dataline[0].equals(TABLE_ktek_qsection_questionnaire))
                    sql = getSQL4_Table_Qsectionquestionnaire(dataline);

                else if (dataline[0].equals(TABLE_ktek_parameter))
                    sql = getSQL4_Table_Parameter(dataline);
                else if (dataline[0].equals(TABLE_ktek_unit))
                    sql = getSQL4_Table_Unit(dataline);
                else if (dataline[0].equals(TABLE_ktek_unit_parameter))
                    sql = getSQL4_Table_Unit_parameter(dataline);
                else if (dataline[0].equals(TABLE_ktek_questionnaireconfiguration))
                    sql = getSQL4_Table_Questionnaireconfiguration(dataline);

                else {
                    // Unknown table name: skip the line (sql stays null).
                    log.debug("table name not found " + dataline[0]);
                }
                log.debug("i : " + i + " SQL : " + sql);
                if (sql != null)
                    st.execute(sql.toString());
            }
        } catch (SQLException e) {
            e.printStackTrace();
            log.debug(" SQLException " + e.getMessage());
            // Abort further loading, but execution falls through to commit below.
            boolOK = false;
        }
        // NOTE(review): commit runs even when boolOK is false, so statements
        // executed before the failure are persisted — confirm this is intended
        // (a rollback on failure may be expected instead).
        tr.commit();

        session.close();

    } catch (PersistentException e) {
        e.printStackTrace();
    }
    return boolOK;
}

From source file:database.HashTablesTools.java

/**
 * For every local mmCIF file, checks whether its hash is present in the
 * failure table or, failing that, in the sequence table; prints and returns
 * the combined count.
 *
 * @param tableName            sequence table name
 * @param tableFailureName     failure table name
 * @param indexPDBFileInFolder local files grouped by key; only the hashes are used
 * @return count found in the sequence table plus count found in the failure table
 */
public static int countFilesWhichAreAlreadyIndexedInSequenceDB(String tableName, String tableFailureName,
        Map<String, List<MMcifFileInfos>> indexPDBFileInFolder) {

    Connection connection = HashTablesTools.getConnection(tableName, tableFailureName);

    // Removed two dead full-table SELECTs whose ResultSets were never read
    // and never closed (pure resource leak in the original).

    int countOfHashFoundInFailureDB = 0;
    int countOfHashFoundInSequenceDB = 0;

    for (Map.Entry<String, List<MMcifFileInfos>> entry : indexPDBFileInFolder.entrySet()) {
        for (MMcifFileInfos fileInfos : entry.getValue()) {
            String hash = fileInfos.getHash();

            // NOTE(review): table name and hash are concatenated into the SQL
            // text; safe only while both are internal values, never user input.
            boolean inFailureDb = false;
            // try-with-resources closes Statement/ResultSet each iteration
            // (the original leaked them on every loop pass).
            try (Statement stmt = connection.createStatement();
                    ResultSet rs = stmt.executeQuery(
                            "SELECT * from " + tableFailureName + " WHERE pdbfilehash = '" + hash + "'")) {
                inFailureDb = rs.next();
            } catch (SQLException e) {
                e.printStackTrace();
            }
            if (inFailureDb) {
                countOfHashFoundInFailureDB += 1;
                continue;
            }

            // Not in the failure table: check the sequence table.
            try (Statement stmt = connection.createStatement();
                    ResultSet rs = stmt.executeQuery(
                            "SELECT * from " + tableName + " WHERE pdbfilehash = '" + hash + "'")) {
                if (rs.next()) {
                    countOfHashFoundInSequenceDB += 1;
                }
            } catch (SQLException e) {
                e.printStackTrace();
            }
        }
    }

    System.out.println("countOfHashFoundInSequenceDB = " + countOfHashFoundInSequenceDB);
    System.out.println("countOfHashFoundInFailureDB = " + countOfHashFoundInFailureDB);

    return countOfHashFoundInSequenceDB + countOfHashFoundInFailureDB;
}

From source file:com.splicemachine.mrio.api.SpliceTableMapReduceUtil.java

/**
 * Use this before submitting a TableReduce job. It will
 * appropriately set up the JobConf./*from   w w  w .j a  v a  2  s.c om*/
 *
 * @param table  The output Splice table name, The format should be Schema.tableName.
 * @param reducer  The reducer class to use.
 * @param job  The current job to adjust.  Make sure the passed job is
 * carrying all necessary configuration.
 * @param partitioner  Partitioner to use. Pass <code>null</code> to use
 * default partitioner.
 * @param quorumAddress Distant cluster to write to; default is null for
 * output to the cluster that is designated in <code>hbase-site.xml</code>.
 * Set this String to the zookeeper ensemble of an alternate remote cluster
 * when you would have the reduce write a cluster that is other than the
 * default; e.g. copying tables between clusters, the source would be
 * designated by <code>hbase-site.xml</code> and this param would have the
 * ensemble address of the remote cluster.  The format to pass is particular.
 * Pass <code> &lt;hbase.zookeeper.quorum>:&lt;hbase.zookeeper.client.port>:&lt;zookeeper.znode.parent>
 * </code> such as <code>server,server2,server3:2181:/hbase</code>.
 * @param serverClass redefined hbase.regionserver.class
 * @param serverImpl redefined hbase.regionserver.client
 * @param addDependencyJars upload HBase jars and jars for any of the configured
 *           job classes via the distributed cache (tmpjars).
 * @throws IOException When determining the region count fails.
 * @throws SQLException
 */
public static void initTableReducerJob(String table, Class<? extends Reducer> reducer, Job job,
        Class partitioner, String quorumAddress, String serverClass, String serverImpl,
        boolean addDependencyJars, Class<? extends OutputFormat> outputformatClass) throws IOException {

    Configuration conf = job.getConfiguration();
    job.setOutputFormatClass(outputformatClass);
    if (reducer != null)
        job.setReducerClass(reducer);
    conf.set(MRConstants.SPLICE_OUTPUT_TABLE_NAME, table);
    if (sqlUtil == null)
        sqlUtil = SMSQLUtil.getInstance(conf.get(MRConstants.SPLICE_JDBC_STR));
    // If passed a quorum/ensemble address, pass it on to TableOutputFormat.
    String hbaseTableID = null;
    try {
        hbaseTableID = sqlUtil.getConglomID(table);
    } catch (SQLException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
        throw new IOException(e);
    }
    conf.set(MRConstants.HBASE_OUTPUT_TABLE_NAME, table);

    if (quorumAddress != null) {
        // Calling this will validate the format
        HBasePlatformUtils.validateClusterKey(quorumAddress);
        conf.set(TableOutputFormat.QUORUM_ADDRESS, quorumAddress);
    }
    if (serverClass != null && serverImpl != null) {
        conf.set(TableOutputFormat.REGION_SERVER_CLASS, serverClass);
        conf.set(TableOutputFormat.REGION_SERVER_IMPL, serverImpl);

    }
    job.setOutputKeyClass(ImmutableBytesWritable.class);
    job.setOutputValueClass(Object.class);
    if (partitioner == HRegionPartitioner.class) {
        job.setPartitionerClass(HRegionPartitioner.class);
        // TODO Where are the keys?
        int regions = getReduceNumberOfRegions(hbaseTableID);
        if (job.getNumReduceTasks() > regions) {
            job.setNumReduceTasks(regions);
        }
    } else if (partitioner != null) {
        job.setPartitionerClass(partitioner);
    }

    if (addDependencyJars) {
        addDependencyJars(job);
    }

    //initCredentials(job);
}

From source file:database.HashTablesTools.java

/**
 * Looks up the sequence (column 4) stored for the given four-letter code and
 * chain id in the sequence table.
 *
 * @param connection        open JDBC connection
 * @param fourLetterCode    PDB four-letter code
 * @param chainName         chain id
 * @param sequenceTableName sequence table name
 * @return the stored sequence, or null when the lookup fails or the match is
 *         not unique (exactly one row expected)
 */
public static String returnSequenceInDbifFourLetterCodeAndChainfoundInDatabase(Connection connection,
        String fourLetterCode, String chainName, String sequenceTableName) {

    String sequenceInDb = null;
    // NOTE(review): values are concatenated into the SQL text; safe only while
    // they are internal identifiers, never user input.
    String findEntry = "SELECT * from " + sequenceTableName + " WHERE fourLettercode = '" + fourLetterCode
            + "' and chainId = '" + chainName + "'";
    // try-with-resources closes Statement/ResultSet (the original leaked both).
    try (Statement stmt = connection.createStatement();
            ResultSet resultFindEntry = stmt.executeQuery(findEntry)) {
        int foundEntriesCount = 0;
        // BUG FIX: the original used "if (next())", so foundEntriesCount could
        // never exceed 1 and duplicate rows were silently accepted. Count all
        // matching rows so the uniqueness check below actually works.
        while (resultFindEntry.next()) {
            foundEntriesCount += 1;
            if (foundEntriesCount == 1) {
                sequenceInDb = resultFindEntry.getString(4);
            }
        }

        if (foundEntriesCount != 1) {
            System.out.println("problem isFourLetterCodeAndChainfoundInDatabase " + fourLetterCode + "  "
                    + chainName + "  " + foundEntriesCount);
            return null;
        }
    } catch (SQLException e1) {
        e1.printStackTrace();
        return null;
    }
    return sequenceInDb;
}

From source file:database.HashTablesTools.java

/**
 * Scans the failure and sequence tables once each and counts how many distinct
 * stored hashes correspond to a local mmCIF file; prints diagnostics and
 * returns the combined count.
 *
 * @param tableName            sequence table name
 * @param tableFailureName     failure table name
 * @param indexPDBFileInFolder local files grouped by key; only the hashes are used
 * @return distinct local hashes found in the failure table plus those found in
 *         the sequence table
 */
public static int countFilesWhichAreAlreadyIndexedInSequenceDB_old(String tableName, String tableFailureName,
        Map<String, List<MMcifFileInfos>> indexPDBFileInFolder) {

    Connection connection = HashTablesTools.getConnection(tableName, tableFailureName);

    // Collect the hashes of all local files. A HashSet makes each contains()
    // below O(1); the original used a List, making the row scans quadratic.
    Set<String> filesHash = new HashSet<>();
    for (Map.Entry<String, List<MMcifFileInfos>> entry : indexPDBFileInFolder.entrySet()) {
        for (MMcifFileInfos fileInfos : entry.getValue()) {
            filesHash.add(fileInfos.getHash());
        }
    }
    System.out.println("finished hash list " + filesHash.size());

    int countOfFilesAlreadyFoundInFailureHashDb = 0;
    int countOfFilesAlreadyFoundInSequenceDb = 0;

    int totalcountOfFilesAlreadyFoundInFailureHashDb = 0;
    int totalcountOfFilesAlreadyFoundInSequenceDb = 0;

    // Scan the failure table. Query and iteration now share one try block, so
    // a failed query can no longer leave a null ResultSet that NPEs later
    // (the original caught only SQLException around the .next() loop).
    // try-with-resources closes Statement/ResultSet (originals leaked them).
    Set<String> uniqueHashFailure = new HashSet<>();
    Set<String> totaluniqueHashFailure = new HashSet<>();
    try (Statement stmt = connection.createStatement();
            ResultSet rs = stmt.executeQuery("SELECT * from " + tableFailureName)) {
        System.out.println("starting hgo through failure db");
        while (rs.next()) {
            String hash = rs.getString(1);
            if (filesHash.contains(hash)) {
                // then it is found
                uniqueHashFailure.add(hash);
            }
            totaluniqueHashFailure.add(hash);
        }
        countOfFilesAlreadyFoundInFailureHashDb = uniqueHashFailure.size();
        totalcountOfFilesAlreadyFoundInFailureHashDb = totaluniqueHashFailure.size();
    } catch (SQLException e) {
        e.printStackTrace();
    }

    // Scan the sequence table the same way.
    try (Statement stmt = connection.createStatement();
            ResultSet rs = stmt.executeQuery("SELECT * from " + tableName)) {
        System.out.println("starting hgo through sequence db");
        Set<String> uniqueHashIndexed = new HashSet<>();
        Set<String> totaluniqueHashIndexed = new HashSet<>();

        while (rs.next()) {
            String hash = rs.getString(1);
            if (filesHash.contains(hash)) {
                // then it is found
                uniqueHashIndexed.add(hash);
            }
            totaluniqueHashIndexed.add(hash);
        }
        countOfFilesAlreadyFoundInSequenceDb = uniqueHashIndexed.size();
        totalcountOfFilesAlreadyFoundInSequenceDb = totaluniqueHashIndexed.size();
    } catch (SQLException e) {
        e.printStackTrace();
    }

    System.out.println("countOfFilesAlreadyFoundInFailureHashDb = " + countOfFilesAlreadyFoundInFailureHashDb);
    System.out.println(
            "totalcountOfFilesAlreadyFoundInFailureHashDb = " + totalcountOfFilesAlreadyFoundInFailureHashDb);
    System.out.println("countOfFilesAlreadyFoundInSequenceDb = " + countOfFilesAlreadyFoundInSequenceDb);
    System.out.println(
            "totalcountOfFilesAlreadyFoundInSequenceDb = " + totalcountOfFilesAlreadyFoundInSequenceDb);

    return countOfFilesAlreadyFoundInFailureHashDb + countOfFilesAlreadyFoundInSequenceDb;
}