Example usage for java.sql ResultSetMetaData getColumnLabel

Introduction

This page presents usage examples for java.sql.ResultSetMetaData.getColumnLabel.

Prototype

String getColumnLabel(int column) throws SQLException;

Document

Gets the designated column's suggested title for use in printouts and displays.
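
A minimal sketch contrasting getColumnLabel with getColumnName may help before the full examples: per the JDBC contract, getColumnLabel returns the alias given in a SQL AS clause when one exists, while getColumnName returns the underlying column name. The in-memory H2 database, the employees table, and its column below are illustrative assumptions, not taken from the examples on this page.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;

public class ColumnLabelDemo {
    public static void main(String[] args) throws SQLException {
        // Assumes the H2 driver is on the classpath; any JDBC driver behaves the same way here.
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo");
                Statement st = conn.createStatement()) {
            st.execute("CREATE TABLE employees (salary INT)");
            try (ResultSet rs = st.executeQuery("SELECT salary AS monthly_pay FROM employees")) {
                ResultSetMetaData md = rs.getMetaData();
                System.out.println(md.getColumnLabel(1)); // the AS alias: MONTHLY_PAY
                System.out.println(md.getColumnName(1));  // the underlying name: SALARY
            }
        }
    }
}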

Usage

From source file:com.google.visualization.datasource.util.SqlDataSourceHelper.java

/**
 * Returns the table description which includes the ids, labels and types of
 * the table columns.
 *
 * @param rs The result set holding the data from the sql table.
 * @param columnIdsList The list of the column ids in the data table.
 *
 * @return The table description.
 *
 * @throws SQLException Thrown when the connection to the database fails.
 */
static DataTable buildColumns(ResultSet rs, List<String> columnIdsList) throws SQLException {
    DataTable result = new DataTable();
    ResultSetMetaData metaData = rs.getMetaData();
    int numOfCols = metaData.getColumnCount();
    // For each column in the table, create the column description. SQL indexes
    // are 1-based.
    for (int i = 1; i <= numOfCols; i++) {
        String id = (columnIdsList == null) ? metaData.getColumnLabel(i) : columnIdsList.get(i - 1);
        ColumnDescription columnDescription = new ColumnDescription(id,
                sqlTypeToValueType(metaData.getColumnType(i)), metaData.getColumnLabel(i));
        result.addColumn(columnDescription);
    }
    return result;
}

From source file:io.cloudslang.content.database.utils.Format.java

/**
 * Returns a tabular form of the result set, similar to what you would get
 * from running a query on the command line.
 *
 * @param result               populated result set
 * @param cal                  calendar to use when formatting column values
 * @param checkNullTermination passed through to getColumn when reading each value
 * @return the result set rendered as a padded text table
 * @throws SQLException if reading the result set fails
 */
private static String resultSetToTable(ResultSet result, Calendar cal, boolean checkNullTermination)
        throws SQLException {
    assert (result != null);

    ResultSetMetaData md = result.getMetaData();
    int nCols = md.getColumnCount();
    String[] headers = new String[nCols];
    int[] headerSz = new int[nCols]; // per-column widths (note: Eclipse offers a similar getDisplaySizes())
    int maxWidth = 1; // maximum column width, initially 1 to avoid a zero-capacity edge case

    List<String[]> rows = new ArrayList<>(); // buffered rows
    // This is fairly space-intensive: the whole result set is buffered in Java
    // data structures so that the column widths can be computed in a single
    // O(n) pass, with the padding applied afterwards during serialization.
    // The alternative (re-scanning the data) would trade that memory for time.
    for (int colheader = 0; colheader < nCols; colheader++) {
        headers[colheader] = md.getColumnLabel(colheader + 1);
        headerSz[colheader] = headers[colheader].length();
    }

    // widen each column to fit its data while buffering the rows
    while (result.next()) {
        String[] row = new String[headers.length];
        for (int colN = 0; colN < nCols; colN++) {
            String colVal = getColumn(result, colN + 1, checkNullTermination);
            headerSz[colN] = colVal.length() > headerSz[colN] ? colVal.length() : headerSz[colN];
            row[colN] = colVal;
        }
        rows.add(row);
    }
    // column widths set, now start populating the string builder
    StringBuilder resultSb = new StringBuilder(headers.length * maxWidth / 2);
    // construct the headers
    for (int colheader = 0; colheader < nCols; colheader++) {
        resultSb.append(headers[colheader]);
        for (int count = 0; count < headerSz[colheader] - headers[colheader].length() + colPadding; count++)
            resultSb.append(" ");
    }
    resultSb.append(NEW_LINE);
    for (int colheader = 0; colheader < nCols; colheader++) {
        for (int count = 0; count < headerSz[colheader]; count++)
            resultSb.append("-");
        for (int count = 0; count < colPadding; count++)
            resultSb.append(" ");
    }
    resultSb.append(NEW_LINE);
    // now append the data itself
    for (String[] row : rows) {
        for (int col = 0; col < nCols; col++) {
            resultSb.append(row[col]);
            for (int padIdx = 0; padIdx < headerSz[col] - row[col].length() + colPadding; padIdx++)
                resultSb.append(" ");
        }
        resultSb.append(NEW_LINE);
    }

    return resultSb.toString();
}

From source file:org.apache.phoenix.end2end.AlterTableWithViewsIT.java

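/**
 * Returns a dump of all SYSTEM.CATALOG rows for the given table, one
 * column label=value pair per line, prefixed by the supplied message.
 */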
public static String getSystemCatalogEntriesForTable(Connection conn, String tableName, String message)
        throws Exception {
    StringBuilder sb = new StringBuilder(message);
    sb.append("\n\n\n");
    ResultSet rs = conn.createStatement()
            .executeQuery("SELECT * FROM \"SYSTEM\".\"CATALOG\" WHERE TABLE_NAME='" + tableName + "'");
    ResultSetMetaData metaData = rs.getMetaData();
    int rowNum = 0;
    while (rs.next()) {
        sb.append(rowNum++).append("------\n");
        for (int i = 1; i <= metaData.getColumnCount(); i++) {
            sb.append("\t").append(metaData.getColumnLabel(i)).append("=").append(rs.getString(i)).append("\n");
        }
        sb.append("\n");
    }
    rs.close();
    return sb.toString();
}

From source file:dbconverter.dao.util.ToolKit.java

/**
 * Indexes every document within a ResultSet object
 * @param resultSet The ResultSet containing all documents to be indexed
 * @param bl Determines where to index the data
 * @param uploadInterval Determines how frequently to clear local memory
 * @return The number of documents indexed
 * @author hightowe
 */
public static int bulkIndexResultSet(ResultSet resultSet, BulkLoader bl, int uploadInterval) {
    assert resultSet != null : PARAMETER_ERROR;
    assert uploadInterval > 0 : PARAMETER_ERROR;
    assert bl != null && bl.isConfigured() : PARAMETER_ERROR;

    int count = 0;
    try {
        ResultSetMetaData rsMetaData = resultSet.getMetaData();
        int columnNumbers = rsMetaData.getColumnCount();
        List<Map> docsList = new ArrayList<>();

        while (resultSet.next()) {
            Map<String, Object> dataMap = new HashMap<>();
            for (int i = 1; i <= columnNumbers; i++) {
                dataMap.put(rsMetaData.getColumnLabel(i), resultSet.getString(i));
            }

            // append a timestamp of when this document was created
            dataMap.put(TIME_STAMP, getISOTime(TIME_STAMP_FORMAT));

            docsList.add(dataMap);
            count++;

            if (count % uploadInterval == 0) {
                bl.bulkIndex(docsList);
                logger.info("Indexed " + count + " documents " + getISOTime(TIME_STAMP_FORMAT));
                docsList.clear();
            }
        }

        if (docsList.size() > 0) {
            bl.bulkIndex(docsList);
            logger.info("Indexed " + count + " documents " + getISOTime(TIME_STAMP_FORMAT));
        }
    } catch (SQLException ex) {
        logger.error(ex);
    }

    logger.info("Total documents indexed: " + count + ", " + getISOTime(TIME_STAMP_FORMAT));

    return count;
}

From source file:com.quest.orahive.HiveJdbcClient.java

private static List<OracleTableColumn> getOracleTableColumnsForHiveResults(ResultSet resultSet) {

    List<OracleTableColumn> result = null;

    try {
        ResultSetMetaData metaData = resultSet.getMetaData();

        result = new ArrayList<OracleTableColumn>(metaData.getColumnCount());

        for (int idx = 0; idx < metaData.getColumnCount(); idx++) {
            OracleTableColumn column = new OracleTableColumn();
            result.add(column);

            // column Name...
            column.setName(metaData.getColumnLabel(idx + 1)); // 1-based in JDBC

            // Sql Type...
            column.sqlType = metaData.getColumnType(idx + 1); // 1-based in JDBC

            // column Oracle data-type...

            Constants.OracleType oracleType = javaSqlTypeToOracleType(column.sqlType);

            switch (oracleType) {

            case VARCHAR2: {

                // 4000 is the maximum length for a VARCHAR2
                column.oracleDataType = String.format("%s(%d)", oracleType.toString(), 4000);
                break;
            }

            default: {
                column.oracleDataType = oracleType.toString();
                break;
            }

            }
        }

    } catch (SQLException ex) {
        LOG.error("An error occurred when processing the metadata for the Hive result-set.", ex);
        System.exit(1);
    }

    return result;
}

From source file:com.hangum.tadpole.engine.sql.util.QueryUtils.java

/**
 * Runs a query and returns the result as CSV.
 * 
 * @param userDB       the target database
 * @param strQuery     the query to execute
 * @param listParam    the query parameters
 * @param isAddHead    if true, a header row of column labels is added
 * @param strDelimiter the field delimiter; if null or empty, defaults to comma (,)
 */
@SuppressWarnings("deprecation")
public static String selectToCSV(final UserDBDAO userDB, final String strQuery, final List<Object> listParam,
        final boolean isAddHead, final String strDelimiter) throws Exception {
    final StringWriter stWriter = new StringWriter();

    SqlMapClient client = TadpoleSQLManager.getInstance(userDB);
    QueryRunner qr = new QueryRunner(client.getDataSource());
    qr.query(strQuery, listParam.toArray(), new ResultSetHandler<Object>() {

        @Override
        public Object handle(ResultSet rs) throws SQLException {
            ResultSetMetaData metaData = rs.getMetaData();

            char strDel;
            if (StringUtils.isEmpty(strDelimiter)) { // null or empty: default to comma
                strDel = ',';
            } else if (StringUtils.equalsIgnoreCase("\t", strDelimiter)) {
                strDel = '\t';
            } else {
                strDel = strDelimiter.charAt(0);
            }

            CSVWriter csvWriter = new CSVWriter(stWriter, strDel);
            if (isAddHead) {
                String[] arryString = new String[metaData.getColumnCount()];
                for (int i = 1; i <= metaData.getColumnCount(); i++) {
                    arryString[i - 1] = metaData.getColumnLabel(i);
                }
                csvWriter.writeNext(arryString);
            }

            while (rs.next()) {
                String[] arryString = new String[metaData.getColumnCount()];
                for (int i = 1; i <= metaData.getColumnCount(); i++) {
                    arryString[i - 1] = rs.getString(i);
                }
                csvWriter.writeNext(arryString);
            }

            return stWriter.toString();
        }
    });

    return stWriter.toString();
}

From source file:dbconverter.dao.util.ToolKit.java

/**
 * Given a ResultSet, writes the contained data as JSON to a target file,
 *  with the expectation that said file will be used in an Elasticsearch
 *  bulk index operation.
 * This method supports arbitrary-sized ResultSets, provided interval is set low enough
 * @param resultSet The ResultSet to save to a file
 * @param obj A QueryObject which must contain the index and type of the target
 * @param interval Determines how many documents should be stored within Java at a time
 *                 If you run out of heap space, try decreasing this value
 * @param fileName The name of the file to write to
 * @author hightowe
 */
public static void writeResultSetToJson(ResultSet resultSet, QueryObject obj, int interval, String fileName) {
    assert resultSet != null : "ResultSet cannot be null!";

    List<String> resultsList = new ArrayList<>();

    try {
        ResultSetMetaData rsMetaData = resultSet.getMetaData();
        int columnNumbers = rsMetaData.getColumnCount();
        int count = 0;
        int prev = 0;
        while (resultSet.next()) {
            Map<String, Object> dataMap = new HashMap<>();

            // add all column names to the map key-set
            for (int i = 1; i <= columnNumbers; i++) {
                dataMap.put(rsMetaData.getColumnLabel(i), resultSet.getObject(i));
            }

            dataMap.put(TIME_STAMP, getISOTime(TIME_STAMP_FORMAT));

            // Add the data to List of Maps
            String json = ToolKit.convertMapToJson(dataMap);
            resultsList.add(json);
            count++;

            // write to file after every (interval)th run, then clear
            // resultsList to avoid heap space errors
            if (count % interval == 0) {
                writeJsonStringsToFile(resultsList, fileName, obj, prev);
                prev += interval;
                resultsList.clear();
            }
        }

        writeJsonStringsToFile(resultsList, fileName, obj, prev);

    } catch (SQLException e) {
        logger.error(e);
    }
}

From source file:org.mule.transport.jdbc.ColumnAliasRowProcessor.java

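/**
 * Converts the current row of the ResultSet into a case-insensitive map
 * keyed by column label, so SQL aliases take precedence over the
 * underlying column names.
 */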
@Override
public Map toMap(ResultSet rs) throws SQLException {
    Map result = new CaseInsensitiveHashMap();
    ResultSetMetaData rsmd = rs.getMetaData();
    int cols = rsmd.getColumnCount();

    for (int i = 1; i <= cols; i++) {
        result.put(rsmd.getColumnLabel(i), rs.getObject(i));
    }

    return result;
}

From source file:Professor.java

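/**
 * Executes the given query on the instance's Statement and prints the rows
 * as a simple tab-separated table, using getColumnLabel for the header row.
 */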
public void checkData(String sql) throws Exception {
    ResultSet rs = st.executeQuery(sql);
    ResultSetMetaData metadata = rs.getMetaData();

    for (int i = 0; i < metadata.getColumnCount(); i++) {
        System.out.print("\t" + metadata.getColumnLabel(i + 1));
    }
    System.out.println("\n----------------------------------");

    while (rs.next()) {
        for (int i = 0; i < metadata.getColumnCount(); i++) {
            Object value = rs.getObject(i + 1);
            if (value == null) {
                System.out.print("\t       ");
            } else {
                System.out.print("\t" + value.toString().trim());
            }
        }
        System.out.println("");
    }
}

From source file:org.apache.cayenne.dbsync.reverse.dbload.AttributeLoader.java

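/**
 * Returns true if the given ResultSet (typically column metadata from
 * DatabaseMetaData#getColumns) exposes an IS_AUTOINCREMENT column.
 */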
private boolean checkForAutoIncrement(ResultSet rs) throws SQLException {
    ResultSetMetaData rsMetaData = rs.getMetaData();
    for (int i = 1; i <= rsMetaData.getColumnCount(); i++) {
        if ("IS_AUTOINCREMENT".equals(rsMetaData.getColumnLabel(i))) {
            return true;
        }
    }
    return false;
}