Example usage for java.sql ResultSetMetaData getColumnName

List of usage examples for java.sql ResultSetMetaData getColumnName

Introduction

This page collects usage examples for java.sql ResultSetMetaData getColumnName from open-source projects.

Prototype

String getColumnName(int column) throws SQLException;

Document

Get the designated column's name.
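
A minimal sketch of the call, assuming an open Connection named conn and an illustrative table name (neither is defined on this page):

import java.sql.*;

try (Statement stmt = conn.createStatement();
        ResultSet rs = stmt.executeQuery("SELECT * FROM some_table")) {
    ResultSetMetaData meta = rs.getMetaData();
    // JDBC column indices are 1-based
    for (int i = 1; i <= meta.getColumnCount(); i++) {
        System.out.println(i + ": " + meta.getColumnName(i));
    }
}

Note that getColumnName returns the underlying column's name; for a column aliased with AS, getColumnLabel returns the alias and is usually the better choice for display code.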

Usage

From source file:com.alibaba.wasp.jdbc.TestJdbcResultSet.java

public void testColumnLabelColumnName() throws SQLException {
    stat = conn.createStatement();
    stat.executeUpdate("Insert into " + TABLE_NAME + "(column1,column2,column3) values (2,1,'binlijin');");
    ResultSet rs = stat.executeQuery("select column3 as y from test where column1=2 and column3='binlijin' ");
    rs.next();
    rs.getString("column3");
    rs.getString("y");
    rs.close();
    rs = conn.getMetaData().getColumns(null, null, null, null);
    ResultSetMetaData meta = rs.getMetaData();
    int columnCount = meta.getColumnCount();
    String[] columnName = new String[columnCount];
    for (int i = 1; i <= columnCount; i++) {
        columnName[i - 1] = meta.getColumnName(i);
    }
    while (rs.next()) {
        for (int i = 0; i < columnCount; i++) {
            rs.getObject(columnName[i]);
        }
    }
}
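
The test above depends on the driver resolving both the original column name and the AS alias. The same distinction is visible directly in the metadata; a sketch, reusing the aliased query from the test:

ResultSet rs = stat.executeQuery("select column3 as y from test");
ResultSetMetaData meta = rs.getMetaData();
// getColumnName reports the underlying column, getColumnLabel the alias
String name = meta.getColumnName(1);   // typically "column3"
String label = meta.getColumnLabel(1); // typically "y"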

From source file:com.commander4j.db.JDBUserReport.java

private boolean generateSYSTEM(ResultSet temp) {
    boolean result = true;
    setSystemResultData("");

    try {
        if (temp.next()) {
            ResultSetMetaData rsmd = temp.getMetaData();
            int colcount = rsmd.getColumnCount();
            boolean dataColumnFound = false;
            for (int x = 1; x <= colcount; x++) {
                if (rsmd.getColumnName(x).equalsIgnoreCase("data")) {
                    dataColumnFound = true;
                }
            }
            if (dataColumnFound) {
                setSystemResultData(temp.getString("data"));
                result = true;
            } else {
                setErrorMessage("No field called 'data' returned by query.");
            }

        } else {
            setErrorMessage("No record returned by query.");
        }

    } catch (Exception e) {
        result = false;
        setErrorMessage(e.getMessage());
    }
    return result;
}
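
The metadata scan above is a common way to test for a column's presence before calling getString. It can be factored into a reusable helper; a sketch, not part of the original class:

// Returns true if the result set contains a column with the given name,
// matched case-insensitively as in the loop above.
private static boolean hasColumn(ResultSet rs, String name) throws SQLException {
    ResultSetMetaData rsmd = rs.getMetaData();
    for (int x = 1; x <= rsmd.getColumnCount(); x++) {
        if (rsmd.getColumnName(x).equalsIgnoreCase(name)) {
            return true;
        }
    }
    return false;
}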

From source file:ch.rgw.tools.JdbcLink.java

public boolean dumpTable(BufferedWriter w, String name) throws Exception {
    Stm stm = getStatement();
    ResultSet res = stm.query("SELECT * from " + name);
    ResultSetMetaData rm = res.getMetaData();
    int cols = rm.getColumnCount();
    String[] ColNames = new String[cols];
    int[] colTypes = new int[cols];
    w.write("CREATE TABLE " + name + "(");
    for (int i = 0; i < cols; i++) {
        ColNames[i] = rm.getColumnName(i + 1);
        colTypes[i] = rm.getColumnType(i + 1);
        w.write(ColNames[i] + " " + colTypes[i] + ",\n");
    }
    w.write(");");

    while (res.next()) {
        w.write("INSERT INTO " + name + " (");
        for (int i = 0; i < cols; i++) {
            w.write(ColNames[i]);
            if (i < cols - 1) {
                w.write(",");
            }
        }
        w.write(") VALUES (");
        for (int i = 0; i < cols; i++) {
            Object o = res.getObject(ColNames[i]);
            switch (JdbcLink.generalType(colTypes[i])) {
            case JdbcLink.INTEGRAL:
                if (o == null) {
                    w.write("0");
                } else {
                    w.write(Integer.toString(((Integer) o).intValue()));
                }
                break;
            case JdbcLink.TEXT:
                if (o == null) {
                    w.write(JdbcLink.wrap("null"));
                } else {
                    w.write(JdbcLink.wrap((String) o));
                }
                break;

            default:
                String t = (o == null) ? "null" : o.getClass().getName();
                log.log("Unknown type " + t, Log.ERRORS);
                throw new Exception("Can't write " + t);

            }
            if (i < cols - 1) {
                w.write(",");
            }
        }
        w.write(");");
        w.newLine();
    }
    res.close();
    releaseStatement(stm);
    return true;
}
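
Because the loop above writes getColumnType, the generated CREATE TABLE contains numeric JDBC type codes rather than SQL type names, so it is a dump format rather than replayable DDL. ResultSetMetaData also offers getColumnTypeName, which returns the database-specific type name; a sketch of the alternative column line, using the same loop variables:

// e.g. "VARCHAR" or "INTEGER" instead of the type code 12 or 4
w.write(ColNames[i] + " " + rm.getColumnTypeName(i + 1));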

From source file:org.agnitas.web.ExportWizardAction.java

/**
 * Creates sql query for getting recipients according to the export definition conditions;
 * gets recipients from the database;
 * stores the export result in temporary zip file.
 *
 * @param aForm ExportWizardForm object
 * @param aContext application context
 * @param req HTTP request
 */
protected void collectContent(ExportWizardForm aForm, ApplicationContext aContext, HttpServletRequest req) {
    int companyID = AgnUtils.getAdmin(req).getCompany().getId();
    Locale loc_old = Locale.getDefault();

    aForm.setDbExportStatusMessages(new LinkedList<String>());
    aForm.setDbExportStatus(100);
    aForm.setLinesOK(0);

    Target aTarget = null;
    if (aForm.getTargetID() != 0) {
        aTarget = targetDao.getTarget(aForm.getTargetID(), companyID);
        aForm.setTargetID(aTarget.getId());
    }

    String charset = aForm.getCharset();
    if (charset == null || charset.trim().equals("")) {
        charset = "UTF-8";
        aForm.setCharset(charset); // charset also in form
    }

    StringBuffer usedColumnsString = new StringBuffer();
    int exportStartColumn = 2;
    for (String columnName : aForm.getColumns()) {
        // customer_id is selected by default in the base sql statement
        if ("customer_id".equalsIgnoreCase(columnName)) {
            // mark customer_id to be exported too
            exportStartColumn = 1;
        } else {
            usedColumnsString.append(", cust." + columnName + " " + columnName);
        }
    }

    if (aForm.getMailinglists() != null) {
        for (int i = 0; i < aForm.getMailinglists().length; i++) {
            String ml = aForm.getMailinglists()[i];
            usedColumnsString.append(", (SELECT m" + ml + ".user_status FROM customer_" + companyID
                    + "_binding_tbl m" + ml + " WHERE m" + ml + ".customer_id = cust.customer_id AND m" + ml
                    + ".mailinglist_id = " + ml + " AND m" + ml + ".mediatype = 0) as Userstate_Mailinglist_"
                    + ml);
            usedColumnsString.append(", (SELECT m" + ml + "." + AgnUtils.changeDateName() + " FROM customer_"
                    + companyID + "_binding_tbl m" + ml + " WHERE m" + ml
                    + ".customer_id = cust.customer_id AND m" + ml + ".mailinglist_id = " + ml + " AND m" + ml
                    + ".mediatype = 0) as Mailinglist_" + ml + "_Timestamp");
        }
    }

    StringBuffer whereString = new StringBuffer("");
    StringBuffer customerTableSql = new StringBuffer("SELECT * FROM (SELECT DISTINCT cust.customer_id"
            + usedColumnsString.toString() + " FROM customer_" + companyID + "_tbl cust");
    if (aForm.getMailinglistID() != -1 && (aForm.getMailinglistID() > 0 || !aForm.getUserType().equals("E")
            || aForm.getUserStatus() != 0)) {
        customerTableSql.append(", customer_" + companyID + "_binding_tbl bind");
        whereString.append(" cust.customer_id = bind.customer_id AND bind.mediatype=0");
    }

    if (aForm.getMailinglistID() > 0) {
        whereString.append(" and bind.mailinglist_id = " + aForm.getMailinglistID());
    }

    if (aForm.getMailinglistID() == NO_MAILINGLIST) {
        whereString.append(" NOT EXISTS (SELECT 1 FROM customer_" + companyID
                + "_binding_tbl bind WHERE cust.customer_id = bind.customer_id) ");
    } else {
        if (!aForm.getUserType().equals("E")) {
            whereString
                    .append(" AND bind.user_type = '" + SafeString.getSQLSafeString(aForm.getUserType()) + "'");
        }

        if (aForm.getUserStatus() != 0) {
            whereString.append(" AND bind.user_status = " + aForm.getUserStatus());
        }
    }

    if (aForm.getTargetID() != 0) {
        if (aForm.getMailinglistID() != 0 || !aForm.getUserType().equals("E") || aForm.getUserStatus() != 0) {
            whereString.append(" AND ");
        }
        whereString.append(" (" + aTarget.getTargetSQL() + ")");
    }

    String datesParametersString = getDatesParametersString(aForm);
    if (!StringUtils.isEmpty(whereString.toString())) {
        whereString.append(" and ");
    }
    whereString.append(datesParametersString);

    if (whereString.length() > 0) {
        customerTableSql.append(" WHERE " + whereString);
    }

    AgnUtils.userlogger().info("Generated export SQL query: " + customerTableSql);

    Connection con = DataSourceUtils.getConnection(dataSource);

    aForm.setCsvFile(null);
    PrintWriter out = null;
    Statement stmt = null;
    ResultSet rset = null;
    try {
        File systemUploadDirectory = AgnUtils.createDirectory(AgnUtils.getDefaultValue("system.upload"));
        File outFile = File.createTempFile("exp" + companyID + "_", ".zip", systemUploadDirectory);
        ZipOutputStream aZip = new ZipOutputStream(new FileOutputStream(outFile));
        AgnUtils.userlogger().info("Export file <" + outFile.getAbsolutePath() + ">");

        stmt = con.createStatement();
        rset = stmt.executeQuery(customerTableSql.toString());

        aZip.putNextEntry(new ZipEntry("emm_export.csv"));
        Locale.setDefault(new Locale("en"));
        out = new PrintWriter(new BufferedWriter(new OutputStreamWriter(aZip, charset)));

        ResultSetMetaData mData = rset.getMetaData();
        int columnCount = mData.getColumnCount();

        // Write CSV-Header line
        for (int i = exportStartColumn; i <= columnCount; i++) {
            if (i > exportStartColumn) {
                out.print(aForm.getSeparator());
            }
            String columnName = mData.getColumnName(i);
            out.print(aForm.getDelimiter() + escapeChars(columnName, aForm.getDelimiter())
                    + aForm.getDelimiter());
        }
        out.print("\n");

        // Write data lines
        while (rset.next()) {
            for (int i = exportStartColumn; i <= columnCount; i++) {
                if (i > exportStartColumn) {
                    out.print(aForm.getSeparator());
                }

                String aValue;
                try {
                    aValue = rset.getString(i);
                } catch (Exception ex) {
                    aValue = null;
                    // Exceptions should not break the export, but should be logged
                    AgnUtils.userlogger().error("Exception in export:collectContent:", ex);
                }

                if (aValue == null) {
                    // null values should be displayed as empty string
                    aValue = "";
                } else {
                    aValue = escapeChars(aValue, aForm.getDelimiter());
                    aValue = aForm.getDelimiter() + aValue + aForm.getDelimiter();
                }
                out.print(aValue);
            }
            out.print("\n");
            aForm.setLinesOK(aForm.getLinesOK() + 1);
        }
        aForm.setCsvFile(outFile);
    } catch (Exception e) {
        AgnUtils.userlogger().error("collectContent: " + e);
        e.printStackTrace();
    } finally {
        if (out != null) {
            out.close();
        }

        if (rset != null) {
            try {
                rset.close();
            } catch (SQLException e) {
                e.printStackTrace();
            }
        }

        if (stmt != null) {
            try {
                stmt.close();
            } catch (SQLException e) {
                e.printStackTrace();
            }
        }

        DataSourceUtils.releaseConnection(con, dataSource);
        aForm.setDbExportStatus(1001);
        Locale.setDefault(loc_old);
    }
}

From source file:org.apache.ddlutils.task.DumpMetadataTask.java

/**
 * Determines the columns that are present in the given result set.
 *
 * @param resultSet The result set
 * @return The columns
 */
private Set getColumnsInResultSet(ResultSet resultSet) throws SQLException {
    ListOrderedSet result = new ListOrderedSet();
    ResultSetMetaData metaData = resultSet.getMetaData();

    for (int idx = 1; idx <= metaData.getColumnCount(); idx++) {
        result.add(metaData.getColumnName(idx).toUpperCase());
    }

    return result;
}
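
Upper-casing the names makes later membership checks case-insensitive, which matters because databases differ in the case they report for metadata identifiers. A usage sketch, assuming a result set from DatabaseMetaData:

Set columns = getColumnsInResultSet(resultSet);
// probe with the same normalization used when the set was built
boolean hasTableName = columns.contains("table_name".toUpperCase());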

From source file:io.druid.indexing.jdbc.JDBCIndexTask.java

@Override
public TaskStatus run(final TaskToolbox toolbox) throws Exception {
    log.info("Starting up!");
    startTime = DateTime.now();
    mapper = toolbox.getObjectMapper();
    status = Status.STARTING;

    if (chatHandlerProvider.isPresent()) {
        log.info("Found chat handler of class[%s]", chatHandlerProvider.get().getClass().getName());
        chatHandlerProvider.get().register(getId(), this, false);
    } else {
        log.warn("No chat handler detected");
    }

    runThread = Thread.currentThread();

    // Set up FireDepartmentMetrics
    final FireDepartment fireDepartmentForMetrics = new FireDepartment(dataSchema,
            new RealtimeIOConfig(null, null, null), null);
    fireDepartmentMetrics = fireDepartmentForMetrics.getMetrics();
    toolbox.getMonitorScheduler()
            .addMonitor(new RealtimeMetricsMonitor(ImmutableList.of(fireDepartmentForMetrics),
                    ImmutableMap.of(DruidMetrics.TASK_ID, new String[] { getId() })));

    BasicDataSource dataSource = new BasicDataSource();
    dataSource.setUsername(ioConfig.getUser());
    dataSource.setPassword(ioConfig.getPassword());
    dataSource.setUrl(ioConfig.getConnectURI());
    dataSource.setDriverClassLoader(getClass().getClassLoader());

    final String table = ioConfig.getTableName();

    if (!StringUtils.isEmpty(ioConfig.getDriverClass())) {
        dataSource.setDriverClassName(ioConfig.getDriverClass());
    }

    final Handle handle = new DBI(dataSource).open();
    try (final Appenderator appenderator0 = newAppenderator(fireDepartmentMetrics, toolbox);
            final AppenderatorDriver driver = newDriver(appenderator0, toolbox, fireDepartmentMetrics)) {
        toolbox.getDataSegmentServerAnnouncer().announce();
        appenderator = appenderator0;

        // Start up, set up initial offsets.
        final Object restoredMetadata = driver.startJob();
        if (restoredMetadata == null) {
            nextOffsets.putAll(ioConfig.getJdbcOffsets().getOffsetMaps());
        } else {
            final Map<String, Object> restoredMetadataMap = (Map) restoredMetadata;
            final JDBCOffsets restoredNextPartitions = toolbox.getObjectMapper()
                    .convertValue(restoredMetadataMap.get(METADATA_NEXT_OFFSETS), JDBCOffsets.class);
            nextOffsets.putAll(restoredNextPartitions.getOffsetMaps());

            // Sanity checks.
            if (!restoredNextPartitions.getTable().equals(ioConfig.getTableName())) {
                throw new ISE("WTF?! Restored table[%s] but expected table[%s]",
                        restoredNextPartitions.getTable(), ioConfig.getTableName());
            }

            if (!nextOffsets.equals(ioConfig.getJdbcOffsets().getOffsetMaps())) {
                throw new ISE("WTF?! Restored partitions[%s] but expected partitions[%s]", nextOffsets,
                        ioConfig.getJdbcOffsets().getOffsetMaps());
            }
        }

        // Set up sequenceNames.

        final Map<Integer, String> sequenceNames = Maps.newHashMap();
        for (Integer partitionNum : nextOffsets.keySet()) {
            sequenceNames.put(partitionNum,
                    String.format("%s_%s", ioConfig.getBaseSequenceName(), partitionNum));
        }

        // Set up committer.
        final Supplier<Committer> committerSupplier = new Supplier<Committer>() {
            @Override
            public Committer get() {
                final Map<Integer, Long> snapshot = ImmutableMap.copyOf(nextOffsets);

                return new Committer() {
                    @Override
                    public Object getMetadata() {
                        return ImmutableMap.of(METADATA_NEXT_OFFSETS,
                                new JDBCOffsets(ioConfig.getJdbcOffsets().getTable(), snapshot));

                    }

                    @Override
                    public void run() {
                        // Do nothing.
                    }
                };
            }
        };

        //      Set<Integer> assignment = assignPartitionsAndSeekToNext(handle);
        //      boolean stillReading = !assignment.isEmpty();
        status = Status.READING;
        try {
            //        while (stillReading) {
            //          if (possiblyPause(assignment)) {
            //             The partition assignments may have changed while paused by a call to setEndOffsets() so reassign
            //             partitions upon resuming. This is safe even if the end offsets have not been modified.
            //            assignment = assignPartitionsAndSeekToNext(handle);
            //            if (assignment.isEmpty()) {
            //              log.info("All partitions have been fully read");
            //              publishOnStop = true;
            //              stopRequested = true;
            //            }
            //          }
            //          if (stopRequested) {
            //            break;
            //          }

            final String query = (ioConfig.getQuery() != null) ? ioConfig.getQuery()
                    : makeQuery(ioConfig.getColumns(), ioConfig.getJdbcOffsets());
            org.skife.jdbi.v2.Query<Map<String, Object>> dbiQuery = handle.createQuery(query);

            final ResultIterator<InputRow> rowIterator = dbiQuery.map(new ResultSetMapper<InputRow>() {
                List<String> queryColumns = (ioConfig.getColumns() == null) ? Lists.<String>newArrayList()
                        : ioConfig.getColumns();
                List<Boolean> columnIsNumeric = Lists.newArrayList();

                @Override
                public InputRow map(final int index, final ResultSet r, final StatementContext ctx)
                        throws SQLException {
                    try {
                        if (queryColumns.size() == 0) {
                            ResultSetMetaData metadata = r.getMetaData();
                            for (int idx = 1; idx <= metadata.getColumnCount(); idx++) {
                                queryColumns.add(metadata.getColumnName(idx));
                            }
                            Preconditions.checkArgument(queryColumns.size() > 0,
                                    String.format("No column in table [%s]", table));
                            verifyParserSpec(parser.getParseSpec(), queryColumns);
                        }
                        if (columnIsNumeric.size() == 0) {
                            ResultSetMetaData metadata = r.getMetaData();
                            Preconditions.checkArgument(metadata.getColumnCount() >= queryColumns.size(),
                                    String.format(
                                            "number of column names [%d] exceeds the actual number of returned columns [%d]",
                                            queryColumns.size(), metadata.getColumnCount()));
                            columnIsNumeric.add(false); // placeholder so column indices start at 1
                            for (int idx = 1; idx <= metadata.getColumnCount(); idx++) {
                                boolean isNumeric = false;
                                int type = metadata.getColumnType(idx);
                                switch (type) {
                                case BIGINT:
                                case DECIMAL:
                                case DOUBLE:
                                case FLOAT:
                                case INTEGER:
                                case NUMERIC:
                                case SMALLINT:
                                case TINYINT:
                                    isNumeric = true;
                                    break;
                                }
                                columnIsNumeric.add(isNumeric);
                            }
                        }
                        final Map<String, Object> columnMap = Maps.newHashMap();
                        int columnIdx = 1;
                        for (String column : queryColumns) {
                            Object objToPut = null;
                            if (table != null) {
                                objToPut = r.getObject(column);
                            } else {
                                objToPut = r.getObject(columnIdx);
                            }
                            columnMap.put(column, objToPut == null ? columnIsNumeric.get(columnIdx) : objToPut);

                            columnIdx++;
                        }
                        return parser.parse(columnMap);

                    } catch (IllegalArgumentException e) {
                        throw new SQLException(e);
                    }
                }
            }).iterator();

            org.skife.jdbi.v2.Query<Map<String, Object>> maxItemQuery = handle
                    .createQuery(makeMaxQuery(ioConfig.getJdbcOffsets()));
            long currOffset = maxItemQuery != null ? (long) maxItemQuery.list(1).get(0).get("MAX") : 0;

            while (rowIterator.hasNext()) {
                InputRow row = rowIterator.next();
                try {
                    if (!ioConfig.getMinimumMessageTime().isPresent()
                            || !ioConfig.getMinimumMessageTime().get().isAfter(row.getTimestamp())) {

                        final String sequenceName = sequenceNames.get(nextOffsets.keySet().toArray()[0]); //TODO::: check data
                        final AppenderatorDriverAddResult addResult = driver.add(row, sequenceName,
                                committerSupplier);

                        if (addResult.isOk()) {
                            // If the number of rows in the segment exceeds the threshold after adding a row,
                            // move the segment out from the active segments of AppenderatorDriver to make a new segment.
                            if (addResult.getNumRowsInSegment() > tuningConfig.getMaxRowsPerSegment()) {
                                driver.moveSegmentOut(sequenceName,
                                        ImmutableList.of(addResult.getSegmentIdentifier()));
                            }
                        } else {
                            // Failure to allocate segment puts determinism at risk, bail out to be safe.
                            // May want configurable behavior here at some point.
                            // If we allow continuing, then consider blacklisting the interval for a while to avoid constant checks.
                            throw new ISE("Could not allocate segment for row with timestamp[%s]",
                                    row.getTimestamp());
                        }

                        fireDepartmentMetrics.incrementProcessed();
                    } else {
                        fireDepartmentMetrics.incrementThrownAway();
                    }
                } catch (ParseException e) {
                    if (tuningConfig.isReportParseExceptions()) {
                        throw e;
                    } else {
                        log.debug(e, "Dropping unparseable row[%s].", row);

                        fireDepartmentMetrics.incrementUnparseable();
                    }
                }
            }
            nextOffsets.put((int) ioConfig.getJdbcOffsets().getOffsetMaps().keySet().toArray()[0], currOffset);
            //          if (nextOffsets.get(record.partition()).equals(endOffsets.get(record.partition()))
            //              && assignment.remove(record.partition())) {
            //            log.info("Finished reading table[%s], partition[%,d].", record.topic(), record.partition());
            //            stillReading = ioConfig.isPauseAfterRead() || !assignment.isEmpty();
            //          }
            //        }
        } finally {
            driver.persist(committerSupplier.get()); // persist pending data
        }
        synchronized (statusLock) {
            if (stopRequested && !publishOnStop) {
                throw new InterruptedException("Stopping without publishing");
            }

            status = Status.PUBLISHING;
        }

        final TransactionalSegmentPublisher publisher = (segments, commitMetadata) -> {

            final JDBCOffsets finalOffsets = toolbox.getObjectMapper()
                    .convertValue(((Map) commitMetadata).get(METADATA_NEXT_OFFSETS), JDBCOffsets.class);
            // Sanity check, we should only be publishing things that match our desired end state. //TODO::: Sanity check!
            //        if (!endOffsets.equals(finalOffsets.getOffsetMaps())) {
            //          throw new ISE("WTF?! Driver attempted to publish invalid metadata[%s].", commitMetadata);
            //        }

            final SegmentTransactionalInsertAction action;

            if (ioConfig.isUseTransaction()) {
                action = new SegmentTransactionalInsertAction(segments,
                        new JDBCDataSourceMetadata(ioConfig.getJdbcOffsets()),
                        new JDBCDataSourceMetadata(finalOffsets) //TODO::: Check Values
                );
            } else {
                action = new SegmentTransactionalInsertAction(segments, null, null);
            }

            log.info("Publishing with isTransaction[%s].", ioConfig.isUseTransaction());

            return toolbox.getTaskActionClient().submit(action).isSuccess();
        };

        // Supervised JDBC tasks are killed by JDBCSupervisor if they are stuck during publishing segments or waiting
        // for hand-off. See JDBCSupervisorIOConfig.completionTimeout.
        final SegmentsAndMetadata published = driver
                .publish(publisher, committerSupplier.get(), sequenceNames.values()).get();

        final SegmentsAndMetadata handedOff;
        if (tuningConfig.getHandoffConditionTimeout() == 0) {
            handedOff = driver.registerHandoff(published).get();
        } else {
            handedOff = driver.registerHandoff(published).get(tuningConfig.getHandoffConditionTimeout(),
                    TimeUnit.MILLISECONDS);
        }

        if (handedOff == null) {
            throw new ISE("Transaction failure publishing segments, aborting");
        } else {
            log.info("Published segments[%s] with metadata[%s].", Joiner.on(", ")
                    .join(Iterables.transform(handedOff.getSegments(), new Function<DataSegment, String>() {
                        @Override
                        public String apply(DataSegment input) {
                            return input.getIdentifier();
                        }
                    })), handedOff.getCommitMetadata());
        }
    } catch (InterruptedException | RejectedExecutionException e) {
        // handle the InterruptedException that gets wrapped in a RejectedExecutionException
        if (e instanceof RejectedExecutionException
                && (e.getCause() == null || !(e.getCause() instanceof InterruptedException))) {
            throw e;
        }

        // if we were interrupted because we were asked to stop, handle the exception and return success, else rethrow
        if (!stopRequested) {
            Thread.currentThread().interrupt();
            throw e;
        }

        log.info("The task was asked to stop before completing");
    } finally {
        if (chatHandlerProvider.isPresent()) {
            chatHandlerProvider.get().unregister(getId());
        }
        handle.close();
    }

    toolbox.getDataSegmentServerAnnouncer().unannounce();

    //TODO::implement
    return success();

}

From source file:mondrian.spi.impl.JdbcDialectImpl.java

void logTypeInfo(ResultSetMetaData metaData, int columnIndex, SqlStatement.Type internalType)
        throws SQLException {
    if (LOGGER.isDebugEnabled()) {
        final int columnType = metaData.getColumnType(columnIndex + 1);
        final int precision = metaData.getPrecision(columnIndex + 1);
        final int scale = metaData.getScale(columnIndex + 1);
        final String columnName = metaData.getColumnName(columnIndex + 1);
        LOGGER.debug("JdbcDialectImpl.getType " + "Dialect- " + this.getDatabaseProduct() + ", Column-"
                + columnName + " is of internal type " + internalType + ". JDBC type was " + columnType
                + ".  Column precision=" + precision + ".  Column scale=" + scale);
    }
}

From source file:net.hydromatic.optiq.test.JdbcTest.java

/** Tests driver's implementation of {@link ResultSetMetaData}. */
@Test
public void testResultSetMetaData() throws ClassNotFoundException, SQLException {
    Connection connection = getConnection("hr", "foodmart");
    Statement statement = connection.createStatement();
    ResultSet resultSet = statement
            .executeQuery("select \"empid\", \"deptno\" as x, 1 as y\n" + "from \"hr\".\"emps\"");
    ResultSetMetaData metaData = resultSet.getMetaData();
    assertEquals(3, metaData.getColumnCount());
    assertEquals("empid", metaData.getColumnLabel(1));
    assertEquals("empid", metaData.getColumnName(1));
    assertEquals("emps", metaData.getTableName(1));
    assertEquals("X", metaData.getColumnLabel(2));
    assertEquals("deptno", metaData.getColumnName(2));
    assertEquals("emps", metaData.getTableName(2));
    assertEquals("Y", metaData.getColumnLabel(3));
    assertEquals("Y", metaData.getColumnName(3));
    assertEquals(null, metaData.getTableName(3));
    resultSet.close();
    connection.close();
}

From source file:com.mapd.utility.SQLImporter.java

private void checkMapDTable(ResultSetMetaData md) throws SQLException {
    createMapDConnection();
    String tName = cmd.getOptionValue("targetTable");

    if (tableExists(tName)) {
        // check if we want to truncate
        if (cmd.hasOption("truncate")) {
            executeMapDCommand("Drop table " + tName);
            createMapDTable(md);
        } else {
            List<TColumnType> columnInfo = getColumnInfo(tName);
            // table exists lets check it has same number of columns
            if (md.getColumnCount() != columnInfo.size()) {
                LOGGER.error("Table sizes do not match - Mapd " + columnInfo.size() + " versus Select "
                        + md.getColumnCount());
                exit(1);
            }
            // table exists lets check it is same layout - check names will do for now
            for (int colNum = 1; colNum <= columnInfo.size(); colNum++) {
                if (!columnInfo.get(colNum - 1).col_name.equalsIgnoreCase(md.getColumnName(colNum))) {
                    LOGGER.error("MapD Table does not have matching column in same order for column number"
                            + colNum + " MapD column name is " + columnInfo.get(colNum - 1).col_name
                            + " versus Select " + md.getColumnName(colNum));
                    exit(1);
                }
            }
        }
    } else {
        createMapDTable(md);
    }
}

From source file:org.apache.hadoop.chukwa.analysis.salsa.visualization.Heatmap.java

/**
 * Interfaces with database to get data and 
 * populate data structures for rendering
 */
public HeatmapData getData() {
    // preliminary setup
    OfflineTimeHandler time_offline;
    TimeHandler time_online;
    long start, end, min, max;

    if (offline_use) {
        time_offline = new OfflineTimeHandler(param_map, this.timezone);
        start = time_offline.getStartTime();
        end = time_offline.getEndTime();
    } else {
        time_online = new TimeHandler(this.request, this.timezone);
        start = time_online.getStartTime();
        end = time_online.getEndTime();
    }

    DatabaseWriter dbw = new DatabaseWriter(this.cluster);

    // setup query
    String query;
    if (this.query_state != null && this.query_state.equals("read")) {
        query = "select block_id,start_time,finish_time,start_time_millis,finish_time_millis,status,state_name,hostname,other_host,bytes from ["
                + table
                + "] where finish_time between '[start]' and '[end]' and (state_name like 'read_local' or state_name like 'read_remote')";
    } else if (this.query_state != null && this.query_state.equals("write")) {
        query = "select block_id,start_time,finish_time,start_time_millis,finish_time_millis,status,state_name,hostname,other_host,bytes from ["
                + table
                + "] where finish_time between '[start]' and '[end]' and (state_name like 'write_local' or state_name like 'write_remote' or state_name like 'write_replicated')";
    } else {
        query = "select block_id,start_time,finish_time,start_time_millis,finish_time_millis,status,state_name,hostname,other_host,bytes from ["
                + table + "] where finish_time between '[start]' and '[end]' and state_name like '"
                + query_state + "'";
    }
    Macro mp = new Macro(start, end, query);
    query = mp.toString() + " order by start_time";

    ArrayList<HashMap<String, Object>> events = new ArrayList<HashMap<String, Object>>();

    ResultSet rs = null;

    log.debug("Query: " + query);
    // run query, extract results
    try {
        rs = dbw.query(query);
        ResultSetMetaData rmeta = rs.getMetaData();
        int col = rmeta.getColumnCount();
        while (rs.next()) {
            HashMap<String, Object> event = new HashMap<String, Object>();
            long event_time = 0;
            for (int i = 1; i <= col; i++) {
                if (rmeta.getColumnType(i) == java.sql.Types.TIMESTAMP) {
                    event.put(rmeta.getColumnName(i), rs.getTimestamp(i).getTime());
                } else {
                    event.put(rmeta.getColumnName(i), rs.getString(i));
                }
            }
            events.add(event);
        }
    } catch (SQLException ex) {
        // handle any errors
        log.error("SQLException: " + ex.getMessage());
        log.error("SQLState: " + ex.getSQLState());
        log.error("VendorError: " + ex.getErrorCode());
    } finally {
        dbw.close();
    }
    SimpleDateFormat format = new SimpleDateFormat("MMM dd yyyy HH:mm:ss");

    log.info(events.size() + " results returned.");

    HashSet<String> host_set = new HashSet<String>();
    HashMap<String, Integer> host_indices = new HashMap<String, Integer>();
    HashMap<Integer, String> host_rev_indices = new HashMap<Integer, String>();

    // collect hosts, name unique hosts
    for (int i = 0; i < events.size(); i++) {
        HashMap<String, Object> event = events.get(i);
        String curr_host = (String) event.get("hostname");
        String other_host = (String) event.get("other_host");
        host_set.add(curr_host);
        host_set.add(other_host);
    }
    int num_hosts = host_set.size();

    Iterator<String> host_iter = host_set.iterator();
    for (int i = 0; i < num_hosts && host_iter.hasNext(); i++) {
        String curr_host = host_iter.next();
        host_indices.put(curr_host, new Integer(i));
        host_rev_indices.put(new Integer(i), curr_host);
    }

    System.out.println("Number of hosts: " + num_hosts);
    long stats[][] = new long[num_hosts][num_hosts];
    long count[][] = new long[num_hosts][num_hosts]; // used for averaging

    int start_millis = 0, end_millis = 0;

    // deliberate design choice to duplicate code PER possible operation
    // otherwise we have to do the mode check N times, for N states returned
    //
    // compute aggregate statistics
    log.info("Query statistic type: " + this.query_stat_type);
    if (this.query_stat_type.equals("transaction_count")) {
        for (int i = 0; i < events.size(); i++) {
            HashMap<String, Object> event = events.get(i);
            start = (Long) event.get("start_time");
            end = (Long) event.get("finish_time");
            start_millis = Integer.parseInt(((String) event.get("start_time_millis")));
            end_millis = Integer.parseInt(((String) event.get("finish_time_millis")));
            String cell = (String) event.get("state_name");
            String this_host = (String) event.get("hostname");
            String other_host = (String) event.get("other_host");
            int this_host_idx = host_indices.get(this_host).intValue();
            int other_host_idx = host_indices.get(other_host).intValue();

            // to, from
            stats[other_host_idx][this_host_idx] += 1;
        }
    } else if (this.query_stat_type.equals("avg_duration")) {
        for (int i = 0; i < events.size(); i++) {
            HashMap<String, Object> event = events.get(i);
            start = (Long) event.get("start_time");
            end = (Long) event.get("finish_time");
            start_millis = Integer.parseInt(((String) event.get("start_time_millis")));
            end_millis = Integer.parseInt(((String) event.get("finish_time_millis")));
            String cell = (String) event.get("state_name");
            String this_host = (String) event.get("hostname");
            String other_host = (String) event.get("other_host");
            int this_host_idx = host_indices.get(this_host).intValue();
            int other_host_idx = host_indices.get(other_host).intValue();

            long curr_val = end_millis - start_millis + ((end - start) * 1000);

            // to, from
            stats[other_host_idx][this_host_idx] += curr_val;
            count[other_host_idx][this_host_idx] += 1;
        }
        for (int i = 0; i < num_hosts; i++) {
            for (int j = 0; j < num_hosts; j++) {
                if (count[i][j] > 0)
                    stats[i][j] = stats[i][j] / count[i][j];
            }
        }
    } else if (this.query_stat_type.equals("avg_volume")) {
        for (int i = 0; i < events.size(); i++) {
            HashMap<String, Object> event = events.get(i);
            start = (Long) event.get("start_time");
            end = (Long) event.get("finish_time");
            start_millis = Integer.parseInt(((String) event.get("start_time_millis")));
            end_millis = Integer.parseInt(((String) event.get("finish_time_millis")));
            String cell = (String) event.get("state_name");
            String this_host = (String) event.get("hostname");
            String other_host = (String) event.get("other_host");
            int this_host_idx = host_indices.get(this_host).intValue();
            int other_host_idx = host_indices.get(other_host).intValue();

            long curr_val = Long.parseLong((String) event.get("bytes"));

            // to, from
            stats[other_host_idx][this_host_idx] += curr_val;
            count[other_host_idx][this_host_idx] += 1;
        }
        for (int i = 0; i < num_hosts; i++) {
            for (int j = 0; j < num_hosts; j++) {
                if (count[i][j] > 0)
                    stats[i][j] = stats[i][j] / count[i][j];
            }
        }
    } else if (this.query_stat_type.equals("total_duration")) {
        for (int i = 0; i < events.size(); i++) {
            HashMap<String, Object> event = events.get(i);
            start = (Long) event.get("start_time");
            end = (Long) event.get("finish_time");
            start_millis = Integer.parseInt(((String) event.get("start_time_millis")));
            end_millis = Integer.parseInt(((String) event.get("finish_time_millis")));
            String cell = (String) event.get("state_name");
            String this_host = (String) event.get("hostname");
            String other_host = (String) event.get("other_host");
            int this_host_idx = host_indices.get(this_host).intValue();
            int other_host_idx = host_indices.get(other_host).intValue();

            double curr_val = end_millis - start_millis + ((end - start) * 1000);

            // to, from
            stats[other_host_idx][this_host_idx] += curr_val;
        }
    } else if (this.query_stat_type.equals("total_volume")) {
        for (int i = 0; i < events.size(); i++) {
            HashMap<String, Object> event = events.get(i);
            start = (Long) event.get("start_time");
            end = (Long) event.get("finish_time");
            start_millis = Integer.parseInt(((String) event.get("start_time_millis")));
            end_millis = Integer.parseInt(((String) event.get("finish_time_millis")));
            String cell = (String) event.get("state_name");
            String this_host = (String) event.get("hostname");
            String other_host = (String) event.get("other_host");
            int this_host_idx = host_indices.get(this_host).intValue();
            int other_host_idx = host_indices.get(other_host).intValue();

            long curr_val = Long.parseLong((String) event.get("bytes"));

            // to, from
            stats[other_host_idx][this_host_idx] += curr_val;
        }
    }

    int[] permute = null;
    if (sort_nodes) {
        permute = hClust(stats);
        stats = doPermute(stats, permute);
    }

    Table agg_tab = new Table();
    agg_tab.addColumn("stat", long.class);
    min = Long.MAX_VALUE;
    max = Long.MIN_VALUE;
    agg_tab.addRows(num_hosts * num_hosts);

    // row-wise placement (row1, followed by row2, etc.)
    for (int i = 0; i < num_hosts; i++) {
        for (int j = 0; j < num_hosts; j++) {
            agg_tab.setLong((i * num_hosts) + j, "stat", stats[i][j]);
            if (stats[i][j] > max)
                max = stats[i][j];
            if (stats[i][j] > 0 && stats[i][j] < min)
                min = stats[i][j];
        }
    }
    if (min == Long.MAX_VALUE)
        min = 0;

    log.info(agg_tab);

    // collate data
    HeatmapData hd = new HeatmapData();
    hd.stats = new long[num_hosts][num_hosts];
    hd.stats = stats;
    hd.min = min;
    hd.max = max;
    hd.num_hosts = num_hosts;
    hd.agg_tab = agg_tab;

    this.add_info_extra = new String("\nState: " + this.prettyStateNames.get(this.query_state) + " ("
            + events.size() + " " + this.query_state + "'s [" + this.query_stat_type + "])\n"
            + "Plotted value range: [" + hd.min + "," + hd.max + "] (Zeros in black)");

    hd.hostnames = new String[num_hosts];
    for (int i = 0; i < num_hosts; i++) {
        // when the nodes were clustered, map the display position back
        // through the permutation; otherwise use the original index
        int idx = sort_nodes ? permute[i] : i;
        hd.hostnames[i] = host_rev_indices.get(new Integer(idx));
    }

    return hd;
}
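
The query loop above is an instance of a common pattern: materializing each row as a map keyed by column name. A generic sketch, assuming any open ResultSet rs:

List<Map<String, Object>> rows = new ArrayList<>();
ResultSetMetaData meta = rs.getMetaData();
int cols = meta.getColumnCount();
while (rs.next()) {
    Map<String, Object> row = new HashMap<>();
    for (int i = 1; i <= cols; i++) {
        // getColumnName keys the map by the underlying column name
        row.put(meta.getColumnName(i), rs.getObject(i));
    }
    rows.add(row);
}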