Example usage for java.sql PreparedStatement setNull

Introduction

This page lists example usages of java.sql PreparedStatement setNull, collected from open-source projects.

Prototype

void setNull(int parameterIndex, int sqlType) throws SQLException;

Document

Sets the designated parameter to SQL NULL. Note: you must specify the parameter's SQL type, using a type code from java.sql.Types.
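
A minimal, self-contained sketch of the usual pattern, shown before the real-world examples below (the employees table and manager_id column are hypothetical):

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Types;

public class SetNullExample {
    // Inserts an employee row; managerId may be null for employees without a manager.
    static void insertEmployee(Connection conn, String name, Integer managerId) throws SQLException {
        String sql = "INSERT INTO employees (name, manager_id) VALUES (?, ?)";
        try (PreparedStatement ps = conn.prepareStatement(sql)) {
            ps.setString(1, name);
            if (managerId == null) {
                // setNull needs the SQL type of the column receiving the NULL.
                ps.setNull(2, Types.INTEGER);
            } else {
                ps.setInt(2, managerId);
            }
            ps.executeUpdate();
        }
    }
}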

Usage

From source file:org.apache.ode.bpel.extvar.jdbc.JdbcExternalVariableModule.java

int execUpdate(DbExternalVariable dbev, RowKey key, RowVal values) throws SQLException {
    Connection conn = dbev.dataSource.getConnection();
    PreparedStatement stmt = null;
    try {
        if (__log.isDebugEnabled()) {
            __log.debug("execUpdate: key=" + key + " values=" + values);
            __log.debug("Prepare statement: " + dbev.update);
        }
        stmt = conn.prepareStatement(dbev.update);
        int idx = 1;
        for (Column c : dbev._updcolumns) {
            Object val = values.get(c.name);
            if (__log.isDebugEnabled())
                __log.debug("Set value parameter " + idx + ": " + val);
            if (val == null)
                stmt.setNull(idx, c.dataType);
            else
                stmt.setObject(idx, downcastValue(val, c.dataType));
            idx++;
        }

        for (Column ck : dbev._keycolumns) {
            Object val = key.get(ck.name);
            if (__log.isDebugEnabled())
                __log.debug("Set key parameter " + idx + ": " + val);
            if (val == null)
                stmt.setNull(idx, ck.dataType);
            else
                stmt.setObject(idx, downcastValue(val, ck.dataType));
            idx++;
        }
        return stmt.executeUpdate();
    } finally {
        if (stmt != null)
            stmt.close();
        try {
            conn.close();
        } catch (SQLException e) {
            // ignore
        }
    }
}
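
The example above releases its resources in a finally block; on Java 7 and later the same close-on-exit behavior can be written with try-with-resources. A sketch of the update-column loop in that style, reusing dbev, values, and downcastValue from above:

try (Connection conn = dbev.dataSource.getConnection();
     PreparedStatement stmt = conn.prepareStatement(dbev.update)) {
    int idx = 1;
    for (Column c : dbev._updcolumns) {
        Object val = values.get(c.name);
        if (val == null)
            stmt.setNull(idx, c.dataType);   // typed NULL using the column's SQL type code
        else
            stmt.setObject(idx, downcastValue(val, c.dataType));
        idx++;
    }
    // ... bind the key columns the same way, then:
    return stmt.executeUpdate();
}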

From source file:org.ojbc.adapters.analyticaldatastore.dao.AnalyticalDatastoreDAOImpl.java

@Override
public Integer saveIncident(final Incident inboundIncident) {
    log.debug("Inserting row into Incident table: " + inboundIncident.toString());

    KeyHolder keyHolder = new GeneratedKeyHolder();
    jdbcTemplate.update(new PreparedStatementCreator() {
        public PreparedStatement createPreparedStatement(Connection connection) throws SQLException {

            String incidentInsertStatement = "";
            String[] insertArgs = null;

            if (inboundIncident.getIncidentID() == null) {
                incidentInsertStatement = "INSERT into INCIDENT (ReportingAgencyID, IncidentCaseNumber,"
                        + "IncidentLocationLatitude, IncidentLocationLongitude, IncidentLocationStreetAddress,IncidentLocationTown,IncidentDate,IncidentTime,ReportingSystem,RecordType) values (?,?,?,?,?,?,?,?,?,?)";

                insertArgs = new String[] { "ReportingAgencyID", "IncidentCaseNumber",
                        "IncidentLocationLatitude", "IncidentLocationLongitude",
                        "IncidentLocationStreetAddress", "IncidentLocationTown", "IncidentDate", "IncidentTime",
                        "ReportingSystem", "RecordType" };
            } else {
                incidentInsertStatement = "INSERT into INCIDENT (ReportingAgencyID, IncidentCaseNumber,"
                        + "IncidentLocationLatitude, IncidentLocationLongitude, IncidentLocationStreetAddress,IncidentLocationTown,IncidentDate,IncidentTime,ReportingSystem,RecordType, IncidentID) values (?,?,?,?,?,?,?,?,?,?,?)";

                insertArgs = new String[] { "ReportingAgencyID", "IncidentCaseNumber",
                        "IncidentLocationLatitude", "IncidentLocationLongitude",
                        "IncidentLocationStreetAddress", "IncidentLocationTown", "IncidentDate", "IncidentTime",
                        "ReportingSystem", "RecordType", "IncidentID" };
            }

            PreparedStatement ps = connection.prepareStatement(incidentInsertStatement, insertArgs);
            if (inboundIncident.getReportingAgencyID() != null) {
                ps.setInt(1, inboundIncident.getReportingAgencyID());
            } else {
                ps.setNull(1, java.sql.Types.NULL);
            }

            ps.setString(2, inboundIncident.getIncidentCaseNumber());
            ps.setBigDecimal(3, inboundIncident.getIncidentLocationLatitude());
            ps.setBigDecimal(4, inboundIncident.getIncidentLocationLongitude());
            ps.setString(5, inboundIncident.getIncidentLocationStreetAddress());
            ps.setString(6, inboundIncident.getIncidentLocationTown());
            ps.setDate(7, new java.sql.Date(inboundIncident.getIncidentDate().getTime()));
            ps.setTime(8, new java.sql.Time(inboundIncident.getIncidentDate().getTime()));
            ps.setString(9, inboundIncident.getReportingSystem());
            ps.setString(10, String.valueOf(inboundIncident.getRecordType()));

            if (inboundIncident.getIncidentID() != null) {
                ps.setInt(11, inboundIncident.getIncidentID());
            }

            return ps;
        }
    }, keyHolder);

    Integer returnValue = null;

    if (inboundIncident.getIncidentID() != null) {
        returnValue = inboundIncident.getIncidentID();
    } else {
        returnValue = keyHolder.getKey().intValue();
    }

    return returnValue;
}
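
This example passes java.sql.Types.NULL as the type argument. Many drivers accept that, but setNull is specified to take the SQL type code of the target column, and stricter drivers reject Types.NULL. A sketch of the stricter form, assuming ReportingAgencyID maps to an INTEGER column:

if (inboundIncident.getReportingAgencyID() != null) {
    ps.setInt(1, inboundIncident.getReportingAgencyID());
} else {
    // Assumption: ReportingAgencyID is an INTEGER column.
    ps.setNull(1, java.sql.Types.INTEGER);
}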

From source file:com.pactera.edg.am.metamanager.extractor.dao.helper.DeleteMetadataAlterHelper.java

protected void doInPreparedStatement(PreparedStatement ps, String metaModelCode, boolean hasChildMetaModel,
        List<AbstractMetadata> metadatas) throws SQLException {
    try {
        for (AbstractMetadata metadata : metadatas) {
            // sequence ID
            String sequenceId = sequenceDao.getUuid();
            ps.setString(1, sequenceId);
            // alteration type: "1" marks a deletion
            ps.setString(2, "1");
            // task instance ID
            ps.setString(3, taskInstanceId);
            // // metadata ID
            // ps.setString(4, metadata.getId());
            // // meta-model code
            // ps.setString(5, metaModelCode);
            // user ID
            ps.setString(4, userId);

            // START_TIME: the metadata's own start time
            ps.setLong(5, metadata.getStartTime());
            // ALTERATION_TIME
            ps.setLong(6, startTime);

            // OLD_START_TIME is unknown for a deletion, so bind a typed NULL
            ps.setNull(7, java.sql.Types.BIGINT);
            // metadata ID
            ps.setString(8, metadata.getId());

            ps.addBatch();
            ps.clearParameters();

            if (++super.count % super.batchSize == 0) {
                ps.executeBatch();
                ps.clearBatch();
            }

        }
    } catch (SQLException e) {
        // Log and continue: a failure here should not abort the extraction run
        log.warn("Failed to save metadata deletion alterations", e);
    }

}
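
One detail to watch in this batching pattern: executeBatch() runs only when the running count hits a multiple of batchSize, so any rows added after the last full batch still need a final flush, presumably issued by the caller or the superclass. A sketch of that final flush, assuming the same count and batchSize fields:

// After the loop: flush whatever remains in the current partial batch.
if (super.count % super.batchSize != 0) {
    ps.executeBatch();
    ps.clearBatch();
}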

From source file:org.geowebcache.storage.jdbc.jobstore.JDBCJobWrapper.java

public void putJobLog(JobLogObject stObj) throws SQLException, StorageException {

    // Not really a fan of tacking log_ onto the front of every field, but it ensures
    // common keywords like log, level, time, summary, text won't clash with database 
    // keywords, causing unnecessary pain.
    String query = "MERGE INTO " + "JOB_LOGS(job_log_id, job_id, log_level, log_time, log_summary, log_text) "
            + "KEY(job_log_id) " + "VALUES(?,?,?,?,?,?)";

    final Connection conn = getConnection();

    try {
        Long insertId;
        PreparedStatement prep = conn.prepareStatement(query, Statement.RETURN_GENERATED_KEYS);
        try {
            if (stObj.getJobLogId() == -1) {
                prep.setNull(1, java.sql.Types.BIGINT);
            } else {
                prep.setLong(1, stObj.getJobLogId());
            }

            prep.setLong(2, stObj.getJobId());
            prep.setString(3, stObj.getLogLevel().name());
            prep.setTimestamp(4, stObj.getLogTime());
            prep.setString(5, stObj.getLogSummary());

            Reader reader = new BufferedReader(new StringReader(stObj.getLogText()));
            prep.setCharacterStream(6, reader, stObj.getLogText().length());

            insertId = wrappedInsert(prep);
        } finally {
            close(prep);
        }
        if (insertId == null) {
            log.error("Did not receive an id for " + query);
        } else {
            if (stObj.getJobLogId() == -1) {
                // only use the inserted id if we were doing an insert.
                // what insertid will be if we weren't doing an insert is not defined.
                stObj.setJobLogId(insertId.longValue());
            }
        }

    } finally {
        conn.close();
    }

}

From source file:com.flexive.ejb.beans.workflow.StepDefinitionEngineBean.java

/**
 * {@inheritDoc}
 */
@Override
@TransactionAttribute(TransactionAttributeType.REQUIRED)
public void update(StepDefinition stepDefinition) throws FxApplicationException {

    final UserTicket ticket = FxContext.getUserTicket();
    final FxContext ri = FxContext.get();

    // Security checks
    FxPermissionUtils.checkRole(ticket, Role.WorkflowManagement);

    StepDefinition orgDefinition;
    try {
        // Lookup the stepDefinition, throws FxNotFoundException (+FxDbException)
        orgDefinition = getEnvironment().getStepDefinition(stepDefinition.getId());
    } catch (Exception exc) {
        throw new FxUpdateException(exc.getMessage(), exc);
    }

    // Unique target checks
    boolean uniqueTargetAdded = stepDefinition.getId() != -1 && stepDefinition.getUniqueTargetId() != -1
            && stepDefinition.getUniqueTargetId() != orgDefinition.getUniqueTargetId();

    // Check the unique target, throws FxInvalidParameterException if target is invalid (+FxDbException)
    if (stepDefinition.getUniqueTargetId() != -1) {
        checkValidUniqueTarget(stepDefinition.getId(), stepDefinition.getUniqueTargetId());
    }

    // Sanity checks
    if (stepDefinition.getLabel() == null || stepDefinition.getLabel().isEmpty()) {
        throw new FxInvalidParameterException("NAME", "ex.stepdefinition.name.empty");
    }

    Connection con = null;
    PreparedStatement stmt = null;
    String sql;
    boolean success = false;
    try {

        // Obtain a database connection
        con = Database.getDbConnection();

        // store label
        Database.storeFxString(stepDefinition.getLabel(), con, TBL_WORKFLOW_STEPDEFINITION, "name", "id",
                stepDefinition.getId());

        sql = "UPDATE " + TBL_WORKFLOW_STEPDEFINITION + " SET NAME=?,UNIQUE_TARGET=? WHERE ID=?";
        stmt = con.prepareStatement(sql);
        stmt.setString(1, stepDefinition.getName());
        if (stepDefinition.getUniqueTargetId() != -1) {
            stmt.setLong(2, stepDefinition.getUniqueTargetId());
        } else {
            stmt.setNull(2, Types.NUMERIC);
        }
        stmt.setLong(3, stepDefinition.getId());

        int updateCount = stmt.executeUpdate();

        if (updateCount == 0) {
            FxNotFoundException nfe = new FxNotFoundException("ex.stepdefinition.load.notFound",
                    stepDefinition.getId());
            if (LOG.isInfoEnabled())
                LOG.info(nfe);
            throw nfe;
        } else if (updateCount != 1) {
            FxUpdateException dbe = new FxUpdateException("ex.stepdefinition.update.rows");
            LOG.error(dbe);
            throw dbe;
        }

        // Unique target has to exist for every workflow
        long workflowId;
        if (uniqueTargetAdded) {
            try {
                ri.runAsSystem();
                List<StepDefinition> stepDefinitionList = new ArrayList<StepDefinition>();
                stepDefinitionList.add(stepDefinition);
                // Do this for all existing workflows ..
                for (Workflow workflow : getEnvironment().getWorkflows()) {
                    workflowId = workflow.getId();
                    if (FxSharedUtils.getUsedStepDefinitions(workflow.getSteps(), stepDefinitionList)
                            .size() > 0) {
                        // create step IF the step definition is used by the workflow
                        stepEngine.createStep(
                                new Step(-1, stepDefinition.getUniqueTargetId(), workflowId, getEnvironment()
                                        .getStepByDefinition(workflowId, stepDefinition.getId()).getAclId()));
                    }
                }
            } catch (Exception exc) {
                throw new FxUpdateException(LOG, "ex.stepdefinition.uniqueTarget.create");
            } finally {
                ri.stopRunAsSystem();
            }
        }
        success = true;
    } catch (SQLException exc) {
        if (StorageManager.isUniqueConstraintViolation(exc)) {
            FxEntryExistsException ee = new FxEntryExistsException("ex.stepdefinition.name.exists",
                    stepDefinition.getName());
            if (LOG.isDebugEnabled())
                LOG.debug(ee);
            throw ee;
        }
        throw new FxUpdateException(LOG, exc, "ex.db.sqlError", exc.getMessage());
    } finally {
        Database.closeObjects(StepDefinitionEngineBean.class, con, stmt);
        if (!success) {
            EJBUtils.rollback(ctx);
        } else {
            StructureLoader.reloadWorkflows(FxContext.get().getDivisionId());
        }
    }
}

From source file:org.globus.workspace.scheduler.defaults.DefaultSchedulerAdapterDB.java

void addNodeRequest(NodeRequest req, String coschedid) throws WorkspaceDatabaseException {

    if (lager.traceLog) {
        logger.trace("addNodeRequest(): " + Lager.ensembleid(coschedid));
    }

    String assocString = null;
    final String[] assocs = req.getNeededAssociations();
    if (assocs != null && assocs.length > 0) {
        StringBuffer buf = new StringBuffer(256);
        buf.append(assocs[0]);
        for (int i = 1; i < assocs.length; i++) {
            buf.append(",").append(assocs[i]);
        }
        assocString = buf.toString();
    }

    // This loops row by row because, on failure, the scheduler is only sent
    // vmids, and it needs to be able to back out ensemble pieces one by one
    // while keeping the others intact (unless the entire ensemble is
    // destroyed).  The loop can go away once the inner abstractions are
    // themselves unlooped.

    Connection c = null;
    PreparedStatement pstmt = null;
    try {
        c = getConnection();
        c.setAutoCommit(false);

        for (int i = 0; i < req.getIds().length; i++) {

            pstmt = c.prepareStatement(DefaultSchedulerConstants.SQL_INSERT_NODE_REQUEST);

            pstmt.setString(1, coschedid);

            if (req.getGroupid() != null) {
                pstmt.setString(2, req.getGroupid());
            } else {
                pstmt.setNull(2, Types.VARCHAR);
            }

            pstmt.setInt(3, req.getIds()[i]);

            pstmt.setInt(4, req.getDuration());

            pstmt.setInt(5, req.getMemory());

            if (assocString != null) {
                pstmt.setString(6, assocString);
            } else {
                pstmt.setNull(6, Types.VARCHAR);
            }

            pstmt.executeUpdate();
            pstmt.close();
            pstmt = null;

        }

        c.commit();

    } catch (SQLException e) {
        logger.error("", e);
        throw new WorkspaceDatabaseException(e);
    } finally {
        try {
            if (pstmt != null) {
                pstmt.close();
            }
            if (c != null) {
                c.setAutoCommit(true);
                returnConnection(c);
            }
        } catch (SQLException sql) {
            logger.error("SQLException in finally cleanup", sql);
        }
    }
}

From source file:org.apache.kylin.common.persistence.JDBCResourceDAO.java

public void checkAndPutResource(final String resPath, final byte[] content, final long oldTS, final long newTS)
        throws SQLException, WriteConflictException {
    logger.trace(
            "execute checkAndPutResource method. resPath : {} , oldTs : {} , newTs : {} , content null ? : {} ",
            resPath, oldTS, newTS, content == null);
    executeSql(new SqlOperation() {
        @Override
        public void execute(Connection connection) throws SQLException {
            synchronized (resPath.intern()) {
                String tableName = getMetaTableName(resPath);
                if (!existResource(resPath)) {
                    if (oldTS != 0) {
                        throw new IllegalStateException(
                                "For a resource that does not exist, oldTS must be 0, but the actual oldTS is: " + oldTS);
                    }
                    if (isContentOverflow(content, resPath)) {
                        logger.debug("Overflow! resource path: {}, content size: {}", resPath, content.length);
                        pstat = connection.prepareStatement(getInsertSqlWithoutContent(tableName));
                        pstat.setString(1, resPath);
                        pstat.setLong(2, newTS);
                        writeLargeCellToHdfs(resPath, content);
                        try {
                            int result = pstat.executeUpdate();
                            if (result != 1)
                                throw new SQLException();
                        } catch (SQLException e) {
                            rollbackLargeCellFromHdfs(resPath);
                            throw e;
                        }
                    } else {
                        pstat = connection.prepareStatement(getInsertSql(tableName));
                        pstat.setString(1, resPath);
                        pstat.setLong(2, newTS);
                        pstat.setBlob(3, new BufferedInputStream(new ByteArrayInputStream(content)));
                        pstat.executeUpdate();
                    }
                } else {
                    // Note the checkAndPut trick:
                    // update {0} set {1}=? where {2}=? and {3}=?
                    pstat = connection.prepareStatement(getUpdateSqlWithoutContent(tableName));
                    pstat.setLong(1, newTS);
                    pstat.setString(2, resPath);
                    pstat.setLong(3, oldTS);
                    int result = pstat.executeUpdate();
                    if (result != 1) {
                        long realTime = getResourceTimestamp(resPath);
                        throw new WriteConflictException("Overwriting conflict " + resPath + ", expect old TS "
                                + oldTS + ", but it is " + realTime);
                    }
                    PreparedStatement pstat2 = null;
                    try {
                        // "update {0} set {1}=? where {3}=?"
                        pstat2 = connection.prepareStatement(getUpdateContentSql(tableName));
                        if (isContentOverflow(content, resPath)) {
                            logger.debug("Overflow! resource path: {}, content size: {}", resPath,
                                    content.length);
                            pstat2.setNull(1, Types.BLOB);
                            pstat2.setString(2, resPath);
                            writeLargeCellToHdfs(resPath, content);
                            try {
                                int result2 = pstat2.executeUpdate();
                                if (result2 != 1)
                                    throw new SQLException();
                            } catch (SQLException e) {
                                rollbackLargeCellFromHdfs(resPath);
                                throw e;
                            }
                            cleanOldLargeCellFromHdfs(resPath);
                        } else {
                            pstat2.setBinaryStream(1,
                                    new BufferedInputStream(new ByteArrayInputStream(content)));
                            pstat2.setString(2, resPath);
                            pstat2.executeUpdate();
                        }
                    } finally {
                        JDBCConnectionManager.closeQuietly(pstat2);
                    }
                }
            }
        }
    });
}
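
The comments in this example point at the checkAndPut trick: the first UPDATE is conditioned on both the resource path and the old timestamp, so it succeeds only if no concurrent writer has changed the row, and an update count other than 1 surfaces as a WriteConflictException. The typed NULL (Types.BLOB) then clears the inline content column when the payload overflows to HDFS.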

From source file:org.geowebcache.storage.jdbc.jobstore.JDBCJobWrapper.java

public void putJob(JobObject stObj) throws SQLException, StorageException {

    String query = "MERGE INTO " + "JOBS(job_id, layer_name, state, time_spent, time_remaining, tiles_done, "
            + "tiles_total, failed_tile_count, bounds, gridset_id, srs, thread_count, "
            + "zoom_start, zoom_stop, format, job_type, throughput, max_throughput, "
            + "priority, schedule, run_once, spawned_by, filter_update, parameters, "
            + "time_first_start, time_latest_start, time_finish) " + "KEY(job_id) "
            + "VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)";

    final Connection conn = getConnection();

    try {
        Long insertId;
        PreparedStatement prep = conn.prepareStatement(query, Statement.RETURN_GENERATED_KEYS);
        try {
            if (stObj.getJobId() == -1) {
                prep.setNull(1, java.sql.Types.BIGINT);
            } else {
                prep.setLong(1, stObj.getJobId());
            }

            prep.setString(2, stObj.getLayerName());
            prep.setString(3, stObj.getState().name());
            prep.setLong(4, stObj.getTimeSpent());
            prep.setLong(5, stObj.getTimeRemaining());
            prep.setLong(6, stObj.getTilesDone());
            prep.setLong(7, stObj.getTilesTotal());
            prep.setLong(8, stObj.getFailedTileCount());

            prep.setString(9, stObj.getBounds().toString());
            prep.setString(10, stObj.getGridSetId());
            prep.setInt(11, stObj.getSrs().getNumber());

            prep.setInt(12, stObj.getThreadCount());
            prep.setInt(13, stObj.getZoomStart());
            prep.setInt(14, stObj.getZoomStop());
            prep.setString(15, stObj.getFormat());
            prep.setString(16, stObj.getJobType().name());
            prep.setFloat(17, stObj.getThroughput());
            prep.setInt(18, stObj.getMaxThroughput());
            prep.setString(19, stObj.getPriority().name());
            prep.setString(20, stObj.getSchedule());
            prep.setBoolean(21, stObj.isRunOnce());
            prep.setLong(22, stObj.getSpawnedBy());
            prep.setBoolean(23, stObj.isFilterUpdate());
            prep.setString(24, stObj.getEncodedParameters());

            prep.setTimestamp(25, stObj.getTimeFirstStart());
            prep.setTimestamp(26, stObj.getTimeLatestStart());
            prep.setTimestamp(27, stObj.getTimeFinish());

            insertId = wrappedInsert(prep);
        } finally {
            close(prep);
        }
        if (insertId == null) {
            log.error("Did not receive an id for " + query);
        } else {
            if (stObj.getJobId() == -1) {
                // only use the inserted id if we were doing an insert.
                // what insertid will be if we weren't doing an insert is not defined.
                stObj.setJobId(insertId.longValue());
            }
        }

        putRecentJobLogs(stObj, conn);

    } finally {
        conn.close();
    }
}

From source file:org.apache.phoenix.mapreduce.index.IndexScrutinyMapper.java

private Map<String, Pair<Long, List<Object>>> buildTargetStatement(PreparedStatement targetStatement)
        throws SQLException {
    Map<String, Pair<Long, List<Object>>> targetPkToSourceValues = new HashMap<>(currentBatchValues.size());
    int rsIndex = 1;
    for (Pair<Long, List<Object>> batchTsRow : currentBatchValues) {
        List<Object> batchRow = batchTsRow.getSecond();
        // our original query against the source table (which provided the batchRow) projected
        // with the data table PK cols first, so the first numTargetPkCols form the PK
        String targetPkHash = getPkHash(batchRow.subList(0, numTargetPkCols));
        targetPkToSourceValues.put(targetPkHash, batchTsRow);
        for (int i = 0; i < numTargetPkCols; i++) {
            ColumnInfo targetPkInfo = targetTblColumnMetadata.get(i);
            Object value = batchRow.get(i);
            if (value == null) {
                targetStatement.setNull(rsIndex++, targetPkInfo.getSqlType());
            } else {
                targetStatement.setObject(rsIndex++, value, targetPkInfo.getSqlType());
            }
        }
    }
    return targetPkToSourceValues;
}
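
Unlike the earlier examples that pass java.sql.Types.NULL, this snippet supplies the target column's actual SQL type via ColumnInfo.getSqlType(), which is the form the setNull contract expects.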

From source file:org.ojbc.adapters.analyticaldatastore.dao.AnalyticalDatastoreDAOImpl.java

@Override
public Integer savePerson(final Person person) {
    log.debug("Inserting row into Person table");

    final String personStatement = "INSERT into Person (PersonSexID, PersonRaceID, PersonBirthDate, PersonUniqueIdentifier) values (?,?,?,?)";

    KeyHolder keyHolder = new GeneratedKeyHolder();
    jdbcTemplate.update(new PreparedStatementCreator() {
        public PreparedStatement createPreparedStatement(Connection connection) throws SQLException {
            PreparedStatement ps = connection.prepareStatement(personStatement, new String[] { "PersonSexID",
                    "PersonRaceID", "PersonBirthDate", "PersonUniqueIdentifier" });

            if (person.getPersonSexID() != null) {
                ps.setInt(1, person.getPersonSexID());
            } else {
                ps.setNull(1, java.sql.Types.NULL);
            }

            if (person.getPersonRaceID() != null) {
                ps.setInt(2, person.getPersonRaceID());
            } else {
                ps.setNull(2, java.sql.Types.NULL);
            }

            if (person.getPersonBirthDate() != null) {
                ps.setDate(3, new java.sql.Date(person.getPersonBirthDate().getTime()));
            } else {
                ps.setNull(3, java.sql.Types.NULL);
            }

            ps.setString(4, String.valueOf(person.getPersonUniqueIdentifier()));

            return ps;
        }
    }, keyHolder);

    return keyHolder.getKey().intValue();
}