Example usage for java.sql PreparedStatement setLong

List of usage examples for java.sql PreparedStatement setLong

Introduction

This page lists example usages of the java.sql PreparedStatement setLong method.

Prototype

void setLong(int parameterIndex, long x) throws SQLException;

Source Link

Document

Sets the designated parameter to the given Java long value.

Usage

From source file:com.nextep.datadesigner.vcs.services.VCSFiles.java

/**
 * Streams the BLOB content of a repository file from the database into a
 * local file at the given path.
 *
 * @param conn open JDBC connection to the repository database
 * @param file repository file whose content should be exported
 * @param path filesystem path of the output file to create
 * @throws SQLException if querying or reading the BLOB fails
 */
private void generateOracleFile(Connection conn, IRepositoryFile file, String path) throws SQLException {
    PreparedStatement stmt = null;
    ResultSet rs = null;
    InputStream blobStream = null;
    OutputStream os = null;
    try {
        // Querying blob
        stmt = conn.prepareStatement("SELECT rf.file_content " //$NON-NLS-1$
                + "FROM rep_files rf " //$NON-NLS-1$
                + "WHERE rf.file_id = ? "); //$NON-NLS-1$
        stmt.setLong(1, file.getUID().rawId());
        rs = stmt.executeQuery();
        if (rs.next()) {
            // Retrieving blob input stream
            blobStream = rs.getBinaryStream(1);
            if (blobStream == null) {
                // No content stored for this file: nothing to write.
                return;
            }

            // Opening output file
            File f = new File(path);
            os = new FileOutputStream(f);

            // Large 10K buffer for efficient read
            byte[] buffer = new byte[10240];
            int bytesRead;

            while ((bytesRead = blobStream.read(buffer)) >= 0) {
                os.write(buffer, 0, bytesRead);
            }
        } else {
            throw new ErrorException(VCSMessages.getString("files.notFound")); //$NON-NLS-1$
        }
    } catch (IOException e) {
        throw new ErrorException(VCSMessages.getString("files.readRepositoryProblem"), //$NON-NLS-1$
                e);
    } finally {
        safeClose(os);
        safeClose(blobStream);
        // Close JDBC resources independently: previously a SQLException
        // thrown by rs.close() would skip stmt.close() and leak the
        // statement. The nested finally guarantees both close attempts.
        try {
            if (rs != null) {
                rs.close();
            }
        } finally {
            if (stmt != null) {
                stmt.close();
            }
        }
    }
}

From source file:com.ushahidi.swiftriver.core.api.dao.impl.JpaTagDao.java

/**
 * Insert new tags in a single batch statement
 * /* w  w w . ja va2s.com*/
 * @param newTagIndex
 * @param drops
 */
private void batchInsert(final Map<String, List<int[]>> newTagIndex, final List<Drop> drops, Sequence seq) {

    final List<String> hashes = new ArrayList<String>();
    hashes.addAll(newTagIndex.keySet());
    final long startKey = sequenceDao.getIds(seq, hashes.size());

    String sql = "INSERT INTO tags (id, hash, tag, " + "tag_canonical, tag_type) " + "VALUES (?,?,?,?,?)";

    jdbcTemplate.batchUpdate(sql, new BatchPreparedStatementSetter() {
        public void setValues(PreparedStatement ps, int i) throws SQLException {
            String hash = hashes.get(i);

            // Update drops with the newly generated id
            for (int[] index : newTagIndex.get(hash)) {
                drops.get(index[0]).getTags().get(index[1]).setId(startKey + i);
            }

            int[] index = newTagIndex.get(hash).get(0);
            Tag tag = drops.get(index[0]).getTags().get(index[1]);
            ps.setLong(1, tag.getId());
            ps.setString(2, tag.getHash());
            ps.setString(3, tag.getTag());
            ps.setString(4, tag.getTagCanonical());
            ps.setString(5, tag.getType());
        }

        public int getBatchSize() {
            return hashes.size();
        }
    });

    // Update the droplet_tags table
    insertDropletTags(drops);

}

From source file:dk.netarkivet.harvester.datamodel.extendedfield.ExtendedFieldDBDAO.java

/**
 * Read an ExtendedField from database.
 *
 * @param connection A connection to the harvestDatabase
 * @param aExtendedfieldId The ID for a given ExtendedField
 * @return An ExtendedField object for the given ID.
 * @throws UnknownID if no extended field with the given id exists
 * @throws IOFailure if a SQL error occurs while reading
 */
private synchronized ExtendedField read(Connection connection, Long aExtendedfieldId) {
    if (!exists(connection, aExtendedfieldId)) {
        throw new UnknownID("Extended Field id " + aExtendedfieldId + " is not known in persistent storage");
    }

    ExtendedField extendedField = null;
    PreparedStatement statement = null;
    try {
        statement = connection.prepareStatement("" + "SELECT extendedfieldtype_id, " + "       name, "
                + "       format, " + "       defaultvalue, " + "       options, " + "       datatype, "
                + "       mandatory, " + "       sequencenr, " + "       maxlen " + "FROM   extendedfield "
                + "WHERE  extendedfield_id = ? ");

        statement.setLong(1, aExtendedfieldId);
        ResultSet result = statement.executeQuery();
        result.next();

        long extendedfieldtypeId = result.getLong(1);
        String name = result.getString(2);
        String format = result.getString(3);
        String defaultvalue = result.getString(4);
        String options = result.getString(5);
        int datatype = result.getInt(6);
        // mandatory is stored as an int; any non-zero value means true
        boolean mandatory = (result.getInt(7) != 0);
        int sequencenr = result.getInt(8);
        int maxlen = result.getInt(9);

        extendedField = new ExtendedField(aExtendedfieldId, extendedfieldtypeId, name, format, datatype,
                mandatory, sequencenr, defaultvalue, options, maxlen);

        return extendedField;
    } catch (SQLException e) {
        String message = "SQL error reading extended Field " + aExtendedfieldId + " in database" + "\n"
                + ExceptionUtils.getSQLExceptionCause(e);
        log.warn(message, e);
        throw new IOFailure(message, e);
    } finally {
        // Previously the statement was never closed, leaking a statement
        // (and its ResultSet) on every call. Closing the statement also
        // closes the ResultSet it produced.
        if (statement != null) {
            try {
                statement.close();
            } catch (SQLException e) {
                log.warn("Error closing statement for extended field " + aExtendedfieldId, e);
            }
        }
    }
}

From source file:com.ushahidi.swiftriver.core.api.dao.impl.JpaLinkDao.java

/**
 * Insert new links in a single batch statement
 * /*  w  w w.j a  va 2  s.c o m*/
 * @param newLinkIndex
 * @param drops
 */
private void batchInsert(final Map<String, List<int[]>> newLinkIndex, final List<Drop> drops, Sequence seq) {

    final List<String> hashes = new ArrayList<String>();
    hashes.addAll(newLinkIndex.keySet());
    final long startKey = sequenceDao.getIds(seq, hashes.size());

    String sql = "INSERT INTO links (id, hash, url) " + "VALUES (?,?,?)";

    jdbcTemplate.batchUpdate(sql, new BatchPreparedStatementSetter() {
        public void setValues(PreparedStatement ps, int i) throws SQLException {
            String hash = hashes.get(i);

            // Update drops with the newly generated id
            for (int[] index : newLinkIndex.get(hash)) {
                drops.get(index[0]).getLinks().get(index[1]).setId(startKey + i);
            }

            int[] index = newLinkIndex.get(hash).get(0);
            Link link = drops.get(index[0]).getLinks().get(index[1]);
            ps.setLong(1, link.getId());
            ps.setString(2, link.getHash());
            ps.setString(3, link.getUrl());
        }

        public int getBatchSize() {
            return hashes.size();
        }
    });

    // Update the droplet_links table
    insertDropletLinks(drops);

}

From source file:com.flexive.core.storage.genericSQL.GenericSQLFulltextIndexer.java

/**
 * {@inheritDoc}
 */
@Override
public void remove() {
    if (pk == null) {
        LOG.warn("Tried to remove a fulltext version with no pk provided!");
        return;
    }
    PreparedStatement stmt = null;
    try {
        // Restrict the generic remove statement to this pk's version.
        stmt = con.prepareStatement(CONTENT_DATA_FT_REMOVE + " AND VER=?");
        stmt.setLong(1, pk.getId());
        stmt.setInt(2, pk.getVersion());
        stmt.executeUpdate();
    } catch (SQLException e) {
        LOG.error(e);
    } finally {
        if (stmt != null) {
            try {
                stmt.close();
            } catch (SQLException e) {
                LOG.error(e);
            }
        }
    }
}

From source file:com.npstrandberg.simplemq.MessageQueueImp.java

/**
 * Fetches up to {@code limit} unread messages, either deleting them or
 * marking them read depending on {@code delete}.
 *
 * @param limit  maximum number of messages to return (clamped to >= 1)
 * @param delete true to delete fetched rows, false to mark them read
 * @return the fetched messages; empty on SQL error
 */
private List<Message> receiveInternal(int limit, boolean delete) {
    if (limit < 1) {
        limit = 1;
    }

    List<Message> messages = new ArrayList<Message>(limit);

    // The lock is making sure, that a SELECT and DELETE/UPDATE is only
    // done by one thread at a time. It must be acquired BEFORE the try
    // block: in the original code lock() was inside the try, so if
    // prepareStatement threw first, the finally clause called unlock()
    // on a lock this thread never held (IllegalMonitorStateException).
    lock.lock();
    try {
        // 'ORDER BY time' depends on that the host computer times is always right.
        // 'ORDER BY id' what happens with the 'id' when we hit Long.MAX_VALUE?
        PreparedStatement ps = conn.prepareStatement(
                "SELECT LIMIT 0 " + limit + " id, object, body FROM message WHERE read=false ORDER BY id");
        try {
            ResultSet rs = ps.executeQuery();

            while (rs.next()) {
                long id = rs.getLong(1);
                InputStream is = rs.getBinaryStream(2);
                String body = rs.getString(3);

                if (delete) {
                    PreparedStatement updateInventory = conn.prepareStatement("DELETE FROM message WHERE id=?");
                    try {
                        updateInventory.setLong(1, id);
                        updateInventory.executeUpdate();
                    } finally {
                        // Previously these per-row statements were never
                        // closed, leaking one statement per message.
                        updateInventory.close();
                    }
                } else {
                    PreparedStatement updateInventory = conn
                            .prepareStatement("UPDATE message SET read=? WHERE id=?");
                    try {
                        updateInventory.setBoolean(1, true);
                        updateInventory.setLong(2, id);
                        updateInventory.executeUpdate();
                    } finally {
                        updateInventory.close();
                    }
                }

                MessageWrapper mw = new MessageWrapper();
                mw.id = id;
                mw.body = body;
                if (is != null)
                    mw.object = Utils.deserialize(is);

                messages.add(mw);
            }
        } finally {
            // Closing the statement also closes its ResultSet; the
            // original leaked ps when executeQuery or row handling threw.
            ps.close();
        }
    } catch (SQLException e) {
        logger.error(e);
    } finally {
        lock.unlock();
    }

    return messages;
}

From source file:eu.optimis_project.monitoring.storage.MySQLStorageManager.java

/**
 * Stores a single measurement in the entries table.
 *
 * @param measurement the measurement to persist
 * @return true if the row was inserted, false if the insert failed
 * @throws IOException if a connection could not be obtained or closed
 */
@Override
public boolean storeData(Measurement measurement) throws IOException {
    final String query = "INSERT INTO " + tableName + " VALUES (?,?,?,?)";

    Connection connection = null;
    // Track whether the insert actually succeeded: the original method
    // swallowed the insert's SQLException and still returned true,
    // silently telling callers a failed measurement had been stored.
    boolean stored = false;
    try {
        connection = getConnection();
        PreparedStatement statement = null;
        try {
            statement = connection.prepareStatement(query);
            statement.setString(ENTRIES_COLUMNINDEX_SERVICEID, measurement.getServiceID());
            statement.setString(ENTRIES_COLUMNINDEX_NAME, measurement.getName());
            statement.setString(ENTRIES_COLUMNINDEX_DATA, measurement.getData());
            statement.setLong(ENTRIES_COLUMNINDEX_TIMESTAMP, measurement.getTimestamp());
            statement.execute();
            stored = true;
        } catch (SQLException e) {
            log.debug("Failed to add measurement: " + measurement, e);
        } finally {
            try {
                if (statement != null) {
                    statement.close();
                }
            } catch (SQLException e) {
                log.warn("Failed to close statement.");
            }
        }
    } catch (SQLException e) {
        throw new IOException(e);
    } finally {
        try {
            if (connection != null) {
                connection.close();
            }
        } catch (SQLException e) {
            // Best-effort close; log instead of silently discarding.
            log.warn("Failed to close connection.", e);
        }
    }

    return stored;
}

From source file:com.flexive.core.storage.genericSQL.GenericSQLFulltextIndexer.java

/**
 * {@inheritDoc}/*from   w ww  .  j a va 2  s.c  o  m*/
 */
@Override
public void remove(long assignmentId) {
    if (pk == null) {
        LOG.warn("Tried to remove a fulltext version with no pk provided!");
        return;
    }
    PreparedStatement ps = null;
    try {
        ps = con.prepareStatement(CONTENT_DATA_FT_REMOVE + " AND VER=? AND ASSIGN=?");
        ps.setLong(1, pk.getId());
        ps.setInt(2, pk.getVersion());
        ps.setLong(3, assignmentId);
        ps.executeUpdate();
    } catch (SQLException e) {
        LOG.error(e);
    } finally {
        try {
            if (ps != null)
                ps.close();
        } catch (SQLException e) {
            LOG.error(e);
        }
    }
}

From source file:architecture.ee.web.community.page.dao.jdbc.JdbcPageDao.java

/**
 * Synchronizes the persisted properties of a page with its in-memory
 * property map: removed keys are deleted, changed keys are updated and
 * new keys are inserted, each through its own batched statement.
 *
 * @param page the page whose properties are persisted
 */
private void updateProperties(final Page page) {
    Map<String, String> previousProps = loadProperties(page);
    log.debug("old:" + previousProps);
    log.debug("new:" + page.getProperties());

    // Diff the stored properties against the current map.
    final List<String> removedKeys = getDeletedPropertyKeys(previousProps, page.getProperties());
    final List<String> changedKeys = getModifiedPropertyKeys(previousProps, page.getProperties());
    final List<String> newKeys = getAddedPropertyKeys(previousProps, page.getProperties());

    log.debug("deleteKeys:" + removedKeys.size());
    if (!removedKeys.isEmpty()) {
        getExtendedJdbcTemplate().batchUpdate(
                getBoundSql("ARCHITECTURE_COMMUNITY.DELETE_PAGE_PROPERTY_BY_NAME").getSql(),
                new BatchPreparedStatementSetter() {
                    public void setValues(PreparedStatement ps, int i) throws SQLException {
                        ps.setLong(1, page.getPageId());
                        ps.setLong(2, page.getVersionId());
                        ps.setString(3, removedKeys.get(i));
                    }

                    public int getBatchSize() {
                        return removedKeys.size();
                    }
                });
    }

    log.debug("modifiedKeys:" + changedKeys.size());
    if (!changedKeys.isEmpty()) {
        getExtendedJdbcTemplate().batchUpdate(
                getBoundSql("ARCHITECTURE_COMMUNITY.UPDATE_PAGE_PROPERTY_BY_NAME").getSql(),
                new BatchPreparedStatementSetter() {
                    public void setValues(PreparedStatement ps, int i) throws SQLException {
                        String name = changedKeys.get(i);
                        String value = page.getProperties().get(name);
                        log.debug("batch[" + name + "=" + value + "]");
                        ps.setString(1, value);
                        ps.setLong(2, page.getPageId());
                        ps.setLong(3, page.getVersionId());
                        ps.setString(4, name);
                    }

                    public int getBatchSize() {
                        return changedKeys.size();
                    }
                });
    }

    log.debug("addedKeys:" + newKeys.size());
    if (!newKeys.isEmpty()) {
        getExtendedJdbcTemplate().batchUpdate(
                getBoundSql("ARCHITECTURE_COMMUNITY.INSERT_PAGE_PROPERTY").getSql(),
                new BatchPreparedStatementSetter() {
                    public void setValues(PreparedStatement ps, int i) throws SQLException {
                        String name = newKeys.get(i);
                        String value = page.getProperty(name, null);
                        log.debug("batch[" + name + "=" + value + "]");
                        ps.setLong(1, page.getPageId());
                        ps.setLong(2, page.getVersionId());
                        ps.setString(3, name);
                        ps.setString(4, value);
                    }

                    public int getBatchSize() {
                        return newKeys.size();
                    }
                });
    }
}

From source file:com.ushahidi.swiftriver.core.api.dao.impl.JpaPlaceDao.java

/**
 * Insert new places in a single batch statement
 * /*from ww  w.ja  va2s.  com*/
 * @param newPlaceIndex
 * @param drops
 */
private void batchInsert(final Map<String, List<int[]>> newPlaceIndex, final List<Drop> drops, Sequence seq) {

    final List<String> hashes = new ArrayList<String>();
    hashes.addAll(newPlaceIndex.keySet());
    final long startKey = sequenceDao.getIds(seq, hashes.size());

    String sql = "INSERT INTO places (id, hash, place_name, " + "place_name_canonical, longitude, latitude) "
            + "VALUES (?,?,?,?,?,?)";

    jdbcTemplate.batchUpdate(sql, new BatchPreparedStatementSetter() {
        public void setValues(PreparedStatement ps, int i) throws SQLException {
            String hash = hashes.get(i);

            // Update drops with the newly generated id
            for (int[] index : newPlaceIndex.get(hash)) {
                drops.get(index[0]).getPlaces().get(index[1]).setId(startKey + i);
            }

            int[] index = newPlaceIndex.get(hash).get(0);
            Place place = drops.get(index[0]).getPlaces().get(index[1]);
            ps.setLong(1, place.getId());
            ps.setString(2, place.getHash());
            ps.setString(3, place.getPlaceName());
            ps.setString(4, place.getPlaceNameCanonical());
            ps.setFloat(5, place.getLongitude());
            ps.setFloat(6, place.getLatitude());
        }

        public int getBatchSize() {
            return hashes.size();
        }
    });

    // Update the droplet_places table
    insertDropletPlaces(drops);

}