Example usage for java.sql Connection setReadOnly

List of usage examples for java.sql Connection setReadOnly

Introduction

On this page you can find usage examples for java.sql Connection setReadOnly.

Prototype

void setReadOnly(boolean readOnly) throws SQLException;

Document

Puts this connection in read-only mode as a hint to the driver to enable database optimizations.
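
As a quick orientation before the project snippets, here is a minimal, self-contained sketch of the pattern they all use. It is an illustration only: the DataSource named ds and the users table are assumed, and setReadOnly must be called outside of an active transaction.

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

import javax.sql.DataSource;

public class ReadOnlyQueryExample {

    // Runs an aggregate query with the connection switched to read-only mode.
    // The DataSource "ds" and the "users" table are assumptions for illustration.
    static long countUsers(DataSource ds) throws SQLException {
        try (Connection con = ds.getConnection()) {
            // setReadOnly is only a hint to the driver; guard with isReadOnly()
            // to avoid a redundant mode switch
            if (!con.isReadOnly()) {
                con.setReadOnly(true);
            }
            try (Statement stmt = con.createStatement();
                    ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM users")) {
                return rs.next() ? rs.getLong(1) : 0L;
            }
        }
    }
}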

Usage

From source file: helma.objectmodel.db.NodeManager.java

/**
 * Updates a modified node in the embedded db or an external relational database, depending
 * on its database mapping.
 *
 * @return true if the DbMapping of the updated Node is to be marked as updated via
 *              DbMapping.setLastDataChange
 */
public boolean updateNode(IDatabase db, ITransaction txn, Node node)
        throws IOException, SQLException, ClassNotFoundException {

    invokeOnPersist(node);
    DbMapping dbm = node.getDbMapping();
    boolean markMappingAsUpdated = false;

    if ((dbm == null) || !dbm.isRelational()) {
        db.updateNode(txn, node.getID(), node);
    } else {
        Hashtable propMap = node.getPropMap();
        Property[] props;

        if (propMap == null) {
            props = new Property[0];
        } else {
            props = new Property[propMap.size()];
            propMap.values().toArray(props);
        }

        // make sure table meta info is loaded by dbmapping
        dbm.getColumns();

        StringBuffer b = dbm.getUpdate();

        // the comma flag is set after the first dirty column; it also tells us
        // whether there are any dirty columns at all
        boolean comma = false;

        for (int i = 0; i < props.length; i++) {
            // skip clean properties
            if ((props[i] == null) || !props[i].dirty) {
                // null out clean property so we don't consider it later
                props[i] = null;
                continue;
            }

            Relation rel = dbm.propertyToRelation(props[i].getName());

            // skip readonly, virtual and collection relations
            if ((rel == null) || rel.readonly || rel.virtual || (!rel.isPrimitiveOrReference())) {
                // null out property so we don't consider it later
                props[i] = null;
                continue;
            }

            if (comma) {
                b.append(", ");
            } else {
                comma = true;
            }

            b.append(rel.getDbField());
            b.append(" = ?");
        }

        // if no columns were updated, return false
        if (!comma) {
            return false;
        }

        b.append(" WHERE ");
        dbm.appendCondition(b, dbm.getIDField(), node.getID());

        Connection con = dbm.getConnection();
        // set connection to write mode
        if (con.isReadOnly())
            con.setReadOnly(false);
        PreparedStatement stmt = con.prepareStatement(b.toString());

        int stmtNumber = 0;
        long logTimeStart = logSql ? System.currentTimeMillis() : 0;

        try {
            for (int i = 0; i < props.length; i++) {
                Property p = props[i];

                if (p == null) {
                    continue;
                }

                Relation rel = dbm.propertyToRelation(p.getName());

                stmtNumber++;
                setStatementValue(stmt, stmtNumber, p, rel.getColumnType());

                p.dirty = false;

                if (!rel.isPrivate()) {
                    markMappingAsUpdated = true;
                }
            }

            stmt.executeUpdate();

        } finally {
            if (logSql) {
                long logTimeStop = System.currentTimeMillis();
                logSqlStatement("SQL UPDATE", dbm.getTableName(), logTimeStart, logTimeStop, b.toString());
            }
            if (stmt != null) {
                try {
                    stmt.close();
                } catch (Exception ignore) {
                }
            }
        }
    }

    // update may cause changes in the node's parent subnode array
    // TODO: is this really needed anymore?
    if (markMappingAsUpdated && node.isAnonymous()) {
        Node parent = node.getCachedParent();

        if (parent != null) {
            parent.markSubnodesChanged();
        }
    }

    return markMappingAsUpdated;
}
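
Note the isReadOnly() check that guards the setReadOnly(false) call above: the JDBC API forbids changing the read-only mode during an active transaction, and skipping a redundant switch can save a round trip with drivers that forward the hint to the server. The same guard recurs in most of the examples below.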

From source file: helma.objectmodel.db.NodeManager.java

/**
 * Prefetches the nodes missing from the cache for the given subnode list
 * range, loading them from the relational database in a single query.
 */
public void prefetchNodes(Node home, Relation rel, SubnodeList list, int start, int length) throws Exception {
    DbMapping dbm = rel.otherType;

    // this does nothing for objects in the embedded database
    if (dbm != null && dbm.isRelational()) {
        // int missing = cache.containsKeys(keys);
        List missing = collectMissingKeys(list, start, length);

        if (missing != null) {
            Connection con = dbm.getConnection();
            // set connection to read-only mode
            if (!con.isReadOnly())
                con.setReadOnly(true);

            Statement stmt = con.createStatement();
            DbColumn[] columns = dbm.getColumns();
            Relation[] joins = dbm.getJoins();
            String query = null;
            long logTimeStart = logSql ? System.currentTimeMillis() : 0;

            try {
                StringBuffer b = dbm.getSelect(null).append(" WHERE ");
                String idfield = (rel.groupby != null) ? rel.groupby : dbm.getIDField();
                String[] ids = (String[]) missing.toArray(new String[missing.size()]);

                dbm.appendCondition(b, idfield, ids);
                dbm.addJoinConstraints(b, " AND ");

                if (rel.groupby != null) {
                    rel.renderConstraints(b, home, " AND ");

                    if (rel.order != null) {
                        b.append(" ORDER BY ");
                        b.append(rel.order);
                    }
                }

                query = b.toString();

                ResultSet rs = stmt.executeQuery(query);

                String groupbyProp = null;
                HashMap groupbySubnodes = null;

                if (rel.groupby != null) {
                    groupbyProp = dbm.columnNameToProperty(rel.groupby);
                    groupbySubnodes = new HashMap();
                }

                String accessProp = null;

                if ((rel.accessName != null) && !rel.usesPrimaryKey()) {
                    accessProp = dbm.columnNameToProperty(rel.accessName);
                }

                while (rs.next()) {
                    // create new Nodes.
                    Node node = createNode(dbm, rs, columns, 0);
                    if (node == null) {
                        continue;
                    }
                    Key key = node.getKey();
                    Key secondaryKey = null;

                    // for grouped nodes, collect subnode lists for the intermediary
                    // group nodes.
                    String groupName = null;

                    if (groupbyProp != null) {
                        groupName = node.getString(groupbyProp);
                        if (groupName != null) {
                            Node groupNode = (Node) groupbySubnodes.get(groupName);

                            if (groupNode == null) {
                                groupNode = home.getGroupbySubnode(groupName, true);
                                groupbySubnodes.put(groupName, groupNode);
                            }

                            SubnodeList subnodes = groupNode.getSubnodeList();
                            if (subnodes == null) {
                                subnodes = groupNode.createSubnodeList();
                                // mark subnodes as up-to-date
                                subnodes.lastSubnodeFetch = subnodes.getLastSubnodeChange();
                            }
                            subnodes.add(new NodeHandle(key));
                        }
                    }

                    // if relation doesn't use primary key as accessName, get secondary key
                    if (accessProp != null) {
                        String accessName = node.getString(accessProp);
                        if (accessName != null) {
                            if (groupName == null) {
                                secondaryKey = new SyntheticKey(home.getKey(), accessName);
                            } else {
                                Key groupKey = new SyntheticKey(home.getKey(), groupName);
                                secondaryKey = new SyntheticKey(groupKey, accessName);
                            }
                        }

                    }

                    // register new nodes with the cache. If an up-to-date copy
                    // existed in the cache, use that.
                    registerNewNode(node, secondaryKey);
                    fetchJoinedNodes(rs, joins, columns.length);
                }

            } catch (Exception x) {
                app.logError("Error in prefetchNodes()", x);
            } finally {
                if (logSql) {
                    long logTimeStop = System.currentTimeMillis();
                    logSqlStatement("SQL SELECT_PREFETCH", dbm.getTableName(), logTimeStart, logTimeStop,
                            query);
                }
                if (stmt != null) {
                    try {
                        stmt.close();
                    } catch (Exception ignore) {
                    }
                }
            }
        }
    }
}

From source file: org.horizontaldb.integration.InterceptorMockEnvironmentTest.java

@Test
public void shouldValidateOneLevelShardedCall() throws SQLException {
    ConversationRegistry mockRegistry = EasyMock.createMock(ConversationRegistry.class);
    TenantContext mockTenantContext = EasyMock.createMock(TenantContext.class);
    org.apache.tomcat.jdbc.pool.DataSource mockDataSource = EasyMock
            .createMock(org.apache.tomcat.jdbc.pool.DataSource.class);
    DataSourceResource mockDataSourceResource = new DataSourceResource(mockDataSource);
    ShardBeanResolver mockShardBeanResolver = EasyMock.createMock(ShardBeanResolver.class);
    ShardBeanEnricher mockShardBeanEnricher = EasyMock.createMock(ShardBeanEnricher.class);
    Connection mockConnection = EasyMock.createMock(Connection.class);
    PreparedStatement mockStatement = EasyMock.createMock(PreparedStatement.class);
    ResultSet mockResultset = EasyMock.createMock(ResultSet.class);

    conversationRegistryMockProxy.setMockRegistry(mockRegistry);
    tenantContextMockProxy.setMockTenantContext(mockTenantContext);
    dataSourceFactoryMockProxy.setMockDataSourceResource(mockDataSourceResource);
    shardBeanResolverMockProxy.setMockResolver(mockShardBeanResolver);
    shardBeanEnricherMockProxy.setMockEnricher(mockShardBeanEnricher);

    // This is the protocol that the interceptors should follow during a sharded call
    mockRegistry.startConversation(testUserHelper.getJoeToken());
    expect(mockRegistry.hasConversation(TestUser.JOE.name())).andReturn(true);
    expect(mockTenantContext.resolveCurrentTenantIdentifier()).andReturn(TestUser.JOE.name());
    mockRegistry.addResource(TestUser.JOE.name(), mockDataSourceResource);
    mockRegistry.addResource(same(TestUser.JOE.name()), anyObject(DepartmentDaoImpl.class));
    expect(mockShardBeanResolver.getBean(same(DepartmentDao.class), anyObject(ShardContext.class)))
            .andReturn(null);
    mockShardBeanEnricher.setup(anyObject(DepartmentDaoImpl.class), anyObject(ShardContext.class));
    mockShardBeanEnricher.tearDown(anyObject(DepartmentDaoImpl.class), anyObject(ShardContext.class));
    mockDataSource.close(true);
    mockRegistry.teardownConversation(testUserHelper.getJoeToken());
    // end protocol

    // This is the flow of a Hibernate transaction; it is irrelevant to the test,
    // but had to be defined because of the mocked dataSource.
    expect(mockDataSource.getConnection()).andReturn(mockConnection);
    mockConnection.setReadOnly(true);
    expect(mockConnection.getAutoCommit()).andReturn(false);
    expect(mockConnection.prepareStatement(anyObject(String.class))).andReturn(mockStatement);
    expect(mockStatement.executeQuery()).andReturn(mockResultset);
    expect(mockStatement.getWarnings()).andReturn(null);
    mockStatement.clearWarnings();
    expect(mockStatement.getMaxRows()).andReturn(0);
    expect(mockStatement.getQueryTimeout()).andReturn(0);
    expect(mockResultset.next()).andReturn(true);
    expect(mockResultset.next()).andReturn(false);
    expect(mockResultset.getLong(anyObject(String.class))).andReturn(0L);
    expect(mockResultset.wasNull()).andReturn(false);
    mockResultset.close();
    mockStatement.close();
    mockConnection.commit();
    // end Hibernate transaction

    replay(mockRegistry, mockTenantContext, mockShardBeanResolver, mockShardBeanEnricher, mockDataSource,
            mockConnection, mockStatement, mockResultset);

    try {
        ShardContext context = new ShardContext(TestUser.JOE.name());

        testService.authenticate(testUserHelper.getJoeToken());

        Long actualCount = testService.getCountOfDepartments(context);

        assertEquals(0, actualCount.longValue());
    } finally {
        testService.logoff(testUserHelper.getJoeToken());
    }

    verify(mockRegistry, mockTenantContext, mockShardBeanResolver, mockShardBeanEnricher, mockDataSource,
            mockConnection, mockStatement, mockResultset);
}

From source file: helma.objectmodel.db.NodeManager.java

/**
 * Insert a node into a relational database.
 */
protected void insertRelationalNode(Node node, DbMapping dbm, Connection con)
        throws ClassNotFoundException, SQLException {

    if (con == null) {
        throw new NullPointerException("Error inserting relational node: Connection is null");
    }

    // set connection to write mode
    if (con.isReadOnly())
        con.setReadOnly(false);

    String insertString = dbm.getInsert();
    PreparedStatement stmt = con.prepareStatement(insertString);

    // app.logEvent ("inserting relational node: " + node.getID ());
    DbColumn[] columns = dbm.getColumns();

    long logTimeStart = logSql ? System.currentTimeMillis() : 0;

    try {
        int columnNumber = 1;

        for (int i = 0; i < columns.length; i++) {
            DbColumn col = columns[i];
            if (!col.isMapped())
                continue;
            if (col.isIdField()) {
                setStatementValue(stmt, columnNumber, node.getID(), col);
            } else if (col.isPrototypeField()) {
                setStatementValue(stmt, columnNumber, dbm.getExtensionId(), col);
            } else {
                Relation rel = col.getRelation();
                Property p = rel == null ? null : node.getProperty(rel.getPropName());

                if (p != null) {
                    setStatementValue(stmt, columnNumber, p, col.getType());
                } else if (col.isNameField()) {
                    stmt.setString(columnNumber, node.getName());
                } else {
                    stmt.setNull(columnNumber, col.getType());
                }
            }
            columnNumber += 1;
        }
        stmt.executeUpdate();

    } finally {
        if (logSql) {
            long logTimeStop = System.currentTimeMillis();
            logSqlStatement("SQL INSERT", dbm.getTableName(), logTimeStart, logTimeStop, insertString);
        }
        if (stmt != null) {
            try {
                stmt.close();
            } catch (Exception ignore) {
            }
        }
    }
}

From source file: com.frameworkset.commons.dbcp2.datasources.PerUserPoolDataSource.java

@Override
protected void setupDefaults(Connection con, String username) throws SQLException {
    Boolean defaultAutoCommit = isDefaultAutoCommit();
    if (username != null) {
        Boolean userMax = getPerUserDefaultAutoCommit(username);
        if (userMax != null) {
            defaultAutoCommit = userMax;
        }
    }

    Boolean defaultReadOnly = isDefaultReadOnly();
    if (username != null) {
        Boolean userMax = getPerUserDefaultReadOnly(username);
        if (userMax != null) {
            defaultReadOnly = userMax;
        }
    }

    int defaultTransactionIsolation = getDefaultTransactionIsolation();
    if (username != null) {
        Integer userMax = getPerUserDefaultTransactionIsolation(username);
        if (userMax != null) {
            defaultTransactionIsolation = userMax.intValue();
        }
    }

    if (defaultAutoCommit != null && con.getAutoCommit() != defaultAutoCommit.booleanValue()) {
        con.setAutoCommit(defaultAutoCommit.booleanValue());
    }

    if (defaultTransactionIsolation != UNKNOWN_TRANSACTIONISOLATION) {
        con.setTransactionIsolation(defaultTransactionIsolation);
    }

    if (defaultReadOnly != null && con.isReadOnly() != defaultReadOnly.booleanValue()) {
        con.setReadOnly(defaultReadOnly.booleanValue());
    }
}
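
The setupDefaults example above shows the other side of the coin: a pooled connection may come back from a previous user with the read-only flag still set, so the pool compares con.isReadOnly() against the configured default on checkout and only toggles the mode when the two differ.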

From source file: com.funambol.foundation.items.dao.PIMCalendarDAO.java

public CalendarWrapper getItem(String uid) throws DAOException {

    if (log.isTraceEnabled()) {
        log.trace("DAO start getItem " + uid);
    }

    Connection con = null;
    PreparedStatement ps = null;
    ResultSet rs = null;
    CalendarWrapper cw = null;

    try {
        // Looks up the data source when the first connection is created
        con = getUserDataSource().getRoutedConnection(userId);
        con.setReadOnly(true);

        ps = con.prepareStatement(SQL_GET_FNBL_PIM_CALENDAR_BY_ID_USERID);

        ps.setLong(1, Long.parseLong(uid));
        ps.setString(2, userId);

        rs = ps.executeQuery();

        cw = createCalendar(uid, rs);

        DBTools.close(null, ps, rs);

        ps = con.prepareStatement(SQL_GET_FNBL_PIM_CALENDAR_EXCEPTION_BY_CALENDAR);

        ps.setLong(1, Long.parseLong(uid));

        rs = ps.executeQuery();

        try {
            cw = addPIMCalendarExceptions(cw, rs);
        } catch (SQLException sqle) {
            throw new SQLException("Error while adding PIM calendar " + "exceptions. " + sqle,
                    sqle.getSQLState());
        }

    } catch (Exception e) {
        throw new DAOException("Error retrieving a calendar item: " + e, e);
    } finally {
        DBTools.close(con, ps, rs);
    }

    return cw;
}

From source file: axiom.objectmodel.db.NodeManager.java

/**
 * Generates an ID for the table by finding the current maximum value.
 */
synchronized String generateMaxID(DbMapping map) throws Exception {
    // Transactor tx = (Transactor) Thread.currentThread ();
    // tx.timer.beginEvent ("generateID "+map);
    String retval = null;
    Statement stmt = null;
    long logTimeStart = logSql ? System.currentTimeMillis() : 0;
    String q = new StringBuffer("SELECT MAX(")
            /*.append(map.getTableName(0)).append(".")*/.append(map.getIDField()).append(") FROM ")
            .append(map.getTableName(0)).toString();

    try {
        Connection con = map.getConnection();
        // set connection to read-only mode
        if (!con.isReadOnly())
            con.setReadOnly(true);

        stmt = con.createStatement();

        ResultSet rs = stmt.executeQuery(q);

        // check for empty table
        if (!rs.next()) {
            long currMax = map.getNewID(0);

            retval = Long.toString(currMax);
        } else {
            long currMax = rs.getLong(1);

            currMax = map.getNewID(currMax);
            retval = Long.toString(currMax);
        }
    } finally {
        if (logSql) {
            long logTimeStop = System.currentTimeMillis();
            logSqlStatement("SQL SELECT_MAX", map.getTableName(), logTimeStart, logTimeStop, q);
        }
        if (stmt != null) {
            try {
                stmt.close();
            } catch (Exception ignore) {
                app.logEvent(ignore.getMessage());
            }
        }
    }

    return retval;
}

From source file: axiom.objectmodel.db.NodeManager.java

String generateSequenceID(DbMapping map) throws Exception {
    // Transactor tx = (Transactor) Thread.currentThread ();
    // tx.timer.beginEvent ("generateID "+map);
    Statement stmt = null;
    String retval = null;
    long logTimeStart = logSql ? System.currentTimeMillis() : 0;
    String q = new StringBuffer("SELECT ").append(map.getIDgen()).append(".nextval FROM dual").toString();

    try {
        Connection con = map.getConnection();
        // TODO is it necessary to set connection to write mode here?
        if (con.isReadOnly())
            con.setReadOnly(false);

        stmt = con.createStatement();

        ResultSet rs = stmt.executeQuery(q);

        if (!rs.next()) {
            throw new SQLException("Error creating ID from Sequence: empty recordset");
        }

        retval = rs.getString(1);
    } finally {
        if (logSql) {
            long logTimeStop = System.currentTimeMillis();
            logSqlStatement("SQL SELECT_NEXTVAL", map.getTableName(), logTimeStart, logTimeStop, q);
        }
        if (stmt != null) {
            try {
                stmt.close();
            } catch (Exception ignore) {
                app.logEvent(ignore.getMessage());
            }
        }
    }

    return retval;
}

From source file: axiom.objectmodel.db.NodeManager.java

/**
 *  Performs the actual deletion of a node from either the embedded or an external
 *  SQL database.
 */
public void deleteNode(IDatabase db, ITransaction txn, Node node) throws Exception {
    DbMapping dbm = node.getDbMapping();

    if ((dbm == null) || !dbm.isRelational()) {
        String className = dbm.getClassName();
        IDatabase idb = null;
        if (className != null) {
            idb = (IDatabase) this.dbs.get(className);
        }
        if (idb == null) {
            idb = db;
        }
        try {
            idb.deleteNode(txn, node.getID(), ((DbKey) node.getKey()).getLayer());
            String proto = node.getPrototype();
            if ("File".equals(proto) || "Image".equals(proto)) {
                LuceneDatabase ldb = (LuceneDatabase) idb;
                ldb.getLuceneManager().deleteFromStorage(node);
            }
        } catch (Exception ex) {
            idb.deleteNode(txn, node.getID());
        }
    } else {
        Statement st = null;
        long logTimeStart = logSql ? System.currentTimeMillis() : 0;
        String idstring = node.getID();
        if (dbm.needsQuotes(dbm.getIDField())) {
            idstring = "'" + escape(idstring) + "'";
        }
        String str = new StringBuffer("DELETE " + dbm.getTableDeleteProperties() + "FROM ")
                .append(dbm.getTableName()).append(" WHERE ")
                //.append(dbm.getTableName(0))
                //.append(".")
                .append(dbm.getIDField()).append(" = ").append(idstring).append(dbm.getTableJoinClause(0))
                .toString();

        try {
            Connection con = dbm.getConnection();
            // set connection to write mode
            if (con.isReadOnly())
                con.setReadOnly(false);

            st = con.createStatement();

            st.executeUpdate(str);

        } finally {
            if (logSql) {
                long logTimeStop = System.currentTimeMillis();
                logSqlStatement("SQL DELETE", dbm.getTableName(), logTimeStart, logTimeStop, str);
            }
            if (st != null) {
                try {
                    st.close();
                } catch (Exception ignore) {
                    app.logEvent(ignore.getMessage());
                }
            }
        }
    }

    // node may still be cached via non-primary keys. mark as invalid
    node.setState(Node.INVALID);
}