Example usage for java.sql Connection TRANSACTION_NONE

List of usage examples for java.sql Connection TRANSACTION_NONE

Introduction

On this page you can find usage examples for java.sql Connection TRANSACTION_NONE.

Prototype

int TRANSACTION_NONE

Click the source link below to view the source code for java.sql Connection TRANSACTION_NONE.

Click Source Link

Document

A constant indicating that transactions are not supported.

Usage

From source file:com.couchbase.CBConnection.java

/**
 * Attempts to change the transaction isolation level for this
 * <code>Connection</code> object to the one given.
 * The constants defined in the interface <code>Connection</code>
 * are the possible transaction isolation levels.
 * <p/>
 * <B>Note:</B> If this method is called during a transaction, the result
 * is implementation-defined.
 *
 * @param level one of the following <code>Connection</code> constants:
 *              <code>Connection.TRANSACTION_READ_UNCOMMITTED</code>,
 *              <code>Connection.TRANSACTION_READ_COMMITTED</code>,
 *              <code>Connection.TRANSACTION_REPEATABLE_READ</code>, or
 *              <code>Connection.TRANSACTION_SERIALIZABLE</code>.
 *              (Note that <code>Connection.TRANSACTION_NONE</code> cannot be used
 *              because it specifies that transactions are not supported.)
 * @throws java.sql.SQLException if a database access error occurs, this
 *                               method is called on a closed connection
 *                               or the given parameter is not one of the <code>Connection</code>
 *                               constants
 * @see java.sql.DatabaseMetaData#supportsTransactionIsolationLevel
 * @see #getTransactionIsolation
 */
@Override
public void setTransactionIsolation(int level) throws SQLException {
    checkClosed();

    switch (level) {
    case Connection.TRANSACTION_READ_UNCOMMITTED:
    case Connection.TRANSACTION_READ_COMMITTED:
    case Connection.TRANSACTION_REPEATABLE_READ:
    case Connection.TRANSACTION_SERIALIZABLE:
        // Recognised isolation levels are accepted (and ignored, since the
        // driver does not support transactions anyway).
        break;
    case Connection.TRANSACTION_NONE:
        // Per the JDBC contract documented above, TRANSACTION_NONE cannot be
        // passed to setTransactionIsolation: it specifies that transactions
        // are not supported, so it is rejected like any other invalid value.
        throw new SQLException("transaction level TRANSACTION_NONE not allowed ");
    default:
        throw new SQLException("transaction level " + level + " not allowed ");
    }

}

From source file:com.couchbase.CBConnection.java

/**
 * Retrieves this <code>Connection</code> object's current
 * transaction isolation level./*from  ww w.  j  ava  2  s  .  c  om*/
 *
 * @return the current transaction isolation level, which will be one
 * of the following constants:
 * <code>Connection.TRANSACTION_READ_UNCOMMITTED</code>,
 * <code>Connection.TRANSACTION_READ_COMMITTED</code>,
 * <code>Connection.TRANSACTION_REPEATABLE_READ</code>,
 * <code>Connection.TRANSACTION_SERIALIZABLE</code>, or
 * <code>Connection.TRANSACTION_NONE</code>.
 * @throws java.sql.SQLException if a database access error occurs
 *                               or this method is called on a closed connection
 * @see #setTransactionIsolation
 */
@Override
public int getTransactionIsolation() throws SQLException {
    checkClosed();
    return Connection.TRANSACTION_NONE;
}

From source file:com.evolveum.midpoint.repo.sql.SqlRepositoryServiceImpl.java

/**
 * Produces a human-readable description of the transaction isolation level,
 * preferring the value read live from the connection over the one found in
 * the repository configuration.
 *
 * @param connection the JDBC connection to query for its isolation level.
 * @param config     the repository configuration used as a fallback source.
 * @return a descriptive string, or {@code null} when neither source yields a value.
 */
private String getTransactionIsolation(Connection connection, SqlRepositoryConfiguration config) {
    // Start from the configured value (if any); it is overwritten below when
    // the connection can be queried successfully.
    String value = config.getTransactionIsolation() != null
            ? config.getTransactionIsolation().name() + "(read from repo configuration)"
            : null;

    try {
        final int level = connection.getTransactionIsolation();
        if (level == Connection.TRANSACTION_NONE) {
            value = "TRANSACTION_NONE (read from connection)";
        } else if (level == Connection.TRANSACTION_READ_COMMITTED) {
            value = "TRANSACTION_READ_COMMITTED (read from connection)";
        } else if (level == Connection.TRANSACTION_READ_UNCOMMITTED) {
            value = "TRANSACTION_READ_UNCOMMITTED (read from connection)";
        } else if (level == Connection.TRANSACTION_REPEATABLE_READ) {
            value = "TRANSACTION_REPEATABLE_READ (read from connection)";
        } else if (level == Connection.TRANSACTION_SERIALIZABLE) {
            value = "TRANSACTION_SERIALIZABLE (read from connection)";
        } else {
            value = "Unknown value in connection.";
        }
    } catch (Exception ex) {
        //nowhere to report error (no operation result available)
    }

    return value;
}

From source file:org.executequery.gui.browser.ConnectionPanel.java

/**
 * Maps a combo-box selection index onto the matching JDBC isolation constant.
 *
 * @param index the selected index; positions 1-5 correspond, in order, to
 *              NONE, READ_UNCOMMITTED, READ_COMMITTED, REPEATABLE_READ and
 *              SERIALIZABLE.
 * @return the matching {@link Connection} constant, or -1 for any other index.
 */
private int isolationLevelFromSelection(int index) {
    // Table-driven mapping; entry i holds the level for selection index i + 1.
    final int[] levelForIndex = {
            Connection.TRANSACTION_NONE,
            Connection.TRANSACTION_READ_UNCOMMITTED,
            Connection.TRANSACTION_READ_COMMITTED,
            Connection.TRANSACTION_REPEATABLE_READ,
            Connection.TRANSACTION_SERIALIZABLE
    };
    if (index >= 1 && index <= levelForIndex.length) {
        return levelForIndex[index - 1];
    }
    return -1;
}

From source file:net.starschema.clouddb.jdbc.BQDatabaseMetadata.java

/**
 * <p>
 * <h1>Implementation Details:</h1><br>
 * Commit is not supported, so the default isolation level is reported as
 * "no transactions".
 * </p>
 *
 * @return {@code java.sql.Connection.TRANSACTION_NONE}
 */
@Override
public int getDefaultTransactionIsolation() throws SQLException {
    return java.sql.Connection.TRANSACTION_NONE;
}

From source file:org.executequery.gui.browser.ConnectionPanel.java

/**
 * Sets the values for the tx level on the tx combo
 * based on the tx level in the connection object.
 */
private void setTransactionIsolationLevel() {
    // Combo positions 1-5 correspond, in order, to these JDBC constants;
    // any other isolation value leaves the combo at position 0.
    final int[] levelForIndex = {
            Connection.TRANSACTION_NONE,
            Connection.TRANSACTION_READ_UNCOMMITTED,
            Connection.TRANSACTION_READ_COMMITTED,
            Connection.TRANSACTION_REPEATABLE_READ,
            Connection.TRANSACTION_SERIALIZABLE
    };
    final int isolationLevel = databaseConnection.getTransactionIsolation();
    int index = 0;
    for (int i = 0; i < levelForIndex.length; i++) {
        if (levelForIndex[i] == isolationLevel) {
            index = i + 1;
            break;
        }
    }
    txCombo.setSelectedIndex(index);
}

From source file:ca.gnewton.lusql.core.LuSqlMain.java

/**
 * Registers every LuSql command-line option on the class-level {@code options}
 * object (Apache Commons CLI). Purely additive: each call below defines one
 * option's flag, arity and help text. Option help strings embed defaults read
 * from {@code LuSql}/{@code LuSqlFields}, so this must run after those
 * constants are initialised.
 */
static void setupOptions() {

    // -Q: subqueries attached to a field; repeatable.
    options.addOption(OptionBuilder.hasArgs().withDescription(
            "Subquery in the form \"field|A:A:A|sql\" or \"field|A:A:A A:A:A...|sql\" or \"field|sql\"  (See -i for A:A:A values). Note that you can have multiple -Qs. Also note that putting a '*' before the field indicates you want the results cached (useful only if there is a possible for subsequent cache hits. Use only if you know what you are doing.")
            .create("Q"));

    options.addOption(OptionBuilder.hasArgs().withDescription(
            "For DocSinks (Indexes) that support multiple real indexes, either to eventual merging or as-is")
            .create("L"));

    options.addOption(OptionBuilder.hasArgs().withDescription(
            "Set static Document field and value. This is a field that has the same value for all saved documents. Format: \"field=value\" or \"A:A:A:field=value\" (See -i for A:A:A values)")
            .create("g"));

    // -a: analyzer class; default comes from LuSql.
    options.addOption(OptionBuilder.hasArg()
            .withDescription(
                    "Full name class implementing Lucene Analyzer; Default: " + LuSql.DefaultAnalyzerClassName)
            .create("a"));

    options.addOption(OptionBuilder.hasArg()
            .withDescription(
                    "Offset # documents to ignore before indexing. Default:" + LuSqlFields.OffsetDefault)
            .create("O"));

    // Sink/source classes; long option names come from class-level constants.
    options.addOption(OptionBuilder.hasArg().withDescription(
            "Full name class implementing DocSink (the index class). Default: " + LuSql.DefaultDocSinkClassName)
            .create(CLIDocSinkClassName));

    options.addOption(OptionBuilder.hasArg()
            .withDescription("Full name class implementing DocSource (the index class). Default: "
                    + LuSql.DefaultDocSourceClassName)
            .create("so"));

    options.addOption(OptionBuilder.hasArg().withDescription(
            "Primary key field name fron DocSource to be used in DocSink. Only for DocSinks that need it. Lucene does not. BDB does. For JDBCDocSource, if not set, uses first field in SQL query.")
            .create("P"));

    options.addOption("A", false, "Append to existing Lucene index.");

    options.addOption(OptionBuilder.hasArg()
            .withDescription("Queue size for multithreading. Default: numThreads * 50").create("S"));

    options.addOption(OptionBuilder.hasArg().withDescription(
            "Tries to limit activity to keep load average below this (float) value. Can reduce performance. Default: "
                    + LuSql.loadAverageLimit)
            .create("V"));

    // Boolean flags (no argument).
    options.addOption("J", false, "For multiple indexes (see -L) do not merge. Default: false");

    options.addOption("X", false, "Print out command line arguments");

    options.addOption("Y", false, "Silent output");

    options.addOption("o", false, "If supported, have DocSink write to stdout");

    //////////////////////////
    options.addOption(OptionBuilder.hasArg()
            //.isRequired()
            .withDescription(
                    "JDBC connection URL: REQUIRED _OR_ Source location (Source dependent: file, url, etc)")
            .create("c"));

    //////////////////////////
    options.addOption(OptionBuilder.hasArg()
            .withDescription("Verbose output chunk size. Default:" + LuSqlFields.DefaultChunkSize).create("C"));

    //////////////////////////
    options.addOption(OptionBuilder.hasArg()
            .withDescription("Amount of documents to be processed per thread. Default:"
                    + LuSqlFields.DefaultWorkPerThread
                    + ". Increasing tends to improve throughput; Decreasing tends to reduce memory problems and can alleviate an \"Out of memory\" exception. Should be 5-100 for medium/small documents. Should be 1 for very large documents.")
            .create("w"));

    //////////////////////////
    options.addOption(OptionBuilder.hasArg()
            .withDescription("Full name of DB driver class (should be in CLASSPATH); Default: "
                    + LuSql.DefaultJDBCDriverClassName)
            .create("d"));

    //////////////////////////
    options.addOption(OptionBuilder.hasArgs()
            .withDescription("Full name class implementing DocumentFilter; Default: "
                    + LuSql.DefaultDocFilterClassName
                    + " (does nothing). This is applied before each Lucene Document is added to the Index. If it returns null, nothing is added. Note that multiple filters are allowed. They are applied in the same order as they appear in the command line.")
            .create(CLIDocFiltersClassName));

    //////////////////////////
    options.addOption(OptionBuilder.hasArgs().withDescription(
            "Only include these fields from DocSource. Example: -F author -F id. Is absolute (i.e. even if you have additional fields - like in your SQL query - they will be filtered out.")
            .create("F"));

    //////////////////////////
    options.addOption("I", true,
            "Global field index parameters. This sets all the fields parameters to this one set. Format: A:A:A. See -i for A:A:A values. Note that -i has precedence over -I.");

    //////////////////////////
    options.addOption(OptionBuilder.hasArgs().withDescription(
            "Size of internal arrays of documents. One of these arrays is put on the queue. So the number of objects waiting to be processed is K*S (array size * queue size). For small objects have more (k=100). For large objects have fewer (k=5). Default: "
                    + LuSqlFields.DefaultDocPacketSize)
            .create("K"));

    //////////////////////////
    options.addOption(OptionBuilder.hasArgs().withDescription(
            "Full name plugin class; Get description and properties options needed by specific plugin (filter, source, or sink.")
            .create("e"));

    // The -i help text is assembled here: it enumerates the legal A:A:A
    // (Index:Store:TermVector) values from the LuceneFieldParameters maps and
    // embeds the current defaults.
    StringBuilder sb = new StringBuilder();
    sb.append("One set per field in SQL, and in same order as in SQL. ");
    sb.append("Used only if you want to overide the defaults (below). ");
    sb.append("See for more information Field.Index, Field.Store, Field.TermVector in");
    sb.append(
            "org.apache.lucene.document.Field http://lucene.apache.org/java/3_0_2/api/core/org/apache/lucene/document/Field.html");
    //http://lucene.apache.org/java/2_2_0/api/org/apache/lucene/document/Field.html");
    sb.append("\nDefault: A:A:A= "
            //+ Util.getIndex(LuSql.IndexDefault, IndexParameterValues) 
            //+ Util.getIndex(LuSql.StoreDefault, StoreParameterValues) 
            //+ Util.getIndex(LuSql.TermVectorDefault, TermVectorParameterValues) 
            + LuceneFieldParameters.rindex.get(LuSql.defaultLuceneFieldParameters.getIndex()) + ":"
            + LuceneFieldParameters.rstorex.get(LuSql.defaultLuceneFieldParameters.getStore()) + ":"
            + LuceneFieldParameters.rtermx.get(LuSql.defaultLuceneFieldParameters.getTermVector()));

    sb.append("\nField Index Parameter values:");
    sb.append("\nIndex: Default: "
            + LuceneFieldParameters.rindex.get(LuSql.defaultLuceneFieldParameters.getIndex()));
    sb.append("\n");

    // List every legal Index value.
    Set<String> names = LuceneFieldParameters.indexx.keySet();
    for (String name : names) {
        sb.append("\n-  " + name);
    }

    sb.append("\nStore: Default: "
            + LuceneFieldParameters.rstorex.get(LuSql.defaultLuceneFieldParameters.getStore()));

    sb.append("\n");
    // List every legal Store value.
    names = LuceneFieldParameters.storex.keySet();
    for (String name : names) {
        sb.append("\n-  " + name);
    }
    sb.append("\n Term vector: Default: "
            + LuceneFieldParameters.rtermx.get(LuSql.defaultLuceneFieldParameters.getTermVector()));

    sb.append("\n");
    // List every legal TermVector value.
    names = LuceneFieldParameters.termx.keySet();
    for (String name : names) {
        sb.append("\n-  " + name);
    }
    options.addOption(OptionBuilder.hasArgs().withDescription(
            "Field index parameters. \nFormat: \"fieldName=A:A:A\". Note that -i can have a slightly different interpretation depending on the DocSource. For DocSource implementation where the syntax of the query allows for separate definition of the query and the fields of interest (like SQL), all of the fields defined in the query are stored/indexed. For other DocSource's where only the query can be defined and the fields of interest cannot (like the Lucene syntax of the LucenDocSource), the \"-i\" syntax is the only way to set the fields to be used. "
                    + sb)
            .create("i"));

    //////////////////////
    options.addOption(OptionBuilder.hasArg()
            //.isRequired()
            .withDescription("Sink destination (i.e. Lucene index to create/write to). Default: "
                    + LuSql.DefaultSinkLocationName)
            .create("l"));

    //////////////////////
    options.addOption("N", true,
            "Number of thread for multithreading. Defaults: Runtime.getRuntime().availableProcessors()) *"
                    + LuSql.ThreadFactor + ". For this machine this is: "
                    + (Runtime.getRuntime().availableProcessors() * LuSql.ThreadFactor));

    //////////////////////
    options.addOption(OptionBuilder.hasArgs().withDescription(
            "Properties to be passed to the DocSource driver. Can be is multiple. Example: -pso foo=bar  -pso \"start=test 4\"")
            .create("pso"));

    //////////////////////
    options.addOption(OptionBuilder.hasArgs()
            .withDescription(
                    "Properties to be passed to the DocSink driver. Can be multiple. See 'pso' for examples")
            .create("psi"));

    //////////////////////
    options.addOption(OptionBuilder.hasArgs().withDescription(
            "Properties to be passed to a filter. Can be multiple. Identify filter using integer (zero is the first filter). Example: -pf 0:size=10 -pf 0:name=fred -pf 1:reduce=true")
            .create("pf"));

    //////////////////////
    options.addOption(OptionBuilder
            .withDescription("Read from source using source driver's internal compression, if it supports it")
            .create("zso"));

    options.addOption(OptionBuilder
            .withDescription("Have sink driver use internal compression (opaque), if it supports it")
            .create("zsi"));

    //////////////////////
    options.addOption("n", true, "Number of documents to add. If unset all records from query are used.");

    //////////////////////
    options.addOption("M", true,
            "Changes the meta replacement string for the -Q command line parameters. Default: "
                    + SubQuery.getKeyMeta());

    //////////////////////
    options.addOption("m", false,
            "Turns off need get around MySql driver-caused OutOfMemory problem in large queries. Sets Statement.setFetchSize(Integer.MIN_VALUE)"
                    + "\n See http://benjchristensen.wordpress.com/2008/05/27/mysql-jdbc-memory-usage-on-large-resultset");

    //////////////////////
    // -E: JDBC isolation level; the help text prints the numeric value of each
    // java.sql.Connection constant next to its name.
    options.addOption(OptionBuilder.hasArg().withDescription("Set JDBC Transaction level. Default: "
            + DefaultTransactionIsolation + ". Values:\n" + Connection.TRANSACTION_NONE + " TRANSACTION_NONE\n"
            + Connection.TRANSACTION_READ_UNCOMMITTED + " TRANSACTION_READ_UNCOMMITTED\n"
            + Connection.TRANSACTION_READ_COMMITTED + " TRANSACTION_READ_COMMITTED\n"
            + Connection.TRANSACTION_REPEATABLE_READ + " TRANSACTION_REPEATABLE_READ\n"
            + Connection.TRANSACTION_SERIALIZABLE + " TRANSACTION_SERIALIZABLE\n "
            + "(See http://java.sun.com/j2se/1.5.0/docs/api/constant-values.html#java.sql.Connection.TRANSACTION_NONE)")
            .create("E"));

    //////////////////////
    options.addOption("p", true, "Properties file");

    //////////////////////
    //////////////////////
    options.addOption(OptionBuilder.hasArg()
            //.isRequired()
            .withDescription("Primary SQL query (in double quotes). Only used by JDBC driver").create("q"));

    //////////////////////
    options.addOption("r", true,
            "LuceneRAMBufferSizeInMBs: IndexWriter.setRAMBufferSizeMB(). Only used by Lucene sinks. Default: "
                    + Double.toString(LuSql.DefaultRAMBufferSizeMB));

    //////////////////////
    options.addOption("s", true,
            "Name of stop word file to use (relative or full path). If supported by DocSource");

    //////////////////////
    options.addOption("T", false,
            "Turn off multithreading. Note that multithreading does not guarantee the ordering of documents. If you want the order of Lucene documents to match the ordering of DB records generated by the SQL query, turn-off multithreading");

    //////////////////////
    options.addOption("t", false,
            "Test mode. Does not open up Lucene index. Prints (-n) records from SQL query");

    //////////////////////
    options.addOption("v", false, "Verbose mode");

    //////////////////////
    options.addOption("onlyMap", false, "Only use the fields from the DocSource that are mapped using -map");

    options.addOption(OptionBuilder.hasArgs().withDescription(
            "Field map. Transforms field names in DocSource to new fieldnames: Example -map \"AU=author\", where \"AU\" is the original (source) field name and \"author\" is the new (sink) field")
            .create("map"));
}

From source file:com.udps.hive.jdbc.HiveConnection.java

/**
 * Reports the isolation level for this connection, which is always
 * {@code Connection.TRANSACTION_NONE} (no transaction support).
 */
@Override
public int getTransactionIsolation() throws SQLException {
    // Constant answer: this driver does not support transactions.
    return Connection.TRANSACTION_NONE;
}

From source file:org.openconcerto.sql.model.SQLDataSource.java

/**
 * Applies the configured isolation level ({@code this.txIsolation}) to the
 * passed connection, caching the level the database reported on first use.
 *
 * @param conn the connection whose isolation level may be changed.
 * @throws SQLException if reading or changing the level fails.
 */
final synchronized void setTransactionIsolation(Connection conn) throws SQLException {
    // Lazily remember the level the database reported the first time around.
    if (this.dbTxIsolation == null) {
        this.dbTxIsolation = conn.getTransactionIsolation();
        assert this.dbTxIsolation != null;
    }
    // no need to try to change the level if the DB doesn't support transactions
    if (this.dbTxIsolation == Connection.TRANSACTION_NONE)
        return;
    // When only checking once, nothing to do if the DB already matches.
    if (this.checkOnceDBTxIsolation && this.dbTxIsolation == this.txIsolation)
        return;
    // if not check once, it's the desired action, so don't log
    if (this.checkOnceDBTxIsolation)
        Log.get().config("Setting transaction isolation to " + this.txIsolation);
    conn.setTransactionIsolation(this.txIsolation);
}

From source file:net.starschema.clouddb.jdbc.BQDatabaseMetadata.java

/**
 * {@inheritDoc}
 *
 * <p>Only {@code TRANSACTION_NONE} is reported as supported by this driver.
 *
 * @param level a {@code java.sql.Connection} transaction isolation constant.
 * @return {@code true} iff {@code level} is {@code TRANSACTION_NONE}.
 */
@Override
public boolean supportsTransactionIsolationLevel(int level) throws SQLException {
    // Return the comparison directly instead of the redundant
    // if (cond) return true; else return false; construct.
    return java.sql.Connection.TRANSACTION_NONE == level;
}