Example usage for java.sql PreparedStatement addBatch

Introduction

This page collects usage examples for java.sql.PreparedStatement#addBatch, drawn from open-source projects.

Prototype

void addBatch() throws SQLException;

Document

Adds a set of parameters to this PreparedStatement object's batch of commands.
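
For orientation, here is a minimal sketch of the canonical addBatch/executeBatch pattern; the table example_names and the method insertNames are hypothetical placeholders, not part of any example below.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.List;

// Queues one parameter set per row, then sends them in a single batch.
static void insertNames(Connection conn, List<String> names) throws SQLException {
    try (PreparedStatement ps = conn.prepareStatement(
            "INSERT INTO example_names (name) VALUES (?)")) {
        for (String name : names) {
            ps.setString(1, name); // bind this row's parameters
            ps.addBatch();         // snapshot them into the batch
        }
        int[] counts = ps.executeBatch(); // one update count per queued set
    }
}

Each addBatch() call copies the currently bound parameters into the statement's batch; executeBatch() then runs every queued set and returns an update count for each.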

Usage

From source file: org.hyperic.hq.measurement.server.session.DataManagerImpl.java

/**
 * This method is called to perform 'updates' for any inserts that failed.
 * @return The data insert result containing the data points that were not
 *         updated.
 */
private List<DataPoint> updateData(Connection conn, List<DataPoint> data) {
    PreparedStatement stmt = null;
    List<DataPoint> left = new ArrayList<DataPoint>();
    Map<String, List<DataPoint>> buckets = MeasRangeObj.getInstance().bucketData(data);

    for (Entry<String, List<DataPoint>> entry : buckets.entrySet()) {
        String table = entry.getKey();
        List<DataPoint> dpts = entry.getValue();

        try {
            // TODO need to set synchronous commit to off
            stmt = conn.prepareStatement(
                    "UPDATE " + table + " SET value = ? WHERE timestamp = ? AND measurement_id = ?");
            for (DataPoint pt : dpts) {
                Integer metricId = pt.getMeasurementId();
                MetricValue val = pt.getMetricValue();
                BigDecimal bigDec = new BigDecimal(val.getValue());
                stmt.setBigDecimal(1, getDecimalInRange(bigDec, metricId));
                stmt.setLong(2, val.getTimestamp());
                stmt.setInt(3, metricId.intValue());
                stmt.addBatch();
            }

            int[] execInfo = stmt.executeBatch();
            left.addAll(getRemainingDataPoints(dpts, execInfo));
        } catch (BatchUpdateException e) {
            left.addAll(getRemainingDataPointsAfterBatchFail(dpts, e.getUpdateCounts()));
        } catch (SQLException e) {
            // If the batch update is not within a transaction, then we
            // don't know which of the updates completed successfully.
            // Assume they all failed.
            left.addAll(dpts);

            if (log.isDebugEnabled()) {
                log.debug("A general SQLException occurred during the update. " + "Assuming that none of the "
                        + dpts.size() + " data points were updated.", e);
            }
        } finally {
            DBUtil.closeStatement(LOG_CTX, stmt);
        }
    }
    return left;
}
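
The example above leans on a useful property of batches: when a batch fails part-way, the driver throws BatchUpdateException, and getUpdateCounts() reports a per-statement outcome. A minimal sketch of that recovery hook (a hedged generalization, not code from the project above; drivers differ in whether they stop at the first failure or attempt every set):

import java.sql.BatchUpdateException;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;

// Runs the batch and returns the indices of parameter sets the driver
// reported as failed; a shorter counts array means the driver stopped early.
static List<Integer> executeReportingFailures(PreparedStatement stmt) throws SQLException {
    List<Integer> failed = new ArrayList<>();
    try {
        stmt.executeBatch();
    } catch (BatchUpdateException e) {
        int[] counts = e.getUpdateCounts();
        for (int i = 0; i < counts.length; i++) {
            if (counts[i] == Statement.EXECUTE_FAILED) {
                failed.add(i);
            }
        }
    }
    return failed;
}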

From source file: dk.netarkivet.harvester.datamodel.RunningJobsInfoDBDAO.java

/**
 * Store frontier report data to the persistent storage.
 * @param report the report to store
 * @param filterId the id of the filter that produced the report
 * @param jobId The ID of the job responsible for this report
 * @return the update count
 */
public int storeFrontierReport(String filterId, InMemoryFrontierReport report, Long jobId) {
    ArgumentNotValid.checkNotNull(report, "report");
    ArgumentNotValid.checkNotNull(jobId, "jobId");

    Connection c = HarvestDBConnection.get();
    PreparedStatement stm = null;
    try {

        // First drop existing rows
        try {
            c.setAutoCommit(false);

            stm = c.prepareStatement("DELETE FROM frontierReportMonitor" + " WHERE jobId=? AND filterId=?");
            stm.setLong(1, jobId);
            stm.setString(2, filterId);

            stm.executeUpdate();

            c.commit();
        } catch (SQLException e) {
            String message = "SQL error dropping records for job ID " + jobId + " and filterId " + filterId
                    + "\n" + ExceptionUtils.getSQLExceptionCause(e);
            log.warn(message, e);
            return 0;
        } finally {
            DBUtils.closeStatementIfOpen(stm);
            DBUtils.rollbackIfNeeded(c, "storeFrontierReport delete", jobId);
        }

        // Now batch insert report lines
        try {
            c.setAutoCommit(false);

            stm = c.prepareStatement("INSERT INTO frontierReportMonitor(" + FR_COLUMN.getColumnsInOrder()
                    + ") VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)");

            for (FrontierReportLine frl : report.getLines()) {
                stm.setLong(FR_COLUMN.jobId.rank(), jobId);
                stm.setString(FR_COLUMN.filterId.rank(), filterId);
                stm.setTimestamp(FR_COLUMN.tstamp.rank(), new Timestamp(report.getTimestamp()));
                stm.setString(FR_COLUMN.domainName.rank(), frl.getDomainName());
                stm.setLong(FR_COLUMN.currentSize.rank(), frl.getCurrentSize());
                stm.setLong(FR_COLUMN.totalEnqueues.rank(), frl.getTotalEnqueues());
                stm.setLong(FR_COLUMN.sessionBalance.rank(), frl.getSessionBalance());
                stm.setDouble(FR_COLUMN.lastCost.rank(), frl.getLastCost());
                stm.setDouble(FR_COLUMN.averageCost.rank(),
                        correctNumericIfIllegalAverageCost(frl.getAverageCost()));
                stm.setString(FR_COLUMN.lastDequeueTime.rank(), frl.getLastDequeueTime());
                stm.setString(FR_COLUMN.wakeTime.rank(), frl.getWakeTime());
                stm.setLong(FR_COLUMN.totalSpend.rank(), frl.getTotalSpend());
                stm.setLong(FR_COLUMN.totalBudget.rank(), frl.getTotalBudget());
                stm.setLong(FR_COLUMN.errorCount.rank(), frl.getErrorCount());

                // URIs are to be truncated to 1000 characters
                // (see SQL scripts)
                DBUtils.setStringMaxLength(stm, FR_COLUMN.lastPeekUri.rank(), frl.getLastPeekUri(),
                        MAX_URL_LENGTH, frl, "lastPeekUri");
                DBUtils.setStringMaxLength(stm, FR_COLUMN.lastQueuedUri.rank(), frl.getLastQueuedUri(),
                        MAX_URL_LENGTH, frl, "lastQueuedUri");

                stm.addBatch();
            }

            int[] updCounts = stm.executeBatch();
            int updCountTotal = 0;
            for (int count : updCounts) {
                updCountTotal += count;
            }

            c.commit();

            return updCountTotal;
        } catch (SQLException e) {
            String message = "SQL error writing records for job ID " + jobId + " and filterId " + filterId
                    + "\n" + ExceptionUtils.getSQLExceptionCause(e);
            log.warn(message, e);
            return 0;
        } finally {
            DBUtils.closeStatementIfOpen(stm);
            DBUtils.rollbackIfNeeded(c, "storeFrontierReport insert", jobId);
        }

    } finally {
        HarvestDBConnection.release(c);
    }
}
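
The summing loop above treats each entry of executeBatch()'s result as a plain row count. Drivers may instead return Statement.SUCCESS_NO_INFO (-2) for a set that succeeded with an unknown count, which would skew a naive sum. A defensive variant is sketched below; counting each SUCCESS_NO_INFO entry as one row is an assumption of this sketch, not something JDBC mandates.

import java.sql.Statement;

// Sums per-statement update counts; SUCCESS_NO_INFO (-2) marks success
// with an unknown row count, assumed here to have touched one row.
static int totalUpdated(int[] counts) {
    int total = 0;
    for (int c : counts) {
        total += (c == Statement.SUCCESS_NO_INFO) ? 1 : c;
    }
    return total;
}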

From source file: edu.umass.cs.reconfiguration.SQLReconfiguratorDB.java

private Set<String> putReconfigurationRecordDB(Map<String, ReconfigurationRecord<NodeIDType>> toCommit) {
    String updateCmd = "update " + getRCRecordTable() + " set " + Columns.RC_GROUP_NAME.toString() + "=?, "
            + Columns.STRINGIFIED_RECORD.toString() + "=? where " + Columns.SERVICE_NAME.toString() + "=?";
    String cmd = updateCmd;

    PreparedStatement pstmt = null;
    Connection conn = null;
    Set<String> committed = new HashSet<String>();
    String[] keys = toCommit.keySet().toArray(new String[0]);
    try {
        ArrayList<String> batch = new ArrayList<String>();
        for (int i = 0; i < keys.length; i++) {
            String name = keys[i];
            if (conn == null) {
                conn = this.getDefaultConn();
                conn.setAutoCommit(false);
                pstmt = conn.prepareStatement(updateCmd);
            }
            // removal
            if (toCommit.get(name) == null) {
                this.deleteReconfigurationRecordDB(name);
                log.log(Level.INFO, "{0} deleted RC record {1}", new Object[] { this, name });
                committed.add(name);
                continue;
            }
            // else update/insert
            String rcGroupName = toCommit.get(name).getRCGroupName();
            if (rcGroupName == null)
                rcGroupName = this.getRCGroupName(name);
            pstmt.setString(1, rcGroupName);
            if (RC_RECORD_CLOB_OPTION)
                pstmt.setClob(2, new StringReader((toCommit.get(name)).toString()));
            else
                pstmt.setString(2, (toCommit.get(name)).toString());
            pstmt.setString(3, name);
            pstmt.addBatch();
            batch.add(name);

            int[] executed = new int[batch.size()];
            if ((i + 1) % MAX_DB_BATCH_SIZE == 0 || (i + 1) == toCommit.size()) {
                executed = pstmt.executeBatch();
                assert (executed.length == batch.size());
                conn.commit();
                pstmt.clearBatch();
                for (int j = 0; j < executed.length; j++) {
                    if (executed[j] > 0) {
                        log.log(Level.FINE, "{0} updated RC DB record to {1}",
                                new Object[] { this, toCommit.get(batch.get(j)).getSummary() });
                        committed.add(batch.get(j));
                    } else
                        log.log(Level.FINE,
                                "{0} unable to update RC record {1} (executed={2}), will try insert",
                                new Object[] { this, batch.get(j), executed[j] });
                }
                batch.clear();
            }
        }
    } catch (SQLException sqle) {
        log.severe("SQLException while inserting RC record using " + cmd);
        sqle.printStackTrace();
    } finally {
        cleanup(pstmt);
        cleanup(conn);
    }

    log.log(Level.FINE, "{0} batch-committed {1}({2}) out of {3}({4})",
            new Object[] { this, committed.size(), committed, toCommit.size(), toCommit.keySet() });
    committed.addAll(this.putReconfigurationRecordIndividually(this.diff(toCommit, committed)));
    log.log(Level.FINE, "{0} committed {1}({2}) out of {3}({4})",
            new Object[] { this, committed.size(), committed, toCommit.size(), toCommit.keySet() });
    return committed;
}

From source file: edu.umass.cs.reconfiguration.SQLReconfiguratorDB.java

private boolean createReconfigurationRecordsDB(Map<String, String> nameStates, Set<NodeIDType> newActives) {
    String insertCmd = "insert into " + getRCRecordTable() + " (" + Columns.RC_GROUP_NAME.toString() + ", "
            + Columns.STRINGIFIED_RECORD.toString() + ", " + Columns.SERVICE_NAME.toString()
            + " ) values (?,?,?)";

    PreparedStatement insertRC = null;
    Connection conn = null;
    boolean insertedAll = true;
    Set<String> batch = new HashSet<String>();
    Set<String> committed = new HashSet<String>();
    try {
        if (conn == null) {
            conn = this.getDefaultConn();
            conn.setAutoCommit(false);
            insertRC = conn.prepareStatement(insertCmd);
        }
        assert (nameStates != null && !nameStates.isEmpty());
        String rcGroupName = this.getRCGroupName(nameStates.keySet().iterator().next());
        int i = 0;
        long t1 = System.currentTimeMillis();
        for (String name : nameStates.keySet()) {
            ReconfigurationRecord<NodeIDType> record = new ReconfigurationRecord<NodeIDType>(name, -1,
                    newActives);
            /* We just directly initialize with WAIT_ACK_STOP:-1 instead of
             * starting with READY:-1 and pretending to go through the whole
             * reconfiguration protocol sequence. */
            record.setState(name, -1, RCStates.WAIT_ACK_STOP);
            insertRC.setString(1, rcGroupName);
            if (RC_RECORD_CLOB_OPTION)
                insertRC.setClob(2, new StringReader(record.toString()));
            else
                insertRC.setString(2, record.toString());
            insertRC.setString(3, name);
            insertRC.addBatch();
            batch.add(name);
            i++;
            // i was already incremented above, so it equals the number of rows
            // queued so far; using (i + 1) here would skip the final chunk
            if (i % MAX_DB_BATCH_SIZE == 0 || i == nameStates.size()) {
                int[] executed = insertRC.executeBatch();
                conn.commit();
                insertRC.clearBatch();
                committed.addAll(batch);
                batch.clear();
                for (int j : executed)
                    insertedAll = insertedAll && (j > 0);
                if (insertedAll)
                    log.log(Level.FINE, "{0} successfully logged the last {1} messages in {2} ms",
                            new Object[] { this, (i + 1), (System.currentTimeMillis() - t1) });
                t1 = System.currentTimeMillis();
            }
        }
    } catch (SQLException sqle) {
        log.severe("SQLException while inserting batched RC records using " + insertCmd);
        sqle.printStackTrace();
    } finally {
        cleanup(insertRC);
        cleanup(conn);
    }

    // rollback
    if (!insertedAll) {
        for (String name : nameStates.keySet())
            if (committed.contains(name))
                this.deleteReconfigurationRecord(name, 0);
    }

    return insertedAll;
}
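
Both SQLReconfiguratorDB methods above flush the batch every MAX_DB_BATCH_SIZE rows so memory stays bounded. A minimal sketch of that chunking pattern, tracking pending rows explicitly so the final partial chunk is always flushed (BATCH_SIZE, example_rows, and insertChunked are illustrative names):

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.List;

static final int BATCH_SIZE = 1000; // illustrative flush threshold

static void insertChunked(Connection conn, List<String> rows) throws SQLException {
    conn.setAutoCommit(false);
    try (PreparedStatement ps = conn.prepareStatement(
            "INSERT INTO example_rows (value) VALUES (?)")) {
        int pending = 0;
        for (String row : rows) {
            ps.setString(1, row);
            ps.addBatch();
            if (++pending == BATCH_SIZE) { // flush a full chunk
                ps.executeBatch();
                conn.commit();
                pending = 0;
            }
        }
        if (pending > 0) { // flush the final partial chunk
            ps.executeBatch();
            conn.commit();
        }
    }
}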

From source file: com.flexive.ejb.beans.structure.TypeEngineBean.java

/**
 * {@inheritDoc}
 */
@Override
@TransactionAttribute(TransactionAttributeType.REQUIRED)
public void remove(long id) throws FxApplicationException {
    final UserTicket ticket = FxContext.getUserTicket();
    FxPermissionUtils.checkRole(ticket, Role.StructureManagement);

    FxType type = CacheAdmin.getEnvironment().getType(id);

    Connection con = null;
    PreparedStatement ps = null;
    StringBuilder sql = new StringBuilder(500);
    try {
        con = Database.getDbConnection();
        List<FxPropertyAssignment> allPropertyAssignments = new ArrayList<FxPropertyAssignment>(20);
        FxEnvironment env = CacheAdmin.getEnvironment();
        for (FxPropertyAssignment fxpa : env.getPropertyAssignments(true))
            if (fxpa.getAssignedType().getId() == id)
                allPropertyAssignments.add(fxpa);
        List<Long> rmStackProp = new ArrayList<Long>(allPropertyAssignments.size());
        List<FxPropertyAssignment> rmProp = new ArrayList<FxPropertyAssignment>(allPropertyAssignments.size());

        for (FxPropertyAssignment a : allPropertyAssignments)
            if (a.getBaseAssignmentId() == FxAssignment.NO_PARENT)
                rmStackProp.add(a.getId());
            else {
                //check if base is from the same type
                if (env.getAssignment(a.getBaseAssignmentId()).getAssignedType().getId() == id)
                    rmProp.add(a);
                else
                    rmStackProp.add(a.getId());
            }
        boolean found;
        while (rmProp.size() > 0) {
            found = false;
            for (FxPropertyAssignment a : rmProp)
                if (rmStackProp.contains(a.getBaseAssignmentId())) {
                    rmProp.remove(a);
                    rmStackProp.add(0, a.getId());
                    found = true;
                    break;
                }
            assert found : "Internal error: no property assignment found to be removed!";
        }
        //remove group assignments in the 'correct' order (ie not violating parentgroup references)
        ArrayList<Long> rmStack = new ArrayList<Long>(10);
        buildGroupAssignmentRemoveStack(type.getConnectedAssignments("/"), rmStack);
        rmStack.addAll(0, rmStackProp);

        sql.setLength(0);
        sql.append("DELETE FROM ").append(TBL_STRUCT_ASSIGNMENTS).append(ML).append(" WHERE ID=?");
        ps = con.prepareStatement(sql.toString());
        for (Long rmid : rmStack) {
            ps.setLong(1, rmid);
            ps.addBatch();
        }
        ps.executeBatch();
        ps.close();

        //prevent base-related constraint issues by setting the base to null prior to removal
        sql.setLength(0);
        sql.append("UPDATE ").append(TBL_STRUCT_ASSIGNMENTS).append(" SET BASE=NULL WHERE TYPEDEF=? AND ID=?");
        ps = con.prepareStatement(sql.toString());
        ps.setLong(1, type.getId());
        for (Long rmid : rmStack) {
            ps.setLong(2, rmid);
            ps.addBatch();
        }
        ps.executeBatch();
        ps.close();

        //remove property and group assignment option entries
        sql.setLength(0);
        for (FxPropertyAssignment pa : allPropertyAssignments) {
            if ( //exclude the "ID" property whose Id is "0" which is "NO_PARENT"
            !(pa.getProperty().getId() == FxAssignment.NO_PARENT)) {
                if (sql.length() == 0) {
                    sql.append(" WHERE ASSID IN(").append(pa.getId());
                } else
                    sql.append(',').append(pa.getId());
            }
        }
        if (sql.length() > 0) {
            sql.append(')');
            ps = con.prepareStatement("DELETE FROM " + TBL_STRUCT_PROPERTY_OPTIONS + sql.toString());
            ps.executeUpdate();
            ps.close();
        }
        sql.setLength(0);
        for (FxGroupAssignment ga : type.getAssignedGroups()) {
            if (ga.getBaseAssignmentId() == FxAssignment.NO_PARENT) {
                if (sql.length() == 0) {
                    sql.append(" WHERE ASSID IN(").append(ga.getId());
                } else
                    sql.append(',').append(ga.getId());
            }
        }
        if (sql.length() > 0) {
            sql.append(')');
            ps = con.prepareStatement("DELETE FROM " + TBL_STRUCT_GROUP_OPTIONS + sql.toString());
            ps.executeUpdate();
            ps.close();
        }

        // remove all type structure options
        storeTypeOptions(con, TBL_STRUCT_TYPES_OPTIONS, "ID", id, null, true);

        //remove all flat storage assignments for this type
        FxFlatStorageManager.getInstance().removeTypeMappings(con, type.getId());

        //remove the assignments
        sql.setLength(0);
        //clear parent key refs for removal to avoid referential integrity issues within the type itself
        //            sql.append("UPDATE ").append(TBL_STRUCT_ASSIGNMENTS).append(" SET PARENTGROUP=ID WHERE TYPEDEF=?");
        //            ps = con.prepareStatement(sql.toString());
        //            ps.setLong(1, type.getId());
        //            ps.executeUpdate();
        //            ps.close();
        //            sql.setLength(0);
        //            ps = con.prepareStatement(sql.toString());
        sql.append("DELETE FROM ").append(TBL_STRUCT_ASSIGNMENTS).append(" WHERE TYPEDEF=? AND ID=?");
        ps = con.prepareStatement(sql.toString());
        ps.setLong(1, type.getId());
        for (Long rmid : rmStack) {
            ps.setLong(2, rmid);
            ps.addBatch();
        }
        ps.executeBatch();
        ps.close();

        sql.setLength(0);
        sql.append("DELETE FROM ").append(TBL_STRUCT_TYPERELATIONS)
                .append(" WHERE TYPEDEF=? OR TYPESRC=? OR TYPEDST=?");
        ps = con.prepareStatement(sql.toString());
        ps.setLong(1, type.getId());
        ps.setLong(2, type.getId());
        ps.setLong(3, type.getId());
        ps.executeUpdate();
        ps.close();
        sql.setLength(0);
        sql.append("DELETE FROM ").append(TBL_STRUCT_TYPES).append(ML).append(" WHERE ID=?");
        ps = con.prepareStatement(sql.toString());
        ps.setLong(1, type.getId());
        ps.executeUpdate();
        ps.close();
        sql.setLength(0);
        sql.append("UPDATE ").append(TBL_STRUCT_PROPERTIES).append(" SET REFTYPE=NULL WHERE REFTYPE=?");
        ps = con.prepareStatement(sql.toString());
        ps.setLong(1, type.getId());
        ps.executeUpdate();
        ps.close();
        sql.setLength(0);
        sql.append("DELETE FROM ").append(TBL_STRUCT_TYPES).append(" WHERE ID=?");
        ps = con.prepareStatement(sql.toString());
        ps.setLong(1, type.getId());
        ps.executeUpdate();

        //remove eventually orphaned properties and groups
        FxStructureUtils.removeOrphanedProperties(con);
        FxStructureUtils.removeOrphanedGroups(con);

        StructureLoader.reload(con);
        htracker.track(type, "history.type.remove", type.getName(), type.getId());
    } catch (SQLException e) {
        if (StorageManager.isForeignKeyViolation(e)) {
            EJBUtils.rollback(ctx);
            throw new FxRemoveException(LOG, e, "ex.structure.type.inUse", type.getName());
        }
        EJBUtils.rollback(ctx);
        throw new FxRemoveException(LOG, e, "ex.db.sqlError", e.getMessage());
    } catch (FxCacheException e) {
        EJBUtils.rollback(ctx);
        throw new FxRemoveException(LOG, e, "ex.cache", e.getMessage());
    } catch (FxLoadException e) {
        EJBUtils.rollback(ctx);
        throw new FxRemoveException(e);
    } finally {
        Database.closeObjects(TypeEngineBean.class, con, ps);
    }
}

From source file: HSqlManager.java

@SuppressWarnings("Duplicates")
@Deprecated
public static void mycoUniqueDB(Connection connection, int bps) throws ClassNotFoundException, SQLException,
        InstantiationException, IllegalAccessException, IOException {
    long time = System.currentTimeMillis();
    DpalLoad.main(new String[1]);
    HSqlPrimerDesign.Dpal_Inst = DpalLoad.INSTANCE_WIN64;
    String base = new File("").getAbsolutePath();
    if (!written) {
        CSV.makeDirectory(new File(base + "/PhageData"));
        INSTANCE.parseAllPhages(bps);
    }
    Connection db = connection;
    db.setAutoCommit(false);
    Statement stat = db.createStatement();
    PrintWriter log = new PrintWriter(new File("javalog.log"));
    stat.execute("SET FILES LOG FALSE;\n");
    PreparedStatement st = db
            .prepareStatement("UPDATE Primerdb.Primers" + " SET UniqueP = true, Tm = ?, GC =?, Hairpin =?"
                    + "WHERE Cluster = ? and Strain = ? and " + "Sequence = ? and Bp = ?");
    ResultSet call = stat.executeQuery("Select * From Primerdb.Phages;");
    List<String[]> phages = new ArrayList<>();
    String strain = "";
    while (call.next()) {
        String[] r = new String[3];
        r[0] = call.getString("Strain");
        r[1] = call.getString("Cluster");
        r[2] = call.getString("Name");
        phages.add(r);
        if (r[2].equals("xkcd")) {
            strain = r[0];
        }
    }
    call.close();
    String x = strain;
    Set<String> clust = phages.stream().filter(y -> y[0].equals(x)).map(y -> y[1]).collect(Collectors.toSet());
    String[] clusters = clust.toArray(new String[clust.size()]);
    for (String z : clusters) {
        try {
            Set<String> nonclustphages = phages.stream().filter(a -> a[0].equals(x) && !a[1].equals(z))
                    .map(a -> a[2]).collect(Collectors.toSet());
            ResultSet resultSet = stat.executeQuery(
                    "Select Sequence from primerdb.primers" + " where Strain ='" + x + "' and Cluster ='" + z
                            + "' and CommonP = true" + " and Bp = " + Integer.valueOf(bps) + " ");
            Set<CharSequence> primers = Collections.synchronizedSet(new HashSet<>());
            while (resultSet.next()) {
                primers.add(resultSet.getString("Sequence"));
            }
            resultSet.close();
            for (String phage : nonclustphages) {
                //                    String[] seqs = Fasta.parse(base + "/Fastas/" + phage + ".fasta");
                //                    String sequence =seqs[0]+seqs[1];
                //                        Map<String, List<Integer>> seqInd = new HashMap<>();
                //                        for (int i = 0; i <= sequence.length()-bps; i++) {
                //                            String sub=sequence.substring(i,i+bps);
                //                            if(seqInd.containsKey(sub)){
                //                                seqInd.get(sub).add(i);
                //                            }else {
                //                                List<Integer> list = new ArrayList<>();
                //                                list.add(i);
                //                                seqInd.put(sub,list);
                //                            }
                //                        }
                //                    primers = primers.stream().filter(primer->!seqInd.containsKey(primer)).collect(Collectors.toSet());
                //                    primers =Sets.difference(primers,CSV.readCSV(base + "/PhageData/"+Integer.toString(bps)
                //                                    + phage + ".csv"));
                CSV.readCSV(base + "/PhageData/" + Integer.toString(bps) + phage + ".csv").stream()
                        .filter(primers::contains).forEach(primers::remove);
                //                    System.gc();

            }
            int i = 0;
            for (CharSequence a : primers) {
                try {
                    st.setDouble(1, HSqlPrimerDesign.primerTm(a, 0, 800, 1.5, 0.2));
                    st.setDouble(2, HSqlPrimerDesign.gcContent(a));
                    st.setBoolean(3, HSqlPrimerDesign.calcHairpin((String) a, 4));
                    st.setString(4, z);
                    st.setString(5, x);
                    st.setString(6, a.toString());
                    st.setInt(7, bps);
                    st.addBatch();
                } catch (SQLException e) {
                    e.printStackTrace();
                    System.out.println("Error occurred at " + x + " " + z);
                }
                i++;
                if (i == 1000) {
                    i = 0;
                    st.executeBatch();
                    db.commit();
                }
            }
            if (i > 0) {
                st.executeBatch();
                db.commit();
            }
        } catch (SQLException e) {
            e.printStackTrace();
            System.out.println("Error occurred at " + x + " " + z);
        }
        log.println(z);
        log.flush();
        System.gc();
    }
    stat.execute("SET FILES LOG TRUE\n");
    st.close();
    stat.close();
    System.out.println("Unique Updated");
    System.out.println((System.currentTimeMillis() - time) / Math.pow(10, 3) / 60);
}

From source file: com.nextep.designer.dbgm.services.impl.DataService.java

private void saveDataLinesToRepository(IDataSet dataSet, IDataSet dataSetContents, DeltaType deltaType,
        IProgressMonitor monitor) {
    final SubMonitor m = SubMonitor.convert(monitor, 10000);
    m.subTask(MessageFormat.format(DBGMMessages.getString("service.data.dataSetSaveInit"), dataSet.getName())); //$NON-NLS-1$
    IStorageHandle handle = dataSetContents.getStorageHandle();
    if (handle == null) {
        handle = storageService.createDataSetStorage(dataSet);
    }

    Connection derbyConn = null;
    Statement stmt = null;
    ResultSet rset = null;
    Connection repoConn = null;
    PreparedStatement insertStmt = null;
    Session s = null;
    Transaction t = null;
    long rowid = dataSet.getCurrentRowId() + 1;
    try {
        repoConn = getRepositoryConnection();
        repoConn.setAutoCommit(false);

        // We handle the Hibernate session specifically to boost the import process
        s = HibernateUtil.getInstance().getSandBoxSession();
        s.clear();
        t = s.beginTransaction();
        // Our prepared INSERT rows statement
        insertStmt = repoConn.prepareStatement("INSERT INTO dbgm_dset_row_values ( " //$NON-NLS-1$
                + "  drow_id, column_refid, column_value " //$NON-NLS-1$
                + ") VALUES ( " //$NON-NLS-1$
                + "  ?, ?, ? " //$NON-NLS-1$
                + ") "); //$NON-NLS-1$

        // Getting our local derby connection
        derbyConn = storageService.getLocalConnection();
        stmt = derbyConn.createStatement();

        // Selecting data from derby local storage
        String selectStmt = handle.getSelectStatement();
        selectStmt = selectStmt.replace("SELECT", "SELECT " + IStorageService.ROWID_COLUMN_NAME //$NON-NLS-1$ //$NON-NLS-2$
                + ","); //$NON-NLS-1$
        rset = stmt.executeQuery(selectStmt);
        final List<IReference> colRefs = dataSet.getColumnsRef();

        int lineBufferCount = 0;
        long counter = 0;
        while (rset.next()) {
            final IDataLine line = typedObjectFactory.create(IDataLine.class);
            line.setDataSet(dataSet);
            // If we got a repository rowid, we use it, else we affect a new available rowid
            final long selectedRowId = rset.getLong(1);
            if (selectedRowId != 0) {
                line.setRowId(selectedRowId);
            } else {
                line.setRowId(rowid++);
            }
            // Persisting line so that columns can use its ID
            s.save(line);
            if (deltaType != DeltaType.DELETE) {
                for (int i = 2; i < colRefs.size() + 2; i++) {
                    final Object val = rset.getObject(i);
                    // Column 1 is the rowid and JDBC indices are 1-based,
                    // so result column i maps to colRefs.get(i - 2)
                    final IReference colRef = colRefs.get(i - 2);
                    final IColumnValue colValue = typedObjectFactory.create(IColumnValue.class);
                    colValue.setDataLine(line);
                    colValue.setColumnRef(colRef);
                    colValue.setValue(val);
                    line.addColumnValue(colValue);
                    insertStmt.setLong(1, line.getUID().rawId());
                    insertStmt.setLong(2, colRef.getUID().rawId());
                    insertStmt.setString(3, colValue.getStringValue());
                    insertStmt.addBatch();
                }
            }
            if (lineBufferCount++ >= LINE_BUFFER_SIZE) {
                t.commit();
                insertStmt.executeBatch();
                s.clear();
                t = s.beginTransaction();
                counter += lineBufferCount;
                m.subTask(MessageFormat.format(DBGMMessages.getString("service.data.savedLines"), //$NON-NLS-1$
                        dataSet.getName(), counter));
                m.worked(500);
                lineBufferCount = 0;
            }
        }
        if (lineBufferCount > 0) {
            t.commit();
            insertStmt.executeBatch();
            s.clear();
            lineBufferCount = 0;
        }
        repoConn.commit();
        dataSet.setCurrentRowId(rowid);
    } catch (SQLException e) {
        throw new ErrorException(DBGMMessages.getString("service.data.saveDatalineFailed") + e.getMessage(), e); //$NON-NLS-1$
    } finally {
        safeClose(rset, stmt, derbyConn, false);
        safeClose(null, insertStmt, repoConn, true);
    }
}

From source file: com.sec.ose.osi.sdk.protexsdk.discovery.DCStringSearch.java

public void loadFromProtexServer(UIResponseObserver observer, ReportEntityList identifiedFiles,
        ReportEntityList stringSearch) {

    PreparedStatement prep = IdentificationDBManager.getStringSearchPreparedStatement(projectName);
    HashSet<String> StringSearchFileLineSet = new HashSet<String>();

    if (stringSearch == null) {
        System.err.println("StringSearch not found.");
        return;
    }

    StringBuffer stringSearchLineBuf = new StringBuffer("");

    if (identifiedFiles != null) {
        for (ReportEntity tmpIdentifiedFile : identifiedFiles.getEntityList()) {

            stringSearchLineBuf.setLength(0);

            if (tmpIdentifiedFile.getValue(ReportInfo.IDENTIFIED_FILES.DISCOVERY_TYPE)
                    .equals("String Search")) {

                String stringSearchFilePath = tmpIdentifiedFile
                        .getValue(ReportInfo.IDENTIFIED_FILES.FILE_FOLDER_NAME).substring(1);
                String stringSearchSearch = tmpIdentifiedFile.getValue(ReportInfo.IDENTIFIED_FILES.SEARCH);
                String stringSearchComponent = tmpIdentifiedFile
                        .getValue(ReportInfo.IDENTIFIED_FILES.COMPONENT);
                String stringSearchVersion = tmpIdentifiedFile.getValue(ReportInfo.IDENTIFIED_FILES.VERSION);
                String stringSearchLicense = tmpIdentifiedFile.getValue(ReportInfo.IDENTIFIED_FILES.LICENSE);
                String stringSearchTotalLine = tmpIdentifiedFile
                        .getValue(ReportInfo.IDENTIFIED_FILES.TOTAL_LINES);
                String stringSearchComment = tmpIdentifiedFile.getValue(ReportInfo.IDENTIFIED_FILES.COMMENT);
                String stringResolutionType = tmpIdentifiedFile
                        .getValue(ReportInfo.IDENTIFIED_FILES.RESOLUTION_TYPE);
                String stringSearchStartLine = tmpIdentifiedFile
                        .getValue(ReportInfo.IDENTIFIED_FILES.FILE_LINE);

                stringSearchLineBuf.append(stringSearchFilePath);
                stringSearchLineBuf.append(stringSearchStartLine);
                if (stringSearchTotalLine.length() > 0) {
                    int iStringSearchStartLine = Tools.transStringToInteger(stringSearchStartLine);
                    int iStringSearchTotalLine = Tools.transStringToInteger(stringSearchTotalLine);
                    int iStringSearchEndLine = iStringSearchStartLine + iStringSearchTotalLine - 1;

                    stringSearchLineBuf.append("..");
                    stringSearchLineBuf.append(iStringSearchEndLine);
                }

                StringSearchFileLineSet.add(stringSearchLineBuf.toString());
                if (stringSearchSearch.length() > 0) {

                    String pendingStatus = String.valueOf(AbstractMatchInfo.STATUS_IDENTIFIED);
                    if ("Declared".equals(stringResolutionType))
                        pendingStatus = String.valueOf(AbstractMatchInfo.STATUS_DECLARED);

                    try {
                        prep.setString(1, stringSearchFilePath);
                        prep.setString(2, stringSearchSearch);
                        prep.setString(3, stringSearchComponent);
                        prep.setString(4, stringSearchVersion);
                        prep.setString(5, stringSearchLicense);
                        prep.setString(6, pendingStatus);
                        prep.setString(7,
                                stringSearchLineBuf.toString().substring(stringSearchFilePath.length()));
                        prep.setString(8, stringSearchComment);
                        prep.addBatch();
                    } catch (SQLException e) {
                        log.warn(e);
                    }

                }
            }
        }
        IdentificationDBManager.execute(prep);
    }

    for (ReportEntity entity : stringSearch) {

        String stringSearchFilePath = entity.getValue(ReportInfo.STRING_SEARCHES.FILE);
        String stringSearchSearch = entity.getValue(ReportInfo.STRING_SEARCHES.SEARCH);
        String stringSearchLine = entity.getValue(ReportInfo.STRING_SEARCHES.LINE);

        if (StringSearchFileLineSet.contains(stringSearchFilePath + stringSearchLine))
            continue;

        if (stringSearchSearch.length() > 0) {

            try {
                prep.setString(1, stringSearchFilePath);
                prep.setString(2, stringSearchSearch);
                prep.setString(3, null);
                prep.setString(4, null);
                prep.setString(5, null);
                prep.setString(6, String.valueOf(AbstractMatchInfo.STATUS_PENDING));
                prep.setString(7, stringSearchLine);
                prep.setString(8, null); // comment
                prep.addBatch();
            } catch (SQLException e) {
                log.warn(e);
            }
        }
    }
    IdentificationDBManager.execute(prep);
    StringSearchFileLineSet = null;
    if (prep != null) {
        try {
            prep.close();
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }
}

From source file: org.obm.domain.dao.CalendarDaoJdbcImpl.java

@VisibleForTesting
void insertAttendees(AccessToken editor, Event ev, Connection con, List<Attendee> attendees)
        throws SQLException {
    String attQ = "INSERT INTO EventLink (" + ATT_INSERT_FIELDS + ") VALUES (" + "?, " + // event_id
            "?, " + // entity_id
            "?, " + // state
            "?, " + // required
            "?," + // percent
            "?," + // user_create
            "?" + // is_organizer
            ")";/*from  w w w.j a va  2 s. c  o m*/
    boolean shouldClearOrganizer = false;
    PreparedStatement ps = null;

    try {
        ps = con.prepareStatement(attQ);

        final int eventObmId = ev.getObmId().getObmId();
        final Set<Attendee> listAttendee = removeDuplicateAttendee(attendees);
        Set<EntityId> alreadyAddedAttendees = Sets.newHashSet();

        for (final Attendee at : listAttendee) {
            boolean isOrganizer = Objects.firstNonNull(at.isOrganizer(), false);

            String attendeeEmail = at.getEmail();
            EntityId userEntity = at.getEntityId();

            // There must be only one organizer in a given event
            if (isOrganizer) {
                shouldClearOrganizer = true;
            }

            if (alreadyAddedAttendees.contains(userEntity)) {
                logger.info("Attendee {} with entity ID {} already added, skipping.", attendeeEmail,
                        userEntity);

                continue;
            }

            ps.setInt(1, eventObmId);
            ps.setInt(2, userEntity.getId());
            ps.setObject(3, getJdbcObjectParticipation(at));
            ps.setObject(4, getJdbcObjectParticipationRole(at));
            ps.setInt(5, at.getPercent());
            ps.setInt(6, editor.getObmId());
            ps.setBoolean(7, isOrganizer);
            ps.addBatch();
            logger.info("Adding " + attendeeEmail + (isOrganizer ? " as organizer" : " as attendee"));

            alreadyAddedAttendees.add(userEntity);
        }

        // Clear the previous organizer if needed
        if (shouldClearOrganizer) {
            clearOrganizer(eventObmId, con);
        }

        ps.executeBatch();
    } finally {
        obmHelper.cleanup(null, ps, null);
    }
}

From source file: org.wso2.carbon.identity.application.mgt.dao.impl.ApplicationDAOImpl.java

/**
 * @param applicationId
 * @param claimConfiguration
 * @param applicationID
 * @param connection
 * @throws SQLException
 */
private void updateClaimConfiguration(int applicationId, ClaimConfig claimConfiguration, int applicationID,
        Connection connection) throws SQLException {

    int tenantID = CarbonContext.getThreadLocalCarbonContext().getTenantId();

    PreparedStatement storeRoleClaimPrepStmt = null;
    PreparedStatement storeClaimDialectPrepStmt = null;
    PreparedStatement storeSendLocalSubIdPrepStmt = null;

    if (claimConfiguration == null) {
        return;
    }

    try {
        // update the application data
        String roleClaim = claimConfiguration.getRoleClaimURI();
        if (roleClaim != null) {
            storeRoleClaimPrepStmt = connection
                    .prepareStatement(ApplicationMgtDBQueries.UPDATE_BASIC_APPINFO_WITH_ROLE_CLAIM);
            // ROLE_CLAIM=? WHERE TENANT_ID=? AND ID=?
            storeRoleClaimPrepStmt.setString(1, CharacterEncoder.getSafeText(roleClaim));
            storeRoleClaimPrepStmt.setInt(2, tenantID);
            storeRoleClaimPrepStmt.setInt(3, applicationId);
            storeRoleClaimPrepStmt.executeUpdate();
        }

    } finally {
        IdentityApplicationManagementUtil.closeStatement(storeRoleClaimPrepStmt);
    }

    try {
        storeClaimDialectPrepStmt = connection
                .prepareStatement(ApplicationMgtDBQueries.UPDATE_BASIC_APPINFO_WITH_CLAIM_DIALEECT);
        // IS_LOCAL_CLAIM_DIALECT=? WHERE TENANT_ID= ? AND ID = ?
        storeClaimDialectPrepStmt.setString(1, claimConfiguration.isLocalClaimDialect() ? "1" : "0");
        storeClaimDialectPrepStmt.setInt(2, tenantID);
        storeClaimDialectPrepStmt.setInt(3, applicationId);
        storeClaimDialectPrepStmt.executeUpdate();
    } finally {
        IdentityApplicationManagementUtil.closeStatement(storeClaimDialectPrepStmt);
    }

    try {
        storeSendLocalSubIdPrepStmt = connection
                .prepareStatement(ApplicationMgtDBQueries.UPDATE_BASIC_APPINFO_WITH_SEND_LOCAL_SUB_ID);
        // IS_SEND_LOCAL_SUBJECT_ID=? WHERE TENANT_ID= ? AND ID = ?
        storeSendLocalSubIdPrepStmt.setString(1,
                claimConfiguration.isAlwaysSendMappedLocalSubjectId() ? "1" : "0");
        storeSendLocalSubIdPrepStmt.setInt(2, tenantID);
        storeSendLocalSubIdPrepStmt.setInt(3, applicationId);
        storeSendLocalSubIdPrepStmt.executeUpdate();
    } finally {
        IdentityApplicationManagementUtil.closeStatement(storeSendLocalSubIdPrepStmt);
    }

    if (claimConfiguration.getClaimMappings() == null || claimConfiguration.getClaimMappings().length == 0) {
        log.debug("No claim mapping found, skipping.");
        return;
    }

    List<ClaimMapping> claimMappings = Arrays.asList(claimConfiguration.getClaimMappings());

    PreparedStatement storeClaimMapPrepStmt = null;
    try {
        storeClaimMapPrepStmt = connection.prepareStatement(ApplicationMgtDBQueries.STORE_CLAIM_MAPPING);

        for (ClaimMapping mapping : claimMappings) {
            if (mapping.getLocalClaim() == null || mapping.getLocalClaim().getClaimUri() == null
                    || mapping.getRemoteClaim() == null || mapping.getRemoteClaim().getClaimUri() == null) {
                continue;
            }
            // TENANT_ID, IDP_CLAIM, SP_CLAIM, APP_ID, IS_REQUESTED, DEFAULT_VALUE
            storeClaimMapPrepStmt.setInt(1, tenantID);
            storeClaimMapPrepStmt.setString(2,
                    CharacterEncoder.getSafeText(mapping.getLocalClaim().getClaimUri()));
            storeClaimMapPrepStmt.setString(3,
                    CharacterEncoder.getSafeText(mapping.getRemoteClaim().getClaimUri()));
            storeClaimMapPrepStmt.setInt(4, applicationID);
            if (mapping.isRequested()) {
                storeClaimMapPrepStmt.setString(5, "1");
            } else {
                storeClaimMapPrepStmt.setString(5, "0");
            }
            storeClaimMapPrepStmt.setString(6, CharacterEncoder.getSafeText(mapping.getDefaultValue()));
            storeClaimMapPrepStmt.addBatch();

            if (debugMode) {
                log.debug("Storing Claim Mapping. Local Claim: " + mapping.getLocalClaim().getClaimUri()
                        + " SPClaim: " + mapping.getRemoteClaim().getClaimUri());
            }
        }

        storeClaimMapPrepStmt.executeBatch();
    } finally {
        IdentityApplicationManagementUtil.closeStatement(storeClaimMapPrepStmt);
    }
}