com.archivas.clienttools.arcutils.utils.database.ManagedJobSchema.java Source code

Introduction

Here is the source code for com.archivas.clienttools.arcutils.utils.database.ManagedJobSchema.java

Source

// Copyright 2007 Hitachi Data Systems
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.

package com.archivas.clienttools.arcutils.utils.database;

import com.archivas.clienttools.arcutils.config.HCPMoverProperties;
import com.archivas.clienttools.arcutils.impl.jobs.FileStats;
import com.archivas.clienttools.arcutils.impl.jobs.FileStatus;
import com.archivas.clienttools.arcutils.model.ArcProcessFile;
import com.archivas.clienttools.arcutils.profile.AbstractProfileBase;
import org.apache.http.HttpStatus;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.sql.*;
import java.util.*;
import java.util.Date;
import java.util.logging.Level;
import java.util.logging.Logger;

@SuppressWarnings({ "ConstantConditions" })
public abstract class ManagedJobSchema {

    private static final Logger LOG = Logger.getLogger(ManagedJobSchema.class.getName());
    public static final Logger DB_PERFORMANCE_LOG = Logger
            .getLogger(ManagedJobSchema.class.getName() + ".dbPerformance");

    public static final String JOB_FILES_TABLE_NAME = "JOB_FILES";
    private static final String JOB_SCHEMA_PREFIX = "JOB_";
    public static final String LIFE_CYCLE_INDEX_NAME = "LIFE_CYCLE_INDEX";
    public static final String LIFE_CYCLE_STATUS_RECORD_ID_INDEX_NAME = "LIFE_CYCLE_STATUS_RECORD_ID_INDEX";
    public static final String LIFE_CYCLE_STATUS_PATH_DEPTH_INDEX_NAME = "LIFE_CYCLE_STATUS_PATH_DEPTH_INDEX";
    private static final String INVENTORY_INDEX_NAME = "INVENTORY_INDEX";
    private static final String INITIAL_LIST_INDEX_NAME = "INITIAL_LIST_INDEX";
    private static final String DIR_LISTING_IN_PROGRESS_INDEX_NAME = "DIR_LISTING_INDEX";

    private static final String INSERT_FILES_TO_DISCOVER_STMT_NAME = "insertToDiscovery";
    private static final String UPDATE_LIFE_CYCLE_STMT_NAME = "updateLifeCycle";
    private static final String MARK_FILES_READY_TO_PROCESS_STMT_NAME = "markFilesReadyToProcess";
    private static final String MARK_FILE_PROCESSED_STMT_NAME = "markFileProcessed";
    private static final String MARK_DIR_PROCESSED_STMT_NAME = "markDirProcessed";
    private static final String MARK_DIR_LISTING_IN_PROGRESS_STMT_NAME = "dirListingInProgress";
    private static final String GET_FILES_IN_DIR_IN_PROGRESS_STMT_NAME = "filesInDirInProgress";
    private static final String INSERT_INTO_TEMP_TABLE_STMT_NAME = "insertIntoTempTable";
    private static final String GET_FILE_IN_TEMP_TABLE_STMT_NAME = "fileInTempTable";

    private static final String TEMP_DIR_LISTING_TABLE_NAME = "TEMP_DIR_LISTING";

    private AbstractProfileBase sourceProfile;

    private String insertFilesToDiscoverySql;
    private String updateLifeCycleSql;
    private String markFilesReadyToProcessSql;
    private String getInitialDiscoveryListSql;
    private String markFileProcessedSql;
    private String prepareFilesForResumeSql;
    private String markDirectoryProcessedSql;
    private String markDirListingInProgressSql;
    private String getDirsInProgressSql;
    private String getFilesInDirInProgressSql;
    private String insertIntoTempTableSql;
    private String getFileInTempTableSql;

    protected String qualifiedFilesTableName;
    private String qualifiedTempTableName;
    private String schemaName;
    private long jobId;

    private long lastDbRecordId = 0;

    private DbConnectionPool connPool;

    public ManagedJobSchema(long jobId) {
        this.jobId = jobId;
        schemaName = getJobSchemaName(jobId);

        qualifiedFilesTableName = schemaName + "." + JOB_FILES_TABLE_NAME;
        qualifiedTempTableName = schemaName + "." + TEMP_DIR_LISTING_TABLE_NAME;
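        // e.g. for jobId 42: schema "JOB_42", tables "JOB_42.JOB_FILES" and
        // "JOB_42.TEMP_DIR_LISTING"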

        connPool = new DbConnectionPool(schemaName);
    }

    public void setSourceProfile(AbstractProfileBase sourceProfile) {
        this.sourceProfile = sourceProfile;
    }

    public AbstractProfileBase getSourceProfile() {
        return sourceProfile;
    }

    public long getLastDbRecordId() {
        return lastDbRecordId;
    }

    private synchronized void initLastDbRecordId(long lastDbRecordId) throws DatabaseException {
        this.lastDbRecordId = lastDbRecordId;
    }

    public void load(long lastDbRecordId) throws DatabaseException {
        synchronized (DatabaseResourceManager.DB_LOCK) {
            try {
                initializeSqlStrings();
                initLastDbRecordId(lastDbRecordId);
            } catch (Exception e) {
                throw new DatabaseException("Unexpected error loading database", e);
            }
        }
    }

    /**
     *
     * @param conn
     *            - DB connection to use
     * @param initialDiscoveryList
     *            - files selected/specified by user
     * @return - the max RECORD_ID inserted for the initialDiscoveryList
     * @throws DatabaseException
     *             -
     */
    protected long create(PooledDbConnection conn, List<ArcProcessFile> initialDiscoveryList)
            throws DatabaseException {
        synchronized (DatabaseResourceManager.DB_LOCK) {
            try {
                initializeSqlStrings();
                return createSchema(conn, initialDiscoveryList);
            } catch (DatabaseException dbe) {
                throw dbe;
            } catch (Exception e) {
                throw new DatabaseException("Unexpected error creating database schema", e);
            }
        }
    }

    /**
     *
     * @param conn
     *            - the connection on which to perform the operation
     * @param initialDiscoveryList
     *            - the list of files selected/specified by user
     * @return - the max RECORD_ID inserted for the initialDiscoveryList
     * @throws DatabaseException
     *             -
     * @throws SQLException
     *             -
     */
    private long createSchema(PooledDbConnection conn, List<ArcProcessFile> initialDiscoveryList)
            throws DatabaseException, SQLException {
        Statement stmt = conn.createStatement();
        DBUtils.executeUpdate(stmt, "CREATE SCHEMA " + schemaName);
        createFilesTable(stmt);

        return insertFilesToDiscover(conn, initialDiscoveryList, true);
    }

    private void initializeSqlStrings() {
        insertFilesToDiscoverySql = "INSERT INTO " + qualifiedFilesTableName + getInsertFilesToDiscoverSqlCols();

        updateLifeCycleSql = "UPDATE " + qualifiedFilesTableName + " SET "
                + ManagedJobFilesTableColumn.LIFE_CYCLE.toString() + " = ?" + " WHERE "
                + ManagedJobFilesTableColumn.RECORD_ID + " = ?";

        markFilesReadyToProcessSql = "UPDATE " + qualifiedFilesTableName + " SET "
                + ManagedJobFilesTableColumn.LIFE_CYCLE.toString() + " = "
                + FileLifeCycle.READY_TO_PROCESS.ordinal() + ", "
                + ManagedJobFilesTableColumn.INCLUDE_IN_INVENTORY.toString() + " = 1" + ", "
                + ManagedJobFilesTableColumn.SIZE.toString() + " = ?" + " WHERE "
                + ManagedJobFilesTableColumn.RECORD_ID + " = ?";

        getInitialDiscoveryListSql = "SELECT * FROM " + qualifiedFilesTableName + " WHERE "
                + ManagedJobFilesTableColumn.INITIAL_LIST + " = " + DatabaseResourceManager.boolToDbValue(true);

        markFileProcessedSql = "UPDATE " + qualifiedFilesTableName + " SET "
                + ManagedJobFilesTableColumn.STATUS.toString() + " = ?, " + // param 1
                ManagedJobFilesTableColumn.RETRIES.toString() + " = ?, " + // 2
                ManagedJobFilesTableColumn.START_TIME.toString() + " = ?, " + // 3
                ManagedJobFilesTableColumn.END_TIME.toString() + " = ?, " + // 4
                ManagedJobFilesTableColumn.RUN_TIME_MS.toString() + " = ?, " + // 5
                ManagedJobFilesTableColumn.EXCEPTION_MSG.toString() + " = ?, " + // 6
                ManagedJobFilesTableColumn.STATUS_CODE.toString() + " = ? " + // 7
                " WHERE " + ManagedJobFilesTableColumn.RECORD_ID + " = ?"; // 8

        markDirectoryProcessedSql = "UPDATE " + qualifiedFilesTableName + " SET "
                + ManagedJobFilesTableColumn.LIFE_CYCLE.toString() + " = ? " + // param 1
                ", " + ManagedJobFilesTableColumn.STATUS.toString() + " = ? " + // 2
                ", " + ManagedJobFilesTableColumn.INCLUDE_IN_INVENTORY.toString() + " = ? " + // 3
                ", " + ManagedJobFilesTableColumn.EXCEPTION_MSG.toString() + " = ? " + // 4
                ", " + ManagedJobFilesTableColumn.DIR_LISTING_IN_PROGRESS.toString() + " = ? " + // 5
                " WHERE " + ManagedJobFilesTableColumn.RECORD_ID + " = ?"; // 6

        prepareFilesForResumeSql = "UPDATE " + qualifiedFilesTableName + " SET "
                + ManagedJobFilesTableColumn.LIFE_CYCLE.toString() + " = ?" + // param 1
                " WHERE " + ManagedJobFilesTableColumn.LIFE_CYCLE.toString() + " = ?" + // param 2
                " AND " + ManagedJobFilesTableColumn.STATUS + " = " + FileStatus.NONE.ordinal();

        markDirListingInProgressSql = "UPDATE " + qualifiedFilesTableName + " SET "
                + ManagedJobFilesTableColumn.DIR_LISTING_IN_PROGRESS.toString() + " = 1" + " WHERE "
                + ManagedJobFilesTableColumn.RECORD_ID + " = ?"; // param 1

        getDirsInProgressSql = "SELECT " + ManagedJobFilesTableColumn.RECORD_ID + ", "
                + ManagedJobFilesTableColumn.SOURCE_PATH + " FROM " + qualifiedFilesTableName + " WHERE "
                + ManagedJobFilesTableColumn.DIR_LISTING_IN_PROGRESS + " = 1" + " AND "
                + ManagedJobFilesTableColumn.STATUS + " = " + FileStatus.NONE.ordinal();

        getFilesInDirInProgressSql = "SELECT " + ManagedJobFilesTableColumn.SOURCE_PATH + " FROM "
                + qualifiedFilesTableName + " WHERE " + ManagedJobFilesTableColumn.PARENT_RECORD_ID + " = ?"; // param 1

        insertIntoTempTableSql = "INSERT INTO " + qualifiedTempTableName + "( " + DirListingTempTableColumn.PATH
                + " ) values ( ? )";

        getFileInTempTableSql = "SELECT * " + " FROM " + qualifiedTempTableName + " WHERE "
                + DirListingTempTableColumn.PATH + " = ?"; // param 1
    }

    protected void createFilesTable(Statement stmt) throws SQLException {
        String sql = "CREATE TABLE " + qualifiedFilesTableName + " ("
                + ManagedJobFilesTableColumn.getColumnDefinitions() + getExtraFileDataColumns()
                + ManagedJobFilesTableColumn.getPrimaryKeyString() + ")";
        DBUtils.executeUpdate(stmt, sql);

        /*
         * // create an index on the life cycle and status columns (for use by prepare for resume)
         * String indexSql = "CREATE INDEX " + LIFE_CYCLE_STATUS_INDEX_NAME + " ON " +
         * qualifiedFilesTableName + " (" + ManagedJobFilesTableColumn.LIFE_CYCLE.toString() + ", "
         * + ManagedJobFilesTableColumn.STATUS + ")"; stmt.executeUpdate(indexSql);
         */

        // create an index on the include_in_inventory and status columns (for use by export
        // results)
        String index2Sql = "CREATE INDEX " + INVENTORY_INDEX_NAME + " ON " + qualifiedFilesTableName + " ("
                + ManagedJobFilesTableColumn.INCLUDE_IN_INVENTORY.toString() + ", "
                + ManagedJobFilesTableColumn.STATUS + ")";
        DBUtils.executeUpdate(stmt, index2Sql);

        // create an index on the INITIAL_LIST column (for use when opening a job; we read initial
        // list)
        String index3Sql = "CREATE INDEX " + INITIAL_LIST_INDEX_NAME + " ON " + qualifiedFilesTableName + " ("
                + ManagedJobFilesTableColumn.INITIAL_LIST.toString() + ")";
        DBUtils.executeUpdate(stmt, index3Sql);

        // possibly create an index on the life cycle column, for use by
        // LifecycleBasedCachedTableIterator
        boolean useSimpleIndex = HCPMoverProperties.DB_LIFE_CYCLE_USE_SIMPLE_INDEX.getAsBoolean();
        String index4Sql = "CREATE INDEX " + LIFE_CYCLE_INDEX_NAME + " ON " + qualifiedFilesTableName + " ("
                + ManagedJobFilesTableColumn.LIFE_CYCLE.toString() + ")";
        if (useSimpleIndex) {
            DBUtils.executeUpdate(stmt, index4Sql);
        } else {
            DB_PERFORMANCE_LOG.log(Level.WARNING, "Per configuration setting "
                    + HCPMoverProperties.DB_LIFE_CYCLE_USE_SIMPLE_INDEX.getKey()
                    + ", skipping creation of simple life cycle index.  If you set the parameter to true, you must manually create the index via the following SQL: "
                    + index4Sql);
        }

        // create an index on the life cycle, status, and record_id (desc) columns, for use by
        // prepareForResume() and
        // LifeCycleBasedCachedTableIterator when selecting directories to delete (and possibly for
        // all selects)
        String index5Sql = "CREATE INDEX " + LIFE_CYCLE_STATUS_RECORD_ID_INDEX_NAME + " ON "
                + qualifiedFilesTableName + " (" + ManagedJobFilesTableColumn.LIFE_CYCLE.toString() + ", "
                + ManagedJobFilesTableColumn.STATUS + ", " + ManagedJobFilesTableColumn.RECORD_ID + " DESC" + ")";
        DBUtils.executeUpdate(stmt, index5Sql);

        // create an index on the DIR_LISTING_IN_PROGRESS column for use by prepare for resume.
        String dirListingIndexSql = "CREATE INDEX " + DIR_LISTING_IN_PROGRESS_INDEX_NAME + " ON "
                + qualifiedFilesTableName + " (" + ManagedJobFilesTableColumn.DIR_LISTING_IN_PROGRESS + ", "
                + ManagedJobFilesTableColumn.STATUS + ")";
        DBUtils.executeUpdate(stmt, dirListingIndexSql);
    }

    public void dropSchema() throws DatabaseException {
        synchronized (DatabaseResourceManager.DB_LOCK) {
            Connection conn = null;
            try {
                conn = connPool.getConnection();
                dropTempTable(conn);
                dropTable(conn, qualifiedFilesTableName);
                Statement stmt = conn.createStatement();
                DBUtils.executeUpdate(stmt, "DROP SCHEMA " + schemaName + " RESTRICT");
                conn.commit();
            } catch (Exception e) {
                throw new DatabaseException(
                        DBUtils.getErrorMessage("An error occurred dropping schema " + schemaName, e), e);
            } finally {
                connPool.returnConnection(conn);
            }
        }
    }

    private void createTempTable(Connection conn) throws SQLException {
        Statement stmt = conn.createStatement();
        String sql = "CREATE TABLE " + qualifiedTempTableName + " ("
                + DirListingTempTableColumn.getColumnDefinitions() + DirListingTempTableColumn.getPrimaryKeyString()
                + ")";
        DBUtils.executeUpdate(stmt, sql);
    }

    /**
     * Drops the temp table. Logs but otherwise ignores errors, since failure here is non-fatal.
     *
     * @param conn
     *            - db connection
     */
    private void dropTempTable(Connection conn) {
        dropTable(conn, qualifiedTempTableName);
    }

    private void dropTable(Connection conn, String tableName) {
        Statement stmt = null;
        try {
            stmt = conn.createStatement();
            DBUtils.executeUpdate(stmt, "DROP TABLE " + tableName);
        } catch (SQLException sqle) {
            if ("42Y55".equalsIgnoreCase(sqle.getSQLState())) {
                // ignore this error, it means that the table did not exist
            } else {
                LOG.log(Level.WARNING, DBUtils.getErrorMessage("Failed to drop table " + tableName, sqle));
            }
        } catch (Exception e) {
            LOG.log(Level.WARNING, DBUtils.getErrorMessage("Failed to drop table " + tableName, e));
        } finally {
            if (stmt != null) {
                try {
                    stmt.close();
                } catch (Exception ignore) {
                }
            }
        }
    }

    public void insertDirListingBatch(Collection<ArcProcessFile> dirFiles, ArcProcessFile dir, boolean firstBatch)
            throws DatabaseException {
        synchronized (DatabaseResourceManager.DB_LOCK) {
            PooledDbConnection conn = null;
            try {
                conn = connPool.getConnection();
                conn.setAutoCommit(false);

                if (firstBatch) {
                    PreparedStatement stmt = conn.prepareStatement(MARK_DIR_LISTING_IN_PROGRESS_STMT_NAME,
                            markDirListingInProgressSql);
                    stmt.clearParameters();
                    stmt.setLong(1, dir.getDatabaseRecordId());
                    stmt.executeUpdate();
                }

                insertFilesToDiscover(conn, dirFiles, false);

                conn.commit();
            } catch (Exception e) {
                rollback(conn);
                throw new DatabaseException(
                        DBUtils.getErrorMessage("An error occurred inserting rows into the database", e), e);
            } finally {
                connPool.returnConnection(conn);
            }
        }
    }

    /**
     * @param filesToDiscover
     *            - list of files to insert to db
     * @throws DatabaseException
     */
    public void insertFilesToDiscover(Collection<ArcProcessFile> filesToDiscover) throws DatabaseException {
        insertFilesToDiscover(filesToDiscover, false);
    }

    /**
     * @param filesToDiscover
     *            - list of files to insert to db
     * @param isInitialList
     *            - true if the list of files is generated by what the user specified in the job
     *            definition
     * @throws DatabaseException
     */
    public void insertFilesToDiscover(Collection<ArcProcessFile> filesToDiscover, boolean isInitialList)
            throws DatabaseException {
        synchronized (DatabaseResourceManager.DB_LOCK) {
            PooledDbConnection conn = null;
            try {
                conn = connPool.getConnection();
                conn.setAutoCommit(false);

                insertFilesToDiscover(conn, filesToDiscover, isInitialList);

                conn.commit();
            } catch (Exception e) {
                rollback(conn);
                throw new DatabaseException(
                        DBUtils.getErrorMessage("An error occurred inserting rows into the database", e), e);
            } finally {
                connPool.returnConnection(conn);
            }
        }
    }

    /**
     * @param conn
     *            - the db connection to use
     * @param filesToDiscover
     *            - list of files to insert
     * @param isInitialList
     *            - true if the list is generated by what the user selected/specified in the job
     *            definition
     * @return - the max RECORD_ID inserted
     * @throws DatabaseException
     *             -
     * @throws SQLException
     *             - if a db error occurred
     */
    private long insertFilesToDiscover(PooledDbConnection conn, Collection<ArcProcessFile> filesToDiscover,
            boolean isInitialList) throws DatabaseException, SQLException {

        PreparedStatement preparedStatement = conn.prepareStatement(INSERT_FILES_TO_DISCOVER_STMT_NAME,
                insertFilesToDiscoverySql);
        // the last record id inserted; we're going to increment back up to this
        long recordId = getNextBlockOfDbRecordIds(filesToDiscover.size());
        int maxPathDepth = 0;
        for (ArcProcessFile file : filesToDiscover) {
            int pathDepth = file.getPathDepth();
            maxPathDepth = Math.max(maxPathDepth, pathDepth);
            recordId++;
            setInsertFilesToDiscoverySqlParams(file, preparedStatement, isInitialList, recordId, pathDepth);
            preparedStatement.addBatch();
        }

        // execute the batch statement we created in the for loop
        preparedStatement.executeBatch();

        if (!isInitialList) {
            // now update overall job stats to reflect these changes. A "select count(*)" on a
            // large table is a full table scan, so we keep track of the count ourselves
            ManagedJobsSchema.getInstance().updateDiscoveredObjCnt(conn, jobId, filesToDiscover.size(), recordId,
                    maxPathDepth);
        }

        return recordId;
    }

    public int getMaxPathDepth() throws DatabaseException {
        synchronized (DatabaseResourceManager.DB_LOCK) {
            PooledDbConnection conn = null;
            try {
                conn = connPool.getConnection();
                return getMaxPathDepth(conn);
            } catch (Exception e) {
                throw new DatabaseException(DBUtils
                        .getErrorMessage("An error occurred selecting the max path depth from the database", e), e);
            } finally {
                connPool.returnConnection(conn);
            }
        }
    }

    /**
     * @param conn
     *            - the db connection to use
     * @return - the max PATH_DEPTH value
     * @throws DatabaseException
     *             -
     * @throws SQLException
     *             - if a db error occurred
     */
    private int getMaxPathDepth(PooledDbConnection conn) throws DatabaseException, SQLException {

        return ManagedJobsSchema.getInstance().getMaxPathDepth(conn, jobId);
    }

    public List<ArcProcessFile> getInitialDiscoveryList() throws DatabaseException {
        synchronized (DatabaseResourceManager.DB_LOCK) {
            PooledDbConnection conn = null;
            try {

                conn = connPool.getConnection();
                Statement stmt = conn.createStatement();
                long start = System.currentTimeMillis();
                ResultSet rs = DBUtils.executeQuery(stmt, getInitialDiscoveryListSql);
                List<ArcProcessFile> initialDiscoveryList = new ArrayList<ArcProcessFile>();
                while (rs.next()) {
                    ArcProcessFile file = readArcProcessFile(rs);
                    initialDiscoveryList.add(file);
                }
                DB_PERFORMANCE_LOG.log(Level.FINE,
                        "Fetched the initial discovery list in " + (System.currentTimeMillis() - start) + " ms ");
                return initialDiscoveryList;
            } catch (Exception e) {
                throw new DatabaseException(DBUtils.getErrorMessage(
                        "An error occurred reading the initial discovery list from the database", e), e);
            } finally {
                connPool.returnConnection(conn);
            }
        }
    }

    public void markFileComplete(ArcProcessFile file) throws DatabaseException {
        synchronized (DatabaseResourceManager.DB_LOCK) {
            PooledDbConnection conn = null;
            try {

                conn = connPool.getConnection();

                PreparedStatement stmt = conn.prepareStatement(UPDATE_LIFE_CYCLE_STMT_NAME, updateLifeCycleSql);
                stmt.clearParameters();
                stmt.setInt(1, FileLifeCycle.COMPLETE.ordinal());
                stmt.setLong(2, file.getDatabaseRecordId());
                stmt.executeUpdate();

            } catch (Exception e) {
                throw new DatabaseException(DBUtils
                        .getErrorMessage("An error occurred updating the life cycle of a file to complete", e), e);
            } finally {
                connPool.returnConnection(conn);
            }
        }
    }

    public void markFilesReadyToProcess(Collection<ArcProcessFile> filesReadyToProcess) throws DatabaseException {
        synchronized (DatabaseResourceManager.DB_LOCK) {
            PooledDbConnection conn = null;
            try {
                conn = connPool.getConnection();
                conn.setAutoCommit(false);
                PreparedStatement stmt = conn.prepareStatement(MARK_FILES_READY_TO_PROCESS_STMT_NAME,
                        markFilesReadyToProcessSql);
                long sizeToAdd = 0;
                for (ArcProcessFile file : filesReadyToProcess) {
                    long fileSize = file.getSourceFile().getSize();
                    sizeToAdd += fileSize;
                    stmt.clearParameters();
                    stmt.setLong(1, fileSize);
                    stmt.setLong(2, file.getDatabaseRecordId());
                    stmt.addBatch();
                }
                stmt.executeBatch();

                // now update overall job stats to reflect these changes
                ManagedJobsSchema.getInstance().updateTotalFilesStats(conn, jobId, filesReadyToProcess.size(),
                        sizeToAdd);

                conn.commit();
            } catch (Exception e) {
                rollback(conn);
                throw new DatabaseException(
                        DBUtils.getErrorMessage("An error occurred marking files ready to process", e), e);
            } finally {
                connPool.returnConnection(conn);
            }
        }
    }

    /**
     * Updates the stats for all of the files passed in, and also updates the general job
     * statistics to reflect how many successes/failures there were.
     * 
     * @param files
     *            -
     * @throws DatabaseException
     *             -
     */
    @SuppressWarnings({ "ThrowableResultOfMethodCallIgnored" })
    public void markFilesProcessed(Collection<FileStats> files) throws DatabaseException {
        synchronized (DatabaseResourceManager.DB_LOCK) {
            PooledDbConnection conn = null;
            try {

                conn = connPool.getConnection();
                PreparedStatement stmt = conn.prepareStatement(MARK_FILE_PROCESSED_STMT_NAME, markFileProcessedSql);
                conn.setAutoCommit(false);
                long totalCnt = 0;
                long totalSize = 0;
                long successCnt = 0;
                long successSize = 0;
                long failCnt = 0;
                long failSize = 0;
                long failDirCnt = 0;
                for (FileStats file : files) {
                    FileStatus status = file.getStatus();
                    switch (status) {
                    case SUCCEEDED: {
                        if (file.includeInStats()) {
                            successCnt++;
                            successSize += file.getSize();
                        }
                        break;
                    }
                    case FAILED: {
                        if (file.includeInStats()) {
                            failCnt++;
                            failSize += file.getSize();
                            if (file.failedDuringFind()) {
                                // in this case we need to increment the total counts as well
                                totalCnt++;
                                totalSize += file.getSize(); // probably zero since we didn't get that far
                            }
                        }
                        if (file.getArcProcssFile().isDirectory()) {
                            failDirCnt++;
                        }
                        break;
                    }
                    default:
                        throw new RuntimeException("Unsupported file status: " + status);
                    }
                    stmt.clearParameters();
                    stmt.setInt(1, file.getStatus().ordinal());
                    stmt.setInt(2, file.getRetries());
                    Date startTime = file.getStartTime();
                    if (startTime != null) {
                        stmt.setLong(3, startTime.getTime());
                    } else {
                        stmt.setNull(3, java.sql.Types.BIGINT);
                    }
                    Date endTime = file.getEndTime();
                    if (endTime != null) {
                        stmt.setLong(4, endTime.getTime());
                    } else {
                        stmt.setNull(4, java.sql.Types.BIGINT);
                    }
                    stmt.setLong(5, file.getRunTimeMs());

                    if (file.getException() == null) {
                        stmt.setNull(6, java.sql.Types.VARCHAR);
                    } else {
                        stmt.setString(6, file.getException().getMessage());
                    }

                    if (file.getStatusCode() == null) {
                        stmt.setNull(7, java.sql.Types.INTEGER);
                    } else {
                        stmt.setInt(7, file.getStatusCode());
                    }
                    stmt.setLong(8, file.getDatabaseRecordId());

                    stmt.addBatch();
                }

                // execute the batch statement to update all of the file rows
                stmt.executeBatch();

                // now update overall job stats to reflect these changes
                ManagedJobsSchema.getInstance().updateProcessedFilesStats(conn, jobId, totalCnt, totalSize,
                        successCnt, successSize, failCnt, failSize, failDirCnt);
                conn.commit();

            } catch (Exception e) {
                rollback(conn);
                throw new DatabaseException(DBUtils.getErrorMessage(
                        "An error occurred updating file stats on table " + qualifiedFilesTableName, e), e);
            } finally {
                connPool.returnConnection(conn);
            }
        }
    }

    /**
     * Mark processing of a directory complete in the database. Everything is done in a single
     * transaction.
     *
     * @param dir
     *            - the directory for which there was a directory listing
     * @param dirFiles
     *            - the last batch of files in the directory listing
     * @param dirFailureException
     *            - if this is non null, it means that the directory should be "failed" with this
     *            exception
     * @param processingRequired
     *            - true if the directory needs processing, false otherwise
     * @param postProcessingRequired
     *            - true if post processing of the dir is required on success (true for delete jobs)
     * @throws DatabaseException
     *             - any error occurred
     */
    public void markDirProcessed(ArcProcessFile dir, Collection<ArcProcessFile> dirFiles,
            Exception dirFailureException, boolean processingRequired, boolean postProcessingRequired)
            throws DatabaseException {
        synchronized (DatabaseResourceManager.DB_LOCK) {
            PooledDbConnection conn = null;
            try {

                conn = connPool.getConnection();
                conn.setAutoCommit(false);

                //
                // Insert into the DB all files in the last batch of the dir listing
                //
                if (!dirFiles.isEmpty()) {
                    insertFilesToDiscover(conn, dirFiles, false);
                }

                //
                // Update the status of the directory row
                //
                FileLifeCycle lifeCycle;
                FileStatus status;
                int dirListingInProgress;
                if (dirFailureException != null) {
                    // keep life cycle at "finding" so it will be retried if we restart the job
                    lifeCycle = FileLifeCycle.FINDING;
                    status = FileStatus.FAILED;
                    // don't clear this flag or else on rerun of the job we'll add duplicate rows
                    dirListingInProgress = 1;
                } else if (processingRequired) {
                    lifeCycle = FileLifeCycle.READY_TO_PROCESS;
                    status = FileStatus.NONE;
                    dirListingInProgress = 0;
                } else if (postProcessingRequired) {
                    lifeCycle = FileLifeCycle.READY_TO_POSTPROCESS;
                    status = FileStatus.NONE;
                    dirListingInProgress = 0;
                } else {
                    lifeCycle = FileLifeCycle.COMPLETE;
                    status = FileStatus.SUCCEEDED;
                    dirListingInProgress = 0;
                }

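                // dirs are included in inventory/export results only when post processing is
                // required (per the javadoc above, that is the delete-job case)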
                int includeInInventory = (postProcessingRequired ? 1 : 0);

                PreparedStatement stmt = conn.prepareStatement(MARK_DIR_PROCESSED_STMT_NAME,
                        markDirectoryProcessedSql);

                stmt.clearParameters();
                stmt.setInt(1, lifeCycle.ordinal());
                stmt.setInt(2, status.ordinal());
                stmt.setInt(3, includeInInventory);
                if (dirFailureException == null) {
                    stmt.setNull(4, java.sql.Types.VARCHAR);
                } else {
                    stmt.setString(4, dirFailureException.getMessage());
                }
                stmt.setInt(5, dirListingInProgress);
                stmt.setLong(6, dir.getDatabaseRecordId());
                stmt.executeUpdate();

                //
                // Update stats in the managed_jobs table for this job
                //
                // only for delete jobs do dirs count towards the total object cnt
                int totalCntToAdd = (postProcessingRequired ? 1 : 0);
                // only for delete jobs do dirs count towards the total, and therefore
                // towards the failure total
                int failCntToAdd = (postProcessingRequired && dirFailureException != null ? 1 : 0);
                int failDirCntToAdd = (dirFailureException != null ? 1 : 0);
                if (totalCntToAdd + failCntToAdd + failDirCntToAdd > 0) {
                    ManagedJobsSchema.getInstance().updateDirStats(conn, jobId, totalCntToAdd, failCntToAdd,
                            failDirCntToAdd);
                }

                conn.commit();

            } catch (Exception e) {
                rollback(conn);
                throw new DatabaseException(DBUtils.getErrorMessage(
                        "An error occurred marking directory processing complete in " + qualifiedFilesTableName, e),
                        e);
            } finally {
                connPool.returnConnection(conn);
            }
        }
    }

    /**
     * Prepare to resume a job.
     *
     * @return the Set of directory paths that were in the middle of a directory listing when the
     *         job stopped
     * @throws DatabaseException
     *             -
     */
    public Set<String> prepareFilesForResume() throws DatabaseException {
        synchronized (DatabaseResourceManager.DB_LOCK) {
            PooledDbConnection conn = null;
            try {

                conn = connPool.getConnection();
                // leave autoCommit at true; each statement should be in its own transaction

                PreparedStatement stmt = conn.prepareStatement(prepareFilesForResumeSql);

                long startTime = System.currentTimeMillis();
                stmt.setInt(1, FileLifeCycle.READY_TO_FIND.ordinal());
                stmt.setInt(2, FileLifeCycle.FINDING.ordinal());
                int cnt = stmt.executeUpdate();
                long now = System.currentTimeMillis();
                DB_PERFORMANCE_LOG.log(Level.FINE,
                        "Updated " + cnt + " rows with life cycle FINDING to life cycle READY_TO_FIND in "
                                + (now - startTime) + " ms");

                startTime = now;
                stmt.setInt(1, FileLifeCycle.READY_TO_PROCESS.ordinal());
                stmt.setInt(2, FileLifeCycle.PROCESSING.ordinal());
                cnt = stmt.executeUpdate();
                now = System.currentTimeMillis();
                DB_PERFORMANCE_LOG.log(Level.FINE,
                        "Updated " + cnt + " rows with life cycle PROCESSING to life cycle READY_TO_PROCESS in "
                                + (now - startTime) + " ms");

                startTime = now;
                stmt.setInt(1, FileLifeCycle.READY_TO_POSTPROCESS.ordinal());
                stmt.setInt(2, FileLifeCycle.POSTPROCESSING.ordinal());
                cnt = stmt.executeUpdate();
                now = System.currentTimeMillis();
                DB_PERFORMANCE_LOG.log(Level.FINE,
                        "Updated " + cnt
                                + " rows with life cycle POSTPROCESSING to life cycle READY_TO_POSTPROCESS in"
                                + (now - startTime) + " ms");

                return prepareDirsForResume(conn);

            } catch (Exception e) {
                rollback(conn);
                throw new DatabaseException(
                        DBUtils.getErrorMessage("An error occurred preparing files for resume", e), e);
            } finally {
                connPool.returnConnection(conn);
            }
        }
    }

    /**
     * Select all directories in the job_files table that were in the middle of a dir listing when
     * we paused. Then, for each of those directories, select all files in that directory that were
     * already added to the job_files table and put those in a temporary table. Later when "find" is
     * handling a directory, these files can be looked up so they are not re-added to the job_files
     * table.
     *
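     * For example (hypothetical flow): if directory "/a" had DIR_LISTING_IN_PROGRESS set when the
     * job stopped, its SOURCE_PATH is returned here, and the paths of rows whose PARENT_RECORD_ID
     * points at "/a"'s record are copied into the temp table so that isFileAlreadyDiscovered() can
     * skip them during the re-listing.
     *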
     * @param conn
     *            - DB connection to use
     * @return The set of directories (full paths) that were in progress at the time of the last
     *         job stop
     * @throws SQLException
     *             - if any error occurs
     */
    private Set<String> prepareDirsForResume(PooledDbConnection conn) throws SQLException {

        conn.setAutoCommit(false);
        long startTime = System.currentTimeMillis();

        dropTempTable(conn);
        createTempTable(conn);
        conn.commit();

        Set<String> dirsInProgress = new HashSet<String>();

        Statement stmt = conn.createStatement();
        ResultSet rs = DBUtils.executeQuery(stmt, getDirsInProgressSql);
        while (rs.next()) {

            String dirPath = rs.getString(ManagedJobFilesTableColumn.SOURCE_PATH.toString());
            dirsInProgress.add(dirPath);
            long dirRecordId = rs.getLong(ManagedJobFilesTableColumn.RECORD_ID.toString());

            getFilesInDirInProgress(conn, dirRecordId);
        }

        long now = System.currentTimeMillis();
        DB_PERFORMANCE_LOG.log(Level.FINE,
                String.format("It took %d ms to prepare directories in progress for resume", (now - startTime)));

        return dirsInProgress;
    }

    private void getFilesInDirInProgress(PooledDbConnection conn, long dirRecordId) throws SQLException {
        PreparedStatement filesInDirStmt = conn.prepareStatement(GET_FILES_IN_DIR_IN_PROGRESS_STMT_NAME,
                getFilesInDirInProgressSql);

        filesInDirStmt.clearParameters();
        filesInDirStmt.setLong(1, dirRecordId);

        Set<String> paths = new HashSet<String>();
        ResultSet rs = filesInDirStmt.executeQuery();
        while (rs.next()) {
            String path = rs.getString(ManagedJobFilesTableColumn.SOURCE_PATH.toString());
            paths.add(path);
            if (paths.size() >= HCPMoverProperties.PREPROCESS_FILES_BATCH_SIZE.getAsInt()) {
                insertIntoTempTable(conn, paths);
                paths.clear();
            }
        }
        if (paths.size() > 0) {
            insertIntoTempTable(conn, paths);
        }

        rs.close();
    }

    private void insertIntoTempTable(PooledDbConnection conn, Set<String> paths) throws SQLException {
        PreparedStatement insertStmt = conn.prepareStatement(INSERT_INTO_TEMP_TABLE_STMT_NAME,
                insertIntoTempTableSql);

        for (String path : paths) {
            insertStmt.clearParameters();
            insertStmt.setString(1, path);
            insertStmt.addBatch();
        }

        insertStmt.executeBatch();
        conn.commit();
    }

    public boolean isFileAlreadyDiscovered(String path) throws DatabaseException {
        synchronized (DatabaseResourceManager.DB_LOCK) {
            PooledDbConnection conn = null;
            ResultSet rs;
            try {
                conn = connPool.getConnection();

                PreparedStatement stmt = conn.prepareStatement(GET_FILE_IN_TEMP_TABLE_STMT_NAME,
                        getFileInTempTableSql);
                stmt.clearParameters();
                stmt.setString(1, path);
                rs = stmt.executeQuery();

                boolean ret = rs.next();
                rs.close();
                return ret;

            } catch (Exception e) {
                throw new DatabaseException(
                        DBUtils.getErrorMessage("An error occurred selecting from temp table", e), e);
            } finally {
                connPool.returnConnection(conn);
            }
        }
    }

    protected FileStats readFileStats(ResultSet rs) throws SQLException {
        ArcProcessFile processFile = readArcProcessFile(rs);

        // now read extra columns that are for the stats
        int retries = rs.getInt(ManagedJobFilesTableColumn.RETRIES.toString());
        long startTime = rs.getLong(ManagedJobFilesTableColumn.START_TIME.toString());
        long endTime = rs.getLong(ManagedJobFilesTableColumn.END_TIME.toString());
        long runTimeMs = rs.getLong(ManagedJobFilesTableColumn.RUN_TIME_MS.toString());
        String errMsg = rs.getString(ManagedJobFilesTableColumn.EXCEPTION_MSG.toString());

        FileStats stats = new FileStats(processFile);
        stats.setRetries(retries);
        stats.setStartTime(new Date(startTime));
        stats.setEndTime(new Date(endTime));
        stats.setRunTimeMs(runTimeMs);
        // noinspection ThrowableInstanceNeverThrown
        stats.setException(new Exception(errMsg));

        return stats;
    }

    public void resetJobStats() throws DatabaseException {
        ManagedJobsSchema.getInstance().resetJobStats(jobId);
    }

    public ResetJobTableIterator getResetJobTableIterator(boolean dirsIncludedInTotalCnt) throws Exception {
        synchronized (DatabaseResourceManager.DB_LOCK) {
            return new ResetJobTableIterator(jobId, qualifiedFilesTableName, dirsIncludedInTotalCnt);
        }
    }

    public void updateFindFilesStatus(boolean isComplete) throws DatabaseException {
        ManagedJobsSchema.getInstance().updateFindFilesStatus(jobId, isComplete);
    }

    public void updateRunStats(long startTime, long endTime, long runTime) throws DatabaseException {
        ManagedJobsSchema.getInstance().updateRunStats(jobId, startTime, endTime, runTime);
    }

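    // Note on the iterator inner classes below: each declares a type parameter that shadows the
    // like-named model class (e.g. <ArcProcessFile>, <FileStats>), which is why
    // getDatabaseRecordId() casts through the fully qualified class names.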
    class FindFileIterator<ArcProcessFile> extends LifeCycleBasedCachedTableIterator<ArcProcessFile> {
        private FindFileIterator() throws SQLException {
            super(qualifiedFilesTableName, "*", FileLifeCycle.READY_TO_FIND, FileLifeCycle.FINDING,
                    HCPMoverProperties.FIND_FILES_BATCH_SIZE.getAsInt());
        }

        @SuppressWarnings({ "unchecked" })
        protected ArcProcessFile readRow(ResultSet rs) throws SQLException {
            return (ArcProcessFile) readArcProcessFile(rs);
        }

        protected long getDatabaseRecordId(ArcProcessFile file) {
            return ((com.archivas.clienttools.arcutils.model.ArcProcessFile) file).getDatabaseRecordId();
        }
    }

    public FindFileIterator<ArcProcessFile> getFindFileIterator() throws SQLException {
        return new FindFileIterator<ArcProcessFile>();
    }

    //
    // for iterating over all files that need to be processed (copied/deleted)
    //
    class ProcessFileIterator<FileStats> extends LifeCycleBasedCachedTableIterator<FileStats> {
        private ProcessFileIterator() throws SQLException {
            super(qualifiedFilesTableName, "*", FileLifeCycle.READY_TO_PROCESS, FileLifeCycle.PROCESSING,
                    HCPMoverProperties.PROCESS_FILES_BATCH_SIZE.getAsInt());
        }

        @SuppressWarnings({ "unchecked" })
        protected FileStats readRow(ResultSet rs) throws SQLException {
            return (FileStats) readFileStats(rs);
        }

        protected long getDatabaseRecordId(FileStats file) {
            return ((com.archivas.clienttools.arcutils.impl.jobs.FileStats) file).getDatabaseRecordId();
        }
    }

    public CachedTableIterator<FileStats> getProcessFileIterator() throws SQLException {
        return new ProcessFileIterator<FileStats>();
    }

    //
    // For iterating over all files that need to be post processed (delete directories) in a
    // single-threaded manner.
    // This iterator iterates over the table in descending record_id order.
    //
    class PostProcessReverseOrderFileIterator<FileStats>
            extends ReverseOrderLifeCycleBasedCachedTableIterator<FileStats> {
        private PostProcessReverseOrderFileIterator() throws SQLException {
            super(qualifiedFilesTableName, "*", FileLifeCycle.READY_TO_POSTPROCESS, FileLifeCycle.POSTPROCESSING,
                    HCPMoverProperties.POSTPROCESS_FILES_BATCH_SIZE.getAsInt());
        }

        @SuppressWarnings({ "unchecked" })
        protected FileStats readRow(ResultSet rs) throws SQLException {
            return (FileStats) readFileStats(rs);
        }

        protected long getDatabaseRecordId(FileStats file) {
            return ((com.archivas.clienttools.arcutils.impl.jobs.FileStats) file).getDatabaseRecordId();
        }
    }

    public CachedTableIterator<FileStats> getPostProcessReverseOrderFileIterator() throws SQLException {
        return new PostProcessReverseOrderFileIterator<FileStats>();
    }

    //
    // For iterating over all files that need to be post processed (delete directories) in a
    // single-threaded manner.
    // This iterator iterates over the table in descending record_id order.
    //
    class PostProcessPathDepthFileIterator<FileStats>
            extends PathDepthLifeCycleBasedCachedTableIterator<FileStats> {
        private PostProcessPathDepthFileIterator(int pathDepth) throws SQLException {
            super(qualifiedFilesTableName, "*", FileLifeCycle.READY_TO_POSTPROCESS, FileLifeCycle.POSTPROCESSING,
                    HCPMoverProperties.POSTPROCESS_FILES_BATCH_SIZE.getAsInt(), pathDepth);
        }

        @SuppressWarnings({ "unchecked" })
        protected FileStats readRow(ResultSet rs) throws SQLException {
            return (FileStats) readFileStats(rs);
        }

        protected long getDatabaseRecordId(FileStats file) {
            return ((com.archivas.clienttools.arcutils.impl.jobs.FileStats) file).getDatabaseRecordId();
        }
    }

    public CachedTableIterator<FileStats> getPostProcessPathDepthFileIterator(int pathDepth) throws SQLException {
        return new PostProcessPathDepthFileIterator<FileStats>(pathDepth);
    }

    /**
     * Concurrency note: This method is thread safe
     *
     * A successful run of this method creates the specified file, which reflects the contents of
     * the underlying DB for this object.
     *
     * @param dumpFile
     *            - file to write to
     * @param sourceProfile
     *            - the source profile
     * @throws DatabaseException
     *             -
     */
    public void dumpTotalList(File dumpFile, AbstractProfileBase sourceProfile) throws DatabaseException {
        synchronized (DatabaseResourceManager.DB_LOCK) {
            try {
                outputResults(false, null, null, dumpFile, sourceProfile);
            } catch (Exception e) {
                throw new DatabaseException(e.getMessage(), e);
            }
        }
    }

    /**
     * Concurrency note: This method is thread safe
     *
     * A successful run of this method creates the specified file, which reflects the contents of
     * the underlying DB for this object.
     *
     * @param dumpFile
     *            - file to write to
     * @param sourceProfile
     *            -
     * @throws DatabaseException
     *             -
     */
    public void dumpFailedList(File dumpFile, AbstractProfileBase sourceProfile) throws DatabaseException {
        synchronized (DatabaseResourceManager.DB_LOCK) {
            try {
                outputResults(true, null, FileStatus.FAILED, dumpFile, sourceProfile);
            } catch (Exception e) {
                throw new DatabaseException(e.getMessage(), e);
            }
        }
    }

    /**
     * Concurrency note: This method is thread safe
     *
     * A successful run of this method creates the specified file, which reflects the contents of
     * the underlying DB for this object.
     *
     * @param dumpFile
     *            - file to write to
     * @param sourceProfile
     *            -
     * @throws DatabaseException
     *             -
     */
    public void dumpSuccessList(File dumpFile, AbstractProfileBase sourceProfile) throws DatabaseException {
        synchronized (DatabaseResourceManager.DB_LOCK) {
            try {
                outputResults(false, null, FileStatus.SUCCEEDED, dumpFile, sourceProfile);
            } catch (Exception e) {
                throw new DatabaseException(e.getMessage(), e);
            }
        }
    }

    public void dumpConflictsList(File dumpFile, AbstractProfileBase sourceProfile) throws DatabaseException {
        synchronized (DatabaseResourceManager.DB_LOCK) {
            try {
                outputResults(false, HttpStatus.SC_CONFLICT, null, dumpFile, sourceProfile);
            } catch (Exception e) {
                throw new DatabaseException(e.getMessage(), e);
            }
        }
    }

    private void outputResults(boolean includeErrMsg, Integer includeOnlyRowsWithStatusCode, FileStatus status,
            File outFile, AbstractProfileBase sourceProfile) throws IOException, SQLException {
        synchronized (DatabaseResourceManager.DB_LOCK) {
            PooledDbConnection conn = null;
            FileWriter fstream = new FileWriter(outFile);
            BufferedWriter out = new BufferedWriter(fstream);
            try {
                conn = connPool.getConnection();

                String sql = "SELECT " + ManagedJobFilesTableColumn.SOURCE_PATH.toString()
                        + (includeErrMsg ? ", " + ManagedJobFilesTableColumn.EXCEPTION_MSG.toString() : "")
                        + " FROM " + qualifiedFilesTableName + " WHERE "
                        + ManagedJobFilesTableColumn.INCLUDE_IN_INVENTORY.toString() + " = 1";
                if (status != null) {
                    sql += " AND " + ManagedJobFilesTableColumn.STATUS.toString() + " = " + status.ordinal();
                }
                if (includeOnlyRowsWithStatusCode != null) {
                    sql += " AND " + ManagedJobFilesTableColumn.STATUS_CODE.toString() + " = "
                            + includeOnlyRowsWithStatusCode;
                }
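                // e.g. for dumpFailedList the assembled SQL would read roughly (hypothetical
                // schema name and enum renderings):
                // SELECT SOURCE_PATH, EXCEPTION_MSG FROM JOB_42.JOB_FILES
                //     WHERE INCLUDE_IN_INVENTORY = 1 AND STATUS = <FAILED ordinal>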

                Statement stmt = conn.createStatement();
                long startTime = System.currentTimeMillis();
                DB_PERFORMANCE_LOG.log(Level.FINE, "Executing export results query: " + sql);
                ResultSet rs = stmt.executeQuery(sql);
                DB_PERFORMANCE_LOG.log(Level.FINE,
                        "Executed export results query in " + (System.currentTimeMillis() - startTime) + " ms");
                long cnt = 0;
                while (rs.next()) {
                    out.write(
                            sourceProfile.decode(rs.getString(ManagedJobFilesTableColumn.SOURCE_PATH.toString())));
                    if (includeErrMsg) {
                        out.write("," + rs.getString(ManagedJobFilesTableColumn.EXCEPTION_MSG.toString()));
                    }
                    out.newLine();
                    cnt++;
                }
                DB_PERFORMANCE_LOG.log(Level.FINE,
                        "Exported " + cnt + " files in " + (System.currentTimeMillis() - startTime) + " ms");
            } finally {
                connPool.returnConnection(conn);
                out.close();
            }
        }
    }

    protected abstract String getExtraFileDataColumns();

    protected abstract String getInsertFilesToDiscoverSqlCols();

    protected abstract void setInsertFilesToDiscoverySqlParams(ArcProcessFile file, PreparedStatement stmt,
            boolean isInitialList, long id, int pathDepth) throws SQLException;

    protected abstract ArcProcessFile readArcProcessFile(ResultSet rs) throws SQLException;

    ///////////////////////////////////////////////////////////////////////////////////////////////////////
    // Helper Methods //
    ///////////////////////////////////////////////////////////////////////////////////////////////////////

    private void rollback(Connection conn) {
        try {
            conn.rollback();
        } catch (Exception ex) {
            // do nothing
        }
    }

    public static String getJobSchemaName(long jobId) {
        return JOB_SCHEMA_PREFIX + jobId;
    }

    /**
     * This method is synchronized so that no two threads will use the same DB RECORD_IDs
     * 
     * @param size
     *            - the number of Ids to return
     * @return - the last record_id used. The caller should start using the next record id after the
     *         one returned
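     *
     *         For example (hypothetical values): if lastDbRecordId is 100, then
     *         getNextBlockOfDbRecordIds(5) returns 100 and advances lastDbRecordId to 105; the
     *         caller then assigns record ids 101..105 to its batch.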
     */
    private synchronized long getNextBlockOfDbRecordIds(int size) {
        if (size < 0) {
            throw new RuntimeException("size (" + size + ") can not be less than zero");
        }

        long ret = lastDbRecordId;
        lastDbRecordId += size;
        return ret;
    }

    public void finalize() throws Throwable {
        // make sure the db connection pool is cleaned up
        connPool.closeAll();
        super.finalize();
    }

}
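
Usage sketch

Below is a minimal, hypothetical sketch (not part of the original file) showing how a caller
might drive the schema life cycle. It calls only public methods declared above; the names
ManagedJobSchemaSketch, runJobSketch, userSelection, and failedReport are invented for
illustration. The sketch assumes a concrete ManagedJobSchema subclass whose schema and tables
were already created by the framework; the real orchestration lives in the job classes of the
arcutils package.

import com.archivas.clienttools.arcutils.model.ArcProcessFile;
import com.archivas.clienttools.arcutils.profile.AbstractProfileBase;
import com.archivas.clienttools.arcutils.utils.database.ManagedJobSchema;

import java.io.File;
import java.util.List;
import java.util.Set;

public class ManagedJobSchemaSketch {

    // Hypothetical driver -- see the note above.
    public static void runJobSketch(ManagedJobSchema jobSchema, List<ArcProcessFile> userSelection,
            AbstractProfileBase sourceProfile, File failedReport) throws Exception {

        // Build the SQL strings and seed the record-id counter. A brand new job
        // starts at 0; a reopened job restores the counter from persisted state.
        jobSchema.load(0);
        jobSchema.setSourceProfile(sourceProfile);

        // Insert the user-selected roots as the "initial list" of files to discover.
        jobSchema.insertFilesToDiscover(userSelection, true);

        // If resuming a previously stopped job, flip in-flight rows back to their
        // READY_* life cycles and find out which directory listings must be redone.
        Set<String> dirsToRedo = jobSchema.prepareFilesForResume();

        // Discovery and processing phases would pull work through the life-cycle
        // iterators (getFindFileIterator(), getProcessFileIterator(), ...) and
        // report outcomes via markFilesReadyToProcess() / markFilesProcessed().

        // After the run, export per-file results for the user.
        jobSchema.dumpFailedList(failedReport, sourceProfile);
    }
}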