Example usage for java.util SortedSet size

List of usage examples for java.util SortedSet size

Introduction

On this page you can find example usage for java.util.SortedSet.size().

Prototype

int size();

Document

Returns the number of elements in this set (its cardinality).
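
As a minimal, self-contained illustration of the prototype above (the class and variable names are only for this sketch):

import java.util.SortedSet;
import java.util.TreeSet;

public class SortedSetSizeDemo {
    public static void main(String[] args) {
        SortedSet<String> names = new TreeSet<String>();
        names.add("alice");
        names.add("bob");
        names.add("alice"); // duplicate element, not counted again
        // size() returns the number of elements in the set (its cardinality): 2
        System.out.println("size = " + names.size());
    }
}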

Usage

From source file:org.alfresco.repo.domain.node.AbstractNodeDAOImpl.java

/**
 * Loads the nodes into cache using batching.
 */
private void cacheNodes(StoreRef storeRef, List<String> uuids) {
    StoreEntity store = getStoreNotNull(storeRef);
    Long storeId = store.getId();

    int batchSize = 256;
    SortedSet<String> batch = new TreeSet<String>();
    for (String uuid : uuids) {
        batch.add(uuid);
        if (batch.size() >= batchSize) {
            // Preload
            List<Node> nodes = selectNodesByUuids(storeId, batch);
            cacheNodesNoBatch(nodes);
            batch.clear();
        }
    }
    // Load any remaining nodes
    if (batch.size() > 0) {
        List<Node> nodes = selectNodesByUuids(storeId, batch);
        cacheNodesNoBatch(nodes);
    }
}
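
The batching pattern above (accumulate identifiers in a SortedSet, flush whenever size() reaches the threshold, then flush whatever remains after the loop) can be reduced to a small standalone sketch; BATCH_SIZE and processBatch are illustrative names, not part of the Alfresco API.

import java.util.Arrays;
import java.util.List;
import java.util.SortedSet;
import java.util.TreeSet;

public class BatchingDemo {
    private static final int BATCH_SIZE = 3;

    public static void main(String[] args) {
        List<String> uuids = Arrays.asList("a", "b", "c", "d", "e");
        SortedSet<String> batch = new TreeSet<String>();
        for (String uuid : uuids) {
            batch.add(uuid);
            if (batch.size() >= BATCH_SIZE) {
                processBatch(batch); // flush a full batch
                batch.clear();
            }
        }
        if (batch.size() > 0) { // flush any remaining items
            processBatch(batch);
        }
    }

    private static void processBatch(SortedSet<String> batch) {
        System.out.println("processing " + batch.size() + " items: " + batch);
    }
}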

From source file:net.cbtltd.rest.yandex.A_Handler.java

private void createSchedule(SortedSet<DateTime> availableDates, Product product, Date version,
        SqlSession sqlSession) {
    DateTime currentDate = new DateTime(version).withTime(0, 0, 0, 0);

    // create reservation if current date is less than the first date in the available dates set
    DateTime firstAvailableDate = availableDates.first();
    System.out.println("firstAvailableDate = " + firstAvailableDate);
    int daysBetween = Days.daysBetween(currentDate, availableDates.first()).getDays();
    System.out.println("createSchedule daysBetween: " + daysBetween);
    System.out.println("daysBetween = " + currentDate + ",  " + availableDates.first());
    if (daysBetween > 1) {
        PartnerService.createSchedule(sqlSession, product, currentDate.toDate(),
                firstAvailableDate.withFieldAdded(DurationFieldType.days(), -1).toDate(), version);
    }

    DateTime fromDate = firstAvailableDate;

    boolean first = true;
    System.out.println("availableDates size = " + availableDates.size());
    for (DateTime toDate : availableDates) {
        if (first) {
            first = false;
            continue;
        }
        daysBetween = Days.daysBetween(fromDate, toDate).getDays();
        if (daysBetween > 1 && toDate.isAfterNow()) {
            System.out.println("createSchedule availableDates daysBetween: " + daysBetween);
            PartnerService.createSchedule(sqlSession, product,
                    fromDate.withFieldAdded(DurationFieldType.days(), 1).toDate(),
                    toDate.withFieldAdded(DurationFieldType.days(), -1).toDate(), version);
        }
        fromDate = toDate;
    }
}

From source file:org.apache.hadoop.hive.metastore.txn.TxnHandler.java

/**
 * Lock acquisition is meant to be fair, so every lock can only block on some lock with smaller
 * hl_lock_ext_id by only checking earlier locks.
 *
 * For any given SQL statement, all locks required by it are grouped under a single extLockId and are
 * granted all at once or all locks wait.
 *
 * This is expected to run at READ_COMMITTED.
 *
 * Note: this calls acquire() for (extLockId,intLockId) but extLockId is the same and we either take
 * all locks for given extLockId or none.  Would be more efficient to update state on all locks
 * at once.  Semantics are the same since this is all part of the same txn.
 *
 * If there is a concurrent commitTxn/rollbackTxn, those can only remove rows from HIVE_LOCKS.
 * If they happen to be for the same txnid, there will be a WW conflict (in MS DB), if different txnid,
 * checkLock() will in the worst case keep locks in Waiting state a little longer.
 */
@RetrySemantics.SafeToRetry("See @SafeToRetry")
private LockResponse checkLock(Connection dbConn, long extLockId)
        throws NoSuchLockException, NoSuchTxnException, TxnAbortedException, MetaException, SQLException {
    TxnStore.MutexAPI.LockHandle handle = null;
    Statement stmt = null;
    ResultSet rs = null;
    LockResponse response = new LockResponse();
    /**
     * todo: Longer term we should pass this from client somehow - this would be an optimization;  once
     * that is in place make sure to build and test "writeSet" below using OperationType not LockType
     * With Static Partitions we assume that the query modifies exactly the partitions it locked.  (not entirely
     * realistic since Update/Delete may have some predicate that filters out all records from
     * some partition(s), but plausible).  For DP, we acquire locks very wide (all known partitions),
     * but for most queries only a fraction will actually be updated.  #addDynamicPartitions() tells
     * us exactly which ones were written to.  Thus using this trick to kill a query early for
     * DP queries may be too restrictive.
     */
    boolean isPartOfDynamicPartitionInsert = true;
    try {
        /**
         * checkLock() must be mutexed against any other checkLock to make sure 2 conflicting locks
         * are not granted by parallel checkLock() calls.
         */
        handle = getMutexAPI().acquireLock(MUTEX_KEY.CheckLock.name());
        List<LockInfo> locksBeingChecked = getLockInfoFromLockId(dbConn, extLockId);//being acquired now
        response.setLockid(extLockId);

        LOG.debug("checkLock(): Setting savepoint. extLockId=" + JavaUtils.lockIdToString(extLockId));
        Savepoint save = dbConn.setSavepoint();
        StringBuilder query = new StringBuilder(
                "select hl_lock_ext_id, " + "hl_lock_int_id, hl_db, hl_table, hl_partition, hl_lock_state, "
                        + "hl_lock_type, hl_txnid from HIVE_LOCKS where hl_db in (");

        Set<String> strings = new HashSet<String>(locksBeingChecked.size());

        // This is the set of entities that the statement represented by extLockId wants to update
        List<LockInfo> writeSet = new ArrayList<>();

        for (LockInfo info : locksBeingChecked) {
            strings.add(info.db);
            if (!isPartOfDynamicPartitionInsert && info.type == LockType.SHARED_WRITE) {
                writeSet.add(info);
            }
        }
        if (!writeSet.isEmpty()) {
            if (writeSet.get(0).txnId == 0) {
                //Write operations always start a txn
                throw new IllegalStateException(
                        "Found Write lock for " + JavaUtils.lockIdToString(extLockId) + " but no txnid");
            }
            stmt = dbConn.createStatement();
            StringBuilder sb = new StringBuilder(
                    " ws_database, ws_table, ws_partition, " + "ws_txnid, ws_commit_id "
                            + "from WRITE_SET where ws_commit_id >= " + writeSet.get(0).txnId + " and (");//see commitTxn() for more info on this inequality
            for (LockInfo info : writeSet) {
                sb.append("(ws_database = ").append(quoteString(info.db)).append(" and ws_table = ")
                        .append(quoteString(info.table)).append(" and ws_partition ")
                        .append(info.partition == null ? "is null" : "= " + quoteString(info.partition))
                        .append(") or ");
            }
            sb.setLength(sb.length() - 4);//nuke trailing " or "
            sb.append(")");
            //1 row is sufficient to know we have to kill the query
            rs = stmt.executeQuery(sqlGenerator.addLimitClause(1, sb.toString()));
            if (rs.next()) {
                /**
                 * if here, it means we found an already committed txn which overlaps with the current one and
                 * it updated the same resource the current txn wants to update.  By First-committer-wins
                 * rule, current txn will not be allowed to commit so  may as well kill it now;  This is just an
                 * optimization to prevent wasting cluster resources to run a query which is known to be DOA.
                 * {@link #commitTxn(CommitTxnRequest)} has the primary responsibility to ensure this.
                 * checkLock() runs at READ_COMMITTED so you could have another (Hive) txn running commitTxn()
                 * in parallel and thus writing to WRITE_SET.  commitTxn() logic is properly mutexed to ensure
                 * that we don't "miss" any WW conflicts. We could've mutexed the checkLock() and commitTxn()
                 * as well but this reduces concurrency for very little gain.
                 * Note that update/delete (which runs as dynamic partition insert) acquires a lock on the table,
                 * but WRITE_SET has entries for actual partitions updated.  Thus this optimization will "miss"
                 * the WW conflict but it will be caught in commitTxn() where actual partitions written are known.
                 * This is OK since we want 2 concurrent updates that update different sets of partitions to both commit.
                 */
                String resourceName = rs.getString(1) + '/' + rs.getString(2);
                String partName = rs.getString(3);
                if (partName != null) {
                    resourceName += '/' + partName;
                }

                String msg = "Aborting " + JavaUtils.txnIdToString(writeSet.get(0).txnId)
                        + " since a concurrent committed transaction [" + JavaUtils.txnIdToString(rs.getLong(4))
                        + "," + rs.getLong(5) + "] has already updated resource '" + resourceName + "'";
                LOG.info(msg);
                if (abortTxns(dbConn, Collections.singletonList(writeSet.get(0).txnId), true) != 1) {
                    throw new IllegalStateException(msg + " FAILED!");
                }
                dbConn.commit();
                throw new TxnAbortedException(msg);
            }
            close(rs, stmt, null);
        }

        boolean first = true;
        for (String s : strings) {
            if (first)
                first = false;
            else
                query.append(", ");
            query.append('\'');
            query.append(s);
            query.append('\'');
        }
        query.append(")");

        // If any of the table requests are null, then I need to pull all the
        // table locks for this db.
        boolean sawNull = false;
        strings.clear();
        for (LockInfo info : locksBeingChecked) {
            if (info.table == null) {
                sawNull = true;
                break;
            } else {
                strings.add(info.table);
            }
        }
        if (!sawNull) {
            query.append(" and (hl_table is null or hl_table in(");
            first = true;
            for (String s : strings) {
                if (first)
                    first = false;
                else
                    query.append(", ");
                query.append('\'');
                query.append(s);
                query.append('\'');
            }
            query.append("))");

            // If any of the partition requests are null, then I need to pull all
            // partition locks for this table.
            sawNull = false;
            strings.clear();
            for (LockInfo info : locksBeingChecked) {
                if (info.partition == null) {
                    sawNull = true;
                    break;
                } else {
                    strings.add(info.partition);
                }
            }
            if (!sawNull) {
                query.append(" and (hl_partition is null or hl_partition in(");
                first = true;
                for (String s : strings) {
                    if (first)
                        first = false;
                    else
                        query.append(", ");
                    query.append('\'');
                    query.append(s);
                    query.append('\'');
                }
                query.append("))");
            }
        }
        query.append(" and hl_lock_ext_id <= ").append(extLockId);

        LOG.debug("Going to execute query <" + query.toString() + ">");
        stmt = dbConn.createStatement();
        rs = stmt.executeQuery(query.toString());
        SortedSet<LockInfo> lockSet = new TreeSet<LockInfo>(new LockInfoComparator());
        while (rs.next()) {
            lockSet.add(new LockInfo(rs));
        }
        // Turn the tree set into an array so we can move back and forth easily
        // in it.
        LockInfo[] locks = lockSet.toArray(new LockInfo[lockSet.size()]);
        if (LOG.isTraceEnabled()) {
            LOG.trace("Locks to check(full): ");
            for (LockInfo info : locks) {
                LOG.trace("  " + info);
            }
        }

        for (LockInfo info : locksBeingChecked) {
            // Find the lock record we're checking
            int index = -1;
            for (int i = 0; i < locks.length; i++) {
                if (locks[i].equals(info)) {
                    index = i;
                    break;
                }
            }

            // If we didn't find the lock, then it must not be in the table
            if (index == -1) {
                LOG.debug("Going to rollback");
                dbConn.rollback();
                throw new MetaException(
                        "How did we get here, we heartbeated our lock before we started! ( " + info + ")");
            }

            // If we've found it and it's already been marked acquired,
            // then just look at the other locks.
            if (locks[index].state == LockState.ACQUIRED) {
                /**this is what makes this method @SafeToRetry*/
                continue;
            }

            // Look at everything in front of this lock to see if it should block
            // it or not.
            boolean acquired = false;
            for (int i = index - 1; i >= 0; i--) {
                // Check if we're operating on the same database, if not, move on
                if (!locks[index].db.equals(locks[i].db)) {
                    continue;
                }

                // If table is null on either of these, then they are claiming to
                // lock the whole database and we need to check it.  Otherwise,
                // check if they are operating on the same table, if not, move on.
                if (locks[index].table != null && locks[i].table != null
                        && !locks[index].table.equals(locks[i].table)) {
                    continue;
                }

                // If partition is null on either of these, then they are claiming to
                // lock the whole table and we need to check it.  Otherwise,
                // check if they are operating on the same partition, if not, move on.
                if (locks[index].partition != null && locks[i].partition != null
                        && !locks[index].partition.equals(locks[i].partition)) {
                    continue;
                }

                // We've found something that matches what we're trying to lock,
                // so figure out if we can lock it too.
                LockAction lockAction = jumpTable.get(locks[index].type).get(locks[i].type).get(locks[i].state);
                LOG.debug("desired Lock: " + info + " checked Lock: " + locks[i] + " action: " + lockAction);
                switch (lockAction) {
                case WAIT:
                    if (!ignoreConflict(info, locks[i])) {
                        /*we acquire all locks for a given query atomically; if one blocks, all go into (or remain in)
                        * the Waiting state.  wait() will undo any 'acquire()' which may have happened as part of
                        * this (metastore db) transaction, and then we record which lock blocked the lock
                        * we were testing ('info').*/
                        wait(dbConn, save);
                        String sqlText = "update HIVE_LOCKS" + " set HL_BLOCKEDBY_EXT_ID=" + locks[i].extLockId
                                + ", HL_BLOCKEDBY_INT_ID=" + locks[i].intLockId + " where HL_LOCK_EXT_ID="
                                + info.extLockId + " and HL_LOCK_INT_ID=" + info.intLockId;
                        LOG.debug("Executing sql: " + sqlText);
                        int updCnt = stmt.executeUpdate(sqlText);
                        if (updCnt != 1) {
                            shouldNeverHappen(info.txnId, info.extLockId, info.intLockId);
                        }
                        LOG.debug("Going to commit");
                        dbConn.commit();
                        response.setState(LockState.WAITING);
                        LOG.debug("Lock(" + info + ") waiting for Lock(" + locks[i] + ")");
                        return response;
                    }
                    //fall through to ACQUIRE
                case ACQUIRE:
                    acquire(dbConn, stmt, extLockId, info);
                    acquired = true;
                    break;
                case KEEP_LOOKING:
                    continue;
                }
                if (acquired)
                    break; // We've acquired this lock component,
                // so get out of the loop and look at the next component.
            }

            // If we've arrived here and we have not already acquired, it means there's nothing in the
            // way of the lock, so acquire the lock.
            if (!acquired)
                acquire(dbConn, stmt, extLockId, info);
        }

        // We acquired all of the locks, so commit and return acquired.
        LOG.debug("Going to commit");
        dbConn.commit();
        response.setState(LockState.ACQUIRED);
    } finally {
        close(rs, stmt, null);
        if (handle != null) {
            handle.releaseLocks();
        }
    }
    return response;
}

From source file:net.sourceforge.fenixedu.domain.student.Registration.java

public boolean isReingression(final ExecutionYear executionYear) {
    final SortedSet<RegistrationState> states = new TreeSet<RegistrationState>(
            RegistrationState.DATE_COMPARATOR);
    states.addAll(getRegistrationStatesSet());

    Registration sourceRegistration = getSourceRegistration();
    while (sourceRegistration != null) {
        states.addAll(sourceRegistration.getRegistrationStatesSet());
        sourceRegistration = sourceRegistration.getSourceRegistration();
    }

    if (states.size() == 0) {
        return false;
    }

    RegistrationState previous = null;
    for (final RegistrationState registrationState : states) {
        if (previous != null) {
            if (registrationState.getExecutionYear() == executionYear
                    && (registrationState.isActive()
                            || registrationState.getStateType() == RegistrationStateType.TRANSITED)
                    && (previous.getStateType() == RegistrationStateType.EXTERNAL_ABANDON
                            || previous.getStateType() == RegistrationStateType.INTERRUPTED
                            || previous.getStateType() == RegistrationStateType.FLUNKED)) {
                return true;
            }
        }

        previous = registrationState;
    }

    return false;

}

From source file:com.gtwm.pb.model.manageData.DataManagement.java

public SortedSet<CommentInfo> getComments(BaseField field, int rowId) throws SQLException, CantDoThatException {
    SortedSet<CommentInfo> comments = new TreeSet<CommentInfo>();
    Boolean hasComments = field.hasComments();
    if (hasComments != null) {
        if (hasComments.equals(false)) {
            return comments;
        }
    }
    String sqlCode = "SELECT created, author, text FROM dbint_comments WHERE internalfieldname=? AND rowid=? order by created desc limit 10";
    Connection conn = null;
    try {
        conn = this.dataSource.getConnection();
        conn.setAutoCommit(false);
        PreparedStatement statement = conn.prepareStatement(sqlCode);
        String internalFieldName = field.getInternalFieldName();
        statement.setString(1, internalFieldName);
        statement.setInt(2, rowId);
        ResultSet results = statement.executeQuery();
        while (results.next()) {
            Timestamp createdTimestamp = results.getTimestamp(1);
            Calendar created = Calendar.getInstance();
            created.setTimeInMillis(createdTimestamp.getTime());
            String author = results.getString(2);
            String comment = results.getString(3);
            comments.add(new Comment(internalFieldName, rowId, author, created, comment));
        }
        results.close();
        statement.close();
        if (comments.size() > 0) {
            field.setHasComments(true);
        } else if (hasComments == null) {
            // We've seen there are no comments for this particular record
            // but we don't know if there are any for the field in other
            // records. Check.
            sqlCode = "SELECT count(*) from dbint_comments WHERE internalfieldname=?";
            statement = conn.prepareStatement(sqlCode);
            statement.setString(1, internalFieldName);
            results = statement.executeQuery();
            if (results.next()) {
                int numComments = results.getInt(1);
                if (numComments > 0) {
                    field.setHasComments(true);
                } else {
                    // Another check in case another thread e.g. running
                    // addComment has set this to true.
                    // We don't want to overwrite that
                    // TODO: Really, this should be atomic but it takes such
                    // a small amount of time compared to the SQL it's
                    // probably fine
                    if (field.hasComments() == null) {
                        field.setHasComments(false);
                    }
                }
            } else {
                logger.error("Unable to see if comments exist with query " + statement);
            }
            results.close();
            statement.close();
        }
    } finally {
        if (conn != null) {
            conn.close();
        }
    }
    return comments;
}

From source file:org.alfresco.repo.domain.node.AbstractNodeDAOImpl.java

/**
 * Bulk-fetch the nodes for a given store.  All nodes passed in are fetched.
 */
private void cacheNodesNoBatch(List<Node> nodes) {
    // Get the nodes
    SortedSet<Long> aspectNodeIds = new TreeSet<Long>();
    SortedSet<Long> propertiesNodeIds = new TreeSet<Long>();
    Map<Long, NodeVersionKey> nodeVersionKeysFromCache = new HashMap<Long, NodeVersionKey>(nodes.size() * 2); // Keep for quick lookup
    for (Node node : nodes) {
        Long nodeId = node.getId();
        NodeVersionKey nodeVersionKey = node.getNodeVersionKey();
        node.lock(); // Prevent unexpected edits of values going into the cache
        nodesCache.setValue(nodeId, node);
        if (propertiesCache.getValue(nodeVersionKey) == null) {
            propertiesNodeIds.add(nodeId);
        }
        if (aspectsCache.getValue(nodeVersionKey) == null) {
            aspectNodeIds.add(nodeId);
        }
        nodeVersionKeysFromCache.put(nodeId, nodeVersionKey);
    }

    if (logger.isDebugEnabled()) {
        logger.debug("Pre-loaded " + propertiesNodeIds.size() + " properties");
        logger.debug("Pre-loaded " + aspectNodeIds.size() + " aspects");
    }

    Map<NodeVersionKey, Set<QName>> nodeAspects = selectNodeAspects(aspectNodeIds);
    for (Map.Entry<NodeVersionKey, Set<QName>> entry : nodeAspects.entrySet()) {
        NodeVersionKey nodeVersionKeyFromDb = entry.getKey();
        Long nodeId = nodeVersionKeyFromDb.getNodeId();
        Set<QName> qnames = entry.getValue();
        setNodeAspectsCached(nodeId, qnames);
        aspectNodeIds.remove(nodeId);
    }
    // Cache the absence of aspects too!
    for (Long nodeId : aspectNodeIds) {
        setNodeAspectsCached(nodeId, Collections.<QName>emptySet());
    }

    // First ensure all content data are pre-cached, so we don't have to load them individually when converting properties
    contentDataDAO.cacheContentDataForNodes(propertiesNodeIds);

    // Now bulk load the properties
    Map<NodeVersionKey, Map<NodePropertyKey, NodePropertyValue>> propsByNodeId = selectNodeProperties(
            propertiesNodeIds);
    for (Map.Entry<NodeVersionKey, Map<NodePropertyKey, NodePropertyValue>> entry : propsByNodeId.entrySet()) {
        Long nodeId = entry.getKey().getNodeId();
        Map<NodePropertyKey, NodePropertyValue> propertyValues = entry.getValue();
        Map<QName, Serializable> props = nodePropertyHelper.convertToPublicProperties(propertyValues);
        setNodePropertiesCached(nodeId, props);
    }
}

From source file:org.eclipse.skalli.model.ext.maven.internal.MavenResolverRunnable.java

private void run(ProjectService projectService, List<UUID> uuids) {
    LOG.info(MessageFormat.format("MavenResolver: Started ({0} projects to scan)", uuids.size()));

    NexusClient nexusClient = getNexusClient();
    NexusVersionsResolver versionsResolver = nexusClient != null ? new NexusVersionsResolver(nexusClient)
            : null;

    int count = 0;
    int countUpdated = 0;
    int countInvalidPom = 0;
    int countIOExceptions = 0;
    int countUnexpectedException = 0;
    int countPersistingProblem = 0;
    SortedSet<Issue> issues = new TreeSet<Issue>();

    for (UUID uuid : uuids) {
        if (count > 0) {
            // delay the execution for 10 seconds, otherwise we may
            // overload the remote systems with our requests;
            // but not before the first project in the loop
            try {
                Thread.sleep(10000);
            } catch (InterruptedException e) {
                break;
            }
        }
        ++count;

        Project project = projectService.getByUUID(uuid);
        if (project == null) {
            handleIssue(Severity.ERROR, uuid, MessageFormat.format("MavenResolver: Unknown project {0}", uuid),
                    null, issues);
            continue;
        }
        LOG.info(MessageFormat.format("MavenResolver: Processing {0}", project.getProjectId()));

        MavenReactor oldReactor = getMavenReactorProperty(project);
        MavenReactor newReactor = null;
        try {
            newReactor = resolveProject(project, oldReactor, issues);
        } catch (ValidationException e) {
            ++countInvalidPom;
            handleIssue(
                    Severity.WARNING, uuid, MessageFormat
                            .format("MavenResolver: Invalid POM found for project {0}", project.getProjectId()),
                    e, issues);
            continue;
        } catch (IOException e) {
            ++countIOExceptions;
            handleIssue(Severity.ERROR, uuid, MessageFormat
                    .format("MavenResolver: Failed to retrieve POM for project {0}", project.getProjectId()), e,
                    issues);
            continue;
        } catch (RuntimeException e) {
            ++countUnexpectedException;
            handleIssue(Severity.FATAL, uuid,
                    MessageFormat.format(
                            "MavenResolver: Unexpected exception when resolving POMs for project {0}",
                            project.getProjectId()),
                    e, issues);
            continue;
        }

        if (versionsResolver != null) {
            try {
                versionsResolver.addVersions(newReactor, oldReactor);
                versionsResolver.setNexusVersions(newReactor);
            } catch (RuntimeException e) {
                ++countUnexpectedException;
                handleIssue(Severity.FATAL, uuid, MessageFormat.format(
                        "MavenResolver: Unexpected exception when retrieving artifact versions for project {0}",
                        project.getProjectId()), e, issues);
                continue;
            }
        }

        if (!ComparatorUtils.equals(newReactor, oldReactor)) {
            if (updateMavenReactorExtension(project, newReactor)) {
                try {
                    projectService.persist(project, userId);
                    handleIssue(Severity.INFO, uuid,
                            MessageFormat.format("MavenResolver: Updated project {0}", project.getProjectId()),
                            null, issues);
                    ++countUpdated;
                } catch (ValidationException e) {
                    ++countPersistingProblem;
                    handleIssue(Severity.FATAL, uuid, MessageFormat.format(
                            "MavenResolver: Failed to persist project {0}", project.getProjectId()), e, issues);
                    continue;
                } catch (RuntimeException e) {
                    ++countUnexpectedException;
                    handleIssue(Severity.FATAL, uuid,
                            MessageFormat.format(
                                    "MavenResolver: Unexpected exception when persisting project {0}",
                                    project.getProjectId()),
                            e, issues);
                    continue;
                }
            }
        }
        LOG.info(MessageFormat.format("MavenResolver: Processed {0} ({1} projects processed, {2} remaining)",
                project.getProjectId(), count, uuids.size() - count));
    }
    LOG.info(MessageFormat.format(
            "MavenResolver: Finished ({0} projects processed, {1} updated, {2} with invalid POM, {3} persisting problems, "
                    + "{4} i/o exceptions, {5} unexpected exceptions)",
            uuids.size(), countUpdated, countInvalidPom, countPersistingProblem, countIOExceptions,
            countUnexpectedException));
    if (issues.size() > 0) {
        LOG.info(Issue.getMessage("MavenResolver: Issue Summary", issues));
    }
}

From source file:edu.ku.brc.specify.tasks.subpane.wb.wbuploader.Uploader.java

protected void doUploadSansUIMatchProcessingStuff(HashMap<UploadTable, HashMap<Integer, Integer>> uploadedRecs,
        List<Pair<Integer, List<UploadTableMatchInfo>>> matchInfos) {
    List<UploadTableMatchInfo> mis = new ArrayList<UploadTableMatchInfo>();
    List<UploadTable> prevMatches = new ArrayList<UploadTable>();
    for (UploadTable t : uploadTables) {
        if (t.isCheckMatchInfo() && !t.isSkipMatching()) {
            Integer[] mCount = t.getMatchCountForCurrentRow();
            SortedSet<UploadedRecordInfo> urs = t.getUploadedRecs() == null || t.getUploadedRecs().size() == 0
                    ? new TreeSet<UploadedRecordInfo>()
                    : t.getUploadedRecs().tailSet(new UploadedRecordInfo(null, rowUploading, 0, null));
            if (urs.size() == 0) {
                urs.add(new UploadedRecordInfo(null, rowUploading, 0, null));
            }
            for (UploadedRecordInfo ur : urs) {
                Integer seq = ur == null ? 0 : ur.getSeq();
                List<Integer> colIdxs = new ArrayList<Integer>();
                for (UploadField uf : t.getUploadFields().get(seq)) {
                    if (uf.getIndex() != -1) {
                        colIdxs.add(uf.getIndex());
                    }
                }
                HashMap<Integer, Integer> recs = mCount[seq] > 1 ? null : uploadedRecs.get(t);
                if ((mCount[seq] == 0 && t.getCurrentRecord(seq) != null) || mCount[seq] > 1) {
                    //a record was  added or multiple matches
                    addMatchInfo(mis, prevMatches, t, mCount[seq], colIdxs);
                    if (mCount[seq] == 0) {
                        if (recs != null && ur != null) {
                            recs.put(ur.getKey(), ur.getSeq());
                        } else {
                            System.out.println(
                                    "Error: " + t + " is not enhashed or beset for row " + rowUploading);
                        }
                    }
                } else if (mCount[seq] == 1) {
                    //figure out if record was added earlier in the upload
                    if (recs != null && t.getCurrentRecord(seq) != null) {
                        Integer oseq = recs.get(t.getCurrentRecord(seq).getId());
                        if (oseq != null) {
                            addMatchInfo(mis, prevMatches, t, 0, colIdxs);
                        }
                    } else {
                        System.out.println("Error: " + t + " is not enhashed or beset for row " + rowUploading);
                    }
                } else {
                    //what the hell?
                }
            }
        }
    }
    if (mis.size() > 0) {
        matchInfos.add(new Pair<Integer, List<UploadTableMatchInfo>>(rowUploading, mis));
    }

}

From source file:edu.ku.brc.specify.tasks.subpane.wb.wbuploader.UploadTable.java

/**
 * @param row
 * @throws UploaderException
 * 
 * deletes all records uploaded for row.
 */
public void abortRow(final int row) throws UploaderException {
    UploadedRecordInfo arg1 = new UploadedRecordInfo(null, row, 0, null);
    UploadedRecordInfo arg2 = new UploadedRecordInfo(null, row + 1, 0, null);
    SortedSet<UploadedRecordInfo> recsForRow = uploadedRecs.subSet(arg1, arg2);
    if (recsForRow.size() > 0) {
        deleteObjects(recsForRow.iterator(), false);
        uploadedRecs.removeAll(recsForRow);
    }
}
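
The row-selection trick above works because SortedSet.subSet() returns a view of the backing set, and size() on that view counts only the elements in the half-open range. A minimal sketch with plain integers (the bounds and element type are illustrative, not the Specify UploadedRecordInfo type):

import java.util.SortedSet;
import java.util.TreeSet;

public class SubSetSizeDemo {
    public static void main(String[] args) {
        SortedSet<Integer> uploadedRecs = new TreeSet<Integer>();
        for (int i = 0; i < 10; i++) {
            uploadedRecs.add(i);
        }
        // View of the elements in [3, 6); its size() is 3
        SortedSet<Integer> recsForRow = uploadedRecs.subSet(3, 6);
        if (recsForRow.size() > 0) {
            System.out.println("deleting " + recsForRow.size() + " records: " + recsForRow);
            recsForRow.clear(); // clearing the view also removes the elements from the backing set
        }
        System.out.println("remaining = " + uploadedRecs.size()); // 7
    }
}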

From source file:org.sakaiproject.citation.tool.CitationHelperAction.java

protected void captureAccess(ParameterParser params, SessionState state, ContentResourceEdit edit,
        Map<String, Object> results) {

    Map<String, Object> entityProperties = (Map<String, Object>) state
            .getAttribute(STATE_RESOURCE_ENTITY_PROPERTIES);
    boolean changesFound = false;
    String access_mode = params.getString("access_mode");
    if (access_mode == null) {
        access_mode = AccessMode.INHERITED.toString();
    }
    String oldAccessMode = entityProperties.get(PROP_ACCESS_MODE).toString();
    if (oldAccessMode == null) {
        oldAccessMode = AccessMode.INHERITED.toString();
    }
    if (!access_mode.equals(oldAccessMode)) {
        results.put(PROP_ACCESS_MODE, AccessMode.fromString(access_mode));
        changesFound = true;
    }
    if (AccessMode.GROUPED.toString().equals(access_mode)) {
        // we inherit more than one group and must check whether group access changes at this item
        String[] access_groups = params.getStrings("access_groups");

        SortedSet<String> new_groups = new TreeSet<String>();
        if (access_groups != null) {
            new_groups.addAll(Arrays.asList(access_groups));
        }

        List<Map<String, String>> possibleGroups = (List<Map<String, String>>) entityProperties
                .get(PROP_POSSIBLE_GROUPS);
        if (possibleGroups == null) {
            possibleGroups = new ArrayList<Map<String, String>>();
        }
        Map<String, String> possibleGroupMap = mapGroupRefs(possibleGroups);
        SortedSet<String> new_group_refs = convertToRefs(new_groups, possibleGroupMap);

        boolean groups_are_inherited = (new_groups.size() == possibleGroupMap.size())
                && possibleGroupMap.keySet().containsAll(new_groups);

        try {
            if (groups_are_inherited) {
                edit.clearGroupAccess();
                edit.setGroupAccess(new_group_refs);
            } else {
                edit.setGroupAccess(new_group_refs);
            }
            edit.clearPublicAccess();
        } catch (InconsistentException e) {
            logger.warn("InconsistentException in captureAccess() " + e);
        } catch (PermissionException e) {
            logger.warn("PermissionException in captureAccess() " + e);
        }
    } else if ("public".equals(access_mode)) {
        Boolean isPubviewInherited = (Boolean) entityProperties.get(PROP_IS_PUBVIEW_INHERITED);
        if (isPubviewInherited == null || !isPubviewInherited) {
            try {
                edit.setPublicAccess();
            } catch (InconsistentException e) {
                logger.warn("InconsistentException in captureAccess() " + e);
            } catch (PermissionException e) {
                logger.warn("PermissionException in captureAccess() " + e);
            }
        }
    } else if (AccessMode.INHERITED.toString().equals(access_mode)) {
        try {
            if (edit.getAccess() == AccessMode.GROUPED) {
                edit.clearGroupAccess();
            }
            edit.clearPublicAccess();
        } catch (InconsistentException e) {
            logger.warn("InconsistentException in captureAccess() " + e);
        } catch (PermissionException e) {
            logger.warn("PermissionException in captureAccess() " + e);
        }
    }

    // isPubview
    results.put(PROP_IS_PUBVIEW, getContentService().isPubView(edit.getId()));
    // isPubviewInherited
    results.put(PROP_IS_PUBVIEW_INHERITED, new Boolean(getContentService().isInheritingPubView(edit.getId())));
    // isPubviewPossible
    Boolean preventPublicDisplay = (Boolean) state.getAttribute("resources.request.prevent_public_display");
    if (preventPublicDisplay == null) {
        preventPublicDisplay = Boolean.FALSE;
    }
    results.put(PROP_IS_PUBVIEW_POSSIBLE, new Boolean(!preventPublicDisplay.booleanValue()));

    // accessMode
    results.put(PROP_ACCESS_MODE, edit.getAccess());
    // isGroupInherited
    results.put(PROP_IS_GROUP_INHERITED, AccessMode.GROUPED == edit.getInheritedAccess());
    // possibleGroups
    Collection<Group> inheritedGroupObjs = edit.getInheritedGroupObjects();
    Map<String, Map<String, String>> groups = new HashMap<String, Map<String, String>>();
    if (inheritedGroupObjs != null) {
        for (Group group : inheritedGroupObjs) {
            Map<String, String> grp = new HashMap<String, String>();
            grp.put("groupId", group.getId());
            grp.put("title", group.getTitle());
            grp.put("description", group.getDescription());
            grp.put("entityRef", group.getReference());
            groups.put(grp.get("groupId"), grp);
        }
    }
    results.put(PROP_POSSIBLE_GROUPS, groups);
    // isGroupPossible
    results.put(PROP_IS_GROUP_POSSIBLE, new Boolean(groups != null && groups.size() > 0));
    // isSingleGroupInherited
    results.put(PROP_IS_SINGLE_GROUP_INHERITED, new Boolean(groups != null && groups.size() == 1));
    // isSiteOnly = ! isPubviewPossible && ! isGroupPossible
    results.put(PROP_IS_SITE_ONLY,
            new Boolean(preventPublicDisplay.booleanValue() && (groups == null || groups.size() < 1)));
    // isUserSite
    SiteService siteService = (SiteService) ComponentManager.get(SiteService.class);
    Reference ref = getEntityManager().newReference(edit.getReference());
    results.put(PROP_IS_USER_SITE, siteService.isUserSite(ref.getContext()));
}