Example usage for java.util.Hashtable.values()

A list of usage examples for java.util.Hashtable.values(), gathered from open-source projects.

Introduction

On this page you can find example usage of java.util.Hashtable.values().

Prototype

public Collection<V> values()
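
For orientation, here is a minimal, self-contained sketch (the class name and map contents are illustrative, not taken from any of the projects below). It shows that values() returns a live Collection view backed by the Hashtable:

import java.util.Collection;
import java.util.Hashtable;

public class HashtableValuesSketch {
    public static void main(String[] args) {
        Hashtable<String, Integer> counts = new Hashtable<String, Integer>();
        counts.put("alpha", 1);
        counts.put("beta", 2);
        counts.put("gamma", 3);

        // values() returns a Collection view backed by the Hashtable
        Collection<Integer> values = counts.values();

        int total = 0;
        for (int v : values) {
            total += v;
        }
        System.out.println("sum of values = " + total); // 6

        // the view is live: removing a mapping is reflected in it
        counts.remove("gamma");
        System.out.println("values.size() after remove = " + values.size()); // 2
    }
}

Unlike HashMap, Hashtable's methods are synchronized, which is why several of the older examples below use it as a simple thread-safe map.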

Usage

From source file:de.berlios.statcvs.xml.chart.AbstractTimeSeriesChart.java

protected Map createTimeSeries(Grouper grouper, Iterator it, RevisionVisitorFactory factory) {
    Hashtable timeSeriesByGroup = new Hashtable();
    Hashtable visitorByGroup = new Hashtable();

    ReportSettings.Predicate predicate = getSettings().getOutputPredicate();
    while (it.hasNext()) {
        CvsRevision rev = (CvsRevision) it.next();
        Object group = grouper.getGroup(rev);
        DateTimeSeries series = (DateTimeSeries) timeSeriesByGroup.get(group);
        RevisionVisitor visitor = (RevisionVisitor) visitorByGroup.get(group);
        if (series == null) {
            series = new DateTimeSeries(grouper.getName(group));
            timeSeriesByGroup.put(group, series);
            visitor = factory.create(group);
            visitorByGroup.put(group, visitor);
        }

        int value = visitor.visit(rev);
        if (predicate == null || predicate.matches(rev)) {
            // TODO: Maybe introduce CvsFile.isBinary() again.
            series.add(rev.getDate(), value);
        }
    }

    for (Iterator it2 = timeSeriesByGroup.values().iterator(); it2.hasNext();) {
        ((DateTimeSeries) it2.next()).addLast();
    }

    return timeSeriesByGroup;
}

From source file:edu.ku.brc.specify.tasks.ExpressSearchTask.java

/**
 * @param searchName the name of the search definition to execute
 */
public void doBasicSearch(final String searchName) {
    Hashtable<String, ExpressResultsTableInfo> idToTableInfoMap = ExpressSearchConfigCache
            .getSearchIdToTableInfoHash();
    for (ExpressResultsTableInfo erti : idToTableInfoMap.values()) {
        //log.debug("["+erti.getName()+"]["+searchName+"]");
        if (erti.getName().equals(searchName)) {
            // This needs to be fixed: the query might not return any results,
            // yet we always add the pane.
            ESResultsSubPane expressSearchPane = new ESResultsSubPane(erti.getTitle(), this, true);
            QueryForIdResultsSQL esr = new QueryForIdResultsSQL(erti.getTitle(), null, erti, 0, "");
            @SuppressWarnings("unused")
            ExpressTableResultsFromQuery esrfq = new ExpressTableResultsFromQuery(expressSearchPane, esr, true);
            addSubPaneToMgr(expressSearchPane);
            return;
        }
    }
    log.error("Can't find a search definition for name [" + searchName + "]");
}

From source file:org.hyperic.hq.plugin.iis.IisRtPlugin.java

/**
 * Main method for parsing the log.
 *
 * Much of this is duplicated from the BaseRTPlugin, mainly due
 * to the file format being specified in the log file itself.  This
 * needs to be abstracted.
 *
 */
public Collection getTimes(Integer svcID, Properties alreadyParsedFiles, String logdir, String logmask,
        String logfmt, int svcType, String transforms, ArrayList noLog, boolean collectIPs) throws IOException {
    Hashtable urls = new Hashtable();

    // Setup the parser
    lp = getParser();
    lp.setTimeMultiplier(this.getTimeMultiplier());
    lp.urlDontLog(noLog);

    // Get the list of logs to parse
    ParsedFile[] flist = generateFileList(alreadyParsedFiles, logdir, logmask);

    // For each log, parse out the response time info
    for (int i = 0; i < flist.length; i++) {

        long flen[] = new long[1];
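            // flen is an out-parameter: parseLog writes the file's new length into flen[0]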

        ParsedFile f = flist[i];

        logfmt = getLogFormat(f.fname);
        if (logfmt == null || logfmt.length() == 0) {
            // If we cannot determine the log format, don't bother
            // passing the file through the parser.
            log.debug("Not parsing " + f.fname + ": No log format");
            continue;
        }

        long start = System.currentTimeMillis();
        log.debug("Parsing log: " + f.fname);

        Hashtable rv = lp.parseLog(f.fname, logfmt, f.oldLen, svcID, svcType, flen, collectIPs);

        if (log.isDebugEnabled()) {
            long elapsed = System.currentTimeMillis() - start;
            log.debug("Done parsing log, " + rv.keySet().size() + " elements ("
                    + StringUtil.formatDuration(elapsed, 0, true) + ")");
        }

        alreadyParsedFiles.put(f.fname, Long.toString(flen[0]));
        combineUrls(rv, urls, transforms);
    }

    log.debug("Returning parsed data " + urls.values().size() + " entries");

    return urls.values();
}

From source file:pt.iflow.flows.FlowData.java

private void buildForkJoinDepPath(UserInfoInterface userInfo, Hashtable<Integer, Block> htBlocks,
        Set<Integer> alStates, Block block, ForkJoinDep lastFJD) {

    // It is only worth spending CPU cycles traversing graphs if fork,
    // synchronization, or join blocks exist
    boolean blockFound = false;
    for (Block b : htBlocks.values()) {
        String name = b.getClass().getName();
        // TODO: find a more elegant way to perform this test.
        if (name.contains("BlockSincronizacao") || name.contains("JuncaoExclusiva")
                || name.contains("BlockBifurcacao")) {
            blockFound = true;
            break;
        }
    }

    if (!blockFound)
        return;

    buildForkJoinDepPathImpl(userInfo, htBlocks, alStates, block, lastFJD);
}

From source file:de.juwimm.cms.remote.UserServiceSpringImpl.java

/**
 * Returns all users for the active site the logged-in user is in or, if the
 * logged-in user is only a unitAdmin, all Users for all Units he is in.<br/>
 * The unitAdmin will not see SiteAdmins in his list, even though they can see
 * all Units. <b>SECURITY INFORMATION:</b> Available only to: <i>siteRoot,
 * unitAdmin</i>
 * 
 * @return all UserValue objects in an array; empty if nobody was found.
 * 
 * @see de.juwimm.cms.remote.UserServiceSpring#getAllUser()
 */
@Override
protected UserValue[] handleGetAllUser() throws Exception {
    if (log.isDebugEnabled()) {
        log.debug("begin getAllUser");
    }
    UserValue[] itarr = null;
    try {
        UserHbm user = super.getUserHbmDao().load(AuthenticationHelper.getUserName());
        if (getUserHbmDao().isInRole(user, UserRights.SITE_ROOT, user.getActiveSite())) {
            Collection coll = null;
            int siz = 0;
            coll = super.getUserHbmDao().findAll(user.getActiveSite().getSiteId());
            siz = coll.size();
            itarr = new UserValue[siz];
            if (siz > 0) {
                Iterator it = coll.iterator();
                for (int i = 0; i < siz; i++) {
                    itarr[i] = ((UserHbm) it.next()).getUserValue();
                }
            }
        } else {
            Hashtable<String, UserValue> userMap = new Hashtable<String, UserValue>();
            Iterator unitsIt = user.getUnits().iterator();
            while (unitsIt.hasNext()) {
                UnitHbm unit = (UnitHbm) unitsIt.next();
                Collection users = unit.getUsers();
                Iterator<UserHbm> userIt = users.iterator();
                while (userIt.hasNext()) {
                    UserValue current = userIt.next().getUserValue();
                    userMap.put(current.getUserName(), current);
                }
            }
            itarr = userMap.values().toArray(new UserValue[0]);
        }
    } catch (Exception e) {
        throw new UserException(e.getMessage());
    }
    return itarr;
}

From source file:net.ustyugov.jtalk.service.JTalkService.java

public int getMessagesCount() {
    int result = 0;
    for (Hashtable<String, Integer> hash : messagesCount.values()) {
        for (Integer i : hash.values()) {
            result = result + i;
        }
    }
    return result;
}

From source file:com.stimulus.archiva.search.StandardSearch.java

protected Searcher getVolumeSearchers() throws MessageSearchException {

    logger.debug("getVolumeSearchers()");
    boolean searcherPresent = false;
    Hashtable<String, String> remoteServers = new Hashtable<String, String>();
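    // note: in this excerpt remoteServers is never populated, so the
    // RMI lookup loop over its values() below never runs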
    List<Volume> volumes = Config.getConfig().getVolumes().getVolumes();
    LinkedList<Searchable> searchers = new LinkedList<Searchable>();
    Iterator<Volume> vl = volumes.iterator();
    logger.debug("searching for suitable searchers");
    while (vl.hasNext()) {
        Volume volume = (Volume) vl.next();
        logger.debug("should search volume? {" + volume + "}");
        try {
            Searchable volsearcher;
            if (shouldSearch(volume)) {
                try {
                    volsearcher = new IndexSearcher(volume.getIndexPath());
                    logger.debug("adding volume to search {indexpath='" + volume.getIndexPath() + "'}");
                    searchers.add(volsearcher);
                    searcherPresent = true;
                } catch (Exception e) {
                    logger.error("failed to volume to search{" + volume + "}: " + e.getMessage(), e);
                }
            } else {
                logger.debug("deliberately not searching inside volume {" + volume.getIndexPath() + "}");
            }
        } catch (Exception io) {
            logger.error("failed to open index for search {" + volume + "}.", io);
        }

    }

    if (!searcherPresent)
        return null;

    for (String remotePath : remoteServers.values()) {
        try {
            Searchable volsearcher = (Searchable) Naming.lookup(remotePath);
            searchers.add(volsearcher);
        } catch (Exception e) {
            logger.error("failed to add volume searcher", e);
        }
    }
    Searchable[] searcherarraytype = new Searchable[searchers.size()];
    Searchable[] allsearchers = (Searchable[]) (searchers.toArray(searcherarraytype));

    Searcher searcher;
    try {
        searcher = new ParallelMultiSearcher(allsearchers);
    } catch (IOException io) {
        throw new MessageSearchException("failed to open/create one or more index searchers", logger);
    }
    return searcher;
}

From source file:com.flexive.ejb.beans.workflow.StepEngineBean.java

/**
 * {@inheritDoc}
 */
@Override
public List<StepPermission> loadAllStepsForUser(long userId) throws FxApplicationException {
    UserTicket ticket = FxContext.getUserTicket();
    // Select all step ids
    final String sql =
            //                 1    ,   2         ,      3
            "SELECT DISTINCT step.ID,aclug.ACL,step.WORKFLOW,"
                    // 4        ,  5       ,   6     ,  7         ,   8      ,      9        , 10
                    + " aclug.PEDIT,aclug.PREAD,aclug.PREMOVE,aclug.PEXPORT,aclug.PREL,aclug.PCREATE,step.STEPDEF"
                    + " FROM " + TBL_ACLS + " acl," + TBL_ACLS_ASSIGNMENT + " aclug," + TBL_WORKFLOW_STEP
                    + " step" + " WHERE" + " aclug.ACL=acl.ID" + " AND acl.CAT_TYPE="
                    + ACLCategory.WORKFLOW.getId() + " AND aclug.USERGROUP IN (SELECT DISTINCT USERGROUP FROM "
                    + TBL_ASSIGN_GROUPS + " WHERE ACCOUNT=" + userId + " AND USERGROUP<>"
                    + UserGroup.GROUP_OWNER + ")" + " AND step.ACL=acl.ID";

    // Security
    if (!ticket.isGlobalSupervisor()) {
        if (ticket.getUserId() != userId) {
            FxNoAccessException na = new FxNoAccessException("You may not load the steps for another user");
            if (LOG.isInfoEnabled())
                LOG.info(na);
            throw na;
        }
    }

    // Obtain a database connection
    Connection con = null;
    Statement stmt = null;
    try {
        con = Database.getDbConnection();

        // Load all steps in the database
        stmt = con.createStatement();
        ResultSet rs = stmt.executeQuery(sql);
        //ArrayList result = new ArrayList(50);
        Hashtable<Integer, StepPermission> result = new Hashtable<Integer, StepPermission>(50);

        while (rs != null && rs.next()) {
            // Fill in a step object
            Integer stepId = rs.getInt(1);
            int workflowId = rs.getInt(3);
            boolean mayEdit = rs.getBoolean(4);
            boolean mayRead = rs.getBoolean(5);
            boolean mayDelete = rs.getBoolean(6);
            boolean mayExport = rs.getBoolean(7);
            boolean mayRelate = rs.getBoolean(8);
            boolean mayCreate = rs.getBoolean(9);
            int stepDefinitionId = rs.getInt(10);
            StepPermissionEdit data;
            StepPermission stepPerm = result.get(stepId);
            if (stepPerm == null) {
                data = new StepPermissionEdit(new StepPermission(stepId, stepDefinitionId, workflowId, mayRead,
                        mayEdit, mayRelate, mayDelete, mayExport, mayCreate));
            } else {
                data = new StepPermissionEdit(stepPerm);
                if (mayDelete)
                    data.setMayDelete(true);
                if (mayEdit)
                    data.setMayEdit(true);
                if (mayExport)
                    data.setMayExport(true);
                if (mayRelate)
                    data.setMayRelate(true);
                if (mayRead)
                    data.setMayRead(true);
                if (mayCreate)
                    data.setMayCreate(true);
            }
            result.put(stepId, data);
        }

        return new ArrayList<StepPermission>(result.values());
    } catch (SQLException exc) {
        throw new FxLoadException(LOG, "ex.step.load.user", exc, userId, exc.getMessage());
    } finally {
        Database.closeObjects(StepEngineBean.class, con, stmt);
    }

}

From source file:helma.objectmodel.db.NodeManager.java

/**
 *  Updates a modified node in the embedded db or an external relational database, depending
 * on its database mapping.
 *
 * @return true if the DbMapping of the updated Node is to be marked as updated via
 *              DbMapping.setLastDataChange
 */
public boolean updateNode(IDatabase db, ITransaction txn, Node node)
        throws IOException, SQLException, ClassNotFoundException {

    invokeOnPersist(node);
    DbMapping dbm = node.getDbMapping();
    boolean markMappingAsUpdated = false;

    if ((dbm == null) || !dbm.isRelational()) {
        db.updateNode(txn, node.getID(), node);
    } else {
        Hashtable propMap = node.getPropMap();
        Property[] props;

        if (propMap == null) {
            props = new Property[0];
        } else {
            props = new Property[propMap.size()];
            propMap.values().toArray(props);
        }

        // make sure table meta info is loaded by dbmapping
        dbm.getColumns();

        StringBuffer b = dbm.getUpdate();

        // comma flag set after the first dirty column; it also tells us
        // whether there are dirty columns at all
        boolean comma = false;

        for (int i = 0; i < props.length; i++) {
            // skip clean properties
            if ((props[i] == null) || !props[i].dirty) {
                // null out clean property so we don't consider it later
                props[i] = null;
                continue;
            }

            Relation rel = dbm.propertyToRelation(props[i].getName());

            // skip readonly, virtual and collection relations
            if ((rel == null) || rel.readonly || rel.virtual || (!rel.isPrimitiveOrReference())) {
                // null out property so we don't consider it later
                props[i] = null;
                continue;
            }

            if (comma) {
                b.append(", ");
            } else {
                comma = true;
            }

            b.append(rel.getDbField());
            b.append(" = ?");
        }

        // if no columns were updated, return false
        if (!comma) {
            return false;
        }

        b.append(" WHERE ");
        dbm.appendCondition(b, dbm.getIDField(), node.getID());

        Connection con = dbm.getConnection();
        // set connection to write mode
        if (con.isReadOnly())
            con.setReadOnly(false);
        PreparedStatement stmt = con.prepareStatement(b.toString());

        int stmtNumber = 0;
        long logTimeStart = logSql ? System.currentTimeMillis() : 0;

        try {
            for (int i = 0; i < props.length; i++) {
                Property p = props[i];

                if (p == null) {
                    continue;
                }

                Relation rel = dbm.propertyToRelation(p.getName());

                stmtNumber++;
                setStatementValue(stmt, stmtNumber, p, rel.getColumnType());

                p.dirty = false;

                if (!rel.isPrivate()) {
                    markMappingAsUpdated = true;
                }
            }

            stmt.executeUpdate();

        } finally {
            if (logSql) {
                long logTimeStop = System.currentTimeMillis();
                logSqlStatement("SQL UPDATE", dbm.getTableName(), logTimeStart, logTimeStop, b.toString());
            }
            if (stmt != null) {
                try {
                    stmt.close();
                } catch (Exception ignore) {
                }
            }
        }
    }

    // update may cause changes in the node's parent subnode array
    // TODO: is this really needed anymore?
    if (markMappingAsUpdated && node.isAnonymous()) {
        Node parent = node.getCachedParent();

        if (parent != null) {
            parent.markSubnodesChanged();
        }
    }

    return markMappingAsUpdated;
}

From source file:org.kepler.kar.KARFile.java

/**
 * This method makes sure that all of the entries of this KARFile are in the
 * Cache. It caches the entries in the order that their dependencies
 * dictate.
 * 
 * @throws Exception
 */
public void cacheKARContents() throws Exception {
    if (isDebugging) {
        log.debug("openKAR: " + this.toString());
    }
    try {
        // get references to all the managers we'll be using
        LocalRepositoryManager lrm = LocalRepositoryManager.getInstance();
        KARCacheManager kcm = KARCacheManager.getInstance();
        CacheManager cm = CacheManager.getInstance();

        // Make sure the file is in a local repository
        if (!lrm.isInLocalRepository(getFileLocation())) {
            log.warn("KAR should be in a Local Repository Folder to be inserted in the cache: "
                    + getFileLocation());
            // return;
        }

        // Add a row to the KARS_CACHED table
        boolean inserted = kcm.insertIntoCache(this);
        if (!inserted) {
            // This KAR has already been cached, don't do it again
            return;
        }

        // keep two lists while traversing the dependencies, start with all
        // of the entries (we don't know yet if they are cached or not)
        // and move them into the cached entries as they are cached (or if
        // they are already cached)
        Vector<KAREntry> entries = (Vector<KAREntry>) karEntries();
        Hashtable<KeplerLSID, KAREntry> cachedEntries = new Hashtable<KeplerLSID, KAREntry>();

        // do one pass through the entries to see if any of them are already
        // in the cache
        for (KAREntry entry : entries) {
            KeplerLSID lsid = entry.getLSID();

            // See if this entry is already in the Cache
            boolean alreadyCached = cm.isContained(lsid);
            if (alreadyCached) {

                // add this entry into the cachedEntries list
                cachedEntries.put(entry.getLSID(), entry);

                // Insert a row into the KAR_CONTENTS table for this entry
                File karFile = getFileLocation();
                KeplerLSID entryLsid = entry.getLSID();
                String entryName = entry.getName();
                String entryType = entry.getType();
                kcm.insertEntryIntoCache(karFile, entryLsid, entryName, entryType);
            }
        }

        // remove entries that were already cached
        for (KAREntry entry : cachedEntries.values()) {
            entries.remove(entry);
        }

        // keep cycling through the uncached entries until the list is empty
        while (entries.size() > 0) {

            // keep track of the entries cached during this pass
            Vector<KAREntry> cachedThisPass = new Vector<KAREntry>(entries.size());

            // cycle through all of the remaining, uncached entries
            for (KAREntry entry : entries) {
                if (isDebugging)
                    log.debug(entry.getName());

                // get the dependency list for this entry
                List<KeplerLSID> depList = entry.getLsidDependencies();

                if (depList.size() == 0) {
                    // if there are no dependencies we just cache it
                    boolean success = cache(entry);
                    if (success) {
                        cachedEntries.put(entry.getLSID(), entry);
                        cachedThisPass.add(entry);
                        break;
                    }
                    if (isDebugging)
                        log.debug(success);
                } else {
                    // if there are dependencies then we check to make sure
                    // that all of the dependencies have already been cached
                    boolean allDependenciesHaveBeenCached = true;
                    for (KeplerLSID lsid : depList) {
                        // if any of the dependencies have not been cached,
                        // set false
                        if (!cm.isContained(lsid)) {
                            allDependenciesHaveBeenCached = false;
                        }
                    }
                    if (allDependenciesHaveBeenCached) {
                        // all dependencies have been cached so it is
                        // OK to cache this entry
                        boolean success = cache(entry);
                        if (success) {
                            cachedEntries.put(entry.getLSID(), entry);
                            cachedThisPass.add(entry);
                            break;
                        }
                        if (isDebugging)
                            log.debug(success);
                    }
                }
            }
            if (cachedThisPass.size() == 0) {
                // Bad news, nothing is getting cached
                // This means that there are uncached entries that
                // have unsatisfied dependencies
                // break out to avoid infinite loop
                // Vector<KAREntry> entriesWithBrokenDependencies = entries;
                break;
            }

            // remove any entries that got cached this pass
            for (KAREntry entry : cachedThisPass) {
                entries.remove(entry);
            }

        }
    } catch (Exception e) {
        e.printStackTrace();
    }

}