Example usage for java.util HashSet size

List of usage examples for java.util HashSet size

Introduction

On this page you can find example usages of java.util.HashSet.size().

Prototype

public int size() 

Source Link

Document

Returns the number of elements in this set (its cardinality).
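For a quick illustration of the prototype above, here is a minimal, self-contained sketch (the class name and element values are hypothetical) showing that size() reports the set's cardinality: duplicates are stored only once, and removals shrink the count.

import java.util.HashSet;

public class HashSetSizeDemo {
    public static void main(String[] args) {
        HashSet<String> users = new HashSet<String>();
        System.out.println(users.size());  // 0 - a new set is empty

        users.add("alice");
        users.add("bob");
        users.add("alice");                // duplicate, not stored again
        System.out.println(users.size());  // 2 - duplicates are not counted twice

        users.remove("bob");
        System.out.println(users.size());  // 1
    }
}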

Usage

From source file: dao.DirectoryAuthorDaoDb.java

/**
  *  This method lists all the users of directories with
  *  user-specific scope
  *  @param loginId  - loginId
  *  @param login - login
  *  @param accessFlag - slave (0) or master (1)
  *  @return HashSet - all users of the directories
  *  @throws BaseDaoException
  */
public HashSet listDirUsers(String loginId, String login, int accessFlag) throws BaseDaoException {
    if (RegexStrUtil.isNull(loginId) || RegexStrUtil.isNull(login)) {
        throw new BaseDaoException("params are null");
    }

    /** JBoss methods
     * fqn - fully qualified name
     */
    Fqn fqn = cacheUtil.fqn(DbConstants.DIR_USERS_SCOPE);
    Object obj = treeCache.get(fqn, loginId);
    if (obj != null) {
        return (HashSet) obj;
    }

    List dirList = listDirectoriesWithScope(loginId, login, DbConstants.READ_FROM_SLAVE);
    if (dirList == null) {
        throw new BaseDaoException("dirList is null, listDirUsers() login = " + login);
    }

    /**
     * Get scalability datasource, not partitioned
     */
    String sourceName = scalabilityManager.getReadZeroScalability();
    ds = scalabilityManager.getSource(sourceName);
    if (ds == null) {
        throw new BaseDaoException("ds null, listDirUsers() " + sourceName + " loginId = " + loginId);
    }

    /**
     * list users for directory user scope access
     */
    Connection conn = null;
    HashSet users = new HashSet();
    try {
        conn = ds.getConnection();
        for (int i = 0; i < dirList.size(); i++) {
            if (dirList.get(i) == null) {
                throw new BaseDaoException("dirList.get(i) is null, i= " + i + "dirAuthor = " + login);
            } else {
                String directoryId = ((Directory) dirList.get(i)).getValue(DbConstants.DIRECTORY_ID);
                HashSet result = listDirUsersQuery.run(conn, directoryId);
                if ((result != null) && (result.size() > 0)) {
                    // add the per-directory user set as a single element
                    users.add(result);
                }
                }
            }
        } // for
    } catch (Exception e) {
        try {
            if (conn != null) {
                conn.close();
            }
        } catch (Exception e1) {
            throw new BaseDaoException("error listDirUsers()", e1);
        }
        throw new BaseDaoException("error in listDirUsers() ", e);
    }

    try {
        if (conn != null) {
            conn.close();
        }
    } catch (Exception e) {
        throw new BaseDaoException("error, listDirUsers() conn.close(), login = " + login, e);
    }

    if (users != null) {
        treeCache.put(fqn, loginId, users);
    }
    return users;
}

From source file: me.azenet.UHPlugin.UHPluginCommand.java

/**
 * This command manages startup spectators (aka ignored players).
 *
 * Usage: /uh spec (doc)
 * Usage: /uh spec <add|remove|list>
 * 
 * @param sender
 * @param command
 * @param label
 * @param args
 */
@SuppressWarnings("unused")
private void doSpec(CommandSender sender, Command command, String label, String[] args) {
    if (args.length == 1) { // /uh spec
        displaySeparator(sender);
        sender.sendMessage(
                i.t("cmd.titleHelp", p.getDescription().getDescription(), p.getDescription().getVersion()));

        sender.sendMessage(i.t("cmd.legendHelp"));
        if (!p.getSpectatorPlusIntegration().isSPIntegrationEnabled()) {
            sender.sendMessage(i.t("cmd.specHelpNoticeSpectatorPlusNotInstalled"));
        }

        sender.sendMessage(i.t("cmd.specHelpTitle"));
        sender.sendMessage(i.t("cmd.specHelpAdd"));
        sender.sendMessage(i.t("cmd.specHelpRemove"));
        sender.sendMessage(i.t("cmd.specHelpList"));

        displaySeparator(sender);
    } else {
        String subcommand = args[1];

        if (subcommand.equalsIgnoreCase("add")) {
            if (args.length == 2) { // /uh spec add
                sender.sendMessage(i.t("spectators.syntaxError"));
            } else { // /uh spec add <player>
                Player newSpectator = p.getServer().getPlayer(args[2]);
                if (newSpectator == null) {
                    sender.sendMessage(i.t("spectators.offline", args[2]));
                } else {
                    p.getGameManager().addStartupSpectator(newSpectator);
                    sender.sendMessage(i.t("spectators.add.success", args[2]));
                }
            }
        }

        else if (subcommand.equalsIgnoreCase("remove")) {
            if (args.length == 2) { // /uh spec remove
                sender.sendMessage(i.t("spectators.syntaxError"));
            } else { // /uh spec remove <player>
                Player oldSpectator = p.getServer().getPlayer(args[2]);
                if (oldSpectator == null) {
                    sender.sendMessage(i.t("spectators.offline", args[2]));
                } else {
                    p.getGameManager().removeStartupSpectator(oldSpectator);
                    sender.sendMessage(i.t("spectators.remove.success", args[2]));
                }
            }
        }

        else if (subcommand.equalsIgnoreCase("list")) {
            HashSet<String> spectators = p.getGameManager().getStartupSpectators();
            if (spectators.size() == 0) {
                sender.sendMessage(i.t("spectators.list.nothing"));
            } else {
                sender.sendMessage(i.t("spectators.list.countSpectators", String.valueOf(spectators.size())));
                sender.sendMessage(i.t("spectators.list.countOnlyInitial"));
                for (String spectator : spectators) {
                    sender.sendMessage(i.t("spectators.list.itemSpec", spectator));
                }
            }
        }
    }
}

From source file: com.vgi.mafscaling.LogView.java

private void view3dPlots() {
    if (xAxisColumn.getSelectedItem() == null || xAxisColumn.getSelectedItem().toString().isEmpty()
            || yAxisColumn.getSelectedItem() == null || yAxisColumn.getSelectedItem().toString().isEmpty()
            || plotsColumn.getSelectedItems() == null)
        return;
    plot3d.removeAllPlots();
    String val;
    String xAxisColName = (String) xAxisColumn.getSelectedItem();
    String yAxisColName = (String) yAxisColumn.getSelectedItem();
    List<String> dataColNames = plotsColumn.getSelectedItems();
    if (dataColNames.size() > 5) {
        JOptionPane.showMessageDialog(null,
                "Sorry, only 5 plots are supported. More plots will make the graph too slow.",
                "Too many parameters", JOptionPane.ERROR_MESSAGE);
        return;
    }

    int xColIdx = logDataTable.getColumnByHeaderName(xAxisColName).getModelIndex() - 1;
    xColIdx = logDataTable.getCurrentIndexForOriginalColumn(xColIdx);
    int yColIdx = logDataTable.getColumnByHeaderName(yAxisColName).getModelIndex() - 1;
    yColIdx = logDataTable.getCurrentIndexForOriginalColumn(yColIdx);
    ArrayList<Color> colorsArray = new ArrayList<Color>();
    colorsArray.add(Color.BLUE);
    colorsArray.add(Color.RED);
    colorsArray.add(Color.GREEN);
    colorsArray.add(Color.ORANGE);
    colorsArray.add(Color.GRAY);
    double x, y, z;
    XYZ xyz;
    for (int j = 0; j < dataColNames.size(); ++j) {
        HashSet<XYZ> uniqueXYZ = new HashSet<XYZ>();
        int zColIdx = logDataTable.getColumnByHeaderName(dataColNames.get(j)).getModelIndex() - 1;
        zColIdx = logDataTable.getCurrentIndexForOriginalColumn(zColIdx);
        int count = 0;
        double[][] xyzArrayTemp = new double[logDataTable.getRowCount()][3];
        for (int i = 0; i < logDataTable.getRowCount(); ++i) {
            val = (String) logDataTable.getValueAt(i, xColIdx);
            x = Double.valueOf(val);
            val = (String) logDataTable.getValueAt(i, yColIdx);
            y = Double.valueOf(val);
            val = (String) logDataTable.getValueAt(i, zColIdx);
            z = Double.valueOf(val);
            xyz = new XYZ(x, y, z);
            if (uniqueXYZ.contains(xyz))
                continue;
            uniqueXYZ.add(xyz);
            xyzArrayTemp[count][0] = x;
            xyzArrayTemp[count][1] = y;
            xyzArrayTemp[count][2] = z;
            count += 1;
        }
        double[][] xyzArray = new double[uniqueXYZ.size()][3];
        for (int k = 0; k < xyzArray.length; ++k)
            System.arraycopy(xyzArrayTemp[k], 0, xyzArray[k], 0, 3);
        plot3d.addScatterPlot(dataColNames.get(j), colorsArray.get(j), xyzArray);
    }
    plot3d.setAxisLabel(0, xAxisColumn.getSelectedItem().toString());
    plot3d.setAxisLabel(1, yAxisColumn.getSelectedItem().toString());
    plot3d.setAxisLabel(2, plotsColumn.getSelectedItemsString());
}

From source file: net.longfalcon.newsj.Releases.java

public void processReleases() {
    String startDateString = DateUtil.displayDateFormatter.print(System.currentTimeMillis());
    _log.info(String.format("Starting release update process (%s)", startDateString));

    // get site config TODO: use config service
    Site site = siteDAO.getDefaultSite();

    int retcount = 0;

    Directory nzbBaseDir = fileSystemService.getDirectory("/nzbs");

    checkRegexesUptoDate(site.getLatestRegexUrl(), site.getLatestRegexRevision());

    // Stage 0

    // this is a hack - tx is not working ATM
    TransactionStatus transaction = transactionManager
            .getTransaction(new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRED));

    //
    // Get all regexes for all groups which are to be applied to new binaries
    // in order of how they should be applied
    //
    List<ReleaseRegex> releaseRegexList = releaseRegexDAO.getRegexes(true, "-1", false);
    for (ReleaseRegex releaseRegex : releaseRegexList) {

        String releaseRegexGroupName = releaseRegex.getGroupName();
        _log.info(String.format("Applying regex %d for group %s", releaseRegex.getId(),
                ValidatorUtil.isNull(releaseRegexGroupName) ? "all" : releaseRegexGroupName));

        // compile the regex early, to test them
        String regex = releaseRegex.getRegex();
        Pattern pattern = Pattern.compile(fixRegex(regex), Pattern.CASE_INSENSITIVE); // remove '/' and '/i'

        HashSet<Long> groupMatch = new LinkedHashSet<>();

        //
        // Groups ending in * need to be LIKE-matched when getting out binaries for groups and children
        //
        Matcher matcher = _wildcardPattern.matcher(releaseRegexGroupName);
        if (matcher.matches()) {
            releaseRegexGroupName = releaseRegexGroupName.substring(0, releaseRegexGroupName.length() - 1);
            List<Group> groups = groupDAO.findGroupsByName(releaseRegexGroupName);
            for (Group group : groups) {
                groupMatch.add(group.getId());
            }
        } else if (!ValidatorUtil.isNull(releaseRegexGroupName)) {
            Group group = groupDAO.getGroupByName(releaseRegexGroupName);
            if (group != null) {
                groupMatch.add(group.getId());
            }
        }

        List<Binary> binaries = new ArrayList<>();
        if (groupMatch.size() > 0) {
            // Get out all binaries of STAGE0 for current group
            binaries = binaryDAO.findByGroupIdsAndProcStat(groupMatch, Defaults.PROCSTAT_NEW);
        }

        Map<String, String> arrNoPartBinaries = new LinkedHashMap<>();
        DateTime fiveHoursAgo = DateTime.now().minusHours(5);

        // this for loop should probably be a single transaction
        for (Binary binary : binaries) {
            String testMessage = "Test run - Binary Name " + binary.getName();

            Matcher groupRegexMatcher = pattern.matcher(binary.getName());
            if (groupRegexMatcher.find()) {
                String reqIdGroup = null;
                try {
                    reqIdGroup = groupRegexMatcher.group("reqid");
                } catch (IllegalArgumentException e) {
                    _log.debug(e.toString());
                }
                String partsGroup = null;
                try {
                    partsGroup = groupRegexMatcher.group("parts");
                } catch (IllegalArgumentException e) {
                    _log.debug(e.toString());
                }
                String nameGroup = null;
                try {
                    nameGroup = groupRegexMatcher.group("name");
                } catch (Exception e) {
                    _log.debug(e.toString());
                }
                _log.debug(testMessage + " matches with: \n reqId = " + reqIdGroup + " parts = " + partsGroup
                        + " and name = " + nameGroup);

                if ((ValidatorUtil.isNotNull(reqIdGroup) && ValidatorUtil.isNumeric(reqIdGroup))
                        && ValidatorUtil.isNull(nameGroup)) {
                    nameGroup = reqIdGroup;
                }

                if (ValidatorUtil.isNull(nameGroup)) {
                    _log.warn(String.format(
                            "regex applied which didnt return right number of capture groups - %s", regex));
                    _log.warn(String.format("regex matched: reqId = %s parts = %s and name = %s", reqIdGroup,
                            partsGroup, nameGroup));
                    continue;
                }

                // If there's no number-of-files data in the subject, put it into a release if it was posted to usenet more than five hours ago.
                if ((ValidatorUtil.isNull(partsGroup) && fiveHoursAgo.isAfter(binary.getDate().getTime()))) {
                    //
                    // Take a copy of the name of this no-part release found. This can be used
                    // next time round the loop to find parts of this set, but which have not yet reached 3 hours.
                    //
                    arrNoPartBinaries.put(nameGroup, "1");
                    partsGroup = "01/01";
                }

                if (ValidatorUtil.isNotNull(nameGroup) && ValidatorUtil.isNotNull(partsGroup)) {

                    if (partsGroup.indexOf('/') == -1) {
                        partsGroup = partsGroup.replaceFirst("(-)|(~)|(\\sof\\s)", "/"); // replace weird parts delimiters
                    }

                    Integer regexCategoryId = releaseRegex.getCategoryId();
                    Integer reqId = null;
                    if (ValidatorUtil.isNotNull(reqIdGroup) && ValidatorUtil.isNumeric(reqIdGroup)) {
                        reqId = Integer.parseInt(reqIdGroup);
                    }

                    //check if post is repost
                    Pattern repostPattern = Pattern.compile("(repost\\d?|re\\-?up)", Pattern.CASE_INSENSITIVE);
                    Matcher binaryNameRepostMatcher = repostPattern.matcher(binary.getName());

                    if (binaryNameRepostMatcher.find()
                            && !nameGroup.toLowerCase().matches("^[\\s\\S]+(repost\\d?|re\\-?up)")) {
                        nameGroup = nameGroup + (" " + binaryNameRepostMatcher.group(1));
                    }

                    String partsStrings[] = partsGroup.split("/");
                    int relpart = Integer.parseInt(partsStrings[0]);
                    int relTotalPart = Integer.parseInt(partsStrings[1]);

                    binary.setRelName(nameGroup.replace("_", " "));
                    binary.setRelPart(relpart);
                    binary.setRelTotalPart(relTotalPart);
                    binary.setProcStat(Defaults.PROCSTAT_TITLEMATCHED);
                    binary.setCategoryId(regexCategoryId);
                    binary.setRegexId(releaseRegex.getId());
                    binary.setReqId(reqId);
                    binaryDAO.updateBinary(binary);

                }
            }
        }

    }

    transactionManager.commit(transaction);

    // this is a hack - tx is not working ATM
    transaction = transactionManager
            .getTransaction(new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRED));
    //
    // Move all binaries from releases which have the correct number of files on to the next stage.
    //
    _log.info("Stage 2");
    List<MatchedReleaseQuery> matchedReleaseQueries = binaryDAO
            .findBinariesByProcStatAndTotalParts(Defaults.PROCSTAT_TITLEMATCHED);
    matchedReleaseQueries = combineMatchedQueries(matchedReleaseQueries);

    int siteMinFilestoFormRelease = site.getMinFilesToFormRelease();

    for (MatchedReleaseQuery matchedReleaseQuery : matchedReleaseQueries) {
        retcount++;

        //
        // Fewer than the site-permitted number of files in a release. Don't discard it, as it may
        // be part of a set being uploaded.
        //
        int minFiles = siteMinFilestoFormRelease;
        String releaseName = matchedReleaseQuery.getReleaseName();
        long matchedReleaseQueryGroup = matchedReleaseQuery.getGroup();
        Long matchedReleaseQueryNumberOfBinaries = matchedReleaseQuery.getNumberOfBinaries();
        int matchecReleaseTotalParts = matchedReleaseQuery.getReleaseTotalParts();
        String fromName = matchedReleaseQuery.getFromName();
        Integer reqId = matchedReleaseQuery.getReqId();

        Group group = groupDAO.findGroupByGroupId(matchedReleaseQueryGroup);
        if (group != null && group.getMinFilesToFormRelease() != null) {
            minFiles = group.getMinFilesToFormRelease();
        }

        if (matchedReleaseQueryNumberOfBinaries < minFiles) {

            _log.warn(String.format("Number of files in release %s less than site/group setting (%s/%s)",
                    releaseName, matchedReleaseQueryNumberOfBinaries, minFiles));

            binaryDAO.updateBinaryIncrementProcAttempts(releaseName, Defaults.PROCSTAT_TITLEMATCHED,
                    matchedReleaseQueryGroup, fromName);
        } else if (matchedReleaseQueryNumberOfBinaries >= matchecReleaseTotalParts) {
            // Check that the binary is complete
            List<Binary> releaseBinaryList = binaryDAO.findBinariesByReleaseNameProcStatGroupIdFromName(
                    releaseName, Defaults.PROCSTAT_TITLEMATCHED, matchedReleaseQueryGroup, fromName);

            boolean incomplete = false;
            for (Binary binary : releaseBinaryList) {
                long partsCount = partDAO.countPartsByBinaryId(binary.getId());
                if (partsCount < binary.getTotalParts()) {
                    float percentComplete = ((float) partsCount / (float) binary.getTotalParts()) * 100;
                    _log.warn(String.format("binary %s from %s has missing parts = %s/%s (%s%% complete)",
                            binary.getId(), releaseName, partsCount, binary.getTotalParts(), percentComplete));

                    // Allow the binary to release if it was posted to usenet more than four hours ago and we still don't have all the parts
                    DateTime fourHoursAgo = DateTime.now().minusHours(4);
                    if (fourHoursAgo.isAfter(new DateTime(binary.getDate()))) {
                        _log.info("allowing incomplete binary " + binary.getId());
                    } else {
                        incomplete = true;
                    }
                }
            }

            if (incomplete) {
                _log.warn(String.format("Incorrect number of parts %s-%s-%s", releaseName,
                        matchedReleaseQueryNumberOfBinaries, matchecReleaseTotalParts));
                binaryDAO.updateBinaryIncrementProcAttempts(releaseName, Defaults.PROCSTAT_TITLEMATCHED,
                        matchedReleaseQueryGroup, fromName);
            }

            //
            // Right number of files, but see if the binary is an allfilled/reqid post, in which case it needs its name looked up
            // TODO: Does this even work anymore?
            else if (ValidatorUtil.isNotNull(site.getReqIdUrl()) && ValidatorUtil.isNotNull(reqId)) {

                //
                // Try and get the name using the group
                //
                _log.info("Looking up " + reqId + " in " + group.getName() + "...");
                String newTitle = getReleaseNameForReqId(site.getReqIdUrl(), group, reqId, true);

                //
                // if the feed/group wasn't supported by the scraper, then just use the release name as the title.
                //
                if (ValidatorUtil.isNull(newTitle) || newTitle.equals("no feed")) {
                    newTitle = releaseName;
                    _log.warn("Group not supported");
                }

                //
                // Valid release with right number of files and title now, so move it on
                //
                if (ValidatorUtil.isNotNull(newTitle)) {
                    binaryDAO.updateBinaryNameAndStatus(newTitle, Defaults.PROCSTAT_READYTORELEASE, releaseName,
                            Defaults.PROCSTAT_TITLEMATCHED, matchedReleaseQueryGroup, fromName);
                } else {
                    //
                    // Item not found; if the binary was added to the index more than two days ago, give up.
                    //
                    Timestamp timestamp = binaryDAO.findMaxDateAddedBinaryByReleaseNameProcStatGroupIdFromName(
                            releaseName, Defaults.PROCSTAT_TITLEMATCHED, matchedReleaseQueryGroup, fromName);
                    DateTime maxAddedDate = new DateTime(timestamp);
                    DateTime twoDaysAgo = DateTime.now().minusDays(2);

                    if (maxAddedDate.isBefore(twoDaysAgo)) {
                        binaryDAO.updateBinaryNameAndStatus(releaseName,
                                Defaults.PROCSTAT_NOREQIDNAMELOOKUPFOUND, releaseName,
                                Defaults.PROCSTAT_TITLEMATCHED, matchedReleaseQueryGroup, fromName);
                        _log.warn("Not found in 48 hours");
                    }
                }
            } else {
                binaryDAO.updateBinaryNameAndStatus(releaseName, Defaults.PROCSTAT_READYTORELEASE, releaseName,
                        Defaults.PROCSTAT_TITLEMATCHED, matchedReleaseQueryGroup, fromName);

            }
        } else {
            //
            // There are fewer than the expected number of files, so update the attempts and move on.
            //

            _log.info(String.format("Incorrect number of files for %s (%d/%d)", releaseName,
                    matchedReleaseQueryNumberOfBinaries, matchecReleaseTotalParts));
            binaryDAO.updateBinaryIncrementProcAttempts(releaseName, Defaults.PROCSTAT_TITLEMATCHED,
                    matchedReleaseQueryGroup, fromName);
        }

        if (retcount % 10 == 0) {
            _log.info(String.format("-processed %d binaries stage two", retcount));
        }

    }
    transactionManager.commit(transaction);

    retcount = 0;
    int nfoCount = 0;

    // this is a hack - tx is not working ATM
    transaction = transactionManager
            .getTransaction(new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRED));
    //
    // Get out all distinct relname, group from binaries of STAGE2
    //
    _log.info("Stage 3");
    List<MatchedReleaseQuery> readyReleaseQueries = binaryDAO
            .findBinariesByProcStatAndTotalParts(Defaults.PROCSTAT_READYTORELEASE);
    readyReleaseQueries = combineMatchedQueries(readyReleaseQueries);
    for (MatchedReleaseQuery readyReleaseQuery : readyReleaseQueries) {
        retcount++;

        String releaseName = readyReleaseQuery.getReleaseName();
        int numParts = readyReleaseQuery.getReleaseTotalParts();
        long binaryCount = readyReleaseQuery.getNumberOfBinaries();
        long groupId = readyReleaseQuery.getGroup();
        //
        // Get the last post date and the poster name from the binary
        //
        String fromName = readyReleaseQuery.getFromName();
        Timestamp timestamp = binaryDAO.findMaxDateAddedBinaryByReleaseNameProcStatGroupIdFromName(releaseName,
                Defaults.PROCSTAT_READYTORELEASE, groupId, fromName);
        DateTime addedDate = new DateTime(timestamp);

        //
        // Get all releases with the same name and a usenet post date within a +/- one day range.
        //
        Date oneDayBefore = addedDate.minusDays(1).toDate();
        Date oneDayAfter = addedDate.plusDays(1).toDate();
        List<Release> relDupes = releaseDAO.findReleasesByNameAndDateRange(releaseName, oneDayBefore,
                oneDayAfter);

        if (!relDupes.isEmpty()) {
            binaryDAO.updateBinaryNameAndStatus(releaseName, Defaults.PROCSTAT_DUPLICATE, releaseName,
                    Defaults.PROCSTAT_READYTORELEASE, groupId, fromName);
            continue;
        }

        //
        // Get total size of this release
        // Done in a big OR statement, not an IN, as the MySQL binaryID index on the parts table
        // was not being used.
        //

        // SM: TODO this should be revisited, using hb mappings

        long totalSize = 0;
        int regexAppliedCategoryId = 0;
        long regexIdUsed = 0;
        int reqIdUsed = 0;
        int relTotalParts = 0;
        float relCompletion;
        List<Binary> binariesForSize = binaryDAO.findBinariesByReleaseNameProcStatGroupIdFromName(releaseName,
                Defaults.PROCSTAT_READYTORELEASE, groupId, fromName);

        long relParts = 0;
        for (Binary binary : binariesForSize) {
            if (ValidatorUtil.isNotNull(binary.getCategoryId()) && regexAppliedCategoryId == 0) {
                regexAppliedCategoryId = binary.getCategoryId();
            }

            if (ValidatorUtil.isNotNull(binary.getRegexId()) && regexIdUsed == 0) {
                regexIdUsed = binary.getRegexId();
            }

            if (ValidatorUtil.isNotNull(binary.getReqId()) && reqIdUsed == 0) {
                reqIdUsed = binary.getReqId();
            }

            relTotalParts += binary.getTotalParts();
            relParts += partDAO.countPartsByBinaryId(binary.getId());
            totalSize += partDAO.sumPartsSizeByBinaryId(binary.getId());
        }
        relCompletion = ((float) relParts / (float) relTotalParts) * 100f;

        //
        // Insert the release
        //

        String releaseGuid = UUID.randomUUID().toString();
        int categoryId;
        Category category = null;
        Long regexId;
        Integer reqId;
        if (regexAppliedCategoryId == 0) {
            categoryId = categoryService.determineCategory(groupId, releaseName);
        } else {
            categoryId = regexAppliedCategoryId;
        }
        if (categoryId > 0) {
            category = categoryService.getCategory(categoryId);
        }

        if (regexIdUsed == 0) {
            regexId = null;
        } else {
            regexId = regexIdUsed;
        }

        if (reqIdUsed == 0) {
            reqId = null;
        } else {
            reqId = reqIdUsed;
        }

        // Clean the release name of '#', '@', '$', '%', '^' and other special characters
        String cleanReleaseName = releaseName.replaceAll("[^A-Za-z0-9-_\\ \\.]+", "");
        Release release = new Release();
        release.setName(cleanReleaseName);
        release.setSearchName(cleanReleaseName);
        release.setTotalpart(numParts);
        release.setGroupId(groupId);
        release.setAddDate(new Date());
        release.setGuid(releaseGuid);
        release.setCategory(category);
        release.setRegexId(regexId);
        release.setRageId((long) -1);
        release.setPostDate(addedDate.toDate());
        release.setFromName(fromName);
        release.setSize(totalSize);
        release.setReqId(reqId);
        release.setPasswordStatus(site.getCheckPasswordedRar() == 1 ? -1 : 0); // magic constants
        release.setCompletion(relCompletion);
        releaseDAO.updateRelease(release);
        long releaseId = release.getId();
        _log.info("Added release " + cleanReleaseName);

        //
        // Tag every binary for this release with its parent release id
        // remove the release name from the binary as it's no longer required
        //
        binaryDAO.updateBinaryNameStatusReleaseID("", Defaults.PROCSTAT_RELEASED, releaseId, releaseName,
                Defaults.PROCSTAT_READYTORELEASE, groupId, fromName);

        //
        // Find an .nfo in the release
        //
        ReleaseNfo releaseNfo = nfo.determineReleaseNfo(release);
        if (releaseNfo != null) {
            nfo.addReleaseNfo(releaseNfo);
            nfoCount++;
        }

        //
        // Write the nzb to disk
        //
        nzb.writeNZBforReleaseId(release, nzbBaseDir, true);

        if (retcount % 5 == 0) {
            _log.info("-processed " + retcount + " releases stage three");
        }

    }

    _log.info("Found " + nfoCount + " nfos in " + retcount + " releases");

    //
    // Process nfo files
    //
    if (site.getLookupNfo() != 1) {
        _log.info("Site config (site.lookupnfo) prevented retrieving nfos");
    } else {
        nfo.processNfoFiles(site.getLookupImdb(), site.getLookupTvRage());
    }

    //
    // Lookup imdb if enabled
    //
    if (site.getLookupImdb() == 1) {
        movieService.processMovieReleases();
    }

    //
    // Lookup music if enabled
    //
    if (site.getLookupMusic() == 1) {
        musicService.processMusicReleases();
    }

    //
    // Lookup games if enabled
    //
    if (site.getLookupGames() == 1) {
        gameService.processConsoleReleases();
    }

    //
    // Check for passworded releases
    //
    if (site.getCheckPasswordedRar() != 1) {
        _log.info("Site config (site.checkpasswordedrar) prevented checking releases are passworded");
    } else {
        processPasswordedReleases(true);
    }

    //
    // Process all TV related releases which will assign their series/episode/rage data
    //
    tvRageService.processTvReleases(site.getLookupTvRage() == 1);

    //
    // Get the current datetime again, as using now() in the housekeeping queries prevents the index being used.
    //
    DateTime now = new DateTime();

    //
    // Tidy away any binaries which have been attempted to be grouped into
    // a release more than x times (SM: or is it days?)
    //
    int attemtpGroupBinDays = site.getAttemtpGroupBinDays();
    _log.info(String.format("Tidying away binaries which cant be grouped after %s days", attemtpGroupBinDays));

    DateTime maxGroupBinDays = now.minusDays(attemtpGroupBinDays);
    binaryDAO.updateProcStatByProcStatAndDate(Defaults.PROCSTAT_WRONGPARTS, Defaults.PROCSTAT_NEW,
            maxGroupBinDays.toDate());

    //
    // Delete any parts and binaries which are older than the site's retention days
    //
    int maxRetentionDays = site.getRawRetentionDays();
    DateTime maxRetentionDate = now.minusDays(maxRetentionDays);
    _log.info(String.format("Deleting parts which are older than %d days", maxRetentionDays));
    partDAO.deletePartByDate(maxRetentionDate.toDate());

    _log.info(String.format("Deleting binaries which are older than %d days", maxRetentionDays));
    binaryDAO.deleteBinaryByDate(maxRetentionDate.toDate());

    //
    // Delete any releases which are older than site's release retention days
    //
    int releaseretentiondays = site.getReleaseRetentionDays();
    if (releaseretentiondays != 0) {
        _log.info("Determining any releases past retention to be deleted.");

        DateTime maxReleaseRetentionDate = DateTime.now().minusDays(releaseretentiondays);
        List<Release> releasesToDelete = releaseDAO.findReleasesBeforeDate(maxReleaseRetentionDate.toDate());
        for (Iterator<Release> iterator = releasesToDelete.iterator(); iterator.hasNext();) {
            Release release = iterator.next();
            releaseDAO.deleteRelease(release);
        }
    }
    transaction.flush(); // may be unneeded
    transactionManager.commit(transaction);

    _log.info(String.format("Processed %d releases", retcount));
    if (!transaction.isCompleted()) {
        throw new IllegalStateException("Transaction is not completed or rolled back.");
    }
    //return retcount;
}

From source file: org.eclipse.january.dataset.AbstractDatasetTest.java

/**
 * Test equals and hashCode
 */
@SuppressWarnings("deprecation")
@Test
public void testEquals() {
    Dataset a, b, c, d, e;
    a = DatasetFactory.createRange(20, Dataset.FLOAT64);
    b = DatasetFactory.createRange(20, Dataset.FLOAT64);
    c = a.clone();
    d = Maths.add(a, 0.5);
    e = DatasetFactory.createRange(20, Dataset.FLOAT32);

    assertTrue(a.equals(b));
    assertFalse(a == b);
    assertTrue(a.equals(c));
    assertFalse(a == c);
    assertFalse(a.equals(d));
    assertFalse(a.equals(e));
    HashSet<Dataset> set = new HashSet<Dataset>();
    assertTrue(set.add(a));
    assertTrue(set.contains(a));
    assertTrue(set.contains(b));
    assertTrue(set.contains(c));
    assertFalse(set.contains(d));
    assertFalse(set.contains(e));
    assertFalse(set.add(b)); // b is same as a so do nothing
    assertEquals(1, set.size());
    assertTrue(set.add(d));
    assertTrue(set.add(e));
    assertEquals(3, set.size());
    assertTrue(set.contains(d));
    assertTrue(set.contains(e));
    assertTrue(set.contains(Maths.subtract(d, 0.5)));
    assertFalse(set.contains(Maths.subtract(d, 0.5001)));
    assertTrue(set.contains(e.cast(Dataset.FLOAT64)));
    assertTrue(set.contains(b.cast(Dataset.FLOAT32)));
}

From source file: it.iit.genomics.cru.igb.bundles.mi.business.MIWorker.java

@Override
protected void done() {

    boolean failed = false;

    try {
        results.addAll(get());
    } catch (InterruptedException | ExecutionException ignore) {
        igbLogger.getLogger().error("Failed to analyze the selected regions", ignore);
        failed = true;
    }

    HashSet<String> querySummaryParts = new HashSet<>();
    HashSet<String> queryChromosomes = new HashSet<>();

    for (MISymContainer container : symManager.getQueryContainers()) {
        String chr = container.getChromosomeName();

        String protein = "<b>" + container.getEntry().getGeneName() + "</b>/"
                + container.getEntry().getUniprotAc();

        querySummaryParts.add(protein);
        queryChromosomes.add(chr.replace("chr", ""));

    }

    String querySummary = "Query: type=" + query.getQueryType() + ", "
            + PsicquicUtils.getInstance().getServerName(query.getPsiquicServer());

    if (query.searchEPPIC()) {
        querySummary += " EPPIC ";
    }

    if (query.searchPDB()) {
        querySummary += " PDB ";
    }

    if (query.searchInteractome3D()) {
        querySummary += " Interactome3D ";
    }
    if (query.searchDSysMap()) {
        querySummary += " DSysMap ";
    }

    if (query.searchPPI()) {
        querySummary += ", protein-protein";
    }

    if (query.searchNucleicAcid()) {
        querySummary += ", DNA/RNA";
    }

    if (query.searchLigands()) {
        querySummary += ", small molecules";
    }

    querySummary += "<br/>";

    if (querySummaryParts.size() <= 5) {
        querySummary += StringUtils.join(querySummaryParts, ", ");
    } else {
        querySummary += querySummaryParts.size() + " genes on chromosome(s) "
                + StringUtils.join(queryChromosomes, ",");
    }

    if (failed) {
        querySummary += " <b><font color=\"red\">The query failed, please check the log for more informations.</font></b>";
    } else if (igbLogger.hasError()) {
        querySummary += " <font color=\"red\">Some errors happend, please check the log for more informations.</font>";
    }

    addResultTab(querySummary, results, query.getLabel(), query);

    MIView.getInstance().getMiConfigurationPanel().updateCacheLabel();

    createTrack();
    setProgress(100);
    logAndPublish("done");

    igbLogger.info("Query over.");
}

From source file: com.datatorrent.stram.engine.StreamingContainer.java

public synchronized void activate(final Map<Integer, OperatorDeployInfo> nodeMap,
        Map<String, ComponentContextPair<Stream, StreamContext>> newStreams) {
    for (ComponentContextPair<Stream, StreamContext> pair : newStreams.values()) {
        if (!(pair.component instanceof BufferServerSubscriber)) {
            activeStreams.put(pair.component, pair.context);
            pair.component.activate(pair.context);
            eventBus.publish(new StreamActivationEvent(pair));
        }
    }

    final CountDownLatch signal = new CountDownLatch(nodeMap.size());
    for (final OperatorDeployInfo ndi : nodeMap.values()) {
        /*
         * OiO nodes get activated with their primary nodes.
         */
        if (ndi.type == OperatorType.OIO) {
            continue;
        }

        final Node<?> node = nodes.get(ndi.id);
        final String name = new StringBuilder(Integer.toString(ndi.id)).append('/').append(ndi.name).append(':')
                .append(node.getOperator().getClass().getSimpleName()).toString();
        final Thread thread = new Thread(name) {
            @Override
            public void run() {
                HashSet<OperatorDeployInfo> setOperators = new HashSet<OperatorDeployInfo>();
                OperatorDeployInfo currentdi = ndi;
                try {
                    /* primary operator initialization */
                    setupNode(currentdi);
                    setOperators.add(currentdi);

                    /* lets go for OiO operator initialization */
                    List<Integer> oioNodeIdList = oioGroups.get(ndi.id);
                    if (oioNodeIdList != null) {
                        for (Integer oioNodeId : oioNodeIdList) {
                            currentdi = nodeMap.get(oioNodeId);
                            setupNode(currentdi);
                            setOperators.add(currentdi);
                        }
                    }

                    currentdi = null;

                    for (int i = setOperators.size(); i-- > 0;) {
                        signal.countDown();
                    }

                    node.run(); /* this is a blocking call */
                } catch (Error error) {
                    int[] operators;
                    if (currentdi == null) {
                        logger.error("Voluntary container termination due to an error in operator set {}.",
                                setOperators, error);
                        operators = new int[setOperators.size()];
                        int i = 0;
                        for (Iterator<OperatorDeployInfo> it = setOperators.iterator(); it.hasNext(); i++) {
                            operators[i] = it.next().id;
                        }
                    } else {
                        logger.error("Voluntary container termination due to an error in operator {}.",
                                currentdi, error);
                        operators = new int[] { currentdi.id };
                    }
                    umbilical.reportError(containerId, operators,
                            "Voluntary container termination due to an error. "
                                    + ExceptionUtils.getStackTrace(error));
                    System.exit(1);
                } catch (Exception ex) {
                    if (currentdi == null) {
                        failedNodes.add(ndi.id);
                        logger.error("Operator set {} stopped running due to an exception.", setOperators, ex);
                        int[] operators = new int[] { ndi.id };
                        umbilical.reportError(containerId, operators,
                                "Stopped running due to an exception. " + ExceptionUtils.getStackTrace(ex));
                    } else {
                        failedNodes.add(currentdi.id);
                        logger.error("Abandoning deployment of operator {} due to setup failure.", currentdi,
                                ex);
                        int[] operators = new int[] { currentdi.id };
                        umbilical.reportError(containerId, operators,
                                "Abandoning deployment due to setup failure. "
                                        + ExceptionUtils.getStackTrace(ex));
                    }
                } finally {
                    if (setOperators.contains(ndi)) {
                        try {
                            teardownNode(ndi);
                        } catch (Exception ex) {
                            failedNodes.add(ndi.id);
                            logger.error("Shutdown of operator {} failed due to an exception.", ndi, ex);
                        }
                    } else {
                        signal.countDown();
                    }

                    List<Integer> oioNodeIdList = oioGroups.get(ndi.id);
                    if (oioNodeIdList != null) {
                        for (Integer oioNodeId : oioNodeIdList) {
                            OperatorDeployInfo oiodi = nodeMap.get(oioNodeId);
                            if (setOperators.contains(oiodi)) {
                                try {
                                    teardownNode(oiodi);
                                } catch (Exception ex) {
                                    failedNodes.add(oiodi.id);
                                    logger.error("Shutdown of operator {} failed due to an exception.", oiodi,
                                            ex);
                                }
                            } else {
                                signal.countDown();
                            }
                        }
                    }
                }
            }
        };
        node.context.setThread(thread);
        thread.start();
    }

    /**
     * we need to make sure that before any of the operators gets the first message, it's activated.
     */
    try {
        signal.await();
    } catch (InterruptedException ex) {
        logger.debug("Activation of operators interrupted.", ex);
    }

    for (ComponentContextPair<Stream, StreamContext> pair : newStreams.values()) {
        if (pair.component instanceof BufferServerSubscriber) {
            activeStreams.put(pair.component, pair.context);
            pair.component.activate(pair.context);
            eventBus.publish(new StreamActivationEvent(pair));
        }
    }

    for (WindowGenerator wg : generators.values()) {
        if (!activeGenerators.containsKey(wg)) {
            activeGenerators.put(wg, generators);
            wg.activate(null);
        }
    }
}

From source file: org.getobjects.ofs.OFSFileContainerChildInfo.java

protected Exception load() {
    // IMPORTANT: object must be thread-safe after the load! It's cached in a
    //            global map
    if (this.fileNames != null)
        return null; /* already loaded */

    /* load subfiles */

    this.timestamp = this.fileInfo.lastModified();
    this.fileNames = this.fileManager.childNamesAtPath(this.fileInfo.getPath());

    if (this.fileNames == null) {
        log().warn("directory returned no files: " + this);
        return new GoInternalErrorException("could not list directory: " + this.fileInfo.getName());
    }

    /* check if it's empty */

    if (this.fileNames.length == 0) {
        this.fileIds = emptyStringArray;
        this.ids = this.fileIds;
        return null;
    }

    /* extract file information */

    final HashSet<String> idUniquer = new HashSet<String>(this.fileNames.length);
    this.fileIds = new String[this.fileNames.length];
    this.fileTypes = new String[this.fileNames.length];

    int removeCount = 0;
    for (int i = (this.fileNames.length - 1); i >= 0; i--) {
        String fn = this.fileNames[i];
        int dotIdx = fn != null ? fn.lastIndexOf('.') : -1;

        if (!this.accept(fn)) {
            this.fileNames[i] = null;
            removeCount += 1;
            continue;
        }

        if (dotIdx == -1) /* not recommended, file has no extension (README) */
            this.fileIds[i] = fn;
        else {
            this.fileIds[i] = fn.substring(0, dotIdx);
            this.fileTypes[i] = fn.substring(dotIdx + 1);
        }

        if (this.fileIds[i] != null && !(fn.startsWith(this.fileIds[i]))) {
            System.err.println("map: " + fn);
            System.err.println(" to: " + this.fileIds[i]);
        }

        if (this.fileIds[i] != null)
            idUniquer.add(this.fileIds[i]);
    }

    if (removeCount > 0) {
        int len = this.fileNames.length - removeCount;
        if (len == 0) {
            this.fileNames = emptyStringArray;
            this.fileIds = this.fileNames;
            this.ids = this.fileIds;
            this.fileTypes = this.ids;
            return null;
        }
        String[] censoredFileNames = new String[len];
        String[] censoredFileIds = new String[len];
        String[] censoredFileTypes = new String[len];
        int dstIdx = 0;
        for (int i = 0; i < this.fileNames.length; i++) {
            if (this.fileNames[i] != null) {
                censoredFileNames[dstIdx] = this.fileNames[i];
                censoredFileIds[dstIdx] = this.fileIds[i];
                censoredFileTypes[dstIdx] = this.fileTypes[i];
                dstIdx++;
            }
        }
        this.fileNames = censoredFileNames;
        this.fileIds = censoredFileIds;
        this.fileTypes = censoredFileTypes;
    }

    /* check whether all files were unique and included */

    if (this.fileNames.length == idUniquer.size()) {
        /* all IDs were unique */
        this.ids = this.fileIds;
    } else {
        /* we found DUPs */
        this.ids = idUniquer.toArray(emptyStringArray);
    }
    if (this.ids != null) {
        if (this.ids == this.fileIds) {
            /* Note: if we don't do this, both arrays will be sorted, because they
             *       share the same pointer ...
             */
            this.ids = new String[this.fileIds.length];
            System.arraycopy(this.fileIds, 0, this.ids, 0, this.fileIds.length);
        }
        Arrays.sort(this.ids);
    }

    /* debug */
    if (false) {
        for (int j = 0; j < this.fileNames.length; j++) {
            System.err.println("  id: " + this.fileIds[j]);
            System.err.println("  =>: " + this.fileNames[j]);
        }
    }
    return null; /* everything is awesome */
}