Example usage for java.util.TreeSet.size()

Introduction

On this page you can find usage examples for java.util.TreeSet.size().

Prototype

public int size() 

Document

Returns the number of elements in this set (its cardinality).
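
size() runs in constant time, and duplicates never inflate the count because a TreeSet stores each element at most once. A minimal, self-contained sketch (the class name and values are invented for illustration):

import java.util.TreeSet;

public class TreeSetSizeDemo {
    public static void main(String[] args) {
        TreeSet<String> names = new TreeSet<String>();
        names.add("alice");
        names.add("bob");
        names.add("alice"); // duplicate: the set ignores it

        System.out.println(names.size()); // prints 2

        names.remove("bob");
        System.out.println(names.size()); // prints 1
    }
}

A recurring pattern in the examples below is set.toArray(new String[set.size()]), which uses size() to allocate an exactly-sized destination array.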

Usage

From source file:org.unitime.timetable.solver.exam.ExamSolver.java

public ExamSuggestionsInfo getSuggestions(long examId, ExamProposedChange change, String filter, int depth,
        int limit, long timeOut) {
    Lock lock = currentSolution().getLock().writeLock();
    lock.lock();
    try {
        Exam exam = getExam(examId);
        if (exam == null)
            return null;
        ExamSuggestions s = new ExamSuggestions(this);
        s.setDepth(depth);
        s.setFilter(filter);
        s.setLimit(limit);
        s.setTimeOut(timeOut);
        TreeSet<ExamProposedChange> suggestions = s.computeSuggestions(exam,
                (change == null ? null : change.getAssignments()));
        String message = null;
        if (s.wasTimeoutReached()) {
            message = "(" + (timeOut / 1000L) + "s timeout reached, " + s.getNrCombinationsConsidered()
                    + " possibilities up to " + depth + " changes were considered, ";
        } else {
            message = "(all " + s.getNrCombinationsConsidered() + " possibilities up to " + depth
                    + " changes were considered, ";
        }
        if (suggestions.isEmpty()) {
            message += "no suggestion found)";
        } else if (s.getNrSolutions() > suggestions.size()) {
            message += "top " + suggestions.size() + " of " + s.getNrSolutions() + " suggestions displayed)";
        } else {
            message += suggestions.size() + " suggestions displayed)";
        }
        return new ExamSuggestionsInfo(suggestions, message, s.wasTimeoutReached());
    } finally {
        lock.unlock();
    }
}

From source file:com.tasktop.c2c.server.scm.service.GitServiceBean.java

private List<RevCommit> getAllCommits(Repository repository, Region region, Set<ObjectId> visited) {
    TreeSet<RevCommit> result = new TreeSet<RevCommit>(new Comparator<RevCommit>() {

        @Override
        public int compare(RevCommit o1, RevCommit o2) {
            int ctDiff = o2.getCommitTime() - o1.getCommitTime();
            if (ctDiff != 0) {
                return ctDiff;
            }
            return o1.getId().compareTo(o2.getId());
        }
    });

    int maxResultsToConsider = -1;
    if (region != null) {
        maxResultsToConsider = region.getOffset() + region.getSize();
    }
    long minTime = -1;

    try {

        for (Ref ref : getRefsToAdd(repository)) {
            RevWalk revWal = new RevWalk(repository);
            revWal.markStart(revWal.parseCommit(ref.getObjectId()));

            int index = 0;
            for (RevCommit revCommit : revWal) {
                if (region == null
                        || (index >= region.getOffset() && index < region.getOffset() + region.getSize())) {
                    if (minTime > 0 && revCommit.getCommitTime() < minTime) {
                        break;
                    }
                    if (visited.add(revCommit.getId())) {
                        result.add(revCommit);

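                        // trim to a bounded top-N: evict the oldest commit (the TreeSet is ordered
                        // newest-first) and use its time as a lower bound for the rest of the walk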
                        if (maxResultsToConsider > 0 && result.size() > maxResultsToConsider) {
                            RevCommit last = result.last();
                            result.remove(last);
                            minTime = last.getCommitTime();
                        }
                    } else {
                        break; // Done with this branch
                    }
                }
                index++;
                if (region != null && (index >= region.getOffset() + region.getSize())) {
                    break;
                }

            }

        }

    } catch (IOException e) {
        throw new RuntimeException(e);
    }

    return new ArrayList<RevCommit>(result);
}

From source file:de.julielab.jcore.ae.lingpipegazetteer.chunking.ChunkerProviderImpl.java

private void readDictionary(InputStream dictFile) throws IOException, AnalysisEngineProcessException {
    long time = System.currentTimeMillis();
    if (useApproximateMatching) {
        dict = new TrieDictionary<String>();
    } else {
        dict = new MapDictionary<String>();
    }
    // now read from file and add entries
    LOGGER.info("readDictionary() - adding entries from " + dictFile + " to dictionary...");
    try (InputStreamReader isr = new InputStreamReader(dictFile)) {
        BufferedReader bf = new BufferedReader(isr);
        String line = "";
        String variant = "";
        TreeSet<String> termVariants;
        TreeSet<String> dictionary = new TreeSet<String>();

        while ((line = bf.readLine()) != null) {
            String[] values = line.split("\t");
            if (values.length != 2) {
                LOGGER.error("readDictionary() - wrong format of line: " + line);
                throw new AnalysisEngineProcessException(AnalysisEngineProcessException.ANNOTATOR_EXCEPTION,
                        null);
            }

            String term = values[0].trim();
            String label = values[1].trim();
            if (term.length() < MIN_TERM_LENGTH)
                continue;

            if (useApproximateMatching && !caseSensitive)
                term = term.toLowerCase();

            if (generateVariants) {
                LOGGER.debug("readDictionary() - make term variants of (" + term + ", " + label
                        + ") and add them to dictionary (NOTE: this may take a while if dictionary is big!)");
                termVariants = makeTermVariants(term);
                Iterator<String> it = termVariants.iterator();
                while (it.hasNext()) {
                    variant = it.next();
                    if (!stopWords.contains(variant.toLowerCase()) && !variant.equals("")) {
                        // System.err.println("ADDING VARIANT: " + variant + "="
                        // + label);
                        dictionary.add(variant + SEPARATOR + label);
                    }
                    // dict.addEntry(new DictionaryEntry(it.next(), label,
                    // CHUNK_SCORE));
                }
                it = null;
            } else {
                if (!stopWords.contains(term.toLowerCase()))
                    dictionary.add(term + SEPARATOR + label);
                // dict.addEntry(new DictionaryEntry(term, label, CHUNK_SCORE));
            }

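            // flush the staging TreeSet into the backing (LingPipe) dictionary in batches to bound memory use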
            if (dictionary.size() >= 10000) {
                LOGGER.debug("readDictionary() - flushing dictionarySet to map dictionary");
                dictionary = flushDictionary(dictionary, dict);
            }

        }

        dictionary = flushDictionary(dictionary, dict);
        dictionary = null;
        time = System.currentTimeMillis() - time;
        LOGGER.info("Reading dictionary took {}ms ({}s)", time, time / 1000);
    }
}

From source file:plugin.lsttokens.DefineStatLst.java

@Override
public String[] unparse(LoadContext context, CDOMObject obj) {
    Changes<StatLock> lockChanges = context.getObjectContext().getListChanges(obj, ListKey.STAT_LOCKS);
    Changes<CDOMSingleRef<PCStat>> ulchanges = context.getObjectContext().getListChanges(obj,
            ListKey.UNLOCKED_STATS);
    Changes<CDOMSingleRef<PCStat>> nonStatChanges = context.getObjectContext().getListChanges(obj,
            ListKey.NONSTAT_STATS);
    Changes<CDOMSingleRef<PCStat>> nonStatToStatChanges = context.getObjectContext().getListChanges(obj,
            ListKey.NONSTAT_TO_STAT_STATS);
    Changes<StatLock> minValueChanges = context.getObjectContext().getListChanges(obj, ListKey.STAT_MINVALUE);
    Changes<StatLock> maxValueChanges = context.getObjectContext().getListChanges(obj, ListKey.STAT_MAXVALUE);
    TreeSet<String> set = new TreeSet<>();
    if (lockChanges != null && !lockChanges.isEmpty()) {
        if (lockChanges.includesGlobalClear()) {
            context.addWriteMessage("DEFINE:LOCK does not support .CLEAR");
            return null;
        }
        if (lockChanges.hasAddedItems()) {
            for (StatLock sl : lockChanges.getAdded()) {
                set.add("LOCK|" + sl.getLSTformat() + Constants.PIPE + sl.getLockValue());
            }
        }
    }
    if (ulchanges != null && !ulchanges.isEmpty()) {
        if (ulchanges.includesGlobalClear()) {
            context.addWriteMessage("DEFINE:UNLOCK " + "does not support .CLEAR");
            return null;
        }
        if (ulchanges.hasAddedItems()) {
            for (CDOMSingleRef<PCStat> st : ulchanges.getAdded()) {
                set.add("UNLOCK|" + st.getLSTformat(false));
            }
        }
    }
    if (nonStatChanges != null && !nonStatChanges.isEmpty()) {
        if (nonStatChanges.hasAddedItems()) {
            for (CDOMSingleRef<PCStat> st : nonStatChanges.getAdded()) {
                set.add("NONSTAT|" + st.getLSTformat(false));
            }
        }
    }
    if (nonStatToStatChanges != null && !nonStatToStatChanges.isEmpty()) {
        if (nonStatToStatChanges.hasAddedItems()) {
            for (CDOMSingleRef<PCStat> st : nonStatToStatChanges.getAdded()) {
                set.add("STAT|" + st.getLSTformat(false));
            }
        }
    }
    if (minValueChanges != null && !minValueChanges.isEmpty()) {
        if (minValueChanges.hasAddedItems()) {
            for (StatLock sl : minValueChanges.getAdded()) {
                set.add("MINVALUE|" + sl.getLSTformat() + Constants.PIPE + sl.getLockValue());
            }
        }
    }
    if (maxValueChanges != null && !maxValueChanges.isEmpty()) {
        if (maxValueChanges.hasAddedItems()) {
            for (StatLock sl : maxValueChanges.getAdded()) {
                set.add("MAXVALUE|" + sl.getLSTformat() + Constants.PIPE + sl.getLockValue());
            }
        }
    }
    if (set.isEmpty()) {
        return null;
    }
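    // size() sizes the output array exactly; the TreeSet has already sorted and deduplicated the tokens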
    return set.toArray(new String[set.size()]);
}

From source file:org.jahia.services.search.facets.SimpleJahiaJcrFacets.java

/**
 * Use the Lucene FieldCache to get counts for each unique field value in <code>docs</code>. The field must have at most one indexed
 * token per document.
 */
public NamedList<Object> getFieldCacheCounts(IndexSearcher searcher, OpenBitSet docs, String fieldName,
        int offset, int limit, int mincount, boolean missing, String sort, String prefix, String locale,
        ExtendedPropertyDefinition epd) throws IOException {
    // TODO: If the number of terms is high compared to docs.size(), and zeros==false,
    // we should use an alternate strategy to avoid
    // 1) creating another huge int[] for the counts
    // 2) looping over that huge int[] looking for the rare non-zeros.
    //
    // Yet another variation: if docs.size() is small and termvectors are stored,
    // then use them instead of the FieldCache.
    //

    // TODO: this function is too big and could use some refactoring, but
    // we also need a facet cache, and refactoring of SimpleFacets instead of
    // trying to pass all the various params around.
    FieldType ft = getType(epd);
    NamedList<Object> res = new NamedList<Object>();

    FieldCache.StringIndex si = FieldCache.DEFAULT.getStringIndex(searcher.getIndexReader(), fieldName);
    final String[] terms = si.lookup;
    final int[] termNum = si.order;

    if (prefix != null && prefix.length() == 0)
        prefix = null;

    int startTermIndex, endTermIndex;
    if (prefix != null) {
        startTermIndex = Arrays.binarySearch(terms, prefix, nullStrComparator);
        if (startTermIndex < 0)
            startTermIndex = -startTermIndex - 1;
        // find the end term. \uffff isn't a legal unicode char, but only compareTo
        // is used, so it should be fine, and is guaranteed to be bigger than legal chars.
        endTermIndex = Arrays.binarySearch(terms, prefix + "\uffff\uffff\uffff\uffff", nullStrComparator);
        endTermIndex = -endTermIndex - 1;
    } else {
        startTermIndex = 1;
        endTermIndex = terms.length;
    }

    final int nTerms = endTermIndex - startTermIndex;

    if (nTerms > 0 && docs.size() >= mincount) {

        // count collection array only needs to be as big as the number of terms we are
        // going to collect counts for.
        final int[] counts = new int[nTerms];

        DocIdSetIterator iter = docs.iterator();
        while (iter.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
            int term = termNum[iter.docID()];
            int arrIdx = term - startTermIndex;
            if (arrIdx >= 0 && arrIdx < nTerms)
                counts[arrIdx]++;
        }

        // IDEA: we could also maintain a count of "other"... everything that fell outside
        // of the top 'N'

        int off = offset;
        int lim = limit >= 0 ? limit : Integer.MAX_VALUE;

        if (sort.equals(FacetParams.FACET_SORT_COUNT) || sort.equals(FacetParams.FACET_SORT_COUNT_LEGACY)) {
            int maxsize = limit > 0 ? offset + limit : Integer.MAX_VALUE - 1;
            maxsize = Math.min(maxsize, nTerms);
            final TreeSet<SimpleFacets.CountPair<String, Integer>> queue = new TreeSet<SimpleFacets.CountPair<String, Integer>>();
            int min = mincount - 1; // the smallest value in the top 'N' values
            for (int i = 0; i < nTerms; i++) {
                int c = counts[i];
                if (c > min) {
                    // NOTE: we use c>min rather than c>=min as an optimization because we are going in
                    // index order, so we already know that the keys are ordered. This can be very
                    // important if a lot of the counts are repeated (like zero counts would be).
                    queue.add(new SimpleFacets.CountPair<String, Integer>(terms[startTermIndex + i], c));
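                    // once size() reaches maxsize (offset + limit, capped at nTerms), the page is full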
                    if (queue.size() >= maxsize) {
                        break;
                    }
                }
            }
            // now select the right page from the results
            for (SimpleFacets.CountPair<String, Integer> p : queue) {
                if (--off >= 0)
                    continue;
                if (--lim < 0)
                    break;
                res.add(ft.indexedToReadable(p.key), p.val);
            }
        } else {
            // add results in index order
            int i = 0;
            if (mincount <= 0) {
                // if mincount<=0, then we won't discard any terms and we know exactly
                // where to start.
                i = off;
                off = 0;
            }

            for (; i < nTerms; i++) {
                int c = counts[i];
                if (c < mincount || --off >= 0)
                    continue;
                if (--lim < 0)
                    break;
                res.add(ft.indexedToReadable(terms[startTermIndex + i]), c);
            }
        }
    }

    if (missing) {
        res.add(null, getFieldMissingCount(searcher, docs, fieldName, locale));
    }

    return res;
}

From source file:com.digipom.manteresting.android.processor.json.NailsJsonProcessor.java

@Override
public ArrayList<ContentProviderOperation> parse(JSONObject response, Meta meta) throws JSONException {
    final ArrayList<ContentProviderOperation> batch = Lists.newArrayList();
    final TreeSet<Integer> nailIds = new TreeSet<Integer>();
    final Cursor nails = resolver.query(ManterestingContract.Nails.CONTENT_URI, new String[] { Nails.NAIL_ID },
            null, null, Nails.NAIL_ID + " DESC");
    int greatestOfExisting = Integer.MIN_VALUE;

    if (nails != null && !nails.isClosed()) {
        try {
            nails.moveToFirst();

            final int idColumn = nails.getColumnIndex(Nails.NAIL_ID);

            while (!nails.isAfterLast()) {
                final int nailId = nails.getInt(idColumn);
                nailIds.add(nailId);
                greatestOfExisting = nailId > greatestOfExisting ? nailId : greatestOfExisting;
                nails.moveToNext();
            }
        } finally {
            if (nails != null) {
                nails.close();
            }
        }
    }

    final JSONArray objects = response.getJSONArray("objects");
    int smallestOfNew = Integer.MAX_VALUE;

    for (int i = 0; i < objects.length(); i++) {
        final JSONObject nailObject = objects.getJSONObject(i);

        final boolean isPrivate = nailObject.getJSONObject("workbench").getBoolean("private");

        if (!isPrivate) {
            final ContentProviderOperation.Builder builder = ContentProviderOperation
                    .newInsert(Nails.CONTENT_URI);
            final int nailId = nailObject.getInt("id");
            smallestOfNew = nailId < smallestOfNew ? nailId : smallestOfNew;

            builder.withValue(Nails.NAIL_ID, nailId);
            builder.withValue(Nails.NAIL_JSON, nailObject.toString());

            batch.add(builder.build());
            nailIds.add(nailId);
        }
    }

    // If more than LIMIT were fetched, and this was the initial fetch, then
    // we flush everything in the DB before adding the new nails (as
    // otherwise we would introduce a gap).
    if (meta.nextOffset == meta.nextLimit // For initial fetch
            && smallestOfNew > greatestOfExisting) {
        if (LoggerConfig.canLog(Log.DEBUG)) {
            Log.d(TAG, "Flushing all existing nails on initial fetch, so as to avoid a gap.");
        }

        resolver.delete(Nails.CONTENT_URI, null, null);
    } else {
        // If more than MAX_COUNT nails, find the MAX_COUNT-th biggest and
        // delete those after it.
        if (nailIds.size() > MAX_COUNT) {
            Iterator<Integer> it = nailIds.descendingIterator();

            for (int i = 0; i < MAX_COUNT; i++) {
                it.next();
            }

            final Integer toDelete = it.next();

            if (LoggerConfig.canLog(Log.DEBUG)) {
                Log.d(TAG, "deleting from nails where NAIL_ID is less than or equal to " + toDelete);
            }

            SelectionBuilder selectionBuilder = new SelectionBuilder();
            selectionBuilder.where(ManterestingContract.Nails.NAIL_ID + " <= ?",
                    new String[] { String.valueOf(toDelete) });
            resolver.delete(ManterestingContract.Nails.CONTENT_URI, selectionBuilder.getSelection(),
                    selectionBuilder.getSelectionArgs());
        }
    }

    return batch;
}

From source file:tokyo.northside.jrst.JRSTReader.java

/**
 * <p>
 * Complex table
 * </p>
 *
 * <pre>
 * +------------------------+------------+---------------------+
 * | body row 3             | Cells may  | - Table cells       |
 * +------------------------+ span rows. | - contain           |
 * | body row 4             |            | - body elements.    |
 * +------------------------+------------+---------------------+
 * </pre>
 *
 * <p>
 * And simple table
 * </p>
 *
 * <pre>
 * =====  =====  ======
 *    Inputs     Output
 * ============  ======
 *   A      B    A or B
 * ------------  ------
 *   A      B    A or B
 * =====  =====  ======
 * </pre>
 *
 * @param item
 * @return Element
 *
 */
private Element composeTable(Element item) throws Exception {

    Element result = DocumentHelper.createElement(TABLE);

    int tableWidth = Integer.parseInt(item.attributeValue(JRSTLexer.TABLE_WIDTH));

    TreeSet<Integer> beginCellList = new TreeSet<Integer>();

    for (Element cell : (List<Element>) item.selectNodes(JRSTLexer.ROW + "/" + JRSTLexer.CELL)) {
        Integer begin = Integer.valueOf(cell.attributeValue(JRSTLexer.CELL_INDEX_START));
        beginCellList.add(begin);
    }

    // + 1 to put the table width at the end, to simulate one more cell
    int[] beginCell = new int[beginCellList.size() + 1];
    int[] lengthCell = new int[beginCellList.size()];

    int cellNumber = 0;
    for (int b : beginCellList) {
        beginCell[cellNumber] = b;
        if (cellNumber > 0) {
            lengthCell[cellNumber - 1] = beginCell[cellNumber] - beginCell[cellNumber - 1];
        }
        cellNumber++;
    }
    beginCell[cellNumber] = tableWidth;
    lengthCell[cellNumber - 1] = beginCell[cellNumber] - beginCell[cellNumber - 1];

    Element tgroup = result.addElement(TGROUP).addAttribute("cols", String.valueOf(cellNumber));
    for (int width : lengthCell) {
        tgroup.addElement(COLSPEC).addAttribute("colwidth", String.valueOf(width));
    }

    Element rowList = null;
    if (TRUE.equals(item.attributeValue(JRSTLexer.TABLE_HEADER))) {
        rowList = tgroup.addElement(THEAD);
    } else {
        rowList = tgroup.addElement(TBODY);
    }
    List<Element> rows = (List<Element>) item.selectNodes(JRSTLexer.ROW);
    for (int r = 0; r < rows.size(); r++) {
        Element row = rowList.addElement(ROW);
        List<Element> cells = (List<Element>) rows.get(r).selectNodes(JRSTLexer.CELL);
        for (int c = 0; c < cells.size(); c++) {
            Element cell = cells.get(c);
            // skip this cell if it was already consumed by a vertical merge
            if (!TRUE.equals(cell.attributeValue("used"))) {
                Element entry = row.addElement(ENTRY);
                String text = "";

                // merge the cells vertically
                int morerows = -1;
                Element tmpCell = null;
                String cellStart = cell.attributeValue(JRSTLexer.CELL_INDEX_START);
                do {
                    morerows++;
                    tmpCell = (Element) rows.get(r + morerows).selectSingleNode(
                            JRSTLexer.CELL + "[@" + JRSTLexer.CELL_INDEX_START + "=" + cellStart + "]");
                    text += tmpCell.getText();
                    // mark the cell as used
                    tmpCell.addAttribute("used", TRUE);
                } while (!TRUE.equals(tmpCell.attributeValue(JRSTLexer.CELL_END)));

                if (morerows > 0) {
                    entry.addAttribute("morerows", String.valueOf(morerows));
                }

                // count the number of horizontally merged cells
                int morecols = 0;
                tmpCell = cells.get(c + morecols);
                int cellEnd = Integer.parseInt(tmpCell.attributeValue(JRSTLexer.CELL_INDEX_END));
                while (cellEnd + 1 != beginCell[c + morecols + 1]) {
                    morecols++;
                    // tmpCell = cells.get(c + morecols);
                    // cellEnd =
                    // Integer.parseInt(tmpCell.attributeValue(JRSTLexer.
                    // CELL_INDEX_END));
                }
                if (morecols > 0) {
                    entry.addAttribute("morecols", String.valueOf(morecols));
                }
                // parse entry text in table
                Document doc = newJRSTReader(new StringReader(text));
                entry.appendContent(doc.getRootElement());
            }
        }
        if (TRUE.equals(rows.get(r).attributeValue(JRSTLexer.ROW_END_HEADER))) {
            rowList = tgroup.addElement(TBODY);
        }
    }

    return result;
}

From source file:org.odk.collect.android.logic.FormRelationsManager.java

/**
 * Deletes current instance or children, as necessary.
 *
 * Pre-condition: The FormRelationsManager object should have been
 * initialized by one of the getFormRelationsManager methods. Thus all
 * form relations information in the current instance is collected.
 * Furthermore, the current instance should already be saved to disk.
 *
 * Post-condition: If a relevant deleteForm is discovered, then the
 * current instance (and its children) are deleted from the
 * InstanceProvider and from the form relations database. If an irrelevant
 * saveForm is discovered and it is associated with a child, then that
 * child is deleted. In this second case, the parent form is unmodified.
 *
 * @return The number of forms deleted is returned.
 */
private int manageDeletions() {
    int nDeletions = 0;
    int deleteWhat = getWhatToDelete();
    if (deleteWhat == DELETE_THIS) {
        // PMA-Logging BEGIN
        mUseLog.log(UseLogContract.RELATION_SELF_DESTRUCT, mInstanceId, null, null);
        mUseLog.writeBackLogAndClose();
        //            try {
        //                // possible racing? writing for value differences, then writing for deletion
        //                long thisParentId = FormRelationsDb.getParent(mInstanceId);
        //                String thisParent = getInstancePath(getInstanceUriFromId(thisParentId));
        //                String repeatable = FormRelationsDb.getRepeatable(thisParentId, mInstanceId);
        //                int repeatIndex = FormRelationsDb.getRepeatIndex(thisParentId, mInstanceId);
        //                FormRelationsUseLog frul = new FormRelationsUseLog(thisParent);
        //                frul.log(UseLogContract.RELATION_SELF_DESTRUCT, repeatable, String.valueOf(repeatIndex));
        //                frul.writeBackLog(true);
        //                frul.close();
        //            } catch (FormRelationsException e) {
        //                Log.w(TAG, "Failed to log self-deletion", e);
        //            }
        // PMA-Logging END
        nDeletions = deleteInstance(mInstanceId);
    } else if (deleteWhat == DELETE_CHILD) {
        TreeSet<Integer> allRepeatIndices = new TreeSet<Integer>();
        for (TraverseData td : mNonRelevantSaveForm) {
            allRepeatIndices.add(td.repeatIndex);
        }
        TreeSet<Long> allWaywardChildren = new TreeSet<Long>();
        for (Integer i : allRepeatIndices) {
            Long childInstanceId = FormRelationsDb.getChild(mInstanceId, i);
            if (LOCAL_LOG) {
                Log.d(TAG, "ParentId(" + mInstanceId + ") + RepeatIndex(" + i + ") + ChildIdFound("
                        + childInstanceId + ")");
            }
            if (childInstanceId != -1) {
                allWaywardChildren.add(childInstanceId);
            }
        }
        for (Long childInstanceId : allWaywardChildren) {
            // PMA-Logging BEGIN
            // probably not good to keep track here. log file already being written to in formentry
            // // Get true mInstanceId, write rD to parent log.txt
            // PMA-Logging END
            deleteInstance(childInstanceId);
        }
        nDeletions = allWaywardChildren.size();
    }
    return nDeletions;
}

From source file:org.commoncrawl.service.listcrawler.CacheManager.java

/** Check the cache via fingerprint - this call blocks and should not be used in an async context.
 *
 * @param urlFingerprint the fingerprint to look up
 * @param returnDate when true, return the cached document's date instead of the marker value 1
 * @return the document date (or 1 when returnDate is false) if a document with a matching fingerprint exists in the cache, 0 otherwise
 */
public long checkCacheForFingerprint(long urlFingerprint, boolean returnDate) {

    synchronized (this) {
        for (CacheItem item : _writeQueue) {
            if (item.getUrlFingerprint() == urlFingerprint) {
                if (returnDate) {
                    long dateOut = dateFromCacheItem(item);
                    // if no date found, use current date as an approximate...
                    return (dateOut != 0) ? dateOut : System.currentTimeMillis();
                } else
                    return 1;
            }
        }
    }

    synchronized (this) {
        if (_fingerprintToLocalLogPos.get(urlFingerprint).size() != 0) {
            // assume recent date as an approximate 
            return System.currentTimeMillis();
        }
    }

    // now check hdfs indexes 
    ImmutableList<HDFSFileIndex> indexList = null;

    synchronized (CacheManager.this) {
        indexList = ImmutableList.copyOf(_hdfsIndexList);
    }

    long timeStart = System.currentTimeMillis();

    // first check local item cache ...
    TreeSet<Long> cachedItems = new TreeSet<Long>();

    for (HDFSFileIndex index : Lists.reverse(indexList)) {
        try {
            CacheItem itemFound = index.findItem(urlFingerprint, !returnDate);
            if (itemFound != null) {
                if (returnDate) {
                    // get item date from headers .
                    long itemDate = dateFromCacheItem(itemFound);
                    if (itemDate == 0) {
                        itemDate = index.getIndexTimestamp();
                        // if item date still 0, this is BAD !!!
                        if (itemDate == 0) {
                            LOG.error("!!!!!!UNABLE TO PARSE INDEX TIMESTAMP:" + index.getIndexDataPath());
                            itemDate = 1L;
                        }
                    }
                    // ok add it to the map ... 
                    cachedItems.add(itemDate);
                } else {
                    return 1;
                }
            }
        } catch (IOException e) {
            LOG.error(CCStringUtils.stringifyException(e));
        }
    }
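    // checking size() first guards last(), which throws NoSuchElementException on an empty set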
    if (returnDate && cachedItems.size() != 0) {
        return cachedItems.last();
    }
    return 0;
}

From source file:org.apache.hadoop.hbase.regionserver.HRegionServer.java

public String[] getRegionServerCoprocessors() {
    TreeSet<String> coprocessors = new TreeSet<String>(this.hlog.getCoprocessorHost().getCoprocessors());
    Collection<HRegion> regions = getOnlineRegionsLocalContext();
    for (HRegion region : regions) {
        coprocessors.addAll(region.getCoprocessorHost().getCoprocessors());
    }
    return coprocessors.toArray(new String[coprocessors.size()]);
}