List of usage examples for java.util.TreeSet.size()
public int size()
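size() returns the number of elements in the set (its cardinality). Before the real-world examples below, a minimal self-contained sketch of the basic behavior (the class name and values are illustrative, not from any of the sources):

import java.util.TreeSet;

public class TreeSetSizeDemo {
    public static void main(String[] args) {
        TreeSet<String> set = new TreeSet<>();
        set.add("b");
        set.add("a");
        set.add("a"); // duplicate; a TreeSet keeps only one copy
        System.out.println(set.size()); // prints 2
        set.remove("a");
        System.out.println(set.size()); // prints 1
        System.out.println(set.isEmpty()); // prints false
    }
}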
From source file:org.apache.roller.weblogger.ui.struts2.editor.CategoryRemove.java
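This Struts 2 action collects a weblog's categories (except the one being removed) into a TreeSet sorted by path, and uses size() > 0 to decide whether there is anything to expose for display.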
/**
 * Display the remove template confirmation.
 */
public String execute() {
    // build list of categories for display
    TreeSet allCategories = new TreeSet(new WeblogCategoryPathComparator());
    try {
        // Build list of all categories, except for current one, sorted by path.
        WeblogEntryManager wmgr = WebloggerFactory.getWeblogger().getWeblogEntryManager();
        List<WeblogCategory> cats = wmgr.getWeblogCategories(getActionWeblog(), true);
        for (WeblogCategory cat : cats) {
            if (!cat.getId().equals(getRemoveId())) {
                allCategories.add(cat);
            }
        }
    } catch (WebloggerException ex) {
        log.error("Error building categories list", ex);
        // TODO: i18n
        addError("Error building categories list");
    }
    if (allCategories.size() > 0) {
        setAllCategories(allCategories);
    }
    return INPUT;
}
From source file:org.rhwlab.BHC.BHCTree.java
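Here size() drives the loop condition: the tree cut grows by swapping one node for its two children on each iteration until the cut holds exactly n nodes.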
public TreeSet<NucleusLogNode> cutToExactlyN_Nodes(int n) {
    TreeSet<NucleusLogNode> cut = firstTreeCut();
    while (cut.size() < n) {
        NucleusLogNode[] next = nextTreeCut(cut);
        if (next == null)
            return cut;
        cut.remove(next[2]);
        cut.add(next[0]);
        cut.add(next[1]);
    }
    return cut;
}
From source file:org.rhwlab.BHC.BHCTree.java
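The companion method builds every successive tree cut up to maxNodes, keying each cut in the result map by its size().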
public TreeMap<Integer, TreeSet<NucleusLogNode>> allTreeCuts(int maxNodes) {
    TreeMap<Integer, TreeSet<NucleusLogNode>> ret = new TreeMap<>();
    TreeSet<NucleusLogNode> cut = firstTreeCut();
    ret.put(cut.size(), cut);
    while (cut.size() < maxNodes) {
        NucleusLogNode[] next = nextTreeCut(cut);
        if (next == null) {
            return ret;
        }
        TreeSet<NucleusLogNode> nextSet = new TreeSet<>();
        nextSet.addAll(cut);
        nextSet.remove(next[2]);
        nextSet.add(next[0]);
        nextSet.add(next[1]);
        ret.put(nextSet.size(), nextSet);
        cut = nextSet;
    }
    return ret;
}
From source file:org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.java
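In HDFS retention cleanup, a TreeSet<Long> sorts the transaction ids parsed from image file names; sortedTxIds.size() minus the retention count gives the number of oldest images to delete, which are then consumed in ascending iteration order.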
/**
 * Delete old OIV fsimages. Since the target dir is not a full blown
 * storage directory, we simply list and keep the latest ones. For the
 * same reason, no storage inspector is used.
 */
void purgeOldLegacyOIVImages(String dir, long txid) {
    File oivImageDir = new File(dir);
    final String oivImagePrefix = NameNodeFile.IMAGE_LEGACY_OIV.getName();
    String filesInStorage[];

    // Get the listing
    filesInStorage = oivImageDir.list(new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
            return name.matches(oivImagePrefix + "_(\\d+)");
        }
    });

    // Check whether there is any work to do.
    if (filesInStorage.length <= numCheckpointsToRetain) {
        return;
    }

    // Create a sorted list of txids from the file names.
    TreeSet<Long> sortedTxIds = new TreeSet<Long>();
    for (String fName : filesInStorage) {
        // Extract the transaction id from the file name.
        long fTxId;
        try {
            fTxId = Long.parseLong(fName.substring(oivImagePrefix.length() + 1));
        } catch (NumberFormatException nfe) {
            // This should not happen since we have already filtered it.
            // Log and continue.
            LOG.warn("Invalid file name. Skipping " + fName);
            continue;
        }
        sortedTxIds.add(Long.valueOf(fTxId));
    }

    int numFilesToDelete = sortedTxIds.size() - numCheckpointsToRetain;
    Iterator<Long> iter = sortedTxIds.iterator();
    while (numFilesToDelete > 0 && iter.hasNext()) {
        long txIdVal = iter.next().longValue();
        String fileName = NNStorage.getLegacyOIVImageFileName(txIdVal);
        LOG.info("Deleting " + fileName);
        File fileToDelete = new File(oivImageDir, fileName);
        if (!fileToDelete.delete()) {
            // deletion failed.
            LOG.warn("Failed to delete image file: " + fileToDelete);
        }
        numFilesToDelete--;
    }
}
From source file:com.ephesoft.dcma.tabbed.TabbedPdfExporter.java
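A TreeSet<Integer> de-duplicates the document priorities read from a property file: if sortedSet.size() differs from the property map's size, two document types share a priority and the batch is rejected as invalid.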
/**
 * This will create the document map on the basis of order and load the error pdf if the document is not present.
 *
 * @param parsedXMLFile {@link Batch}
 * @param batchInstanceID {@link String}
 * @param propertyFile {@link String}
 * @return documentPDFMap {@link LinkedHashMap<String, List<String>>}
 * @throws Exception
 */
private LinkedHashMap<String, List<String>> createBatchClassDocumentPDFMap(final Batch parsedXMLFile,
        String batchInstanceID, String propertyFile) throws Exception {
    String errorPDFPath = batchSchemaService.getAbsolutePath(parsedXMLFile.getBatchClassIdentifier(),
            batchSchemaService.getScriptConfigFolderName(), true);
    List<String> batchDocumentTypeNameList = new ArrayList<String>();
    String batchClassId = parsedXMLFile.getBatchClassIdentifier();
    BatchClass batchClass = batchClassService.getBatchClassByIdentifierIncludingDeleted(batchClassId);
    List<DocumentType> batchDocumentList = batchClass.getDocumentTypes();
    for (DocumentType docType : batchDocumentList) {
        if (!docType.isHidden()) {
            batchDocumentTypeNameList.add(docType.getName());
        }
    }
    List<String> sortedDocumentList = new ArrayList<String>();
    Map<String, Integer> propMap = fetchDocNameMapping(propertyFile);
    List<String> mapKeys = new ArrayList<String>(propMap.keySet());
    List<Integer> mapValues = new ArrayList<Integer>(propMap.values());
    TreeSet<Integer> sortedSet = new TreeSet<Integer>(mapValues);
    if (sortedSet.size() != propMap.size()) {
        LOGGER.error("Same priority is defined for more than one document type. Invalid scenario.");
        throw new DCMAApplicationException("Property file for documents not valid");
    } else {
        Object[] sortedArray = sortedSet.toArray();
        int size = sortedArray.length;
        for (int i = 0; i < size; i++) {
            String documentType = (String) mapKeys.get(mapValues.indexOf(sortedArray[i]));
            for (int documentIndex = 0; documentIndex < batchDocumentTypeNameList.size(); documentIndex++) {
                if (documentType.equals(batchDocumentTypeNameList.get(documentIndex))) {
                    sortedDocumentList.add(batchDocumentTypeNameList.get(documentIndex));
                }
            }
        }
        List<Document> xmlDocuments = parsedXMLFile.getDocuments().getDocument();
        // check if any document in batch xml is not present in export props then send the batch to error.
        checkIfAnyXmlDocIsNotInProps(sortedDocumentList, xmlDocuments);
        LinkedHashMap<String, List<String>> documentPDFMap = new LinkedHashMap<String, List<String>>();
        int startPageNumber = 1;
        int docIdentifier = 1;
        int batchDocumentIndex = 0;
        for (String document : sortedDocumentList) {
            List<String> detailsList = new LinkedList<String>();
            String documentId = TabbedPdfConstant.DOCUMENT_IDENTIFIER + docIdentifier;
            int numberOfPages;
            String documentType;
            String pdfFile = null;
            if (batchDocumentIndex + 1 <= xmlDocuments.size()) {
                Document xmlDocument = xmlDocuments.get(batchDocumentIndex);
                if (document.equals(xmlDocument.getType())) {
                    List<Page> listOfPages = xmlDocuments.get(batchDocumentIndex).getPages().getPage();
                    LOGGER.info("Document documentid =" + documentId + " contains the following info:");
                    numberOfPages = listOfPages.size();
                    documentType = xmlDocument.getType();
                    pdfFile = xmlDocument.getMultiPagePdfFile();
                    detailsList.add(documentType);
                    detailsList.add(String.valueOf(startPageNumber));
                    if (pdfFile != null && !pdfFile.isEmpty()) {
                        File fPDFFile = batchSchemaService.getFile(batchInstanceID, pdfFile);
                        if (fPDFFile.exists()) {
                            LOGGER.info("PDF File Name:" + fPDFFile);
                            detailsList.add(fPDFFile.getAbsolutePath());
                        } else {
                            throw new DCMAApplicationException("File does not exist. File Name=" + fPDFFile);
                        }
                        docIdentifier++;
                        startPageNumber = startPageNumber + numberOfPages;
                        documentPDFMap.put(documentId, detailsList);
                    } else {
                        throw new DCMAApplicationException("MultiPagePDF file does not exist in batch xml.");
                    }
                    batchDocumentIndex++;
                } else {
                    startPageNumber = appendPlaceholder(errorPDFPath, xmlDocuments, documentPDFMap,
                            startPageNumber, docIdentifier, document, detailsList, documentId);
                    docIdentifier++;
                }
            } else {
                startPageNumber = appendPlaceholder(errorPDFPath, xmlDocuments, documentPDFMap,
                        startPageNumber, docIdentifier, document, detailsList, documentId);
                docIdentifier++;
            }
        }
        return documentPDFMap;
    }
}
From source file:net.sourceforge.fenixedu.presentationTier.Action.messaging.ViewSentEmailsDA.java
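This Struts action sorts the current user's senders into two TreeSets with custom comparators; when the sender is unique, sendersGroupsCourses.size() == 1 decides which of the two sets holds it.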
@EntryPoint
public ActionForward viewSentEmails(ActionMapping mapping, ActionForm actionForm, HttpServletRequest request,
        HttpServletResponse response) {
    final String senderParam = request.getParameter("senderId");
    if (senderParam != null && !senderParam.isEmpty()) {
        return viewSentEmails(mapping, request, senderParam);
    }
    final User userView = Authenticate.getUser();
    final Set<Sender> sendersGroups = new TreeSet<Sender>(Sender.COMPARATOR_BY_FROM_NAME);
    final TreeSet<ExecutionCourseSender> sendersGroupsCourses = new TreeSet<ExecutionCourseSender>(
            ExecutionCourseSender.COMPARATOR_BY_EXECUTION_COURSE_SENDER);
    for (final Sender sender : Bennu.getInstance().getUtilEmailSendersSet()) {
        boolean allow = sender.getMembers().isMember(userView);
        boolean isExecutionCourseSender = sender instanceof ExecutionCourseSender;
        if (allow && !isExecutionCourseSender) {
            sendersGroups.add(sender);
        }
        if (allow && isExecutionCourseSender) {
            sendersGroupsCourses.add((ExecutionCourseSender) sender);
        }
    }
    if (isSenderUnique(sendersGroups, sendersGroupsCourses)) {
        if (sendersGroupsCourses.size() == 1) {
            return viewSentEmails(mapping, request, (sendersGroupsCourses.iterator().next()).getExternalId());
        } else {
            return viewSentEmails(mapping, request, sendersGroups.iterator().next().getExternalId());
        }
    }
    request.setAttribute("sendersGroups", sendersGroups);
    request.setAttribute("sendersGroupsCourses", sendersGroupsCourses);
    final Person person = AccessControl.getPerson();
    if (person != null && person.hasRole(RoleType.MANAGER)) {
        SearchSendersBean searchSendersBean = getRenderedObject("searchSendersBean");
        if (searchSendersBean == null) {
            searchSendersBean = new SearchSendersBean();
        }
        request.setAttribute("searchSendersBean", searchSendersBean);
    }
    return mapping.findForward("view.sent.emails");
}
From source file:org.fenixedu.academic.ui.struts.action.messaging.ViewSentEmailsDA.java
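A later fenixedu variant of the same action; it uses size() identically and differs mainly in the manager-role check (RoleType.MANAGER.isMember(person.getUser()) instead of person.hasRole(RoleType.MANAGER)).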
@EntryPoint
public ActionForward viewSentEmails(ActionMapping mapping, ActionForm actionForm, HttpServletRequest request,
        HttpServletResponse response) {
    final String senderParam = request.getParameter("senderId");
    if (senderParam != null && !senderParam.isEmpty()) {
        return viewSentEmails(mapping, request, senderParam);
    }
    final User userView = Authenticate.getUser();
    final Set<Sender> sendersGroups = new TreeSet<Sender>(Sender.COMPARATOR_BY_FROM_NAME);
    final TreeSet<ExecutionCourseSender> sendersGroupsCourses = new TreeSet<ExecutionCourseSender>(
            ExecutionCourseSender.COMPARATOR_BY_EXECUTION_COURSE_SENDER);
    for (final Sender sender : Bennu.getInstance().getUtilEmailSendersSet()) {
        boolean allow = sender.getMembers().isMember(userView);
        boolean isExecutionCourseSender = sender instanceof ExecutionCourseSender;
        if (allow && !isExecutionCourseSender) {
            sendersGroups.add(sender);
        }
        if (allow && isExecutionCourseSender) {
            sendersGroupsCourses.add((ExecutionCourseSender) sender);
        }
    }
    if (isSenderUnique(sendersGroups, sendersGroupsCourses)) {
        if (sendersGroupsCourses.size() == 1) {
            return viewSentEmails(mapping, request, (sendersGroupsCourses.iterator().next()).getExternalId());
        } else {
            return viewSentEmails(mapping, request, sendersGroups.iterator().next().getExternalId());
        }
    }
    request.setAttribute("sendersGroups", sendersGroups);
    request.setAttribute("sendersGroupsCourses", sendersGroupsCourses);
    final Person person = AccessControl.getPerson();
    if (person != null && RoleType.MANAGER.isMember(person.getUser())) {
        SearchSendersBean searchSendersBean = getRenderedObject("searchSendersBean");
        if (searchSendersBean == null) {
            searchSendersBean = new SearchSendersBean();
        }
        request.setAttribute("searchSendersBean", searchSendersBean);
    }
    return mapping.findForward("view.sent.emails");
}
From source file:com.joliciel.jochre.boundaries.RecursiveShapeSplitter.java
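In this OCR shape splitter, weightedSplits.size() > 0 guards reading the highest-weighted split candidate via first(), and shapeSequence.size() == 1 identifies the no-split sequence, which is always kept regardless of the beam width.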
List<ShapeSequence> split(Shape shape, int depth, Shape originalShape, boolean leftToRight) {
    String padding = "-";
    for (int i = 0; i < depth; i++)
        padding += "-";
    padding += " ";
    LOG.trace(padding + "Splitting shape: " + shape.getLeft() + " , " + shape.getRight());
    LOG.trace(padding + "depth: " + depth);
    List<ShapeSequence> shapeSequences = new ArrayList<ShapeSequence>();
    // check if shape is wide enough to bother with
    double widthRatio = (double) shape.getWidth() / (double) shape.getXHeight();
    LOG.trace(padding + "widthRatio: " + widthRatio);
    if (widthRatio < minWidthRatio || depth >= maxDepth) {
        LOG.trace(padding + "too narrow or too deep");
        ShapeSequence shapeSequence = this.boundaryServiceInternal.getEmptyShapeSequence();
        shapeSequence.addShape(shape, originalShape);
        shapeSequences.add(shapeSequence);
    } else {
        List<Split> splitCandidates = this.splitCandidateFinder.findSplitCandidates(shape);
        TreeSet<ShapeSequence> myShapeSequences = new TreeSet<ShapeSequence>();
        TreeSet<WeightedOutcome<Split>> weightedSplits = new TreeSet<WeightedOutcome<Split>>();
        for (Split splitCandidate : splitCandidates) {
            double splitProb = this.shouldSplit(splitCandidate);
            WeightedOutcome<Split> weightedSplit = new WeightedOutcome<Split>(splitCandidate, splitProb);
            weightedSplits.add(weightedSplit);
        }
        double maxSplitProb = 0.0;
        if (weightedSplits.size() > 0)
            maxSplitProb = weightedSplits.first().getWeight();
        double noSplitProb = 1 - maxSplitProb;
        if (noSplitProb > maxSplitProb)
            maxSplitProb = noSplitProb;
        Split noSplit = boundaryServiceInternal.getEmptySplit(shape);
        noSplit.setPosition(-1);
        WeightedOutcome<Split> weightedNoSplit = new WeightedOutcome<Split>(noSplit, noSplitProb);
        weightedSplits.add(weightedNoSplit);
        boolean topCandidate = true;
        double topCandidateWeight = 1.0;
        for (WeightedOutcome<Split> weightedSplit : weightedSplits) {
            Split splitCandidate = weightedSplit.getOutcome();
            double splitProb = weightedSplit.getWeight();
            LOG.trace(padding + "splitCandidate: left=" + splitCandidate.getShape().getLeft() + ", pos="
                    + splitCandidate.getPosition() + ", initial prob: " + splitProb);
            if (topCandidate) {
                LOG.trace(padding + "topCandidate");
            }
            if (splitCandidate.getPosition() < 0) {
                // This is the no-split candidate
                if (topCandidate)
                    topCandidateWeight = 1.0;
                ShapeSequence shapeSequence = boundaryServiceInternal.getEmptyShapeSequence();
                shapeSequence.addShape(shape, originalShape);
                double prob = (splitProb / maxSplitProb) * topCandidateWeight;
                LOG.trace(padding + "noSplit prob=(" + splitProb + " / " + maxSplitProb + ") * "
                        + topCandidateWeight + " = " + prob);
                Decision<SplitMergeOutcome> decision = boundaryDecisionFactory
                        .createDecision(SplitOutcome.DO_NOT_SPLIT.getCode(), prob);
                shapeSequence.addDecision(decision);
                myShapeSequences.add(shapeSequence);
            } else {
                // a proper split
                Shape leftShape = shape.getJochreImage().getShape(shape.getLeft(), shape.getTop(),
                        shape.getLeft() + splitCandidate.getPosition(), shape.getBottom());
                Shape rightShape = shape.getJochreImage().getShape(
                        shape.getLeft() + splitCandidate.getPosition() + 1, shape.getTop(), shape.getRight(),
                        shape.getBottom());
                // for each split recursively try to split it again up to depth of m
                // Note: m=2 is probably enough, since we're not expecting more than 4 letters per shape (3 splits)
                List<ShapeSequence> leftShapeSequences = this.split(leftShape, depth + 1, originalShape,
                        leftToRight);
                List<ShapeSequence> rightShapeSequences = this.split(rightShape, depth + 1, originalShape,
                        leftToRight);
                if (topCandidate) {
                    // find the no-split sequence in each sub-sequence
                    ShapeSequence noSplitLeft = null;
                    for (ShapeSequence leftShapeSequence : leftShapeSequences) {
                        if (leftShapeSequence.size() == 1) {
                            noSplitLeft = leftShapeSequence;
                            break;
                        }
                    }
                    ShapeSequence noSplitRight = null;
                    for (ShapeSequence rightShapeSequence : rightShapeSequences) {
                        if (rightShapeSequence.size() == 1) {
                            noSplitRight = rightShapeSequence;
                            break;
                        }
                    }
                    // we should be guaranteed to find a noSplitLeft and noSplitRight
                    // since a no-split candidate is always returned
                    topCandidateWeight = noSplitLeft.getScore() * noSplitRight.getScore();
                    LOG.trace(padding + "topCandidateWeight=" + noSplitLeft.getScore() + " *"
                            + noSplitRight.getScore() + " = " + topCandidateWeight);
                }
                for (ShapeSequence leftShapeSequence : leftShapeSequences) {
                    for (ShapeSequence rightShapeSequence : rightShapeSequences) {
                        ShapeSequence newSequence = null;
                        if (leftToRight)
                            newSequence = boundaryServiceInternal.getShapeSequence(leftShapeSequence,
                                    rightShapeSequence);
                        else
                            newSequence = boundaryServiceInternal.getShapeSequence(rightShapeSequence,
                                    leftShapeSequence);
                        if (LOG.isTraceEnabled()) {
                            StringBuilder sb = new StringBuilder();
                            for (ShapeInSequence splitShape : newSequence) {
                                sb.append("(" + splitShape.getShape().getLeft() + ","
                                        + splitShape.getShape().getRight() + ") ");
                            }
                            LOG.trace(padding + sb.toString());
                        }
                        double totalProb = 1.0;
                        for (Decision<SplitMergeOutcome> decision : newSequence.getDecisions()) {
                            totalProb = totalProb * decision.getProbability();
                        }
                        newSequence.getDecisions().clear();
                        double prob = 0.0;
                        if (topCandidate) {
                            prob = totalProb * (splitProb / maxSplitProb);
                            LOG.trace(padding + "prob=" + totalProb + " * (" + splitProb + " / " + maxSplitProb
                                    + ") = " + prob);
                        } else {
                            prob = totalProb * (splitProb / maxSplitProb) * topCandidateWeight;
                            LOG.trace(padding + "prob=" + totalProb + " * (" + splitProb + " / " + maxSplitProb
                                    + ") * " + topCandidateWeight + " = " + prob);
                        }
                        Decision<SplitMergeOutcome> decision = this.boundaryDecisionFactory
                                .createDecision(SplitOutcome.DO_SPLIT.getCode(), prob);
                        newSequence.addDecision(decision);
                        myShapeSequences.add(newSequence);
                    }
                }
            }
            topCandidate = false;
        }
        int i = 0;
        for (ShapeSequence shapeSequence : myShapeSequences) {
            // Note: we always return the no-split option, even if it's very low probability
            if (shapeSequence.size() == 1 || i < beamWidth) {
                shapeSequences.add(shapeSequence);
            }
            i++;
        }
    }
    return shapeSequences;
}
From source file:org.jactr.core.chunk.basic.AbstractSubsymbolicChunk.java
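When setting REFERENCE_TIMES, a TreeSet<Double> sorts and de-duplicates the parsed times; refTimes.size() is then subtracted from the stored reference count before the times are re-added.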
public void setParameter(String key, String value) {
    if (LOGGER.isDebugEnabled())
        LOGGER.debug("Attempting to set " + key + " to " + value);
    if (CREATION_TIME.equalsIgnoreCase(key))
        setCreationTime(ParameterHandler.numberInstance().coerce(value).doubleValue());
    else if (TIMES_NEEDED.equalsIgnoreCase(key))
        setTimesNeeded(ParameterHandler.numberInstance().coerce(value).intValue());
    else if (TIMES_IN_CONTEXT.equalsIgnoreCase(key))
        setTimesInContext(ParameterHandler.numberInstance().coerce(value).intValue());
    else if (REFERENCE_COUNT.equalsIgnoreCase(key)) {
        long referenceCount = ParameterHandler.numberInstance().coerce(value).longValue();
        IReferences references = getReferences();
        double[] oldTimes = references.getTimes();
        long oldCount = references.getNumberOfReferences();
        references.clear();
        /*
         * create referenceCount references from creation time to now
         */
        double min = getCreationTime();
        double step = (ACTRRuntime.getRuntime().getClock(getParentChunk().getModel()).getTime() - min)
                / referenceCount;
        for (int i = 0; i < referenceCount; i++)
            references.addReferenceTime(getCreationTime() + (i * step));
        _lastActivationComputationTime = -1;
        if (_parentChunk.hasParameterListeners()) {
            _parentChunk.dispatch(new ParameterEvent(this,
                    ACTRRuntime.getRuntime().getClock(_parentChunk.getModel()).getTime(), REFERENCE_COUNT,
                    oldCount, referenceCount));
            _parentChunk.dispatch(new ParameterEvent(this,
                    ACTRRuntime.getRuntime().getClock(_parentChunk.getModel()).getTime(), REFERENCE_TIMES,
                    oldTimes, references.getTimes()));
        }
    } else if (REFERENCE_TIMES.equalsIgnoreCase(key)) {
        if (LOGGER.isDebugEnabled())
            LOGGER.debug("Attempting to set reference times with " + value);
        CollectionParameterHandler<Number> cph = new CollectionParameterHandler<Number>(
                ParameterHandler.numberInstance());
        Collection<Number> times = cph.coerce(value);
        // let's make sure they are sorted..
        TreeSet<Double> refTimes = new TreeSet<Double>();
        for (Number time : times)
            refTimes.add(time.doubleValue());
        IReferences references = getReferences();
        double[] oldTimes = references.getTimes();
        /*
         * if count was previously set, we need to maintain it..
         */
        references.setNumberOfReferences(Math.max(0, references.getNumberOfReferences() - refTimes.size()));
        /*
         * now we'll add these times
         */
        for (Double time : refTimes)
            references.addReferenceTime(time);
        _lastActivationComputationTime = -1;
        if (_parentChunk.hasParameterListeners())
            _parentChunk.dispatch(new ParameterEvent(this,
                    ACTRRuntime.getRuntime().getClock(_parentChunk.getModel()).getTime(), REFERENCE_TIMES,
                    oldTimes, references.getTimes()));
    } else if (BASE_LEVEL_ACTIVATION.equalsIgnoreCase(key))
        setBaseLevelActivation(ParameterHandler.numberInstance().coerce(value).doubleValue());
    else if (SPREADING_ACTIVATION.equalsIgnoreCase(key))
        setSpreadingActivation(ParameterHandler.numberInstance().coerce(value).doubleValue());
    else if (SOURCE_ACTIVATION.equalsIgnoreCase(key))
        setSourceActivation(ParameterHandler.numberInstance().coerce(value).doubleValue());
    else if (ACTIVATION.equalsIgnoreCase(key))
        setActivation(ParameterHandler.numberInstance().coerce(value).doubleValue());
}
From source file:com.clust4j.algo.MeanShiftTests.java
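This unit test collects mean-shift seeds in a TreeSet and asserts centers.size() == 4; later, centroids.size() verifies how many near-duplicate centers were merged away.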
@Test
public void testAutoEstimationWithScale() {
    Array2DRowRealMatrix iris = (Array2DRowRealMatrix) new StandardScaler().fit(data_).transform(data_);
    final double[][] X = iris.getData();

    // MS estimates bw at 1.6041295821313855
    final double bandwidth = 1.6041295821313855;
    assertTrue(Precision.equals(
            MeanShift.autoEstimateBW(iris, 0.3, Distance.EUCLIDEAN, GlobalState.DEFAULT_RANDOM_STATE, false),
            bandwidth, 1e-9));
    assertTrue(Precision.equals(
            MeanShift.autoEstimateBW(iris, 0.3, Distance.EUCLIDEAN, GlobalState.DEFAULT_RANDOM_STATE, true),
            bandwidth, 1e-9));

    // Asserting fit works without breaking things...
    RadiusNeighbors r = new RadiusNeighbors(iris, new RadiusNeighborsParameters(bandwidth)).fit();

    TreeSet<MeanShiftSeed> centers = new TreeSet<>();
    for (double[] seed : X)
        centers.add(MeanShift.singleSeed(seed, r, X, 300));
    assertTrue(centers.size() == 4);

    double[][] expected_dists = new double[][] {
            new double[] { 0.50161528154395962, -0.31685274298813487, 0.65388162422893481, 0.65270450741975761 },
            new double[] { 0.52001211065400177, -0.29561728795619946, 0.67106269515983397, 0.67390853215763813 },
            new double[] { 0.54861244890482475, -0.25718786696105495, 0.68964559485632182, 0.69326664641211422 },
            new double[] { -1.0595457115461515, 0.74408909010240054, -1.2995708885010491, -1.2545442961404225 } };

    int[] expected_centers = new int[] { 82, 80, 77, 45 };
    int idx = 0;
    for (MeanShiftSeed seed : centers) {
        assertTrue(VecUtils.equalsWithTolerance(seed.dists, expected_dists[idx], 1e-1));
        assertTrue(seed.count == expected_centers[idx]);
        idx++;
    }

    ArrayList<EntryPair<double[], Integer>> center_intensity = new ArrayList<>();
    for (MeanShiftSeed seed : centers) {
        if (null != seed) {
            center_intensity.add(seed.getPair());
        }
    }

    final ArrayList<EntryPair<double[], Integer>> sorted_by_intensity = center_intensity;

    // test getting the unique vals
    idx = 0;
    final int m_prime = sorted_by_intensity.size();
    final Array2DRowRealMatrix sorted_centers = new Array2DRowRealMatrix(m_prime, iris.getColumnDimension());
    for (Map.Entry<double[], Integer> e : sorted_by_intensity)
        sorted_centers.setRow(idx++, e.getKey());

    // Create a boolean mask, init true
    final boolean[] unique = new boolean[m_prime];
    for (int i = 0; i < unique.length; i++)
        unique[i] = true;

    // Fit the new neighbors model
    RadiusNeighbors nbrs = new RadiusNeighbors(sorted_centers,
            new RadiusNeighborsParameters(bandwidth).setVerbose(false)).fit();

    // Iterate over sorted centers and query radii
    int[] indcs;
    double[] center;
    for (int i = 0; i < m_prime; i++) {
        if (unique[i]) {
            center = sorted_centers.getRow(i);
            indcs = nbrs.getNeighbors(new double[][] { center }, bandwidth, false).getIndices()[0];
            for (int id : indcs) {
                unique[id] = false;
            }
            unique[i] = true; // Keep this as true
        }
    }

    // Now assign the centroids...
    int redundant_ct = 0;
    final ArrayList<double[]> centroids = new ArrayList<>();
    for (int i = 0; i < unique.length; i++) {
        if (unique[i]) {
            centroids.add(sorted_centers.getRow(i));
        }
    }

    redundant_ct = unique.length - centroids.size();
    assertTrue(redundant_ct == 2);
    assertTrue(centroids.size() == 2);

    assertTrue(VecUtils.equalsWithTolerance(centroids.get(0),
            new double[] { 0.4999404345258691, -0.3157948009929614, 0.6516983739795399, 0.6505251874544873 },
            1e-6));
    assertTrue(VecUtils.equalsExactly(centroids.get(1),
            new double[] { -1.0560079864392702, 0.7416046454700266, -1.295231741534238, -1.2503554887998656 }));

    // also put the centroids into a matrix. We have to
    // wait to perform this op, because we have to know
    // the size of centroids first...
    Array2DRowRealMatrix clust_centers = new Array2DRowRealMatrix(centroids.size(), iris.getColumnDimension());
    for (int i = 0; i < clust_centers.getRowDimension(); i++)
        clust_centers.setRow(i, centroids.get(i));

    // The final nearest neighbors model -- if this works, we are in the clear...
    new NearestNeighbors(clust_centers, new NearestNeighborsParameters(1)).fit();
}