List of usage examples for java.util NavigableSet add
boolean add(E e);
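add is inherited from java.util.Set and java.util.SortedSet: the element is inserted at its position in the set's ordering, and the call returns true only if the element was not already present. A minimal self-contained demonstration:

import java.util.NavigableSet;
import java.util.TreeSet;

public class NavigableSetAddDemo {
    public static void main(String[] args) {
        NavigableSet<String> fruit = new TreeSet<>();
        System.out.println(fruit.add("banana")); // true: newly inserted
        System.out.println(fruit.add("apple"));  // true: newly inserted
        System.out.println(fruit.add("banana")); // false: already present, set unchanged
        System.out.println(fruit);               // [apple, banana]: iteration follows sort order
    }
}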
From source file:org.apache.kylin.common.persistence.JDBCResourceDAO.java
public NavigableSet<String> listAllResource(final String folderPath, final boolean recursive) throws SQLException {
    final NavigableSet<String> allResourceName = new TreeSet<>();
    executeSql(new SqlOperation() {
        @Override
        public void execute(Connection connection) throws SQLException {
            String tableName = getMetaTableName(folderPath);
            pstat = connection.prepareStatement(getListResourceSqlString(tableName));
            pstat.setString(1, folderPath + "%");
            rs = pstat.executeQuery();
            while (rs.next()) {
                String path = rs.getString(META_TABLE_KEY);
                Preconditions.checkState(path.startsWith(folderPath));
                if (recursive) {
                    allResourceName.add(path);
                } else {
                    int cut = path.indexOf('/', folderPath.length());
                    String child = cut < 0 ? path : path.substring(0, cut);
                    allResourceName.add(child);
                }
            }
        }
    });
    return allResourceName;
}
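In the non-recursive branch, each deeper path is trimmed to its first segment below folderPath before being added, so the TreeSet both deduplicates and sorts the direct children. A standalone sketch of just that trimming logic, with hypothetical paths and no JDBC:

import java.util.List;
import java.util.NavigableSet;
import java.util.TreeSet;

public class DirectChildren {
    public static NavigableSet<String> listChildren(String folderPath, List<String> paths) {
        NavigableSet<String> children = new TreeSet<>();
        for (String path : paths) {
            int cut = path.indexOf('/', folderPath.length());
            // deeper descendants collapse to their top-level child, which the set dedupes
            children.add(cut < 0 ? path : path.substring(0, cut));
        }
        return children;
    }
}

For example, listChildren("/meta/", List.of("/meta/a", "/meta/b/c", "/meta/b/d")) returns [/meta/a, /meta/b].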
From source file:org.apache.kylin.metadata.badquery.BadQueryHistoryManager.java
public BadQueryHistory addEntryToProject(BadQueryEntry badQueryEntry, String project) throws IOException {
    if (StringUtils.isEmpty(project) || badQueryEntry.getAdj() == null || badQueryEntry.getSql() == null)
        throw new IllegalArgumentException();
    BadQueryHistory badQueryHistory = getBadQueriesForProject(project);

    NavigableSet<BadQueryEntry> entries = badQueryHistory.getEntries();
    if (entries.size() >= kylinConfig.getBadQueryHistoryNum()) {
        entries.pollFirst();
    }
    entries.add(badQueryEntry);

    getStore().putResource(badQueryHistory.getResourcePath(), badQueryHistory, BAD_QUERY_INSTANCE_SERIALIZER);
    return badQueryHistory;
}
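The pollFirst()-before-add() sequence above is a compact way to keep a sorted collection capped at a fixed size: once the cap is reached, the smallest element (for entries ordered by time, the oldest) is evicted before the new one goes in. A minimal sketch of the pattern, with MAX_ENTRIES as a stand-in for kylinConfig.getBadQueryHistoryNum():

import java.util.NavigableSet;
import java.util.TreeSet;

public class BoundedHistory {
    private static final int MAX_ENTRIES = 50; // stand-in for the configured history size
    private final NavigableSet<Long> timestamps = new TreeSet<>();

    public void record(long timestamp) {
        if (timestamps.size() >= MAX_ENTRIES) {
            timestamps.pollFirst(); // evict the smallest (oldest) entry
        }
        timestamps.add(timestamp);
    }
}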
From source file:org.apache.nifi.cluster.manager.impl.WebClusterManager.java
/**
 * Merges the listing requests in the specified map into the specified listing request
 *
 * @param listingRequest the target listing request
 * @param listingRequestMap the mapping of all responses being merged
 */
private void mergeListingRequests(final ListingRequestDTO listingRequest,
        final Map<NodeIdentifier, ListingRequestDTO> listingRequestMap) {
    final Comparator<FlowFileSummaryDTO> comparator = new Comparator<FlowFileSummaryDTO>() {
        @Override
        public int compare(final FlowFileSummaryDTO dto1, final FlowFileSummaryDTO dto2) {
            int positionCompare = dto1.getPosition().compareTo(dto2.getPosition());
            if (positionCompare != 0) {
                return positionCompare;
            }
            final String address1 = dto1.getClusterNodeAddress();
            final String address2 = dto2.getClusterNodeAddress();
            if (address1 == null && address2 == null) {
                return 0;
            }
            if (address1 == null) {
                return 1;
            }
            if (address2 == null) {
                return -1;
            }
            return address1.compareTo(address2);
        }
    };

    final NavigableSet<FlowFileSummaryDTO> flowFileSummaries = new TreeSet<>(comparator);

    ListFlowFileState state = null;
    int numStepsCompleted = 0;
    int numStepsTotal = 0;
    int objectCount = 0;
    long byteCount = 0;
    boolean finished = true;
    for (final Map.Entry<NodeIdentifier, ListingRequestDTO> entry : listingRequestMap.entrySet()) {
        final NodeIdentifier nodeIdentifier = entry.getKey();
        final String nodeAddress = nodeIdentifier.getApiAddress() + ":" + nodeIdentifier.getApiPort();

        final ListingRequestDTO nodeRequest = entry.getValue();

        numStepsTotal++;
        if (Boolean.TRUE.equals(nodeRequest.getFinished())) {
            numStepsCompleted++;
        }

        final QueueSizeDTO nodeQueueSize = nodeRequest.getQueueSize();
        objectCount += nodeQueueSize.getObjectCount();
        byteCount += nodeQueueSize.getByteCount();

        if (!nodeRequest.getFinished()) {
            finished = false;
        }

        if (nodeRequest.getLastUpdated().after(listingRequest.getLastUpdated())) {
            listingRequest.setLastUpdated(nodeRequest.getLastUpdated());
        }

        // Keep the state with the lowest ordinal value (the "least completed").
        final ListFlowFileState nodeState = ListFlowFileState.valueOfDescription(nodeRequest.getState());
        if (state == null || state.compareTo(nodeState) > 0) {
            state = nodeState;
        }

        if (nodeRequest.getFlowFileSummaries() != null) {
            for (final FlowFileSummaryDTO summaryDTO : nodeRequest.getFlowFileSummaries()) {
                summaryDTO.setClusterNodeId(nodeIdentifier.getId());
                summaryDTO.setClusterNodeAddress(nodeAddress);
                flowFileSummaries.add(summaryDTO);

                // Keep the set from growing beyond our max
                if (flowFileSummaries.size() > listingRequest.getMaxResults()) {
                    flowFileSummaries.pollLast();
                }
            }
        }

        if (nodeRequest.getFailureReason() != null) {
            listingRequest.setFailureReason(nodeRequest.getFailureReason());
        }
    }

    final List<FlowFileSummaryDTO> summaryDTOs = new ArrayList<>(flowFileSummaries);
    listingRequest.setFlowFileSummaries(summaryDTOs);

    // multiply before dividing so integer math does not truncate partial progress to zero
    final int percentCompleted = (numStepsCompleted * 100) / numStepsTotal;
    listingRequest.setPercentCompleted(percentCompleted);
    listingRequest.setFinished(finished);

    listingRequest.getQueueSize().setByteCount(byteCount);
    listingRequest.getQueueSize().setObjectCount(objectCount);
}
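The add()-then-pollLast() idiom in the inner loop above is a standard bounded top-N selection: the TreeSet holds at most maxResults elements under the comparator, and anything worse than the current worst is discarded immediately. The same pattern in isolation, as a sketch with natural ordering in place of the FlowFileSummaryDTO comparator:

import java.util.NavigableSet;
import java.util.TreeSet;

public class TopN {
    public static NavigableSet<Integer> smallestN(Iterable<Integer> values, int maxResults) {
        NavigableSet<Integer> best = new TreeSet<>();
        for (int value : values) {
            best.add(value);
            if (best.size() > maxResults) {
                best.pollLast(); // drop the largest so only the N smallest remain
            }
        }
        return best;
    }
}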
From source file:org.nuclos.client.wizard.steps.NuclosEntityNameStep.java
/**
 * Load subnodes from t_md_entity_subnodes table.
 */
private void loadTreeView() {
    final MasterDataDelegate mdd = MasterDataDelegate.getInstance();
    int so = 0;
    final NavigableSet<EntityTreeViewVO> views = new TreeSet<EntityTreeViewVO>();
    for (MasterDataVO vo : mdd.getMasterData(NuclosEntity.ENTITYSUBNODES.getEntityName())) {
        final Long lEntity = IdUtils.toLongId(vo.getField(EntityTreeViewVO.ENTITY_FIELD + "Id"));
        if (MetaDataDelegate.getInstance().getEntityById(lEntity).getEntity().equals(model.getEntityName())) {
            final String entity = (String) vo.getField(EntityTreeViewVO.SUBFORM_ENTITY_FIELD);
            final String field = (String) vo.getField(EntityTreeViewVO.SUBFORM2ENTITY_REF_FIELD);
            final String foldername = (String) vo.getField(EntityTreeViewVO.FOLDERNAME_FIELD);
            final Boolean active = (Boolean) vo.getField(EntityTreeViewVO.ACTIVE_FIELD);
            // active and sortOrder are new fields.
            // Hence we need a way to default them. (Thomas Pasch)
            Integer sortOrder = (Integer) vo.getField(EntityTreeViewVO.SORTORDER_FIELD);
            if (sortOrder == null) {
                sortOrder = Integer.valueOf(++so);
            }
            views.add(new EntityTreeViewVO(IdUtils.toLongId(vo.getId()), lEntity, entity, field,
                    foldername, active, sortOrder));
            so = sortOrder.intValue();
        }
    }
    model.getTreeView().addAll(views);
}
From source file:org.structnetalign.merge.BronKerboschCliqueFinder.java
@Override
public NavigableSet<Set<V>> transform(UndirectedGraph<V, E> graph) {
    InternalBronKerboschCliqueFinder<V, E> finder = new InternalBronKerboschCliqueFinder<V, E>(graph);
    Collection<Set<V>> unsortedCliques = finder.getAllMaximalCliques();
    // Order cliques from largest to smallest. Equal-size but distinct cliques are
    // treated as unequal so that none are dropped by the TreeSet; note that the
    // asymmetric tie-break below violates the Comparator contract, so the relative
    // order among equal-size cliques is unspecified.
    Comparator<Set<V>> comparator = new Comparator<Set<V>>() {
        @Override
        public int compare(Set<V> clique1, Set<V> clique2) {
            if (CollectionUtils.isEqualCollection(clique1, clique2))
                return 0;
            if (clique1.size() < clique2.size()) {
                return 1;
            } else if (clique1.size() > clique2.size()) {
                return -1;
            } else {
                return -1; // same size, different members: keep both
            }
        }
    };
    NavigableSet<Set<V>> yes = new TreeSet<>(comparator);
    for (Set<V> s : unsortedCliques) {
        yes.add(s);
    }
    return yes;
}
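Because compare(a, b) and compare(b, a) both return -1 for equal-size unequal cliques, the comparator above breaks the Comparator contract. A contract-safe sketch, assuming the element type is Comparable, that uses the same element-wise tie-break as the BronKerboschMergeJob example further below:

import java.util.Comparator;
import java.util.Iterator;
import java.util.Set;
import java.util.TreeSet;

public class CliqueOrdering {
    // Largest-first ordering with a deterministic element-wise tie-break, so that
    // distinct equal-size cliques compare unequal and none are silently dropped.
    public static <V extends Comparable<V>> Comparator<Set<V>> largestFirst() {
        return (clique1, clique2) -> {
            if (clique1.size() != clique2.size()) {
                return Integer.compare(clique2.size(), clique1.size()); // bigger first
            }
            Iterator<V> it1 = new TreeSet<>(clique1).iterator();
            Iterator<V> it2 = new TreeSet<>(clique2).iterator();
            while (it1.hasNext()) {
                int c = it1.next().compareTo(it2.next());
                if (c != 0) {
                    return c;
                }
            }
            return 0; // same size and same elements: genuinely equal
        };
    }
}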
From source file:org.structnetalign.merge.BronKerboschCliqueFinder.java
@Override
public NavigableSet<Set<V>> getMaximumCliques(UndirectedGraph<V, E> graph) {
    NavigableSet<Set<V>> cliques = transform(graph);
    // Set<V> has no natural ordering, so reuse the largest-first comparator from
    // transform(); a TreeSet built with natural ordering would throw
    // ClassCastException on the first add.
    NavigableSet<Set<V>> top = new TreeSet<>(cliques.comparator());
    int max = 0;
    for (Set<V> clique : cliques) {
        // cliques iterate largest first, so only cliques tied for the maximum size pass
        if (clique.size() >= max) {
            max = clique.size();
            top.add(clique);
        }
    }
    return top;
}
From source file:org.structnetalign.merge.BronKerboschMergeJob.java
@Override
public List<NavigableSet<Integer>> call() throws Exception {

    logger.info("Searching for cliques on job " + index + " containing " + graph.getVertexCount()
            + " vertices and " + graph.getHomologyCount() + " homology edges");

    // find the cliques
    BronKerboschCliqueFinder<Integer, HomologyEdge> finder = new BronKerboschCliqueFinder<>();

    // these cliques are ordered from largest to smallest
    Collection<Set<Integer>> cliques = finder.transform(graph.getHomology());

    // just report the cliques we're using
    logger.info("Job " + index + ": " + "Found " + cliques.size() + " maximal cliques");
    int i = 1;
    for (Set<Integer> clique : cliques) {
        logger.debug("Job " + index + ": " + "Clique " + i + ": " + clique);
        i++;
    }

    // partition the cliques by sets of interactions
    // we call these (maximal) degenerate sets
    List<NavigableSet<Integer>> simpleDegenerateSets = new ArrayList<NavigableSet<Integer>>();
    for (Set<Integer> clique : cliques) {
        NavigableMap<String, NavigableSet<Integer>> degenerateSetMap = new TreeMap<>();
        for (int v : clique) {
            Collection<Integer> neighbors = graph.getInteractionNeighbors(v);
            String hash = hashVertexInteractions(neighbors);
            NavigableSet<Integer> degenerateSet = degenerateSetMap.get(hash);
            if (degenerateSet == null) {
                degenerateSet = new TreeSet<>();
                degenerateSetMap.put(hash, degenerateSet);
            }
            degenerateSet.add(v);
            logger.trace("Job " + index + ": " + "Found " + hash + " --> " + degenerateSetMap.get(hash));
        }
        for (NavigableSet<Integer> set : degenerateSetMap.values()) {
            simpleDegenerateSets.add(set);
        }
    }

    /*
     * Now sort the degenerate sets from largest to smallest.
     * Take into account the edge case where the sizes are the same.
     */
    Comparator<NavigableSet<Integer>> comparator = new Comparator<NavigableSet<Integer>>() {
        @Override
        public int compare(NavigableSet<Integer> clique1, NavigableSet<Integer> clique2) {
            if (CollectionUtils.isEqualCollection(clique1, clique2))
                return 0;
            if (clique1.size() < clique2.size()) {
                return 1;
            } else if (clique1.size() > clique2.size()) {
                return -1;
            } else {
                Iterator<Integer> iter1 = clique1.iterator();
                Iterator<Integer> iter2 = clique2.iterator();
                while (iter1.hasNext()) { // we know they're the same size
                    int v1 = iter1.next();
                    int v2 = iter2.next();
                    if (v1 < v2) {
                        return 1;
                    } else if (v1 > v2) {
                        return -1;
                    }
                }
            }
            // they're the same throughout, so they're equal
            return 0;
        }
    };
    List<NavigableSet<Integer>> sortedDegenerateSets = new ArrayList<>(simpleDegenerateSets.size());
    sortedDegenerateSets.addAll(simpleDegenerateSets);
    Collections.sort(sortedDegenerateSets, comparator);

    /*
     * Now we want to return only the maximal degenerate sets.
     */
    TreeSet<String> verticesAlreadyUsed = new TreeSet<String>();
    List<NavigableSet<Integer>> finalDegenerateSets = new ArrayList<>(sortedDegenerateSets.size());
    int nTrivial = 0;
    int nWeak = 0; // a degenerate set is weak if a larger set already used one of its vertices
    forcliques: for (NavigableSet<Integer> set : sortedDegenerateSets) {
        // discard trivial degenerate sets
        if (set.size() < 2) {
            nTrivial++;
            continue;
        }
        // verify that we haven't already used any vertex in this degenerate set
        for (int v : set) {
            String hash = NetworkUtils.hash(v); // use MD5 for safety
            if (verticesAlreadyUsed.contains(hash)) {
                // discard this degenerate set and do NOT say we've used any of these vertices
                nWeak++;
                continue forcliques;
            }
        }
        // we haven't used any vertex in this degenerate set
        // now add all of these vertices
        // do NOT add before, or we'll add vertices we haven't used yet
        for (int v : set) {
            String hash = NetworkUtils.hash(v);
            verticesAlreadyUsed.add(hash);
        }
        finalDegenerateSets.add(set); // keep this degenerate set
    }

    logger.info("Job " + index + ": " + "Found " + finalDegenerateSets.size()
            + " strong nontrivial maximal degenerate sets (" + nTrivial + " trivial and " + nWeak + " weak)");

    return finalDegenerateSets;
}
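The get()/null-check/put() sequence that fills degenerateSetMap above is the classic multimap-building idiom; since Java 8 it collapses to Map.computeIfAbsent. A sketch of the same grouping step under that idiom, with keyOf standing in for hashVertexInteractions:

import java.util.NavigableMap;
import java.util.NavigableSet;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.function.IntFunction;

public class GroupBySortedSets {
    // Group integer vertices into sorted sets keyed by an arbitrary key function.
    public static NavigableMap<String, NavigableSet<Integer>> group(Iterable<Integer> vertices,
            IntFunction<String> keyOf) {
        NavigableMap<String, NavigableSet<Integer>> groups = new TreeMap<>();
        for (int v : vertices) {
            groups.computeIfAbsent(keyOf.apply(v), k -> new TreeSet<>()).add(v);
        }
        return groups;
    }
}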
From source file:org.structnetalign.util.NetworkUtils.java
public static NavigableSet<Integer> getVertexIds(Interaction interaction) {
    Collection<Participant> participants = interaction.getParticipants();
    if (participants.size() != 2)
        throw new IllegalArgumentException("Cannot handle interactions that do not involve exactly 2 participants");
    NavigableSet<Integer> set = new TreeSet<>();
    for (Participant participant : participants) {
        int id = participant.getInteractor().getId();
        set.add(id);
    }
    return set;
}
From source file:sadl.modellearner.rtiplus.SimplePDRTALearner.java
protected NavigableSet<Refinement> getMergeRefs(Transition t, StateColoring sc) {

    final NavigableSet<Refinement> refs = new TreeSet<>();

    // sequential
    for (final PDRTAState r : sc) {
        double score = tester.testMerge(r, t.target);
        if (mainModel == t.ta) {
            logger.trace("Score: {} (MERGE {} with {})", score, r.getIndex(), t.target.getIndex());
        }
        if (score > significance && score <= 1.0) {
            score = (score - significance) / (1.0 - significance);
            final Refinement ref = new Refinement(r, t.target, score, sc);
            refs.add(ref);
        }
    }

    // parallel (not yet checked for determinism)
    // final NavigableSet<Refinement> safeRefs = Collections.synchronizedNavigableSet(refs);
    // sc.getRedStates().parallelStream().forEach(red -> {
    //     double score = tester.testMerge(red, t.target);
    //     if (mainModel == t.ta) {
    //         logger.trace("Score: {} (MERGE {} with {})", score, red.getIndex(), t.target.getIndex());
    //     }
    //     if (score > significance && score <= 1.0) {
    //         score = (score - significance) / (1.0 - significance);
    //         final Refinement ref = new Refinement(red, t.target, score, sc);
    //         l1.lock();
    //         refs.add(ref);
    //         l1.unlock();
    //         // safeRefs.add(ref);
    //     }
    // });

    return refs;
}
From source file:sadl.modellearner.rtiplus.SimplePDRTALearner.java
protected NavigableSet<Refinement> getSplitRefs(Transition t, StateColoring sc) {

    final NavigableSet<Refinement> refs = new TreeSet<>();

    // sequential
    final Iterator<Integer> it = t.in.getTails().keySet().iterator();
    if (it.hasNext()) {
        int last = it.next().intValue();
        while (it.hasNext()) {
            final int cur = it.next().intValue();
            int splitTime = -1;
            switch (splitPos) {
            case LEFT:
                splitTime = last;
                break;
            case MIDDLE:
                splitTime = (int) Math.rint(((cur - last) - 1) / 2.0) + last;
                break;
            case RIGHT:
                splitTime = cur - 1;
                break;
            default:
                splitTime = (int) Math.rint(((cur - last) - 1) / 2.0) + last;
                break;
            }
            double score = tester.testSplit(t.source, t.symAlphIdx, splitTime);
            if (mainModel == t.ta) {
                logger.trace("Score: {} (SPLIT {} @ ({},{}))", score, t.source.getIndex(),
                        t.ta.getSymbol(t.symAlphIdx), splitTime);
            }
            if (score < significance && score >= 0) {
                score = (significance - score) / significance;
                final Refinement ref = new Refinement(t.source, t.symAlphIdx, splitTime, score, sc);
                refs.add(ref);
            }
            last = cur;
        }

        // parallel (not yet checked for determinism)
        // final TIntList splitTimes = new TIntArrayList();
        // if (it.hasNext()) {
        //     int last = it.next();
        //     while (it.hasNext()) {
        //         final int cur = it.next();
        //         int splitTime = -1;
        //         switch (splitPos) {
        //         case LEFT:
        //             splitTime = last;
        //             break;
        //         case MIDDLE:
        //             splitTime = (int) Math.rint(((cur - last) - 1) / 2.0) + last;
        //             break;
        //         case RIGHT:
        //             splitTime = cur - 1;
        //             break;
        //         default:
        //             splitTime = (int) Math.rint(((cur - last) - 1) / 2.0) + last;
        //             break;
        //         }
        //         splitTimes.add(splitTime);
        //         last = cur;
        //     }
        //
        //     final NavigableSet<Refinement> safeRefs = Collections.synchronizedNavigableSet(refs);
        //     Arrays.stream(splitTimes.toArray()).parallel().forEach(splitTime -> {
        //         double score = tester.testSplit(t.source, t.symAlphIdx, splitTime);
        //         if (mainModel == t.ta) {
        //             logger.trace("Score: {} (SPLIT {} @ ({},{}))", score, t.source.getIndex(),
        //                     t.ta.getSymbol(t.symAlphIdx), splitTime);
        //         }
        //         if (score < significance && score >= 0) {
        //             score = (significance - score) / significance;
        //             final Refinement ref = new Refinement(t.source, t.symAlphIdx, splitTime, score, sc);
        //             l2.lock();
        //             refs.add(ref);
        //             l2.unlock();
        //             // safeRefs.add(ref);
        //         }
        //     });
        // }
    }
    return refs;
}
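Both commented-out parallel variants above guard refs.add(ref) with an explicit lock (or hint at Collections.synchronizedNavigableSet). A lock-free alternative, should the parallel path ever be enabled, is to collect into java.util.concurrent.ConcurrentSkipListSet, which is a thread-safe NavigableSet. A sketch under that assumption, with score(...) as a hypothetical stand-in for the tester calls:

import java.util.NavigableSet;
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.stream.IntStream;

public class ConcurrentRefinements {
    public static NavigableSet<Double> scoreInParallel(int n) {
        // ConcurrentSkipListSet supports concurrent add() without external locking
        NavigableSet<Double> scores = new ConcurrentSkipListSet<>();
        IntStream.range(0, n).parallel().forEach(i -> scores.add(score(i)));
        return scores;
    }

    private static double score(int i) {
        return Math.sin(i); // hypothetical scoring function for illustration only
    }
}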