List of usage examples for java.util.SortedSet.add
boolean add(E e);
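SortedSet.add follows the standard Set.add contract: it inserts the element into the set's sorted order (natural ordering or the set's comparator) and returns true only if the set did not already contain an equal element. A minimal self-contained sketch before the real-world examples below:

import java.util.SortedSet;
import java.util.TreeSet;

public class SortedSetAddDemo {
    public static void main(String[] args) {
        SortedSet<String> names = new TreeSet<>();
        System.out.println(names.add("bravo")); // true: element was inserted
        System.out.println(names.add("alpha")); // true: element was inserted
        System.out.println(names.add("alpha")); // false: duplicate, set unchanged
        System.out.println(names);              // [alpha, bravo] -- iteration order is sorted
    }
}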
From source file:de.ailis.xadrian.data.Sector.java
/**
 * Returns the silicon asteroids of this sector.
 *
 * @return The silicon asteroids
 */
public Asteroid[] getSiliconAsteroids() {
    final SortedSet<Asteroid> asteroids = new TreeSet<Asteroid>();
    for (final Asteroid asteroid : getAsteroids()) {
        if (asteroid.getWare().isSiliconWafers()) {
            asteroids.add(asteroid);
        }
    }
    return asteroids.toArray(new Asteroid[asteroids.size()]);
}
From source file:de.jcup.egradle.codeassist.dsl.HTMLDescriptionBuilder.java
private void appendAppendixMethods(Type type, StringBuilder description) {
    description.append("<h5 id='appendix_methods'>Methods:</h5>");
    Set<Method> methods = type.getMethods();
    SortedSet<String> sortedLinkReferences = new TreeSet<>();
    for (Method m : methods) {
        String methodSignature = MethodUtils.createSignature(m);
        Type declaringType = m.getParent();
        StringBuilder referenceLink = new StringBuilder();
        referenceLink.append("\n<a href='type://").append(declaringType.getName()).append("#")
                .append(methodSignature).append("'>");
        referenceLink.append(methodSignature);
        referenceLink.append("</a>");
        sortedLinkReferences.add(referenceLink.toString());
    }
    appendList(description, sortedLinkReferences);
}
From source file:uk.ac.rdg.evoportal.pages.ScaleTestDetail.java
public ScaleTestDetail(PageParameters params) {
    String idStr = params.getString("id");
    final long testID = Long.parseLong(idStr);
    ScaleTest scaleTest = ScaleTestsDataProvider.get(testID);
    int percentageDone = scaleTest.getPercentageDone();
    String testName = scaleTest.getLabel();
    int iterations = scaleTest.getIterations();
    add(new Label("id", idStr));
    add(new Label("name", testName));
    add(new Label("its", Integer.toString(iterations)));
    add(new Label("pcDone", percentageDone + "%"));
    add(new TextArea("controlBlock", new Model<String>(scaleTest.getBPBlock())));
    SortedSet<Integer> nodesSet = new TreeSet<Integer>();
    List<ScaleTestComputeJob> results = scaleTest.getScaleTestComputeJobs();
    double[][] runSeries = new double[2][results.size()];
    int j = 0;
    double testSize = (double) iterations;
    for (Iterator<ScaleTestComputeJob> i = results.iterator(); i.hasNext();) {
        ScaleTestComputeJob result = i.next();
        int nodes = result.getNodes();
        int duration = result.getDuration();
        runSeries[0][j] = (double) nodes;
        runSeries[1][j] = (double) duration; // in seconds
        nodesSet.add(result.getNodes());
        double m = ((double) duration / testSize);
        calc.putMultipler(nodes, m);
        j++;
    }
    List<Integer> iterationsChoices = Arrays.asList(new Integer[] { 100000, 200000, 300000, 400000, 500000,
            1000000, 1200000, 1300000, 1400000, 1500000, 2000000, 3000000, 4000000, 5000000, 6000000,
            7000000, 8000000, 9000000, 10000000 });
    Form calcForm = new Form("calcForm", new CompoundPropertyModel(calc));
    calcForm.setVisible(false);
    // only render image if test is completed with all jobs stopped
    boolean isResultsReady = percentageDone == 100;
    if (isResultsReady) {
        DefaultXYDataset xyData = new DefaultXYDataset();
        xyData.addSeries(testName, runSeries);
        JFreeChart scaleChart = ChartFactory.createScatterPlot("Scale test results for " + testName,
                "Number of nodes", "Time to run in seconds", xyData, PlotOrientation.VERTICAL, true, true,
                true);
        add(new ScaleTestResultsChart("chart", scaleChart, 600, 300));
        calc.setIterations(iterationsChoices.get(0));
        calc.setNodes(nodesSet.first());
        calcForm.setVisible(true);
    } else {
        add(new Image("chart"));
    }
    final Label durationText = new Label("durationString");
    durationText.setOutputMarkupId(true);
    DropDownChoice iterationsDropDown = new DropDownChoice("iterations", iterationsChoices);
    iterationsDropDown.add(new AjaxFormComponentUpdatingBehavior("onchange") {
        @Override
        protected void onUpdate(AjaxRequestTarget target) {
            target.addComponent(durationText);
        }
    });
    DropDownChoice nodesDropDown = new DropDownChoice("nodes", Arrays.asList(nodesSet.toArray()));
    nodesDropDown.add(new AjaxFormComponentUpdatingBehavior("onchange") {
        @Override
        protected void onUpdate(AjaxRequestTarget target) {
            target.addComponent(durationText);
        }
    });
    calcForm.add(iterationsDropDown);
    calcForm.add(nodesDropDown);
    calcForm.add(durationText);
    add(calcForm);
    ScaleTestComputeJobsDataProvider scaleTestComputeJobsDataProvider = new ScaleTestComputeJobsDataProvider(
            testID);
    add(new ScaleTestComputeJobsDataView("jobDetails", scaleTestComputeJobsDataProvider, testID));
    if (!isResultsReady) {
        AjaxSelfUpdatingTimerBehavior timer = new AjaxSelfUpdatingTimerBehavior(Duration.seconds(15)) {
            @Override
            protected void onPostProcessTarget(AjaxRequestTarget target) {
                ScaleTest refreshedScaleTest = ScaleTestsDataProvider.get(testID);
                int pcDone = refreshedScaleTest.getPercentageDone();
                get("pcDone").replaceWith(new Label("pcDone", pcDone + "%"));
                if (pcDone == 100) {
                    setResponsePage(ScaleTestDetail.class, new PageParameters("id=" + testID));
                }
            }
        };
        add(timer);
    }
}
From source file:net.sourceforge.fenixedu.presentationTier.Action.publico.department.PublicDepartmentSiteDA.java
public ActionForward employees(ActionMapping mapping, ActionForm actionForm, HttpServletRequest request,
        HttpServletResponse response) {
    Unit unit = getUnit(request);

    BeanComparator employeeComparator = new BeanComparator("person", Party.COMPARATOR_BY_NAME_AND_ID);
    SortedSet<Unit> workingUnits = new TreeSet<Unit>(Unit.COMPARATOR_BY_NAME_AND_ID);
    SortedSet<Employee> noUnitAvailable = new TreeSet<Employee>(employeeComparator);
    Map<String, SortedSet<Employee>> employeesMap = new Hashtable<String, SortedSet<Employee>>();

    for (Employee employee : unit.getAllCurrentNonTeacherEmployees()) {
        if (employee.getPerson().hasRole(RoleType.TEACHER)) {
            continue;
        }

        Unit workingUnit = employee.getCurrentWorkingPlace();
        if (workingUnit != null) {
            workingUnits.add(workingUnit);

            String areaKey = workingUnit.getExternalId().toString();
            SortedSet<Employee> employees = employeesMap.get(areaKey);
            if (employees == null) {
                employees = new TreeSet<Employee>(employeeComparator);
                employeesMap.put(areaKey, employees);
            }
            employees.add(employee);
        } else {
            noUnitAvailable.add(employee);
        }
    }

    if (workingUnits.isEmpty()) {
        request.setAttribute("ignoreAreas", true);
    }

    request.setAttribute("areas", workingUnits);
    request.setAttribute("employees", employeesMap);
    request.setAttribute("employeesNoArea", noUnitAvailable);

    return mapping.findForward("department-employees");
}
From source file:br.com.hslife.orcamento.controller.PanoramaDespesaFixaController.java
private void gerarGraficoDespesa(List<LancamentoConta> pagamentos) {
    // Check whether chart generation should proceed, based on the number of entries
    if (pagamentos == null || pagamentos.size() == 0) {
        exibirGraficoDespesa = false;
        return;
    }

    // Instantiate the Maps that will hold the chart data. A LinkedHashMap is needed to
    // preserve the month/year ordering in the chart; otherwise a Comparator would have to
    // be implemented to sort the (String) keys by month and year sequence
    Map<String, Double> dadosPagamento = new LinkedHashMap<String, Double>();
    Map<String, Double> dadosAPagar = new LinkedHashMap<String, Double>();
    String dataKey = "";
    maxValueBarPagamentosDespesa = 1.0;
    saldoDevedor = 0.0;

    // Generate the keys and populate the Maps
    SortedSet<Date> chaves = new TreeSet<>();
    Calendar dataAtual = Calendar.getInstance();
    for (Integer i = 1; i <= periodo; i++) {
        chaves.add(dataAtual.getTime());
        if (periodoAConsiderar.equals("ANTERIOR"))
            dataAtual.add(Calendar.MONTH, -1);
        else
            dataAtual.add(Calendar.MONTH, 1);
    }

    // Populate the Maps with the generated keys
    for (Date data : chaves) {
        dataKey = new SimpleDateFormat("MM/yyyy").format(data);
        dadosPagamento.put(dataKey, 0.0);
        dadosAPagar.put(dataKey, 0.0);
    }

    // Iterate over the payment list, accumulating each amount under its month/year key
    for (LancamentoConta pagamento : pagamentos) {
        dataKey = new SimpleDateFormat("MM/yyyy").format(this.determinarChaveMesAnoPagamento(pagamento));
        if (pagamento.getStatusLancamentoConta().equals(StatusLancamentoConta.QUITADO)) {
            if (dadosPagamento.get(dataKey) != null) {
                if (!pagamento.getLancamentoPeriodico().getMoeda().equals(moedaPadrao)) {
                    dadosPagamento.put(dataKey, dadosPagamento.get(dataKey) + (pagamento.getValorPago()
                            * pagamento.getLancamentoPeriodico().getMoeda().getValorConversao()));
                } else {
                    dadosPagamento.put(dataKey, dadosPagamento.get(dataKey) + pagamento.getValorPago());
                }
            }
        } else {
            if (dadosAPagar.get(dataKey) != null) {
                if (!pagamento.getLancamentoPeriodico().getMoeda().equals(moedaPadrao)) {
                    dadosAPagar.put(dataKey, dadosAPagar.get(dataKey)
                            + (pagamento.getLancamentoPeriodico().getValorParcela()
                                    * pagamento.getLancamentoPeriodico().getMoeda().getValorConversao()));
                } else {
                    dadosAPagar.put(dataKey,
                            dadosAPagar.get(dataKey) + pagamento.getLancamentoPeriodico().getValorParcela());
                }
            } else {
                if (periodoAConsiderar.equals("ANTERIOR")
                        && pagamento.getDataVencimento().before(new Date())) {
                    saldoDevedor += pagamento.getLancamentoPeriodico().getValorParcela();
                }
            }
        }
    }

    // Populate the chart with the collected data and enable its display
    ultimosPagamentosDespesaModel = new BarChartModel();
    ultimosPagamentosDespesaModel.setLegendPosition("s");
    ultimosPagamentosDespesaModel.setTitle("Panorama das Despesas Fixas - Despesa");
    ultimosPagamentosDespesaModel.setStacked(true);
    ultimosPagamentosDespesaModel.setExtender("ext1");
    ChartSeries pagamentosSerie = new ChartSeries();
    ChartSeries aPagarSerie = new ChartSeries();
    pagamentosSerie.setLabel("Despesas Pagas");
    aPagarSerie.setLabel("Despesas a Pagar");
    for (String key : dadosPagamento.keySet()) {
        pagamentosSerie.set(key, dadosPagamento.get(key));
        aPagarSerie.set(key, dadosAPagar.get(key));
        if ((dadosPagamento.get(key) + dadosAPagar.get(key) + 100) > maxValueBarPagamentosDespesa) {
            maxValueBarPagamentosDespesa = dadosPagamento.get(key) + dadosAPagar.get(key) + 100;
        }
    }
    ultimosPagamentosDespesaModel.addSeries(pagamentosSerie);
    ultimosPagamentosDespesaModel.addSeries(aPagarSerie);
    exibirGraficoDespesa = true;
}
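The comment in the example above notes that without a LinkedHashMap, a Comparator over the "MM/yyyy" String keys would be needed. A hypothetical sketch of that alternative (not part of this project), using a TreeMap so the keys sort chronologically rather than lexically:

import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Comparator;
import java.util.Map;
import java.util.TreeMap;

public class MonthYearKeyOrder {
    // Orders "MM/yyyy" keys by the date they represent, not by their text
    static final Comparator<String> MONTH_YEAR = Comparator.comparing((String key) -> {
        try {
            return new SimpleDateFormat("MM/yyyy").parse(key);
        } catch (ParseException e) {
            throw new IllegalArgumentException("Bad key: " + key, e);
        }
    });

    public static void main(String[] args) {
        Map<String, Double> dados = new TreeMap<>(MONTH_YEAR);
        dados.put("01/2025", 10.0);
        dados.put("12/2024", 5.0);
        dados.put("02/2025", 7.5);
        System.out.println(dados); // {12/2024=5.0, 01/2025=10.0, 02/2025=7.5}
    }
}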
From source file:br.com.hslife.orcamento.controller.PanoramaDespesaFixaController.java
private void gerarGraficoReceita(List<LancamentoConta> pagamentos) {
    // Check whether chart generation should proceed, based on the number of entries
    if (pagamentos == null || pagamentos.size() == 0) {
        exibirGraficoReceita = false;
        return;
    }

    // Instantiate the Maps that will hold the chart data. A LinkedHashMap is needed to
    // preserve the month/year ordering in the chart; otherwise a Comparator would have to
    // be implemented to sort the (String) keys by month and year sequence
    Map<String, Double> dadosPagamento = new LinkedHashMap<String, Double>();
    Map<String, Double> dadosAPagar = new LinkedHashMap<String, Double>();
    String dataKey = "";
    maxValueBarPagamentosReceita = 1.0;
    saldoCredor = 0.0;

    // Generate the keys and populate the Maps
    SortedSet<Date> chaves = new TreeSet<>();
    Calendar dataAtual = Calendar.getInstance();
    for (Integer i = 1; i <= periodo; i++) {
        chaves.add(dataAtual.getTime());
        if (periodoAConsiderar.equals("ANTERIOR"))
            dataAtual.add(Calendar.MONTH, -1);
        else
            dataAtual.add(Calendar.MONTH, 1);
    }

    // Populate the Maps with the generated keys
    for (Date data : chaves) {
        dataKey = new SimpleDateFormat("MM/yyyy").format(data);
        dadosPagamento.put(dataKey, 0.0);
        dadosAPagar.put(dataKey, 0.0);
    }

    // Iterate over the payment list, accumulating each amount under its month/year key
    for (LancamentoConta pagamento : pagamentos) {
        dataKey = new SimpleDateFormat("MM/yyyy").format(this.determinarChaveMesAnoPagamento(pagamento));
        if (pagamento.getStatusLancamentoConta().equals(StatusLancamentoConta.QUITADO)) {
            if (dadosPagamento.get(dataKey) != null) {
                if (!pagamento.getLancamentoPeriodico().getMoeda().equals(moedaPadrao)) {
                    dadosPagamento.put(dataKey, dadosPagamento.get(dataKey) + (pagamento.getValorPago()
                            * pagamento.getLancamentoPeriodico().getMoeda().getValorConversao()));
                } else {
                    dadosPagamento.put(dataKey, dadosPagamento.get(dataKey) + pagamento.getValorPago());
                }
            }
        } else {
            if (dadosAPagar.get(dataKey) != null) {
                if (!pagamento.getLancamentoPeriodico().getMoeda().equals(moedaPadrao)) {
                    dadosAPagar.put(dataKey, dadosAPagar.get(dataKey)
                            + (pagamento.getLancamentoPeriodico().getValorParcela()
                                    * pagamento.getLancamentoPeriodico().getMoeda().getValorConversao()));
                } else {
                    dadosAPagar.put(dataKey,
                            dadosAPagar.get(dataKey) + pagamento.getLancamentoPeriodico().getValorParcela());
                }
            } else {
                if (periodoAConsiderar.equals("ANTERIOR")
                        && pagamento.getDataVencimento().before(new Date())) {
                    saldoCredor += pagamento.getLancamentoPeriodico().getValorParcela();
                }
            }
        }
    }

    // Populate the chart with the collected data and enable its display
    ultimosPagamentosReceitaModel = new BarChartModel();
    ultimosPagamentosReceitaModel.setLegendPosition("s");
    ultimosPagamentosReceitaModel.setTitle("Panorama das Despesas Fixas - Receita");
    ultimosPagamentosReceitaModel.setStacked(true);
    ultimosPagamentosReceitaModel.setExtender("ext1");
    ChartSeries pagamentosSerie = new ChartSeries();
    ChartSeries aPagarSerie = new ChartSeries();
    pagamentosSerie.setLabel("Receitas Pagas");
    aPagarSerie.setLabel("Receitas a Pagar");
    for (String key : dadosPagamento.keySet()) {
        pagamentosSerie.set(key, dadosPagamento.get(key));
        aPagarSerie.set(key, dadosAPagar.get(key));
        if ((dadosPagamento.get(key) + dadosAPagar.get(key) + 100) > maxValueBarPagamentosReceita) {
            maxValueBarPagamentosReceita = dadosPagamento.get(key) + dadosAPagar.get(key) + 100;
        }
    }
    ultimosPagamentosReceitaModel.addSeries(pagamentosSerie);
    ultimosPagamentosReceitaModel.addSeries(aPagarSerie);
    exibirGraficoReceita = true;
}
From source file:com.linkedin.pinot.routing.builder.KafkaLowLevelConsumerRoutingTableBuilder.java
@Override
public List<ServerToSegmentSetMap> computeRoutingTableFromExternalView(String tableName,
        ExternalView externalView, List<InstanceConfig> instanceConfigList) {
    // We build the routing table based off the external view here. What we want to do is to make sure that we
    // uphold the guarantees clients expect (no duplicate records, eventual consistency) while spreading the load
    // as equally as possible between the servers.
    //
    // Each Kafka partition contains a fraction of the data, so we need to make sure that we query all partitions.
    // Because in certain unlikely degenerate scenarios, we can consume overlapping data until segments are flushed
    // (at which point the overlapping data is discarded during the reconciliation process with the controller), we
    // need to ensure that the query that is sent has only one partition in CONSUMING state in order to avoid
    // duplicate records.
    //
    // Because we also want to spread the load as equally as possible between servers, we use a weighted random
    // replica selection that favors picking replicas with fewer segments assigned to them, thus having an
    // approximately equal distribution of load between servers.
    //
    // For example, given three replicas with 1, 2 and 3 segments assigned to each, the replica with one segment
    // should have a weight of 2, which is the maximum segment count minus the segment count for that replica.
    // Thus, each replica other than the replica(s) with the maximum segment count should have a chance of getting
    // a segment assigned to it. This corresponds to alternative three below:
    //
    // Alternative 1 (weight is sum of segment counts - segment count in that replica):
    // (6 - 1) = 5 -> P(0.4166)
    // (6 - 2) = 4 -> P(0.3333)
    // (6 - 3) = 3 -> P(0.2500)
    //
    // Alternative 2 (weight is max of segment counts - segment count in that replica + 1):
    // (3 - 1) + 1 = 3 -> P(0.5000)
    // (3 - 2) + 1 = 2 -> P(0.3333)
    // (3 - 3) + 1 = 1 -> P(0.1666)
    //
    // Alternative 3 (weight is max of segment counts - segment count in that replica):
    // (3 - 1) = 2 -> P(0.6666)
    // (3 - 2) = 1 -> P(0.3333)
    // (3 - 3) = 0 -> P(0.0000)
    //
    // Of those three weighting alternatives, the third one has the smallest standard deviation of the number of
    // segments assigned per replica, so it corresponds to the weighting strategy used for segment assignment.
    // Empirical testing shows that for 20 segments and three replicas, the standard deviation of each alternative
    // is respectively 2.112, 1.496 and 0.853.
    //
    // This algorithm works as follows:
    // 1. Gather all segments and group them by Kafka partition, sorted by sequence number
    // 2. Ensure that for each partition, we have at most one partition in consuming state
    // 3. Sort all the segments to be used during assignment in ascending order of replicas
    // 4. For each segment to be used during assignment, pick a random replica, weighted by the number of
    //    segments assigned to each replica.

    // 1. Gather all segments and group them by Kafka partition, sorted by sequence number
    Map<String, SortedSet<SegmentName>> sortedSegmentsByKafkaPartition = new HashMap<String, SortedSet<SegmentName>>();
    for (String helixPartitionName : externalView.getPartitionSet()) {
        // Ignore segments that are not low level consumer segments
        if (!SegmentNameBuilder.Realtime.isRealtimeV2Name(helixPartitionName)) {
            continue;
        }

        final LLCSegmentName segmentName = new LLCSegmentName(helixPartitionName);
        String kafkaPartitionName = segmentName.getPartitionRange();
        SortedSet<SegmentName> segmentsForPartition = sortedSegmentsByKafkaPartition.get(kafkaPartitionName);

        // Create sorted set if necessary
        if (segmentsForPartition == null) {
            segmentsForPartition = new TreeSet<SegmentName>();
            sortedSegmentsByKafkaPartition.put(kafkaPartitionName, segmentsForPartition);
        }

        segmentsForPartition.add(segmentName);
    }

    // 2. Ensure that for each Kafka partition, we have at most one Helix partition (Pinot segment) in consuming
    // state
    Map<String, SegmentName> allowedSegmentInConsumingStateByKafkaPartition = new HashMap<String, SegmentName>();
    for (String kafkaPartition : sortedSegmentsByKafkaPartition.keySet()) {
        SortedSet<SegmentName> sortedSegmentsForKafkaPartition = sortedSegmentsByKafkaPartition
                .get(kafkaPartition);
        SegmentName lastAllowedSegmentInConsumingState = null;

        for (SegmentName segmentName : sortedSegmentsForKafkaPartition) {
            Map<String, String> helixPartitionState = externalView.getStateMap(segmentName.getSegmentName());
            boolean allInConsumingState = true;
            int replicasInConsumingState = 0;

            // Only keep the segment if all replicas have it in CONSUMING state
            for (String externalViewState : helixPartitionState.values()) {
                // Ignore ERROR state
                if (externalViewState.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.ERROR)) {
                    continue;
                }

                // Not all replicas have this segment in CONSUMING state, therefore don't consider it
                // assignable to CONSUMING replicas
                if (externalViewState.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.ONLINE)) {
                    allInConsumingState = false;
                    break;
                }

                // Otherwise count the replica as being in CONSUMING state
                if (externalViewState.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.CONSUMING)) {
                    replicasInConsumingState++;
                }
            }

            // If all replicas have this segment in consuming state (and not all of them are in ERROR state),
            // then pick this segment to be the last allowed segment to be in CONSUMING state
            if (allInConsumingState && 0 < replicasInConsumingState) {
                lastAllowedSegmentInConsumingState = segmentName;
                break;
            }
        }

        if (lastAllowedSegmentInConsumingState != null) {
            allowedSegmentInConsumingStateByKafkaPartition.put(kafkaPartition,
                    lastAllowedSegmentInConsumingState);
        }
    }

    // 3. Sort all the segments to be used during assignment in ascending order of replicas
    // PriorityQueue throws IllegalArgumentException when given a size of zero
    int segmentCount = Math.max(externalView.getPartitionSet().size(), 1);
    PriorityQueue<Pair<String, Set<String>>> segmentToReplicaSetQueue = new PriorityQueue<Pair<String, Set<String>>>(
            segmentCount, new Comparator<Pair<String, Set<String>>>() {
                @Override
                public int compare(Pair<String, Set<String>> firstPair, Pair<String, Set<String>> secondPair) {
                    return Integer.compare(firstPair.getRight().size(), secondPair.getRight().size());
                }
            });
    RoutingTableInstancePruner instancePruner = new RoutingTableInstancePruner(instanceConfigList);

    for (Map.Entry<String, SortedSet<SegmentName>> entry : sortedSegmentsByKafkaPartition.entrySet()) {
        String kafkaPartition = entry.getKey();
        SortedSet<SegmentName> segmentNames = entry.getValue();

        // The only segment name which is allowed to be in CONSUMING state, or null
        SegmentName validConsumingSegment = allowedSegmentInConsumingStateByKafkaPartition.get(kafkaPartition);

        for (SegmentName segmentName : segmentNames) {
            Set<String> validReplicas = new HashSet<String>();
            Map<String, String> externalViewState = externalView.getStateMap(segmentName.getSegmentName());

            for (Map.Entry<String, String> instanceAndStateEntry : externalViewState.entrySet()) {
                String instance = instanceAndStateEntry.getKey();
                String state = instanceAndStateEntry.getValue();

                // Skip pruned replicas (shutting down or otherwise disabled)
                if (instancePruner.isInactive(instance)) {
                    continue;
                }

                // Replicas in ONLINE state are always allowed
                if (state.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.ONLINE)) {
                    validReplicas.add(instance);
                    continue;
                }

                // Replicas in CONSUMING state are only allowed on the last segment
                if (state.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.CONSUMING)
                        && segmentName.equals(validConsumingSegment)) {
                    validReplicas.add(instance);
                }
            }

            segmentToReplicaSetQueue
                    .add(new ImmutablePair<String, Set<String>>(segmentName.getSegmentName(), validReplicas));

            // If this segment is the segment allowed in CONSUMING state, don't process segments after it in
            // that Kafka partition
            if (segmentName.equals(validConsumingSegment)) {
                break;
            }
        }
    }

    // 4. For each segment to be used during assignment, pick a random replica, weighted by the number of
    // segments assigned to each replica.
    List<ServerToSegmentSetMap> routingTables = new ArrayList<ServerToSegmentSetMap>(routingTableCount);
    for (int i = 0; i < routingTableCount; ++i) {
        Map<String, Set<String>> instanceToSegmentSetMap = new HashMap<String, Set<String>>();

        PriorityQueue<Pair<String, Set<String>>> segmentToReplicaSetQueueCopy = new PriorityQueue<Pair<String, Set<String>>>(
                segmentToReplicaSetQueue);

        while (!segmentToReplicaSetQueueCopy.isEmpty()) {
            Pair<String, Set<String>> segmentAndValidReplicaSet = segmentToReplicaSetQueueCopy.poll();
            String segment = segmentAndValidReplicaSet.getKey();
            Set<String> validReplicaSet = segmentAndValidReplicaSet.getValue();

            String replica = pickWeightedRandomReplica(validReplicaSet, instanceToSegmentSetMap);
            if (replica != null) {
                Set<String> segmentsForInstance = instanceToSegmentSetMap.get(replica);

                if (segmentsForInstance == null) {
                    segmentsForInstance = new HashSet<String>();
                    instanceToSegmentSetMap.put(replica, segmentsForInstance);
                }

                segmentsForInstance.add(segment);
            }
        }

        routingTables.add(new ServerToSegmentSetMap(instanceToSegmentSetMap));
    }

    return routingTables;
}
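The weighted pick in step 4 is delegated to pickWeightedRandomReplica, which is not shown in this excerpt. A minimal sketch of the "Alternative 3" weighting described in the comments above, assuming the helper receives the candidate replicas and the per-instance assignments built so far (an illustration, not the actual Pinot implementation):

import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import java.util.Set;

public class WeightedReplicaPicker {
    private static final Random RANDOM = new Random();

    // Picks a replica at random, weighting each candidate by
    // (max segment count among candidates) - (its own segment count).
    // Returns null when there is no candidate; falls back to a uniform
    // pick when every candidate is at the maximum (all weights zero).
    static String pickWeightedRandomReplica(Set<String> validReplicas,
            Map<String, Set<String>> instanceToSegmentSetMap) {
        if (validReplicas.isEmpty()) {
            return null;
        }

        // Compute each candidate's current segment count and the maximum count
        Map<String, Integer> counts = new HashMap<>();
        int maxCount = 0;
        for (String replica : validReplicas) {
            Set<String> segments = instanceToSegmentSetMap.get(replica);
            int count = (segments == null) ? 0 : segments.size();
            counts.put(replica, count);
            maxCount = Math.max(maxCount, count);
        }

        // Weight = maxCount - count; sum the weights
        int totalWeight = 0;
        for (int count : counts.values()) {
            totalWeight += maxCount - count;
        }

        // All weights zero (every candidate already at max): pick uniformly
        if (totalWeight == 0) {
            int index = RANDOM.nextInt(counts.size());
            return counts.keySet().toArray(new String[0])[index];
        }

        // Otherwise walk the cumulative weights to a random point
        int target = RANDOM.nextInt(totalWeight);
        for (Map.Entry<String, Integer> entry : counts.entrySet()) {
            int weight = maxCount - entry.getValue();
            if (target < weight) {
                return entry.getKey();
            }
            target -= weight;
        }
        return null; // unreachable
    }
}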
From source file:gov.nih.nci.cabig.caaers.domain.SiteResearchStaff.java
/**
 * Gets the active date.
 *
 * @return the active date
 */
@Transient
public Date getActiveDate() {
    SortedSet<Date> dates = new TreeSet<Date>();
    if (this.getSiteResearchStaffRoles() == null)
        return new Date(System.currentTimeMillis());
    for (SiteResearchStaffRole srsr : this.getSiteResearchStaffRoles()) {
        if (srsr.getStartDate() == null)
            srsr.setStartDate(new Date(System.currentTimeMillis()));
        dates.add(srsr.getStartDate());
    }
    if (dates.size() > 0)
        return dates.first();
    else
        return null;
}
From source file:net.sourceforge.fenixedu.presentationTier.Action.publico.ViewHomepageDA.java
public ActionForward listEmployees(ActionMapping mapping, ActionForm actionForm, HttpServletRequest request,
        HttpServletResponse response) throws Exception {
    final SortedMap<Unit, SortedSet<Homepage>> homepages = new TreeMap<Unit, SortedSet<Homepage>>(
            Unit.COMPARATOR_BY_NAME_AND_ID);
    for (final Employee employee : rootDomainObject.getEmployeesSet()) {
        final Person person = employee.getPerson();
        if (person != null) {
            final Teacher teacher = person.getTeacher();
            if (teacher == null) {
                final Contract contract = employee.getCurrentWorkingContract();
                if (contract != null) {
                    final Unit unit = contract.getWorkingUnit();
                    final SortedSet<Homepage> unitHomepages;
                    if (homepages.containsKey(unit)) {
                        unitHomepages = homepages.get(unit);
                    } else {
                        unitHomepages = new TreeSet<Homepage>(Homepage.HOMEPAGE_COMPARATOR_BY_NAME);
                        homepages.put(unit, unitHomepages);
                    }
                    final Homepage homepage = person.getHomepage();
                    if (homepage != null && homepage.getActivated().booleanValue()) {
                        unitHomepages.add(homepage);
                    }
                }
            }
        }
    }
    request.setAttribute("homepages", homepages);

    final String selectedPage = request.getParameter("selectedPage");
    if (selectedPage != null) {
        request.setAttribute("selectedPage", selectedPage);
    }

    return mapping.findForward("list-homepages-employees");
}
From source file:com.opensymphony.xwork2.ognl.accessor.CompoundRootAccessor.java
public Object callMethod(Map context, Object target, String name, Object[] objects)
        throws MethodFailedException {
    CompoundRoot root = (CompoundRoot) target;

    if ("describe".equals(name)) {
        Object v;
        if (objects != null && objects.length == 1) {
            v = objects[0];
        } else {
            v = root.get(0);
        }

        if (v instanceof Collection || v instanceof Map || v.getClass().isArray()) {
            return v.toString();
        }

        try {
            Map<String, PropertyDescriptor> descriptors = OgnlRuntime.getPropertyDescriptors(v.getClass());

            int maxSize = 0;
            for (String pdName : descriptors.keySet()) {
                if (pdName.length() > maxSize) {
                    maxSize = pdName.length();
                }
            }

            SortedSet<String> set = new TreeSet<String>();
            StringBuffer sb = new StringBuffer();
            for (PropertyDescriptor pd : descriptors.values()) {
                sb.append(pd.getName()).append(": ");

                int padding = maxSize - pd.getName().length();
                for (int i = 0; i < padding; i++) {
                    sb.append(" ");
                }
                sb.append(pd.getPropertyType().getName());
                set.add(sb.toString());
                sb = new StringBuffer();
            }

            sb = new StringBuffer();
            for (Object aSet : set) {
                String s = (String) aSet;
                sb.append(s).append("\n");
            }

            return sb.toString();
        } catch (IntrospectionException e) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Got exception in callMethod", e);
            }
        } catch (OgnlException e) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Got exception in callMethod", e);
            }
        }
        return null;
    }

    for (Object o : root) {
        if (o == null) {
            continue;
        }

        Class clazz = o.getClass();
        Class[] argTypes = getArgTypes(objects);

        MethodCall mc = null;
        if (argTypes != null) {
            mc = new MethodCall(clazz, name, argTypes);
        }

        if ((argTypes == null) || !invalidMethods.containsKey(mc)) {
            try {
                Object value = OgnlRuntime.callMethod((OgnlContext) context, o, name, objects);
                if (value != null) {
                    return value;
                }
            } catch (OgnlException e) {
                // try the next one
                Throwable reason = e.getReason();
                if (!context.containsKey(OgnlValueStack.THROW_EXCEPTION_ON_FAILURE) && (mc != null)
                        && (reason != null) && (reason.getClass() == NoSuchMethodException.class)) {
                    invalidMethods.put(mc, Boolean.TRUE);
                } else if (reason != null) {
                    throw new MethodFailedException(o, name, e.getReason());
                }
            }
        }
    }

    return null;
}