List of usage examples for java.util.LinkedHashSet.size()
int size();
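size() returns the number of elements currently in the set; duplicates rejected by add() never count, and LinkedHashSet keeps insertion order. A minimal sketch of the basic call (hypothetical values):

import java.util.LinkedHashSet;

public class SizeDemo {
    public static void main(String[] args) {
        LinkedHashSet<String> set = new LinkedHashSet<>();
        set.add("a");
        set.add("b");
        set.add("a"); // duplicate: rejected by the set, so it does not affect size()
        System.out.println(set.size()); // prints 2
    }
}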
From source file: org.codehaus.mojo.webminifier.WebMinifierMojo.java
/**
 * Main entry point for the MOJO.
 *
 * @throws MojoExecutionException if there's a problem in the normal course of execution.
 * @throws MojoFailureException if there's a problem with the MOJO itself.
 */
public void execute() throws MojoExecutionException, MojoFailureException {
    // Start off by copying all files over. We'll ultimately remove the js files that we don't need from there,
    // and create new ones in there (same goes for css files and anything else we minify).
    FileUtils.deleteQuietly(destinationFolder);
    try {
        FileUtils.copyDirectory(sourceFolder, destinationFolder);
    } catch (IOException e) {
        throw new MojoExecutionException("Cannot copy file to target folder", e);
    }

    // Process each HTML source file and concatenate into unminified output scripts
    int minifiedCounter = 0;

    // If a split point already exists on disk then we've been through the minification process. As
    // minification can be expensive, we would like to avoid performing it multiple times. Thus storing
    // a set of what we've previously minified enables us to avoid repeating it.
    Set<File> existingConcatenatedJsResources = new HashSet<File>();
    Set<File> consumedJsResources = new HashSet<File>();

    for (String targetHTMLFile : getArrayOfTargetHTMLFiles()) {
        File targetHTML = new File(destinationFolder, targetHTMLFile);

        // Parse HTML file and locate SCRIPT elements
        DocumentResourceReplacer replacer;
        try {
            replacer = new DocumentResourceReplacer(targetHTML);
        } catch (SAXException e) {
            throw new MojoExecutionException("Problem reading html document", e);
        } catch (IOException e) {
            throw new MojoExecutionException("Problem opening html document", e);
        }

        List<File> jsResources = replacer.findJSResources();

        if (jsSplitPoints == null) {
            jsSplitPoints = new Properties();
        }

        File concatenatedJsResource = null;

        URI destinationFolderUri = destinationFolder.toURI();

        // Split the js resources into two lists: one containing all external dependencies, the other containing
        // project sources. We do this so that project sources can be minified without the dependencies (libraries
        // generally don't need to distribute the dependencies).
        int jsDependencyProjectResourcesIndex;

        if (splitDependencies) {
            List<File> jsDependencyResources = new ArrayList<File>(jsResources.size());
            List<File> jsProjectResources = new ArrayList<File>(jsResources.size());
            for (File jsResource : jsResources) {
                String jsResourceUri = destinationFolderUri.relativize(jsResource.toURI()).toString();
                File jsResourceFile = new File(projectSourceFolder, jsResourceUri);
                if (jsResourceFile.exists()) {
                    jsProjectResources.add(jsResource);
                } else {
                    jsDependencyResources.add(jsResource);
                }
            }

            // Re-constitute the js resource list from dependency resources + project resources and note the index
            // in the list that represents the start of project sources. We need this information later.
            jsDependencyProjectResourcesIndex = jsDependencyResources.size();

            jsResources = jsDependencyResources;
            jsResources.addAll(jsProjectResources);
        } else {
            jsDependencyProjectResourcesIndex = 0;
        }

        // Walk backwards through the script declarations and note what files will map to what split point.
        Map<File, File> jsResourceTargetFiles = new LinkedHashMap<File, File>(jsResources.size());
        ListIterator<File> jsResourcesIter = jsResources.listIterator(jsResources.size());
        boolean splittingDependencies = false;
        while (jsResourcesIter.hasPrevious()) {
            int jsResourceIterIndex = jsResourcesIter.previousIndex();
            File jsResource = jsResourcesIter.previous();

            String candidateSplitPointNameUri = destinationFolderUri.relativize(jsResource.toURI()).toString();
            String splitPointName = (String) jsSplitPoints.get(candidateSplitPointNameUri);

            // If we do not have a split point name and the resource is a dependency of this project i.e. it is not
            // within our src/main folder then we give it a split name of "dependencies". Factoring out dependencies
            // into their own split point is a useful thing to do and will always be required when building
            // libraries.
            if (splitDependencies && splitPointName == null && !splittingDependencies) {
                if (jsResourceIterIndex < jsDependencyProjectResourcesIndex) {
                    splitPointName = Integer.valueOf(++minifiedCounter).toString();
                    splittingDependencies = true;
                }
            }

            // If we have no name and we've not been in here before, then assign an initial name based on a number.
            if (splitPointName == null && concatenatedJsResource == null) {
                splitPointName = Integer.valueOf(++minifiedCounter).toString();
            }

            // We have a new split name so use it for this file and upwards in the script statements until we
            // either hit another split point or there are no more script statements.
            if (splitPointName != null) {
                concatenatedJsResource = new File(destinationFolder, splitPointName + ".js");

                // Note that we've previously created this.
                if (concatenatedJsResource.exists()) {
                    existingConcatenatedJsResources.add(concatenatedJsResource);
                }
            }

            jsResourceTargetFiles.put(jsResource, concatenatedJsResource);
        }

        for (File jsResource : jsResources) {
            concatenatedJsResource = jsResourceTargetFiles.get(jsResource);
            if (!existingConcatenatedJsResources.contains(concatenatedJsResource)) {
                // Concatenate input file onto output resource file
                try {
                    concatenateFile(jsResource, concatenatedJsResource);
                } catch (IOException e) {
                    throw new MojoExecutionException("Problem concatenating JS files", e);
                }

                // Finally, remove the JS resource from the target folder as it is no longer required (we've
                // concatenated it).
                consumedJsResources.add(jsResource);
            }
        }

        // Reduce the list of js resource target files to a distinct set
        LinkedHashSet<File> concatenatedJsResourcesSet = new LinkedHashSet<File>(jsResourceTargetFiles.values());
        File[] concatenatedJsResourcesArray = new File[concatenatedJsResourcesSet.size()];
        concatenatedJsResourcesSet.toArray(concatenatedJsResourcesArray);
        List<File> concatenatedJsResources = Arrays.asList(concatenatedJsResourcesArray);

        // Minify the concatenated JS resource files
        if (jsCompressorType != JsCompressorType.NONE) {
            List<File> minifiedJSResources = new ArrayList<File>(concatenatedJsResources.size());

            ListIterator<File> concatenatedJsResourcesIter = concatenatedJsResources
                    .listIterator(concatenatedJsResources.size());
            while (concatenatedJsResourcesIter.hasPrevious()) {
                concatenatedJsResource = concatenatedJsResourcesIter.previous();

                File minifiedJSResource;
                try {
                    String uri = concatenatedJsResource.toURI().toString();
                    int i = uri.lastIndexOf(".js");
                    String minUri;
                    if (i > -1) {
                        minUri = uri.substring(0, i) + "-min.js";
                    } else {
                        minUri = uri;
                    }
                    minifiedJSResource = FileUtils.toFile(new URL(minUri));
                } catch (MalformedURLException e) {
                    throw new MojoExecutionException("Problem determining file URL", e);
                }

                minifiedJSResources.add(minifiedJSResource);

                // If we've not actually performed the minification before... then do so. This is the expensive bit
                // so we like to avoid it if we can.
                if (!existingConcatenatedJsResources.contains(concatenatedJsResource)) {
                    boolean warningsFound;
                    try {
                        warningsFound = minifyJSFile(concatenatedJsResource, minifiedJSResource);
                    } catch (IOException e) {
                        throw new MojoExecutionException("Problem reading/writing JS", e);
                    }

                    logCompressionRatio(minifiedJSResource.getName(), concatenatedJsResource.length(),
                            minifiedJSResource.length());

                    // If there were warnings then the user may want to manually invoke the compressor for further
                    // investigation.
                    if (warningsFound) {
                        getLog().warn("Warnings were found. " + concatenatedJsResource
                                + " is available for your further investigations.");
                    }
                }
            }

            // Update source references
            replacer.replaceJSResources(destinationFolder, targetHTML, minifiedJSResources);
        } else {
            List<File> unminifiedJSResources = new ArrayList<File>(concatenatedJsResources.size());

            ListIterator<File> concatenatedJsResourcesIter = concatenatedJsResources
                    .listIterator(concatenatedJsResources.size());
            while (concatenatedJsResourcesIter.hasPrevious()) {
                concatenatedJsResource = concatenatedJsResourcesIter.previous();
                unminifiedJSResources.add(concatenatedJsResource);
            }

            replacer.replaceJSResources(destinationFolder, targetHTML, unminifiedJSResources);
            getLog().info("Concatenated resources with no compression");
        }

        // Write HTML file to output dir
        try {
            replacer.writeHTML(targetHTML, encoding);
        } catch (TransformerException e) {
            throw new MojoExecutionException("Problem transforming html", e);
        } catch (IOException e) {
            throw new MojoExecutionException("Problem writing html", e);
        }
    }

    // Clean up including the destination folder recursively where directories have nothing left in them.
    for (File consumedJsResource : consumedJsResources) {
        consumedJsResource.delete();
    }
    removeEmptyFolders(destinationFolder);
}
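The LinkedHashSet in this example does double duty: constructing it from jsResourceTargetFiles.values() drops duplicate target files while keeping their first-seen order, and size() then sizes the output array exactly. A minimal sketch of that idiom (hypothetical file names):

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;

public class DistinctOrderDemo {
    public static void main(String[] args) {
        List<String> targets = Arrays.asList("1.js", "1.js", "2.js", "1.js");
        // Copy-constructor dedupe: keeps the first occurrence of each element, in order
        LinkedHashSet<String> distinct = new LinkedHashSet<>(targets);
        // size() gives the distinct count, so the array is sized exactly
        String[] array = distinct.toArray(new String[distinct.size()]);
        System.out.println(Arrays.toString(array)); // [1.js, 2.js]
    }
}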
From source file: com.rapidminer.gui.plotter.charts.SeriesChartPlotter.java
@Override
protected void updatePlotter() {
    int categoryCount = prepareData();

    String maxClassesProperty = ParameterService
            .getParameterValue(MainFrame.PROPERTY_RAPIDMINER_GUI_PLOTTER_COLORS_CLASSLIMIT);
    int maxClasses = 20;
    try {
        if (maxClassesProperty != null) {
            maxClasses = Integer.parseInt(maxClassesProperty);
        }
    } catch (NumberFormatException e) {
        // LogService.getGlobal().log("Series plotter: cannot parse property
        // 'rapidminer.gui.plotter.colors.classlimit', using maximal 20 different classes.",
        // LogService.WARNING);
        LogService.getRoot().log(Level.WARNING,
                "com.rapidminer.gui.plotter.charts.SeriesChartPlotter.parsing_property_error");
    }
    boolean createLegend = categoryCount > 0 && categoryCount < maxClasses;

    JFreeChart chart = createChart(this.dataset, createLegend);

    // set the background color for the chart...
    chart.setBackgroundPaint(Color.white);

    // domain axis
    if (axis[INDEX] >= 0) {
        if (!dataTable.isNominal(axis[INDEX])) {
            if (dataTable.isDate(axis[INDEX]) || dataTable.isDateTime(axis[INDEX])) {
                DateAxis domainAxis = new DateAxis(dataTable.getColumnName(axis[INDEX]));
                domainAxis.setTimeZone(Tools.getPreferredTimeZone());
                chart.getXYPlot().setDomainAxis(domainAxis);
                if (getRangeForDimension(axis[INDEX]) != null) {
                    domainAxis.setRange(getRangeForDimension(axis[INDEX]));
                }
                domainAxis.setLabelFont(LABEL_FONT_BOLD);
                domainAxis.setTickLabelFont(LABEL_FONT);
                domainAxis.setVerticalTickLabels(isLabelRotating());
            }
        } else {
            LinkedHashSet<String> values = new LinkedHashSet<String>();
            for (DataTableRow row : dataTable) {
                String stringValue = dataTable.mapIndex(axis[INDEX], (int) row.getValue(axis[INDEX]));
                if (stringValue.length() > 40) {
                    stringValue = stringValue.substring(0, 40);
                }
                values.add(stringValue);
            }
            ValueAxis categoryAxis = new SymbolAxis(dataTable.getColumnName(axis[INDEX]),
                    values.toArray(new String[values.size()]));
            categoryAxis.setLabelFont(LABEL_FONT_BOLD);
            categoryAxis.setTickLabelFont(LABEL_FONT);
            categoryAxis.setVerticalTickLabels(isLabelRotating());
            chart.getXYPlot().setDomainAxis(categoryAxis);
        }
    }

    // legend settings
    LegendTitle legend = chart.getLegend();
    if (legend != null) {
        legend.setPosition(RectangleEdge.TOP);
        legend.setFrame(BlockBorder.NONE);
        legend.setHorizontalAlignment(HorizontalAlignment.LEFT);
        legend.setItemFont(LABEL_FONT);
    }

    AbstractChartPanel panel = getPlotterPanel();
    if (panel == null) {
        panel = createPanel(chart);
    } else {
        panel.setChart(chart);
    }

    // ATTENTION: WITHOUT THIS WE GET SEVERE MEMORY LEAKS!!!
    panel.getChartRenderingInfo().setEntityCollection(null);
}
From source file: org.pentaho.reporting.engine.classic.extensions.datasources.pmd.SimplePmdDataFactory.java
public String[] getReferencedFields(final String query, final DataRow parameter) throws ReportDataFactoryException {
    final String queryRaw = computedQuery(query, parameter);
    if (query == null) {
        return null;
    }

    final Query queryObject = parseQuery(queryRaw);
    final List<Parameter> queryParamValues = queryObject.getParameters();
    final LinkedHashSet<String> retval = new LinkedHashSet<String>();
    if (userField != null) {
        retval.add(userField);
    }
    if (passwordField != null) {
        retval.add(passwordField);
    }
    if (queryParamValues != null) {
        for (final Parameter p : queryParamValues) {
            retval.add(p.getName());
        }
    }
    retval.add(DataFactory.QUERY_LIMIT);
    retval.add(DataFactory.QUERY_TIMEOUT);
    return retval.toArray(new String[retval.size()]);
}
From source file: com.espertech.esper.epl.spec.PatternStreamSpecRaw.java
private PatternStreamSpecCompiled compileInternal(StatementContext context, Set<String> eventTypeReferences,
        boolean isInsertInto, Collection<Integer> assignedTypeNumberStack, MatchEventSpec tags,
        Set<String> priorAllTags) throws ExprValidationException {
    if (tags == null) {
        tags = new MatchEventSpec();
    }
    Deque<Integer> subexpressionIdStack = new ArrayDeque<Integer>(assignedTypeNumberStack);
    ExprEvaluatorContext evaluatorContextStmt = new ExprEvaluatorContextStatement(context);
    Stack<EvalFactoryNode> nodeStack = new Stack<EvalFactoryNode>();

    // determine ordered tags
    Set<EvalFactoryNode> filterFactoryNodes = EvalNodeUtil.recursiveGetChildNodes(evalFactoryNode,
            FilterForFilterFactoryNodes.INSTANCE);
    LinkedHashSet<String> allTagNamesOrdered = new LinkedHashSet<String>();
    if (priorAllTags != null) {
        allTagNamesOrdered.addAll(priorAllTags);
    }
    for (EvalFactoryNode filterNode : filterFactoryNodes) {
        EvalFilterFactoryNode factory = (EvalFilterFactoryNode) filterNode;
        int tagNumber;
        if (factory.getEventAsName() != null) {
            if (!allTagNamesOrdered.contains(factory.getEventAsName())) {
                allTagNamesOrdered.add(factory.getEventAsName());
                tagNumber = allTagNamesOrdered.size() - 1;
            } else {
                tagNumber = findTagNumber(factory.getEventAsName(), allTagNamesOrdered);
            }
            factory.setEventAsTagNumber(tagNumber);
        }
    }

    recursiveCompile(evalFactoryNode, context, evaluatorContextStmt, eventTypeReferences, isInsertInto, tags,
            subexpressionIdStack, nodeStack, allTagNamesOrdered);

    Audit auditPattern = AuditEnum.PATTERN.getAudit(context.getAnnotations());
    Audit auditPatternInstance = AuditEnum.PATTERNINSTANCES.getAudit(context.getAnnotations());
    EvalFactoryNode compiledEvalFactoryNode = evalFactoryNode;
    if (auditPattern != null || auditPatternInstance != null) {
        EvalAuditInstanceCount instanceCount = new EvalAuditInstanceCount();
        compiledEvalFactoryNode = recursiveAddAuditNode(null, auditPattern != null, auditPatternInstance != null,
                evalFactoryNode, evalNodeExpressions, instanceCount);
    }

    return new PatternStreamSpecCompiled(compiledEvalFactoryNode, tags.getTaggedEventTypes(),
            tags.getArrayEventTypes(), allTagNamesOrdered, this.getViewSpecs(), this.getOptionalStreamName(),
            this.getOptions());
}
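The tag-numbering above relies on a LinkedHashSet property worth spelling out: iteration order is insertion order, so immediately after a successful add() the new element's index is size() - 1. A minimal sketch of that idiom (hypothetical tag names; the original's findTagNumber similarly recovers the index of an already-present tag):

import java.util.LinkedHashSet;

public class TagNumberDemo {
    // Returns a stable, insertion-ordered index for the given tag.
    static int tagNumber(String tag, LinkedHashSet<String> ordered) {
        if (ordered.add(tag)) {
            return ordered.size() - 1; // newly added: it occupies the last position
        }
        int i = 0;
        for (String t : ordered) { // already present: walk the set to find its position
            if (t.equals(tag)) {
                return i;
            }
            i++;
        }
        throw new IllegalStateException("tag must be in the set");
    }

    public static void main(String[] args) {
        LinkedHashSet<String> ordered = new LinkedHashSet<>();
        System.out.println(tagNumber("a", ordered)); // 0
        System.out.println(tagNumber("b", ordered)); // 1
        System.out.println(tagNumber("a", ordered)); // 0 again: "a" keeps its index
    }
}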
From source file: com.ehsy.solr.util.SimplePostTool.java
/**
 * A very simple crawler, pulling URLs to fetch from a backlog and then
 * recursing N levels deep if recursive>0. Links are parsed from HTML
 * through first getting an XHTML version using SolrCell with extractOnly,
 * and followed if they are local. The crawler pauses for a default delay
 * of 10 seconds between each fetch; this can be configured in the delay
 * variable. This is only meant for test purposes, as it does not respect
 * robots or anything else fancy :)
 * @param level which level to crawl
 * @param out output stream to write to
 * @return number of pages crawled on this level and below
 */
protected int webCrawl(int level, OutputStream out) {
    int numPages = 0;
    LinkedHashSet<URL> stack = backlog.get(level);
    int rawStackSize = stack.size();
    stack.removeAll(visited);
    int stackSize = stack.size();
    LinkedHashSet<URL> subStack = new LinkedHashSet<>();
    info("Entering crawl at level " + level + " (" + rawStackSize + " links total, " + stackSize + " new)");
    for (URL u : stack) {
        try {
            visited.add(u);
            PageFetcherResult result = pageFetcher.readPageFromUrl(u);
            if (result.httpStatus == 200) {
                u = (result.redirectUrl != null) ? result.redirectUrl : u;
                URL postUrl = new URL(appendParam(solrUrl.toString(),
                        "literal.id=" + URLEncoder.encode(u.toString(), "UTF-8")
                                + "&literal.url=" + URLEncoder.encode(u.toString(), "UTF-8")));
                boolean success = postData(new ByteArrayInputStream(result.content), null, out,
                        result.contentType, postUrl);
                if (success) {
                    info("POSTed web resource " + u + " (depth: " + level + ")");
                    Thread.sleep(delay * 1000);
                    numPages++;
                    // Pull links from HTML pages only
                    if (recursive > level && result.contentType.equals("text/html")) {
                        Set<URL> children = pageFetcher.getLinksFromWebPage(u,
                                new ByteArrayInputStream(result.content), result.contentType, postUrl);
                        subStack.addAll(children);
                    }
                } else {
                    warn("An error occurred while posting " + u);
                }
            } else {
                warn("The URL " + u + " returned a HTTP result status of " + result.httpStatus);
            }
        } catch (IOException e) {
            warn("Caught exception when trying to open connection to " + u + ": " + e.getMessage());
        } catch (InterruptedException e) {
            throw new RuntimeException();
        }
    }
    if (!subStack.isEmpty()) {
        backlog.add(subStack);
        numPages += webCrawl(level + 1, out);
    }
    return numPages;
}
From source file: net.sf.maltcms.common.charts.overlay.nodes.OverlayNode.java
@Override
public Action[] getActions(boolean context) {
    List<?> interfaces = getAllInterfaces(getBean().getClass());
    List<?> superClasses = getAllSuperclasses(getBean().getClass());
    LinkedHashSet<Action> containerActions = new LinkedHashSet<>();
    for (Object o : interfaces) {
        Class<?> c = (Class) o;
        containerActions.addAll(actionsForPath("Actions/OverlayNodeActions/" + c.getName()));
        containerActions.addAll(actionsForPath("Actions/OverlayNodeActions/" + c.getSimpleName()));
    }
    for (Object o : superClasses) {
        Class<?> c = (Class) o;
        containerActions.addAll(actionsForPath("Actions/OverlayNodeActions/" + c.getName()));
        containerActions.addAll(actionsForPath("Actions/OverlayNodeActions/" + c.getSimpleName()));
    }
    containerActions.addAll(actionsForPath("Actions/OverlayNodeActions/" + getBean().getClass().getName()));
    containerActions
            .addAll(actionsForPath("Actions/OverlayNodeActions/" + getBean().getClass().getSimpleName()));
    containerActions.add(null);
    containerActions.addAll(actionsForPath("Actions/OverlayNodeActions/DefaultActions"));
    containerActions.add(get(PropertiesAction.class));
    return containerActions.toArray(new Action[containerActions.size()]);
}
From source file: org.apache.solr.cloud.TestCloudPivotFacet.java
@Test
public void test() throws Exception {
    sanityCheckAssertNumerics();

    waitForThingsToLevelOut(30000); // TODO: why would we have to wait?
    // handle.clear();
    handle.put("QTime", SKIPVAL);
    handle.put("timestamp", SKIPVAL);

    final Set<String> fieldNameSet = new HashSet<>();

    // build up a randomized index
    final int numDocs = atLeast(500);
    log.info("numDocs: {}", numDocs);

    for (int i = 1; i <= numDocs; i++) {
        SolrInputDocument doc = buildRandomDocument(i);

        // not efficient, but it guarantees that even if people change buildRandomDocument
        // we'll always have the full list of fields w/o needing to keep code in sync
        fieldNameSet.addAll(doc.getFieldNames());

        cloudClient.add(doc);
    }
    cloudClient.commit();

    fieldNameSet.remove("id");
    assertTrue("WTF, bogus field exists?", fieldNameSet.add("bogus_not_in_any_doc_s"));

    final String[] fieldNames = fieldNameSet.toArray(new String[fieldNameSet.size()]);
    Arrays.sort(fieldNames); // need determinism when picking random fields

    for (int i = 0; i < 5; i++) {
        String q = "*:*";
        if (random().nextBoolean()) {
            q = "id:[* TO " + TestUtil.nextInt(random(), 300, numDocs) + "]";
        }
        ModifiableSolrParams baseP = params("rows", "0", "q", q);

        if (random().nextBoolean()) {
            baseP.add("fq", "id:[* TO " + TestUtil.nextInt(random(), 200, numDocs) + "]");
        }

        final boolean stats = random().nextBoolean();
        if (stats) {
            baseP.add(StatsParams.STATS, "true");

            // if we are doing stats, then always generate the same # of STATS_FIELD
            // params, using multiple tags from a fixed set, but with diff fieldName values.
            // later, each pivot will randomly pick a tag.
            baseP.add(StatsParams.STATS_FIELD, "{!key=sk1 tag=st1,st2}" + pickRandomStatsFields(fieldNames));
            baseP.add(StatsParams.STATS_FIELD, "{!key=sk2 tag=st2,st3}" + pickRandomStatsFields(fieldNames));
            baseP.add(StatsParams.STATS_FIELD, "{!key=sk3 tag=st3,st4}" + pickRandomStatsFields(fieldNames));

            // NOTE: there's a chance that some of those stats field names
            // will be the same, but if so, all the better to test that edge case
        }

        ModifiableSolrParams pivotP = params(FACET, "true");

        // put our FACET_PIVOT params in a set in case we just happen to pick the same one twice
        LinkedHashSet<String> pivotParamValues = new LinkedHashSet<String>();
        pivotParamValues.add(buildPivotParamValue(buildRandomPivot(fieldNames)));

        if (random().nextBoolean()) {
            pivotParamValues.add(buildPivotParamValue(buildRandomPivot(fieldNames)));
        }
        pivotP.set(FACET_PIVOT, pivotParamValues.toArray(new String[pivotParamValues.size()]));

        // keep limit low - lots of unique values, and lots of depth in pivots
        pivotP.add(FACET_LIMIT, "" + TestUtil.nextInt(random(), 1, 17));

        // sometimes use an offset
        if (random().nextBoolean()) {
            pivotP.add(FACET_OFFSET, "" + TestUtil.nextInt(random(), 0, 7));
        }

        if (random().nextBoolean()) {
            String min = "" + TestUtil.nextInt(random(), 0, numDocs + 10);
            pivotP.add(FACET_PIVOT_MINCOUNT, min);
            // trace param for validation
            baseP.add(TRACE_MIN, min);
        }

        if (random().nextBoolean()) {
            pivotP.add(FACET_DISTRIB_MCO, "true");
            // trace param for validation
            baseP.add(TRACE_DISTRIB_MIN, "true");
        }

        if (random().nextBoolean()) {
            String missing = "" + random().nextBoolean();
            pivotP.add(FACET_MISSING, missing);
            // trace param for validation
            baseP.add(TRACE_MISS, missing);
        }

        if (random().nextBoolean()) {
            String sort = random().nextBoolean() ? "index" : "count";
            pivotP.add(FACET_SORT, sort);
            // trace param for validation
            baseP.add(TRACE_SORT, sort);
        }

        // overrequest
        //
        // NOTE: since this test focuses on accuracy of refinement, and doesn't do
        // control collection comparisons, there isn't a lot of need for excessive
        // overrequesting -- we focus here on trying to exercise the various edge cases
        // involved as different values are used with overrequest
        if (0 == TestUtil.nextInt(random(), 0, 4)) {
            // we want a decent chance of no overrequest at all
            pivotP.add(FACET_OVERREQUEST_COUNT, "0");
            pivotP.add(FACET_OVERREQUEST_RATIO, "0");
        } else {
            if (random().nextBoolean()) {
                pivotP.add(FACET_OVERREQUEST_COUNT, "" + TestUtil.nextInt(random(), 0, 5));
            }
            if (random().nextBoolean()) {
                // sometimes give a ratio less than 1, code should be smart enough to deal
                float ratio = 0.5F + random().nextFloat();
                // sometimes go negative
                if (random().nextBoolean()) {
                    ratio *= -1;
                }
                pivotP.add(FACET_OVERREQUEST_RATIO, "" + ratio);
            }
        }

        assertPivotCountsAreCorrect(baseP, pivotP);
    }
}
From source file: ArrayUtils.java
/**
 * Merges all elements of a set of arrays into a single array with no
 * duplicates.
 *
 * @param <T1>
 *            The type of the result
 * @param <T2>
 *            The type of the input arrays
 * @param type
 *            The type of the result
 * @param arrays
 *            The arrays to merge
 * @return A new array containing each distinct element of the given
 *         arrays exactly once, in order of first occurrence
 * @throws NullPointerException
 *             If the array set or any array in it is null
 */
public static <T1, T2 extends T1> T1[] mergeInclusive(Class<T1> type, T2[]... arrays) {
    java.util.LinkedHashSet<T1> set = new java.util.LinkedHashSet<T1>();
    int i, j;
    for (i = 0; i < arrays.length; i++) {
        for (j = 0; j < arrays[i].length; j++)
            set.add(arrays[i][j]);
    }
    return set.toArray((T1[]) Array.newInstance(type, set.size()));
}
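A hypothetical call, assuming the method above is available on a class named ArrayUtils as the file name suggests:

import java.util.Arrays;

public class MergeDemo {
    public static void main(String[] args) {
        Integer[] a = { 1, 2, 3 };
        Integer[] b = { 3, 4, 1 };
        // Duplicates collapse; the LinkedHashSet keeps first-seen order,
        // and its size() determines the length of the result array
        Integer[] merged = ArrayUtils.mergeInclusive(Integer.class, a, b);
        System.out.println(Arrays.toString(merged)); // [1, 2, 3, 4]
    }
}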
From source file: org.pentaho.reporting.ui.datasources.pmd.PmdDataSourceEditor.java
private ScriptEngineFactory[] getScriptEngineLanguages() {
    final LinkedHashSet<ScriptEngineFactory> langSet = new LinkedHashSet<ScriptEngineFactory>();
    langSet.add(null);
    final List<ScriptEngineFactory> engineFactories = new ScriptEngineManager().getEngineFactories();
    for (final ScriptEngineFactory engineFactory : engineFactories) {
        langSet.add(engineFactory);
    }
    return langSet.toArray(new ScriptEngineFactory[langSet.size()]);
}
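One detail in this example: langSet.add(null) works because LinkedHashSet permits a single null element, and that null counts toward size(), which appears intended to reserve a "no language selected" slot at the front of the returned array. A minimal sketch of that behavior (hypothetical values):

import java.util.Arrays;
import java.util.LinkedHashSet;

public class NullElementDemo {
    public static void main(String[] args) {
        LinkedHashSet<String> set = new LinkedHashSet<>();
        set.add(null);     // placeholder entry, kept first by insertion order
        set.add("groovy");
        set.add(null);     // second null is a duplicate and is ignored
        System.out.println(set.size()); // prints 2: the null counts once
        System.out.println(Arrays.toString(set.toArray(new String[set.size()]))); // [null, groovy]
    }
}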
From source file: Simulator.PerformanceCalculation.java
public JPanel waitTime1() {
    // Distinct surgeon ids, in first-seen order
    LinkedHashSet<Integer> no = new LinkedHashSet<>();
    LinkedHashMap<Integer, ArrayList<Double>> wait1 = new LinkedHashMap<>();
    for (Map.Entry<Integer, TraceObject> entry : l.getLocalTrace().entrySet()) {
        TraceObject traceObject = entry.getValue();

        if (wait1.get(traceObject.getSurgeonId()) == null) {
            ArrayList<Double> details = new ArrayList<>();
            details.add(traceObject.getWaitTime1());
            wait1.put(traceObject.getSurgeonId(), details);
        } else {
            wait1.get(traceObject.getSurgeonId()).add(traceObject.getWaitTime1());
        }
        no.add(traceObject.getSurgeonId());
    }
    String[] column = new String[no.size()];
    String series1 = "Wait Time 1";
    for (int i = 0; i < no.size(); i++) {
        column[i] = "Surgeon " + (i + 1);
    }
    DefaultCategoryDataset dataset = new DefaultCategoryDataset();
    LinkedHashMap<Integer, Double> average = new LinkedHashMap<>();
    for (Map.Entry<Integer, ArrayList<Double>> entry : wait1.entrySet()) {
        Integer integer = entry.getKey();
        ArrayList<Double> arrayList = entry.getValue();
        double total = 0;
        for (Double double1 : arrayList) {
            total += double1;
        }
        average.put(integer, total / arrayList.size());
    }
    for (int i = 1; i <= average.size(); i++) {
        dataset.addValue(Math.round(average.get(i) / 600), series1, column[i - 1]);
    }

    JFreeChart chart = ChartFactory.createBarChart("Wait Time 1", // chart title
            "Surgeon ID", // domain axis label
            "Days", // range axis label
            dataset, // data
            PlotOrientation.VERTICAL, // orientation
            true, // include legend
            true, // tooltips?
            false // URLs?
    );

    return new ChartPanel(chart);
}