List of usage examples for java.util.Vector.elementAt
public synchronized E elementAt(int index)
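elementAt returns the component at the specified index and throws ArrayIndexOutOfBoundsException if the index is negative or not less than the current size. Before the project examples below, here is a minimal, self-contained sketch of that contract (the class name and values are illustrative, not taken from any of the sources listed):

import java.util.Vector;

public class ElementAtDemo {
    public static void main(String[] args) {
        Vector<String> fruits = new Vector<String>();
        fruits.add("apple");
        fruits.add("banana");

        // elementAt(int) is the legacy equivalent of get(int)
        String first = fruits.elementAt(0); // "apple"
        System.out.println(first);

        // An index at or beyond size() throws ArrayIndexOutOfBoundsException
        try {
            fruits.elementAt(2);
        } catch (ArrayIndexOutOfBoundsException ex) {
            System.out.println("Out of range: " + ex.getMessage());
        }
    }
}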
From source file: com.hexidec.ekit.EkitCore.java

/**
 * Convenience method for obtaining a custom toolbar.
 */
public JToolBar customizeToolBar(int whichToolBar, Vector<String> vcTools, boolean isShowing) {
    JToolBar jToolBarX = new JToolBar(JToolBar.HORIZONTAL);
    jToolBarX.setFloatable(false);
    for (int i = 0; i < vcTools.size(); i++) {
        String toolToAdd = vcTools.elementAt(i).toUpperCase();
        if (toolToAdd.equals(KEY_TOOL_SEP)) {
            jToolBarX.add(new JToolBar.Separator());
        } else if (htTools.containsKey(toolToAdd)) {
            if (htTools.get(toolToAdd) instanceof JButtonNoFocus) {
                jToolBarX.add((JButtonNoFocus) (htTools.get(toolToAdd)));
            } else if (htTools.get(toolToAdd) instanceof JToggleButtonNoFocus) {
                jToolBarX.add((JToggleButtonNoFocus) (htTools.get(toolToAdd)));
            } else if (htTools.get(toolToAdd) instanceof JComboBoxNoFocus) {
                jToolBarX.add((JComboBoxNoFocus) (htTools.get(toolToAdd)));
            } else {
                jToolBarX.add((JComponent) (htTools.get(toolToAdd)));
            }
        } else {
            Action a = null;
            for (HTMLDocumentBehavior b : behaviors) {
                a = b.getAction(toolToAdd);
                if (a != null) {
                    JButtonNoFocus button = new JButtonNoFocus(a);
                    button.setText(null);
                    jToolBarX.add(button);
                    break;
                }
            }
        }
    }
    if (whichToolBar == TOOLBAR_SINGLE) {
        jToolBar = jToolBarX;
        jToolBar.setVisible(isShowing);
        jcbmiViewToolbar.setSelected(isShowing);
        return jToolBar;
    } else if (whichToolBar == TOOLBAR_MAIN) {
        jToolBarMain = jToolBarX;
        jToolBarMain.setVisible(isShowing);
        jcbmiViewToolbarMain.setSelected(isShowing);
        return jToolBarMain;
    } else if (whichToolBar == TOOLBAR_FORMAT) {
        jToolBarFormat = jToolBarX;
        jToolBarFormat.setVisible(isShowing);
        jcbmiViewToolbarFormat.setSelected(isShowing);
        return jToolBarFormat;
    } else if (whichToolBar == TOOLBAR_STYLES) {
        jToolBarStyles = jToolBarX;
        jToolBarStyles.setVisible(isShowing);
        jcbmiViewToolbarStyles.setSelected(isShowing);
        return jToolBarStyles;
    } else {
        jToolBarMain = jToolBarX;
        jToolBarMain.setVisible(isShowing);
        jcbmiViewToolbarMain.setSelected(isShowing);
        return jToolBarMain;
    }
}
From source file: com.ricemap.spateDB.core.RTree.java

/**
 * Builds the RTree given a serialized list of elements. It uses the given
 * stockObject to deserialize these elements and build the tree. Also writes
 * the created tree to the disk directly.
 *
 * @param element_bytes
 *            serialization of the elements to be written
 * @param offset
 *            index of the first element to use in the elements array
 * @param len
 *            number of bytes to use from the elements array
 * @param degree
 *            degree (fan-out) of the tree to build
 * @param dataOut
 *            an output to use for writing the tree to
 * @param fast_sort
 *            setting this to <code>true</code> allows the method to run
 *            faster by materializing the offset of each element in the
 *            list, which speeds up the comparison. However, this requires
 *            an additional 16 bytes per element. So, for each 1M elements,
 *            the method will require an additional 16 M bytes
 *            (approximately).
 */
public void bulkLoadWrite(final byte[] element_bytes, final int offset, final int len, final int degree,
        DataOutput dataOut, final boolean fast_sort, final boolean columnarStorage) {
    try {
        columnar = columnarStorage;
        // TODO: the order of fields should be stable under the Oracle JVM, but is not guaranteed
        Field[] fields = stockObject.getClass().getDeclaredFields();

        // Count the number of elements in the given text
        int i_start = offset;
        final Text line = new Text();
        while (i_start < offset + len) {
            int i_end = skipToEOL(element_bytes, i_start);
            // Extract the line without the end-of-line character
            line.set(element_bytes, i_start, i_end - i_start - 1);
            stockObject.fromText(line);
            elementCount++;
            i_start = i_end;
        }
        LOG.info("Bulk loading an RTree with " + elementCount + " elements");

        // It turns out findBestDegree returns the best degree when the whole
        // tree is loaded to memory when processed. However, as current
        // algorithms process the tree while it's on disk, a higher degree
        // should be selected such that a node fits one file block (assumed
        // to be 4K).
        // final int degree = findBestDegree(bytesAvailable, elementCount);
        LOG.info("Writing an RTree with degree " + degree);

        int height = Math.max(1, (int) Math.ceil(Math.log(elementCount) / Math.log(degree)));
        int leafNodeCount = (int) Math.pow(degree, height - 1);
        if (elementCount < 2 * leafNodeCount && height > 1) {
            height--;
            leafNodeCount = (int) Math.pow(degree, height - 1);
        }
        int nodeCount = (int) ((Math.pow(degree, height) - 1) / (degree - 1));
        int nonLeafNodeCount = nodeCount - leafNodeCount;

        // Keep track of the offset of each element in the text
        final int[] offsets = new int[elementCount];
        final int[] ids = new int[elementCount];
        final double[] ts = fast_sort ? new double[elementCount] : null;
        final double[] xs = fast_sort ? new double[elementCount] : null;
        final double[] ys = fast_sort ? new double[elementCount] : null;

        // Initialize columnar data output
        ByteArrayOutputStream index_bos = new ByteArrayOutputStream();
        DataOutputStream index_dos = new DataOutputStream(index_bos);
        ByteArrayOutputStream[] bos = new ByteArrayOutputStream[fields.length];
        DataOutputStream[] dos = new DataOutputStream[fields.length];
        for (int i = 0; i < bos.length; i++) {
            bos[i] = new ByteArrayOutputStream();
            dos[i] = new DataOutputStream(bos[i]);
        }

        i_start = offset;
        line.clear();
        for (int i = 0; i < elementCount; i++) {
            offsets[i] = i_start;
            ids[i] = i;
            int i_end = skipToEOL(element_bytes, i_start);
            if (xs != null) {
                // Extract the line without the end-of-line character
                line.set(element_bytes, i_start, i_end - i_start - 1);
                stockObject.fromText(line);
                // Sample the center of the shape
                ts[i] = (stockObject.getMBR().t1 + stockObject.getMBR().t2) / 2;
                xs[i] = (stockObject.getMBR().x1 + stockObject.getMBR().x2) / 2;
                ys[i] = (stockObject.getMBR().y1 + stockObject.getMBR().y2) / 2;
                // Build columnar storage
                if (stockObject instanceof Point3d) {
                    index_dos.writeDouble(ts[i]);
                    index_dos.writeDouble(xs[i]);
                    index_dos.writeDouble(ys[i]);
                } else {
                    throw new RuntimeException("Indexing non-point shape with RTREE is not supported yet");
                }
                for (int j = 0; j < fields.length; j++) {
                    if (fields[j].getType().equals(Integer.TYPE)) {
                        dos[j].writeInt(fields[j].getInt(stockObject));
                    } else if (fields[j].getType().equals(Double.TYPE)) {
                        dos[j].writeDouble(fields[j].getDouble(stockObject));
                    } else if (fields[j].getType().equals(Long.TYPE)) {
                        dos[j].writeLong(fields[j].getLong(stockObject));
                    } else {
                        // Unsupported field types are skipped rather than rejected
                        continue;
                    }
                }
            }
            i_start = i_end;
        }
        index_dos.close();
        for (int i = 0; i < dos.length; i++) {
            dos[i].close();
        }

        /** A struct to store information about a split */
        class SplitStruct extends Prism {
            /** Start and end index for this split */
            int index1, index2;
            /** Direction of this split */
            byte direction;
            /** Index of the first element on disk */
            int offsetOfFirstElement;

            static final byte DIRECTION_T = 0;
            static final byte DIRECTION_X = 1;
            static final byte DIRECTION_Y = 2;

            SplitStruct(int index1, int index2, byte direction) {
                this.index1 = index1;
                this.index2 = index2;
                this.direction = direction;
            }

            @Override
            public void write(DataOutput out) throws IOException {
                if (columnarStorage)
                    out.writeInt(index1);
                else
                    out.writeInt(offsetOfFirstElement);
                super.write(out);
            }

            void partition(Queue<SplitStruct> toBePartitioned) {
                IndexedSortable sortableT;
                IndexedSortable sortableX;
                IndexedSortable sortableY;
                if (fast_sort) {
                    // Use the materialized ts[], xs[] and ys[] to do the comparisons
                    sortableT = new IndexedSortable() {
                        @Override
                        public void swap(int i, int j) {
                            // Swap ts
                            double tempt = ts[i]; ts[i] = ts[j]; ts[j] = tempt;
                            // Swap xs
                            double tempx = xs[i]; xs[i] = xs[j]; xs[j] = tempx;
                            // Swap ys
                            double tempy = ys[i]; ys[i] = ys[j]; ys[j] = tempy;
                            // Swap offsets and ids
                            int tempid = offsets[i]; offsets[i] = offsets[j]; offsets[j] = tempid;
                            tempid = ids[i]; ids[i] = ids[j]; ids[j] = tempid;
                        }

                        @Override
                        public int compare(int i, int j) {
                            if (ts[i] < ts[j]) return -1;
                            if (ts[i] > ts[j]) return 1;
                            return 0;
                        }
                    };

                    sortableX = new IndexedSortable() {
                        @Override
                        public void swap(int i, int j) {
                            // Swap ts
                            double tempt = ts[i]; ts[i] = ts[j]; ts[j] = tempt;
                            // Swap xs
                            double tempx = xs[i]; xs[i] = xs[j]; xs[j] = tempx;
                            // Swap ys
                            double tempy = ys[i]; ys[i] = ys[j]; ys[j] = tempy;
                            // Swap offsets and ids
                            int tempid = offsets[i]; offsets[i] = offsets[j]; offsets[j] = tempid;
                            tempid = ids[i]; ids[i] = ids[j]; ids[j] = tempid;
                        }

                        @Override
                        public int compare(int i, int j) {
                            if (xs[i] < xs[j]) return -1;
                            if (xs[i] > xs[j]) return 1;
                            return 0;
                        }
                    };

                    sortableY = new IndexedSortable() {
                        @Override
                        public void swap(int i, int j) {
                            // Swap ts
                            double tempt = ts[i]; ts[i] = ts[j]; ts[j] = tempt;
                            // Swap xs
                            double tempx = xs[i]; xs[i] = xs[j]; xs[j] = tempx;
                            // Swap ys
                            double tempy = ys[i]; ys[i] = ys[j]; ys[j] = tempy;
                            // Swap offsets and ids
                            int tempid = offsets[i]; offsets[i] = offsets[j]; offsets[j] = tempid;
                            tempid = ids[i]; ids[i] = ids[j]; ids[j] = tempid;
                        }

                        @Override
                        public int compare(int i, int j) {
                            if (ys[i] < ys[j]) return -1;
                            if (ys[i] > ys[j]) return 1;
                            return 0;
                        }
                    };
                } else {
                    // No materialized ts, xs and ys. Always deserialize objects to compare.
                    sortableT = new IndexedSortable() {
                        @Override
                        public void swap(int i, int j) {
                            // Swap offsets and ids
                            int tempid = offsets[i]; offsets[i] = offsets[j]; offsets[j] = tempid;
                            tempid = ids[i]; ids[i] = ids[j]; ids[j] = tempid;
                        }

                        @Override
                        public int compare(int i, int j) {
                            // Get the end of line
                            int eol = skipToEOL(element_bytes, offsets[i]);
                            line.set(element_bytes, offsets[i], eol - offsets[i] - 1);
                            stockObject.fromText(line);
                            double ti = (stockObject.getMBR().t1 + stockObject.getMBR().t2) / 2;
                            eol = skipToEOL(element_bytes, offsets[j]);
                            line.set(element_bytes, offsets[j], eol - offsets[j] - 1);
                            stockObject.fromText(line);
                            double tj = (stockObject.getMBR().t1 + stockObject.getMBR().t2) / 2;
                            if (ti < tj) return -1;
                            if (ti > tj) return 1;
                            return 0;
                        }
                    };

                    sortableX = new IndexedSortable() {
                        @Override
                        public void swap(int i, int j) {
                            // Swap offsets and ids
                            int tempid = offsets[i]; offsets[i] = offsets[j]; offsets[j] = tempid;
                            tempid = ids[i]; ids[i] = ids[j]; ids[j] = tempid;
                        }

                        @Override
                        public int compare(int i, int j) {
                            int eol = skipToEOL(element_bytes, offsets[i]);
                            line.set(element_bytes, offsets[i], eol - offsets[i] - 1);
                            stockObject.fromText(line);
                            double xi = (stockObject.getMBR().x1 + stockObject.getMBR().x2) / 2;
                            eol = skipToEOL(element_bytes, offsets[j]);
                            line.set(element_bytes, offsets[j], eol - offsets[j] - 1);
                            stockObject.fromText(line);
                            double xj = (stockObject.getMBR().x1 + stockObject.getMBR().x2) / 2;
                            if (xi < xj) return -1;
                            if (xi > xj) return 1;
                            return 0;
                        }
                    };

                    sortableY = new IndexedSortable() {
                        @Override
                        public void swap(int i, int j) {
                            // Swap offsets and ids
                            int tempid = offsets[i]; offsets[i] = offsets[j]; offsets[j] = tempid;
                            tempid = ids[i]; ids[i] = ids[j]; ids[j] = tempid;
                        }

                        @Override
                        public int compare(int i, int j) {
                            int eol = skipToEOL(element_bytes, offsets[i]);
                            line.set(element_bytes, offsets[i], eol - offsets[i] - 1);
                            stockObject.fromText(line);
                            double yi = (stockObject.getMBR().y1 + stockObject.getMBR().y2) / 2;
                            eol = skipToEOL(element_bytes, offsets[j]);
                            line.set(element_bytes, offsets[j], eol - offsets[j] - 1);
                            stockObject.fromText(line);
                            double yj = (stockObject.getMBR().y1 + stockObject.getMBR().y2) / 2;
                            if (yi < yj) return -1;
                            if (yi > yj) return 1;
                            return 0;
                        }
                    };
                }

                final IndexedSorter sorter = new QuickSort();
                final IndexedSortable[] sortables = new IndexedSortable[3];
                sortables[SplitStruct.DIRECTION_T] = sortableT;
                sortables[SplitStruct.DIRECTION_X] = sortableX;
                sortables[SplitStruct.DIRECTION_Y] = sortableY;

                sorter.sort(sortables[direction], index1, index2);

                // Partition into degree partitions (equally) and create a
                // SplitStruct for each partition
                int i1 = index1;
                for (int iSplit = 0; iSplit < degree; iSplit++) {
                    int i2 = index1 + (index2 - index1) * (iSplit + 1) / degree;
                    SplitStruct newSplit;
                    if (direction == 0) {
                        newSplit = new SplitStruct(i1, i2, (byte) 1);
                    } else if (direction == 1) {
                        newSplit = new SplitStruct(i1, i2, (byte) 2);
                    } else {
                        newSplit = new SplitStruct(i1, i2, (byte) 0);
                    }
                    toBePartitioned.add(newSplit);
                    i1 = i2;
                }
            }
        }

        // All nodes stored in level-order traversal
        Vector<SplitStruct> nodes = new Vector<SplitStruct>();
        final Queue<SplitStruct> toBePartitioned = new LinkedList<SplitStruct>();
        toBePartitioned.add(new SplitStruct(0, elementCount, SplitStruct.DIRECTION_X));
        while (!toBePartitioned.isEmpty()) {
            SplitStruct split = toBePartitioned.poll();
            if (nodes.size() < nonLeafNodeCount) {
                // This is a non-leaf
                split.partition(toBePartitioned);
            }
            nodes.add(split);
        }

        if (nodes.size() != nodeCount) {
            throw new RuntimeException(
                    "Expected node count: " + nodeCount + ". Real node count: " + nodes.size());
        }

        // Now we have our data sorted in the required order. Start building
        // the tree.
        // Store the offset of each leaf node in the tree
        FSDataOutputStream fakeOut = new FSDataOutputStream(new java.io.OutputStream() {
            // Null output stream
            @Override
            public void write(int b) throws IOException {
                // Do nothing
            }

            @Override
            public void write(byte[] b, int off, int len) throws IOException {
                // Do nothing
            }

            @Override
            public void write(byte[] b) throws IOException {
                // Do nothing
            }
        }, null, TreeHeaderSize + nodes.size() * NodeSize);

        for (int i_leaf = nonLeafNodeCount, i = 0; i_leaf < nodes.size(); i_leaf++) {
            nodes.elementAt(i_leaf).offsetOfFirstElement = (int) fakeOut.getPos();
            if (i != nodes.elementAt(i_leaf).index1)
                throw new RuntimeException();
            double t1, x1, y1, t2, x2, y2;

            // Initialize the MBR to the first object
            int eol = skipToEOL(element_bytes, offsets[i]);
            fakeOut.write(element_bytes, offsets[i], eol - offsets[i]);
            line.set(element_bytes, offsets[i], eol - offsets[i] - 1);
            stockObject.fromText(line);
            Prism mbr = stockObject.getMBR();
            t1 = mbr.t1;
            x1 = mbr.x1;
            y1 = mbr.y1;
            t2 = mbr.t2;
            x2 = mbr.x2;
            y2 = mbr.y2;
            i++;

            while (i < nodes.elementAt(i_leaf).index2) {
                eol = skipToEOL(element_bytes, offsets[i]);
                fakeOut.write(element_bytes, offsets[i], eol - offsets[i]);
                line.set(element_bytes, offsets[i], eol - offsets[i] - 1);
                stockObject.fromText(line);
                mbr = stockObject.getMBR();
                if (mbr.t1 < t1) t1 = mbr.t1;
                if (mbr.x1 < x1) x1 = mbr.x1;
                if (mbr.y1 < y1) y1 = mbr.y1;
                if (mbr.t2 > t2) t2 = mbr.t2;
                if (mbr.x2 > x2) x2 = mbr.x2;
                if (mbr.y2 > y2) y2 = mbr.y2;
                i++;
            }
            nodes.elementAt(i_leaf).set(t1, x1, y1, t2, x2, y2);
        }
        fakeOut.close();
        fakeOut = null;

        // Calculate the MBR and offsetOfFirstElement for non-leaves
        for (int i_node = nonLeafNodeCount - 1; i_node >= 0; i_node--) {
            int i_first_child = i_node * degree + 1;
            nodes.elementAt(i_node).offsetOfFirstElement = nodes.elementAt(i_first_child).offsetOfFirstElement;
            int i_child = 0;
            Prism mbr = nodes.elementAt(i_first_child + i_child);
            double t1 = mbr.t1;
            double x1 = mbr.x1;
            double y1 = mbr.y1;
            double t2 = mbr.t2;
            double x2 = mbr.x2;
            double y2 = mbr.y2;
            i_child++;
            while (i_child < degree) {
                mbr = nodes.elementAt(i_first_child + i_child);
                if (mbr.t1 < t1) t1 = mbr.t1;
                if (mbr.x1 < x1) x1 = mbr.x1;
                if (mbr.y1 < y1) y1 = mbr.y1;
                if (mbr.t2 > t2) t2 = mbr.t2;
                if (mbr.x2 > x2) x2 = mbr.x2;
                if (mbr.y2 > y2) y2 = mbr.y2;
                i_child++;
            }
            nodes.elementAt(i_node).set(t1, x1, y1, t2, x2, y2);
        }

        // Start writing the tree
        // Write the tree header (including size)
        // Total tree size (== total bytes written - 8 bytes for the size itself)
        dataOut.writeInt(TreeHeaderSize + NodeSize * nodeCount + len);
        // Tree height
        dataOut.writeInt(height);
        // Degree
        dataOut.writeInt(degree);
        dataOut.writeInt(elementCount);
        // isColumnar
        dataOut.writeInt(columnarStorage ? 1 : 0);

        // Write nodes
        for (SplitStruct node : nodes) {
            node.write(dataOut);
        }

        // Write elements
        if (columnarStorage) {
            byte[] index_bs = index_bos.toByteArray();
            byte[][] bss = new byte[bos.length][];
            for (int i = 0; i < bss.length; i++) {
                bss[i] = bos[i].toByteArray();
            }
            for (int element_i = 0; element_i < elementCount; element_i++) {
                dataOut.write(index_bs, ids[element_i] * IndexUnitSize, IndexUnitSize);
            }
            for (int i = 0; i < fields.length; i++) {
                int fieldSize = 0;
                if (fields[i].getType().equals(Integer.TYPE)) {
                    fieldSize = 4;
                } else if (fields[i].getType().equals(Long.TYPE)) {
                    fieldSize = 8;
                } else if (fields[i].getType().equals(Double.TYPE)) {
                    fieldSize = 8;
                } else {
                    // Unsupported field types were not serialized above, so skip them here too
                    continue;
                }
                for (int element_i = 0; element_i < elementCount; element_i++) {
                    dataOut.write(bss[i], ids[element_i] * fieldSize, fieldSize);
                }
            }
        } else {
            for (int element_i = 0; element_i < elementCount; element_i++) {
                int eol = skipToEOL(element_bytes, offsets[element_i]);
                dataOut.write(element_bytes, offsets[element_i], eol - offsets[element_i]);
            }
        }
    } catch (IOException e) {
        e.printStackTrace();
    } catch (IllegalArgumentException e) {
        e.printStackTrace();
    } catch (IllegalAccessException e) {
        e.printStackTrace();
    }
}
From source file: gov.nih.nci.evs.browser.utils.DataUtils.java

public static HashMap getPropertyValuesForCodes(String scheme, String version, Vector codes,
        String propertyName) {
    try {
        LexBIGService lbSvc = new RemoteServerUtil().createLexBIGService();
        if (lbSvc == null) {
            _logger.warn("lbSvc = null");
            return null;
        }
        CodingSchemeVersionOrTag versionOrTag = new CodingSchemeVersionOrTag();
        if (version != null) {
            versionOrTag.setVersion(version);
        }
        ConceptReferenceList crefs = new ConceptReferenceList();
        for (int i = 0; i < codes.size(); i++) {
            String code = (String) codes.elementAt(i);
            ConceptReference cr = new ConceptReference();
            cr.setCodingSchemeName(scheme);
            cr.setConceptCode(code);
            crefs.addConceptReference(cr);
        }
        CodedNodeSet cns = null;
        try {
            cns = lbSvc.getCodingSchemeConcepts(scheme, versionOrTag);
            cns = cns.restrictToCodes(crefs);
        } catch (Exception e1) {
            e1.printStackTrace();
        }
        if (cns == null)
            return null;
        try {
            LocalNameList propertyNames = new LocalNameList();
            propertyNames.addEntry(propertyName);
            CodedNodeSet.PropertyType[] propertyTypes = null;
            SortOptionList sortOptions = null;
            LocalNameList filterOptions = null;
            boolean resolveObjects = true; // needs to be set to true
            int maxToReturn = codes.size();
            ResolvedConceptReferenceList rcrl = cns.resolveToList(sortOptions, filterOptions, propertyNames,
                    propertyTypes, resolveObjects, maxToReturn);
            HashMap hmap = new HashMap();
            if (rcrl == null) {
                _logger.warn("Concept not found.");
                return null;
            }
            if (rcrl.getResolvedConceptReferenceCount() > 0) {
                for (int i = 0; i < rcrl.getResolvedConceptReferenceCount(); i++) {
                    ResolvedConceptReference rcr = rcrl.getResolvedConceptReference(i);
                    Entity c = rcr.getReferencedEntry();
                    if (c == null) {
                        _logger.warn("Concept is null.");
                    } else {
                        // Concatenate the property values, separated by "; "
                        Property[] properties = c.getProperty();
                        StringBuffer buf = new StringBuffer();
                        for (int j = 0; j < properties.length; j++) {
                            Property prop = properties[j];
                            buf.append(prop.getValue().getContent());
                            if (j < properties.length - 1) {
                                buf.append("; ");
                            }
                        }
                        String values = buf.toString();
                        hmap.put(rcr.getCode(), values);
                    }
                }
            }
            return hmap;
        } catch (Exception e) {
            _logger.error("Method: SearchUtil.searchByProperties");
            _logger.error("* ERROR: cns.resolve throws exceptions.");
            _logger.error("* " + e.getClass().getSimpleName() + ": " + e.getMessage());
        }
    } catch (Exception ex) {
        ex.printStackTrace();
    }
    return null;
}
From source file: escada.tpc.common.clients.jmx.ClientEmulationStartup.java

private void startClientEmulation(boolean exit) {
    ClientEmulation e = null;
    Vector<ClientEmulation> ebs = new Vector<ClientEmulation>();
    DatabaseManager dbManager = null;
    try {
        logger.info("Starting up the client application.");
        logger.info("Remote Emulator for Database Benchmark ...");
        logger.info("Universidade do Minho (Grupo de Sistemas Distribuidos)");
        logger.info("Version 0.1");

        Class cl = null;
        Constructor co = null;
        cl = Class.forName(this.workloadResources.getDbClass());
        try {
            co = cl.getConstructor(new Class[] { Integer.TYPE });
        } catch (Exception ex) {
            // No int-arg constructor available; fall back to the no-arg constructor below
        }
        if (co == null) {
            dbManager = (DatabaseManager) cl.newInstance();
        } else {
            dbManager = (DatabaseManager) co
                    .newInstance(new Object[] { new Integer(this.workloadResources.getClients()) });
        }

        SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd_HH_mm");
        String date = sdf.format(new Date());
        PerformanceLogger.setPrintWriter("TPCC-" + date + "-" + this.workloadResources.getPrefix() + "-time-"
                + this.workloadResources.getMeasurementTime() + "-clients-"
                + this.workloadResources.getClients() + "-frag-" + this.workloadResources.getFrag() + "-think-"
                + this.workloadResources.isThinkTime() + ".dat");
        PerformanceCounters.getReference(); // Initialize instance

        dbManager.setConnectionPool(this.workloadResources.isConnectionPoolEnabled());
        dbManager.setMaxConnection(this.workloadResources.getPoolSize());
        dbManager.setDriverName(this.databaseResources.getDriver());
        dbManager.setjdbcPath(this.databaseResources.getConnectionString());
        dbManager.setUserInfo(this.databaseResources.getUserName(), this.databaseResources.getPassword());

        for (int i = 0; i < this.workloadResources.getClients(); i++) {
            e = new ClientEmulation();
            e.setFinished(false);
            e.setTraceInformation(this.workloadResources.getTrace());
            e.setNumberConcurrentEmulators(this.workloadResources.getClients());
            e.setStatusThinkTime(this.workloadResources.isThinkTime());
            e.setStatusReSubmit(this.workloadResources.isResubmit());
            e.setDatabase(dbManager);
            e.setEmulationName(this.workloadResources.getPrefix());
            e.setHostId(Integer.toString(this.workloadResources.getHostId()));
            e.create(this.workloadResources.getEbClass(), this.workloadResources.getStClass(), i,
                    this.workloadResources.getFrag(), this, this.workloadResources.getPrefix());
            Thread t = new Thread(e);
            t.setName(this.workloadResources.getPrefix() + "-" + i);
            e.setThread(t);
            t.start();
            ebs.add(e);
        }

        synchronized (this) {
            server.setClientEmulations(this.workloadResources.getPrefix(), ebs);
            server.attachClientToServer(this.workloadResources.getPrefix(),
                    this.databaseResources.getConnectionString());
        }

        logger.info("Running simulation for " + this.workloadResources.getMeasurementTime() + " minute(s).");
        waitForRampDown(this.workloadResources.getPrefix(), 0, this.workloadResources.getMeasurementTime());

        // Signal completion to all emulators first, then join their threads
        for (int i = 0; i < this.workloadResources.getClients(); i++) {
            e = (ClientEmulation) ebs.elementAt(i);
            e.setCompletion(true);
        }
        for (int i = 0; i < this.workloadResources.getClients(); i++) {
            e = (ClientEmulation) ebs.elementAt(i);
            logger.info("Waiting for the eb " + i + " to finish its job..");
            try {
                e.getThread().join();
            } catch (InterruptedException inte) {
                inte.printStackTrace();
                continue;
            }
        }
        logger.info("EBs finished.");

        PerformanceLogger.info("-------------------- SUMMARY ---------------------------");
        PerformanceLogger.info("Abort rate:" + PerformanceCounters.getReference().getTotalAbortRate());
        PerformanceLogger.info("Average latency:" + PerformanceCounters.getReference().getAverageLatency());
        PerformanceLogger
                .info("Measured tpmC:" + PerformanceCounters.getReference().getTotalNewOrderCommitRate());
        PerformanceLogger.close();
    } catch (Exception ex) {
        logger.info("Error while creating clients: ", ex);
    } finally {
        synchronized (this) {
            this.server.removeClientEmulations(this.workloadResources.getPrefix());
            this.server.removeClientStage(this.workloadResources.getPrefix());
            this.server.detachClientToServer(this.workloadResources.getPrefix(),
                    this.databaseResources.getConnectionString());
            notifyAll();
        }
        try {
            dbManager.releaseConnections();
        } catch (SQLException e1) {
            e1.printStackTrace();
        }
        logger.info("Ebs finished their jobs..");
        if (exit)
            System.exit(0);
    }
}
From source file: gov.nih.nci.evs.browser.utils.SearchUtils.java

private CodedNodeSet union(Vector<CodedNodeSet> cns_vec) {
    if (cns_vec == null)
        return null;
    if (cns_vec.size() == 0)
        return null;
    CodedNodeSet cns = cns_vec.elementAt(0);
    if (cns_vec.size() == 1)
        return cns;
    for (int i = 1; i < cns_vec.size(); i++) {
        CodedNodeSet next_cns = cns_vec.elementAt(i);
        try {
            cns = cns.union(next_cns);
        } catch (Exception ex) {
            _logger.error("WARNING: cns.union throws exception.");
        }
    }
    return cns;
}
From source file: gov.nih.nci.evs.browser.utils.SearchUtils.java

public Vector getSuperconcepts(String scheme, String version, String code) {
    String hierarchicalAssoName = "hasSubtype";
    Vector hierarchicalAssoName_vec = getHierarchyAssociationId(scheme, version);
    if (hierarchicalAssoName_vec != null && hierarchicalAssoName_vec.size() > 0) {
        hierarchicalAssoName = (String) hierarchicalAssoName_vec.elementAt(0);
    }
    return getAssociationSources(scheme, version, code, hierarchicalAssoName);
}
From source file: gov.nih.nci.evs.browser.utils.SearchUtils.java

public Vector getSubconcepts(String scheme, String version, String code) {
    String hierarchicalAssoName = "hasSubtype";
    Vector hierarchicalAssoName_vec = getHierarchyAssociationId(scheme, version);
    if (hierarchicalAssoName_vec != null && hierarchicalAssoName_vec.size() > 0) {
        hierarchicalAssoName = (String) hierarchicalAssoName_vec.elementAt(0);
    }
    return getAssociationTargets(scheme, version, code, hierarchicalAssoName);
}
From source file: gov.nih.nci.evs.browser.utils.SearchUtils.java

public Vector getParentCodes(String scheme, String version, String code) {
    Vector hierarchicalAssoName_vec = getHierarchyAssociationId(scheme, version);
    if (hierarchicalAssoName_vec == null || hierarchicalAssoName_vec.size() == 0) {
        return null;
    }
    String hierarchicalAssoName = (String) hierarchicalAssoName_vec.elementAt(0);
    return getAssociationSources(scheme, version, code, hierarchicalAssoName);
}
From source file: com.globalsight.everest.workflowmanager.WorkflowManagerLocal.java

private void deleteTasks(Session p_session, Vector p_workflowTaskInstances, Workflow p_workflow,
        List p_wfTaskInfos) throws Exception {
    int size = p_workflowTaskInstances.size();
    for (int i = 0; i < size; i++) {
        WorkflowTaskInstance inst = (WorkflowTaskInstance) p_workflowTaskInstances.elementAt(i);
        // Get a task from Toplink
        Task task = ServerProxy.getTaskManager().getTask(inst.getTaskId());
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("deleteTasks : " + " workflow=" + p_workflow.getId() + " WorkflowTaskInstance="
                    + WorkflowHelper.toDebugString(inst) + GlobalSightCategory.getLineContinuation()
                    + " task=" + task);
        }
        // Since we can also modify a workflow in Ready state, we need
        // to check before deletion (in Ready state the Task has not been
        // created yet).
        if (task != null) {
            p_session.delete(task);
            p_workflow.removeTask(task);
        }
    }
}
From source file: nzilbb.csv.CsvDeserializer.java

/**
 * Loads the serialized form of the graph, using the given set of named streams.
 * @param streams A list of named streams that contain all the
 * transcription/annotation data required, and possibly (a) stream(s) for the media annotated.
 * @param schema The layer schema, defining layers and the way they interrelate.
 * @return A list of parameters that require setting before {@link IDeserializer#deserialize()}
 * can be invoked. This may be an empty list, and may include parameters with the value already
 * set to a workable default. If there are parameters, and user interaction is possible, then
 * the user may be presented with an interface for setting/confirming these parameters, before
 * they are then passed to {@link IDeserializer#setParameters(ParameterSet)}.
 * @throws SerializationException If the graph could not be loaded.
 * @throws IOException On IO error.
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
public ParameterSet load(NamedStream[] streams, Schema schema) throws SerializationException, IOException {
    // Take the first stream, ignore all others
    NamedStream csv = Utility.FindSingleStream(streams, ".csv", "text/csv");
    if (csv == null)
        throw new SerializationException("No CSV stream found");
    setName(csv.getName());
    setSchema(schema);

    // Create a list of layers we need and possible matching layer names
    LinkedHashMap<Parameter, List<String>> layerToPossibilities = new LinkedHashMap<Parameter, List<String>>();
    HashMap<String, LinkedHashMap<String, Layer>> layerToCandidates = new HashMap<String, LinkedHashMap<String, Layer>>();

    LinkedHashMap<String, Layer> metadataLayers = new LinkedHashMap<String, Layer>();
    for (Layer layer : schema.getRoot().getChildren().values()) {
        if (layer.getAlignment() == Constants.ALIGNMENT_NONE) {
            metadataLayers.put(layer.getId(), layer);
        }
    } // next root child layer

    // Look for person attributes
    for (Layer layer : schema.getParticipantLayer().getChildren().values()) {
        if (layer.getAlignment() == Constants.ALIGNMENT_NONE) {
            metadataLayers.put(layer.getId(), layer);
        }
    } // next participant child layer

    LinkedHashMap<String, Layer> utteranceAndMetadataLayers = new LinkedHashMap<String, Layer>(metadataLayers);
    utteranceAndMetadataLayers.put(getUtteranceLayer().getId(), getUtteranceLayer());
    LinkedHashMap<String, Layer> whoAndMetadataLayers = new LinkedHashMap<String, Layer>(metadataLayers);
    whoAndMetadataLayers.put(getParticipantLayer().getId(), getParticipantLayer());

    // Read the header line
    setParser(CSVParser.parse(csv.getStream(), java.nio.charset.Charset.forName("UTF-8"),
            CSVFormat.EXCEL.withHeader()));
    setHeaderMap(parser.getHeaderMap());
    Vector<String> possibleIDHeaders = new Vector<String>();
    Vector<String> possibleUtteranceHeaders = new Vector<String>();
    Vector<String> possibleParticipantHeaders = new Vector<String>();
    for (String header : getHeaderMap().keySet()) {
        if (header.trim().length() == 0)
            continue;
        Vector<String> possibleMatches = new Vector<String>();
        possibleMatches.add("transcript" + header);
        possibleMatches.add("participant" + header);
        possibleMatches.add("speaker" + header);
        possibleMatches.add(header);

        // Special cases
        if (header.equalsIgnoreCase("id") || header.equalsIgnoreCase("transcript")) {
            possibleIDHeaders.add(header);
        } else if (header.equalsIgnoreCase("text") || header.equalsIgnoreCase("document")) {
            possibleUtteranceHeaders.add(header);
        } else if (header.equalsIgnoreCase("name") || header.equalsIgnoreCase("participant")
                || header.equalsIgnoreCase("participantid")) {
            possibleParticipantHeaders.add(header);
        }

        layerToPossibilities.put(new Parameter("header_" + getHeaderMap().get(header), Layer.class, header),
                possibleMatches);
        layerToCandidates.put("header_" + getHeaderMap().get(header), metadataLayers);
    } // next header

    ParameterSet parameters = new ParameterSet();

    // Add utterance/participant parameters
    int defaultUtterancePossibilityIndex = 0;

    // If there are no obvious ID column possibilities...
    Parameter idColumn = new Parameter("id", String.class, "ID Column",
            "Column containing the ID of the text.", false);
    if (possibleIDHeaders.size() == 0) {
        // ...include all columns
        possibleIDHeaders.addAll(getHeaderMap().keySet());
    } else {
        idColumn.setValue(possibleIDHeaders.firstElement());
    }
    idColumn.setPossibleValues(possibleIDHeaders);
    parameters.addParameter(idColumn);

    // If there are no obvious participant column possibilities...
    if (possibleParticipantHeaders.size() == 0) {
        // ...include all columns
        possibleParticipantHeaders.addAll(getHeaderMap().keySet());
        // The default participant column will be the first column, so the default
        // utterance should be the second (if we didn't find an obvious text column)
        if (possibleParticipantHeaders.size() > 1) // but only if there's more than one column
        {
            defaultUtterancePossibilityIndex = 1;
        }
    }
    Parameter participantColumn = new Parameter("who", "Participant Column",
            "Column containing the ID of the author of the text.", true,
            possibleParticipantHeaders.firstElement());
    participantColumn.setPossibleValues(possibleParticipantHeaders);
    parameters.addParameter(participantColumn);

    // If there are no obvious text column possibilities...
    if (possibleUtteranceHeaders.size() == 0) {
        // ...include all columns
        possibleUtteranceHeaders.addAll(getHeaderMap().keySet());
    } else {
        // We found a possible text column, so run with it regardless of whether
        // we also found a possible participant column
        defaultUtterancePossibilityIndex = 0;
    }
    Parameter utteranceColumn = new Parameter("text", "Text Column",
            "Column containing the transcript text.", true,
            possibleUtteranceHeaders.elementAt(defaultUtterancePossibilityIndex));
    utteranceColumn.setPossibleValues(possibleUtteranceHeaders);
    parameters.addParameter(utteranceColumn);

    // Add column-mapping parameters, and set possible/default values
    for (Parameter p : layerToPossibilities.keySet()) {
        List<String> possibleNames = layerToPossibilities.get(p);
        LinkedHashMap<String, Layer> candidateLayers = layerToCandidates.get(p.getName());
        parameters.addParameter(p);
        if (p.getValue() == null && candidateLayers != null && possibleNames != null) {
            p.setValue(Utility.FindLayerById(candidateLayers, possibleNames));
        }
        if (p.getPossibleValues() == null && candidateLayers != null) {
            p.setPossibleValues(candidateLayers.values());
        }
    }
    return parameters;
}