List of usage examples for java.util Vector toArray
@SuppressWarnings("unchecked") public synchronized <T> T[] toArray(T[] a)
From source file:org.apache.hadoop.mapred.JobTracker.java
public synchronized TaskReport[] getReduceTaskReports(JobID jobid) throws IOException { JobInProgress job = jobs.get(jobid); if (job != null) { // Check authorization aclsManager.checkAccess(job, UserGroupInformation.getCurrentUser(), Operation.VIEW_JOB_DETAILS); }/* www .j a v a 2 s . c o m*/ if (job == null || !isJobInited(job)) { return EMPTY_TASK_REPORTS; } else { Vector<TaskReport> reports = new Vector<TaskReport>(); Vector completeReduceTasks = job.reportTasksInProgress(false, true); for (Iterator it = completeReduceTasks.iterator(); it.hasNext();) { TaskInProgress tip = (TaskInProgress) it.next(); reports.add(tip.generateSingleReport()); } Vector incompleteReduceTasks = job.reportTasksInProgress(false, false); for (Iterator it = incompleteReduceTasks.iterator(); it.hasNext();) { TaskInProgress tip = (TaskInProgress) it.next(); reports.add(tip.generateSingleReport()); } return reports.toArray(new TaskReport[reports.size()]); } }
From source file:org.apache.hadoop.mapred.JobTracker.java
public synchronized TaskReport[] getMapTaskReports(JobID jobid) throws IOException { JobInProgress job = jobs.get(jobid); if (job != null) { // Check authorization aclsManager.checkAccess(job, UserGroupInformation.getCurrentUser(), Operation.VIEW_JOB_DETAILS); }//from w ww .j a v a 2s . c om if (job == null || !isJobInited(job)) { return EMPTY_TASK_REPORTS; } else { Vector<TaskReport> reports = new Vector<TaskReport>(); Vector<TaskInProgress> completeMapTasks = job.reportTasksInProgress(true, true); for (Iterator it = completeMapTasks.iterator(); it.hasNext();) { TaskInProgress tip = (TaskInProgress) it.next(); reports.add(tip.generateSingleReport()); } Vector<TaskInProgress> incompleteMapTasks = job.reportTasksInProgress(true, false); for (Iterator it = incompleteMapTasks.iterator(); it.hasNext();) { TaskInProgress tip = (TaskInProgress) it.next(); reports.add(tip.generateSingleReport()); } return reports.toArray(new TaskReport[reports.size()]); } }
From source file:uk.ac.babraham.SeqMonk.Filters.BinomialFilterForRev.java
/**
 * Filters probes by testing whether the observed forward/reverse read split in
 * toStore differs significantly from the split predicted (via expectedEnds)
 * from the fromStore split, using a binomial test. Options (direction,
 * stringency, minimum observations, minimum percent shift, multiple-testing
 * correction) are read from the options panel. Passing probes are added to a
 * new ProbeList and filterFinished() is called.
 */
protected void generateProbeList() {
    // Direction of the test: two-sided by default, one-sided when the user
    // selected "Above" or "Below".
    boolean aboveOnly = false;
    boolean belowOnly = false;
    if (options.directionBox.getSelectedItem().equals("Above"))
        aboveOnly = true;
    else if (options.directionBox.getSelectedItem().equals("Below"))
        belowOnly = true;

    // Read thresholds from the UI, falling back to defaults on empty fields.
    if (options.stringencyField.getText().length() == 0) {
        stringency = 0.05;
    } else {
        stringency = Double.parseDouble(options.stringencyField.getText());
    }
    if (options.minObservationsField.getText().length() == 0) {
        minObservations = 10;
    } else {
        minObservations = Integer.parseInt(options.minObservationsField.getText());
    }
    if (options.minDifferenceField.getText().length() == 0) {
        minPercentShift = 10;
    } else {
        minPercentShift = Integer.parseInt(options.minDifferenceField.getText());
    }

    applyMultipleTestingCorrection = options.multiTestBox.isSelected();

    // The list description reflects which statistic gates admission below.
    ProbeList newList;
    if (applyMultipleTestingCorrection) {
        newList = new ProbeList(startingList, "Filtered Probes", "", "Q-value");
    } else {
        newList = new ProbeList(startingList, "Filtered Probes", "", "P-value");
    }

    Probe[] probes = startingList.getAllProbes();

    // We need to create a set of mean end methylation values for all starting values.
    // We round to the nearest percent so we'll end up with a set of 101 values (0-100)
    // which are the expected end points.
    double[] expectedEnds = calculateEnds(probes);

    if (expectedEnds == null)
        return; // They cancelled whilst calculating.

    // NOTE(review): debug dump of the expected-ends table to stderr; presumably
    // left over from development.
    for (int i = 0; i < expectedEnds.length; i++) {
        System.err.println("" + i + "\t" + expectedEnds[i]);
    }

    // This is where we'll store any hits.
    Vector<ProbeTTestValue> hits = new Vector<ProbeTTestValue>();

    BinomialTest bt = new BinomialTest();

    AlternativeHypothesis hypothesis = AlternativeHypothesis.TWO_SIDED;
    if (aboveOnly)
        hypothesis = AlternativeHypothesis.GREATER_THAN;
    if (belowOnly)
        hypothesis = AlternativeHypothesis.LESS_THAN;

    for (int p = 0; p < probes.length; p++) {
        if (p % 100 == 0) {
            progressUpdated("Processed " + p + " probes", p, probes.length);
        }
        if (cancel) {
            cancel = false;
            progressCancelled();
            return;
        }

        // Count forward/reverse reads for this probe in the "from" store.
        long[] reads = fromStore.getReadsForProbe(probes[p]);
        int forCount = 0;
        int revCount = 0;
        for (int r = 0; r < reads.length; r++) {
            if (SequenceRead.strand(reads[r]) == Location.FORWARD) {
                ++forCount;
            } else if (SequenceRead.strand(reads[r]) == Location.REVERSE) {
                ++revCount;
            }
        }
        if (forCount + revCount < minObservations)
            continue;

        int fromPercent = Math.round((forCount * 100f) / (forCount + revCount));

        // We need to calculate the confidence range for the from reads and work
        // out the most pessimistic value we could take as a starting value.
        WilsonScoreInterval wi = new WilsonScoreInterval();
        ConfidenceInterval ci = wi.createInterval(forCount + revCount, forCount, 1 - stringency);
        // System.err.println("From percent="+fromPercent+" meth="+forCount+" unmeth="+revCount+" sig="+stringency+" ci="+ci.getLowerBound()*100+" - "+ci.getUpperBound()*100);

        // Count forward/reverse reads for this probe in the "to" store.
        reads = toStore.getReadsForProbe(probes[p]);
        forCount = 0;
        revCount = 0;
        for (int r = 0; r < reads.length; r++) {
            if (SequenceRead.strand(reads[r]) == Location.FORWARD) {
                ++forCount;
            } else if (SequenceRead.strand(reads[r]) == Location.REVERSE) {
                ++revCount;
            }
        }
        if (forCount + revCount < minObservations)
            continue;

        float toPercent = (forCount * 100f) / (forCount + revCount);
        // System.err.println("Observed toPercent is "+toPercent+" from meth="+forCount+" unmeth="+revCount+" and true predicted is "+expectedEnds[Math.round(toPercent)]);

        // Find the most pessimistic fromPercent such that the expected toPercent
        // is as close to the observed value based on the confidence interval we
        // calculated before.
        double worseCaseExpectedPercent = 0;
        double smallestTheoreticalToActualDiff = 100;

        // Just taking the abs diff can still leave us with a closest value which
        // is still quite far from where we are. We therefore also check if our
        // confidence interval gives us a potential value range which spans the
        // actual value, and if it does we fail it without even running the test.
        boolean seenLower = false;
        boolean seenHigher = false;

        for (int m = Math.max((int) Math.floor(ci.getLowerBound() * 100), 0); m <= Math
                .min((int) Math.ceil(ci.getUpperBound() * 100), 100); m++) {
            double expectedPercent = expectedEnds[m];
            double diff = expectedPercent - toPercent;
            if (diff <= 0)
                seenLower = true;
            if (diff >= 0)
                seenHigher = true;
            if (Math.abs(diff) < smallestTheoreticalToActualDiff) {
                worseCaseExpectedPercent = expectedPercent;
                smallestTheoreticalToActualDiff = Math.abs(diff);
            }
        }
        // System.err.println("Worst case percent is "+worseCaseExpectedPercent+" with diff of "+smallestTheoreticalToActualDiff+" to "+toPercent);

        // Sanity check: the worst case over the interval can never beat the
        // point estimate itself.
        if (smallestTheoreticalToActualDiff > Math.abs((toPercent - expectedEnds[Math.round(fromPercent)]))) {
            throw new IllegalStateException("Can't have a worst case which is better than the actual");
        }

        if (Math.abs(toPercent - worseCaseExpectedPercent) < minPercentShift)
            continue;

        // Check the directionality.
        if (aboveOnly && worseCaseExpectedPercent - toPercent > 0)
            continue;
        if (belowOnly && worseCaseExpectedPercent - toPercent < 0)
            continue;

        // Now perform the Binomial test.
        double pValue = bt.binomialTest(forCount + revCount, forCount, worseCaseExpectedPercent / 100d,
                hypothesis);

        if (seenLower && seenHigher)
            pValue = 0.5; // Our confidence range spanned the actual value we had so we can't be significant
        // System.err.println("P value is "+pValue);

        // Store this as a potential hit (after correcting p-values later).
        hits.add(new ProbeTTestValue(probes[p], pValue));
    }

    // Now we can correct the p-values if we need to.
    ProbeTTestValue[] rawHits = hits.toArray(new ProbeTTestValue[0]);
    if (applyMultipleTestingCorrection) {
        // System.err.println("Correcting for "+rawHits.length+" tests");
        BenjHochFDR.calculateQValues(rawHits);
    }

    // Admit probes whose (corrected) statistic beats the stringency threshold.
    for (int h = 0; h < rawHits.length; h++) {
        if (applyMultipleTestingCorrection) {
            if (rawHits[h].q < stringency) {
                newList.addProbe(rawHits[h].probe, (float) rawHits[h].q);
            }
        } else {
            if (rawHits[h].p < stringency) {
                newList.addProbe(rawHits[h].probe, (float) rawHits[h].p);
            }
        }
    }
    filterFinished(newList);
}
From source file:org.kchine.r.server.DirectJNI.java
private String[] listExportableSymbols() { Rengine e = _rEngine;// w w w . jav a2 s .com long resultId = e.rniEval(e.rniParse("ls()", 1), 0); String[] slist = _rEngine.rniGetStringArray(resultId); Vector<String> result = new Vector<String>(); for (int i = 0; i < slist.length; ++i) { if (isExportable(slist[i]) && !slist[i].equals(PENV)) result.add(slist[i]); } return (String[]) result.toArray(new String[0]); }
From source file:com.truledger.client.Client.java
/**
 * Return storage fees for all assetids
 * @return
 * @throws ClientException
 */
public BalanceAndFraction[] getStorageFees() throws ClientException {
    this.requireCurrentServer();
    this.initServerAccts();
    // Every asset id stored under the user's storage-fee key contributes
    // one entry; assets without a fee are skipped.
    String[] assetids = db.getAccountDB().contents(this.userStorageFeeKey());
    Vector<BalanceAndFraction> fees = new Vector<BalanceAndFraction>();
    for (String assetid : assetids) {
        BalanceAndFraction fee = this.getStorageFee(assetid);
        if (fee != null) {
            fees.add(fee);
        }
    }
    return fees.toArray(new BalanceAndFraction[fees.size()]);
}
From source file:com.truledger.client.Client.java
/** * Return an array of arrays of Balance instances. * Each array of Balance instances is for one acct, in acct order, sorted by asset name. * @param assetids/*from ww w . j a v a2s . c om*/ * @param accts null means [T.MAIN], zero-length means all accts * @param rawmap * @return * @throws ClientException */ public Balance[][] getBalances(String[] assetids, String[] accts, BalanceMap rawmap) throws ClientException { ClientDB.AccountDB accountDB = db.getAccountDB(); String key = this.userBalanceKey(); if (accts == null) accts = new String[] { T.MAIN }; if (accts.length == 0) accts = accountDB.contents(key); Vector<Balance[]> resv = new Vector<Balance[]>(accts.length); for (String acct : accts) { String[] ids = assetids; if (ids == null) ids = accountDB.contents(key, acct); if (ids == null) continue; Vector<Balance> balsv = new Vector<Balance>(ids.length); for (String id : ids) { Balance bal = this.getBalanceInternal(id, acct, rawmap); if (bal != null) balsv.add(bal); } if (balsv.size() > 0) { Balance[] bals = balsv.toArray(new Balance[balsv.size()]); Arrays.sort(bals, balanceComparator); resv.add(bals); } } if (resv.size() == 0) return null; return resv.toArray(new Balance[resv.size()][]); }
From source file:nzilbb.csv.CsvDeserializer.java
/**
 * Deserializes the serialized data, generating one or more {@link Graph}s.
 * <p>Many data formats will only yield one graph (e.g. Transcriber
 * transcript or Praat textgrid), however there are formats that
 * are capable of storing multiple transcripts in the same file
 * (e.g. AGTK, Transana XML export), which is why this method
 * returns a list.
 * <p>This deserializer generates one graph per data row in the CSV file.
 * @return A list of valid (if incomplete) {@link Graph}s.
 * @throws SerializerNotConfiguredException if the object has not been configured.
 * @throws SerializationParametersMissingException if the parameters for this particular graph have not been set.
 * @throws SerializationException if errors occur during deserialization.
 */
public Graph[] deserialize()
        throws SerializerNotConfiguredException, SerializationParametersMissingException, SerializationException {
    // All of the layers this deserializer writes to must be configured first.
    if (participantLayer == null)
        throw new SerializerNotConfiguredException("Participant layer not set");
    if (turnLayer == null)
        throw new SerializerNotConfiguredException("Turn layer not set");
    if (utteranceLayer == null)
        throw new SerializerNotConfiguredException("Utterance layer not set");
    if (wordLayer == null)
        throw new SerializerNotConfiguredException("Word layer not set");
    if (schema == null)
        throw new SerializerNotConfiguredException("Layer schema not set");
    validate();

    String participantColumn = (String) parameters.get("who").getValue();
    String textColumn = (String) parameters.get("text").getValue();

    // if there are errors, accumulate as many as we can before throwing SerializationException
    SerializationException errors = null;

    Vector<Graph> graphs = new Vector<Graph>();
    Iterator<CSVRecord> records = getParser().iterator();
    while (records.hasNext()) {
        CSVRecord record = records.next();

        Graph graph = new Graph();
        // Graph id comes from the configured id column if there is one,
        // otherwise from the serializer name plus the CSV record number.
        if (parameters == null || parameters.get("id") == null || parameters.get("id").getValue() == null) {
            graph.setId(getName() + "-" + record.getRecordNumber());
        } else {
            graph.setId(record.get((String) parameters.get("id").getValue()));
        }
        graph.setOffsetUnits(Constants.UNIT_CHARACTERS);

        // create the 0 anchor to prevent graph tagging from creating one with no confidence
        Anchor firstAnchor = graph.getOrCreateAnchorAt(0.0, Constants.CONFIDENCE_MANUAL);
        Anchor lastAnchor = firstAnchor;

        // add layers to the graph
        // we don't just copy the whole schema, because that would imply that all the extra layers
        // contained no annotations, which is not necessarily true
        graph.addLayer((Layer) participantLayer.clone());
        graph.getSchema().setParticipantLayerId(participantLayer.getId());
        graph.addLayer((Layer) turnLayer.clone());
        graph.getSchema().setTurnLayerId(turnLayer.getId());
        graph.addLayer((Layer) utteranceLayer.clone());
        graph.getSchema().setUtteranceLayerId(utteranceLayer.getId());
        graph.addLayer((Layer) wordLayer.clone());
        graph.getSchema().setWordLayerId(wordLayer.getId());
        if (parameters != null) {
            // Any extra layers mapped by the parameters are added too, once each.
            for (Parameter p : parameters.values()) {
                if (p.getValue() instanceof Layer) {
                    Layer layer = (Layer) p.getValue();
                    if (layer != null && graph.getLayer(layer.getId()) == null) {
                        // haven't added this layer yet
                        graph.addLayer((Layer) layer.clone());
                    }
                }
            }
        }

        // participant/author
        Annotation participant = graph.createTag(graph, schema.getParticipantLayerId(),
                record.get(participantColumn));

        // meta-data: each mapped header column becomes either a graph tag or a
        // participant tag, depending on the mapped layer's parent.
        for (String header : getHeaderMap().keySet()) {
            if (header.trim().length() == 0)
                continue;
            Parameter p = parameters.get("header_" + getHeaderMap().get(header));
            if (p != null && p.getValue() != null) {
                Layer layer = (Layer) p.getValue();
                String value = record.get(header);
                if (layer.getParentId().equals(schema.getRoot().getId())) // graph tag
                {
                    graph.createTag(graph, layer.getId(), value);
                } else // participant tag
                {
                    graph.createTag(participant, layer.getId(), value);
                }
            } // parameter set
        } // next header

        // text
        Annotation turn = new Annotation(null, participant.getLabel(), getTurnLayer().getId());
        graph.addAnnotation(turn);
        turn.setParent(participant);
        turn.setStart(graph.getOrCreateAnchorAt(0.0, Constants.CONFIDENCE_MANUAL));

        // NOTE(review): this first utterance annotation is never added to the
        // graph and the variable is reassigned below — looks like dead code.
        Annotation line = new Annotation(null, turn.getLabel(), getUtteranceLayer().getId());
        line.setParentId(turn.getId());
        line.setStart(turn.getStart());
        int iLastPosition = 0; // NOTE(review): never read after initialization.

        String sLine = record.get(textColumn).trim();
        int iNumChars = sLine.length();
        line = new Annotation(null, sLine, getUtteranceLayer().getId());
        line.setParentId(turn.getId());
        line.setStart(turn.getStart());
        Anchor end = graph.getOrCreateAnchorAt(((double) iNumChars + 1), Constants.CONFIDENCE_MANUAL);
        line.setEnd(end);
        graph.addAnnotation(line);

        // ensure we have an utterance tokenizer
        if (getTokenizer() == null) {
            setTokenizer(new SimpleTokenizer(getUtteranceLayer().getId(), getWordLayer().getId()));
        }
        try {
            tokenizer.transform(graph);
        } catch (TransformationException exception) {
            if (errors == null)
                errors = new SerializationException();
            if (errors.getCause() == null)
                errors.initCause(exception);
            errors.addError(SerializationException.ErrorType.Tokenization, exception.getMessage());
        }
        graph.commit();

        OrthographyClumper clumper = new OrthographyClumper(wordLayer.getId(), utteranceLayer.getId());
        try {
            // clump non-orthographic 'words' with real words
            clumper.transform(graph);
            graph.commit();
        } catch (TransformationException exception) {
            if (errors == null)
                errors = new SerializationException();
            if (errors.getCause() == null)
                errors.initCause(exception);
            errors.addError(SerializationException.ErrorType.Tokenization, exception.getMessage());
        }

        if (errors != null)
            throw errors;

        // set end anchors of graph tags
        for (Annotation a : graph.list(getParticipantLayer().getId())) {
            a.setStartId(firstAnchor.getId());
            a.setEndId(lastAnchor.getId());
        }
        graph.commit();
        graphs.add(graph);
    } // next record

    return graphs.toArray(new Graph[0]);
}
From source file:com.mio.jrdv.sunshine.FetchWeatherTask.java
/** * Take the String representing the complete forecast in JSON Format and * pull out the data we need to construct the Strings needed for the wireframes. * * Fortunately parsing is easy: constructor takes the JSON string and converts it * into an Object hierarchy for us.//from ww w . java 2 s . c o m */ private String[] getWeatherDataFromJson(String forecastJsonStr, String locationSetting) throws JSONException { // Now we have a String representing the complete forecast in JSON Format. // Fortunately parsing is easy: constructor takes the JSON string and converts it // into an Object hierarchy for us. // These are the names of the JSON objects that need to be extracted. // Location information final String OWM_CITY = "city"; final String OWM_CITY_NAME = "name"; final String OWM_COORD = "coord"; // Location coordinate final String OWM_LATITUDE = "lat"; final String OWM_LONGITUDE = "lon"; // Weather information. Each day's forecast info is an element of the "list" array. final String OWM_LIST = "list"; final String OWM_PRESSURE = "pressure"; final String OWM_HUMIDITY = "humidity"; final String OWM_WINDSPEED = "speed"; final String OWM_WIND_DIRECTION = "deg"; // All temperatures are children of the "temp" object. 
final String OWM_TEMPERATURE = "temp"; final String OWM_MAX = "max"; final String OWM_MIN = "min"; final String OWM_WEATHER = "weather"; final String OWM_DESCRIPTION = "main"; final String OWM_WEATHER_ID = "id"; try { JSONObject forecastJson = new JSONObject(forecastJsonStr); JSONArray weatherArray = forecastJson.getJSONArray(OWM_LIST); JSONObject cityJson = forecastJson.getJSONObject(OWM_CITY); String cityName = cityJson.getString(OWM_CITY_NAME); JSONObject cityCoord = cityJson.getJSONObject(OWM_COORD); double cityLatitude = cityCoord.getDouble(OWM_LATITUDE); double cityLongitude = cityCoord.getDouble(OWM_LONGITUDE); long locationId = addLocation(locationSetting, cityName, cityLatitude, cityLongitude); // Insert the new weather information into the database Vector<ContentValues> cVVector = new Vector<ContentValues>(weatherArray.length()); // OWM returns daily forecasts based upon the local time of the city that is being // asked for, which means that we need to know the GMT offset to translate this data // properly. // Since this data is also sent in-order and the first day is always the // current day, we're going to take advantage of that to get a nice // normalized UTC date for all of our weather. Time dayTime = new Time(); dayTime.setToNow(); // we start at the day returned by local time. Otherwise this is a mess. int julianStartDay = Time.getJulianDay(System.currentTimeMillis(), dayTime.gmtoff); // now we work exclusively in UTC dayTime = new Time(); for (int i = 0; i < weatherArray.length(); i++) { // These are the values that will be collected. 
long dateTime; double pressure; int humidity; double windSpeed; double windDirection; double high; double low; String description; int weatherId; // Get the JSON object representing the day JSONObject dayForecast = weatherArray.getJSONObject(i); // Cheating to convert this to UTC time, which is what we want anyhow dateTime = dayTime.setJulianDay(julianStartDay + i); pressure = dayForecast.getDouble(OWM_PRESSURE); humidity = dayForecast.getInt(OWM_HUMIDITY); windSpeed = dayForecast.getDouble(OWM_WINDSPEED); windDirection = dayForecast.getDouble(OWM_WIND_DIRECTION); // Description is in a child array called "weather", which is 1 element long. // That element also contains a weather code. JSONObject weatherObject = dayForecast.getJSONArray(OWM_WEATHER).getJSONObject(0); description = weatherObject.getString(OWM_DESCRIPTION); weatherId = weatherObject.getInt(OWM_WEATHER_ID); // Temperatures are in a child object called "temp". Try not to name variables // "temp" when working with temperature. It confuses everybody. 
JSONObject temperatureObject = dayForecast.getJSONObject(OWM_TEMPERATURE); high = temperatureObject.getDouble(OWM_MAX); low = temperatureObject.getDouble(OWM_MIN); ContentValues weatherValues = new ContentValues(); weatherValues.put(WeatherEntry.COLUMN_LOC_KEY, locationId); weatherValues.put(WeatherEntry.COLUMN_DATE, dateTime); weatherValues.put(WeatherEntry.COLUMN_HUMIDITY, humidity); weatherValues.put(WeatherEntry.COLUMN_PRESSURE, pressure); weatherValues.put(WeatherEntry.COLUMN_WIND_SPEED, windSpeed); weatherValues.put(WeatherEntry.COLUMN_DEGREES, windDirection); weatherValues.put(WeatherEntry.COLUMN_MAX_TEMP, high); weatherValues.put(WeatherEntry.COLUMN_MIN_TEMP, low); weatherValues.put(WeatherEntry.COLUMN_SHORT_DESC, description); weatherValues.put(WeatherEntry.COLUMN_WEATHER_ID, weatherId); cVVector.add(weatherValues); } //////////////////////////////////////////////////////////////////////////////////////////// ////// //////////////////////////////////////////////////////////////////////////////////////////// //////NO SE USA SE PASA AL FORECAST ADAPTER // The lines in getWeatherDataFromJson where we requery the database after the insert. //vamoa acambiarlo por el nuevo FroreCastAdapter!!: //////////////////////////////////////////////////////////////////////////////////////////// // // add to database // if ( cVVector.size() > 0 ) { // // Student: call bulkInsert to add the weatherEntries to the database here // // ContentValues[] cvArray = new ContentValues[cVVector.size()]; // cVVector.toArray(cvArray); // mContext.getContentResolver().bulkInsert(WeatherEntry.CONTENT_URI, cvArray); // // // } // // // Sort order: Ascending, by date. 
// String sortOrder = WeatherEntry.COLUMN_DATE + " ASC"; // Uri weatherForLocationUri = WeatherEntry.buildWeatherLocationWithStartDate( // locationSetting, System.currentTimeMillis()); // // // Students: Uncomment the next lines to display what what you stored in the bulkInsert // //esto no hace nada !!lo que hace es que inserte 0!!!! si no lo hacemos por medio un bulkinsert!! // // Cursor cur = mContext.getContentResolver().query(weatherForLocationUri, // null, null, null, sortOrder); // // cVVector = new Vector<ContentValues>(cur.getCount()); // if ( cur.moveToFirst() ) { // do { // ContentValues cv = new ContentValues(); // DatabaseUtils.cursorRowToContentValues(cur, cv); // cVVector.add(cv); // } while (cur.moveToNext()); // } // // Log.d(LOG_TAG, "FetchWeatherTask Complete. " + cVVector.size() + " Inserted"); // // String[] resultStrs = convertContentValuesToUXFormat(cVVector); // return resultStrs; int inserted = 0; // add to database if (cVVector.size() > 0) { ContentValues[] cvArray = new ContentValues[cVVector.size()]; cVVector.toArray(cvArray); inserted = mContext.getContentResolver().bulkInsert(WeatherEntry.CONTENT_URI, cvArray); } Log.d(LOG_TAG, "FetchWeatherTask Complete. " + inserted + " Inserted"); } catch (JSONException e) { Log.e(LOG_TAG, e.getMessage(), e); e.printStackTrace(); } return null; }
From source file:com.truledger.client.Client.java
/**
 * Compute the balance hash of all the server-signed messages in subdirs of balanceKey of db.
 * @param db
 * @param unpacker Parses and matches a server-signed message string into a Parser.Dict instance
 * @param balancekey
 * @param acctbals if non-null, maps acct names to maps of assetids to non-server-signed balance messages.
 * @return
 * @throws ClientException
 */
public Utility.DirHash balancehash(FSDB db, Utility.MsgUnpacker unpacker, String balancekey,
        StringMapMap acctbals) throws ClientException {
    String hash = null;
    int hashcnt = 0;
    String[] accts = db.contents(balancekey);
    if (acctbals != null) {
        // Append any accts that exist only in the override map, so their
        // pending balance messages are still hashed.
        Vector<String> acctsv = new Vector<String>();
        Set<String> keys = acctbals.keySet();
        for (String key : keys) {
            if (Utility.position(key, accts) < 0)
                acctsv.add(key);
        }
        int size = acctsv.size();
        if (size > 0) {
            String[] newaccts = new String[accts.length + size];
            int i = 0;
            for (String acct : accts)
                newaccts[i++] = acct;
            for (String acct : acctsv)
                newaccts[i++] = acct;
            accts = newaccts;
        }
    }
    // Reused scratch vectors: for each acct, collect the overriding messages
    // (newitems) and the asset ids they replace (removednames).
    Vector<String> newitemsv = new Vector<String>();
    Vector<String> removednamesv = new Vector<String>();
    for (String acct : accts) {
        newitemsv.clear();
        removednamesv.clear();
        StringMap newacct = acctbals != null ? acctbals.get(acct) : null;
        if (newacct != null) {
            Set<String> assetids = newacct.keySet();
            for (String assetid : assetids) {
                String msg = newacct.get(assetid);
                newitemsv.add(msg);
                removednamesv.add(assetid);
            }
        }
        int cnt = newitemsv.size();
        String[] newitems = cnt > 0 ? newitemsv.toArray(new String[cnt]) : null;
        cnt = removednamesv.size();
        String[] removednames = cnt > 0 ? removednamesv.toArray(new String[cnt]) : null;
        try {
            Utility.DirHash dirHash = Utility.dirhash(db, balancekey + '.' + acct, unpacker, removednames,
                    newitems);
            if (dirHash != null) {
                // Per-acct hashes are chained with '.' separators, in acct order.
                hash = hash == null ? dirHash.hash : hash + '.' + dirHash.hash;
                hashcnt += dirHash.count;
            }
        } catch (Exception e) {
            throw new ClientException(e);
        }
    }
    // More than one contributing dir: collapse the chained string to a
    // single sha1. A single dir's hash is used as-is.
    if (hashcnt > 1)
        hash = Crypto.sha1(hash);
    return new Utility.DirHash(hash == null ? "" : hash, hashcnt);
}
From source file:com.truledger.client.Client.java
/**
 * Do the work for getInbox()
 * @param rawmap if non-null, receives a mapping from each returned Inbox item
 *               to the raw server message string it was parsed from
 * @return the inbox items, sorted by timestamp
 * @throws ClientException on parse errors, timestamp mismatches, or
 *                         unrecognized request types
 */
protected Inbox[] getInboxInternal(InboxRawmap rawmap) throws ClientException {
    String key = this.userInboxKey();
    ClientDB.AccountDB accountDB = db.getAccountDB();
    this.syncInbox();
    Vector<Inbox> resv = new Vector<Inbox>();
    for (String time : accountDB.contents(key)) {
        String msg = accountDB.get(key, time);
        // lastItem/lastItems pair each SPEND with the TRANFEE items that
        // immediately follow it in the same message.
        Inbox lastItem = null;
        Vector<Inbox> lastItems = null;
        Parser.DictList reqs;
        try {
            reqs = parser.parse(msg);
        } catch (Parser.ParseException e) {
            throw new ClientException(e);
        }
        for (Parser.Dict req : reqs) {
            Parser.Dict args = this.matchServerReq(req);
            String argstime = args.stringGet(T.TIME);
            // The server-signed wrapper's timestamp must agree with the
            // inbox entry's key.
            if (!(argstime == null || time.equals(argstime))) {
                throw new ClientException("Inbox message timestamp mismatch");
            }
            args = (Parser.Dict) args.get(T.MSG);
            String request = args.stringGet(T.REQUEST);
            String id = args.stringGet(T.CUSTOMER);
            String msgtime = args.stringGet(T.TIME);
            String note = args.stringGet(T.NOTE);
            String assetid = null;
            String amount = null;
            String assetname = null;
            String formattedAmount = null;
            if (request.equals(T.SPEND) || request.equals(T.TRANFEE)) {
                assetid = args.stringGet(T.ASSET);
                amount = args.stringGet(T.AMOUNT);
                Asset asset = null;
                boolean incnegs = false;
                try {
                    asset = this.getAsset(assetid);
                } catch (Exception e) {
                    // Unknown asset: leave name/formatted amount null.
                }
                if (asset != null) {
                    assetname = asset.name;
                    incnegs = !serverid.equals(args.stringGet(T.CUSTOMER));
                    formattedAmount = this.formatAssetValue(amount, asset, incnegs);
                }
            } else if (request.equals(T.SPENDACCEPT) || request.equals(T.SPENDREJECT)) {
                // To do: pull in data from outbox to get amounts
            } else {
                throw new ClientException("Bad request in inbox: " + request);
            }
            try {
                note = Crypto.decryptNote(this.id, this.privkey, note);
            } catch (Exception e) {
                // Decryption failure: keep the note as-is (it may be plaintext).
            }
            Inbox item = new Inbox(request, id, time, msgtime, assetid, assetname, amount, formattedAmount,
                    note);
            if (request.equals(T.SPEND)) {
                resv.add(item);
                lastItem = item;
            } else if (request.equals(T.TRANFEE)) {
                // Fees are attached to the preceding spend, not returned
                // as top-level items.
                if (lastItem == null)
                    throw new ClientException("tranfee without matching spend");
                if (lastItems == null)
                    lastItems = new Vector<Inbox>();
                lastItems.add(item);
            } else {
                resv.add(item);
                // Any other request closes out the pending spend, flushing
                // its accumulated fee items onto it.
                if (lastItem != null) {
                    if (lastItems != null) {
                        lastItem.items = lastItems.toArray(new Inbox[lastItems.size()]);
                        lastItems = null;
                    }
                    lastItem = null;
                }
            }
            if (rawmap != null)
                rawmap.put(item, msg);
        }
    }
    Inbox[] res = resv.toArray(new Inbox[resv.size()]);
    // Order the combined result by timestamp using the client's comparator.
    Arrays.sort(res, new Comparator<Inbox>() {
        public int compare(Inbox i1, Inbox i2) {
            return Client.this.bcm.compare(i1.time, i2.time);
        }
    });
    return res;
}