List of usage examples for java.util.Vector.toArray(T[] a)
@SuppressWarnings("unchecked") public synchronized <T> T[] toArray(T[] a)
From source file: com.delcyon.capo.Configuration.java

@SuppressWarnings("unchecked")
public Preference[] getDirectoryPreferences() {
    boolean isServer = CapoApplication.getApplication() instanceof CapoServer;
    Vector<Preference> preferenceVector = new Vector<Preference>();
    Set<String> directoryProvidersSet = CapoApplication.getAnnotationMap()
            .get(DirectoyProvider.class.getCanonicalName());
    if (directoryProvidersSet != null) {
        for (String className : directoryProvidersSet) {
            try {
                Location location = Class.forName(className).getAnnotation(DirectoyProvider.class).location();
                Class preferenceClass = Class.forName(className).getAnnotation(DirectoyProvider.class)
                        .preferences();
                String preferenceName = Class.forName(className).getAnnotation(DirectoyProvider.class)
                        .preferenceName();
                // Keep only preferences whose declared location matches this side (client/server).
                if (location == Location.BOTH) {
                    preferenceVector.add((Preference) Enum.valueOf(preferenceClass, preferenceName));
                } else if (isServer && location == Location.SERVER) {
                    preferenceVector.add((Preference) Enum.valueOf(preferenceClass, preferenceName));
                } else if (!isServer && location == Location.CLIENT) {
                    preferenceVector.add((Preference) Enum.valueOf(preferenceClass, preferenceName));
                }
            } catch (ClassNotFoundException classNotFoundException) {
                CapoApplication.logger.log(Level.WARNING, "Error getting directory providers",
                        classNotFoundException);
            }
        }
    }
    return preferenceVector.toArray(new Preference[] {});
}
From source file: tech.salroid.filmy.parser.MainActivityParseWork.java

public void parse() {
    try {
        JSONObject jsonObject = new JSONObject(result);
        JSONArray jsonArray = jsonObject.getJSONArray("results");
        Vector<ContentValues> cVVector = new Vector<ContentValues>(jsonArray.length());
        for (int i = 0; i < jsonArray.length(); i++) {
            String title, poster, id;
            title = jsonArray.getJSONObject(i).getString("title");
            poster = jsonArray.getJSONObject(i).getString("poster_path");
            id = jsonArray.getJSONObject(i).getString("id");
            String[] temp_year = jsonArray.getJSONObject(i).getString("release_date").split("-");
            String year = temp_year[0];
            String trimmedQuery = title.toLowerCase().trim();
            String finalQuery = trimmedQuery.replace(" ", "-");
            finalQuery = finalQuery.replace("'", "-");
            String slug = finalQuery.replace(":", "") + "-" + year;
            // Collect the new movie information for the database.
            ContentValues movieValues = new ContentValues();
            if (!poster.equals("null")) {
                movieValues.put(FilmContract.MoviesEntry.MOVIE_ID, id);
                movieValues.put(FilmContract.MoviesEntry.MOVIE_TITLE, title);
                movieValues.put(FilmContract.MoviesEntry.MOVIE_YEAR, year);
                movieValues.put(FilmContract.MoviesEntry.MOVIE_POSTER_LINK,
                        "http://image.tmdb.org/t/p/w185" + poster);
                cVVector.add(movieValues);
            }
        }
        int inserted = 0;
        // Add to database via a single bulk insert.
        if (cVVector.size() > 0) {
            ContentValues[] cvArray = new ContentValues[cVVector.size()];
            cVVector.toArray(cvArray);
            context.getContentResolver().delete(FilmContract.MoviesEntry.CONTENT_URI, null, null);
            inserted = context.getContentResolver().bulkInsert(FilmContract.MoviesEntry.CONTENT_URI, cvArray);
        }
    } catch (JSONException e1) {
        e1.printStackTrace();
    }
}
From source file: edu.cornell.med.icb.geo.tools.ClassificationTask.java

public static ClassificationTask[] parseTaskAndConditions(final Reader taskListReader,
        final Reader conditionIdsReader) {
    final ConditionIdentifiers conditionIdentifiers = readConditions(conditionIdsReader);
    if (conditionIdentifiers == null) {
        throw new IllegalStateException("conditionIdentifiers must not be null");
    }
    String line;
    // Read tasks:
    final Vector<ClassificationTask> tasks = new Vector<ClassificationTask>();
    final BufferedReader taskListBufferedReader = new BufferedReader(taskListReader);
    try {
        while ((line = taskListBufferedReader.readLine()) != null) {
            if (line.startsWith("#")) {
                continue;
            }
            final String[] tokens = line.split("[\t]");
            final ClassificationTask task;
            final int experimentNameIndex;
            final int numberOfClasses;
            if ("one-class".equals(tokens[0])) {
                experimentNameIndex = 1;
                numberOfClasses = 1;
                task = parseNewTaskFormat(tokens, experimentNameIndex, numberOfClasses);
            } else if ("two-class".equals(tokens[0])) {
                experimentNameIndex = 1;
                numberOfClasses = 2;
                task = parseNewTaskFormat(tokens, experimentNameIndex, numberOfClasses);
            } else if ("multi-class".equals(tokens[0])) {
                experimentNameIndex = 1;
                numberOfClasses = (tokens.length - 2) / 2;
                task = parseNewTaskFormat(tokens, experimentNameIndex, numberOfClasses);
            } else if ("regression".equals(tokens[0])) {
                throw new UnsupportedOperationException(
                        "Error parsing task file: Keyword regression is reserved for future use, but not yet supported.");
            } else {
                // Parse legacy format:
                experimentNameIndex = 0;
                numberOfClasses = 2;
                if (tokens.length != 5) {
                    System.err.println(
                            "Error parsing task. Task line must have 5 fields separated by tab. Line was :"
                                    + line);
                    return null;
                }
                task = new ClassificationTask(2);
                task.setExperimentDataFilename(tokens[0]);
                task.setConditionName(0, tokens[1]);
                task.setConditionName(1, tokens[2]);
                task.setConditionInstanceNumber(0, Integer.parseInt(tokens[3]));
                task.setConditionInstanceNumber(1, Integer.parseInt(tokens[4]));
                task.numberOfClasses = numberOfClasses;
            }
            task.setConditionsIdentifiers(conditionIdentifiers);
            tasks.add(task);
        }
    } catch (IOException e) {
        LOG.error(e);
        return null;
    }
    return tasks.toArray(new ClassificationTask[tasks.size()]);
}
From source file: com.nonobay.fana.udacityandroidproject1popularmovies.FetchMovieTask.java

/**
 * Take the String representing the complete movie list in JSON format and
 * pull out the data we need to construct the Strings needed for the wireframes.
 *
 * Fortunately parsing is easy: the constructor takes the JSON string and
 * converts it into an Object hierarchy for us.
 */
private void getMovieDataFromJson(String moviesJsonStr) throws JSONException {
    /* Example response:
       {
         "page": 1,
         "results": [
           {
             "adult": false,
             "backdrop_path": "/razvUuLkF7CX4XsLyj02ksC0ayy.jpg",
             "genre_ids": [80, 28, 53],
             "id": 260346,
             "original_language": "en",
             "original_title": "Taken 3",
             "overview": "Ex-government operative Bryan Mills finds his life is shattered when he's falsely accused of a murder that hits close to home. As he's pursued by a savvy police inspector, Mills employs his particular set of skills to track the real killer and exact his unique brand of justice.",
             "release_date": "2015-01-09",
             "poster_path": "/c2SSjUVYawDUnQ92bmTqsZsPEiB.jpg",
             "popularity": 11.737899,
             "title": "Taken 3",
             "video": false,
             "vote_average": 6.2,
             "vote_count": 698
           }
         ],
         "total_pages": 11543,
         "total_results": 230847
       }
    */
    // These are the names of the JSON objects that need to be extracted.
    final String JSON_PAGE = "page";
    final String JSON_PAGE_TOTAL = "total_pages";
    final String JSON_MOVIE_LIST = "results";
    final String JSON_MOVIE_TOTAL = "total_results";
    final String JSON_MOVIE_ID = "id";
    final String JSON_MOVIE_TITLE = "original_title";
    final String JSON_MOVIE_DATE = "release_date";
    final String JSON_MOVIE_POSTER = "poster_path";
    final String JSON_MOVIE_OVERVIEW = "overview";
    final String JSON_MOVIE_VOTE_AVERAGE = "vote_average";
    try {
        JSONObject moviesJson = new JSONObject(moviesJsonStr);
        JSONArray movieArray = moviesJson.getJSONArray(JSON_MOVIE_LIST);
        // Collect the new movie information for insertion into the database.
        Vector<ContentValues> cVVector = new Vector<>(movieArray.length());
        // These are the values that will be collected.
        String releaseTime;
        long movieId;
        double vote_average;
        String overview;
        String original_title;
        String poster_path;
        for (int i = 0; i < movieArray.length(); i++) {
            // Get the JSON object representing one movie.
            JSONObject eachMovie = movieArray.getJSONObject(i);
            movieId = eachMovie.getLong(JSON_MOVIE_ID);
            original_title = eachMovie.getString(JSON_MOVIE_TITLE);
            overview = eachMovie.getString(JSON_MOVIE_OVERVIEW);
            poster_path = eachMovie.getString(JSON_MOVIE_POSTER);
            vote_average = eachMovie.getDouble(JSON_MOVIE_VOTE_AVERAGE);
            releaseTime = eachMovie.getString(JSON_MOVIE_DATE);
            ContentValues movieValues = new ContentValues();
            movieValues.put(MovieContract.MovieEntry.COLUMN_MOVIE_ID, movieId);
            movieValues.put(MovieContract.MovieEntry.COLUMN_ORIGINAL_TITLE, original_title);
            movieValues.put(MovieContract.MovieEntry.COLUMN_POSTER_THUMBNAIL, poster_path);
            movieValues.put(MovieContract.MovieEntry.COLUMN_SYNOPSIS, overview);
            movieValues.put(MovieContract.MovieEntry.COLUMN_RELEASE_DATE, releaseTime);
            movieValues.put(MovieContract.MovieEntry.COLUMN_USER_RATING, vote_average);
            cVVector.add(movieValues);
        }
        // Add to database: clear existing rows, then bulk-insert the movie entries.
        if (cVVector.size() > 0) {
            mContext.getContentResolver().delete(MovieContract.MovieEntry.CONTENT_URI, null, null);
            mContext.getContentResolver().bulkInsert(MovieContract.MovieEntry.CONTENT_URI,
                    cVVector.toArray(new ContentValues[cVVector.size()]));
        }
    } catch (JSONException e) {
        Log.e(LOG_TAG, e.getMessage(), e);
    }
}
From source file: uk.ac.babraham.SeqMonk.Pipelines.AntisenseTranscriptionPipeline.java

protected void startPipeline() {
    // We first need to generate probes over all of the features listed in
    // the feature types. The probes should cover the whole area of the
    // feature regardless of where it splices.
    Vector<Probe> probes = new Vector<Probe>();
    double pValue = optionsPanel.pValue();
    QuantitationStrandType readFilter = optionsPanel.readFilter();
    long[] senseCounts = new long[data.length];
    long[] antisenseCounts = new long[data.length];
    Chromosome[] chrs = collection().genome().getAllChromosomes();
    // First find the overall rate of antisense reads.
    for (int c = 0; c < chrs.length; c++) {
        if (cancel) {
            progressCancelled();
            return;
        }
        progressUpdated("Getting total antisense rate for chr" + chrs[c].name(), c, chrs.length * 2);
        Feature[] features = getValidFeatures(chrs[c]);
        for (int f = 0; f < features.length; f++) {
            if (cancel) {
                progressCancelled();
                return;
            }
            Probe p = new Probe(chrs[c], features[f].location().start(), features[f].location().end(),
                    features[f].location().strand(), features[f].name());
            probes.add(p);
            for (int d = 0; d < data.length; d++) {
                long[] reads = data[d].getReadsForProbe(p);
                for (int r = 0; r < reads.length; r++) {
                    if (readFilter.useRead(p, reads[r])) {
                        senseCounts[d] += SequenceRead.length(reads[r]);
                    } else {
                        antisenseCounts[d] += SequenceRead.length(reads[r]);
                    }
                }
            }
        }
    }
    Probe[] allProbes = probes.toArray(new Probe[0]);
    collection().setProbeSet(new ProbeSet("Features over " + optionsPanel.getSelectedFeatureType(), allProbes));
    // Now we can work out the overall antisense rate.
    double[] antisenseProbability = new double[data.length];
    for (int d = 0; d < data.length; d++) {
        System.err.println("Antisense counts are " + antisenseCounts[d] + " sense counts are " + senseCounts[d]);
        antisenseProbability[d] = antisenseCounts[d] / (double) (antisenseCounts[d] + senseCounts[d]);
        System.err.println("Antisense probability for " + data[d].name() + " is " + antisenseProbability[d]);
    }
    // Now we can quantitate each individual feature and test for whether it is
    // significantly showing antisense expression.
    ArrayList<Vector<ProbeTTestValue>> significantProbes = new ArrayList<Vector<ProbeTTestValue>>();
    for (int d = 0; d < data.length; d++) {
        significantProbes.add(new Vector<ProbeTTestValue>());
    }
    int[] readLengths = new int[data.length];
    for (int d = 0; d < readLengths.length; d++) {
        readLengths[d] = data[d].getMaxReadLength();
        System.err.println("For " + data[d].name() + " max read len is " + readLengths[d]);
    }
    for (int c = 0; c < chrs.length; c++) {
        if (cancel) {
            progressCancelled();
            return;
        }
        progressUpdated("Quantitating features on chr" + chrs[c].name(), chrs.length + c, chrs.length * 2);
        Probe[] thisChrProbes = collection().probeSet().getProbesForChromosome(chrs[c]);
        for (int p = 0; p < thisChrProbes.length; p++) {
            for (int d = 0; d < data.length; d++) {
                if (cancel) {
                    progressCancelled();
                    return;
                }
                long senseCount = 0;
                long antisenseCount = 0;
                long[] reads = data[d].getReadsForProbe(thisChrProbes[p]);
                for (int r = 0; r < reads.length; r++) {
                    if (readFilter.useRead(thisChrProbes[p], reads[r])) {
                        // TODO: Just count overlap?
                        senseCount += SequenceRead.length(reads[r]);
                    } else {
                        antisenseCount += SequenceRead.length(reads[r]);
                    }
                }
                int senseReads = (int) (senseCount / readLengths[d]);
                int antisenseReads = (int) (antisenseCount / readLengths[d]);
                BinomialDistribution bd = new BinomialDistribution(senseReads + antisenseReads,
                        antisenseProbability[d]);
                // Since the binomial distribution gives the probability of getting a value
                // higher than this we need to subtract one so we get the probability of
                // this or higher.
                double thisPValue = 1 - bd.cumulativeProbability(antisenseReads - 1);
                if (antisenseReads == 0)
                    thisPValue = 1;
                // We have to add all results at this stage so we don't mess up the multiple
                // testing correction later on.
                significantProbes.get(d).add(new ProbeTTestValue(thisChrProbes[p], thisPValue));
                double expected = (senseReads + antisenseReads) * antisenseProbability[d];
                if (expected < 1)
                    expected = 1;
                float obsExp = antisenseReads / (float) expected;
                data[d].setValueForProbe(thisChrProbes[p], obsExp);
            }
        }
    }
    // Now we can go through the set of significant probes, applying a correction and
    // then filtering those which pass our p-value cutoff.
    for (int d = 0; d < data.length; d++) {
        ProbeTTestValue[] ttestResults = significantProbes.get(d).toArray(new ProbeTTestValue[0]);
        BenjHochFDR.calculateQValues(ttestResults);
        ProbeList newList = new ProbeList(collection().probeSet(),
                "Antisense < " + pValue + " in " + data[d].name(),
                "Probes showing significant antisense transcription from a basal level of "
                        + antisenseProbability[d] + " with a cutoff of " + pValue,
                "FDR");
        for (int i = 0; i < ttestResults.length; i++) {
            if (ttestResults[i].probe.name().equals("RP4-798A10.2")) {
                System.err.println("Raw p=" + ttestResults[i].p + " q=" + ttestResults[i].q);
            }
            if (ttestResults[i].q < pValue) {
                newList.addProbe(ttestResults[i].probe, (float) ttestResults[i].q);
            }
        }
    }
    StringBuffer quantitationDescription = new StringBuffer();
    quantitationDescription.append("Antisense transcription pipeline quantitation ");
    quantitationDescription.append(". Directionality was ");
    quantitationDescription.append(optionsPanel.libraryTypeBox.getSelectedItem());
    if (optionsPanel.ignoreOverlaps()) {
        quantitationDescription.append(". Ignoring existing overlaps");
    }
    quantitationDescription.append(". P-value cutoff was ");
    quantitationDescription.append(optionsPanel.pValue());
    collection().probeSet().setCurrentQuantitation(quantitationDescription.toString());
    quantitatonComplete();
}
From source file: org.bdval.DAVMode.java

private void readGeneLists(final String geneListFilename, final Vector<GEOPlatform> platforms,
        final DAVOptions options) {
    BufferedReader geneListReader = null;
    try {
        // Read gene list info:
        geneListReader = new BufferedReader(new FileReader(geneListFilename));
        String line;
        final Vector<GeneList> list = new Vector<GeneList>();
        while ((line = geneListReader.readLine()) != null) {
            if (line.startsWith("#")) {
                continue;
            }
            final String[] tokens = line.split("[\t]");
            if (tokens.length < 1) {
                throw new IllegalArgumentException(
                        "Gene list line must have at least 1 field. Line was : " + line);
            }
            final GeneList geneList = GeneList.createList(tokens, options.getGeneFeaturesDir());
            geneList.setPlatforms(platforms);
            list.add(geneList);
        }
        options.geneLists = list.toArray(new GeneList[list.size()]);
    } catch (FileNotFoundException e) {
        LOG.fatal("Cannot find gene list file: " + geneListFilename, e);
        System.exit(1);
    } catch (IOException e) {
        LOG.error("Cannot read gene list file: " + geneListFilename, e);
        System.exit(2);
    } finally {
        IOUtils.closeQuietly(geneListReader);
    }
}
From source file: com.sos.VirtualFileSystem.FTP.SOSVfsFtpBaseClass.java

@Override
public String[] getFilelist(final String folder, final String regexp, final int flag,
        final boolean withSubFolder) {
    // TODO vecDirectoryListing = null; check whether this reset is necessary
    vecDirectoryListing = null;
    if (vecDirectoryListing == null) {
        vecDirectoryListing = nList(folder, withSubFolder);
    }
    Vector<String> strB = new Vector<String>();
    Pattern pattern = Pattern.compile(regexp, 0);
    for (String strFile : vecDirectoryListing) {
        // The file_spec has to be compared to the filename only, excluding the path.
        String strFileName = new File(strFile).getName();
        Matcher matcher = pattern.matcher(strFileName);
        if (matcher.find()) {
            strB.add(strFile);
        }
    }
    return strB.toArray(new String[strB.size()]);
}
From source file: com.sos.VirtualFileSystem.FTP.SOSVfsFtpBaseClass.java

@Override
public String[] getFolderlist(final String folder, final String regexp, final int flag,
        final boolean withSubFolder) {
    // TODO vecDirectoryListing = null; check whether this reset is necessary
    vecDirectoryListing = null;
    if (vecDirectoryListing == null) {
        vecDirectoryListing = nList(folder, withSubFolder);
    }
    Vector<String> strB = new Vector<String>();
    Pattern pattern = Pattern.compile(regexp, 0);
    for (String strFile : vecDirectoryListing) {
        // The file_spec has to be compared to the filename only, excluding the path.
        String strFileName = new File(strFile).getName();
        Matcher matcher = pattern.matcher(strFileName);
        if (matcher.find()) {
            strB.add(strFile);
        }
    }
    return strB.toArray(new String[strB.size()]);
}
From source file: org.apache.ojb.broker.metadata.ClassDescriptor.java

/**
 * Return an array of FieldDescriptors used for optimistic locking, sorted ascending
 * according to the field-descriptors' getOrder() property.
 */
public FieldDescriptor[] getLockingFields() {
    if (m_lockingFieldDescriptors == null) {
        // 1. Collect all locking fields from the field list.
        Vector vec = new Vector();
        for (int i = 0; i < m_FieldDescriptions.length; i++) {
            FieldDescriptor fd = m_FieldDescriptions[i];
            if (fd.isLocking()) {
                vec.add(fd);
            }
        }
        // 2. Sort fields according to their getOrder() property.
        Collections.sort(vec, FieldDescriptor.getComparator());
        m_lockingFieldDescriptors = (FieldDescriptor[]) vec.toArray(new FieldDescriptor[vec.size()]);
    }
    return m_lockingFieldDescriptors;
}
From source file: org.apache.ojb.broker.metadata.ClassDescriptor.java

/**
 * Return an array of non-primary-key FieldDescriptors, sorted ascending
 * according to the field-descriptors' getOrder() property.
 */
public FieldDescriptor[] getNonPkFields() {
    if (m_nonPkFieldDescriptors == null) {
        // 1. Collect all non-primary-key fields from the field list.
        Vector vec = new Vector();
        for (int i = 0; i < m_FieldDescriptions.length; i++) {
            FieldDescriptor fd = m_FieldDescriptions[i];
            if (!fd.isPrimaryKey()) {
                vec.add(fd);
            }
        }
        // 2. Sort fields according to their getOrder() property.
        Collections.sort(vec, FieldDescriptor.getComparator());
        m_nonPkFieldDescriptors = (FieldDescriptor[]) vec.toArray(new FieldDescriptor[vec.size()]);
    }
    return m_nonPkFieldDescriptors;
}