Example usage for java.util Vector toArray

List of usage examples for java.util Vector toArray

Introduction

On this page you can find usage examples for java.util Vector toArray, collected from open-source projects.

Prototype

@SuppressWarnings("unchecked")
public synchronized <T> T[] toArray(T[] a) 

Document

Returns an array containing all of the elements in this Vector in the correct order; the runtime type of the returned array is that of the specified array.
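
As a quick orientation before the project examples, here is a minimal, self-contained sketch of the two common calling patterns (the class and variable names are illustrative only):

import java.util.Vector;

public class VectorToArrayDemo {
    public static void main(String[] args) {
        Vector<String> names = new Vector<String>();
        names.add("alpha");
        names.add("beta");

        // Zero-length argument: toArray allocates and returns a new
        // String[] of exactly the right size.
        String[] allocated = names.toArray(new String[0]);

        // Presized argument: toArray fills the supplied array in place
        // and returns the same reference.
        String[] presized = names.toArray(new String[names.size()]);

        System.out.println(allocated.length + " " + presized.length); // prints "2 2"
    }
}

Both patterns appear in the examples below. If the supplied array is longer than the Vector, toArray stores null in the element immediately following the end of the collection.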

Usage

From source file:edu.umn.cs.spatialHadoop.mapred.IndexedRectangle.java

@SuppressWarnings("unchecked")
@Override
public InputSplit[] getSplits(final JobConf job, int numSplits) throws IOException {
    // Get a list of all input files. There should be exactly two files.
    final Path[] inputFiles = getInputPaths(job);
    GlobalIndex<Partition> gIndexes[] = new GlobalIndex[inputFiles.length];

    BlockFilter blockFilter = null;
    try {
        Class<? extends BlockFilter> blockFilterClass = job.getClass(SpatialSite.FilterClass, null,
                BlockFilter.class);
        if (blockFilterClass != null) {
            // Get all blocks the user wants to process
            blockFilter = blockFilterClass.newInstance();
            blockFilter.configure(job);
        }
    } catch (InstantiationException e1) {
        e1.printStackTrace();
    } catch (IllegalAccessException e1) {
        e1.printStackTrace();
    }

    if (blockFilter != null) {
        // Extract global indexes from input files

        for (int i_file = 0; i_file < inputFiles.length; i_file++) {
            FileSystem fs = inputFiles[i_file].getFileSystem(job);
            gIndexes[i_file] = SpatialSite.getGlobalIndex(fs, inputFiles[i_file]);
        }
    }

    final Vector<CombineFileSplit> matchedSplits = new Vector<CombineFileSplit>();
    if (gIndexes[0] == null || gIndexes[1] == null) {
        // Join every possible pair (Cartesian product)
        InputSplit[][] inputSplits = new InputSplit[inputFiles.length][];

        for (int i_file = 0; i_file < inputFiles.length; i_file++) {
            JobConf temp = new JobConf(job);
            setInputPaths(temp, inputFiles[i_file]);
            inputSplits[i_file] = super.getSplits(temp, 1);
        }
        LOG.info("Doing a Cartesian product of blocks: " + inputSplits[0].length + "x" + inputSplits[1].length);
        for (InputSplit split1 : inputSplits[0]) {
            for (InputSplit split2 : inputSplits[1]) {
                CombineFileSplit combinedSplit = (CombineFileSplit) FileSplitUtil.combineFileSplits(job,
                        (FileSplit) split1, (FileSplit) split2);
                matchedSplits.add(combinedSplit);
            }
        }
    } else {
        // Filter block pairs by the BlockFilter
        blockFilter.selectCellPairs(gIndexes[0], gIndexes[1], new ResultCollector2<Partition, Partition>() {
            @Override
            public void collect(Partition p1, Partition p2) {
                try {
                    List<FileSplit> splits1 = new ArrayList<FileSplit>();
                    Path path1 = new Path(inputFiles[0], p1.filename);
                    splitFile(job, path1, splits1);

                    List<FileSplit> splits2 = new ArrayList<FileSplit>();
                    Path path2 = new Path(inputFiles[1], p2.filename);
                    splitFile(job, path2, splits2);

                    for (FileSplit split1 : splits1) {
                        for (FileSplit split2 : splits2) {
                            matchedSplits.add(
                                    (CombineFileSplit) FileSplitUtil.combineFileSplits(job, split1, split2));
                        }
                    }

                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        });
    }

    LOG.info("Matched " + matchedSplits.size() + " combine splits");

    // Return all matched splits
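    // Note that toArray accepts an array of a supertype: every CombineFileSplit is an InputSplit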
    return matchedSplits.toArray(new InputSplit[matchedSplits.size()]);
}

From source file:org.apache.ojb.broker.metadata.ObjectReferenceDescriptor.java

/**
 * Returns the foreign key FieldDescriptors for the given ClassDescriptor,
 * resolving them on first use and caching the result per descriptor.
 */
public FieldDescriptor[] getForeignKeyFieldDescriptors(ClassDescriptor cld) {
    FieldDescriptor[] foreignKeyFieldDescriptors;
    if ((foreignKeyFieldDescriptors = (FieldDescriptor[]) fkFieldMap.get(cld)) == null) {
        // 1. collect vector of indices of Fk-Fields
        Vector v = getForeignKeyFields();
        // 2. get FieldDescriptor for each index from Class-descriptor
        // 2A. In a many-to-many relationship foreignkeyfields vector will be null.
        if (v != null) {
            Vector ret;
            if (cld.isInterface()) {
                //exchange interface class descriptor with first concrete
                //class
                Vector extents = cld.getExtentClasses();
                Class firstConcreteClass = (Class) extents.get(0);
                cld = getClassDescriptor().getRepository().getDescriptorFor(firstConcreteClass);
            }
            ret = new Vector();

            Iterator iter = v.iterator();
            while (iter.hasNext()) {
                Object fk = iter.next();
                FieldDescriptor fkfd = null;
                /*
                 * OJB-55: it's possible that the FK field is declared in a
                 * superclass of this object, so search the superclass
                 * descriptors for a valid field.
                 */
                ClassDescriptor tmp = cld;
                while (tmp != null) {
                    if (fk instanceof Integer) {
                        Integer index = (Integer) fk;
                        fkfd = cld.getFieldDescriptorByIndex(index.intValue());
                    } else {
                        fkfd = tmp.getFieldDescriptorByName((String) fk);
                    }
                    if (fkfd != null) {
                        break;
                    } else {
                        tmp = tmp.getSuperClassDescriptor();
                    }
                }

                if (fkfd == null) {
                    throw new OJBRuntimeException("Incorrect or not found field reference name '" + fk
                            + "' in descriptor " + this + " for class-descriptor '"
                            + (cld != null ? cld.getClassNameOfObject() + "'" : "'null'"));
                }
                ret.add(fkfd);
            }
            foreignKeyFieldDescriptors = (FieldDescriptor[]) ret.toArray(new FieldDescriptor[ret.size()]);
            fkFieldMap.put(cld, foreignKeyFieldDescriptors);
        }
    }
    return foreignKeyFieldDescriptors;
}

From source file:org.osaf.cosmo.calendar.EntityConverter.java

/**
 * Given a calendar with many different components, split into
 * separate calendars that contain only a single component type
 * and a single UID.
 */
private CalendarContext[] splitCalendar(Calendar calendar) {
    Vector<CalendarContext> contexts = new Vector<CalendarContext>();
    Set<String> allComponents = new HashSet<String>();
    Map<String, ComponentList> componentMap = new HashMap<String, ComponentList>();

    ComponentList comps = calendar.getComponents();
    for (Iterator<Component> it = comps.iterator(); it.hasNext();) {
        Component comp = it.next();
        // ignore vtimezones for now
        if (comp instanceof VTimeZone)
            continue;

        Uid uid = (Uid) comp.getProperty(Property.UID);
        RecurrenceId rid = (RecurrenceId) comp.getProperty(Property.RECURRENCE_ID);

        String key = uid.getValue();
        if (rid != null)
            key += rid.toString();

        // ignore duplicates
        if (allComponents.contains(key))
            continue;

        allComponents.add(key);

        ComponentList cl = componentMap.get(uid.getValue());

        if (cl == null) {
            cl = new ComponentList();
            componentMap.put(uid.getValue(), cl);
        }

        cl.add(comp);
    }

    for (Entry<String, ComponentList> entry : componentMap.entrySet()) {

        Component firstComp = (Component) entry.getValue().get(0);

        Calendar cal = ICalendarUtils.createBaseCalendar();
        cal.getComponents().addAll(entry.getValue());
        addTimezones(cal);

        CalendarContext cc = new CalendarContext();
        cc.calendar = cal;
        cc.type = firstComp.getName();

        contexts.add(cc);
    }

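    // A zero-length array makes toArray allocate a CalendarContext[] of exactly the right size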
    return contexts.toArray(new CalendarContext[0]);
}

From source file:edu.umn.cs.spatialHadoop.nasa.StockQuadTree.java

/**
 * Merges a set of indexes into larger indexes
 * @param fs the file system holding both the source and destination indexes
 * @param srcIndexDir the directory containing the source indexes
 * @param dstIndexDir the directory to write the merged indexes to
 * @param srcFormat the date format of the source index directory names
 * @param dstFormat the date format of the destination index directory names
 * @param params additional operation parameters, e.g., an optional "time" range
 * @throws IOException
 * @throws ParseException
 * @throws InterruptedException
 */
private static void mergeIndexes(final FileSystem fs, Path srcIndexDir, Path dstIndexDir,
        SimpleDateFormat srcFormat, SimpleDateFormat dstFormat, final OperationsParams params)
        throws IOException, ParseException, InterruptedException {
    TimeRange timeRange = params.get("time") != null ? new TimeRange(params.get("time")) : null;
    final FileStatus[] sourceIndexes = timeRange == null ? fs.listStatus(srcIndexDir)
            : fs.listStatus(srcIndexDir, timeRange);
    Arrays.sort(sourceIndexes); // Alphabetical sort acts as sort-by-date here

    // Scan the source indexes and merge each consecutive run belonging to the
    // same unit
    int i1 = 0;
    while (i1 < sourceIndexes.length) {
        final String indexToCreate = dstFormat.format(srcFormat.parse(sourceIndexes[i1].getPath().getName()));
        int i2 = i1 + 1;
        // Keep scanning as long as the source index belongs to the same dest index
        while (i2 < sourceIndexes.length && dstFormat
                .format(srcFormat.parse(sourceIndexes[i2].getPath().getName())).equals(indexToCreate))
            i2++;

        // Merge all source indexes in the range [i1, i2) into one dest index

        // Copy i1, i2 to other variables as final to be accessible from threads
        final int firstIndex = i1;
        final int lastIndex = i2;

        final Path destIndex = new Path(dstIndexDir, indexToCreate);

        // For each tile, merge all values in all source indexes
        /*A regular expression to catch the tile identifier of a MODIS grid cell*/
        final Pattern MODISTileID = Pattern.compile("^.*(h\\d\\dv\\d\\d).*$");
        final FileStatus[] tilesInFirstDay = fs.listStatus(sourceIndexes[i1].getPath());
        // Shuffle the array for better load balancing across threads
        Random rand = new Random();
        for (int i = 0; i < tilesInFirstDay.length - 1; i++) {
            // Swap the entry at i with a random entry in [i, length) (Fisher-Yates)
            int j = i + rand.nextInt(tilesInFirstDay.length - i);
            FileStatus temp = tilesInFirstDay[i];
            tilesInFirstDay[i] = tilesInFirstDay[j];
            tilesInFirstDay[j] = temp;
        }
        Parallel.forEach(tilesInFirstDay.length, new RunnableRange<Object>() {
            @Override
            public Object run(int i_file1, int i_file2) {
                for (int i_file = i_file1; i_file < i_file2; i_file++) {
                    try {
                        FileStatus tileInFirstDay = tilesInFirstDay[i_file];

                        // Extract tile ID
                        Matcher matcher = MODISTileID.matcher(tileInFirstDay.getPath().getName());
                        if (!matcher.matches()) {
                            LOG.warn("Cannot extract tile id from file " + tileInFirstDay.getPath());
                            continue;
                        }

                        final String tileID = matcher.group(1);
                        Path destIndexFile = new Path(destIndex, tileID);

                        PathFilter tileFilter = new PathFilter() {
                            @Override
                            public boolean accept(Path path) {
                                return path.getName().contains(tileID);
                            }
                        };

                        // Find matching tiles in all source indexes to merge
                        Vector<Path> filesToMerge = new Vector<Path>(lastIndex - firstIndex);
                        filesToMerge.add(tileInFirstDay.getPath());
                        for (int iDailyIndex = firstIndex + 1; iDailyIndex < lastIndex; iDailyIndex++) {
                            FileStatus[] matchedTileFile = fs.listStatus(sourceIndexes[iDailyIndex].getPath(),
                                    tileFilter);
                            if (matchedTileFile.length == 0)
                                LOG.warn("Could not find tile " + tileID + " in dir "
                                        + sourceIndexes[iDailyIndex].getPath());
                            else if (matchedTileFile.length == 1)
                                filesToMerge.add(matchedTileFile[0].getPath());
                        }

                        if (fs.exists(destIndexFile)) {
                            // Destination file already exists
                            // Check the date of the destination and source files to see
                            // whether it needs to be updated or not
                            long destTimestamp = fs.getFileStatus(destIndexFile).getModificationTime();
                            boolean needsUpdate = false;
                            for (Path fileToMerge : filesToMerge) {
                                long sourceTimestamp = fs.getFileStatus(fileToMerge).getModificationTime();
                                if (sourceTimestamp > destTimestamp) {
                                    needsUpdate = true;
                                    break;
                                }
                            }
                            if (!needsUpdate)
                                continue;
                            else
                                LOG.info("Updating file " + destIndexFile.getName());
                        }

                        // Do the merge
                        Path tmpFile;
                        do {
                            tmpFile = new Path((int) (Math.random() * 1000000) + ".tmp");
                        } while (fs.exists(tmpFile));
                        tmpFile = tmpFile.makeQualified(fs);
                        LOG.info("Merging tile " + tileID + " into file " + destIndexFile);
                        AggregateQuadTree.merge(params, filesToMerge.toArray(new Path[filesToMerge.size()]),
                                tmpFile);
                        synchronized (fs) {
                            Path destDir = destIndexFile.getParent();
                            if (!fs.exists(destDir))
                                fs.mkdirs(destDir);
                        }
                        fs.rename(tmpFile, destIndexFile);
                    } catch (IOException e) {
                        e.printStackTrace();
                    }
                }
                return null;
            }
        });
        i1 = i2;
    }
}

From source file:uk.ac.gda.analysis.hdf5.Hdf5Helper.java

/**
 * @param fileName name of the HDF5 file to open
 * @param location path of the group within the file
 * @return names of the HDF5 datasets within the group specified by the location
 * @throws Exception if the file or group cannot be opened or read
 */
public String[] getListOfDatasets(String fileName, String location) throws Exception {
    Vector<String> names = new Vector<String>();
    int fileId = -1;
    try {
        fileId = H5.H5Fopen(fileName, HDF5Constants.H5F_ACC_RDONLY, HDF5Constants.H5P_DEFAULT);
        if (fileId < 0) {
            throw new Exception("Unable to open file `" + fileName + "`");
        }
        int groupId = H5.H5Gopen(fileId, location, HDF5Constants.H5P_DEFAULT);
        if (groupId <= 0) {
            throw new Exception("Unable to open location " + location);
        }
        try {
            H5G_info_t h5Gget_info = H5.H5Gget_info(groupId);
            int nelems = (int) h5Gget_info.nlinks;
            if (nelems > 0) {
                try {
                    int[] oTypes = new int[nelems];
                    int[] lTypes = new int[nelems];
                    long[] oids = new long[nelems];
                    String[] oNames = new String[nelems];
                    H5.H5Gget_obj_info_all(fileId, location, oNames, oTypes, lTypes, oids,
                            HDF5Constants.H5_INDEX_NAME);
                    // Iterate through the file to see members of the group
                    for (int i = 0; i < nelems; i++) {
                        if (oNames[i] != null && oTypes[i] == HDF5Constants.H5O_TYPE_DATASET) {
                            names.add(oNames[i]);
                        }
                    }
                } catch (HDF5Exception ex) {
                    throw new Exception("Could not get objects info from group", ex);
                }
            }
        } finally {
            H5.H5Gclose(groupId);
        }
    } finally {
        if (fileId >= 0)
            H5.H5Fclose(fileId);
    }
    return names.toArray(new String[0]);
}

From source file:edu.umn.cs.spatialHadoop.nasa.HTTPFileSystem.java

/**
 * Lists all files and directories in a given Path that points to a directory.
 * While this function is written in a generic way, it was designed and tested
 * only with LP DAAC archives.
 */
@Override
public FileStatus[] listStatus(Path f) throws IOException {
    Vector<FileStatus> statuses = new Vector<FileStatus>();
    final Pattern httpEntryPattern = Pattern
            .compile("<a href=\"[^\"]+\">(.+)</a>\\s*(\\d+-\\w+-\\d+)\\s+(\\d+:\\d+)\\s+([\\d\\.]+[KMG]|-)");
    f = f.makeQualified(this);
    URL url = f.toUri().toURL();
    int retryCount = HTTPFileSystem.retries;
    BufferedReader inBuffer = null;
    try {
        while (inBuffer == null && retryCount-- > 0) {
            try {
                inBuffer = new BufferedReader(new InputStreamReader(url.openStream()));
            } catch (java.net.SocketException e) {
                if (retryCount == 0)
                    throw e;
                LOG.info("Error accessing file '" + url + "'. Trials left: " + retryCount);
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e1) {
                    // interrupted during backoff; fall through and retry
                }
            } catch (java.net.UnknownHostException e) {
                if (retryCount == 0)
                    throw e;
                LOG.info("Error accessing file '" + url + "'. Trials left: " + retryCount);
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e1) {
                    // interrupted during backoff; fall through and retry
                }
            }
        }
        if (inBuffer == null)
            throw new RuntimeException("Could not access URL " + f);
        String line;
        while ((line = inBuffer.readLine()) != null) {
            Matcher matcher = httpEntryPattern.matcher(line);
            while (matcher.find()) {
                String entryName = matcher.group(1);
                Path entryPath = new Path(f, entryName);

                String entryDate = matcher.group(2);
                String entryTime = matcher.group(3);
                long modificationTime = parseDateTime(entryDate, entryTime);

                String size = matcher.group(4);
                boolean isDir = size.equals("-");
                long length = isDir ? 0 : parseSize(size);

                FileStatus fstatus = new FileStatus(length, isDir, 1, 4096, modificationTime, modificationTime,
                        null, null, null, entryPath);
                statuses.add(fstatus);
            }
        }
    } finally {
        if (inBuffer != null)
            inBuffer.close();
    }

    return statuses.toArray(new FileStatus[statuses.size()]);
}

From source file:org.lamport.tla.toolbox.tool.tlc.model.Model.java

/**
 * Checks whether the checkpoint files exist for a given model.
 * If doRefresh is set to true, this method will refresh the model directory,
 * and if a checkpoint folder is found, it will refresh the contents of that folder.
 * This means that the eclipse workspace representation of that directory will
 * synch with the file system. This is a long running job, so this method should not
 * be called within the running of another job unless the scheduling rule for
 * refreshing the model directory is included in the scheduling rule of the job which
 * is calling this method. This scheduling rule can be found by calling
 * {@link IResourceRuleFactory#refreshRule(IResource)}.
 * 
 * Note: Because the Toolbox deletes any existing checkpoint when running TLC,
 * there should be at most one checkpoint. Therefore, this method should return an
 * array of length 0 or 1.
 * 
 * @param doRefresh whether the model directory's contents and any checkpoint
 * folders' contents should be refreshed
 * @return the array of checkpoint directories, sorted from last to first
 */
public IResource[] getCheckpoints(boolean doRefresh) throws CoreException {
    // yy-MM-dd-HH-mm-ss
    Pattern pattern = Pattern.compile("[0-9]{2}-[0-9]{2}-[0-9]{2}-[0-9]{2}-[0-9]{2}-[0-9]{2}");

    Vector<IResource> checkpoints = new Vector<IResource>();
    IFolder directory = getTargetDirectory();

    if (directory != null && directory.exists()) {
        // refreshing is necessary because TLC creates
        // the checkpoint folders, but they may not have
        // been incorporated into the toolbox workspace
        // yet
        // the depth is one to find any checkpoint folders
        if (doRefresh) {
            directory.refreshLocal(IResource.DEPTH_ONE, null);
        }
        IResource[] members = directory.members();
        for (int i = 0; i < members.length; i++) {
            if (members[i].getType() == IResource.FOLDER) {
                Matcher matcher = pattern.matcher(members[i].getName());
                if (matcher.matches()) {
                    // if there is a checkpoint folder, it is necessary
                    // to refresh its contents because they may not
                    // be part of the workspace yet
                    if (doRefresh) {
                        members[i].refreshLocal(IResource.DEPTH_ONE, null);
                    }
                    if (((IFolder) members[i]).findMember(CHECKPOINT_QUEUE) != null
                            && ((IFolder) members[i]).findMember(CHECKPOINT_VARS) != null
                            && ((IFolder) members[i]).findMember(CHECKPOINT_STATES) != null) {
                        checkpoints.add(members[i]);
                    }
                }
            }
        }
    }
    IResource[] result = (IResource[]) checkpoints.toArray(new IResource[checkpoints.size()]);
    // sort the result
    Arrays.sort(result, new Comparator<IResource>() {
        public int compare(IResource arg0, IResource arg1) {
            return arg0.getName().compareTo(arg1.getName());
        }
    });

    return result;
}

From source file:com.creationgroundmedia.popularmovies.sync.MovieSyncAdapter.java

private void getMovieDataFromJson(String movieJsonStr) throws JSONException {

    if (movieJsonStr == null) {
        return;
    }

    JSONObject movieJSON = new JSONObject(movieJsonStr);
    JSONArray movieList = movieJSON.getJSONArray(mContext.getString(R.string.jsonresults));

    Vector<ContentValues> cvVector = new Vector<ContentValues>(movieList.length());

    for (int i = 0; i < movieList.length(); i++) {
        JSONObject titleJSON = movieList.getJSONObject(i);

        ContentValues movieValues = new ContentValues();

        String title = titleJSON.getString(mContext.getString(R.string.jsontitle));

        movieValues.put(MoviesContract.MovieEntry.COLUMN_ADULT,
                titleJSON.getBoolean(mContext.getString(R.string.jsonadult)) ? 1 : 0);
        movieValues.put(MoviesContract.MovieEntry.COLUMN_BACKDROP_PATH,
                titleJSON.getString(mContext.getString(R.string.jsonbackdrop)));
        movieValues.put(MoviesContract.MovieEntry.COLUMN_FAVORITE, 0);
        movieValues.put(MoviesContract.MovieEntry.COLUMN_FRESH, 1);
        movieValues.put(MoviesContract.MovieEntry.COLUMN_ID_KEY,
                titleJSON.getLong(mContext.getString(R.string.jsonid)));
        movieValues.put(MoviesContract.MovieEntry.COLUMN_ORIGINAL_LANGUAGE,
                titleJSON.getString(mContext.getString(R.string.jsonoriginallanguage)));
        movieValues.put(MoviesContract.MovieEntry.COLUMN_OVERVIEW,
                titleJSON.getString(mContext.getString(R.string.jsonoverview)));
        movieValues.put(MoviesContract.MovieEntry.COLUMN_POPULARITY,
                titleJSON.getString(mContext.getString(R.string.jsonpopularity)));
        movieValues.put(MoviesContract.MovieEntry.COLUMN_POSTER_PATH,
                titleJSON.getString(mContext.getString(R.string.jsonposter)));
        movieValues.put(MoviesContract.MovieEntry.COLUMN_RELEASE_DATE,
                titleJSON.getString(mContext.getString(R.string.jsondate)));
        movieValues.put(MoviesContract.MovieEntry.COLUMN_SORTTITLE, trimLeadingThe(title));
        movieValues.put(MoviesContract.MovieEntry.COLUMN_TITLE, title);
        movieValues.put(MoviesContract.MovieEntry.COLUMN_VIDEO,
                titleJSON.getBoolean(mContext.getString(R.string.jsonvideo)) ? 1 : 0);
        movieValues.put(MoviesContract.MovieEntry.COLUMN_VOTE_AVERAGE,
                titleJSON.getString(mContext.getString(R.string.jsonvoteaverage)));
        movieValues.put(MoviesContract.MovieEntry.COLUMN_VOTE_COUNT,
                titleJSON.getInt(mContext.getString(R.string.jsonvotecount)));

        cvVector.add(movieValues);
    }

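    // The destination array is presized, so toArray fills it in place; the return value can be ignored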
    ContentValues[] cvArray = new ContentValues[cvVector.size()];
    cvVector.toArray(cvArray);
    int inserted = mContext.getContentResolver().bulkInsert(MoviesContract.MovieEntry.CONTENT_URI, cvArray);
}

From source file:org.apache.nutch.parse.swf.SWFParser.java

public ParseResult getParse(Content content) {

    String text = null;
    Vector outlinks = new Vector();

    try {

        byte[] raw = content.getContent();

        String contentLength = content.getMetadata().get(Response.CONTENT_LENGTH);
        if (contentLength != null && raw.length != Integer.parseInt(contentLength)) {
            return new ParseStatus(ParseStatus.FAILED, ParseStatus.FAILED_TRUNCATED,
                    "Content truncated at " + raw.length + " bytes. Parser can't handle incomplete files.")
                            .getEmptyParseResult(content.getUrl(), getConf());
        }
        ExtractText extractor = new ExtractText();

        // TagParser implements SWFTags and drives a SWFTagTypes interface
        TagParser parser = new TagParser(extractor);
        // use this instead to debug the file
        // TagParser parser = new TagParser( new SWFTagDumper(true, true) );

        // SWFReader reads an input file and drives a SWFTags interface
        SWFReader reader = new SWFReader(parser, new InStream(raw));

        // read the input SWF file and pass it through the interface pipeline
        reader.readFile();
        text = extractor.getText();
        String atext = extractor.getActionText();
        if (atext != null && atext.length() > 0)
            text += "\n--------\n" + atext;
        // harvest potential outlinks
        String[] links = extractor.getUrls();
        for (int i = 0; i < links.length; i++) {
            Outlink out = new Outlink(links[i], "");
            outlinks.add(out);
        }
        Outlink[] olinks = OutlinkExtractor.getOutlinks(text, conf);
        if (olinks != null)
            for (int i = 0; i < olinks.length; i++) {
                outlinks.add(olinks[i]);
            }
    } catch (Exception e) { // run time exception
        e.printStackTrace(LogUtil.getErrorStream(LOG));
        return new ParseStatus(ParseStatus.FAILED, "Can't be handled as SWF document. " + e)
                .getEmptyParseResult(content.getUrl(), getConf());
    }
    if (text == null)
        text = "";

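    // outlinks is a raw Vector, so this toArray call returns Object[] and the cast is required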
    Outlink[] links = (Outlink[]) outlinks.toArray(new Outlink[outlinks.size()]);
    ParseData parseData = new ParseData(ParseStatus.STATUS_SUCCESS, "", links, content.getMetadata());
    return ParseResult.createParseResult(content.getUrl(), new ParseImpl(text, parseData));
}

From source file:org.cfeclipse.cfml.views.explorer.vfs.view.VFSView.java

/**
 * Gets filesystem root entries
 * @param fsManager
 * @return an array of Files corresponding to the root directories on the platform,
 *         may be empty but not null
 */
FileObject[] getRoots(FileSystemManager fsManager) throws FileSystemException {
    /*
     * On JDK 1.22 only...
     */
    // return File.listRoots();
    FileObject[] roots = null;
    /*
     * On JDK 1.1.7 and beyond...
     * -- PORTABILITY ISSUES HERE --
     */
    if (System.getProperty("os.name").indexOf("Windows") != -1) {
        Vector /* of FileObject */ list = new Vector();
        list.add(fsManager.resolveFile(DRIVE_A));

        for (char i = 'c'; i <= 'z'; ++i) {
            //FileObject drive = new FileObject(i + ":" + FileName.SEPARATOR);
            FileObject drive = fsManager.resolveFile(i + ":" + FileName.SEPARATOR);

            if (VFSUtil.isDirectory(drive) && drive.exists()) {
                list.add(drive);
                if (initial && i == 'c') {
                    setCurrentDirectory(drive);
                    setCurrentConnectionId(drive.getName().toString());
                    initial = false;
                }
            }
        }
        roots = (FileObject[]) list.toArray(new FileObject[list.size()]);
        VFSUtil.sortFiles(roots);

        //return roots;
    } else {
        FileObject root = fsManager.resolveFile(FileName.SEPARATOR);

        if (initial) {
            setCurrentDirectory(root);
            setCurrentConnectionId(root.getName().toString());
            initial = false;
        }
        roots = new FileObject[] { root };
    }

    return roots;
}