Example usage for java.util Vector get

List of usage examples for java.util Vector get

Introduction

In this page you can find the example usage for java.util Vector get.

Prototype

public synchronized E get(int index) 

Source Link

Document

Returns the element at the specified position in this Vector.

Usage

From source file:org.mahasen.util.PutUtil.java

/**
 * Splits the given file into parts, uploads each part to a randomly chosen
 * node, waits until every part has been reported uploaded (or the block
 * flag times out), then registers the resource in the DHT and starts
 * replication.
 *
 * @param file       the file to split and upload
 * @param resourceId the id under which the resource is inserted into the DHT
 * @throws InterruptedException
 * @throws RegistryException
 * @throws PastException
 * @throws IOException
 * @throws MahasenConfigurationException
 * @throws MahasenException if uploading does not complete before the timeout
 */
public void secureUpload(File file, Id resourceId) throws InterruptedException, RegistryException,
        PastException, IOException, MahasenConfigurationException, MahasenException {

    // get the IP addresses pool to upload files.
    Vector<String> nodeIpsToPut = getNodeIpsToPut();

    MahasenFileSplitter mahasenFileSplitter = new MahasenFileSplitter();
    mahasenFileSplitter.split(file);
    HashMap<String, String> fileParts = mahasenFileSplitter.getPartNames();

    mahasenResource.addPartNames(fileParts.keySet().toArray(new String[fileParts.size()]));
    Random random = new Random();

    // Track the number of uploaded parts for this job. This must be registered
    // ONCE, before the loop: the original code put a fresh AtomicInteger(0)
    // under the same jobId for every part, wiping out counts already recorded
    // by upload workers that had started on earlier iterations.
    AtomicInteger noOfParts = new AtomicInteger(0);
    storedNoOfParts.put(jobId, noOfParts);

    for (String currentPartName : fileParts.keySet()) {
        File splittedFilePart = new File(fileParts.get(currentPartName));
        // each part goes to a randomly chosen node from the pool
        int randomNumber = random.nextInt(nodeIpsToPut.size());
        String nodeIp = nodeIpsToPut.get(randomNumber);

        try {
            setTrustStore();

            ArrayList<NameValuePair> qparams = new ArrayList<NameValuePair>();
            qparams.add(new BasicNameValuePair("splittedfilename", splittedFilePart.getName()));
            URI uri = URIUtils.createURI("https", nodeIp + ":" + MahasenConstants.SERVER_PORT, -1,
                    "/mahasen/upload_request_ajaxprocessor.jsp", URLEncodedUtils.format(qparams, "UTF-8"),
                    null);

            MahasenUploadWorker uploadWorker = new MahasenUploadWorker(uri, currentPartName, splittedFilePart,
                    mahasenResource, nodeIp);
            uploadThread = new Thread(uploadWorker);
            uploadWorker.setJobId(jobId);

            uploadThread.start();
        } catch (Exception e) {
            // Best-effort: a part whose upload could not be started is only
            // detected later via the timeout below.
            // NOTE(review): consider failing fast here instead of continuing.
            e.printStackTrace();
        }
    }

    final BlockFlag blockFlag = new BlockFlag(true, 6000);
    while (true) {
        if (noOfParts.get() == fileParts.size()) {
            // Remove the counter under the same key it was stored with (jobId);
            // the original removed by uploadThread.getId(), leaking the entry.
            storedNoOfParts.remove(jobId);
            System.out.println("uploaded no of parts " + noOfParts + "out of " + fileParts.size() + "going out "
                    + "#####Thread id:" + uploadThread.getId());
            blockFlag.unblock();
            break;
        }

        if (blockFlag.isBlocked()) {
            mahasenManager.getNode().getEnvironment().getTimeSource().sleep(10);
        } else {
            throw new MahasenException("Time out in uploading " + file.getName());
        }
    }

    mahasenManager.insertIntoDHT(resourceId, mahasenResource, false);
    mahasenManager.insertTreeMapIntoDHT(resourceId, mahasenResource, false);

    ReplicateRequestStarter replicateStarter = new ReplicateRequestStarter(mahasenResource);
    Thread replicateThread = new Thread(replicateStarter);
    replicateThread.start();
}

From source file:de.fhg.fokus.odp.middleware.ckan.CKANGatewayUtil.java

/**
 * Receives a vector with details for a set of revisions and returns the
 * details for the groups (categories) affected by these revisions.
 *
 * @param revisionsDetails
 *            a vector of strings containing the JSON details for the
 *            revisions.
 * @return a vector of maps with the details for each affected group, or
 *         {@code null} when the updated data sets cannot be determined.
 */
@SuppressWarnings("rawtypes")
public static Vector<Map> getUpdatedCategoriesDetails(Vector<String> revisionsDetails) {

    // pass the request to the function for the updated data sets
    Vector uDataSetResults = getUpdatedDataSetsDetails(revisionsDetails);
    if (uDataSetResults == null) {
        return null;
    }

    // groups already fetched; a hash set makes the duplicate check O(1)
    // where the original Vector.contains was a linear scan per group
    java.util.Set<String> visitedGroups = new java.util.HashSet<String>();

    // the variable which will be returned
    Vector<Map> toreturn = new Vector<Map>();

    // iterate over the data set results
    for (int i = 0; i < uDataSetResults.size(); i++) {

        // get the groups which were updated as a result of the data set update
        Map m = (Map) uDataSetResults.get(i);
        JSONArray arr = (JSONArray) m.get("groups");

        for (int j = 0; j < arr.size(); j++) {

            // skip groups whose data was already obtained
            // (Set.add returns false when the element was already present)
            String grp = (String) arr.get(j);
            if (!visitedGroups.add(grp)) {
                continue;
            }

            // prepare the next rest call
            String RESTcall = "api/rest/group/" + grp;

            try {
                String restResponse = connectorInstance.restCallWithAuthorization(RESTcall, null);
                Map grMap = (Map) JSONValue.parse(restResponse);

                toreturn.add(grMap);

            } catch (MalformedURLException e) {
                log.log(Level.SEVERE, "Failed to realize api call \"" + url + RESTcall + "\" !!!");
            } catch (IOException e) {
                // The original swallowed this silently; log it so missing
                // groups in the result are explainable from the log.
                log.log(Level.WARNING, "I/O failure on api call \"" + url + RESTcall + "\" !!!");
            }
        }
    }

    return toreturn;
}

From source file:fr.cls.atoll.motu.library.misc.vfs.provider.gsiftp.GsiFtpFileObject.java

/**
 * Fetches the children of this file, if not already cached.
 * /*from   w  ww. j av  a  2  s.com*/
 * @throws IOException Signals that an I/O exception has occurred.
 */
private void doGetChildren() throws IOException {
    if (children != null) {
        return;
    }

    final GridFTPClient client = ftpFs.getClient();
    try {
        // String key =
        // GsiFtpFileSystemConfigBuilder.getInstance().getEntryParser(getFileSystem().getFileSystemOptions());

        /** required to perform multiple requests **/
        client.setLocalPassive();
        client.setActive();

        final Vector tmpChildren = client.list(getName().getPath());

        if (tmpChildren == null || tmpChildren.size() == 0) {
            children = EMPTY_FTP_FILE_MAP;
        } else {
            children = new TreeMap();

            // Remove '.' and '..' elements
            for (int i = 0; i < tmpChildren.size(); i++) {
                // final FTPFile child = tmpChildren[i];
                final FileInfo child = (FileInfo) tmpChildren.get(i);

                if (child == null) {
                    if (log.isDebugEnabled()) {
                        log.debug(Messages.getString("vfs.provider.ftp/invalid-directory-entry.debug",
                                new Object[] { new Integer(i), relPath }));
                    }
                    continue;
                }
                if (!".".equals(child.getName()) && !"..".equals(child.getName())) {
                    children.put(child.getName(), child);
                }
            }
        }
    } catch (ServerException se) {
        se.printStackTrace();
        // System.err.println("GsiFtpFileObject " + se);
        throw new IOException(se.getMessage());
    } catch (ClientException ce) {
        throw new IOException(ce.getMessage());
    } finally {
        ftpFs.putClient(client);
    }
}

From source file:com.clustercontrol.process.factory.RunMonitorProcess.java

/**
 * Builds the list of processes from a WBEM polling response.
 * <p>
 * Looks up the "param" polling target for this facility's platform in the
 * process polling master (cc_monitor_process_polling_mst), then walks the
 * matching entries of the polled table, extracting for each process the
 * command name (first vector element) and its space-joined parameters.
 * <p>
 * NOTE(review): the original comments were garbled (broken encoding); this
 * documentation is reconstructed from the code itself.
 *
 * @param wbemResponse the table of values returned by WBEM polling
 * @param facilityId   the facility whose process list is being built
 * @return one ProcessInfo per polled process entry, or {@code null} when
 *         master data is missing, polling failed, or an entry is invalid
 */
private List<ProcessInfo> buildWbemProcessList(DataTable wbemResponse, String facilityId) {
    PollingDataManager dataManager = new PollingDataManager(facilityId);
    String runParam = ""; // entry-key prefix for the WBEM "param" target

    // Fetch the polling master row with variable_id = "param" for this
    // platform / sub-platform combination.
    MonitorProcessPollingMstData pollingBean;
    pollingBean = ProcessMasterCache
            .getMonitorProcessPollingMst(new MonitorProcessPollingMstPK(PollerProtocolConstant.PROTOCOL_WBEM,
                    dataManager.getPlatformId(), dataManager.getSubPlatformId(), "param"));
    if (pollingBean == null) {
        m_log.info("collect() pollingBean (param) is null");
        return null;
    }
    runParam = PollerProtocolConstant.getEntryKey(PollerProtocolConstant.PROTOCOL_WBEM,
            pollingBean.getPollingTarget());
    m_log.debug("collect() runParam : " + runParam);

    // All table entries whose key starts with the "param" entry key.
    Set<TableEntry> paramEntrySet = wbemResponse.getValueSetStartWith(runParam);

    if (paramEntrySet == null) {
        // No data at all: treated as a WBEM polling failure.
        m_log.info("collect()  FacilityID : " + facilityId + ", "
                + "paramEntrySet(Set) is null , WBEM Polling failed");
        return null;
    }

    if (m_log.isDebugEnabled()) {
        m_log.debug("process list (Param)   size = " + paramEntrySet.size());
    }

    List<ProcessInfo> processList = new ArrayList<>();

    for (TableEntry paramEntry : paramEntrySet) {
        // NOTE(review): pid is never assigned below, so every ProcessInfo is
        // constructed with a null pid — confirm this is intended.
        String pid = null;
        String command = null;
        long time = 0L;
        String param = "";

        if (paramEntry != null) {

            // An invalid entry aborts the whole list (returns null).
            if (!paramEntry.isValid()) {
                m_message = paramEntry.getErrorDetail().getMessage();
                return null;
            }

            @SuppressWarnings("unchecked")
            Vector<String> valueParam = (Vector<String>) paramEntry.getValue();

            if (valueParam == null) {
                m_log.info("collect()  FacilityID : " + facilityId + ", "
                        + "ParamEntry(value) is null. What wbem happened?");
                m_message = MessageConstant.MESSAGE_COULD_NOT_GET_VALUE_PROCESS.getMessage();
                return null;
            }

            // First element is the command; the remaining elements are the
            // arguments, joined with single spaces.
            command = valueParam.get(0).toString();
            m_log.debug("command : " + command);

            for (int i = 1; i < valueParam.size(); i++) {
                param = param + valueParam.get(i);

                if (i + 1 < valueParam.size()) {
                    param = param + " ";
                }

                m_log.debug("param : " + param);
            }
        } else {
            // A null entry is skipped (not treated as a failure).
            m_log.debug(
                    "collect()  FacilityID : " + facilityId + ", " + "ParamEntry is null. What wbem happened?");
            continue;
        }

        time = paramEntry.getDate();

        processList.add(new ProcessInfo(pid, command, param, time));
    }

    return processList;
}

From source file:com.aquatest.dbinterface.tools.DatabaseUpdater.java

/**
 * Runs the updater to synchronise the device with the server. Cycles
 * through all the database tables and, for each one, retrieves records
 * that have been added, changed or deleted — all inside a single
 * transaction so a cancelled or failed update rolls back cleanly.
 */
public void run() {
    try {
        long updateTime = System.currentTimeMillis();

        Vector<String> tables = getTables();
        sendMessage("Table list downloaded: " + tables.size() + " tables found.", ITEM_COMPLETE);

        // The three change kinds are processed identically; drive them from a
        // table instead of three copy-pasted blocks (the original duplicated
        // the fetch/cancel logic for ADDED, UPDATED and DELETED).
        final int[] changeTypes = { TYPE_ADDED, TYPE_UPDATED, TYPE_DELETED };
        final String[] changeLabels = { "retrieving new records...", "retrieving updated records...",
                "retrieving deleted rows..." };

        // begin database transaction
        dA.database.beginTransaction();

        try {
            // loop through all the tables
            int tableCount = tables.size();
            for (int i = 0; i < tableCount; i++) {
                String tableName = tables.get(i);
                int k = i + 1;

                // ignore authoritymanager table
                if (tableName.compareTo("authoritymanager") == 0) {
                    continue;
                }

                for (int t = 0; t < changeTypes.length; t++) {
                    sendMessage(tableName + " (table " + k + "/" + tableCount + "): " + changeLabels[t],
                            ITEM_COMPLETE);

                    if (!fetchAndExecuteQueries(changeTypes[t], tableName)) {
                        // a false result means the user cancelled the update;
                        // the finally below rolls the transaction back
                        sendMessage("Update cancelled!", CANCELLED);
                        return;
                    }
                }
            }

            // signal transaction can be committed
            dA.database.setTransactionSuccessful();
        } finally {
            // commit or rollback transaction
            dA.database.endTransaction();
        }

        // return success in a Bundle
        Bundle b = new Bundle();
        b.putString("msg", "Update complete!");
        b.putLong("time", updateTime);
        sendMessage(b, COMPLETE);

    } catch (JSONException jE) {
        sendMessage(jE.getMessage(), ERROR);
    } catch (ClientProtocolException cE) {
        sendMessage(cE.getMessage() + " This is possibly caused by a lack of connectivity. "
                + "Restart the app and try again after ensuring you have a valid connection.", ERROR);
    } catch (IOException iE) {
        sendMessage(iE.getMessage() + " This is possibly caused by a lack of connectivity. "
                + "Restart the app and try again after ensuring you have a valid connection.", ERROR);
    } catch (SQLiteException sE) {
        sendMessage("A SQLite exception occured: " + sE.getMessage(), ERROR);
    }
}

From source file:org.powertac.householdcustomer.customers.Household.java

/**
 * Walks the operation-days vector of the dominant appliance, counting
 * dominant and non-dominant days and accumulating the hourly consumption
 * for each kind of day, then converts the totals to per-day averages.
 */
private void createDominantOperationVectors() {

    Appliance app = appliances.get(dominantAppliance);
    Vector<Boolean> op = app.getOperationDaysVector();

    for (int i = 0; i < op.size(); i++) {
        // The dominant/non-dominant decision is fixed per day: evaluate it
        // once instead of re-reading op.get(i) in every hour of the inner loop.
        boolean dominantDay = op.get(i);

        if (dominantDay)
            daysDominant++;
        else
            daysNonDominant++;

        for (int j = 0; j < VillageConstants.HOURS_OF_DAY; j++) {
            if (dominantDay)
                dominantConsumption[j] += weeklyBaseLoadInHours.get(i).get(j)
                        + weeklyControllableLoadInHours.get(i).get(j)
                        + weeklyWeatherSensitiveLoadInHours.get(i).get(j);
            else
                nonDominantConsumption[j] += weeklyBaseLoadInHours.get(i).get(j)
                        + weeklyControllableLoadInHours.get(i).get(j)
                        + weeklyWeatherSensitiveLoadInHours.get(i).get(j);
        }
    }

    // Convert accumulated totals to per-day averages, guarding against
    // division by zero when one of the day kinds never occurred.
    for (int j = 0; j < VillageConstants.HOURS_OF_DAY; j++) {
        if (daysDominant != 0)
            dominantConsumption[j] /= daysDominant;
        if (daysNonDominant != 0)
            nonDominantConsumption[j] /= daysNonDominant;
    }
}

From source file:com.duroty.application.bookmark.manager.BookmarkManager.java

/**
 * DOCUMENT ME!/*from w  w  w . j av a 2s  . c  o m*/
 *
 * @param hsession DOCUMENT ME!
 * @param repositoryName DOCUMENT ME!
 * @param links DOCUMENT ME!
 *
 * @throws BookmarkException DOCUMENT ME!
 */
public void addBookmarks(Session hsession, String repositoryName, Vector links) throws BookmarkException {
    //Cal insertar un bookmark a la base de dades i l'optimazer ja s'encarrega d'indexar
    if (links == null) {
        return;
    }

    try {
        Users user = getUser(hsession, repositoryName);

        for (int i = 0; i < links.size(); i++) {
            Bookmark bookmark = new Bookmark();
            bookmark.setBooUrl(((URL) links.get(i)).toString());
            bookmark.setUsers(user);

            hsession.save(bookmark);
            hsession.flush();
        }
    } catch (Exception e) {
        throw new BookmarkException(e);
    } finally {
        GeneralOperations.closeHibernateSession(hsession);
    }
}

From source file:de.mpg.mpdl.inge.pubman.web.sword.SwordUtil.java

/**
 * Builds the SWORD Atom response entry for a deposit.
 *
 * @param item    the created item, or {@code null} if creation failed
 * @param deposit the deposit being answered
 * @param valid   whether the deposited item passed validation
 * @return the populated SWORD entry
 */
public SWORDEntry createResponseAtom(PubItemVO item, Deposit deposit, boolean valid) {
    SWORDEntry entry = new SWORDEntry();
    PubManSwordServer server = new PubManSwordServer();

    // Title and update timestamp can only be filled when the item was
    // successfully created.
    if (item != null) {
        Title title = new Title();
        title.setContent(item.getMetadata().getTitle());
        entry.setTitle(title);

        SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'");
        sdf.setTimeZone(TimeZone.getTimeZone("UTC"));
        entry.setUpdated(sdf.format(new Date()));
    }

    // Summary: the deposited file names.
    // NOTE(review): the delimiter " ," (space before comma) is preserved
    // byte-for-byte from the original implementation — possibly a typo for ", ".
    StringBuilder names = new StringBuilder();
    for (String fn : this.getFileNames()) {
        if (names.length() > 0) {
            names.append(" ,");
        }
        names.append(fn);
    }
    Summary summary = new Summary();
    summary.setContent(names.toString());
    entry.setSummary(summary);

    Content content = new Content();
    content.setSource("");
    // Only point at the created item when it was actually deposited.
    if (!deposit.isNoOp() && item != null && valid) {
        content.setSource(server.getCoreserviceURL() + "/ir/item/" + item.getVersion().getObjectId());
        entry.setId(server.getBaseURL() + this.itemPath + item.getVersion().getObjectId());
    }
    entry.setContent(content);

    Generator generator = new Generator();
    generator.setContent(server.getBaseURL());
    Source source = new Source();
    source.setGenerator(generator);
    entry.setSource(source);

    entry.setTreatment(this.treatmentText);
    entry.setNoOp(deposit.isNoOp());

    // Record the depositing user as the entry author.
    Author author = new Author();
    author.setName(deposit.getUsername());
    entry.addAuthors(author);

    return entry;
}

From source file:de.tudarmstadt.ukp.dkpro.core.decompounding.splitter.AsvToolboxSplitterAlgorithm.java

/**
 * Splits the given word into its compound parts via the ASV toolbox and
 * folds the result into a decompounding tree. Parenthesised fragments mark
 * linking elements; parts are joined with '+' in the tree representation.
 *
 * @param aWord the word to decompound
 * @return the decompounding tree built from the split
 * @throws ResourceInitializationException if the splitter produced an
 *         empty fragment ("()"), indicating a broken split
 */
@Override
public DecompoundingTree split(String aWord) throws ResourceInitializationException {
    logger.debug("SPLITTING WORD: " + aWord);
    Vector<String> split = splitter.kZerlegung(aWord);

    // Compute the concatenation once; the original called
    // StringUtils.join(split, "") twice for the two checks below.
    String joined = StringUtils.join(split, "");

    // Sanity check: stripping the linking-element markers must reproduce
    // the input word exactly.
    if (!joined.replace("(", "").replace(")", "").equals(aWord)) {
        logger.error("Failed while splitting " + aWord + " into " + split);
    }

    // An empty pair of markers means the splitter emitted a void fragment.
    if (joined.contains("()")) {
        logger.error(aWord + " -> " + split);
        throw new ResourceInitializationException("Failed while splitting " + aWord + " into " + split, null);
    }

    StringBuilder splitStr = new StringBuilder();
    for (int i = 0; i < split.size(); i++) {
        // '+' separates parts, but not before marker fragments like "(s)".
        if (splitStr.length() > 0 && !split.get(i).startsWith("(")) {
            splitStr.append("+");
        }
        splitStr.append(split.get(i));
    }

    return new DecompoundingTree(splitStr.toString());
}

From source file:edu.ku.brc.specify.web.JasperReportHelper.java

/**
 * Starts the report creation process/*w w  w.ja  v  a2 s  .co  m*/
 * @param fileName the XML file name of the report definition
 * @param recrdSet the recordset to use to fill the labels
 * @param paramsArg parameters for the report
*/
public void createReport(final String mainReportName, final Object data, final Properties paramsArg) {
    if (data instanceof RecordSetIFace) {
        this.recordSet = (RecordSetIFace) data;
        this.dataSource = null;

    } else if (data instanceof JRDataSource) {
        this.recordSet = null;
        this.dataSource = (JRDataSource) data;
    }
    this.params = paramsArg;

    JasperReportsCache.refreshCacheFromDatabase();

    Vector<File> reportFiles = new Vector<File>();
    AppResourceIFace appRes = AppContextMgr.getInstance().getResource(mainReportName);
    if (appRes != null) {
        String subReportsStr = appRes.getMetaData("subreports");
        String hqlStr = appRes.getMetaData("hql");

        if (StringUtils.isNotEmpty(hqlStr)) {
            requiresHibernate = Boolean.parseBoolean(hqlStr.toLowerCase());
        }

        if (StringUtils.isNotEmpty(subReportsStr)) {
            String[] subReportNames = subReportsStr.split(",");
            for (String subReportName : subReportNames) {
                AppResourceIFace subReportAppRes = AppContextMgr.getInstance().getResource(subReportName);
                if (subReportAppRes != null) {
                    File subReportPath = new File(cachePath.getAbsoluteFile() + File.separator + subReportName);
                    if (subReportPath.exists()) {
                        reportFiles.add(subReportPath);

                    } else {
                        throw new RuntimeException(
                                "Subreport doesn't exist on disk [" + subReportPath.getAbsolutePath() + "]");
                    }

                } else {
                    throw new RuntimeException("Couldn't load subreport [" + mainReportName + "]"); // ??
                }
            }
        }

        File reportPath = new File(cachePath.getAbsoluteFile() + File.separator + mainReportName);
        if (reportPath.exists()) {
            reportFiles.add(reportPath);

        } else {
            throw new RuntimeException(
                    "Subreport doesn't exist on disk [" + reportPath.getAbsolutePath() + "]");
        }

    } else {
        throw new RuntimeException("Couldn't load report/label [" + mainReportName + "]");
    }

    boolean allAreCompiled = true;
    Vector<ReportCompileInfo> files = new Vector<ReportCompileInfo>();
    for (File file : reportFiles) {
        ReportCompileInfo info = JasperReportsCache.checkReport(file);
        files.add(info);
        if (!info.isCompiled()) {
            allAreCompiled = false;
        }
    }

    // Check to see if it needs to be recompiled, if it doesn't need compiling then
    // call "compileComplete" directly to have it start filling the labels
    // otherswise create the compiler runnable and have it be compiled 
    // asynchronously
    if (allAreCompiled) {
        System.out.println("Everything is compiled");

        this.compileComplete(files.get(files.size() - 1).getCompiledFile());

    } else {
        System.out.println("About to Compile " + isAsynchronous);
        //progressLabel.setText(getResourceString("JasperReportCompiling"));
        compiler = new JasperCompilerRunnable(this, files);
        if (isAsynchronous) {
            compiler.start();
        } else {
            compiler.run();
        }
    }
}