Example usage for java.util LinkedList size

List of usage examples for java.util LinkedList size

Introduction

In this page you can find the example usage for java.util LinkedList size.

Prototype

public int size()

To view the source code for java.util LinkedList size, use the Source Link below.

Click Source Link

Usage

From source file:ch.icclab.cyclops.services.iaas.openstack.resource.impl.TelemetryResource.java

/**
 * Calculates per-resource usage for cumulative meters.
 * <p/>
 * Pseudo code:<br/>
 * 1. Traverse the linked list of samples for one resource.<br/>
 * 2. For the first point, subtract the last persisted volume from the current one.<br/>
 * 3. For each following point, subtract using the previously computed usage as baseline.<br/>
 * 4. Append every updated sample object to the result list.
 *
 * @param cMeterArr  accumulator ArrayList of CumulativeMeterData receiving samples with usage filled in
 * @param linkedList LinkedList of CumulativeMeterData samples belonging to one particular resource
 * @return the cMeterArr list with the processed sample objects appended
 */
private ArrayList<CumulativeMeterData> calculateCumulativeMeterUsage(ArrayList<CumulativeMeterData> cMeterArr,
        LinkedList<CumulativeMeterData> linkedList) {
    logger.trace(
            "BEGIN ArrayList<CumulativeMeterData> calculateCumulativeMeterUsage(ArrayList<CumulativeMeterData> cMeterArr, LinkedList<CumulativeMeterData> linkedList)");
    long oldVolume = 0;
    long newVolume;
    long lastUsage = 0;
    TSDBResource dbResource = new TSDBResource();
    for (int i = 0; i < linkedList.size(); i++) {
        if (i == 0) {
            // First point: baseline is the last volume already persisted in the TSDB.
            oldVolume = dbResource.getLastVolume(linkedList.get(i).getMeter(),
                    linkedList.get(i).getResource_id(), linkedList.get(i).getUser_id());
        } else {
            // Subsequent points: baseline is the usage computed for the previous point.
            oldVolume = lastUsage;
        }
        newVolume = linkedList.get(i).getVolume();
        if (newVolume >= oldVolume) {
            // Normal use case where the new usage is greater or equals than the last inserted point.
            // TODO: what if the value is higher but it's coz the counter reset and get higher? (if we have message queues or event based and they advise before reset, that's solved
            lastUsage = newVolume - oldVolume;
        } else {
            // TODO: if the volume is lower than the lastInserted get the maximum for that meter and operate on it.
            lastUsage = newVolume;
        }
        linkedList.get(i).setUsage(lastUsage);
        cMeterArr.add(linkedList.get(i));
    }
    // BUGFIX: the loop above already appends every element including the final one;
    // the previous extra cMeterArr.add(linkedList.getLast()) duplicated the last
    // sample and threw NoSuchElementException for an empty list.
    logger.trace(
            "END ArrayList<CumulativeMeterData> calculateCumulativeMeterUsage(ArrayList<CumulativeMeterData> cMeterArr, LinkedList<CumulativeMeterData> linkedList)");
    return cMeterArr;
}

From source file:com.ibm.bi.dml.runtime.controlprogram.parfor.opt.PerfTestTool.java

/**
 * Writes the collected performance-test results to CSV files — one variable
 * ("_in1.csv") and one measure ("_in2.csv") file per instruction/cost-function
 * pair — and returns a mapping from the running file counter to the combined
 * instruction/test-definition ID.
 *
 * @param dirname directory (path prefix) the CSV files are written into
 * @return map of file counter (1-based) to combined long ID built from (instID, tDefID)
 * @throws IOException if a CSV file cannot be written
 * @throws DMLUnsupportedOperationException for cost functions with more than 3 dimensions
 */
@SuppressWarnings("all")
private static HashMap<Integer, Long> writeResults(String dirname)
        throws IOException, DMLUnsupportedOperationException {
    HashMap<Integer, Long> map = new HashMap<Integer, Long>();
    int count = 1;
    int offset = (MODEL_INTERCEPT ? 1 : 0); // extra leading "1" column when the model has an intercept
    int cols = MODEL_MAX_ORDER + offset;

    for (Entry<Integer, HashMap<Integer, LinkedList<Double>>> inst : _results.entrySet()) {
        int instID = inst.getKey();
        HashMap<Integer, LinkedList<Double>> instCF = inst.getValue();

        for (Entry<Integer, LinkedList<Double>> cfun : instCF.entrySet()) {
            int tDefID = cfun.getKey();
            long ID = IDHandler.concatIntIDsToLong(instID, tDefID);
            LinkedList<Double> dmeasure = cfun.getValue();

            PerfTestDef def = _regTestDef.get(tDefID);
            LinkedList<Double> dvariable = generateSequence(def.getMin(), def.getMax(), NUM_SAMPLES_PER_TEST);
            int dlen = dvariable.size();
            int plen = def.getInternalVariables().length;

            //write variable data set
            CSVWriter writer1 = new CSVWriter(new FileWriter(dirname + count + "_in1.csv"), ',',
                    CSVWriter.NO_QUOTE_CHARACTER);
            if (plen == 1) //one dimensional function
            {
                //write 1, x, x^2, x^3, ...
                String[] sbuff = new String[cols];
                for (Double val : dvariable) {
                    for (int j = 0; j < cols; j++)
                        sbuff[j] = String.valueOf(Math.pow(val, j + 1 - offset));
                    writer1.writeNext(sbuff);
                }
            } else // multi-dimensional function
            {
                //write 1, x,y,z,x^2,y^2,z^2, xy, xz, yz, xyz

                String[] sbuff = new String[(int) Math.pow(2, plen) - 1 + plen + offset - 1];
                //String[] sbuff = new String[plen+offset];
                if (offset == 1)
                    sbuff[0] = "1"; // intercept column

                //init index stack: one counter per internal variable
                int[] index = new int[plen];
                for (int i = 0; i < plen; i++)
                    index[i] = 0;

                //execute test 
                double[] buff = new double[plen];
                while (index[0] < dlen) {
                    //set buffer values from the current combination of sample indexes
                    for (int i = 0; i < plen; i++)
                        buff[i] = dvariable.get(index[i]);

                    //core writing
                    for (int i = 1; i <= plen; i++) {
                        if (i == 1) {
                            //first-order terms x,y,z followed by squared terms x^2,y^2,z^2
                            for (int j = 0; j < plen; j++)
                                sbuff[offset + j] = String.valueOf(buff[j]);
                            for (int j = 0; j < plen; j++)
                                sbuff[offset + plen + j] = String.valueOf(Math.pow(buff[j], 2));
                        } else if (i == 2) {
                            //pairwise interaction terms xy, xz, yz
                            int ix = 0;
                            for (int j = 0; j < plen - 1; j++)
                                for (int k = j + 1; k < plen; k++, ix++)
                                    sbuff[offset + 2 * plen + ix] = String.valueOf(buff[j] * buff[k]);
                        } else if (i == plen) {
                            // NOTE(review): the full product term (e.g. xyz) is intentionally
                            // disabled below — confirm before re-enabling.
                            //double tmp=1;
                            //for( int j=0; j<plen; j++ )
                            //   tmp *= buff[j];
                            //sbuff[offset+2*plen+plen*(plen-1)/2] = String.valueOf(tmp);
                        } else
                            throw new DMLUnsupportedOperationException(
                                    "More than 3 dims currently not supported.");

                    }

                    //for( int i=0; i<plen; i++ )   
                    //   sbuff[offset+i] = String.valueOf( buff[i] );

                    writer1.writeNext(sbuff);

                    //increment indexes odometer-style; index[plen-1] is least significant,
                    //carries propagate toward index[0] (the while-loop termination counter)
                    for (int i = plen - 1; i >= 0; i--) {
                        if (i == plen - 1)
                            index[i]++;
                        else if (index[i + 1] >= dlen) {
                            index[i]++;
                            index[i + 1] = 0;
                        }
                    }
                }
            }
            writer1.close();

            //write measure data set: one measured value per row
            CSVWriter writer2 = new CSVWriter(new FileWriter(dirname + count + "_in2.csv"), ',',
                    CSVWriter.NO_QUOTE_CHARACTER);
            String[] buff2 = new String[1];
            for (Double val : dmeasure) {
                buff2[0] = String.valueOf(val);
                writer2.writeNext(buff2);
            }
            writer2.close();

            map.put(count, ID);
            count++;
        }
    }

    return map;
}

From source file:com.googlecode.datasourcetester.server.DataSourceTesterServiceImpl.java

/**
 * Executes the given query against the named JNDI data source and returns the
 * result as a table of strings, with the column labels as the first row.
 *
 * @param dataSourceJndiName JNDI name under which the {@code DataSource} is bound
 * @param query              SQL passed verbatim to {@code prepareStatement}
 * @return the column-label row followed by one String[] per result row, or
 *         {@code null} if the lookup, query, or read fails (failure is logged)
 */
public String[][] queryDataSource(String dataSourceJndiName, String query) {
    try {
        InitialContext jndiContext = new InitialContext();
        DataSource ds = (DataSource) jndiContext.lookup(dataSourceJndiName);
        // try-with-resources closes ResultSet, PreparedStatement and Connection
        // in reverse order on every path (the previous version leaked the
        // statement/result set, and the connection on some error paths).
        try (Connection conn = ds.getConnection();
                PreparedStatement stmt = conn.prepareStatement(query);
                ResultSet rs = stmt.executeQuery()) {
            ResultSetMetaData resMeta = rs.getMetaData();
            LinkedList<String[]> rowList = new LinkedList<String[]>();
            String[] colLabels = new String[resMeta.getColumnCount()];
            for (int colNr = 1; colNr <= resMeta.getColumnCount(); colNr++) {
                colLabels[colNr - 1] = resMeta.getColumnName(colNr);
            }
            rowList.add(colLabels);
            while (rs.next()) {
                String[] rowData = new String[resMeta.getColumnCount()];
                for (int colNr = 1; colNr <= resMeta.getColumnCount(); colNr++) {
                    rowData[colNr - 1] = rs.getString(colNr);
                }
                rowList.add(rowData);
            }
            return rowList.toArray(new String[rowList.size()][]);
        }
    } catch (Exception e) {
        // Preserve the original contract: log the failure and signal it with null.
        logger.error(e.getMessage(), e);
        return null;
    }
}

From source file:pivotal.au.se.gemfirexdweb.controller.QueryController.java

/**
 * Pushes the given SQL command onto the front of the session's command history,
 * trimming the history so it never exceeds the user's configured size.
 *
 * @param session HTTP session holding the "history" LinkedList&lt;String&gt; attribute
 * @param prefs   user preferences supplying the maximum history size
 * @param sql     SQL statement to record
 */
private void addCommandToHistory(HttpSession session, UserPref prefs, String sql) {
    @SuppressWarnings("unchecked")
    LinkedList<String> historyList = (LinkedList<String>) session.getAttribute("history");

    int maxsize = prefs.getHistorySize();

    // Trim with >= (not ==) so the list also shrinks back if the user lowers the
    // history-size preference; the isEmpty guard avoids the remove(-1) crash the
    // original had when maxsize was 0 and the list was already empty.
    while (historyList.size() >= maxsize && !historyList.isEmpty()) {
        historyList.removeLast();
    }
    historyList.addFirst(sql);
}

From source file:de.fabianonline.telegram_backup.DownloadManager.java

/**
 * Downloads all messages newer than the newest message already stored locally,
 * then searches the database for missing message IDs and downloads those that
 * are still retrievable, finally recording the run via {@code db.logRun}.
 *
 * @param limit if non-null, only roughly the most recent {@code limit} messages
 *              are considered (missing-message backfill is skipped in that case)
 * @throws RpcErrorException on telegram RPC failures
 * @throws IOException on network or storage errors
 * @throws TimeoutException if a request times out
 */
public void _downloadMessages(Integer limit) throws RpcErrorException, IOException, TimeoutException {
    logger.info("This is _downloadMessages with limit {}", limit);
    int dialog_limit = 100;
    logger.info("Downloading the last {} dialogs", dialog_limit);
    System.out.println("Downloading most recent dialogs... ");
    int max_message_id = 0;
    TLAbsDialogs dialogs = client.messagesGetDialogs(0, 0, new TLInputPeerEmpty(), dialog_limit);
    logger.debug("Got {} dialogs", dialogs.getDialogs().size());
    // Determine the highest top-message id across the recent dialogs,
    // skipping channels (TLPeerChannel).
    for (TLDialog d : dialogs.getDialogs()) {
        if (d.getTopMessage() > max_message_id && !(d.getPeer() instanceof TLPeerChannel)) {
            logger.trace("Updating top message id: {} => {}. Dialog type: {}", max_message_id,
                    d.getTopMessage(), d.getPeer().getClass().getName());
            max_message_id = d.getTopMessage();
        }
    }
    System.out.println("Top message ID is " + max_message_id);
    int max_database_id = db.getTopMessageID();
    System.out.println("Top message ID in database is " + max_database_id);
    if (limit != null) {
        // With a limit set, pretend the database already holds everything older
        // than (top - limit), so only the newest 'limit' messages get loaded.
        System.out.println("Limit is set to " + limit);
        max_database_id = Math.max(max_database_id, max_message_id - limit);
        System.out.println("New top message id 'in database' is " + max_database_id);
    }
    if (max_message_id - max_database_id > 1000000) {
        // Telegram only serves about the most recent 1,000,000 messages; cap the gap.
        System.out.println(
                "Would have to load more than 1 million messages which is not supported by telegram. Capping the list.");
        logger.debug("max_message_id={}, max_database_id={}, difference={}", max_message_id, max_database_id,
                max_message_id - max_database_id);
        max_database_id = Math.max(0, max_message_id - 1000000);
        logger.debug("new max_database_id: {}", max_database_id);
    }

    if (max_database_id == max_message_id) {
        System.out.println("No new messages to download.");
    } else if (max_database_id > max_message_id) {
        throw new RuntimeException(
                "max_database_id is bigger then max_message_id. This shouldn't happen. But the telegram api nonetheless does that sometimes. Just ignore this error, wait a few seconds and then try again.");
    } else {
        // Download the contiguous gap between the database top and the server top.
        int start_id = max_database_id + 1;
        int end_id = max_message_id;

        List<Integer> ids = makeIdList(start_id, end_id);
        downloadMessages(ids);
    }

    logger.info("Searching for missing messages in the db");
    int count_missing = 0;
    System.out.println("Checking message database for completeness...");
    int db_count = db.getMessageCount();
    int db_max = db.getTopMessageID();
    logger.debug("db_count: {}", db_count);
    logger.debug("db_max: {}", db_max);

    // If the stored message count differs from the top id, some ids are missing.
    if (db_count != db_max) {
        if (limit != null) {
            System.out.println(
                    "You are missing messages in your database. But since you're using '--limit-messages', I won't download these now.");
        } else {
            LinkedList<Integer> all_missing_ids = db.getMissingIDs();
            LinkedList<Integer> downloadable_missing_ids = new LinkedList<Integer>();
            // Only ids within telegram's ~1,000,000-message window can still be fetched.
            for (Integer id : all_missing_ids) {
                if (id > max_message_id - 1000000)
                    downloadable_missing_ids.add(id);
            }
            count_missing = all_missing_ids.size();
            System.out.println("" + all_missing_ids.size() + " messages are missing in your Database.");
            System.out.println("I can (and will) download " + downloadable_missing_ids.size() + " of them.");

            downloadMessages(downloadable_missing_ids);
        }
    }

    logger.info("Logging this run");
    db.logRun(Math.min(max_database_id + 1, max_message_id), max_message_id, count_missing);
}

From source file:com.hipu.bdb.util.FileUtils.java

/**
 * Retrieve a number of lines from the file around the given 
 * position, as when paging forward or backward through a file. 
 * /*from  w w  w  .  j a  v a2 s  .c  om*/
 * @param file File to retrieve lines
 * @param position offset to anchor lines
 * @param signedDesiredLineCount lines requested; if negative, 
 *        want this number of lines ending with a line containing
 *        the position; if positive, want this number of lines,
 *        all starting at or after position. 
 * @param lines List<String> to insert found lines
 * @param lineEstimate int estimate of line size, 0 means use default
 *        of 128
 * @return LongRange indicating the file offsets corresponding to 
 *         the beginning of the first line returned, and the point
 *         after the end of the last line returned
 * @throws IOException
 */
@SuppressWarnings("unchecked")
public static LongRange pagedLines(File file, long position, int signedDesiredLineCount, List<String> lines,
        int lineEstimate) throws IOException {
    // consider negative positions as from end of file; -1 = last byte
    if (position < 0) {
        position = file.length() + position;
    }

    // calculate a reasonably sized chunk likely to have all desired lines
    if (lineEstimate == 0) {
        lineEstimate = 128;
    }
    int desiredLineCount = Math.abs(signedDesiredLineCount);
    long startPosition;
    long fileEnd = file.length();
    int bufferSize = (desiredLineCount + 5) * lineEstimate;
    if (signedDesiredLineCount > 0) {
        // reading forward; include previous char in case line-end
        startPosition = position - 1;
    } else {
        // reading backward
        startPosition = position - bufferSize + (2 * lineEstimate);
    }
    if (startPosition < 0) {
        startPosition = 0;
    }
    if (startPosition + bufferSize > fileEnd) {
        bufferSize = (int) (fileEnd - startPosition);
    }

    // read that reasonable chunk
    FileInputStream fis = new FileInputStream(file);
    fis.getChannel().position(startPosition);
    byte[] buf = new byte[bufferSize];
    IOUtils.closeQuietly(fis);

    // find all line starts fully in buffer
    // (positions after a line-end, per line-end definition in 
    // BufferedReader.readLine)
    LinkedList<Integer> lineStarts = new LinkedList<Integer>();
    if (startPosition == 0) {
        lineStarts.add(0);
    }
    boolean atLineEnd = false;
    boolean eatLF = false;
    int i;
    for (i = 0; i < bufferSize; i++) {
        if ((char) buf[i] == '\n' && eatLF) {
            eatLF = false;
            continue;
        }
        if (atLineEnd) {
            atLineEnd = false;
            lineStarts.add(i);
            if (signedDesiredLineCount < 0 && startPosition + i > position) {
                // reached next line past position, read no more
                break;
            }
        }
        if ((char) buf[i] == '\r') {
            atLineEnd = true;
            eatLF = true;
            continue;
        }
        if ((char) buf[i] == '\n') {
            atLineEnd = true;
        }
    }
    if (startPosition + i == fileEnd) {
        // add phantom lineStart after end
        lineStarts.add(bufferSize);
    }
    int foundFullLines = lineStarts.size() - 1;

    // if found no lines
    if (foundFullLines < 1) {
        if (signedDesiredLineCount > 0) {
            if (startPosition + bufferSize == fileEnd) {
                // nothing more to read: return nothing
                return new LongRange(fileEnd, fileEnd);
            } else {
                // retry with larger lineEstimate
                return pagedLines(file, position, signedDesiredLineCount, lines,
                        Math.max(bufferSize, lineEstimate));
            }

        } else {
            // try again with much larger line estimate
            // TODO: fail gracefully before growing to multi-MB buffers
            return pagedLines(file, position, signedDesiredLineCount, lines, bufferSize);
        }
    }

    // trim unneeded lines
    while (signedDesiredLineCount > 0 && startPosition + lineStarts.getFirst() < position) {
        // discard lines starting before desired position
        lineStarts.removeFirst();
    }
    while (lineStarts.size() > desiredLineCount + 1) {
        if (signedDesiredLineCount < 0 && (startPosition + lineStarts.get(1) <= position)) {
            // discard from front until reach line containing target position
            lineStarts.removeFirst();
        } else {
            lineStarts.removeLast();
        }
    }
    int firstLine = lineStarts.getFirst();
    int partialLine = lineStarts.getLast();
    LongRange range = new LongRange(startPosition + firstLine, startPosition + partialLine);
    List<String> foundLines = IOUtils
            .readLines(new ByteArrayInputStream(buf, firstLine, partialLine - firstLine));

    if (foundFullLines < desiredLineCount && signedDesiredLineCount < 0 && startPosition > 0) {
        // if needed and reading backward, read more lines from earlier
        range = expandRange(range, pagedLines(file, range.getMinimumLong() - 1,
                signedDesiredLineCount + foundFullLines, lines, bufferSize / foundFullLines));

    }

    lines.addAll(foundLines);

    if (signedDesiredLineCount < 0 && range.getMaximumLong() < position) {
        // did not get line containining start position
        range = expandRange(range, pagedLines(file, partialLine, 1, lines, bufferSize / foundFullLines));
    }

    if (signedDesiredLineCount > 0 && foundFullLines < desiredLineCount && range.getMaximumLong() < fileEnd) {
        // need more forward lines
        range = expandRange(range, pagedLines(file, range.getMaximumLong(), desiredLineCount - foundFullLines,
                lines, bufferSize / foundFullLines));
    }

    return range;
}

From source file:de.unwesen.packrat.api.FeedReader.java

/**
 * Fetches and parses the feed at the given URI, reporting the outcome to the
 * supplied handler with exactly one FR_* message.
 *
 * @param uri     feed location to fetch
 * @param handler receives FR_SUCCESS with the parsed media list on success,
 *                or one of the FR_* error codes on failure
 */
private void updateFeed(Uri uri, Handler handler) {
    // Convert the android Uri into a java.net.URI for the HTTP client.
    final URI feedLocation;
    try {
        feedLocation = new URI(uri.toString());
    } catch (URISyntaxException ex) {
        Log.e(LTAG, "Invalid feed URI: " + uri);
        handler.obtainMessage(FR_INVALID_FEED_URI).sendToTarget();
        return;
    }

    // Build the GET request with the expected referer header.
    HttpGet fetch = new HttpGet(feedLocation);
    fetch.addHeader("Referer", REFERER_URL);

    try {
        HttpResponse response = sClient.execute(fetch);

        HttpEntity body = response.getEntity();
        if (null == body) {
            Log.e(LTAG, "Feed is empty: " + uri);
            handler.obtainMessage(FR_EMPTY_FEED).sendToTarget();
            return;
        }

        MediaParser.AtomFeed feed = new MediaParser().parse(body.getContent());
        if (null == feed) {
            Log.e(LTAG, "Unable to parse feed, exiting: " + uri);
            handler.obtainMessage(FR_FEED_PARSE_FAILURE).sendToTarget();
            return;
        }

        LinkedList<Media> items = parseFeed(feed);
        if (null != items && items.size() > 0) {
            handler.obtainMessage(FR_SUCCESS, items).sendToTarget();
        } else {
            handler.obtainMessage(FR_EMPTY_FEED).sendToTarget();
        }
    } catch (IOException ex) {
        Log.w(LTAG, "IO exception: " + ex);
        handler.obtainMessage(FR_NETWORK_ERROR).sendToTarget();
    } catch (Exception ex) {
        Log.e(LTAG, "An exception occurred when reading the feed: " + ex);
        handler.obtainMessage(FR_UNKNOWN_ERROR).sendToTarget();
    }
}

From source file:de.metanome.backend.algorithm_loading.AlgorithmFinder.java

/**
 * Lists the file names of the algorithm jars available on the classpath,
 * optionally filtered to those implementing a given algorithm subclass.
 *
 * @param algorithmSubclass Class of algorithms to retrieve, or null if all subclasses
 * @return an array with the names of the available algorithms (empty if the
 *         algorithm folder does not exist)
 * @throws java.io.IOException if the algorithm folder could not be opened
 * @throws java.lang.ClassNotFoundException if an algorithm contains a not supported algorithm subclass
 */
public String[] getAvailableAlgorithmFileNames(Class<?> algorithmSubclass)
        throws IOException, ClassNotFoundException {

    LinkedList<String> availableAlgorithms = new LinkedList<>();

    // Explicit null check replaces the previous catch of NullPointerException
    // that was used as control flow for a missing "algorithms" folder.
    java.net.URL algorithmFolder = Thread.currentThread().getContextClassLoader().getResource("algorithms");
    if (algorithmFolder == null) {
        // The algorithm folder does not exist
        return new String[] {};
    }

    File[] jarFiles = retrieveJarFiles(algorithmFolder.getPath());

    for (File jarFile : jarFiles) {
        if (algorithmSubclass == null || getAlgorithmInterfaces(jarFile).contains(algorithmSubclass)) {
            availableAlgorithms.add(jarFile.getName());
        }
    }

    // toArray(new String[0]) is the idiomatic (and equally correct) form.
    return availableAlgorithms.toArray(new String[0]);
}

From source file:edu.ku.brc.specify.config.DateConverter.java

/**
 * @param dateStr/*from   w w w .java 2s .c om*/
 * @return
 */
protected DateFormats match(String dateStr) {
    LinkedList<DateFormats> matches = new LinkedList<DateFormats>();
    for (DateFormats format : DateFormats.values()) {
        if (format.matches(dateStr)) {
            matches.add(format);
        }
    }

    if (matches.size() == 0) {
        return null;
    }

    if (matches.size() == 1) {
        return matches.get(0);
    }

    for (DateFormats format : matches) {
        if (preferMonthDay && format.equals(DateFormats.MON_DAY_LYEAR)
                || format.equals(DateFormats.MON_DAY_SYEAR)) {
            return format;
        } else if (!preferMonthDay && format.equals(DateFormats.DAY_MON_LYEAR)
                || format.equals(DateFormats.DAY_MON_SYEAR)) {
            return format;
        }
    }
    /*
     * It shouldn't be possible for multiple matches to exist involving formats other than DAY_MON_LYEAR/SYEAR and MON_DAY_LYEAR/SYEAR.
     * But if it occurs, complain and return null.
     */
    //        if (matches.size() > 0)
    //        {
    //            log.error("Unable to resolve multiple date-format matches for '" + dateStr + "'");
    //        }

    return null;
}

From source file:edu.ku.brc.util.DateConverter.java

/**
 * @param dateStr/*  www .j ava  2s. c  o  m*/
 * @return
 */
protected DateFormats match(String dateStr) {
    LinkedList<DateFormats> matches = new LinkedList<DateFormats>();
    for (DateFormats format : DateFormats.values()) {
        if (format.matches(dateStr)) {
            matches.add(format);
        }
    }

    if (matches.size() == 0) {
        return null;
    }

    if (matches.size() == 1) {
        return matches.get(0);
    }

    for (DateFormats format : matches) {
        if (preferMonthDay && format.equals(DateFormats.MON_DAY_LYEAR)
                || format.equals(DateFormats.MON_DAY_SYEAR)) {
            return format;
        } else if (!preferMonthDay && format.equals(DateFormats.DAY_MON_LYEAR)
                || format.equals(DateFormats.DAY_MON_SYEAR)) {
            return format;
        }
    }
    /*
     * It shouldn't be possible for multiple matches to exist involving formats other than DAY_MON_LYEAR/SYEAR and MON_DAY_LYEAR/SYEAR.
     * But if it occurs, complain and return null.
     */
    if (matches.size() > 0) {
        log.error("Unable to resolve multiple date-format matches for '" + dateStr + "'");
    }

    return null;
}