Example usage for java.util.LinkedList size()

List of usage examples for java.util.LinkedList size()

Introduction

On this page you can find usage examples for java.util.LinkedList size().

Prototype

public int size()
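
Below is a minimal, self-contained sketch of size() in action (illustrative only, not taken from the example files that follow):

import java.util.LinkedList;

public class LinkedListSizeExample {
    public static void main(String[] args) {
        LinkedList<String> names = new LinkedList<String>();
        System.out.println(names.size()); // 0: a new list is empty

        names.add("alpha");
        names.add("beta");
        System.out.println(names.size()); // 2: size() reflects every add

        names.removeFirst();
        System.out.println(names.size()); // 1: and every removal

        if (names.isEmpty()) {            // equivalent to names.size() == 0
            System.out.println("empty");
        }
    }
}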

To view the source code for java.util.LinkedList size(), click the Source Link.

Usage

From source file:edu.ur.ir.user.service.DefaultUserFileSystemService.java

/**
 * Allow a user to move files and folders into a given folder
 * @see edu.ur.ir.user.UserFileSystemService#moveFolderSystemInformation(java.lang.Long, java.util.List, java.util.List)
 */
public List<FileSystem> moveFolderSystemInformation(PersonalFolder destination,
        List<PersonalFolder> foldersToMove, List<PersonalFile> filesToMove) {

    LinkedList<FileSystem> notMoved = new LinkedList<FileSystem>();

    // move folders first
    if (foldersToMove != null) {
        for (PersonalFolder folder : foldersToMove) {
            log.debug("Adding folder " + folder + " to destination " + destination);

            try {
                destination.addChild(folder);
            } catch (DuplicateNameException e) {
                notMoved.add(folder);
            }

        }
    }

    if (filesToMove != null && notMoved.size() == 0) {
        for (PersonalFile file : filesToMove) {
            log.debug("Adding file " + file + " to destination " + destination);
            try {
                destination.addPersonalFile(file);
            } catch (DuplicateNameException e) {
                notMoved.add(file);
            }

        }
    }

    if (notMoved.size() == 0) {
        personalFolderDAO.makePersistent(destination);
    }

    return notMoved;
}

From source file:io.hops.transaction.lock.INodeLock.java

private List<INode> findChildrenRecursively(INode lastINode)
        throws StorageException, TransactionContextException {
    LinkedList<INode> children = new LinkedList<>();
    LinkedList<INode> unCheckedDirs = new LinkedList<>();
    if (lastINode != null) {
        if (lastINode instanceof INodeDirectory) {
            unCheckedDirs.add(lastINode);
        }
    }

    // Find all the children in the sub-directories.
    while (!unCheckedDirs.isEmpty()) {
        INode next = unCheckedDirs.poll();
        if (next instanceof INodeDirectory) {
            setINodeLockType(TransactionLockTypes.INodeLockType.READ_COMMITTED); //locking the parent is sufficient
            List<INode> clist = ((INodeDirectory) next).getChildrenList();
            unCheckedDirs.addAll(clist);
            children.addAll(clist);
        }
    }
    LOG.debug("Added " + children.size() + " children.");
    return children;
}
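
In this example the LinkedList serves two roles: a FIFO work queue (poll()/addAll()) driving the breadth-first walk, with size() used only for the debug message at the end. A generic sketch of the same pattern over a hypothetical Node type (names are illustrative, not from the Hops codebase):

import java.util.LinkedList;
import java.util.List;

class Node {
    List<Node> children = new LinkedList<Node>();
}

public class BreadthFirstCollect {
    static List<Node> collectDescendants(Node root) {
        LinkedList<Node> result = new LinkedList<Node>();
        LinkedList<Node> queue = new LinkedList<Node>();
        queue.add(root);
        while (!queue.isEmpty()) {
            Node next = queue.poll();    // FIFO: take from the head
            queue.addAll(next.children); // enqueue the next level
            result.addAll(next.children);
        }
        System.out.println("Collected " + result.size() + " descendants.");
        return result;
    }
}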

From source file:edu.ur.ir.user.service.DefaultUserFileSystemService.java

/**
 * Move the folders into the root location of the user.
 * @throws DuplicateNameException 
 * 
 * @see edu.ur.ir.user.UserFileSystemService#moveFolderSystemInformation(edu.ur.ir.user.IrUser, java.util.List, java.util.List)
 */
public List<FileSystem> moveFolderSystemInformation(IrUser user, List<PersonalFolder> foldersToMove,
        List<PersonalFile> filesToMove) {

    LinkedList<FileSystem> notMoved = new LinkedList<FileSystem>();

    // move folders first
    if (foldersToMove != null) {
        for (PersonalFolder folder : foldersToMove) {
            log.debug("Adding folder " + folder + " to root of user " + user);
            try {
                user.addRootFolder(folder);
            } catch (DuplicateNameException e) {
                notMoved.add(folder);
            }

        }
    }

    // then move the files
    if (filesToMove != null && notMoved.size() == 0) {
        for (PersonalFile file : filesToMove) {
            log.debug("Adding file " + file + " to root of user " + user);

            try {
                user.addRootFile(file);
            } catch (DuplicateNameException e) {
                notMoved.add(file);
            }
        }
    }

    if (notMoved.size() == 0) {
        irUserDAO.makePersistent(user);
    }

    return notMoved;

}

From source file:jext2.DataBlockAccess.java

/**
 * Splice the allocated branch onto inode
 * @throws IOException
 */
@NotThreadSafe(useLock = true)
private void spliceBranch(long logicalBlock, int[] offsets, long[] blockNrs, LinkedList<Long> newBlockNrs)
        throws IoError {

    int existDepth = blockNrs.length;

    if (existDepth == 0) { /* add direct block */
        long[] directBlocks = inode.getBlock();
        directBlocks[offsets[0]] = newBlockNrs.getFirst().longValue();
    } else {
        ByteBuffer buf = blocks.read(blockNrs[existDepth - 1]);
        Ext2fsDataTypes.putLE32U(buf, newBlockNrs.getFirst().longValue(), offsets[existDepth] * 4);
        buf.rewind();
        blocks.write(blockNrs[existDepth - 1], buf);
    }

    lastAllocLogicalBlock = logicalBlock;
    lastAllocPhysicalBlock = newBlockNrs.getLast().intValue();

    inode.setBlocks(inode.getBlocks() + newBlockNrs.size() * (superblock.getBlocksize() / 512));
    inode.setModificationTime(new Date());
}

From source file:de.interactive_instruments.ShapeChange.Model.EA.EADocument.java

public void executeCommonInitializationProcedure() throws ShapeChangeAbortException {

    // determine if specific packages should not be loaded
    this.excludedPackageNames = options.getExcludedPackages();

    /** Cache classes and packages */
    // First set up initial evaluation tasks of packages consisting
    // of the models in the repository
    class EvalTask {
        PackageInfoEA fatherPI;
        org.sparx.Package eaPackage;

        EvalTask(PackageInfoEA fpi, org.sparx.Package p) {
            fatherPI = fpi;
            eaPackage = p;
        }
    }

    StatusBoard.getStatusBoard().statusChanged(STATUS_EADOCUMENT_READMODEL);

    LinkedList<EvalTask> evalp = new LinkedList<EvalTask>();
    Collection<org.sparx.Package> model = repository.GetModels();
    for (org.sparx.Package p : model) {

        // Check if this model and all its contents shall be excluded
        String name = p.GetName();
        if (excludedPackageNames != null && excludedPackageNames.contains(name)) {
            // stop processing this model and continue with the next
            continue;
        }

        evalp.addLast(new EvalTask(null, p));
    }

    // Now remove tasks from the list, adding further tasks as we proceed
    // until we have no more tasks to evaluate
    while (evalp.size() > 0) {
        // Remove next evaluation task
        EvalTask et = evalp.removeFirst();
        org.sparx.Package pack = et.eaPackage;
        PackageInfoEA fpi = et.fatherPI;

        // Check if this package and all its contents shall be excluded from
        // the model
        String name = pack.GetName();
        if (excludedPackageNames != null && excludedPackageNames.contains(name)) {
            // stop processing this package and continue with the next
            continue;
        }

        // Add to package cache. The PackageInfo Ctor does the necessary
        // parent/child linkage of packages
        Element packelmt = pack.GetElement();
        PackageInfoEA pi = new PackageInfoEA(this, fpi, pack, packelmt);
        fPackageById.put(pi.id(), pi);
        if (packelmt != null)
            this.fPackageByElmtId.put(new Integer(packelmt.GetElementID()).toString(), pi);
        // Now pick all classes and add them to their caches.
        for (org.sparx.Element elmt : pack.GetElements()) {
            String type = elmt.GetType();
            if (!type.equals("DataType") && !type.equals("Class") && !type.equals("Interface")
                    && !type.equals("Enumeration"))
                continue;
            ClassInfoEA ci = new ClassInfoEA(this, pi, elmt);
            fClassById.put(ci.id(), ci);
            // TODO What's happening to identical class names? How is this
            // supposed to be handled? Open issue. While classifier names only
            // have to be unique per application schema, it is a legacy from
            // Rational Rose that classifier names are expected to be unique
            // in the whole model. The correct solution would be to add
            // namespace qualifiers.
            fClassByName.put(ci.name(), ci);
        }
        // Add next level packages for further evaluation
        for (org.sparx.Package pnxt : pack.GetPackages()) {
            evalp.addLast(new EvalTask(pi, pnxt));
        }
    }

    StatusBoard.getStatusBoard().statusChanged(STATUS_EADOCUMENT_ESTABLISHCLASSES);

    /**
     * Now that all classes are collected, in a second go establish class
     * derivation hierarchy and all other associations between classes.
     */
    for (ClassInfoEA ci : fClassById.values()) {

        // Generalization - class derivation hierarchy
        ci.establishClassDerivationHierarchy();
        // Other associations where the class is source or target
        ci.establishAssociations();
    }

    String checkingConstraints = options.parameter("checkingConstraints");
    if (checkingConstraints == null || !checkingConstraints.toLowerCase().trim().equals("disabled")) {
        StatusBoard.getStatusBoard().statusChanged(STATUS_EADOCUMENT_READCONSTARINTS);

        // TODO The following may be removed when constraints have been
        // tested.
        /** In a third go collect all constraints */
        for (ClassInfoEA ci : fClassById.values()) {
            ci.constraints();
            SortedMap<StructuredNumber, PropertyInfo> props = ci.properties();
            for (PropertyInfo pi : props.values())
                pi.constraints();
        }
    }

    /**
     * Loop over all schemas (i.e. packages with a target namespace) and
     * store the schema location, so that it can be added in import
     * statements
     */
    SortedSet<PackageInfo> schemas = schemas("");
    for (Iterator<PackageInfo> i = schemas.iterator(); i.hasNext();) {
        PackageInfo pi = i.next();
        options.addSchemaLocation(pi.targetNamespace(), pi.xsdDocument());
    }

    // ==============================
    // load diagrams if so requested
    String loadDiagrams = options.parameter("loadDiagrams");

    if (loadDiagrams != null && loadDiagrams.equalsIgnoreCase("true")) {

        java.io.File tmpDir = options.imageTmpDir();

        if (tmpDir.exists()) {

            // probably content from previous run, delete the content of the directory
            try {
                FileUtils.deleteDirectory(tmpDir);
            } catch (IOException e) {
                result.addWarning(null, 34, tmpDir.getAbsolutePath());
            }

            if (!tmpDir.exists()) {
                try {
                    FileUtils.forceMkdir(tmpDir);
                } catch (IOException e) {
                    result.addWarning(null, 32, tmpDir.getAbsolutePath());
                }
            }
        }

        AtomicInteger imgIdCounter = new AtomicInteger(0);

        SortedSet<? extends PackageInfo> selectedSchema = this.selectedSchemas();

        for (PackageInfo pi : selectedSchema) {

            if (pi == null) {
                continue;
            }

            // Only process schemas in a namespace and name that matches a
            // user-selected pattern
            if (options.skipSchema(null, pi))
                continue;

            saveDiagrams(imgIdCounter, "img", tmpDir, escapeFileName(tmpDir.getName()), pi);
        }
    }

}

From source file:org.openmrs.module.appointmentscheduling.api.impl.AppointmentServiceImpl.java

@Override
@Transactional(readOnly = true)
public List<String> getPatientIdentifiersRepresentation(Patient patient) {
    LinkedList<String> identifiers = new LinkedList<String>();

    if (patient == null)
        return identifiers;

    for (PatientIdentifier identifier : patient.getIdentifiers()) {
        //Representation format: <identifier type name> : <identifier value> 
        //for example: "OpenMRS Identification Number: 7532AM-1" 
        String representation = identifier.getIdentifierType().getName() + ": " + identifier.getIdentifier();
        //Put preferred identifier first.
        if (identifier.getPreferred())
            identifiers.add(0, representation);
        //Insert to the end of the list
        else
            identifiers.add(identifiers.size(), representation);
    }

    return identifiers;
}
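
A side note on the idiom above (standard java.util.List behavior, not specific to OpenMRS): add(index, element) with index equal to size() simply appends, so the else branch is equivalent to a plain add(). A small sketch with hypothetical values:

import java.util.LinkedList;

public class IdentifierOrderExample {
    public static void main(String[] args) {
        LinkedList<String> ids = new LinkedList<String>();
        ids.add("first");
        ids.add(ids.size(), "last"); // index == size() appends, same as ids.add("last")
        ids.add(0, "preferred");     // preferred entries go to the front
        System.out.println(ids);     // [preferred, first, last]
    }
}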

From source file:net.semanticmetadata.lire.filter.LsaFilter.java

/**
 * @param results
 * @param query
 * @return the filtered results or null if error occurs.
 */
public ImageSearchHits filter(ImageSearchHits results, Document query) {
    // create a double[items][histogram]
    tempFeature = null;
    LinkedList<double[]> features = new LinkedList<double[]>();
    try {
        tempFeature = (LireFeature) featureClass.newInstance();
    } catch (Exception e) {
        logger.severe("Could not create feature " + featureClass.getName() + " (" + e.getMessage() + ").");
        return null;
    }
    // get all features from the result set, take care of those that do not have the respective field.
    for (int i = 0; i < results.length(); i++) {
        Document d = results.doc(i);
        if (d.getField(fieldName) != null) {
            tempFeature.setByteArrayRepresentation(d.getField(fieldName).binaryValue().bytes,
                    d.getField(fieldName).binaryValue().offset, d.getField(fieldName).binaryValue().length);
            features.add(tempFeature.getDoubleHistogram());
        }
    }
    // now go for the query
    if (query.getField(fieldName) != null) {
        tempFeature.setByteArrayRepresentation(query.getField(fieldName).binaryValue().bytes,
                query.getField(fieldName).binaryValue().offset, query.getField(fieldName).binaryValue().length);
    } else {
        logger.severe("Query document is missing the given feature " + featureClass.getName() + ".");
        return null;
    }
    double[][] matrixData = new double[features.size() + 1][tempFeature.getDoubleHistogram().length];
    System.arraycopy(tempFeature.getDoubleHistogram(), 0, matrixData[0], 0,
            tempFeature.getDoubleHistogram().length);
    int count = 1;
    for (Iterator<double[]> iterator = features.iterator(); iterator.hasNext();) {
        double[] next = iterator.next();
        System.arraycopy(next, 0, matrixData[count], 0, next.length);
        count++;
    }
    for (int i = 0; i < matrixData.length; i++) {
        double[] doubles = matrixData[i];
        for (int j = 0; j < doubles.length; j++) {
            if (Double.isNaN(doubles[j]))
                System.err.println("Value is NaN");
        }
    }
    // create a matrix object and do the magic
    Array2DRowRealMatrix m = new Array2DRowRealMatrix(matrixData);
    long ms = System.currentTimeMillis();
    SingularValueDecomposition svd = new SingularValueDecomposition(m);
    ms = System.currentTimeMillis() - ms;
    double[] singularValues = svd.getSingularValues();
    RealMatrix s = svd.getS();
    // if no number of dimensions is given reduce to a tenth.
    if (numberOfDimensions < 1)
        numberOfDimensions = singularValues.length / 10;
    for (int i = numberOfDimensions; i < singularValues.length; i++) {
        s.setEntry(i, i, 0);
    }
    RealMatrix mNew = svd.getU().multiply(s).multiply(svd.getVT());
    double[][] data = mNew.getData();

    // create the new result set
    TreeSet<SimpleResult> result = new TreeSet<SimpleResult>();
    double maxDistance = 0;
    double[] queryData = data[0];
    for (int i = 1; i < data.length; i++) {
        double[] doubles = data[i];
        double distance = MetricsUtils.distL1(doubles, queryData);
        result.add(new SimpleResult((float) distance, results.doc(i - 1), i - 1));
        maxDistance = Math.max(maxDistance, distance);
    }
    ImageSearchHits hits;
    hits = new SimpleImageSearchHits(result, (float) maxDistance);
    return hits;
}

From source file:com.ibm.jaggr.service.impl.deps.DepTree.java

/**
 * Object constructor. Attempts to de-serialize the cached dependency lists
 * from disk and then validates the dependency lists based on last-modified
 * dates, looking for any new or removed files. If the cached dependency
 * list data cannot be de-serialized, new lists are constructed. Once the
 * dependency lists have been validated, the list data is serialized back
 * out to disk.
 * 
 * @param paths
 *            Collection of URIs which specify the target resources
 *            to be scanned for javascript files.
 * @param aggregator
 *            The servlet instance for this object
 * @param stamp
 *            timestamp associated with external override/customization 
 *            resources that are checked on every server restart
 * @param clean
 *            If true, then the dependency lists are generated from scratch
 *            rather than by de-serializing and then validating the cached
 *            dependency lists.
 * @param validateDeps
 *            If true, then validate existing cached dependencies using
 *            file last-modified times.
 * @throws IOException
 */
public DepTree(Collection<URI> paths, IAggregator aggregator, long stamp, boolean clean, boolean validateDeps)
        throws IOException {
    this.stamp = stamp;
    IConfig config = aggregator.getConfig();
    rawConfig = config.toString();

    File cacheDir = new File(aggregator.getWorkingDirectory(), DEPCACHE_DIRNAME);
    File cacheFile = new File(cacheDir, CACHE_FILE);

    /*
     * The de-serialized dependency map. If we have a cached dependency map,
     * then it will be validated against the last-modified dates of the
     * current files and only the files that have changed will need to be
     * re-parsed to update the dependency lists.
     */
    DepTree cached = null;

    if (!clean) {
        // If we're not starting clean, try to de-serialize the map from
        // cache
        try {
            ObjectInputStream is = new ObjectInputStream(new FileInputStream(cacheFile));
            try {
                cached = (DepTree) is.readObject();
            } finally {
                try {
                    is.close();
                } catch (Exception ignore) {
                }
            }
        } catch (FileNotFoundException e) {
            /*
             * Not an error. Just means that the cache file hasn't been
             * written yet or else it's been deleted.
             */
            if (log.isLoggable(Level.INFO))
                log.log(Level.INFO, Messages.DepTree_1);
        } catch (Exception e) {
            if (log.isLoggable(Level.SEVERE))
                log.log(Level.SEVERE, e.getMessage(), e);
        }
    }

    // If the cacheBust config param has changed, then do a clean build
    // of the dependencies.
    if (cached != null) {
        if (stamp == 0) {
            // no init stamp provided.  Preserve the cached one.
            stamp = cached.stamp;
        }
        if (stamp > cached.stamp) {
            // init stamp has been updated.  Validate dependencies.
            validateDeps = true;
        }
        cacheBust = aggregator.getOptions().getCacheBust();
        if (!StringUtils.equals(cacheBust, cached.cacheBust)) {
            if (log.isLoggable(Level.INFO)) {
                log.info(Messages.DepTree_2);
            }
            cached = null;
        }
    }

    /*
     * If we de-serialized a previously saved dependency map, then go with
     * that.
     */
    if (cached != null && rawConfig.equals(cached.rawConfig) && !validateDeps && !clean) {
        depMap = cached.depMap;
        return;
    }

    // Initialize the dependency map
    depMap = new ConcurrentHashMap<URI, DepTreeNode>();

    // This can take a while, so print something to the console
    String msg = MessageFormat.format(Messages.DepTree_3, new Object[] { aggregator.getName() });

    ConsoleService cs = new ConsoleService();
    cs.println(msg);

    if (log.isLoggable(Level.INFO)) {
        log.info(msg);
    }
    // Make sure that all the paths are unique and orthogonal
    paths = DepUtils.removeRedundantPaths(paths);

    /*
     * Create the thread pools, one for the tree builders and one for the
     * parsers. Since a tree builder thread will wait for all the outstanding
     * parser threads started by that builder to complete, we need to use two
     * independent thread pools to guard against the possibility of deadlock
     * caused by all the threads in the pool being consumed by tree builders
     * and leaving none available to service the parsers.
     */
    final ThreadGroup treeBuilderTG = new ThreadGroup(TREEBUILDER_TGNAME),
            parserTG = new ThreadGroup(JSPARSER_TGNAME);
    ExecutorService treeBuilderExc = Executors.newFixedThreadPool(10, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(treeBuilderTG, r, MessageFormat.format(THREADNAME,
                    new Object[] { treeBuilderTG.getName(), treeBuilderTG.activeCount() }));
        }
    }), parserExc = Executors.newFixedThreadPool(20, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(parserTG, r, MessageFormat.format(THREADNAME,
                    new Object[] { parserTG.getName(), parserTG.activeCount() }));
        }
    });

    // Counter to keep track of number of tree builder threads started
    AtomicInteger treeBuilderCount = new AtomicInteger(0);

    // The completion services for the thread pools
    final CompletionService<URI> parserCs = new ExecutorCompletionService<URI>(parserExc);
    CompletionService<DepTreeBuilder.Result> treeBuilderCs = new ExecutorCompletionService<DepTreeBuilder.Result>(
            treeBuilderExc);

    // Start the tree builder threads to process the paths
    for (final URI path : paths) {
        /*
         * Create or get from cache the root node for this path and
         * add it to the new map.
         */
        DepTreeNode root = new DepTreeNode(PathUtil.getModuleName(path));
        DepTreeNode cachedNode = null;
        if (cached != null) {
            cachedNode = cached.depMap.get(path);
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_4, new Object[] { path }));
            }
        } else {
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_5, new Object[] { path }));
            }
        }
        depMap.put(path, root);

        treeBuilderCount.incrementAndGet();
        treeBuilderCs.submit(new DepTreeBuilder(aggregator, parserCs, path, root, cachedNode));
    }

    // List of parser exceptions
    LinkedList<Exception> parserExceptions = new LinkedList<Exception>();

    /*
     * Pull the completed tree builder tasks from the completion queue until
     * all the paths have been processed
     */
    while (treeBuilderCount.decrementAndGet() >= 0) {
        try {
            DepTreeBuilder.Result result = treeBuilderCs.take().get();
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_6,
                        new Object[] { result.parseCount, result.dirName }));
            }
        } catch (Exception e) {
            if (log.isLoggable(Level.SEVERE))
                log.log(Level.SEVERE, e.getMessage(), e);
            parserExceptions.add(e);
        }
    }

    // shutdown the thread pools now that we're done with them
    parserExc.shutdown();
    treeBuilderExc.shutdown();

    // If parser exceptions occurred, then rethrow the first one 
    if (parserExceptions.size() > 0) {
        throw new RuntimeException(parserExceptions.get(0));
    }

    // Prune dead nodes (nodes with no children or dependency lists)
    for (Map.Entry<URI, DepTreeNode> entry : depMap.entrySet()) {
        entry.getValue().prune();
    }

    /*
     * Make sure the cache directory exists before we try to serialize the
     * dependency map.
     */
    if (!cacheDir.exists())
        if (!cacheDir.mkdirs()) {
            throw new IOException(
                    MessageFormat.format(Messages.DepTree_0, new Object[] { cacheDir.getAbsolutePath() }));
        }

    // Serialize the map to the cache directory
    ObjectOutputStream os;
    os = new ObjectOutputStream(new FileOutputStream(cacheFile));
    try {
        os.writeObject(this);
    } finally {
        try {
            os.close();
        } catch (Exception ignore) {
        }
    }
    msg = MessageFormat.format(Messages.DepTree_7, new Object[] { aggregator.getName() });

    // Output that we're done.
    cs.println(msg);
    if (log.isLoggable(Level.INFO)) {
        log.info(msg);
    }
}

From source file:com.projity.pm.graphic.frames.DocumentFrame.java

/**
 * Sees if the currently selected row belongs to the main project. Used to determine whether a subproject can be
 * inserted, since subprojects can only be inserted into the master project.
 * @return
 */
public boolean isCurrentRowInMainProject() {
    CommonSpreadSheet spreadSheet = getTopSpreadSheet();
    if (spreadSheet == null)
        return true;
    int row = spreadSheet.getCurrentRow();
    if (row == -1)
        return true;
    Node current = spreadSheet.getCurrentRowNode();
    SpreadSheetModel model = (SpreadSheetModel) spreadSheet.getModel();
    LinkedList previousNodes = model.getPreviousVisibleNodesFromRow(row);
    if (previousNodes == null)
        return true;
    previousNodes.add(current); // treat current node first since going backwards
    ListIterator i = previousNodes.listIterator(previousNodes.size());
    while (i.hasPrevious()) {
        Object o = ((Node) i.previous()).getImpl();
        if (o instanceof Task) {
            if (((Task) o).isInSubproject())
                return false;
            return project == ((Task) o).getOwningProject();
        }
    }

    return true;
}
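
A side note on the idiom above (standard java.util behavior): listIterator(previousNodes.size()) positions the iterator just past the last element, so previous() walks the list from tail to head. A minimal sketch:

import java.util.LinkedList;
import java.util.ListIterator;

public class BackwardsIteration {
    public static void main(String[] args) {
        LinkedList<String> items = new LinkedList<String>();
        items.add("a");
        items.add("b");
        items.add("c");

        // listIterator(size()) starts just past the last element,
        // so previous() visits the elements in reverse order.
        ListIterator<String> it = items.listIterator(items.size());
        while (it.hasPrevious()) {
            System.out.println(it.previous()); // prints c, b, a
        }
    }
}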

From source file:psiprobe.controllers.logs.FollowController.java

@Override
protected ModelAndView handleLogFile(HttpServletRequest request, HttpServletResponse response,
        LogDestination logDest) throws Exception {

    ModelAndView mv = new ModelAndView(getViewName());
    File file = logDest.getFile();

    if (file.exists()) {
        LinkedList<String> lines = new LinkedList<>();
        long actualLength = file.length();
        long lastKnownLength = ServletRequestUtils.getLongParameter(request, "lastKnownLength", 0);
        long currentLength = ServletRequestUtils.getLongParameter(request, "currentLength", actualLength);
        long maxReadLines = ServletRequestUtils.getLongParameter(request, "maxReadLines", 0);

        if (lastKnownLength > currentLength || lastKnownLength > actualLength || currentLength > actualLength) {

            // file length got reset
            lastKnownLength = 0;
            lines.add(" ------------- THE FILE HAS BEEN TRUNCATED --------------");
        }

        try (BackwardsFileStream bfs = new BackwardsFileStream(file, currentLength)) {
            BackwardsLineReader br;
            if (logDest.getEncoding() != null) {
                br = new BackwardsLineReader(bfs, logDest.getEncoding());
            } else {
                br = new BackwardsLineReader(bfs);
            }
            long readSize = 0;
            long totalReadSize = currentLength - lastKnownLength;
            String line;
            while (readSize < totalReadSize && (line = br.readLine()) != null) {
                if (!line.isEmpty()) {
                    lines.addFirst(line);
                    readSize += line.length();
                } else {
                    readSize++;
                }
                if (maxReadLines != 0 && lines.size() >= maxReadLines) {
                    break;
                }
            }

            if (lastKnownLength != 0 && readSize > totalReadSize) {
                lines.removeFirst();
            }
        }

        mv.addObject("lines", lines);
    }
    return mv;
}