Example usage for java.lang StringBuilder reverse

List of usage examples for java.lang StringBuilder reverse

Introduction

On this page you can find example usages of java.lang.StringBuilder.reverse().

Prototype

@Override
public StringBuilder reverse()

Usage
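Before the project examples below, here is a minimal, self-contained sketch of the method itself (ReverseDemo is a made-up class name for illustration). reverse() reverses the character sequence in place and returns the same builder, so it chains directly with toString(); surrogate pairs are treated as single characters, so supplementary code points are not corrupted.

public class ReverseDemo {
    public static void main(String[] args) {
        // reverse() mutates the builder and returns the same instance,
        // so it chains directly with toString().
        String reversed = new StringBuilder("abcdef").reverse().toString();
        System.out.println(reversed); // prints "fedcba"

        // Surrogate pairs are kept together, so a supplementary code point
        // (here U+1D11E, MUSICAL SYMBOL G CLEF) survives the reversal intact.
        StringBuilder sb = new StringBuilder("G\uD834\uDD1E!");
        System.out.println(sb.reverse()); // prints "!" + the clef + "G"
    }
}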

From source file: org.ext.uberfire.social.activities.persistence.SocialFile.java

private StringBuilder getNumberOfBytesOfJSON() throws IOException {
    currentCursorReadPosition = currentCursorReadPosition - 1;
    reader.position(currentCursorReadPosition);

    StringBuilder numberOfBytesNextJSON = new StringBuilder();
    if (thereIsSomethingToRead(reader, byteBufferSize)) {
        byteBufferSize.flip();

        if (!lookforSeparator(byteBufferSize.array())) {
            String charRead = new String(byteBufferSize.array());
            numberOfBytesNextJSON.append(charRead);
            currentCursorReadPosition = currentCursorReadPosition - 1;
            reader.position(currentCursorReadPosition);
            while (thereIsSomethingToRead(reader, byteBufferSize)) {
                byteBufferSize.flip();
                charRead = new String(byteBufferSize.array());
                if (lookforSeparator(byteBufferSize.array())) {
                    break;
                }
                numberOfBytesNextJSON.append(charRead);
                byteBufferSize.clear();
                currentCursorReadPosition = currentCursorReadPosition - 1;
                reader.position(currentCursorReadPosition);
            }
        }
    }
    numberOfBytesNextJSON = numberOfBytesNextJSON.reverse();
    return numberOfBytesNextJSON;
}

From source file: org.apache.accumulo.examples.wikisearch.parser.RangeCalculator.java

/**
 * Queries the global index table for the supplied ranges and returns the matching term ranges.
 * @param indexRanges
 *          map of field name/value keys to the index ranges to scan
 * @param specificFieldName
 *          optional column family (field name) to restrict the scan to
 * @param tableName
 *          name of the global index table to scan
 * @param isReverse
 *          switch that determines whether or not to reverse the results
 * @param override
 *          mapKey for wildcard and range queries that specifies which mapKey to use in the results
 * @param typeFilter
 *          optional list of datatypes
 * @throws TableNotFoundException
 */
protected Map<MapKey, TermRange> queryGlobalIndex(Map<MapKey, Set<Range>> indexRanges, String specificFieldName,
        String tableName, boolean isReverse, MapKey override, Set<String> typeFilter)
        throws TableNotFoundException {

    // The results map where the key is the field name and field value and the
    // value is a set of ranges. The mapkey will always be the field name
    // and field value that was passed in the original query. The TermRange
    // will contain the field name and field value found in the index.
    Map<MapKey, TermRange> results = new HashMap<MapKey, TermRange>();

    // Seed the results map and create the range set for the batch scanner
    Set<Range> rangeSuperSet = new HashSet<Range>();
    for (Entry<MapKey, Set<Range>> entry : indexRanges.entrySet()) {
        rangeSuperSet.addAll(entry.getValue());
        TermRange tr = new TermRange(entry.getKey().getFieldName(), entry.getKey().getFieldValue());
        if (null == override)
            results.put(entry.getKey(), tr);
        else
            results.put(override, tr);
    }

    if (log.isDebugEnabled())
        log.debug("Querying global index table: " + tableName + ", range: " + rangeSuperSet.toString()
                + " colf: " + specificFieldName);
    BatchScanner bs = this.c.createBatchScanner(tableName, this.auths, this.queryThreads);
    bs.setRanges(rangeSuperSet);
    if (null != specificFieldName) {
        bs.fetchColumnFamily(new Text(specificFieldName));
    }

    for (Entry<Key, Value> entry : bs) {
        if (log.isDebugEnabled()) {
            log.debug("Index entry: " + entry.getKey().toString());
        }
        String fieldValue = null;
        if (!isReverse) {
            fieldValue = entry.getKey().getRow().toString();
        } else {
            StringBuilder buf = new StringBuilder(entry.getKey().getRow().toString());
            fieldValue = buf.reverse().toString();
        }

        String fieldName = entry.getKey().getColumnFamily().toString();
        // Get the shard id and datatype from the colq
        String colq = entry.getKey().getColumnQualifier().toString();
        int separator = colq.indexOf(EvaluatingIterator.NULL_BYTE_STRING);
        String shardId = null;
        String datatype = null;
        if (separator != -1) {
            shardId = colq.substring(0, separator);
            datatype = colq.substring(separator + 1);
        } else {
            shardId = colq;
        }
        // Skip this entry if the type is not correct
        if (null != datatype && null != typeFilter && !typeFilter.contains(datatype))
            continue;
        // Parse the UID.List object from the value
        Uid.List uidList = null;
        try {
            uidList = Uid.List.parseFrom(entry.getValue().get());
        } catch (InvalidProtocolBufferException e) {
            // Don't add UID information, at least we know what shards
            // it is located in.
        }

        // Add the count for this shard to the total count for the term.
        long count = 0;
        Long storedCount = termCardinalities.get(fieldName);
        if (null == storedCount || 0 == storedCount) {
            count = uidList.getCOUNT();
        } else {
            count = uidList.getCOUNT() + storedCount;
        }
        termCardinalities.put(fieldName, count);
        this.indexEntries.put(fieldName, fieldValue);

        if (null == override)
            this.indexValues.put(fieldValue, fieldValue);
        else
            this.indexValues.put(fieldValue, override.getOriginalQueryValue());

        // Create the keys
        Text shard = new Text(shardId);
        if (uidList.getIGNORE()) {
            // Then we create a scan range that is the entire shard
            if (null == override)
                results.get(new MapKey(fieldName, fieldValue)).add(new Range(shard));
            else
                results.get(override).add(new Range(shard));
        } else {
            // We should have UUIDs, create event ranges
            for (String uuid : uidList.getUIDList()) {
                Text cf = new Text(datatype);
                TextUtil.textAppend(cf, uuid);
                Key startKey = new Key(shard, cf);
                Key endKey = new Key(shard, new Text(cf.toString() + EvaluatingIterator.NULL_BYTE_STRING));
                Range eventRange = new Range(startKey, true, endKey, false);
                if (null == override)
                    results.get(new MapKey(fieldName, fieldValue)).add(eventRange);
                else
                    results.get(override).add(eventRange);
            }
        }
    }
    bs.close();
    return results;
}

From source file: org.lockss.util.StringUtil.java

/**
 * Find the longest common suffix of a pair of strings. Case sensitive.
 * @param s1 a string
 * @param s2 another string
 * @return the longest common suffix, which may be the empty string
 */
public static String commonSuffix(String s1, String s2) {
    char[] c1 = s1.toCharArray();
    char[] c2 = s2.toCharArray();
    StringBuilder sb = new StringBuilder();
    for (int i = 1; i <= Math.min(c1.length, c2.length); i++) {
        if (c1[c1.length - i] == c2[c2.length - i])
            sb.append(c1[c1.length - i]);
        else
            break;
    }
    return sb.reverse().toString();
}
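A quick usage sketch of the method above (CommonSuffixDemo and the sample strings are hypothetical; the body restates commonSuffix so the snippet compiles on its own). The point to notice is that matching characters are collected back-to-front, so the single reverse() call at the end is what restores reading order.

public class CommonSuffixDemo {
    public static void main(String[] args) {
        System.out.println(commonSuffix("filename.mzXML", "index.mzXML")); // prints ".mzXML"
        System.out.println(commonSuffix("abc", "xyz"));                    // prints "" (no common suffix)
    }

    // Same logic as StringUtil.commonSuffix above: walk both strings from the
    // end, append matches (in reverse order), then reverse once at the end.
    static String commonSuffix(String s1, String s2) {
        char[] c1 = s1.toCharArray();
        char[] c2 = s2.toCharArray();
        StringBuilder sb = new StringBuilder();
        for (int i = 1; i <= Math.min(c1.length, c2.length); i++) {
            if (c1[c1.length - i] == c2[c2.length - i]) {
                sb.append(c1[c1.length - i]);
            } else {
                break;
            }
        }
        return sb.reverse().toString();
    }
}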

From source file: automenta.knowtention.channel.LineFileChannel.java

@Override
public void run() {

    FileInputStream fileInputStream = null;
    FileChannel channel = null;
    ByteBuffer buffer = null;
    LinkedList<String> lines = new LinkedList<>();
    StringBuilder builder = new StringBuilder();
    long lastSize = -1, lastLastModified = -1;

    while (running) {
        try {
            Thread.sleep(delayPeriodMS);
        } catch (InterruptedException ex) {
        }

        lines.clear();
        try {
            fileInputStream = new FileInputStream(file);

            channel = fileInputStream.getChannel();

            long lastModified = file.lastModified();
            long csize = channel.size();
            if ((lastModified == lastLastModified) && (csize == lastSize)) { // nothing new: size and modification time unchanged
                fileInputStream.close();
                continue;
            }

            int currentPos = (int) csize;

            buffer = channel.map(FileChannel.MapMode.READ_ONLY, 0, csize);
            buffer.position(currentPos);
            lastSize = csize;
            lastLastModified = lastModified;

            int count = 0;

            for (long i = csize - 1; i >= 0; i--) {

                char c = (char) buffer.get((int) i);

                if (c == '\n') {
                    count++;
                    builder.reverse();
                    lines.addFirst(builder.toString());
                    if (count == numLines) {
                        break;
                    }
                    builder.setLength(0);
                } else
                    builder.append(c);
            }

            update(lines);

            lines.clear();
            buffer.clear();
            channel.close();
            fileInputStream.close();
            fileInputStream = null;

        } catch (Exception ex) {
            Logger.getLogger(LineFileChannel.class.getName()).log(Level.SEVERE, null, ex);
        }
    }
    try {
        channel.close();
    } catch (IOException ex) {
        Logger.getLogger(LineFileChannel.class.getName()).log(Level.SEVERE, null, ex);
    }
}

From source file: org.apache.accumulo.examples.wikisearch.parser.RangeCalculator.java

/**
 * Computes the range results for the query by scanning the global index and reverse index tables.
 * @param c
 * @param auths
 * @param indexedTerms
 * @param terms
 * @param query
 * @param logic
 * @param typeFilter
 * @throws ParseException
 */
public void execute(Connector c, Authorizations auths, Multimap<String, Normalizer> indexedTerms,
        Multimap<String, QueryTerm> terms, String query, AbstractQueryLogic logic, Set<String> typeFilter)
        throws ParseException {
    super.execute(query);
    this.c = c;
    this.auths = auths;
    this.indexedTerms = indexedTerms;
    this.termsCopy.putAll(terms);
    this.indexTableName = logic.getIndexTableName();
    this.reverseIndexTableName = logic.getReverseIndexTableName();
    this.queryThreads = logic.getQueryThreads();

    Map<MapKey, Set<Range>> indexRanges = new HashMap<MapKey, Set<Range>>();
    Map<MapKey, Set<Range>> trailingWildcardRanges = new HashMap<MapKey, Set<Range>>();
    Map<MapKey, Set<Range>> leadingWildcardRanges = new HashMap<MapKey, Set<Range>>();
    Map<Text, RangeBounds> rangeMap = new HashMap<Text, RangeBounds>();

    // Here we iterate over all of the terms in the query to determine if they are an equivalence,
    // wildcard, or range type operator
    for (Entry<String, QueryTerm> entry : terms.entries()) {
        if (entry.getValue().getOperator().equals(JexlOperatorConstants.getOperator(ASTEQNode.class))
                || entry.getValue().getOperator().equals(JexlOperatorConstants.getOperator(ASTERNode.class))
                || entry.getValue().getOperator().equals(JexlOperatorConstants.getOperator(ASTLTNode.class))
                || entry.getValue().getOperator().equals(JexlOperatorConstants.getOperator(ASTLENode.class))
                || entry.getValue().getOperator().equals(JexlOperatorConstants.getOperator(ASTGTNode.class))
                || entry.getValue().getOperator().equals(JexlOperatorConstants.getOperator(ASTGENode.class))) {
            // If this term is not in the set of indexed terms, then bail
            if (!indexedTerms.containsKey(entry.getKey())) {
                termCardinalities.put(entry.getKey().toUpperCase(), 0L);
                continue;
            }
            // In the case of function calls, the query term could be null. Don't query the index for it.
            if (null == entry.getValue()) {
                termCardinalities.put(entry.getKey().toUpperCase(), 0L);
                continue;
            }
            // In the case where we are looking for 'null', then skip.
            if (null == entry.getValue().getValue() || ((String) entry.getValue().getValue()).equals("null")) {
                termCardinalities.put(entry.getKey().toUpperCase(), 0L);
                continue;
            }

            // Remove the begin and end ' marks
            String value = null;
            if (((String) entry.getValue().getValue()).startsWith("'")
                    && ((String) entry.getValue().getValue()).endsWith("'"))
                value = ((String) entry.getValue().getValue()).substring(1,
                        ((String) entry.getValue().getValue()).length() - 1);
            else
                value = (String) entry.getValue().getValue();
            // The entries in the index are normalized
            for (Normalizer normalizer : indexedTerms.get(entry.getKey())) {
                String normalizedFieldValue = normalizer.normalizeFieldValue(null, value);
                Text fieldValue = new Text(normalizedFieldValue);
                Text fieldName = new Text(entry.getKey().toUpperCase());

                // EQUALS
                if (entry.getValue().getOperator().equals(JexlOperatorConstants.getOperator(ASTEQNode.class))) {
                    Key startRange = new Key(fieldValue, fieldName);
                    Range r = new Range(startRange, true, startRange.followingKey(PartialKey.ROW), true);

                    MapKey key = new MapKey(fieldName.toString(), fieldValue.toString());
                    key.setOriginalQueryValue(value);
                    this.originalQueryValues.put(value, key);
                    if (!indexRanges.containsKey(key))
                        indexRanges.put(key, new HashSet<Range>());
                    indexRanges.get(key).add(r);
                    // WILDCARD
                } else if (entry.getValue().getOperator()
                        .equals(JexlOperatorConstants.getOperator(ASTERNode.class))) {
                    // This is a wildcard query using regex. We can only support leading and trailing wildcards at this time. Leading
                    // wildcards will need to be reversed and sent to the global reverse index. Trailing wildcard queries will be sent to the
                    // global index. In all cases, the range for the wildcard will be the range of possible UNICODE codepoints, hex 0 to 10FFFF.
                    int loc = normalizedFieldValue.indexOf(WILDCARD);
                    if (-1 == loc)
                        loc = normalizedFieldValue.indexOf(SINGLE_WILDCARD);
                    if (-1 == loc) {
                        // Then no wildcard in the query? Treat like the equals case above.
                        Key startRange = new Key(fieldValue, fieldName);
                        Range r = new Range(startRange, true, startRange.followingKey(PartialKey.ROW), true);

                        MapKey key = new MapKey(fieldName.toString(), fieldValue.toString());
                        key.setOriginalQueryValue(value);
                        this.originalQueryValues.put(value, key);
                        if (!indexRanges.containsKey(key))
                            indexRanges.put(key, new HashSet<Range>());
                        indexRanges.get(key).add(r);
                    } else {
                        if (loc == 0) {
                            // Then we have a leading wildcard, reverse the term and use the global reverse index.
                            StringBuilder buf = new StringBuilder(normalizedFieldValue.substring(2));
                            normalizedFieldValue = buf.reverse().toString();
                            Key startRange = new Key(new Text(normalizedFieldValue + "\u0000"), fieldName);
                            Key endRange = new Key(new Text(normalizedFieldValue + "\u10FFFF"), fieldName);
                            Range r = new Range(startRange, true, endRange, true);

                            MapKey key = new MapKey(fieldName.toString(), normalizedFieldValue);
                            key.setOriginalQueryValue(value);
                            this.originalQueryValues.put(value, key);
                            if (!leadingWildcardRanges.containsKey(key))
                                leadingWildcardRanges.put(key, new HashSet<Range>());
                            leadingWildcardRanges.get(key).add(r);
                        } else if (loc == (normalizedFieldValue.length() - 2)) {
                            normalizedFieldValue = normalizedFieldValue.substring(0, loc);
                            // Then we have a trailing wildcard character.
                            Key startRange = new Key(new Text(normalizedFieldValue + "\u0000"), fieldName);
                            Key endRange = new Key(new Text(normalizedFieldValue + "\u10FFFF"), fieldName);
                            Range r = new Range(startRange, true, endRange, true);

                            MapKey key = new MapKey(fieldName.toString(), normalizedFieldValue);
                            key.setOriginalQueryValue(value);
                            this.originalQueryValues.put(value, key);
                            if (!trailingWildcardRanges.containsKey(key))
                                trailingWildcardRanges.put(key, new HashSet<Range>());
                            trailingWildcardRanges.get(key).add(r);
                        } else {
                            // throw new RuntimeException("Unsupported wildcard location. Only trailing or leading wildcards are supported: " + normalizedFieldValue);
                            // Don't throw an exception: since the wildcard is neither leading nor trailing, it will be treated as a filter on the results instead.
                    }
                    // RANGES
                } else if (entry.getValue().getOperator()
                        .equals(JexlOperatorConstants.getOperator(ASTGTNode.class))
                        || entry.getValue().getOperator()
                                .equals(JexlOperatorConstants.getOperator(ASTGENode.class))) {
                    // Then we have a lower bound to a range query
                    if (!rangeMap.containsKey(fieldName))
                        rangeMap.put(fieldName, new RangeBounds());
                    rangeMap.get(fieldName).setLower(fieldValue);
                    rangeMap.get(fieldName).setOriginalLower(value);
                } else if (entry.getValue().getOperator()
                        .equals(JexlOperatorConstants.getOperator(ASTLTNode.class))
                        || entry.getValue().getOperator()
                                .equals(JexlOperatorConstants.getOperator(ASTLENode.class))) {
                    // Then we have an upper bound to a range query
                    if (!rangeMap.containsKey(fieldName))
                        rangeMap.put(fieldName, new RangeBounds());
                    rangeMap.get(fieldName).setUpper(fieldValue);
                    rangeMap.get(fieldName).setOriginalUpper(value);
                }
            }
        }
    }

    // INDEX RANGE QUERY
    // Now that we have figured out the range bounds, create the index ranges.
    for (Entry<Text, RangeBounds> entry : rangeMap.entrySet()) {
        if (entry.getValue().getLower() != null && entry.getValue().getUpper() != null) {
            // Figure out the key order
            Key lk = new Key(entry.getValue().getLower());
            Key up = new Key(entry.getValue().getUpper());
            Text lower = lk.getRow();
            Text upper = up.getRow();
            // Switch the order if needed.
            if (lk.compareTo(up) > 0) {
                lower = up.getRow();
                upper = lk.getRow();
            }
            Key startRange = new Key(lower, entry.getKey());
            Key endRange = new Key(upper, entry.getKey());
            Range r = new Range(startRange, true, endRange, true);
            // For the range queries we need to query the global index and then handle the results a little differently.
            Map<MapKey, Set<Range>> ranges = new HashMap<MapKey, Set<Range>>();
            MapKey key = new MapKey(entry.getKey().toString(), entry.getValue().getLower().toString());
            key.setOriginalQueryValue(entry.getValue().getOriginalLower().toString());
            this.originalQueryValues.put(entry.getValue().getOriginalLower().toString(), key);
            ranges.put(key, new HashSet<Range>());
            ranges.get(key).add(r);

            // Now query the global index and override the field value used in the results map
            try {
                Map<MapKey, TermRange> lowerResults = queryGlobalIndex(ranges, entry.getKey().toString(),
                        this.indexTableName, false, key, typeFilter);
                // Add the results to the global index results for both the upper and lower field values.
                Map<MapKey, TermRange> upperResults = new HashMap<MapKey, TermRange>();
                for (Entry<MapKey, TermRange> e : lowerResults.entrySet()) {
                    MapKey key2 = new MapKey(e.getKey().getFieldName(), entry.getValue().getUpper().toString());
                    key2.setOriginalQueryValue(entry.getValue().getOriginalUpper().toString());
                    upperResults.put(key2, e.getValue());
                    this.originalQueryValues.put(entry.getValue().getOriginalUpper(), key2);

                }

                this.globalIndexResults.putAll(lowerResults);
                this.globalIndexResults.putAll(upperResults);

            } catch (TableNotFoundException e) {
                log.error("index table not found", e);
                throw new RuntimeException(" index table not found", e);
            }
        } else {
            log.warn("Unbounded range detected, not querying index for it. Field  " + entry.getKey().toString()
                    + " in query: " + query);
        }
    }
    // Now that we have calculated all of the ranges, query the global index.
    try {

        // Query for the trailing wildcards if we have any
        for (Entry<MapKey, Set<Range>> trailing : trailingWildcardRanges.entrySet()) {
            Map<MapKey, Set<Range>> m = new HashMap<MapKey, Set<Range>>();
            m.put(trailing.getKey(), trailing.getValue());
            if (log.isDebugEnabled())
                log.debug("Ranges for Wildcard Global Index query: " + m.toString());
            this.globalIndexResults.putAll(queryGlobalIndex(m, trailing.getKey().getFieldName(),
                    this.indexTableName, false, trailing.getKey(), typeFilter));
        }

        // Query for the leading wildcards if we have any
        for (Entry<MapKey, Set<Range>> leading : leadingWildcardRanges.entrySet()) {
            Map<MapKey, Set<Range>> m = new HashMap<MapKey, Set<Range>>();
            m.put(leading.getKey(), leading.getValue());
            if (log.isDebugEnabled())
                log.debug("Ranges for Wildcard Global Reverse Index query: " + m.toString());
            this.globalIndexResults.putAll(queryGlobalIndex(m, leading.getKey().getFieldName(),
                    this.reverseIndexTableName, true, leading.getKey(), typeFilter));
        }

        // Query for the equals case
        for (Entry<MapKey, Set<Range>> equals : indexRanges.entrySet()) {
            Map<MapKey, Set<Range>> m = new HashMap<MapKey, Set<Range>>();
            m.put(equals.getKey(), equals.getValue());
            if (log.isDebugEnabled())
                log.debug("Ranges for Global Index query: " + m.toString());
            this.globalIndexResults.putAll(queryGlobalIndex(m, equals.getKey().getFieldName(),
                    this.indexTableName, false, equals.getKey(), typeFilter));
        }
    } catch (TableNotFoundException e) {
        log.error("index table not found", e);
        throw new RuntimeException(" index table not found", e);
    }

    if (log.isDebugEnabled())
        log.debug("Ranges from Global Index query: " + globalIndexResults.toString());

    // Now traverse the AST
    EvaluationContext ctx = new EvaluationContext();
    this.getAST().childrenAccept(this, ctx);

    if (ctx.lastRange.getRanges().size() == 0) {
        log.debug("No resulting range set");
    } else {
        if (log.isDebugEnabled())
            log.debug("Setting range results to: " + ctx.lastRange.getRanges().toString());
        this.result = ctx.lastRange.getRanges();
    }
}
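The leading-wildcard branch above relies on a common indexing trick: terms are stored reversed in a separate reverse index, so a leading wildcard such as .*tion becomes an ordinary prefix scan once the term is reversed. A rough, hypothetical sketch of just that reversal step (LeadingWildcardDemo and the sample value are illustrative, not part of RangeCalculator):

public class LeadingWildcardDemo {
    public static void main(String[] args) {
        // A normalized leading-wildcard term in regex form.
        String normalizedFieldValue = ".*tion";

        // Strip the two-character ".*" prefix and reverse the remainder,
        // mirroring the leading-wildcard branch above; the reversed term is
        // then usable as a plain prefix against the global reverse index.
        String reverseIndexTerm = new StringBuilder(normalizedFieldValue.substring(2))
                .reverse()
                .toString();

        System.out.println(reverseIndexTerm); // prints "noit"
    }
}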

From source file: net.sf.jabref.importer.fileformat.BibtexParser.java

/**
 * Tries to restore the key.
 *
 * @return rest of key on success, otherwise empty string
 * @throws IOException on Reader-Error
 */
private String fixKey() throws IOException {
    StringBuilder key = new StringBuilder();
    int lookaheadUsed = 0;
    char currentChar;

    // Find a char which ends the key (',' or '\n') or an entry field ('='):
    do {
        currentChar = (char) read();
        key.append(currentChar);
        lookaheadUsed++;
    } while ((currentChar != ',') && (currentChar != '\n') && (currentChar != '=')
            && (lookaheadUsed < BibtexParser.LOOKAHEAD));

    // Consumed one char too many; push it back into the reader and remove it from the key:
    unread(currentChar);
    key.deleteCharAt(key.length() - 1);

    // Restore if possible:
    switch (currentChar) {
    case '=':
        // Get entryfieldname, push it back and take rest as key
        key = key.reverse();

        boolean matchedAlpha = false;
        for (int i = 0; i < key.length(); i++) {
            currentChar = key.charAt(i);

            // Skip spaces:
            if (!matchedAlpha && (currentChar == ' ')) {
                continue;
            }
            matchedAlpha = true;

            // Begin of entryfieldname (e.g. author) -> push back:
            unread(currentChar);
            if ((currentChar == ' ') || (currentChar == '\n')) {

                /*
                 * found whitespaces, entryfieldname completed -> key in
                 * keybuffer, skip whitespaces
                 */
                StringBuilder newKey = new StringBuilder();
                for (int j = i; j < key.length(); j++) {
                    currentChar = key.charAt(j);
                    if (!Character.isWhitespace(currentChar)) {
                        newKey.append(currentChar);
                    }
                }

                // Finished, now reverse newKey and remove whitespaces:
                parserResult.addWarning(
                        Localization.lang("Line %0: Found corrupted BibTeX key.", String.valueOf(line)));
                key = newKey.reverse();
            }
        }
        break;

    case ',':
        parserResult.addWarning(Localization.lang("Line %0: Found corrupted BibTeX key (contains whitespaces).",
                String.valueOf(line)));
        break;

    case '\n':
        parserResult.addWarning(Localization.lang("Line %0: Found corrupted BibTeX key (comma missing).",
                String.valueOf(line)));
        break;

    default:

        // No more lookahead, give up:
        unreadBuffer(key);
        return "";
    }

    return removeWhitespaces(key).toString();
}

From source file: MSUmpire.SpectrumParser.mzXMLParser.java

private void ParseIndex() throws FileNotFoundException, IOException {
    TotalScan = 0;
    ScanIndex = new TreeMap<>();
    try (RandomAccessFile fileHandler = new RandomAccessFile(filename, "r")) {
        StringBuilder sb = new StringBuilder();

        String CurrentLine = "";
        long currentLastPt = fileHandler.length() - 1;
        boolean indexexist = false;
        int linecount = 0;
        while (!(CurrentLine.trim().startsWith("<index name=") | CurrentLine.trim().startsWith("</msRun>"))) {
            //Read backward
            for (long filePointer = currentLastPt; filePointer != -1; filePointer--) {
                fileHandler.seek(filePointer);
                int readByte = fileHandler.readByte();
                if (readByte == 0xA) {
                    if (filePointer == currentLastPt) {
                        continue;
                    } else {
                        currentLastPt = filePointer;
                        break;
                    }
                } else if (readByte == 0xD) {
                    if (filePointer == currentLastPt - 1) {
                        continue;
                    } else {
                        currentLastPt = filePointer;
                        break;
                    }
                }
                sb.append((char) readByte);
            }
            linecount++;
            CurrentLine = sb.reverse().toString();
            sb = new StringBuilder();

            if (CurrentLine.trim().startsWith("</index>")) {
                indexexist = true;
            }

            if (!indexexist && linecount > 10) {
                fileHandler.close();
                Logger.getRootLogger()
                        .debug("File : " + filename + " doesn't have index. the processing will stop.");
                System.exit(1);
            }

            if (CurrentLine.trim().startsWith("<offset id")) {
                int scanNo = Integer.parseInt(
                        CurrentLine.substring(CurrentLine.indexOf("<offset id=\"") + 12).split("\"")[0]);
                long index = (long) Long.parseLong(
                        CurrentLine.substring(CurrentLine.indexOf(">") + 1, CurrentLine.indexOf("</offset>")));
                if (index < 0) {
                    index = index + 2147483647l + 2147483648l;
                }
                if (ScanIndex.containsKey(scanNo + 1) && ScanIndex.get(scanNo + 1) == index) {
                    Logger.getRootLogger().debug("File : " + filename + " index is not correct, ScanNo:"
                            + scanNo + " and " + (scanNo + 1) + " have the same index");
                    Logger.getRootLogger().debug(
                            "Please use indexmzXML from  TPP package to fix incorrect index of the mzXML file.");
                    Logger.getRootLogger().debug("command: indexmzXML filename.mzXML");
                    System.exit(1);
                }
                ScanIndex.put(scanNo, index);
            } else if (CurrentLine.trim().startsWith("<indexOffset>")) {
                long IndexEnd = (long) Long.parseLong(CurrentLine.substring(
                        CurrentLine.indexOf("<indexOffset>") + 13, CurrentLine.indexOf("</indexOffset>")));
                if (IndexEnd < 0) {
                    IndexEnd = IndexEnd + 2147483647l + 2147483648l;
                }
                ScanIndex.put(Integer.MAX_VALUE, IndexEnd);
            }
        }
        TotalScan = ScanIndex.size();
        sb = null;
        fileHandler.close();
    }
}

From source file: com.peterbochs.PeterBochsDebugger.java

public String tail2(File file, int lines) {
    try {
        RandomAccessFile fileHandler = new RandomAccessFile(file, "r");
        long fileLength = file.length() - 1;
        StringBuilder sb = new StringBuilder();
        int line = 0;

        for (long filePointer = fileLength; filePointer != -1; filePointer--) {
            fileHandler.seek(filePointer);
            int readByte = fileHandler.readByte();

            if (readByte == 0xA) {
                line = line + 1;
                if (line == lines) {
                    if (filePointer == fileLength) {
                        continue;
                    } else {
                        break;
                    }
                }
            }
            sb.append((char) readByte);
        }

        sb.deleteCharAt(sb.length() - 1);
        String lastLine = sb.reverse().toString();
        return lastLine;
    } catch (Exception e) {
        return null;
    }
}
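Several of the examples above (LineFileChannel, mzXMLParser.ParseIndex, tail2) share the same idiom: read bytes from the end of a file toward the front, append them to a StringBuilder, and call reverse() at a line boundary so the text comes out in reading order. A simplified, hypothetical sketch of that idiom (TailSketch assumes a single-byte encoding and minimal error handling, and is not taken from any of the projects above):

import java.io.IOException;
import java.io.RandomAccessFile;

public class TailSketch {

    // Returns the last non-empty line of the file, or "" if none is found.
    static String readLastLine(String path) throws IOException {
        try (RandomAccessFile raf = new RandomAccessFile(path, "r")) {
            StringBuilder sb = new StringBuilder();
            for (long pos = raf.length() - 1; pos >= 0; pos--) {
                raf.seek(pos);
                int b = raf.readByte();
                if (b == '\n' && sb.length() > 0) {
                    break; // hit the previous line break; stop collecting
                }
                if (b != '\n' && b != '\r') {
                    sb.append((char) b); // characters accumulate back-to-front
                }
            }
            // The builder holds the line in reverse; one reverse() fixes the order.
            return sb.reverse().toString();
        }
    }

    public static void main(String[] args) throws IOException {
        System.out.println(readLastLine(args[0]));
    }
}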