Example usage for java.lang.Long.MIN_VALUE

List of usage examples for java.lang.Long.MIN_VALUE

Introduction

On this page you can find usage examples for java.lang.Long.MIN_VALUE.

Prototype

public static final long MIN_VALUE

Document

A constant holding the minimum value a long can have, -2^63.
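
Because the long range is asymmetric (-2^63 through 2^63-1), negating Long.MIN_VALUE or taking its absolute value silently overflows back to Long.MIN_VALUE itself. A minimal, self-contained sketch (not taken from any of the projects below) illustrating the constant and that pitfall:

public class LongMinValueDemo {
    public static void main(String[] args) {
        System.out.println(Long.MIN_VALUE);                  // -9223372036854775808
        System.out.println(Long.MIN_VALUE == (1L << 63));    // true: only the sign bit is set
        System.out.println(-Long.MIN_VALUE);                 // overflows: still -9223372036854775808
        System.out.println(Math.abs(Long.MIN_VALUE));        // also -9223372036854775808
    }
}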

Usage

From source file:com.cloudera.sqoop.mapreduce.db.DateSplitter.java

public List<InputSplit> split(Configuration conf, ResultSet results, String colName) throws SQLException {

    long minVal;
    long maxVal;

    int sqlDataType = results.getMetaData().getColumnType(1);
    minVal = resultSetColToLong(results, 1, sqlDataType);
    maxVal = resultSetColToLong(results, 2, sqlDataType);

    String lowClausePrefix = colName + " >= ";
    String highClausePrefix = colName + " < ";

    int numSplits = ConfigurationHelper.getConfNumMaps(conf);
    if (numSplits < 1) {
        numSplits = 1;
    }

    if (minVal == Long.MIN_VALUE && maxVal == Long.MIN_VALUE) {
        // The range of acceptable dates is NULL to NULL. Just create a single
        // split.
        List<InputSplit> splits = new ArrayList<InputSplit>();
        splits.add(
                new DataDrivenDBInputFormat.DataDrivenDBInputSplit(colName + " IS NULL", colName + " IS NULL"));
        return splits;
    }

    // Gather the split point integers
    List<Long> splitPoints = split(numSplits, minVal, maxVal);
    List<InputSplit> splits = new ArrayList<InputSplit>();

    // Turn the split points into a set of intervals.
    long start = splitPoints.get(0);
    Date startDate = longToDate(start, sqlDataType);
    if (sqlDataType == Types.TIMESTAMP) {
        // The lower bound's nanos value needs to match the actual lower-bound
        // nanos.
        try {
            ((java.sql.Timestamp) startDate).setNanos(results.getTimestamp(1).getNanos());
        } catch (NullPointerException npe) {
            // If the lower bound was NULL, we'll get an NPE; just ignore it and
            // don't set nanos.
        }
    }

    for (int i = 1; i < splitPoints.size(); i++) {
        long end = splitPoints.get(i);
        Date endDate = longToDate(end, sqlDataType);

        if (i == splitPoints.size() - 1) {
            if (sqlDataType == Types.TIMESTAMP) {
                // The upper bound's nanos value needs to match the actual
                // upper-bound nanos.
                try {
                    ((java.sql.Timestamp) endDate).setNanos(results.getTimestamp(2).getNanos());
                } catch (NullPointerException npe) {
                    // If the upper bound was NULL, we'll get an NPE; just ignore it
                    // and don't set nanos.
                }
            }
            // This is the last one; use a closed interval.
            splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                    lowClausePrefix + dateToString(startDate), colName + " <= " + dateToString(endDate)));
        } else {
            // Normal open-interval case.
            splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                    lowClausePrefix + dateToString(startDate), highClausePrefix + dateToString(endDate)));
        }

        start = end;
        startDate = endDate;
    }

    if (minVal == Long.MIN_VALUE || maxVal == Long.MIN_VALUE) {
        // Add an extra split to handle the null case that we saw.
        splits.add(
                new DataDrivenDBInputFormat.DataDrivenDBInputSplit(colName + " IS NULL", colName + " IS NULL"));
    }

    return splits;
}
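
The splitter never compares SQL NULL directly; resultSetColToLong maps a NULL minimum or maximum to Long.MIN_VALUE, which is why both the NULL-only and mixed-NULL branches test against that constant. A hypothetical sketch of that mapping, where the helper and class names are illustrative and not Sqoop's actual implementation:

import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;

// Illustrative only: shows the convention the splitter relies on, where
// Long.MIN_VALUE stands in for a NULL date/timestamp column.
final class NullableDateColumn {
    static long toLongOrMinValue(ResultSet rs, int col) throws SQLException {
        Timestamp ts = rs.getTimestamp(col);
        return ts == null ? Long.MIN_VALUE : ts.getTime(); // NULL maps to the sentinel
    }
}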

From source file:co.nubetech.apache.hadoop.DateSplitter.java

public List<InputSplit> split(Configuration conf, ResultSet results, String colName) throws SQLException {

    long minVal;
    long maxVal;

    int sqlDataType = results.getMetaData().getColumnType(1);
    minVal = resultSetColToLong(results, 1, sqlDataType);
    maxVal = resultSetColToLong(results, 2, sqlDataType);

    String lowClausePrefix = colName + " >= ";
    String highClausePrefix = colName + " < ";

    int numSplits = conf.getInt(MRJobConfig.NUM_MAPS, 1);
    if (numSplits < 1) {
        numSplits = 1;
    }

    if (minVal == Long.MIN_VALUE && maxVal == Long.MIN_VALUE) {
        // The range of acceptable dates is NULL to NULL. Just create a
        // single split.
        List<InputSplit> splits = new ArrayList<InputSplit>();
        splits.add(
                new DataDrivenDBInputFormat.DataDrivenDBInputSplit(colName + " IS NULL", colName + " IS NULL"));
        return splits;
    }

    // Gather the split point integers
    List<Long> splitPoints = split(numSplits, minVal, maxVal);
    List<InputSplit> splits = new ArrayList<InputSplit>();

    // Turn the split points into a set of intervals.
    long start = splitPoints.get(0);
    Date startDate = longToDate(start, sqlDataType);
    if (sqlDataType == Types.TIMESTAMP) {
        // The lower bound's nanos value needs to match the actual
        // lower-bound nanos.
        try {
            ((java.sql.Timestamp) startDate).setNanos(results.getTimestamp(1).getNanos());
        } catch (NullPointerException npe) {
            // If the lower bound was NULL, we'll get an NPE; just ignore it
            // and don't set nanos.
        }
    }

    for (int i = 1; i < splitPoints.size(); i++) {
        long end = splitPoints.get(i);
        Date endDate = longToDate(end, sqlDataType);

        if (i == splitPoints.size() - 1) {
            if (sqlDataType == Types.TIMESTAMP) {
                // The upper bound's nanos value needs to match the actual
                // upper-bound nanos.
                try {
                    ((java.sql.Timestamp) endDate).setNanos(results.getTimestamp(2).getNanos());
                } catch (NullPointerException npe) {
                    // If the upper bound was NULL, we'll get an NPE; just
                    // ignore it and don't set nanos.
                }
            }
            // This is the last one; use a closed interval.
            splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                    lowClausePrefix + dateToString(startDate), colName + " <= " + dateToString(endDate)));
        } else {
            // Normal open-interval case.
            splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                    lowClausePrefix + dateToString(startDate), highClausePrefix + dateToString(endDate)));
        }

        start = end;
        startDate = endDate;
    }

    if (minVal == Long.MIN_VALUE || maxVal == Long.MIN_VALUE) {
        // Add an extra split to handle the null case that we saw.
        splits.add(
                new DataDrivenDBInputFormat.DataDrivenDBInputSplit(colName + " IS NULL", colName + " IS NULL"));
    }

    return splits;
}

From source file:net.sf.sprockets.database.Cursors.java

/**
 * Get the long value in the first row and column.
 *
 * @param close true to close the cursor or false to leave it open
 * @return {@link Long#MIN_VALUE} if the cursor is empty
 */
public static long firstLong(Cursor cursor, boolean close) {
    long l = cursor.moveToFirst() ? cursor.getLong(0) : Long.MIN_VALUE;
    close(cursor, close);
    return l;
}
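
A hedged usage sketch of this utility: callers must treat Long.MIN_VALUE as the "empty cursor" marker rather than as a real value. The database handle and the items table are assumptions made for illustration and are not part of the Cursors class above.

import android.database.Cursor;
import android.database.sqlite.SQLiteDatabase;
import net.sf.sprockets.database.Cursors;

class FirstIdLookup {
    static boolean hasItems(SQLiteDatabase db) {
        Cursor cursor = db.rawQuery("SELECT _id FROM items LIMIT 1", null);
        // firstLong returns Long.MIN_VALUE for an empty cursor; passing true also closes it
        return Cursors.firstLong(cursor, true) != Long.MIN_VALUE;
    }
}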

From source file:de.tor.tribes.types.TargetInformation.java

public void updateAttackInfo() {
    snobs = 0;
    fakes = 0;
    first = Long.MAX_VALUE;
    last = Long.MIN_VALUE;
    for (TimedAttack a : getAttacks()) {
        if (a.isPossibleFake()) {
            fakes++;
        } else if (a.isPossibleSnob()) {
            snobs++;
        }
        if (a.getlArriveTime() < first) {
            first = a.getlArriveTime();
        }
        if (a.getlArriveTime() > last) {
            last = a.getlArriveTime();
        }
    }
    logger.debug(target.getCoordAsString() + " found " + snobs + " snobs and " + fakes + " fakes");
}
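
Here Long.MAX_VALUE and Long.MIN_VALUE seed the running minimum and maximum arrival times so that the first attack always replaces them. A generic, standalone sketch of that seeding idiom (the method name is illustrative, not part of the class above):

static long latestArrival(long[] arriveTimes) {
    long last = Long.MIN_VALUE; // any real arrival time is greater than this seed
    for (long t : arriveTimes) {
        if (t > last) {
            last = t;
        }
    }
    return last; // Long.MIN_VALUE means no arrivals were seen
}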

From source file:co.nubetech.hiho.mapreduce.lib.db.apache.DateSplitter.java

public List<InputSplit> split(Configuration conf, ResultSet results, String colName) throws SQLException {

    long minVal;
    long maxVal;

    int sqlDataType = results.getMetaData().getColumnType(1);
    minVal = resultSetColToLong(results, 1, sqlDataType);
    maxVal = resultSetColToLong(results, 2, sqlDataType);

    String lowClausePrefix = colName + " >= ";
    String highClausePrefix = colName + " < ";

    int numSplits = conf.getInt(MRJobConfig.NUM_MAPS, 1);
    if (numSplits < 1) {
        numSplits = 1;
    }

    if (minVal == Long.MIN_VALUE && maxVal == Long.MIN_VALUE) {
        // The range of acceptable dates is NULL to NULL. Just create a single split.
        List<InputSplit> splits = new ArrayList<InputSplit>();
        splits.add(
                new DataDrivenDBInputFormat.DataDrivenDBInputSplit(colName + " IS NULL", colName + " IS NULL"));
        return splits;
    }

    // Gather the split point integers
    List<Long> splitPoints = split(numSplits, minVal, maxVal);
    List<InputSplit> splits = new ArrayList<InputSplit>();

    // Turn the split points into a set of intervals.
    long start = splitPoints.get(0);
    Date startDate = longToDate(start, sqlDataType);
    if (sqlDataType == Types.TIMESTAMP) {
        // The lower bound's nanos value needs to match the actual lower-bound nanos.
        try {
            ((java.sql.Timestamp) startDate).setNanos(results.getTimestamp(1).getNanos());
        } catch (NullPointerException npe) {
            // If the lower bound was NULL, we'll get an NPE; just ignore it and don't set nanos.
        }
    }

    for (int i = 1; i < splitPoints.size(); i++) {
        long end = splitPoints.get(i);
        Date endDate = longToDate(end, sqlDataType);

        if (i == splitPoints.size() - 1) {
            if (sqlDataType == Types.TIMESTAMP) {
                // The upper bound's nanos value needs to match the actual upper-bound nanos.
                try {
                    ((java.sql.Timestamp) endDate).setNanos(results.getTimestamp(2).getNanos());
                } catch (NullPointerException npe) {
                    // If the upper bound was NULL, we'll get an NPE; just ignore it and don't set nanos.
                }
            }
            // This is the last one; use a closed interval.
            splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                    lowClausePrefix + dateToString(startDate), colName + " <= " + dateToString(endDate)));
        } else {
            // Normal open-interval case.
            splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
                    lowClausePrefix + dateToString(startDate), highClausePrefix + dateToString(endDate)));
        }

        start = end;
        startDate = endDate;
    }

    if (minVal == Long.MIN_VALUE || maxVal == Long.MIN_VALUE) {
        // Add an extra split to handle the null case that we saw.
        splits.add(
                new DataDrivenDBInputFormat.DataDrivenDBInputSplit(colName + " IS NULL", colName + " IS NULL"));
    }

    return splits;
}

From source file:org.apache.camel.component.file.strategy.FileChangedExclusiveReadLockStrategy.java

public boolean acquireExclusiveReadLock(GenericFileOperations<File> operations, GenericFile<File> file,
        Exchange exchange) throws Exception {
    File target = new File(file.getAbsoluteFilePath());
    boolean exclusive = false;

    if (LOG.isTraceEnabled()) {
        LOG.trace("Waiting for exclusive read lock to file: " + file);
    }

    try {
        long lastModified = Long.MIN_VALUE;
        long length = Long.MIN_VALUE;
        StopWatch watch = new StopWatch();

        while (!exclusive) {
            // timeout check
            if (timeout > 0) {
                long delta = watch.taken();
                if (delta > timeout) {
                    LOG.warn("Cannot acquire read lock within " + timeout + " millis. Will skip the file: "
                            + file);
                    // we could not get the lock within the timeout period, so return false
                    return false;
                }
            }

            long newLastModified = target.lastModified();
            long newLength = target.length();

            if (LOG.isTraceEnabled()) {
                LOG.trace(
                        "Previous last modified: " + lastModified + ", new last modified: " + newLastModified);
                LOG.trace("Previous length: " + length + ", new length: " + newLength);
            }

            if (newLastModified == lastModified && newLength == length) {
                // let super handle the last part of acquiring the lock, now that the file
                // is no longer being copied, as its length and modified time are stable
                exclusive = super.acquireExclusiveReadLock(operations, file, exchange);
            } else {
                // set new base file change information
                lastModified = newLastModified;
                length = newLength;

                boolean interrupted = sleep();
                if (interrupted) {
                    // we were interrupted while sleeping, we are likely being shutdown so return false
                    return false;
                }
            }
        }
    } catch (IOException e) {
        // must handle IOException as some apps on Windows etc. will still somehow hold a lock to a file
        // such as AntiVirus or MS Office that has special locks for its supported files
        if (timeout == 0) {
            // if not using timeout, then we cant retry, so rethrow
            throw e;
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("Cannot acquire read lock. Will try again.", e);
        }
        boolean interrupted = sleep();
        if (interrupted) {
            // we were interrupted while sleeping, we are likely being shutdown so return false
            return false;
        }
    }

    return exclusive;
}
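
lastModified and length start at Long.MIN_VALUE so the first pass through the loop can never match a real observation, forcing at least two polls before the lock is granted; the FTP variant below uses the same trick. A stripped-down sketch of that polling idiom (waitUntilStable is an illustrative name, not a Camel API):

import java.io.File;

final class StableFileCheck {
    static boolean waitUntilStable(File target, long timeoutMillis) throws InterruptedException {
        long previousLength = Long.MIN_VALUE; // "no observation yet": can never equal a real length
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (System.currentTimeMillis() < deadline) {
            long currentLength = target.length();
            if (currentLength == previousLength) {
                return true; // two consecutive polls agree; assume the writer is done
            }
            previousLength = currentLength;
            Thread.sleep(500);
        }
        return false; // never became stable within the timeout
    }
}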

From source file:business.security.control.OwmClientTest.java

private static void assertForecastWeatherData(ForecastWeatherData forecast) {
    assertNotNull(forecast);
    assertFalse(forecast.getDateTime() == Long.MIN_VALUE);
    assertFalse(forecast.getCalcDateTime() == Long.MIN_VALUE);
    assertTrue(forecast.hasMain());
}
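
The test treats Long.MIN_VALUE as the "field was never populated" marker, so a parsed forecast must not still carry it. A hedged illustration of that convention (WeatherReading is a made-up class for this sketch, not the library's ForecastWeatherData):

class WeatherReading {
    private long dateTime = Long.MIN_VALUE; // not parsed yet

    void setDateTime(long epochSeconds) {
        this.dateTime = epochSeconds;
    }

    boolean hasDateTime() {
        return dateTime != Long.MIN_VALUE;
    }
}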

From source file:org.apache.camel.component.file.remote.strategy.FtpChangedExclusiveReadLockStrategy.java

public boolean acquireExclusiveReadLock(GenericFileOperations<FTPFile> operations, GenericFile<FTPFile> file,
        Exchange exchange) throws Exception {
    boolean exclusive = false;

    LOG.trace("Waiting for exclusive read lock to file: " + file);

    long lastModified = Long.MIN_VALUE;
    long length = Long.MIN_VALUE;
    StopWatch watch = new StopWatch();

    while (!exclusive) {
        // timeout check
        if (timeout > 0) {
            long delta = watch.taken();
            if (delta > timeout) {
                CamelLogger.log(LOG, readLockLoggingLevel,
                        "Cannot acquire read lock within " + timeout + " millis. Will skip the file: " + file);
                // we could not get the lock within the timeout period, so return false
                return false;
            }
        }

        long newLastModified = 0;
        long newLength = 0;
        List<FTPFile> files;
        if (fastExistsCheck) {
            // use the absolute file path to pick up only the file we want to check; this avoids expensive
            // list operations if we have a lot of files in the directory
            LOG.trace("Using fast exists to update file information for {}", file);
            files = operations.listFiles(file.getAbsoluteFilePath());
        } else {
            LOG.trace(
                    "Using full directory listing to update file information for {}. Consider enabling fastExistsCheck option.",
                    file);
            // fast option not enabled, so list the directory and filter the file name
            files = operations.listFiles(file.getParent());
        }
        LOG.trace("List files {} found {} files", file.getAbsoluteFilePath(), files.size());
        for (FTPFile f : files) {
            if (f.getName().equals(file.getFileNameOnly())) {
                newLastModified = f.getTimestamp().getTimeInMillis();
                newLength = f.getSize();
            }
        }

        LOG.trace("Previous last modified: " + lastModified + ", new last modified: " + newLastModified);
        LOG.trace("Previous length: " + length + ", new length: " + newLength);

        if (length >= minLength && (newLastModified == lastModified && newLength == length)) {
            LOG.trace("Read lock acquired.");
            exclusive = true;
        } else {
            // set new base file change information
            lastModified = newLastModified;
            length = newLength;

            boolean interrupted = sleep();
            if (interrupted) {
                // we were interrupted while sleeping, we are likely being shutdown so return false
                return false;
            }
        }
    }

    return exclusive;
}

From source file:com.blackberry.bdp.common.versioned.ZkVersionedTest.java

private TestObject getTestObject() {
    TestObject testObject = new TestObject(curator, "/testObject");

    testObject.setLongObject(Long.MIN_VALUE);
    testObject.setStringObject("String");

    Pojo pojo1 = new Pojo();
    Pojo pojo2 = new Pojo();

    List<Pojo> pojos = new ArrayList<>();
    pojos.add(pojo1);
    pojos.add(pojo2);
    testObject.setPojoList(pojos);
    return testObject;
}
