Example usage for org.apache.hadoop.conf Configuration getLong

Introduction

On this page you can find usage examples for org.apache.hadoop.conf.Configuration.getLong.

Prototype

public long getLong(String name, long defaultValue) 

Document

Get the value of the name property as a long. If no such property exists, the provided default value is returned; if the specified value is not a valid long, an error is thrown.
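
A minimal, self-contained sketch of these semantics (the property names below are invented for illustration):

import org.apache.hadoop.conf.Configuration;

public class GetLongDemo {

    public static void main(String[] args) {
        // Pass false to skip loading the default resources
        // (core-default.xml, core-site.xml).
        Configuration conf = new Configuration(false);
        conf.set("demo.timeout.ms", "30000");

        // Key present: the stored string is parsed as a long.
        long timeout = conf.getLong("demo.timeout.ms", 1000L); // 30000

        // Key absent: the supplied default is returned.
        long retries = conf.getLong("demo.retry.count", 3L); // 3

        System.out.println(timeout + " " + retries);
    }
}

Note that when a property is present but not a parsable long, getLong propagates a NumberFormatException rather than returning the default; the com.linkedin.drelephant.util.Utils example at the bottom of this page guards against exactly that case.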

Usage

From source file: com.inclouds.hbase.utils.ConfigHelper.java

License: Open Source License

/**
 * Gets the cache configuration.
 *
 * @param cfg the Hadoop configuration
 * @return the cache configuration
 */
public static CacheConfiguration getCacheConfiguration(Configuration cfg) {

    CacheConfiguration ccfg = new CacheConfiguration();
    String value = cfg.get(CacheConfiguration.COMPRESSION, "none");
    //TODO not safe: valueOf throws IllegalArgumentException for unrecognized codec names
    ccfg.setCodecType(CodecType.valueOf(value.toUpperCase()));
    ccfg.setCompressionThreshold(cfg.getInt(CacheConfiguration.COMPRESSION_THRESHOLD, 100));
    ccfg.setDefaultExpireTimeout(cfg.getInt(CacheConfiguration.DEFAULT_EXPIRE_TIMEOUT, 0));
    ccfg.setEvictOnExpireFirst(cfg.getBoolean(CacheConfiguration.EVICT_EXPIRED_FIRST, true));
    ccfg.setCandidateListSize(cfg.getInt(CacheConfiguration.EVICTION_LIST_SIZE, 30));
    ccfg.setEvictionPolicy(cfg.get(CacheConfiguration.EVICTION_POLICY, "lru"));
    ccfg.setHighWatermark(cfg.getFloat(CacheConfiguration.HIGH_WATERMARK, 0.95f));
    ccfg.setLowWatermark(cfg.getFloat(CacheConfiguration.LOW_WATERMARK, 0.90f));

    value = cfg.get(CacheConfiguration.KEY_CLASSNAME);
    if (value != null) {
        ccfg.setKeyClassName(value);
    }

    value = cfg.get(CacheConfiguration.VALUE_CLASSNAME);
    if (value != null) {
        ccfg.setValueClassName(value);
    }

    ccfg.setMaxConcurrentReaders(cfg.getInt(CacheConfiguration.MAX_CONCURRENT_READERS, 0));
    ccfg.setMaxQueryProcessors(cfg.getInt(CacheConfiguration.MAX_QUERY_PROCESSORS, 0));

    ccfg.setMaxEntries(cfg.getLong(CacheConfiguration.MAX_ENTRIES, 0));
    value = cfg.get(CacheConfiguration.MAX_GLOBAL_MEMORY);
    if (value != null) {
        ccfg.setMaxGlobalMemory(Long.parseLong(value));
    } else {
        LOG.warn("[row-cache] Max global memory is not specified.");
    }

    value = cfg.get(CacheConfiguration.MAX_MEMORY);
    if (value != null) {
        ccfg.setMaxMemory(Long.parseLong(value));
    } else {
        LOG.info("[row-cache] Max memory is not specified.");
    }

    ccfg.setCacheName(cfg.get(CacheConfiguration.NAME, "row-cache"));

    ccfg.setCacheNamespace(cfg.get(CacheConfiguration.NAMESPACE, "default"));

    ccfg.setSerDeBufferSize(cfg.getInt(RowCache.ROWCACHE_BUFFER_SIZE, RowCache.DEFAULT_BUFFER_SIZE));

    // TODO bucket number must be calculated
    ccfg.setBucketNumber(cfg.getInt(CacheConfiguration.TOTAL_BUCKETS, 1000000));

    // Done with common cache configurations
    value = cfg.get(DiskStoreConfiguration.PERSISTENCE, "none");
    if (value.equals("none")) {
        // We are done
        return ccfg;
    }
    DiskStoreConfiguration dcfg = loadDiskStoreCfg(cfg, value);

    ccfg.setDataStoreConfiguration(dcfg);

    return ccfg;

}

From source file: com.inclouds.hbase.utils.ConfigHelper.java

License: Open Source License

/**
 * Loads the disk store configuration.
 *
 * @param cfg the Hadoop configuration
 * @param value the persistence mode name
 * @return the disk store configuration
 */
private static DiskStoreConfiguration loadDiskStoreCfg(Configuration cfg, String value) {
    DiskStoreConfiguration diskCfg = null;
    PersistenceMode mode = PersistenceMode.valueOf(value);
    switch (mode) {
    case ONDEMAND:
    case SNAPSHOT:
        diskCfg = new RawFSConfiguration();
        diskCfg.setDiskStoreImplementation(RawFSStore.class);
        diskCfg.setStoreClassName(RawFSStore.class.getName());

        break;

    }
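    // Note: any other persistence mode leaves diskCfg null, and the
    // setPersistenceMode call below would then throw a NullPointerException.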

    diskCfg.setPersistenceMode(mode);

    String val = cfg.get(DiskStoreConfiguration.DATA_DIRS);
    if (val == null) {
        LOG.fatal("\'" + DiskStoreConfiguration.DATA_DIRS + "\' is not specified. Aborted.");
        throw new RuntimeException("\'" + DiskStoreConfiguration.DATA_DIRS + "\' is not specified. Aborted.");
    }
    diskCfg.setDbDataStoreRoots(val.split(","));

    diskCfg.setStoreName(cfg.get(DiskStoreConfiguration.NAME, "default")); // DB name/ Subfolder in a root

    diskCfg.setDbSnapshotInterval(cfg.getLong(DiskStoreConfiguration.SNAPSHOT_INTERVAL, 3600000));

    diskCfg.setDbCompressionType(
            CodecType.valueOf(cfg.get(DiskStoreConfiguration.COMPRESSION, "none").toUpperCase()));

    diskCfg.setDiskStoreMaxSize(cfg.getLong(DiskStoreConfiguration.STORE_SIZE_LIMIT, 0));

    diskCfg.setDiskStoreEvictionPolicy(
            EvictionPolicy.valueOf(cfg.get(DiskStoreConfiguration.EVICTION_POLICY, "none")));

    diskCfg.setDiskStoreEvictionHighWatermark(
            cfg.getFloat(DiskStoreConfiguration.EVICTION_HIGHWATERMARK, 0.98f));

    diskCfg.setDiskStoreEvictionLowWatermak(cfg.getFloat(DiskStoreConfiguration.EVICTION_LOWWATERMARK, 0.95f));

    diskCfg = loadSpecific(cfg, diskCfg);

    return diskCfg;
}

From source file: com.inmobi.conduit.distcp.tools.util.DistCpUtils.java

License: Apache License

/**
 * Utility to retrieve a specified key from a Configuration, asserting
 * that the key was found.
 * @param configuration The Configuration in which the key is sought.
 * @param label The key being sought.
 * @return Long value of the key, or -1 if absent and assertions are disabled.
 */
public static long getLong(Configuration configuration, String label) {
    long value = configuration.getLong(label, -1);
    assert value >= 0 : "Couldn't find " + label;
    return value;
}
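
Note that this helper relies on a Java assert, which is a no-op unless the JVM is started with assertions enabled (-ea); with assertions off, a missing key silently yields -1.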

From source file: com.junz.hadoop.custom.SytsLogInputFormat.java

License: Apache License

public List<InputSplit> getSplits(JobContext context) throws IOException, InterruptedException {
    Configuration job = context.getConfiguration();
    List<InputSplit> splits = new ArrayList<InputSplit>();

    try {
        long startId = job.getLong(START_ID_PROPERTY, 1);
        long numberOfIds = job.getLong(NUMBER_LOG_PROPERTY, 1);
        int groups = job.getInt(NUMBER_MAP_PROPERTY, 1);
        long groupSize = (numberOfIds / groups);

        // Split the rows into n chunks and adjust the last chunk
        // accordingly.
        for (int i = 0; i < groups; i++) {
            DBInputSplit split;

            if ((i + 1) == groups)
                split = new DBInputSplit(i * groupSize + startId, numberOfIds + startId);
            else
                split = new DBInputSplit(i * groupSize + startId, (i * groupSize) + groupSize + startId);

            splits.add(split);
        }

        return splits;
    } catch (Exception e) {
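        // Note: wrapping only the message discards the cause; new IOException(e)
        // would preserve the original stack trace.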
        throw new IOException(e.getMessage());
    }
}
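
For example, with startId = 1, numberOfIds = 10, and groups = 3, groupSize is 3 and the splits cover id ranges [1, 4), [4, 7), and [7, 11); the final split absorbs the remainder.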

From source file: com.kenshoo.integrations.plugins.connectors.GCSConnector.java

License: Apache License

public long getTokenExpirationDate(Configuration conf) {
    long accessTokenCreationTime = conf.getLong(PROPERTY_KEY_ACCESS_TOKEN_CREATION_TIME, 0L);
    if (accessTokenCreationTime == 0L) {
        return 0L;
    }
    long expiresAfter = conf.getLong(PROPERTY_KEY_ACCESS_TOKEN_EXPIRES_AFTER, 0L);
    if (expiresAfter == 0L) {
        return 0L;
    }
    return accessTokenCreationTime + expiresAfter;
}
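
Both properties use 0L as a "not configured" sentinel: if either is missing, the method returns 0L (no known expiration) instead of a misleading epoch-relative timestamp.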

From source file: com.koda.integ.hbase.blockcache.OffHeapBlockCache.java

License: Open Source License

/**
 * Instantiates a new off heap block cache.
 *
 * @param conf the configuration
 */
public OffHeapBlockCache(Configuration conf) {
    this.blockSize = conf.getInt("hbase.offheapcache.minblocksize", HColumnDescriptor.DEFAULT_BLOCKSIZE);

    blockCacheMaxSize = conf.getLong(BLOCK_CACHE_MEMORY_SIZE, 0L);
    if (blockCacheMaxSize == 0L) {
        throw new RuntimeException("off heap block cache size is not defined");
    }
    nativeBufferSize = conf.getInt(BLOCK_CACHE_BUFFER_SIZE, DEFAULT_BLOCK_CACH_BUFFER_SIZE);
    extCacheMaxSize = conf.getLong(BLOCK_CACHE_EXT_STORAGE_MEMORY_SIZE, (long) (0.1 * blockCacheMaxSize));
    youngGenFactor = conf.getFloat(BLOCK_CACHE_YOUNG_GEN_FACTOR, DEFAULT_YOUNG_FACTOR);
    overflowExtEnabled = conf.getBoolean(BLOCK_CACHE_OVERFLOW_TO_EXT_STORAGE_ENABLED, false);
    isPersistent = conf.getBoolean(BLOCK_CACHE_PERSISTENT, false);
    if (isPersistent) {
        // Check if we have already set a CacheableDeserializer.
        // We need to set the deserializer before starting the cache,
        // because blocks may already be cached at start-up and the
        // first get before any put would otherwise fail.
        if (CacheableSerializer.getDeserializer() == null) {
            CacheableSerializer.setHFileDeserializer();
        } else {
            LOG.info("CacheableSerializer is already set.");
        }
    }
    isSnapshotsEnabled = conf.getBoolean(BLOCK_CACHE_SNAPSHOTS, false);
    snapshotsInterval = conf.getInt(BLOCK_CACHE_SNAPSHOT_INTERVAL, 600) * 1000;

    String[] dataRoots = getDataRoots(conf.get(BLOCK_CACHE_DATA_ROOTS));

    if (isPersistent && dataRoots == null) {
        dataRoots = getHDFSRoots(conf);

        if (dataRoots == null) {
            LOG.warn("Data roots are not defined. Set persistent mode to false.");
            isPersistent = false;
        }
    }

    adjustMaxMemory();

    /** Possible values: none, snappy, gzip, lz4, lz4hc */
    // TODO: LZ4 is not supported on all platforms
    // TODO: default compression is LZ4?
    CodecType codec = CodecType.LZ4;

    String value = conf.get(BLOCK_CACHE_COMPRESSION);
    if (value != null) {
        codec = CodecType.valueOf(value.toUpperCase());
    }

    try {

        CacheConfiguration cacheCfg = new CacheConfiguration();
        cacheCfg.setCacheName("block-cache");

        cacheCfg.setSerDeBufferSize(nativeBufferSize);

        cacheCfg.setMaxMemory(blockCacheMaxSize);
        cacheCfg.setCodecType(codec);
        String evictionPolicy = conf.get(BLOCK_CACHE_EVICTION, "LRU").toUpperCase();
        cacheCfg.setEvictionPolicy(evictionPolicy);
        // Set this only for LRU2Q
        cacheCfg.setLRU2QInsertPoint(youngGenFactor);
        setBucketNumber(cacheCfg);

        CacheManager manager = CacheManager.getInstance();

        if (overflowExtEnabled) {
            LOG.info("Overflow to external storage is enabled.");
            // External storage handle cache
            CacheConfiguration extStorageCfg = new CacheConfiguration();
            extStorageCfg.setCacheName("extStorageCache");
            extStorageCfg.setMaxMemory(extCacheMaxSize);
            extStorageCfg.setEvictionPolicy(EvictionPolicy.FIFO.toString());
            extStorageCfg.setSerDeBufferSize(4096);// small
            extStorageCfg.setPreevictionListSize(40);
            extStorageCfg.setKeyClassName(byte[].class.getName());
            extStorageCfg.setValueClassName(byte[].class.getName());
            // calculate bucket number
            // 50 is estimate of a record size
            int buckets = (extCacheMaxSize / EXT_STORAGE_REF_SIZE) > Integer.MAX_VALUE ? Integer.MAX_VALUE - 1
                    : (int) (extCacheMaxSize / EXT_STORAGE_REF_SIZE);
            extStorageCfg.setBucketNumber(buckets);
            if (isPersistent) {
                // TODO - this in-memory cache has the same data dirs as the major cache.
                RawFSConfiguration storeConfig = new RawFSConfiguration();

                storeConfig.setStoreName(extStorageCfg.getCacheName());

                storeConfig.setDiskStoreImplementation(RawFSStore.class);

                storeConfig.setDbDataStoreRoots(dataRoots);
                storeConfig.setPersistenceMode(PersistenceMode.ONDEMAND);
                storeConfig.setDbCompressionType(CodecType.LZ4);
                storeConfig.setDbSnapshotInterval(15);
                //storeConfig.setTotalWorkerThreads(Runtime.getRuntime().availableProcessors() /2);
                //storeConfig.setTotalIOThreads(1);          
                extStorageCfg.setDataStoreConfiguration(storeConfig);
            }

            // This will initiate the load of stored cache data
            // if persistence is enabled
            extStorageCache = manager.getCache(extStorageCfg, null);
            // Initialize external storage
            storage = ExtStorageManager.getInstance().getStorage(conf, extStorageCache);
        } else {
            LOG.info("Overflow to external storage is disabled.");
            if (isPersistent) {
                RawFSConfiguration storeConfig = new RawFSConfiguration();

                storeConfig.setStoreName(cacheCfg.getCacheName());

                storeConfig.setDiskStoreImplementation(RawFSStore.class);

                storeConfig.setDbDataStoreRoots(dataRoots);
                storeConfig.setPersistenceMode(PersistenceMode.ONDEMAND);
                storeConfig.setDbSnapshotInterval(15);
                cacheCfg.setDataStoreConfiguration(storeConfig);
                // Load cache data
                offHeapCache = manager.getCache(cacheCfg, null);
            }
        }

        if (offHeapCache == null) {
            offHeapCache = manager.getCache(cacheCfg, null);
        }

    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    boolean onHeapEnabled = conf.getBoolean(BLOCK_CACHE_ONHEAP_ENABLED, true);
    if (onHeapEnabled) {
        long onHeapCacheSize = calculateOnHeapCacheSize(conf);
        if (onHeapCacheSize > 0) {
            onHeapCache = new OnHeapBlockCache(onHeapCacheSize, blockSize, conf);
            LOG.info("Created fast on-heap cache. Size=" + onHeapCacheSize);
        } else {
            LOG.warn("Conflicting configuration options. On-heap cache is disabled.");
        }
    }

    this.stats = new CacheStats();
    this.onHeapStats = new CacheStats();
    this.offHeapStats = new CacheStats();
    this.extStats = new CacheStats();
    this.extRefStats = new CacheStats();

    EvictionListener listener = new EvictionListener() {

        @Override
        public void evicted(long ptr, Reason reason, long nanoTime) {
            stats.evict();
            stats.evicted();

        }

    };

    offHeapCache.setEvictionListener(listener);
    // Cacheable serializer registration

    CacheableSerializer serde = new CacheableSerializer();
    offHeapCache.getSerDe().registerSerializer(serde);

    //    if( extStorageCache != null){
    //      //StorageHandleSerializer serde2 = new StorageHandleSerializer();
    //      //  SmallByteArraySerializer serde2 = new SmallByteArraySerializer();
    //      //   extStorageCache.getSerDe().registerSerializer(serde2);
    //    }
    // Start statistics thread
    statThread = new StatisticsThread(this);
    statThread.start();

}
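
One detail worth noting above: the default argument to getLong is evaluated at the call site like any expression, so one setting's default can be derived from another, as with extCacheMaxSize defaulting to 10% of blockCacheMaxSize.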

From source file: com.koda.integ.hbase.util.ConfigHelper.java

License: Open Source License

/**
 * Gets the cache configuration.
 *
 * @param cfg the Hadoop configuration
 * @return the cache configuration
 */
public static CacheConfiguration getCacheConfiguration(Configuration cfg) {

    CacheConfiguration ccfg = new CacheConfiguration();
    String value = cfg.get(CacheConfiguration.COMPRESSION, "none");
    //TODO not safe: valueOf throws IllegalArgumentException for unrecognized codec names
    ccfg.setCodecType(CodecType.valueOf(value.toUpperCase()));
    ccfg.setCompressionThreshold(cfg.getInt(CacheConfiguration.COMPRESSION_THRESHOLD, 100));
    ccfg.setDefaultExpireTimeout(cfg.getInt(CacheConfiguration.DEFAULT_EXPIRE_TIMEOUT, 0));
    ccfg.setEvictOnExpireFirst(cfg.getBoolean(CacheConfiguration.EVICT_EXPIRED_FIRST, true));
    ccfg.setCandidateListSize(cfg.getInt(CacheConfiguration.EVICTION_LIST_SIZE, 30));
    ccfg.setEvictionPolicy(cfg.get(CacheConfiguration.EVICTION_POLICY, "lru"));
    ccfg.setHighWatermark(cfg.getFloat(CacheConfiguration.HIGH_WATERMARK, 0.98f));
    ccfg.setLowWatermark(cfg.getFloat(CacheConfiguration.LOW_WATERMARK, 0.95f));

    value = cfg.get(CacheConfiguration.KEY_CLASSNAME);
    if (value != null) {
        ccfg.setKeyClassName(value);
    }

    value = cfg.get(CacheConfiguration.VALUE_CLASSNAME);
    if (value != null) {
        ccfg.setValueClassName(value);
    }

    ccfg.setMaxConcurrentReaders(cfg.getInt(CacheConfiguration.MAX_CONCURRENT_READERS, 0));
    ccfg.setMaxQueryProcessors(cfg.getInt(CacheConfiguration.MAX_QUERY_PROCESSORS, 0));

    ccfg.setMaxEntries(cfg.getLong(CacheConfiguration.MAX_ENTRIES, 0));
    value = cfg.get(CacheConfiguration.MAX_GLOBAL_MEMORY);
    if (value != null) {
        ccfg.setMaxGlobalMemory(Long.parseLong(value));
    } else {
        LOG.info(" Max global memory is not specified.");
    }

    value = cfg.get(CacheConfiguration.MAX_MEMORY);
    if (value != null) {
        ccfg.setMaxMemory(Long.parseLong(value));
    } else {
        LOG.info(" Max memory is not specified.");
    }

    ccfg.setCacheName(cfg.get(CacheConfiguration.NAME, "default"));

    ccfg.setCacheNamespace(cfg.get(CacheConfiguration.NAMESPACE, "default"));

    ccfg.setSerDeBufferSize(cfg.getInt(CacheConfiguration.SERDE_BUFSIZE, 4 * 1024 * 1024));

    // TODO bucket number must be calculated
    ccfg.setBucketNumber(cfg.getInt(CacheConfiguration.TOTAL_BUCKETS, 1000000));

    // Done with common cache configurations
    value = cfg.get(DiskStoreConfiguration.PERSISTENCE, "none");
    if (value.equals("none")) {
        // We are done
        return ccfg;
    }
    DiskStoreConfiguration dcfg = loadDiskStoreCfg(cfg, value);

    ccfg.setDataStoreConfiguration(dcfg);

    return ccfg;

}

From source file: com.koda.integ.hbase.util.ConfigHelper.java

License: Open Source License

/**
 * Loads the disk store configuration.
 *
 * @param cfg the Hadoop configuration
 * @param value the persistence mode name
 * @return the disk store configuration
 */
private static DiskStoreConfiguration loadDiskStoreCfg(Configuration cfg, String value) {
    DiskStoreConfiguration diskCfg = null;
    PersistenceMode mode = PersistenceMode.valueOf(value);
    switch (mode) {
    case ONDEMAND:
    case SNAPSHOT:
        diskCfg = new RawFSConfiguration();
        diskCfg.setDiskStoreImplementation(RawFSStore.class);
        diskCfg.setStoreClassName(RawFSStore.class.getName());

        break;
    //         case WRITE_AROUND:
    //         case WRITE_BEHIND:
    //         case WRITE_THROUGH:
    //            diskCfg = new LevelDBConfiguration();
    //            diskCfg.setDiskStoreImplementation(LevelDBStore.class);
    //            diskCfg.setStoreClassName(LevelDBStore.class.getName());
    //            break;
    }
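    // Note: any other persistence mode leaves diskCfg null, and the
    // setPersistenceMode call below would then throw a NullPointerException.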

    diskCfg.setPersistenceMode(mode);

    String val = cfg.get(DiskStoreConfiguration.DATA_DIRS);
    if (val == null) {
        LOG.fatal("\'" + DiskStoreConfiguration.DATA_DIRS + "\' is not specified. Aborted.");
        throw new RuntimeException("\'" + DiskStoreConfiguration.DATA_DIRS + "\' is not specified. Aborted.");
    }
    diskCfg.setDbDataStoreRoots(val.split(","));

    diskCfg.setStoreName(cfg.get(DiskStoreConfiguration.NAME, "default")); // DB name/ Subfolder in a root

    diskCfg.setDbSnapshotInterval(cfg.getLong(DiskStoreConfiguration.SNAPSHOT_INTERVAL, 3600000));

    diskCfg.setDbCompressionType(
            CodecType.valueOf(cfg.get(DiskStoreConfiguration.COMPRESSION, "none").toUpperCase()));

    diskCfg.setDiskStoreMaxSize(cfg.getLong(DiskStoreConfiguration.STORE_SIZE_LIMIT, 0));

    diskCfg.setDiskStoreEvictionPolicy(
            EvictionPolicy.valueOf(cfg.get(DiskStoreConfiguration.EVICTION_POLICY, "none")));

    diskCfg.setDiskStoreEvictionHighWatermark(
            cfg.getFloat(DiskStoreConfiguration.EVICTION_HIGHWATERMARK, 0.98f));

    diskCfg.setDiskStoreEvictionLowWatermak(cfg.getFloat(DiskStoreConfiguration.EVICTION_LOWWATERMARK, 0.95f));

    diskCfg = loadSpecific(cfg, diskCfg);

    return diskCfg;
}

From source file: com.linkedin.cubert.plan.physical.CubertMapper.java

License: Open Source License

@Override
public void run(Context context) throws IOException, InterruptedException {
    print.f("Mapper init  ----------------------------------");
    Configuration conf = context.getConfiguration();

    FileCache.initialize(conf);
    PhaseContext.create(context, conf);

    ObjectMapper mapper = new ObjectMapper();
    // read the map configuration
    ArrayNode mapCommands = mapper.readValue(conf.get(CubertStrings.JSON_MAP_OPERATOR_LIST), ArrayNode.class);

    int multiMapperIndex = 0;

    if (context.getInputSplit() instanceof MultiMapperSplit) {
        // identify the input, output and operators for this mapper
        MultiMapperSplit mmSplit = (MultiMapperSplit) context.getInputSplit();
        multiMapperIndex = mmSplit.getMultiMapperIndex();
    }
    JsonNode inputJson = mapCommands.get(multiMapperIndex).get("input");
    ArrayNode operatorsJson = (ArrayNode) mapCommands.get(multiMapperIndex).get("operators");

    JsonNode outputJson = null;
    if (conf.get(CubertStrings.JSON_SHUFFLE) != null) {
        outputJson = mapper.readValue(conf.get(CubertStrings.JSON_SHUFFLE), JsonNode.class);
    } else {
        outputJson = mapper.readValue(conf.get(CubertStrings.JSON_OUTPUT), JsonNode.class);
    }

    long blockId = conf.getLong("MY_BLOCK_ID", -1);
    long numRecords = conf.getLong("MY_NUM_RECORDS", -1);
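    // -1 acts as a "not set" sentinel default for both values.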

    Tuple partitionKey = null;
    if (conf.get("MY_PARTITION_KEY") != null) {
        try {
            byte[] bytes = (byte[]) SerializerUtils.deserializeFromString(conf.get("MY_PARTITION_KEY"));
            ByteArrayInputStream bis = new ByteArrayInputStream(bytes);
            partitionKey = TupleFactory.getInstance().newTuple();
            partitionKey.readFields(new DataInputStream(bis));
        } catch (ClassNotFoundException e) {
            throw new RuntimeException(e);
        }
    }

    // Create input block
    CommonContext commonContext = new MapContext(context);
    Block input = new ContextBlock(commonContext, partitionKey, blockId, numRecords);
    input.configure(inputJson);

    // Create phase executor
    PhaseExecutor exec = new PhaseExecutor(inputJson.get("name").getTextValue(), input,
            outputJson.get("name").getTextValue(), operatorsJson, conf);

    BlockWriter writer = StorageFactory.get(JsonUtils.getText(outputJson, "type")).getBlockWriter();
    writer.configure(outputJson);

    final int MIN_DELAY = 15000;
    int nBlocks = 0;
    long start = System.currentTimeMillis();
    long curr = start;
    Block outputBlock;
    while ((outputBlock = exec.next()) != null) {
        writer.write(outputBlock, commonContext);
        ++nBlocks;
        curr = System.currentTimeMillis();
        if (curr > start + MIN_DELAY) {
            print.f("Executed operator chain for %d block(s) in %d ms", nBlocks, curr - start);
            start = System.currentTimeMillis();
            nBlocks = 0;
        }
    }
    if (nBlocks > 0) {
        print.f("Executed operator chain for %d block(s) in %d ms", nBlocks, curr - start);
    }

    // HACK!! Asking the TeeOperator to close the files that were opened
    TeeOperator.closeFiles();

    print.f("Mapper complete ----------------------------------");
    MemoryStats.printGCStats();
}

From source file: com.linkedin.drelephant.util.Utils.java

License: Apache License

/**
 * Get a non-negative long value from a Configuration.
 *
 * If the value is not set or not a long, the provided default value is returned.
 * If the value is negative, 0 is returned.
 *
 * @param conf the Configuration to read from
 * @param key property name
 * @param defaultValue default value
 * @return non-negative long value
 */
public static long getNonNegativeLong(Configuration conf, String key, long defaultValue) {
    try {
        long value = conf.getLong(key, defaultValue);
        if (value < 0) {
            value = 0;
            logger.warn("Configuration " + key + " is negative. Resetting it to 0");
        }
        return value;
    } catch (NumberFormatException e) {
        logger.error("Invalid configuration " + key + ". Value is " + conf.get(key)
                + ". Resetting it to default value: " + defaultValue);
        return defaultValue;
    }
}
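
The NumberFormatException catch is meaningful here: Configuration.getLong does not fall back to the default for a malformed value; it propagates the parse failure from Long.parseLong.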