Example usage for org.apache.hadoop.conf Configuration getInt

List of usage examples for org.apache.hadoop.conf Configuration getInt

Introduction

On this page you can find usage examples of org.apache.hadoop.conf.Configuration.getInt.

Prototype

public int getInt(String name, int defaultValue) 

Document

Get the value of the name property as an int. If no such property exists, the provided default value is returned.
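
A minimal standalone sketch of the call pattern before the real-world examples below; the property name my.example.threads is hypothetical and used only for illustration:

import org.apache.hadoop.conf.Configuration;

public class GetIntExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // The key has not been set yet, so the supplied default (4) is returned.
        int threads = conf.getInt("my.example.threads", 4);

        // Once the key is set, getInt parses and returns the stored value instead.
        conf.setInt("my.example.threads", 16);
        threads = conf.getInt("my.example.threads", 4); // now 16
    }
}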

Usage

From source file:at.illecker.hama.rootbeer.examples.matrixmultiplication.gpu.MatrixMultiplicationBSPGpu.java

License:Apache License

public static void main(String[] args) throws Exception {

    // Defaults
    int numRowsA = 1024;
    int numColsA = 1024;
    int numRowsB = 1024;
    int numColsB = 1024;
    boolean isDebugging = false;

    Configuration conf = new HamaConfiguration();

    if (args.length > 0) {
        if (args.length == 6) {
            conf.setInt("bsp.peers.num", Integer.parseInt(args[0]));
            numRowsA = Integer.parseInt(args[1]);
            numColsA = Integer.parseInt(args[2]);
            numRowsB = Integer.parseInt(args[3]);
            numColsB = Integer.parseInt(args[4]);
            isDebugging = Boolean.parseBoolean(args[5]);

        } else {
            System.out.println("Wrong argument size!");
            System.out.println("    Argument1=numBspTask");
            System.out.println("    Argument2=numRowsA | Number of rows of the first input matrix");
            System.out.println("    Argument3=numColsA | Number of columns of the first input matrix");
            System.out.println("    Argument4=numRowsB | Number of rows of the second input matrix");
            System.out.println("    Argument5=numColsB | Number of columns of the second input matrix");
            System.out.println("    Argument6=debug | Enable debugging (true|false)");
            return;
        }
    } else {
        conf.setInt("bsp.peers.num", 1); // 1 because only one GPU available
    }

    conf.setBoolean(CONF_DEBUG, isDebugging);
    conf.set(CONF_BLOCKSIZE, "" + BLOCK_SIZE);
    conf.set(CONF_GRIDSIZE, "" + GRID_SIZE);
    conf.setBoolean(CONF_DEBUG, true);

    LOG.info("NumBspTask: " + conf.getInt("bsp.peers.num", 0));
    LOG.info("numRowsA: " + numRowsA);
    LOG.info("numColsA: " + numColsA);
    LOG.info("numRowsB: " + numRowsB);
    LOG.info("numColsB: " + numColsB);
    LOG.info("isDebugging: " + isDebugging);
    LOG.info("outputPath: " + OUTPUT_DIR);

    if (numColsA != numRowsB) {
        throw new Exception("Cols of MatrixA != rows of MatrixB! (" + numColsA + "!=" + numRowsB + ")");
    }

    // Create random DistributedRowMatrix
    // use constant seeds to get reproducible results

    // Matrix A
    DistributedRowMatrix.createRandomDistributedRowMatrix(conf, numRowsA, numColsA, new Random(42L),
            MATRIX_A_PATH, false);
    // Matrix B
    DistributedRowMatrix.createRandomDistributedRowMatrix(conf, numRowsB, numColsB, new Random(1337L),
            MATRIX_B_PATH, false);

    // Load DistributedRowMatrix a and b
    DistributedRowMatrix a = new DistributedRowMatrix(MATRIX_A_PATH, OUTPUT_DIR, numRowsA, numColsA);
    a.setConf(conf);

    DistributedRowMatrix b = new DistributedRowMatrix(MATRIX_B_PATH, OUTPUT_DIR, numRowsB, numColsB);
    b.setConf(conf);

    // MatrixMultiply all within a new BSP job
    long startTime = System.currentTimeMillis();
    DistributedRowMatrix c = a.multiplyBSP(b, MATRIX_C_PATH, true);

    System.out.println("MatrixMultiplicationGpu using Hama finished in "
            + (System.currentTimeMillis() - startTime) / 1000.0 + " seconds");

    // Verification
    DistributedRowMatrix d = a.multiplyJava(b, MATRIX_D_PATH);
    if (c.verify(d)) {
        System.out.println("Verify PASSED!");
    } else {
        System.out.println("Verify FAILED!");
    }

    if (isDebugging) {
        System.out.println("Matrix A:");
        a.printDistributedRowMatrix();
        System.out.println("Matrix B:");
        b.printDistributedRowMatrix();
        System.out.println("Matrix C:");
        c.printDistributedRowMatrix();
        System.out.println("Matrix D:");
        d.printDistributedRowMatrix();

        printOutput(conf);
    }
}
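
Note how getInt is paired with setInt in this example: the number of BSP peers is written with conf.setInt("bsp.peers.num", ...) and read back for logging with conf.getInt("bsp.peers.num", 0), so the default of 0 would only appear if the property had never been set.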

From source file:at.illecker.hama.rootbeer.examples.util.RandomGraphGenerator.java

License:Apache License

public static void main(String[] args) throws Exception {
    if (args.length != 4) {
        System.out.println(
                "USAGE: <Number of vertices> <Number of edges per vertex> <Number of partitions> <Outpath>");
        return;
    }
    System.out.println(Arrays.toString(args));
    Configuration conf = new Configuration();
    conf.setInt("hama.num.vertices", Integer.parseInt(args[0]));
    conf.setInt("hama.num.partitions", Integer.parseInt(args[2]));
    conf.setInt("number.edges", Integer.parseInt(args[1]));
    Job job = new Job(conf);

    Path generated = new Path(new Path(args[3]).getParent(), "generated");
    FileOutputFormat.setOutputPath(job, generated);
    FileSystem.get(conf).delete(generated, true);

    job.setJobName("RangeWriter");

    job.setJarByClass(SortGenMapper.class);
    job.setMapperClass(SortGenMapper.class);
    job.setNumReduceTasks(0);

    job.setOutputFormatClass(SequenceFileOutputFormat.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(NullWritable.class);

    job.setInputFormatClass(RangeInputFormat.class);

    job.waitForCompletion(true);
    conf.setInt("max.id", Integer.valueOf(args[0]));
    job = new Job(conf);

    FileOutputFormat.setOutputPath(job, new Path(args[3]));
    FileSystem.get(conf).delete(new Path(args[3]), true);

    job.setJobName("Random Vertex Writer");

    FileInputFormat.addInputPath(job, generated);

    job.setJarByClass(RandomMapper.class);
    job.setMapperClass(RandomMapper.class);
    job.setReducerClass(Reducer.class);

    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Text.class);

    job.setNumReduceTasks(conf.getInt("hama.num.partitions", 2));
    job.setPartitionerClass(HashPartitioner.class);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);
    job.setInputFormatClass(SequenceFileInputFormat.class);
    job.setOutputFormatClass(TextOutputFormat.class);

    job.waitForCompletion(true);
}

From source file:auction.AuctionVertex.java

License:Apache License

@Override
public void compute(Iterable<AuctionMessage> messages) throws IOException {
    Context context = getContext();
    Configuration conf = context.getConfiguration();
    double epsilon = (double) conf.getFloat("EPSILON", 0);
    int max = conf.getInt("MAX", Integer.MAX_VALUE);

    if (epsilon == 0) {
        epsilon = Double.MIN_VALUE;
    }

    // //even supersteps are bidding steps
    // //odd supersteps are updating steps
    long superstep = getSuperstep();

    //System.out.printf("Step: %d, ID: %d Value: %s\n", superstep, getId().get(), getValue().toString());

    if (superstep == max) {
        voteToHalt();
    } else if (superstep % 2 == 0) {

        // /////////////update the benefits based on the messages//////////////////////
        System.out.println("\t starting");

        for (AuctionMessage message : messages) {
            if (message.getBid() != -Double.MAX_VALUE && message.getBid() != Double.MAX_VALUE) {

                System.out.printf("\t\told benefit %s\n",
                        String.valueOf(getValue().getBenefit((int) message.getSender().get())));

                getValue().setBenefit((int) message.getSender().get(),
                        getValue().getBenefit((int) message.getSender().get()) - message.getBid());

                System.out.printf("\t\tnew benefit %s\n",
                        String.valueOf(getValue().getBenefit((int) message.getSender().get())));

            }

            else if (message.getBid() == Double.MAX_VALUE) {

                System.out.printf("\t\told column %s\n", String.valueOf(getValue().getColOwned().toString()));

                getValue().setColOwned(message.getSender());

                System.out.printf("\t\tnew column %s\n", String.valueOf(getValue().getColOwned().toString()));
            }

            else {

                System.out.printf("\tunowned now\n");
                getValue().setColOwned(-1);
            }

        }

        System.out.println("updated prices");

        // //only bid where you don't have a column already
        if (getValue().getColOwned().get() == -1) {

            System.out.println("\t preparing for loop");

            // //should store a max and a max column - and then when we re-calculate test
            /////////////////////get Max /////////////////////////////////////////////
            double[] maxBenefit = { -Double.MAX_VALUE, -Double.MAX_VALUE };
            long[] maxIdx = { -1, -1 };
            for (int i = 0; i < getValue().N; i++) {

                if (getValue().getBenefit(i) > maxBenefit[0]) {
                    maxBenefit[1] = maxBenefit[0];
                    maxBenefit[0] = getValue().getBenefit(i);
                    maxIdx[1] = maxIdx[0];
                    maxIdx[0] = (long) i;
                } else if (getValue().getBenefit(i) > maxBenefit[1]) {
                    maxBenefit[1] = getValue().getBenefit(i);
                    maxIdx[1] = (long) i;
                }
            }

            // //System.out.printf("\tmax1: %s\n", String.valueOf(maxBenefit[0]));
            // //System.out.printf("\tmax2: %s\n", String.valueOf(maxBenefit[1]));
            // //System.out.printf("\tmax1: %s\n", String.valueOf(maxIdx[0]));
            // //System.out.printf("\tmax2: %s\n", String.valueOf(maxIdx[1]));

            // //System.out.println("got maxes");

            //////////////////get bid////////////////////////////////////////////////
            double bidValue = maxBenefit[0] - maxBenefit[1] + epsilon;

            System.out.printf("\tbid value:%s\n", String.valueOf(bidValue));

            System.out.printf("\tbid on:%s\n", String.valueOf(maxIdx[0]));

            System.out.println("Got bid");

            ///////////////////send bid/////////////////////////////////////////////
            to.set(maxIdx[0]);
            bid.set(getId(), bidValue);
            sendMessage(to, bid);

            System.out.println("sent bid");
        }

        voteToHalt();
        System.out.println("voted to halt");
    }

    else {

        ///////////////////////get the maximum bid
        double maxBid = -Double.MAX_VALUE;
        long maxIdx = -1;

        // //System.out.println("starting mesage loop");

        for (AuctionMessage message : messages) {
            if (message.getBid() > maxBid) {

                maxBid = message.getBid();
                maxIdx = message.getSender().get();

                System.out.printf("\tnew Bid: %s\n", String.valueOf(maxBid));
                System.out.printf("\tnew winner: %s\n", String.valueOf(maxIdx));
            }
        }

        // //System.out.println("got hihest message");

        if (maxIdx != -1) {

            System.out.println("need to update");

            /////////////////send a message to the winner////////////////////////////
            to.set(maxIdx);
            bid.set(getId(), Double.MAX_VALUE);
            sendMessage(to, bid);

            System.out.printf("\tsending message to winner %s\n", String.valueOf(maxIdx));
            System.out.println("sent message to winner");

            ////////////////send a message to the loser//////////////////////////////
            if (getValue().getRowOwnedBy().get() != -1) {
                bid.set(getId(), -Double.MAX_VALUE);
                sendMessage(getValue().getRowOwnedBy(), bid);

                System.out.printf("\tsending message to loser %s\n",
                        String.valueOf(getValue().getRowOwnedBy()));
                System.out.println("sent message to loser");

            }

            ///////////////////update the price///////////////////////////////////////
            getValue().setPrice(maxBid + getValue().getPrice());
            getValue().setRowOwnedBy(maxIdx);

            System.out.printf("\tset the price to: %s\n", String.valueOf(getValue().getPrice()));
            System.out.printf("\tset ownership to: %s\n",
                    String.valueOf(getValue().getRowOwnedBy().toString()));

            System.out.println("updated the price");

            //////////////////send a message to all indicating the price change/////////
            for (int i = 0; i < getValue().N; i++) {
                to.set(i);
                bid.set(getId(), maxBid);
                sendMessage(to, bid);

                System.out.printf("\tsent priceChange: %s message to row %s\n", String.valueOf(maxBid),
                        String.valueOf(i));

            }

            System.out.println("sent message to all");
        }

        voteToHalt();
        System.out.println("voted to halt");
    }
}

From source file:authordetect.input.SingleBookReader.java

/**
 * @param inputSplit
 * @param context    the information about the task
 * @throws java.io.IOException
 * @throws InterruptedException
 */
@Override
public void initialize(InputSplit inputSplit, TaskAttemptContext context)
        throws IOException, InterruptedException {

    FileSplit split = (FileSplit) inputSplit;
    Configuration configuration = context.getConfiguration();

    // get the option from configuration:
    // 0 for group by author, 1 for group by book
    int option = configuration.getInt("GROUP_OPTION", 0);

    Path path = split.getPath();
    filename = path.getName();
    FileSystem fileSystem = path.getFileSystem(configuration);
    FSDataInputStream inputStream = fileSystem.open(path);
    lineReader = new LineReader(inputStream, configuration);

    //initial start point and end point
    start = split.getStart();
    end = start + split.getLength();

    inputStream.seek(start);
    if (start != 0) {
        start += lineReader.readLine(new Text(), 0, (int) Math.min(Integer.MAX_VALUE, end - start));
    }

    start += lineReader.readLine(currentLine);

    prepareToScanBook(option);
}
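
The reader above falls back to option 0 (group by author) when "GROUP_OPTION" is absent from the configuration. A minimal sketch of how a driver might select the other mode before submitting the job; the class name GroupOptionDriver and the job name are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class GroupOptionDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // 0 = group by author (the reader's default), 1 = group by book.
        conf.setInt("GROUP_OPTION", 1);
        Job job = Job.getInstance(conf, "author-detect");
        // ... remaining job setup (input/output formats, mapper, paths) omitted ...
        job.waitForCompletion(true);
    }
}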

From source file:backup.datanode.DataNodeBackupProcessorBase.java

License:Apache License

public DataNodeBackupProcessorBase(Configuration conf) throws Exception {
    int backupThreads = conf.getInt(DFS_BACKUP_DATANODE_BACKUP_THREAD_COUNT_KEY,
            DFS_BACKUP_DATANODE_BACKUP_THREAD_COUNT_DEFAULT);
    int queueDepth = conf.getInt(DFS_BACKUP_DATANODE_BACKUP_QUEUE_DEPTH_KEY,
            DFS_BACKUP_DATANODE_BACKUP_QUEUE_DEPTH_DEFAULT);
    _defaultAge = conf.getLong(DFS_BACKUP_DATANODE_BACKUP_AGE_KEY, DFS_BACKUP_DATANODE_BACKUP_AGE_DEFAULT);

    _closer = Closer.create();
    _service = _closer.register(Executors.newFixedThreadPool(backupThreads + 1));
    _backupQueue = new PriorityBlockingQueue<>(queueDepth);

    _backupQueueDepth = Metrics.METRICS.counter(QUEUE_BACKUP);
    _enqueueBackupDropMetric = Metrics.METRICS.histogram(ENQUEUE_BACKUP_DROP);
    _enqueueBackupRetryMetric = Metrics.METRICS.histogram(ENQUEUE_BACKUP_RETRY);
    _backupThroughput = Metrics.METRICS.meter(BACKUP_THROUGHPUT);

    startBackupThreads(backupThreads);
}
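
This constructor shows the common idiom of pairing each configuration key constant with a matching default constant, so conf.getInt(..._KEY, ..._DEFAULT) degrades gracefully when the property is unset; here the results size the backup thread pool and the bounded backup queue.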

From source file:backup.datanode.DataNodeBackupServicePlugin.java

License:Apache License

@Override
public void start(Object service) {
    DataNode datanode = (DataNode) service;
    Configuration conf = getConf();
    RPC.setProtocolEngine(conf, DataNodeBackupRPC.class, WritableRpcEngine.class);
    // This object is created here so that it's lifecycle follows the datanode
    try {
        backupProcessor = SingletonManager.getManager(DataNodeBackupProcessor.class).getInstance(datanode,
                () -> new DataNodeBackupProcessor(conf, datanode));
        restoreProcessor = SingletonManager.getManager(DataNodeRestoreProcessor.class).getInstance(datanode,
                () -> new DataNodeRestoreProcessor(conf, datanode));

        DataNodeBackupRPCImpl backupRPCImpl = new DataNodeBackupRPCImpl(backupProcessor, restoreProcessor);

        InetSocketAddress listenerAddress = datanode.ipcServer.getListenerAddress();
        int ipcPort = listenerAddress.getPort();
        String bindAddress = listenerAddress.getAddress().getHostAddress();
        int port = conf.getInt(DFS_BACKUP_DATANODE_RPC_PORT_KEY, DFS_BACKUP_DATANODE_RPC_PORT_DEFAULT);
        if (port == 0) {
            port = ipcPort + 1;
        }
        server = new RPC.Builder(conf).setBindAddress(bindAddress).setPort(port).setInstance(backupRPCImpl)
                .setProtocol(DataNodeBackupRPC.class).build();
        ServiceAuthorizationManager serviceAuthorizationManager = server.getServiceAuthorizationManager();
        serviceAuthorizationManager.refresh(conf, new BackupPolicyProvider());
        server.start();

        LOG.info("DataNode Backup RPC listening on {}", port);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}

From source file:backup.datanode.DataNodeRestoreProcessor.java

License:Apache License

public DataNodeRestoreProcessor(Configuration conf, DataNode datanode) throws Exception {
    _closer = Closer.create();
    _datanode = datanode;
    _restoreThroughput = Metrics.METRICS.meter(RESTORE_THROUGHPUT);
    _bytesPerChecksum = conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,
            DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
    _checksumType = Type
            .valueOf(conf.get(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT));
    int threads = conf.getInt(DFS_BACKUP_DATANODE_RESTORE_BLOCK_HANDLER_COUNT_KEY,
            DFS_BACKUP_DATANODE_RESTORE_BLOCK_HANDLER_COUNT_DEFAULT);
    long pauseOnError = conf.getLong(DFS_BACKUP_DATANODE_RESTORE_ERROR_PAUSE_KEY,
            DFS_BACKUP_DATANODE_RESTORE_ERROR_PAUSE_DEFAULT);
    _backupStore = _closer.register(BackupStore.create(BackupUtil.convert(conf)));
    _restoreBlocks = new ArrayBlockingQueue<>(threads);
    _executorService = Executors.newCachedThreadPool();
    _closer.register((Closeable) () -> _executorService.shutdownNow());
    for (int t = 0; t < threads; t++) {
        _executorService.submit(Executable.createDaemon(LOG, pauseOnError, _running, () -> restoreBlocks()));
    }
}

From source file:backup.datanode.ipc.DataNodeBackupRPC.java

License:Apache License

public static DataNodeBackupRPC getDataNodeBackupRPC(InetSocketAddress dataNodeIPCAddress, Configuration conf,
        UserGroupInformation ugi) throws IOException, InterruptedException {
    int port = conf.getInt(DFS_BACKUP_DATANODE_RPC_PORT_KEY, DFS_BACKUP_DATANODE_RPC_PORT_DEFAULT);
    if (port == 0) {
        port = dataNodeIPCAddress.getPort() + 1;
    }
    InetSocketAddress dataNodeAddress = new InetSocketAddress(dataNodeIPCAddress.getAddress(), port);
    return RPC.getProtocolProxy(DataNodeBackupRPC.class, RPC.getProtocolVersion(DataNodeBackupRPC.class),
            dataNodeAddress, ugi, conf, NetUtils.getDefaultSocketFactory(conf)).getProxy();
}
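
Here getInt supplies the backup RPC port, and a value of 0 is treated as "not configured": the code then falls back to the DataNode's IPC port plus one. The NameNode variant below follows the same convention.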

From source file:backup.namenode.ipc.NameNodeBackupRPC.java

License:Apache License

public static NameNodeBackupRPC getDataNodeBackupRPC(InetSocketAddress nameNodeIPCAddres, Configuration conf,
        UserGroupInformation ugi) throws IOException, InterruptedException {
    int port = conf.getInt(DFS_BACKUP_NAMENODE_RPC_PORT_KEY, DFS_BACKUP_NAMENODE_RPC_PORT_DEFAULT);
    if (port == 0) {
        port = nameNodeIPCAddres.getPort() + 1;
    }
    InetSocketAddress nameNodeAddress = new InetSocketAddress(nameNodeIPCAddres.getAddress(), port);
    return RPC.getProtocolProxy(NameNodeBackupRPC.class, RPC.getProtocolVersion(NameNodeBackupRPC.class),
            nameNodeAddress, ugi, conf, NetUtils.getDefaultSocketFactory(conf)).getProxy();
}

From source file:backup.namenode.NameNodeBackupBlockCheckProcessor.java

License:Apache License

public NameNodeBackupBlockCheckProcessor(Configuration conf, NameNodeRestoreProcessor processor,
        NameNode namenode, UserGroupInformation ugi) throws Exception {
    String[] nnStorageLocations = conf.getStrings(DFS_NAMENODE_NAME_DIR);
    URI uri = new URI(nnStorageLocations[0]);
    _reportPath = new File(new File(uri.getPath()).getParent(), "backup-reports");
    _reportPath.mkdirs();
    if (!_reportPath.exists()) {
        throw new IOException("Report path " + _reportPath + " does not exist");
    }

    this.ugi = ugi;
    this.namenode = namenode;
    this.conf = conf;
    this.processor = processor;
    backupStore = BackupStore.create(BackupUtil.convert(conf));
    this.fileSystem = (DistributedFileSystem) FileSystem.get(conf);
    this.ignorePath = conf.get(DFS_BACKUP_IGNORE_PATH_FILE_KEY, DFS_BACKUP_IGNORE_PATH_FILE_DEFAULT);
    this.batchSize = conf.getInt(DFS_BACKUP_REMOTE_BACKUP_BATCH_KEY, DFS_BACKUP_REMOTE_BACKUP_BATCH_DEFAULT);
    this.checkInterval = conf.getLong(DFS_BACKUP_NAMENODE_BLOCK_CHECK_INTERVAL_KEY,
            DFS_BACKUP_NAMENODE_BLOCK_CHECK_INTERVAL_DEFAULT);
    this.initInterval = conf.getLong(DFS_BACKUP_NAMENODE_BLOCK_CHECK_INTERVAL_DELAY_KEY,
            DFS_BACKUP_NAMENODE_BLOCK_CHECK_INTERVAL_DELAY_DEFAULT);
    start();
}