List of usage examples for java.util.Random.nextLong()
public long nextLong()
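Random.nextLong() returns the next pseudorandom, uniformly distributed long from the generator's sequence, so both negative and positive values are possible. Below is a minimal, hypothetical sketch (not taken from any of the source files listed here) showing an unseeded and a seeded generator:

import java.util.Random;

public class NextLongDemo {
    public static void main(String[] args) {
        // Unseeded: a different sequence on each run.
        Random random = new Random();
        System.out.println("random long: " + random.nextLong());

        // Seeded: the same sequence on every run, useful for reproducible tests.
        Random seeded = new Random(42L);
        System.out.println("seeded long: " + seeded.nextLong());
    }
}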
From source file:com.ovrhere.android.morseflash.ui.fragments.MainFragment.java
private long generateRandomValue() {
    Random rand = new Random();
    long secretCode = rand.nextLong();
    return secretCode;
}
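generateRandomValue() creates a new Random on every call, which is fine for occasional use. As an alternative (my suggestion, not part of the original fragment), java.util.concurrent.ThreadLocalRandom avoids repeated instantiation and lock contention when the value is needed frequently from multiple threads:

import java.util.concurrent.ThreadLocalRandom;

public class SecretCodeSketch {
    // Hypothetical variant of generateRandomValue() that avoids creating
    // a new Random instance on every call.
    static long generateRandomValue() {
        return ThreadLocalRandom.current().nextLong();
    }

    public static void main(String[] args) {
        System.out.println(generateRandomValue());
    }
}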
From source file:com.sube.daos.mongodb.StatisticDaoTest.java
private long nextLong(Random rng, long n) {
    long bits, val;
    do {
        bits = (rng.nextLong() << 1) >>> 1;
        val = bits % n;
    } while (bits - val + (n - 1) < 0L);
    return val;
}
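The helper above maps nextLong() into the range [0, n) and rejects values that would introduce modulo bias. The following is a hypothetical, standalone usage sketch of the same idea; note that on Java 17 and later, Random itself offers nextLong(long bound) with the same contract, so a helper like this is mainly needed on older JDKs (that observation is an addition, not part of the original test):

import java.util.Random;

public class BoundedNextLongDemo {
    // Rejection sampling: keep only values that do not bias the result.
    static long nextLong(Random rng, long n) {
        long bits, val;
        do {
            bits = (rng.nextLong() << 1) >>> 1; // clear the sign bit
            val = bits % n;
        } while (bits - val + (n - 1) < 0L);    // reject the biased tail
        return val;
    }

    public static void main(String[] args) {
        Random rng = new Random();
        System.out.println(nextLong(rng, 1_000_000L)); // a value in [0, 1_000_000)
    }
}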
From source file:org.t2framework.commons.util.LazyLoadingReference.java
public void test_loadHeavy() throws Exception {
    final LazyLoadingReference<String> target = new LazyLoadingReference<String>(
            new LazyLoadingReference.Factory<String>() {
                public String create() {
                    return "hoge";
                }
            });
    final Random random = new Random(System.currentTimeMillis());
    List<Thread> list = new ArrayList<Thread>();
    for (int i = 0; i < 20; i++) {
        Runnable r = new Runnable() {
            public void run() {
                try {
                    long l = random.nextLong() % 100;
                    Thread.sleep(l < 0 ? l * -1 : l);
                    assertEquals("hoge", target.get());
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        };
        Thread t = new Thread(r);
        list.add(t);
        t.start();
    }
    for (Thread t : list) {
        t.join();
    }
}
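The test turns nextLong() % 100 into a non-negative sleep interval by negating negative values. A hypothetical alternative (not part of the original test) is Math.floorMod, which returns a non-negative remainder directly when the divisor is positive:

import java.util.Random;

public class FloorModSleepDemo {
    public static void main(String[] args) throws InterruptedException {
        Random random = new Random(System.currentTimeMillis());
        // Math.floorMod(x, 100L) is always in [0, 100) for a positive divisor,
        // so no sign handling is needed.
        long sleepMillis = Math.floorMod(random.nextLong(), 100L);
        Thread.sleep(sleepMillis);
        System.out.println("slept for " + sleepMillis + " ms");
    }
}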
From source file:org.jbpm.console.ng.pr.client.editors.instance.list.variables.dash.DataSetProcessInstanceWithVariablesListPresenterTest.java
@Test
public void abortProcessInstancesTest() {
    final Random random = new Random();
    final List<Long> pIds = new ArrayList<Long>();
    pIds.add(random.nextLong());
    pIds.add(random.nextLong());
    pIds.add(random.nextLong());
    presenter.abortProcessInstance(pIds);
    verify(kieSessionEntryPointMock).abortProcessInstances(pIds);
}
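Here the unseeded Random only supplies arbitrary process-instance ids to pass through to the mock. If reproducible ids were wanted (an assumption, not something the original test requires), a fixed seed makes the generated values identical on every run:

import java.util.ArrayList;
import java.util.List;
import java.util.Random;

public class FixedSeedIdsDemo {
    public static void main(String[] args) {
        // Fixed seed: the three ids below are the same on every execution.
        Random random = new Random(12345L);
        List<Long> pIds = new ArrayList<Long>();
        pIds.add(random.nextLong());
        pIds.add(random.nextLong());
        pIds.add(random.nextLong());
        System.out.println(pIds);
    }
}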
From source file:it.crs4.seal.tsv_sort.TsvSort.java
public int run(String[] args) throws Exception {
    LOG.info("starting");

    TsvSortOptionParser parser = new TsvSortOptionParser();
    parser.parse(getConf(), args);
    LOG.info("Using " + parser.getNReduceTasks() + " reduce tasks");

    Job job = new Job(getConf());
    job.setJobName("TsvSort " + parser.getInputPaths().get(0));
    job.setJarByClass(TsvSort.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);
    job.setInputFormatClass(TsvInputFormat.class);
    job.setOutputFormatClass(TextValueOutputFormat.class);
    job.setPartitionerClass(TotalOrderPartitioner.class);

    // output path
    FileOutputFormat.setOutputPath(job, parser.getOutputPath());
    FileSystem fs = parser.getOutputPath().getFileSystem(job.getConfiguration());

    /*
     * Pick a random name for the partition file in the same directory as the
     * output path. So, TsvSort /user/me/input /user/me/output
     * results in the partition file being placed in /user/me/_partition.lst.12340921387402174
     *
     * Why not place it directly in the input path?
     *
     * We wouldn't be able to run two sorts on the same data at the same time.
     * We've received complaints about this in the past, so it has been a
     * limit in practice.
     *
     * Why not place it directly in the output path?
     *
     * We'd have to create the output path before the output format did.
     * For this to work we'd have to disable the FileOutputFormat's default check
     * that verifies that the output directory doesn't exist. This means that we'd
     * need some other way to ensure that we're not writing to the same path where
     * some other job wrote.
     */
    Path partitionFile;
    Random rnd = new Random();
    do {
        partitionFile = new Path(parser.getOutputPath().getParent(),
                String.format("_partition.lst.%012d", Math.abs(rnd.nextLong())));
    } while (fs.exists(partitionFile)); // this is still subject to a race condition between it and another instance of this program
    partitionFile = partitionFile.makeQualified(fs);
    LOG.info("partition file path: " + partitionFile);

    URI partitionUri = new URI(partitionFile.toString() + "#" + PARTITION_SYMLINK);
    LOG.debug("partitionUri for distributed cache: " + partitionUri);

    // input paths
    for (Path p : parser.getInputPaths())
        TsvInputFormat.addInputPath(job, p);

    LOG.info("sampling input");
    TextSampler.writePartitionFile(new TsvInputFormat(), job, partitionFile);
    LOG.info("created partitions");
    try {
        DistributedCache.addCacheFile(partitionUri, job.getConfiguration());
        DistributedCache.createSymlink(job.getConfiguration());
        int retcode = job.waitForCompletion(true) ? 0 : 1;
        LOG.info("done");
        return retcode;
    } finally {
        LOG.debug("deleting partition file " + partitionFile);
        fs.delete(partitionFile, false);
    }
}
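One subtlety in the naming loop above: Math.abs(rnd.nextLong()) still returns a negative number in the rare case that nextLong() produces Long.MIN_VALUE, so the formatted partition file name could contain a minus sign. A hypothetical workaround (not part of the original job) is to clear the sign bit or format the value as an unsigned string:

import java.util.Random;

public class NonNegativeSuffixDemo {
    public static void main(String[] args) {
        Random rnd = new Random();
        long raw = rnd.nextLong();

        // Option 1: mask off the sign bit, guaranteeing a value in [0, Long.MAX_VALUE].
        long nonNegative = raw & Long.MAX_VALUE;
        System.out.println(String.format("_partition.lst.%012d", nonNegative));

        // Option 2: format the raw value as an unsigned decimal string.
        System.out.println("_partition.lst." + Long.toUnsignedString(raw));
    }
}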
From source file:org.apache.hadoop.hdfs.notifier.benchmark.TxnConsumer.java
private JobConf createJobConf(Configuration conf2) throws IOException {
    JobConf jobConf = new JobConf(conf);
    String jobName = "transaction_consumer";
    jobConf.setJobName(jobName);

    String splitDir = workplace + "split/";

    jobConf.set(TEST_DIR_LABEL, workplace);
    jobConf.set(NOTIFIER_SERVER_ADDR_KEY, notifierServerAddrStr);
    jobConf.set(NOTIFIER_SERVER_PORT_KEY, notifierServerPortStr);
    jobConf.setMapSpeculativeExecution(false);
    jobConf.setReduceSpeculativeExecution(false);

    jobConf.setJarByClass(TxnConsumer.class);
    jobConf.setMapperClass(ConsumerMapper.class);
    jobConf.setReducerClass(ConsumerReducer.class);
    jobConf.setMapOutputKeyClass(Text.class);
    jobConf.setMapOutputValueClass(Text.class);
    jobConf.setOutputKeyClass(Text.class);
    jobConf.setOutputValueClass(Text.class);
    jobConf.setInputFormat(TextInputFormat.class);
    jobConf.setOutputFormat(TextOutputFormat.class);

    FileInputFormat.addInputPath(jobConf, new Path(splitDir));
    Random random = new Random();
    FileOutputFormat.setOutputPath(jobConf, new Path(workplace, "output" + random.nextLong()));

    jobConf.setNumMapTasks(numMappers);

    createSplitFiles(conf, new Path(splitDir));

    return jobConf;
}
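Because nextLong() spans negative values, the output path above can end up with a name such as "output-1234...". That is harmless for this benchmark, but if a compact, sign-free suffix were preferred (my assumption, not a requirement of the original code), the value could be rendered in hexadecimal:

import java.util.Random;

public class HexSuffixDemo {
    public static void main(String[] args) {
        Random random = new Random();
        // Long.toHexString treats the value as unsigned, so the name never contains '-'.
        String outputName = "output" + Long.toHexString(random.nextLong());
        System.out.println(outputName);
    }
}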
From source file:org.springframework.security.saml.websso.AttributeQueryImpl.java
/**
 * Generates random ID to be used as Request/Response ID.
 *
 * @return random ID
 */
protected String generateID() {
    Random r = new Random();
    return 'a' + Long.toString(Math.abs(r.nextLong()), 20) + Long.toString(Math.abs(r.nextLong()), 20);
}
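generateID() prefixes 'a' to two base-20-encoded random longs. Since java.security.SecureRandom extends java.util.Random, a cryptographically stronger generator could be dropped in without changing the encoding logic; the sketch below is a hypothetical variant, not the actual Spring Security SAML implementation:

import java.security.SecureRandom;
import java.util.Random;

public class SecureIdSketch {
    // Hypothetical variant of generateID() backed by SecureRandom.
    static String generateID() {
        Random r = new SecureRandom();
        return 'a' + Long.toString(Math.abs(r.nextLong()), 20)
                + Long.toString(Math.abs(r.nextLong()), 20);
    }

    public static void main(String[] args) {
        System.out.println(generateID());
    }
}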
From source file:com.linkedin.pinot.segments.v1.creator.OnHeapDictionariesTest.java
/**
 * Helper method to build a segment with random data as per the schema.
 *
 * @param segmentDirName Name of segment directory
 * @param segmentName Name of segment
 * @param schema Schema for segment
 * @return Schema built for the segment
 * @throws Exception
 */
private Schema buildSegment(String segmentDirName, String segmentName, Schema schema) throws Exception {
    SegmentGeneratorConfig config = new SegmentGeneratorConfig(schema);
    config.setOutDir(segmentDirName);
    config.setFormat(FileFormat.AVRO);
    config.setSegmentName(segmentName);

    Random random = new Random(RANDOM_SEED);
    List<GenericRow> rows = new ArrayList<>(NUM_ROWS);
    for (int rowId = 0; rowId < NUM_ROWS; rowId++) {
        HashMap<String, Object> map = new HashMap<>();

        map.put(INT_COLUMN, random.nextInt());
        map.put(LONG_COLUMN, random.nextLong());
        map.put(FLOAT_COLUMN, random.nextFloat());
        map.put(DOUBLE_COLUMN, random.nextDouble());
        map.put(STRING_COLUMN, RandomStringUtils.randomAscii(100));

        GenericRow genericRow = new GenericRow();
        genericRow.init(map);
        rows.add(genericRow);
    }

    SegmentIndexCreationDriverImpl driver = new SegmentIndexCreationDriverImpl();
    driver.init(config, new GenericRowRecordReader(rows, schema));
    driver.build();

    LOGGER.info("Built segment {} at {}", segmentName, segmentDirName);
    return schema;
}
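buildSegment seeds its Random with the constant RANDOM_SEED, so the generated rows are identical every time the test builds the segment. The sketch below (a standalone illustration, not Pinot code) demonstrates that two Random instances with the same seed produce the same nextLong() sequence:

import java.util.Random;

public class SeededSequenceDemo {
    public static void main(String[] args) {
        long seed = 1234567890L; // stands in for a constant like RANDOM_SEED
        Random a = new Random(seed);
        Random b = new Random(seed);
        for (int i = 0; i < 5; i++) {
            long x = a.nextLong();
            long y = b.nextLong();
            // Identical seeds yield identical sequences.
            System.out.println(x + " == " + y + " : " + (x == y));
        }
    }
}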
From source file:org.rhq.enterprise.server.content.test.RepoManagerBeanTest.java
@Test(enabled = ENABLED)
public void createABunchOfRepos() throws Exception {
    PageList<Repo> repos = repoManager.findRepos(overlord, new PageControl());
    int origsize = 0;
    if (repos != null) {
        origsize = repos.size();
    }

    for (int i = 0; i < 10; i++) {
        Random r = new Random(System.currentTimeMillis());
        Repo repo = new Repo(r.nextLong() + "");
        repoManager.createRepo(overlord, repo);
    }

    repos = repoManager.findRepos(overlord, new PageControl());
    assert repos.size() == (origsize + 10);
}
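Each loop iteration constructs a new Random seeded with System.currentTimeMillis(). If several iterations run within the same millisecond, they get the same seed and therefore the same first nextLong(), which can produce duplicate repo names. A possible fix (my suggestion, not the original test) is to create the generator once, outside the loop:

import java.util.Random;

public class UniqueRepoNamesSketch {
    public static void main(String[] args) {
        // One generator for the whole loop: successive nextLong() calls continue
        // the same sequence instead of restarting from a time-based seed.
        Random r = new Random(System.currentTimeMillis());
        for (int i = 0; i < 10; i++) {
            String repoName = r.nextLong() + "";
            System.out.println("creating repo " + repoName);
        }
    }
}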
From source file:org.apache.hadoop.hbase.mapreduce.CopyTable.java
/**
 * Sets up the actual job.
 *
 * @param args The command line parameters.
 * @return The newly created job.
 * @throws IOException When setting up the job fails.
 */
public Job createSubmittableJob(String[] args) throws IOException {
    if (!doCommandLine(args)) {
        return null;
    }

    Job job = Job.getInstance(getConf(), getConf().get(JOB_NAME_CONF_KEY, NAME + "_" + tableName));
    job.setJarByClass(CopyTable.class);
    Scan scan = new Scan();
    scan.setCacheBlocks(false);
    if (startTime != 0) {
        scan.setTimeRange(startTime, endTime == 0 ? HConstants.LATEST_TIMESTAMP : endTime);
    }
    if (allCells) {
        scan.setRaw(true);
    }
    if (shuffle) {
        job.getConfiguration().set(TableInputFormat.SHUFFLE_MAPS, "true");
    }
    if (versions >= 0) {
        scan.setMaxVersions(versions);
    }

    if (startRow != null) {
        scan.setStartRow(Bytes.toBytes(startRow));
    }

    if (stopRow != null) {
        scan.setStopRow(Bytes.toBytes(stopRow));
    }

    if (families != null) {
        String[] fams = families.split(",");
        Map<String, String> cfRenameMap = new HashMap<String, String>();
        for (String fam : fams) {
            String sourceCf;
            if (fam.contains(":")) {
                // fam looks like "sourceCfName:destCfName"
                String[] srcAndDest = fam.split(":", 2);
                sourceCf = srcAndDest[0];
                String destCf = srcAndDest[1];
                cfRenameMap.put(sourceCf, destCf);
            } else {
                // fam is just "sourceCf"
                sourceCf = fam;
            }
            scan.addFamily(Bytes.toBytes(sourceCf));
        }
        Import.configureCfRenaming(job.getConfiguration(), cfRenameMap);
    }
    job.setNumReduceTasks(0);

    if (bulkload) {
        TableMapReduceUtil.initTableMapperJob(tableName, scan, Import.KeyValueImporter.class, null, null, job);

        // We need to split the inputs by destination tables so that output of Map can be bulk-loaded.
        TableInputFormat.configureSplitTable(job, TableName.valueOf(dstTableName));

        FileSystem fs = FileSystem.get(getConf());
        Random rand = new Random();
        Path root = new Path(fs.getWorkingDirectory(), "copytable");
        fs.mkdirs(root);
        while (true) {
            bulkloadDir = new Path(root, "" + rand.nextLong());
            if (!fs.exists(bulkloadDir)) {
                break;
            }
        }

        System.out.println("HFiles will be stored at " + this.bulkloadDir);
        HFileOutputFormat2.setOutputPath(job, bulkloadDir);

        try (Connection conn = ConnectionFactory.createConnection(getConf());
                Admin admin = conn.getAdmin()) {
            HFileOutputFormat2.configureIncrementalLoadMap(job,
                    admin.getTableDescriptor((TableName.valueOf(dstTableName))));
        }
    } else {
        TableMapReduceUtil.initTableMapperJob(tableName, scan, Import.Importer.class, null, null, job);

        TableMapReduceUtil.initTableReducerJob(dstTableName, null, job, null, peerAddress, null, null);
    }

    return job;
}
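The bulkload directory is named by appending rand.nextLong() and looping until an unused path is found; as in the TsvSort example, this remains racy between concurrent processes. On a local filesystem the JDK can pick a unique directory atomically; the sketch below is only an analogy under that assumption (HDFS Paths, as used above, have no direct equivalent):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public class TempDirSketch {
    public static void main(String[] args) throws IOException {
        // Atomically creates a uniquely named directory, avoiding the
        // "generate a random name and check for existence" race.
        Path bulkloadDir = Files.createTempDirectory("copytable");
        System.out.println("HFiles would be staged at " + bulkloadDir);
    }
}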