List of usage examples for java.util.Random setSeed
public synchronized void setSeed(long seed)
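Before the real-world examples, a minimal sketch of the contract setSeed guarantees: two generators given the same seed produce exactly the same sequence, which is what makes the seeded tests below replayable.

import java.util.Random;

public class SetSeedDemo {
    public static void main(String[] args) {
        Random a = new Random();
        Random b = new Random();
        a.setSeed(42L);
        b.setSeed(42L);
        // identical seeds yield identical sequences
        System.out.println(a.nextInt(100) == b.nextInt(100)); // true
        System.out.println(a.nextLong() == b.nextLong());     // true
    }
}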
From source file:com.zzisoo.toylibrary.adapter.ToyListAdapter.java
private int getPastelRBG() {
    Random r = new Random();
    r.setSeed(System.currentTimeMillis());
    final int Red = r.nextInt(120) + 100;
    int Green = r.nextInt(120) + 100;
    int Blue = r.nextInt(120) + 100;
    return Color.rgb(Red, Green, Blue);
}
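A note on the seeding in this example: new Random() already self-seeds with a distinct value, so calling setSeed(System.currentTimeMillis()) gains nothing and makes two instances created in the same millisecond return identical colors. A minimal sketch of the simpler form (the manual ARGB packing stands in for Android's Color.rgb so the sketch runs anywhere):

import java.util.Random;

public class PastelColor {
    // same distribution as the example above, but letting Random self-seed
    static int pastelRgb() {
        Random r = new Random(); // already uniquely seeded; no setSeed needed
        int red = r.nextInt(120) + 100;
        int green = r.nextInt(120) + 100;
        int blue = r.nextInt(120) + 100;
        // equivalent to Color.rgb(red, green, blue)
        return 0xFF000000 | (red << 16) | (green << 8) | blue;
    }

    public static void main(String[] args) {
        System.out.printf("#%06X%n", pastelRgb() & 0xFFFFFF);
    }
}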
From source file:io.undertow.server.handlers.ChunkedRequestTransferCodingTestCase.java
@Test
public void testChunkedRequest() throws IOException {
    connection = null;
    HttpPost post = new HttpPost(DefaultServer.getDefaultServerURL() + "/path");
    TestHttpClient client = new TestHttpClient();
    try {
        generateMessage(1);
        post.setEntity(new StringEntity(message) {
            @Override
            public long getContentLength() {
                return -1;
            }
        });
        HttpResponse result = client.execute(post);
        Assert.assertEquals(StatusCodes.OK, result.getStatusLine().getStatusCode());
        HttpClientUtils.readResponse(result);

        final Random random = new Random();
        final int seed = -964339432;
        System.out.print("Using Seed " + seed);
        random.setSeed(seed);

        for (int i = 0; i < 10; ++i) {
            generateMessage(100 * i);
            post.setEntity(new StringEntity(message) {
                @Override
                public long getContentLength() {
                    return -1;
                }

                @Override
                public boolean isChunked() {
                    return true;
                }

                @Override
                public void writeTo(OutputStream outstream) throws IOException {
                    int l = 0;
                    int i = 0;
                    while (i <= message.length()) {
                        i += random.nextInt(1000);
                        i = Math.min(i, message.length());
                        outstream.write(message.getBytes(), l, i - l);
                        l = i;
                        ++i;
                    }
                }
            });
            result = client.execute(post);
            Assert.assertEquals(StatusCodes.OK, result.getStatusLine().getStatusCode());
            HttpClientUtils.readResponse(result);
        }
    } finally {
        client.getConnectionManager().shutdown();
    }
}
From source file:org.xbib.elasticsearch.index.analysis.langdetect.LangdetectService.java
private double[] detectBlock(List<String> list, String text) throws LanguageDetectionException {
    // clean all non-word characters from text
    text = text.replaceAll(word.pattern(), " ");
    extractNGrams(list, text);
    if (list.isEmpty()) {
        throw new LanguageDetectionException("no features in text");
    }
    double[] langprob = new double[langlist.size()];
    Random rand = new Random();
    Long seed = 0L;
    rand.setSeed(seed);
    for (int t = 0; t < n_trial; ++t) {
        double[] prob = initProbability();
        double a = this.alpha + rand.nextGaussian() * alpha_width;
        for (int i = 0;; ++i) {
            int r = rand.nextInt(list.size());
            updateLangProb(prob, list.get(r), a);
            if (i % 5 == 0) {
                if (normalizeProb(prob) > conv_threshold || i >= iteration_limit) {
                    break;
                }
            }
        }
        for (int j = 0; j < langprob.length; ++j) {
            langprob[j] += prob[j] / n_trial;
        }
    }
    return langprob;
}
From source file:com.ebay.erl.mobius.core.mapred.MobiusInputSampler.java
@Override
public Object[] getSample(InputFormat inf, JobConf job) throws IOException {
    // the following codes are copied from {@link InputSampler#RandomSampler},
    // but require some modifications.
    InputSplit[] splits = inf.getSplits(job, job.getNumMapTasks());
    ArrayList<DataJoinKey> samples = new ArrayList<DataJoinKey>(this.numSamples);
    int splitsToSample = Math.min(this.maxSplitsSampled, splits.length);

    Random r = new Random();
    long seed = r.nextLong();
    r.setSeed(seed);

    // get Sorters
    Sorter[] sorters = null;
    if (job.get(ConfigureConstants.SORTERS, null) != null) {
        // total sort job
        sorters = (Sorter[]) SerializableUtil.deserializeFromBase64(job.get(ConfigureConstants.SORTERS), job);
    } else {
        // there is no sorter, should be reducer/join job
        Column[] keys = (Column[]) SerializableUtil
                .deserializeFromBase64(job.get(ConfigureConstants.ALL_GROUP_KEY_COLUMNS), job);
        sorters = new Sorter[keys.length];
        for (int i = 0; i < keys.length; i++) {
            sorters[i] = new Sorter(keys[i].getInputColumnName(), Ordering.ASC);
        }
    }

    long proportion = 10L;
    while ((int) (this.freq * proportion) == 0) {
        proportion = proportion * 10;
    }
    proportion = 5L * proportion;

    // shuffle splits
    for (int i = 0; i < splits.length; ++i) {
        InputSplit tmp = splits[i];
        int j = r.nextInt(splits.length);
        splits[i] = splits[j];
        splits[j] = tmp;
    }

    SamplingOutputCollector collector = new SamplingOutputCollector();
    for (int i = 0; i < splitsToSample || (i < splits.length && samples.size() < numSamples); i++) {
        LOGGER.info("Sampling from split #" + (i + 1) + ", collected samples:" + samples.size());
        RecordReader<WritableComparable, WritableComparable> reader = inf.getRecordReader(splits[i], job,
                Reporter.NULL);
        WritableComparable key = reader.createKey();
        WritableComparable value = reader.createValue();

        if (!(inf instanceof MobiusDelegatingInputFormat)) {
            // not mobius delegating input format, so the CURRENT_DATASET_ID
            // will not be set by inf#getRecordReader, we set them here.
            //
            // set the current dataset id, as the AbstractMobiusMapper#configure
            // method needs this property.
            job.set(ConfigureConstants.CURRENT_DATASET_ID, job.get(ConfigureConstants.ALL_DATASET_IDS));
        }
        Byte datasetID = Byte.valueOf(job.get(ConfigureConstants.CURRENT_DATASET_ID));
        LOGGER.info("Samples coming from dataset: " + datasetID.toString());
        AbstractMobiusMapper mapper = this.getMapper(inf, splits[i], job);
        mapper.configure(job);

        // reading elements from one split
        long readElement = 0;
        while (reader.next(key, value)) {
            collector.clear();
            Tuple tuple = mapper.parse(key, value);
            readElement++;
            if (readElement > (((long) numSamples) * ((long) proportion))) {
                // a split might be very big (ex: a large gz file),
                // so we just need to read the first portion of it.
                break;
            }
            if (r.nextDouble() <= freq) {
                if (samples.size() < numSamples) {
                    mapper.joinmap(key, value, collector, Reporter.NULL);
                    // joinmap function might generate more than one output key
                    // per <code>key</code> input.
                    for (Tuple t : collector.getOutKey()) {
                        Tuple mt = Tuple.merge(tuple, t);
                        DataJoinKey nkey = this.getKey(mt, sorters, datasetID, mapper, job);
                        samples.add(nkey);
                    }
                } else {
                    // When exceeding the maximum number of samples, replace
                    // a random element with this one, then adjust the
                    // frequency to reflect the possibility of existing
                    // elements being pushed out.
                    mapper.joinmap(key, value, collector, Reporter.NULL);
                    for (Tuple t : collector.getOutKey()) {
                        int ind = r.nextInt(numSamples);
                        if (ind != numSamples) {
                            Tuple mt = Tuple.merge(tuple, t);
                            DataJoinKey nkey = this.getKey(mt, sorters, datasetID, mapper, job);
                            samples.set(ind, nkey);
                        }
                    }
                    freq *= (numSamples - collector.getOutKey().size()) / (double) numSamples;
                }
                key = reader.createKey();
                value = reader.createValue();
            }
        }
        reader.close();
    }
    LOGGER.info("Samples have been collected, return.");
    return samples.toArray();
}
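The sampler above follows the same shape as Hadoop's InputSampler.RandomSampler: accept each record with probability freq, and once the sample buffer is full, overwrite a random slot and shrink freq. A minimal, self-contained sketch of that idea, detached from the Mobius types (the String record type and the method names here are illustrative, not part of the Mobius API):

import java.util.ArrayList;
import java.util.List;
import java.util.Random;

public class RandomSamplerSketch {
    public static List<String> sample(Iterable<String> records, int numSamples, double freq, long seed) {
        Random r = new Random();
        r.setSeed(seed); // a fixed seed makes the sample reproducible
        List<String> samples = new ArrayList<>(numSamples);
        for (String record : records) {
            if (r.nextDouble() <= freq) {
                if (samples.size() < numSamples) {
                    samples.add(record);
                } else {
                    // buffer full: overwrite a random slot and lower the
                    // acceptance rate to keep the sample roughly uniform
                    samples.set(r.nextInt(numSamples), record);
                    freq *= (numSamples - 1) / (double) numSamples;
                }
            }
        }
        return samples;
    }
}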
From source file:org.apache.hadoop.mapred.TestCombineTextInputFormat.java
@Test(timeout = 10000)
public void testFormat() throws Exception {
    JobConf job = new JobConf(defaultConf);
    Random random = new Random();
    long seed = random.nextLong();
    LOG.info("seed = " + seed);
    random.setSeed(seed);
    localFs.delete(workDir, true);
    FileInputFormat.setInputPaths(job, workDir);

    final int length = 10000;
    final int numFiles = 10;
    createFiles(length, numFiles, random);

    // create a combined split for the files
    CombineTextInputFormat format = new CombineTextInputFormat();
    LongWritable key = new LongWritable();
    Text value = new Text();
    for (int i = 0; i < 3; i++) {
        int numSplits = random.nextInt(length / 20) + 1;
        LOG.info("splitting: requesting = " + numSplits);
        InputSplit[] splits = format.getSplits(job, numSplits);
        LOG.info("splitting: got = " + splits.length);

        // we should have a single split as the length is comfortably smaller
        // than the block size
        assertEquals("We got more than one splits!", 1, splits.length);
        InputSplit split = splits[0];
        assertEquals("It should be CombineFileSplit", CombineFileSplit.class, split.getClass());

        // check the split
        BitSet bits = new BitSet(length);
        LOG.debug("split= " + split);
        RecordReader<LongWritable, Text> reader = format.getRecordReader(split, job, voidReporter);
        try {
            int count = 0;
            while (reader.next(key, value)) {
                int v = Integer.parseInt(value.toString());
                LOG.debug("read " + v);
                if (bits.get(v)) {
                    LOG.warn("conflict with " + v + " at position " + reader.getPos());
                }
                assertFalse("Key in multiple partitions.", bits.get(v));
                bits.set(v);
                count++;
            }
            LOG.info("splits=" + split + " count=" + count);
        } finally {
            reader.close();
        }
        assertEquals("Some keys in no partition.", length, bits.cardinality());
    }
}
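This test, the mapreduce variant below, and the HDFS TestGetBlocks example all use the same trick: draw a random seed, log it, then re-seed the generator with it, so every run exercises different data while a failing run can still be replayed exactly. A minimal sketch of the pattern (the logger name and output are illustrative):

import java.util.Random;
import java.util.logging.Logger;

public class ReproducibleRandomTest {
    private static final Logger LOG = Logger.getLogger("ReproducibleRandomTest");

    public static void main(String[] args) {
        Random random = new Random();
        long seed = random.nextLong(); // different on every run...
        LOG.info("seed = " + seed);    // ...but recorded for replay
        random.setSeed(seed);          // the sequence is now fully determined by 'seed'
        // on failure, hard-code the logged seed here to reproduce the exact inputs
        System.out.println(random.nextInt(100));
    }
}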
From source file:com.bilibili.magicasakurademo.MainActivity.java
private String getSnackContent(int current) {
    Random random = new Random();
    random.setSeed(System.currentTimeMillis());
    return getResources().getString(getResources().getIdentifier("magicasrkura_prompt_" + random.nextInt(3),
            "string", getPackageName())) + ThemeHelper.getName(current);
}
From source file:bluevia.InitService.java
boolean subscribeNotifications() {
    boolean result = true;
    String[] countryShortNumbers = { MO_UK, MO_SP, MO_GE, MO_BR, MO_MX, MO_AR, MO_CH, MO_CO };
    int i = 0;
    for (i = 0; i < countryShortNumbers.length; i++) {
        try {
            OAuthConsumer consumer = (OAuthConsumer) new DefaultOAuthConsumer(Util.BlueViaOAuth.consumer_key,
                    Util.BlueViaOAuth.consumer_secret);
            consumer.setMessageSigner(new HmacSha1MessageSigner());
            com.google.appengine.api.urlfetch.FetchOptions.Builder.doNotValidateCertificate();

            URL apiURI = new URL(
                    "https://api.bluevia.com/services/REST/SMS/inbound/subscriptions?version=v1&alt=json");
            HttpURLConnection request = (HttpURLConnection) apiURI.openConnection();

            Random rand = new Random();
            Date now = new Date();
            rand.setSeed(now.getTime());
            Long correlator = rand.nextLong();
            if (correlator < 0)
                correlator = -1 * correlator;

            String jsonSubscriptionMsg = "{\"smsNotification\":{\"reference\":{\"correlator\": \"%s\",\"endpoint\": \"%s\"},\"destinationAddress\":{\"phoneNumber\":\"%s\"},\"criteria\":\"%s\"}}";
            String szBody = String.format(jsonSubscriptionMsg, "bv" + correlator.toString().substring(0, 16),
                    Util.getCallbackDomain() + "/notifySmsReception", countryShortNumbers[i],
                    Util.BlueViaOAuth.app_keyword);

            request.setRequestProperty("Content-Type", "application/json");
            request.setRequestProperty("Content-Length", Integer.toString(szBody.getBytes().length));
            request.setRequestMethod("POST");
            request.setDoOutput(true);
            consumer.sign(request);
            request.connect();

            OutputStream os = request.getOutputStream();
            os.write(szBody.getBytes());
            os.flush();

            int rc = request.getResponseCode();
            if (rc == HttpURLConnection.HTTP_CREATED)
                Util.addUnsubscriptionURI(countryShortNumbers[i], request.getHeaderField("Location"),
                        "bv" + correlator.toString().substring(0, 16));
            else {
                logger.severe(String.format("Error %d registering Notification URLs:%s", rc,
                        request.getResponseMessage()));
            }
        } catch (Exception e) {
            logger.severe(String.format("Exception raised: %s", e.getMessage()));
        }
    }
    return result;
}
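One subtlety in the example above: negating a negative long fails for Long.MIN_VALUE, whose negation overflows back to itself (and Math.abs has the same blind spot). A sketch of a safer way to derive a non-negative correlator from nextLong() (the variable names are illustrative):

import java.util.Random;

public class NonNegativeCorrelator {
    public static void main(String[] args) {
        Random rand = new Random();
        rand.setSeed(System.currentTimeMillis());
        // masking off the sign bit always yields a value in [0, Long.MAX_VALUE],
        // avoiding the Long.MIN_VALUE overflow that plain negation cannot handle
        long correlator = rand.nextLong() & Long.MAX_VALUE;
        System.out.println(correlator);
    }
}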
From source file:org.apache.hadoop.mapreduce.lib.input.TestCombineTextInputFormat.java
@Test(timeout = 10000)
public void testFormat() throws Exception {
    Job job = Job.getInstance(new Configuration(defaultConf));
    Random random = new Random();
    long seed = random.nextLong();
    LOG.info("seed = " + seed);
    random.setSeed(seed);
    localFs.delete(workDir, true);
    FileInputFormat.setInputPaths(job, workDir);

    final int length = 10000;
    final int numFiles = 10;
    // create files with various lengths
    createFiles(length, numFiles, random);

    // create a combined split for the files
    CombineTextInputFormat format = new CombineTextInputFormat();
    for (int i = 0; i < 3; i++) {
        int numSplits = random.nextInt(length / 20) + 1;
        LOG.info("splitting: requesting = " + numSplits);
        List<InputSplit> splits = format.getSplits(job);
        LOG.info("splitting: got = " + splits.size());

        // we should have a single split as the length is comfortably smaller
        // than the block size
        assertEquals("We got more than one splits!", 1, splits.size());
        InputSplit split = splits.get(0);
        assertEquals("It should be CombineFileSplit", CombineFileSplit.class, split.getClass());

        // check the split
        BitSet bits = new BitSet(length);
        LOG.debug("split= " + split);
        TaskAttemptContext context = MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
        RecordReader<LongWritable, Text> reader = format.createRecordReader(split, context);
        assertEquals("reader class is CombineFileRecordReader.", CombineFileRecordReader.class,
                reader.getClass());
        MapContext<LongWritable, Text, LongWritable, Text> mcontext =
                new MapContextImpl<LongWritable, Text, LongWritable, Text>(job.getConfiguration(),
                        context.getTaskAttemptID(), reader, null, null,
                        MapReduceTestUtil.createDummyReporter(), split);
        reader.initialize(split, mcontext);
        try {
            int count = 0;
            while (reader.nextKeyValue()) {
                LongWritable key = reader.getCurrentKey();
                assertNotNull("Key should not be null.", key);
                Text value = reader.getCurrentValue();
                final int v = Integer.parseInt(value.toString());
                LOG.debug("read " + v);
                assertFalse("Key in multiple partitions.", bits.get(v));
                bits.set(v);
                count++;
            }
            LOG.debug("split=" + split + " count=" + count);
        } finally {
            reader.close();
        }
        assertEquals("Some keys in no partition.", length, bits.cardinality());
    }
}
From source file:com.googlecode.streamflyer.regex.OnStreamMatcherPerformanceTest.java
/**
 * @param numberOfCharactersInStream
 * @return Returns a string containing segments of whitespace (max length
 *         399 characters) and segments of x-Elements containing whitespace
 *         (max length 199 characters).
 */
private String createInput(int numberOfCharactersInStream) {
    StringBuilder sb = new StringBuilder(numberOfCharactersInStream);
    Random random = new Random();
    // random.setSeed(43753658);
    random.setSeed(65753433);
    int charsToAppend = 0;
    while (sb.length() < numberOfCharactersInStream - 600) {
        // append \s{0,399}, up to 399 whitespace characters
        charsToAppend = random.nextInt(400);
        for (int index = 0; index < charsToAppend; index++) {
            sb.append(' ');
        }
        // append <x>\s{0,199}</x>, i.e. an x-Element containing up to 199
        // whitespace characters
        sb.append("<x>");
        // append some characters
        charsToAppend = random.nextInt(200);
        for (int index = 0; index < charsToAppend; index++) {
            sb.append(' ');
        }
        sb.append("</x>");
    }
    while (sb.length() < numberOfCharactersInStream) {
        // append some characters
        sb.append(' ');
    }
    ZzzAssert.isTrue(sb.length() == numberOfCharactersInStream);
    return sb.toString();
}
From source file:org.apache.hadoop.hdfs.TestGetBlocks.java
@Test
public void testBlockKey() {
    Map<Block, Long> map = new HashMap<>();
    final Random RAN = new Random();
    final long seed = RAN.nextLong();
    System.out.println("seed=" + seed);
    RAN.setSeed(seed);

    long[] blkids = new long[10];
    for (int i = 0; i < blkids.length; i++) {
        blkids[i] = 1000L + RAN.nextInt(100000);
        map.put(new Block(blkids[i], 0, blkids[i]), blkids[i]);
    }
    System.out.println("map=" + map.toString().replace(",", "\n  "));

    for (long blkid : blkids) {
        Block b = new Block(blkid, 0, GenerationStamp.GRANDFATHER_GENERATION_STAMP);
        Long v = map.get(b);
        System.out.println(b + " => " + v);
        assertEquals(v.longValue(), blkid);
    }
}