List of usage examples for java.util.Random.nextBoolean()

public boolean nextBoolean()

Returns the next pseudorandom, uniformly distributed boolean value from this random number generator's sequence. The values true and false are produced with (approximately) equal probability.
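Before the project-specific examples below, here is a minimal self-contained sketch of the call itself; the class name and the seed are illustrative only:

    import java.util.Random;

    public class NextBooleanBasics {
        public static void main(String[] args) {
            Random random = new Random(42L); // fixed seed => reproducible sequence
            // nextBoolean() returns true or false with equal probability
            boolean coinFlip = random.nextBoolean();
            System.out.println("coin flip: " + (coinFlip ? "heads" : "tails"));

            // count heads over many flips; expect roughly 50%
            int heads = 0;
            int trials = 1_000_000;
            for (int i = 0; i < trials; i++) {
                if (random.nextBoolean()) {
                    heads++;
                }
            }
            System.out.printf("heads: %d / %d (%.2f%%)%n", heads, trials, 100.0 * heads / trials);
        }
    }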
From source file: org.pentaho.di.trans.steps.cpythonscriptexecutor.CPythonScriptExecutorData.java

    /**
     * Generate some random rows to send to python in the case where a single variable (data frame) is being
     * extracted and we want to try and determine the types of the output fields
     *
     * @param inputMeta incoming row meta
     * @param r Random instance to use
     * @return a list of randomly generated rows with types matching the incoming row types.
     * @throws KettleException if a problem occurs
     */
    protected static List<Object[]> generateRandomRows(RowMetaInterface inputMeta, Random r) throws KettleException {
        List<Object[]> rows = new ArrayList<Object[]>(NUM_RANDOM_ROWS);
        // ValueMetaInterface numericVM = new ValueMeta( "num", ValueMetaInterface.TYPE_NUMBER ); //$NON-NLS-1$
        ValueMetaInterface numericVM =
            ValueMetaFactory.createValueMeta("num", ValueMetaInterface.TYPE_NUMBER); //$NON-NLS-1$

        for (int i = 0; i < NUM_RANDOM_ROWS; i++) {
            Object[] currentRow = new Object[inputMeta.size()];
            for (int j = 0; j < inputMeta.size(); j++) {
                ValueMetaInterface vm = inputMeta.getValueMeta(j);
                ValueMetaInterface tempVM = vm.clone();
                tempVM.setStorageType(ValueMetaInterface.STORAGE_TYPE_NORMAL);

                Object newVal;
                double d = r.nextDouble();
                switch (vm.getType()) {
                    case ValueMetaInterface.TYPE_NUMBER:
                    case ValueMetaInterface.TYPE_INTEGER:
                    case ValueMetaInterface.TYPE_BIGNUMBER:
                        d *= 100.0;
                        newVal = d;
                        if (vm.getStorageType() == ValueMetaInterface.STORAGE_TYPE_BINARY_STRING) {
                            newVal = tempVM.convertData(numericVM, newVal);
                        }
                        currentRow[j] = vm.getStorageType() == ValueMetaInterface.STORAGE_TYPE_NORMAL
                            ? vm.convertData(numericVM, newVal)
                            : tempVM.convertToBinaryStringStorageType(newVal);
                        break;
                    case ValueMetaInterface.TYPE_DATE:
                        newVal = new Date(new Date().getTime() + (long) (d * 100000));
                        currentRow[j] = vm.getStorageType() == ValueMetaInterface.STORAGE_TYPE_NORMAL
                            ? newVal
                            : tempVM.convertToBinaryStringStorageType(newVal);
                        break;
                    case ValueMetaInterface.TYPE_TIMESTAMP:
                        newVal = new Timestamp(new Date().getTime() + (long) (d * 100000));
                        currentRow[j] = vm.getStorageType() == ValueMetaInterface.STORAGE_TYPE_NORMAL
                            ? newVal
                            : tempVM.convertToBinaryStringStorageType(newVal);
                        break;
                    case ValueMetaInterface.TYPE_BOOLEAN:
                        newVal = r.nextBoolean();
                        currentRow[j] = vm.getStorageType() == ValueMetaInterface.STORAGE_TYPE_NORMAL
                            ? newVal
                            : tempVM.convertToBinaryStringStorageType(newVal);
                        break;
                    default:
                        newVal = d < 0.5 ? "value1" : "value2";
                        currentRow[j] = vm.getStorageType() == ValueMetaInterface.STORAGE_TYPE_NORMAL
                            ? newVal
                            : tempVM.convertToBinaryStringStorageType(newVal);
                }
            }
            rows.add(currentRow);
        }
        return rows;
    }
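The only use of nextBoolean() above is the TYPE_BOOLEAN branch, which fills boolean columns with random values; every other branch derives its value from nextDouble(). A stripped-down sketch of the same per-column-type dispatch without the Kettle APIs (the enum and class names here are hypothetical):

    import java.util.ArrayList;
    import java.util.Date;
    import java.util.List;
    import java.util.Random;

    // Hypothetical column model standing in for Kettle's ValueMetaInterface.
    enum ColumnType { NUMBER, DATE, BOOLEAN, STRING }

    class RandomRows {
        static List<Object[]> generate(ColumnType[] schema, int numRows, Random r) {
            List<Object[]> rows = new ArrayList<>(numRows);
            for (int i = 0; i < numRows; i++) {
                Object[] row = new Object[schema.length];
                for (int j = 0; j < schema.length; j++) {
                    switch (schema[j]) {
                        case NUMBER:
                            row[j] = r.nextDouble() * 100.0;
                            break;
                        case DATE:
                            row[j] = new Date(System.currentTimeMillis() + (long) (r.nextDouble() * 100000));
                            break;
                        case BOOLEAN:
                            row[j] = r.nextBoolean(); // random true/false, as in the Pentaho code
                            break;
                        default:
                            row[j] = r.nextDouble() < 0.5 ? "value1" : "value2";
                    }
                }
                rows.add(row);
            }
            return rows;
        }
    }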
From source file: org.onebusaway.nyc.vehicle_tracking.impl.VehicleLocationSimulationServiceImpl.java

    public void generateRunSim(Random random, SimulatorTask task, RunTripEntry runTrip, long serviceDate,
            int scheduleTime, int shiftStartTime, boolean reportsOperatorId, boolean reportsRunId,
            boolean allowRunTransitions, SortedMap<Double, Integer> scheduleDeviations, double locationSigma,
            AgencyAndId vehicleId) {

        /*
         * here we format the runId to have a run-route that looks similar to what
         * an operator would enter.
         */
        final String runNumber = runTrip.getRunNumber();
        String runRoute = runTrip.getRunRoute();

        if (StringUtils.equals(runRoute, "MISC")) {
            // runRoute = "0" + random.nextInt(9) + random.nextInt(9);
            runRoute = "000";
        } else if (runRoute.length() >= 5) {
            final String firstPart = runRoute.substring(1, 3);
            final String lastPart = runRoute.substring(3);
            runRoute = "0" + (random.nextBoolean() ? firstPart : lastPart);
        } else {
            final String firstPart = runRoute.substring(1, 3);
            runRoute = "0" + firstPart;
        }

        final String reportedRunId = RunTripEntry.createId(runRoute, runNumber);

        if (reportsRunId)
            _log.info("using reported runId=" + reportedRunId);

        String lastBlockId = null;

        final List<RunTripEntry> rtes = new ArrayList<RunTripEntry>();
        for (final RunTripEntry rte : _runService.getRunTripEntriesForRun(runTrip.getRunId())) {
            if (_calendarService.isLocalizedServiceIdActiveOnDate(rte.getTripEntry().getServiceId(),
                    new Date(serviceDate)))
                rtes.add(rte);
        }

        if (rtes.isEmpty()) {
            _log.error("no active runTrips for service date=" + new Date(serviceDate));
        }

        // TODO necessary?
        Collections.sort(rtes, new RunTripComparator(serviceDate, _blockCalendarService));

        // String agencyId = runTrip.getTripEntry().getId().getAgencyId();

        CoordinatePoint lastLocation = null;
        final RunTripEntry lastRunTrip = rtes.get(rtes.size() - 1);
        final int firstTime = runTrip.getStartTime();
        final int lastTime = lastRunTrip.getStopTime();
        int runningLastTime = runTrip.getStopTime();
        int currRunIdx = rtes.indexOf(runTrip);

        // We could go by run trip entry sequence or perturbed schedule-time
        // evolution. The latter is chosen, for now.
        while (scheduleTime <= lastTime) {
            final NycTestInferredLocationRecord record = new NycTestInferredLocationRecord();
            final long unperturbedTimestamp = serviceDate + (scheduleTime + shiftStartTime) * 1000;

            if (scheduleTime >= runningLastTime) {
                if (currRunIdx == rtes.size() - 1)
                    break;
                currRunIdx += 1;
                runTrip = rtes.get(currRunIdx);
                // runTrip = _runService.getNextEntry(runTrip);
                runningLastTime = runTrip.getStopTime();
                // FIXME saw weird run setups like this. are these in error?
                if (scheduleTime >= runningLastTime) {
                    _log.error("runs are ordered oddly:" + rtes.get(currRunIdx - 1) + " -> " + runTrip);
                    break;
                }
            }

            /*
             * this could mean that the run has ended. We could reassign the driver?
             * for now we'll terminate the simulation
             */
            if (runTrip == null)
                break;

            final TripEntry trip = runTrip.getTripEntry();
            final AgencyAndId tripId = trip.getId();

            record.setActualTripId(AgencyAndIdLibrary.convertToString(tripId));

            // TODO dsc changes for new block/run?
            String dsc = _destinationSignCodeService.getDestinationSignCodeForTripId(tripId);
            if (StringUtils.isEmpty(dsc))
                dsc = "0";

            // FIXME straighten these out...
            if (scheduleTime < runTrip.getStartTime()) {
                record.setActualPhase(EVehiclePhase.DEADHEAD_BEFORE.toString());
                dsc = "0";
            } else {
                record.setActualPhase(EVehiclePhase.IN_PROGRESS.toString());
            }

            int scheduleDeviation = 0;

            if (!scheduleDeviations.isEmpty()) {
                final double ratio = (scheduleTime - firstTime) / ((double) (lastTime - firstTime));
                scheduleDeviation = (int) InterpolationLibrary.interpolate(scheduleDeviations, ratio,
                        EOutOfRangeStrategy.LAST_VALUE);
            }

            final long perterbedTimestamp = unperturbedTimestamp + scheduleDeviation * 1000;

            /*
             * sample runTrips active 30 minutes from now. TODO make the time range
             * less arbitrary?
             */
            if (allowRunTransitions) {
                final RunTripEntry newRun = sampleNearbyRunTrips(runTrip, unperturbedTimestamp + 30 * 60 * 1000);
                if (newRun != null) {
                    // FIXME can we get to this trip in 30, or whatever minutes?
                    // FIXME what geometry do we follow to get there?
                    /*
                     * we need to simulate the dsc/headsign: could set it to 0 the new
                     * trip the current trip ... TODO should this be a user option?
                     */
                    record.setActualPhase(EVehiclePhase.DEADHEAD_BEFORE.toString());
                    dsc = "0";
                    runTrip = newRun;
                    /*
                     * similarly, do we reset the reportedRunId? TODO also a user option?
                     */
                    // reportedRunId = runTrip.getRun();
                }
            }

            // TODO when are there multiples and which do we choose when there are?
            final ScheduledBlockLocation blockLocation =
                _runService.getSchedBlockLocForRunTripEntryAndTime(runTrip, unperturbedTimestamp);

            if (blockLocation == null)
                break;

            final BlockEntry blockEntry = blockLocation.getActiveTrip().getBlockConfiguration().getBlock();

            _log.debug("sim blockLocation: " + blockLocation.toString());
            CoordinatePoint location = blockLocation.getLocation();

            record.setActualRunId(runTrip.getRunId());

            final String currentBlockId = AgencyAndIdLibrary.convertToString(blockEntry.getId());
            // if (_log.isDebugEnabled())
            if (lastBlockId != null && !StringUtils.equals(currentBlockId, lastBlockId)) {
                _log.info("changed blocks: " + lastBlockId + " -> " + currentBlockId);
            }
            record.setActualBlockId(currentBlockId);
            lastBlockId = currentBlockId;
            record.setActualDistanceAlongBlock(blockLocation.getDistanceAlongBlock());

            /*
             * during block changes we get a weird null location. this is a
             * "work-around"...
             */
            if (location == null) {
                location = lastLocation;
            } else {
                lastLocation = location;
            }

            final CoordinatePoint p = applyLocationNoise(location.getLat(), location.getLon(), locationSigma,
                    random);

            record.setDsc(dsc);
            record.setLat(p.getLat());
            record.setLon(p.getLon());
            record.setTimestamp(perterbedTimestamp);
            record.setVehicleId(vehicleId);
            // TODO options for whether these are reported or not?
            if (reportsOperatorId)
                record.setOperatorId("0000");
            if (reportsRunId) {
                record.setReportedRunId(reportedRunId);
            }
            record.setActualServiceDate(serviceDate);
            final int actualScheduleTime = blockLocation.getScheduledTime();
            record.setActualScheduleTime(actualScheduleTime);
            record.setActualDsc(dsc);
            record.setActualBlockLat(location.getLat());
            record.setActualBlockLon(location.getLon());
            // TODO setActualStatus?

            task.addRecord(record);

            scheduleTime += 30 + random.nextGaussian() * 2;
        }
    }
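Here nextBoolean() acts as a coin flip between two candidate substrings of the run-route. The same idiom in isolation (names hypothetical):

    import java.util.Random;

    class CoinFlipChoice {
        // Pick one of two alternatives with equal probability, as in generateRunSim above.
        static String pickRoutePart(String runRoute, Random random) {
            String firstPart = runRoute.substring(1, 3);
            String lastPart = runRoute.substring(3);
            return "0" + (random.nextBoolean() ? firstPart : lastPart);
        }

        public static void main(String[] args) {
            Random random = new Random();
            System.out.println(pickRoutePart("X12345", random)); // prints "012" or "0345"
        }
    }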
From source file: org.hellojavaer.testcase.generator.TestCaseGenerator.java

    @SuppressWarnings({ "unchecked", "rawtypes" })
    private static <T> T produceBean(Class<T> clazz, ControlParam countrolParam, Stack<Class> parseClassList) {
        try {
            T item = clazz.newInstance();
            for (PropertyDescriptor pd : BeanUtils.getPropertyDescriptors(clazz)) {
                Method writeMethod = pd.getWriteMethod();
                if (writeMethod == null || pd.getReadMethod() == null
                        || countrolParam.getExcludeFieldList() != null
                                && countrolParam.getExcludeFieldList().contains(pd.getName())) {
                    continue;
                }
                Class fieldClazz = pd.getPropertyType();
                Long numIndex = countrolParam.getNumIndex();
                // int enumIndex = countrolParam.getEnumIndex();
                Random random = countrolParam.getRandom();
                long strIndex = countrolParam.getStrIndex();
                int charIndex = countrolParam.getCharIndex();
                Calendar time = countrolParam.getTime();
                if (TypeUtil.isBaseType(fieldClazz)) {
                    if (TypeUtil.isNumberType(fieldClazz)) {
                        if (fieldClazz == Byte.class) {
                            writeMethod.invoke(item, Byte.valueOf((byte) (numIndex & 0x7F)));
                        } else if (fieldClazz == Short.class) {
                            writeMethod.invoke(item, Short.valueOf((short) (numIndex & 0x7FFF)));
                        } else if (fieldClazz == Integer.class) {
                            writeMethod.invoke(item, Integer.valueOf((int) (numIndex & 0x7FFFFFFF)));
                        } else if (fieldClazz == Long.class) {
                            writeMethod.invoke(item, Long.valueOf((long) numIndex));
                        } else if (fieldClazz == Float.class) {
                            writeMethod.invoke(item, Float.valueOf((float) numIndex));
                        } else if (fieldClazz == Double.class) {
                            writeMethod.invoke(item, Double.valueOf((double) numIndex));
                        } else if (fieldClazz == byte.class) {
                            writeMethod.invoke(item, (byte) (numIndex & 0x7F));
                        } else if (fieldClazz == short.class) {
                            writeMethod.invoke(item, (short) (numIndex & 0x7FFF));
                        } else if (fieldClazz == int.class) {
                            writeMethod.invoke(item, (int) (numIndex & 0x7FFFFFFF));
                        } else if (fieldClazz == long.class) {
                            writeMethod.invoke(item, (long) numIndex);
                        } else if (fieldClazz == float.class) {
                            writeMethod.invoke(item, (float) numIndex);
                        } else if (fieldClazz == double.class) {
                            writeMethod.invoke(item, (double) numIndex);
                        }
                        numIndex++;
                        if (numIndex < 0) {
                            numIndex &= 0x7FFFFFFFFFFFFFFFL;
                        }
                        countrolParam.setNumIndex(numIndex);
                    } else if (fieldClazz == boolean.class) {
                        writeMethod.invoke(item, random.nextBoolean());
                    } else if (fieldClazz == Boolean.class) {
                        writeMethod.invoke(item, Boolean.valueOf(random.nextBoolean()));
                    } else if (fieldClazz == char.class) {
                        writeMethod.invoke(item, CHAR_RANGE[charIndex]);
                        charIndex++;
                        if (charIndex >= CHAR_RANGE.length) {
                            charIndex = 0;
                        }
                        countrolParam.setCharIndex(charIndex);
                    } else if (fieldClazz == Character.class) {
                        writeMethod.invoke(item, Character.valueOf(CHAR_RANGE[charIndex]));
                        charIndex++;
                        if (charIndex >= CHAR_RANGE.length) {
                            charIndex = 0;
                        }
                        countrolParam.setCharIndex(charIndex);
                    } else if (fieldClazz == String.class) {
                        if (countrolParam.getUniqueFieldList() != null
                                && countrolParam.getUniqueFieldList().contains(pd.getName())) {
                            StringBuilder sb = new StringBuilder();
                            convertNum(strIndex, STRING_RANGE, countrolParam.getRandom(), sb);
                            writeMethod.invoke(item, sb.toString());
                            strIndex += countrolParam.getStrStep();
                            if (strIndex < 0) {
                                strIndex &= 0x7FFFFFFFFFFFFFFFL;
                            }
                            countrolParam.setStrIndex(strIndex);
                        } else {
                            writeMethod.invoke(item, String.valueOf(CHAR_RANGE[charIndex]));
                            charIndex++;
                            if (charIndex >= CHAR_RANGE.length) {
                                charIndex = 0;
                            }
                            countrolParam.setCharIndex(charIndex);
                        }
                    } else if (fieldClazz == Date.class) {
                        writeMethod.invoke(item, time.getTime());
                        time.add(Calendar.DAY_OF_YEAR, 1);
                    } else if (fieldClazz.isEnum()) {
                        int index = random.nextInt(fieldClazz.getEnumConstants().length);
                        writeMethod.invoke(item, fieldClazz.getEnumConstants()[index]);
                    } else {
                        // throw new RuntimeException("out of countrol Class " + fieldClazz.getName());
                    }
                } else {
                    parseClassList.push(fieldClazz);
                    // TODO ?
                    Set<Class> set = new HashSet<Class>(parseClassList);
                    if (parseClassList.size() - set.size() <= countrolParam.getRecursiveCycleLimit()) {
                        Object bean = produceBean(fieldClazz, countrolParam, parseClassList);
                        writeMethod.invoke(item, bean);
                    }
                    parseClassList.pop();
                }
            }
            return item;
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
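In the boolean and Boolean branches above, the generator simply forwards random.nextBoolean() to the property setter via reflection. A minimal sketch of that idea using plain java.beans introspection, assuming a conventional getter/setter bean:

    import java.beans.Introspector;
    import java.beans.PropertyDescriptor;
    import java.lang.reflect.Method;
    import java.util.Random;

    class BooleanBeanFiller {
        // Set every boolean/Boolean property of the bean to a random value.
        static <T> T fillBooleans(T bean, Random random) throws Exception {
            for (PropertyDescriptor pd : Introspector.getBeanInfo(bean.getClass()).getPropertyDescriptors()) {
                Method setter = pd.getWriteMethod();
                Class<?> type = pd.getPropertyType();
                if (setter != null && (type == boolean.class || type == Boolean.class)) {
                    setter.invoke(bean, random.nextBoolean()); // autoboxes for Boolean setters
                }
            }
            return bean;
        }
    }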
From source file: org.elasticsearch.test.ESIntegTestCase.java

    protected Settings.Builder setRandomIndexSettings(Random random, Settings.Builder builder) {
        setRandomIndexMergeSettings(random, builder);
        setRandomIndexTranslogSettings(random, builder);
        setRandomIndexNormsLoading(random, builder);
        if (random.nextBoolean()) {
            builder.put(MergeSchedulerConfig.AUTO_THROTTLE, false);
        }
        if (random.nextBoolean()) {
            builder.put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, random.nextBoolean());
        }
        if (random.nextBoolean()) {
            builder.put("index.shard.check_on_startup", randomFrom(random, "false", "checksum", "true"));
        }
        if (randomBoolean()) {
            // keep this low so we don't stall tests
            builder.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING,
                    RandomInts.randomIntBetween(random, 1, 15) + "ms");
        }
        return builder;
    }
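Each if (random.nextBoolean()) block gives a setting a roughly 50% chance of being overridden at all, so different test runs exercise different configurations. A sketch of the shape of this pattern with a plain map standing in for Settings.Builder (the keys are hypothetical):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Random;

    class RandomizedConfig {
        static Map<String, Object> build(Random random) {
            Map<String, Object> settings = new HashMap<>();
            // 50% of runs override the default at all...
            if (random.nextBoolean()) {
                // ...and when they do, the value itself is another coin flip.
                settings.put("cache.enabled", random.nextBoolean());
            }
            if (random.nextBoolean()) {
                settings.put("check_on_startup",
                        new String[] { "false", "checksum", "true" }[random.nextInt(3)]);
            }
            return settings;
        }
    }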
From source file: org.elasticsearch.test.ElasticsearchIntegrationTest.java

    protected Settings.Builder setRandomSettings(Random random, Settings.Builder builder) {
        setRandomMerge(random, builder);
        setRandomTranslogSettings(random, builder);
        setRandomNormsLoading(random, builder);
        setRandomScriptingSettings(random, builder);
        if (random.nextBoolean()) {
            if (random.nextInt(10) == 0) { // do something crazy slow here
                builder.put(IndicesStore.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC,
                        new ByteSizeValue(RandomInts.randomIntBetween(random, 1, 10), ByteSizeUnit.MB));
            } else {
                builder.put(IndicesStore.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC,
                        new ByteSizeValue(RandomInts.randomIntBetween(random, 10, 200), ByteSizeUnit.MB));
            }
        }
        if (random.nextBoolean()) {
            builder.put(IndicesStore.INDICES_STORE_THROTTLE_TYPE,
                    RandomPicks.randomFrom(random, StoreRateLimiting.Type.values()));
        }
        if (random.nextBoolean()) {
            builder.put(MergeSchedulerConfig.AUTO_THROTTLE, false);
        }
        if (random.nextBoolean()) {
            if (random.nextInt(10) == 0) { // do something crazy slow here
                builder.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC,
                        new ByteSizeValue(RandomInts.randomIntBetween(random, 1, 10), ByteSizeUnit.MB));
            } else {
                builder.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC,
                        new ByteSizeValue(RandomInts.randomIntBetween(random, 10, 200), ByteSizeUnit.MB));
            }
        }
        if (random.nextBoolean()) {
            builder.put(RecoverySettings.INDICES_RECOVERY_COMPRESS, random.nextBoolean());
        }
        if (random.nextBoolean()) {
            builder.put(TranslogConfig.INDEX_TRANSLOG_FS_TYPE,
                    RandomPicks.randomFrom(random, TranslogWriter.Type.values()).name());
        }
        if (random.nextBoolean()) {
            builder.put(IndicesQueryCache.INDEX_CACHE_QUERY_ENABLED, random.nextBoolean());
        }
        if (random.nextBoolean()) {
            builder.put("index.shard.check_on_startup", randomFrom(random, "false", "checksum", "true"));
        }
        if (random.nextBoolean()) {
            builder.put(IndicesQueryCache.INDICES_CACHE_QUERY_CONCURRENCY_LEVEL,
                    RandomInts.randomIntBetween(random, 1, 32));
            builder.put(IndicesFieldDataCache.FIELDDATA_CACHE_CONCURRENCY_LEVEL,
                    RandomInts.randomIntBetween(random, 1, 32));
        }
        if (random.nextBoolean()) {
            builder.put(NettyTransport.PING_SCHEDULE, RandomInts.randomIntBetween(random, 100, 2000) + "ms");
        }
        return builder;
    }
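Once a coin flip decides that a setting should be randomized, both test classes pick the concrete value with randomFrom-style helpers (RandomPicks.randomFrom comes from the randomizedtesting library). A minimal generic version of such a helper, written here only to illustrate the idea:

    import java.util.List;
    import java.util.Random;

    final class RandomFrom {
        // Pick a uniformly random element; mirrors RandomPicks.randomFrom(random, values).
        static <T> T randomFrom(Random random, List<T> values) {
            return values.get(random.nextInt(values.size()));
        }

        @SafeVarargs
        static <T> T randomFrom(Random random, T... values) {
            return values[random.nextInt(values.length)];
        }
    }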
From source file: org.elasticsearch.test.ElasticsearchIntegrationTest.java

    /**
     * Indexes the given {@link IndexRequestBuilder} instances randomly. It shuffles the given builders and
     * indexes them in either a blocking or an async fashion. This is very useful to catch problems that relate
     * to internal document ids or index segment creation. Some features might have bugs when a given document
     * is the first or the last in a segment, or when only one document is in a segment, etc. This method
     * prevents issues like this by randomizing the index layout.
     *
     * @param forceRefresh if <tt>true</tt> all involved indices are refreshed once the documents are indexed.
     * @param dummyDocuments if <tt>true</tt> some empty dummy documents may be randomly inserted into the
     *        document list and deleted once all documents are indexed. This is useful to produce deleted
     *        documents on the server side.
     * @param maybeFlush if <tt>true</tt> this method may randomly execute full flushes after index operations.
     * @param builders the documents to index.
     */
    public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean maybeFlush,
            List<IndexRequestBuilder> builders) throws InterruptedException, ExecutionException {
        Random random = getRandom();
        Set<String> indicesSet = new HashSet<>();
        for (IndexRequestBuilder builder : builders) {
            indicesSet.add(builder.request().index());
        }
        Set<Tuple<String, String>> bogusIds = new HashSet<>();
        if (random.nextBoolean() && !builders.isEmpty() && dummyDocuments) {
            builders = new ArrayList<>(builders);
            final String[] indices = indicesSet.toArray(new String[indicesSet.size()]);
            // inject some bogus docs
            final int numBogusDocs = scaledRandomIntBetween(1, builders.size() * 2);
            final int unicodeLen = between(1, 10);
            for (int i = 0; i < numBogusDocs; i++) {
                String id = randomRealisticUnicodeOfLength(unicodeLen)
                        + Integer.toString(dummmyDocIdGenerator.incrementAndGet());
                String index = RandomPicks.randomFrom(random, indices);
                bogusIds.add(new Tuple<>(index, id));
                builders.add(client().prepareIndex(index, RANDOM_BOGUS_TYPE, id).setSource("{}"));
            }
        }
        final String[] indices = indicesSet.toArray(new String[indicesSet.size()]);
        Collections.shuffle(builders, random);
        final CopyOnWriteArrayList<Tuple<IndexRequestBuilder, Throwable>> errors = new CopyOnWriteArrayList<>();
        List<CountDownLatch> inFlightAsyncOperations = new ArrayList<>();
        // If you are indexing just a few documents then frequently do it one at a time. If many then frequently in bulk.
        if (builders.size() < FREQUENT_BULK_THRESHOLD ? frequently()
                : builders.size() < ALWAYS_BULK_THRESHOLD ? rarely() : false) {
            if (frequently()) {
                logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), true, false);
                for (IndexRequestBuilder indexRequestBuilder : builders) {
                    indexRequestBuilder
                            .execute(new PayloadLatchedActionListener<IndexResponse, IndexRequestBuilder>(
                                    indexRequestBuilder, newLatch(inFlightAsyncOperations), errors));
                    postIndexAsyncActions(indices, inFlightAsyncOperations, maybeFlush);
                }
            } else {
                logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), false, false);
                for (IndexRequestBuilder indexRequestBuilder : builders) {
                    indexRequestBuilder.execute().actionGet();
                    postIndexAsyncActions(indices, inFlightAsyncOperations, maybeFlush);
                }
            }
        } else {
            List<List<IndexRequestBuilder>> partition = Lists.partition(builders,
                    Math.min(MAX_BULK_INDEX_REQUEST_SIZE,
                            Math.max(1, (int) (builders.size() * randomDouble()))));
            logger.info("Index [{}] docs async: [{}] bulk: [{}] partitions [{}]", builders.size(), false, true,
                    partition.size());
            for (List<IndexRequestBuilder> segmented : partition) {
                BulkRequestBuilder bulkBuilder = client().prepareBulk();
                for (IndexRequestBuilder indexRequestBuilder : segmented) {
                    bulkBuilder.add(indexRequestBuilder);
                }
                BulkResponse actionGet = bulkBuilder.execute().actionGet();
                assertThat(actionGet.hasFailures() ? actionGet.buildFailureMessage() : "",
                        actionGet.hasFailures(), equalTo(false));
            }
        }
        for (CountDownLatch operation : inFlightAsyncOperations) {
            operation.await();
        }
        final List<Throwable> actualErrors = new ArrayList<>();
        for (Tuple<IndexRequestBuilder, Throwable> tuple : errors) {
            if (ExceptionsHelper.unwrapCause(tuple.v2()) instanceof EsRejectedExecutionException) {
                tuple.v1().execute().actionGet(); // re-index if rejected
            } else {
                actualErrors.add(tuple.v2());
            }
        }
        assertThat(actualErrors, emptyIterable());
        if (!bogusIds.isEmpty()) {
            // delete the bogus types again - it might trigger merges or at least holes in the segments and enforces deleted docs!
            for (Tuple<String, String> doc : bogusIds) {
                // see https://github.com/elasticsearch/elasticsearch/issues/8706
                final DeleteResponse deleteResponse = client()
                        .prepareDelete(doc.v1(), RANDOM_BOGUS_TYPE, doc.v2()).get();
                if (deleteResponse.isFound() == false) {
                    logger.warn("failed to delete a dummy doc [{}][{}]", doc.v1(), doc.v2());
                }
            }
        }
        if (forceRefresh) {
            assertNoFailures(client().admin().indices().prepareRefresh(indices)
                    .setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute().get());
        }
    }
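Two details in indexRandom() are worth isolating: nextBoolean() gates whether bogus documents are injected at all, and Collections.shuffle(builders, random) reuses the same Random so the whole layout stays reproducible from one seed. A sketch of just those two moves (list contents hypothetical):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.Random;

    class ShuffledWork {
        // Hypothetical stand-in for the list of index requests.
        static List<String> prepare(List<String> docs, Random random) {
            List<String> work = new ArrayList<>(docs);
            if (random.nextBoolean() && !work.isEmpty()) {
                // sometimes inject extra "bogus" entries, deleted again later in the real test
                int extra = 1 + random.nextInt(work.size());
                for (int i = 0; i < extra; i++) {
                    work.add("bogus-" + i);
                }
            }
            Collections.shuffle(work, random); // same Random => reproducible layout per seed
            return work;
        }
    }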
From source file: com.linkedin.pinot.controller.helix.core.realtime.PinotLLCRealtimeSegmentManagerTest.java

    @Test
    public void testCompleteCommittingSegments() throws Exception {
        // Run multiple times randomizing the situation.
        for (int i = 0; i < 100; i++) {
            final List<ZNRecord> existingSegmentMetadata = new ArrayList<>(64);
            final int nPartitions = 16;
            final long seed = new Random().nextLong();
            Random random = new Random(seed);
            final int maxSeq = 10;
            final long now = System.currentTimeMillis();
            final String tableName = "table";
            final String realtimeTableName = TableNameBuilder.REALTIME.tableNameWithType(tableName);
            final IdealState idealState = PinotTableIdealStateBuilder
                    .buildEmptyKafkaConsumerRealtimeIdealStateFor(realtimeTableName, 19);
            int nIncompleteCommitsStepOne = 0;
            int nIncompleteCommitsStepTwo = 0;
            final String topic = "someTopic";
            final int nInstances = 5;
            final int nReplicas = 3;
            List<String> instances = getInstanceList(nInstances);

            FakePinotLLCRealtimeSegmentManager segmentManager = new FakePinotLLCRealtimeSegmentManager(false,
                    null);
            TableConfig tableConfig = makeTableConfig(realtimeTableName, nReplicas, KAFKA_OFFSET, DUMMY_HOST,
                    topic, DEFAULT_SERVER_TENANT, DEFAULT_STREAM_ASSIGNMENT_STRATEGY);
            segmentManager.addTableToStore(realtimeTableName, tableConfig, nPartitions);
            StreamMetadata streamMetadata = makeKafkaStreamMetadata(topic, KAFKA_OFFSET, DUMMY_HOST);
            RealtimeTagConfig realtimeTagConfig = new RealtimeTagConfig(tableConfig, null);
            segmentManager.setupHelixEntries(realtimeTagConfig, streamMetadata, nPartitions, instances,
                    idealState, false);
            PartitionAssignment partitionAssignment = segmentManager
                    .getStreamPartitionAssignment(realtimeTableName);

            for (int p = 0; p < nPartitions; p++) {
                int curSeq = random.nextInt(maxSeq); // Current segment sequence ID for that partition
                if (curSeq == 0) {
                    curSeq++;
                }
                // Step-1 : update committing segment metadata status to DONE
                // Step-2 : create new segment metadata
                // Step-3 : update ideal state - committing segments to ONLINE, new segments to CONSUMING
                boolean failAfterStepOne = false;
                boolean failAfterStepTwo = false;
                if (random.nextBoolean()) {
                    failAfterStepOne = true;
                }
                if (!failAfterStepOne) {
                    if (random.nextBoolean()) {
                        failAfterStepTwo = true;
                    }
                }
                for (int s = 0; s < curSeq; s++) {
                    LLCSegmentName segmentName = new LLCSegmentName(tableName, p, s, now);
                    String segNameStr = segmentName.getSegmentName();
                    CommonConstants.Segment.Realtime.Status status =
                        CommonConstants.Segment.Realtime.Status.DONE;
                    String state = PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE;
                    if (s == curSeq - 1) { // for last segment in the sequence
                        if (failAfterStepOne) {
                            // failAfterStepOne means segment metadata was updated to DONE,
                            // but no steps after that
                            status = CommonConstants.Segment.Realtime.Status.DONE;
                            nIncompleteCommitsStepOne++;
                        } else if (failAfterStepTwo) {
                            // failAfterStepTwo means segment metadata was updated to DONE,
                            // new segment metadata was created with IN_PROGRESS (will do that below),
                            // but no steps after that
                            status = CommonConstants.Segment.Realtime.Status.DONE;
                            nIncompleteCommitsStepTwo++;
                        } else {
                            status = CommonConstants.Segment.Realtime.Status.IN_PROGRESS;
                        }
                        state = PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE;
                    }
                    // add metadata to segment metadata
                    LLCRealtimeSegmentZKMetadata metadata = new LLCRealtimeSegmentZKMetadata();
                    metadata.setSegmentName(segNameStr);
                    metadata.setStatus(status);
                    existingSegmentMetadata.add(metadata.toZNRecord());
                    // add segment to ideal state
                    List<String> instancesForThisSeg = partitionAssignment
                            .getInstancesListForPartition(Integer.toString(p));
                    for (String instance : instancesForThisSeg) {
                        idealState.setPartitionState(segNameStr, instance, state);
                    }
                }
                if (failAfterStepTwo) {
                    // failAfterStepTwo means segment metadata was updated to DONE (did that above),
                    // new segment metadata was created with IN_PROGRESS, but no steps after that
                    LLCSegmentName segmentName = new LLCSegmentName(tableName, p, curSeq, now);
                    LLCRealtimeSegmentZKMetadata metadata = new LLCRealtimeSegmentZKMetadata();
                    metadata.setSegmentName(segmentName.getSegmentName());
                    metadata.setStatus(CommonConstants.Segment.Realtime.Status.IN_PROGRESS);
                    existingSegmentMetadata.add(metadata.toZNRecord());
                }
            }

            segmentManager._tableIdealState = idealState;
            segmentManager._existingSegmentMetadata = existingSegmentMetadata;

            segmentManager.completeCommittingSegments(TableNameBuilder.REALTIME.tableNameWithType(tableName));

            Assert.assertEquals(segmentManager._nCallsToCreateNewSegmentMetadata, nIncompleteCommitsStepOne,
                    "Failed with seed " + seed);
            Assert.assertEquals(segmentManager._nCallsToUpdateHelix,
                    nIncompleteCommitsStepOne + nIncompleteCommitsStepTwo, "Failed with seed " + seed);
        }
    }
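The test draws a fresh seed, seeds the Random from it, and embeds the seed in every assertion message, so any failing randomized run can be replayed exactly. That idiom on its own, as a minimal sketch:

    import java.util.Random;

    class SeededRepro {
        public static void main(String[] args) {
            // Draw a seed once, then derive all randomness from it.
            long seed = new Random().nextLong();
            Random random = new Random(seed);

            boolean failAfterStepOne = random.nextBoolean();
            boolean failAfterStepTwo = !failAfterStepOne && random.nextBoolean();

            // Report the seed alongside any failure: constructing new Random(seed)
            // later reproduces the exact same sequence of coin flips.
            System.out.println("seed=" + seed + " stepOne=" + failAfterStepOne
                    + " stepTwo=" + failAfterStepTwo);
        }
    }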
From source file: org.lilyproject.hadooptestfw.fork.HBaseTestingUtility.java

    /**
     * Creates a random table with the given parameters
     */
    public HTable createRandomTable(String tableName, final Collection<String> families, final int maxVersions,
            final int numColsPerRow, final int numFlushes, final int numRegions, final int numRowsPerFlush)
            throws IOException, InterruptedException {

        LOG.info("\n\nCreating random table " + tableName + " with " + numRegions + " regions, " + numFlushes
                + " storefiles per region, " + numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions
                + "\n");

        final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
        final int numCF = families.size();
        final byte[][] cfBytes = new byte[numCF][];
        final byte[] tableNameBytes = Bytes.toBytes(tableName);

        {
            int cfIndex = 0;
            for (String cf : families) {
                cfBytes[cfIndex++] = Bytes.toBytes(cf);
            }
        }

        final int actualStartKey = 0;
        final int actualEndKey = Integer.MAX_VALUE;
        final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
        final int splitStartKey = actualStartKey + keysPerRegion;
        final int splitEndKey = actualEndKey - keysPerRegion;
        final String keyFormat = "%08x";
        final HTable table = createTable(tableNameBytes, cfBytes, maxVersions,
                Bytes.toBytes(String.format(keyFormat, splitStartKey)),
                Bytes.toBytes(String.format(keyFormat, splitEndKey)), numRegions);

        if (hbaseCluster != null) {
            getMiniHBaseCluster().flushcache(HConstants.META_TABLE_NAME);
        }

        for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
            for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
                final byte[] row = Bytes.toBytes(String.format(keyFormat,
                        actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));
                Put put = new Put(row);
                Delete del = new Delete(row);
                for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
                    final byte[] cf = cfBytes[rand.nextInt(numCF)];
                    final long ts = rand.nextInt();
                    final byte[] qual = Bytes.toBytes("col" + iCol);
                    if (rand.nextBoolean()) {
                        final byte[] value = Bytes.toBytes("value_for_row_" + iRow + "_cf_"
                                + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" + ts + "_random_"
                                + rand.nextLong());
                        put.add(cf, qual, ts, value);
                    } else if (rand.nextDouble() < 0.8) {
                        del.deleteColumn(cf, qual, ts);
                    } else {
                        del.deleteColumns(cf, qual, ts);
                    }
                }
                if (!put.isEmpty()) {
                    table.put(put);
                }
                if (!del.isEmpty()) {
                    table.delete(del);
                }
            }
            LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
            table.flushCommits();
            if (hbaseCluster != null) {
                getMiniHBaseCluster().flushcache(tableNameBytes);
            }
        }

        return table;
    }
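The inner loop mixes an even nextBoolean() split (write a value) with a weighted rand.nextDouble() < 0.8 split between the two delete flavors, giving roughly 50/40/10 odds overall. A self-contained sketch of that branching with counters in place of the HBase calls:

    import java.util.Random;

    class WeightedBranches {
        public static void main(String[] args) {
            Random rand = new Random(12345L);
            int puts = 0, deleteColumn = 0, deleteColumns = 0;
            for (int i = 0; i < 100_000; i++) {
                if (rand.nextBoolean()) {
                    puts++;              // ~50%: write a value
                } else if (rand.nextDouble() < 0.8) {
                    deleteColumn++;      // ~40% overall: delete one version
                } else {
                    deleteColumns++;     // ~10% overall: delete all versions
                }
            }
            System.out.printf("puts=%d deleteColumn=%d deleteColumns=%d%n", puts, deleteColumn, deleteColumns);
        }
    }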