List of usage examples for java.util.Collections.shuffle(List, Random)
@SuppressWarnings({ "rawtypes", "unchecked" })
public static void shuffle(List<?> list, Random rnd)

Randomly permutes the given list using the given source of randomness.
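Before the project examples, a minimal standalone sketch (not from any of the projects below) of the basic contract: passing a seeded Random makes the permutation reproducible, which is the pattern most of the examples on this page rely on.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Random;

public class ShuffleDemo {
    public static void main(String[] args) {
        List<Integer> numbers = new ArrayList<>(Arrays.asList(1, 2, 3, 4, 5));
        // A fixed seed makes the permutation identical across runs.
        Collections.shuffle(numbers, new Random(42L));
        System.out.println(numbers);
    }
}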
From source file: edu.brown.utils.ArgumentsParser.java
/**
 * @throws Exception
 */
private void loadWorkload() throws Exception {
    final boolean debug = LOG.isDebugEnabled();
    // Workload Trace
    if (this.params.containsKey(PARAM_WORKLOAD)) {
        assert (this.catalog_db != null) : "Missing catalog!";
        File path = new File(this.params.get(PARAM_WORKLOAD));

        boolean weightedTxns = this.getBooleanParam(PARAM_WORKLOAD_XACT_WEIGHTS, false);
        if (debug)
            LOG.debug("Use Transaction Weights in Limits: " + weightedTxns);

        // This will prune out duplicate trace records...
        if (params.containsKey(PARAM_WORKLOAD_REMOVE_DUPES)) {
            DuplicateTraceFilter filter = new DuplicateTraceFilter();
            this.workload_filter = (this.workload_filter != null ? filter.attach(this.workload_filter) : filter);
            if (debug)
                LOG.debug("Attached " + filter.debugImpl());
        }

        // TRANSACTION OFFSET
        if (params.containsKey(PARAM_WORKLOAD_XACT_OFFSET)) {
            this.workload_xact_offset = Long.parseLong(params.get(PARAM_WORKLOAD_XACT_OFFSET));
            ProcedureLimitFilter filter = new ProcedureLimitFilter(-1l, this.workload_xact_offset, weightedTxns);
            // Important! The offset should go in the front!
            this.workload_filter = (this.workload_filter != null ? filter.attach(this.workload_filter) : filter);
            if (debug)
                LOG.debug("Attached " + filter.debugImpl());
        }

        // BASE PARTITIONS
        if (params.containsKey(PARAM_WORKLOAD_RANDOM_PARTITIONS)
                || params.containsKey(PARAM_WORKLOAD_BASE_PARTITIONS)) {
            BasePartitionTxnFilter filter = new BasePartitionTxnFilter(new PartitionEstimator(this.catalogContext));

            // FIXED LIST
            if (params.containsKey(PARAM_WORKLOAD_BASE_PARTITIONS)) {
                for (String p_str : this.getParam(PARAM_WORKLOAD_BASE_PARTITIONS).split(",")) {
                    workload_base_partitions.add(Integer.valueOf(p_str));
                } // FOR
            // RANDOM
            } else {
                double factor = this.getDoubleParam(PARAM_WORKLOAD_RANDOM_PARTITIONS);
                List<Integer> all_partitions = new ArrayList<Integer>(catalogContext.getAllPartitionIds());
                Collections.shuffle(all_partitions, new Random());
                workload_base_partitions.addAll(all_partitions.subList(0, (int) (all_partitions.size() * factor)));
            }
            filter.addPartitions(workload_base_partitions);
            this.workload_filter = (this.workload_filter != null ? this.workload_filter.attach(filter) : filter);
            if (debug)
                LOG.debug("Attached " + filter.debugImpl());
        }

        // Txn Limit
        this.workload_xact_limit = this.getLongParam(PARAM_WORKLOAD_XACT_LIMIT);
        ObjectHistogram<String> proc_histogram = null;

        // Include/exclude procedures from the traces
        if (params.containsKey(PARAM_WORKLOAD_PROC_INCLUDE) || params.containsKey(PARAM_WORKLOAD_PROC_EXCLUDE)) {
            Filter filter = new ProcedureNameFilter(weightedTxns);

            // INCLUDE
            String temp = params.get(PARAM_WORKLOAD_PROC_INCLUDE);
            if (temp != null && !temp.equals(ProcedureNameFilter.INCLUDE_ALL)) {
                // We can take the counts for PROC_INCLUDE and scale them
                // with the multiplier
                double multiplier = 1.0d;
                if (this.hasDoubleParam(PARAM_WORKLOAD_PROC_INCLUDE_MULTIPLIER)) {
                    multiplier = this.getDoubleParam(PARAM_WORKLOAD_PROC_INCLUDE_MULTIPLIER);
                    if (debug)
                        LOG.debug("Workload Procedure Multiplier: " + multiplier);
                }

                // Default Txn Frequencies
                String procinclude = params.get(PARAM_WORKLOAD_PROC_INCLUDE);
                if (procinclude.equalsIgnoreCase("default")) {
                    procinclude = AbstractProjectBuilder.getProjectBuilder(catalog_type)
                            .getTransactionFrequencyString();
                }

                Map<String, Integer> limits = new HashMap<String, Integer>();
                int total_unlimited = 0;
                int total = 0;
                for (String proc_name : procinclude.split(",")) {
                    int limit = -1;
                    // Check if there is a limit for this procedure
                    if (proc_name.contains(":")) {
                        String pieces[] = proc_name.split(":");
                        proc_name = pieces[0];
                        limit = (int) Math.round(Integer.parseInt(pieces[1]) * multiplier);
                    }

                    if (limit < 0) {
                        if (proc_histogram == null) {
                            if (debug)
                                LOG.debug("Generating procedure histogram from workload file");
                            proc_histogram = WorkloadUtil.getProcedureHistogram(path);
                        }
                        limit = (int) proc_histogram.get(proc_name, 0);
                        total_unlimited += limit;
                    } else {
                        total += limit;
                    }
                    limits.put(proc_name, limit);
                } // FOR

                // If we have a workload limit and some txns that we want to get
                // unlimited records from, then we want to modify the other txns
                // so that we fill in the "gap"
                if (this.workload_xact_limit != null && total_unlimited > 0) {
                    int remaining = this.workload_xact_limit.intValue() - total - total_unlimited;
                    if (remaining > 0) {
                        for (Entry<String, Integer> e : limits.entrySet()) {
                            double ratio = e.getValue() / (double) total;
                            e.setValue((int) Math.ceil(e.getValue() + (ratio * remaining)));
                        } // FOR
                    }
                }

                ObjectHistogram<String> proc_multiplier_histogram = null;
                if (debug) {
                    if (proc_histogram != null)
                        LOG.debug("Full Workload Histogram:\n" + proc_histogram);
                    proc_multiplier_histogram = new ObjectHistogram<String>();
                }
                total = 0;
                for (Entry<String, Integer> e : limits.entrySet()) {
                    if (debug)
                        proc_multiplier_histogram.put(e.getKey(), e.getValue());
                    ((ProcedureNameFilter) filter).include(e.getKey(), e.getValue());
                    total += e.getValue();
                } // FOR
                if (debug)
                    LOG.debug("Multiplier Histogram [total=" + total + "]:\n" + proc_multiplier_histogram);
            }

            // EXCLUDE
            temp = params.get(PARAM_WORKLOAD_PROC_EXCLUDE);
            if (temp != null) {
                for (String proc_name : params.get(PARAM_WORKLOAD_PROC_EXCLUDE).split(",")) {
                    ((ProcedureNameFilter) filter).exclude(proc_name);
                } // FOR
            }

            // Sampling!!
            if (this.getBooleanParam(PARAM_WORKLOAD_PROC_SAMPLE, false)) {
                if (debug)
                    LOG.debug("Attaching sampling filter");
                if (proc_histogram == null)
                    proc_histogram = WorkloadUtil.getProcedureHistogram(path);
                Map<String, Integer> proc_includes = ((ProcedureNameFilter) filter).getProcIncludes();
                SamplingFilter sampling_filter = new SamplingFilter(proc_includes, proc_histogram);
                filter = sampling_filter;
                if (debug)
                    LOG.debug("Workload Procedure Histogram:\n" + proc_histogram);
            }

            // Attach our new filter to the chain (or make it the head if
            // it's the first one)
            this.workload_filter = (this.workload_filter != null ? this.workload_filter.attach(filter) : filter);
            if (debug)
                LOG.debug("Attached " + filter.debugImpl());
        }

        // TRANSACTION LIMIT
        if (this.workload_xact_limit != null) {
            ProcedureLimitFilter filter = new ProcedureLimitFilter(this.workload_xact_limit, weightedTxns);
            this.workload_filter = (this.workload_filter != null ? this.workload_filter.attach(filter) : filter);
            if (debug)
                LOG.debug("Attached " + filter.debugImpl());
        }

        // QUERY LIMIT
        if (params.containsKey(PARAM_WORKLOAD_QUERY_LIMIT)) {
            this.workload_query_limit = Long.parseLong(params.get(PARAM_WORKLOAD_QUERY_LIMIT));
            QueryLimitFilter filter = new QueryLimitFilter(this.workload_query_limit);
            this.workload_filter = (this.workload_filter != null ? this.workload_filter.attach(filter) : filter);
        }

        if (this.workload_filter != null && debug)
            LOG.debug("Workload Filters: " + this.workload_filter.toString());
        this.workload = new Workload(this.catalog);
        this.workload.load(path, this.catalog_db, this.workload_filter);
        this.workload_path = path;
        if (this.workload_filter != null)
            this.workload_filter.reset();
    }

    // Workload Statistics
    if (this.catalog_db != null) {
        if (this.params.containsKey(PARAM_STATS)) {
            this.stats = new WorkloadStatistics(this.catalog_db);
            String path = this.params.get(PARAM_STATS);
            if (debug)
                LOG.debug("Loading in workload statistics from '" + path + "'");
            this.stats_path = new File(path);
            try {
                this.stats.load(this.stats_path, this.catalog_db);
            } catch (Throwable ex) {
                throw new RuntimeException("Failed to load stats file '" + this.stats_path + "'", ex);
            }
        }

        // Scaling
        if (this.params.containsKey(PARAM_STATS_SCALE_FACTOR) && this.stats != null) {
            double scale_factor = this.getDoubleParam(PARAM_STATS_SCALE_FACTOR);
            LOG.info("Scaling TableStatistics: " + scale_factor);
            AbstractTableStatisticsGenerator generator = AbstractTableStatisticsGenerator
                    .factory(this.catalog_db, this.catalog_type, scale_factor);
            generator.apply(this.stats);
        }
    }
}
From source file: org.apache.solr.client.solrj.impl.CloudSolrServer.java
@Override
public NamedList<Object> request(SolrRequest request) throws SolrServerException, IOException {
    connect();

    ClusterState clusterState = zkStateReader.getClusterState();

    boolean sendToLeaders = false;
    List<String> replicas = null;

    if (request instanceof IsUpdateRequest) {
        if (request instanceof UpdateRequest) {
            NamedList response = directUpdate((AbstractUpdateRequest) request, clusterState);
            if (response != null) {
                return response;
            }
        }
        sendToLeaders = true;
        replicas = new ArrayList<>();
    }

    SolrParams reqParams = request.getParams();
    if (reqParams == null) {
        reqParams = new ModifiableSolrParams();
    }
    List<String> theUrlList = new ArrayList<>();
    if (request.getPath().equals("/admin/collections") || request.getPath().equals("/admin/cores")) {
        Set<String> liveNodes = clusterState.getLiveNodes();
        for (String liveNode : liveNodes) {
            theUrlList.add(zkStateReader.getBaseUrlForNodeName(liveNode));
        }
    } else {
        String collection = reqParams.get(UpdateParams.COLLECTION, defaultCollection);

        if (collection == null) {
            throw new SolrServerException(
                    "No collection param specified on request and no default collection has been set.");
        }

        Set<String> collectionsList = getCollectionList(clusterState, collection);
        if (collectionsList.size() == 0) {
            throw new SolrException(ErrorCode.BAD_REQUEST, "Could not find collection: " + collection);
        }

        String shardKeys = reqParams.get(ShardParams._ROUTE_);
        if (shardKeys == null) {
            shardKeys = reqParams.get(ShardParams.SHARD_KEYS); // deprecated
        }

        // TODO: not a big deal because of the caching, but we could avoid looking
        // at every shard when getting leaders if we tweaked some things

        // Retrieve slices from the cloud state and, for each collection
        // specified, add it to the Map of slices.
        Map<String, Slice> slices = new HashMap<>();
        for (String collectionName : collectionsList) {
            DocCollection col = clusterState.getCollection(collectionName);
            Collection<Slice> routeSlices = col.getRouter().getSearchSlices(shardKeys, reqParams, col);
            ClientUtils.addSlices(slices, collectionName, routeSlices, true);
        }
        Set<String> liveNodes = clusterState.getLiveNodes();

        List<String> leaderUrlList = null;
        List<String> urlList = null;
        List<String> replicasList = null;

        // build a map of unique nodes
        // TODO: allow filtering by group, role, etc
        Map<String, ZkNodeProps> nodes = new HashMap<>();
        List<String> urlList2 = new ArrayList<>();
        for (Slice slice : slices.values()) {
            for (ZkNodeProps nodeProps : slice.getReplicasMap().values()) {
                ZkCoreNodeProps coreNodeProps = new ZkCoreNodeProps(nodeProps);
                String node = coreNodeProps.getNodeName();
                if (!liveNodes.contains(coreNodeProps.getNodeName())
                        || !coreNodeProps.getState().equals(ZkStateReader.ACTIVE))
                    continue;
                if (nodes.put(node, nodeProps) == null) {
                    if (!sendToLeaders || (sendToLeaders && coreNodeProps.isLeader())) {
                        String url;
                        if (reqParams.get(UpdateParams.COLLECTION) == null) {
                            url = ZkCoreNodeProps.getCoreUrl(nodeProps.getStr(ZkStateReader.BASE_URL_PROP),
                                    defaultCollection);
                        } else {
                            url = coreNodeProps.getCoreUrl();
                        }
                        urlList2.add(url);
                    } else if (sendToLeaders) {
                        String url;
                        if (reqParams.get(UpdateParams.COLLECTION) == null) {
                            url = ZkCoreNodeProps.getCoreUrl(nodeProps.getStr(ZkStateReader.BASE_URL_PROP),
                                    defaultCollection);
                        } else {
                            url = coreNodeProps.getCoreUrl();
                        }
                        replicas.add(url);
                    }
                }
            }
        }

        if (sendToLeaders) {
            leaderUrlList = urlList2;
            replicasList = replicas;
        } else {
            urlList = urlList2;
        }

        if (sendToLeaders) {
            theUrlList = new ArrayList<>(leaderUrlList.size());
            theUrlList.addAll(leaderUrlList);
        } else {
            theUrlList = new ArrayList<>(urlList.size());
            theUrlList.addAll(urlList);
        }
        Collections.shuffle(theUrlList, rand);
        if (sendToLeaders) {
            ArrayList<String> theReplicas = new ArrayList<>(replicasList.size());
            theReplicas.addAll(replicasList);
            Collections.shuffle(theReplicas, rand);
            // System.out.println("leaders:" + theUrlList);
            // System.out.println("replicas:" + theReplicas);
            theUrlList.addAll(theReplicas);
        }
    }

    // System.out.println("########################## MAKING REQUEST TO " + theUrlList);

    LBHttpSolrServer.Req req = new LBHttpSolrServer.Req(request, theUrlList);
    LBHttpSolrServer.Rsp rsp = lbServer.request(req);
    return rsp.getResponse();
}
From source file: ch.uzh.supersede.feedbacklibrary.utils.Utils.java
/**
 * This method opens the FeedbackActivity from the feedback library when a PULL feedback is
 * triggered with a random PULL configuration.
 *
 * @param baseURL       the base URL
 * @param activity      the activity in which the method is called
 * @param applicationId the application id
 * @param language      the language
 */
public static void triggerRandomPullFeedback(@NonNull final String baseURL, @NonNull final Activity activity,
        final long applicationId, final @NonNull String language) {
    Retrofit rtf = new Retrofit.Builder().baseUrl(baseURL).addConverterFactory(GsonConverterFactory.create())
            .build();
    feedbackAPI fbAPI = rtf.create(feedbackAPI.class);
    final Call<ResponseBody> checkUpAndRunning = fbAPI.pingOrchestrator();
    if (checkUpAndRunning != null) {
        checkUpAndRunning.enqueue(new Callback<ResponseBody>() {
            @Override
            public void onFailure(Call<ResponseBody> call, Throwable t) {
                Log.e(TAG, "Failed to ping the server. onFailure method called", t);
            }

            @Override
            public void onResponse(Call<ResponseBody> call, Response<ResponseBody> response) {
                if (response.code() == 200) {
                    Retrofit rtf = new Retrofit.Builder().baseUrl(baseURL)
                            .addConverterFactory(GsonConverterFactory.create()).build();
                    feedbackAPI fbAPI = rtf.create(feedbackAPI.class);
                    Call<OrchestratorConfigurationItem> result = fbAPI.getConfiguration(language, applicationId);
                    // Asynchronous call
                    if (result != null) {
                        result.enqueue(new Callback<OrchestratorConfigurationItem>() {
                            @Override
                            public void onFailure(Call<OrchestratorConfigurationItem> call, Throwable t) {
                                Log.e(TAG, "Failed to retrieve the configuration. onFailure method called", t);
                                DialogUtils.showInformationDialog(activity,
                                        new String[] { activity.getResources().getString(
                                                R.string.supersede_feedbacklibrary_feedback_application_unavailable_text) },
                                        true);
                            }

                            @Override
                            public void onResponse(Call<OrchestratorConfigurationItem> call,
                                    Response<OrchestratorConfigurationItem> response) {
                                if (response.code() == 200) {
                                    Log.i(TAG, "Configuration successfully retrieved");
                                    OrchestratorConfigurationItem configuration = response.body();
                                    if (configuration != null) {
                                        List<ConfigurationItem> configurationItems = configuration
                                                .getConfigurationItems();
                                        List<Long> shuffleIds = new ArrayList<>();
                                        Map<Long, List<Map<String, Object>>> idParameters = new HashMap<>();
                                        for (ConfigurationItem configurationItem : configurationItems) {
                                            if (configurationItem.getType().equals("PULL")) {
                                                shuffleIds.add(configurationItem.getId());
                                                idParameters.put(configurationItem.getId(), configurationItem
                                                        .getGeneralConfigurationItem().getParameters());
                                            }
                                        }
                                        Random rnd = new Random(System.nanoTime());
                                        Collections.shuffle(shuffleIds, rnd);
                                        for (int i = 0; i < shuffleIds.size(); ++i) {
                                            double likelihood = -1;
                                            // If no "showIntermediateDialog" is provided, show it
                                            boolean showIntermediateDialog = true;
                                            for (Map<String, Object> parameter : idParameters
                                                    .get(shuffleIds.get(i))) {
                                                String key = (String) parameter.get("key");
                                                // Likelihood
                                                if (key.equals("likelihood")) {
                                                    likelihood = (((Double) parameter.get("value")).floatValue());
                                                }
                                                // Intermediate dialog
                                                if (key.equals("showIntermediateDialog")) {
                                                    showIntermediateDialog = (Utils.intToBool(
                                                            ((Double) parameter.get("value")).intValue()));
                                                }
                                            }
                                            if (!(rnd.nextDouble() > likelihood)) {
                                                Intent intent = new Intent(activity, FeedbackActivity.class);
                                                String jsonString = new Gson().toJson(configuration);
                                                intent.putExtra(FeedbackActivity.IS_PUSH_STRING, false);
                                                intent.putExtra(FeedbackActivity.JSON_CONFIGURATION_STRING,
                                                        jsonString);
                                                intent.putExtra(
                                                        FeedbackActivity.SELECTED_PULL_CONFIGURATION_INDEX_STRING,
                                                        shuffleIds.get(i));
                                                intent.putExtra(FeedbackActivity.EXTRA_KEY_BASE_URL, baseURL);
                                                intent.putExtra(FeedbackActivity.EXTRA_KEY_LANGUAGE, language);
                                                if (!showIntermediateDialog) {
                                                    // Start the feedback activity without asking the user
                                                    activity.startActivity(intent);
                                                    break;
                                                } else {
                                                    // Ask the user if (s)he would like to give feedback or not
                                                    DialogUtils.PullFeedbackIntermediateDialog d =
                                                            DialogUtils.PullFeedbackIntermediateDialog.newInstance(
                                                                    activity.getResources().getString(
                                                                            ch.uzh.supersede.feedbacklibrary.R.string.supersede_feedbacklibrary_pull_feedback_question_string),
                                                                    jsonString, shuffleIds.get(i), baseURL,
                                                                    language);
                                                    d.show(activity.getFragmentManager(), "feedbackPopupDialog");
                                                    break;
                                                }
                                            }
                                        }
                                    }
                                } else {
                                    Log.e(TAG, "Failed to retrieve the configuration. Response code == "
                                            + response.code());
                                }
                            }
                        });
                    } else {
                        Log.e(TAG,
                                "Failed to retrieve the configuration. Call<OrchestratorConfigurationItem> result is null");
                    }
                } else {
                    Log.e(TAG, "The server is not up and running. Response code == " + response.code());
                }
            }
        });
    } else {
        Log.e(TAG, "Failed to ping the server. Call<ResponseBody> checkUpAndRunning result is null");
    }
}
From source file: com.hichinaschool.flashcards.libanki.Sched.java
private boolean _fillLrnDay() {
    if (mLrnCount == 0) {
        return false;
    }
    if (!mLrnDayQueue.isEmpty()) {
        return true;
    }
    while (mLrnDids.size() > 0) {
        long did = mLrnDids.getFirst();
        // fill the queue with the current did
        mLrnDayQueue.clear();
        Cursor cur = null;
        try {
            cur = mCol.getDb().getDatabase().rawQuery("SELECT id FROM cards WHERE did = " + did
                    + " AND queue = 3 AND due <= " + mToday + " LIMIT " + mQueueLimit, null);
            while (cur.moveToNext()) {
                mLrnDayQueue.add(cur.getLong(0));
            }
        } finally {
            if (cur != null && !cur.isClosed()) {
                cur.close();
            }
        }
        if (mLrnDayQueue.size() > 0) {
            // order
            Random r = new Random();
            r.setSeed(mToday);
            Collections.shuffle(mLrnDayQueue, r);
            // is the current did empty?
            if (mLrnDayQueue.size() < mQueueLimit) {
                mLrnDids.remove();
            }
            return true;
        }
        // nothing left in the deck; move to next
        mLrnDids.remove();
    }
    return false;
}
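The seeding trick above (r.setSeed(mToday)) keeps the learn-day queue in a stable order for the whole day while still varying from day to day. A standalone sketch of the same idea, with illustrative names (DailyOrderDemo and dailyOrder are not part of the library above):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Random;

public class DailyOrderDemo {
    // Returns the same permutation for the same day number, a new one each day.
    static List<String> dailyOrder(List<String> items, long dayNumber) {
        List<String> copy = new ArrayList<>(items);
        Random r = new Random();
        r.setSeed(dayNumber); // same seed => same shuffle
        Collections.shuffle(copy, r);
        return copy;
    }

    public static void main(String[] args) {
        List<String> cards = Arrays.asList("a", "b", "c", "d");
        System.out.println(dailyOrder(cards, 42)); // identical on every call for day 42
        System.out.println(dailyOrder(cards, 43)); // a different order for day 43
    }
}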
From source file: com.pinterest.pinlater.PinLaterBackendBase.java
private List<String> getRandomShardShuffle() {
    List<String> shards = new ArrayList<String>();
    shards.addAll(getShards());
    Collections.shuffle(shards, RANDOM);
    return shards;
}
From source file: org.apache.solr.client.solrj.impl.CloudSolrClient.java
private NamedList<Object> directUpdate(AbstractUpdateRequest request, String collection,
        ClusterState clusterState) throws SolrServerException {
    UpdateRequest updateRequest = (UpdateRequest) request;
    ModifiableSolrParams params = (ModifiableSolrParams) request.getParams();
    ModifiableSolrParams routableParams = new ModifiableSolrParams();
    ModifiableSolrParams nonRoutableParams = new ModifiableSolrParams();

    if (params != null) {
        nonRoutableParams.add(params);
        routableParams.add(params);
        for (String param : NON_ROUTABLE_PARAMS) {
            routableParams.remove(param);
        }
    }

    if (collection == null) {
        throw new SolrServerException(
                "No collection param specified on request and no default collection has been set.");
    }

    // Check to see if the collection is an alias.
    Aliases aliases = zkStateReader.getAliases();
    if (aliases != null) {
        Map<String, String> collectionAliases = aliases.getCollectionAliasMap();
        if (collectionAliases != null && collectionAliases.containsKey(collection)) {
            collection = collectionAliases.get(collection);
        }
    }

    DocCollection col = getDocCollection(clusterState, collection, null);

    DocRouter router = col.getRouter();

    if (router instanceof ImplicitDocRouter) {
        // short circuit as optimization
        return null;
    }

    // Create the URL map, which is keyed on slice name.
    // The value is a list of URLs for each replica in the slice.
    // The first value in the list is the leader for the slice.
    final Map<String, List<String>> urlMap = buildUrlMap(col);
    final Map<String, LBHttpSolrClient.Req> routes = (urlMap == null ? null
            : updateRequest.getRoutes(router, col, urlMap, routableParams, this.idField));
    if (routes == null) {
        if (directUpdatesToLeadersOnly && hasInfoToFindLeaders(updateRequest, idField)) {
            // we have info (documents with ids and/or ids to delete) with
            // which to find the leaders but we could not find (all of) them
            throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
                    "directUpdatesToLeadersOnly==true but could not find leader(s)");
        } else {
            // we could not find a leader or routes yet - use unoptimized general path
            return null;
        }
    }

    final NamedList<Throwable> exceptions = new NamedList<>();
    final NamedList<NamedList> shardResponses = new NamedList<>(routes.size() + 1); // +1 for deleteQuery

    long start = System.nanoTime();

    if (parallelUpdates) {
        final Map<String, Future<NamedList<?>>> responseFutures = new HashMap<>(routes.size());
        for (final Map.Entry<String, LBHttpSolrClient.Req> entry : routes.entrySet()) {
            final String url = entry.getKey();
            final LBHttpSolrClient.Req lbRequest = entry.getValue();
            try {
                MDC.put("CloudSolrClient.url", url);
                responseFutures.put(url, threadPool.submit(() -> lbClient.request(lbRequest).getResponse()));
            } finally {
                MDC.remove("CloudSolrClient.url");
            }
        }

        for (final Map.Entry<String, Future<NamedList<?>>> entry : responseFutures.entrySet()) {
            final String url = entry.getKey();
            final Future<NamedList<?>> responseFuture = entry.getValue();
            try {
                shardResponses.add(url, responseFuture.get());
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
            } catch (ExecutionException e) {
                exceptions.add(url, e.getCause());
            }
        }

        if (exceptions.size() > 0) {
            Throwable firstException = exceptions.getVal(0);
            if (firstException instanceof SolrException) {
                SolrException e = (SolrException) firstException;
                throw new RouteException(ErrorCode.getErrorCode(e.code()), exceptions, routes);
            } else {
                throw new RouteException(ErrorCode.SERVER_ERROR, exceptions, routes);
            }
        }
    } else {
        for (Map.Entry<String, LBHttpSolrClient.Req> entry : routes.entrySet()) {
            String url = entry.getKey();
            LBHttpSolrClient.Req lbRequest = entry.getValue();
            try {
                NamedList<Object> rsp = lbClient.request(lbRequest).getResponse();
                shardResponses.add(url, rsp);
            } catch (Exception e) {
                if (e instanceof SolrException) {
                    throw (SolrException) e;
                } else {
                    throw new SolrServerException(e);
                }
            }
        }
    }

    UpdateRequest nonRoutableRequest = null;
    List<String> deleteQuery = updateRequest.getDeleteQuery();
    if (deleteQuery != null && deleteQuery.size() > 0) {
        UpdateRequest deleteQueryRequest = new UpdateRequest();
        deleteQueryRequest.setDeleteQuery(deleteQuery);
        nonRoutableRequest = deleteQueryRequest;
    }

    Set<String> paramNames = nonRoutableParams.getParameterNames();

    Set<String> intersection = new HashSet<>(paramNames);
    intersection.retainAll(NON_ROUTABLE_PARAMS);

    if (nonRoutableRequest != null || intersection.size() > 0) {
        if (nonRoutableRequest == null) {
            nonRoutableRequest = new UpdateRequest();
        }
        nonRoutableRequest.setParams(nonRoutableParams);
        List<String> urlList = new ArrayList<>();
        urlList.addAll(routes.keySet());
        Collections.shuffle(urlList, rand);
        LBHttpSolrClient.Req req = new LBHttpSolrClient.Req(nonRoutableRequest, urlList);
        try {
            LBHttpSolrClient.Rsp rsp = lbClient.request(req);
            shardResponses.add(urlList.get(0), rsp.getResponse());
        } catch (Exception e) {
            throw new SolrException(ErrorCode.SERVER_ERROR, urlList.get(0), e);
        }
    }

    long end = System.nanoTime();

    RouteResponse rr = condenseResponse(shardResponses,
            (int) TimeUnit.MILLISECONDS.convert(end - start, TimeUnit.NANOSECONDS));
    rr.setRouteResponses(shardResponses);
    rr.setRoutes(routes);
    return rr;
}
From source file: org.apache.solr.servlet.HttpSolrCall.java
private String getCoreUrl(String collectionName, String origCorename, ClusterState clusterState,
        Collection<Slice> slices, boolean byCoreName, boolean activeReplicas) {
    String coreUrl;
    Set<String> liveNodes = clusterState.getLiveNodes();
    List<Slice> randomizedSlices = new ArrayList<>(slices.size());
    randomizedSlices.addAll(slices);
    Collections.shuffle(randomizedSlices, random);

    for (Slice slice : randomizedSlices) {
        List<Replica> randomizedReplicas = new ArrayList<>();
        randomizedReplicas.addAll(slice.getReplicas());
        Collections.shuffle(randomizedReplicas, random);

        for (Replica replica : randomizedReplicas) {
            if (!activeReplicas || (liveNodes.contains(replica.getNodeName())
                    && replica.getState() == Replica.State.ACTIVE)) {

                if (byCoreName && !collectionName.equals(replica.getStr(CORE_NAME_PROP))) {
                    // if it's by core name, make sure they match
                    continue;
                }
                if (replica.getStr(BASE_URL_PROP).equals(cores.getZkController().getBaseUrl())) {
                    // don't count a local core
                    continue;
                }

                if (origCorename != null) {
                    coreUrl = replica.getStr(BASE_URL_PROP) + "/" + origCorename;
                } else {
                    coreUrl = replica.getCoreUrl();
                    if (coreUrl.endsWith("/")) {
                        coreUrl = coreUrl.substring(0, coreUrl.length() - 1);
                    }
                }

                return coreUrl;
            }
        }
    }
    return null;
}
From source file: jeplus.JEPlusProject.java
public String[][] getLHSJobList(int LHSsize, Random randomsrc) {
    if (randomsrc == null)
        randomsrc = RandomSource.getRandomGenerator();

    String[][] JobList = new String[LHSsize][];

    // Get all parameters (inc. idf and weather) and their distributions
    if (ParamTree != null) {
        // Create sample for each parameter
        String[][] SampledValues = getSampleInEqualProbSegments(LHSsize, randomsrc);
        // debug
        logger.debug(Arrays.deepToString(SampledValues));
        int length = SampledValues.length;
        // Shuffle the sample value vector of each parameter
        for (int i = 1; i < length; i++) {
            Collections.shuffle(Arrays.asList(SampledValues[i]), randomsrc);
        }
        // n jobs are created by taking a value from each parameter's vector
        // sequentially
        for (int i = 0; i < LHSsize; i++) {
            JobList[i] = new String[length];
            JobList[i][0] = new Formatter().format("LHS-%06d", i).toString(); // Job id
            for (int j = 1; j < length; j++) {
                JobList[i][j] = SampledValues[j][i];
            }
        }
        return JobList;
    }
    return null;
}
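Note the idiom in the shuffle loop above: Arrays.asList wraps the String[] row without copying, so shuffling the returned view permutes SampledValues[i] in place. A minimal demonstration of that write-through behavior:

import java.util.Arrays;
import java.util.Collections;
import java.util.Random;

public class ShuffleViewDemo {
    public static void main(String[] args) {
        String[] row = { "a", "b", "c", "d", "e" };
        // Arrays.asList returns a fixed-size List view backed by 'row',
        // so this shuffles the array itself, not a copy.
        Collections.shuffle(Arrays.asList(row), new Random(1L));
        System.out.println(Arrays.toString(row));
    }
}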
From source file: edu.cornell.med.icb.learning.CrossValidation.java
/**
 * Calculates semi-random fold assignments. Ideally fold assignments would be as random as
 * possible. Because prediction results on test folds are evaluated with ROCR (to calculate
 * ROC AUC), and because ROCR cannot handle situations where all the labels are only one
 * category (i.e., all class 1 or all class 2), we force folds generated by this
 * method to exclude this situation.
 *
 * @param k Number of folds
 * @return An array where each element is the index of the fold to which the given instance
 *         of the training set belongs.
 */
private int[] assignFolds(final int k) {
    final IntList indices = new IntArrayList();
    do {
        indices.clear();
        for (int i = 0; i < problem.getSize(); ++i) {
            indices.add(i % k);
        }
        Collections.shuffle(indices, randomAdapter);
    } while (invalidFold(indices, k));
    final int[] splitIndex = new int[problem.getSize()];
    indices.toArray(splitIndex);
    return splitIndex;
}
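A simplified, self-contained rendering of the same shuffle-and-retry idea. The invalid() predicate here is only a stand-in for the original's invalidFold check, and the sketch assumes both classes occur often enough that a valid assignment exists; otherwise the rejection loop would not terminate.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Random;

public class FoldDemo {
    // Assigns each of n instances to one of k folds (balanced counts),
    // reshuffling until every fold contains more than one label class.
    static int[] assignFolds(int n, int k, List<Integer> labels, Random rnd) {
        List<Integer> folds = new ArrayList<>(n);
        for (int i = 0; i < n; i++) {
            folds.add(i % k); // balanced fold sizes before shuffling
        }
        do {
            Collections.shuffle(folds, rnd);
        } while (invalid(folds, labels, k));
        int[] out = new int[n];
        for (int i = 0; i < n; i++) {
            out[i] = folds.get(i);
        }
        return out;
    }

    // Stand-in validity check: rejects an assignment if any fold's labels
    // are all one class (the situation the original's javadoc rules out).
    static boolean invalid(List<Integer> folds, List<Integer> labels, int k) {
        for (int f = 0; f < k; f++) {
            Integer first = null;
            boolean mixed = false;
            for (int i = 0; i < folds.size(); i++) {
                if (folds.get(i) != f) continue;
                if (first == null) first = labels.get(i);
                else if (!first.equals(labels.get(i))) { mixed = true; break; }
            }
            if (!mixed) return true;
        }
        return false;
    }

    public static void main(String[] args) {
        List<Integer> labels = Arrays.asList(0, 1, 0, 1, 0, 1, 0, 1);
        int[] folds = assignFolds(labels.size(), 2, labels, new Random(3L));
        System.out.println(Arrays.toString(folds));
    }
}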
From source file: com.clust4j.utils.VecUtils.java
/**
 * Shuffle in the input
 * @param in
 * @param rand - a random seed
 * @return a shuffled int array
 */
public static int[] permutation(final int[] in, final Random rand) {
    checkDimsPermitEmpty(in);

    final int m = in.length;
    ArrayList<Integer> recordIndices = new ArrayList<Integer>(m);
    for (int i = 0; i < m; i++)
        recordIndices.add(i);

    Collections.shuffle(recordIndices, rand);

    final int[] out = new int[m];
    for (int i = 0; i < m; i++)
        out[i] = recordIndices.get(i);

    return out;
}
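For reference, a hypothetical call to the helper above. Note that as quoted, the method returns a seed-determined permutation of the indices 0 .. in.length-1; the input's values are used only for their count.

int[] perm = VecUtils.permutation(new int[] { 10, 20, 30, 40 }, new Random(7L));
// 'perm' is some ordering of 0, 1, 2, 3, identical on every run with seed 7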