Example usage for java.util.Collections.shuffle

Introduction

On this page you can find example usages of java.util.Collections.shuffle.

Prototype

public static void shuffle(List<?> list) 

Documentation

Randomly permutes the specified list using a default source of randomness.
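
A minimal, self-contained sketch of the method in isolation (the class name ShuffleDemo is illustrative only): shuffle(List<?>) permutes the list in place using a default source of randomness, while the two-argument overload shuffle(List<?>, Random) accepts an explicit Random, which is useful when a reproducible permutation is needed.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Random;

public class ShuffleDemo {
    public static void main(String[] args) {
        // Shuffle in place with the default source of randomness.
        List<Integer> numbers = new ArrayList<>(Arrays.asList(1, 2, 3, 4, 5));
        Collections.shuffle(numbers);
        System.out.println(numbers); // e.g. [3, 1, 5, 2, 4]

        // The two-argument overload takes an explicit Random,
        // which makes the permutation reproducible (handy in tests).
        List<Integer> seeded = new ArrayList<>(Arrays.asList(1, 2, 3, 4, 5));
        Collections.shuffle(seeded, new Random(42L));
        System.out.println(seeded);
    }
}

Note that the list must support the set operation; calling shuffle on an immutable list throws an UnsupportedOperationException.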

Usage

From source file: clummy.classes.DataHandlingClass.java

/**
 * Get the list of races, padded with randomly chosen duplicates
 * until it matches the total number of names.
 * @return the padded list of races
 */
public List<String> getListofRace() {
    String temp;
    List<String> racelist = readFile(AllFileList.RACELIST);
    while (racelist.size() < totalListFromName) {
        Collections.shuffle(racelist);
        Collections.shuffle(racelist);
        temp = racelist.get(randBetween(0, racelist.size() - 1));
        racelist.add(temp);
    }
    return racelist;
}
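
This example shuffles the list twice and then picks a random index, but a single Collections.shuffle already yields a uniformly random permutation, so the second shuffle adds no extra randomness. If the goal is only to pick a random element, shuffling can be skipped entirely; a minimal sketch under that assumption (the class and helper names RandomPick/pickRandom are hypothetical):

import java.util.List;
import java.util.concurrent.ThreadLocalRandom;

class RandomPick {
    // Pick one element uniformly at random without reordering the list.
    static <T> T pickRandom(List<T> list) {
        return list.get(ThreadLocalRandom.current().nextInt(list.size()));
    }
}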

From source file: de.unibw.inf2.fishification.FishWorld.java

public void increaseFishSize(String source) {

    // Prevent too many increase operations
    long actualTime = System.currentTimeMillis();
    if (actualTime - s_lastFeedTime < s_feedFrequency) {
        return;
    }
    s_lastFeedTime = actualTime;

    // Convert source name to category type
    CategoryType type = CategoryMapper.map(source);

    // Copy all FishEntities
    List<FishEntity> fishEntities = new ArrayList<FishEntity>();
    for (Entity entity : getEntityManager().getEntities()) {
        if (entity instanceof FishEntity) {
            FishEntity fishEntity = (FishEntity) entity;

            // Add fish if the category matches
            if (type == CategoryType.NONE || type == fishEntity.getType()) {
                fishEntities.add(fishEntity);
            }
        }
    }

    if (type == CategoryType.NONE) {

        // Shuffle Fish randomly
        Collections.shuffle(fishEntities);

        // Increase only the first in the shuffled list
        FishEntity fishToFeed = fishEntities.get(0);
        fishToFeed.increaseSize(s_fishIncreaseFactor);

    } else {

        // Increase all Fish according to source
        for (FishEntity fishToFeed : fishEntities) {
            fishToFeed.increaseSize(s_fishIncreaseFactor);
        }

    }
}

From source file: edu.amc.sakai.user.PooledLDAPConnectionFactory.java

/**
 * Get the host to connect to. Attempt to resolve all addresses for a hostname when round robin DNS
 * is used.
 *
 * We do this resolution low down in the stack because we don't want the results cached at all.
 * Unless configured otherwise, the JVM will cache DNS lookups. Even if this is just for 30 seconds
 * (the default in 1.6/1.7), it means that when the pool is prebuilt at startup the connections will
 * all point to the same server.
 *
 * @return A hostname or a space separated string of IP addresses to connect to. 
 */
protected String getHost() {
    List<InetAddress> addresses = new ArrayList<InetAddress>();
    // The host may already be space separated.
    StringTokenizer hosts = new StringTokenizer(host, " ");
    while (hosts.hasMoreTokens()) {
        try {
            addresses.addAll(Arrays.asList(InetAddress.getAllByName(hosts.nextToken())));
        } catch (UnknownHostException e) {
            if (log.isDebugEnabled()) {
                log.debug("Failed to resolve " + host + " not handling now, will deal with later.");
            }
        }
    }
    if (addresses.size() > 1) {
        StringBuilder resolvedHosts = new StringBuilder();
        // So that we don't always connect to the same host.
        // This is needed on platforms that don't do round robin DNS well.
        Collections.shuffle(addresses);
        for (InetAddress address : addresses) {
            resolvedHosts.append(address.getHostAddress() + " ");
        }
        return resolvedHosts.toString();
    } else {
        // Just return the configured hostname and let it be resolved when making the connection.
        return host;
    }
}

From source file: com.epam.gepard.datadriven.feeders.LabelBasedDataFeeder.java

private int loadFeeder(final LabelBasedFeederDetails feederDetails) throws DataFeederException {
    int result = 0;
    // convert feeder file parameter into existing path
    LabelBasedFeederFileLoader loaderType = LabelBasedFeederFileLoader.TXT;
    String filePath = detectFilePath(feederDetails);
    if (filePath.endsWith(".csv")) {
        loaderType = LabelBasedFeederFileLoader.CSV;
    }
    feederDetails.setFeederFile(filePath);

    // Select the required value
    try {
        // load feeder
        loaderType.loadFeeder(feederDetails);
    } catch (IOException e) {
        String errorText = "Error reading LabelBasedDataFeeder: " + feederDetails.getFeederFile()
                + "\nfilePath: " + filePath;
        throw new DataFeederException(errorText, ERROR_FEEDER_FILE_MISSING);
    }

    // if feeder label type is random, then shuffle parameter rows
    if (LabelBasedFeederDetails.LabelType.RANDOM.equals(feederDetails.getLabelType())) {
        Collections.shuffle(feederDetails.getParameterList());
        feederDetails.reduceParameterList();
    }

    // if feeder label type is merge, then append the specified parameters in a row to a string
    if (LabelBasedFeederDetails.LabelType.MERGE.equals(feederDetails.getLabelType())) {
        String[] merged = new String[feederDetails.getParameterList().get(0).length];
        Arrays.fill(merged, "");

        for (int rowNum = 0; rowNum < feederDetails.getParameterList().size(); rowNum++) {
            for (int colNum = 0; colNum < merged.length; colNum++) {
                merged[colNum] += (rowNum == 0) ? feederDetails.getParameterList().get(rowNum)[colNum]
                        : "," + feederDetails.getParameterList().get(rowNum)[colNum];
            }
        }

        feederDetails.getParameterList().clear();
        feederDetails.getParameterList().add(merged);
    }

    if (feederDetails.getParameterList().isEmpty()) {
        result = ERROR_FEEDER_RELATION_NO_ROW_LOADED;
        DataFeederLoader.reportError("Error reading LabelBasedDataFeeder - No rows loaded by the feeder: "
                + feederDetails.getFeederFile() + "\nfilePath: " + filePath);
    }

    return result;
}

From source file: net.semanticmetadata.lire.solr.FastLireRequestHandler.java

/**
 * Search based on the given image hashes.
 *
 * @param req the Solr query request
 * @param rsp the Solr query response
 * @throws java.io.IOException
 * @throws IllegalAccessException
 * @throws InstantiationException
 */
private void handleHashSearch(SolrQueryRequest req, SolrQueryResponse rsp)
        throws IOException, IllegalAccessException, InstantiationException {
    SolrParams params = req.getParams();
    SolrIndexSearcher searcher = req.getSearcher();
    // get the params needed:
    // hashes=x y z ...
    // feature=<base64>
    // field=<cl_ha|ph_ha|...>

    String[] hashStrings = params.get("hashes").trim().split(" ");
    byte[] featureVector = Base64.decodeBase64(params.get("feature"));
    String paramField = "cl_ha";
    if (req.getParams().get("field") != null)
        paramField = req.getParams().get("field");
    int paramRows = defaultNumberOfResults;
    if (params.getInt("rows") != null)
        paramRows = params.getInt("rows");
    numberOfQueryTerms = req.getParams().getDouble("accuracy", DEFAULT_NUMBER_OF_QUERY_TERMS);
    numberOfCandidateResults = req.getParams().getInt("candidates", DEFAULT_NUMBER_OF_CANDIDATES);
    // create boolean query:
    //        System.out.println("** Creating query.");
    LinkedList<Term> termFilter = new LinkedList<Term>();
    BooleanQuery query = new BooleanQuery();
    for (int i = 0; i < hashStrings.length; i++) {
        // be aware that the hashFunctionsFileName of the field must match the one you put the hashes in before.
        hashStrings[i] = hashStrings[i].trim();
        if (hashStrings[i].length() > 0) {
            termFilter.add(new Term(paramField, hashStrings[i].trim()));
            //                System.out.println("** " + field + ": " + hashes[i].trim());
        }
    }
    Collections.shuffle(termFilter);
    for (int k = 0; k < termFilter.size() * numberOfQueryTerms; k++) {
        query.add(new BooleanClause(new TermQuery(termFilter.get(k)), BooleanClause.Occur.SHOULD));
    }
    //        System.out.println("** Doing search.");

    // query feature
    LireFeature queryFeature = (LireFeature) FeatureRegistry.getClassForHashField(paramField).newInstance();
    queryFeature.setByteArrayRepresentation(featureVector);

    // get results:
    doSearch(req, rsp, searcher, paramField, paramRows, termFilter, new MatchAllDocsQuery(), queryFeature);
}
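
In this handler, shuffle acts as a sampling primitive: the term filter is permuted and only the first termFilter.size() * numberOfQueryTerms terms (the fraction set by the accuracy parameter) are added to the Boolean query. The same shuffle-and-take-a-prefix idiom in isolation, as a sketch (the class and method names RandomSubset/sample are illustrative only):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

class RandomSubset {
    // Return a random fraction of the input without modifying the original list.
    static <T> List<T> sample(List<T> items, double fraction) {
        List<T> copy = new ArrayList<>(items);
        Collections.shuffle(copy);
        int k = (int) (copy.size() * fraction);
        return copy.subList(0, k);
    }
}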

From source file: com.intuit.tank.harness.functions.StringFunctions.java

/**
 * @param minId the lower bound of the id range (inclusive)
 * @param maxId the upper bound of the id range (exclusive)
 * @param mod the modulus used to filter ids
 * @param include whether ids divisible by mod are kept (true) or excluded (false)
 * @return a shuffled stack of ids for this agent's block of the range
 */
private synchronized static Stack<Integer> getStackWithMods(Integer minId, Integer maxId, Integer mod,
        boolean include) {
    String key = getStackKey(minId, maxId, mod, include);
    Stack<Integer> stack = stackMap.get(key);
    if (stack == null) {
        int blockSize = (maxId - minId) / APITestHarness.getInstance().getAgentRunData().getTotalAgents();
        int offset = APITestHarness.getInstance().getAgentRunData().getAgentInstanceNum() * blockSize;
        LOG.info(LogUtil.getLogMessage(
                "Creating userId Block starting at " + offset + " and containing  " + blockSize + " entries.",
                LogEventType.System));
        List<Integer> list = new ArrayList<Integer>();

        for (int i = 0; i < blockSize; i++) {
            int nextNum = i + minId + offset;
            if (include && nextNum < maxId && nextNum % mod == 0) {
                list.add(nextNum);
            } else if (!include && nextNum < maxId && nextNum % mod != 0) {
                list.add(nextNum);
            }
        }
        Collections.shuffle(list);
        // Collections.reverse(list);
        stack = new Stack<Integer>();
        stack.addAll(list);
        stackMap.put(key, stack);
    }
    return stack;
}
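
The single shuffle here effectively turns the stack into a "deal without replacement" structure: the agent's block of ids is permuted once, and each subsequent pop returns a distinct id in random order. A sketch of the same idiom on its own (ShuffledDealer and dealOrder are illustrative names; ArrayDeque is used instead of the legacy Stack):

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Deque;
import java.util.List;

class ShuffledDealer {
    // Shuffle once up front; pop() then hands out each value exactly once, in random order.
    static <T> Deque<T> dealOrder(List<T> values) {
        List<T> copy = new ArrayList<>(values);
        Collections.shuffle(copy);
        return new ArrayDeque<>(copy);
    }
}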

From source file: clummy.classes.DataHandlingClass.java

/**
 * Get a randomly chosen level from the default level list.
 * @return a random level
 */
public String getShuffledLevel() {
    List<String> levelList = defaultList.getLevelList();
    Collections.shuffle(levelList);
    Collections.shuffle(levelList);
    return levelList.get(randBetween(0, levelList.size() - 1));
}

From source file: edu.uga.cs.fluxbuster.clustering.ClusterGenerator.java

/**
 * Copies candidate flux domains into a list if their corresponding 2LDs are present
 * in a set of recent flux 2LDs, up to a limit on the size of the list. The
 * candidate flux domains are copied from a map of candidate flux domains. Domains
 * are only considered if they appear in the all-domains list. Once a candidate flux
 * domain is copied, its corresponding domain name is removed from the all-domains list.
 *
 * @param recentFluxDomains the set of recent flux 2LDs
 * @param maxCandidateDomains the limit on the total number of domains to add
 * @param resultBuf the list in which to store the candidate flux domains
 * @param seenDomains the map of candidate flux domains
 * @param allDomains the list of domains to consider
 */
private void addRecentFluxDomains(Set<String> recentFluxDomains, int maxCandidateDomains,
        List<CandidateFluxDomain> resultBuf, HashMap<String, CandidateFluxDomain> seenDomains,
        ArrayList<String> allDomains) {
    ArrayList<String> removeDomains = new ArrayList<String>();
    Collections.shuffle(allDomains); // this is probably not necessary
    for (String domainname : allDomains) {
        if (resultBuf.size() == maxCandidateDomains) {
            break;
        }
        String domainname2LD = DomainNameUtils.extractEffective2LD(domainname);
        if (domainname2LD != null && recentFluxDomains.contains(domainname2LD)) {
            resultBuf.add(seenDomains.get(domainname));
            removeDomains.add(domainname);
        }
    }
    allDomains.removeAll(removeDomains);
}

From source file: edu.cornell.med.icb.clustering.TestQTClusterer.java

/**
 * Tests clustering with lists of object types.
 */
@Test
public void clusterObjectCollections() {
    final List<Object> peoplePlacesAndThings = new ArrayList<Object>();
    final Person tom = new Person() {
    };
    final Person dick = new Person() {
    };
    final Person harry = new Person() {
    };

    peoplePlacesAndThings.add(tom);
    peoplePlacesAndThings.add(dick);
    peoplePlacesAndThings.add(harry);

    final Place home = new Place() {
    };
    final Place work = new Place() {
    };
    final Place school = new Place() {
    };

    peoplePlacesAndThings.add(home);
    peoplePlacesAndThings.add(work);
    peoplePlacesAndThings.add(school);

    final Thing pencil = new Thing() {
    };
    final Thing pen = new Thing() {
    };
    final Thing paper = new Thing() {
    };
    final Thing stapler = new Thing() {
    };

    peoplePlacesAndThings.add(pencil);
    peoplePlacesAndThings.add(pen);
    peoplePlacesAndThings.add(paper);
    peoplePlacesAndThings.add(stapler);

    // put things in a random order just to make things interesting
    Collections.shuffle(peoplePlacesAndThings);

    final Clusterer clusterer = new QTClusterer(peoplePlacesAndThings.size());
    final List<int[]> clusters = clusterer.cluster(new MaxLinkageDistanceCalculator() {
        public double distance(final int i, final int j) {
            final Object object1 = peoplePlacesAndThings.get(i);
            final Object object2 = peoplePlacesAndThings.get(j);
            if (object1 instanceof Person && object2 instanceof Person) {
                return 0;
            } else if (object1 instanceof Place && object2 instanceof Place) {
                return 0;
            } else if (object1 instanceof Thing && object2 instanceof Thing) {
                return 0;
            } else {
                return 42;
            }
        }
    }, 1.0f);

    assertNotNull("Cluster should not be null", clusters);
    assertEquals("There should be 3 clusters", 3, clusters.size());

    boolean peopleClustered = false;
    boolean placesClustered = false;
    boolean thingsClustered = false;

    for (final int[] cluster : clusters) {
        // check the type of the first, so we know what we're dealing with
        final Object object = peoplePlacesAndThings.get(cluster[0]);
        if (object instanceof Person) {
            assertEquals("There should be 3 people", 3, cluster.length);
            assertFalse("There appears to be more than one cluster of people", peopleClustered);
            peopleClustered = true;
            for (int i = 1; i < cluster.length; i++) {
                final Object person = peoplePlacesAndThings.get(cluster[i]);
                assertTrue("Cluster contains more than people", person instanceof Person);
            }
        } else if (object instanceof Place) {
            assertEquals("There should be 3 places", 3, cluster.length);
            assertFalse("There appears to be more than one cluster of places", placesClustered);
            placesClustered = true;
            for (int i = 1; i < cluster.length; i++) {
                final Object place = peoplePlacesAndThings.get(cluster[i]);
                assertTrue("Cluster contains more than places", place instanceof Place);
            }
        } else if (object instanceof Thing) {
            assertEquals("There should be 4 things", 4, cluster.length);
            assertFalse("There appears to be more than one cluster of things", thingsClustered);
            thingsClustered = true;
            for (int i = 1; i < cluster.length; i++) {
                final Object thing = peoplePlacesAndThings.get(cluster[i]);
                assertTrue("Cluster contains more than things", thing instanceof Thing);
            }
        } else {
            fail("Cluster contains an unknown object type: " + object.getClass().getName());
        }
    }

    assertTrue("People should have been clustered", peopleClustered);
    assertTrue("Places should have been clustered", placesClustered);
    assertTrue("Things should have been clustered", thingsClustered);
}
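
Because this test shuffles its input with the default source of randomness, a failing ordering may be hard to reproduce. If that matters, the Random-accepting overload keeps the order arbitrary but deterministic; for example (the seed value is arbitrary):

    Collections.shuffle(peoplePlacesAndThings, new java.util.Random(0L));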

From source file: edu.rice.cs.bioinfo.programs.phylonet.algos.network.NetworkPseudoLikelihoodFromGTT.java

protected double findOptimalBranchLength(final Network<Object> speciesNetwork,
        final Map<String, List<String>> species2alleles, final List tripleFrequencies,
        final List gtCorrespondence, final Set<String> singleAlleleSpecies) {
    boolean continueRounds = true; // keep trying to improve network

    for (NetNode<Object> node : speciesNetwork.dfs()) {
        for (NetNode<Object> parent : node.getParents()) {
            node.setParentDistance(parent, 1.0);
            if (node.isNetworkNode()) {
                node.setParentProbability(parent, 0.5);
            }
        }
    }

    Set<NetNode> node2ignoreForBL = findEdgeHavingNoBL(speciesNetwork);

    double initalProb = computeProbability(speciesNetwork, tripleFrequencies, species2alleles,
            gtCorrespondence);
    if (_printDetails)
        System.out.println(speciesNetwork.toString() + " : " + initalProb);

    final Container<Double> lnGtProbOfSpeciesNetwork = new Container<Double>(initalProb); // records the GTProb of the network at all times

    int roundIndex = 0;
    for (; roundIndex < _maxRounds && continueRounds; roundIndex++) {
        /*
        * Prepare a random ordering of network edge examinations each of which attempts to change a branch length or hybrid prob to improve the GTProb score.
        */
        double lnGtProbLastRound = lnGtProbOfSpeciesNetwork.getContents();
        List<Proc> assigmentActions = new ArrayList<Proc>(); // store adjustment commands here.  Will execute them one by one later.

        for (final NetNode<Object> parent : edu.rice.cs.bioinfo.programs.phylonet.structs.network.util.Networks
                .postTraversal(speciesNetwork)) {

            for (final NetNode<Object> child : parent.getChildren()) {
                if (node2ignoreForBL.contains(child)) {
                    continue;
                }

                assigmentActions.add(new Proc() {
                    public void execute() {

                        UnivariateFunction functionToOptimize = new UnivariateFunction() {
                            public double value(double suggestedBranchLength) {
                                double incumbentBranchLength = child.getParentDistance(parent);

                                child.setParentDistance(parent, suggestedBranchLength);

                                double lnProb = computeProbability(speciesNetwork, tripleFrequencies,
                                        species2alleles, gtCorrespondence);
                                //System.out.println(speciesNetwork + ": " + lnProb);
                                if (lnProb > lnGtProbOfSpeciesNetwork.getContents()) // did improve, keep change
                                {
                                    lnGtProbOfSpeciesNetwork.setContents(lnProb);

                                } else // didn't improve, roll back change
                                {
                                    child.setParentDistance(parent, incumbentBranchLength);
                                }
                                return lnProb;
                            }
                        };
                        BrentOptimizer optimizer = new BrentOptimizer(_Brent1, _Brent2); // very small numbers so we control when brent stops, not brent.

                        try {
                            optimizer.optimize(_maxTryPerBranch, functionToOptimize, GoalType.MAXIMIZE,
                                    Double.MIN_VALUE, _maxBranchLength);
                        } catch (TooManyEvaluationsException e) // _maxAssigmentAttemptsPerBranchParam exceeded
                        {
                        }

                        if (_printDetails)
                            System.out.println(
                                    speciesNetwork.toString() + " : " + lnGtProbOfSpeciesNetwork.getContents());

                    }
                });
            }
        }

        for (final NetNode<Object> child : speciesNetwork.getNetworkNodes()) // find every hybrid node
        {

            Iterator<NetNode<Object>> hybridParents = child.getParents().iterator();
            final NetNode hybridParent1 = hybridParents.next();
            final NetNode hybridParent2 = hybridParents.next();

            assigmentActions.add(new Proc() {
                public void execute() {
                    UnivariateFunction functionToOptimize = new UnivariateFunction() {
                        public double value(double suggestedProb) {
                            double incumbentHybridProbParent1 = child.getParentProbability(hybridParent1);

                            child.setParentProbability(hybridParent1, suggestedProb);
                            child.setParentProbability(hybridParent2, 1.0 - suggestedProb);

                            double lnProb = computeProbability(speciesNetwork, tripleFrequencies,
                                    species2alleles, gtCorrespondence);
                            //System.out.println(speciesNetwork + ": " + lnProb);
                            if (lnProb > lnGtProbOfSpeciesNetwork.getContents()) // change improved GTProb, keep it
                            {

                                lnGtProbOfSpeciesNetwork.setContents(lnProb);
                            } else // change did not improve, roll back
                            {

                                child.setParentProbability(hybridParent1, incumbentHybridProbParent1);
                                child.setParentProbability(hybridParent2, 1.0 - incumbentHybridProbParent1);
                            }
                            return lnProb;
                        }
                    };
                    BrentOptimizer optimizer = new BrentOptimizer(_Brent1, _Brent2); // very small numbers so we control when brent stops, not brent.

                    try {
                        optimizer.optimize(_maxTryPerBranch, functionToOptimize, GoalType.MAXIMIZE, 0, 1.0);
                    } catch (TooManyEvaluationsException e) // _maxAssigmentAttemptsPerBranchParam exceeded
                    {
                    }
                    if (_printDetails)
                        System.out.println(
                                speciesNetwork.toString() + " : " + lnGtProbOfSpeciesNetwork.getContents());
                }
            });

        }

        // Randomize the order in which branch-length and hybrid-probability adjustments are attempted
        Collections.shuffle(assigmentActions);

        for (Proc assigment : assigmentActions) // for each change attempt, perform attempt
        {
            assigment.execute();
        }
        if (_printDetails) {
            System.out.println("Round end ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
            System.out
                    .println(speciesNetwork.toString() + "\n" + lnGtProbOfSpeciesNetwork.getContents() + "\n");
        }
        if (((double) lnGtProbOfSpeciesNetwork.getContents()) == lnGtProbLastRound) // if no improvement was made with respect to the last round, stop trying to find a better assignment
        {
            continueRounds = false;
        } else if (lnGtProbOfSpeciesNetwork.getContents() > lnGtProbLastRound) // improvement was made, ensure it is large enough with respect to the improvement threshold to continue searching
        {

            double improvementPercentage = Math.pow(Math.E,
                    (lnGtProbOfSpeciesNetwork.getContents() - lnGtProbLastRound)) - 1.0; // how much did we improve over last round
            if (improvementPercentage < _improvementThreshold) // improved, but not enough to keep searching
            {
                continueRounds = false;
            }
        } else {
            throw new IllegalStateException("Should never have decreased prob.");
        }
    }
    //System.out.println(speciesNetwork + " " + lnGtProbOfSpeciesNetwork.getContents());
    return lnGtProbOfSpeciesNetwork.getContents();
}