Example usage for java.lang Thread yield

Introduction

On this page you can find usage examples for java.lang.Thread.yield(), collected from open source projects.

Prototype

public static native void yield();

Document

A hint to the scheduler that the current thread is willing to yield its current use of a processor.
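
Before the project examples, here is a minimal, self-contained sketch of the spin-wait pattern that recurs in them. The class and field names (YieldSpinExample, ready) are illustrative only, not taken from any project on this page: one thread polls a flag in a loop and calls Thread.yield() on each pass, hinting to the scheduler that other runnable threads may run.

public class YieldSpinExample {

    // volatile guarantees the worker sees the main thread's write
    private static volatile boolean ready = false;

    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            // Spin until the flag flips, yielding the processor on each
            // pass so other runnable threads get a chance to progress.
            while (!ready) {
                Thread.yield();
            }
            System.out.println("worker observed the ready flag");
        });
        worker.start();

        Thread.sleep(100); // simulate some setup work
        ready = true;
        worker.join();
    }
}

Note that yield() is only a scheduling hint with no memory-visibility effect; the volatile flag is what makes this loop terminate reliably, and production code should generally prefer java.util.concurrent primitives over spin loops.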

Usage

From source file:ac.core.ConfigLoader.java

/**
 * loadNodes(...) recursively parses the app.config XML file piece by
 * piece, passing each node reference to helper methods.
 *
 * @param node the current XML node being processed.
 * @see loadBaseConfig
 * @throws Exception
 */
protected void loadNodes(org.w3c.dom.Node node) throws Exception {
    // store the application properties; these are custom properties and
    // can be changed without impacting the core property parsing.
    // collect the list of attribute key names from the application block
    org.w3c.dom.NodeList attributes = _xmlr
            .getNodeListByXPath("/application/elsuFramework/attributes/key/@name");
    ArrayList<String> attributesList = new ArrayList<>();
    for (int i = 0; i < attributes.getLength(); i++) {
        attributesList.add(attributes.item(i).getNodeValue());

        // yield processing to other threads
        Thread.yield();
    }

    // retrieve the value of the attribute key name
    for (String key : attributesList) {
        String value = _xmlr
                .getNodeValueByXPath("/application/elsuFramework/attributes/key[@name='" + key + "']");

        // remove old value if loaded from framework.config
        getApplicationProperties().remove(key);
        getApplicationProperties().put(key, value);

        // yield processing to other threads
        Thread.yield();
    }

    // retrieve the log property value from the application attributes
    if (getApplicationProperties().get("log.config") != null) {
        // log attribute value is defined, set the static variable to the 
        // log property file location; also, check if path is provided as
        // part of the file name - if yes, then ignore class path
        String configFile;

        if (!getApplicationProperties().get("log.config").toString().contains("\\")
                && !getApplicationProperties().get("log.config").toString().contains("/")) {
            configFile = (new File(getClass().getName().replace(".", "\\"))).getParent() + "\\"
                    + getApplicationProperties().get("log.config").toString();
        } else {
            configFile = getApplicationProperties().get("log.config").toString();
        }

        Log4JManager.LOG4JCONFIG = configFile;

        // check if the log property file exists, if not extract it 
        extractConfigFile(Log4JManager.LOG4JCONFIG);

        // create the instance of the Log4JManager using the properties file
        new Log4JManager("dataLogger");

        // yield processing to other threads
        Thread.yield();
    }

    // load any actions defined into the config properties
    org.w3c.dom.NodeList actionNodes = _xmlr.getNodeListByXPath("/application/actions/action/@class");
    ArrayList<String> actionList = new ArrayList<>();
    for (int i = 0; i < actionNodes.getLength(); i++) {
        actionList.add(actionNodes.item(i).getNodeValue());

        // show the entry in the log as info
        Log4JManager.info(actionNodes.item(i).getNodeValue());

        // yield processing to other threads
        Thread.yield();
    }

    // for each service in the array list, read the config for core service
    // properties and any child properties defined
    for (String actionName : actionList) {
        // show entry in log for the service being processed
        Log4JManager.info(".. loading config for action (" + actionName + ")");

        // call reference method to load base config properties
        ActionConfig action = loadBaseConfig(actionName);

        // store the config in collection
        getActionProperties().remove(actionName);
        getActionProperties().put(actionName, action);

        // yield processing to other threads
        Thread.yield();
    }
}

From source file:com.xerox.amazonws.sdb.Domain.java

/**
 * Gets attributes of items specified in the query string. This method hands the
 * get requests off to worker threads and aggregates the responses.
 *
 * @param queryString the filter statement
 * @param listener class that will be notified when items are ready
 * @throws SDBException wraps checked exceptions
 */
public void listItemsAttributes(String queryString, ItemListener listener) throws SDBException {
    ThreadPoolExecutor pool = getThreadPoolExecutor();
    pool.setRejectedExecutionHandler(new RejectionHandler());
    String nextToken = "";
    Counter running = new Counter(0);
    do {
        try {
            QueryResult result = listItems(queryString, nextToken, 250);
            List<Item> items = result.getItemList();
            for (Item i : items) {
                while (pool.getActiveCount() == pool.getMaximumPoolSize()) {
                    try {
                        Thread.sleep(100);
                    } catch (InterruptedException ex) {
                    }
                }
                synchronized (running) {
                    running.increment();
                }
                pool.execute(new AttrWorker(i, running, null, listener));
                Thread.yield();
            }
            nextToken = result.getNextToken();
        } catch (SDBException ex) {
            System.out.println("Query '" + queryString + "' Failure: ");
            ex.printStackTrace();
        }
    } while (nextToken != null && nextToken.trim().length() > 0);
    while (true) {
        if (running.getValue() == 0) {
            break;
        }
        try {
            Thread.sleep(500);
        } catch (InterruptedException ex) {
        }
    }
    if (this.executor == null) {
        pool.shutdown();
    }
}

From source file:jCloisterZone.CarcassonneEnvironment.java

@Override
protected void startState() {
    relationalWrapper_.startState();

    while (!ProgramArgument.EXPERIMENT_MODE.booleanValue() && client_.isRunning()) {
        try {
            Thread.yield();
        } catch (Exception e) {
        }
    }
    client_.createGame();
    earlyExit_ = false;
    earlyExitPlayers_.clear();
    prevScores_.clear();

    if (environment_ == null) {
        // Wait only as long as needed to get the clientID.
        long clientID = (ProgramArgument.EXPERIMENT_MODE.booleanValue()) ? client_.getClientId() : -1;
        while (!ProgramArgument.EXPERIMENT_MODE.booleanValue() && clientID == -1) {
            try {
                Thread.yield();
                clientID = client_.getClientId();
            } catch (Exception e) {
            }
        }

        server_ = client_.getServer();
        server_.setRandomGenerator(RRLExperiment.random_);

        // Handle number of players playing
        slots_ = new ArrayList<PlayerSlot>(players_.length);
        int slotIndex = 0;
        for (String playerName : players_) {
            String playerNameIndex = playerName + slotIndex;
            if (playerName.equals(CERRLA_NAME)) {
                // Agent-controlled
                slots_.add(new PlayerSlot(slotIndex, PlayerSlot.SlotType.PLAYER, playerNameIndex, clientID));
            } else if (playerName.equals(AI_NAME)) {
                // AI controlled
                PlayerSlot slot = new PlayerSlot(slotIndex, PlayerSlot.SlotType.AI, playerNameIndex, clientID);
                slot.setAiClassName(LegacyAiPlayer.class.getName());
                slots_.add(slot);
            } else if (playerName.equals(RANDOM_NAME)) {
                // AI controlled
                PlayerSlot slot = new PlayerSlot(slotIndex, PlayerSlot.SlotType.AI, playerNameIndex, clientID);
                slot.setAiClassName(RandomAIPlayer.class.getName());
                slots_.add(slot);
            } else if (playerName.equals(HUMAN_NAME)) {
                // Human-controlled
                slots_.add(new PlayerSlot(slotIndex, PlayerSlot.SlotType.PLAYER, playerNameIndex, clientID));
            }
            slotIndex++;
        }

        // Start the game.
        environment_ = client_.getGame();
        while (!ProgramArgument.EXPERIMENT_MODE.booleanValue() && environment_ == null) {
            try {
                Thread.yield();
            } catch (Exception e) {
            }
            environment_ = client_.getGame();
        }
        relationalWrapper_.setGame(environment_);
        environment_.addGameListener(relationalWrapper_);
        // Ad-hoc fix
        if (ProgramArgument.EXPERIMENT_MODE.booleanValue())
            environment_.addUserInterface(relationalWrapper_);
        clientInterface_ = environment_.getUserInterface();
    } else if (players_.length > 1) {
        // Reset the UIs
        server_.stopGame();
        environment_.clearUserInterface();
        environment_.addUserInterface(clientInterface_);

        // Clear the slots and re-add them.
        for (int i = 0; i < PlayerSlot.COUNT; i++) {
            server_.updateSlot(new PlayerSlot(i), null);
        }
    }
    // Ad-hoc fix
    if (!ProgramArgument.EXPERIMENT_MODE.booleanValue()) {
        environment_.addUserInterface(relationalWrapper_);
    }

    // Randomise the slots
    Collections.shuffle(slots_, RRLExperiment.random_);
    for (int i = 0; i < slots_.size(); i++) {
        PlayerSlot slot = slots_.get(i);
        PlayerSlot cloneSlot = new PlayerSlot(i, slot.getType(), slot.getNick(), slot.getOwner());
        cloneSlot.setAiClassName(slot.getAiClassName());
        server_.updateSlot(cloneSlot, LegacyAiPlayer.supportedExpansions());
    }

    server_.startGame();
    // Spin until the game has started
    while (!ProgramArgument.EXPERIMENT_MODE.booleanValue() && (environment_ == null
            || environment_.getBoard() == null || environment_.getTilePack() == null)) {
        environment_ = ((ClientStub) Proxy.getInvocationHandler(server_)).getGame();
        try {
            Thread.yield();
        } catch (Exception e) {
        }
    }

    runPhases();

    currentPlayer_ = null;
}

From source file:org.apache.hadoop.hbase.regionserver.TestSplitLogWorker.java

@Test(timeout = 60000)
public void testMultipleTasks() throws Exception {
    LOG.info("testMultipleTasks");
    SplitLogCounters.resetCounters();
    final ServerName SRV = ServerName.valueOf("tmt_svr,1,1");
    final String PATH1 = ZKSplitLog.getEncodedNodeName(zkw, "tmt_task");
    RegionServerServices mockedRS = getRegionServer(SRV);
    SplitLogWorker slw = new SplitLogWorker(zkw, TEST_UTIL.getConfiguration(), mockedRS, neverEndingTask);
    slw.start();
    try {
        Thread.yield(); // let the worker start
        Thread.sleep(100);
        waitForCounter(SplitLogCounters.tot_wkr_task_grabing, 0, 1, WAIT_TIME);

        SplitLogTask unassignedManager = new SplitLogTask.Unassigned(MANAGER);
        zkw.getRecoverableZooKeeper().create(PATH1, unassignedManager.toByteArray(), Ids.OPEN_ACL_UNSAFE,
                CreateMode.PERSISTENT);

        waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, 1, WAIT_TIME);
        // now the worker is busy doing the above task

        // create another task
        final String PATH2 = ZKSplitLog.getEncodedNodeName(zkw, "tmt_task_2");
        zkw.getRecoverableZooKeeper().create(PATH2, unassignedManager.toByteArray(), Ids.OPEN_ACL_UNSAFE,
                CreateMode.PERSISTENT);

        // preempt the first task, have it owned by another worker
        final ServerName anotherWorker = ServerName.valueOf("another-worker,1,1");
        SplitLogTask slt = new SplitLogTask.Owned(anotherWorker);
        ZKUtil.setData(zkw, PATH1, slt.toByteArray());
        waitForCounter(SplitLogCounters.tot_wkr_preempt_task, 0, 1, WAIT_TIME);

        waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 1, 2, WAIT_TIME);
        assertEquals(2, slw.taskReadySeq);
        byte[] bytes = ZKUtil.getData(zkw, PATH2);
        slt = SplitLogTask.parseFrom(bytes);
        assertTrue(slt.isOwned(SRV));
    } finally {
        stopSplitLogWorker(slw);
    }
}

From source file:uk.co.tfd.symplectic.harvester.SymplecticFetch.java

/**
 * Executes the task.
 * 
 * @param baseUrl
 * 
 * @throws UnsupportedEncodingException
 * 
 * @throws IOException
 *             error processing search
 * @throws TransformerException
 * @throws TransformerFactoryConfigurationError
 * @throws ParserConfigurationException
 * @throws SAXException
 * @throws DOMException
 * @throws NoSuchAlgorithmException
 * @throws AtomEntryLoadException
 */

private void execute() throws DOMException, NoSuchAlgorithmException, UnsupportedEncodingException, IOException,
        SAXException, ParserConfigurationException, TransformerFactoryConfigurationError, TransformerException {
    ProgressTracker progress = null;
    try {
        progress = new JDBCProgressTrackerImpl(rh, limitListPages, updateLists, objectTypes,
                excludedRelationshipObjectTypes);
    } catch (SQLException e) {
        LOGGER.info(e.getMessage(), e);
        progress = new FileProgressTrackerImpl("loadstate", rh, limitListPages, updateLists, objectTypes,
                excludedRelationshipObjectTypes);
    } catch (IOException e) {
        LOGGER.info(e.getMessage(), e);
        progress = new FileProgressTrackerImpl("loadstate", rh, limitListPages, updateLists, objectTypes,
                excludedRelationshipObjectTypes);
    }

    // re-scan relationships to extract API objects
    // reScanRelationships(progress);
    // baseUrl + "/objects?categories=users&groups=3"
    progress.toload(seedQueryUrl, new APIObjects(rh, "users", progress, limitListPages, objectTypes));
    // progress.toload(baseUrl+"publication", new APIObjects(rh,
    // "publications", progress));
    int i = 0;
    int threadPoolSize = 20;
    ExecutorService executorService = Executors.newFixedThreadPool(threadPoolSize);
    final ConcurrentHashMap<String, FutureTask<String>> worklist = new ConcurrentHashMap<String, FutureTask<String>>();
    while (i < maxUrlFetch) {
        Entry<String, AtomEntryLoader> next = progress.next();
        if (next == null) {
            int startingWorklistSize = worklist.size();
            while (worklist.size() > 0 && worklist.size() >= startingWorklistSize) {
                consumeTasks(worklist, progress);
                if (worklist.size() >= startingWorklistSize) {
                    try {
                        Thread.sleep(500);
                    } catch (InterruptedException e) {
                    }
                }
            }
            if (!progress.hasPending() && worklist.size() == 0) {
                break; // there are none left to come, the workers are empty, and so is pending
            }
        } else {
            final AtomEntryLoader loader = next.getValue();
            final String key = next.getKey();
            FutureTask<String> task = new FutureTask<String>(new Callable<String>() {

                @Override
                public String call() throws Exception {

                    try {
                        loader.loadEntry(key);
                    } catch (Exception e) {
                        LOGGER.error(e.getMessage(), e);
                    }
                    return "Done Loading " + key;
                }
            });
            worklist.put(key, task);
            executorService.execute(task);
            i++;
            // don't overfill the queue
            while (worklist.size() > threadPoolSize * 2) {
                consumeTasks(worklist, progress);
                if (worklist.size() > threadPoolSize) {
                    try {
                        Thread.sleep(500);
                    } catch (InterruptedException e) {
                    }
                }
            }
        }
    }
    while (worklist.size() > 0) {
        consumeTasks(worklist, progress);
        Thread.yield();
    }
    executorService.shutdown();
    LOGGER.info("End ToDo list contains {} urls ", progress.pending());
    progress.dumpLoaded();
    progress.checkpoint();

}

From source file:org.apache.hadoop.mapreduce.v2.app.TestRuntimeEstimators.java

private void coreTestEstimator(TaskRuntimeEstimator testedEstimator, int expectedSpeculations) {
    estimator = testedEstimator;
    clock = new ControlledClock();
    dispatcher = new AsyncDispatcher();
    myJob = null;
    slotsInUse.set(0);
    completedMaps.set(0);
    completedReduces.set(0);
    successfulSpeculations.set(0);
    taskTimeSavedBySpeculation.set(0);

    clock.tickMsec(1000);

    Configuration conf = new Configuration();

    myAppContext = new MyAppContext(MAP_TASKS, REDUCE_TASKS);
    myJob = myAppContext.getAllJobs().values().iterator().next();

    estimator.contextualize(conf, myAppContext);

    conf.setLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_NO_SPECULATE, 500L);
    conf.setLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_SPECULATE, 5000L);
    conf.setDouble(MRJobConfig.SPECULATIVECAP_RUNNING_TASKS, 0.1);
    conf.setDouble(MRJobConfig.SPECULATIVECAP_TOTAL_TASKS, 0.001);
    conf.setInt(MRJobConfig.SPECULATIVE_MINIMUM_ALLOWED_TASKS, 5);
    speculator = new DefaultSpeculator(conf, myAppContext, estimator, clock);
    Assert.assertEquals("wrong SPECULATIVE_RETRY_AFTER_NO_SPECULATE value", 500L,
            speculator.getSoonestRetryAfterNoSpeculate());
    Assert.assertEquals("wrong SPECULATIVE_RETRY_AFTER_SPECULATE value", 5000L,
            speculator.getSoonestRetryAfterSpeculate());
    Assert.assertEquals(speculator.getProportionRunningTasksSpeculatable(), 0.1, 0.00001);
    Assert.assertEquals(speculator.getProportionTotalTasksSpeculatable(), 0.001, 0.00001);
    Assert.assertEquals("wrong SPECULATIVE_MINIMUM_ALLOWED_TASKS value", 5,
            speculator.getMinimumAllowedSpeculativeTasks());

    dispatcher.register(Speculator.EventType.class, speculator);

    dispatcher.register(TaskEventType.class, new SpeculationRequestEventHandler());

    dispatcher.init(conf);
    dispatcher.start();

    speculator.init(conf);
    speculator.start();

    // Now that the plumbing is hooked up, we do the following:
    //  do until all tasks are finished, ...
    //  1: If we have spare capacity, assign as many map tasks as we can, then
    //     assign as many reduce tasks as we can.  Note that an odd reduce
    //     task might be started while there are still map tasks, because
    //     map tasks take 3 slots and reduce tasks 2 slots.
    //  2: Send a speculation event for every task attempt that's running
    //  note that new attempts might get started by the speculator

    // discover undone tasks
    int undoneMaps = MAP_TASKS;
    int undoneReduces = REDUCE_TASKS;

    // build a task sequence where all the maps precede any of the reduces
    List<Task> allTasksSequence = new LinkedList<Task>();

    allTasksSequence.addAll(myJob.getTasks(TaskType.MAP).values());
    allTasksSequence.addAll(myJob.getTasks(TaskType.REDUCE).values());

    while (undoneMaps + undoneReduces > 0) {
        undoneMaps = 0;
        undoneReduces = 0;
        // start all attempts which are new but for which there are enough slots
        for (Task task : allTasksSequence) {
            if (!task.isFinished()) {
                if (task.getType() == TaskType.MAP) {
                    ++undoneMaps;
                } else {
                    ++undoneReduces;
                }
            }
            for (TaskAttempt attempt : task.getAttempts().values()) {
                if (attempt.getState() == TaskAttemptState.NEW
                        && INITIAL_NUMBER_FREE_SLOTS - slotsInUse.get() >= taskTypeSlots(task.getType())) {
                    MyTaskAttemptImpl attemptImpl = (MyTaskAttemptImpl) attempt;
                    SpeculatorEvent event = new SpeculatorEvent(attempt.getID(), false, clock.getTime());
                    speculator.handle(event);
                    attemptImpl.startUp();
                } else {
                    // If a task attempt is in progress we should send the news to
                    // the Speculator.
                    TaskAttemptStatus status = new TaskAttemptStatus();
                    status.id = attempt.getID();
                    status.progress = attempt.getProgress();
                    status.stateString = attempt.getState().name();
                    status.taskState = attempt.getState();
                    SpeculatorEvent event = new SpeculatorEvent(status, clock.getTime());
                    speculator.handle(event);
                }
            }
        }

        long startTime = System.currentTimeMillis();

        // drain the speculator event queue
        while (!speculator.eventQueueEmpty()) {
            Thread.yield();
            if (System.currentTimeMillis() > startTime + 130000) {
                return;
            }
        }

        clock.tickMsec(1000L);

        if (clock.getTime() % 10000L == 0L) {
            speculator.scanForSpeculations();
        }
    }

    Assert.assertEquals("We got the wrong number of successful speculations.", expectedSpeculations,
            successfulSpeculations.get());
}

From source file:org.apache.solr.cloud.TestStressInPlaceUpdates.java

@Test
@ShardsFixed(num = 3)
public void stressTest() throws Exception {
    waitForRecoveriesToFinish(true);

    this.leaderClient = getClientForLeader();
    assertNotNull("Couldn't obtain client for the leader of the shard", this.leaderClient);

    final int commitPercent = 5 + random().nextInt(20);
    final int softCommitPercent = 30 + random().nextInt(75); // what percent of the commits are soft
    final int deletePercent = 4 + random().nextInt(25);
    final int deleteByQueryPercent = random().nextInt(8);
    final int ndocs = atLeast(5);
    int nWriteThreads = 5 + random().nextInt(25);
    int fullUpdatePercent = 5 + random().nextInt(50);

    // query variables
    final int percentRealtimeQuery = 75;
    // number of cumulative read/write operations by all threads
    final AtomicLong operations = new AtomicLong(25000);
    int nReadThreads = 5 + random().nextInt(25);

    /** // testing
     final int commitPercent = 5;
     final int softCommitPercent = 100; // what percent of the commits are soft
     final int deletePercent = 0;
     final int deleteByQueryPercent = 50;
     final int ndocs = 10;
     int nWriteThreads = 10;
            
     final int maxConcurrentCommits = nWriteThreads;   // number of committers at a time... it should be <= maxWarmingSearchers
            
     // query variables
     final int percentRealtimeQuery = 101;
     final AtomicLong operations = new AtomicLong(50000);  // number of query operations to perform in total
     int nReadThreads = 10;
            
     int fullUpdatePercent = 20;
     **/

    log.info("{}",
            Arrays.asList("commitPercent", commitPercent, "softCommitPercent", softCommitPercent,
                    "deletePercent", deletePercent, "deleteByQueryPercent", deleteByQueryPercent, "ndocs",
                    ndocs, "nWriteThreads", nWriteThreads, "percentRealtimeQuery", percentRealtimeQuery,
                    "operations", operations, "nReadThreads", nReadThreads));

    initModel(ndocs);

    List<Thread> threads = new ArrayList<>();

    for (int i = 0; i < nWriteThreads; i++) {
        Thread thread = new Thread("WRITER" + i) {
            Random rand = new Random(random().nextInt());

            @Override
            public void run() {
                try {
                    while (operations.decrementAndGet() > 0) {
                        int oper = rand.nextInt(100);

                        if (oper < commitPercent) {
                            Map<Integer, DocInfo> newCommittedModel;
                            long version;

                            synchronized (TestStressInPlaceUpdates.this) {
                                // take a snapshot of the model
                                // this is safe to do w/o synchronizing on the model because it's a ConcurrentHashMap
                                newCommittedModel = new HashMap<>(model);
                                version = snapshotCount++;

                                int chosenClientIndex = rand.nextInt(clients.size());

                                if (rand.nextInt(100) < softCommitPercent) {
                                    log.info("softCommit start");
                                    clients.get(chosenClientIndex).commit(true, true, true);
                                    log.info("softCommit end");
                                } else {
                                    log.info("hardCommit start");
                                    clients.get(chosenClientIndex).commit();
                                    log.info("hardCommit end");
                                }

                                // install this model snapshot only if it's newer than the current one
                                if (version >= committedModelClock) {
                                    if (VERBOSE) {
                                        log.info("installing new committedModel version={}",
                                                committedModelClock);
                                    }
                                    clientIndexUsedForCommit = chosenClientIndex;
                                    committedModel = newCommittedModel;
                                    committedModelClock = version;
                                }
                            }
                            continue;
                        }

                        int id;

                        if (rand.nextBoolean()) {
                            id = rand.nextInt(ndocs);
                        } else {
                            id = lastId; // reuse the last ID half of the time to force more race conditions
                        }

                        // set the lastId before we actually change it sometimes to try and
                        // uncover more race conditions between writing and reading
                        boolean before = rand.nextBoolean();
                        if (before) {
                            lastId = id;
                        }

                        DocInfo info = model.get(id);

                        // yield after getting the next version to increase the odds of updates happening out of order
                        if (rand.nextBoolean())
                            Thread.yield();

                        if (oper < commitPercent + deletePercent + deleteByQueryPercent) {
                            final boolean dbq = (oper >= commitPercent + deletePercent);
                            final String delType = dbq ? "DBI" : "DBQ";
                            log.info("{} id {}: {}", delType, id, info);

                            Long returnedVersion = null;

                            try {
                                returnedVersion = deleteDocAndGetVersion(Integer.toString(id),
                                        params("_version_", Long.toString(info.version)), dbq);
                                log.info(delType + ": Deleting id=" + id + ", version=" + info.version
                                        + ".  Returned version=" + returnedVersion);
                            } catch (RuntimeException e) {
                                if (e.getMessage() != null && e.getMessage().contains("version conflict")
                                        || e.getMessage() != null && e.getMessage().contains("Conflict")) {
                                    // It's okay for a leader to reject a concurrent request
                                    log.warn("Conflict during {}, rejected id={}, {}", delType, id, e);
                                    returnedVersion = null;
                                } else {
                                    throw e;
                                }
                            }

                            // only update model if update had no conflict & the version is newer
                            synchronized (model) {
                                DocInfo currInfo = model.get(id);
                                if (null != returnedVersion && (Math.abs(returnedVersion.longValue()) > Math
                                        .abs(currInfo.version))) {
                                    model.put(id, new DocInfo(returnedVersion.longValue(), 0, 0));
                                }
                            }

                        } else {
                            int val1 = info.intFieldValue;
                            long val2 = info.longFieldValue;
                            int nextVal1 = val1;
                            long nextVal2 = val2;

                            int addOper = rand.nextInt(100);
                            Long returnedVersion;
                            if (addOper < fullUpdatePercent || info.version <= 0) { // if document was never indexed or was deleted
                                // FULL UPDATE
                                nextVal1 = Primes.nextPrime(val1 + 1);
                                nextVal2 = nextVal1 * 1000000000l;
                                try {
                                    returnedVersion = addDocAndGetVersion("id", id, "title_s", "title" + id,
                                            "val1_i_dvo", nextVal1, "val2_l_dvo", nextVal2, "_version_",
                                            info.version);
                                    log.info("FULL: Writing id=" + id + ", val=[" + nextVal1 + "," + nextVal2
                                            + "], version=" + info.version + ", Prev was=[" + val1 + "," + val2
                                            + "].  Returned version=" + returnedVersion);

                                } catch (RuntimeException e) {
                                    if (e.getMessage() != null && e.getMessage().contains("version conflict")
                                            || e.getMessage() != null && e.getMessage().contains("Conflict")) {
                                        // It's okay for a leader to reject a concurrent request
                                        log.warn("Conflict during full update, rejected id={}, {}", id, e);
                                        returnedVersion = null;
                                    } else {
                                        throw e;
                                    }
                                }
                            } else {
                                // PARTIAL
                                nextVal2 = val2 + val1;
                                try {
                                    returnedVersion = addDocAndGetVersion("id", id, "val2_l_dvo",
                                            map("inc", String.valueOf(val1)), "_version_", info.version);
                                    log.info("PARTIAL: Writing id=" + id + ", val=[" + nextVal1 + "," + nextVal2
                                            + "], version=" + info.version + ", Prev was=[" + val1 + "," + val2
                                            + "].  Returned version=" + returnedVersion);
                                } catch (RuntimeException e) {
                                    if (e.getMessage() != null && e.getMessage().contains("version conflict")
                                            || e.getMessage() != null && e.getMessage().contains("Conflict")) {
                                        // It's okay for a leader to reject a concurrent request
                                        log.warn("Conflict during partial update, rejected id={}, {}", id, e);
                                    } else if (e.getMessage() != null
                                            && e.getMessage().contains("Document not found for update.")
                                            && e.getMessage().contains("id=" + id)) {
                                        log.warn(
                                                "Attempted a partial update for a recently deleted document, rejected id={}, {}",
                                                id, e);
                                    } else {
                                        throw e;
                                    }
                                    returnedVersion = null;
                                }
                            }

                            // only update model if update had no conflict & the version is newer
                            synchronized (model) {
                                DocInfo currInfo = model.get(id);
                                if (null != returnedVersion && (Math.abs(returnedVersion.longValue()) > Math
                                        .abs(currInfo.version))) {
                                    model.put(id, new DocInfo(returnedVersion.longValue(), nextVal1, nextVal2));
                                }

                            }
                        }

                        if (!before) {
                            lastId = id;
                        }
                    }
                } catch (Throwable e) {
                    operations.set(-1L);
                    log.error("", e);
                    throw new RuntimeException(e);
                }
            }
        };

        threads.add(thread);

    }

    // Read threads
    for (int i = 0; i < nReadThreads; i++) {
        Thread thread = new Thread("READER" + i) {
            Random rand = new Random(random().nextInt());

            @SuppressWarnings("unchecked")
            @Override
            public void run() {
                try {
                    while (operations.decrementAndGet() >= 0) {
                        // bias toward a recently changed doc
                        int id = rand.nextInt(100) < 25 ? lastId : rand.nextInt(ndocs);

                        // when indexing, we update the index, then the model
                        // so when querying, we should first check the model, and then the index

                        boolean realTime = rand.nextInt(100) < percentRealtimeQuery;
                        DocInfo expected;

                        if (realTime) {
                            expected = model.get(id);
                        } else {
                            synchronized (TestStressInPlaceUpdates.this) {
                                expected = committedModel.get(id);
                            }
                        }

                        if (VERBOSE) {
                            log.info("querying id {}", id);
                        }
                        ModifiableSolrParams params = new ModifiableSolrParams();
                        if (realTime) {
                            params.set("wt", "json");
                            params.set("qt", "/get");
                            params.set("ids", Integer.toString(id));
                        } else {
                            params.set("wt", "json");
                            params.set("q", "id:" + Integer.toString(id));
                            params.set("omitHeader", "true");
                        }

                        int clientId = rand.nextInt(clients.size());
                        if (!realTime)
                            clientId = clientIndexUsedForCommit;

                        QueryResponse response = clients.get(clientId).query(params);
                        if (response.getResults().size() == 0) {
                            // there's no info we can get back with a delete, so not much we can check without further synchronization
                        } else if (response.getResults().size() == 1) {
                            final SolrDocument actual = response.getResults().get(0);
                            final String msg = "Realtime=" + realTime + ", expected=" + expected + ", actual="
                                    + actual;
                            assertNotNull(msg, actual);

                            final Long foundVersion = (Long) actual.getFieldValue("_version_");
                            assertNotNull(msg, foundVersion);
                            assertTrue(msg + "... solr doc has non-positive version???",
                                    0 < foundVersion.longValue());
                            final Integer intVal = (Integer) actual.getFieldValue("val1_i_dvo");
                            assertNotNull(msg, intVal);

                            final Long longVal = (Long) actual.getFieldValue("val2_l_dvo");
                            assertNotNull(msg, longVal);

                            assertTrue(msg + " ...solr returned an older version than the model; "
                                    + "should not be possible given the order of operations in writer threads",
                                    Math.abs(expected.version) <= foundVersion.longValue());

                            if (foundVersion.longValue() == expected.version) {
                                assertEquals(msg, expected.intFieldValue, intVal.intValue());
                                assertEquals(msg, expected.longFieldValue, longVal.longValue());
                            }

                            // Some things we can assert about any doc returned from solr,
                            // even if it's newer than our (expected) model information...

                            assertTrue(msg + " ...how did a doc in solr get a non-positive intVal?",
                                    0 < intVal);
                            assertTrue(msg + " ...how did a doc in solr get a non-positive longVal?",
                                    0 < longVal);
                            assertEquals(msg
                                    + " ...intVal and longVal in solr doc are internally (modulo) inconsistent with each other",
                                    0, (longVal % intVal));

                            // NOTE: when foundVersion is greater than the version read from the model,
                            // it's not possible to make any assertions about the field values in solr relative to the
                            // field values in the model -- ie: we can *NOT* assert expected.longFieldVal <= doc.longVal
                            //
                            // it's tempting to think that this would be possible if we changed our model to preserve the
                            // "old" values when doing a delete, but that's still no guarantee because of how opportunistic
                            // concurrency works with negative versions:  When adding a doc, we can assert that it must not
                            // exist with version<0, but we can't assert that the *reason* it doesn't exist was because of
                            // a delete with the specific version of "-42".
                            // So a writer thread might (1) prep to add a doc for the first time with "intValue=1,_version_=-1",
                            // and that add may succeed and (2) return some version X which is put in the model.  but
                            // in between #1 and #2 other threads may have added & deleted the doc repeatedly, updating
                            // the model with intValue=7,_version_=-42, and a reader thread might meanwhile read from the
                            // model before #2 and expect intValue=7, but get intValue=1 from solr (with a greater version)

                        } else {
                            fail(String.format(Locale.ENGLISH, "There was more than one result: %s",
                                    response));
                        }
                    }
                } catch (Throwable e) {
                    operations.set(-1L);
                    log.error("", e);
                    throw new RuntimeException(e);
                }
            }
        };

        threads.add(thread);
    }
    // Start all threads
    for (Thread thread : threads) {
        thread.start();
    }

    for (Thread thread : threads) {
        thread.join();
    }

    { // final pass over uncommitted model with RTG

        for (SolrClient client : clients) {
            for (Map.Entry<Integer, DocInfo> entry : model.entrySet()) {
                final Integer id = entry.getKey();
                final DocInfo expected = entry.getValue();
                final SolrDocument actual = client.getById(id.toString());

                String msg = "RTG: " + id + "=" + expected;
                if (null == actual) {
                    // a deleted or non-existent document;
                    // sanity check that the model agrees...
                    assertTrue(msg + " is deleted/non-existent in Solr, but model has non-neg version",
                            expected.version < 0);
                    assertEquals(msg + " is deleted/non-existent in Solr", expected.intFieldValue, 0);
                    assertEquals(msg + " is deleted/non-existent in Solr", expected.longFieldValue, 0);
                } else {
                    msg = msg + " <==VS==> " + actual;
                    assertEquals(msg, expected.intFieldValue, actual.getFieldValue("val1_i_dvo"));
                    assertEquals(msg, expected.longFieldValue, actual.getFieldValue("val2_l_dvo"));
                    assertEquals(msg, expected.version, actual.getFieldValue("_version_"));
                    assertTrue(msg + " doc exists in solr, but version is negative???", 0 < expected.version);
                }
            }
        }
    }

    { // do a final search and compare every result with the model

        // because commits don't provide any sort of concrete versioning (or optimistic concurrency constraints)
        // there's no way to guarantee that our committedModel matches what was in Solr at the time of the last commit.
        // It's possible other threads made additional writes to solr before the commit was processed, but after
        // the committedModel variable was assigned its new value.
        //
        // what we can do however, is commit all completed updates, and *then* compare solr search results
        // against the (new) committed model....

        waitForThingsToLevelOut(30); // NOTE: this does an automatic commit for us & ensures replicas are up to date
        committedModel = new HashMap<>(model);

        // first, prune the model of any docs that have negative versions
        // ie: were never actually added, or were ultimately deleted.
        for (int i = 0; i < ndocs; i++) {
            DocInfo info = committedModel.get(i);
            if (info.version < 0) {
                // first, a quick sanity check of the model itself...
                assertEquals("Inconsistent int value in model for deleted doc" + i + "=" + info, 0,
                        info.intFieldValue);
                assertEquals("Inconsistent long value in model for deleted doc" + i + "=" + info, 0L,
                        info.longFieldValue);

                committedModel.remove(i);
            }
        }

        for (SolrClient client : clients) {
            QueryResponse rsp = client.query(params("q", "*:*", "sort", "id asc", "rows", ndocs + ""));
            for (SolrDocument actual : rsp.getResults()) {
                final Integer id = Integer.parseInt(actual.getFieldValue("id").toString());
                final DocInfo expected = committedModel.get(id);

                assertNotNull("Doc found but missing/deleted from model: " + actual, expected);

                final String msg = "Search: " + id + "=" + expected + " <==VS==> " + actual;
                assertEquals(msg, expected.intFieldValue, actual.getFieldValue("val1_i_dvo"));
                assertEquals(msg, expected.longFieldValue, actual.getFieldValue("val2_l_dvo"));
                assertEquals(msg, expected.version, actual.getFieldValue("_version_"));
                assertTrue(msg + " doc exists in solr, but version is negative???", 0 < expected.version);

                // also sanity check the model (which we already know matches the doc)
                assertEquals("Inconsistent (modulo) values in model for id " + id + "=" + expected, 0,
                        (expected.longFieldValue % expected.intFieldValue));
            }
            assertEquals(committedModel.size(), rsp.getResults().getNumFound());
        }
    }
}

From source file:org.sakaiproject.kernel.test.KernelIntegrationBase.java

/**
 * @throws IOException
 * @throws AccessDeniedException
 * @throws RepositoryException
 * @throws JCRNodeFactoryServiceException
 * @throws InterruptedException
 * @throws NoSuchAlgorithmException
 */
public static void loadTestUsers() throws IOException, AccessDeniedException, RepositoryException,
        JCRNodeFactoryServiceException, InterruptedException, NoSuchAlgorithmException {
    KernelManager km = new KernelManager();

    JCRNodeFactoryService jcrNodeFactoryService = km.getService(JCRNodeFactoryService.class);
    JCRService jcrService = km.getService(JCRService.class);
    jcrService.loginSystem();
    for (String user : USERS) {
        InputStream in = ResourceLoader.openResource(USERBASE + user + ".json",
                SakaiAuthenticationFilter.class.getClassLoader());
        Node n = jcrNodeFactoryService.setInputStream(getUserEnvPath(user), in, RestProvider.CONTENT_TYPE);

        n.setProperty(JcrAuthenticationResolverProvider.JCRPASSWORDHASH, StringUtils.sha1Hash("password"));
        in.close();
    }
    jcrService.getSession().save();
    jcrService.logout();
    Thread.yield();
    Thread.sleep(1000);

}

From source file:com.indeed.lsmtree.core.TestImmutableBTreeIndex.java

public void testSeekPrevious() throws Exception {
    final int[] ints = createTree();
    final ImmutableBTreeIndex.Reader<Integer, Long> reader = new ImmutableBTreeIndex.Reader(tmpDir,
            new IntSerializer(), new LongSerializer(), false);
    final int max = ints[ints.length - 1];
    final AtomicInteger done = new AtomicInteger(8);
    for (int i = 0; i < 8; i++) {
        final int index = i;
        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    final Random r = new Random(index);
                    for (int i = 0; i < treeSize; i++) {
                        int rand = r.nextInt(max + 10);
                        int insertionindex = Arrays.binarySearch(ints, rand);
                        final Iterator<Generation.Entry<Integer, Long>> iterator = reader.reverseIterator(rand,
                                true);
                        final boolean hasPrevious = iterator.hasNext();
                        Generation.Entry<Integer, Long> entry = null;
                        assertEquals(
                                "rand: " + rand + " hasPrevious: " + hasPrevious
                                        + (hasPrevious ? " previous: " + (entry = iterator.next()) : ""),
                                hasPrevious, insertionindex != -1);
                        if (hasPrevious) {
                            if (entry == null)
                                entry = iterator.next();
                            assertTrue(entry.getKey() <= rand);
                            assertTrue(entry.getKey().longValue() == entry.getValue());
                        }
                        if (insertionindex >= 0) {
                            if (entry == null)
                                entry = iterator.next();
                            assertTrue(rand == ints[insertionindex]);
                            assertTrue(entry.getKey() == rand);
                            Generation.Entry<Integer, Long> result = reader.get(rand);
                            assertTrue(result.getValue() == rand);
                        } else {
                            if (hasPrevious) {
                                assertTrue(ints[(~insertionindex) - 1] < rand);
                                assertTrue(ints[(~insertionindex) - 1] == entry.getKey());
                            }
                            Generation.Entry<Integer, Long> result = reader.get(rand);
                            assertTrue(result == null);
                        }
                    }
                } finally {
                    done.decrementAndGet();
                }
            }
        }).start();
    }
    while (done.get() > 0) {
        Thread.yield();
    }
    reader.close();
}

From source file:org.projectbuendia.client.ui.FunctionalTestCase.java

protected void checkViewDisplayedWithin(Matcher<View> matcher, int timeoutMs) {
    long timeoutTime = System.currentTimeMillis() + timeoutMs;
    boolean viewFound = false;
    Throwable viewAssertionError = null;
    while (timeoutTime > System.currentTimeMillis() && !viewFound) {
        try {
            onView(matcher).check(matches(isDisplayed()));
            viewFound = true;
        } catch (Throwable t) {
            viewAssertionError = t;
            try {
                Thread.sleep(100);
            } catch (InterruptedException e1) {
                LOG.w("Sleep interrupted, yielding instead.");
                Thread.yield();
            }
        }
    }

    if (!viewFound) {
        throw new RuntimeException(viewAssertionError);
    }
}