Example usage for java.lang Thread yield

Introduction

On this page you can find usage examples for java.lang.Thread.yield().

Prototype

public static native void yield();

Document

A hint to the scheduler that the current thread is willing to yield its current use of a processor.
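
As a quick, self-contained illustration of that hint, here is a minimal sketch of the pattern most of the examples below use: a spin-wait loop that calls Thread.yield() on each pass until some condition flips. This sketch is not taken from any of the projects listed below; the class and field names are hypothetical.

import java.util.concurrent.atomic.AtomicBoolean;

public class YieldSpinWait {

    // Hypothetical completion flag; in the examples below the condition is a
    // counter, a paragraph status, or an executor's termination state.
    private static final AtomicBoolean done = new AtomicBoolean(false);

    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            try {
                Thread.sleep(500); // simulate some work
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            done.set(true);
        }, "worker");
        worker.start();

        // Spin until the worker finishes, yielding the processor on each pass.
        // yield() is only a scheduling hint; the loop may still burn CPU.
        while (!done.get()) {
            Thread.yield();
        }
        worker.join();
        System.out.println("worker finished");
    }
}

Note that yield() gives no ordering or visibility guarantees; production code usually prefers blocking primitives such as Thread.join(), CountDownLatch, or ExecutorService.awaitTermination() over yield-based spin loops.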

Usage

From source file:com.xerox.amazonws.sdb.Domain.java

/**
 * Gets attributes of given items. This method threads off the get requests and
 * aggregates the responses.
 *
 * @param items the list of items to get attributes for
 * @param listener class that will be notified when items are ready
 * @throws SDBException wraps checked exceptions
 */
public void getItemsAttributes(List<String> items, ItemListener listener) throws SDBException {
    ThreadPoolExecutor pool = getThreadPoolExecutor();
    pool.setRejectedExecutionHandler(new RejectionHandler());

    Counter running = new Counter(0);
    for (String item : items) {
        while (pool.getActiveCount() == pool.getMaximumPoolSize()) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException ex) {
            }
        }
        synchronized (running) {
            running.increment();
        }
        pool.execute(new AttrWorker(getItem(item), running, null, listener));
        Thread.yield();
    }
    while (true) {
        if (running.getValue() == 0) {
            break;
        }
        try {
            Thread.sleep(500);
        } catch (InterruptedException ex) {
        }
    }
    if (this.executor == null) {
        pool.shutdown();
    }
}

From source file:org.apache.hadoop.hbase.regionserver.TestSplitLogWorker.java

@Test(timeout = 60000)
public void testPreemptTask() throws Exception {
    LOG.info("testPreemptTask");
    SplitLogCounters.resetCounters();
    final ServerName SRV = ServerName.valueOf("tpt_svr,1,1");
    final String PATH = ZKSplitLog.getEncodedNodeName(zkw, "tpt_task");
    RegionServerServices mockedRS = getRegionServer(SRV);
    SplitLogWorker slw = new SplitLogWorker(zkw, TEST_UTIL.getConfiguration(), mockedRS, neverEndingTask);
    slw.start();
    try {
        Thread.yield(); // let the worker start
        Thread.sleep(1000);
        waitForCounter(SplitLogCounters.tot_wkr_task_grabing, 0, 1, WAIT_TIME);

        // this time create a task node after starting the splitLogWorker
        zkw.getRecoverableZooKeeper().create(PATH, new SplitLogTask.Unassigned(MANAGER).toByteArray(),
                Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);

        waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, 1, WAIT_TIME);
        assertEquals(1, slw.taskReadySeq);
        byte[] bytes = ZKUtil.getData(zkw, PATH);
        SplitLogTask slt = SplitLogTask.parseFrom(bytes);
        assertTrue(slt.isOwned(SRV));
        slt = new SplitLogTask.Owned(MANAGER);
        ZKUtil.setData(zkw, PATH, slt.toByteArray());
        waitForCounter(SplitLogCounters.tot_wkr_preempt_task, 0, 1, WAIT_TIME);
    } finally {
        stopSplitLogWorker(slw);
    }
}

From source file:de.uni_rostock.goodod.evaluator.OntologyTest.java

public void executeTest() throws Throwable {

    ExecutorService executor = Executors.newFixedThreadPool(threadCount);
    Set<URI> fromOntologies = new HashSet<URI>(25);
    Set<URI> toOntologies = new HashSet<URI>(25);
    Set<? extends OWLOntologyIRIMapper> bioTopLiteMapper = null;
    if (null != bioTopLiteURI) {
        bioTopLiteMapper = Collections.singleton(new SimpleIRIMapper(
                IRI.create("http://purl.org/biotop/biotoplite.owl"), IRI.create(bioTopLiteURI)));
    }
    OntologyCache cache = OntologyCache.setupSharedCache(bioTopLiteMapper, getIgnoredImports(), threadCount);
    NormalizerChainFactory chain = new NormalizerChainFactory();/* new NormalizerChainFactory(importer, intersector, namer, decomposer, subsumer);*/
    cache.setNormalizerFactory(chain);

    fromOntologies.addAll(groupAOntologies);

    if (globalConfig.getBoolean("one-way", false)) {
        /*
         * If one way comparisons are requested, we only compare group A to
         * group B (and model).
         */
        toOntologies.addAll(groupBOntologies);
    } else {

        /*
         * By default, we do cross-comparisons between the groups, so we
         * create a global set for both. For simplicity, this just 
         * means adding the second set to fromOntologies and aliasing it
         * as toOntologies.
         */

        fromOntologies.addAll(groupBOntologies);
        toOntologies = fromOntologies;
    }

    if (null != modelOntology) {
        toOntologies.add(modelOntology);
    }
    logger.info("Running comparisons for test '" + getTestName() + "'.");

    for (URI u1 : fromOntologies) {
        for (URI u2 : toOntologies) {
            if (u1.equals(u2)) {
                continue;
            }
            /*
             *  Working with the ontologies is resource intensive. We want
             *  to handle more than one at a time, especially on multicore
             *  machines, but neither starving ourselves of I/O nor
             *  generating massive cache or memory churn is very smart.
             */
            int waitCount = 0;
            while (inProgressCount.get() > threadCount) {
                if (0 == ++waitCount % 8) {

                    /* 
                     * Spin in a tight loop a few times, then yield in order to let
                     * the other threads finish.
                     */
                    Thread.yield();
                }
            }
            comparisonStarted();
            try {
                OntologyPair p = new OntologyPair(cache, u1, u2);
                executor.execute(new ComparisonRunner(u1, u2, p));
            } catch (Throwable e) {
                logger.warn("Could not compare " + u1.toString() + " and " + u2.toString() + ".", e);
                Set<URI> values = failedComparisons.get(u1);
                if (null != values) {
                    values.add(u2);
                } else {
                    values = new HashSet<URI>();
                    values.add(u2);
                    failedComparisons.put(u2, values);
                }
            }
        }

    }
    executor.shutdown();
    while (false == executor.isTerminated()) {
        // wait until we're done.
    }
    logger.info("Comparisons on '" + getTestName() + "' completed.");
    if (logger.isDebugEnabled()) {
        writeNormalizedOntologiesTo(fromOntologies, cache, new File(System.getProperty("java.io.tmpdir")));
    }
    cache.teardown();
    cache = null;
}

From source file:org.gradle.api.changedetection.state.DefaultDirectoryStateChangeDetecter.java

public void detectChanges(ChangeProcessor changeProcessor) {
    Clock c = new Clock();
    try {
        int lowestLevel = 0;
        try {
            lowestLevel = directoryListFileCreator.createDirectoryListFiles(directoryToProcess);
        } catch (IOException e) {
            throw new GradleException("failed to create directory list files", e);
        }

        // Calculate the digests of files and directories
        Map<String, DirectoryState> previousLevelDirectoryStates = Collections
                .unmodifiableMap(new HashMap<String, DirectoryState>());
        Map<String, DirectoryState> currentLevelDirectoryStates = new ConcurrentHashMap<String, DirectoryState>();
        for (int levelIndex = lowestLevel; levelIndex >= 0; levelIndex--) {
            final File directoryLevelListFile = stateFileUtil.getDirsListFile(levelIndex);
            threadPool = ThreadUtils.newFixedThreadPool(4);

            if (directoryLevelListFile.exists()) {
                final File stateFile = stateFileUtil
                        .getNewDirsStateFile(stateFileUtil.getDirsStateFilename(levelIndex));
                final StateFileWriter newDirectoriesStateFileWriter = new StateFileWriter(ioFactory, stateFile);

                BufferedReader directoryListFileReader = null;

                try {
                    directoryListFileReader = new BufferedReader(new FileReader(directoryLevelListFile));

                    String absoluteDirectoryPath = null;
                    while ((absoluteDirectoryPath = directoryListFileReader.readLine()) != null) {
                        final DirectoryState directoryState = directoryStateBuilder
                                .directory(new File(absoluteDirectoryPath)).getDirectoryState();

                        final DirectoryStateDigestCalculator digestCalculator = new DirectoryStateDigestCalculator(
                                directoryState, digesterCache, digesterUtil, this, currentLevelDirectoryStates,
                                previousLevelDirectoryStates, ioFactory);

                        threadPool.submit(digestCalculator);
                    }

                    // each directory level has to be processed before continuing with the next (higher) level
                    ThreadUtils.shutdown(threadPool);

                    final List<DirectoryState> currentLevelDirectoryStatesList = new ArrayList<DirectoryState>(
                            currentLevelDirectoryStates.values());

                    Collections.sort(currentLevelDirectoryStatesList);
                    // if one of the directory state processors failed -> change detection fails
                    // This is simple but not the most efficient approach; we could fail earlier, but that would complicate things

                    for (final DirectoryState directoryState : currentLevelDirectoryStatesList) {
                        final Throwable failureCause = directoryState.getFailureCause();
                        if (failureCause != null)
                            throw new GradleException("Failed to detect changes", failureCause);
                        else {
                            newDirectoriesStateFileWriter.addDigest(directoryState.getRelativePath(),
                                    directoryState.getDigest());
                        }
                    }

                } catch (IOException e) {
                    throw new GradleException("failed to detect changes (dirs."
                            + newDirectoriesStateFileWriter.getStateFile().getAbsolutePath()
                            + ".state write failed)", e);
                } finally {
                    IOUtils.closeQuietly(directoryListFileReader);
                    FileUtils.deleteQuietly(directoryLevelListFile);
                    newDirectoriesStateFileWriter.close();
                }
            }

            previousLevelDirectoryStates = Collections
                    .unmodifiableMap(new HashMap<String, DirectoryState>(currentLevelDirectoryStates));
            currentLevelDirectoryStates = new ConcurrentHashMap<String, DirectoryState>();
        }

        // Compare new and old directory state + notify DirectoryStateChangeDetecterListener
        try {
            boolean keepComparing = true;
            int currentLevel = 0;

            final StateChangeEventDispatcher stateChangeEventDispatcher = new StateChangeEventDispatcher(
                    stateChangeEventQueue, 100L, TimeUnit.MILLISECONDS, changeProcessor);
            final Thread changeProcessorEventThread = new Thread(stateChangeEventDispatcher);
            changeProcessorEventThread.start();

            threadPool = ThreadUtils.newFixedThreadPool(4);

            while (keepComparing && currentLevel <= lowestLevel) {
                keepComparing = stateComparator.compareState(this, currentLevel);

                currentLevel++;
            }

            ThreadUtils.shutdown(threadPool);

            while (!stateChangeEventQueue.isEmpty()) {
                Thread.yield();
            }
            stateChangeEventDispatcher.stopConsuming();

            ThreadUtils.join(changeProcessorEventThread);

            for (DirectoryStateDigestComparator directoryStateDigestComparator : directoryStateDigestComparators) {
                final Throwable failureCause = directoryStateDigestComparator.getFailureCause();
                if (failureCause != null)
                    throw new GradleException("failed to compare directory state", failureCause);
            }
        } catch (IOException e) {
            throw new GradleException("failed to compare new and old state", e);
        }

        // Remove old directory state
        try {
            FileUtils.deleteDirectory(stateFileUtil.getOldDirectoryStateDir());
        } catch (IOException e) {
            throw new GradleException("failed to clean old state", e);
        }
        // Move new to old directory state
        try {
            FileUtils.moveDirectory(stateFileUtil.getNewDirectoryStateDir(),
                    stateFileUtil.getOldDirectoryStateDir());
        } catch (IOException e) {
            throw new GradleException("failed to transfer current state to old state", e);
        }
    } finally {
        System.out.println(c.getTime());
    }
}

From source file:org.jahia.tools.jvm.ThreadMonitorTestIT.java

private void runParallelTest(String testName, Runnable runnable) throws InterruptedException {

    StopWatch stopWatch = new StopWatch(testName);
    stopWatch.start(Thread.currentThread().getName() + " dumping thread info");

    threadSet.clear();
    ThreadMonitor.getInstance().setActivated(true);
    ThreadMonitor.getInstance().setDebugLogging(enabledDebugLogging);
    ThreadMonitor.getInstance().setMinimalIntervalBetweenDumps(minimalIntervalBetweenDumps);

    for (int i = 0; i < THREAD_COUNT; i++) {
        Thread newThread = new Thread(runnable, testName + i);
        threadSet.add(newThread);
        Thread.yield();
        Thread.sleep(50);
        newThread.start();
    }

    logger.info("Waiting for test completion...");

    Thread.yield();
    while (ThreadMonitor.getInstance().isDumping()) {
        Thread.sleep(100);
    }

    for (Thread curThread : threadSet) {
        curThread.join();
    }

    ThreadMonitor.shutdownInstance();

    stopWatch.stop();
    logger.info(stopWatch.prettyPrint());

    Thread.sleep(minimalIntervalBetweenDumps * 2);
}

From source file:com.slytechs.jnetstream.livecapture.AbstractLiveCapture.java

/**
 * Captures count packets using an open capture session. Once the capture
 * session has been closed it can not be opened again.
 *
 * @param count
 * @throws IOException
 */
private void start(final int count) throws IOException {
    if (isOpen() == false) {
        throw new IOException("Capture session is closed");
    }

    /**
     * Check if we still have running workers
     */
    if (workers != null) {
        return;
    }

    this.dispatched = 0;

    if (workers == null) {
        workers = new Thread[devices.length];

        /*
         * Initialize with dummy data, so that the array is not full of nulls,
         * which would break the loop below if early workers start exiting
         * quickly and resetting workers back to null as well.
         */
        Arrays.fill(workers, Thread.currentThread());
    }

    for (int i = 0; i < devices.length; i++) {
        final int index = i;

        final String name = "Live-" + devices[i].getDisplayName();

        /*
         * Our capture session worker thread. Closes the LiveCapture when it's done.
         */
        workers[i] = new Thread(new Runnable() {

            public void run() {

                try {
                    capture(count, index);
                } catch (IOException e) {
                    /*
                     * The IO exception will be reported at next available opportunity
                     * such as in LiveIterator's next or hasNext method calls.
                     */
                    ioError.set(e);

                } finally {
                    workers[index] = null;

                    /*
                     * Now check if there are any workers left, if not then reset the
                     * workers variable, as a flag
                     */
                    int j = 0;
                    for (; j < workers.length; j++) {
                        if (workers[j] != null) {
                            break;
                        }
                    }
                    if (j == workers.length) {
                        workers = null;
                    }

                    Thread.yield();
                }
            }

        }, name);

        workers[i].start();
    }
}

From source file:org.apache.synapse.transport.mail.MailEchoRawXMLTest.java

public void testRoundTripMultiPartKorean() throws Exception {

    String msgId = UUIDGenerator.getUUID();
    Options options = new Options();
    options.setTo(new EndpointReference("mailto:synapse.test.6@gmail.com"));
    options.setReplyTo(new EndpointReference("mailto:synapse.test.0@gmail.com"));
    options.setAction(Constants.AXIS2_NAMESPACE_URI + "/echoOMElement");
    options.setMessageId(msgId);

    options.setProperty(MailConstants.TRANSPORT_MAIL_FORMAT, MailConstants.TRANSPORT_FORMAT_MP);

    ServiceClient sender = new ServiceClient(getClientCfgCtx(), null);
    sender.setOptions(options);
    sender.fireAndForget(createKoreanPayload());

    Thread.yield();
    Thread.sleep(1000 * 10);

    Object reply = null;
    boolean replyNotFound = true;
    int retryCount = 3;
    while (replyNotFound) {
        log.debug("Checking for response ... with MessageID : " + msgId);
        reply = getMessage(msgId);
        if (reply != null) {
            replyNotFound = false;
        } else {
            if (retryCount-- > 0) {
                Thread.sleep(10000);
            } else {
                break;
            }
        }
    }

    if (reply != null && reply instanceof String) {
        log.debug("Result Body : " + reply);
        XMLStreamReader reader = StAXUtils.createXMLStreamReader(new StringReader((String) reply));
        SOAPEnvelope env = new StAXSOAPModelBuilder(reader).getSOAPEnvelope();
        if (env != null) {
            AXIOMXPath xpath = new AXIOMXPath("//my:myValue");
            xpath.addNamespace("my", "http://localhost/axis2/services/EchoXMLService");
            Object result = xpath.evaluate(env);
            if (result != null && result instanceof OMElement) {
                assertEquals("omTextValue", ((OMElement) result).getText());
            }
        }
    } else {
        fail("Did not receive the reply mail");
    }
}

From source file:org.apache.zeppelin.helium.HeliumApplicationFactoryTest.java

@Test
public void testUnloadOnInterpreterUnbind() throws IOException {
    // given
    HeliumPackage pkg1 = new HeliumPackage(HeliumType.APPLICATION, "name1", "desc1", "",
            HeliumTestApplication.class.getName(), new String[][] {}, "", "");

    Note note1 = notebook.createNote(anonymous);
    notebook.bindInterpretersToNote("user", note1.getId(),
            interpreterSettingManager.getDefaultInterpreterSettingList());

    Paragraph p1 = note1.addParagraph(AuthenticationInfo.ANONYMOUS);

    // make sure interpreter process running
    p1.setText("%mock1 job");
    p1.setAuthenticationInfo(anonymous);
    note1.run(p1.getId());
    while (p1.isTerminated() == false || p1.getResult() == null)
        Thread.yield();

    assertEquals(0, p1.getAllApplicationStates().size());
    String appId = heliumAppFactory.loadAndRun(pkg1, p1);
    ApplicationState app = p1.getApplicationState(appId);
    while (app.getStatus() != ApplicationState.Status.LOADED) {
        Thread.yield();
    }

    // when unbind interpreter
    notebook.bindInterpretersToNote("user", note1.getId(), new LinkedList<String>());

    // then
    assertEquals(ApplicationState.Status.UNLOADED, app.getStatus());

    // clean
    notebook.removeNote(note1.getId(), anonymous);
}

From source file:edu.upenn.cis.stormlite.DistributedCluster.java

/**
 * Shut down the cluster
 * 
 * @param string
 */
public void killTopology(String string) {
    if (quit.getAndSet(true) == false) {
        while (!quit.get())
            Thread.yield();
    }
    System.out.println(context.getMapOutputs() + " local map outputs and " + context.getReduceOutputs()
            + " local reduce outputs.");

    for (String key : context.getSendOutputs().keySet())
        System.out.println("Sent " + context.getSendOutputs().get(key) + " to " + key);
}

From source file:org.apache.zeppelin.notebook.NotebookTest.java

@Test
public void testCloneNote() throws IOException, CloneNotSupportedException, InterruptedException {
    Note note = notebook.createNote();
    note.getNoteReplLoader().setInterpreters(factory.getDefaultInterpreterSettingList());

    final Paragraph p = note.addParagraph();
    p.setText("hello world");
    note.runAll();
    while (p.isTerminated() == false || p.getResult() == null)
        Thread.yield();

    p.setStatus(Status.RUNNING);
    Note cloneNote = notebook.cloneNote(note.getId(), "clone note");
    Paragraph cp = cloneNote.paragraphs.get(0);
    assertEquals(cp.getStatus(), Status.READY);
    assertNotEquals(cp.getId(), p.getId());
    assertEquals(cp.text, p.text);
    assertEquals(cp.getResult().message(), p.getResult().message());
}