List of usage examples for java.lang.Thread.yield()
public static native void yield();
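Thread.yield() is only a hint to the scheduler that the current thread is willing to give up the processor. It guarantees nothing about scheduling order or memory visibility, which is why the examples below pair it with a volatile field, a lock, or a library call that establishes the condition being polled. A minimal, self-contained sketch of that spin-wait pattern (the YieldSpinExample class and its done flag are illustrative, not taken from any of the source files listed below):

public class YieldSpinExample {
    // volatile makes the worker's update visible to the spinning main thread
    private static volatile boolean done = false;

    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            try {
                Thread.sleep(100); // simulate some work
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            done = true;
        });
        worker.start();

        // Busy-wait until the worker finishes, yielding so the polling loop
        // does not monopolize a core.
        while (!done) {
            Thread.yield();
        }
        System.out.println("worker finished");
        worker.join();
    }
}

In production code a CountDownLatch, Future.get(), or LockSupport.parkNanos() is usually preferable to a raw yield loop; the sketch only shows where yield typically sits inside a polling loop, which is the shape most of the examples below share.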
From source file:jp.terasoluna.fw.web.thin.LimitedLock.java
/**
 * Acquires the lock unless the current thread is interrupted, limiting how many
 * threads may queue up for the lock.
 * <p>
 * If the number of threads already waiting for the lock exceeds the configured
 * threshold, the oldest waiting threads are interrupted and removed from the
 * waiting list before the current thread queues itself. If lock acquisition fails
 * because this thread is interrupted, its interrupt status is cleared before the
 * InterruptedException propagates.
 * </p>
 * @throws InterruptedException if the current thread is interrupted while waiting
 *         for the lock (including interruption by the queue-limiting logic)
 * @see java.util.concurrent.locks.ReentrantLock#lockInterruptibly()
 */
@Override
public void lockInterruptibly() throws InterruptedException {
    boolean successToLock = false;
    // Only apply the queue-length limit when the current thread is not already the
    // owner; a re-entrant acquisition by the owner never has to wait.
    if (getOwner() != Thread.currentThread()) {
        synchronized (lock) {
            // If more threads are queued than the threshold allows, interrupt the
            // oldest waiting threads so that newer requests are not starved.
            int queueLength = getQueueLength();
            if (queueLength > threshold) {
                HashSet<Thread> oldWaitingThreadSet = null;
                synchronized (waitingThreadList) {
                    List<Thread> oldWaitingThreadList = waitingThreadList.subList(0, queueLength - threshold);
                    oldWaitingThreadSet = new HashSet<Thread>(oldWaitingThreadList);
                }
                // Interrupt every queued thread that is also in the set of oldest waiters.
                for (Thread queuedThread : getQueuedThreads()) {
                    if (oldWaitingThreadSet.contains(queuedThread)) {
                        if (log.isDebugEnabled()) {
                            log.debug("interrupt thread '" + queuedThread + "'.");
                        }
                        synchronized (waitingThreadList) {
                            // Remove the thread from the waiting list before interrupting it,
                            // so its own cleanup in the finally block cannot race with this removal.
                            waitingThreadList.remove(queuedThread);
                            queuedThread.interrupt();
                            // Wait until the interrupted thread has actually left the lock's
                            // wait queue, so that getQueueLength() stays consistent with
                            // waitingThreadList for the next iteration.
                            while (getQueuedThreads().contains(queuedThread)) {
                                Thread.yield();
                            }
                        }
                    }
                }
            }
        }
    }
    try {
        synchronized (waitingThreadList) {
            waitingThreadList.add(Thread.currentThread());
        }
        super.lockInterruptibly();
        successToLock = true;
    } finally {
        // Whether or not the lock was acquired, remove this thread from the waiting list.
        synchronized (lock) {
            synchronized (waitingThreadList) {
                waitingThreadList.remove(Thread.currentThread());
                if (!successToLock) {
                    // Lock acquisition failed (this thread was interrupted); clear any
                    // remaining interrupt status before the exception propagates.
                    Thread.interrupted();
                }
            }
        }
    }
}
From source file:org.apache.zeppelin.helium.HeliumApplicationFactoryTest.java
@Test
public void testUnloadOnInterpreterRestart() throws IOException {
    // given
    HeliumPackage pkg1 = new HeliumPackage(HeliumType.APPLICATION, "name1", "desc1", "",
            HeliumTestApplication.class.getName(), new String[][] {}, "", "");
    Note note1 = notebook.createNote(anonymous);
    notebook.bindInterpretersToNote("user", note1.getId(),
            interpreterSettingManager.getDefaultInterpreterSettingList());
    String mock1IntpSettingId = null;
    for (InterpreterSetting setting : notebook.getBindedInterpreterSettings(note1.getId())) {
        if (setting.getName().equals("mock1")) {
            mock1IntpSettingId = setting.getId();
            break;
        }
    }
    Paragraph p1 = note1.addParagraph(AuthenticationInfo.ANONYMOUS);

    // make sure interpreter process running
    p1.setText("%mock1 job");
    p1.setAuthenticationInfo(anonymous);
    note1.run(p1.getId());
    while (p1.isTerminated() == false || p1.getResult() == null)
        Thread.yield();
    assertEquals(0, p1.getAllApplicationStates().size());
    String appId = heliumAppFactory.loadAndRun(pkg1, p1);
    ApplicationState app = p1.getApplicationState(appId);
    while (app.getStatus() != ApplicationState.Status.LOADED) {
        Thread.yield();
    }
    // wait until application is executed
    while (!"Hello world 1".equals(app.getOutput())) {
        Thread.yield();
    }
    // when restart interpreter
    interpreterSettingManager.restart(mock1IntpSettingId);
    while (app.getStatus() == ApplicationState.Status.LOADED) {
        Thread.yield();
    }
    // then
    assertEquals(ApplicationState.Status.UNLOADED, app.getStatus());

    // clean
    notebook.removeNote(note1.getId(), anonymous);
}
From source file:org.apache.streams.sysomos.provider.SysomosProvider.java
protected void enqueueItem(StreamsDatum datum) {
    boolean success;
    do {
        try {
            pauseForSpace(); // Don't lock before this pause. We don't want to block the readCurrent method
            lock.readLock().lock();
            success = providerQueue.offer(datum);
            Thread.yield();
        } finally {
            lock.readLock().unlock();
        }
    } while (!success);
}
From source file:phex.util.FileUtils.java
/**
 * This method performs a multi fallback file delete operation to try to
 * work around the Java problems with delete operations.
 * http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6213298
 *
 * @param file the file to delete.
 */
public static void deleteFileMultiFallback(File file) {
    boolean succ = file.delete();
    if (succ) {
        return;
    }
    NLogger.warn(FileUtils.class, "First delete operation failed.");

    // Try to run the garbage collector to make the file delete operation work
    System.gc();
    // Yield the thread and hope the GC kicks in...
    Thread.yield();

    // Try again...
    succ = file.delete();
    if (succ) {
        return;
    }
    NLogger.warn(FileUtils.class, "Second delete operation failed.");

    // Last chance... try to delete on exit...
    file.deleteOnExit();
    // and truncate the file to at least free up the space...
    try {
        FileUtils.truncateFile(file, 0);
    } catch (IOException exp) {
        NLogger.warn(FileUtils.class, "Delete/truncate operation failed.");
    }
}
From source file:phex.utils.FileUtils.java
/**
 * This method performs a multi fallback file delete operation to try to
 * work around the Java problems with delete operations.
 * http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6213298
 *
 * @param file the file to delete.
 */
public static void deleteFileMultiFallback(File file) {
    boolean succ = file.delete();
    if (succ) {
        return;
    }
    //NLogger.warn( FileUtils.class, "First delete operation failed." );

    // Try to run the garbage collector to make the file delete operation work
    System.gc();
    // Yield the thread and hope the GC kicks in...
    Thread.yield();

    // Try again...
    succ = file.delete();
    if (succ) {
        return;
    }
    //NLogger.warn( FileUtils.class, "Second delete operation failed." );

    // Last chance... try to delete on exit...
    file.deleteOnExit();
    // and truncate the file to at least free up the space...
    try {
        FileUtils.truncateFile(file, 0);
    } catch (IOException exp) {
        //NLogger.warn( FileUtils.class, "Delete/truncate operation failed." );
    }
}
From source file:org.apache.streams.elasticsearch.ElasticsearchPersistWriter.java
private synchronized void waitToCatchUp(int batchThreshold, int timeOutThresholdInMS) {
    int counter = 0;
    // If we still have 5 batches outstanding, we need to give it a minute to catch up
    while (this.getBatchesSent() - this.getBatchesResponded() > batchThreshold
            && counter < timeOutThresholdInMS) {
        try {
            Thread.yield();
            Thread.sleep(1);
            counter++;
        } catch (InterruptedException ie) {
            LOGGER.warn("Catchup was interrupted. Data may be lost");
            return;
        }
    }
}
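The pattern above (yield plus a short sleep, bounded by a counter) generalizes to any "wait for a condition with a timeout" helper. A hypothetical standalone version using only the JDK (the SpinWait class, awaitCondition name, and BooleanSupplier parameter are illustrative, not part of the Streams code):

import java.util.function.BooleanSupplier;

public final class SpinWait {

    /**
     * Polls the given condition until it becomes true or the timeout elapses.
     * Returns true if the condition was met, false on timeout.
     */
    public static boolean awaitCondition(BooleanSupplier condition, long timeoutMillis)
            throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (!condition.getAsBoolean()) {
            if (System.currentTimeMillis() >= deadline) {
                return false; // timed out
            }
            Thread.yield();  // scheduling hint: let other threads make progress
            Thread.sleep(1); // the sleep, not the yield, is what keeps CPU usage low
        }
        return true;
    }
}

A caller would use it as, for example, awaitCondition(() -> queue.isEmpty(), 5000).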
From source file:edu.iu.incntre.flowscalestatcollector.StatCollector.java
public void startUp() {
    logger.trace("Startup of StatCollector");
    try {
        if (isQuery) {
            // initiate sqlite database
            try {
                Class.forName(databaseClass);
                conn = DriverManager.getConnection(databaseDriver, dbUsername, dbPassword);
            } catch (ClassNotFoundException e2) {
                logger.error("{}", e2);
            } catch (SQLException e1) {
                // TODO Auto-generated catch block
                logger.error("{}", e1);
            }
            // end initiate database

            // start up thread
            statThread = new Thread(new Runnable() {

                @Override
                public void run() {
                    try {
                        logger.trace("Starting Thread ..");
                        logger.trace("Getting flows from switch every {} seconds", intervalTime);
                        List<OFStatistics> portStats;
                        List<OFStatistics> flowStats;
                        List<OFPhysicalPort> portStatus;
                        SwitchDevice swd = null;
                        String[] datapathIdStringElements = datapathIdStrings.split(",");
                        try {
                            while (statThread != null) {
                                calendar = Calendar.getInstance();
                                logger.trace("getting flows from switches");
                                // check if conn is null; if it is, reset the connection
                                if (conn == null) {
                                    conn = DriverManager.getConnection(databaseDriver, dbUsername, dbPassword);
                                }
                                for (String datapathIdString : datapathIdStringElements) {
                                    try {
                                        swd = flowscaleController.getSwitchDevices()
                                                .get(HexString.toLong(datapathIdString));
                                        if (swd == null) {
                                            logger.info("switch {} does not exist, is it connected?",
                                                    datapathIdString);
                                            continue;
                                        }
                                        logger.info("Getting flows from switch {} with ID {}",
                                                swd.getSwitchName(), datapathIdString);
                                        try {
                                            portStats = flowscaleController
                                                    .getSwitchStatisticsFromInterface(datapathIdString, "port");
                                            flowStats = flowscaleController
                                                    .getSwitchStatisticsFromInterface(datapathIdString, "flow");
                                            portStatus = swd.getPortStates();
                                            if (flowStats != null && portStats != null) {
                                                String flowStatsJSON = JSONConverter.toStat(flowStats, "flow")
                                                        .toJSONString();
                                                String portStatsJSON = JSONConverter.toStat(portStats, "port")
                                                        .toJSONString();
                                                String portStatusJSON = JSONConverter.toPortStatus(portStatus)
                                                        .toJSONString();

                                                // initialize or set hashmaps
                                                HashMap<Long, Long> tempPortStatTransmitted;
                                                HashMap<Long, Long> tempPortStatReceived;
                                                HashMap<String, Long> tempFlowStat;
                                                long datapathId = HexString.toLong(datapathIdString);

                                                if (tempPortStatTransmittedHashMap.get(datapathId) == null) {
                                                    tempPortStatTransmitted = new HashMap<Long, Long>();
                                                    tempPortStatTransmittedHashMap.put(datapathId,
                                                            tempPortStatTransmitted);
                                                } else {
                                                    tempPortStatTransmitted = tempPortStatTransmittedHashMap
                                                            .get(datapathId);
                                                }
                                                if (tempPortStatReceivedHashMap.get(datapathId) == null) {
                                                    tempPortStatReceived = new HashMap<Long, Long>();
                                                    tempPortStatReceivedHashMap.put(datapathId,
                                                            tempPortStatReceived);
                                                } else {
                                                    tempPortStatReceived = tempPortStatReceivedHashMap
                                                            .get(datapathId);
                                                }
                                                if (tempFlowStatHashMap.get(datapathId) == null) {
                                                    tempFlowStat = new HashMap<String, Long>();
                                                    tempFlowStatHashMap.put(datapathId, tempFlowStat);
                                                } else {
                                                    tempFlowStat = tempFlowStatHashMap.get(datapathId);
                                                }

                                                storeSwitchDetails(HexString.toLong(datapathIdString),
                                                        portStatsJSON, flowStatsJSON, portStatusJSON,
                                                        tempPortStatTransmitted, tempPortStatReceived,
                                                        tempFlowStat);
                                            } else {
                                                logger.error(
                                                        "Switch {} returned a null result possibility because the switch is not connected to the controller",
                                                        datapathIdString);
                                            }
                                        } catch (NoSwitchException e1) {
                                            // TODO Auto-generated catch block
                                            logger.error("Switch {} with ID {} is not connected aborting",
                                                    swd.getSwitchName(), datapathIdString);
                                        } catch (IOException e1) {
                                            logger.error("IOException {}", e1);
                                        } catch (InterruptedException e1) {
                                            logger.error("Thread Interrupted {}", e1);
                                            killThread();
                                        } catch (ExecutionException e1) {
                                            logger.error("Execution Exception {}", e1);
                                        } catch (TimeoutException e1) {
                                            logger.error("Switch Timeout Exception {}", e1);
                                            killThread();
                                        }
                                    } catch (Exception e) {
                                        logger.error("unchecked exception here {}", e);
                                        killThread();
                                        shutDown();
                                        Thread.yield();
                                    }
                                }
                                try {
                                    Thread.sleep(intervalTime);
                                } catch (InterruptedException e) {
                                    logger.error("{}", e);
                                    break;
                                }
                            }
                        } catch (Exception e) {
                            logger.error("exception in while {}", e);
                            shutDown();
                        }
                        try {
                            conn.close();
                        } catch (SQLException e) {
                            // TODO Auto-generated catch block
                            logger.error("{}", e);
                        }
                    } catch (Exception generalException) {
                        logger.error("General Exception throws {} ", generalException);
                    }
                }

                /**
                 * insert details into database; 3 tables will be populated: flow_stats, port_stats, and port_status
                 *
                 * @param datapathId
                 * @param portStats
                 * @param flowStats
                 * @param portStatus
                 * @param tempPortStatTransmitted
                 * @param tempPortStatReceived
                 * @param tempFlowStat
                 */
                private void storeSwitchDetails(long datapathId, String portStats, String flowStats,
                        String portStatus, HashMap<Long, Long> tempPortStatTransmitted,
                        HashMap<Long, Long> tempPortStatReceived, HashMap<String, Long> tempFlowStat) {
                    Object obj = JSONValue.parse(portStats);
                    JSONArray jsonArray = (JSONArray) obj;
                    for (int i = 0; i < jsonArray.size(); i++) {
                        JSONObject jsonObject = (JSONObject) jsonArray.get(i);
                        long transmittedPackets = (Long) jsonObject.get("transmit_packets");
                        long receivedPackets = (Long) jsonObject.get("receive_packets");
                        long portId = (Long) jsonObject.get("port_id");
                        // logger.info("the port is {}", portId);
                        // logger.info("{} packets transmitted and {} packets received", receivedPackets, transmittedPackets);
                        PreparedStatement prep = null;
                        try {
                            prep = null;
                            if (conn != null) {
                                prep = conn.prepareStatement("insert into port_stats values (?,?,?,?,?);");
                            } else {
                                logger.error("no connection object instantiated aborting .. ");
                                return;
                            }
                            prep.setLong(1, datapathId);
                            prep.setLong(2, calendar.getTimeInMillis());
                            if (tempPortStatTransmitted.get(portId) != null) {
                                long currentTransmittedPackets = transmittedPackets
                                        - tempPortStatTransmitted.get(portId);
                                if (currentTransmittedPackets < 0) {
                                    prep.setLong(5, transmittedPackets);
                                } else {
                                    prep.setLong(5, currentTransmittedPackets);
                                }
                            } else {
                                prep.setLong(5, transmittedPackets);
                            }
                            tempPortStatTransmitted.put(portId, transmittedPackets);

                            // take care of port received
                            if (tempPortStatReceived.get(portId) != null) {
                                long currentReceivedPackets = receivedPackets - tempPortStatReceived.get(portId);
                                if (currentReceivedPackets < 0) {
                                    prep.setLong(4, receivedPackets);
                                } else {
                                    prep.setLong(4, currentReceivedPackets);
                                }
                            } else {
                                prep.setLong(4, receivedPackets);
                            }
                            tempPortStatReceived.put(portId, receivedPackets);
                            prep.setLong(3, portId);
                            prep.addBatch();
                            conn.setAutoCommit(false);
                            prep.executeBatch();
                            conn.setAutoCommit(true);
                        } catch (SQLRecoverableException sqlRecoverableException) {
                            logger.error("{}", sqlRecoverableException);
                            // exit function since there is a timeout
                            return;
                        } catch (SQLException e) {
                            logger.error("{}", e);
                        } finally {
                            if (prep != null) {
                                try {
                                    prep.close();
                                } catch (SQLException e) {
                                    // TODO Auto-generated catch block
                                    logger.error("{}", e);
                                }
                            }
                        }
                    }

                    Object flowJSONobj = JSONValue.parse(flowStats);
                    JSONArray flowJsonArray = (JSONArray) flowJSONobj;
                    for (int i = 0; i < flowJsonArray.size(); i++) {
                        JSONObject jsonObject = (JSONObject) flowJsonArray.get(i);
                        long packets = (Long) jsonObject.get("packet_count");
                        String matchString = (String) jsonObject.get("match");
                        String action = (String) jsonObject.get("actions");
                        long priority = (Long) jsonObject.get("priority");
                        PreparedStatement prep = null;
                        try {
                            prep = conn.prepareStatement("insert into flow_stats values (?,?,?,?,?,?);");
                            String insertString = datapathId + "," + calendar.getTimeInMillis() + ","
                                    + matchString + "," + action;
                            logger.debug("flow_stat values to insert are {}", insertString);
                            prep.setLong(1, datapathId);
                            prep.setLong(2, calendar.getTimeInMillis());
                            if (tempFlowStat.get(matchString) != null) {
                                long packetsReceived = packets - tempFlowStat.get(matchString);
                                if (packetsReceived < 0) {
                                    prep.setLong(5, packets);
                                } else {
                                    prep.setLong(5, packetsReceived);
                                }
                            } else {
                                prep.setLong(5, packets);
                            }
                            tempFlowStat.put(matchString, packets);
                            prep.setString(3, matchString);
                            prep.setString(4, action);
                            prep.setShort(6, (short) priority);
                            prep.addBatch();
                            conn.setAutoCommit(false);
                            prep.executeBatch();
                            conn.setAutoCommit(true);
                        } catch (SQLException e) {
                            // TODO Auto-generated catch block
                            logger.error("error when insert flow {} in switch {}", matchString, datapathId);
                            logger.error("{}", e);
                        } finally {
                            if (prep != null) {
                                try {
                                    prep.close();
                                } catch (SQLException e) {
                                    // TODO Auto-generated catch block
                                    logger.error("{}", e);
                                }
                            }
                        }
                    }

                    Object portStatusJSONobj = JSONValue.parse(portStatus);
                    JSONArray portStatusJsonArray = (JSONArray) portStatusJSONobj;
                    for (int i = 0; i < portStatusJsonArray.size(); i++) {
                        byte portStatusValue = 0;
                        JSONObject jsonObject = (JSONObject) portStatusJsonArray.get(i);
                        long portId = (Long) jsonObject.get("port_id");
                        String portAddress = (String) jsonObject.get("port_address");
                        try {
                            portStatusValue = (byte) (Integer.parseInt(jsonObject.get("state").toString()) % 2);
                        } catch (NumberFormatException nfe) {
                            logger.error("{}", nfe);
                            continue;
                        }
                        PreparedStatement prep = null;
                        try {
                            prep = conn.prepareStatement("insert into port_status values (?,?,?,?,?);");
                            prep.setLong(1, datapathId);
                            prep.setLong(2, calendar.getTimeInMillis());
                            prep.setLong(3, portId);
                            prep.setString(4, portAddress);
                            prep.setByte(5, portStatusValue);
                            prep.addBatch();
                            conn.setAutoCommit(false);
                            prep.executeBatch();
                            conn.setAutoCommit(true);
                        } catch (SQLException e) {
                            // TODO Auto-generated catch block
                            logger.error("{}", e);
                        } finally {
                            if (prep != null) {
                                try {
                                    prep.close();
                                } catch (SQLException e) {
                                    // TODO Auto-generated catch block
                                    logger.error("{}", e);
                                }
                            }
                        }
                    }
                }
            }, "Switch Stat Collector");
            statThread.start();
        }
    } catch (Exception e) {
        logger.error("general excecption thrown {}", e);
    }
}
From source file:org.alfresco.bm.server.EventController.java
/**
 * Do the actual run but without concern for exceptions, which will be logged.
 */
private void runImpl() {
    Set<String> staleDrivers = new HashSet<String>(3); // Keep track of any stale drivers

    int eventsPerSecond = (threadCount * eventsPerSecondPerThread);
    String msgStarted = "Event processing started: " + testRunFqn + " (" + eventsPerSecond
            + " events per second using " + threadCount + " threads)";
    logger.info("\t" + msgStarted);
    logService.log(LogLevel.INFO, msgStarted);

    // Keep details on when we started looking for events
    long eventProcessStartTime = System.currentTimeMillis();
    int eventSearchesPerformed = 0;

    runStateChanged: while (isRunning()) {
        long eventProcessSearchTime = System.currentTimeMillis();
        // Make sure we don't look for events too frequently
        while (true) {
            if (!isRunning()) {
                break runStateChanged;
            }
            // Calculate how many searches we are allowed to have performed
            long eventProcessElapsedTime = eventProcessSearchTime - eventProcessStartTime;
            int eventSearchesAllowed = (int) Math.floor((eventProcessElapsedTime / 1000.0) * eventsPerSecond);
            if (eventSearchesPerformed < eventSearchesAllowed) {
                // Yield to other threads
                Thread.yield();
                // We are allowed to do more
                break;
            }
            // We need to wait and allow enough time to elapse.
            // We cut the mean time between checks in half
            long toSleep = (long) (1000L / eventsPerSecond) / 2;
            toSleep = (toSleep < 10L) ? 10L : toSleep;
            synchronized (this) {
                try {
                    this.wait(toSleep);
                } catch (InterruptedException e) {
                }
            }
            // Now go back around to see if we are allowed to proceed
            eventProcessSearchTime = System.currentTimeMillis();
        }
        // We record the event search regardless of a miss or a hit in the queue
        eventSearchesPerformed++;

        // Grab an event.
        // First look for events specific to this driver
        Event event = eventService.nextEvent(driverId, eventProcessSearchTime);
        if (event == null) {
            // Nothing found for the driver.
            // Look for events from other drivers, giving them a grace period
            event = eventService.nextEvent(null, eventProcessSearchTime - assignedEventGracePeriod);
            if (event != null) {
                String driver = event.getDriver();
                if (staleDrivers.add(driver)) {
                    logger.error("Driver " + driver + " is leaving stale events. Check server load.");
                }
            }
        }
        // Do we have an event to process?
        if (event == null) {
            long count = eventService.count();
            if (count == 0) {
                // Look in the results to see if the run was started at some point
                List<EventRecord> startRecords = resultService.getResults(Event.EVENT_NAME_START, 0, 1);
                if (startRecords.size() == 0) {
                    // The test has not *ever* been started.
                    // We do that now; note that the event name will enforce a unique ID
                    event = new Event(Event.EVENT_NAME_START, 0L, null);
                    try {
                        eventService.putEvent(event);
                        // There is no guarantee that it actually went in
                    } catch (RuntimeException e) {
                        // We were unable to start the whole process.
                        // We assume that someone else has.
                    }
                } else {
                    // The test was started but there are no more events remaining.
                    // Quit
                    if (ctx != null) // The controller might have been run manually
                    {
                        ctx.publishEvent(new ContextStoppedEvent(ctx));
                    }
                }
            }
            // Go back to the queue
            continue;
        }
        // Find the processor for the event
        EventProcessor processor = getProcessor(event);
        // Schedule it
        EventWork work = new EventWork(driverId, testRunFqn, event, driverIds, processor, eventProducers,
                eventService, resultService, sessionService, logService);
        try {
            // Grabbing an event automatically applies a short-lived lock to prevent
            // any other drivers from grabbing the same event before the event is locked
            // for execution.
            executor.execute(work);
        } catch (RejectedExecutionException e) {
            // Should not occur as the caller executes
            eventSearchesPerformed += threadCount;
            // Log it
            logService.log(LogLevel.WARN, "EventController's execution of an event was rejected. "
                    + "Are there enough drivers to handle the event load?");
        } catch (RuntimeException e) {
            // Put here in case a CallerRunsPolicy is used
            logger.error("execute failed (pool or CallerRunsPolicy)", e);
        }
    }

    String msgStopped = "Event processing stopped: " + testRunFqn;
    logger.info("\t" + msgStopped);
    logService.log(LogLevel.INFO, msgStopped);
}
From source file:org.sakaiproject.kernel.test.KernelIntegrationBase.java
public static void loadTestSites() throws IOException, JCRNodeFactoryServiceException, RepositoryException,
        NoSuchAlgorithmException, InterruptedException {
    KernelManager km = new KernelManager();
    JCRNodeFactoryService jcrNodeFactoryService = km.getService(JCRNodeFactoryService.class);
    JCRService jcrService = km.getService(JCRService.class);
    jcrService.loginSystem();
    for (String siteName : SITES) {
        InputStream in = ResourceLoader.openResource(SITEBASE + siteName + "/groupdef.json",
                KernelIntegrationBase.class.getClassLoader());
        @SuppressWarnings("unused")
        Node n = jcrNodeFactoryService.setInputStream(
                PathUtils.normalizePath(siteName + SiteService.PATH_SITE + SiteService.FILE_GROUPDEF), in,
                RestProvider.CONTENT_TYPE);
        in.close();
        LOG.info("Test site saved: " + siteName + SiteService.PATH_SITE + SiteService.FILE_GROUPDEF);
    }
    jcrService.getSession().save();
    Thread.yield();
    Thread.sleep(1000);
    LOG.info("test sites loaded.");
    jcrService.logout();
}
From source file:org.apache.hadoop.hbase.rest.TestGetAndPutResource.java
@Test
public void testSingleCellGetJSON() throws IOException, JAXBException {
    final String path = "/" + TABLE + "/" + ROW_4 + "/" + COLUMN_1;
    Response response = client.put(path, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_4));
    assertEquals(response.getCode(), 200);
    Thread.yield();
    response = client.get(path, Constants.MIMETYPE_JSON);
    assertEquals(response.getCode(), 200);
    assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
    response = deleteRow(TABLE, ROW_4);
    assertEquals(response.getCode(), 200);
}