List of usage examples for java.lang.Thread.isAlive()
public final native boolean isAlive();
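isAlive() returns true only between a successful start() and the thread's death; it is false both before start() and after the thread has terminated. A minimal, self-contained sketch of those three states (class and thread names are illustrative, not taken from the examples below):

public class IsAliveDemo {
    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            try {
                Thread.sleep(500); // simulate some work
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }, "worker");

        System.out.println("before start: " + worker.isAlive()); // false, not started yet
        worker.start();
        System.out.println("after start:  " + worker.isAlive()); // true, still running
        worker.join();                                           // wait for it to finish
        System.out.println("after join:   " + worker.isAlive()); // false, thread has died
    }
}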
From source file:org.apache.flink.test.recovery.JobManagerHAProcessFailureBatchRecoveryITCase.java
@Test
public void testJobManagerProcessFailure() throws Exception {
    // Config
    final int numberOfJobManagers = 2;
    final int numberOfTaskManagers = 2;
    final int numberOfSlotsPerTaskManager = 2;

    assertEquals(PARALLELISM, numberOfTaskManagers * numberOfSlotsPerTaskManager);

    // Setup
    // Test actor system
    ActorSystem testActorSystem;

    // Job managers
    final JobManagerProcess[] jmProcess = new JobManagerProcess[numberOfJobManagers];

    // Task managers
    final ActorSystem[] tmActorSystem = new ActorSystem[numberOfTaskManagers];

    // Leader election service
    LeaderRetrievalService leaderRetrievalService = null;

    // Coordination between the processes goes through a directory
    File coordinateTempDir = null;

    try {
        final Deadline deadline = TestTimeOut.fromNow();

        // Coordination directory
        coordinateTempDir = createTempDirectory();

        // Job Managers
        Configuration config = ZooKeeperTestUtils.createZooKeeperHAConfig(ZooKeeper.getConnectString(),
                FileStateBackendBasePath.getPath());

        // Start first process
        jmProcess[0] = new JobManagerProcess(0, config);
        jmProcess[0].startProcess();

        // Task manager configuration
        config.setInteger(ConfigConstants.TASK_MANAGER_MEMORY_SIZE_KEY, 4);
        config.setInteger(ConfigConstants.TASK_MANAGER_NETWORK_NUM_BUFFERS_KEY, 100);
        config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, 2);

        // Start the task manager process
        for (int i = 0; i < numberOfTaskManagers; i++) {
            tmActorSystem[i] = AkkaUtils.createActorSystem(AkkaUtils.getDefaultAkkaConfig());
            TaskManager.startTaskManagerComponentsAndActor(config, ResourceID.generate(), tmActorSystem[i],
                    "localhost", Option.<String>empty(), Option.<LeaderRetrievalService>empty(), false,
                    TaskManager.class);
        }

        // Test actor system
        testActorSystem = AkkaUtils.createActorSystem(AkkaUtils.getDefaultAkkaConfig());

        jmProcess[0].getActorRef(testActorSystem, deadline.timeLeft());

        // Leader listener
        TestingListener leaderListener = new TestingListener();
        leaderRetrievalService = ZooKeeperUtils.createLeaderRetrievalService(config);
        leaderRetrievalService.start(leaderListener);

        // Initial submission
        leaderListener.waitForNewLeader(deadline.timeLeft().toMillis());

        String leaderAddress = leaderListener.getAddress();
        UUID leaderId = leaderListener.getLeaderSessionID();

        // Get the leader ref
        ActorRef leaderRef = AkkaUtils.getActorRef(leaderAddress, testActorSystem, deadline.timeLeft());
        ActorGateway leaderGateway = new AkkaActorGateway(leaderRef, leaderId);

        // Wait for all task managers to connect to the leading job manager
        JobManagerActorTestUtils.waitForTaskManagers(numberOfTaskManagers, leaderGateway, deadline.timeLeft());

        final File coordinateDirClosure = coordinateTempDir;
        final Throwable[] errorRef = new Throwable[1];

        // we trigger program execution in a separate thread
        Thread programTrigger = new Thread("Program Trigger") {
            @Override
            public void run() {
                try {
                    testJobManagerFailure(ZooKeeper.getConnectString(), coordinateDirClosure);
                } catch (Throwable t) {
                    t.printStackTrace();
                    errorRef[0] = t;
                }
            }
        };

        // start the test program
        programTrigger.start();

        // wait until all marker files are in place, indicating that all tasks have started
        AbstractTaskManagerProcessFailureRecoveryTest.waitForMarkerFiles(coordinateTempDir,
                READY_MARKER_FILE_PREFIX, PARALLELISM, deadline.timeLeft().toMillis());

        // Kill one of the job managers and trigger recovery
        jmProcess[0].destroy();

        jmProcess[1] = new JobManagerProcess(1, config);
        jmProcess[1].startProcess();

        jmProcess[1].getActorRef(testActorSystem, deadline.timeLeft());

        // we create the marker file which signals the program functions tasks that they can complete
        AbstractTaskManagerProcessFailureRecoveryTest.touchFile(new File(coordinateTempDir, PROCEED_MARKER_FILE));

        programTrigger.join(deadline.timeLeft().toMillis());

        // We wait for the finish marker file. We don't wait for the program trigger, because
        // we submit in detached mode.
        AbstractTaskManagerProcessFailureRecoveryTest.waitForMarkerFiles(coordinateTempDir,
                FINISH_MARKER_FILE_PREFIX, 1, deadline.timeLeft().toMillis());

        // check that the program really finished
        assertFalse("The program did not finish in time", programTrigger.isAlive());

        // check whether the program encountered an error
        if (errorRef[0] != null) {
            Throwable error = errorRef[0];
            error.printStackTrace();
            fail("The program encountered a " + error.getClass().getSimpleName() + " : " + error.getMessage());
        }
    } catch (Throwable t) {
        // Print early (in some situations the process logs get too big
        // for Travis and the root problem is not shown)
        t.printStackTrace();

        for (JobManagerProcess p : jmProcess) {
            if (p != null) {
                p.printProcessLog();
            }
        }

        throw t;
    } finally {
        for (int i = 0; i < numberOfTaskManagers; i++) {
            if (tmActorSystem[i] != null) {
                tmActorSystem[i].shutdown();
            }
        }

        if (leaderRetrievalService != null) {
            leaderRetrievalService.stop();
        }

        for (JobManagerProcess jmProces : jmProcess) {
            if (jmProces != null) {
                jmProces.destroy();
            }
        }

        // Delete coordination directory
        if (coordinateTempDir != null) {
            try {
                FileUtils.deleteDirectory(coordinateTempDir);
            } catch (Throwable ignored) {
            }
        }
    }
}
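The recurring idiom in this test: the job is submitted from a separate trigger thread, the test joins that thread with whatever time is left on the deadline, and isAlive() afterwards distinguishes a normal finish from a timeout. A minimal sketch of just that check, with placeholder work instead of the Flink job submission (names are illustrative, not Flink APIs):

public class DeadlineJoinSketch {
    public static void main(String[] args) throws InterruptedException {
        Thread programTrigger = new Thread(() -> {
            // placeholder for "submit the job and wait for it"
            try { Thread.sleep(200); } catch (InterruptedException e) { Thread.currentThread().interrupt(); }
        }, "Program Trigger");

        programTrigger.start();
        programTrigger.join(5_000); // wait at most 5 seconds

        // join(timeout) returns either way; isAlive() tells us which case we are in
        if (programTrigger.isAlive()) {
            throw new AssertionError("The program did not finish in time");
        }
    }
}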
From source file:org.lockss.db.DbManager.java
/**
 * Waits for all recorded threads to finish. Useful to avoid ugly but harmless
 * exceptions when running tests.
 *
 * @param timeout A long with the number of milliseconds to wait at most for
 *                threads to die.
 */
synchronized void waitForThreadsToFinish(long timeout) {
    final String DEBUG_HEADER = "waitForThreadsToFinish(): ";
    if (log.isDebug2())
        log.debug2(DEBUG_HEADER + "timeout = " + timeout);

    for (Thread thread : threads) {
        if (log.isDebug3())
            log.debug3(DEBUG_HEADER + "Waiting for thread = '" + thread + "'...");

        if (thread.isAlive()) {
            try {
                thread.join(timeout);
            } catch (InterruptedException ie) {
                // Do Nothing.
            }
        }

        if (log.isDebug3())
            log.debug3(DEBUG_HEADER + "Done.");
    }

    if (log.isDebug2())
        log.debug2(DEBUG_HEADER + "Done.");
}
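The same guard-then-join idiom in isolation, as a standalone sketch (the thread list and main method are illustrative, not LOCKSS code): threads that have already died are skipped, and the rest are waited for with a bounded join so the caller never blocks longer than the timeout per thread.

import java.util.List;

public class WaitForThreads {
    /** Waits up to timeout ms for each thread that is still alive. */
    static void waitForThreadsToFinish(List<Thread> threads, long timeout) {
        for (Thread thread : threads) {
            if (thread.isAlive()) {        // skip threads that already finished
                try {
                    thread.join(timeout);  // bounded wait; may return with the thread still alive
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                }
            }
        }
    }

    public static void main(String[] args) {
        Thread t = new Thread(() -> { /* short-lived work */ });
        t.start();
        waitForThreadsToFinish(List.of(t), 1_000);
        System.out.println("still alive? " + t.isAlive());
    }
}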
From source file:org.apache.flink.test.recovery.JobManagerHAProcessFailureRecoveryITCase.java
@Test public void testDispatcherProcessFailure() throws Exception { final Time timeout = Time.seconds(30L); final File zookeeperStoragePath = temporaryFolder.newFolder(); // Config//w w w. j a v a 2 s . c om final int numberOfJobManagers = 2; final int numberOfTaskManagers = 2; final int numberOfSlotsPerTaskManager = 2; assertEquals(PARALLELISM, numberOfTaskManagers * numberOfSlotsPerTaskManager); // Job managers final DispatcherProcess[] dispatcherProcesses = new DispatcherProcess[numberOfJobManagers]; // Task managers TaskManagerRunner[] taskManagerRunners = new TaskManagerRunner[numberOfTaskManagers]; HighAvailabilityServices highAvailabilityServices = null; LeaderRetrievalService leaderRetrievalService = null; // Coordination between the processes goes through a directory File coordinateTempDir = null; // Cluster config Configuration config = ZooKeeperTestUtils.createZooKeeperHAConfig(zooKeeper.getConnectString(), zookeeperStoragePath.getPath()); // Task manager configuration config.setString(TaskManagerOptions.MANAGED_MEMORY_SIZE, "4m"); config.setInteger(TaskManagerOptions.NETWORK_NUM_BUFFERS, 100); config.setInteger(TaskManagerOptions.NUM_TASK_SLOTS, 2); final RpcService rpcService = AkkaRpcServiceUtils.createRpcService("localhost", 0, config); try { final Deadline deadline = TestTimeOut.fromNow(); // Coordination directory coordinateTempDir = temporaryFolder.newFolder(); // Start first process dispatcherProcesses[0] = new DispatcherProcess(0, config); dispatcherProcesses[0].startProcess(); highAvailabilityServices = HighAvailabilityServicesUtils.createAvailableOrEmbeddedServices(config, TestingUtils.defaultExecutor()); // Start the task manager process for (int i = 0; i < numberOfTaskManagers; i++) { taskManagerRunners[i] = new TaskManagerRunner(config, ResourceID.generate()); taskManagerRunners[i].start(); } // Leader listener TestingListener leaderListener = new TestingListener(); leaderRetrievalService = highAvailabilityServices.getDispatcherLeaderRetriever(); leaderRetrievalService.start(leaderListener); // Initial submission leaderListener.waitForNewLeader(deadline.timeLeft().toMillis()); String leaderAddress = leaderListener.getAddress(); UUID leaderId = leaderListener.getLeaderSessionID(); final CompletableFuture<DispatcherGateway> dispatcherGatewayFuture = rpcService.connect(leaderAddress, DispatcherId.fromUuid(leaderId), DispatcherGateway.class); final DispatcherGateway dispatcherGateway = dispatcherGatewayFuture.get(); // Wait for all task managers to connect to the leading job manager waitForTaskManagers(numberOfTaskManagers, dispatcherGateway, deadline.timeLeft()); final File coordinateDirClosure = coordinateTempDir; final Throwable[] errorRef = new Throwable[1]; // we trigger program execution in a separate thread Thread programTrigger = new Thread("Program Trigger") { @Override public void run() { try { testJobManagerFailure(zooKeeper.getConnectString(), coordinateDirClosure, zookeeperStoragePath); } catch (Throwable t) { t.printStackTrace(); errorRef[0] = t; } } }; //start the test program programTrigger.start(); // wait until all marker files are in place, indicating that all tasks have started AbstractTaskManagerProcessFailureRecoveryTest.waitForMarkerFiles(coordinateTempDir, READY_MARKER_FILE_PREFIX, PARALLELISM, deadline.timeLeft().toMillis()); // Kill one of the job managers and trigger recovery dispatcherProcesses[0].destroy(); dispatcherProcesses[1] = new DispatcherProcess(1, config); dispatcherProcesses[1].startProcess(); // we create the marker file which 
signals the program functions tasks that they can complete AbstractTaskManagerProcessFailureRecoveryTest .touchFile(new File(coordinateTempDir, PROCEED_MARKER_FILE)); programTrigger.join(deadline.timeLeft().toMillis()); // We wait for the finish marker file. We don't wait for the program trigger, because // we submit in detached mode. AbstractTaskManagerProcessFailureRecoveryTest.waitForMarkerFiles(coordinateTempDir, FINISH_MARKER_FILE_PREFIX, 1, deadline.timeLeft().toMillis()); // check that the program really finished assertFalse("The program did not finish in time", programTrigger.isAlive()); // check whether the program encountered an error if (errorRef[0] != null) { Throwable error = errorRef[0]; error.printStackTrace(); fail("The program encountered a " + error.getClass().getSimpleName() + " : " + error.getMessage()); } } catch (Throwable t) { // Print early (in some situations the process logs get too big // for Travis and the root problem is not shown) t.printStackTrace(); for (DispatcherProcess p : dispatcherProcesses) { if (p != null) { p.printProcessLog(); } } throw t; } finally { for (int i = 0; i < numberOfTaskManagers; i++) { if (taskManagerRunners[i] != null) { taskManagerRunners[i].close(); } } if (leaderRetrievalService != null) { leaderRetrievalService.stop(); } for (DispatcherProcess dispatcherProcess : dispatcherProcesses) { if (dispatcherProcess != null) { dispatcherProcess.destroy(); } } if (highAvailabilityServices != null) { highAvailabilityServices.closeAndCleanupAllData(); } RpcUtils.terminateRpcService(rpcService, timeout); // Delete coordination directory if (coordinateTempDir != null) { try { FileUtils.deleteDirectory(coordinateTempDir); } catch (Throwable ignored) { } } } }
From source file:hu.sztaki.ilab.bigdata.common.tools.hbase.PerformanceEvaluation.java
private void doMultipleClients(final Class<? extends Test> cmd) throws IOException {
    final List<Thread> threads = new ArrayList<Thread>(N);
    final int perClientRows = R / N;
    for (int i = 0; i < N; i++) {
        Thread t = new Thread(Integer.toString(i)) {
            @Override
            public void run() {
                super.run();
                PerformanceEvaluation pe = new PerformanceEvaluation(conf);
                int index = Integer.parseInt(getName());
                try {
                    long elapsedTime = pe.runOneClient(cmd, index * perClientRows, perClientRows, R, B,
                            new Status() {
                                public void setStatus(final String msg) throws IOException {
                                    LOG.info("client-" + getName() + " " + msg);
                                }
                            });
                    LOG.info("Finished " + getName() + " in " + elapsedTime + "ms writing " + perClientRows
                            + " rows");
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            }
        };
        threads.add(t);
    }
    for (Thread t : threads) {
        t.start();
    }
    for (Thread t : threads) {
        while (t.isAlive()) {
            try {
                t.join();
            } catch (InterruptedException e) {
                LOG.debug("Interrupted, continuing" + e.toString());
            }
        }
    }
}
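This closing join loop, which repeats until isAlive() finally reports false even if join() is interrupted, recurs verbatim in the other PerformanceEvaluation variants below. Extracted into a minimal runnable sketch (the worker bodies are placeholders):

import java.util.ArrayList;
import java.util.List;

public class JoinAllClients {
    public static void main(String[] args) {
        List<Thread> threads = new ArrayList<>();
        for (int i = 0; i < 4; i++) {
            Thread t = new Thread(() -> { /* per-client work */ }, Integer.toString(i));
            threads.add(t);
            t.start();
        }
        for (Thread t : threads) {
            while (t.isAlive()) {   // keep waiting until the thread has really died
                try {
                    t.join();
                } catch (InterruptedException e) {
                    // interrupted: loop and join again, as the PerformanceEvaluation examples do
                }
            }
        }
    }
}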
From source file:org.apache.hadoop.hbase.stargate.PerformanceEvaluation.java
private void doMultipleClients(final Class<? extends Test> cmd) throws IOException {
    final List<Thread> threads = new ArrayList<Thread>(this.N);
    final int perClientRows = R / N;
    for (int i = 0; i < this.N; i++) {
        Thread t = new Thread(Integer.toString(i)) {
            @Override
            public void run() {
                super.run();
                PerformanceEvaluation pe = new PerformanceEvaluation(conf);
                int index = Integer.parseInt(getName());
                try {
                    long elapsedTime = pe.runOneClient(cmd, index * perClientRows, perClientRows, R, B,
                            new Status() {
                                public void setStatus(final String msg) throws IOException {
                                    LOG.info("client-" + getName() + " " + msg);
                                }
                            });
                    LOG.info("Finished " + getName() + " in " + elapsedTime + "ms writing " + perClientRows
                            + " rows");
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            }
        };
        threads.add(t);
    }
    for (Thread t : threads) {
        t.start();
    }
    for (Thread t : threads) {
        while (t.isAlive()) {
            try {
                t.join();
            } catch (InterruptedException e) {
                LOG.debug("Interrupted, continuing" + e.toString());
            }
        }
    }
}
From source file:org.pentaho.di.job.entries.pig.JobEntryPigScriptExecutor.java
public Result execute(final Result result, int arg1) throws KettleException { result.setNrErrors(0);/* www.ja va2 s. c o m*/ // Set up an appender that will send all pig log messages to Kettle's log // via logBasic(). KettleLoggingPrintWriter klps = new KettleLoggingPrintWriter(); WriterAppender pigToKettleAppender = new WriterAppender(new Log4jKettleLayout(true), klps); Logger pigLogger = Logger.getLogger("org.apache.pig"); Level log4jLevel = getLog4jLevel(parentJob.getLogLevel()); pigLogger.setLevel(log4jLevel); Log4jFileAppender appender = null; String logFileName = "pdi-" + this.getName(); //$NON-NLS-1$ LogWriter logWriter = LogWriter.getInstance(); try { appender = LogWriter.createFileAppender(logFileName, true, false); logWriter.addAppender(appender); log.setLogLevel(parentJob.getLogLevel()); if (pigLogger != null) { pigLogger.addAppender(pigToKettleAppender); } } catch (Exception e) { logError(BaseMessages.getString(PKG, "JobEntryPigScriptExecutor.FailedToOpenLogFile", logFileName, //$NON-NLS-1$ e.toString())); logError(Const.getStackTracker(e)); } if (Const.isEmpty(m_scriptFile)) { throw new KettleException( BaseMessages.getString(PKG, "JobEntryPigScriptExecutor.Error.NoPigScriptSpecified")); } try { URL scriptU = null; String scriptFileS = m_scriptFile; scriptFileS = environmentSubstitute(scriptFileS); if (scriptFileS.indexOf("://") == -1) { File scriptFile = new File(scriptFileS); scriptU = scriptFile.toURI().toURL(); } else { scriptU = new URL(scriptFileS); } HadoopConfiguration active = HadoopConfigurationBootstrap.getHadoopConfigurationProvider() .getActiveConfiguration(); HadoopShim hadoopShim = active.getHadoopShim(); final PigShim pigShim = active.getPigShim(); // Make sure we can execute locally if desired if (m_localExecution && !pigShim.isLocalExecutionSupported()) { throw new KettleException( BaseMessages.getString(PKG, "JobEntryPigScriptExecutor.Warning.LocalExecution")); } // configure for connection to hadoop Configuration conf = hadoopShim.createConfiguration(); if (!m_localExecution) { String hdfsHost = environmentSubstitute(m_hdfsHostname); String hdfsP = environmentSubstitute(m_hdfsPort); String jobTrackerHost = environmentSubstitute(m_jobTrackerHostname); String jobTP = environmentSubstitute(m_jobTrackerPort); List<String> configMessages = new ArrayList<String>(); hadoopShim.configureConnectionInformation(hdfsHost, hdfsP, jobTrackerHost, jobTP, conf, configMessages); for (String m : configMessages) { logBasic(m); } } final Properties properties = new Properties(); pigShim.configure(properties, m_localExecution ? null : conf); // transform the map type to list type which can been accepted by ParameterSubstitutionPreprocessor List<String> paramList = new ArrayList<String>(); if (m_params != null) { for (Map.Entry<String, String> entry : m_params.entrySet()) { String name = entry.getKey(); name = environmentSubstitute(name); // do environment variable substitution String value = entry.getValue(); value = environmentSubstitute(value); // do environment variable substitution paramList.add(name + "=" + value); } } final String pigScript = pigShim.substituteParameters(scriptU, paramList); final ExecutionMode execMode = (m_localExecution ? 
ExecutionMode.LOCAL : ExecutionMode.MAPREDUCE); if (m_enableBlocking) { int[] executionStatus = pigShim.executeScript(pigScript, execMode, properties); logBasic(BaseMessages.getString(PKG, "JobEntryPigScriptExecutor.JobCompletionStatus", "" + executionStatus[0], "" + executionStatus[1])); if (executionStatus[1] > 0) { result.setStopped(true); result.setNrErrors(executionStatus[1]); result.setResult(false); } removeAppender(appender, pigToKettleAppender); if (appender != null) { ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_LOG, appender.getFile(), parentJob.getJobname(), getName()); result.getResultFiles().put(resultFile.getFile().toString(), resultFile); } } else { final Log4jFileAppender fa = appender; final WriterAppender ptk = pigToKettleAppender; final Thread runThread = new Thread() { public void run() { try { int[] executionStatus = pigShim.executeScript(pigScript, execMode, properties); logBasic(BaseMessages.getString(PKG, "JobEntryPigScriptExecutor.JobCompletionStatus", "" + executionStatus[0], "" + executionStatus[1])); } catch (Exception ex) { ex.printStackTrace(); result.setStopped(true); result.setNrErrors(1); result.setResult(false); } finally { removeAppender(fa, ptk); if (fa != null) { ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_LOG, fa.getFile(), parentJob.getJobname(), getName()); result.getResultFiles().put(resultFile.getFile().toString(), resultFile); } } } }; runThread.start(); parentJob.addJobListener(new JobListener() { @Override public void jobStarted(Job job) throws KettleException { } @Override public void jobFinished(Job job) throws KettleException { if (runThread.isAlive()) { logMinimal(BaseMessages.getString(PKG, "JobEntryPigScriptExecutor.Warning.AsynctaskStillRunning", getName(), job.getJobname())); } } }); } } catch (Exception ex) { ex.printStackTrace(); result.setStopped(true); result.setNrErrors(1); result.setResult(false); logError(ex.getMessage(), ex); } return result; }
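In the non-blocking branch above, the script runs on its own thread and isAlive() is only consulted later, from a job listener, to warn that the asynchronous task is still in flight when the parent job finishes. A reduced sketch of that check, with a plain Runnable standing in for Kettle's JobListener (names are illustrative):

public class AsyncCompletionCheck {
    public static void main(String[] args) throws InterruptedException {
        Thread runThread = new Thread(() -> {
            // placeholder for the long-running script execution
            try { Thread.sleep(2_000); } catch (InterruptedException e) { Thread.currentThread().interrupt(); }
        }, "pig-script");
        runThread.start();

        // Later, when the surrounding job finishes, warn if the async task is still running.
        Runnable onJobFinished = () -> {
            if (runThread.isAlive()) {
                System.out.println("Warning: asynchronous task is still running");
            }
        };
        onJobFinished.run();
    }
}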
From source file:com.ibm.bi.dml.debug.DMLDebugger.java
/** * Controls the communication between debugger CLI and DML runtime. *///from w w w . j ava 2 s.co m @SuppressWarnings("deprecation") public synchronized void runSystemMLDebugger() { debuggerUI.setOptions(); debuggerUI.getDebuggerCLI(); Thread runtime = new Thread(DMLRuntime); boolean isRuntimeInstruction = false; while (!quit) { try { //get debugger function from CLI getCommand(); if (cmd != null) { isRuntimeInstruction = false; //check for help if (cmd.hasOption("h")) { debuggerUI.getDebuggerCLI(); } //check for exit else if (cmd.hasOption("q")) { synchronized (DMLDebugger.class) { quit = true; } runtime.stop(); } else if (cmd.hasOption("r")) { if (currEC != null) { System.out.println( "Runtime has already started. Try \"s\" to go to next line, or \"c\" to continue running your DML script."); } else { currEC = preEC; runtime.start(); isRuntimeInstruction = true; } } else if (cmd.hasOption("c")) { if (currEC == null) System.out.println( "Runtime has not been started. Try \"r\" to start DML runtime execution."); else if (!runtime.isAlive()) { System.err.println("Invalid debug state."); //System.out.println("Runtime terminated. Try \"-c\" to recompile followed by \"r\" to restart DML runtime execution."); } else { System.out.println("Resuming DML script execution ..."); preEC.getDebugState().setCommand(null); runtime.resume(); isRuntimeInstruction = true; } } else if (cmd.hasOption("si")) { if (!runtime.isAlive()) { currEC = preEC; runtime.start(); isRuntimeInstruction = true; // System.out.println("Runtime must be started before single stepping can be enabled. Try \"r\" to start DML runtime execution."); } //else { preEC.getDebugState().setCommand("step_instruction"); runtime.resume(); isRuntimeInstruction = true; //} } else if (cmd.hasOption("s")) { if (!runtime.isAlive()) { currEC = preEC; runtime.start(); isRuntimeInstruction = true; //System.out.println("Runtime must be started before step over can be enabled. Try \"r\" to start DML runtime execution."); } //else { preEC.getDebugState().setCommand("step_line"); runtime.resume(); isRuntimeInstruction = true; //} } // else if (cmd.hasOption("step_return")) { // if (!runtime.isAlive()) { // System.out.println("Runtime must be started before step return can be enabled. Try \"r\" to start DML runtime execution."); // } // else { // String fname = dbFunctions.getValue(cmd.getOptionValues("step_return")); // dbprog.rtprog.setCommand("step return"); // if (fname != null) { // dbprog.rtprog.setCommandArg(fname); // } // runtime.resume(); // isRuntimeInstruction = true; // } // } else if (cmd.hasOption("b")) { int lineNumber = dbFunctions.getValue(cmd.getOptionValues("b"), lines.length); if (lineNumber > 0) { if (DMLBreakpointManager.getBreakpoint(lineNumber) == null) System.out.println("Sorry, a breakpoint cannot be inserted at line " + lineNumber + ". 
Please try a different line number."); else { if (DMLBreakpointManager.getBreakpoint(lineNumber) .getBPInstructionStatus() != BPINSTRUCTION_STATUS.INVISIBLE) { System.out.format("Breakpoint at line %d already exists.\n", lineNumber); } else { dbprog.accessBreakpoint(lineNumber, 0, BPINSTRUCTION_STATUS.ENABLED); } } } } else if (cmd.hasOption("d")) { int lineNumber = dbFunctions.getValue(cmd.getOptionValues("d"), lines.length); if (lineNumber > 0 && DMLBreakpointManager.getBreakpoint(lineNumber) != null && DMLBreakpointManager.getBreakpoint(lineNumber) .getBPInstructionStatus() != BPINSTRUCTION_STATUS.INVISIBLE) { dbprog.accessBreakpoint(lineNumber, 1, BPINSTRUCTION_STATUS.INVISIBLE); //dbprog.accessBreakpoint(lineNumber, 1, BPINSTRUCTION_STATUS.DISABLED); } else { System.out.println("Sorry, a breakpoint cannot be deleted at line " + lineNumber + ". Please try a different line number."); } } else if (cmd.hasOption("i")) { String[] infoOptions = cmd.getOptionValues("i"); if (infoOptions == null || infoOptions.length == 0) { System.err.println( "The command \"info\" requires option. Try \"info break\" or \"info frame\"."); } else if (infoOptions[0].trim().compareTo("break") == 0) { dbFunctions.listBreakpoints(DMLBreakpointManager.getBreakpoints()); } else if (infoOptions[0].trim().compareTo("frame") == 0) { if (!runtime.isAlive()) System.err.println( "Runtime has not been started. Try \"r\" or \"s\" to start DML runtime execution."); else dbFunctions.printCallStack(currEC.getDebugState().getCurrentFrame(), currEC.getDebugState().getCallStack()); } else { System.err.println( "Invalid option for command \"info\". Try \"info break\" or \"info frame\"."); } } else if (cmd.hasOption("p")) { String[] pOptions = cmd.getOptionValues("p"); if (pOptions == null || pOptions.length != 1) { System.err.println("Incorrect options for command \"print\""); } else { String varName = pOptions[0].trim(); if (runtime.isAlive()) { if (varName.contains("[")) { // matrix with index: can be cell or column or row try { //System.out.println("" + varName); String variableNameWithoutIndices = varName.split("\\[")[0].trim(); //System.out.println("" + variableNameWithoutIndices); String indexString = (varName.split("\\[")[1].trim()).split("\\]")[0] .trim(); //System.out.println(">>" + indexString + "<<"); String rowIndexStr = ""; String colIndexStr = ""; if (indexString.startsWith(",")) { colIndexStr = indexString.split(",")[1].trim(); } else if (indexString.endsWith(",")) { rowIndexStr = indexString.split(",")[0].trim(); } else { rowIndexStr = indexString.split(",")[0].trim(); colIndexStr = indexString.split(",")[1].trim(); } int rowIndex = -1; int colIndex = -1; if (rowIndexStr.compareTo("") != 0) { rowIndex = Integer.parseInt(rowIndexStr); } if (colIndexStr.compareTo("") != 0) { colIndex = Integer.parseInt(colIndexStr); } //System.out.println("" + rowIndex + " " + colIndex); dbFunctions.print(currEC.getDebugState().getVariables(), variableNameWithoutIndices, "value", rowIndex, colIndex); } catch (Exception indicesException) { System.err.println( "Incorrect fomat for \"p\". If you are trying to print matrix variable M, you can use M[1,] or M[,1] or M[1,1] (without spaces)."); } } else { // Print entire matrix dbFunctions.print(currEC.getDebugState().getVariables(), varName, "value", -1, -1); } } else System.err.println( "Runtime has not been started. 
Try \"r\" or \"s\" to start DML runtime execution."); } } else if (cmd.hasOption("whatis")) { String[] pOptions = cmd.getOptionValues("whatis"); if (pOptions == null || pOptions.length != 1) { System.err.println("Incorrect options for command \"whatis\""); } else { String varName = pOptions[0].trim(); dbFunctions.print(currEC.getDebugState().getVariables(), varName, "metadata", -1, -1); } } else if (cmd.hasOption("set")) { String[] pOptions = cmd.getOptionValues("set"); if (pOptions == null || pOptions.length != 2) { System.err.println("Incorrect options for command \"set\""); } else { try { if (pOptions[0].contains("[")) { String[] paramsToSetMatrix = new String[4]; paramsToSetMatrix[0] = pOptions[0].split("\\[")[0].trim(); String indexString = (pOptions[0].split("\\[")[1].trim()).split("\\]")[0] .trim(); paramsToSetMatrix[1] = indexString.split(",")[0].trim(); paramsToSetMatrix[2] = indexString.split(",")[1].trim(); paramsToSetMatrix[3] = pOptions[1].trim(); dbFunctions.setMatrixCell(currEC.getDebugState().getVariables(), paramsToSetMatrix); } else { dbFunctions.setScalarValue(currEC.getDebugState().getVariables(), pOptions); } } catch (Exception exception1) { System.out.println( "Only scalar variable or a matrix cell available in current frame can be set in current version."); } } } else if (cmd.hasOption("l")) { String[] pOptions = cmd.getOptionValues("l"); String[] argsForRange = new String[2]; int currentPC = 1; if (runtime.isAlive()) { currentPC = currEC.getDebugState().getPC().getLineNumber(); } IntRange range = null; if (pOptions == null) { // Print first 10 lines range = new IntRange(currentPC, Math.min(lines.length, currentPC + 10)); } else if (pOptions.length == 1 && pOptions[0].trim().toLowerCase().compareTo("all") == 0) { // Print entire program range = new IntRange(1, lines.length); } else if (pOptions.length == 2 && pOptions[0].trim().toLowerCase().compareTo("next") == 0) { int numLines = 10; try { numLines = Integer.parseInt(pOptions[1]); } catch (Exception e1) { } argsForRange[0] = "" + currentPC; argsForRange[1] = "" + Math.min(lines.length, numLines + currentPC); range = dbFunctions.getRange(argsForRange, lines.length); } else if (pOptions.length == 2 && pOptions[0].trim().toLowerCase().compareTo("prev") == 0) { int numLines = 10; try { numLines = Integer.parseInt(pOptions[1]); } catch (Exception e1) { } argsForRange[0] = "" + Math.max(1, currentPC - numLines); argsForRange[1] = "" + currentPC; range = dbFunctions.getRange(argsForRange, lines.length); } if (range == null) { System.err.println( "Incorrect usage of command \"l\". Try \"l\" or \"l all\" or \"l next 5\" or \"l prev 5\"."); } else { if (range.getMinimumInteger() > 0) { dbFunctions.printLines(lines, range); } else { System.err.println( "Sorry no lines that can be printed. 
Try \"l\" or \"l all\" or \"l next 5\" or \"l prev 5\"."); } } // Old code: // IntRange range = dbFunctions.getRange(cmd.getOptionValues("p"), lines.length); //if (range.getMinimumInteger() > 0) { // dbFunctions.printLines(lines, range); // } } else if (cmd.hasOption("li")) { String[] pOptions = cmd.getOptionValues("li"); String[] argsForRange = new String[2]; int currentPC = 1; if (runtime.isAlive()) { currentPC = currEC.getDebugState().getPC().getLineNumber(); } IntRange range = null; if (pOptions == null) { // Print first 10 lines range = new IntRange(currentPC, Math.min(lines.length, currentPC + 10)); } else if (pOptions.length == 1 && pOptions[0].trim().toLowerCase().compareTo("all") == 0) { // Print entire program range = new IntRange(1, lines.length); } else if (pOptions.length == 2 && pOptions[0].trim().toLowerCase().compareTo("next") == 0) { int numLines = 10; try { numLines = Integer.parseInt(pOptions[1]); } catch (Exception e1) { } argsForRange[0] = "" + currentPC; argsForRange[1] = "" + Math.min(lines.length, numLines + currentPC); range = dbFunctions.getRange(argsForRange, lines.length); } else if (pOptions.length == 2 && pOptions[0].trim().toLowerCase().compareTo("prev") == 0) { int numLines = 10; try { numLines = Integer.parseInt(pOptions[1]); } catch (Exception e1) { } argsForRange[0] = "" + Math.max(1, currentPC - numLines); argsForRange[1] = "" + currentPC; range = dbFunctions.getRange(argsForRange, lines.length); } if (range == null) { System.err.println( "Incorrect usage of command \"li\". Try \"li\" or \"li all\" or \"li next 5\" or \"li prev 5\"."); } else { if (range.getMinimumInteger() > 0) { dbFunctions.printInstructions(lines, dbprog.getDMLInstMap(), range, false); } else { System.err.println( "Sorry no lines that can be printed. Try \"li\" or \"li all\" or \"li next 5\" or \"li prev 5\"."); } } // Old code: // IntRange range = dbFunctions.getRange(cmd.getOptionValues("p"), lines.length); //if (range.getMinimumInteger() > 0) { // dbFunctions.printLines(lines, range); // } } else if (cmd.hasOption("set_scalar")) { if (!runtime.isAlive()) System.err.println( "Runtime has not been started. Try \"r\" to start DML runtime execution."); else dbFunctions.setScalarValue(currEC.getDebugState().getVariables(), cmd.getOptionValues("set_scalar")); } else if (cmd.hasOption("m")) { String varname = dbFunctions.getValue(cmd.getOptionValues("m")); if (runtime.isAlive()) dbFunctions.printMatrixVariable(currEC.getDebugState().getVariables(), varname); else System.err.println( "Runtime has not been started. Try \"r\" to start DML runtime execution."); } else if (cmd.hasOption("x")) { if (!runtime.isAlive()) System.err.println( "Runtime has not been started. Try \"r\" to start DML runtime execution."); else { dbFunctions.printMatrixCell(currEC.getDebugState().getVariables(), cmd.getOptionValues("x")); } } else if (cmd.hasOption("set_cell")) { if (!runtime.isAlive()) System.err.println( "Runtime has not been started. Try \"r\" to start DML runtime execution."); else { dbFunctions.setMatrixCell(currEC.getDebugState().getVariables(), cmd.getOptionValues("set_cell")); } } else { System.err.println("Undefined command. 
Try \"help\"."); } //block until runtime suspends execution or terminates //while(runtime.isAlive() && !currEC.getProgram().isStopped()) { wait(300); // To avoid race condition between submitting job and //System.out.println(">> Before while"); while (isRuntimeInstruction && !currEC.getDebugState().canAcceptNextCommand()) { if (quit) { break; } else { wait(300); //wait } } //System.out.println(">> After while"); } wait(300); } catch (Exception e) { System.err.println("Error processing debugger command. Try \"help\"."); } } }
From source file:org.apache.hadoop.hive.serde2.objectinspector.TestReflectionObjectInspectors.java
public void testObjectInspectorThreadSafety() throws InterruptedException {
    final int workerCount = 5; // 5 workers to run getReflectionObjectInspector concurrently
    final ScheduledExecutorService executorService = Executors.newScheduledThreadPool(workerCount);
    final MutableObject exception = new MutableObject();
    Thread runner = new Thread(new Runnable() {
        @Override
        @SuppressWarnings("unchecked")
        public void run() {
            Future<ObjectInspector>[] results = (Future<ObjectInspector>[]) new Future[workerCount];
            ObjectPair<Type, ObjectInspectorFactory.ObjectInspectorOptions>[] types =
                    (ObjectPair<Type, ObjectInspectorFactory.ObjectInspectorOptions>[]) new ObjectPair[] {
                            new ObjectPair<Type, ObjectInspectorFactory.ObjectInspectorOptions>(Complex.class,
                                    ObjectInspectorFactory.ObjectInspectorOptions.THRIFT),
                            new ObjectPair<Type, ObjectInspectorFactory.ObjectInspectorOptions>(MyStruct.class,
                                    ObjectInspectorFactory.ObjectInspectorOptions.JAVA), };
            try {
                for (int i = 0; i < 20; i++) { // repeat 20 times
                    for (final ObjectPair<Type, ObjectInspectorFactory.ObjectInspectorOptions> t : types) {
                        ObjectInspectorFactory.objectInspectorCache.clear();
                        for (int k = 0; k < workerCount; k++) {
                            results[k] = executorService.schedule(new Callable<ObjectInspector>() {
                                @Override
                                public ObjectInspector call() throws Exception {
                                    return ObjectInspectorFactory.getReflectionObjectInspector(t.getFirst(),
                                            t.getSecond());
                                }
                            }, 50, TimeUnit.MILLISECONDS);
                        }
                        ObjectInspector oi = results[0].get();
                        for (int k = 1; k < workerCount; k++) {
                            assertEquals(oi, results[k].get());
                        }
                    }
                }
            } catch (Throwable e) {
                exception.setValue(e);
            }
        }
    });
    try {
        runner.start();
        long endTime = System.currentTimeMillis() + 300000; // timeout in 5 minutes
        while (runner.isAlive()) {
            if (System.currentTimeMillis() > endTime) {
                runner.interrupt(); // Interrupt the runner thread
                fail("Timed out waiting for the runner to finish");
            }
            runner.join(10000);
        }
        if (exception.getValue() != null) {
            fail("Got exception: " + exception.getValue());
        }
    } finally {
        executorService.shutdownNow();
    }
}
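The watchdog idiom used here, joining in slices while re-checking isAlive() against a wall-clock deadline and interrupting the worker on timeout, works outside JUnit as well. A minimal sketch with an AssertionError in place of fail() (names and durations are illustrative):

public class WatchdogJoin {
    public static void main(String[] args) throws InterruptedException {
        Thread runner = new Thread(() -> {
            try { Thread.sleep(3_000); } catch (InterruptedException e) { Thread.currentThread().interrupt(); }
        }, "runner");
        runner.start();

        long endTime = System.currentTimeMillis() + 10_000; // overall timeout
        while (runner.isAlive()) {
            if (System.currentTimeMillis() > endTime) {
                runner.interrupt();   // give up and unblock the runner
                throw new AssertionError("Timed out waiting for the runner to finish");
            }
            runner.join(1_000);       // wait in slices so the deadline is re-checked
        }
        System.out.println("runner finished within the deadline");
    }
}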
From source file:PerformanceEvaluation.java
private void doMultipleClients(final Class<? extends Test> cmd) throws IOException {
    final List<Thread> threads = new ArrayList<Thread>(this.N);
    final int perClientRows = R / N;
    for (int i = 0; i < this.N; i++) {
        Thread t = new Thread(Integer.toString(i)) {
            @Override
            public void run() {
                super.run();
                PerformanceEvaluation pe = new PerformanceEvaluation(conf);
                int index = Integer.parseInt(getName());
                try {
                    long elapsedTime = pe.runOneClient(cmd, index * perClientRows, perClientRows, R,
                            flushCommits, writeToWAL, new Status() {
                                public void setStatus(final String msg) throws IOException {
                                    LOG.info("client-" + getName() + " " + msg);
                                }
                            }, S);
                    LOG.info("Finished " + getName() + " in " + elapsedTime + "ms writing " + perClientRows
                            + " rows");
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            }
        };
        threads.add(t);
    }
    for (Thread t : threads) {
        t.start();
    }
    for (Thread t : threads) {
        while (t.isAlive()) {
            try {
                t.join();
            } catch (InterruptedException e) {
                LOG.debug("Interrupted, continuing" + e.toString());
            }
        }
    }
}
From source file:com.chen.emailsync.SyncManager.java
private long checkMailboxes() { // First, see if any running mailboxes have been deleted ArrayList<Long> deletedMailboxes = new ArrayList<Long>(); synchronized (sSyncLock) { for (long mailboxId : mServiceMap.keySet()) { Mailbox m = Mailbox.restoreMailboxWithId(this, mailboxId); if (m == null) { deletedMailboxes.add(mailboxId); }/*from w ww .j a v a2 s .c om*/ } // If so, stop them or remove them from the map for (Long mailboxId : deletedMailboxes) { AbstractSyncService svc = mServiceMap.get(mailboxId); if (svc == null || svc.mThread == null) { releaseMailbox(mailboxId); continue; } else { boolean alive = svc.mThread.isAlive(); log("Deleted mailbox: " + svc.mMailboxName); if (alive) { stopManualSync(mailboxId); } else { log("Removing from serviceMap"); releaseMailbox(mailboxId); } } } } long nextWait = SYNC_SERVICE_HEARTBEAT_TIME; long now = System.currentTimeMillis(); // Start up threads that need it; use a query which finds eas mailboxes where the // the sync interval is not "never". This is the set of mailboxes that we control if (mAccountObserver == null) { log("mAccountObserver null; service died??"); return nextWait; } Cursor c = getContentResolver().query(Mailbox.CONTENT_URI, Mailbox.CONTENT_PROJECTION, mAccountObserver.getSyncableMailboxWhere(), null, null); if (c == null) throw new ProviderUnavailableException(); try { while (c.moveToNext()) { long mailboxId = c.getLong(Mailbox.CONTENT_ID_COLUMN); AbstractSyncService service = getRunningService(mailboxId); if (service == null) { // Get the cached account Account account = getAccountById(c.getInt(Mailbox.CONTENT_ACCOUNT_KEY_COLUMN)); if (account == null) continue; // We handle a few types of mailboxes specially int mailboxType = c.getInt(Mailbox.CONTENT_TYPE_COLUMN); if (!isMailboxSyncable(account, mailboxType)) { continue; } // Check whether we're in a hold (temporary or permanent) SyncError syncError = mSyncErrorMap.get(mailboxId); if (syncError != null) { // Nothing we can do about fatal errors if (syncError.fatal) continue; if (now < syncError.holdEndTime) { // If release time is earlier than next wait time, // move next wait time up to the release time if (syncError.holdEndTime < now + nextWait) { nextWait = syncError.holdEndTime - now; mNextWaitReason = "Release hold"; } continue; } else { // Keep the error around, but clear the end time syncError.holdEndTime = 0; } } // Otherwise, we use the sync interval long syncInterval = c.getInt(Mailbox.CONTENT_SYNC_INTERVAL_COLUMN); if (syncInterval == Mailbox.CHECK_INTERVAL_PUSH) { Mailbox m = EmailContent.getContent(c, Mailbox.class); requestSync(m, SYNC_PUSH, null); } else if (mailboxType == Mailbox.TYPE_OUTBOX) { if (hasSendableMessages(c)) { Mailbox m = EmailContent.getContent(c, Mailbox.class); startServiceThread(getServiceForMailbox(this, m)); } } else if (syncInterval > 0 && syncInterval <= ONE_DAY_MINUTES) { // TODO: Migrating to use system SyncManager, so this should be dead code. 
long lastSync = c.getLong(Mailbox.CONTENT_SYNC_TIME_COLUMN); long sinceLastSync = now - lastSync; long toNextSync = syncInterval * MINUTES - sinceLastSync; String name = c.getString(Mailbox.CONTENT_DISPLAY_NAME_COLUMN); if (toNextSync <= 0) { Mailbox m = EmailContent.getContent(c, Mailbox.class); requestSync(m, SYNC_SCHEDULED, null); } else if (toNextSync < nextWait) { nextWait = toNextSync; if (sUserLog) { log("Next sync for " + name + " in " + nextWait / 1000 + "s"); } mNextWaitReason = "Scheduled sync, " + name; } else if (sUserLog) { log("Next sync for " + name + " in " + toNextSync / 1000 + "s"); } } } else { Thread thread = service.mThread; // Look for threads that have died and remove them from the map if (thread != null && !thread.isAlive()) { if (sUserLog) { log("Dead thread, mailbox released: " + c.getString(Mailbox.CONTENT_DISPLAY_NAME_COLUMN)); } synchronized (sSyncLock) { releaseMailbox(mailboxId); } // Restart this if necessary if (nextWait > 3 * SECONDS) { nextWait = 3 * SECONDS; mNextWaitReason = "Clean up dead thread(s)"; } } else { long requestTime = service.mRequestTime; if (requestTime > 0) { long timeToRequest = requestTime - now; if (timeToRequest <= 0) { service.mRequestTime = 0; service.alarm(); } else if (requestTime > 0 && timeToRequest < nextWait) { if (timeToRequest < 11 * MINUTES) { nextWait = timeToRequest < 250 ? 250 : timeToRequest; mNextWaitReason = "Sync data change"; } else { log("Illegal timeToRequest: " + timeToRequest); } } } } } } } finally { c.close(); } return nextWait; }
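SyncManager uses the negated check: a service whose thread is no longer alive is considered dead, logged, and released so it can be restarted. A minimal sketch of that sweep over a map of worker threads (the map and names are illustrative, not the Email sync classes):

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

public class DeadWorkerSweep {
    public static void main(String[] args) throws InterruptedException {
        Map<Long, Thread> workers = new HashMap<>();
        Thread w = new Thread(() -> { /* short-lived sync */ }, "mailbox-1");
        workers.put(1L, w);
        w.start();
        w.join(); // ensure the worker has died before the sweep runs

        // Periodic sweep: remove entries whose thread has died so they can be restarted.
        for (Iterator<Map.Entry<Long, Thread>> it = workers.entrySet().iterator(); it.hasNext(); ) {
            Map.Entry<Long, Thread> e = it.next();
            if (!e.getValue().isAlive()) {
                System.out.println("Dead thread, releasing mailbox " + e.getKey());
                it.remove();
            }
        }
    }
}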