List of usage examples for java.util Timer schedule
public void schedule(TimerTask task, long delay, long period)
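Before the project-size examples below, here is a minimal, self-contained sketch of the call itself (class and output names are illustrative, not taken from any of the projects): schedule(task, delay, period) runs the task once after delay milliseconds and then repeatedly, leaving roughly period milliseconds between the end of one run and the start of the next (fixed-delay execution).

    import java.util.Timer;
    import java.util.TimerTask;

    public class TimerScheduleDemo {
        public static void main(String[] args) throws InterruptedException {
            // Daemon timer: its thread will not keep the JVM alive on its own.
            Timer timer = new Timer("demo-timer", true);
            TimerTask task = new TimerTask() {
                @Override
                public void run() {
                    System.out.println("tick at " + System.currentTimeMillis());
                }
            };
            // First run after 1 second, then every 2 seconds (fixed delay).
            timer.schedule(task, 1000L, 2000L);

            Thread.sleep(7000L); // let a few ticks happen
            timer.cancel();      // discard scheduled tasks and stop the timer thread
        }
    }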
From source file:hydrograph.server.execution.tracking.client.main.HydrographMain.java
/**
 * @param latch
 * @param session
 * @param jobId
 * @param timer
 * @param execution
 * @param socket
 * @throws IOException
 */
private void sendExecutionTrackingStatus(final CountDownLatch latch, Session session, final String jobId,
        final Timer timer, final HydrographService execution, final HydrographEngineCommunicatorSocket socket)
        throws IOException {
    try {
        TimerTask task = new TimerTask() {
            ExecutionStatus previousExecutionStatus = null;

            @Override
            public void run() {
                List<ComponentInfo> componentInfos = execution.getStatus();
                if (!componentInfos.isEmpty()) {
                    List<ComponentStatus> componentStatusList = new ArrayList<ComponentStatus>();
                    for (ComponentInfo componentInfo : componentInfos) {
                        ComponentStatus componentStatus = new ComponentStatus(componentInfo.getComponentId(),
                                componentInfo.getComponentName(), componentInfo.getCurrentStatus(),
                                componentInfo.getBatch(), componentInfo.getProcessedRecords());
                        componentStatusList.add(componentStatus);
                    }
                    ExecutionStatus executionStatus = new ExecutionStatus(componentStatusList);
                    executionStatus.setJobId(jobId);
                    executionStatus.setClientId(Constants.ENGINE_CLIENT + jobId);
                    executionStatus.setType(Constants.POST);
                    Gson gson = new Gson();
                    try {
                        // Only send the status if it changed since the previous tick.
                        if (previousExecutionStatus == null || !executionStatus.equals(previousExecutionStatus)) {
                            socket.sendMessage(gson.toJson(executionStatus));
                            previousExecutionStatus = executionStatus;
                        }
                    } catch (IOException e) {
                        logger.error("Fail to send status for job - " + jobId, e);
                        timer.cancel();
                    }
                    if (StringUtils.isNotBlank(jobId)) {
                        // Moved this after sendMessage in order to log even if the service is not running.
                        ExecutionTrackingFileLogger.INSTANCE.log(jobId, executionStatus);
                    }
                }
                if (!execution.getJobRunningStatus()) {
                    timer.cancel();
                    latch.countDown();
                }
            }
        };
        timer.schedule(task, 0L, ExecutionTrackingUtils.INSTANCE.getStatusFrequency());
        latch.await();
    } catch (Throwable t) {
        logger.error("Failure in job - " + jobId, t);
        timer.cancel();
        throw new RuntimeException(t);
    } finally {
        if (session != null && session.isOpen()) {
            logger.debug("Closing Websocket engine client");
            CloseReason closeReason = new CloseReason(CloseReason.CloseCodes.NORMAL_CLOSURE, "Session Closed");
            session.close(closeReason);
        }
    }
}
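The coordination idiom in this example, reduced to its core: a repeating task polls for status, and once the watched job stops it cancels its own Timer and releases a CountDownLatch so the caller blocked in await() can proceed. The sketch below uses illustrative names (the running flag stands in for HydrographService.getJobRunningStatus()):

    import java.util.Timer;
    import java.util.TimerTask;
    import java.util.concurrent.CountDownLatch;

    public class PollUntilDone {
        private static volatile boolean running = true; // stand-in for the job status

        public static void main(String[] args) throws InterruptedException {
            final CountDownLatch latch = new CountDownLatch(1);
            final Timer timer = new Timer("status-poller");

            timer.schedule(new TimerTask() {
                @Override
                public void run() {
                    System.out.println("polling status...");
                    if (!running) {
                        timer.cancel();    // stop future executions
                        latch.countDown(); // release the waiting caller
                    }
                }
            }, 0L, 500L);

            // Simulate the job finishing after ~2 seconds.
            new Timer("job", true).schedule(new TimerTask() {
                public void run() { running = false; }
            }, 2000L);

            latch.await(); // block until the poller observes completion
            System.out.println("job finished; poller stopped");
        }
    }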
From source file:org.accada.reader.rprm.core.Source.java
/**
 * Add a list of read triggers to this source.
 * @param triggerList
 *            The list of read triggers
 * @throws ReaderProtocolException
 *             "ERROR_TOO_MANY_TRIGGERS"
 */
public void addReadTriggers(final Trigger[] triggerList) throws ReaderProtocolException {
    Vector triggers = readerDevice.getVector(triggerList);
    if (readerDevice.getMaxTriggerNumber() <= triggers.size()) {
        throw new ReaderProtocolException("ERROR_TOO_MANY_TRIGGERS",
                MessagingConstants.ERROR_TOO_MANY_TRIGGERS);
    }
    Enumeration iterator = triggers.elements();
    Trigger cur;
    while (iterator.hasMoreElements()) {
        cur = (Trigger) iterator.nextElement();
        if (!this.readTriggers.containsKey(cur.getName())) {
            this.readTriggers.put(cur.getName(), cur);
            if (cur.getType().equals(TriggerType.CONTINUOUS)) {
                // continuous trigger
                if (continuousThread == null && timerThreads.size() == 0) {
                    continuousThread = new ContinuousReadThread(this, cur);
                    continuousThread.start();
                }
            } else if (cur.getType().equals(TriggerType.TIMER)) {
                // timer trigger
                if (continuousThread == null) {
                    Timer timerThread = new Timer();
                    timerThreads.put(cur.getName(), timerThread);
                    final int num = 3;
                    timerThread.schedule(new TimerReadThread(this, cur), 0,
                            Integer.parseInt(cur.getValue().substring(num)));
                }
            } else if (cur.getType().equals(TriggerType.IO_EDGE)) {
                // io edge trigger
                if (continuousThread == null) {
                    // get port
                    final int num = 6;
                    String port = cur.getValue().substring(cur.getValue().indexOf(';') + num,
                            cur.getValue().lastIndexOf(';'));
                    if (readerDevice.getEdgeTriggers().containsKey(port)) {
                        IOEdgeTriggerPortManager manager = (IOEdgeTriggerPortManager) readerDevice
                                .getEdgeTriggers().get(port);
                        manager.addListener(cur, this.getName());
                        if (manager.getNumberOfTriggers() == 1) {
                            manager.start();
                        }
                    } else {
                        throw new ReaderProtocolException("no trigger manager available",
                                MessagingConstants.ERROR_UNKNOWN);
                    }
                }
            } else if (cur.getType().equals(TriggerType.IO_VALUE)) {
                // io value trigger
                if (continuousThread == null) {
                    // get port
                    final int num = 5;
                    String port = cur.getValue().substring(num, cur.getValue().indexOf(';'));
                    if (readerDevice.getValueTriggers().containsKey(port)) {
                        IOValueTriggerPortManager manager = (IOValueTriggerPortManager) readerDevice
                                .getValueTriggers().get(port);
                        manager.addListener(cur, this.getName());
                        if (manager.getNumberOfTriggers() == 1) {
                            manager.start();
                        }
                    } else {
                        throw new ReaderProtocolException("no trigger manager available",
                                MessagingConstants.ERROR_UNKNOWN);
                    }
                }
            }
        }
    }
}
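In the TriggerType.TIMER branch, the source keeps one Timer per trigger name in a map so each trigger can later be cancelled individually. A reduced sketch of that bookkeeping, with hypothetical names (the real code additionally parses the period out of the trigger's value string):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Timer;
    import java.util.TimerTask;

    public class TriggerTimers {
        private final Map<String, Timer> timerThreads = new HashMap<String, Timer>();

        /** Schedule a repeating task for a named trigger; one Timer per trigger. */
        public void addTimerTrigger(String name, long periodMillis, TimerTask task) {
            Timer timer = new Timer("trigger-" + name);
            timerThreads.put(name, timer);
            timer.schedule(task, 0L, periodMillis); // fire immediately, then periodically
        }

        /** Cancel and forget the timer registered for a trigger. */
        public void removeTimerTrigger(String name) {
            Timer timer = timerThreads.remove(name);
            if (timer != null) {
                timer.cancel();
            }
        }
    }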
From source file:org.pentaho.di.job.Job.java
/**
 * Handle logging at start
 *
 * @return true if it went OK.
 *
 * @throws KettleException
 */
public boolean beginProcessing() throws KettleException {
    currentDate = new Date();
    logDate = new Date();
    startDate = Const.MIN_DATE;
    endDate = currentDate;

    resetErrors();

    final JobLogTable jobLogTable = jobMeta.getJobLogTable();
    int intervalInSeconds = Const.toInt(environmentSubstitute(jobLogTable.getLogInterval()), -1);

    if (jobLogTable.isDefined()) {
        DatabaseMeta logcon = jobMeta.getJobLogTable().getDatabaseMeta();
        String schemaName = environmentSubstitute(jobMeta.getJobLogTable().getActualSchemaName());
        String tableName = environmentSubstitute(jobMeta.getJobLogTable().getActualTableName());
        String schemaAndTable = jobMeta.getJobLogTable().getDatabaseMeta()
                .getQuotedSchemaTableCombination(schemaName, tableName);
        Database ldb = new Database(this, logcon);
        ldb.shareVariablesWith(this);
        ldb.connect();
        ldb.setCommit(logCommitSize);

        try {
            // See if we have to add a batch id...
            Long id_batch = new Long(1);
            if (jobMeta.getJobLogTable().isBatchIdUsed()) {
                id_batch = logcon.getNextBatchId(ldb, schemaName, tableName,
                        jobLogTable.getKeyField().getFieldName());
                setBatchId(id_batch.longValue());
                if (getPassedBatchId() <= 0) {
                    setPassedBatchId(id_batch.longValue());
                }
            }

            Object[] lastr = ldb.getLastLogDate(schemaAndTable, jobMeta.getName(), true, LogStatus.END);
            if (!Const.isEmpty(lastr)) {
                Date last;
                try {
                    last = ldb.getReturnRowMeta().getDate(lastr, 0);
                } catch (KettleValueException e) {
                    throw new KettleJobException(
                            BaseMessages.getString(PKG, "Job.Log.ConversionError", "" + tableName), e);
                }
                if (last != null) {
                    startDate = last;
                }
            }

            depDate = currentDate;

            ldb.writeLogRecord(jobMeta.getJobLogTable(), LogStatus.START, this, null);
            if (!ldb.isAutoCommit()) {
                ldb.commitLog(true, jobMeta.getJobLogTable());
            }
            ldb.disconnect();

            // If we need to do periodic logging, make sure to install a timer for this...
            //
            if (intervalInSeconds > 0) {
                final Timer timer = new Timer(getName() + " - interval logging timer");
                TimerTask timerTask = new TimerTask() {
                    public void run() {
                        try {
                            endProcessing();
                        } catch (Exception e) {
                            log.logError(
                                    BaseMessages.getString(PKG, "Job.Exception.UnableToPerformIntervalLogging"), e);
                            // Also stop the show...
                            //
                            errors.incrementAndGet();
                            stopAll();
                        }
                    }
                };
                timer.schedule(timerTask, intervalInSeconds * 1000, intervalInSeconds * 1000);

                addJobListener(new JobAdapter() {
                    public void jobFinished(Job job) {
                        timer.cancel();
                    }
                });
            }

            // Add a listener at the end of the job to take care of writing the final job log record...
            //
            addJobListener(new JobAdapter() {
                public void jobFinished(Job job) throws KettleException {
                    try {
                        endProcessing();
                    } catch (KettleJobException e) {
                        log.logError(BaseMessages.getString(PKG, "Job.Exception.UnableToWriteToLoggingTable",
                                jobLogTable.toString()), e);
                        // Do not swallow the exception here: the job fails if the log database record fails!
                        throw new KettleException(e);
                    }
                }
            });
        } catch (KettleDatabaseException dbe) {
            addErrors(1); // This is even before actual execution
            throw new KettleJobException(
                    BaseMessages.getString(PKG, "Job.Log.UnableToProcessLoggingStart", "" + tableName), dbe);
        } finally {
            ldb.disconnect();
        }
    }

    // If we need to write out the job entry logging information, do so at the end of the job:
    //
    JobEntryLogTable jobEntryLogTable = jobMeta.getJobEntryLogTable();
    if (jobEntryLogTable.isDefined()) {
        addJobListener(new JobAdapter() {
            public void jobFinished(Job job) throws KettleException {
                try {
                    writeJobEntryLogInformation();
                } catch (KettleException e) {
                    throw new KettleException(
                            BaseMessages.getString(PKG, "Job.Exception.UnableToPerformJobEntryLoggingAtJobEnd"), e);
                }
            }
        });
    }

    // If we need to write the log channel hierarchy and lineage information, add a listener for that too...
    //
    ChannelLogTable channelLogTable = jobMeta.getChannelLogTable();
    if (channelLogTable.isDefined()) {
        addJobListener(new JobAdapter() {
            public void jobFinished(Job job) throws KettleException {
                try {
                    writeLogChannelInformation();
                } catch (KettleException e) {
                    throw new KettleException(
                            BaseMessages.getString(PKG, "Job.Exception.UnableToPerformLoggingAtTransEnd"), e);
                }
            }
        });
    }

    JobExecutionExtension extension = new JobExecutionExtension(this, result, null, false);
    ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.JobBeginProcessing.id, extension);

    return true;
}
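Stripped of the Kettle specifics, the interval-logging idiom here is: schedule a repeating task for periodic log writes, and register a completion listener whose only job is to cancel the timer. A minimal sketch under assumed names (OnFinish is a hypothetical listener interface standing in for JobAdapter.jobFinished):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Timer;
    import java.util.TimerTask;

    public class IntervalLogger {
        interface OnFinish { void finished(); } // hypothetical listener type
        private final List<OnFinish> listeners = new ArrayList<OnFinish>();

        void addListener(OnFinish l) { listeners.add(l); }
        void fireFinished() { for (OnFinish l : listeners) l.finished(); }

        void start(int intervalInSeconds) {
            final Timer timer = new Timer("interval logging timer");
            TimerTask timerTask = new TimerTask() {
                public void run() {
                    System.out.println("writing interval log record");
                }
            };
            // Same arithmetic as the Kettle code: seconds -> milliseconds.
            timer.schedule(timerTask, intervalInSeconds * 1000L, intervalInSeconds * 1000L);

            // Cancel the repeating task once the job finishes.
            addListener(new OnFinish() {
                public void finished() { timer.cancel(); }
            });
        }
    }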
From source file:org.pentaho.di.trans.Trans.java
/**
 * Begin processing. Also handle logging operations related to the start of the transformation
 *
 * @throws KettleTransException
 *             the kettle trans exception
 */
public void beginProcessing() throws KettleTransException {
    TransLogTable transLogTable = transMeta.getTransLogTable();
    int intervalInSeconds = Const.toInt(environmentSubstitute(transLogTable.getLogInterval()), -1);

    try {
        String logTable = transLogTable.getActualTableName();

        SimpleDateFormat df = new SimpleDateFormat(REPLAY_DATE_FORMAT);
        log.logDetailed(
                BaseMessages.getString(PKG, "Trans.Log.TransformationCanBeReplayed") + df.format(currentDate));

        try {
            if (transLogTableDatabaseConnection != null && !Const.isEmpty(logTable)
                    && !Const.isEmpty(transMeta.getName())) {
                transLogTableDatabaseConnection.writeLogRecord(transLogTable, LogStatus.START, this, null);

                // Pass in a commit to release transaction locks and to allow a user to actually see the log record.
                //
                if (!transLogTableDatabaseConnection.isAutoCommit()) {
                    transLogTableDatabaseConnection.commitLog(true, transLogTable);
                }

                // If we need to do periodic logging, make sure to install a timer for this...
                //
                if (intervalInSeconds > 0) {
                    final Timer timer = new Timer(getName() + " - interval logging timer");
                    TimerTask timerTask = new TimerTask() {
                        public void run() {
                            try {
                                endProcessing();
                            } catch (Exception e) {
                                log.logError(BaseMessages.getString(PKG,
                                        "Trans.Exception.UnableToPerformIntervalLogging"), e);
                                // Also stop the show...
                                //
                                errors.incrementAndGet();
                                stopAll();
                            }
                        }
                    };
                    timer.schedule(timerTask, intervalInSeconds * 1000, intervalInSeconds * 1000);

                    addTransListener(new TransAdapter() {
                        public void transFinished(Trans trans) {
                            timer.cancel();
                        }
                    });
                }

                // Add a listener to make sure that the last record is also written when the transformation finishes...
                //
                addTransListener(new TransAdapter() {
                    public void transFinished(Trans trans) throws KettleException {
                        try {
                            endProcessing();

                            lastWrittenStepPerformanceSequenceNr = writeStepPerformanceLogRecords(
                                    lastWrittenStepPerformanceSequenceNr, LogStatus.END);
                        } catch (KettleException e) {
                            throw new KettleException(BaseMessages.getString(PKG,
                                    "Trans.Exception.UnableToPerformLoggingAtTransEnd"), e);
                        }
                    }
                });
            }

            // If we need to write out the step logging information, do so at the end of the transformation too...
            //
            StepLogTable stepLogTable = transMeta.getStepLogTable();
            if (stepLogTable.isDefined()) {
                addTransListener(new TransAdapter() {
                    public void transFinished(Trans trans) throws KettleException {
                        try {
                            writeStepLogInformation();
                        } catch (KettleException e) {
                            throw new KettleException(BaseMessages.getString(PKG,
                                    "Trans.Exception.UnableToPerformLoggingAtTransEnd"), e);
                        }
                    }
                });
            }

            // If we need to write the log channel hierarchy and lineage information, add a listener for that too...
            //
            ChannelLogTable channelLogTable = transMeta.getChannelLogTable();
            if (channelLogTable.isDefined()) {
                addTransListener(new TransAdapter() {
                    public void transFinished(Trans trans) throws KettleException {
                        try {
                            writeLogChannelInformation();
                        } catch (KettleException e) {
                            throw new KettleException(BaseMessages.getString(PKG,
                                    "Trans.Exception.UnableToPerformLoggingAtTransEnd"), e);
                        }
                    }
                });
            }

            // See if we need to write the step performance records at intervals too...
            //
            PerformanceLogTable performanceLogTable = transMeta.getPerformanceLogTable();
            int perfLogInterval = Const.toInt(environmentSubstitute(performanceLogTable.getLogInterval()), -1);
            if (performanceLogTable.isDefined() && perfLogInterval > 0) {
                final Timer timer = new Timer(getName() + " - step performance log interval timer");
                TimerTask timerTask = new TimerTask() {
                    public void run() {
                        try {
                            lastWrittenStepPerformanceSequenceNr = writeStepPerformanceLogRecords(
                                    lastWrittenStepPerformanceSequenceNr, LogStatus.RUNNING);
                        } catch (Exception e) {
                            log.logError(BaseMessages.getString(PKG,
                                    "Trans.Exception.UnableToPerformIntervalPerformanceLogging"), e);
                            // Also stop the show...
                            //
                            errors.incrementAndGet();
                            stopAll();
                        }
                    }
                };
                timer.schedule(timerTask, perfLogInterval * 1000, perfLogInterval * 1000);

                addTransListener(new TransAdapter() {
                    public void transFinished(Trans trans) {
                        timer.cancel();
                    }
                });
            }
        } catch (KettleException e) {
            throw new KettleTransException(
                    BaseMessages.getString(PKG, "Trans.Exception.ErrorWritingLogRecordToTable", logTable), e);
        } finally {
            // If we use interval logging, we keep the connection open for performance reasons...
            //
            if (transLogTableDatabaseConnection != null && (intervalInSeconds <= 0)) {
                transLogTableDatabaseConnection.disconnect();
                transLogTableDatabaseConnection = null;
            }
        }
    } catch (KettleException e) {
        throw new KettleTransException(
                BaseMessages.getString(PKG, "Trans.Exception.UnableToBeginProcessingTransformation"), e);
    }
}
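Both Kettle examples use fixed-delay execution, so one slow log write shifts every subsequent run. If a steady cadence mattered more than even spacing between runs, java.util.Timer also offers scheduleAtFixedRate with the same parameter shape; the interval-logging call above would become:

    // Fires as closely as possible to every intervalInSeconds from the initial delay,
    // catching up with back-to-back executions after a long pause.
    timer.scheduleAtFixedRate(timerTask, intervalInSeconds * 1000, intervalInSeconds * 1000);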