Usage examples for java.util.Collections.synchronizedList
public static <T> List<T> synchronizedList(List<T> list)
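The returned wrapper serializes individual list operations (add, get, size, and so on) on the list's own monitor, but iteration is not atomic: per the method's contract, the caller must synchronize on the returned list while traversing it. A minimal sketch of this core pattern, before the real-world examples below:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public class SynchronizedListSketch {
        public static void main(String[] args) {
            // Wrap a plain ArrayList; the wrapper synchronizes each individual call.
            List<String> list = Collections.synchronizedList(new ArrayList<String>());
            list.add("a");
            list.add("b");
            // Iteration spans many calls, so the caller must hold the list's monitor
            // for the whole traversal to avoid seeing a concurrently mutated list.
            synchronized (list) {
                for (String s : list) {
                    System.out.println(s);
                }
            }
        }
    }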
From source file: org.restcomm.connect.telephony.Call.java

public Call(final SipFactory factory, final ActorRef mediaSessionController, final Configuration configuration,
        final URI statusCallback, final String statusCallbackMethod, final List<String> statusCallbackEvent,
        Map<String, ArrayList<String>> headers) {
    super();
    final ActorRef source = self();
    this.system = context().system();
    this.statusCallback = statusCallback;
    this.statusCallbackMethod = statusCallbackMethod;
    this.statusCallbackEvent = statusCallbackEvent;
    if (statusCallback != null) {
        downloader = downloader();
    }
    this.extensionHeaders = new HashMap<String, ArrayList<String>>();
    if (headers != null) {
        this.extensionHeaders = headers;
    }
    // States for the FSM
    this.uninitialized = new State("uninitialized", null, null);
    this.initializing = new State("initializing", new Initializing(source), null);
    this.waitingForAnswer = new State("waiting for answer", new WaitingForAnswer(source), null);
    this.queued = new State("queued", new Queued(source), null);
    this.ringing = new State("ringing", new Ringing(source), null);
    this.failingBusy = new State("failing busy", new FailingBusy(source), null);
    this.busy = new State("busy", new Busy(source), null);
    this.notFound = new State("not found", new NotFound(source), null);
    // This time the --new Canceling(source)-- is an ActionOnState. Overloaded constructor is used here
    this.canceling = new State("canceling", new Canceling(source));
    this.canceled = new State("canceled", new Canceled(source), null);
    this.failingNoAnswer = new State("failing no answer", new FailingNoAnswer(source), null);
    this.noAnswer = new State("no answer", new NoAnswer(source), null);
    this.dialing = new State("dialing", new Dialing(source), null);
    this.updatingMediaSession = new State("updating media session", new UpdatingMediaSession(source), null);
    this.inProgress = new State("in progress", new InProgress(source), null);
    this.joining = new State("joining", new Joining(source), null);
    this.leaving = new State("leaving", new Leaving(source), null);
    this.stopping = new State("stopping", new Stopping(source), null);
    this.completed = new State("completed", new Completed(source), null);
    this.failed = new State("failed", new Failed(source), null);
    this.inDialogRequest = new State("InDialogRequest", new InDialogRequest(source), null);
    // Transitions for the FSM
    final Set<Transition> transitions = new HashSet<Transition>();
    transitions.add(new Transition(this.uninitialized, this.ringing));
    transitions.add(new Transition(this.uninitialized, this.queued));
    transitions.add(new Transition(this.uninitialized, this.canceled));
    transitions.add(new Transition(this.uninitialized, this.completed));
    transitions.add(new Transition(this.queued, this.canceled));
    transitions.add(new Transition(this.queued, this.initializing));
    transitions.add(new Transition(this.ringing, this.busy));
    transitions.add(new Transition(this.ringing, this.notFound));
    transitions.add(new Transition(this.ringing, this.canceling));
    transitions.add(new Transition(this.ringing, this.canceled));
    transitions.add(new Transition(this.ringing, this.failingNoAnswer));
    transitions.add(new Transition(this.ringing, this.failingBusy));
    transitions.add(new Transition(this.ringing, this.noAnswer));
    transitions.add(new Transition(this.ringing, this.initializing));
    transitions.add(new Transition(this.ringing, this.updatingMediaSession));
    transitions.add(new Transition(this.ringing, this.completed));
    transitions.add(new Transition(this.ringing, this.stopping));
    transitions.add(new Transition(this.ringing, this.failed));
    transitions.add(new Transition(this.initializing, this.canceling));
    transitions.add(new Transition(this.initializing, this.dialing));
    transitions.add(new Transition(this.initializing, this.failed));
    transitions.add(new Transition(this.initializing, this.inProgress));
    transitions.add(new Transition(this.initializing, this.waitingForAnswer));
    transitions.add(new Transition(this.initializing, this.stopping));
    transitions.add(new Transition(this.waitingForAnswer, this.inProgress));
    transitions.add(new Transition(this.waitingForAnswer, this.joining));
    transitions.add(new Transition(this.waitingForAnswer, this.canceling));
    transitions.add(new Transition(this.waitingForAnswer, this.completed));
    transitions.add(new Transition(this.waitingForAnswer, this.stopping));
    transitions.add(new Transition(this.dialing, this.canceling));
    transitions.add(new Transition(this.dialing, this.stopping));
    transitions.add(new Transition(this.dialing, this.failingBusy));
    transitions.add(new Transition(this.dialing, this.ringing));
    transitions.add(new Transition(this.dialing, this.failed));
    transitions.add(new Transition(this.dialing, this.failingNoAnswer));
    transitions.add(new Transition(this.dialing, this.noAnswer));
    transitions.add(new Transition(this.dialing, this.updatingMediaSession));
    transitions.add(new Transition(this.inProgress, this.stopping));
    transitions.add(new Transition(this.inProgress, this.joining));
    transitions.add(new Transition(this.inProgress, this.leaving));
    transitions.add(new Transition(this.inProgress, this.failed));
    transitions.add(new Transition(this.inProgress, this.inDialogRequest));
    transitions.add(new Transition(this.joining, this.inProgress));
    transitions.add(new Transition(this.joining, this.stopping));
    transitions.add(new Transition(this.joining, this.failed));
    transitions.add(new Transition(this.leaving, this.inProgress));
    transitions.add(new Transition(this.leaving, this.stopping));
    transitions.add(new Transition(this.leaving, this.failed));
    transitions.add(new Transition(this.leaving, this.completed));
    transitions.add(new Transition(this.canceling, this.canceled));
    transitions.add(new Transition(this.canceling, this.completed));
    transitions.add(new Transition(this.failingBusy, this.busy));
    transitions.add(new Transition(this.failingNoAnswer, this.noAnswer));
    transitions.add(new Transition(this.failingNoAnswer, this.canceling));
    transitions.add(new Transition(this.updatingMediaSession, this.inProgress));
    transitions.add(new Transition(this.updatingMediaSession, this.failed));
    transitions.add(new Transition(this.stopping, this.completed));
    transitions.add(new Transition(this.stopping, this.failed));
    transitions.add(new Transition(this.failed, this.completed));
    transitions.add(new Transition(this.completed, this.stopping));
    transitions.add(new Transition(this.completed, this.failed));
    // FSM
    this.fsm = new FiniteStateMachine(this.uninitialized, transitions);
    // SIP runtime stuff.
    this.factory = factory;
    // Conferencing
    this.conferencing = false;
    // Media Session Control runtime stuff.
    this.msController = mediaSessionController;
    this.fail = false;
    // Initialize the runtime stuff.
    this.id = Sid.generate(Sid.Type.CALL);
    this.instanceId = RestcommConfiguration.getInstance().getMain().getInstanceId();
    this.created = DateTime.now();
    this.observers = Collections.synchronizedList(new ArrayList<ActorRef>());
    this.receivedBye = false;
    // Media Group runtime stuff
    this.liveCallModification = false;
    this.recording = false;
    this.configuration = configuration;
    final Configuration runtime = this.configuration.subset("runtime-settings");
    this.disableSdpPatchingOnUpdatingMediaSession = runtime
            .getBoolean("disable-sdp-patching-on-updating-mediasession", false);
    this.enable200OkDelay = runtime.getBoolean("enable-200-ok-delay", false);
    if (!runtime.subset("ims-authentication").isEmpty()) {
        final Configuration imsAuthentication = runtime.subset("ims-authentication");
        this.actAsImsUa = imsAuthentication.getBoolean("act-as-ims-ua");
    }
}
From source file: org.olat.commons.calendar.ICalFileCalendarManagerTest.java

/**
 * Test concurrent add/update event with two threads and code-point to control concurrency.
 */
@Test
public void testConcurrentAddUpdateEvent() {
    final String TEST_EVENT_ID_1 = "id-testConcurrentAddUpdateEvent-1";
    final String TEST_EVENT_SUBJECT_1 = "testEvent1";
    final String TEST_EVENT_ID_2 = "id-testConcurrentAddUpdateEvent-2";
    final String TEST_EVENT_SUBJECT_2 = "testEvent2";
    final String TEST_EVENT_SUBJECT_2_UPDATED = "testUpdatedEvent2";
    final Identity test = JunitTestHelper.createAndPersistIdentityAsRndUser("ical-3-");
    final List<Exception> exceptionHolder = Collections.synchronizedList(new ArrayList<Exception>(1));
    final List<Boolean> statusList = Collections.synchronizedList(new ArrayList<Boolean>(1));
    // Generate event for update
    CalendarManager calManager = CalendarManagerFactory.getInstance().getCalendarManager();
    Kalendar cal = calManager.getPersonalCalendar(test).getKalendar();
    calManager.addEventTo(cal, new KalendarEvent(TEST_EVENT_ID_2, TEST_EVENT_SUBJECT_2, new Date(), 1));
    cal = calManager.getPersonalCalendar(test).getKalendar();
    KalendarEvent event2 = cal.getEvent(TEST_EVENT_ID_2);
    assertNotNull("Did not found event with id=" + TEST_EVENT_ID_2, event2);
    assertEquals("Wrong calendar-event subject", event2.getSubject(), TEST_EVENT_SUBJECT_2);
    log.info("testConcurrentAddUpdateEvent thread2 addEvent2 DONE");
    final CountDownLatch doneSignal = new CountDownLatch(2);
    // thread 1
    Thread thread1 = new Thread() {
        public void run() {
            try {
                // 1. load calendar
                CalendarManager calendarManager = CalendarManagerFactory.getInstance().getCalendarManager();
                Kalendar currentCalendar = calendarManager.getPersonalCalendar(test).getKalendar();
                // 2. add Event1 => breakpoint hit
                log.info("testConcurrentAddUpdateEvent thread1 addEvent1");
                calendarManager.addEventTo(currentCalendar,
                        new KalendarEvent(TEST_EVENT_ID_1, TEST_EVENT_SUBJECT_1, new Date(), 1));
                log.info("testConcurrentAddUpdateEvent thread1 addEvent1 DONE");
                // 3. check event1 exist
                currentCalendar = calendarManager.getPersonalCalendar(test).getKalendar();
                KalendarEvent event1 = currentCalendar.getEvent(TEST_EVENT_ID_1);
                assertNotNull("Did not found event with id=" + TEST_EVENT_ID_1, event1);
                assertEquals("Wrong calendar-event subject", event1.getSubject(), TEST_EVENT_SUBJECT_1);
                // 4. sleep 2sec
                // 5. check event1 still exist (event2 added in meantime)
                currentCalendar = calendarManager.getPersonalCalendar(test).getKalendar();
                event1 = currentCalendar.getEvent(TEST_EVENT_ID_1);
                assertNotNull("Did not found event with id=" + TEST_EVENT_ID_1, event1);
                assertEquals("Wrong calendar-event subject", event1.getSubject(), TEST_EVENT_SUBJECT_1);
                statusList.add(Boolean.TRUE);
                log.info("testConcurrentAddUpdateEvent thread1 finished");
            } catch (Exception ex) {
                exceptionHolder.add(ex); // no exception should happen
            } finally {
                doneSignal.countDown();
                DBFactory.getInstance().commitAndCloseSession();
            }
        }
    };
    // thread 2
    Thread thread2 = new Thread() {
        public void run() {
            try {
                CalendarManager calendarManager = CalendarManagerFactory.getInstance().getCalendarManager();
                Kalendar calendar = calendarManager.getPersonalCalendar(test).getKalendar();
                // 3. add Event2 (breakpoint of thread1 blocks)
                log.info("testConcurrentAddUpdateEvent thread2 updateEvent2");
                calendarManager.updateEventFrom(calendar,
                        new KalendarEvent(TEST_EVENT_ID_2, TEST_EVENT_SUBJECT_2_UPDATED, new Date(), 1));
                log.info("testConcurrentAddUpdateEvent thread1 updateEvent2 DONE");
                // 4. check event2 exist
                calendar = calendarManager.getPersonalCalendar(test).getKalendar();
                KalendarEvent updatedEvent = calendar.getEvent(TEST_EVENT_ID_2);
                assertNotNull("Did not found event with id=" + TEST_EVENT_ID_2, updatedEvent);
                assertEquals("Wrong calendar-event subject", updatedEvent.getSubject(),
                        TEST_EVENT_SUBJECT_2_UPDATED);
                // 5. check event1 exist
                calendar = calendarManager.getPersonalCalendar(test).getKalendar();
                KalendarEvent event1 = calendar.getEvent(TEST_EVENT_ID_1);
                assertNotNull("Did not found event with id=" + TEST_EVENT_ID_1, event1);
                assertEquals("Wrong calendar-event subject", event1.getSubject(), TEST_EVENT_SUBJECT_1);
                // Delete Event
                boolean removed = calendarManager.removeEventFrom(calendar,
                        new KalendarEvent(TEST_EVENT_ID_2, TEST_EVENT_SUBJECT_2_UPDATED, new Date(), 1));
                assertTrue(removed);
                statusList.add(Boolean.TRUE);
                log.info("testConcurrentAddUpdateEvent thread2 finished");
            } catch (Exception ex) {
                exceptionHolder.add(ex); // no exception should happen
            } finally {
                doneSignal.countDown();
                DBFactory.getInstance().commitAndCloseSession();
            }
        }
    };
    thread1.start();
    thread2.start();
    try {
        boolean interrupt = doneSignal.await(10, TimeUnit.SECONDS);
        assertTrue("Test takes too long (more than 10s)", interrupt);
    } catch (InterruptedException e) {
        fail("" + e.getMessage());
    }
    // if not -> they are in deadlock and the db did not detect it
    for (Exception exception : exceptionHolder) {
        log.info("exception: " + exception.getMessage());
        exception.printStackTrace();
    }
    assertTrue("It throws an exception in test => see sysout", exceptionHolder.isEmpty());
    log.info("testConcurrentAddUpdateEvent finish successful");
}
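The test above uses a pattern that recurs in several examples on this page: a synchronizedList acts as an exception holder that worker threads append to, and a CountDownLatch tells the main thread when the workers are done. A distilled sketch of that pattern (class and variable names here are illustrative, not taken from any of the source files):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;

    public class ExceptionHolderSketch {
        public static void main(String[] args) throws InterruptedException {
            final List<Exception> exceptionHolder = Collections.synchronizedList(new ArrayList<Exception>());
            final CountDownLatch doneSignal = new CountDownLatch(2);
            for (int i = 0; i < 2; i++) {
                new Thread(new Runnable() {
                    public void run() {
                        try {
                            // ... do work that may throw ...
                        } catch (Exception ex) {
                            exceptionHolder.add(ex); // safe to call from any thread
                        } finally {
                            doneSignal.countDown();
                        }
                    }
                }).start();
            }
            // await() establishes a happens-before edge with each countDown(),
            // so reading the holder afterwards is safe without extra locking.
            doneSignal.await(10, TimeUnit.SECONDS);
            for (Exception e : exceptionHolder) {
                e.printStackTrace();
            }
        }
    }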
From source file: org.omnaest.utils.proxy.MethodCallCapturer.java

/**
 * Gets an available list for the given proxy object or creates a new one.
 *
 * @param stub
 * @return
 */
protected List<MethodCallCaptureContext> getOrCreateMethodCallCaptureContextListForStub(Object stub) {
    //
    List<MethodCallCaptureContext> retlist = null;
    //
    if (!this.stubToMethodCallCaptureContextListMap.containsKey(stub)) {
        this.stubToMethodCallCaptureContextListMap.put(stub,
                Collections.synchronizedList(new ArrayList<MethodCallCaptureContext>()));
    }
    //
    retlist = this.stubToMethodCallCaptureContextListMap.get(stub);
    //
    return retlist;
}
From source file: com.emc.ecs.sync.storage.CasStorageTest.java

private List<String> createTestClips(FPPool pool, int maxBlobSize, int thisMany, Writer summaryWriter)
        throws Exception {
    ExecutorService service = Executors.newFixedThreadPool(CAS_THREADS);
    System.out.print("Creating clips");
    List<String> clipIds = Collections.synchronizedList(new ArrayList<String>());
    List<String> summaries = Collections.synchronizedList(new ArrayList<String>());
    for (int clipIdx = 0; clipIdx < thisMany; clipIdx++) {
        service.submit(new ClipWriter(pool, clipIds, maxBlobSize, summaries));
    }
    service.shutdown();
    service.awaitTermination(CAS_SETUP_WAIT_MINUTES, TimeUnit.MINUTES);
    service.shutdownNow();
    Collections.sort(summaries);
    for (String summary : summaries) {
        summaryWriter.append(summary);
    }
    System.out.println();
    return clipIds;
}
From source file: com.meltmedia.cadmium.servlets.guice.CadmiumListener.java

private Module createModule() {
    return new AbstractModule() {
        @SuppressWarnings("unchecked")
        @Override
        protected void configure() {
            Properties configProperties = configManager.getDefaultProperties();
            org.apache.shiro.web.env.WebEnvironment shiroEnv = (org.apache.shiro.web.env.WebEnvironment) context
                    .getAttribute(EnvironmentLoader.ENVIRONMENT_ATTRIBUTE_KEY);
            if (shiroEnv != null && shiroEnv instanceof WebEnvironment) {
                WebEnvironment cadmiumShiroEnv = (WebEnvironment) shiroEnv;
                if (cadmiumShiroEnv.getPersistablePropertiesRealm() != null) {
                    log.debug("Binding shiro configurable realm: " + PersistablePropertiesRealm.class);
                    bind(PersistablePropertiesRealm.class)
                            .toInstance(cadmiumShiroEnv.getPersistablePropertiesRealm());
                    cadmiumShiroEnv.getPersistablePropertiesRealm()
                            .loadProperties(applicationContentRoot.getAbsoluteFile());
                }
            }
            bind(Reflections.class).toInstance(reflections);
            bind(Boolean.class).annotatedWith(com.meltmedia.cadmium.core.ISJBoss.class).toInstance(jboss);
            bind(Boolean.class).annotatedWith(com.meltmedia.cadmium.core.ISOLDJBoss.class).toInstance(oldJBoss);
            bind(com.meltmedia.cadmium.core.SiteDownService.class).toInstance(MaintenanceFilter.siteDown);
            bind(com.meltmedia.cadmium.core.ApiEndpointAccessController.class)
                    .toInstance(ApiEndpointAccessFilter.controller);
            bind(ScheduledExecutorService.class).toInstance(executor);
            bind(ExecutorService.class).toInstance(executor);
            bind(Executor.class).toInstance(executor);
            bind(FileServlet.class).in(Scopes.SINGLETON);
            bind(com.meltmedia.cadmium.core.ContentService.class).to(FileServlet.class);
            bind(MessageConverter.class);
            bind(MessageSender.class).to(JGroupsMessageSender.class);
            bind(DelayedGitServiceInitializer.class)
                    .annotatedWith(com.meltmedia.cadmium.core.ContentGitService.class)
                    .toInstance(new DelayedGitServiceInitializer());
            bind(DelayedGitServiceInitializer.class)
                    .annotatedWith(com.meltmedia.cadmium.core.ConfigurationGitService.class)
                    .toInstance(new DelayedGitServiceInitializer());
            members = Collections.synchronizedList(new ArrayList<ChannelMember>());
            bind(new TypeLiteral<List<ChannelMember>>() {
            }).annotatedWith(com.meltmedia.cadmium.core.ClusterMembers.class).toInstance(members);
            Multibinder<com.meltmedia.cadmium.core.CommandAction<?>> commandActionBinder = Multibinder
                    .newSetBinder(binder(), new TypeLiteral<com.meltmedia.cadmium.core.CommandAction<?>>() {
                    });
            @SuppressWarnings("rawtypes")
            Set<Class<? extends com.meltmedia.cadmium.core.CommandAction>> commandActionSet = reflections
                    .getSubTypesOf(com.meltmedia.cadmium.core.CommandAction.class);
            log.debug("Found {} CommandAction classes.", commandActionSet.size());
            for (@SuppressWarnings("rawtypes")
            Class<? extends com.meltmedia.cadmium.core.CommandAction> commandActionClass : commandActionSet) {
                commandActionBinder.addBinding()
                        .to((Class<? extends com.meltmedia.cadmium.core.CommandAction<?>>) commandActionClass);
            }
            bind(new TypeLiteral<CommandResponse<HistoryResponse>>() {
            }).to(HistoryResponseCommandAction.class).in(Scopes.SINGLETON);
            bind(new TypeLiteral<CommandResponse<LoggerConfigResponse>>() {
            }).to(LoggerConfigResponseCommandAction.class).in(Scopes.SINGLETON);
            bind(new TypeLiteral<Map<String, com.meltmedia.cadmium.core.CommandAction<?>>>() {
            }).annotatedWith(com.meltmedia.cadmium.core.CommandMap.class).toProvider(CommandMapProvider.class);
            bind(new TypeLiteral<Map<String, Class<?>>>() {
            }).annotatedWith(com.meltmedia.cadmium.core.CommandBodyMap.class)
                    .toProvider(CommandBodyMapProvider.class);
            bind(String.class).annotatedWith(com.meltmedia.cadmium.core.ContentDirectory.class)
                    .toInstance(contentDir);
            bind(String.class).annotatedWith(com.meltmedia.cadmium.core.SharedContentRoot.class)
                    .toInstance(sharedContentRoot.getAbsolutePath());
            bind(String.class).annotatedWith(com.meltmedia.cadmium.core.CurrentWarName.class)
                    .toInstance(warName);
            String environment = configProperties.getProperty("com.meltmedia.cadmium.environment",
                    "development");
            // Bind channel name
            bind(String.class).annotatedWith(MessagingChannelName.class)
                    .toInstance("CadmiumChannel-v2.0-" + vHostName + "-" + environment);
            bind(String.class).annotatedWith(VHost.class).toInstance(vHostName);
            bind(String.class).annotatedWith(com.meltmedia.cadmium.core.ApplicationContentRoot.class)
                    .toInstance(applicationContentRoot.getAbsoluteFile().getAbsolutePath());
            bind(HistoryManager.class);
            bind(ConfigManager.class).toInstance(configManager);
            // Bind Config file URL
            if (channelConfigUrl == null) {
                log.info("Using internal tcp.xml configuration file for JGroups.");
                URL propsUrl = JChannelProvider.class.getClassLoader().getResource("tcp.xml");
                bind(URL.class).annotatedWith(MessagingConfigurationUrl.class).toInstance(propsUrl);
            } else {
                try {
                    log.info("Using {} configuration file for JGroups.", channelConfigUrl);
                    bind(URL.class).annotatedWith(MessagingConfigurationUrl.class)
                            .toInstance(new URL(channelConfigUrl));
                } catch (MalformedURLException e) {
                    log.error("Failed to setup jgroups with the file specified [" + channelConfigUrl
                            + "]. Failing back to built in configuration!", e);
                }
            }
            // Bind JChannel provider
            bind(JChannel.class).toProvider(JChannelProvider.class).in(Scopes.SINGLETON);
            bind(MembershipListener.class).to(JGroupsMembershipTracker.class);
            bind(MembershipTracker.class).to(JGroupsMembershipTracker.class);
            bind(MessageListener.class).to(MessageReceiver.class);
            bind(LifecycleService.class);
            bind(new TypeLiteral<com.meltmedia.cadmium.core.CoordinatedWorker<com.meltmedia.cadmium.core.commands.ContentUpdateRequest>>() {
            }).annotatedWith(com.meltmedia.cadmium.core.ContentWorker.class).to(CoordinatedWorkerImpl.class);
            bind(new TypeLiteral<com.meltmedia.cadmium.core.CoordinatedWorker<com.meltmedia.cadmium.core.commands.ContentUpdateRequest>>() {
            }).annotatedWith(com.meltmedia.cadmium.core.ConfigurationWorker.class)
                    .to(ConfigCoordinatedWorkerImpl.class);
            bind(SiteConfigProcessor.class);
            bind(Api.class);
            bind(EventQueue.class);
            Multibinder<ConfigProcessor> configProcessorBinder = Multibinder.newSetBinder(binder(),
                    ConfigProcessor.class);
            Set<Class<? extends ConfigProcessor>> configProcessorSet = reflections
                    .getSubTypesOf(ConfigProcessor.class);
            log.debug("Found {} ConfigProcessor classes.", configProcessorSet.size());
            for (Class<? extends ConfigProcessor> configProcessorClass : configProcessorSet) {
                configProcessorBinder.addBinding().to(configProcessorClass);
                //bind(ConfigProcessor.class).to(configProcessorClass);
            }
            bind(Receiver.class).to(MultiClassReceiver.class).asEagerSingleton();
            Set<Class<?>> modules = reflections
                    .getTypesAnnotatedWith(com.meltmedia.cadmium.core.CadmiumModule.class);
            log.debug("Found {} Module classes.", modules.size());
            for (Class<?> module : modules) {
                if (Module.class.isAssignableFrom(module)) {
                    log.debug("Installing module {}", module.getName());
                    try {
                        install(((Class<? extends Module>) module).newInstance());
                    } catch (InstantiationException e) {
                        log.warn("Failed to instantiate " + module.getName(), e);
                    } catch (IllegalAccessException e) {
                        log.debug("Modules [" + module.getName() + "] constructor is not accessible.", e);
                    }
                }
            }
            // Discover configuration classes.
            install(new ConfigurationModule(reflections));
            // Bind Jersey Endpoints
            Set<Class<? extends Object>> jerseySet = reflections.getTypesAnnotatedWith(Path.class);
            log.debug("Found {} jersey services with the Path annotation.", jerseySet.size());
            for (Class<? extends Object> jerseyService : jerseySet) {
                log.debug("Binding jersey endpoint class {}", jerseyService.getName());
                bind(jerseyService).asEagerSingleton();
            }
            SchedulerService.bindScheduled(binder(), reflections);
            bind(SchedulerService.class);
        }
    };
}
From source file: org.jets3t.service.utils.FileComparer.java

/**
 * Lists the objects in a bucket using a partitioning technique to divide
 * the object namespace into separate partitions that can be listed by
 * multiple simultaneous threads. This method divides the object namespace
 * using the given delimiter, traverses this space up to the specified
 * depth to identify prefix names for multiple "partitions", and
 * then lists the objects in each partition. It returns the complete list
 * of objects in the bucket path.
 * <p>
 * This partitioning technique will work best for buckets with many objects
 * that are divided into a number of virtual subdirectories of roughly equal
 * size.
 *
 * @param service
 * the service object that will be used to perform listing requests.
 * @param bucketName
 * the name of the bucket whose contents will be listed.
 * @param targetPath
 * a root path within the bucket to be listed. If this parameter is null, all
 * the bucket's objects will be listed. Otherwise, only the objects below the
 * virtual path specified will be listed.
 * @param delimiter
 * the delimiter string used to identify virtual subdirectory partitions
 * in a bucket. If this parameter is null, or it has a value that is not
 * present in your object names, no partitioning will take place.
 * @param toDepth
 * the number of delimiter levels this method will traverse to identify
 * subdirectory partitions. If this value is zero, no partitioning will take
 * place.
 *
 * @return
 * the list of objects under the target path in the bucket.
 *
 * @throws ServiceException
 */
public StorageObject[] listObjectsThreaded(StorageService service, final String bucketName, String targetPath,
        final String delimiter, int toDepth) throws ServiceException {
    final List<StorageObject> allObjects = Collections.synchronizedList(new ArrayList<StorageObject>());
    final List<String> lastCommonPrefixes = Collections.synchronizedList(new ArrayList<String>());
    final ServiceException serviceExceptions[] = new ServiceException[1];

    /*
     * Create a ThreadedStorageService object with an event listener that responds to
     * ListObjectsEvent notifications and populates a complete object listing.
     */
    final ThreadedStorageService threadedService = new ThreadedStorageService(service,
            new StorageServiceEventAdaptor() {
                @Override
                public void event(ListObjectsEvent event) {
                    if (ListObjectsEvent.EVENT_IN_PROGRESS == event.getEventCode()) {
                        Iterator<StorageObjectsChunk> chunkIter = event.getChunkList().iterator();
                        while (chunkIter.hasNext()) {
                            StorageObjectsChunk chunk = chunkIter.next();
                            if (log.isDebugEnabled()) {
                                log.debug("Listed " + chunk.getObjects().length + " objects and "
                                        + chunk.getCommonPrefixes().length + " common prefixes in bucket '"
                                        + bucketName + "' using prefix=" + chunk.getPrefix() + ", delimiter="
                                        + chunk.getDelimiter());
                            }
                            allObjects.addAll(Arrays.asList(chunk.getObjects()));
                            lastCommonPrefixes.addAll(Arrays.asList(chunk.getCommonPrefixes()));
                        }
                    } else if (ListObjectsEvent.EVENT_ERROR == event.getEventCode()) {
                        serviceExceptions[0] = new ServiceException("Failed to list all objects in bucket",
                                event.getErrorCause());
                    }
                }
            });

    // The first listing partition we use as a starting point is the target path.
    String[] prefixesToList = new String[] { targetPath };
    int currentDepth = 0;
    while (currentDepth <= toDepth && prefixesToList.length > 0) {
        if (log.isDebugEnabled()) {
            log.debug("Listing objects in '" + bucketName + "' using " + prefixesToList.length + " prefixes: "
                    + Arrays.asList(prefixesToList));
        }
        // Initialize the variables that will be used, or populated, by the
        // multi-threaded listing.
        lastCommonPrefixes.clear();
        final String[] finalPrefixes = prefixesToList;
        final String finalDelimiter = (currentDepth < toDepth ? delimiter : null);
        /*
         * Perform a multi-threaded listing, where each prefix string
         * will be used as a unique partition to be listed in a separate thread.
         */
        (new Thread() {
            @Override
            public void run() {
                threadedService.listObjects(bucketName, finalPrefixes, finalDelimiter,
                        Constants.DEFAULT_OBJECT_LIST_CHUNK_SIZE);
            };
        }).run();
        // Throw any exceptions that occur inside the threads.
        if (serviceExceptions[0] != null) {
            throw serviceExceptions[0];
        }
        // We use the common prefix paths identified in the last listing
        // iteration, if any, to identify partitions for follow-up listings.
        prefixesToList = lastCommonPrefixes.toArray(new String[lastCommonPrefixes.size()]);
        currentDepth++;
    }
    return allObjects.toArray(new StorageObject[allObjects.size()]);
}
From source file: org.springframework.batch.core.jsr.launch.JsrJobOperator.java

/**
 * Creates a child {@link ApplicationContext} for the job being requested based upon
 * the /META-INF/batch.xml (if exists) and the /META-INF/batch-jobs/<jobName>.xml
 * configuration and restarts the job.
 *
 * @param executionId the database id of the job execution to be restarted.
 * @param params any job parameters to be used during the execution of this job.
 * @throws JobExecutionAlreadyCompleteException thrown if the requested job execution has
 * a status of COMPLETE
 * @throws NoSuchJobExecutionException thrown if the requested job execution does not exist
 * in the repository
 * @throws JobExecutionNotMostRecentException thrown if the requested job execution is not
 * the most recent attempt for the job instance it's related to.
 * @throws JobRestartException thrown for any general errors during the job restart process
 */
@Override
public long restart(long executionId, Properties params) throws JobExecutionAlreadyCompleteException,
        NoSuchJobExecutionException, JobExecutionNotMostRecentException, JobRestartException,
        JobSecurityException {
    org.springframework.batch.core.JobExecution previousJobExecution = jobExplorer.getJobExecution(executionId);

    if (previousJobExecution == null) {
        throw new NoSuchJobExecutionException("No JobExecution found for id: [" + executionId + "]");
    } else if (previousJobExecution.getStatus().equals(BatchStatus.COMPLETED)) {
        throw new JobExecutionAlreadyCompleteException("The requested job has already completed");
    }

    List<org.springframework.batch.core.JobExecution> previousExecutions = jobExplorer
            .getJobExecutions(previousJobExecution.getJobInstance());

    for (org.springframework.batch.core.JobExecution jobExecution : previousExecutions) {
        if (jobExecution.getCreateTime().compareTo(previousJobExecution.getCreateTime()) > 0) {
            throw new JobExecutionNotMostRecentException(
                    "The requested JobExecution to restart was not the most recently run");
        }
        if (jobExecution.getStatus().equals(BatchStatus.ABANDONED)) {
            throw new JobRestartException("JobExecution ID: " + jobExecution.getId()
                    + " is abandoned and attempted to be restarted.");
        }
    }

    final String jobName = previousJobExecution.getJobInstance().getJobName();
    Properties jobRestartProperties = getJobRestartProperties(params, previousJobExecution);
    final JsrXmlApplicationContext batchContext = new JsrXmlApplicationContext(jobRestartProperties);
    batchContext.setValidating(false);

    Resource batchXml = new ClassPathResource("/META-INF/batch.xml");
    Resource jobXml = new ClassPathResource(previousJobExecution.getJobConfigurationName());

    if (batchXml.exists()) {
        batchContext.load(batchXml);
    }
    if (jobXml.exists()) {
        batchContext.load(jobXml);
    }

    AbstractBeanDefinition beanDefinition = BeanDefinitionBuilder
            .genericBeanDefinition("org.springframework.batch.core.jsr.JsrJobContextFactoryBean")
            .getBeanDefinition();
    beanDefinition.setScope(BeanDefinition.SCOPE_SINGLETON);
    batchContext.registerBeanDefinition(JSR_JOB_CONTEXT_BEAN_NAME, beanDefinition);
    batchContext.setParent(baseContext);

    try {
        batchContext.refresh();
    } catch (BeanCreationException e) {
        throw new JobRestartException(e);
    }

    final org.springframework.batch.core.JobExecution jobExecution;
    try {
        JobParameters jobParameters = jobParametersConverter.getJobParameters(jobRestartProperties);
        jobExecution = jobRepository.createJobExecution(previousJobExecution.getJobInstance(), jobParameters,
                previousJobExecution.getJobConfigurationName());
    } catch (Exception e) {
        throw new JobRestartException(e);
    }

    try {
        final Semaphore semaphore = new Semaphore(1);
        final List<Exception> exceptionHolder = Collections.synchronizedList(new ArrayList<Exception>());
        semaphore.acquire();

        taskExecutor.execute(new Runnable() {
            @Override
            public void run() {
                JsrJobContextFactoryBean factoryBean = null;
                try {
                    factoryBean = (JsrJobContextFactoryBean) batchContext
                            .getBean("&" + JSR_JOB_CONTEXT_BEAN_NAME);
                    factoryBean.setJobExecution(jobExecution);
                    final Job job = batchContext.getBean(Job.class);
                    if (!job.isRestartable()) {
                        throw new JobRestartException("Job " + jobName + " is not restartable");
                    }
                    semaphore.release();
                    // Initialization of the JobExecution for job level dependencies
                    jobRegistry.register(job, jobExecution);
                    job.execute(jobExecution);
                    jobRegistry.remove(jobExecution);
                } catch (Exception e) {
                    exceptionHolder.add(e);
                } finally {
                    if (factoryBean != null) {
                        factoryBean.close();
                    }
                    batchContext.close();
                    if (semaphore.availablePermits() == 0) {
                        semaphore.release();
                    }
                }
            }
        });

        semaphore.acquire();
        if (exceptionHolder.size() > 0) {
            semaphore.release();
            throw new JobRestartException(exceptionHolder.get(0));
        }
    } catch (Exception e) {
        jobExecution.upgradeStatus(BatchStatus.FAILED);
        if (jobExecution.getExitStatus().equals(ExitStatus.UNKNOWN)) {
            jobExecution.setExitStatus(ExitStatus.FAILED.addExitDescription(e));
        }
        jobRepository.update(jobExecution);
        if (batchContext.isActive()) {
            batchContext.close();
        }
        throw new JobRestartException(e);
    }
    return jobExecution.getId();
}
From source file: org.pentaho.di.trans.step.BaseStep.java

/**
 * This is the base step that forms the basis for all steps. You can derive from this class to implement your own
 * steps.
 *
 * @param stepMeta The StepMeta object to run.
 * @param stepDataInterface the data object to store temporary data, database connections, caches, result sets,
 * hashtables etc.
 * @param copyNr The copynumber for this step.
 * @param transMeta The TransInfo of which the step stepMeta is part of.
 * @param trans The (running) transformation to obtain information shared among the steps.
 */
public BaseStep(StepMeta stepMeta, StepDataInterface stepDataInterface, int copyNr, TransMeta transMeta,
        Trans trans) {
    this.stepMeta = stepMeta;
    this.stepDataInterface = stepDataInterface;
    this.stepcopy = copyNr;
    this.transMeta = transMeta;
    this.trans = trans;
    this.stepname = stepMeta.getName();
    this.socketRepository = trans.getSocketRepository();

    // Set the name of the thread
    if (stepMeta.getName() == null) {
        throw new RuntimeException("A step in transformation [" + transMeta.toString()
                + "] doesn't have a name. A step should always have a name to identify it by.");
    }

    log = KettleLogStore.getLogChannelInterfaceFactory().create(this, trans);

    first = true;
    clusteredPartitioningFirst = true;

    running = new AtomicBoolean(false);
    stopped = new AtomicBoolean(false);
    safeStopped = new AtomicBoolean(false);
    paused = new AtomicBoolean(false);

    init = false;

    synchronized (statusCountersLock) {
        linesRead = 0L; // new AtomicLong(0L); // Keep some statistics!
        linesWritten = 0L; // new AtomicLong(0L);
        linesUpdated = 0L; // new AtomicLong(0L);
        linesSkipped = 0L; // new AtomicLong(0L);
        linesRejected = 0L; // new AtomicLong(0L);
        linesInput = 0L; // new AtomicLong(0L);
        linesOutput = 0L; // new AtomicLong(0L);
    }

    inputRowSets = null;
    outputRowSets = null;
    nextSteps = null;

    terminator = stepMeta.hasTerminator();
    if (terminator) {
        terminator_rows = new ArrayList<Object[]>();
    } else {
        terminator_rows = null;
    }

    // debug="-";

    start_time = null;
    stop_time = null;

    distributed = stepMeta.isDistributes();
    rowDistribution = stepMeta.getRowDistribution();

    if (distributed) {
        if (rowDistribution != null) {
            if (log.isDetailed()) {
                logDetailed(BaseMessages.getString(PKG, "BaseStep.Log.CustomRowDistributionActivated",
                        rowDistributionCode));
            }
        } else {
            if (log.isDetailed()) {
                logDetailed(BaseMessages.getString(PKG, "BaseStep.Log.DistributionActivated"));
            }
        }
    } else {
        if (log.isDetailed()) {
            logDetailed(BaseMessages.getString(PKG, "BaseStep.Log.DistributionDeactivated"));
        }
    }

    rowListeners = new CopyOnWriteArrayList<RowListener>();
    resultFiles = new HashMap<String, ResultFile>();
    resultFilesLock = new ReentrantReadWriteLock();

    repartitioning = StepPartitioningMeta.PARTITIONING_METHOD_NONE;
    partitionTargets = new Hashtable<String, BlockingRowSet>();

    serverSockets = new ArrayList<ServerSocket>();

    extensionDataMap = new HashMap<String, Object>();

    // tuning parameters
    // putTimeOut = 10; //s
    // getTimeOut = 500; //s
    // timeUnit = TimeUnit.MILLISECONDS;
    // the smaller singleWaitTime, the faster the program run but cost CPU
    // singleWaitTime = 1; //ms
    // maxPutWaitCount = putTimeOut*1000/singleWaitTime;
    // maxGetWaitCount = getTimeOut*1000/singleWaitTime;
    // worker = Executors.newFixedThreadPool(10);

    checkTransRunning = false;

    blockPointer = 0;

    stepListeners = Collections.synchronizedList(new ArrayList<StepListener>());

    dispatch();

    upperBufferBoundary = (int) (transMeta.getSizeRowset() * 0.99);
    lowerBufferBoundary = (int) (transMeta.getSizeRowset() * 0.01);
}
From source file: nl.knaw.dans.common.ldap.repo.LdapMapper.java

private void populateMethodLists() {
    annotatedGetMethods = Collections.synchronizedList(new ArrayList<Method>());
    annotatedSetMethods = Collections.synchronizedList(new ArrayList<Method>());
    Class<?> superC = clazz;
    while (superC != null) {
        Method[] methods = superC.getDeclaredMethods();
        for (Method method : methods) {
            if (method.isAnnotationPresent(LdapAttribute.class)) {
                if (method.getReturnType().equals(void.class)) {
                    // this is a setter method
                    annotatedSetMethods.add(method);
                } else {
                    // it's a getter method
                    annotatedGetMethods.add(method);
                }
            }
        }
        superC = superC.getSuperclass();
    }
}
From source file: org.apache.hive.spark.client.SparkClientImpl.java

private Thread startDriver(final RpcServer rpcServer, final String clientId, final String secret)
        throws IOException {
    Runnable runnable;
    final String serverAddress = rpcServer.getAddress();
    final String serverPort = String.valueOf(rpcServer.getPort());

    if (conf.containsKey(SparkClientFactory.CONF_KEY_IN_PROCESS)) {
        // Mostly for testing things quickly. Do not do this in production.
        // When invoked in-process it inherits the environment variables of the parent.
        LOG.warn("!!!! Running remote driver in-process. !!!!");
        runnable = new Runnable() {
            @Override
            public void run() {
                List<String> args = Lists.newArrayList();
                args.add("--remote-host");
                args.add(serverAddress);
                args.add("--remote-port");
                args.add(serverPort);
                args.add("--client-id");
                args.add(clientId);
                args.add("--secret");
                args.add(secret);

                for (Map.Entry<String, String> e : conf.entrySet()) {
                    args.add("--conf");
                    args.add(String.format("%s=%s", e.getKey(), conf.get(e.getKey())));
                }
                try {
                    RemoteDriver.main(args.toArray(new String[args.size()]));
                } catch (Exception e) {
                    LOG.error("Error running driver.", e);
                }
            }
        };
    } else {
        // If a Spark installation is provided, use the spark-submit script. Otherwise, call the
        // SparkSubmit class directly, which has some caveats (like having to provide a proper
        // version of Guava on the classpath depending on the deploy mode).
        String sparkHome = Strings.emptyToNull(conf.get(SPARK_HOME_KEY));
        if (sparkHome == null) {
            sparkHome = Strings.emptyToNull(System.getenv(SPARK_HOME_ENV));
        }
        if (sparkHome == null) {
            sparkHome = Strings.emptyToNull(System.getProperty(SPARK_HOME_KEY));
        }
        String sparkLogDir = conf.get("hive.spark.log.dir");
        if (sparkLogDir == null) {
            if (sparkHome == null) {
                sparkLogDir = "./target/";
            } else {
                sparkLogDir = sparkHome + "/logs/";
            }
        }

        String osxTestOpts = "";
        if (Strings.nullToEmpty(System.getProperty("os.name")).toLowerCase().contains("mac")) {
            osxTestOpts = Strings.nullToEmpty(System.getenv(OSX_TEST_OPTS));
        }

        String driverJavaOpts = Joiner.on(" ").skipNulls().join("-Dhive.spark.log.dir=" + sparkLogDir,
                osxTestOpts, conf.get(DRIVER_OPTS_KEY));
        String executorJavaOpts = Joiner.on(" ").skipNulls().join("-Dhive.spark.log.dir=" + sparkLogDir,
                osxTestOpts, conf.get(EXECUTOR_OPTS_KEY));

        // Create a file with all the job properties to be read by spark-submit. Change the
        // file's permissions so that only the owner can read it. This avoids having the
        // connection secret show up in the child process's command line.
        File properties = File.createTempFile("spark-submit.", ".properties");
        if (!properties.setReadable(false) || !properties.setReadable(true, true)) {
            throw new IOException("Cannot change permissions of job properties file.");
        }
        properties.deleteOnExit();

        Properties allProps = new Properties();
        // First load the defaults from spark-defaults.conf if available.
        try {
            URL sparkDefaultsUrl = Thread.currentThread().getContextClassLoader()
                    .getResource("spark-defaults.conf");
            if (sparkDefaultsUrl != null) {
                LOG.info("Loading spark defaults: " + sparkDefaultsUrl);
                allProps.load(new ByteArrayInputStream(Resources.toByteArray(sparkDefaultsUrl)));
            }
        } catch (Exception e) {
            String msg = "Exception trying to load spark-defaults.conf: " + e;
            throw new IOException(msg, e);
        }
        // Then load the SparkClientImpl config.
        for (Map.Entry<String, String> e : conf.entrySet()) {
            allProps.put(e.getKey(), conf.get(e.getKey()));
        }
        allProps.put(SparkClientFactory.CONF_CLIENT_ID, clientId);
        allProps.put(SparkClientFactory.CONF_KEY_SECRET, secret);
        allProps.put(DRIVER_OPTS_KEY, driverJavaOpts);
        allProps.put(EXECUTOR_OPTS_KEY, executorJavaOpts);

        String isTesting = conf.get("spark.testing");
        if (isTesting != null && isTesting.equalsIgnoreCase("true")) {
            String hiveHadoopTestClasspath = Strings.nullToEmpty(System.getenv("HIVE_HADOOP_TEST_CLASSPATH"));
            if (!hiveHadoopTestClasspath.isEmpty()) {
                String extraDriverClasspath = Strings
                        .nullToEmpty((String) allProps.get(DRIVER_EXTRA_CLASSPATH));
                if (extraDriverClasspath.isEmpty()) {
                    allProps.put(DRIVER_EXTRA_CLASSPATH, hiveHadoopTestClasspath);
                } else {
                    extraDriverClasspath = extraDriverClasspath.endsWith(File.pathSeparator)
                            ? extraDriverClasspath
                            : extraDriverClasspath + File.pathSeparator;
                    allProps.put(DRIVER_EXTRA_CLASSPATH, extraDriverClasspath + hiveHadoopTestClasspath);
                }
                String extraExecutorClasspath = Strings
                        .nullToEmpty((String) allProps.get(EXECUTOR_EXTRA_CLASSPATH));
                if (extraExecutorClasspath.isEmpty()) {
                    allProps.put(EXECUTOR_EXTRA_CLASSPATH, hiveHadoopTestClasspath);
                } else {
                    extraExecutorClasspath = extraExecutorClasspath.endsWith(File.pathSeparator)
                            ? extraExecutorClasspath
                            : extraExecutorClasspath + File.pathSeparator;
                    allProps.put(EXECUTOR_EXTRA_CLASSPATH, extraExecutorClasspath + hiveHadoopTestClasspath);
                }
            }
        }

        Writer writer = new OutputStreamWriter(new FileOutputStream(properties), Charsets.UTF_8);
        try {
            allProps.store(writer, "Spark Context configuration");
        } finally {
            writer.close();
        }

        // Define how to pass options to the child process. If launching in client (or local)
        // mode, the driver options need to be passed directly on the command line. Otherwise,
        // SparkSubmit will take care of that for us.
        String master = conf.get("spark.master");
        Preconditions.checkArgument(master != null, "spark.master is not defined.");
        String deployMode = conf.get("spark.submit.deployMode");

        List<String> argv = Lists.newLinkedList();

        if (sparkHome != null) {
            argv.add(new File(sparkHome, "bin/spark-submit").getAbsolutePath());
        } else {
            LOG.info("No spark.home provided, calling SparkSubmit directly.");
            argv.add(new File(System.getProperty("java.home"), "bin/java").getAbsolutePath());

            if (master.startsWith("local") || master.startsWith("mesos")
                    || SparkClientUtilities.isYarnClientMode(master, deployMode)
                    || master.startsWith("spark")) {
                String mem = conf.get("spark.driver.memory");
                if (mem != null) {
                    argv.add("-Xms" + mem);
                    argv.add("-Xmx" + mem);
                }
                String cp = conf.get("spark.driver.extraClassPath");
                if (cp != null) {
                    argv.add("-classpath");
                    argv.add(cp);
                }
                String libPath = conf.get("spark.driver.extraLibPath");
                if (libPath != null) {
                    argv.add("-Djava.library.path=" + libPath);
                }
                String extra = conf.get(DRIVER_OPTS_KEY);
                if (extra != null) {
                    for (String opt : extra.split("[ ]")) {
                        if (!opt.trim().isEmpty()) {
                            argv.add(opt.trim());
                        }
                    }
                }
            }
            argv.add("org.apache.spark.deploy.SparkSubmit");
        }

        if (SparkClientUtilities.isYarnClusterMode(master, deployMode)) {
            String executorCores = conf.get("spark.executor.cores");
            if (executorCores != null) {
                argv.add("--executor-cores");
                argv.add(executorCores);
            }
            String executorMemory = conf.get("spark.executor.memory");
            if (executorMemory != null) {
                argv.add("--executor-memory");
                argv.add(executorMemory);
            }
            String numOfExecutors = conf.get("spark.executor.instances");
            if (numOfExecutors != null) {
                argv.add("--num-executors");
                argv.add(numOfExecutors);
            }
        }

        // The options --principal/--keytab do not work with --proxy-user in spark-submit.sh
        // (see HIVE-15485, SPARK-5493, SPARK-19143), so Hive could only support doAs or
        // delegation token renewal, but not both. Since doAs is a more common case, if both
        // are needed, we choose to favor doAs. So when doAs is enabled, we use the kinit command;
        // otherwise, we pass the principal/keytab to Spark to support the token renewal for
        // long-running applications.
        if ("kerberos".equals(hiveConf.get(HADOOP_SECURITY_AUTHENTICATION))) {
            String principal = SecurityUtil.getServerPrincipal(
                    hiveConf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL), "0.0.0.0");
            String keyTabFile = hiveConf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB);
            if (StringUtils.isNotBlank(principal) && StringUtils.isNotBlank(keyTabFile)) {
                if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS)) {
                    List<String> kinitArgv = Lists.newLinkedList();
                    kinitArgv.add("kinit");
                    kinitArgv.add(principal);
                    kinitArgv.add("-k");
                    kinitArgv.add("-t");
                    kinitArgv.add(keyTabFile + ";");
                    kinitArgv.addAll(argv);
                    argv = kinitArgv;
                } else {
                    // If doAs is not enabled, we pass the principal/keytab to spark-submit in order to
                    // support the possible delegation token renewal in Spark.
                    argv.add("--principal");
                    argv.add(principal);
                    argv.add("--keytab");
                    argv.add(keyTabFile);
                }
            }
        }
        if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS)) {
            try {
                String currentUser = Utils.getUGI().getShortUserName();
                // Do not do impersonation in CLI mode.
                if (!currentUser.equals(System.getProperty("user.name"))) {
                    LOG.info("Attempting impersonation of " + currentUser);
                    argv.add("--proxy-user");
                    argv.add(currentUser);
                }
            } catch (Exception e) {
                String msg = "Cannot obtain username: " + e;
                throw new IllegalStateException(msg, e);
            }
        }

        argv.add("--properties-file");
        argv.add(properties.getAbsolutePath());
        argv.add("--class");
        argv.add(RemoteDriver.class.getName());

        String jar = "spark-internal";
        if (SparkContext.jarOfClass(this.getClass()).isDefined()) {
            jar = SparkContext.jarOfClass(this.getClass()).get();
        }
        argv.add(jar);

        argv.add("--remote-host");
        argv.add(serverAddress);
        argv.add("--remote-port");
        argv.add(serverPort);

        // hive.spark.* keys are passed down to the RemoteDriver via --conf,
        // as --properties-file contains the spark.* keys that are meant for the SparkConf object.
        for (String hiveSparkConfKey : RpcConfiguration.HIVE_SPARK_RSC_CONFIGS) {
            String value = RpcConfiguration.getValue(hiveConf, hiveSparkConfKey);
            argv.add("--conf");
            argv.add(String.format("%s=%s", hiveSparkConfKey, value));
        }

        String cmd = Joiner.on(" ").join(argv);
        LOG.info("Running client driver with argv: {}", cmd);
        ProcessBuilder pb = new ProcessBuilder("sh", "-c", cmd);

        // Prevent hive configurations from being visible in Spark.
        pb.environment().remove("HIVE_HOME");
        pb.environment().remove("HIVE_CONF_DIR");
        // Add the credential provider password to the child process's environment.
        // In case of Spark, the credential provider location is provided in the jobConf when the job is submitted.
        String password = getSparkJobCredentialProviderPassword();
        if (password != null) {
            pb.environment().put(Constants.HADOOP_CREDENTIAL_PASSWORD_ENVVAR, password);
        }
        if (isTesting != null) {
            pb.environment().put("SPARK_TESTING", isTesting);
        }

        final Process child = pb.start();
        String threadName = Thread.currentThread().getName();
        final List<String> childErrorLog = Collections.synchronizedList(new ArrayList<String>());
        redirect("RemoteDriver-stdout-redir-" + threadName, new Redirector(child.getInputStream()));
        redirect("RemoteDriver-stderr-redir-" + threadName,
                new Redirector(child.getErrorStream(), childErrorLog));

        runnable = new Runnable() {
            @Override
            public void run() {
                try {
                    int exitCode = child.waitFor();
                    if (exitCode != 0) {
                        StringBuilder errStr = new StringBuilder();
                        synchronized (childErrorLog) {
                            Iterator iter = childErrorLog.iterator();
                            while (iter.hasNext()) {
                                errStr.append(iter.next());
                                errStr.append('\n');
                            }
                        }
                        LOG.warn("Child process exited with code {}", exitCode);
                        rpcServer.cancelClient(clientId,
                                "Child process (spark-submit) exited before connecting back with error log "
                                        + errStr.toString());
                    }
                } catch (InterruptedException ie) {
                    LOG.warn("Thread waiting on the child process (spark-submit) is interrupted, killing the child process.");
                    rpcServer.cancelClient(clientId,
                            "Thread waiting on the child porcess (spark-submit) is interrupted");
                    Thread.interrupted();
                    child.destroy();
                } catch (Exception e) {
                    String errMsg = "Exception while waiting for child process (spark-submit)";
                    LOG.warn(errMsg, e);
                    rpcServer.cancelClient(clientId, errMsg);
                }
            }
        };
    }

    Thread thread = new Thread(runnable);
    thread.setDaemon(true);
    thread.setName("Driver");
    thread.start();
    return thread;
}