Usage examples for java.util.concurrent.Executors.newScheduledThreadPool

public static ScheduledExecutorService newScheduledThreadPool(int corePoolSize)

Creates a thread pool that can schedule commands to run after a given delay, or to execute periodically; corePoolSize is the number of threads to keep in the pool, even if they are idle.
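Before the project examples, here is a minimal, self-contained sketch of the basic call patterns (the task bodies and timings are illustrative, not taken from any of the projects below):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class NewScheduledThreadPoolDemo {
    public static void main(String[] args) throws InterruptedException {
        // Pool with 2 core threads; core threads stay alive even when idle.
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(2);

        // Run once, 500 ms from now.
        scheduler.schedule(() -> System.out.println("one-shot"), 500, TimeUnit.MILLISECONDS);

        // Run repeatedly: first immediately, then every 200 ms measured between start times.
        scheduler.scheduleAtFixedRate(() -> System.out.println("periodic"), 0, 200, TimeUnit.MILLISECONDS);

        Thread.sleep(1000);
        // Always shut the pool down; its non-daemon threads otherwise keep the JVM alive.
        scheduler.shutdown();
        scheduler.awaitTermination(5, TimeUnit.SECONDS);
    }
}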
From source file:com.barchart.netty.server.http.TestHttpServer.java
@Test(expected = HttpHostConnectException.class)
public void testKill() throws Exception {
    final ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
    executor.schedule(new Runnable() {
        @Override
        public void run() {
            server.kill();
        }
    }, 500, TimeUnit.MILLISECONDS);

    final HttpGet get = new HttpGet("http://localhost:" + port + "/client-disconnect");

    // Should throw exception
    client.execute(get).getEntity();
}
From source file:edu.umass.cs.nio.MessageNIOTransport.java
@SuppressWarnings("unchecked")
public static void main(String[] args) {
    int msgNum = 0;
    int port = 2000;
    int nNodes = 100;
    SampleNodeConfig<Integer> snc = new SampleNodeConfig<Integer>(port);
    snc.localSetup(nNodes + 2);
    MessageExtractor[] workers = new MessageExtractor[nNodes + 1];
    for (int i = 0; i < nNodes + 1; i++) {
        workers[i] = new MessageExtractor(new PacketDemultiplexerDefault());
    }
    MessageNIOTransport<?, ?>[] niots = new MessageNIOTransport[nNodes];

    try {
        int smallNNodes = 2;
        for (int i = 0; i < smallNNodes; i++) {
            niots[i] = new MessageNIOTransport<Integer, JSONObject>(i, snc, workers[i]);
            new Thread(niots[i]).start();
        }

        /* Test a few simple hellos. The sleep is there to test that the
         * successive writes do not "accidentally" benefit from concurrency,
         * i.e., to check that OP_WRITE flags will be set correctly. */
        ((MessageNIOTransport<Integer, JSONObject>) niots[1]).sendToIDInternal(0,
                JSONify(msgNum++, "Hello from 1 to 0"));
        ((MessageNIOTransport<Integer, JSONObject>) niots[0]).sendToIDInternal(1,
                JSONify(msgNum++, "Hello back from 0 to 1"));
        ((MessageNIOTransport<Integer, JSONObject>) niots[0]).sendToIDInternal(1,
                JSONify(msgNum++, "Second hello back from 0 to 1"));
        try {
            Thread.sleep(1000);
        } catch (Exception e) {
            e.printStackTrace();
        }
        ((MessageNIOTransport<Integer, JSONObject>) niots[0]).sendToIDInternal(1,
                JSONify(msgNum++, "Third hello back from 0 to 1"));
        ((MessageNIOTransport<Integer, JSONObject>) niots[1]).sendToIDInternal(0,
                JSONify(msgNum++, "Thank you for all the hellos back from 1 to 0"));

        int seqTestNum = 1;
        Thread.sleep(2000);
        System.out.println("\n\n\nBeginning test of " + seqTestNum + " random, sequential messages");
        Thread.sleep(1000);

        // Create the remaining nodes up to nNodes
        for (int i = smallNNodes; i < nNodes; i++) {
            niots[i] = new MessageNIOTransport<Integer, JSONObject>(i, snc, workers[i]);
            new Thread(niots[i]).start();
        }

        // Test a random, sequential communication pattern
        for (int i = 0; i < nNodes * seqTestNum; i++) {
            int k = (int) (Math.random() * nNodes);
            int j = (int) (Math.random() * nNodes);
            System.out.println("Message " + i + " with msgNum " + msgNum);
            ((MessageNIOTransport<Integer, JSONObject>) niots[k]).sendToIDInternal(j,
                    JSONify(msgNum++, "Hello from " + k + " to " + j));
        }

        int oneToOneTestNum = 1;
        Thread.sleep(1000);
        System.out.println("\n\n\nBeginning test of " + oneToOneTestNum * nNodes
                + " random, concurrent, 1-to-1 messages with emulated delays");
        Thread.sleep(1000);

        // Random, concurrent communication pattern with emulated delays
        ScheduledExecutorService execpool = Executors.newScheduledThreadPool(5);
        class TX extends TimerTask {
            MessageNIOTransport<Integer, JSONObject> sndr = null;
            private int rcvr = -1;
            int msgNum = -1;

            TX(int i, int id, MessageNIOTransport<?, ?>[] n, int m) {
                sndr = (MessageNIOTransport<Integer, JSONObject>) n[i];
                rcvr = id;
                msgNum = m;
            }

            TX(MessageNIOTransport<Integer, JSONObject> niot, int id, int m) {
                sndr = niot;
                rcvr = id;
                msgNum = m;
            }

            public void run() {
                try {
                    sndr.sendToIDInternal(rcvr, JSONify(msgNum, "Hello from " + sndr.myID + " to " + rcvr));
                } catch (IOException e) {
                    e.printStackTrace();
                } catch (JSONException e) {
                    e.printStackTrace();
                }
            }
        }
        JSONDelayEmulator.emulateDelays();

        MessageNIOTransport<Integer, JSONObject> concurrentSender =
                new MessageNIOTransport<Integer, JSONObject>(nNodes, snc, workers[nNodes]);
        new Thread(concurrentSender).start();
        ScheduledFuture<?>[] futuresRandom = new ScheduledFuture[nNodes * oneToOneTestNum];
        for (int i = 0; i < nNodes * oneToOneTestNum; i++) {
            TX task = new TX(concurrentSender, 0, msgNum++);
            System.out.println("Scheduling random message " + i + " with msgNum " + msgNum);
            futuresRandom[i] = execpool.schedule(task, 0, TimeUnit.MILLISECONDS);
        }
        for (int i = 0; i < nNodes * oneToOneTestNum; i++) {
            try {
                futuresRandom[i].get();
            } catch (Exception e) {
                e.printStackTrace();
            }
        }

        Thread.sleep(1000);
        System.out.println(
                "\n\n\nBeginning test of random, concurrent, " + " any-to-any messages with emulated delays");
        Thread.sleep(1000);

        int load = nNodes * 25;
        int msgsToFailed = 0;
        ScheduledFuture<?>[] futures = new ScheduledFuture[load];
        for (int i = 0; i < load; i++) {
            int k = (int) (Math.random() * nNodes);
            int j = (int) (Math.random() * nNodes);
            // long millis = (long)(Math.random()*1000);
            if (i % 100 == 0) {
                // Periodically try sending to a non-existent node
                j = nNodes + 1;
                msgsToFailed++;
            }
            TX task = new TX(k, j, niots, msgNum++);
            System.out.println("Scheduling random message " + i + " with msgNum " + msgNum);
            futures[i] = (ScheduledFuture<?>) execpool.schedule(task, 0, TimeUnit.MILLISECONDS);
        }
        int numExceptions = 0;
        for (int i = 0; i < load; i++) {
            try {
                futures[i].get();
            } catch (Exception e) {
                // e.printStackTrace();
                numExceptions++;
            }
        }

        Thread.sleep(2000);
        System.out.println("\n\n\nPrinting overall stats. Number of exceptions = " + numExceptions);
        System.out.println((new NIOInstrumenter() + "\n"));
        boolean pending = false;
        for (int i = 0; i < nNodes; i++) {
            if (niots[i].getPendingSize() > 0) {
                System.out.println("Pending messages at node " + i + " : " + niots[i].getPendingSize());
                pending = true;
            }
        }
        int missing = NIOInstrumenter.getMissing();
        assert (pending == false || missing == msgsToFailed) : "Unsent pending messages in NIO";
        for (NIOTransport<?> niot : niots) {
            niot.stop();
        }
        concurrentSender.stop();
        execpool.shutdown();

        if (!pending || missing == msgsToFailed) {
            System.out.println("\nSUCCESS: no pending messages to non-failed nodes!");
        }
    } catch (IOException e) {
        e.printStackTrace();
    } catch (Exception e) {
        e.printStackTrace();
    }
}
From source file:org.apache.myriad.Main.java
private void initTerminatorService(Injector injector) {
    LOGGER.info("Initializing Terminator");
    terminatorService = Executors.newScheduledThreadPool(1);
    final int initialDelay = 100;
    final int period = 2000;
    terminatorService.scheduleAtFixedRate(injector.getInstance(TaskTerminator.class),
            initialDelay, period, TimeUnit.MILLISECONDS);
}
From source file:org.ohmage.reporting.AuditReporter.java
/**
 * Starts a timer task to generate a report at the beginning of every day.
 */
private AuditReporter() {
    try {
        // Get the location to save the audit logs.
        saveLocation = PreferenceCache.instance().lookup(PreferenceCache.KEY_AUDIT_LOG_LOCATION);
    } catch (CacheMissException e) {
        throw new IllegalStateException(
                "The audit log location is missing: " + PreferenceCache.KEY_AUDIT_LOG_LOCATION, e);
    }

    try {
        // If it doesn't exist, create it. If it does exist, make sure it's a
        // directory.
        File saveFolder = new File(saveLocation);
        if (!saveFolder.exists()) {
            saveFolder.mkdir();
        } else if (!saveFolder.isDirectory()) {
            throw new IllegalStateException(
                    "The directory that is to be used for saving the audit reports exists but isn't a directory: "
                            + saveLocation);
        }
    } catch (SecurityException e) {
        throw new IllegalStateException("We are not allowed to read or write in the specified directory.", e);
    }

    // Generate the number of milliseconds until the first run.
    Calendar firstRun = Calendar.getInstance();
    // Fast-forward to the beginning of the next day.
    firstRun.add(Calendar.DAY_OF_YEAR, 1);
    // Reset the hours, minutes, seconds, and milliseconds.
    firstRun.set(Calendar.HOUR_OF_DAY, 0);
    firstRun.set(Calendar.MINUTE, 0);
    firstRun.set(Calendar.SECOND, 0);
    firstRun.set(Calendar.MILLISECOND, 0);

    // Calculate the time between now and when the task should first run.
    long initialDelay = firstRun.getTimeInMillis() - Calendar.getInstance().getTimeInMillis();

    // Begin the task.
    Executors.newScheduledThreadPool(THREAD_POOL_SIZE).scheduleAtFixedRate(new GenerateReport(),
            initialDelay, MILLIS_IN_A_DAY, TimeUnit.MILLISECONDS);
}
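Two things are worth flagging in this constructor. First, the ScheduledExecutorService returned by Executors.newScheduledThreadPool is never stored, so the pool can never be shut down cleanly; keeping the reference is the usual practice. Second, the Calendar arithmetic for "delay until next midnight" has a more direct java.time equivalent. The sketch below is illustrative only (MidnightScheduler and its method names are not from the ohmage source):

import java.time.Duration;
import java.time.LocalDate;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class MidnightScheduler {
    private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);

    public void start(Runnable report) {
        ZoneId zone = ZoneId.systemDefault();
        ZonedDateTime now = ZonedDateTime.now(zone);
        // Start of the next calendar day in the local time zone.
        ZonedDateTime nextMidnight = LocalDate.now(zone).plusDays(1).atStartOfDay(zone);
        long initialDelayMillis = Duration.between(now, nextMidnight).toMillis();
        // Note: a fixed 24 h period drifts across DST transitions; rescheduling
        // the next run at the end of each run would track calendar days exactly.
        scheduler.scheduleAtFixedRate(report, initialDelayMillis,
                TimeUnit.DAYS.toMillis(1), TimeUnit.MILLISECONDS);
    }

    // Keeping the scheduler reference allows a clean shutdown, unlike the example above.
    public void stop() {
        scheduler.shutdown();
    }
}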
From source file:de.zib.scalaris.examples.wikipedia.bliki.WikiServlet.java
/**
 * Starts the service updating the bloom filter for existing pages.
 */
protected void startExistingPagesUpdate() {
    final int rebuildDelay = Options.getInstance().WIKI_REBUILD_PAGES_CACHE;
    if (rebuildDelay > 0) {
        updateExistingPages();
        ScheduledExecutorService ses = Executors.newScheduledThreadPool(1);
        ses.scheduleWithFixedDelay(new Runnable() {
            @Override
            public void run() {
                updateExistingPages();
            }
        }, rebuildDelay, rebuildDelay, TimeUnit.SECONDS);
    }
}
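Both scheduleWithFixedDelay (used here) and scheduleAtFixedRate (used in the Myriad examples) repeat a task, but they measure the interval differently, which matters once a run can take longer than the period. A minimal sketch of the contrast, with a stand-in task replacing updateExistingPages():

ScheduledExecutorService ses = Executors.newScheduledThreadPool(1);
Runnable task = () -> System.out.println("rebuild"); // stand-in for updateExistingPages()

// scheduleAtFixedRate: the period is measured between *start* times. If a run
// overruns, later runs fire late, but two runs are never executed concurrently.
ses.scheduleAtFixedRate(task, 10, 10, TimeUnit.SECONDS);

// scheduleWithFixedDelay: the delay is measured from the *end* of one run to
// the start of the next, so a slow rebuild simply pushes the schedule back.
// That is the safer choice for variable-cost work like this cache rebuild.
ses.scheduleWithFixedDelay(task, 10, 10, TimeUnit.SECONDS);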
From source file:org.apache.myriad.Main.java
private void initRebalancerService(MyriadConfiguration cfg, Injector injector) {
    if (cfg.isRebalancer()) {
        LOGGER.info("Initializing Rebalancer");
        rebalancerService = Executors.newScheduledThreadPool(1);
        final int initialDelay = 100;
        final int period = 5000;
        rebalancerService.scheduleAtFixedRate(injector.getInstance(Rebalancer.class),
                initialDelay, period, TimeUnit.MILLISECONDS);
    } else {
        LOGGER.info("Rebalancer is not turned on");
    }
}
From source file:com.stimulus.archiva.index.VolumeIndex.java
public void startup() {
    logger.debug("volumeindex is starting up");
    File lockFile = new File(volume.getIndexPath() + File.separatorChar + "write.lock");
    if (lockFile.exists()) {
        logger.warn(
                "The server lock file already exists. Either another indexer is running or the server was not shutdown correctly.");
        logger.warn(
                "If it is the latter, the lock file must be manually deleted at " + lockFile.getAbsolutePath());
        logger.warn(
                "index lock file detected. the server was shutdown incorrectly. automatically deleting lock file.");
        logger.warn("indexer is configured to deal with only one indexer process.");
        logger.warn("if you are running more than one indexer, your index could be subject to corruption.");
        lockFile.delete();
    }
    scheduler = Executors.newScheduledThreadPool(1);
    scheduledTask = scheduler.scheduleWithFixedDelay(new TimerTask() {
        @Override
        public void run() {
            closeIndex();
        }
    }, indexOpenTime, indexOpenTime, TimeUnit.MILLISECONDS);
    Runtime.getRuntime().addShutdownHook(this);
}
From source file:com.alibaba.jstorm.daemon.worker.WorkerData.java
@SuppressWarnings({ "rawtypes", "unchecked" })
public WorkerData(Map conf, IContext context, String topology_id, String supervisor_id, int port,
        String worker_id, String jar_path) throws Exception {
    this.conf = conf;
    this.context = context;
    this.topologyId = topology_id;
    this.supervisorId = supervisor_id;
    this.port = port;
    this.workerId = worker_id;

    this.shutdown = new AtomicBoolean(false);
    this.monitorEnable = new AtomicBoolean(true);
    this.topologyStatus = StatusType.active;

    if (StormConfig.cluster_mode(conf).equals("distributed")) {
        String pidDir = StormConfig.worker_pids_root(conf, worker_id);
        JStormServerUtils.createPid(pidDir);
    }

    // create zk interface
    this.zkClusterstate = ZkTool.mk_distributed_cluster_state(conf);
    this.zkCluster = Cluster.mk_storm_cluster_state(zkClusterstate);

    Map rawConf = StormConfig.read_supervisor_topology_conf(conf, topology_id);
    this.stormConf = new HashMap<Object, Object>();
    this.stormConf.putAll(conf);
    this.stormConf.putAll(rawConf);

    JStormMetrics.setTopologyId(topology_id);
    JStormMetrics.setPort(port);
    JStormMetrics.setDebug(ConfigExtension.isEnableMetricDebug(stormConf));
    JStormMetrics.setEnabled(ConfigExtension.isEnableMetrics(stormConf));
    JStormMetrics.addDebugMetrics(ConfigExtension.getDebugMetricNames(stormConf));
    AsmMetric.setSampleRate(ConfigExtension.getMetricSampleRate(stormConf));

    ConfigExtension.setLocalSupervisorId(stormConf, supervisorId);
    ConfigExtension.setLocalWorkerId(stormConf, workerId);
    ConfigExtension.setLocalWorkerPort(stormConf, port);
    ControlMessage.setPort(port);

    JStormMetrics.registerWorkerTopologyMetric(
            JStormMetrics.workerMetricName(MetricDef.CPU_USED_RATIO, MetricType.GAUGE),
            new AsmGauge(new Gauge<Double>() {
                @Override
                public Double getValue() {
                    return JStormUtils.getCpuUsage();
                }
            }));

    JStormMetrics.registerWorkerTopologyMetric(
            JStormMetrics.workerMetricName(MetricDef.MEMORY_USED, MetricType.GAUGE),
            new AsmGauge(new Gauge<Double>() {
                @Override
                public Double getValue() {
                    return JStormUtils.getMemUsage();
                }
            }));

    JStormMetrics.registerWorkerMetric(
            JStormMetrics.workerMetricName(MetricDef.DISK_USAGE, MetricType.GAUGE),
            new AsmGauge(new Gauge<Double>() {
                @Override
                public Double getValue() {
                    return JStormUtils.getDiskUsage();
                }
            }));

    LOG.info("Worker Configuration " + stormConf);

    try {
        boolean enableClassloader = ConfigExtension.isEnableTopologyClassLoader(stormConf);
        boolean enableDebugClassloader = ConfigExtension.isEnableClassloaderDebug(stormConf);

        if (jar_path == null && enableClassloader == true
                && !conf.get(Config.STORM_CLUSTER_MODE).equals("local")) {
            LOG.error("enable classloader, but not app jar");
            throw new InvalidParameterException();
        }

        URL[] urlArray = new URL[0];
        if (jar_path != null) {
            String[] paths = jar_path.split(":");
            Set<URL> urls = new HashSet<URL>();
            for (String path : paths) {
                if (StringUtils.isBlank(path))
                    continue;
                URL url = new URL("File:" + path);
                urls.add(url);
            }
            urlArray = urls.toArray(new URL[0]);
        }

        WorkerClassLoader.mkInstance(urlArray, ClassLoader.getSystemClassLoader(),
                ClassLoader.getSystemClassLoader().getParent(), enableClassloader, enableDebugClassloader);
    } catch (Exception e) {
        LOG.error("init jarClassLoader error!", e);
        throw new InvalidParameterException();
    }

    if (this.context == null) {
        this.context = TransportFactory.makeContext(stormConf);
    }

    boolean disruptorUseSleep = ConfigExtension.isDisruptorUseSleep(stormConf);
    DisruptorQueue.setUseSleep(disruptorUseSleep);
    boolean isLimited = ConfigExtension.getTopologyBufferSizeLimited(stormConf);
    DisruptorQueue.setLimited(isLimited);
    LOG.info("Disruptor use sleep:" + disruptorUseSleep + ", limited size:" + isLimited);

    // this.transferQueue = new LinkedBlockingQueue<TransferData>();
    int buffer_size = Utils.getInt(stormConf.get(Config.TOPOLOGY_TRANSFER_BUFFER_SIZE));
    WaitStrategy waitStrategy = (WaitStrategy) JStormUtils.createDisruptorWaitStrategy(stormConf);
    this.transferQueue = DisruptorQueue.mkInstance("TotalTransfer", ProducerType.MULTI, buffer_size,
            waitStrategy);
    this.transferQueue.consumerStarted();
    this.sendingQueue = DisruptorQueue.mkInstance("TotalSending", ProducerType.MULTI, buffer_size,
            waitStrategy);
    this.sendingQueue.consumerStarted();

    this.nodeportSocket = new ConcurrentHashMap<WorkerSlot, IConnection>();
    this.taskNodeport = new ConcurrentHashMap<Integer, WorkerSlot>();
    this.workerToResource = new ConcurrentSkipListSet<ResourceWorkerSlot>();
    this.innerTaskTransfer = new ConcurrentHashMap<Integer, DisruptorQueue>();
    this.deserializeQueues = new ConcurrentHashMap<Integer, DisruptorQueue>();
    this.tasksToComponent = new ConcurrentHashMap<Integer, String>();
    this.componentToSortedTasks = new ConcurrentHashMap<String, List<Integer>>();

    Assignment assignment = zkCluster.assignment_info(topologyId, null);
    if (assignment == null) {
        String errMsg = "Failed to get Assignment of " + topologyId;
        LOG.error(errMsg);
        throw new RuntimeException(errMsg);
    }
    workerToResource.addAll(assignment.getWorkers());

    // get current worker's task list
    this.taskids = assignment.getCurrentWorkerTasks(supervisorId, port);
    if (taskids.size() == 0) {
        throw new RuntimeException("No tasks running current workers");
    }
    LOG.info("Current worker taskList:" + taskids);

    // deserialize topology code from local dir
    rawTopology = StormConfig.read_supervisor_topology_code(conf, topology_id);
    sysTopology = Common.system_topology(stormConf, rawTopology);

    generateMaps();

    contextMaker = new ContextMaker(this);

    outTaskStatus = new ConcurrentHashMap<Integer, Boolean>();

    threadPool = Executors.newScheduledThreadPool(THREAD_POOL_NUM);
    TimerTrigger.setScheduledExecutorService(threadPool);

    if (!StormConfig.local_mode(stormConf)) {
        healthReporterThread = new AsyncLoopThread(new JStormHealthReporter(this));
    }

    try {
        Long tmp = StormConfig.read_supervisor_topology_timestamp(conf, topology_id);
        assignmentTS = (tmp == null ? System.currentTimeMillis() : tmp);
    } catch (FileNotFoundException e) {
        assignmentTS = System.currentTimeMillis();
    }

    outboundTasks = new HashSet<Integer>();

    LOG.info("Successfully create WorkerData");
}
From source file:edu.umass.cs.gigapaxos.testing.TESTPaxosClient.java
protected TESTPaxosClient(int id, NodeConfig<Integer> nc) throws IOException {
    this.myID = id;
    this.nc = (nc == null ? TESTPaxosConfig.getNodeConfig() : nc);
    niot = (new MessageNIOTransport<Integer, Object>(id, this.nc, (new ClientPacketDemultiplexer(this)), true,
            SSLDataProcessingWorker.SSL_MODES.valueOf(Config.getGlobalString(PC.CLIENT_SSL_MODE))));
    this.timer = new Timer(TESTPaxosClient.class.getSimpleName() + myID);
    synchronized (TESTPaxosClient.class) {
        if (executor == null || executor.isShutdown())
            // one extra thread for response tracker
            executor = (ScheduledThreadPoolExecutor) Executors
                    .newScheduledThreadPool(Config.getGlobalInt(TC.NUM_CLIENTS) + 1);
    }
}
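A side note on the cast above: Executors.newScheduledThreadPool is declared to return a ScheduledExecutorService, and the cast to ScheduledThreadPoolExecutor only succeeds because current JDKs happen to implement the factory with that class. When the concrete type is wanted (for tuning hooks it alone exposes), constructing it directly avoids relying on that implementation detail; a small sketch, reusing the pool-size expression from the example:

// java.util.concurrent.ScheduledThreadPoolExecutor, constructed directly
ScheduledThreadPoolExecutor executor =
        new ScheduledThreadPoolExecutor(Config.getGlobalInt(TC.NUM_CLIENTS) + 1);
// Concrete-type tuning not visible through the ScheduledExecutorService interface:
executor.setRemoveOnCancelPolicy(true);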
From source file:com.jivesoftware.os.amza.deployable.Main.java
public void run(String[] args) throws Exception {
    String hostname = args[0];
    String clusterName = (args.length > 1 ? args[1] : "unnamed");
    String hostPortPeers = (args.length > 2 ? args[2] : null);

    int port = Integer.parseInt(System.getProperty("amza.port", "1175"));
    String multicastGroup = System.getProperty("amza.discovery.group", "225.4.5.6");
    int multicastPort = Integer.parseInt(System.getProperty("amza.discovery.port", "1223"));
    String logicalName = System.getProperty("amza.logicalName", hostname + ":" + port);

    String datacenter = System.getProperty("host.datacenter", "unknownDatacenter");
    String rack = System.getProperty("host.rack", "unknownRack");

    RingMember ringMember = new RingMember(logicalName);
    RingHost ringHost = new RingHost(datacenter, rack, hostname, port);

    // todo need a better way to create writer id.
    int writerId = Integer.parseInt(System.getProperty("amza.id", String.valueOf(new Random().nextInt(512))));

    SnowflakeIdPacker idPacker = new SnowflakeIdPacker();
    JiveEpochTimestampProvider timestampProvider = new JiveEpochTimestampProvider();
    final TimestampedOrderIdProvider orderIdProvider = new OrderIdProviderImpl(
            new ConstantWriterIdProvider(writerId), idPacker, timestampProvider);

    final ObjectMapper mapper = new ObjectMapper();
    mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
    mapper.configure(SerializationFeature.INDENT_OUTPUT, false);

    final AmzaServiceConfig amzaServiceConfig = new AmzaServiceConfig();
    final AmzaStats amzaSystemStats = new AmzaStats();
    final AmzaStats amzaStats = new AmzaStats();
    final SickThreads sickThreads = new SickThreads();
    final SickPartitions sickPartitions = new SickPartitions();

    AtomicInteger systemRingSize = new AtomicInteger(-1);
    amzaServiceConfig.workingDirectories = System.getProperty("amza.working.dirs", "./data1,./data2,./data3")
            .split(",");
    amzaServiceConfig.systemRingSize = Integer.parseInt(System.getProperty("amza.system.ring.size", "-1"));
    if (amzaServiceConfig.systemRingSize > 0) {
        systemRingSize.set(amzaServiceConfig.systemRingSize);
    }

    AmzaInterner amzaInterner = new AmzaInterner();
    PartitionPropertyMarshaller partitionPropertyMarshaller = new PartitionPropertyMarshaller() {
        @Override
        public PartitionProperties fromBytes(byte[] bytes) {
            try {
                return mapper.readValue(bytes, PartitionProperties.class);
            } catch (IOException ex) {
                throw new RuntimeException(ex);
            }
        }

        @Override
        public byte[] toBytes(PartitionProperties partitionProperties) {
            try {
                return mapper.writeValueAsBytes(partitionProperties);
            } catch (JsonProcessingException ex) {
                throw new RuntimeException(ex);
            }
        }
    };

    // hmmm
    LABPointerIndexConfig labConfig = BindInterfaceToConfiguration.bindDefault(LABPointerIndexConfig.class);
    labConfig.setLeapCacheMaxCapacity(
            Integer.parseInt(System.getProperty("amza.leap.cache.max.capacity", "1000000")));

    BinaryPrimaryRowMarshaller primaryRowMarshaller = new BinaryPrimaryRowMarshaller(); // hehe you cant change this :)
    BinaryHighwaterRowMarshaller highwaterRowMarshaller = new BinaryHighwaterRowMarshaller(amzaInterner);

    AtomicReference<Callable<RingTopology>> topologyProvider = new AtomicReference<>(); // bit of a hack
    InstanceDescriptor instanceDescriptor = new InstanceDescriptor(datacenter, rack, "", "", "", "", "", "",
            "", "", 0, "", "", "", 0L, true);
    ConnectionDescriptorsProvider connectionsProvider = (connectionDescriptorsRequest,
            expectedReleaseGroup) -> {
        try {
            RingTopology systemRing = topologyProvider.get().call();
            List<ConnectionDescriptor> descriptors = Lists.newArrayList(Iterables.transform(systemRing.entries,
                    input -> new ConnectionDescriptor(instanceDescriptor, false, false,
                            new HostPort(input.ringHost.getHost(), input.ringHost.getPort()),
                            Collections.emptyMap(), Collections.emptyMap())));
            return new ConnectionDescriptorsResponse(200, Collections.emptyList(), "", descriptors,
                    connectionDescriptorsRequest.getRequestUuid());
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    };
    TenantsServiceConnectionDescriptorProvider<String> connectionPoolProvider =
            new TenantsServiceConnectionDescriptorProvider<>(
                    Executors.newScheduledThreadPool(1), "", connectionsProvider, "", "", 10_000); // TODO config
    connectionPoolProvider.start();

    TenantAwareHttpClient<String> httpClient = new TenantRoutingHttpClientInitializer<String>(null)
            .builder(connectionPoolProvider, new HttpDeliveryClientHealthProvider("", null, "", 5000, 100))
            .deadAfterNErrors(10).checkDeadEveryNMillis(10_000).maxConnections(1_000)
            .socketTimeoutInMillis(60_000).build(); // TODO expose to conf

    AvailableRowsTaker availableRowsTaker = new HttpAvailableRowsTaker(httpClient, amzaInterner, mapper); // TODO config

    AquariumStats aquariumStats = new AquariumStats();

    AmzaService amzaService = new AmzaServiceInitializer().initialize(amzaServiceConfig, amzaInterner,
            aquariumStats, amzaSystemStats, amzaStats,
            new HealthTimer(CountersAndTimers.getOrCreate("quorumLatency"), "quorumLatency",
                    new NoOpHealthChecker<>("quorumLatency")),
            () -> amzaServiceConfig.systemRingSize, sickThreads, sickPartitions, primaryRowMarshaller,
            highwaterRowMarshaller, ringMember, ringHost, Collections.emptySet(), orderIdProvider, idPacker,
            partitionPropertyMarshaller,
            (workingIndexDirectories, indexProviderRegistry, ephemeralRowIOProvider, persistentRowIOProvider,
                    partitionStripeFunction) -> {
                indexProviderRegistry.register(
                        new BerkeleyDBWALIndexProvider(BerkeleyDBWALIndexProvider.INDEX_CLASS_NAME,
                                partitionStripeFunction, workingIndexDirectories),
                        persistentRowIOProvider);
                indexProviderRegistry.register(
                        new LABPointerIndexWALIndexProvider(amzaInterner, labConfig,
                                Executors.newCachedThreadPool(), Executors.newCachedThreadPool(),
                                Executors.newCachedThreadPool(), Executors.newCachedThreadPool(),
                                LABPointerIndexWALIndexProvider.INDEX_CLASS_NAME, partitionStripeFunction,
                                workingIndexDirectories),
                        persistentRowIOProvider);
            },
            availableRowsTaker,
            () -> {
                return new HttpRowsTaker("system", amzaStats, httpClient, mapper, amzaInterner,
                        Executors.newSingleThreadExecutor(), Executors.newCachedThreadPool());
            },
            () -> {
                return new HttpRowsTaker("striped", amzaStats, httpClient, mapper, amzaInterner,
                        Executors.newSingleThreadExecutor(), Executors.newCachedThreadPool());
            },
            Optional.absent(),
            (changes) -> {
            },
            (threadCount, name) -> {
                return Executors.newCachedThreadPool();
            });

    topologyProvider.set(() -> amzaService.getRingReader().getRing(AmzaRingReader.SYSTEM_RING, -1));

    TailAtScaleStrategy tailAtScaleStrategy = new TailAtScaleStrategy(
            BoundedExecutor.newBoundedExecutor(1024, "tas"),
            100, // TODO config
            95, // TODO config
            1000 // TODO config
    );

    AmzaClientProvider<HttpClient, HttpClientException> clientProvider = new AmzaClientProvider<>(
            new HttpPartitionClientFactory(),
            new HttpPartitionHostsProvider(httpClient, tailAtScaleStrategy, mapper),
            new RingHostHttpClientProvider(httpClient),
            BoundedExecutor.newBoundedExecutor(1024, "amza-client"),
            10_000, // TODO expose to conf
            -1, -1);

    final JerseyEndpoints jerseyEndpoints = new JerseyEndpoints()
            .addEndpoint(AmzaEndpoints.class)
            .addInjectable(AmzaService.class, amzaService)
            .addEndpoint(AmzaReplicationRestEndpoints.class)
            .addInjectable(AmzaInstance.class, amzaService)
            .addEndpoint(AmzaClientRestEndpoints.class)
            .addInjectable(AmzaInterner.class, amzaInterner)
            .addInjectable(ObjectMapper.class, mapper)
            .addInjectable(AmzaClientService.class, new AmzaClientService(amzaService.getRingReader(),
                    amzaService.getRingWriter(), amzaService));

    new AmzaUIInitializer().initialize(clusterName, ringHost, amzaService, clientProvider, aquariumStats,
            amzaStats, timestampProvider, idPacker, amzaInterner,
            new AmzaUIInitializer.InjectionCallback() {
                @Override
                public void addEndpoint(Class clazz) {
                    System.out.println("Adding endpoint=" + clazz);
                    jerseyEndpoints.addEndpoint(clazz);
                }

                @Override
                public void addInjectable(Class clazz, Object instance) {
                    System.out.println("Injecting " + clazz + " " + instance);
                    jerseyEndpoints.addInjectable(clazz, instance);
                }

                @Override
                public void addSessionAuth(String... paths) throws Exception {
                    System.out.println("Ignoring session auth request for paths: " + Arrays.toString(paths));
                }
            });

    InitializeRestfulServer initializeRestfulServer = new InitializeRestfulServer(false, port, "AmzaNode",
            false, null, null, null, 128, 10000);
    initializeRestfulServer.addClasspathResource("/resources");
    initializeRestfulServer.addContextHandler("/", jerseyEndpoints);

    RestfulServer restfulServer = initializeRestfulServer.build();
    restfulServer.start();

    System.out.println("-----------------------------------------------------------------------");
    System.out.println("| Jetty Service Online");
    System.out.println("-----------------------------------------------------------------------");

    amzaService.start(ringMember, ringHost);

    System.out.println("-----------------------------------------------------------------------");
    System.out.println("| Amza Service Online");
    System.out.println("-----------------------------------------------------------------------");

    if (clusterName != null) {
        if (hostPortPeers != null) {
            System.out.println("-----------------------------------------------------------------------");
            System.out.println("| Amza Service is in manual Discovery mode. Cluster Name:" + clusterName);
            String[] peers = hostPortPeers.split(",");
            for (String peer : peers) {
                String[] hostPort = peer.trim().split(":");
                if (hostPort.length != 2 && hostPort.length != 3) {
                    System.out.println("| Malformed peer:" + peer
                            + " expected form: <host>:<port> or <logicalName>:<host>:<port>");
                } else {
                    String peerLogicalName = (hostPort.length == 2) ? hostPort[0] + ":" + hostPort[1]
                            : hostPort[0];
                    String peerHostname = (hostPort.length == 2) ? hostPort[0] : hostPort[1];
                    String peerPort = (hostPort.length == 2) ? hostPort[1] : hostPort[2];
                    RingMember peerRingMember = new RingMember(peerLogicalName);
                    RingHost peerRingHost = new RingHost("unknown", "unknown", peerHostname,
                            Integer.parseInt(peerPort));
                    System.out.println("| Adding ringMember:" + peerRingMember + " on host:" + peerRingHost
                            + " to cluster: " + clusterName);
                    amzaService.getRingWriter().register(peerRingMember, peerRingHost, writerId, false);
                }
            }
            systemRingSize.set(1 + peers.length);
            System.out.println("-----------------------------------------------------------------------");
        } else {
            AmzaDiscovery amzaDiscovery = new AmzaDiscovery(amzaService.getRingReader(),
                    amzaService.getRingWriter(), clusterName, multicastGroup, multicastPort, systemRingSize);
            amzaDiscovery.start();
            System.out.println("-----------------------------------------------------------------------");
            System.out.println("| Amza Service Discovery Online: Cluster Name:" + clusterName);
            System.out.println("-----------------------------------------------------------------------");
        }
    } else {
        System.out.println("-----------------------------------------------------------------------");
        System.out.println("| Amza Service is in manual Discovery mode. No cluster name was specified");
        System.out.println("-----------------------------------------------------------------------");
    }
}