List of usage examples for java.lang.Thread.interrupt()

public void interrupt()

Interrupts the target thread: the thread's interrupt status is set, and if the thread is blocked in Thread.sleep(), Object.wait(), or Thread.join(), that call throws InterruptedException and the status is cleared.
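Before the collected examples, here is a minimal self-contained sketch of the standard interrupt idiom that most of them follow (the class and thread names are illustrative, not taken from any of the projects below): a worker loops until its interrupt status is set, and the caller requests cancellation with interrupt() and then waits with join().

public class InterruptIdiom {
    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            // Loop until the interrupt status is set or sleep() is interrupted.
            while (!Thread.currentThread().isInterrupted()) {
                try {
                    Thread.sleep(200L); // stands in for real work
                } catch (InterruptedException e) {
                    // sleep() clears the interrupt status; restore it and exit.
                    Thread.currentThread().interrupt();
                    break;
                }
            }
            System.out.println("worker exiting");
        }, "worker");

        worker.start();
        Thread.sleep(1000L);
        worker.interrupt(); // request cancellation
        worker.join();      // wait for the worker to finish
    }
}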
From source file:org.sourcepit.docker.watcher.Main.java
public static void main(String[] args) throws IOException {
    final HttpClientFactory clientFactory = new HttpClientFactory() {
        @Override
        public CloseableHttpClient createHttpClient() {
            return HttpClients.createDefault();
        }
    };

    final String dockerDaemonUri = "http://192.168.56.101:2375";
    final String consulAgentUri = "http://192.168.56.101:8500";

    final BlockingQueue<List<JsonObject>> queue = new LinkedBlockingQueue<>();
    final ConsulForwarder consulForwarder = new ConsulForwarder(clientFactory.createHttpClient(), consulAgentUri);

    // Dispatcher thread: drains the queue until interrupted.
    final Thread containerStateDispatcher = new Thread("Consul Forwarder") {
        @Override
        public void run() {
            while (true) {
                try {
                    consulForwarder.forward(queue.take());
                } catch (InterruptedException e) {
                    break; // interrupt() signals shutdown
                } catch (Exception e) {
                    LOG.error("Error while forwarding Docker container state to Consul.", e);
                }
            }
        }
    };
    containerStateDispatcher.start();

    final DockerWatcher watcher = new DockerWatcher(clientFactory, dockerDaemonUri) {
        @Override
        protected void handle(List<JsonObject> containerState) {
            queue.add(containerState);
        }
    };

    // On shutdown, keep interrupting the dispatcher until it dies.
    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            watcher.stop();
            while (containerStateDispatcher.isAlive()) {
                containerStateDispatcher.interrupt();
                try {
                    Thread.sleep(100L);
                } catch (InterruptedException e) {
                }
            }
        }
    });

    watcher.start();
}
From source file:j8583.example.Client.java
public static void main(String[] args) throws Exception {
    Random rng = new Random(System.currentTimeMillis());
    log.debug("Reading config");
    mfact = ConfigParser.createFromClasspathConfig("j8583/example/config.xml");
    mfact.setAssignDate(true);
    mfact.setTraceNumberGenerator(new SimpleTraceGenerator((int) (System.currentTimeMillis() % 10000)));
    System.err.println("Connecting to server");
    Socket sock = new Socket("localhost", 9999);
    // Send 10 messages, then wait for the responses
    Client client = new Client(sock);
    Thread reader = new Thread(client, "j8583-client");
    reader.start();
    for (int i = 0; i < 10; i++) {
        IsoMessage req = mfact.newMessage(0x200);
        req.setValue(4, amounts[rng.nextInt(amounts.length)], IsoType.AMOUNT, 0);
        req.setValue(12, req.getObjectValue(7), IsoType.TIME, 0);
        req.setValue(13, req.getObjectValue(7), IsoType.DATE4, 0);
        req.setValue(15, req.getObjectValue(7), IsoType.DATE4, 0);
        req.setValue(17, req.getObjectValue(7), IsoType.DATE4, 0);
        req.setValue(37, System.currentTimeMillis() % 1000000, IsoType.NUMERIC, 12);
        req.setValue(41, data[rng.nextInt(data.length)], IsoType.ALPHA, 16);
        req.setValue(48, data[rng.nextInt(data.length)], IsoType.LLLVAR, 0);
        pending.put(req.getField(11).toString(), req);
        System.err.println(String.format("Sending request %s", req.getField(11)));
        req.write(sock.getOutputStream(), 2);
    }
    log.debug("Waiting for responses");
    while (pending.size() > 0 && sock.isConnected()) {
        Thread.sleep(500);
    }
    client.stop();
    reader.interrupt(); // signal the reader thread to stop
    log.debug("DONE.");
}
From source file:asl.seedscan.SeedScan.java
public static void main(String args[]) {
    // Default locations of config and schema files
    File configFile = new File("config.xml");
    File schemaFile = new File("schemas/SeedScanConfig.xsd");
    boolean parseConfig = true;

    ArrayList<File> schemaFiles = new ArrayList<File>();
    schemaFiles.add(schemaFile);

    // ==== Command Line Parsing ====
    Options options = new Options();
    Option opConfigFile = new Option("c", "config-file", true,
            "The config file to use for seedscan. XML format according to SeedScanConfig.xsd.");
    Option opSchemaFile = new Option("s", "schema-file", true,
            "The xsd schema file which should be used to verify the config file format. ");
    OptionGroup ogConfig = new OptionGroup();
    ogConfig.addOption(opConfigFile);
    OptionGroup ogSchema = new OptionGroup();
    ogConfig.addOption(opSchemaFile);
    options.addOptionGroup(ogConfig);
    options.addOptionGroup(ogSchema);
    PosixParser optParser = new PosixParser();
    CommandLine cmdLine = null;
    try {
        cmdLine = optParser.parse(options, args, true);
    } catch (org.apache.commons.cli.ParseException e) {
        logger.error("Error while parsing command-line arguments.");
        System.exit(1);
    }
    Option opt;
    Iterator<?> iter = cmdLine.iterator();
    while (iter.hasNext()) {
        opt = (Option) iter.next();
        if (opt.getOpt().equals("c")) {
            configFile = new File(opt.getValue());
        } else if (opt.getOpt().equals("s")) {
            schemaFile = new File(opt.getValue());
        }
    }

    // ==== Configuration Read and Parse Actions ====
    ConfigParser parser = new ConfigParser(schemaFiles);
    ConfigT config = parser.parseConfig(configFile);

    // Print out configuration file contents
    Formatter formatter = new Formatter(new StringBuilder(), Locale.US);

    // ===== CONFIG: LOCK FILE =====
    File lockFile = new File(config.getLockfile());
    logger.info("SeedScan lock file is '" + lockFile + "'");
    LockFile lock = new LockFile(lockFile);
    if (!lock.acquire()) {
        logger.error("Could not acquire lock.");
        System.exit(1);
    }

    // ===== CONFIG: LOGGING =====
    // MTH: This is now done in log4j.properties file

    // ===== CONFIG: DATABASE =====
    MetricDatabase readDB = new MetricDatabase(config.getDatabase());
    MetricDatabase writeDB = new MetricDatabase(config.getDatabase());
    MetricReader reader = new MetricReader(readDB);
    MetricInjector injector = new MetricInjector(writeDB);

    // ===== CONFIG: SCANS =====
    Hashtable<String, Scan> scans = new Hashtable<String, Scan>();
    if (config.getScans().getScan() == null) {
        logger.error("No scans in configuration.");
        System.exit(1);
    } else {
        for (ScanT scanCfg : config.getScans().getScan()) {
            String name = scanCfg.getName();
            if (scans.containsKey(name)) {
                logger.error("Duplicate scan name '" + name + "' encountered.");
                System.exit(1);
            }

            // This should really be handled by jaxb by setting it up in schemas/SeedScanConfig.xsd
            if (scanCfg.getStartDay() == null && scanCfg.getStartDate() == null) {
                logger.error(
                        "== SeedScan Error: Must set EITHER cfg:start_day -OR- cfg:start_date in config.xml to start Scan!");
                System.exit(1);
            }

            // Configure this Scan
            Scan scan = new Scan(scanCfg.getName());
            scan.setPathPattern(scanCfg.getPath());
            scan.setDatalessDir(scanCfg.getDatalessDir());
            scan.setEventsDir(scanCfg.getEventsDir());
            scan.setPlotsDir(scanCfg.getPlotsDir());
            scan.setDaysToScan(scanCfg.getDaysToScan().intValue());
            if (scanCfg.getStartDay() != null) {
                scan.setStartDay(scanCfg.getStartDay().intValue());
            }
            if (scanCfg.getStartDate() != null) {
                scan.setStartDate(scanCfg.getStartDate().intValue());
            }

            if (scanCfg.getNetworkSubset() != null) {
                logger.debug("Filter on Network Subset=[{}]", scanCfg.getNetworkSubset());
                Filter filter = new Filter(false);
                for (String network : scanCfg.getNetworkSubset().split(",")) {
                    logger.debug("Network =[{}]", network);
                    filter.addFilter(network);
                }
                scan.setNetworks(filter);
            }
            if (scanCfg.getStationSubset() != null) {
                logger.debug("Filter on Station Subset=[{}]", scanCfg.getStationSubset());
                Filter filter = new Filter(false);
                for (String station : scanCfg.getStationSubset().split(",")) {
                    logger.debug("Station =[{}]", station);
                    filter.addFilter(station);
                }
                scan.setStations(filter);
            }
            if (scanCfg.getLocationSubset() != null) {
                logger.debug("Filter on Location Subset=[{}]", scanCfg.getLocationSubset());
                Filter filter = new Filter(false);
                for (String location : scanCfg.getLocationSubset().split(",")) {
                    logger.debug("Location =[{}]", location);
                    filter.addFilter(location);
                }
                scan.setLocations(filter);
            }
            if (scanCfg.getChannelSubset() != null) {
                logger.debug("Filter on Channel Subset=[{}]", scanCfg.getChannelSubset());
                Filter filter = new Filter(false);
                for (String channel : scanCfg.getChannelSubset().split(",")) {
                    logger.debug("Channel =[{}]", channel);
                    filter.addFilter(channel);
                }
                scan.setChannels(filter);
            }

            for (MetricT met : scanCfg.getMetrics().getMetric()) {
                try {
                    Class<?> metricClass = Class.forName(met.getClassName());
                    MetricWrapper wrapper = new MetricWrapper(metricClass);
                    for (ArgumentT arg : met.getArgument()) {
                        wrapper.add(arg.getName(), arg.getValue());
                    }
                    scan.addMetric(wrapper);
                } catch (ClassNotFoundException ex) {
                    logger.error("No such metric class '" + met.getClassName() + "'");
                    System.exit(1);
                } catch (InstantiationException ex) {
                    logger.error("Could not dynamically instantiate class '" + met.getClassName() + "'");
                    System.exit(1);
                } catch (IllegalAccessException ex) {
                    logger.error("Illegal access while loading class '" + met.getClassName() + "'");
                    System.exit(1);
                } catch (NoSuchFieldException ex) {
                    logger.error("Invalid dynamic argument to Metric subclass '" + met.getClassName() + "'");
                    System.exit(1);
                }
            }
            scans.put(name, scan);
        }
    }

    // ==== Establish Database Connection ====
    // TODO: State Tracking in the Database
    // - Record scan started in database.
    // - Track our progress as we go so a new process can pick up where
    //   we left off if our process dies.
    // - Mark when each date-station-channel-operation is complete
    //LogDatabaseHandler logDB = new LogDatabaseHandler(configuration.get

    // For each day ((yesterday - scanDepth) to yesterday)
    // scan for these channel files, only process them if
    // they have not yet been scanned, or if changes have
    // occurred to the file since its last scan. Do this for
    // each scan type. Do not re-scan data for each type,
    // launch processes for each scan and use the same data set
    // for each. If we can pipe the data as it is read, do so.
    // If we need to push all of it at once, do these in sequence
    // in order to preserve overall system memory resources.
    Scan scan = null;

    // ==== Perform Scans ====
    scan = scans.get("daily");

    // MTH: This part could/should be moved up higher except that we need to know datalessDir, which,
    //      at this point, is configured on a per scan basis ... so we need to know what scan we're doing
    MetaServer metaServer = null;
    if (config.getMetaserver() != null) {
        if (config.getMetaserver().getUseRemote().equals("yes")
                || config.getMetaserver().getUseRemote().equals("true")) {
            String remoteServer = config.getMetaserver().getRemoteUri();
            try {
                metaServer = new MetaServer(new URI(remoteServer));
            } catch (Exception e) {
                logger.error("caught URI exception:" + e.getMessage());
            }
        } else {
            metaServer = new MetaServer(scan.getDatalessDir());
        }
    } else { // Use local MetaServer
        metaServer = new MetaServer(scan.getDatalessDir());
    }

    List<Station> stations = null;

    if (config.getStationList() == null) { // get StationList from MetaServer
        logger.info("Get StationList from MetaServer");
        stations = metaServer.getStationList();
    } else { // read StationList from config.xml
        logger.info("Read StationList from config.xml");
        List<String> stationList = config.getStationList().getStation();
        if (stationList.size() > 0) {
            stations = new ArrayList<Station>();
            for (String station : stationList) {
                String[] words = station.split("_");
                if (words.length != 2) {
                    logger.warn(String.format("stationList: station=[%s] is NOT a valid station --> Skip", station));
                } else {
                    stations.add(new Station(words[0], words[1]));
                    logger.info("config.xml: Read station:" + station);
                }
            }
        } else {
            logger.error("Error: No valid stations read from config.xml");
        }
    }

    if (stations == null) {
        logger.error("Found NO stations to scan --> EXITING SeedScan");
        System.exit(1);
    }

    Thread readerThread = new Thread(reader);
    readerThread.start();
    logger.info("Reader thread started.");

    Thread injectorThread = new Thread(injector);
    injectorThread.start();
    logger.info("Injector thread started.");

    // Loop over scans and hand each one to a ScanManager
    logger.info("Hand scan to ScanManager");
    for (String key : scans.keySet()) {
        scan = scans.get(key);
        logger.info(String.format("Scan=[%s] startDay=%d startDate=%d daysToScan=%d\n", key, scan.getStartDay(),
                scan.getStartDate(), scan.getDaysToScan()));
        ScanManager scanManager = new ScanManager(reader, injector, stations, scan, metaServer);
    }

    logger.info("ScanManager is [ FINISHED ] --> stop the injector and reader threads");

    try {
        injector.halt();
        logger.info("All stations processed. Waiting for injector thread to finish...");
        synchronized (injectorThread) {
            //injectorThread.wait();
            injectorThread.interrupt();
        }
        logger.info("Injector thread halted.");
    } catch (InterruptedException ex) {
        logger.warn("The injector thread was interrupted while attempting to complete requests.");
    }

    try {
        reader.halt();
        logger.info("All stations processed. Waiting for reader thread to finish...");
        synchronized (readerThread) {
            //readerThread.wait();
            readerThread.interrupt();
        }
        logger.info("Reader thread halted.");
    } catch (InterruptedException ex) {
        logger.warn("The reader thread was interrupted while attempting to complete requests.");
    }

    try {
        lock.release();
    } catch (IOException e) {
    } finally {
        logger.info("Release seedscan lock and quit metaServer");
        lock = null;
        metaServer.quit();
    }
}
From source file:org.apache.helix.filestore.IntegrationTest.java
public static void main(String[] args) throws InterruptedException {
    ZkServer server = null;
    try {
        String baseDir = "/tmp/IntegrationTest/";
        final String dataDir = baseDir + "zk/dataDir";
        final String logDir = baseDir + "/tmp/logDir";
        FileUtils.deleteDirectory(new File(dataDir));
        FileUtils.deleteDirectory(new File(logDir));

        IDefaultNameSpace defaultNameSpace = new IDefaultNameSpace() {
            @Override
            public void createDefaultNameSpace(ZkClient zkClient) {
            }
        };

        int zkPort = 2199;
        final String zkAddress = "localhost:" + zkPort;

        server = new ZkServer(dataDir, logDir, defaultNameSpace, zkPort);
        server.start();

        ClusterSetup setup = new ClusterSetup(zkAddress);
        final String clusterName = "file-store-test";
        setup.deleteCluster(clusterName);
        setup.addCluster(clusterName, true);
        setup.addInstanceToCluster(clusterName, "localhost_12001");
        setup.addInstanceToCluster(clusterName, "localhost_12002");
        setup.addInstanceToCluster(clusterName, "localhost_12003");
        setup.addResourceToCluster(clusterName, "repository", 1, "MasterSlave");
        setup.rebalanceResource(clusterName, "repository", 3);

        // Set the configuration
        final String instanceName1 = "localhost_12001";
        addConfiguration(setup, baseDir, clusterName, instanceName1);
        final String instanceName2 = "localhost_12002";
        addConfiguration(setup, baseDir, clusterName, instanceName2);
        final String instanceName3 = "localhost_12003";
        addConfiguration(setup, baseDir, clusterName, instanceName3);

        Thread thread1 = new Thread(new Runnable() {
            @Override
            public void run() {
                FileStore fileStore = null;
                try {
                    fileStore = new FileStore(zkAddress, clusterName, instanceName1);
                    fileStore.connect();
                } catch (Exception e) {
                    System.err.println("Exception" + e);
                    if (fileStore != null) { // guard: the constructor itself may have thrown
                        fileStore.disconnect();
                    }
                }
            }
        });
        // START NODES
        Thread thread2 = new Thread(new Runnable() {
            @Override
            public void run() {
                FileStore fileStore = new FileStore(zkAddress, clusterName, instanceName2);
                fileStore.connect();
            }
        });
        // START NODES
        Thread thread3 = new Thread(new Runnable() {
            @Override
            public void run() {
                FileStore fileStore = new FileStore(zkAddress, clusterName, instanceName3);
                fileStore.connect();
            }
        });

        System.out.println("STARTING NODES");
        thread1.start();
        thread2.start();
        thread3.start();

        // Start Controller
        final HelixManager manager = HelixControllerMain.startHelixController(zkAddress, clusterName,
                "controller", HelixControllerMain.STANDALONE);

        Thread.sleep(5000);
        printStatus(manager);
        listFiles(baseDir);
        System.out.println("Writing files a.txt and b.txt to current master " + baseDir + "localhost_12001"
                + "/filestore");
        FileUtils.writeStringToFile(new File(baseDir + "localhost_12001" + "/filestore/a.txt"), "some_data in a");
        FileUtils.writeStringToFile(new File(baseDir + "localhost_12001" + "/filestore/b.txt"), "some_data in b");
        Thread.sleep(10000);
        listFiles(baseDir);
        Thread.sleep(5000);
        System.out.println("Stopping the MASTER node:" + "localhost_12001");
        thread1.interrupt(); // simulate a master failure by interrupting its thread
        Thread.sleep(10000);
        printStatus(manager);
        System.out.println("Writing files c.txt and d.txt to current master " + baseDir + "localhost_12002"
                + "/filestore");
        FileUtils.writeStringToFile(new File(baseDir + "localhost_12002" + "/filestore/c.txt"), "some_data in c");
        FileUtils.writeStringToFile(new File(baseDir + "localhost_12002" + "/filestore/d.txt"), "some_data in d");
        Thread.sleep(10000);
        listFiles(baseDir);
        System.out.println("Create or modify any files under " + baseDir + "localhost_12002" + "/filestore"
                + " and it should get replicated to " + baseDir + "localhost_12003" + "/filestore");
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        if (server != null) {
            // server.shutdown();
        }
    }
    Thread.currentThread().join();
}
From source file:org.apache.bookkeeper.benchmark.BenchThroughputLatency.java
@SuppressWarnings("deprecation") public static void main(String[] args) throws KeeperException, IOException, InterruptedException, ParseException, BKException { Options options = new Options(); options.addOption("time", true, "Running time (seconds), default 60"); options.addOption("entrysize", true, "Entry size (bytes), default 1024"); options.addOption("ensemble", true, "Ensemble size, default 3"); options.addOption("quorum", true, "Quorum size, default 2"); options.addOption("ackQuorum", true, "Ack quorum size, default is same as quorum"); options.addOption("throttle", true, "Max outstanding requests, default 10000"); options.addOption("ledgers", true, "Number of ledgers, default 1"); options.addOption("zookeeper", true, "Zookeeper ensemble, default \"localhost:2181\""); options.addOption("password", true, "Password used to create ledgers (default 'benchPasswd')"); options.addOption("coordnode", true, "Coordination znode for multi client benchmarks (optional)"); options.addOption("timeout", true, "Number of seconds after which to give up"); options.addOption("sockettimeout", true, "Socket timeout for bookkeeper client. In seconds. Default 5"); options.addOption("skipwarmup", false, "Skip warm up, default false"); options.addOption("sendlimit", true, "Max number of entries to send. Default 20000000"); options.addOption("latencyFile", true, "File to dump latencies. Default is latencyDump.dat"); options.addOption("help", false, "This message"); CommandLineParser parser = new PosixParser(); CommandLine cmd = parser.parse(options, args); if (cmd.hasOption("help")) { HelpFormatter formatter = new HelpFormatter(); formatter.printHelp("BenchThroughputLatency <options>", options); System.exit(-1);// w w w .j a v a 2 s .c o m } long runningTime = Long.parseLong(cmd.getOptionValue("time", "60")); String servers = cmd.getOptionValue("zookeeper", "localhost:2181"); int entrysize = Integer.parseInt(cmd.getOptionValue("entrysize", "1024")); int ledgers = Integer.parseInt(cmd.getOptionValue("ledgers", "1")); int ensemble = Integer.parseInt(cmd.getOptionValue("ensemble", "3")); int quorum = Integer.parseInt(cmd.getOptionValue("quorum", "2")); int ackQuorum = quorum; if (cmd.hasOption("ackQuorum")) { ackQuorum = Integer.parseInt(cmd.getOptionValue("ackQuorum")); } int throttle = Integer.parseInt(cmd.getOptionValue("throttle", "10000")); int sendLimit = Integer.parseInt(cmd.getOptionValue("sendlimit", "20000000")); final int sockTimeout = Integer.parseInt(cmd.getOptionValue("sockettimeout", "5")); String coordinationZnode = cmd.getOptionValue("coordnode"); final byte[] passwd = cmd.getOptionValue("password", "benchPasswd").getBytes(UTF_8); String latencyFile = cmd.getOptionValue("latencyFile", "latencyDump.dat"); Timer timeouter = new Timer(); if (cmd.hasOption("timeout")) { final long timeout = Long.parseLong(cmd.getOptionValue("timeout", "360")) * 1000; timeouter.schedule(new TimerTask() { public void run() { System.err.println("Timing out benchmark after " + timeout + "ms"); System.exit(-1); } }, timeout); } LOG.warn("(Parameters received) running time: " + runningTime + ", entry size: " + entrysize + ", ensemble size: " + ensemble + ", quorum size: " + quorum + ", throttle: " + throttle + ", number of ledgers: " + ledgers + ", zk servers: " + servers + ", latency file: " + latencyFile); long totalTime = runningTime * 1000; // Do a warmup run Thread thread; byte data[] = new byte[entrysize]; Arrays.fill(data, (byte) 'x'); ClientConfiguration conf = new ClientConfiguration(); 
conf.setThrottleValue(throttle).setReadTimeout(sockTimeout).setZkServers(servers); if (!cmd.hasOption("skipwarmup")) { long throughput; LOG.info("Starting warmup"); throughput = warmUp(data, ledgers, ensemble, quorum, passwd, conf); LOG.info("Warmup tp: " + throughput); LOG.info("Warmup phase finished"); } // Now do the benchmark BenchThroughputLatency bench = new BenchThroughputLatency(ensemble, quorum, ackQuorum, passwd, ledgers, sendLimit, conf); bench.setEntryData(data); thread = new Thread(bench); ZooKeeper zk = null; if (coordinationZnode != null) { final CountDownLatch connectLatch = new CountDownLatch(1); zk = new ZooKeeper(servers, 15000, new Watcher() { @Override public void process(WatchedEvent event) { if (event.getState() == KeeperState.SyncConnected) { connectLatch.countDown(); } } }); if (!connectLatch.await(10, TimeUnit.SECONDS)) { LOG.error("Couldn't connect to zookeeper at " + servers); zk.close(); System.exit(-1); } final CountDownLatch latch = new CountDownLatch(1); LOG.info("Waiting for " + coordinationZnode); if (zk.exists(coordinationZnode, new Watcher() { @Override public void process(WatchedEvent event) { if (event.getType() == EventType.NodeCreated) { latch.countDown(); } } }) != null) { latch.countDown(); } latch.await(); LOG.info("Coordination znode created"); } thread.start(); Thread.sleep(totalTime); thread.interrupt(); thread.join(); LOG.info("Calculating percentiles"); int numlat = 0; for (int i = 0; i < bench.latencies.length; i++) { if (bench.latencies[i] > 0) { numlat++; } } int numcompletions = numlat; numlat = Math.min(bench.sendLimit, numlat); long[] latency = new long[numlat]; int j = 0; for (int i = 0; i < bench.latencies.length && j < numlat; i++) { if (bench.latencies[i] > 0) { latency[j++] = bench.latencies[i]; } } Arrays.sort(latency); long tp = (long) ((double) (numcompletions * 1000.0) / (double) bench.getDuration()); LOG.info(numcompletions + " completions in " + bench.getDuration() + " milliseconds: " + tp + " ops/sec"); if (zk != null) { zk.create(coordinationZnode + "/worker-", ("tp " + tp + " duration " + bench.getDuration()).getBytes(UTF_8), ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT_SEQUENTIAL); zk.close(); } // dump the latencies for later debugging (it will be sorted by entryid) OutputStream fos = new BufferedOutputStream(new FileOutputStream(latencyFile)); for (Long l : latency) { fos.write((Long.toString(l) + "\t" + (l / 1000000) + "ms\n").getBytes(UTF_8)); } fos.flush(); fos.close(); // now get the latencies LOG.info("99th percentile latency: {}", percentile(latency, 99)); LOG.info("95th percentile latency: {}", percentile(latency, 95)); bench.close(); timeouter.cancel(); }
From source file:fr.bmartel.protocol.google.main.LaunchOauthApiServer.java
/**
 * The main method.
 *
 * @param args the arguments
 */
public static void main(String[] args) {
    webPath = "";
    clientId = "";
    clientSecret = "";

    if (args.length == 3) {
        for (int i = 0; i < 3; i++) {
            if (args[i].toLowerCase().startsWith("webpath="))
                webPath = args[i].substring(args[i].indexOf("webpath=") + "webpath=".length() + 1,
                        args[i].length());
            if (args[i].toLowerCase().startsWith("clientid="))
                clientId = args[i].substring(args[i].indexOf("clientid=") + "clientid=".length() + 1,
                        args[i].length());
            if (args[i].toLowerCase().startsWith("clientsecret="))
                clientSecret = args[i].substring(
                        args[i].indexOf("clientsecret=") + "clientsecret=".length() + 1, args[i].length());
        }
        if (webPath.equals("")) {
            printHelp("Error web path is missing");
            return;
        } else if (clientId.equals("")) {
            printHelp("Error client Id is missing");
            return;
        } else if (clientSecret.equals("")) {
            printHelp("Error client secret is missing");
            return;
        }
    } else {
        printHelp("");
        return;
    }

    // start http server
    HttpServer server = new HttpServer(SERVER_PORT);

    websocketServer = new WebsocketServer(WEBSOCKET_SERVER_PORT);

    websocketServer.addServerEventListener(new IClientEventListener() {

        @SuppressWarnings("unchecked")
        @Override
        public void onMessageReceivedFromClient(final IWebsocketClient client, String message) {
            JSONObject obj = (JSONObject) JSONValue.parse(message);
            if (obj != null && obj.containsKey(JsonConstants.API_ACTION)) {
                System.out.println("[API] > " + obj.toJSONString());
                String action = obj.get(JsonConstants.API_ACTION).toString();
                if (action != null) {
                    if (action.equals(JsonConstants.API_REGISTRATION_STATE)) {
                        JSONObject registrationResponse = new JSONObject();
                        if (calendarNotifManager != null
                                && calendarNotifManager.getOauthRegistration() != null) {
                            registrationResponse.put(JsonConstants.GOOGLE_OAUTH_DEVICE_CODE,
                                    calendarNotifManager.getOauthRegistration().getDeviceCode());
                            registrationResponse.put(JsonConstants.GOOGLE_OAUTH_EXPIRING_BEFORE,
                                    calendarNotifManager.getOauthRegistration().getExpiringBefore());
                            registrationResponse.put(JsonConstants.GOOGLE_OAUTH_INTERVAL,
                                    calendarNotifManager.getOauthRegistration().getInterval());
                            registrationResponse.put(JsonConstants.GOOGLE_OAUTH_USERCODE,
                                    calendarNotifManager.getOauthRegistration().getUserCode());
                            registrationResponse.put(JsonConstants.GOOGLE_OAUTH_VERIFICATION_URL,
                                    calendarNotifManager.getOauthRegistration().getVerificationUrl());
                        }
                        System.out.println("[API] < " + registrationResponse.toJSONString());
                        client.sendMessage(registrationResponse.toJSONString());
                    } else if (action.equals(JsonConstants.API_TOKEN_STATE)) {
                        JSONObject requestTokenResponse = new JSONObject();
                        if (calendarNotifManager != null && calendarNotifManager.getCurrentToken() != null) {
                            requestTokenResponse.put(JsonConstants.GOOGLE_OAUTH_ACCESS_TOKEN,
                                    calendarNotifManager.getCurrentToken().getAccessToken());
                            requestTokenResponse.put(JsonConstants.GOOGLE_OAUTH_TOKEN_TYPE,
                                    calendarNotifManager.getCurrentToken().getTokenType());
                            requestTokenResponse.put(JsonConstants.GOOGLE_OAUTH_EXPIRE_IN,
                                    calendarNotifManager.getCurrentToken().getExpiresIn());
                        }
                        System.out.println("[API] < " + requestTokenResponse.toJSONString());
                        client.sendMessage(requestTokenResponse.toJSONString());
                    } else if (action.equals(JsonConstants.API_REGISTRATION)) {
                        calendarNotifManager = new CalendarNotifManager(clientId, clientSecret);
                        calendarNotifManager.requestDeviceAuth(new IOauthDeviceResponseListener() {

                            @Override
                            public void onResponseReceived(OauthForDeviceResponse response) {
                                if (response != null) {
                                    JSONObject registrationResponse = new JSONObject();
                                    registrationResponse.put(JsonConstants.GOOGLE_OAUTH_DEVICE_CODE,
                                            response.getDeviceCode());
                                    registrationResponse.put(JsonConstants.GOOGLE_OAUTH_EXPIRING_BEFORE,
                                            response.getExpiringBefore());
                                    registrationResponse.put(JsonConstants.GOOGLE_OAUTH_INTERVAL,
                                            response.getInterval());
                                    registrationResponse.put(JsonConstants.GOOGLE_OAUTH_USERCODE,
                                            response.getUserCode());
                                    registrationResponse.put(JsonConstants.GOOGLE_OAUTH_VERIFICATION_URL,
                                            response.getVerificationUrl());
                                    System.out.println("[API] < " + registrationResponse.toJSONString());
                                    client.sendMessage(registrationResponse.toJSONString());
                                }
                            }
                        });
                    } else if (action.equals(JsonConstants.API_REQUEST_TOKEN)) {
                        if (calendarNotifManager != null) {
                            calendarNotifManager.requestToken(new IRequestTokenListener() {

                                @Override
                                public void onRequestTokenReceived(OauthToken token) {
                                    if (token != null) {
                                        JSONObject requestTokenResponse = new JSONObject();
                                        requestTokenResponse.put(JsonConstants.GOOGLE_OAUTH_ACCESS_TOKEN,
                                                token.getAccessToken());
                                        requestTokenResponse.put(JsonConstants.GOOGLE_OAUTH_TOKEN_TYPE,
                                                token.getTokenType());
                                        requestTokenResponse.put(JsonConstants.GOOGLE_OAUTH_EXPIRE_IN,
                                                token.getExpiresIn());
                                        System.out.println("[API] < " + requestTokenResponse.toJSONString());
                                        client.sendMessage(requestTokenResponse.toJSONString());
                                    }
                                }

                                @Override
                                public void onRequestTokenError(String description) {
                                    String response = "{\"error\":\"request token error\",\"error_description\":\""
                                            + description + "\"}";
                                    System.out.println("[API] < " + response);
                                    client.sendMessage(response);
                                }
                            });
                        }
                    } else if (action.equals(JsonConstants.API_REVOKE_TOKEN)) {
                        if (calendarNotifManager != null) {
                            calendarNotifManager.revokeToken(new IRevokeTokenListener() {

                                @Override
                                public void onSuccess() {
                                    System.out.println("[API] < " + "{\"revokeToken\":\"success\"}");
                                    client.sendMessage("{\"revokeToken\":\"success\"}");
                                }

                                @Override
                                public void onError(String description) {
                                    String response = "{\"error\":\"request token error\",\"error_description\":\""
                                            + description + "\"}";
                                    System.out.println("[API] < " + response);
                                    client.sendMessage(response);
                                }
                            });
                        }
                    } else if (action.equals(JsonConstants.API_USER_PROFILE)) {
                        if (calendarNotifManager != null) {
                            calendarNotifManager.getUserProfileManager()
                                    .getUserProfile(new IUserProfileListener() {

                                        @Override
                                        public void onSuccess(UserProfile userProfile) {
                                            if (userProfile != null) {
                                                JSONObject userProfileResponse = new JSONObject();
                                                userProfileResponse.put(JsonConstants.GOOGLE_API_PROFILE_GENDER,
                                                        userProfile.getGender());
                                                userProfileResponse.put(
                                                        JsonConstants.GOOGLE_API_PROFILE_DISPLAY_NAME,
                                                        userProfile.getDisplayName());
                                                userProfileResponse.put(
                                                        JsonConstants.GOOGLE_API_PROFILE_FAMILY_NAME,
                                                        userProfile.getFamilyName());
                                                userProfileResponse.put(
                                                        JsonConstants.GOOGLE_API_PROFILE_GIVEN_NAME,
                                                        userProfile.getGivenName());
                                                userProfileResponse.put(
                                                        JsonConstants.GOOGLE_API_PROFILE_LANGUAGE,
                                                        userProfile.getLanguage());
                                                System.out.println(
                                                        "[API] < " + userProfileResponse.toJSONString());
                                                client.sendMessage(userProfileResponse.toJSONString());
                                            }
                                        }

                                        @Override
                                        public void onError(String description) {
                                            String response = "{\"error\":\"request token error\",\"error_description\":\""
                                                    + description + "\"}";
                                            System.out.println("[API] < " + response);
                                            client.sendMessage(response);
                                        }
                                    });
                        }
                    } else if (action.equals(JsonConstants.API_CREATE_EVENT)
                            && obj.containsKey(JsonConstants.API_DATE_BEGIN)
                            && obj.containsKey(JsonConstants.API_DATE_END)
                            && obj.containsKey(JsonConstants.API_SUMMARY)) {
                        String dateBegin = obj.get(JsonConstants.API_DATE_BEGIN).toString();
                        String dateEnd = obj.get(JsonConstants.API_DATE_END).toString();
                        String summary = obj.get(JsonConstants.API_SUMMARY).toString();
                        if (calendarNotifManager != null) {
                            calendarNotifManager.getCalendarManager().createEvent(dateBegin, dateEnd, summary,
                                    new ICreateEventListener() {

                                        @Override
                                        public void onError(String description) {
                                            String response = "{\"error\":\"request token error\",\"error_description\":\""
                                                    + description + "\"}";
                                            System.out.println("[API] < " + response);
                                            client.sendMessage(
                                                    "{\"error\":\"request token error\",\"error_description\":\""
                                                            + description + "\"}");
                                        }

                                        @Override
                                        public void onCreateSuccess(String id) {
                                            String response = "{\"createEvent\":\"success\",\"eventId\":\"" + id
                                                    + "\"}";
                                            System.out.println("[API] < " + response);
                                            client.sendMessage(response);
                                        }
                                    });
                        }
                    } else if (action.equals(JsonConstants.API_DELETE_EVENT)
                            && obj.containsKey(JsonConstants.API_EVENT_ID)) {
                        final String eventId = obj.get(JsonConstants.API_EVENT_ID).toString();
                        calendarNotifManager.getCalendarManager().deleteEvent(eventId,
                                new IDeleteEventListener() {

                                    @Override
                                    public void onSuccess() {
                                        String response = "{\"deleteEvent\":\"success\",\"eventId\":\"" + eventId
                                                + "\"}";
                                        System.out.println("[API] < " + response);
                                        client.sendMessage(response);
                                    }

                                    @Override
                                    public void onError(String description) {
                                        String response = "{\"error\":\"request token error\",\"error_description\":\""
                                                + description + "\"}";
                                        System.out.println("[API] < " + response);
                                        client.sendMessage(response);
                                    }
                                });
                    } else if (action.equals(JsonConstants.API_GET_EVENTS)
                            && obj.containsKey(JsonConstants.API_DATE_BEGIN)
                            && obj.containsKey(JsonConstants.API_DATE_END) && obj.containsKey("searchText")
                            && calendarNotifManager != null) {
                        String dateBegin = obj.get(JsonConstants.API_DATE_BEGIN).toString();
                        String dateEnd = obj.get(JsonConstants.API_DATE_END).toString();
                        String searchText = obj.get(JsonConstants.API_SEARCH_TEXT).toString();
                        calendarNotifManager.getCalendarManager().getEventList(dateBegin, dateEnd, searchText,
                                new IEventListListener() {

                                    @Override
                                    public void onEventListReceived(List<CalendarEvents> calendarEventList) {
                                        String response = "{\"eventList\":" + CalendarUtils
                                                .convertCalendarListToJsonArray(calendarEventList)
                                                .toJSONString() + "}";
                                        System.out.println("[API] < " + response);
                                        client.sendMessage(response);
                                    }

                                    @Override
                                    public void onError(String description) {
                                        String response = "{\"error\":\"request token error\",\"error_description\":\""
                                                + description + "\"}";
                                        System.out.println("[API] < " + response);
                                        client.sendMessage(response);
                                    }
                                });
                    } else if (action.equals(JsonConstants.API_SUBSCRIBE_EVENT)
                            && obj.containsKey(JsonConstants.API_EVENT_ID)
                            && obj.containsKey(JsonConstants.API_TIME_ABOUT_TO_START)) {
                        String eventId = obj.get(JsonConstants.API_EVENT_ID).toString();
                        int timeAboutToStart = Integer
                                .parseInt(obj.get(JsonConstants.API_TIME_ABOUT_TO_START).toString());
                        calendarNotifManager.getNotificationManager().subscribeEvent(eventId, timeAboutToStart,
                                new IEventListener() {

                                    @Override
                                    public void onEventStart(String eventId, String summary) {
                                        String response = "{\"subscribedEvent\":\"" + eventId
                                                + "\",\"eventType\":\"started\",\"summary\":\"" + summary
                                                + "\"}";
                                        System.out.println("[API] < " + response);
                                        client.sendMessage(response);
                                    }

                                    @Override
                                    public void onEventAboutToStart(String eventId, String summary) {
                                        String response = "{\"subscribedEvent\":\"" + eventId
                                                + "\",\"eventType\":\"aboutToStart\",\"summary\":\"" + summary
                                                + "\"}";
                                        System.out.println("[API] < " + response);
                                        client.sendMessage(response);
                                    }
                                });
                    } else if (action.equals(JsonConstants.API_UNSUBSCRIBE_EVENT)
                            && obj.containsKey(JsonConstants.API_EVENT_ID)) {
                        String eventId = obj.get(JsonConstants.API_EVENT_ID).toString();
                        calendarNotifManager.getNotificationManager().unsubscribeEvent(eventId);
                    } else {
                        System.out.println("[API] Error api target is inconsistent");
                    }
                }
            }
        }

        @Override
        public void onClientConnection(IWebsocketClient client) {
            System.out.println("Websocket client connected");
        }

        @Override
        public void onClientClose(IWebsocketClient client) {
            System.out.println("Websocket client disconnected");
        }
    });

    Runnable websocketTask = new Runnable() {
        @Override
        public void run() {
            websocketServer.start();
        }
    };

    Thread thread = new Thread(websocketTask, "WEBSOCKET_THREAD");
    thread.start();

    server.addServerEventListener(new IHttpServerEventListener() {

        @Override
        public void onHttpFrameReceived(IHttpFrame httpFrame, HttpStates receptionStates,
                IHttpStream httpStream) {
            // check if http frame is OK
            if (receptionStates == HttpStates.HTTP_FRAME_OK) {
                // you can check here http frame type (response or request frame)
                if (httpFrame.isHttpRequestFrame()) {
                    // we want to send a message to client for http GET request on page with uri /index
                    if (httpFrame.getMethod().equals("GET") && httpFrame.getUri().equals("/gcalendar")) {
                        String defaultPage = "";
                        try {
                            defaultPage = FileUtils.readFile(webPath + "/index.html", "UTF-8");
                        } catch (IOException e) {
                            e.printStackTrace();
                        }
                        // return default html page for this HTTP Server
                        httpStream.writeHttpFrame(new HttpResponseFrame(StatusCodeList.OK,
                                new HttpVersion(1, 1), new HashMap<String, String>(), defaultPage.getBytes())
                                        .toString().getBytes());
                    } else if (httpFrame.getMethod().equals("GET")
                            && (httpFrame.getUri().endsWith(".css") || httpFrame.getUri().endsWith(".js"))) {
                        String defaultPage = "";
                        try {
                            defaultPage = FileUtils.readFile(webPath + httpFrame.getUri(), "UTF-8");
                        } catch (IOException e) {
                            e.printStackTrace();
                        }
                        // return default html page for this HTTP Server
                        httpStream.writeHttpFrame(new HttpResponseFrame(StatusCodeList.OK,
                                new HttpVersion(1, 1), new HashMap<String, String>(), defaultPage.getBytes())
                                        .toString().getBytes());
                    }
                }
            }
        }
    });

    server.start();

    // once the HTTP server loop returns, stop the websocket server thread
    thread.interrupt();
}
From source file:org.apache.flink.streaming.connectors.kinesis.manualtests.ManualExactlyOnceWithStreamReshardingTest.java
public static void main(String[] args) throws Exception {
    final ParameterTool pt = ParameterTool.fromArgs(args);
    LOG.info("Starting exactly once with stream resharding test");

    final String streamName = "flink-test-" + UUID.randomUUID().toString();
    final String accessKey = pt.getRequired("accessKey");
    final String secretKey = pt.getRequired("secretKey");
    final String region = pt.getRequired("region");

    final Properties configProps = new Properties();
    configProps.setProperty(ConsumerConfigConstants.AWS_ACCESS_KEY_ID, accessKey);
    configProps.setProperty(ConsumerConfigConstants.AWS_SECRET_ACCESS_KEY, secretKey);
    configProps.setProperty(ConsumerConfigConstants.AWS_REGION, region);
    configProps.setProperty(ConsumerConfigConstants.SHARD_DISCOVERY_INTERVAL_MILLIS, "0");
    final AmazonKinesisClient client = AWSUtil.createKinesisClient(configProps);

    // the stream is first created with 1 shard
    client.createStream(streamName, 1);

    // wait until stream has been created
    DescribeStreamResult status = client.describeStream(streamName);
    LOG.info("status {}", status);
    while (!status.getStreamDescription().getStreamStatus().equals("ACTIVE")) {
        status = client.describeStream(streamName);
        LOG.info("Status of stream {}", status);
        Thread.sleep(1000);
    }

    final Configuration flinkConfig = new Configuration();
    flinkConfig.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, 1);
    flinkConfig.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, 8);
    flinkConfig.setInteger(ConfigConstants.TASK_MANAGER_MEMORY_SIZE_KEY, 16);
    flinkConfig.setString(ConfigConstants.RESTART_STRATEGY_FIXED_DELAY_DELAY, "0 s");

    LocalFlinkMiniCluster flink = new LocalFlinkMiniCluster(flinkConfig, false);
    flink.start();

    final int flinkPort = flink.getLeaderRPCPort();

    try {
        // we have to use a manual generator here instead of the FlinkKinesisProducer
        // because the FlinkKinesisProducer currently has a problem where records will be resent to a shard
        // when resharding happens; this affects the consumer exactly-once validation test and will never pass
        final AtomicReference<Throwable> producerError = new AtomicReference<>();
        Runnable manualGenerate = new Runnable() {
            @Override
            public void run() {
                AmazonKinesisClient client = AWSUtil.createKinesisClient(configProps);
                int count = 0;
                final int batchSize = 30;
                while (true) {
                    try {
                        Thread.sleep(10);

                        Set<PutRecordsRequestEntry> batch = new HashSet<>();
                        for (int i = count; i < count + batchSize; i++) {
                            if (i >= TOTAL_EVENT_COUNT) {
                                break;
                            }
                            batch.add(new PutRecordsRequestEntry()
                                    .withData(ByteBuffer.wrap(((i) + "-" + RandomStringUtils.randomAlphabetic(12))
                                            .getBytes(ConfigConstants.DEFAULT_CHARSET)))
                                    .withPartitionKey(UUID.randomUUID().toString()));
                        }
                        count += batchSize;

                        PutRecordsResult result = client.putRecords(
                                new PutRecordsRequest().withStreamName(streamName).withRecords(batch));

                        // the putRecords() operation may have failing records; to keep this test simple
                        // instead of retrying on failed records, we simply pass on a runtime exception
                        // and let this test fail
                        if (result.getFailedRecordCount() > 0) {
                            producerError.set(new RuntimeException(
                                    "The producer has failed records in one of the put batch attempts."));
                            break;
                        }

                        if (count >= TOTAL_EVENT_COUNT) {
                            break;
                        }
                    } catch (Exception e) {
                        producerError.set(e);
                    }
                }
            }
        };
        Thread producerThread = new Thread(manualGenerate);
        producerThread.start();

        final AtomicReference<Throwable> consumerError = new AtomicReference<>();
        Thread consumerThread = ExactlyOnceValidatingConsumerThread.create(TOTAL_EVENT_COUNT, 10000, 2, 500, 500,
                accessKey, secretKey, region, streamName, consumerError, flinkPort, flinkConfig);
        consumerThread.start();

        // reshard the Kinesis stream while the producer / and consumers are running
        Runnable splitShard = new Runnable() {
            @Override
            public void run() {
                try {
                    // first, split shard in the middle of the hash range
                    Thread.sleep(5000);
                    LOG.info("Splitting shard ...");
                    client.splitShard(streamName, KinesisShardIdGenerator.generateFromShardOrder(0),
                            "170141183460469231731687303715884105727");

                    // wait until the split shard operation finishes updating ...
                    DescribeStreamResult status;
                    Random rand = new Random();
                    do {
                        status = null;
                        while (status == null) {
                            // retry until we get status
                            try {
                                status = client.describeStream(streamName);
                            } catch (LimitExceededException lee) {
                                LOG.warn("LimitExceededException while describing stream ... retrying ...");
                                Thread.sleep(rand.nextInt(1200));
                            }
                        }
                    } while (!status.getStreamDescription().getStreamStatus().equals("ACTIVE"));

                    // then merge again
                    Thread.sleep(7000);
                    LOG.info("Merging shards ...");
                    client.mergeShards(streamName, KinesisShardIdGenerator.generateFromShardOrder(1),
                            KinesisShardIdGenerator.generateFromShardOrder(2));
                } catch (InterruptedException iex) {
                    //
                }
            }
        };
        Thread splitShardThread = new Thread(splitShard);
        splitShardThread.start();

        boolean deadlinePassed = false;
        long deadline = System.currentTimeMillis() + (1000 * 5 * 60); // wait at most for five minutes
        // wait until both producer and consumer finishes, or an unexpected error is thrown
        while ((consumerThread.isAlive() || producerThread.isAlive())
                && (producerError.get() == null && consumerError.get() == null)) {
            Thread.sleep(1000);
            if (System.currentTimeMillis() >= deadline) {
                LOG.warn("Deadline passed");
                deadlinePassed = true;
                break; // enough waiting
            }
        }

        if (producerThread.isAlive()) {
            producerThread.interrupt();
        }

        if (consumerThread.isAlive()) {
            consumerThread.interrupt();
        }

        if (producerError.get() != null) {
            LOG.info("+++ TEST failed! +++");
            throw new RuntimeException("Producer failed", producerError.get());
        }
        if (consumerError.get() != null) {
            LOG.info("+++ TEST failed! +++");
            throw new RuntimeException("Consumer failed", consumerError.get());
        }

        if (!deadlinePassed) {
            LOG.info("+++ TEST passed! +++");
        } else {
            LOG.info("+++ TEST failed! +++");
        }
    } finally {
        client.deleteStream(streamName);
        client.shutdown();

        // stopping flink
        flink.stop();
    }
}
From source file:org.apache.flink.streaming.connectors.kinesis.manualtests.ManualExactlyOnceTest.java
public static void main(String[] args) throws Exception {
    final ParameterTool pt = ParameterTool.fromArgs(args);
    LOG.info("Starting exactly once test");

    // create a stream for the test:
    Properties configProps = new Properties();
    configProps.setProperty(KinesisConfigConstants.CONFIG_AWS_CREDENTIALS_PROVIDER_BASIC_ACCESSKEYID,
            pt.getRequired("accessKey"));
    configProps.setProperty(KinesisConfigConstants.CONFIG_AWS_CREDENTIALS_PROVIDER_BASIC_SECRETKEY,
            pt.getRequired("secretKey"));
    AmazonKinesisClient client = new AmazonKinesisClient(
            AWSUtil.getCredentialsProvider(configProps).getCredentials());
    client.setRegion(Region.getRegion(Regions.fromName(pt.getRequired("region"))));
    final String streamName = "flink-test-" + UUID.randomUUID().toString();
    client.createStream(streamName, 1);

    // wait until stream has been created
    DescribeStreamResult status = client.describeStream(streamName);
    LOG.info("status {}", status);
    while (!status.getStreamDescription().getStreamStatus().equals("ACTIVE")) {
        status = client.describeStream(streamName);
        LOG.info("Status of stream {}", status);
        Thread.sleep(1000);
    }

    final Configuration flinkConfig = new Configuration();
    flinkConfig.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, 1);
    flinkConfig.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, 8);
    flinkConfig.setInteger(ConfigConstants.TASK_MANAGER_MEMORY_SIZE_KEY, 16);
    flinkConfig.setString(ConfigConstants.RESTART_STRATEGY_FIXED_DELAY_DELAY, "0 s");

    ForkableFlinkMiniCluster flink = new ForkableFlinkMiniCluster(flinkConfig, false);
    flink.start();

    final int flinkPort = flink.getLeaderRPCPort();

    try {
        final Tuple1<Throwable> producerException = new Tuple1<>();
        Runnable producer = new Runnable() {
            @Override
            public void run() {
                try {
                    StreamExecutionEnvironment see = StreamExecutionEnvironment
                            .createRemoteEnvironment("localhost", flinkPort, flinkConfig);
                    see.setParallelism(2);

                    // start data generator
                    DataStream<String> simpleStringStream = see
                            .addSource(new EventsGenerator(TOTAL_EVENT_COUNT)).setParallelism(1);

                    Properties producerProps = new Properties();
                    producerProps.setProperty(
                            KinesisConfigConstants.CONFIG_AWS_CREDENTIALS_PROVIDER_BASIC_ACCESSKEYID,
                            pt.getRequired("accessKey"));
                    producerProps.setProperty(
                            KinesisConfigConstants.CONFIG_AWS_CREDENTIALS_PROVIDER_BASIC_SECRETKEY,
                            pt.getRequired("secretKey"));
                    producerProps.setProperty(KinesisConfigConstants.CONFIG_AWS_REGION,
                            pt.getRequired("region"));

                    FlinkKinesisProducer<String> kinesis = new FlinkKinesisProducer<>(new SimpleStringSchema(),
                            producerProps);

                    kinesis.setFailOnError(true);
                    kinesis.setDefaultStream(streamName);
                    kinesis.setDefaultPartition("0");
                    simpleStringStream.addSink(kinesis);

                    LOG.info("Starting producing topology");
                    see.execute("Producing topology");
                    LOG.info("Producing topo finished");
                } catch (Exception e) {
                    LOG.warn("Error while running producing topology", e);
                    producerException.f0 = e;
                }
            }
        };
        Thread producerThread = new Thread(producer);
        producerThread.start();

        final Tuple1<Throwable> consumerException = new Tuple1<>();
        Runnable consumer = new Runnable() {
            @Override
            public void run() {
                try {
                    StreamExecutionEnvironment see = StreamExecutionEnvironment
                            .createRemoteEnvironment("localhost", flinkPort, flinkConfig);
                    see.setParallelism(2);
                    see.enableCheckpointing(500);
                    // we restart two times
                    see.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 500L));

                    // consuming topology
                    Properties consumerProps = new Properties();
                    consumerProps.setProperty(
                            KinesisConfigConstants.CONFIG_AWS_CREDENTIALS_PROVIDER_BASIC_ACCESSKEYID,
                            pt.getRequired("accessKey"));
                    consumerProps.setProperty(
                            KinesisConfigConstants.CONFIG_AWS_CREDENTIALS_PROVIDER_BASIC_SECRETKEY,
                            pt.getRequired("secretKey"));
                    consumerProps.setProperty(KinesisConfigConstants.CONFIG_AWS_REGION,
                            pt.getRequired("region"));
                    // start reading from beginning
                    consumerProps.setProperty(KinesisConfigConstants.CONFIG_STREAM_INIT_POSITION_TYPE,
                            InitialPosition.TRIM_HORIZON.name());
                    DataStream<String> consuming = see.addSource(
                            new FlinkKinesisConsumer<>(streamName, new SimpleStringSchema(), consumerProps));
                    consuming.flatMap(new RichFlatMapFunction<String, String>() {
                        int count = 0;

                        @Override
                        public void flatMap(String value, Collector<String> out) throws Exception {
                            if (count++ >= 200 && getRuntimeContext().getAttemptNumber() == 0) {
                                throw new RuntimeException("Artificial failure. Restart pls");
                            }
                            out.collect(value);
                        }
                    }).flatMap(new ExactlyOnceValidatingMapper());
                    // validate consumed records for correctness
                    LOG.info("Starting consuming topology");
                    tryExecute(see, "Consuming topo");
                    LOG.info("Consuming topo finished");
                } catch (Exception e) {
                    LOG.warn("Error while running consuming topology", e);
                    consumerException.f0 = e;
                }
            }
        };
        Thread consumerThread = new Thread(consumer);
        consumerThread.start();

        long deadline = System.currentTimeMillis() + (1000 * 2 * 60); // wait at most for two minutes
        while (consumerThread.isAlive() || producerThread.isAlive()) {
            Thread.sleep(1000);
            if (System.currentTimeMillis() >= deadline) {
                LOG.warn("Deadline passed");
                break; // enough waiting
            }
        }

        if (producerThread.isAlive()) {
            producerThread.interrupt();
        }

        if (consumerThread.isAlive()) {
            consumerThread.interrupt();
        }

        if (producerException.f0 != null) {
            throw new RuntimeException("Producer failed", producerException.f0);
        }
        if (consumerException.f0 != null) {
            throw new RuntimeException("Consumer failed", consumerException.f0);
        }

        LOG.info("+++ TEST passed! +++");
    } finally {
        client.deleteStream(streamName);
        client.shutdown();

        // stopping flink
        flink.stop();
    }
}
From source file:Main.java
public static void interrupt(Thread thread) {
    try {
        thread.interrupt();
    } catch (Throwable t) {
        // ignore: interrupt() can throw SecurityException if the caller
        // is not allowed to modify the target thread
    }
}
From source file:Main.java
public static void interruptThreadAndJoin(Thread thread) {
    thread.interrupt();
    try {
        thread.join();
    } catch (InterruptedException e) {
        /* Ignore */
    }
}
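Note that the helper above swallows the InterruptedException thrown while joining, which discards the calling thread's own interrupt status. A variant (not from the original source, shown here as a sketch) re-asserts the flag so callers can still observe that they were cancelled:

public static void interruptThreadAndJoinPreservingStatus(Thread thread) {
    thread.interrupt();
    try {
        thread.join();
    } catch (InterruptedException e) {
        // restore the interrupt flag for the caller instead of swallowing it
        Thread.currentThread().interrupt();
    }
}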