List of usage examples for java.util.Collections.emptySet()
@SuppressWarnings("unchecked")
public static final <T> Set<T> emptySet()
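Before the source-file examples, a minimal sketch of typical use (the class name is ours): the method is generic, so the returned set adopts whatever element type the call site needs, and the result is immutable.

import java.util.Collections;
import java.util.Set;

public class EmptySetBasics {
    public static void main(String[] args) {
        // The type parameter is inferred from the assignment target.
        Set<String> names = Collections.emptySet();
        System.out.println("size = " + names.size());             // 0
        System.out.println("contains = " + names.contains("x"));  // false
        // names.add("x"); // would throw UnsupportedOperationException
    }
}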
From source file:Main.java
public static void main(String args[]) {
    // Create an empty, immutable set
    Set emptyset = Collections.emptySet();
    System.out.println("Created empty immutable set: " + emptyset);

    // Attempting to add an element throws UnsupportedOperationException,
    // because the set returned by emptySet() is immutable.
    emptyset.add("some element");
}
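Since the returned set rejects mutation, the usual reason to call emptySet() is as a safe return value instead of null. A minimal sketch, assuming a hypothetical findTags lookup (names and data are ours):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class EmptySetAsDefault {
    // Hypothetical lookup table, left empty for illustration.
    private static final Map<String, Set<String>> TAGS_BY_USER = new HashMap<>();

    // Returning emptySet() instead of null lets callers iterate without null checks.
    static Set<String> findTags(String user) {
        Set<String> tags = TAGS_BY_USER.get(user);
        return (tags != null) ? tags : Collections.emptySet();
    }

    public static void main(String[] args) {
        for (String tag : findTags("unknown-user")) { // safe: never null
            System.out.println(tag);
        }
        System.out.println("done");
    }
}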
From source file:Main.java
public static void main(String args[]) {
    // Raw-typed constants (pre-generics; produce unchecked warnings when used generically)
    List list = Collections.EMPTY_LIST;
    Set set = Collections.EMPTY_SET;
    Map map = Collections.EMPTY_MAP;

    // Generic factory methods (preferred: the element type is inferred at the call site)
    List<String> s = Collections.emptyList();
    Set<Long> l = Collections.emptySet();
    Map<Date, String> d = Collections.emptyMap();
}
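A short sketch of why the generic factory is preferable to the raw constant (class name ours; the shared-instance observation holds on the reference OpenJDK implementation, where emptySet() returns the EMPTY_SET constant cast to the target type):

import java.util.Collections;
import java.util.Set;

public class ConstantVsFactory {
    public static void main(String[] args) {
        // The raw constant needs an unchecked cast to fit a generic variable:
        @SuppressWarnings("unchecked")
        Set<String> viaConstant = (Set<String>) Collections.EMPTY_SET;

        // The generic factory infers the type, with no cast and no warning:
        Set<String> viaFactory = Collections.emptySet();

        // On the reference implementation both refer to the same shared
        // immutable instance, so this prints true:
        System.out.println(viaConstant == viaFactory);
    }
}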
From source file:com.ikanow.aleph2.analytics.spark.assets.SparkPassthroughTopology.java
public static void main(String[] args) throws InstantiationException, IllegalAccessException, ClassNotFoundException {
    try {
        final Tuple2<IAnalyticsContext, Optional<ProcessingTestSpecBean>> aleph2_tuple =
                SparkTechnologyUtils.initializeAleph2(args);
        final IAnalyticsContext context = aleph2_tuple._1();
        final Optional<ProcessingTestSpecBean> test_spec = aleph2_tuple._2();

        // Optional: make really really sure it exits after the specified timeout
        SparkTechnologyUtils.registerTestTimeout(test_spec, () -> {
            System.exit(0);
        });

        //INFO:
        System.out.println("Starting SparkPassthroughTopology");

        SparkConf spark_context = new SparkConf().setAppName("SparkPassthroughTopology");

        final Optional<Double> sub_sample = test_spec
                .map(__ -> Optional.ofNullable(spark_context.getDouble(SUBSAMPLE_TEST, -1)))
                .orElseGet(() -> Optional.ofNullable(spark_context.getDouble(SUBSAMPLE_NORMAL, -1)))
                .filter(d -> d > 0);

        //INFO:
        sub_sample.ifPresent(d -> System.out.println("OPTIONS: sub_sample = " + d));
        test_spec.ifPresent(spec -> System.out
                .println("OPTIONS: test_spec = " + BeanTemplateUtils.toJson(spec).toString()));

        //DEBUG
        //final boolean test_mode = test_spec.isPresent(); // (serializable thing i can pass into the map)

        try (final JavaSparkContext jsc = new JavaSparkContext(spark_context)) {

            final Multimap<String, JavaPairRDD<Object, Tuple2<Long, IBatchRecord>>> inputs =
                    SparkTechnologyUtils.buildBatchSparkInputs(context, test_spec, jsc, Collections.emptySet());

            final Optional<JavaPairRDD<Object, Tuple2<Long, IBatchRecord>>> input = inputs.values().stream()
                    .reduce((acc1, acc2) -> acc1.union(acc2));

            long written = input.map(in -> in.values())
                    .map(rdd -> sub_sample.map(sample -> rdd.sample(true, sample)).orElse(rdd))
                    .map(rdd -> {
                        return rdd.map(t2 -> {
                            final Validation<BasicMessageBean, JsonNode> ret_val = context.emitObject(
                                    Optional.empty(), context.getJob().get(),
                                    Either.left(t2._2().getJson()), Optional.empty());
                            return ret_val; // (doesn't matter what I return, just want to count it up)
                        })
                        //DEBUG: (print the output JSON on success and the error message on fail)
                        //.map(val -> test_mode ? val.f().bind(f -> Validation.fail("FAIL: " + f.message())) : val)
                        .count();
                    })
                    .orElse(-1L);

            jsc.stop();

            //INFO:
            System.out.println("Wrote: data_objects=" + written);
        }
    } catch (Throwable t) {
        System.out.println(ErrorUtils.getLongForm("ERROR: {0}", t));
    }
}
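Here Collections.emptySet() is passed straight through as the final argument of SparkTechnologyUtils.buildBatchSparkInputs. Assuming that argument is a set of input names to exclude (we have not verified this against the Aleph2 source), the general pattern is worth isolating: an immutable empty set as the "no exclusions" value. A self-contained sketch with a hypothetical buildInputs method:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Set;

public class EmptySetAsArgument {
    // Hypothetical API: the caller may exclude some named inputs.
    static List<String> buildInputs(List<String> available, Set<String> excludedNames) {
        List<String> selected = new ArrayList<>();
        for (String name : available) {
            if (!excludedNames.contains(name)) {
                selected.add(name);
            }
        }
        return selected;
    }

    public static void main(String[] args) {
        List<String> available = Arrays.asList("input_a", "input_b");
        // emptySet() signals "exclude nothing" without allocating a new set.
        System.out.println(buildInputs(available, Collections.emptySet()));
    }
}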
From source file:com.ikanow.aleph2.analytics.spark.assets.SparkSqlTopology.java
public static void main(String[] args) throws InstantiationException, IllegalAccessException, ClassNotFoundException {
    try {
        final Tuple2<IAnalyticsContext, Optional<ProcessingTestSpecBean>> aleph2_tuple =
                SparkTechnologyUtils.initializeAleph2(args);
        final IAnalyticsContext context = aleph2_tuple._1();
        final Optional<ProcessingTestSpecBean> test_spec = aleph2_tuple._2();

        // Optional: make really really sure it exits after the specified timeout
        SparkTechnologyUtils.registerTestTimeout(test_spec, () -> {
            System.exit(0);
        });

        final SparkTopologyConfigBean config = BeanTemplateUtils
                .from(context.getJob().map(job -> job.config()).orElse(Collections.emptyMap()),
                        SparkTopologyConfigBean.class)
                .get();
        final String sql_string = Optional.ofNullable(config.script()).orElse("");

        //INFO:
        System.out.println("Starting SparkSqlTopology");

        SparkConf spark_context = new SparkConf().setAppName("SparkSqlTopology");

        final Optional<Double> sub_sample = test_spec
                .map(__ -> Optional.ofNullable(spark_context.getDouble(SUBSAMPLE_TEST, -1)))
                .orElseGet(() -> Optional.ofNullable(spark_context.getDouble(SUBSAMPLE_NORMAL, -1)))
                .filter(d -> d > 0);

        //INFO:
        sub_sample.ifPresent(d -> System.out.println("OPTIONS: sub_sample = " + d));
        test_spec.ifPresent(spec -> System.out
                .println("OPTIONS: test_spec = " + BeanTemplateUtils.toJson(spec).toString()));
        System.out.println("OPTIONS: sql = " + sql_string);

        //DEBUG
        //final boolean test_mode = test_spec.isPresent(); // (serializable thing i can pass into the map)

        try (final JavaSparkContext jsc = new JavaSparkContext(spark_context)) {
            SQLContext sql_context = new SQLContext(jsc);

            final Multimap<String, DataFrame> inputs = SparkTechnologyUtils.buildBatchSparkSqlInputs(context,
                    test_spec, sql_context, Collections.emptySet());

            //INFO
            System.out.println("Registered tables = " + inputs.keySet().toString());

            final DataFrame filtered_df = sql_context.sql(sql_string);
            final String[] columns = filtered_df.columns(); // (have to do this here because columns() depends on transient code)

            final long written = filtered_df.javaRDD().map(row -> {
                final ObjectNode j = _mapper.createObjectNode();
                //.put("message", row.toString()); (Don't think we want this now that we're using the columns)
                for (int ii = 0; ii < row.length(); ++ii) {
                    j.set(columns[ii], _mapper.convertValue(row.get(ii), JsonNode.class));
                }
                return context.emitObject(Optional.empty(), context.getJob().get(), Either.left(j),
                        Optional.empty());
            }).count();

            //INFO:
            System.out.println("Wrote: data_objects=" + written);
        }
    } catch (Throwable t) {
        System.out.println(ErrorUtils.getLongForm("ERROR: {0}", t));
    }
}
From source file:com.ikanow.aleph2.analytics.spark.assets.SparkJsInterpreterTopology.java
public static void main(String[] args) throws InstantiationException, IllegalAccessException, ClassNotFoundException {

    final SetOnce<IBucketLogger> bucket_logger = new SetOnce<>();
    final SetOnce<String> job_name = new SetOnce<>(); // (the string we'll use in logging activities)

    try {
        final Tuple2<IAnalyticsContext, Optional<ProcessingTestSpecBean>> aleph2_tuple =
                SparkTechnologyUtils.initializeAleph2(args);
        final IAnalyticsContext context = aleph2_tuple._1();
        final Optional<ProcessingTestSpecBean> test_spec = aleph2_tuple._2();
        bucket_logger.set(context.getLogger(context.getBucket()));
        job_name.set(context.getJob().map(j -> j.name()).orElse("no_name"));

        // Optional: make really really sure it exits after the specified timeout
        SparkTechnologyUtils.registerTestTimeout(test_spec, () -> {
            System.exit(0);
        });

        final SparkTopologyConfigBean config = BeanTemplateUtils
                .from(context.getJob().map(job -> job.config()).orElse(Collections.emptyMap()),
                        SparkTopologyConfigBean.class)
                .get();
        final String js_script = Optional.ofNullable(config.script()).orElse("");

        //INFO:
        System.out.println("Starting " + job_name.get());

        SparkConf spark_context = new SparkConf().setAppName(job_name.get());

        test_spec.ifPresent(spec -> System.out
                .println("OPTIONS: test_spec = " + BeanTemplateUtils.toJson(spec).toString()));

        try (final JavaSparkContext jsc = new JavaSparkContext(spark_context)) {

            final Multimap<String, JavaPairRDD<Object, Tuple2<Long, IBatchRecord>>> inputs =
                    SparkTechnologyUtils.buildBatchSparkInputs(context, test_spec, jsc, Collections.emptySet());
            final JavaPairRDD<Object, Tuple2<Long, IBatchRecord>> all_inputs = inputs.values().stream()
                    .reduce((acc1, acc2) -> acc1.union(acc2)).orElse(null);

            // Load globals:
            ScriptEngineManager manager = new ScriptEngineManager();
            ScriptEngine engine = manager.getEngineByName("JavaScript");
            engine.put("_a2_global_context", context);
            engine.put("_a2_global_bucket", context.getBucket().get());
            engine.put("_a2_global_job", context.getJob().get());
            engine.put("_a2_global_config",
                    BeanTemplateUtils.configureMapper(Optional.empty()).convertValue(config, JsonNode.class));
            engine.put("_a2_global_mapper", BeanTemplateUtils.configureMapper(Optional.empty()));
            //TODO (until bucket logger is serializable, don't allow anywhere)
            //engine.put("_a2_bucket_logger", bucket_logger.optional().orElse(null));
            engine.put("_a2_enrichment_name", job_name.get());
            engine.put("_a2_spark_inputs", inputs);
            engine.put("_a2_spark_inputs_all", all_inputs);
            engine.put("_a2_spark_context", jsc);

            Stream.concat(config.uploaded_lang_files().stream(),
                    Stream.of("aleph2_sparkjs_globals_before.js", ""))
                    .flatMap(Lambdas.flatWrap_i(import_path -> {
                        try {
                            if (import_path.equals("")) { // also import the user script just before here
                                return js_script;
                            } else {
                                return IOUtils.toString(SparkJsInterpreterTopology.class.getClassLoader()
                                        .getResourceAsStream(import_path), "UTF-8");
                            }
                        } catch (Throwable e) {
                            bucket_logger.optional()
                                    .ifPresent(l -> l.log(Level.ERROR,
                                            ErrorUtils.lazyBuildMessage(false,
                                                    () -> SparkJsInterpreterTopology.class.getSimpleName(),
                                                    () -> job_name.get() + ".main", () -> null,
                                                    () -> ErrorUtils.get(
                                                            "Error initializing stage {0} (script {1}): {2}",
                                                            job_name.get(), import_path, e.getMessage()),
                                                    () -> ImmutableMap.<String, Object>of("full_error",
                                                            ErrorUtils.getLongForm("{0}", e)))));

                            System.out.println(ErrorUtils.getLongForm("onStageInitialize: {0}", e));
                            throw e; // ignored
                        }
                    }))
                    .forEach(Lambdas.wrap_consumer_i(script -> {
                        try {
                            engine.eval(script);
                        } catch (Throwable e) {
                            bucket_logger.optional()
                                    .ifPresent(l -> l.log(Level.ERROR,
                                            ErrorUtils.lazyBuildMessage(false,
                                                    () -> SparkJsInterpreterTopology.class.getSimpleName(),
                                                    () -> job_name.get() + ".main", () -> null,
                                                    () -> ErrorUtils.get(
                                                            "Error initializing stage {0} (main script): {1}",
                                                            job_name.get(), e.getMessage()),
                                                    () -> ImmutableMap.<String, Object>of("full_error",
                                                            ErrorUtils.getLongForm("{0}", e)))));

                            System.out.println(ErrorUtils.getLongForm("onStageInitialize: {0}", e));
                            throw e; // ignored
                        }
                    }));

            jsc.stop();

            //INFO:
            System.out.println("Finished " + job_name.get());
        }
    } catch (Throwable t) {
        System.out.println(ErrorUtils.getLongForm("ERROR: {0}", t));

        bucket_logger.optional().ifPresent(l -> l.log(Level.ERROR,
                ErrorUtils.lazyBuildMessage(false,
                        () -> SparkJsInterpreterTopology.class.getSimpleName()
                                + job_name.optional().map(j -> "." + j).orElse(""),
                        () -> job_name.optional().orElse("global") + ".main", () -> null,
                        () -> ErrorUtils.get("Error on batch in job {0}: {1}",
                                job_name.optional().orElse("global") + ".main", t.getMessage()),
                        () -> ImmutableMap.<String, Object>of("full_error",
                                ErrorUtils.getLongForm("{0}", t)))));
    }
}
From source file:com.act.lcms.v2.MZCollisionCounter.java
public static void main(String[] args) throws Exception {
    CLIUtil cliUtil = new CLIUtil(MassChargeCalculator.class, HELP_MESSAGE, OPTION_BUILDERS);
    CommandLine cl = cliUtil.parseCommandLine(args);

    File inputFile = new File(cl.getOptionValue(OPTION_INPUT_INCHI_LIST));
    if (!inputFile.exists()) {
        cliUtil.failWithMessage("Input file does not exist at %s", inputFile.getAbsolutePath());
    }

    List<MassChargeCalculator.MZSource> sources = new ArrayList<>();
    try (BufferedReader reader = new BufferedReader(new FileReader(inputFile))) {
        String line;
        while ((line = reader.readLine()) != null) {
            line = line.trim();
            sources.add(new MassChargeCalculator.MZSource(line));
            if (sources.size() % 1000 == 0) {
                LOGGER.info("Loaded %d sources from input file", sources.size());
            }
        }
    }

    Set<String> considerIons = Collections.emptySet();
    if (cl.hasOption(OPTION_ONLY_CONSIDER_IONS)) {
        List<String> ions = Arrays.asList(cl.getOptionValues(OPTION_ONLY_CONSIDER_IONS));
        LOGGER.info("Only considering ions for m/z calculation: %s", StringUtils.join(ions, ", "));
        considerIons = new HashSet<>(ions);
    }

    TSVWriter<String, Long> tsvWriter = new TSVWriter<>(Arrays.asList("collisions", "count"));
    tsvWriter.open(new File(cl.getOptionValue(OPTION_OUTPUT_FILE)));
    try {
        LOGGER.info("Loaded %d sources in total from input file", sources.size());
        MassChargeCalculator.MassChargeMap mzMap = MassChargeCalculator.makeMassChargeMap(sources, considerIons);

        if (!cl.hasOption(OPTION_COUNT_WINDOW_INTERSECTIONS)) {
            // Do an exact analysis of the m/z collisions if windowing is not specified.
            LOGGER.info("Computing precise collision histogram.");
            Iterable<Double> mzs = mzMap.ionMZIter();
            Map<Integer, Long> collisionHistogram = histogram(
                    StreamSupport.stream(mzs.spliterator(), false).map(mz -> {
                        // See comment about Iterable below.
                        try {
                            return mzMap.ionMZToMZSources(mz).size();
                        } catch (NoSuchElementException e) {
                            LOGGER.error("Caught no such element exception for mz %f: %s", mz, e.getMessage());
                            throw e;
                        }
                    }));
            List<Integer> sortedCollisions = new ArrayList<>(collisionHistogram.keySet());
            Collections.sort(sortedCollisions);
            for (Integer collision : sortedCollisions) {
                tsvWriter.append(new HashMap<String, Long>() {
                    {
                        put("collisions", collision.longValue());
                        put("count", collisionHistogram.get(collision));
                    }
                });
            }
        } else {
            /* After some deliberation (thanks Gil!), the windowed variant of this calculation counts the number of
             * structures whose 0.01 Da m/z windows (for some set of ions) overlap with each other.
             *
             * For example, let's assume we have five total input structures, and are only searching for one ion. Let's
             * also assume that three of those structures have m/z A and the remaining two have m/z B. The windows might
             * look like this in the m/z domain:
             *   |----A----|
             *       |----B----|
             * Because A represents three structures and overlaps with B, which represents two, we assign A a count of 5 --
             * this is the number of structures we believe could fall into the range of A given our current peak calling
             * approach. Similarly, B is assigned a count of 5, as the possibility for collision/confusion is symmetric.
             *
             * Note that this is an over-approximation of collisions, as we could more precisely only consider intersections
             * when the exact m/z of B falls within the window around A and vice versa. However, because we have observed
             * cases where the MS sensor doesn't report structures at exactly the m/z we predict, we employ this weaker
             * definition of intersection to give a slightly pessimistic view of what confusions might be possible. */

            // Compute windows for every m/z. We don't care about the original mz values since we just want the count.
            List<Double> mzs = mzMap.ionMZsSorted();

            final Double windowHalfWidth;
            if (cl.hasOption(OPTION_WINDOW_HALFWIDTH)) {
                // Don't use get-with-default for this option, as we want the exact FP value of the default tolerance.
                windowHalfWidth = Double.valueOf(cl.getOptionValue(OPTION_WINDOW_HALFWIDTH));
            } else {
                windowHalfWidth = DEFAULT_WINDOW_TOLERANCE;
            }

            /* Window = (lower bound, upper bound), counter of represented m/z's that collide with this window, and number
             * of representative structures (which will be used in counting collisions). */
            LinkedList<CollisionWindow> allWindows = new LinkedList<CollisionWindow>() {
                {
                    for (Double mz : mzs) {
                        // CPU-for-memory trade-off: don't re-compute the window bounds over and over and over and over and over.
                        try {
                            add(new CollisionWindow(mz, windowHalfWidth, mzMap.ionMZToMZSources(mz).size()));
                        } catch (NoSuchElementException e) {
                            LOGGER.error("Caught no such element exception for mz %f: %s", mz, e.getMessage());
                            throw e;
                        }
                    }
                }
            };

            // Sweep line time! The window ranges are the interesting points. We just accumulate overlap counts as we go.
            LinkedList<CollisionWindow> workingSet = new LinkedList<>();
            List<CollisionWindow> finished = new LinkedList<>();

            while (allWindows.size() > 0) {
                CollisionWindow thisWindow = allWindows.pop();
                // Remove any windows from the working set that don't overlap with the next window.
                while (workingSet.size() > 0 && workingSet.peekFirst().getMaxMZ() < thisWindow.getMinMZ()) {
                    finished.add(workingSet.pop());
                }

                for (CollisionWindow w : workingSet) {
                    /* Add the size of the new overlapping window's structure count to each of the windows in the working set,
                     * which represents the number of possible confused structures that fall within the overlapping region.
                     * We exclude the window itself as it should already have counted the colliding structures it represents. */
                    w.getAccumulator().add(thisWindow.getStructureCount());
                    /* Reciprocally, add the structure counts of all windows with which the current window overlaps to it. */
                    thisWindow.getAccumulator().add(w.getStructureCount());
                }

                // Now that accumulation is complete, we can safely add the current window.
                workingSet.add(thisWindow);
            }

            // All the interesting events are done, so drop the remaining windows into the finished set.
            finished.addAll(workingSet);

            Map<Long, Long> collisionHistogram = histogram(
                    finished.stream().map(w -> w.getAccumulator().longValue()));
            List<Long> sortedCollisions = new ArrayList<>(collisionHistogram.keySet());
            Collections.sort(sortedCollisions);
            for (Long collision : sortedCollisions) {
                tsvWriter.append(new HashMap<String, Long>() {
                    {
                        put("collisions", collision);
                        put("count", collisionHistogram.get(collision));
                    }
                });
            }
        }
    } finally {
        if (tsvWriter != null) {
            tsvWriter.close();
        }
    }
}
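The considerIons variable above illustrates another common idiom: initialize with the immutable Collections.emptySet() as the "nothing specified" default, and only allocate a mutable set when there is actually data to hold. A minimal self-contained sketch of the same shape (class and method names are ours, standing in for the command-line handling above):

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class OptionalFilterDefault {
    // Hypothetical stand-in for an optional command-line filter.
    static Set<String> parseIonFilter(String[] ions) {
        // Start from the immutable empty set: "no filter supplied".
        Set<String> considerIons = Collections.emptySet();
        if (ions.length > 0) {
            // Only allocate a mutable set when there is something to hold.
            considerIons = new HashSet<>(Arrays.asList(ions));
        }
        return considerIons;
    }

    public static void main(String[] args) {
        System.out.println(parseIonFilter(new String[0]));          // []
        System.out.println(parseIonFilter(new String[] {"M+H"}));   // [M+H]
    }
}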
From source file:com.ikanow.aleph2.analytics.spark.assets.SparkScalaInterpreterTopology.java
public static void main(String[] args) throws InstantiationException, IllegalAccessException, ClassNotFoundException {

    final SetOnce<IBucketLogger> logger = new SetOnce<>();

    try {
        final Tuple2<IAnalyticsContext, Optional<ProcessingTestSpecBean>> aleph2_tuple =
                SparkTechnologyUtils.initializeAleph2(args);
        final IAnalyticsContext context = aleph2_tuple._1();
        final Optional<ProcessingTestSpecBean> test_spec = aleph2_tuple._2();
        logger.set(context.getLogger(context.getBucket()));

        // Optional: make really really sure it exits after the specified timeout
        SparkTechnologyUtils.registerTestTimeout(test_spec, () -> {
            System.exit(0);
        });

        //INFO:
        System.out.println("Starting SparkScalaInterpreterTopology logging=" + logger.optional().isPresent());
        logger.optional().ifPresent(l -> {
            l.inefficientLog(Level.INFO,
                    ErrorUtils.buildSuccessMessage("SparkScalaInterpreterTopology", "main",
                            "Starting SparkScalaInterpreterTopology.{0}",
                            Optionals.of(() -> context.getJob().get().name()).orElse("no_name")));
        });

        final SparkTopologyConfigBean job_config = BeanTemplateUtils
                .from(context.getJob().map(job -> job.config()).orElse(Collections.emptyMap()),
                        SparkTopologyConfigBean.class)
                .get();
        final String scala_script = Optional.ofNullable(job_config.script()).orElse("");

        final String wrapper_script = IOUtils.toString(
                SparkScalaInterpreterTopology.class.getClassLoader().getResourceAsStream("ScriptRunner.scala"),
                "UTF-8");
        final String to_compile = wrapper_script.replace("USER_SCRIPT", scala_script);
        final SparkCompilerService scs = new SparkCompilerService();
        final Tuple2<ClassLoader, Object> o = scs.buildClass(to_compile, "ScriptRunner", logger.optional());

        Thread.currentThread().setContextClassLoader(o._1());

        test_spec.ifPresent(spec -> System.out
                .println("OPTIONS: test_spec = " + BeanTemplateUtils.toJson(spec).toString()));

        SparkConf spark_context = new SparkConf().setAppName("SparkPassthroughTopology");

        final long streaming_batch_interval = (long) spark_context
                .getInt(SparkTopologyConfigBean.STREAMING_BATCH_INTERVAL, 10);

        // MAIN PROCESSING

        final Method m = o._2().getClass().getMethod("runScript", SparkScriptEngine.class);

        //DEBUG
        //final boolean test_mode = test_spec.isPresent(); // (serializable thing i can pass into the map)

        boolean is_streaming = context.getJob().map(j -> j.analytic_type())
                .map(t -> MasterEnrichmentType.streaming == t).orElse(false);
        final Either<JavaSparkContext, JavaStreamingContext> jsc = Lambdas.get(() -> {
            return is_streaming
                    ? Either.<JavaSparkContext, JavaStreamingContext>right(new JavaStreamingContext(
                            spark_context, Durations.seconds(streaming_batch_interval)))
                    : Either.<JavaSparkContext, JavaStreamingContext>left(new JavaSparkContext(spark_context));
        });
        try {
            final JavaSparkContext jsc_batch = jsc.either(l -> l, r -> r.sparkContext());

            final Multimap<String, JavaPairRDD<Object, Tuple2<Long, IBatchRecord>>> inputs =
                    SparkTechnologyUtils.buildBatchSparkInputs(context, test_spec, jsc_batch,
                            Collections.emptySet());

            final Multimap<String, JavaPairDStream<String, Tuple2<Long, IBatchRecord>>> streaming_inputs = jsc
                    .<Multimap<String, JavaPairDStream<String, Tuple2<Long, IBatchRecord>>>>either(
                            l -> HashMultimap
                                    .<String, JavaPairDStream<String, Tuple2<Long, IBatchRecord>>>create(),
                            r -> SparkTechnologyUtils.buildStreamingSparkInputs(context, test_spec, r,
                                    Collections.emptySet()));

            final SparkScriptEngine script_engine_bridge = new SparkScriptEngine(context, inputs,
                    streaming_inputs, test_spec, jsc_batch, jsc.either(l -> null, r -> r), job_config);

            // Add driver and generated JARs to path:
            jsc_batch.addJar(LiveInjector.findPathJar(o._2().getClass()));

            m.invoke(o._2(), script_engine_bridge);

            jsc.either(l -> {
                l.stop();
                return null;
            }, r -> {
                r.stop();
                return null;
            });

            logger.optional().ifPresent(l -> {
                l.inefficientLog(Level.INFO,
                        ErrorUtils.buildSuccessMessage("SparkScalaInterpreterTopology", "main",
                                "Stopping SparkScalaInterpreterTopology.{0}",
                                Optionals.of(() -> context.getJob().get().name()).orElse("no_name")));
            });

            //INFO:
            System.out.println("Finished interpreter");
        } finally {
            jsc.either(l -> {
                l.close();
                return null;
            }, r -> {
                r.close();
                return null;
            });
        }

        logger.optional().ifPresent(Lambdas.wrap_consumer_u(l -> l.flush().get(10, TimeUnit.SECONDS)));
    } catch (Throwable t) {
        logger.optional().ifPresent(l -> {
            l.inefficientLog(Level.ERROR,
                    ErrorUtils.buildSuccessMessage("SparkScalaInterpreterTopology", "main", ErrorUtils
                            .getLongForm("Error executing SparkScalaInterpreterTopology.unknown: {0}", t)));
        });

        System.out.println(ErrorUtils.getLongForm("ERROR: {0}", t));
        logger.optional().ifPresent(Lambdas.wrap_consumer_u(l -> l.flush().get(10, TimeUnit.SECONDS)));
        System.exit(-1);
    }
}
From source file:Main.java
public static final <T> Set<T> emptySet() {
    return Collections.emptySet();
}
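A delegating helper like this typically exists so that application code can depend on a single project-local utility class rather than importing java.util.Collections everywhere; it adds no behavior of its own, and the returned set is the same immutable instance Collections.emptySet() provides.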
From source file:Main.java
public static <E> Set<E> asSet(E... elements) {
    if (elements == null || elements.length == 0) {
        return Collections.emptySet();
    }
    // Initial capacity sized so the default load factor (0.75) avoids rehashing
    LinkedHashSet<E> set = new LinkedHashSet<E>(elements.length * 4 / 3 + 1);
    Collections.addAll(set, elements);
    return set;
}
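A hedged usage sketch of the helper above (the wrapper class is ours, added only to make it runnable): with no arguments the caller gets the shared immutable empty set; otherwise an insertion-ordered mutable set with duplicates dropped.

import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.Set;

public class AsSetDemo {
    @SafeVarargs
    static <E> Set<E> asSet(E... elements) {
        if (elements == null || elements.length == 0) {
            return Collections.emptySet();
        }
        LinkedHashSet<E> set = new LinkedHashSet<E>(elements.length * 4 / 3 + 1);
        Collections.addAll(set, elements);
        return set;
    }

    public static void main(String[] args) {
        System.out.println(asSet());              // [] (immutable shared instance)
        System.out.println(asSet("a", "b", "a")); // [a, b] (duplicates dropped, order kept)
    }
}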
From source file:Main.java
public static <T> Set<T> removeRepetition(Collection<T> ts) {
    if (ts == null || ts.isEmpty()) {
        return Collections.emptySet();
    }
    // A LinkedHashMap preserves the original encounter order of the elements
    Map<T, Object> map = new LinkedHashMap<>();
    for (T t : ts) {
        if (!map.containsKey(t)) {
            map.put(t, -1);
        }
    }
    return map.keySet();
}
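The map-based construction above can be replaced by a LinkedHashSet, which deduplicates and preserves encounter order in one step while keeping the same emptySet() short-circuit. A minimal alternative sketch (class name ours):

import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.Set;

public class RemoveRepetitionAlternative {
    static <T> Set<T> removeRepetition(Collection<T> ts) {
        if (ts == null || ts.isEmpty()) {
            return Collections.emptySet();
        }
        // LinkedHashSet deduplicates while preserving encounter order
        return new LinkedHashSet<>(ts);
    }

    public static void main(String[] args) {
        System.out.println(removeRepetition(Arrays.asList("b", "a", "b", "c"))); // [b, a, c]
        System.out.println(removeRepetition(Collections.emptyList()));           // []
    }
}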