List of usage examples for java.util.LinkedList.toArray
@SuppressWarnings("unchecked") public <T> T[] toArray(T[] a)
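Before the examples, a minimal self-contained sketch of the method's contract (variable names are illustrative): if the supplied array is large enough, toArray fills and returns that same array; otherwise it allocates and returns a new array with the same runtime component type.

    import java.util.Arrays;
    import java.util.LinkedList;

    public class ToArrayDemo {
        public static void main(String[] args) {
            LinkedList<String> list = new LinkedList<>(Arrays.asList("a", "b", "c"));

            // Undersized argument: toArray allocates and returns a new String[3].
            String[] fresh = list.toArray(new String[0]);

            // Argument of exactly list.size(): toArray fills and returns the same instance.
            String[] sized = new String[list.size()];
            String[] same = list.toArray(sized);

            System.out.println(same == sized);          // true
            System.out.println(Arrays.toString(fresh)); // [a, b, c]
        }
    }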
From source file:org.opennms.core.test.MockLogAppender.java
/**
 * <p>getEventsAtLevel</p>
 *
 * Returns the events that were logged at or above the specified level.
 */
public static LoggingEvent[] getEventsAtLevel(final Level level) {
    final LinkedList<LoggingEvent> matching = new LinkedList<LoggingEvent>();
    synchronized (s_events) {
        for (final LoggingEvent event : s_events) {
            if (event.getLevel().ge(level)) {
                matching.add(event);
            }
        }
    }
    return matching.toArray(EMPTY_LOGGING_EVENT);
}
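The EMPTY_LOGGING_EVENT argument here is evidently a shared zero-length LoggingEvent[] constant. That idiom is safe because toArray(T[]) never writes into an undersized argument; it only uses it to learn the component type, allocating a fresh array whenever the list is non-empty. A minimal sketch of the same pattern, with illustrative names:

    import java.util.LinkedList;

    class EmptyArrayIdiom {
        // One shared constant avoids allocating a throwaway array on every call.
        private static final String[] EMPTY_STRING_ARRAY = new String[0];

        static String[] drain(LinkedList<String> matching) {
            // Returns EMPTY_STRING_ARRAY itself only when the list is empty;
            // otherwise a new String[matching.size()] is allocated and filled.
            return matching.toArray(EMPTY_STRING_ARRAY);
        }
    }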
From source file:azkaban.jobtype.HadoopSecureSparkWrapper.java
/**
 * This method is used to enforce a queue for the Spark application. The rules are:
 * a) If dynamic resource allocation is enabled for the selected Spark version and the application requires a
 *    large container, schedule it into the default queue via a default conf (spark.yarn.queue) in
 *    spark-defaults.conf.
 * b) If dynamic resource allocation is enabled for the selected Spark version and the application requires a
 *    small container, schedule it into the org-specific queue.
 * c) If dynamic resource allocation is disabled for the selected Spark version, schedule the application into
 *    the default queue via a default conf (spark.yarn.queue) in spark-defaults.conf.
 * @param argArray the Spark submit arguments
 * @return the (possibly modified) argument array
 */
protected static String[] handleQueueEnforcement(String[] argArray) {
    SparkConf sparkConf = getSparkProperties();
    Configuration conf = new Configuration();
    int queueParameterIndex = getUserSpecifiedQueueParameterIndex(argArray);
    boolean requiredSparkDefaultQueue = false;
    if (sparkConf.getBoolean(SPARK_CONF_DYNAMIC_ALLOC_ENABLED, false)) {
        if (isLargeContainerRequired(argArray, conf, sparkConf)) {
            // Case A
            requiredSparkDefaultQueue = true;
            logger.info("Spark application requires large containers. Scheduling this application into the "
                    + "default queue by a default conf (spark.yarn.queue) in spark-defaults.conf.");
        } else {
            // Case B
            logger.info("Dynamic allocation is enabled for the selected Spark version and the application "
                    + "requires a small container. Hence, scheduling this application into the org-specific queue.");
            if (queueParameterIndex == -1) {
                LinkedList<String> argList = new LinkedList<>(Arrays.asList(argArray));
                argList.addFirst(SPARK_CONF_QUEUE + "=" + DEFAULT_QUEUE);
                argList.addFirst(SparkJobArg.SPARK_CONF_PREFIX.sparkParamName);
                argArray = argList.toArray(new String[argList.size()]);
            }
        }
    } else {
        // Case C
        logger.info("The Spark version selected for this application doesn't support dynamic allocation. "
                + "Scheduling this application into the default queue by a default conf (spark.yarn.queue) "
                + "in spark-defaults.conf.");
        requiredSparkDefaultQueue = true;
    }
    if (queueParameterIndex != -1 && requiredSparkDefaultQueue) {
        logger.info("Azkaban enforces the spark.yarn.queue setting. Ignoring user param: "
                + argArray[queueParameterIndex] + " " + argArray[queueParameterIndex + 1]);
        argArray[queueParameterIndex] = null;
        argArray[queueParameterIndex + 1] = null;
    }
    return argArray;
}
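The Case B branch shows a pattern that recurs in the examples below: wrap the argument array in a LinkedList, push new entries onto the front with addFirst (in reverse order, since each call becomes the new head), then convert back with toArray. A self-contained sketch, with "--conf" standing in for SparkJobArg.SPARK_CONF_PREFIX.sparkParamName:

    import java.util.Arrays;
    import java.util.LinkedList;

    class PrependArgs {
        static String[] prependConf(String[] argArray, String confPair) {
            LinkedList<String> argList = new LinkedList<>(Arrays.asList(argArray));
            // Push in reverse order: the final layout is ["--conf", confPair, ...argArray].
            argList.addFirst(confPair);
            argList.addFirst("--conf");
            return argList.toArray(new String[argList.size()]);
        }

        public static void main(String[] args) {
            String[] out = prependConf(new String[] { "--class", "Main" }, "spark.yarn.queue=default");
            System.out.println(Arrays.toString(out)); // [--conf, spark.yarn.queue=default, --class, Main]
        }
    }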
From source file:gr.abiss.calipso.tiers.specifications.GenericSpecifications.java
/**
 * Get the root predicate, either a conjunction or a disjunction.
 * @param clazz the entity type to query for
 * @param searchTerms the search terms to match
 * @param root the criteria root
 * @param cb the criteria builder
 * @return the resulting predicate
 */
protected static Predicate buildRootPredicate(final Class clazz, final Map<String, String[]> searchTerms,
        Root<Persistable> root, CriteriaBuilder cb) {
    // build a list of criteria/predicates
    LinkedList<Predicate> predicates = buildSearchPredicates(clazz, searchTerms, root, cb);
    // wrap the list in an AND/OR junction
    Predicate predicate;
    if (searchTerms.containsKey(SEARCH_MODE) && searchTerms.get(SEARCH_MODE)[0].equalsIgnoreCase(OR)
            // a disjunction of zero predicates is false, so...
            && predicates.size() > 0) {
        predicate = cb.or(predicates.toArray(new Predicate[predicates.size()]));
    } else {
        predicate = cb.and(predicates.toArray(new Predicate[predicates.size()]));
    }
    // return the resulting junction
    return predicate;
}
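Here the typed toArray call exists to feed a varargs API: CriteriaBuilder.or(Predicate...) and CriteriaBuilder.and(Predicate...) accept an array under the hood, so predicates.toArray(new Predicate[predicates.size()]) yields exactly the Predicate[] the compiler needs, whereas the no-argument toArray() would return an Object[] and not compile.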
From source file:com.erudika.para.validation.ValidationUtils.java
/**
 * Validates objects using Hibernate Validator.
 * @param content an object to be validated
 * @return an array of error messages, empty if the object is valid
 */
public static String[] validateObject(ParaObject content) {
    if (content == null) {
        return new String[] { "Object cannot be null." };
    }
    LinkedList<String> list = new LinkedList<String>();
    try {
        for (ConstraintViolation<ParaObject> constraintViolation : getValidator().validate(content)) {
            String prop = "'".concat(constraintViolation.getPropertyPath().toString()).concat("'");
            list.add(prop.concat(" ").concat(constraintViolation.getMessage()));
        }
    } catch (Exception e) {
        logger.error(null, e);
    }
    return list.toArray(new String[] {});
}
From source file:azkaban.jobtype.HadoopSecureSparkWrapper.java
protected static String[] handleNodeLabeling(String[] argArray) {
    // HadoopSparkJob sets an env var on this process if the node labeling policy is enabled for the Spark jobtype.
    // We also detect whether the Yarn cluster settings have node labeling enabled.
    // Enabling the node labeling policy for the Spark job type is different from enabling the node labeling
    // feature for Yarn. This config inside the Spark job type enforces the node labeling feature for all
    // Spark applications submitted via the Azkaban Spark job type.
    Configuration conf = new Configuration();
    boolean nodeLabelingYarn = conf.getBoolean(YARN_CONF_NODE_LABELING_ENABLED, false);
    String nodeLabelingProp = System.getenv(HadoopSparkJob.SPARK_NODE_LABELING_ENV_VAR);
    boolean nodeLabelingPolicy = nodeLabelingProp != null && nodeLabelingProp.equals(Boolean.TRUE.toString());
    String autoNodeLabelProp = System.getenv(HadoopSparkJob.SPARK_AUTO_NODE_LABELING_ENV_VAR);
    boolean autoNodeLabeling = autoNodeLabelProp != null && autoNodeLabelProp.equals(Boolean.TRUE.toString());
    String desiredNodeLabel = System.getenv(HadoopSparkJob.SPARK_DESIRED_NODE_LABEL_ENV_VAR);
    SparkConf sparkConf = getSparkProperties();
    if (nodeLabelingYarn && nodeLabelingPolicy) {
        ignoreUserSpecifiedNodeLabelParameter(argArray, autoNodeLabeling);
        // If auto node labeling is enabled, automatically set the spark.yarn.executor.nodeLabelExpression
        // config based on the user-requested resources.
        if (autoNodeLabeling) {
            if (isLargeContainerRequired(argArray, conf, sparkConf)) {
                LinkedList<String> argList = new LinkedList<String>(Arrays.asList(argArray));
                argList.addFirst(SPARK_EXECUTOR_NODE_LABEL_EXP + "=" + desiredNodeLabel);
                argList.addFirst(SparkJobArg.SPARK_CONF_PREFIX.sparkParamName);
                argArray = argList.toArray(new String[argList.size()]);
            }
        }
    }
    return argArray;
}
From source file:at.bitfire.davdroid.mirakel.resource.LocalCalendar.java
public static LocalCalendar[] findAll(Account account, ContentProviderClient providerClient, Context ctx)
        throws RemoteException {
    @Cleanup
    Cursor cursor = providerClient.query(calendarsURI(account),
            new String[] { Calendars._ID, Calendars.NAME },
            Calendars.DELETED + "=0 AND " + Calendars.SYNC_EVENTS + "=1", null, null);
    LinkedList<LocalCalendar> calendars = new LinkedList<LocalCalendar>();
    while (cursor != null && cursor.moveToNext())
        calendars.add(new LocalCalendar(account, providerClient, cursor.getInt(0), cursor.getString(1), ctx));
    return calendars.toArray(new LocalCalendar[0]);
}
From source file:AndroidUninstallStock.java
public static void reboot(String adb, String... args) throws Exception {
    LinkedList<String> adb_and_args = new LinkedList<String>(Arrays.asList(args));
    adb_and_args.addFirst(adb);
    run(adb_and_args.toArray(new String[0]));
    String deverror, lastdeverror = "";
    while (!(deverror = getDeviceStatus(adb, null)).isEmpty()) {
        if (!lastdeverror.equals(deverror)) {
            System.out.println(deverror);
            lastdeverror = deverror;
            System.out.println("Retry... For Cancel press CTRL+C");
        }
        Thread.sleep(1000);
    }
}
From source file:com.granita.icloudcalsync.resource.LocalCalendar.java
public static LocalCalendar[] findAll(Account account, ContentProviderClient providerClient)
        throws RemoteException {
    @Cleanup
    Cursor cursor = providerClient.query(calendarsURI(account),
            new String[] { Calendars._ID, Calendars.NAME },
            Calendars.DELETED + "=0 AND " + Calendars.SYNC_EVENTS + "=1", null, null);
    LinkedList<LocalCalendar> calendars = new LinkedList<>();
    while (cursor != null && cursor.moveToNext())
        calendars.add(new LocalCalendar(account, providerClient, cursor.getInt(0), cursor.getString(1)));
    return calendars.toArray(new LocalCalendar[0]);
}
From source file:at.bitfire.davdroid.resource.LocalCalendar.java
public static LocalCalendar[] findAll(Account account, ContentProviderClient providerClient)
        throws RemoteException {
    @Cleanup
    Cursor cursor = providerClient.query(calendarsURI(account),
            new String[] { Calendars._ID, Calendars.NAME },
            Calendars.DELETED + "=0 AND " + Calendars.SYNC_EVENTS + "=1", null, null);
    LinkedList<LocalCalendar> calendars = new LinkedList<>();
    while (cursor != null && cursor.moveToNext())
        calendars.add(new LocalCalendar(account, providerClient, cursor.getInt(0), cursor.getString(1)));
    return calendars.toArray(new LocalCalendar[calendars.size()]);
}
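The three findAll variants above differ mainly in the array they pass to toArray: new LocalCalendar[0] versus new LocalCalendar[calendars.size()]. Both are correct; the zero-length form lets toArray allocate the result, while the pre-sized form fills the caller's array:

    calendars.toArray(new LocalCalendar[0]);                 // toArray allocates the result
    calendars.toArray(new LocalCalendar[calendars.size()]);  // caller pre-allocates; same contents

On modern JVMs the zero-length variant is generally measured to be at least as fast, and it reads more simply, so it makes a reasonable default.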
From source file:dk.netarkivet.common.utils.ProcessUtils.java
/**
 * Runs a system process (Unix sort) to sort a file.
 * @param inputFile the input file
 * @param outputFile the output file
 * @param tempDir the directory in which to store temporary files (null for the system default)
 * @param crawllogSorting whether to sort crawllog style ("-k 4b")
 * @return the process exit code
 */
public static int runUnixSort(File inputFile, File outputFile, File tempDir, boolean crawllogSorting) {
    String[] environment = new String[] { "LANG=C" };
    LinkedList<String> cmdAndParams = new LinkedList<String>();
    cmdAndParams.add("sort");
    cmdAndParams.add(inputFile.getAbsolutePath());
    if (crawllogSorting) {
        // -k 4b means sort on the fourth field (counting from 1), ignoring leading blanks
        cmdAndParams.add("-k");
        cmdAndParams.add("4b");
    }
    // -o means output to (file)
    cmdAndParams.add("-o");
    cmdAndParams.add(outputFile.getAbsolutePath());
    if (tempDir != null) {
        // -T configures where to store temporary files
        cmdAndParams.add("-T");
        cmdAndParams.add(tempDir.getAbsolutePath());
    }
    return ProcessUtils.runProcess(environment,
            (String[]) cmdAndParams.toArray(new String[cmdAndParams.size()]));
}
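One detail worth noting: the (String[]) cast on the final toArray call is redundant, since the generic toArray(T[]) overload already returns String[] when given a new String[...]; it reads like a leftover from the pre-generics collections API, where toArray(Object[]) returned Object[] and a cast was required.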