List of usage examples for java.util.LinkedList.addFirst
public void addFirst(E e)
From source file:Main.java
import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedList;

public static <T> Collection<T> reverse(final Collection<T> collection) {
    final LinkedList<T> newCollection = new LinkedList<>();
    final Iterator<T> i = collection.iterator();
    while (i.hasNext()) {
        newCollection.addFirst(i.next());
    }
    return newCollection;
}
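A minimal call-site sketch (hypothetical, not part of the original source file; assumes java.util.Arrays is imported and reverse is in scope): because each element is prepended with addFirst, the returned LinkedList iterates in reverse input order.

Collection<Integer> reversed = reverse(Arrays.asList(1, 2, 3, 4));
System.out.println(reversed); // prints [4, 3, 2, 1]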
From source file:Main.java
import java.io.File;
import java.util.LinkedList;
import java.util.List;

/**
 * Returns the parent folders of this file, starting from the root.
 *
 * @param file the file whose parent folders are returned
 * @return the parent folders of this file, starting from the root
 */
public static List<File> getParents(File file) {
    LinkedList<File> parents = new LinkedList<>();
    File parent = file.getParentFile();
    while (null != parent) {
        parents.addFirst(parent);
        parent = parent.getParentFile();
    }
    return parents;
}
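A usage sketch (the path is illustrative; assumes the method is in scope): addFirst puts each ancestor in front of the ones found earlier, so the list runs from the filesystem root down to the immediate parent.

List<File> parents = getParents(new File("/tmp/a/b/c.txt"));
System.out.println(parents); // prints [/, /tmp, /tmp/a, /tmp/a/b] on a Unix-like filesystem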
From source file:CollectionUtilities.java
import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedList;

// Raw-type (pre-generics) variant of the reverse helper shown above.
public static Collection reverse(Collection collection) {
    LinkedList newCollection = new LinkedList();
    Iterator i = collection.iterator();
    while (i.hasNext()) {
        newCollection.addFirst(i.next());
    }
    return newCollection;
}
From source file:Main.java
import java.lang.reflect.Array;
import java.lang.reflect.Field;
import java.util.Collection;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Map;
import java.util.Set;

public static int deepHashCode(Object obj) {
    Set<Object> visited = new HashSet<>();
    LinkedList<Object> stack = new LinkedList<>();
    stack.addFirst(obj);
    int hash = 0;
    while (!stack.isEmpty()) {
        obj = stack.removeFirst();
        if (obj == null || visited.contains(obj)) {
            continue;
        }
        visited.add(obj);
        if (obj.getClass().isArray()) {
            int len = Array.getLength(obj);
            for (int i = 0; i < len; i++) {
                stack.addFirst(Array.get(obj, i));
            }
            continue;
        }
        if (obj instanceof Collection) {
            stack.addAll(0, (Collection<?>) obj);
            continue;
        }
        if (obj instanceof Map) {
            stack.addAll(0, ((Map<?, ?>) obj).keySet());
            stack.addAll(0, ((Map<?, ?>) obj).values());
            continue;
        }
        if (hasCustomHashCode(obj.getClass())) {
            hash += obj.hashCode();
            continue;
        }
        // hasCustomHashCode and getDeepDeclaredFields are helpers defined elsewhere in the source file
        Collection<Field> fields = getDeepDeclaredFields(obj.getClass());
        for (Field field : fields) {
            try {
                stack.addFirst(field.get(obj));
            } catch (Exception ignored) {
            }
        }
    }
    return hash;
}
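The snippet depends on two helpers defined elsewhere in its source file (hasCustomHashCode, getDeepDeclaredFields), so it isn't runnable alone. The LinkedList idiom it builds on, addFirst/removeFirst used as a LIFO stack for iterative traversal, can be sketched in isolation; everything below is a hypothetical demo, not code from the listing.

import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;

public class StackTraversalDemo {
    // Counts non-list leaves iteratively; addFirst pushes, removeFirst pops, so the LinkedList acts as a stack.
    static int countLeaves(Object root) {
        LinkedList<Object> stack = new LinkedList<>();
        stack.addFirst(root);
        int leaves = 0;
        while (!stack.isEmpty()) {
            Object current = stack.removeFirst();
            if (current instanceof List) {
                for (Object child : (List<?>) current) {
                    stack.addFirst(child);
                }
            } else {
                leaves++;
            }
        }
        return leaves;
    }

    public static void main(String[] args) {
        Object tree = Arrays.asList(1, Arrays.asList(2, 3), Arrays.asList(Arrays.asList(4)));
        System.out.println(countLeaves(tree)); // prints 4
    }
}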
From source file:com.github.jknack.amd4j.Amd4j.java
/**
 * Resolve a candidate uri to an existing uri. We need this because dependencies may or may not
 * have a file extension, or they might have a '.' in the file's name.
 *
 * @param loader The resource loader.
 * @param uri The candidate uri.
 * @return An existing uri for the candidate uri.
 * @throws IOException If the uri can't be resolved.
 */
private static ResourceURI resolve(final ResourceLoader loader, final ResourceURI uri) throws IOException {
    String path = uri.getPath();
    LinkedList<ResourceURI> candidates = new LinkedList<ResourceURI>();
    candidates.add(uri);
    ResourceURI alternative = ResourceURI.create(uri.toString() + ".js");
    if (isEmpty(getExtension(path))) {
        candidates.addFirst(alternative);
    } else {
        candidates.addLast(alternative);
    }
    for (ResourceURI candidate : candidates) {
        if (loader.exists(candidate)) {
            return candidate;
        }
    }
    // force a file not found exception
    throw new FileNotFoundException(uri.toString());
}
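The addFirst/addLast choice is what orders the lookup: the '.js' variant is tried first only when the original path lacks an extension. A standalone sketch of the same prioritization with plain strings (the paths and the simplified dot-check are illustrative; the real code uses commons-style isEmpty/getExtension helpers):

LinkedList<String> candidates = new LinkedList<>();
String path = "views/home"; // no file extension
candidates.add(path);
String alternative = path + ".js";
if (path.lastIndexOf('.') < 0) {
    candidates.addFirst(alternative); // prefer "views/home.js"
} else {
    candidates.addLast(alternative); // prefer the literal path
}
System.out.println(candidates); // prints [views/home.js, views/home]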
From source file:och.service.props.impl.FileProps.java
public static List<FileProps> createFileProps(Collection<String> paths) {
    LinkedList<FileProps> list = new LinkedList<>();
    for (String path : paths) {
        File file = new File(path);
        if (!file.exists()) {
            log.error("can't find file by path: " + path);
            continue;
        }
        list.addFirst(new FileProps(file));
    }
    return list;
}
From source file:azkaban.jobtype.HadoopSecureSparkWrapper.java
protected static String[] handleNodeLabeling(String[] argArray) {
    // HadoopSparkJob sets an env var on this process if the node labeling policy is enabled for the Spark jobtype.
    // We also detect whether the Yarn cluster settings have node labeling enabled.
    // Enabling the node labeling policy for the Spark job type is different from enabling the node labeling
    // feature for Yarn. This config inside the Spark job type enforces the node labeling feature for all
    // Spark applications submitted via the Azkaban Spark job type.
    Configuration conf = new Configuration();
    boolean nodeLabelingYarn = conf.getBoolean(YARN_CONF_NODE_LABELING_ENABLED, false);
    String nodeLabelingProp = System.getenv(HadoopSparkJob.SPARK_NODE_LABELING_ENV_VAR);
    boolean nodeLabelingPolicy = nodeLabelingProp != null && nodeLabelingProp.equals(Boolean.TRUE.toString());
    String autoNodeLabelProp = System.getenv(HadoopSparkJob.SPARK_AUTO_NODE_LABELING_ENV_VAR);
    boolean autoNodeLabeling = autoNodeLabelProp != null && autoNodeLabelProp.equals(Boolean.TRUE.toString());
    String desiredNodeLabel = System.getenv(HadoopSparkJob.SPARK_DESIRED_NODE_LABEL_ENV_VAR);
    SparkConf sparkConf = getSparkProperties();
    if (nodeLabelingYarn && nodeLabelingPolicy) {
        ignoreUserSpecifiedNodeLabelParameter(argArray, autoNodeLabeling);
        // If auto node labeling is enabled, automatically set the spark.yarn.executor.nodeLabelExpression
        // config based on the user-requested resources.
        if (autoNodeLabeling) {
            if (isLargeContainerRequired(argArray, conf, sparkConf)) {
                LinkedList<String> argList = new LinkedList<String>(Arrays.asList(argArray));
                argList.addFirst(SPARK_EXECUTOR_NODE_LABEL_EXP + "=" + desiredNodeLabel);
                argList.addFirst(SparkJobArg.SPARK_CONF_PREFIX.sparkParamName);
                argArray = argList.toArray(new String[argList.size()]);
            }
        }
    }
    return argArray;
}
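The prepend pattern in isolation: the value is pushed first and the flag second, so the flag ends up at index 0 immediately followed by its value. The argument strings below are illustrative stand-ins for Azkaban's SparkJobArg constants, not the project's actual values; assumes java.util.Arrays and java.util.LinkedList are imported.

String[] argArray = { "--class", "com.example.App", "app.jar" };
LinkedList<String> argList = new LinkedList<>(Arrays.asList(argArray));
argList.addFirst("spark.yarn.executor.nodeLabelExpression=large"); // assumed label value
argList.addFirst("--conf");
argArray = argList.toArray(new String[0]);
System.out.println(Arrays.toString(argArray));
// prints [--conf, spark.yarn.executor.nodeLabelExpression=large, --class, com.example.App, app.jar]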
From source file:azkaban.jobtype.HadoopSecureSparkWrapper.java
/**
 * This method is used to enforce a queue for the Spark application. The rules are explained below.
 * a) If dynamic resource allocation is enabled for the selected Spark version and the application requires a large
 *    container, then schedule it into the default queue via a default conf (spark.yarn.queue) in spark-defaults.conf.
 * b) If dynamic resource allocation is enabled for the selected Spark version and the application requires a small
 *    container, then schedule it into the Org-specific queue.
 * c) If dynamic resource allocation is disabled for the selected Spark version, then schedule the application into
 *    the default queue via a default conf (spark.yarn.queue) in spark-defaults.conf.
 *
 * @param argArray the spark-submit argument array
 * @return the argument array with the queue parameter enforced
 */
protected static String[] handleQueueEnforcement(String[] argArray) {
    SparkConf sparkConf = getSparkProperties();
    Configuration conf = new Configuration();
    int queueParameterIndex = getUserSpecifiedQueueParameterIndex(argArray);
    boolean requiredSparkDefaultQueue = false;
    if (sparkConf.getBoolean(SPARK_CONF_DYNAMIC_ALLOC_ENABLED, false)) {
        if (isLargeContainerRequired(argArray, conf, sparkConf)) {
            // Case A
            requiredSparkDefaultQueue = true;
            logger.info("Spark application requires large containers. Scheduling this application into the default "
                    + "queue by a default conf (spark.yarn.queue) in spark-defaults.conf.");
        } else {
            // Case B
            logger.info("Dynamic allocation is enabled for the selected Spark version and the application requires "
                    + "a small container. Hence, scheduling this application into the Org-specific queue.");
            if (queueParameterIndex == -1) {
                LinkedList<String> argList = new LinkedList<>(Arrays.asList(argArray));
                argList.addFirst(SPARK_CONF_QUEUE + "=" + DEFAULT_QUEUE);
                argList.addFirst(SparkJobArg.SPARK_CONF_PREFIX.sparkParamName);
                argArray = argList.toArray(new String[argList.size()]);
            }
        }
    } else {
        // Case C
        logger.info("The Spark version selected for this application doesn't support dynamic allocation. Scheduling "
                + "this application into the default queue by a default conf (spark.yarn.queue) in spark-defaults.conf.");
        requiredSparkDefaultQueue = true;
    }
    if (queueParameterIndex != -1 && requiredSparkDefaultQueue) {
        logger.info("Azkaban enforces the spark.yarn.queue queue. Ignoring user param: "
                + argArray[queueParameterIndex] + " " + argArray[queueParameterIndex + 1]);
        argArray[queueParameterIndex] = null;
        argArray[queueParameterIndex + 1] = null;
    }
    return argArray;
}
From source file:org.search.niem.uml.merge.NamespaceMergeUtil.java
private static <T extends EObject> T findEquivalentNamedElement(final NamedElement referenceLibraryElement,
        final Package inThePIM) {
    // first find the target namespace of the referenceLibraryElement
    final Package namespaceInTheReferenceLibrary = findNearestNiemNamespacePackage(
            referenceLibraryElement.getNearestPackage());
    if (namespaceInTheReferenceLibrary == null) {
        return null;
    }
    // find the Package with the same targetNamespace in the PIM
    final Package namespaceInThePIM = findPackageWithTargetNamespace(inThePIM,
            getTargetNamespace(namespaceInTheReferenceLibrary));
    if (namespaceInThePIM == null) {
        return null;
    }
    final LinkedList<Element> pathToReferenceLibraryElement = new LinkedList<>();
    NamedElement next = referenceLibraryElement;
    while (next != namespaceInTheReferenceLibrary) {
        pathToReferenceLibraryElement.addFirst(next);
        next = next.getNamespace();
    }
    return findNamedElement(namespaceInThePIM, pathToReferenceLibraryElement);
}
From source file:org.codehaus.groovy.grails.commons.GrailsDomainConfigurationUtil.java
import java.util.LinkedList;

public static LinkedList<?> getSuperClassChain(Class<?> theClass) {
    LinkedList<Class<?>> classChain = new LinkedList<Class<?>>();
    Class<?> clazz = theClass;
    while (clazz != Object.class && clazz != null) {
        classChain.addFirst(clazz);
        clazz = clazz.getSuperclass();
    }
    return classChain;
}
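A quick check of the ordering (assumes the method is in scope): addFirst prepends each class, so the chain runs from the most general superclass below Object down to theClass itself.

System.out.println(getSuperClassChain(java.util.ArrayList.class));
// prints [class java.util.AbstractCollection, class java.util.AbstractList, class java.util.ArrayList]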