List of usage examples for java.util.LinkedList.addFirst
public void addFirst(E e)
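addFirst inserts the given element at the head of the list, so the element most recently added this way becomes the first element returned by getFirst or by an iterator. The examples below are taken from open-source projects; first, a minimal standalone sketch of the head-insertion behaviour (the class name AddFirstDemo is illustrative only, not from any of the projects):

import java.util.LinkedList;

public class AddFirstDemo {
    public static void main(String[] args) {
        LinkedList<String> list = new LinkedList<String>();
        list.add("b");
        list.add("c");
        list.addFirst("a");                  // list is now [a, b, c]
        System.out.println(list.getFirst()); // prints "a"
        System.out.println(list);            // prints [a, b, c]
    }
}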
From source file:com.zimbra.cs.mime.Mime.java
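Here a LinkedList is used as a work queue while walking a MIME tree; when a part is an embedded rfc822 message, addFirst pushes its child part to the head of the queue so it is expanded before the parts that were already queued.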
private static List<MPartInfo> listParts(MimePart root, String defaultCharset)
        throws MessagingException, IOException {
    List<MPartInfo> parts = new ArrayList<MPartInfo>();
    LinkedList<MPartInfo> queue = new LinkedList<MPartInfo>();
    queue.add(generateMPartInfo(root, null, "", 0));

    MimeMultipart emptyMultipart = null;
    while (!queue.isEmpty()) {
        MPartInfo mpart = queue.removeFirst();
        MimePart mp = mpart.getMimePart();
        parts.add(mpart);

        String cts = mpart.mContentType;
        boolean isMultipart = cts.startsWith(MimeConstants.CT_MULTIPART_PREFIX);
        boolean isMessage = !isMultipart && cts.equals(MimeConstants.CT_MESSAGE_RFC822);

        if (isMultipart) {
            // IMAP part numbering is screwy: top-level multipart doesn't get a number
            String prefix = mpart.mPartName.length() > 0 ? (mpart.mPartName + '.') : "";
            if (mp instanceof MimeMessage) {
                mpart.mPartName = prefix + "TEXT";
            }
            MimeMultipart multi = getMultipartContent(mp, cts);
            if (multi != null) {
                if (multi.getCount() == 0 && LC.mime_promote_empty_multipart.booleanValue()) {
                    if (emptyMultipart == null) {
                        emptyMultipart = multi;
                    }
                    if (MimeConstants.CT_MULTIPART_APPLEDOUBLE.equalsIgnoreCase(getContentType(mp))) {
                        ZimbraLog.misc.debug(
                                "appledouble with no children; assuming it is malformed and really applefile");
                        mpart.mContentType = mpart.mContentType.replace(MimeConstants.CT_MULTIPART_APPLEDOUBLE,
                                MimeConstants.CT_APPLEFILE);
                    }
                }
                mpart.mChildren = new ArrayList<MPartInfo>(multi.getCount());
                for (int i = 1; i <= multi.getCount(); i++) {
                    mpart.mChildren
                            .add(generateMPartInfo((MimePart) multi.getBodyPart(i - 1), mpart, prefix + i, i));
                }
                queue.addAll(0, mpart.mChildren);
            }
        } else if (isMessage) {
            MimeMessage mm = getMessageContent(mp);
            if (mm != null) {
                MPartInfo child = generateMPartInfo(mm, mpart, mpart.mPartName, 0);
                queue.addFirst(child);
                mpart.mChildren = Arrays.asList(child);
            }
        } else {
            // nothing to do at this stage
        }
    }

    if (emptyMultipart != null && parts.size() == 1) {
        String text = emptyMultipart.getPreamble();
        if (!StringUtil.isNullOrEmpty(text)) {
            ZimbraLog.misc
                    .debug("single multipart with no children. promoting the preamble into a single text part");
            parts.remove(0);
            MPartInfo mpart = new MPartInfo();
            ZMimeBodyPart mp = new ZMimeBodyPart();
            mp.setText(text, defaultCharset);
            mpart.mPart = mp;
            mpart.mContentType = mp.getContentType();
            mpart.mDisposition = "";
            mpart.mPartName = "1";
            parts.add(mpart);
        }
    }
    return parts;
}
From source file:com.crushpaper.DbLogic.java
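This helper reconstructs child ordering from a next-to-previous id map by walking backwards from the last child; each id found is pushed with addFirst, so the returned list ends up in forward order.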
/** Helper method. Returns the children of a restored entry in order. */
private LinkedList<String> getRealChildIdsInOrderForUserRestore(ChildrenInfo childrenInfo,
        HashMap<String, String> realEntryIdToRestoredEntryId, Errors errors) {
    LinkedList<String> realChildIdsInOrder = new LinkedList<String>();
    String realCurrentId = childrenInfo.lastRealId;
    if (realCurrentId == null) {
        Errors.add(errors, errorMessages.errorNoLastWasFound());
        return null;
    }

    childrenInfo.restoredNextToRealPreviousIds.remove(null);
    realChildIdsInOrder.addFirst(realCurrentId);

    int i = 0, numChildren = childrenInfo.restoredNextToRealPreviousIds.entrySet().size();
    for (; i < numChildren; ++i) {
        String restoredCurrentId = realEntryIdToRestoredEntryId.get(realCurrentId);
        if (restoredCurrentId == null) {
            Errors.add(errors, errorMessages.errorRestoredIdWasNotFound(realCurrentId));
            return null;
        }

        realCurrentId = childrenInfo.restoredNextToRealPreviousIds.get(restoredCurrentId);
        if (realCurrentId == null) {
            Errors.add(errors, errorMessages.errorRealIdWasNotFound(restoredCurrentId));
            return null;
        }

        // Eliminate the potential for duplicates.
        childrenInfo.restoredNextToRealPreviousIds.remove(restoredCurrentId);

        realChildIdsInOrder.addFirst(realCurrentId);
    }

    return realChildIdsInOrder;
}
From source file:org.rhq.enterprise.server.resource.ResourceManagerBean.java
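The resource lineage is built by walking up the parent chain and prepending each parent with addFirst, so the returned list runs from the root ancestor down to the requested resource.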
public List<Resource> getResourceLineage(int resourceId) {
    LinkedList<Resource> resourceLineage = new LinkedList<Resource>();
    Resource resource = entityManager.find(Resource.class, resourceId);
    if (resource == null) {
        throw new ResourceNotFoundException(resourceId);
    }
    resourceLineage.add(resource);
    int childResourceId = resourceId;
    Resource parent;
    while ((parent = getParentResource(childResourceId)) != null) {
        resourceLineage.addFirst(parent);
        childResourceId = parent.getId(); // This also ensures Hibernate actually populates parent's fields.
    }
    return resourceLineage;
}
From source file:com.oltpbenchmark.benchmarks.auctionmark.AuctionMarkProfile.java
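After picking a random item, the profile removes it from the list and re-inserts it at the head with addFirst, keeping the list in MRU-to-LRU order.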
/**
 * @param itemSet
 * @param needCurrentPrice
 * @param needFutureEndDate TODO
 * @return
 */
private ItemInfo getRandomItem(LinkedList<ItemInfo> itemSet, boolean needCurrentPrice,
        boolean needFutureEndDate) {
    Timestamp currentTime = this.updateAndGetCurrentTime();
    int num_items = itemSet.size();
    int idx = -1;
    ItemInfo itemInfo = null;

    if (LOG.isTraceEnabled())
        LOG.trace(String.format("Getting random ItemInfo [numItems=%d, currentTime=%s, needCurrentPrice=%s]",
                num_items, currentTime, needCurrentPrice));

    long tries = 1000;
    tmp_seenItems.clear();
    while (num_items > 0 && tries-- > 0 && tmp_seenItems.size() < num_items) {
        idx = this.rng.nextInt(num_items);
        ItemInfo temp = itemSet.get(idx);
        assert (temp != null);
        if (tmp_seenItems.contains(temp))
            continue;
        tmp_seenItems.add(temp);

        // Needs to have an embedded currentPrice
        if (needCurrentPrice && temp.hasCurrentPrice() == false) {
            continue;
        }

        // If they want an item that is ending in the future, then we compare it with
        // the current timestamp
        if (needFutureeEndDate_PLACEHOLDER) {
        }

        // Uniform
        itemInfo = temp;
        break;
    } // WHILE
    if (itemInfo == null) {
        if (LOG.isDebugEnabled())
            LOG.debug("Failed to find ItemInfo [hasCurrentPrice=" + needCurrentPrice + ", needFutureEndDate="
                    + needFutureEndDate + "]");
        return (null);
    }
    assert (idx >= 0);

    // Take the item out of the set and insert back to the front
    // This is so that we can maintain MRU->LRU ordering
    itemSet.remove(idx);
    itemSet.addFirst(itemInfo);

    if (needCurrentPrice) {
        assert (itemInfo.hasCurrentPrice()) : "Missing currentPrice for " + itemInfo;
        assert (itemInfo.getCurrentPrice() > 0) : "Negative currentPrice '" + itemInfo.getCurrentPrice()
                + "' for " + itemInfo;
    }
    if (needFutureEndDate) {
        assert (itemInfo.hasEndDate()) : "Missing endDate for " + itemInfo;
    }
    return itemInfo;
}
From source file:org.deeplearning4j.nn.graph.ComputationGraph.java
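To build a partial forward-pass order for pretraining, the method walks the topological ordering backwards and uses addFirst to prepend every vertex that feeds an already-needed vertex, so the pruned list stays topologically sorted.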
/**
 * Pretrain a specified layer with the given MultiDataSetIterator
 *
 * @param layerName Layer name
 * @param iter      Training data
 */
public void pretrainLayer(String layerName, MultiDataSetIterator iter) {
    if (!configuration.isPretrain())
        return;
    if (flattenedGradients == null)
        initGradientsView();

    if (!verticesMap.containsKey(layerName)) {
        throw new IllegalStateException("Invalid vertex name: " + layerName);
    }
    if (!verticesMap.get(layerName).hasLayer()) {
        //No op
        return;
    }

    int layerIndex = verticesMap.get(layerName).getVertexIndex();

    //Need to do partial forward pass. Simply following the topological ordering won't be efficient, as we might
    // end up doing forward pass on layers we don't need to.
    //However, we can start with the topological order, and prune out any layers we don't need to do
    LinkedList<Integer> partialTopoSort = new LinkedList<>();
    Set<Integer> seenSoFar = new HashSet<>();
    partialTopoSort.add(topologicalOrder[layerIndex]);
    seenSoFar.add(topologicalOrder[layerIndex]);
    for (int j = layerIndex - 1; j >= 0; j--) {
        //Do we need to do forward pass on this GraphVertex?
        //If it is input to any other layer we need, then yes. Otherwise: no
        VertexIndices[] outputsTo = vertices[topologicalOrder[j]].getOutputVertices();
        boolean needed = false;
        for (VertexIndices vi : outputsTo) {
            if (seenSoFar.contains(vi.getVertexIndex())) {
                needed = true;
                break;
            }
        }
        if (needed) {
            partialTopoSort.addFirst(topologicalOrder[j]);
            seenSoFar.add(topologicalOrder[j]);
        }
    }

    int[] fwdPassOrder = new int[partialTopoSort.size()];
    int k = 0;
    for (Integer g : partialTopoSort)
        fwdPassOrder[k++] = g;

    GraphVertex gv = vertices[fwdPassOrder[fwdPassOrder.length - 1]];
    Layer layer = gv.getLayer();

    if (!iter.hasNext() && iter.resetSupported()) {
        iter.reset();
    }

    while (iter.hasNext()) {
        MultiDataSet multiDataSet = iter.next();

        setInputs(multiDataSet.getFeatures());

        for (int j = 0; j < fwdPassOrder.length - 1; j++) {
            GraphVertex current = vertices[fwdPassOrder[j]];
            if (current.isInputVertex()) {
                VertexIndices[] inputsTo = current.getOutputVertices();
                INDArray input = inputs[current.getVertexIndex()];

                for (VertexIndices v : inputsTo) {
                    int vIdx = v.getVertexIndex();
                    int vIdxInputNum = v.getVertexEdgeNumber();
                    //This input: the 'vIdxInputNum'th input to vertex 'vIdx'
                    vertices[vIdx].setInput(vIdxInputNum, input.dup()); //TODO When to dup?
                }
            } else {
                //Do forward pass:
                INDArray out = current.doForward(true);

                //Now, set the inputs for the next vertices:
                VertexIndices[] outputsTo = current.getOutputVertices();
                if (outputsTo != null) {
                    for (VertexIndices v : outputsTo) {
                        int vIdx = v.getVertexIndex();
                        int inputNum = v.getVertexEdgeNumber();
                        //This (jth) connection from the output: is the 'inputNum'th input to vertex 'vIdx'
                        vertices[vIdx].setInput(inputNum, out);
                    }
                }
            }
        }
        //At this point: have done all of the required forward pass stuff. Can now pretrain layer on current input
        layer.fit(gv.getInputs()[0]);
        layer.conf().setPretrain(false);
    }
}
From source file:net.spfbl.data.Block.java
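While stripping leading labels off a domain or hostname, each successively shorter suffix is prepended with addFirst, so the candidate list handed to the REGEX check runs from the most general suffix to the most specific token.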
public static String find(String userEmail, String token, boolean findDNSBL, boolean findREGEX,
        boolean findWHOIS, boolean autoBlock) {
    TreeSet<String> whoisSet = new TreeSet<String>();
    LinkedList<String> regexList = new LinkedList<String>();
    if (token == null) {
        return null;
    } else if (Domain.isEmail(token)) {
        String sender = token.toLowerCase();
        int index1 = sender.indexOf('@');
        int index2 = sender.lastIndexOf('@');
        String part = sender.substring(0, index1 + 1);
        String senderDomain = sender.substring(index2);
        if (SET.contains(sender)) {
            return sender;
        } else if (userEmail != null && SET.contains(userEmail + ':' + sender)) {
            return userEmail + ':' + sender;
        } else if (SET.contains(part)) {
            return part;
        } else if (userEmail != null && SET.contains(userEmail + ':' + part)) {
            return userEmail + ':' + part;
        } else if (SET.contains(senderDomain)) {
            return senderDomain;
        } else if (userEmail != null && SET.contains(userEmail + ':' + senderDomain)) {
            return userEmail + ':' + senderDomain;
        } else {
            int index3 = senderDomain.length();
            while ((index3 = senderDomain.lastIndexOf('.', index3 - 1)) > index2) {
                String subdomain = senderDomain.substring(0, index3 + 1);
                if (SET.contains(subdomain)) {
                    return subdomain;
                } else if (userEmail != null && SET.contains(userEmail + ':' + subdomain)) {
                    return userEmail + ':' + subdomain;
                }
            }
            String host = '.' + senderDomain.substring(1);
            do {
                int index = host.indexOf('.') + 1;
                host = host.substring(index);
                String token2 = '.' + host;
                if (SET.contains(token2)) {
                    return token2;
                } else if (userEmail != null && SET.contains(userEmail + ':' + token2)) {
                    return userEmail + ':' + token2;
                }
                regexList.addFirst(token2);
            } while (host.contains("."));
            int index4 = sender.length();
            while ((index4 = sender.lastIndexOf('.', index4 - 1)) > index2) {
                String subsender = sender.substring(0, index4 + 1);
                if (SET.contains(subsender)) {
                    return subsender;
                } else if (userEmail != null && SET.contains(userEmail + ':' + subsender)) {
                    return userEmail + ':' + subsender;
                }
            }
        }
        if (senderDomain.endsWith(".br")) {
            whoisSet.add(senderDomain);
        }
        regexList.add(sender);
    } else if (Subnet.isValidIP(token)) {
        token = Subnet.normalizeIP(token);
        String cidr;
        String dnsbl;
        if (SET.contains(token)) {
            return token;
        } else if (userEmail != null && SET.contains(userEmail + ':' + token)) {
            return userEmail + ':' + token;
        } else if ((cidr = CIDR.get(userEmail, token)) != null) {
            return cidr;
        } else if (findDNSBL && (dnsbl = DNSBL.get(userEmail, token)) != null) {
            return dnsbl;
        }
        Reverse reverse = Reverse.get(token);
        if (reverse != null) {
            for (String host : reverse.getAddressSet()) {
                String block = find(userEmail, host, findDNSBL, findREGEX, findWHOIS, autoBlock);
                if (block != null) {
                    return block;
                }
            }
        }
        regexList.add(token);
    } else if (Domain.isHostname(token)) {
        token = Domain.normalizeHostname(token, true);
        String host = token;
        do {
            int index = host.indexOf('.') + 1;
            host = host.substring(index);
            String token2 = '.' + host;
            if (SET.contains(token2)) {
                return token2;
            } else if (userEmail != null && SET.contains(userEmail + ':' + token2)) {
                return userEmail + ':' + token2;
            }
            regexList.addFirst(token2);
        } while (host.contains("."));
        if (token.endsWith(".br")) {
            whoisSet.add(token);
        }
    } else {
        regexList.add(token);
    }
    if (findREGEX) {
        try {
            // Check a REGEX criterion.
            String regex;
            if ((regex = REGEX.get(userEmail, regexList, autoBlock)) != null) {
                return regex;
            }
        } catch (Exception ex) {
            Server.logError(ex);
        }
    }
    if (findWHOIS) {
        try {
            // Check WHOIS criteria.
            String whois;
            if ((whois = WHOIS.get(userEmail, whoisSet, autoBlock)) != null) {
                return whois;
            }
        } catch (Exception ex) {
            Server.logError(ex);
        }
    }
    return null;
}
From source file:org.nuxeo.ecm.platform.routing.core.impl.GraphRunner.java
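The workflow engine keeps pending graph nodes in a LinkedList used as a queue; when a node changes state and must be evaluated again, addFirst pushes it back onto the head so it is re-processed before the other pending nodes.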
/**
 * Runs the graph starting with the given node.
 *
 * @param graph the graph
 * @param initialNode the initial node to run
 */
protected void runGraph(CoreSession session, DocumentRouteElement element, GraphNode initialNode)
        throws DocumentRouteException {
    GraphRoute graph = (GraphRoute) element;
    List<GraphNode> pendingSubRoutes = new LinkedList<GraphNode>();
    LinkedList<GraphNode> pendingNodes = new LinkedList<GraphNode>();
    pendingNodes.add(initialNode);
    boolean done = false;
    int count = 0;
    while (!pendingNodes.isEmpty()) {
        GraphNode node = pendingNodes.pop();
        count++;
        if (count > MAX_LOOPS) {
            throw new DocumentRouteException("Execution is looping, node: " + node);
        }
        State jump = null;
        switch (node.getState()) {
        case READY:
            log.debug("Doing node " + node);
            if (node.isMerge()) {
                jump = State.WAITING;
            } else {
                jump = State.RUNNING_INPUT;
            }
            break;
        case WAITING:
            if (node.canMerge()) {
                recursiveCancelInput(graph, node, pendingNodes);
                jump = State.RUNNING_INPUT;
            }
            // else leave state to WAITING
            break;
        case RUNNING_INPUT:
            node.starting();
            node.executeChain(node.getInputChain());
            if (node.hasTask() || node.hasMultipleTasks()) {
                createTask(session, graph, node); // may create several
                node.setState(State.SUSPENDED);
            }
            if (node.hasSubRoute()) {
                if (!pendingSubRoutes.contains(node)) {
                    pendingSubRoutes.add(node);
                }
                node.setState(State.SUSPENDED);
            }
            if (node.getState() != State.SUSPENDED) {
                jump = State.RUNNING_OUTPUT;
            }
            // else this node is suspended,
            // remove it from queue of nodes to process
            break;
        case SUSPENDED:
            if (node != initialNode) {
                throw new DocumentRouteException("Executing unexpected SUSPENDED state");
            }
            // actor
            NuxeoPrincipal principal = (NuxeoPrincipal) session.getPrincipal();
            String actor = principal.getActingUser();
            node.setLastActor(actor);
            // resuming, variables have been set by resumeGraph
            jump = State.RUNNING_OUTPUT;
            break;
        case RUNNING_OUTPUT:
            node.executeChain(node.getOutputChain());
            List<Transition> trueTrans = node.evaluateTransitions();
            node.ending();
            node.setState(State.READY);
            if (node.isStop()) {
                if (!pendingNodes.isEmpty()) {
                    throw new DocumentRouteException(String
                            .format("Route %s stopped with still pending nodes: %s", graph, pendingNodes));
                }
                done = true;
            } else {
                if (trueTrans.isEmpty()) {
                    throw new DocumentRouteException("No transition evaluated to true from node " + node);
                }
                for (Transition t : trueTrans) {
                    node.executeTransitionChain(t);
                    GraphNode target = graph.getNode(t.target);
                    if (!pendingNodes.contains(target)) {
                        pendingNodes.add(target);
                    }
                }
            }
            break;
        }
        if (jump != null) {
            node.setState(jump);
            // loop again on this node
            count--;
            pendingNodes.addFirst(node);
        }
    }
    if (done) {
        element.setDone(session);
        /*
         * Resume the parent route if this is a sub-route.
         */
        if (graph.hasParentRoute()) {
            graph.resumeParentRoute(session);
        }
    }
    /*
     * Now run the sub-routes. If they are done, they'll call back into the routing service to resume the parent
     * node (above code).
     */
    for (GraphNode node : pendingSubRoutes) {
        DocumentRoute subRoute = node.startSubRoute();
    }
    session.save();
}
From source file:org.docx4j.model.datastorage.OpenDoPEHandler.java
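Each component's position in the document body is recorded with addFirst into continuousBeforeIndex, so the indices are held in reverse document order for the later pass over the body that inserts continuous-section paragraphs from back to front.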
private WordprocessingMLPackage fetchComponents(WordprocessingMLPackage srcPackage,
        ContentAccessor contentAccessor) throws Docx4JException {

    // convert components to altChunk
    Map<Integer, CTAltChunk> replacements = new HashMap<Integer, CTAltChunk>();
    Integer index = 0;
    justGotAComponent = false;

    LinkedList<Integer> continuousBeforeIndex = new LinkedList<Integer>();
    List<Boolean> continuousBefore = new ArrayList<Boolean>();
    List<Boolean> continuousAfter = new ArrayList<Boolean>();

    for (Object block : contentAccessor.getContent()) {

        // Object ublock = XmlUtils.unwrap(block);
        if (block instanceof org.docx4j.wml.SdtBlock) {

            org.docx4j.wml.SdtBlock sdt = (org.docx4j.wml.SdtBlock) block;

            Tag tag = getSdtPr(sdt).getTag();

            if (tag == null) {
                List<Object> newContent = new ArrayList<Object>();
                newContent.add(sdt);
                continue;
            }

            log.info(tag.getVal());

            HashMap<String, String> map = QueryString.parseQueryString(tag.getVal(), true);

            String componentId = map.get(BINDING_ROLE_COMPONENT);
            if (componentId == null)
                continue;

            // Convert the sdt to a w:altChunk
            // .. get the IRI
            String iri = ComponentsPart.getComponentById(components, componentId).getIri();
            log.debug("Fetching " + iri);
            if (docxFetcher == null) {
                log.error("You need a docxFetcher (and the MergeDocx extension) to fetch components");
                return srcPackage;
            }

            // .. create the part
            AlternativeFormatInputPart afiPart = new AlternativeFormatInputPart(
                    getNewPartName("/chunk", ".docx", srcPackage.getMainDocumentPart().getRelationshipsPart()));
            afiPart.setBinaryData(docxFetcher.getDocxFromIRI(iri));
            afiPart.setContentType(new ContentType(
                    "application/vnd.openxmlformats-officedocument.wordprocessingml.document.main+xml")); // docx

            Relationship altChunkRel = srcPackage.getMainDocumentPart().addTargetPart(afiPart);
            CTAltChunk ac = Context.getWmlObjectFactory().createCTAltChunk();
            ac.setId(altChunkRel.getId());

            replacements.put(index, ac);

            /*
             * 2011 12 11 TODO. Rethink support for
             * od:continuousBefore and od:continuousAfter.
             */

            // This is handled in this class
            if (map.get(BINDING_ROLE_COMPONENT_BEFORE) != null
                    && map.get(BINDING_ROLE_COMPONENT_BEFORE).equals("true")) {
                continuousBefore.add(Boolean.TRUE);
                continuousBeforeIndex.addFirst(index);
                log.info("ctsBefore index: " + index);
            } else {
                continuousBefore.add(Boolean.FALSE);
                continuousBeforeIndex.addFirst(index);
            }

            // The following is handled in ProcessAltChunk
            if (map.get(BINDING_ROLE_COMPONENT_AFTER) != null
                    && map.get(BINDING_ROLE_COMPONENT_AFTER).equals("true")) {
                continuousAfter.add(Boolean.TRUE);
            } else {
                continuousAfter.add(Boolean.TRUE);
            }

            justGotAComponent = true;
        }
        index++;
    }

    if (!justGotAComponent) {
        return srcPackage;
    }

    // Now replace in list
    for (Integer key : replacements.keySet()) {
        contentAccessor.getContent().set(key, replacements.get(key));
    }

    // Go through docx in reverse order
    List<Object> bodyChildren = contentAccessor.getContent();
    int i = 0;
    for (Integer indexIntoBody : continuousBeforeIndex) {

        if (continuousBefore.get(i)) {

            // Element before the w:altChunk
            if (indexIntoBody == 0) {
                // Insert a sectPr right at the beginning of the docx?
                // TODO check this isn't necessary
                // SectPr newSectPr = Context.getWmlObjectFactory().createSectPr();
                // SectPr.Type type = Context.getWmlObjectFactory().createSectPrType();
                // type.setVal("continuous");
                // newSectPr.setType( type );
                // bodyChildren.add(0, newSectPr);
            } else {
                Object block = bodyChildren.get(indexIntoBody.intValue() - 1);
                if (block instanceof P && ((P) block).getPPr() != null
                        && ((P) block).getPPr().getSectPr() != null) {
                    makeContinuous(((P) block).getPPr().getSectPr());
                } else if (block instanceof P) {
                    // More likely
                    PPr ppr = ((P) block).getPPr();
                    if (ppr == null) {
                        ppr = Context.getWmlObjectFactory().createPPr();
                        ((P) block).setPPr(ppr);
                    }
                    SectPr newSectPr = Context.getWmlObjectFactory().createSectPr();
                    SectPr.Type type = Context.getWmlObjectFactory().createSectPrType();
                    type.setVal("continuous");
                    newSectPr.setType(type);
                    ppr.setSectPr(newSectPr);
                } else {
                    // Equally likely - its a table or something, so add a p
                    P newP = Context.getWmlObjectFactory().createP();
                    PPr ppr = Context.getWmlObjectFactory().createPPr();
                    newP.setPPr(ppr);
                    SectPr newSectPr = Context.getWmlObjectFactory().createSectPr();
                    SectPr.Type type = Context.getWmlObjectFactory().createSectPrType();
                    type.setVal("continuous");
                    newSectPr.setType(type);
                    ppr.setSectPr(newSectPr);
                    bodyChildren.add(indexIntoBody.intValue(), newP); // add before altChunk
                }
            }
        }
        // else nothing specified, so go with normal MergeDocx behaviour

        i++;
    }

    // process altChunk
    try {
        // Use reflection, so docx4j can be built
        // by users who don't have the MergeDocx utility
        Class<?> documentBuilder = Class.forName("com.plutext.merge.ProcessAltChunk");
        // Method method = documentBuilder.getMethod("merge", wmlPkgList.getClass());
        Method[] methods = documentBuilder.getMethods();
        Method processMethod = null;
        for (int j = 0; j < methods.length; j++) {
            log.debug(methods[j].getName());
            if (methods[j].getName().equals("process")) {
                processMethod = methods[j];
            }
        }
        if (processMethod == null)
            throw new NoSuchMethodException();
        return (WordprocessingMLPackage) processMethod.invoke(null, srcPackage);
    } catch (ClassNotFoundException e) {
        extensionMissing(e);
        justGotAComponent = false;
        return srcPackage;
        // throw new Docx4JException("Problem processing w:altChunk", e);
    } catch (NoSuchMethodException e) {
        // Degrade gracefully
        extensionMissing(e);
        justGotAComponent = false;
        return srcPackage;
        // throw new Docx4JException("Problem processing w:altChunk", e);
    } catch (Exception e) {
        throw new Docx4JException("Problem processing w:altChunk", e);
    }
}
From source file:spade.reporter.Audit.java
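The rotated audit logs (name.1, name.2, and so on) are each pushed to the front with addFirst, so the returned list runs from the oldest rotated log to the log file given as the argument.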
/**
 * Returns a list of audit log files if the rotate flag is true.
 * Else returns a list with only the given audit log file as its element.
 *
 * The audit log files are added in the convention defined in the function code.
 *
 * @param inputAuditLogFilePath path of the audit log file
 * @param rotate a flag to tell whether to read the rotated logs or not
 * @return list of input log files or null if error
 */
private List<String> getListOfInputAuditLogs(String inputAuditLogFilePath, boolean rotate) {
    // Build a list of audit log files to be read
    LinkedList<String> inputAuditLogFiles = new LinkedList<String>();
    inputAuditLogFiles.addFirst(inputAuditLogFilePath); //add the file in the argument
    if (rotate) { //if rotate is true then add the rest too based on the decided convention
        //convention: name format of files to be processed -> name.1, name.2 and so on where
        //name is the name of the file passed in as argument
        //can only process 99 logs
        for (int logCount = 1; logCount <= 99; logCount++) {
            String logPath = inputAuditLogFilePath + "." + logCount;
            try {
                if (FileUtility.doesPathExist(logPath)) {
                    if (FileUtility.isFile(logPath)) {
                        if (FileUtility.isFileReadable(logPath)) {
                            inputAuditLogFiles.addFirst(logPath); //adding first so that they are added in the reverse order
                        } else {
                            logger.log(Level.WARNING, "Log skipped because file not readable: " + logPath);
                        }
                    }
                }
            } catch (Exception e) {
                logger.log(Level.SEVERE, "Failed to check if log path is readable: " + logPath, e);
                return null;
            }
        }
    }
    return inputAuditLogFiles;
}