List of usage examples for java.util.LinkedList.getFirst()
public E getFirst()
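getFirst() returns the head of the list without removing it and throws NoSuchElementException when the list is empty; peekFirst() returns null in that case. A minimal self-contained sketch of the basic behavior (class and variable names are illustrative only):

import java.util.LinkedList;
import java.util.NoSuchElementException;

public class GetFirstDemo {
    public static void main(String[] args) {
        LinkedList<String> queue = new LinkedList<>();
        queue.add("first");
        queue.add("second");

        // getFirst() returns the head element without removing it
        System.out.println(queue.getFirst()); // prints "first"
        System.out.println(queue.size());     // still 2

        // On an empty list getFirst() throws NoSuchElementException,
        // while peekFirst() returns null instead
        queue.clear();
        try {
            queue.getFirst();
        } catch (NoSuchElementException e) {
            System.out.println("empty list: " + e);
        }
        System.out.println(queue.peekFirst()); // prints "null"
    }
}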
From source file:jext2.DataBlockAccess.java
/**
 * Splice the allocated branch onto inode
 * @throws IoError
 */
@NotThreadSafe(useLock = true)
private void spliceBranch(long logicalBlock, int[] offsets, long[] blockNrs, LinkedList<Long> newBlockNrs)
        throws IoError {

    int existDepth = blockNrs.length;

    if (existDepth == 0) { /* add direct block */
        long[] directBlocks = inode.getBlock();
        directBlocks[offsets[0]] = newBlockNrs.getFirst().longValue();
    } else {
        ByteBuffer buf = blocks.read(blockNrs[existDepth - 1]);
        Ext2fsDataTypes.putLE32U(buf, newBlockNrs.getFirst().longValue(), offsets[existDepth] * 4);
        buf.rewind();
        blocks.write(blockNrs[existDepth - 1], buf);
    }

    lastAllocLogicalBlock = logicalBlock;
    lastAllocPhysicalBlock = newBlockNrs.getLast().intValue();

    inode.setBlocks(inode.getBlocks() + newBlockNrs.size() * (superblock.getBlocksize() / 512));
    inode.setModificationTime(new Date());
}
From source file:appeng.items.tools.powered.ToolColorApplicator.java
private ItemStack findNextColor(final ItemStack is, final ItemStack anchor, final int scrollOffset) {
    ItemStack newColor = null;

    final IMEInventory<IAEItemStack> inv = AEApi.instance().registries().cell().getCellInventory(is, null,
            StorageChannel.ITEMS);

    if (inv != null) {
        final IItemList<IAEItemStack> itemList = inv
                .getAvailableItems(AEApi.instance().storage().createItemList());

        if (anchor == null) {
            final IAEItemStack firstItem = itemList.getFirstItem();
            if (firstItem != null) {
                newColor = firstItem.getItemStack();
            }
        } else {
            final LinkedList<IAEItemStack> list = new LinkedList<IAEItemStack>();

            for (final IAEItemStack i : itemList) {
                list.add(i);
            }

            Collections.sort(list, new Comparator<IAEItemStack>() {
                @Override
                public int compare(final IAEItemStack a, final IAEItemStack b) {
                    return ItemSorters.compareInt(a.getItemDamage(), b.getItemDamage());
                }
            });

            if (list.size() <= 0) {
                return null;
            }

            IAEItemStack where = list.getFirst();
            int cycles = 1 + list.size();

            while (cycles > 0 && !where.equals(anchor)) {
                list.addLast(list.removeFirst());
                cycles--;
                where = list.getFirst();
            }

            if (scrollOffset > 0) {
                list.addLast(list.removeFirst());
            }

            if (scrollOffset < 0) {
                list.addFirst(list.removeLast());
            }

            return list.get(0).getItemStack();
        }
    }

    if (newColor != null) {
        this.setColor(is, newColor);
    }

    return newColor;
}
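The anchor-scrolling branch above treats the LinkedList as a circular buffer: getFirst() peeks at the current head while removeFirst()/addLast() rotate the list until the head equals the anchor. A reduced sketch of that idiom in isolation (class and method names here are hypothetical, not from the mod; the list is assumed non-empty):

import java.util.LinkedList;
import java.util.List;

public class RotateToAnchor {
    /**
     * Rotates the list until its first element equals the anchor, using
     * getFirst() as a read-only cursor. The cycle counter bounds the loop
     * in case the anchor is not present.
     */
    static <T> void rotateTo(LinkedList<T> list, T anchor) {
        int cycles = 1 + list.size();
        T where = list.getFirst();
        while (cycles > 0 && !where.equals(anchor)) {
            list.addLast(list.removeFirst()); // rotate head to tail
            cycles--;
            where = list.getFirst();
        }
    }

    public static void main(String[] args) {
        LinkedList<String> colors = new LinkedList<>(List.of("white", "red", "green", "blue"));
        rotateTo(colors, "green");
        System.out.println(colors); // [green, blue, white, red]
    }
}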
From source file:org.commonjava.emb.project.ProjectLoader.java
private void addProjects(final ProjectToolsSession session, final List<MavenProject> projects) {
    final DependencyGraph depGraph = session.getDependencyGraph();

    for (final MavenProject project : projects) {
        final LinkedList<Artifact> parentage = new LinkedList<Artifact>();
        MavenProject parent = project;

        while (parent != null) {
            final org.apache.maven.artifact.Artifact pomArtifact = mavenRepositorySystem
                    .createArtifact(project.getGroupId(), project.getArtifactId(), project.getVersion(), "pom");

            final Artifact aetherPomArtifact = RepositoryUtils.toArtifact(pomArtifact);

            parentage.addFirst(aetherPomArtifact);
            parent = parent.getParent();
        }

        Artifact current = parentage.removeFirst();

        while (!parentage.isEmpty()) {
            final Artifact next = parentage.getFirst();

            // This is WEIRD, but the parent POM is actually a dependency of the current one,
            // since it's required in order to build the current project...
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("Marking parent POM: " + current + " as dependency of POM: " + next);
            }

            depGraph.addDependency(next, current, true, true);

            if (!parentage.isEmpty()) {
                current = parentage.removeFirst();
            }
        }
    }
}
From source file:de.quadrillenschule.azocamsyncd.ftpservice.FTPConnection.java
public LinkedList<AZoFTPFile> download(LinkedList<AZoFTPFile> afs, LocalStorage localStorage) {
    if (afs.size() <= 0) {
        return afs;
    }

    LinkedList<AZoFTPFile> retval = new LinkedList<>();
    /* for (AZoFTPFile a : afs) {
        retval.add(a);
    }*/

    Collections.sort(afs, new Comparator<AZoFTPFile>() {
        @Override
        public int compare(AZoFTPFile o1, AZoFTPFile o2) {
            return o1.ftpFile.getTimestamp().compareTo(o2.ftpFile.getTimestamp());
        }
    });

    simplyConnect(FTP.BINARY_FILE_TYPE);
    notify(FTPConnectionStatus.CONNECTED, getLastWorkingConnection(), -1);

    if (afs.size() > 0) {
        AZoFTPFile af = afs.getFirst();
        File localFile = null;
        try {
            localFile = localStorage.getLocalFile(af);
        } catch (IOException ex) {
            notify(FTPConnectionStatus.LOCALSTORAGEERROR, af.dir + af.ftpFile.getName(), -1);
            close();
            return retval;
        }

        if (!localStorage.prepareLocalFile(localFile)) {
            notify(FTPConnectionStatus.LOCALSTORAGEERROR, af.dir + af.ftpFile.getName(), -1);
            close();
            return retval;
        }

        FileOutputStream fos = null;
        InputStream is = null;
        try {
            fos = new FileOutputStream(localFile);
            ftpclient.setSoTimeout(LONG_TIMEOUT);
            is = ftpclient.retrieveFileStream(af.dir + af.ftpFile.getName());
            cis = new CountingInputStream(is);
            downloadsize = af.ftpFile.getSize();
            notify(FTPConnectionStatus.DOWNLOADING, af.dir + af.ftpFile.getName(),
                    ((int) (100.0 * ((afs.indexOf(af) + 1.0) / (double) afs.size()))));
            // ftpclient.setDataTimeout(TIMEOUT);
            // ftpclient.setSoTimeout(TIMEOUT);
            // Files.copy(cis, localFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
            try {
                IOUtils.copyLarge(cis, fos);
            } catch (Exception ie) {
                fos.close();
                is.close();
            }

            while (!ftpclient.completePendingCommand()) {
                try {
                    Thread.currentThread().wait(500);
                } catch (InterruptedException ex) {
                    Logger.getLogger(FTPConnection.class.getName()).log(Level.SEVERE, null, ex);
                }
            }

            is.close();
            fos.close();
            localStorage.setLatestIncoming(localFile);
            localStorage.addSyncedFile(af);
            notify(FTPConnectionStatus.NEW_LOCAL_FILE, localFile.getAbsolutePath(), -1);
            retval.add(af);
            notify(FTPConnectionStatus.SUCCESS, af.dir + af.ftpFile.getName(),
                    ((int) (100.0 * ((afs.indexOf(af) + 2.0) / (double) afs.size()))));
        } catch (Exception ex) {
            try {
                is.close();
                fos.close();
                close();
                localFile.delete();
                simplyConnect(FTP.BINARY_FILE_TYPE);
            } catch (Exception ex2) {
                close();
            }
        }
    }

    close();
    return retval;
}
From source file:org.fusesource.mop.MOP.java
private ArrayList<ArtifactId> parseArtifactList(LinkedList<String> values) throws UsageException {
    ArrayList<ArtifactId> rc = new ArrayList<ArtifactId>();
    assertNotEmpty(values);

    String value = values.removeFirst();
    ArtifactId id = parseArtifactId(value);
    rc.add(id);

    while (!values.isEmpty() && isAnotherArtifactId(values.getFirst())) {
        value = values.removeFirst().substring(1);
        id = parseArtifactId(value);
        rc.add(id);
    }

    return rc;
}
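parseArtifactList shows the common peek-then-consume pattern: getFirst() looks ahead at the next token without removing it, and removeFirst() consumes the token only if it belongs to the current artifact list. A generic sketch of the same pattern (names are illustrative, not MOP's API):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;

public class OptionParser {
    /**
     * Consumes leading "+"-prefixed values from the argument list, peeking
     * with getFirst() before deciding whether to removeFirst(). Remaining
     * arguments stay in the list for later parsing.
     */
    static List<String> takeExtensions(LinkedList<String> args) {
        List<String> extensions = new ArrayList<>();
        while (!args.isEmpty() && args.getFirst().startsWith("+")) {
            extensions.add(args.removeFirst().substring(1));
        }
        return extensions;
    }

    public static void main(String[] args) {
        LinkedList<String> values = new LinkedList<>(Arrays.asList("+foo", "+bar", "run"));
        System.out.println(takeExtensions(values)); // [foo, bar]
        System.out.println(values);                 // [run]
    }
}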
From source file:com.datatorrent.stram.client.EventsAgent.java
public List<EventInfo> getLatestEvents(String appId, int limit) {
    LinkedList<EventInfo> result = new LinkedList<EventInfo>();
    String dir = getEventsDirectory(appId);
    if (dir == null) {
        return null;
    }

    long totalNumEvents = 0;
    IndexFileBufferedReader ifbr = null;
    LinkedList<Pair<String, Long>> partFiles = new LinkedList<Pair<String, Long>>();

    try {
        ifbr = new IndexFileBufferedReader(new InputStreamReader(
                stramAgent.getFileSystem().open(new Path(dir, FSPartFileCollection.INDEX_FILE))), dir);

        EventsIndexLine indexLine;
        while ((indexLine = (EventsIndexLine) ifbr.readIndexLine()) != null) {
            if (indexLine.isEndLine) {
                continue;
            }
            partFiles.add(new Pair<String, Long>(indexLine.partFile, indexLine.numEvents));
            totalNumEvents += indexLine.numEvents;
        }
    } catch (Exception ex) {
        LOG.warn("Got exception when reading events", ex);
        return result;
    } finally {
        IOUtils.closeQuietly(ifbr);
    }

    long offset = 0;
    while (totalNumEvents > limit && !partFiles.isEmpty()) {
        Pair<String, Long> head = partFiles.getFirst();
        if (totalNumEvents - head.second < limit) {
            offset = Math.max(0, totalNumEvents - limit);
            break;
        }
        totalNumEvents -= head.second;
        partFiles.removeFirst();
    }

    String lastProcessPartFile = null;
    for (Pair<String, Long> partFile : partFiles) {
        BufferedReader partBr = null;
        try {
            partBr = new BufferedReader(
                    new InputStreamReader(stramAgent.getFileSystem().open(new Path(dir, partFile.first))));
            processPartFile(partBr, null, null, offset, limit, result);
            offset = 0;
            lastProcessPartFile = partFile.first;
        } catch (Exception ex) {
            LOG.warn("Got exception when reading events", ex);
        } finally {
            IOUtils.closeQuietly(partBr);
        }
    }

    BufferedReader partBr = null;
    try {
        String extraPartFile = getNextPartFile(lastProcessPartFile);
        if (extraPartFile != null && limit > 0) {
            partBr = new BufferedReader(
                    new InputStreamReader(stramAgent.getFileSystem().open(new Path(dir, extraPartFile))));
            processPartFile(partBr, null, null, 0, Integer.MAX_VALUE, result);
        }
    } catch (Exception ex) {
        // ignore
    } finally {
        IOUtils.closeQuietly(partBr);
    }

    while (result.size() > limit) {
        result.removeFirst();
    }

    return result;
}
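getLatestEvents uses getFirst() to inspect the head part file before deciding whether it can be discarded, trimming batches from the front until roughly `limit` events remain. A simplified sketch of that front-trimming loop (class and method names are made up for illustration):

import java.util.LinkedList;

public class WindowTrimmer {
    /**
     * Drops whole batches from the front of the queue until the total count
     * fits the limit. getFirst() inspects the head batch before deciding
     * whether the whole batch can be discarded; if dropping it would
     * overshoot, the method returns an offset to skip into that batch.
     */
    static long trimToLimit(LinkedList<Long> batchSizes, long total, long limit) {
        long offset = 0;
        while (total > limit && !batchSizes.isEmpty()) {
            long head = batchSizes.getFirst();
            if (total - head < limit) {
                offset = Math.max(0, total - limit);
                break;
            }
            total -= head;
            batchSizes.removeFirst();
        }
        return offset;
    }

    public static void main(String[] args) {
        LinkedList<Long> batches = new LinkedList<>();
        batches.add(40L);
        batches.add(30L);
        batches.add(50L);
        // keep at most 60 of the 120 events: drop the 40-event batch,
        // then skip 20 events into the next one
        System.out.println(trimToLimit(batches, 120, 60)); // 20
        System.out.println(batches);                       // [30, 50]
    }
}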
From source file:jext2.DataInode.java
/**
 * Read Inode data
 * @param size size of the data to be read
 * @param fileOffset start address in data area
 * @return buffer of size size containing data.
 * @throws FileTooLarge
 * @throws IoError
 */
public ByteBuffer readData(int size, long fileOffset) throws JExt2Exception, FileTooLarge {
    /* Returning null may break things somewhere..
     * Zero length buffer breaks something in jlowfuse's c code */
    if (getSize() == 0)
        return ByteBuffer.allocateDirect(1);

    /*
     * size may be larger than the inode.size, it doesn't make sense to return
     * 4k of zeros
     */
    if (size > getSize())
        size = (int) getSize();

    ByteBuffer buf = ByteBuffer.allocateDirect(size);

    int blocksize = superblock.getBlocksize();

    long i = 0;
    long firstBlock = fileOffset / blocksize;
    long offset = fileOffset % blocksize;

    /*
     * just as size may be larger than the inode's data, the number of blocks
     * may also be.
     */
    long approxBlocks = (size / blocksize) + 1;
    long maxBlocks = this.getBlocks() / (superblock.getBlocksize() / 512);
    if (approxBlocks > maxBlocks)
        approxBlocks = maxBlocks;

    while (i < approxBlocks) {
        long start = firstBlock + i;
        long stop = firstBlock + approxBlocks;

        LinkedList<Long> b = accessData().getBlocks(start, stop);
        int blocksRead;

        /*
         * Note on the sparse file support:
         * getBlocks will return null if there is no data block for this
         * logical address. So just move the position count blocks forward.
         */
        if (b == null) { /* hole */
            blocksRead = 1;

            int unboundedLimit = buf.position() + blocksize;
            int limit = Math.min(unboundedLimit, buf.capacity());

            assert limit <= buf.capacity() : "New position, limit " + limit
                    + " is beyond buffer's capacity, " + buf;

            buf.limit(limit);
            buf.position(limit);

            assert buf.limit() == buf.position();
        } else { /* blocks */
            blocksRead = b.size();

            long pos = b.getFirst() * blocksize + offset;
            int unboundedLimit = buf.position() + blocksRead * blocksize;
            int limit = Math.min(unboundedLimit, buf.capacity());

            assert limit <= buf.capacity() : "New limit " + limit
                    + " is beyond buffer's capacity, " + buf;

            buf.limit(limit);
            blockAccess.readToBufferUnsynchronized(pos, buf);
        }

        i += blocksRead;
        offset = 0;

        /* This should be removed soon. IllegalMonitorStateException happen
         * occasionally for unknown reasons. */
        try {
            accessData().getHierarchyLock().readLock().unlock();
        } catch (IllegalMonitorStateException e) {
            Logger log = Filesystem.getLogger();
            log.warning("IllegalMonitorStateException encountered in readData, inode=" + this);
            log.warning(String.format(
                    "context for exception: blocks=%s i=%d approxBlocks=%d off=%d buf=%s readlock=%s lock.readlock.holds=%s",
                    b, i, approxBlocks, fileOffset, buf, accessData().getHierarchyLock(),
                    accessData().getHierarchyLock().getReadHoldCount()));
        }

        if (buf.capacity() == buf.limit())
            break;
    }

    assert buf.position() == buf.limit() : "Buffer wasn't filled completely";
    assert buf.limit() == size : "Read buffer size does not match request size";

    if (buf.limit() > getSize())
        buf.limit((int) getSize());

    buf.rewind();
    return buf;
}
From source file:org.jahia.modules.modulemanager.flow.ModuleManagementFlowHandler.java
public void loadModuleInformation(RequestContext context) {
    String selectedModuleName = moduleName != null ? moduleName
            : (String) context.getFlowScope().get("selectedModule");
    Map<ModuleVersion, JahiaTemplatesPackage> selectedModule = templateManagerService
            .getTemplatePackageRegistry().getAllModuleVersions().get(selectedModuleName);

    if (selectedModule != null) {
        if (selectedModule.size() > 1) {
            boolean foundActiveVersion = false;
            for (Map.Entry<ModuleVersion, JahiaTemplatesPackage> entry : selectedModule.entrySet()) {
                JahiaTemplatesPackage value = entry.getValue();
                if (value.isActiveVersion()) {
                    foundActiveVersion = true;
                    populateActiveVersion(context, value);
                }
            }
            if (!foundActiveVersion) {
                // there is no active version take information from most recent installed version
                LinkedList<ModuleVersion> sortedVersions = new LinkedList<ModuleVersion>(
                        selectedModule.keySet());
                Collections.sort(sortedVersions);
                populateActiveVersion(context, selectedModule.get(sortedVersions.getFirst()));
            }
        } else {
            populateActiveVersion(context, selectedModule.values().iterator().next());
        }
        context.getRequestScope().put("otherVersions", selectedModule);
    } else {
        // module is not yet parsed probably because it depends on unavailable modules so look for it in module states
        final Map<Bundle, ModuleState> moduleStates = templateManagerService.getModuleStates();
        for (Bundle bundle : moduleStates.keySet()) {
            JahiaTemplatesPackage module = BundleUtils.getModule(bundle);
            if (module.getId().equals(selectedModuleName)) {
                populateActiveVersion(context, module);
                final List<String> missing = getMissingDependenciesFrom(module.getDepends(), null);
                if (!missing.isEmpty()) {
                    createMessageForMissingDependencies(context.getMessageContext(), missing);
                }
                break;
            }
        }
    }

    populateSitesInformation(context);
    Set<String> systemSiteRequiredModules = getSystemSiteRequiredModules();
    context.getRequestScope().put("systemSiteRequiredModules", systemSiteRequiredModules);

    // Get list of definitions
    NodeTypeIterator nodeTypes = NodeTypeRegistry.getInstance().getNodeTypes(selectedModuleName);
    Map<String, Boolean> booleanMap = new TreeMap<String, Boolean>();
    while (nodeTypes.hasNext()) {
        ExtendedNodeType nodeType = (ExtendedNodeType) nodeTypes.next();
        booleanMap.put(nodeType.getLabel(LocaleContextHolder.getLocale()),
                nodeType.isNodeType("jmix:droppableContent"));
    }
    context.getRequestScope().put("nodeTypes", booleanMap);
}
From source file:org.pf9.pangu.app.act.rest.diagram.services.ProcessInstanceHighlightsResource.java
/**
 * getHighlightedFlows
 *
 * code logic: 1. Loop all activities by id asc order; 2. Check each activity's outgoing transitions and
 * eventBoundery outgoing transitions, if outgoing transitions's destination.id is in other executed activityIds,
 * add this transition to highLightedFlows List; 3. But if activity is not a parallelGateway or inclusiveGateway,
 * only choose the earliest flow.
 *
 * @param activityList
 * @param hisActInstList
 * @param highLightedFlows
 */
private void getHighlightedFlows(List<ActivityImpl> activityList,
        LinkedList<HistoricActivityInstance> hisActInstList, List<String> highLightedFlows) {

    // check out startEvents in activityList
    List<ActivityImpl> startEventActList = new ArrayList<ActivityImpl>();
    Map<String, ActivityImpl> activityMap = new HashMap<String, ActivityImpl>(activityList.size());

    for (ActivityImpl activity : activityList) {
        activityMap.put(activity.getId(), activity);
        String actType = (String) activity.getProperty("type");
        if (actType != null && actType.toLowerCase().indexOf("startevent") >= 0) {
            startEventActList.add(activity);
        }
    }

    // These codes is used to avoid a bug:
    // ACT-1728 If the process instance was started by a callActivity, it will be not have the startEvent
    // activity in ACT_HI_ACTINST table
    // Code logic:
    // Check the first activity if it is a startEvent, if not check out the startEvent's highlight outgoing flow.
    HistoricActivityInstance firstHistActInst = hisActInstList.getFirst();
    String firstActType = (String) firstHistActInst.getActivityType();
    if (firstActType != null && firstActType.toLowerCase().indexOf("startevent") < 0) {
        PvmTransition startTrans = getStartTransaction(startEventActList, firstHistActInst);
        if (startTrans != null) {
            highLightedFlows.add(startTrans.getId());
        }
    }

    while (!hisActInstList.isEmpty()) {
        HistoricActivityInstance histActInst = hisActInstList.removeFirst();
        ActivityImpl activity = activityMap.get(histActInst.getActivityId());
        if (activity != null) {
            boolean isParallel = false;
            String type = histActInst.getActivityType();
            if ("parallelGateway".equals(type) || "inclusiveGateway".equals(type)) {
                isParallel = true;
            } else if ("subProcess".equals(histActInst.getActivityType())) {
                getHighlightedFlows(activity.getActivities(), hisActInstList, highLightedFlows);
            }

            List<PvmTransition> allOutgoingTrans = new ArrayList<PvmTransition>();
            allOutgoingTrans.addAll(activity.getOutgoingTransitions());
            allOutgoingTrans.addAll(getBoundaryEventOutgoingTransitions(activity));
            List<String> activityHighLightedFlowIds = getHighlightedFlows(allOutgoingTrans, hisActInstList,
                    isParallel);
            highLightedFlows.addAll(activityHighLightedFlowIds);
        }
    }
}
From source file:com.mirth.connect.server.controllers.MuleEngineController.java
private void configureOutboundRouter(UMODescriptor descriptor, Channel channel) throws Exception {
    logger.debug("configuring outbound router for channel: " + channel.getId() + " (" + channel.getName() + ")");

    FilteringMulticastingRouter fmr = new FilteringMulticastingRouter();
    boolean enableTransactions = false;
    Exception exceptionRegisteringOutboundRouter = null;

    // If there was an exception registering a connector, break the loop.
    for (ListIterator<Connector> iterator = channel.getDestinationConnectors().listIterator(); iterator
            .hasNext() && (exceptionRegisteringOutboundRouter == null);) {
        Connector connector = iterator.next();

        if (connector.isEnabled()) {
            MuleEndpoint endpoint = new MuleEndpoint();

            // Don't throw an exception if a malformed URI was passed
            // in for one of the destinations.
            try {
                endpoint.setEndpointURI(new MuleEndpointURI(getEndpointUri(connector), channel.getId()));
            } catch (Exception e) {
                exceptionRegisteringOutboundRouter = e;
            }

            // if there are multiple endpoints, make them all synchronous to
            // ensure correct ordering of fired events
            if (channel.getDestinationConnectors().size() > 0) {
                endpoint.setSynchronous(true);
                // TODO: routerElement.setAttribute("synchronous", "true");
            }

            String connectorReference = getConnectorReferenceForOutboundRouter(channel, iterator.nextIndex());

            // add the destination connector
            String connectorName = getConnectorNameForRouter(connectorReference);

            try {
                endpoint.setConnector(registerConnector(connector, connectorName, channel.getId()));
            } catch (Exception e) {
                exceptionRegisteringOutboundRouter = e;
            }

            // 1. append the JavaScriptTransformer that does the mappings
            UMOTransformer javascriptTransformer = createTransformer(channel, connector,
                    connectorReference + "_transformer");

            try {
                muleManager.registerTransformer(javascriptTransformer);
            } catch (Exception e) {
                exceptionRegisteringOutboundRouter = e;
            }

            // 2. finally, append any transformers needed by the transport (ie. StringToByteArray)
            ConnectorMetaData transport = transports.get(connector.getTransportName());
            LinkedList<UMOTransformer> defaultTransformerList = null;

            if (transport.getTransformers() != null) {
                defaultTransformerList = chainTransformers(transport.getTransformers());

                if (!defaultTransformerList.isEmpty()) {
                    javascriptTransformer.setTransformer(defaultTransformerList.getFirst());
                }
            }

            // enable transactions for the outbound router only if it has a JDBC connector
            if (transport.getProtocol().equalsIgnoreCase("jdbc")) {
                enableTransactions = true;
            }

            endpoint.setTransformer(javascriptTransformer);
            fmr.addEndpoint(endpoint);
        }
    }

    // check for enabled transactions
    boolean transactional = ((channel.getProperties().get("transactional") != null)
            && channel.getProperties().get("transactional").toString().equalsIgnoreCase("true"));

    if (enableTransactions && transactional) {
        MuleTransactionConfig mtc = new MuleTransactionConfig();
        mtc.setActionAsString("BEGIN_OR_JOIN");
        mtc.setFactory(new JdbcTransactionFactory());
        fmr.setTransactionConfig(mtc);
    }

    OutboundMessageRouter outboundRouter = new OutboundMessageRouter();
    outboundRouter.addRouter(fmr);
    descriptor.setOutboundRouter(outboundRouter);

    /*
     * Throw an exception after the FilteringMulticastingRouter is created
     * and added to the outbound router, even though the connector
     * registration is aborted. This is so casting to a
     * FilteringMulticastingRouter doesn't fail when unregistering the
     * failed channel and stopping its dispatchers.
     */
    if (exceptionRegisteringOutboundRouter != null) {
        throw exceptionRegisteringOutboundRouter;
    }
}