Usage examples for java.util.LinkedHashSet.size()
int size();
Returns the number of elements in this set (its cardinality).
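Before the real-world examples, here is a minimal, self-contained sketch (class and variable names are illustrative, not taken from the examples below) showing that duplicate inserts do not increase the count, along with the common toArray(new T[set.size()]) idiom that several of the examples below rely on.

import java.util.LinkedHashSet;

public class LinkedHashSetSizeExample {
    public static void main(String[] args) {
        LinkedHashSet<String> ids = new LinkedHashSet<String>();
        ids.add("case-01");
        ids.add("case-02");
        ids.add("case-01"); // duplicate is ignored, so the size stays at 2

        // size() reports the number of distinct elements currently stored
        System.out.println(ids.size()); // prints 2

        // common idiom: size the target array with size() before copying
        String[] array = ids.toArray(new String[ids.size()]);
        System.out.println(array.length); // prints 2
    }
}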
From source file:org.mskcc.cbio.importer.converter.internal.ConverterImpl.java
/**
 * Generates case lists for the given portal.
 *
 * @param portal String
 * @throws Exception
 */
@Override
public void generateCaseLists(String portal) throws Exception {
    if (LOG.isInfoEnabled()) {
        LOG.info("generateCaseLists()");
    }
    // check args
    if (portal == null) {
        throw new IllegalArgumentException("portal must not be null");
    }
    // get portal metadata
    PortalMetadata portalMetadata = config.getPortalMetadata(portal).iterator().next();
    if (portalMetadata == null) {
        if (LOG.isInfoEnabled()) {
            LOG.info("convertData(), cannot find PortalMetadata, returning");
        }
        return;
    }
    // get CaseListMetadata
    Collection<CaseListMetadata> caseListMetadatas = config.getCaseListMetadata(Config.ALL);
    // iterate over all cancer studies
    for (CancerStudyMetadata cancerStudyMetadata : config.getCancerStudyMetadata(portalMetadata.getName())) {
        // iterate over case lists
        for (CaseListMetadata caseListMetadata : caseListMetadatas) {
            if (LOG.isInfoEnabled()) {
                LOG.info("generateCaseLists(), processing cancer study: " + cancerStudyMetadata
                        + ", case list: " + caseListMetadata.getCaseListFilename());
            }
            // how many staging files are we working with?
            String[] stagingFilenames = null;
            // setup union/intersection bools
            boolean unionCaseList = caseListMetadata.getStagingFilenames()
                    .contains(CaseListMetadata.CASE_LIST_UNION_DELIMITER);
            boolean intersectionCaseList = caseListMetadata.getStagingFilenames()
                    .contains(CaseListMetadata.CASE_LIST_INTERSECTION_DELIMITER);
            // union (like all cases)
            if (unionCaseList) {
                stagingFilenames = caseListMetadata.getStagingFilenames()
                        .split("\\" + CaseListMetadata.CASE_LIST_UNION_DELIMITER);
            }
            // intersection (like complete or cna-seq)
            else if (intersectionCaseList) {
                stagingFilenames = caseListMetadata.getStagingFilenames()
                        .split("\\" + CaseListMetadata.CASE_LIST_INTERSECTION_DELIMITER);
            }
            // just a single staging file
            else {
                stagingFilenames = new String[] { caseListMetadata.getStagingFilenames() };
            }
            if (LOG.isInfoEnabled()) {
                LOG.info("generateCaseLists(), stagingFilenames: "
                        + java.util.Arrays.toString(stagingFilenames));
            }
            // this is the set we will pass to writeCaseListFile
            LinkedHashSet<String> caseSet = new LinkedHashSet<String>();
            // this indicates the number of staging files processed -
            // used to verify that an intersection should be written
            int numStagingFilesProcessed = 0;
            for (String stagingFilename : stagingFilenames) {
                if (LOG.isInfoEnabled()) {
                    LOG.info("generateCaseLists(), processing stagingFile: " + stagingFilename);
                }
                // compute the case set
                List<String> caseList = fileUtils.getCaseListFromStagingFile(caseIDs, portalMetadata,
                        cancerStudyMetadata, stagingFilename);
                // we may not have this datatype in study
                if (caseList.size() == 0) {
                    if (LOG.isInfoEnabled()) {
                        LOG.info("generateCaseLists(), stagingFileHeader is empty: " + stagingFilename
                                + ", skipping...");
                    }
                    continue;
                }
                // intersection
                if (intersectionCaseList) {
                    if (caseSet.isEmpty()) {
                        caseSet.addAll(caseList);
                    } else {
                        caseSet.retainAll(caseList);
                    }
                }
                // otherwise union or single staging (treat the same)
                else {
                    caseSet.addAll(caseList);
                }
                ++numStagingFilesProcessed;
            }
            // write the case list file (don't make empty case lists)
            if (caseSet.size() > 0) {
                if (LOG.isInfoEnabled()) {
                    LOG.info("generateCaseLists(), calling writeCaseListFile()...");
                }
                // do not write out complete cases file unless we've processed all the files required
                if (intersectionCaseList && (numStagingFilesProcessed != stagingFilenames.length)) {
                    if (LOG.isInfoEnabled()) {
                        LOG.info("generateCaseLists(), number of staging files processed != number staging files required for cases_complete.txt, skipping call to writeCaseListFile()...");
                    }
                    continue;
                }
                fileUtils.writeCaseListFile(portalMetadata, cancerStudyMetadata, caseListMetadata,
                        caseSet.toArray(new String[0]));
            } else if (LOG.isInfoEnabled()) {
                LOG.info("generateCaseLists(), caseSet.size() <= 0, skipping call to writeCaseListFile()...");
            }
            // if union, write out the cancer study metadata file
            if (caseSet.size() > 0 && caseListMetadata.getCaseListFilename().equals(ALL_CASES_FILENAME)) {
                if (LOG.isInfoEnabled()) {
                    LOG.info("generateCaseLists(), processed all cases list, we can now update cancerStudyMetadata file()...");
                }
                fileUtils.writeCancerStudyMetadataFile(portalMetadata, cancerStudyMetadata, caseSet.size());
            }
        }
    }
}
From source file:org.apache.tajo.engine.planner.LogicalPlanner.java
public TableSubQueryNode visitTableSubQuery(PlanContext context, Stack<Expr> stack, TablePrimarySubQuery expr)
        throws PlanningException {
    QueryBlock block = context.queryBlock;

    QueryBlock childBlock = context.plan.getBlock(context.plan.getBlockNameByExpr(expr.getSubQuery()));
    PlanContext newContext = new PlanContext(context, childBlock);
    LogicalNode child = visit(newContext, new Stack<Expr>(), expr.getSubQuery());
    TableSubQueryNode subQueryNode = context.queryBlock.getNodeFromExpr(expr);
    context.plan.connectBlocks(childBlock, context.queryBlock, BlockType.TableSubQuery);
    subQueryNode.setSubQuery(child);

    // Add additional expressions required in upper nodes.
    Set<String> newlyEvaluatedExprs = TUtil.newHashSet();
    for (NamedExpr rawTarget : block.namedExprsMgr.getAllNamedExprs()) {
        try {
            EvalNode evalNode = exprAnnotator.createEvalNode(context, rawTarget.getExpr(),
                    NameResolvingMode.RELS_ONLY);
            if (checkIfBeEvaluatedAtRelation(block, evalNode, subQueryNode)) {
                block.namedExprsMgr.markAsEvaluated(rawTarget.getAlias(), evalNode);
                newlyEvaluatedExprs.add(rawTarget.getAlias()); // newly added exr
            }
        } catch (VerifyException ve) {
        }
    }

    // Assume that each unique expr is evaluated once.
    LinkedHashSet<Target> targets = createFieldTargetsFromRelation(block, subQueryNode, newlyEvaluatedExprs);
    for (String newAddedExpr : newlyEvaluatedExprs) {
        targets.add(block.namedExprsMgr.getTarget(newAddedExpr, true));
    }
    subQueryNode.setTargets(targets.toArray(new Target[targets.size()]));

    return subQueryNode;
}
From source file:org.pdfsam.console.business.pdf.handlers.SplitCmdExecutor.java
/**
 * Execute the split of a pdf document when split type is S_BLEVEL
 *
 * @param inputCommand
 * @throws Exception
 */
private void executeBookmarksSplit(SplitParsedCommand inputCommand) throws Exception {
    pdfReader = PdfUtility.readerFor(inputCommand.getInputFile());
    int bLevel = inputCommand.getBookmarksLevel().intValue();
    Hashtable bookmarksTable = new Hashtable();
    if (bLevel > 0) {
        pdfReader.removeUnusedObjects();
        pdfReader.consolidateNamedDestinations();
        List bookmarks = SimpleBookmark.getBookmark(pdfReader);
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        SimpleBookmark.exportToXML(bookmarks, out, "UTF-8", false);
        ByteArrayInputStream input = new ByteArrayInputStream(out.toByteArray());
        int maxDepth = PdfUtility.getMaxBookmarksDepth(input);
        input.reset();
        if (bLevel <= maxDepth) {
            SAXReader reader = new SAXReader();
            org.dom4j.Document document = reader.read(input);
            // head node
            String headBookmarkXQuery = "/Bookmark/Title[@Action=\"GoTo\"]";
            Node headNode = document.selectSingleNode(headBookmarkXQuery);
            if (headNode != null && headNode.getText() != null && headNode.getText().trim().length() > 0) {
                bookmarksTable.put(new Integer(1), headNode.getText().trim());
            }
            // bLevel nodes
            StringBuffer buffer = new StringBuffer("/Bookmark");
            for (int i = 0; i < bLevel; i++) {
                buffer.append("/Title[@Action=\"GoTo\"]");
            }
            String xQuery = buffer.toString();
            List nodes = document.selectNodes(xQuery);
            input.close();
            input = null;
            if (nodes != null && nodes.size() > 0) {
                LinkedHashSet pageSet = new LinkedHashSet(nodes.size());
                for (Iterator nodeIter = nodes.iterator(); nodeIter.hasNext();) {
                    Node currentNode = (Node) nodeIter.next();
                    Node pageAttribute = currentNode.selectSingleNode("@Page");
                    if (pageAttribute != null && pageAttribute.getText().length() > 0) {
                        String attribute = pageAttribute.getText();
                        int blankIndex = attribute.indexOf(' ');
                        if (blankIndex > 0) {
                            Integer currentNumber = new Integer(attribute.substring(0, blankIndex));
                            String bookmarkText = currentNode.getText().trim();
                            // fix #2789963
                            if (currentNumber.intValue() > 0) {
                                // bookmarks regexp matching if any
                                if (StringUtils.isBlank(inputCommand.getBookmarkRegexp())
                                        || bookmarkText.matches(inputCommand.getBookmarkRegexp())) {
                                    // to split just before the given page
                                    if ((currentNumber.intValue()) > 1) {
                                        pageSet.add(new Integer(currentNumber.intValue() - 1));
                                    }
                                    if (StringUtils.isNotBlank(bookmarkText)) {
                                        bookmarksTable.put(currentNumber, bookmarkText.trim());
                                    }
                                }
                            }
                        }
                    }
                }
                if (pageSet.size() > 0) {
                    if (StringUtils.isBlank(inputCommand.getBookmarkRegexp())) {
                        LOG.debug("Found " + pageSet.size() + " destination pages at level " + bLevel);
                    } else {
                        LOG.debug("Found " + pageSet.size() + " destination pages at level " + bLevel
                                + " matching '" + inputCommand.getBookmarkRegexp() + "'");
                    }
                    inputCommand.setSplitPageNumbers((Integer[]) pageSet.toArray(new Integer[pageSet.size()]));
                } else {
                    throw new SplitException(SplitException.ERR_BLEVEL_NO_DEST, new String[] { "" + bLevel });
                }
            } else {
                throw new SplitException(SplitException.ERR_BLEVEL, new String[] { "" + bLevel });
            }
        } else {
            input.close();
            pdfReader.close();
            throw new SplitException(SplitException.ERR_BLEVEL_OUTOFBOUNDS,
                    new String[] { "" + bLevel, "" + maxDepth });
        }
    } else {
        pdfReader.close();
        throw new SplitException(SplitException.ERR_NOT_VALID_BLEVEL, new String[] { "" + bLevel });
    }
    pdfReader.close();
    executeSplit(inputCommand, bookmarksTable);
}
From source file:com.android.phone.common.mail.store.ImapFolder.java
public void fetchInternal(Message[] messages, FetchProfile fp, MessageRetrievalListener listener)
        throws MessagingException {
    if (messages.length == 0) {
        return;
    }
    checkOpen();
    HashMap<String, Message> messageMap = new HashMap<String, Message>();
    for (Message m : messages) {
        messageMap.put(m.getUid(), m);
    }

    /*
     * Figure out what command we are going to run:
     * FLAGS     - UID FETCH (FLAGS)
     * ENVELOPE  - UID FETCH (INTERNALDATE UID RFC822.SIZE FLAGS BODY.PEEK[
     *             HEADER.FIELDS (date subject from content-type to cc)])
     * STRUCTURE - UID FETCH (BODYSTRUCTURE)
     * BODY_SANE - UID FETCH (BODY.PEEK[]<0.N>) where N = max bytes returned
     * BODY      - UID FETCH (BODY.PEEK[])
     * Part      - UID FETCH (BODY.PEEK[ID]) where ID = mime part ID
     */
    final LinkedHashSet<String> fetchFields = new LinkedHashSet<String>();
    fetchFields.add(ImapConstants.UID);
    if (fp.contains(FetchProfile.Item.FLAGS)) {
        fetchFields.add(ImapConstants.FLAGS);
    }
    if (fp.contains(FetchProfile.Item.ENVELOPE)) {
        fetchFields.add(ImapConstants.INTERNALDATE);
        fetchFields.add(ImapConstants.RFC822_SIZE);
        fetchFields.add(ImapConstants.FETCH_FIELD_HEADERS);
    }
    if (fp.contains(FetchProfile.Item.STRUCTURE)) {
        fetchFields.add(ImapConstants.BODYSTRUCTURE);
    }
    if (fp.contains(FetchProfile.Item.BODY_SANE)) {
        fetchFields.add(ImapConstants.FETCH_FIELD_BODY_PEEK_SANE);
    }
    if (fp.contains(FetchProfile.Item.BODY)) {
        fetchFields.add(ImapConstants.FETCH_FIELD_BODY_PEEK);
    }

    // TODO Why are we only fetching the first part given?
    final Part fetchPart = fp.getFirstPart();
    if (fetchPart != null) {
        final String[] partIds = fetchPart.getHeader(MimeHeader.HEADER_ANDROID_ATTACHMENT_STORE_DATA);
        // TODO Why can a single part have more than one Id? And why should we only fetch
        // the first id if there are more than one?
        if (partIds != null) {
            fetchFields.add(ImapConstants.FETCH_FIELD_BODY_PEEK_BARE + "[" + partIds[0] + "]");
        }
    }

    try {
        mConnection.sendCommand(String.format(Locale.US, ImapConstants.UID_FETCH + " %s (%s)",
                ImapStore.joinMessageUids(messages),
                Utility.combine(fetchFields.toArray(new String[fetchFields.size()]), ' ')), false);
        ImapResponse response;
        do {
            response = null;
            try {
                response = mConnection.readResponse();
                if (!response.isDataResponse(1, ImapConstants.FETCH)) {
                    continue; // Ignore
                }
                final ImapList fetchList = response.getListOrEmpty(2);
                final String uid = fetchList.getKeyedStringOrEmpty(ImapConstants.UID).getString();
                if (TextUtils.isEmpty(uid)) continue;

                ImapMessage message = (ImapMessage) messageMap.get(uid);
                if (message == null) continue;

                if (fp.contains(FetchProfile.Item.FLAGS)) {
                    final ImapList flags = fetchList.getKeyedListOrEmpty(ImapConstants.FLAGS);
                    for (int i = 0, count = flags.size(); i < count; i++) {
                        final ImapString flag = flags.getStringOrEmpty(i);
                        if (flag.is(ImapConstants.FLAG_DELETED)) {
                            message.setFlagInternal(Flag.DELETED, true);
                        } else if (flag.is(ImapConstants.FLAG_ANSWERED)) {
                            message.setFlagInternal(Flag.ANSWERED, true);
                        } else if (flag.is(ImapConstants.FLAG_SEEN)) {
                            message.setFlagInternal(Flag.SEEN, true);
                        } else if (flag.is(ImapConstants.FLAG_FLAGGED)) {
                            message.setFlagInternal(Flag.FLAGGED, true);
                        }
                    }
                }
                if (fp.contains(FetchProfile.Item.ENVELOPE)) {
                    final Date internalDate = fetchList.getKeyedStringOrEmpty(ImapConstants.INTERNALDATE)
                            .getDateOrNull();
                    final int size = fetchList.getKeyedStringOrEmpty(ImapConstants.RFC822_SIZE)
                            .getNumberOrZero();
                    final String header = fetchList
                            .getKeyedStringOrEmpty(ImapConstants.BODY_BRACKET_HEADER, true).getString();
                    message.setInternalDate(internalDate);
                    message.setSize(size);
                    message.parse(Utility.streamFromAsciiString(header));
                }
                if (fp.contains(FetchProfile.Item.STRUCTURE)) {
                    ImapList bs = fetchList.getKeyedListOrEmpty(ImapConstants.BODYSTRUCTURE);
                    if (!bs.isEmpty()) {
                        try {
                            parseBodyStructure(bs, message, ImapConstants.TEXT);
                        } catch (MessagingException e) {
                            LogUtils.v(TAG, e, "Error handling message");
                            message.setBody(null);
                        }
                    }
                }
                if (fp.contains(FetchProfile.Item.BODY) || fp.contains(FetchProfile.Item.BODY_SANE)) {
                    // Body is keyed by "BODY[]...".
                    // Previously used "BODY[..." but this can be confused with "BODY[HEADER..."
                    // TODO Should we accept "RFC822" as well??
                    ImapString body = fetchList.getKeyedStringOrEmpty("BODY[]", true);
                    InputStream bodyStream = body.getAsStream();
                    message.parse(bodyStream);
                }
                if (fetchPart != null) {
                    InputStream bodyStream = fetchList.getKeyedStringOrEmpty("BODY[", true).getAsStream();
                    String encodings[] = fetchPart.getHeader(MimeHeader.HEADER_CONTENT_TRANSFER_ENCODING);

                    String contentTransferEncoding = null;
                    if (encodings != null && encodings.length > 0) {
                        contentTransferEncoding = encodings[0];
                    } else {
                        // According to http://tools.ietf.org/html/rfc2045#section-6.1
                        // "7bit" is the default.
                        contentTransferEncoding = "7bit";
                    }

                    try {
                        // TODO Don't create 2 temp files.
                        // decodeBody creates BinaryTempFileBody, but we could avoid this
                        // if we implement ImapStringBody.
                        // (We'll need to share a temp file. Protect it with a ref-count.)
                        fetchPart.setBody(decodeBody(mStore.getContext(), bodyStream, contentTransferEncoding,
                                fetchPart.getSize(), listener));
                    } catch (Exception e) {
                        // TODO: Figure out what kinds of exceptions might actually be thrown
                        // from here. This blanket catch-all is because we're not sure what to
                        // do if we don't have a contentTransferEncoding, and we don't have
                        // time to figure out what exceptions might be thrown.
                        LogUtils.e(TAG, "Error fetching body %s", e);
                    }
                }

                if (listener != null) {
                    listener.messageRetrieved(message);
                }
            } finally {
                destroyResponses();
            }
        } while (!response.isTagged());
    } catch (IOException ioe) {
        throw ioExceptionHandler(mConnection, ioe);
    }
}
From source file:org.apache.tajo.plan.LogicalPlanner.java
private void setTargetOfTableSubQuery(PlanContext context, QueryBlock block, TableSubQueryNode subQueryNode)
        throws TajoException {
    // Add additional expressions required in upper nodes.
    Set<String> newlyEvaluatedExprs = TUtil.newHashSet();
    for (NamedExpr rawTarget : block.namedExprsMgr.getAllNamedExprs()) {
        try {
            EvalNode evalNode = exprAnnotator.createEvalNode(context, rawTarget.getExpr(),
                    NameResolvingMode.RELS_ONLY);
            if (checkIfBeEvaluatedAtRelation(block, evalNode, subQueryNode)) {
                block.namedExprsMgr.markAsEvaluated(rawTarget.getAlias(), evalNode);
                newlyEvaluatedExprs.add(rawTarget.getAlias()); // newly added exr
            }
        } catch (UndefinedColumnException ve) {
        }
    }

    // Assume that each unique expr is evaluated once.
    LinkedHashSet<Target> targets = createFieldTargetsFromRelation(block, subQueryNode, newlyEvaluatedExprs);
    for (String newAddedExpr : newlyEvaluatedExprs) {
        targets.add(block.namedExprsMgr.getTarget(newAddedExpr, true));
    }
    subQueryNode.setTargets(targets.toArray(new Target[targets.size()]));
}
From source file:org.apache.tajo.engine.planner.LogicalPlanner.java
@Override
public ScanNode visitRelation(PlanContext context, Stack<Expr> stack, Relation expr) throws PlanningException {
    QueryBlock block = context.queryBlock;

    ScanNode scanNode = block.getNodeFromExpr(expr);
    updatePhysicalInfo(scanNode.getTableDesc());

    // Find expression which can be evaluated at this relation node.
    // Except for column references, additional expressions used in select list, where clause, order-by clauses
    // can be evaluated here. Their reference names are kept in newlyEvaluatedExprsRef.
    Set<String> newlyEvaluatedExprsReferences = new LinkedHashSet<String>();
    for (Iterator<NamedExpr> iterator = block.namedExprsMgr.getIteratorForUnevaluatedExprs(); iterator.hasNext();) {
        NamedExpr rawTarget = iterator.next();

        try {
            EvalNode evalNode = exprAnnotator.createEvalNode(context, rawTarget.getExpr(),
                    NameResolvingMode.RELS_ONLY);
            if (checkIfBeEvaluatedAtRelation(block, evalNode, scanNode)) {
                block.namedExprsMgr.markAsEvaluated(rawTarget.getAlias(), evalNode);
                newlyEvaluatedExprsReferences.add(rawTarget.getAlias()); // newly added exr
            }
        } catch (VerifyException ve) {
        }
    }

    // Assume that each unique expr is evaluated once.
    LinkedHashSet<Target> targets = createFieldTargetsFromRelation(block, scanNode,
            newlyEvaluatedExprsReferences);

    // The fact the some expr is included in newlyEvaluatedExprsReferences means that it is already evaluated.
    // So, we get a raw expression and then creates a target.
    for (String reference : newlyEvaluatedExprsReferences) {
        NamedExpr refrer = block.namedExprsMgr.getNamedExpr(reference);
        EvalNode evalNode = exprAnnotator.createEvalNode(context, refrer.getExpr(), NameResolvingMode.RELS_ONLY);
        targets.add(new Target(evalNode, reference));
    }

    scanNode.setTargets(targets.toArray(new Target[targets.size()]));

    verifyProjectedFields(block, scanNode);
    return scanNode;
}
From source file:ca.uhn.fhir.jpa.dao.SearchBuilder.java
private void doSetPids(Collection<Long> thePids) {
    if (myParams.isPersistResults()) {
        if (mySearchEntity.getTotalCount() != null) {
            reinitializeSearch();
        }
        LinkedHashSet<SearchResult> results = new LinkedHashSet<SearchResult>();
        int index = 0;
        for (Long next : thePids) {
            SearchResult nextResult = new SearchResult(mySearchEntity);
            nextResult.setResourcePid(next);
            nextResult.setOrder(index);
            results.add(nextResult);
            index++;
        }
        mySearchResultDao.save(results);

        mySearchEntity.setTotalCount(results.size());
        mySearchEntity = myEntityManager.merge(mySearchEntity);
        myEntityManager.flush();
    } else {
        myPids = thePids;
    }
}
From source file:org.apache.tajo.plan.LogicalPlanner.java
@Override
public ScanNode visitRelation(PlanContext context, Stack<Expr> stack, Relation expr) throws TajoException {
    QueryBlock block = context.queryBlock;

    ScanNode scanNode = block.getNodeFromExpr(expr);
    updatePhysicalInfo(context, scanNode.getTableDesc());

    // Find expression which can be evaluated at this relation node.
    // Except for column references, additional expressions used in select list, where clause, order-by clauses
    // can be evaluated here. Their reference names are kept in newlyEvaluatedExprsRef.
    Set<String> newlyEvaluatedExprsReferences = new LinkedHashSet<String>();
    for (Iterator<NamedExpr> iterator = block.namedExprsMgr.getIteratorForUnevaluatedExprs(); iterator.hasNext();) {
        NamedExpr rawTarget = iterator.next();

        try {
            EvalNode evalNode = exprAnnotator.createEvalNode(context, rawTarget.getExpr(),
                    NameResolvingMode.RELS_ONLY);
            if (checkIfBeEvaluatedAtRelation(block, evalNode, scanNode)) {
                block.namedExprsMgr.markAsEvaluated(rawTarget.getAlias(), evalNode);
                newlyEvaluatedExprsReferences.add(rawTarget.getAlias()); // newly added exr
            }
        } catch (UndefinedColumnException ve) {
        }
    }

    // Assume that each unique expr is evaluated once.
    LinkedHashSet<Target> targets = createFieldTargetsFromRelation(block, scanNode,
            newlyEvaluatedExprsReferences);

    // The fact the some expr is included in newlyEvaluatedExprsReferences means that it is already evaluated.
    // So, we get a raw expression and then creates a target.
    for (String reference : newlyEvaluatedExprsReferences) {
        NamedExpr refrer = block.namedExprsMgr.getNamedExpr(reference);
        EvalNode evalNode = exprAnnotator.createEvalNode(context, refrer.getExpr(), NameResolvingMode.RELS_ONLY);
        targets.add(new Target(evalNode, reference));
    }

    scanNode.setTargets(targets.toArray(new Target[targets.size()]));

    verifyProjectedFields(block, scanNode);
    return scanNode;
}
From source file:com.android.email.mail.store.ImapFolder.java
public void fetchInternal(Message[] messages, FetchProfile fp, MessageRetrievalListener listener)
        throws MessagingException {
    if (messages.length == 0) {
        return;
    }
    checkOpen();
    HashMap<String, Message> messageMap = new HashMap<String, Message>();
    for (Message m : messages) {
        messageMap.put(m.getUid(), m);
    }

    /*
     * Figure out what command we are going to run:
     * FLAGS     - UID FETCH (FLAGS)
     * ENVELOPE  - UID FETCH (INTERNALDATE UID RFC822.SIZE FLAGS BODY.PEEK[
     *             HEADER.FIELDS (date subject from content-type to cc)])
     * STRUCTURE - UID FETCH (BODYSTRUCTURE)
     * BODY_SANE - UID FETCH (BODY.PEEK[]<0.N>) where N = max bytes returned
     * BODY      - UID FETCH (BODY.PEEK[])
     * Part      - UID FETCH (BODY.PEEK[ID]) where ID = mime part ID
     */
    final LinkedHashSet<String> fetchFields = new LinkedHashSet<String>();
    fetchFields.add(ImapConstants.UID);
    if (fp.contains(FetchProfile.Item.FLAGS)) {
        fetchFields.add(ImapConstants.FLAGS);
    }
    if (fp.contains(FetchProfile.Item.ENVELOPE)) {
        fetchFields.add(ImapConstants.INTERNALDATE);
        fetchFields.add(ImapConstants.RFC822_SIZE);
        fetchFields.add(ImapConstants.FETCH_FIELD_HEADERS);
    }
    if (fp.contains(FetchProfile.Item.STRUCTURE)) {
        fetchFields.add(ImapConstants.BODYSTRUCTURE);
    }
    if (fp.contains(FetchProfile.Item.BODY_SANE)) {
        fetchFields.add(ImapConstants.FETCH_FIELD_BODY_PEEK_SANE);
    }
    if (fp.contains(FetchProfile.Item.BODY)) {
        fetchFields.add(ImapConstants.FETCH_FIELD_BODY_PEEK);
    }

    // TODO Why are we only fetching the first part given?
    final Part fetchPart = fp.getFirstPart();
    if (fetchPart != null) {
        final String[] partIds = fetchPart.getHeader(MimeHeader.HEADER_ANDROID_ATTACHMENT_STORE_DATA);
        // TODO Why can a single part have more than one Id? And why should we only fetch
        // the first id if there are more than one?
        if (partIds != null) {
            fetchFields.add(ImapConstants.FETCH_FIELD_BODY_PEEK_BARE + "[" + partIds[0] + "]");
        }
    }

    try {
        mConnection.sendCommand(String.format(Locale.US, ImapConstants.UID_FETCH + " %s (%s)",
                ImapStore.joinMessageUids(messages),
                Utility.combine(fetchFields.toArray(new String[fetchFields.size()]), ' ')), false);
        ImapResponse response;
        do {
            response = null;
            try {
                response = mConnection.readResponse();
                if (!response.isDataResponse(1, ImapConstants.FETCH)) {
                    continue; // Ignore
                }
                final ImapList fetchList = response.getListOrEmpty(2);
                final String uid = fetchList.getKeyedStringOrEmpty(ImapConstants.UID).getString();
                if (TextUtils.isEmpty(uid)) continue;

                ImapMessage message = (ImapMessage) messageMap.get(uid);
                if (message == null) continue;

                if (fp.contains(FetchProfile.Item.FLAGS)) {
                    final ImapList flags = fetchList.getKeyedListOrEmpty(ImapConstants.FLAGS);
                    for (int i = 0, count = flags.size(); i < count; i++) {
                        final ImapString flag = flags.getStringOrEmpty(i);
                        if (flag.is(ImapConstants.FLAG_DELETED)) {
                            message.setFlagInternal(Flag.DELETED, true);
                        } else if (flag.is(ImapConstants.FLAG_ANSWERED)) {
                            message.setFlagInternal(Flag.ANSWERED, true);
                        } else if (flag.is(ImapConstants.FLAG_SEEN)) {
                            message.setFlagInternal(Flag.SEEN, true);
                        } else if (flag.is(ImapConstants.FLAG_FLAGGED)) {
                            message.setFlagInternal(Flag.FLAGGED, true);
                        }
                    }
                }
                if (fp.contains(FetchProfile.Item.ENVELOPE)) {
                    final Date internalDate = fetchList.getKeyedStringOrEmpty(ImapConstants.INTERNALDATE)
                            .getDateOrNull();
                    final int size = fetchList.getKeyedStringOrEmpty(ImapConstants.RFC822_SIZE)
                            .getNumberOrZero();
                    final String header = fetchList
                            .getKeyedStringOrEmpty(ImapConstants.BODY_BRACKET_HEADER, true).getString();
                    message.setInternalDate(internalDate);
                    message.setSize(size);
                    message.parse(Utility.streamFromAsciiString(header));
                }
                if (fp.contains(FetchProfile.Item.STRUCTURE)) {
                    ImapList bs = fetchList.getKeyedListOrEmpty(ImapConstants.BODYSTRUCTURE);
                    if (!bs.isEmpty()) {
                        try {
                            parseBodyStructure(bs, message, ImapConstants.TEXT);
                        } catch (MessagingException e) {
                            if (Logging.LOGD) {
                                LogUtils.v(Logging.LOG_TAG, e, "Error handling message");
                            }
                            message.setBody(null);
                        }
                    }
                }
                if (fp.contains(FetchProfile.Item.BODY) || fp.contains(FetchProfile.Item.BODY_SANE)) {
                    // Body is keyed by "BODY[]...".
                    // Previously used "BODY[..." but this can be confused with "BODY[HEADER..."
                    // TODO Should we accept "RFC822" as well??
                    ImapString body = fetchList.getKeyedStringOrEmpty("BODY[]", true);
                    InputStream bodyStream = body.getAsStream();
                    message.parse(bodyStream);
                }
                if (fetchPart != null) {
                    InputStream bodyStream = fetchList.getKeyedStringOrEmpty("BODY[", true).getAsStream();
                    String encodings[] = fetchPart.getHeader(MimeHeader.HEADER_CONTENT_TRANSFER_ENCODING);

                    String contentTransferEncoding = null;
                    if (encodings != null && encodings.length > 0) {
                        contentTransferEncoding = encodings[0];
                    } else {
                        // According to http://tools.ietf.org/html/rfc2045#section-6.1
                        // "7bit" is the default.
                        contentTransferEncoding = "7bit";
                    }

                    try {
                        // TODO Don't create 2 temp files.
                        // decodeBody creates BinaryTempFileBody, but we could avoid this
                        // if we implement ImapStringBody.
                        // (We'll need to share a temp file. Protect it with a ref-count.)
                        fetchPart.setBody(
                                decodeBody(bodyStream, contentTransferEncoding, fetchPart.getSize(), listener));
                    } catch (Exception e) {
                        // TODO: Figure out what kinds of exceptions might actually be thrown
                        // from here. This blanket catch-all is because we're not sure what to
                        // do if we don't have a contentTransferEncoding, and we don't have
                        // time to figure out what exceptions might be thrown.
                        LogUtils.e(Logging.LOG_TAG, "Error fetching body %s", e);
                    }
                }

                if (listener != null) {
                    listener.messageRetrieved(message);
                }
            } finally {
                destroyResponses();
            }
        } while (!response.isTagged());
    } catch (IOException ioe) {
        throw ioExceptionHandler(mConnection, ioe);
    }
}