Usage examples for java.util.LinkedHashSet#toArray
<T> T[] toArray(T[] a);
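Before the examples from real projects, here is a minimal, self-contained sketch (set contents invented for illustration) of the two properties every example below relies on: the set drops duplicates, and toArray returns the remaining elements in insertion order as a correctly typed array.

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Set;

public class ToArrayDemo {
    public static void main(String[] args) {
        Set<String> values = new LinkedHashSet<>();
        values.add("alpha");
        values.add("beta");
        values.add("alpha"); // duplicate, silently ignored

        // Pass a pre-sized array so toArray can fill and return it directly.
        String[] array = values.toArray(new String[values.size()]);
        System.out.println(Arrays.toString(array)); // [alpha, beta]
    }
}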
From source file:ArrayUtils.java
/**
 * Merges all elements of a set of arrays into a single array with no duplicates.
 *
 * @param <T1>   The type of the result
 * @param <T2>   The type of the input arrays
 * @param type   The component type of the result array
 * @param arrays The arrays to merge
 * @return A new array containing every distinct element of the given arrays,
 *         in the order the elements are first encountered
 * @throws NullPointerException If any of the arrays is null
 */
public static <T1, T2 extends T1> T1[] mergeInclusive(Class<T1> type, T2[]... arrays) {
    java.util.LinkedHashSet<T1> set = new java.util.LinkedHashSet<T1>();
    for (int i = 0; i < arrays.length; i++) {
        for (int j = 0; j < arrays[i].length; j++) {
            set.add(arrays[i][j]);
        }
    }
    return set.toArray((T1[]) Array.newInstance(type, set.size()));
}
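A hypothetical call site for the helper above (arrays invented). The Class token is what lets mergeInclusive build a correctly typed result via Array.newInstance, which is why the unchecked cast is safe in practice:

String[] first = {"x", "y"};
String[] second = {"y", "z"};
// Encounter order is kept, duplicates dropped: [x, y, z]
String[] merged = ArrayUtils.mergeInclusive(String.class, first, second);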
From source file:gaffer.accumulostore.operation.spark.handler.AccumuloStoreRelation.java
private void buildSchema() {
    LOGGER.info("Building Spark SQL schema for groups {}", StringUtils.join(groups, ','));
    for (final String group : groups) {
        final SchemaElementDefinition elementDefn = store.getSchema().getElement(group);
        final List<StructField> structFieldList = new ArrayList<>();
        if (elementDefn instanceof SchemaEntityDefinition) {
            entityOrEdgeByGroup.put(group, EntityOrEdge.ENTITY);
            final SchemaEntityDefinition entityDefinition = (SchemaEntityDefinition) elementDefn;
            final String vertexClass = store.getSchema().getType(entityDefinition.getVertex()).getClassString();
            final DataType vertexType = getType(vertexClass);
            if (vertexType == null) {
                throw new RuntimeException("Vertex must be a recognised type: found " + vertexClass);
            }
            LOGGER.info("Group {} is an entity group - {} is of type {}", group, VERTEX_COL_NAME, vertexType);
            structFieldList.add(new StructField(VERTEX_COL_NAME, vertexType, true, Metadata.empty()));
        } else {
            entityOrEdgeByGroup.put(group, EntityOrEdge.EDGE);
            final SchemaEdgeDefinition edgeDefinition = (SchemaEdgeDefinition) elementDefn;
            final String srcClass = store.getSchema().getType(edgeDefinition.getSource()).getClassString();
            final String dstClass = store.getSchema().getType(edgeDefinition.getDestination()).getClassString();
            final DataType srcType = getType(srcClass);
            final DataType dstType = getType(dstClass);
            if (srcType == null || dstType == null) {
                throw new RuntimeException("Both source and destination must be recognised types: source was "
                        + srcClass + " destination was " + dstClass);
            }
            LOGGER.info("Group {} is an edge group - {} is of type {}, {} is of type {}", group,
                    SRC_COL_NAME, srcType, DST_COL_NAME, dstType);
            structFieldList.add(new StructField(SRC_COL_NAME, srcType, true, Metadata.empty()));
            structFieldList.add(new StructField(DST_COL_NAME, dstType, true, Metadata.empty()));
        }
        final Set<String> properties = elementDefn.getProperties();
        for (final String property : properties) {
            final String propertyClass = elementDefn.getPropertyClass(property).getCanonicalName();
            final DataType propertyType = getType(propertyClass);
            if (propertyType == null) {
                LOGGER.warn("Ignoring property {} as it is not a recognised type", property);
            } else {
                LOGGER.info("Property {} is of type {}", property, propertyType);
                structFieldList.add(new StructField(property, propertyType, true, Metadata.empty()));
            }
        }
        structTypeByGroup.put(group,
                new StructType(structFieldList.toArray(new StructField[structFieldList.size()])));
    }
    // Create reverse map of field name to StructField
    final Map<String, Set<StructField>> fieldToStructs = new HashMap<>();
    for (final String group : groups) {
        final StructType groupSchema = structTypeByGroup.get(group);
        for (final String field : groupSchema.fieldNames()) {
            if (fieldToStructs.get(field) == null) {
                fieldToStructs.put(field, new HashSet<StructField>());
            }
            fieldToStructs.get(field).add(groupSchema.apply(field));
        }
    }
    // Check consistency, i.e. if the same field appears in multiple groups then the types are consistent
    for (final Map.Entry<String, Set<StructField>> entry : fieldToStructs.entrySet()) {
        final Set<StructField> schemas = entry.getValue();
        if (schemas.size() > 1) {
            throw new IllegalArgumentException("Inconsistent fields: the field " + entry.getKey()
                    + " has more than one definition: " + StringUtils.join(schemas, ','));
        }
    }
    // Merge schemas for groups together - fields should appear in the order the groups were provided
    final LinkedHashSet<StructField> fields = new LinkedHashSet<>();
    fields.add(new StructField(GROUP, DataTypes.StringType, false, Metadata.empty()));
    usedProperties.add(GROUP);
    for (final String group : groups) {
        final StructType groupSchema = structTypeByGroup.get(group);
        for (final String field : groupSchema.fieldNames()) {
            final StructField struct = groupSchema.apply(field);
            // Add struct to fields unless it has already been added
            if (!fields.contains(struct)) {
                fields.add(struct);
                usedProperties.add(field);
            }
        }
    }
    structType = new StructType(fields.toArray(new StructField[fields.size()]));
    LOGGER.info("Schema is {}", structType);
}
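The merge step above leans on LinkedHashSet to keep fields in the order their groups were supplied while dropping fields that an earlier group already contributed. A framework-free sketch of the same idea (field names invented):

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Set;

public class SchemaMergeSketch {
    public static void main(String[] args) {
        String[][] groupFields = {
            {"group", "vertex", "count"},     // entity group
            {"group", "src", "dst", "count"}  // edge group; "group" and "count" already present
        };
        Set<String> merged = new LinkedHashSet<>();
        for (String[] fields : groupFields) {
            merged.addAll(Arrays.asList(fields));
        }
        // First-seen order survives: [group, vertex, count, src, dst]
        String[] schema = merged.toArray(new String[merged.size()]);
        System.out.println(Arrays.toString(schema));
    }
}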
From source file:net.sf.maltcms.chromaui.project.spi.nodes.DescriptorNode.java
@Override
public Action[] getActions(boolean context) {
    List<?> interfaces = ClassUtils.getAllInterfaces(getBean().getClass());
    List<?> superClasses = ClassUtils.getAllSuperclasses(getBean().getClass());
    LinkedHashSet<Action> descriptorActions = new LinkedHashSet<>();
    for (Object o : interfaces) {
        Class<?> c = (Class) o;
        descriptorActions.addAll(Utilities.actionsForPath("Actions/DescriptorNodeActions/" + c.getName()));
        descriptorActions
                .addAll(Utilities.actionsForPath("Actions/DescriptorNodeActions/" + c.getSimpleName()));
    }
    for (Object o : superClasses) {
        Class<?> c = (Class) o;
        descriptorActions.addAll(Utilities.actionsForPath("Actions/DescriptorNodeActions/" + c.getName()));
        descriptorActions
                .addAll(Utilities.actionsForPath("Actions/DescriptorNodeActions/" + c.getSimpleName()));
    }
    descriptorActions.addAll(
            Utilities.actionsForPath("Actions/DescriptorNodeActions/" + getBean().getClass().getName()));
    descriptorActions.addAll(
            Utilities.actionsForPath("Actions/DescriptorNodeActions/" + getBean().getClass().getSimpleName()));
    descriptorActions.add(null);
    descriptorActions.addAll(Utilities.actionsForPath("Actions/DescriptorNodeActions/DefaultActions"));
    descriptorActions.add(SystemAction.get(PropertiesAction.class));
    return descriptorActions.toArray(new Action[descriptorActions.size()]);
}
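Two details worth noting in this example: LinkedHashSet permits a single null element, which NetBeans renders as a menu separator, and the set deduplicates actions that are registered under both an interface path and a superclass path. A stripped-down sketch of both behaviors (action names invented):

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Set;

public class ActionListSketch {
    public static void main(String[] args) {
        Set<String> actions = new LinkedHashSet<>();
        actions.addAll(Arrays.asList("Open", "Rename"));   // from an interface path
        actions.addAll(Arrays.asList("Rename", "Delete")); // duplicate "Rename" is dropped
        actions.add(null);                                 // one null is allowed; acts as a separator
        actions.add("Properties");
        String[] menu = actions.toArray(new String[actions.size()]);
        System.out.println(Arrays.toString(menu)); // [Open, Rename, Delete, null, Properties]
    }
}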
From source file:com.sun.faban.driver.transport.hc3.ApacheHC3Transport.java
/**
 * Obtains the list of cookie values by the name of the cookies.
 * @param name The cookie name
 * @return An array of non-duplicating cookie values.
 */
public String[] getCookieValuesByName(String name) {
    LinkedHashSet<String> valueSet = new LinkedHashSet<String>();
    Cookie[] cookies = hc.getState().getCookies();
    for (Cookie cookie : cookies) {
        if (name.equals(cookie.getName())) {
            valueSet.add(cookie.getValue());
        }
    }
    String[] values = new String[valueSet.size()];
    return valueSet.toArray(values);
}
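The two-step idiom here, allocating the array first and then handing it to toArray, works because toArray fills and returns the very array you pass when it is large enough; only if it is too small does it allocate a new one. A quick sketch (values invented):

import java.util.Arrays;
import java.util.LinkedHashSet;

public class PreSizedArraySketch {
    public static void main(String[] args) {
        LinkedHashSet<String> valueSet = new LinkedHashSet<>(Arrays.asList("a", "b"));
        String[] values = new String[valueSet.size()];
        String[] returned = valueSet.toArray(values);
        System.out.println(values == returned); // true: the same instance was filled
    }
}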
From source file:com.android.phone.common.mail.store.ImapFolder.java
public void fetchInternal(Message[] messages, FetchProfile fp, MessageRetrievalListener listener)
        throws MessagingException {
    if (messages.length == 0) {
        return;
    }
    checkOpen();
    HashMap<String, Message> messageMap = new HashMap<String, Message>();
    for (Message m : messages) {
        messageMap.put(m.getUid(), m);
    }
    /*
     * Figure out what command we are going to run:
     * FLAGS     - UID FETCH (FLAGS)
     * ENVELOPE  - UID FETCH (INTERNALDATE UID RFC822.SIZE FLAGS BODY.PEEK[
     *             HEADER.FIELDS (date subject from content-type to cc)])
     * STRUCTURE - UID FETCH (BODYSTRUCTURE)
     * BODY_SANE - UID FETCH (BODY.PEEK[]<0.N>) where N = max bytes returned
     * BODY      - UID FETCH (BODY.PEEK[])
     * Part      - UID FETCH (BODY.PEEK[ID]) where ID = mime part ID
     */
    final LinkedHashSet<String> fetchFields = new LinkedHashSet<String>();
    fetchFields.add(ImapConstants.UID);
    if (fp.contains(FetchProfile.Item.FLAGS)) {
        fetchFields.add(ImapConstants.FLAGS);
    }
    if (fp.contains(FetchProfile.Item.ENVELOPE)) {
        fetchFields.add(ImapConstants.INTERNALDATE);
        fetchFields.add(ImapConstants.RFC822_SIZE);
        fetchFields.add(ImapConstants.FETCH_FIELD_HEADERS);
    }
    if (fp.contains(FetchProfile.Item.STRUCTURE)) {
        fetchFields.add(ImapConstants.BODYSTRUCTURE);
    }
    if (fp.contains(FetchProfile.Item.BODY_SANE)) {
        fetchFields.add(ImapConstants.FETCH_FIELD_BODY_PEEK_SANE);
    }
    if (fp.contains(FetchProfile.Item.BODY)) {
        fetchFields.add(ImapConstants.FETCH_FIELD_BODY_PEEK);
    }
    // TODO Why are we only fetching the first part given?
    final Part fetchPart = fp.getFirstPart();
    if (fetchPart != null) {
        final String[] partIds = fetchPart.getHeader(MimeHeader.HEADER_ANDROID_ATTACHMENT_STORE_DATA);
        // TODO Why can a single part have more than one Id? And why should we only fetch
        // the first id if there are more than one?
        if (partIds != null) {
            fetchFields.add(ImapConstants.FETCH_FIELD_BODY_PEEK_BARE + "[" + partIds[0] + "]");
        }
    }
    try {
        mConnection.sendCommand(String.format(Locale.US, ImapConstants.UID_FETCH + " %s (%s)",
                ImapStore.joinMessageUids(messages),
                Utility.combine(fetchFields.toArray(new String[fetchFields.size()]), ' ')), false);
        ImapResponse response;
        do {
            response = null;
            try {
                response = mConnection.readResponse();
                if (!response.isDataResponse(1, ImapConstants.FETCH)) {
                    continue; // Ignore
                }
                final ImapList fetchList = response.getListOrEmpty(2);
                final String uid = fetchList.getKeyedStringOrEmpty(ImapConstants.UID).getString();
                if (TextUtils.isEmpty(uid)) continue;
                ImapMessage message = (ImapMessage) messageMap.get(uid);
                if (message == null) continue;
                if (fp.contains(FetchProfile.Item.FLAGS)) {
                    final ImapList flags = fetchList.getKeyedListOrEmpty(ImapConstants.FLAGS);
                    for (int i = 0, count = flags.size(); i < count; i++) {
                        final ImapString flag = flags.getStringOrEmpty(i);
                        if (flag.is(ImapConstants.FLAG_DELETED)) {
                            message.setFlagInternal(Flag.DELETED, true);
                        } else if (flag.is(ImapConstants.FLAG_ANSWERED)) {
                            message.setFlagInternal(Flag.ANSWERED, true);
                        } else if (flag.is(ImapConstants.FLAG_SEEN)) {
                            message.setFlagInternal(Flag.SEEN, true);
                        } else if (flag.is(ImapConstants.FLAG_FLAGGED)) {
                            message.setFlagInternal(Flag.FLAGGED, true);
                        }
                    }
                }
                if (fp.contains(FetchProfile.Item.ENVELOPE)) {
                    final Date internalDate = fetchList.getKeyedStringOrEmpty(ImapConstants.INTERNALDATE)
                            .getDateOrNull();
                    final int size = fetchList.getKeyedStringOrEmpty(ImapConstants.RFC822_SIZE)
                            .getNumberOrZero();
                    final String header = fetchList
                            .getKeyedStringOrEmpty(ImapConstants.BODY_BRACKET_HEADER, true).getString();
                    message.setInternalDate(internalDate);
                    message.setSize(size);
                    message.parse(Utility.streamFromAsciiString(header));
                }
                if (fp.contains(FetchProfile.Item.STRUCTURE)) {
                    ImapList bs = fetchList.getKeyedListOrEmpty(ImapConstants.BODYSTRUCTURE);
                    if (!bs.isEmpty()) {
                        try {
                            parseBodyStructure(bs, message, ImapConstants.TEXT);
                        } catch (MessagingException e) {
                            LogUtils.v(TAG, e, "Error handling message");
                            message.setBody(null);
                        }
                    }
                }
                if (fp.contains(FetchProfile.Item.BODY) || fp.contains(FetchProfile.Item.BODY_SANE)) {
                    // Body is keyed by "BODY[]...".
                    // Previously used "BODY[..." but this can be confused with "BODY[HEADER..."
                    // TODO Should we accept "RFC822" as well??
                    ImapString body = fetchList.getKeyedStringOrEmpty("BODY[]", true);
                    InputStream bodyStream = body.getAsStream();
                    message.parse(bodyStream);
                }
                if (fetchPart != null) {
                    InputStream bodyStream = fetchList.getKeyedStringOrEmpty("BODY[", true).getAsStream();
                    String[] encodings = fetchPart.getHeader(MimeHeader.HEADER_CONTENT_TRANSFER_ENCODING);
                    String contentTransferEncoding = null;
                    if (encodings != null && encodings.length > 0) {
                        contentTransferEncoding = encodings[0];
                    } else {
                        // According to http://tools.ietf.org/html/rfc2045#section-6.1
                        // "7bit" is the default.
                        contentTransferEncoding = "7bit";
                    }
                    try {
                        // TODO Don't create 2 temp files.
                        // decodeBody creates BinaryTempFileBody, but we could avoid this
                        // if we implement ImapStringBody.
                        // (We'll need to share a temp file. Protect it with a ref-count.)
                        fetchPart.setBody(decodeBody(mStore.getContext(), bodyStream,
                                contentTransferEncoding, fetchPart.getSize(), listener));
                    } catch (Exception e) {
                        // TODO: Figure out what kinds of exceptions might actually be thrown
                        // from here. This blanket catch-all is because we're not sure what to
                        // do if we don't have a contentTransferEncoding, and we don't have
                        // time to figure out what exceptions might be thrown.
                        LogUtils.e(TAG, "Error fetching body %s", e);
                    }
                }
                if (listener != null) {
                    listener.messageRetrieved(message);
                }
            } finally {
                destroyResponses();
            }
        } while (!response.isTagged());
    } catch (IOException ioe) {
        throw ioExceptionHandler(mConnection, ioe);
    }
}
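The interesting toArray use here is feeding Utility.combine, which joins the deduplicated FETCH fields into one IMAP command string (Utility.combine is the project's own joiner). With plain JDK classes the same step looks roughly like this (field values invented):

import java.util.LinkedHashSet;
import java.util.Set;

public class FetchFieldsSketch {
    public static void main(String[] args) {
        Set<String> fetchFields = new LinkedHashSet<>();
        fetchFields.add("UID");
        fetchFields.add("FLAGS");
        fetchFields.add("UID"); // a repeated field cannot corrupt the command
        // Prints: UID FETCH 1:5 (UID FLAGS)
        System.out.println("UID FETCH 1:5 (" + String.join(" ", fetchFields) + ")");
    }
}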
From source file:com.android.email.mail.store.ImapFolder.java
public void fetchInternal(Message[] messages, FetchProfile fp, MessageRetrievalListener listener)
        throws MessagingException {
    if (messages.length == 0) {
        return;
    }
    checkOpen();
    HashMap<String, Message> messageMap = new HashMap<String, Message>();
    for (Message m : messages) {
        messageMap.put(m.getUid(), m);
    }
    /*
     * Figure out what command we are going to run:
     * FLAGS     - UID FETCH (FLAGS)
     * ENVELOPE  - UID FETCH (INTERNALDATE UID RFC822.SIZE FLAGS BODY.PEEK[
     *             HEADER.FIELDS (date subject from content-type to cc)])
     * STRUCTURE - UID FETCH (BODYSTRUCTURE)
     * BODY_SANE - UID FETCH (BODY.PEEK[]<0.N>) where N = max bytes returned
     * BODY      - UID FETCH (BODY.PEEK[])
     * Part      - UID FETCH (BODY.PEEK[ID]) where ID = mime part ID
     */
    final LinkedHashSet<String> fetchFields = new LinkedHashSet<String>();
    fetchFields.add(ImapConstants.UID);
    if (fp.contains(FetchProfile.Item.FLAGS)) {
        fetchFields.add(ImapConstants.FLAGS);
    }
    if (fp.contains(FetchProfile.Item.ENVELOPE)) {
        fetchFields.add(ImapConstants.INTERNALDATE);
        fetchFields.add(ImapConstants.RFC822_SIZE);
        fetchFields.add(ImapConstants.FETCH_FIELD_HEADERS);
    }
    if (fp.contains(FetchProfile.Item.STRUCTURE)) {
        fetchFields.add(ImapConstants.BODYSTRUCTURE);
    }
    if (fp.contains(FetchProfile.Item.BODY_SANE)) {
        fetchFields.add(ImapConstants.FETCH_FIELD_BODY_PEEK_SANE);
    }
    if (fp.contains(FetchProfile.Item.BODY)) {
        fetchFields.add(ImapConstants.FETCH_FIELD_BODY_PEEK);
    }
    // TODO Why are we only fetching the first part given?
    final Part fetchPart = fp.getFirstPart();
    if (fetchPart != null) {
        final String[] partIds = fetchPart.getHeader(MimeHeader.HEADER_ANDROID_ATTACHMENT_STORE_DATA);
        // TODO Why can a single part have more than one Id? And why should we only fetch
        // the first id if there are more than one?
        if (partIds != null) {
            fetchFields.add(ImapConstants.FETCH_FIELD_BODY_PEEK_BARE + "[" + partIds[0] + "]");
        }
    }
    try {
        mConnection.sendCommand(String.format(Locale.US, ImapConstants.UID_FETCH + " %s (%s)",
                ImapStore.joinMessageUids(messages),
                Utility.combine(fetchFields.toArray(new String[fetchFields.size()]), ' ')), false);
        ImapResponse response;
        do {
            response = null;
            try {
                response = mConnection.readResponse();
                if (!response.isDataResponse(1, ImapConstants.FETCH)) {
                    continue; // Ignore
                }
                final ImapList fetchList = response.getListOrEmpty(2);
                final String uid = fetchList.getKeyedStringOrEmpty(ImapConstants.UID).getString();
                if (TextUtils.isEmpty(uid)) continue;
                ImapMessage message = (ImapMessage) messageMap.get(uid);
                if (message == null) continue;
                if (fp.contains(FetchProfile.Item.FLAGS)) {
                    final ImapList flags = fetchList.getKeyedListOrEmpty(ImapConstants.FLAGS);
                    for (int i = 0, count = flags.size(); i < count; i++) {
                        final ImapString flag = flags.getStringOrEmpty(i);
                        if (flag.is(ImapConstants.FLAG_DELETED)) {
                            message.setFlagInternal(Flag.DELETED, true);
                        } else if (flag.is(ImapConstants.FLAG_ANSWERED)) {
                            message.setFlagInternal(Flag.ANSWERED, true);
                        } else if (flag.is(ImapConstants.FLAG_SEEN)) {
                            message.setFlagInternal(Flag.SEEN, true);
                        } else if (flag.is(ImapConstants.FLAG_FLAGGED)) {
                            message.setFlagInternal(Flag.FLAGGED, true);
                        }
                    }
                }
                if (fp.contains(FetchProfile.Item.ENVELOPE)) {
                    final Date internalDate = fetchList.getKeyedStringOrEmpty(ImapConstants.INTERNALDATE)
                            .getDateOrNull();
                    final int size = fetchList.getKeyedStringOrEmpty(ImapConstants.RFC822_SIZE)
                            .getNumberOrZero();
                    final String header = fetchList
                            .getKeyedStringOrEmpty(ImapConstants.BODY_BRACKET_HEADER, true).getString();
                    message.setInternalDate(internalDate);
                    message.setSize(size);
                    message.parse(Utility.streamFromAsciiString(header));
                }
                if (fp.contains(FetchProfile.Item.STRUCTURE)) {
                    ImapList bs = fetchList.getKeyedListOrEmpty(ImapConstants.BODYSTRUCTURE);
                    if (!bs.isEmpty()) {
                        try {
                            parseBodyStructure(bs, message, ImapConstants.TEXT);
                        } catch (MessagingException e) {
                            if (Logging.LOGD) {
                                LogUtils.v(Logging.LOG_TAG, e, "Error handling message");
                            }
                            message.setBody(null);
                        }
                    }
                }
                if (fp.contains(FetchProfile.Item.BODY) || fp.contains(FetchProfile.Item.BODY_SANE)) {
                    // Body is keyed by "BODY[]...".
                    // Previously used "BODY[..." but this can be confused with "BODY[HEADER..."
                    // TODO Should we accept "RFC822" as well??
                    ImapString body = fetchList.getKeyedStringOrEmpty("BODY[]", true);
                    InputStream bodyStream = body.getAsStream();
                    message.parse(bodyStream);
                }
                if (fetchPart != null) {
                    InputStream bodyStream = fetchList.getKeyedStringOrEmpty("BODY[", true).getAsStream();
                    String[] encodings = fetchPart.getHeader(MimeHeader.HEADER_CONTENT_TRANSFER_ENCODING);
                    String contentTransferEncoding = null;
                    if (encodings != null && encodings.length > 0) {
                        contentTransferEncoding = encodings[0];
                    } else {
                        // According to http://tools.ietf.org/html/rfc2045#section-6.1
                        // "7bit" is the default.
                        contentTransferEncoding = "7bit";
                    }
                    try {
                        // TODO Don't create 2 temp files.
                        // decodeBody creates BinaryTempFileBody, but we could avoid this
                        // if we implement ImapStringBody.
                        // (We'll need to share a temp file. Protect it with a ref-count.)
                        fetchPart.setBody(
                                decodeBody(bodyStream, contentTransferEncoding, fetchPart.getSize(), listener));
                    } catch (Exception e) {
                        // TODO: Figure out what kinds of exceptions might actually be thrown
                        // from here. This blanket catch-all is because we're not sure what to
                        // do if we don't have a contentTransferEncoding, and we don't have
                        // time to figure out what exceptions might be thrown.
                        LogUtils.e(Logging.LOG_TAG, "Error fetching body %s", e);
                    }
                }
                if (listener != null) {
                    listener.messageRetrieved(message);
                }
            } finally {
                destroyResponses();
            }
        } while (!response.isTagged());
    } catch (IOException ioe) {
        throw ioExceptionHandler(mConnection, ioe);
    }
}
From source file:com.tct.email.mail.store.ImapFolder.java
public void fetchInternal(Message[] messages, FetchProfile fp, MessageRetrievalListener listener)
        throws MessagingException {
    if (messages.length == 0) {
        return;
    }
    //[FEATURE]-Add-BEGIN by TSCD.Chao Zhang,04/14/2014,FR 631895(porting from FR 472914)
    int limitedSize = messages[0].getDownloadOptions();
    //[FEATURE]-Add-END by TSCD.Chao Zhang
    //[FEATURE]-Add-BEGIN by TSCD.chao zhang,04/25/2014,FR 631895(porting from FR487417)
    if (downloadRemainFlag) {
        limitedSize = Utility.ENTIRE_MAIL;
    }
    //[FEATURE]-Add-END by TSCD.Chao Zhang
    checkOpen();
    HashMap<String, Message> messageMap = new HashMap<String, Message>();
    for (Message m : messages) {
        messageMap.put(m.getUid(), m);
    }
    /*
     * Figure out what command we are going to run:
     * FLAGS     - UID FETCH (FLAGS)
     * ENVELOPE  - UID FETCH (INTERNALDATE UID RFC822.SIZE FLAGS BODY.PEEK[
     *             HEADER.FIELDS (date subject from content-type to cc)])
     * STRUCTURE - UID FETCH (BODYSTRUCTURE)
     * BODY_SANE - UID FETCH (BODY.PEEK[]<0.N>) where N = max bytes returned
     * BODY      - UID FETCH (BODY.PEEK[])
     * Part      - UID FETCH (BODY.PEEK[ID]) where ID = mime part ID
     */
    final LinkedHashSet<String> fetchFields = new LinkedHashSet<String>();
    fetchFields.add(ImapConstants.UID);
    if (fp.contains(FetchProfile.Item.FLAGS)) {
        fetchFields.add(ImapConstants.FLAGS);
    }
    if (fp.contains(FetchProfile.Item.ENVELOPE)) {
        fetchFields.add(ImapConstants.INTERNALDATE);
        fetchFields.add(ImapConstants.RFC822_SIZE);
        fetchFields.add(ImapConstants.FETCH_FIELD_HEADERS);
    }
    if (fp.contains(FetchProfile.Item.STRUCTURE)) {
        fetchFields.add(ImapConstants.BODYSTRUCTURE);
    }
    if (fp.contains(FetchProfile.Item.BODY_SANE)) {
        fetchFields.add(ImapConstants.FETCH_FIELD_BODY_PEEK_SANE);
    }
    if (fp.contains(FetchProfile.Item.BODY)) {
        fetchFields.add(ImapConstants.FETCH_FIELD_BODY_PEEK);
    }
    // TODO Why are we only fetching the first part given?
    final Part fetchPart = fp.getFirstPart();
    if (fetchPart != null) {
        final String[] partIds = fetchPart.getHeader(MimeHeader.HEADER_ANDROID_ATTACHMENT_STORE_DATA);
        // TODO Why can a single part have more than one Id? And why should we only fetch
        // the first id if there are more than one?
        if (partIds != null) {
            //[FEATURE]-Add-BEGIN by TSCD.Chao Zhang,04/14/2014,FR 631895(porting from FR 472914)
            String fetchFieldCommand = ImapConstants.FETCH_FIELD_BODY_PEEK_BARE + "[" + partIds[0] + "]";
            if (limitedSize != Utility.ENTIRE_MAIL
                    && fetchPart.getMimeType().contains(ImapConstants.TEXT.toLowerCase())) {
                fetchFieldCommand = fetchFieldCommand + "<0." + limitedSize + ">";
            }
            fetchFields.add(fetchFieldCommand);
            //[FEATURE]-Add-END by TSCD.Chao Zhang
        }
    }
    try {
        mConnection.sendCommand(String.format(Locale.US, ImapConstants.UID_FETCH + " %s (%s)",
                ImapStore.joinMessageUids(messages),
                Utility.combine(fetchFields.toArray(new String[fetchFields.size()]), ' ')), false);
        ImapResponse response;
        do {
            response = null;
            try {
                // TS: Gantao 2015-12-07 EMAIL BUGFIX_1020377 MOD_S
                // set ui callback when network downloading, update progress bar when fetching
                // attachment from server.
                //response = mConnection.readResponse();
                response = mConnection.readResponse(listener);
                // TS: Gantao 2015-12-07 EMAIL BUGFIX_1020377 MOD_E
                if (!response.isDataResponse(1, ImapConstants.FETCH)) {
                    continue; // Ignore
                }
                final ImapList fetchList = response.getListOrEmpty(2);
                final String uid = fetchList.getKeyedStringOrEmpty(ImapConstants.UID).getString();
                if (TextUtils.isEmpty(uid)) continue;
                ImapMessage message = (ImapMessage) messageMap.get(uid);
                if (message == null) continue;
                if (fp.contains(FetchProfile.Item.FLAGS)) {
                    final ImapList flags = fetchList.getKeyedListOrEmpty(ImapConstants.FLAGS);
                    for (int i = 0, count = flags.size(); i < count; i++) {
                        final ImapString flag = flags.getStringOrEmpty(i);
                        if (flag.is(ImapConstants.FLAG_DELETED)) {
                            message.setFlagInternal(Flag.DELETED, true);
                        } else if (flag.is(ImapConstants.FLAG_ANSWERED)) {
                            message.setFlagInternal(Flag.ANSWERED, true);
                        } else if (flag.is(ImapConstants.FLAG_SEEN)) {
                            message.setFlagInternal(Flag.SEEN, true);
                        } else if (flag.is(ImapConstants.FLAG_FLAGGED)) {
                            message.setFlagInternal(Flag.FLAGGED, true);
                        }
                    }
                }
                if (fp.contains(FetchProfile.Item.ENVELOPE)) {
                    final Date internalDate = fetchList.getKeyedStringOrEmpty(ImapConstants.INTERNALDATE)
                            .getDateOrNull();
                    final int size = fetchList.getKeyedStringOrEmpty(ImapConstants.RFC822_SIZE)
                            .getNumberOrZero();
                    final String header = fetchList
                            .getKeyedStringOrEmpty(ImapConstants.BODY_BRACKET_HEADER, true).getString();
                    message.setInternalDate(internalDate);
                    message.setSize(size);
                    message.parse(Utility.streamFromAsciiString(header));
                }
                if (fp.contains(FetchProfile.Item.STRUCTURE)) {
                    ImapList bs = fetchList.getKeyedListOrEmpty(ImapConstants.BODYSTRUCTURE);
                    if (!bs.isEmpty()) {
                        try {
                            parseBodyStructure(bs, message, ImapConstants.TEXT);
                        } catch (MessagingException e) {
                            if (Logging.LOGD) {
                                LogUtils.v(Logging.LOG_TAG, e, "Error handling message");
                            }
                            message.setBody(null);
                        }
                    }
                }
                if (fp.contains(FetchProfile.Item.BODY) || fp.contains(FetchProfile.Item.BODY_SANE)) {
                    // Body is keyed by "BODY[]...".
                    // Previously used "BODY[..." but this can be confused with "BODY[HEADER..."
                    // TODO Should we accept "RFC822" as well??
                    ImapString body = fetchList.getKeyedStringOrEmpty("BODY[]", true);
                    InputStream bodyStream = body.getAsStream();
                    message.parse(bodyStream);
                }
                if (fetchPart != null) {
                    InputStream bodyStream = fetchList.getKeyedStringOrEmpty("BODY[", true).getAsStream();
                    String[] encodings = fetchPart.getHeader(MimeHeader.HEADER_CONTENT_TRANSFER_ENCODING);
                    String contentTransferEncoding = null;
                    if (encodings != null && encodings.length > 0) {
                        contentTransferEncoding = encodings[0];
                    } else {
                        // According to http://tools.ietf.org/html/rfc2045#section-6.1
                        // "7bit" is the default.
                        contentTransferEncoding = "7bit";
                    }
                    try {
                        // TODO Don't create 2 temp files.
                        // decodeBody creates BinaryTempFileBody, but we could avoid this
                        // if we implement ImapStringBody.
                        // (We'll need to share a temp file. Protect it with a ref-count.)
                        fetchPart.setBody(
                                decodeBody(bodyStream, contentTransferEncoding, fetchPart.getSize(), listener));
                    } catch (Exception e) {
                        // TODO: Figure out what kinds of exceptions might actually be thrown
                        // from here. This blanket catch-all is because we're not sure what to
                        // do if we don't have a contentTransferEncoding, and we don't have
                        // time to figure out what exceptions might be thrown.
                        LogUtils.e(Logging.LOG_TAG, "Error fetching body %s", e);
                    }
                }
                if (listener != null) {
                    listener.messageRetrieved(message);
                }
            } finally {
                destroyResponses();
            }
        } while (!response.isTagged());
    } catch (IOException ioe) {
        throw ioExceptionHandler(mConnection, ioe);
    }
}
From source file:com.datatorrent.stram.StramClient.java
/**
 * Launch application for the dag represented by this client.
 *
 * @throws YarnException
 * @throws IOException
 */
public void startApplication() throws YarnException, IOException {
    Class<?>[] defaultClasses;
    if (applicationType.equals(YARN_APPLICATION_TYPE)) {
        //TODO restrict the security check to only check if security is enabled for webservices.
        if (UserGroupInformation.isSecurityEnabled()) {
            defaultClasses = DATATORRENT_SECURITY_CLASSES;
        } else {
            defaultClasses = DATATORRENT_CLASSES;
        }
    } else {
        throw new IllegalStateException(applicationType + " is not a valid application type.");
    }
    LinkedHashSet<String> localJarFiles = findJars(dag, defaultClasses);
    if (resources != null) {
        localJarFiles.addAll(resources);
    }
    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());
    //GetClusterNodesRequest clusterNodesReq = Records.newRecord(GetClusterNodesRequest.class);
    //GetClusterNodesResponse clusterNodesResp = rmClient.clientRM.getClusterNodes(clusterNodesReq);
    //LOG.info("Got Cluster node info from ASM");
    //for (NodeReport node : clusterNodesResp.getNodeReports()) {
    //  LOG.info("Got node report from ASM for"
    //      + ", nodeId=" + node.getNodeId()
    //      + ", nodeAddress" + node.getHttpAddress()
    //      + ", nodeRackName" + node.getRackName()
    //      + ", nodeNumContainers" + node.getNumContainers()
    //      + ", nodeHealthStatus" + node.getHealthReport());
    //}
    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }
    // Get a new application id
    YarnClientApplication newApp = yarnClient.createApplication();
    appId = newApp.getNewApplicationResponse().getApplicationId();
    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = newApp.getNewApplicationResponse().getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capability of resources in this cluster " + maxMem);
    int amMemory = dag.getMasterMemoryMB();
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified="
                + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }
    if (dag.getAttributes().get(LogicalPlan.APPLICATION_ID) == null) {
        dag.setAttribute(LogicalPlan.APPLICATION_ID, appId.toString());
    }
    // Create launch context for app master
    LOG.info("Setting up application submission context for ASM");
    ApplicationSubmissionContext appContext = Records.newRecord(ApplicationSubmissionContext.class);
    // set the application id
    appContext.setApplicationId(appId);
    // set the application name
    appContext.setApplicationName(dag.getValue(LogicalPlan.APPLICATION_NAME));
    appContext.setApplicationType(this.applicationType);
    if (YARN_APPLICATION_TYPE.equals(this.applicationType)) {
        //appContext.setMaxAppAttempts(1); // no retries until Stram is HA
    }
    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
    // Setup security tokens
    // If security is enabled get ResourceManager and NameNode delegation tokens.
    // Set these tokens on the container so that they are sent as part of application submission.
    // This also sets them up for renewal by ResourceManager. The NameNode delegation token
    // is also used by ResourceManager to fetch the jars from HDFS and set them up for the
    // application master launch.
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }
        // For now, only getting tokens for the default file-system.
        FileSystem fs = StramClientUtils.newFileSystemInstance(conf);
        try {
            final Token<?>[] tokens = fs.addDelegationTokens(tokenRenewer, credentials);
            if (tokens != null) {
                for (Token<?> token : tokens) {
                    LOG.info("Got dt for " + fs.getUri() + "; " + token);
                }
            }
        } finally {
            fs.close();
        }
        new ClientRMHelper(yarnClient, conf).addRMDelegationToken(tokenRenewer, credentials);
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }
    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    // copy required jar files to dfs, to be localized for containers
    FileSystem fs = StramClientUtils.newFileSystemInstance(conf);
    try {
        Path appsBasePath = new Path(StramClientUtils.getDTDFSRootDir(fs, conf), StramClientUtils.SUBDIR_APPS);
        Path appPath;
        String configuredAppPath = dag.getValue(LogicalPlan.APPLICATION_PATH);
        if (configuredAppPath == null) {
            appPath = new Path(appsBasePath, appId.toString());
        } else {
            appPath = new Path(configuredAppPath);
        }
        String libJarsCsv = copyFromLocal(fs, appPath, localJarFiles.toArray(new String[] {}));
        LOG.info("libjars: {}", libJarsCsv);
        dag.getAttributes().put(LogicalPlan.LIBRARY_JARS, libJarsCsv);
        LaunchContainerRunnable.addFilesToLocalResources(LocalResourceType.FILE, libJarsCsv, localResources, fs);
        if (archives != null) {
            String[] localFiles = archives.split(",");
            String archivesCsv = copyFromLocal(fs, appPath, localFiles);
            LOG.info("archives: {}", archivesCsv);
            dag.getAttributes().put(LogicalPlan.ARCHIVES, archivesCsv);
            LaunchContainerRunnable.addFilesToLocalResources(LocalResourceType.ARCHIVE, archivesCsv,
                    localResources, fs);
        }
        if (files != null) {
            String[] localFiles = files.split(",");
            String filesCsv = copyFromLocal(fs, appPath, localFiles);
            LOG.info("files: {}", filesCsv);
            dag.getAttributes().put(LogicalPlan.FILES, filesCsv);
            LaunchContainerRunnable.addFilesToLocalResources(LocalResourceType.FILE, filesCsv, localResources, fs);
        }
        dag.getAttributes().put(LogicalPlan.APPLICATION_PATH, appPath.toString());
        StorageAgent agent = dag.getAttributes().get(OperatorContext.STORAGE_AGENT);
        if (agent != null && agent instanceof StorageAgent.ApplicationAwareStorageAgent) {
            ((StorageAgent.ApplicationAwareStorageAgent) agent).setApplicationAttributes(dag.getAttributes());
        }
        if (dag.getAttributes().get(OperatorContext.STORAGE_AGENT) == null) {
            /* which would be the most likely case */
            Path checkpointPath = new Path(appPath, LogicalPlan.SUBDIR_CHECKPOINTS);
            // use conf client side to pickup any proxy settings from dt-site.xml
            dag.setAttribute(OperatorContext.STORAGE_AGENT,
                    new AsyncFSStorageAgent(checkpointPath.toString(), conf));
        }
        if (dag.getAttributes().get(LogicalPlan.CONTAINER_OPTS_CONFIGURATOR) == null) {
            dag.setAttribute(LogicalPlan.CONTAINER_OPTS_CONFIGURATOR, new BasicContainerOptConfigurator());
        }
        // Set the log4j properties if needed
        if (!log4jPropFile.isEmpty()) {
            Path log4jSrc = new Path(log4jPropFile);
            Path log4jDst = new Path(appPath, "log4j.props");
            fs.copyFromLocalFile(false, true, log4jSrc, log4jDst);
            FileStatus log4jFileStatus = fs.getFileStatus(log4jDst);
            LocalResource log4jRsrc = Records.newRecord(LocalResource.class);
            log4jRsrc.setType(LocalResourceType.FILE);
            log4jRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
            log4jRsrc.setResource(ConverterUtils.getYarnUrlFromURI(log4jDst.toUri()));
            log4jRsrc.setTimestamp(log4jFileStatus.getModificationTime());
            log4jRsrc.setSize(log4jFileStatus.getLen());
            localResources.put("log4j.properties", log4jRsrc);
        }
        if (originalAppId != null) {
            Path origAppPath = new Path(appsBasePath, this.originalAppId);
            LOG.info("Restart from {}", origAppPath);
            copyInitialState(origAppPath);
        }
        // push logical plan to DFS location
        Path cfgDst = new Path(appPath, LogicalPlan.SER_FILE_NAME);
        FSDataOutputStream outStream = fs.create(cfgDst, true);
        LogicalPlan.write(this.dag, outStream);
        outStream.close();
        Path launchConfigDst = new Path(appPath, LogicalPlan.LAUNCH_CONFIG_FILE_NAME);
        outStream = fs.create(launchConfigDst, true);
        conf.writeXml(outStream);
        outStream.close();
        LaunchContainerRunnable.addFileToLocalResources(LogicalPlan.SER_FILE_NAME, fs.getFileStatus(cfgDst),
                LocalResourceType.FILE, localResources);
        // Set local resource info into app master container launch context
        amContainer.setLocalResources(localResources);
        // Set the necessary security tokens as needed
        //amContainer.setContainerTokens(containerToken);
        // Set the env variables to be setup in the env where the application master will be run
        LOG.info("Set the environment for the application master");
        Map<String, String> env = new HashMap<String, String>();
        // Add application jar(s) location to classpath
        // At some point we should not be required to add
        // the hadoop specific classpaths to the env.
        // It should be provided out of the box.
        // For now setting all required classpaths including
        // the classpath to "." for the application jar(s)
        // including ${CLASSPATH} will duplicate the class path in app master, removing it for now
        //StringBuilder classPathEnv = new StringBuilder("${CLASSPATH}:./*");
        StringBuilder classPathEnv = new StringBuilder("./*");
        String classpath = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH);
        for (String c : StringUtils.isBlank(classpath) ? YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH
                : classpath.split(",")) {
            if (c.equals("$HADOOP_CLIENT_CONF_DIR")) {
                // SPOI-2501
                continue;
            }
            classPathEnv.append(':');
            classPathEnv.append(c.trim());
        }
        env.put("CLASSPATH", classPathEnv.toString());
        // propagate to replace node managers user name (effective in non-secure mode)
        env.put("HADOOP_USER_NAME", UserGroupInformation.getLoginUser().getUserName());
        amContainer.setEnvironment(env);
        // Set the necessary command to execute the application master
        ArrayList<CharSequence> vargs = new ArrayList<CharSequence>(30);
        // Set java executable command
        LOG.info("Setting up app master command");
        vargs.add(javaCmd);
        if (dag.isDebug()) {
            vargs.add("-agentlib:jdwp=transport=dt_socket,server=y,suspend=n");
        }
        // Set Xmx based on am memory size
        // default heap size 75% of total memory
        if (dag.getMasterJVMOptions() != null) {
            vargs.add(dag.getMasterJVMOptions());
        }
        Path tmpDir = new Path(ApplicationConstants.Environment.PWD.$(),
                YarnConfiguration.DEFAULT_CONTAINER_TEMP_DIR);
        vargs.add("-Djava.io.tmpdir=" + tmpDir);
        vargs.add("-Xmx" + (amMemory * 3 / 4) + "m");
        vargs.add("-XX:+HeapDumpOnOutOfMemoryError");
        vargs.add("-XX:HeapDumpPath=" + System.getProperty("java.io.tmpdir") + "/dt-heap-" + appId.getId()
                + ".bin");
        vargs.add("-Dhadoop.root.logger=" + (dag.isDebug() ? "DEBUG" : "INFO") + ",RFA");
        vargs.add("-Dhadoop.log.dir=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR);
        vargs.add(String.format("-D%s=%s", StreamingContainer.PROP_APP_PATH, dag.assertAppPath()));
        if (dag.isDebug()) {
            vargs.add("-Dlog4j.debug=true");
        }
        String loggersLevel = conf.get(DTLoggerFactory.DT_LOGGERS_LEVEL);
        if (loggersLevel != null) {
            vargs.add(String.format("-D%s=%s", DTLoggerFactory.DT_LOGGERS_LEVEL, loggersLevel));
        }
        vargs.add(StreamingAppMaster.class.getName());
        vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
        vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");
        // Get final command
        StringBuilder command = new StringBuilder(9 * vargs.size());
        for (CharSequence str : vargs) {
            command.append(str).append(" ");
        }
        LOG.info("Completed setting up app master command " + command.toString());
        List<String> commands = new ArrayList<String>();
        commands.add(command.toString());
        amContainer.setCommands(commands);
        // Set up resource type requirements
        // For now, only memory is supported so we set memory requirements
        Resource capability = Records.newRecord(Resource.class);
        capability.setMemory(amMemory);
        appContext.setResource(capability);
        // Service data is a binary blob that can be passed to the application
        // Not needed in this scenario
        // amContainer.setServiceData(serviceData);
        appContext.setAMContainerSpec(amContainer);
        // Set the priority for the application master
        Priority pri = Records.newRecord(Priority.class);
        pri.setPriority(amPriority);
        appContext.setPriority(pri);
        // Set the queue to which this application is to be submitted in the RM
        appContext.setQueue(queueName);
        // Submit the application to the applications manager
        // SubmitApplicationResponse submitResp = rmClient.submitApplication(appRequest);
        // Ignore the response as either a valid response object is returned on success
        // or an exception thrown to denote some form of a failure
        String specStr = Objects.toStringHelper("Submitting application: ")
                .add("name", appContext.getApplicationName()).add("queue", appContext.getQueue())
                .add("user", UserGroupInformation.getLoginUser()).add("resource", appContext.getResource())
                .toString();
        LOG.info(specStr);
        if (dag.isDebug()) {
            //LOG.info("Full submission context: " + appContext);
        }
        yarnClient.submitApplication(appContext);
    } finally {
        fs.close();
    }
}
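Unlike the other examples, StramClient passes an empty array literal: localJarFiles.toArray(new String[] {}). When the supplied array is too small, toArray allocates and returns a fresh array of the set's size, so this is equivalent to the pre-sized form; the empty-array variant simply lets the collection do the sizing. A small sketch:

import java.util.Arrays;
import java.util.LinkedHashSet;

public class EmptyArraySketch {
    public static void main(String[] args) {
        LinkedHashSet<String> jars = new LinkedHashSet<>(Arrays.asList("a.jar", "b.jar"));
        String[] viaEmpty = jars.toArray(new String[] {});
        String[] viaSized = jars.toArray(new String[jars.size()]);
        System.out.println(Arrays.toString(viaEmpty));           // [a.jar, b.jar]
        System.out.println(viaEmpty.length == viaSized.length);  // true
    }
}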
From source file:org.apache.solr.cloud.TestCloudPivotFacet.java
@Test
public void test() throws Exception {
    sanityCheckAssertNumerics();
    waitForThingsToLevelOut(30000); // TODO: why would we have to wait?
    // handle.clear();
    handle.put("QTime", SKIPVAL);
    handle.put("timestamp", SKIPVAL);
    final Set<String> fieldNameSet = new HashSet<>();
    // build up a randomized index
    final int numDocs = atLeast(500);
    log.info("numDocs: {}", numDocs);
    for (int i = 1; i <= numDocs; i++) {
        SolrInputDocument doc = buildRandomDocument(i);
        // not efficient, but it guarantees that even if people change buildRandomDocument
        // we'll always have the full list of fields w/o needing to keep code in sync
        fieldNameSet.addAll(doc.getFieldNames());
        cloudClient.add(doc);
    }
    cloudClient.commit();
    fieldNameSet.remove("id");
    assertTrue("WTF, bogus field exists?", fieldNameSet.add("bogus_not_in_any_doc_s"));
    final String[] fieldNames = fieldNameSet.toArray(new String[fieldNameSet.size()]);
    Arrays.sort(fieldNames); // need determinism when picking random fields
    for (int i = 0; i < 5; i++) {
        String q = "*:*";
        if (random().nextBoolean()) {
            q = "id:[* TO " + TestUtil.nextInt(random(), 300, numDocs) + "]";
        }
        ModifiableSolrParams baseP = params("rows", "0", "q", q);
        if (random().nextBoolean()) {
            baseP.add("fq", "id:[* TO " + TestUtil.nextInt(random(), 200, numDocs) + "]");
        }
        final boolean stats = random().nextBoolean();
        if (stats) {
            baseP.add(StatsParams.STATS, "true");
            // if we are doing stats, then always generate the same # of STATS_FIELD
            // params, using multiple tags from a fixed set, but with diff fieldName values.
            // later, each pivot will randomly pick a tag.
            baseP.add(StatsParams.STATS_FIELD, "{!key=sk1 tag=st1,st2}" + pickRandomStatsFields(fieldNames));
            baseP.add(StatsParams.STATS_FIELD, "{!key=sk2 tag=st2,st3}" + pickRandomStatsFields(fieldNames));
            baseP.add(StatsParams.STATS_FIELD, "{!key=sk3 tag=st3,st4}" + pickRandomStatsFields(fieldNames));
            // NOTE: there's a chance that some of those stats field names
            // will be the same, but if so, all the better to test that edge case
        }
        ModifiableSolrParams pivotP = params(FACET, "true");
        // put our FACET_PIVOT params in a set in case we just happen to pick the same one twice
        LinkedHashSet<String> pivotParamValues = new LinkedHashSet<String>();
        pivotParamValues.add(buildPivotParamValue(buildRandomPivot(fieldNames)));
        if (random().nextBoolean()) {
            pivotParamValues.add(buildPivotParamValue(buildRandomPivot(fieldNames)));
        }
        pivotP.set(FACET_PIVOT, pivotParamValues.toArray(new String[pivotParamValues.size()]));
        // keep limit low - lots of unique values, and lots of depth in pivots
        pivotP.add(FACET_LIMIT, "" + TestUtil.nextInt(random(), 1, 17));
        // sometimes use an offset
        if (random().nextBoolean()) {
            pivotP.add(FACET_OFFSET, "" + TestUtil.nextInt(random(), 0, 7));
        }
        if (random().nextBoolean()) {
            String min = "" + TestUtil.nextInt(random(), 0, numDocs + 10);
            pivotP.add(FACET_PIVOT_MINCOUNT, min);
            // trace param for validation
            baseP.add(TRACE_MIN, min);
        }
        if (random().nextBoolean()) {
            pivotP.add(FACET_DISTRIB_MCO, "true");
            // trace param for validation
            baseP.add(TRACE_DISTRIB_MIN, "true");
        }
        if (random().nextBoolean()) {
            String missing = "" + random().nextBoolean();
            pivotP.add(FACET_MISSING, missing);
            // trace param for validation
            baseP.add(TRACE_MISS, missing);
        }
        if (random().nextBoolean()) {
            String sort = random().nextBoolean() ? "index" : "count";
            pivotP.add(FACET_SORT, sort);
            // trace param for validation
            baseP.add(TRACE_SORT, sort);
        }
        // overrequest
        //
        // NOTE: since this test focuses on accuracy of refinement, and doesn't do
        // control collection comparisons, there isn't a lot of need for excessive
        // overrequesting -- we focus here on trying to exercise the various edge cases
        // involved as different values are used with overrequest
        if (0 == TestUtil.nextInt(random(), 0, 4)) {
            // we want a decent chance of no overrequest at all
            pivotP.add(FACET_OVERREQUEST_COUNT, "0");
            pivotP.add(FACET_OVERREQUEST_RATIO, "0");
        } else {
            if (random().nextBoolean()) {
                pivotP.add(FACET_OVERREQUEST_COUNT, "" + TestUtil.nextInt(random(), 0, 5));
            }
            if (random().nextBoolean()) {
                // sometimes give a ratio less than 1, code should be smart enough to deal
                float ratio = 0.5F + random().nextFloat();
                // sometimes go negative
                if (random().nextBoolean()) {
                    ratio *= -1;
                }
                pivotP.add(FACET_OVERREQUEST_RATIO, "" + ratio);
            }
        }
        assertPivotCountsAreCorrect(baseP, pivotP);
    }
}
From source file:org.apache.tajo.engine.planner.LogicalPlanner.java
@Override
public ScanNode visitRelation(PlanContext context, Stack<Expr> stack, Relation expr) throws PlanningException {
    QueryBlock block = context.queryBlock;
    ScanNode scanNode = block.getNodeFromExpr(expr);
    updatePhysicalInfo(scanNode.getTableDesc());
    // Find expressions which can be evaluated at this relation node.
    // Except for column references, additional expressions used in the select list, where clause,
    // and order-by clauses can be evaluated here. Their reference names are kept in
    // newlyEvaluatedExprsReferences.
    Set<String> newlyEvaluatedExprsReferences = new LinkedHashSet<String>();
    for (Iterator<NamedExpr> iterator = block.namedExprsMgr.getIteratorForUnevaluatedExprs();
            iterator.hasNext();) {
        NamedExpr rawTarget = iterator.next();
        try {
            EvalNode evalNode = exprAnnotator.createEvalNode(context, rawTarget.getExpr(),
                    NameResolvingMode.RELS_ONLY);
            if (checkIfBeEvaluatedAtRelation(block, evalNode, scanNode)) {
                block.namedExprsMgr.markAsEvaluated(rawTarget.getAlias(), evalNode);
                newlyEvaluatedExprsReferences.add(rawTarget.getAlias()); // newly added expr
            }
        } catch (VerifyException ve) {
            // the expression cannot be evaluated at this node; leave it for a later node
        }
    }
    // Assume that each unique expr is evaluated once.
    LinkedHashSet<Target> targets = createFieldTargetsFromRelation(block, scanNode,
            newlyEvaluatedExprsReferences);
    // The fact that some expr is included in newlyEvaluatedExprsReferences means that it is already
    // evaluated. So, we get a raw expression and then create a target.
    for (String reference : newlyEvaluatedExprsReferences) {
        NamedExpr refrer = block.namedExprsMgr.getNamedExpr(reference);
        EvalNode evalNode = exprAnnotator.createEvalNode(context, refrer.getExpr(),
                NameResolvingMode.RELS_ONLY);
        targets.add(new Target(evalNode, reference));
    }
    scanNode.setTargets(targets.toArray(new Target[targets.size()]));
    verifyProjectedFields(block, scanNode);
    return scanNode;
}
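Target order matters to the planner, which is the reason for LinkedHashSet over HashSet here: toArray on a LinkedHashSet yields the same insertion-ordered array on every run, while a plain HashSet gives an unspecified order. A contrast sketch (values invented):

import java.util.Arrays;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;

public class OrderContrastSketch {
    public static void main(String[] args) {
        List<String> targets = Arrays.asList("col_b", "col_a", "expr_1");
        // Insertion order is preserved: [col_b, col_a, expr_1]
        System.out.println(Arrays.toString(new LinkedHashSet<>(targets).toArray(new String[0])));
        // Order is unspecified and may differ across JVMs and runs:
        System.out.println(Arrays.toString(new HashSet<>(targets).toArray(new String[0])));
    }
}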