List of usage examples for java.util.Locale.ROOT.
Locale.ROOT
To view the source code for a java.util.Locale.ROOT usage example, click its Source Link.
From source file:com.gargoylesoftware.htmlunit.javascript.host.html.HTMLElement.java
/** * Sets an attribute./* w w w . j a v a2 s.com*/ * See also <a href="http://www.w3.org/TR/2000/REC-DOM-Level-2-Core-20001113/core.html#ID-F68F082"> * the DOM reference</a> * * @param name Name of the attribute to set * @param value Value to set the attribute to */ @Override public void setAttribute(String name, final String value) { getDomNodeOrDie().setAttribute(name, value); // call corresponding event handler setOnxxx if found if (!name.isEmpty()) { name = name.toLowerCase(Locale.ROOT); if (name.startsWith("on")) { try { name = Character.toUpperCase(name.charAt(0)) + name.substring(1); final Method method = getClass().getMethod("set" + name, METHOD_PARAMS_OBJECT); method.invoke(this, new Object[] { new EventHandler(getDomNodeOrDie(), name.substring(2), value) }); } catch (final NoSuchMethodException e) { //silently ignore } catch (final IllegalAccessException e) { //silently ignore } catch (final InvocationTargetException e) { throw new RuntimeException(e.getCause()); } } } }
From source file:org.elasticsearch.plugins.PluginManagerIT.java
/**
 * Verifies that installing a plugin from a plain-HTTP URL carrying basic-auth
 * credentials is rejected with an I/O error, since basic auth is only
 * supported over HTTPS.
 */
public void testThatBasicAuthIsRejectedOnHttp() throws Exception {
    // The command string contains no format specifiers and no arguments, so the
    // original String.format(Locale.ROOT, ...) wrapper was a no-op; pass the
    // literal directly.
    assertStatus("install http://user:pass@localhost:12345/foo.zip --verbose",
            CliTool.ExitStatus.IO_ERROR);
    assertThat(terminal.getTerminalOutput(),
            hasItem(containsString("Basic auth is only supported for HTTPS!")));
}
From source file:com.ehsy.solr.util.SimplePostTool.java
/** * Guesses the type of a file, based on file name suffix * @param file the file/*from w w w. j ava 2 s. c om*/ * @return the content-type guessed */ protected static String guessType(File file) { String name = file.getName(); String suffix = name.substring(name.lastIndexOf(".") + 1); return mimeMap.get(suffix.toLowerCase(Locale.ROOT)); }
From source file:com.gargoylesoftware.htmlunit.javascript.host.css.CSSStyleDeclaration.java
/**
 * Returns a sorted map containing style elements, keyed on style element name. We use a
 * {@link LinkedHashMap} so that results are deterministic and are thus testable.
 *
 * @return a sorted map containing style elements, keyed on style element name
 */
private Map<String, StyleElement> getStyleMap() {
    final String styleAttribute = jsElement_.getDomNodeOrDie().getAttribute("style");
    // Identity comparison is intentional: the attribute String instance is cached,
    // so reference equality means the attribute has not been replaced since the
    // last parse and the cached map can be returned as-is.
    if (styleString_ == styleAttribute) {
        return styleMap_;
    }

    final Map<String, StyleElement> parsed = new LinkedHashMap<>();
    if (DomElement.ATTRIBUTE_NOT_DEFINED == styleAttribute
            || DomElement.ATTRIBUTE_VALUE_EMPTY == styleAttribute) {
        styleMap_ = parsed;
        styleString_ = styleAttribute;
        return styleMap_;
    }

    for (final String declaration : StringUtils.split(styleAttribute, ';')) {
        final int colon = declaration.indexOf(":");
        if (colon == -1) {
            continue;
        }
        final String key = declaration.substring(0, colon).trim().toLowerCase(Locale.ROOT);
        String value = declaration.substring(colon + 1).trim();
        String priority = "";
        if (StringUtils.endsWithIgnoreCase(value, "!important")) {
            priority = PRIORITY_IMPORTANT;
            // Strip the trailing "!important" (10 characters), then re-trim.
            value = value.substring(0, value.length() - 10).trim();
        }
        final StyleElement element = new StyleElement(key, value, priority,
                SelectorSpecificity.FROM_STYLE_ATTRIBUTE, getCurrentElementIndex());
        parsed.put(key, element);
    }

    styleMap_ = parsed;
    styleString_ = styleAttribute;
    return styleMap_;
}
From source file:org.elasticsearch.client.RequestTests.java
private static void setRandomVersionType(DocWriteRequest<?> request, Map<String, String> expectedParams) { if (randomBoolean()) { VersionType versionType = randomFrom(VersionType.values()); request.versionType(versionType); if (versionType != VersionType.INTERNAL) { expectedParams.put("version_type", versionType.name().toLowerCase(Locale.ROOT)); }// w w w. jav a 2 s. c o m } }
From source file:org.apache.solr.cloud.TestPullReplica.java
private void addReplicaToShard(String shardName, Replica.Type type) throws ClientProtocolException, IOException, SolrServerException { switch (random().nextInt(3)) { case 0: // Add replica with SolrJ CollectionAdminResponse response = CollectionAdminRequest .addReplicaToShard(collectionName, shardName, type).process(cluster.getSolrClient()); assertEquals("Unexpected response status: " + response.getStatus(), 0, response.getStatus()); break;/* w w w. j a va 2 s . c o m*/ case 1: // Add replica with V1 API String url = String.format(Locale.ROOT, "%s/admin/collections?action=ADDREPLICA&collection=%s&shard=%s&type=%s", cluster.getRandomJetty(random()).getBaseUrl(), collectionName, shardName, type); HttpGet addReplicaGet = new HttpGet(url); HttpResponse httpResponse = cluster.getSolrClient().getHttpClient().execute(addReplicaGet); assertEquals(200, httpResponse.getStatusLine().getStatusCode()); break; case 2:// Add replica with V2 API url = String.format(Locale.ROOT, "%s/____v2/c/%s/shards", cluster.getRandomJetty(random()).getBaseUrl(), collectionName); String requestBody = String.format(Locale.ROOT, "{add-replica:{shard:%s, type:%s}}", shardName, type); HttpPost addReplicaPost = new HttpPost(url); addReplicaPost.setHeader("Content-type", "application/json"); addReplicaPost.setEntity(new StringEntity(requestBody)); httpResponse = cluster.getSolrClient().getHttpClient().execute(addReplicaPost); assertEquals(200, httpResponse.getStatusLine().getStatusCode()); break; } }
From source file:org.elasticsearch.plugins.PluginManagerIT.java
/**
 * Verifies that basic-auth credentials embedded in an HTTPS plugin URL are
 * forwarded as an "Authorization" header. Spins up a local self-signed HTTPS
 * server (Netty 3) that records incoming requests, and temporarily installs a
 * trust-all SSL socket factory so the self-signed certificate is accepted.
 */
public void testThatBasicAuthIsSupportedWithHttps() throws Exception {
    assumeTrue("test requires security manager to be disabled", System.getSecurityManager() == null);

    // Remember the JVM-wide default so it can be restored in the finally block.
    SSLSocketFactory defaultSocketFactory = HttpsURLConnection.getDefaultSSLSocketFactory();
    ServerBootstrap serverBootstrap = new ServerBootstrap(new NioServerSocketChannelFactory());
    SelfSignedCertificate ssc = new SelfSignedCertificate("localhost");

    try {
        // Create a trust manager that does not validate certificate chains:
        SSLContext sc = SSLContext.getInstance("SSL");
        sc.init(null, InsecureTrustManagerFactory.INSTANCE.getTrustManagers(), null);
        HttpsURLConnection.setDefaultSSLSocketFactory(sc.getSocketFactory());

        // Requests seen by the test server are collected here for later assertions.
        final List<HttpRequest> requests = new ArrayList<>();
        final SslContext sslContext = SslContext.newServerContext(ssc.certificate(), ssc.privateKey());

        serverBootstrap.setPipelineFactory(new ChannelPipelineFactory() {
            @Override
            public ChannelPipeline getPipeline() throws Exception {
                return Channels.pipeline(new SslHandler(sslContext.newEngine()), new HttpRequestDecoder(),
                        new HttpResponseEncoder(), new LoggingServerHandler(requests));
            }
        });

        // Bind to an ephemeral port on localhost and read back the chosen port.
        Channel channel = serverBootstrap.bind(new InetSocketAddress(InetAddress.getByName("localhost"), 0));
        int port = ((InetSocketAddress) channel.getLocalAddress()).getPort();

        // IO_ERROR because there is no real file delivered...
        assertStatus(
                String.format(Locale.ROOT,
                        "install https://user:pass@localhost:%s/foo.zip --verbose --timeout 1s", port),
                ExitStatus.IO_ERROR);

        // ensure that we did not try any other data source like download.elastic.co, in case we specified our own local URL
        assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("download.elastic.co"))));

        assertThat(requests, hasSize(1));
        String msg = String.format(Locale.ROOT,
                "Request header did not contain Authorization header, terminal output was: %s",
                terminal.getTerminalOutput());
        assertThat(msg, requests.get(0).headers().contains("Authorization"), is(true));
        assertThat(msg, requests.get(0).headers().get("Authorization"),
                is("Basic " + Base64.encodeBytes("user:pass".getBytes(StandardCharsets.UTF_8))));
    } finally {
        // Always restore the JVM-wide socket factory and release server resources,
        // even when an assertion fails.
        HttpsURLConnection.setDefaultSSLSocketFactory(defaultSocketFactory);
        serverBootstrap.releaseExternalResources();
        ssc.delete();
    }
}
From source file:org.apache.manifoldcf.crawler.connectors.webcrawler.WebcrawlerConnector.java
/** Process a set of documents.
* This is the method that should cause each document to be fetched, processed, and the results either added
* to the queue of documents for the current job, and/or entered into the incremental ingestion manager.
* The document specification allows this class to filter what is done based on the job.
* The connector will be connected before this method can be called.
*@param documentIdentifiers is the set of document identifiers to process.
*@param statuses are the currently-stored document versions for each document in the set of document identifiers
* passed in above.
*@param activities is the interface this method should use to queue up new document references
* and ingest documents.
*@param jobMode is an integer describing how the job is being run, whether continuous or once-only.
*@param usesDefaultAuthority will be true only if the authority in use for these documents is the default one.
*/
@Override
public void processDocuments(String[] documentIdentifiers, IExistingVersions statuses, Specification spec,
        IProcessActivity activities, int jobMode, boolean usesDefaultAuthority)
        throws ManifoldCFException, ServiceInterruption {
    getSession();

    // Forced acls
    String[] acls = getAcls(spec);
    // Sort it,
    java.util.Arrays.sort(acls);

    // Get the excluded headers
    Set<String> excludedHeaders = findExcludedHeaders(spec);

    // Since document specifications can change, we need to look at each url and filter it as part of the
    // process of getting version strings. To do that, we need to compile the DocumentSpecification into
    // an object that knows how to do this.
    DocumentURLFilter filter = new DocumentURLFilter(spec);

    String filterVersion = filter.getVersionString();

    // There are two ways to handle any document that's not available. The first is to remove it. The second is
    // to keep it, but mark it with an empty version string.
    // With the web crawler, the major concern with simply removing the document is that it might be referred to
    // from multiple places - and in addition it will get requeued every time the parent document is processed.
    // This is not optimal because it represents churn.
    // On the other hand, keeping the document in the queue causes the queue to bloat, which is also not optimal,
    // and it makes the crawler basically incapable of deleting documents.
    // Since the primary use of the crawler is expected to be repeated intranet crawls, I've thus chosen to
    // optimize the crawler for accuracy rather than performance - if the document is gone, I just remove it,
    // and expect churn when recrawling activities occur.
    for (String documentIdentifier : documentIdentifiers) {
        // Verify that the url is legal
        if (!filter.isDocumentAndHostLegal(documentIdentifier)) {
            if (Logging.connectors.isDebugEnabled())
                Logging.connectors.debug("WEB: Removing url '" + documentIdentifier
                        + "' because it's not in the set of allowed ones");
            // Use null because we should have already filtered when we queued.
            activities.deleteDocument(documentIdentifier);
            continue;
        }
        try {
            // The first thing we need to know is whether this url is part of a session-protected area.
            // We'll use that information later to detect redirection to login.
            SequenceCredentials sessionCredential = getSequenceCredential(documentIdentifier);
            if (Logging.connectors.isDebugEnabled()) {
                if (sessionCredential != null)
                    Logging.connectors.debug("Web: For document identifier '" + documentIdentifier
                            + "' found session credential key '" + sessionCredential.getSequenceKey() + "'");
            }

            // Set up the initial state and state variables.
            // Fetch status
            FetchStatus fetchStatus = new FetchStatus();

            // Calculate an event name; we'll need this to control sequencing.
            String globalSequenceEvent;
            if (sessionCredential != null) {
                String sequenceKey = sessionCredential.getSequenceKey();
                globalSequenceEvent = makeSessionLoginEventName(activities, sequenceKey);
            } else
                globalSequenceEvent = null;

            // This is the main 'state loop'. The code is structured to use the finally clause from the
            // following try to clean up any events that were created within the loop. The loop itself has two
            // parts: document fetch, and logic to figure out what state to transition to (e.g. how to process
            // the fetched document). A signal variable is used to signal the desired outcome.
            // We need to be sure we clean up the sequence event in case there's an error, so put a try/finally
            // around everything.
            try {
                loginAndFetch(fetchStatus, activities, documentIdentifier, sessionCredential,
                        globalSequenceEvent);

                switch (fetchStatus.resultSignal) {
                case RESULT_NO_DOCUMENT:
                    // Document is gone: remove it and accept the churn on recrawl (see note above).
                    if (Logging.connectors.isDebugEnabled())
                        Logging.connectors.debug(
                                "WEB: Removing url '" + documentIdentifier + "'"
                                        + ((fetchStatus.contextMessage != null)
                                                ? " because " + fetchStatus.contextMessage
                                                : ""),
                                fetchStatus.contextException);
                    activities.deleteDocument(documentIdentifier);
                    break;
                case RESULT_NO_VERSION:
                    if (Logging.connectors.isDebugEnabled())
                        Logging.connectors.debug(
                                "WEB: Ignoring url '" + documentIdentifier + "'"
                                        + ((fetchStatus.contextMessage != null)
                                                ? " because " + fetchStatus.contextMessage
                                                : ""),
                                fetchStatus.contextException);

                    // We get here when a document didn't fetch.
                    // No version
                    activities.noDocument(documentIdentifier, "");
                    break;
                case RESULT_VERSION_NEEDED:
                    // Calculate version from document data, which is presumed to be present.
                    StringBuilder sb = new StringBuilder();

                    // Acls
                    packList(sb, acls, '+');
                    if (acls.length > 0) {
                        sb.append('+');
                        pack(sb, defaultAuthorityDenyToken, '+');
                    } else
                        sb.append('-');

                    // Now, do the metadata.
                    Map<String, Set<String>> metaHash = new HashMap<String, Set<String>>();

                    String[] fixedListStrings = new String[2];
                    // They're all folded into the same part of the version string.
                    // First pass: count the header values that survive the reserved/excluded filters,
                    // so the metadata array can be sized exactly.
                    int headerCount = 0;
                    Iterator<String> headerIterator = fetchStatus.headerData.keySet().iterator();
                    while (headerIterator.hasNext()) {
                        String headerName = headerIterator.next();
                        String lowerHeaderName = headerName.toLowerCase(Locale.ROOT);
                        if (!reservedHeaders.contains(lowerHeaderName)
                                && !excludedHeaders.contains(lowerHeaderName))
                            headerCount += fetchStatus.headerData.get(headerName).size();
                    }
                    String[] fullMetadata = new String[headerCount];
                    headerCount = 0;
                    // Second pass: collect the values into metaHash and pack each one into the
                    // version-string metadata array.
                    headerIterator = fetchStatus.headerData.keySet().iterator();
                    while (headerIterator.hasNext()) {
                        String headerName = headerIterator.next();
                        String lowerHeaderName = headerName.toLowerCase(Locale.ROOT);
                        if (!reservedHeaders.contains(lowerHeaderName)
                                && !excludedHeaders.contains(lowerHeaderName)) {
                            Set<String> valueSet = metaHash.get(headerName);
                            if (valueSet == null) {
                                valueSet = new HashSet<String>();
                                metaHash.put(headerName, valueSet);
                            }
                            List<String> headerValues = fetchStatus.headerData.get(headerName);
                            for (String headerValue : headerValues) {
                                valueSet.add(headerValue);
                                fixedListStrings[0] = "header-" + headerName;
                                fixedListStrings[1] = headerValue;
                                StringBuilder newsb = new StringBuilder();
                                packFixedList(newsb, fixedListStrings, '=');
                                fullMetadata[headerCount++] = newsb.toString();
                            }
                        }
                    }
                    // Sort so the version string is deterministic regardless of header order.
                    java.util.Arrays.sort(fullMetadata);

                    packList(sb, fullMetadata, '+');
                    // Done with the parseable part! Add the checksum.
                    sb.append(fetchStatus.checkSum);
                    // Add the filter version
                    sb.append("+");
                    sb.append(filterVersion);
                    String versionString = sb.toString();

                    // Now, extract links.
                    // We'll call the "link extractor" series, so we can plug more stuff in over time.
                    boolean indexDocument = extractLinks(documentIdentifier, activities, filter);

                    // If scanOnly is set, we never ingest. But all else is the same.
                    if (!activities.checkDocumentNeedsReindexing(documentIdentifier, versionString))
                        continue;

                    processDocument(activities, documentIdentifier, versionString, indexDocument, metaHash,
                            acls, filter);
                    break;
                case RESULT_RETRY_DOCUMENT:
                    // Document could not be processed right now.
                    if (Logging.connectors.isDebugEnabled())
                        Logging.connectors.debug(
                                "WEB: Retrying url '" + documentIdentifier + "' later"
                                        + ((fetchStatus.contextMessage != null)
                                                ? " because " + fetchStatus.contextMessage
                                                : ""),
                                fetchStatus.contextException);
                    activities.retryDocumentProcessing(documentIdentifier);
                    break;
                default:
                    throw new IllegalStateException(
                            "Unexpected value for result signal: " + Integer.toString(fetchStatus.resultSignal));
                }
            } finally {
                // Clean up event, if there is one.
                if (fetchStatus.sessionState == SESSIONSTATE_LOGIN && globalSequenceEvent != null) {
                    // Terminate the event
                    activities.completeEventSequence(globalSequenceEvent);
                }
            }
        } finally {
            cache.deleteData(documentIdentifier);
        }
    }
}
From source file:org.jenkinsci.plugins.fod.FoDAPI.java
/** * If relaseId not on request object, looks up release ID based on application and * release names, then tries to upload. Requires a zip file containing all relevant * files already be created and referenced from the request object's uploadZip field. * /*from w ww.j a v a 2 s. c o m*/ * @param req * @return * @throws IOException */ @SuppressWarnings("deprecation") public UploadStatus uploadFile(UploadRequest req) throws IOException { final String METHOD_NAME = CLASS_NAME + ".uploadFile"; UploadStatus status = new UploadStatus(); PrintStream out = FodBuilder.getLogger(); Long releaseId = req.getReleaseId(); if (null == releaseId || 0 <= releaseId) { String applicationName = req.getApplicationName(); String releaseName = req.getReleaseName(); releaseId = getReleaseId(applicationName, releaseName); out.println(METHOD_NAME + ": ReleaseId: " + releaseId); } if (null != releaseId && 0 < releaseId) { if (sessionToken != null && !sessionToken.isEmpty()) { FileInputStream fs = new FileInputStream(req.getUploadZip()); byte[] readByteArray = new byte[seglen]; byte[] sendByteArray = null; int fragmentNumber = 0; int byteCount = 0; long offset = 0; try { while ((byteCount = fs.read(readByteArray)) != -1) { if (byteCount < seglen) { fragmentNumber = -1; lastFragment = true; sendByteArray = Arrays.copyOf(readByteArray, byteCount); } else { sendByteArray = readByteArray; } StringBuffer postURL = new StringBuffer(); if (req.getLanguageLevel() != null) { postURL.append(baseUrl); postURL.append("/api/v1/release/" + releaseId); postURL.append("/scan/?assessmentTypeId=" + req.getAssessmentTypeId()); postURL.append("&technologyStack=" + URLEncoder.encode(req.getTechnologyStack())); postURL.append("&languageLevel=" + URLEncoder.encode(req.getLanguageLevel())); postURL.append("&fragNo=" + fragmentNumber++); postURL.append("&len=" + byteCount); postURL.append("&offset=" + offset); } else { postURL.append(baseUrl); postURL.append("/api/v1/release/" + releaseId); 
postURL.append("/scan/?assessmentTypeId=" + req.getAssessmentTypeId()); postURL.append("&technologyStack=" + URLEncoder.encode(req.getTechnologyStack())); postURL.append("&fragNo=" + fragmentNumber++); postURL.append("&len=" + byteCount); postURL.append("&offset=" + offset); } Boolean runOpenSourceAnalysis = req.getRunOpenSourceAnalysis(); Boolean isExpressScan = req.getIsExpressScan(); Boolean isExpressAudit = req.getIsExpressAudit(); Boolean includeThirdParty = req.getIncludeThirdParty(); if (null != runOpenSourceAnalysis) { if (runOpenSourceAnalysis) { postURL.append("&doSonatypeScan=true"); } } if (null != isExpressScan) { if (isExpressScan) { postURL.append("&scanPreferenceId=2"); } } if (null != isExpressAudit) { if (isExpressAudit) { postURL.append("&auditPreferenceId=2"); } } if (null != includeThirdParty) { if (includeThirdParty) { postURL.append("&excludeThirdPartyLibs=false"); } else { postURL.append("&excludeThirdPartyLibs=true"); } } // out.println(METHOD_NAME + ": postURL: " + postURL.toString()); String postErrorMessage = ""; SendPostResponse postResponse = sendPost(postURL.toString(), sendByteArray, postErrorMessage); HttpResponse response = postResponse.getResponse(); if (response == null) { out.println(METHOD_NAME + ": HttpResponse from sendPost is null!"); status.setErrorMessage(postResponse.getErrorMessage()); status.setSendPostFailed(true); break; } else { StatusLine sl = response.getStatusLine(); Integer statusCode = Integer.valueOf(sl.getStatusCode()); status.setHttpStatusCode(statusCode); if (!statusCode.toString().startsWith("2")) { status.setErrorMessage(sl.toString()); if (statusCode.toString().equals("500")) { status.setErrorMessage(sl.toString()); out.println(METHOD_NAME + ": Error uploading to HPE FoD after successful authentication. 
Please contact your Technical Account Manager with this log for assistance."); out.println(METHOD_NAME + ": DEBUG: " + status.getErrorMessage()); out.println(METHOD_NAME + ": DEBUG: Bytes sent: " + status.getBytesSent()); } break; } else { if (fragmentNumber != 0 && fragmentNumber % 5 == 0) { out.println(METHOD_NAME + ": Upload Status - Bytes sent:" + offset); status.setBytesSent(offset); } if (lastFragment) { HttpEntity entity = response.getEntity(); String finalResponse = EntityUtils.toString(entity).trim(); out.println(METHOD_NAME + ": finalResponse=" + finalResponse); if (finalResponse.toUpperCase(Locale.ROOT).equals("ACK")) { status.setUploadSucceeded(true); status.setBytesSent(offset); } else { status.setUploadSucceeded(false); status.setErrorMessage(finalResponse); status.setBytesSent(bytesSent); } } } EntityUtils.consume(response.getEntity()); } offset += byteCount; } } finally { fs.close(); } bytesSent = offset; out.println(METHOD_NAME + ": bytesSent=" + bytesSent); } } else { if (releaseId == null) { status.setErrorMessage(METHOD_NAME + ":Error: releaseId is null!"); } status.setUploadSucceeded(false); status.setErrorMessage("Combination of applicationName of \"" + req.getApplicationName() + "\" and releaseName of \"" + req.getReleaseName() + "\" is not valid"); } return status; }