List of usage examples for javax.servlet.http HttpServletResponse SC_GONE
int SC_GONE

SC_GONE is the int constant for HTTP status code 410 (Gone): the requested resource is no longer available at the server and no forwarding address is known. The usage examples below are collected from real projects.
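Before the project examples, a minimal sketch of the typical server-side use: answering 410 Gone for a resource that was permanently removed. The servlet class and message here are illustrative assumptions, not taken from any of the projects below.

import java.io.IOException;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

// Hypothetical servlet: a 410 tells clients and crawlers that the resource
// was removed on purpose and will not come back, unlike a plain 404.
public class RetiredResourceServlet extends HttpServlet {
    @Override
    protected void doGet(HttpServletRequest req, HttpServletResponse resp)
            throws ServletException, IOException {
        // sendError commits the response with status 410 and the error message
        resp.sendError(HttpServletResponse.SC_GONE, "This resource has been permanently removed.");
    }
}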
From source file:com.streamsets.pipeline.stage.origin.ipctokafka.IpcToKafkaServlet.java
@Override
protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
    String requestor = req.getRemoteAddr() + ":" + req.getRemotePort();
    if (shuttingDown) {
        LOG.debug("Shutting down, discarding incoming request from '{}'", requestor);
        resp.setStatus(HttpServletResponse.SC_GONE);
    } else {
        String appId = req.getHeader(Constants.X_SDC_APPLICATION_ID_HEADER);
        String compression = req.getHeader(Constants.X_SDC_COMPRESSION_HEADER);
        String contentType = req.getContentType();
        String json1Fragmentable = req.getHeader(Constants.X_SDC_JSON1_FRAGMENTABLE_HEADER);
        if (!Constants.APPLICATION_BINARY.equals(contentType)) {
            invalidRequestMeter.mark();
            resp.sendError(HttpServletResponse.SC_BAD_REQUEST, Utils.format(
                    "Wrong content-type '{}', expected '{}'", contentType, Constants.APPLICATION_BINARY));
        } else if (!"true".equals(json1Fragmentable)) {
            invalidRequestMeter.mark();
            resp.sendError(HttpServletResponse.SC_BAD_REQUEST, Utils.format(
                    "RPC client is not using a fragmentable JSON1 encoding, client's SDC must be upgraded"));
        } else if (!configs.appId.equals(appId)) {
            invalidRequestMeter.mark();
            LOG.warn("IPC from '{}' invalid appId '{}', rejected", requestor, appId);
            resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Invalid 'appId'");
        } else {
            long start = System.currentTimeMillis();
            LOG.debug("Request accepted from '{}'", requestor);
            try (InputStream in = req.getInputStream()) {
                InputStream is = in;
                boolean processRequest = true;
                if (compression != null) {
                    switch (compression) {
                        case Constants.SNAPPY_COMPRESSION:
                            is = new SnappyFramedInputStream(is, true);
                            break;
                        default:
                            invalidRequestMeter.mark();
                            LOG.warn("Invalid compression '{}' in request from '{}', returning error",
                                    compression, requestor);
                            resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE,
                                    "Unsupported compression: " + compression);
                            processRequest = false;
                    }
                }
                if (processRequest) {
                    LOG.debug("Processing request from '{}'", requestor);
                    List<byte[]> messages = SdcStreamFragmenter.fragment(is, maxMessageSize, maxRpcRequestSize);
                    LOG.debug("Request from '{}' broken into '{}' messages", requestor, messages.size());
                    long kStart = System.currentTimeMillis();
                    SdcKafkaProducer producer = getKafkaProducer();
                    long kafkaTime = System.currentTimeMillis() - kStart;
                    try {
                        for (byte[] message : messages) {
                            // we are using a round-robin partition strategy, the partition key is ignored
                            kStart = System.currentTimeMillis();
                            producer.enqueueMessage(kafkaConfigBean.kafkaConfig.topic, message, "");
                            kafkaTime += System.currentTimeMillis() - kStart;
                        }
                        kStart = System.currentTimeMillis();
                        producer.write();
                        kafkaTime += System.currentTimeMillis() - kStart;
                        resp.setStatus(HttpServletResponse.SC_OK);
                        requestMeter.mark();
                    } catch (StageException ex) {
                        LOG.warn("Kafka producer error: {}", ex.toString(), ex);
                        errorQueue.offer(ex);
                        errorRequestMeter.mark();
                        LOG.warn("Error while reading payload from '{}': {}", requestor, ex.toString(), ex);
                        resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, ex.toString());
                    } finally {
                        kStart = System.currentTimeMillis();
                        releaseKafkaProducer(producer);
                        kafkaTime += System.currentTimeMillis() - kStart;
                    }
                    kafkaTimer.update(kafkaTime, TimeUnit.MILLISECONDS);
                    kafkaMessagesMeter.mark(messages.size());
                }
            } catch (Exception ex) {
                errorRequestMeter.mark();
                LOG.warn("Error while reading payload from '{}': {}", requestor, ex.toString(), ex);
                resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, ex.toString());
            } finally {
                requestTimer.update(System.currentTimeMillis() - start, TimeUnit.MILLISECONDS);
            }
        }
    }
}
From source file:org.onehippo.forge.camel.demo.rest.services.AbstractRestUpdateResource.java
/**
 * Invoke Solr Update Service based on the given <code>action</code> and <code>id</code>.
 * @param action Update action name. It should be either 'addOrReplace' or 'delete'.
 * @param handleUuid The document handle identifier.
 * @return an OK response, or the update service's status when it reports an error.
 */
@POST
@Path("/")
public Response update(@QueryParam("action") @DefaultValue(INDEX_ACTION) String action,
        @QueryParam("id") String handleUuid) {
    log.info("Updating ('{}') document from search index: {}", action, handleUuid);
    if (StringUtils.isNotEmpty(handleUuid)) {
        try {
            HstRequestContext requestContext = RequestContextProvider.get();
            if (INDEX_ACTION.equals(action)) {
                Node node = requestContext.getSession().getNodeByIdentifier(handleUuid);
                HippoBean bean = (HippoBean) getObjectConverter(requestContext).getObject(node);
                if (bean instanceof BaseHippoDocument) {
                    BaseHippoDocument document = (BaseHippoDocument) bean;
                    JSONObject payload = createDocumentAddPayload(document);
                    if (payload != null) {
                        HttpResponse httpResponse = invokeUpdateService(action, payload);
                        if (httpResponse.getStatusLine().getStatusCode() != HttpServletResponse.SC_OK) {
                            return Response.status(httpResponse.getStatusLine().getStatusCode()).build();
                        }
                    }
                } else {
                    log.warn("The bean from '{}' is not a BaseHippoDocument.", handleUuid);
                }
            } else if (DELETE_ACTION.equals(action)) {
                JSONObject payload = createDocumentDeletePayload(handleUuid);
                HttpResponse httpResponse = invokeUpdateService(action, payload);
                final int status = httpResponse.getStatusLine().getStatusCode();
                if (status >= HttpServletResponse.SC_BAD_REQUEST) {
                    if (status == HttpServletResponse.SC_NOT_FOUND || status == HttpServletResponse.SC_GONE) {
                        log.info("The document is not found or no longer exists: '{}'.", handleUuid);
                    } else if (status != HttpServletResponse.SC_OK) {
                        return Response.status(httpResponse.getStatusLine().getStatusCode()).build();
                    }
                }
            } else {
                log.warn("Unknown action: '{}'.", action);
            }
        } catch (ItemNotFoundException e) {
            log.warn("The news is not found by the identifier: '{}'", handleUuid);
        } catch (Exception e) {
            if (log.isDebugEnabled()) {
                log.warn("Failed to find news by identifier.", e);
            } else {
                log.warn("Failed to find news by identifier. {}", e.toString());
            }
            throw new WebApplicationException(e, buildServerErrorResponse(e));
        }
    }
    return Response.ok().build();
}
From source file:com.cognitivabrasil.repositorio.web.FileControllerTest.java
@Test
public void testGetFile() throws IOException {
    com.cognitivabrasil.repositorio.data.entities.Files f3 = new Files();
    f3.setId(3);
    f3.setName("testGet.txt");
    f3.setContentType("text");
    f3.setLocation("somewhere");
    HttpServletResponse response = new MockHttpServletResponse();
    HttpServletResponse response2 = new MockHttpServletResponse();
    FileController fileController = mockFiles();
    // proves response and response2 are not committed yet
    Assert.assertFalse(response.isCommitted());
    Assert.assertFalse(response2.isCommitted());
    int fileId = 3;
    when(fileService.get(fileId)).thenReturn(f3);
    // tests a non-existent id
    int id = 99;
    fileController.getFile(id, response);
    Assert.assertTrue(response.isCommitted());
    assertThat(HttpServletResponse.SC_GONE, equalTo(response.getStatus()));
    // tests a valid id
    // Problem: how to make the getFile method avoid the f=null.
    id = 3;
    Assert.assertFalse(response2.isCommitted());
    fileController.getFile(id, response2);
    Assert.assertTrue(response2.isCommitted());
    assertThat(HttpServletResponse.SC_GONE, equalTo(response2.getStatus()));
    Assert.assertTrue(response2.isCommitted());
}
From source file:com.streamsets.pipeline.lib.http.HttpReceiverServlet.java
@Override
protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
    String requestor = req.getRemoteAddr() + ":" + req.getRemotePort();
    if (isShuttingDown()) {
        LOG.debug("Shutting down, discarding incoming request from '{}'", requestor);
        resp.setStatus(HttpServletResponse.SC_GONE);
    } else {
        if (validatePostRequest(req, resp)) {
            long start = System.currentTimeMillis();
            LOG.debug("Request accepted from '{}'", requestor);
            try (InputStream in = req.getInputStream()) {
                InputStream is = in;
                String compression = req.getHeader(HttpConstants.X_SDC_COMPRESSION_HEADER);
                if (compression == null) {
                    compression = req.getHeader(HttpConstants.CONTENT_ENCODING_HEADER);
                }
                if (compression != null) {
                    switch (compression) {
                        case HttpConstants.SNAPPY_COMPRESSION:
                            is = new SnappyFramedInputStream(is, true);
                            break;
                        case HttpConstants.GZIP_COMPRESSION:
                            is = new GZIPInputStream(is);
                            break;
                        default:
                            throw new IOException(
                                    Utils.format("It shouldn't happen, unexpected compression '{}'", compression));
                    }
                }
                LOG.debug("Processing request from '{}'", requestor);
                processRequest(req, is, resp);
            } catch (Exception ex) {
                errorQueue.offer(ex);
                errorRequestMeter.mark();
                LOG.warn("Error while processing request payload from '{}': {}", requestor, ex.toString(), ex);
                resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, ex.toString());
            } finally {
                requestTimer.update(System.currentTimeMillis() - start, TimeUnit.MILLISECONDS);
            }
        } else {
            invalidRequestMeter.mark();
        }
    }
}
From source file:aiai.ai.station.actors.DownloadSnippetActor.java
private void logError(String snippetCode, HttpResponseException e) {
    if (e.getStatusCode() == HttpServletResponse.SC_GONE) {
        log.warn("Snippet with code {} wasn't found", snippetCode);
    } else if (e.getStatusCode() == HttpServletResponse.SC_CONFLICT) {
        log.warn("Snippet with id {} is broken and needs to be recreated", snippetCode);
    } else {
        log.error("HttpResponseException", e);
    }
}
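The example above reacts to a 410 raised on the client side by Apache HttpClient. A hedged sketch of where such an HttpResponseException would come from, using HttpClient's fluent API (the URL and the decision to return null are illustrative assumptions):

import org.apache.http.client.HttpResponseException;
import org.apache.http.client.fluent.Request;
import javax.servlet.http.HttpServletResponse;

// Sketch: download a payload and treat 410 Gone as a permanent condition,
// so the caller stops retrying instead of treating it as a transient failure.
public class SnippetDownloader {
    public static byte[] download(String url) throws Exception {
        try {
            // returnContent() throws HttpResponseException for non-2xx statuses
            return Request.Get(url).execute().returnContent().asBytes();
        } catch (HttpResponseException e) {
            if (e.getStatusCode() == HttpServletResponse.SC_GONE) {
                return null; // the server says this resource no longer exists
            }
            throw e;
        }
    }
}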
From source file:org.apache.hadoop.hdfs.server.namenode.ImageServlet.java
@Override
public void doGet(final HttpServletRequest request, final HttpServletResponse response)
        throws ServletException, IOException {
    try {
        final ServletContext context = getServletContext();
        final FSImage nnImage = NameNodeHttpServer.getFsImageFromContext(context);
        final GetImageParams parsedParams = new GetImageParams(request, response);
        final Configuration conf = (Configuration) context.getAttribute(JspHelper.CURRENT_CONF);
        final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();

        validateRequest(context, conf, request, response, nnImage, parsedParams.getStorageInfoString());

        UserGroupInformation.getCurrentUser().doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                if (parsedParams.isGetImage()) {
                    long txid = parsedParams.getTxId();
                    File imageFile = null;
                    String errorMessage = "Could not find image";
                    if (parsedParams.shouldFetchLatest()) {
                        imageFile = nnImage.getStorage().getHighestFsImageName();
                    } else {
                        errorMessage += " with txid " + txid;
                        imageFile = nnImage.getStorage().getFsImage(txid,
                                EnumSet.of(NameNodeFile.IMAGE, NameNodeFile.IMAGE_ROLLBACK));
                    }
                    if (imageFile == null) {
                        throw new IOException(errorMessage);
                    }
                    CheckpointFaultInjector.getInstance().beforeGetImageSetsHeaders();
                    long start = monotonicNow();
                    serveFile(imageFile);
                    if (metrics != null) { // Metrics non-null only when used inside name node
                        long elapsed = monotonicNow() - start;
                        metrics.addGetImage(elapsed);
                    }
                } else if (parsedParams.isGetEdit()) {
                    long startTxId = parsedParams.getStartTxId();
                    long endTxId = parsedParams.getEndTxId();
                    File editFile = nnImage.getStorage().findFinalizedEditsFile(startTxId, endTxId);
                    long start = monotonicNow();
                    serveFile(editFile);
                    if (metrics != null) { // Metrics non-null only when used inside name node
                        long elapsed = monotonicNow() - start;
                        metrics.addGetEdit(elapsed);
                    }
                }
                return null;
            }

            private void serveFile(File file) throws IOException {
                FileInputStream fis = new FileInputStream(file);
                try {
                    setVerificationHeadersForGet(response, file);
                    setFileNameHeaders(response, file);
                    if (!file.exists()) {
                        // Potential race where the file was deleted while we were in the
                        // process of setting headers!
                        throw new FileNotFoundException(file.toString());
                        // It's possible the file could be deleted after this point, but
                        // we've already opened the 'fis' stream.
                        // It's also possible length could change, but this would be
                        // detected by the client side as an inaccurate length header.
                    }
                    // send file
                    TransferFsImage.copyFileToStream(response.getOutputStream(), file, fis, getThrottler(conf));
                } finally {
                    IOUtils.closeStream(fis);
                }
            }
        });
    } catch (Throwable t) {
        String errMsg = "GetImage failed. " + StringUtils.stringifyException(t);
        response.sendError(HttpServletResponse.SC_GONE, errMsg);
        throw new IOException(errMsg);
    } finally {
        response.getOutputStream().close();
    }
}
From source file:org.apache.hadoop.mapred.TaskLogServlet.java
/**
 * Get the logs via HTTP.
 */
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {
    long start = 0;
    long end = -1;
    boolean plainText = false;
    TaskLog.LogName filter = null;
    boolean isCleanup = false;

    String attemptIdStr = request.getParameter("attemptid");
    if (attemptIdStr == null) {
        response.sendError(HttpServletResponse.SC_BAD_REQUEST, "Argument attemptid is required");
        return;
    }

    String logFilter = request.getParameter("filter");
    if (logFilter != null) {
        try {
            filter = TaskLog.LogName.valueOf(TaskLog.LogName.class, logFilter.toUpperCase());
        } catch (IllegalArgumentException iae) {
            response.sendError(HttpServletResponse.SC_BAD_REQUEST, "Illegal value for filter: " + logFilter);
            return;
        }
    }

    String sLogOff = request.getParameter("start");
    if (sLogOff != null) {
        start = Long.valueOf(sLogOff).longValue();
    }

    String sLogEnd = request.getParameter("end");
    if (sLogEnd != null) {
        end = Long.valueOf(sLogEnd).longValue();
    }

    String sPlainText = request.getParameter("plaintext");
    if (sPlainText != null) {
        plainText = Boolean.valueOf(sPlainText);
    }

    String sCleanup = request.getParameter("cleanup");
    if (sCleanup != null) {
        isCleanup = Boolean.valueOf(sCleanup);
    }

    TaskAttemptID attemptId = TaskAttemptID.forName(attemptIdStr);
    if (!TaskLog.getAttemptDir(attemptId, isCleanup).exists()) {
        response.sendError(HttpServletResponse.SC_GONE, "Task log directory for task " + attemptId
                + " does not exist. May be cleaned up by Task Tracker, if older logs.");
        return;
    }

    // get the name of the user accessing the logs
    String user = request.getRemoteUser();
    if (user != null) {
        ServletContext context = getServletContext();
        TaskTracker taskTracker = (TaskTracker) context.getAttribute("task.tracker");
        JobID jobId = attemptId.getJobID();
        // get jobACLConf from the ACLs file
        JobConf jobACLConf = getConfFromJobACLsFile(jobId);
        // Ignore authorization if job-acls.xml is not found
        if (jobACLConf != null) {
            try {
                checkAccessForTaskLogs(jobACLConf, user, jobId.toString(), taskTracker);
            } catch (AccessControlException e) {
                String errMsg = "User " + user + " failed to view tasklogs of job " + jobId + "!\n\n"
                        + e.getMessage();
                response.sendError(HttpServletResponse.SC_UNAUTHORIZED, errMsg);
                return;
            }
        }
    }

    OutputStream out = response.getOutputStream();
    if (!plainText) {
        out.write(("<html>\n" + "<title>Task Logs: '" + attemptId + "'</title>\n" + "<body>\n"
                + "<h1>Task Logs: '" + attemptId + "'</h1><br>\n").getBytes());
        if (filter == null) {
            printTaskLog(response, out, attemptId, start, end, plainText, TaskLog.LogName.STDOUT, isCleanup);
            printTaskLog(response, out, attemptId, start, end, plainText, TaskLog.LogName.STDERR, isCleanup);
            if (haveTaskLog(attemptId, isCleanup, TaskLog.LogName.SYSLOG)) {
                printTaskLog(response, out, attemptId, start, end, plainText, TaskLog.LogName.SYSLOG, isCleanup);
            }
            if (haveTaskLog(attemptId, isCleanup, TaskLog.LogName.DEBUGOUT)) {
                printTaskLog(response, out, attemptId, start, end, plainText, TaskLog.LogName.DEBUGOUT, isCleanup);
            }
            if (haveTaskLog(attemptId, isCleanup, TaskLog.LogName.PROFILE)) {
                printTaskLog(response, out, attemptId, start, end, plainText, TaskLog.LogName.PROFILE, isCleanup);
            }
        } else {
            printTaskLog(response, out, attemptId, start, end, plainText, filter, isCleanup);
        }
        out.write("</body></html>\n".getBytes());
        out.close();
    } else if (filter == null) {
        response.sendError(HttpServletResponse.SC_BAD_REQUEST,
                "You must supply a value for `filter' (STDOUT, STDERR, or SYSLOG) if you set plainText = true");
    } else {
        printTaskLog(response, out, attemptId, start, end, plainText, filter, isCleanup);
    }
}
From source file:aiai.ai.launchpad.server.ServerController.java
private HttpEntity<AbstractResource> returnEmptyAsGone(HttpServletResponse response) throws IOException {
    response.sendError(HttpServletResponse.SC_GONE);
    return new HttpEntity<>(new ByteArrayResource(new byte[0]), getHeader(0));
}
From source file:org.apache.hadoop.hbase.rest.Status.java
public void setGone() {
    this.statusCode = HttpServletResponse.SC_GONE;
    this.message = new StatusMessage(statusCode, true, "item no longer available");
}
From source file:com.redhat.jenkins.nodesharingfrontend.Api.java
/**
 * Request to utilize a reserved computer.
 *
 * Response codes:
 * - "200 OK" is used when the node was accepted; the node is expected to be correctly added to Jenkins by the time
 *   the request completes with the code. The code is also returned when the node is already held by this executor.
 * - "410 Gone" when there is no longer a need for such host and the orchestrator can reuse it immediately.
 *   The node must not be created.
 */
@RequirePOST
public void doUtilizeNode(@Nonnull final StaplerRequest req, @Nonnull final StaplerResponse rsp)
        throws IOException {
    final Jenkins jenkins = Jenkins.getActiveInstance();
    jenkins.checkPermission(RestEndpoint.RESERVE);

    UtilizeNodeRequest request = Entity.fromInputStream(req.getInputStream(), UtilizeNodeRequest.class);
    final NodeDefinition definition = NodeDefinition.create(request.getFileName(), request.getDefinition());
    if (definition == null)
        throw new AssertionError("Unknown node definition: " + request.getFileName());
    final String name = definition.getName();

    // utilizeNode call received even though the node is already being utilized
    Node node = getCollidingNode(jenkins, name);
    if (node != null) {
        new UtilizeNodeResponse(fingerprint).toOutputStream(rsp.getOutputStream());
        rsp.setStatus(HttpServletResponse.SC_OK);
        LOGGER.warning("Skipping node addition as it already exists");
        return;
    }

    // Do not accept the node when there is no load for it
    if (!isThereAWorkloadFor(jenkins, definition)) {
        rsp.setStatus(HttpServletResponse.SC_GONE);
        LOGGER.info("Skipping node addition as there isn't a workload for it");
        return;
    }

    try {
        final SharedNode newNode = cloud.createNode(definition);
        // Prevent replacing an existing node due to a race condition in repeated utilizeNode calls
        Queue.withLock(new NotReallyRoleSensitiveCallable<Void, IOException>() {
            @Override
            public Void call() throws IOException {
                Node node = getCollidingNode(jenkins, name);
                if (node == null) {
                    jenkins.addNode(newNode);
                } else {
                    LOGGER.warning("Skipping node addition due to race condition");
                }
                return null;
            }
        });
        new UtilizeNodeResponse(fingerprint).toOutputStream(rsp.getOutputStream());
        rsp.setStatus(HttpServletResponse.SC_OK);
    } catch (IllegalArgumentException e) {
        e.printStackTrace(new PrintStream(rsp.getOutputStream()));
        rsp.setStatus(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);
    }
}
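For context, a hedged sketch of how the calling side of this protocol might interpret the two response codes. The endpoint path and payload handling here are illustrative assumptions, not the project's actual API:

import java.io.IOException;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import javax.servlet.http.HttpServletResponse;

// Sketch of a caller of the endpoint above: 200 means the executor accepted
// the node, 410 means it has no workload and the host can be reused immediately.
public class UtilizeNodeClient {
    public static boolean utilizeNode(String executorUrl, byte[] payload) throws IOException {
        // "/node-sharing/utilizeNode" is an assumed path for illustration only
        HttpURLConnection conn =
                (HttpURLConnection) new URL(executorUrl + "/node-sharing/utilizeNode").openConnection();
        conn.setRequestMethod("POST");
        conn.setDoOutput(true);
        try (OutputStream out = conn.getOutputStream()) {
            out.write(payload);
        }
        int status = conn.getResponseCode();
        if (status == HttpServletResponse.SC_GONE) {
            return false; // executor declined: reassign the reserved host right away
        }
        return status == HttpServletResponse.SC_OK; // node accepted by the executor
    }
}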