List of usage examples for javax.servlet.http HttpServletResponse SC_GONE
int SC_GONE
To view the source code that uses javax.servlet.http HttpServletResponse SC_GONE, click the Source Link below each example.
From source file:org.nema.medical.mint.server.controller.Utils.java
public static StudyStatus validateStudyStatus(final File studiesRoot, final String studyUUID, final HttpServletResponse response, final StudyDAO studyDAO) throws IOException { if (StringUtils.isBlank(studyUUID)) { response.sendError(HttpServletResponse.SC_BAD_REQUEST, "Missing study ID"); return Utils.StudyStatus.INVALID_ID; }//from w w w .ja v a2s.c o m final File studyDir = new File(studiesRoot, studyUUID); if (studyDir.exists()) { if (studyDir.canRead()) { return Utils.StudyStatus.OK; } LOG.error("Unable to read directory for study: " + studyDir); response.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid study requested: Not readable"); return Utils.StudyStatus.ABSENT; } final MINTStudy study = studyDAO.findStudy(studyUUID); if (study != null) { if (study.getStudyVersion() == -1) { LOG.error("Requested study has previously been deleted: " + studyUUID); response.sendError(HttpServletResponse.SC_GONE, "Invalid study requested: deleted"); return Utils.StudyStatus.DELETED; } return Utils.StudyStatus.OK; } LOG.error("Unable to locate study " + studyUUID); response.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid study requested: Not found"); return Utils.StudyStatus.ABSENT; }
From source file:org.apache.hadoop.dfs.FsckServlet.java
/**
 * Runs fsck against the NameNode stored in the servlet context and streams the
 * report to the client via {@code response}.
 *
 * <p>On failure the full stack trace is logged, the client receives HTTP 410
 * with a short message, and the exception is rethrown to the container.
 *
 * @throws IOException if the fsck run itself fails
 */
@SuppressWarnings("unchecked")
public void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
    Map<String, String[]> pmap = request.getParameterMap();
    try {
        ServletContext context = getServletContext();
        NameNode nn = (NameNode) context.getAttribute("name.node");
        Configuration conf = (Configuration) context.getAttribute("name.conf");
        NamenodeFsck fscker = new NamenodeFsck(conf, nn, pmap, response);
        fscker.fsck();
    } catch (IOException ie) {
        // Fix: the original computed stringifyException(ie) and discarded the
        // result; log the full stack trace as clearly intended.
        LOG.warn(StringUtils.stringifyException(ie));
        // NOTE(review): pmap.get("path") is a String[], so this renders as an
        // array reference, not the path — confirm intent before "fixing".
        String errMsg = "Fsck on path " + pmap.get("path") + " failed.";
        response.sendError(HttpServletResponse.SC_GONE, errMsg);
        throw ie;
    }
}
From source file:org.nuxeo.wss.handlers.fprpc.OWSSvrHandler.java
@Override protected void processCall(FPRPCRequest request, FPRPCResponse fpResponse, int callIndex, WSSBackend backend) throws WSSException { FPRPCCall call = request.getCalls().get(callIndex); fpResponse.addRenderingParameter("siteRoot", request.getSitePath()); fpResponse.addRenderingParameter("request", request); log.debug("Handling FP OWS call on method " + call.getMethodName()); if ("FileOpen".equals(call.getMethodName())) { handleFileDialog(request, fpResponse, call, backend, false); } else if ("FileSave".equals(call.getMethodName())) { try {/*w w w. j a va 2 s . c om*/ fpResponse.getHttpResponse().sendError(HttpServletResponse.SC_NOT_ACCEPTABLE, "Please use list-document API for save as"); return; } catch (IOException e) { throw new WSSException("Error while sending error!", e); } //handleFileDialog(request, fpResponse, call, backend, true); } else if ("SaveForm".equals(call.getMethodName())) { if ("HEAD".equals(request.getHttpRequest().getMethod())) { fpResponse.setContentType("text/html"); fpResponse.getHttpResponse().setStatus(HttpServletResponse.SC_GONE); return; } else { fpResponse.setContentType("text/html"); fpResponse.getHttpResponse().setStatus(HttpServletResponse.SC_NOT_FOUND); return; } } }
From source file:com.cognitivabrasil.repositorio.web.FileController.java
@RequestMapping(value = "/{id}", method = RequestMethod.GET) public void getFile(@PathVariable("id") int id, HttpServletResponse response) throws IOException { Files f = fileService.get(id); if (f == null) { response.sendError(HttpServletResponse.SC_GONE, "O arquivo solicitado no foi encontrado."); } else {/*ww w.j av a 2 s . c o m*/ String fileName = f.getLocation(); try { // get your file as InputStream InputStream is = new FileInputStream(new File(fileName)); response.setHeader("Content-Disposition", "attachment; filename=" + f.getName()); response.setContentType(f.getContentType()); // copy it to response's OutputStream IOUtils.copy(is, response.getOutputStream()); response.flushBuffer(); } catch (FileNotFoundException fe) { response.sendError(HttpServletResponse.SC_GONE, "O arquivo solicitado no foi encontrado."); LOG.error("O arquivo solicitado no foi encontrado.", fe); } catch (IOException ex) { LOG.error("Error writing file to output stream. Filename was '" + fileName + "'"); throw ex; } } }
From source file:aiai.ai.station.actors.DownloadResourceActor.java
public void fixedDelay() { if (globals.isUnitTesting) { return;//www . j a v a 2s . c o m } if (!globals.isStationEnabled) { return; } DownloadResourceTask task; while ((task = poll()) != null) { // if (Boolean.TRUE.equals(preparedMap.get(task.getId()))) { // continue; // } AssetFile assetFile = StationResourceUtils.prepareResourceFile(task.targetDir, task.binaryDataType, task.id, null); if (assetFile.isError) { log.warn("Resource can't be downloaded. Asset file initialization was failed, {}", assetFile); continue; } if (assetFile.isContent) { log.info("Resource was already downloaded. Asset file: {}", assetFile.file.getPath()); // preparedMap.put(task.getId(), true); continue; } try { Request request = Request.Get(targetUrl + '/' + task.getBinaryDataType() + '/' + task.getId()) .connectTimeout(5000).socketTimeout(5000); Response response; if (globals.isSecureRestUrl) { response = executor.executor.execute(request); } else { response = request.execute(); } response.saveContent(assetFile.file); // preparedMap.put(task.getId(), true); log.info("Resource #{} was loaded", task.getId()); } catch (HttpResponseException e) { if (e.getStatusCode() == HttpServletResponse.SC_GONE) { log.warn("Resource with id {} wasn't found", task.getId()); } else if (e.getStatusCode() == HttpServletResponse.SC_CONFLICT) { log.warn("Resource with id {} is broken and need to be recreated", task.getId()); } else { log.error("HttpResponseException.getStatusCode(): {}", e.getStatusCode()); log.error("HttpResponseException", e); } } catch (SocketTimeoutException e) { log.error("SocketTimeoutException", e); } catch (IOException e) { log.error("IOException", e); } } }
From source file:org.apache.hadoop.mapred.TaskLogServlet.java
private void printTaskLog(HttpServletResponse response, OutputStream out, TaskAttemptID taskId, long start, long end, boolean plainText, TaskLog.LogName filter, boolean isCleanup) throws IOException { if (!plainText) { out.write(("<br><b><u>" + filter + " logs</u></b><br>\n" + "<pre>\n").getBytes()); }/*ww w. j a v a 2 s.co m*/ try { InputStream taskLogReader = new TaskLog.Reader(taskId, filter, start, end, isCleanup); byte[] b = new byte[65536]; int result; while (true) { result = taskLogReader.read(b); if (result > 0) { if (plainText) { out.write(b, 0, result); } else { HtmlQuoting.quoteHtmlChars(out, b, 0, result); } } else { break; } } taskLogReader.close(); if (!plainText) { out.write("</pre></td></tr></table><hr><br>\n".getBytes()); } } catch (IOException ioe) { if (filter == TaskLog.LogName.DEBUGOUT) { if (!plainText) { out.write("</pre><hr><br>\n".getBytes()); } // do nothing } else { String msg = "Failed to retrieve " + filter + " log for task: " + taskId; LOG.warn(msg, ioe); response.sendError(HttpServletResponse.SC_GONE, msg); } } }
From source file:byps.http.HActiveMessage.java
/**
 * Cancels this active message. Wakes any threads blocked in getIncomingStream(),
 * interrupts a still-running worker thread, or — when no worker exists but a
 * request context is pending (a long-poll) — completes that response with
 * HTTP 410 so the client-side HServerR stops issuing new long-polls.
 * Called (among other cases) when the session is invalidated.
 */
public synchronized void cancelMessage() {
    if (log.isDebugEnabled()) log.debug("cancelMessage(" + messageId);
    canceled = true;
    // Threads might wait in getIncomingStream()
    this.notifyAll();
    Thread thread = workerThread;
    if (log.isDebugEnabled()) log.debug("worker is still running: " + (thread != null));
    if (thread != null) {
        if (log.isDebugEnabled()) log.debug("interrupt thread=" + thread);
        thread.interrupt();
        // The worker thread will call RequestContext.complete
        // when it is finished.
    } else if (rctxtMessage != null) {
        if (log.isDebugEnabled()) log.debug("assume long-poll, complete response with HTTP 410");
        // Assume Longpoll request because a worker thread would have called
        // getAndRemoveRequestContext before it has called removeWorker.
        // This block is executed, if the session is invalidated.
        // The response code is SC_GONE in order to stop HServerR on the
        // client side from sending a new long-poll.
        HttpServletResponse resp = (HttpServletResponse) rctxtMessage.getResponse();
        resp.setStatus(HttpServletResponse.SC_GONE);
        rctxtMessage.complete();
        rctxtMessage = null;
    }
    incomingStreams.clear();
    checkFinished();
    if (log.isDebugEnabled()) log.debug(")cancelMessage");
}
From source file:org.apache.hadoop.hdfs.server.namenode.GetImageServlet.java
/**
 * Serves fsimage/edits downloads and checkpoint-image uploads for the NameNode.
 *
 * <p>When security is enabled, only the NameNode and Secondary NameNode may call
 * this servlet; other callers get 403. The actual transfer runs under the current
 * UGI via doAs. Any failure is reported to the client as HTTP 410 and rethrown
 * as an IOException.
 */
@SuppressWarnings("unchecked")
public void doGet(final HttpServletRequest request, final HttpServletResponse response)
        throws ServletException, IOException {
    Map<String, String[]> pmap = request.getParameterMap();
    try {
        ServletContext context = getServletContext();
        final FSImage nnImage = (FSImage) context.getAttribute("name.system.image");
        // ff decides, from the request parameters, which transfer is requested.
        final TransferFsImage ff = new TransferFsImage(pmap, request, response);
        final Configuration conf = (Configuration) getServletContext().getAttribute(JspHelper.CURRENT_CONF);
        if (UserGroupInformation.isSecurityEnabled() && !isValidRequestor(request.getRemoteUser(), conf)) {
            response.sendError(HttpServletResponse.SC_FORBIDDEN,
                    "Only Namenode and Secondary Namenode may access this servlet");
            LOG.warn("Received non-NN/SNN request for image or edits from " + request.getRemoteHost());
            return;
        }
        UserGroupInformation.getCurrentUser().doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                if (ff.getImage()) {
                    // send fsImage
                    TransferFsImage.getFileServer(response.getOutputStream(), nnImage.getFsImageName());
                } else if (ff.getEdit()) {
                    // send edits
                    TransferFsImage.getFileServer(response.getOutputStream(), nnImage.getFsEditName());
                } else if (ff.putImage()) {
                    // issue a HTTP get request to download the new fsimage
                    nnImage.validateCheckpointUpload(ff.getToken());
                    reloginIfNecessary().doAs(new PrivilegedExceptionAction<Void>() {
                        @Override
                        public Void run() throws Exception {
                            TransferFsImage.getFileClient(ff.getInfoServer(), "getimage=1",
                                    nnImage.getFsImageNameCheckpoint());
                            return null;
                        }
                    });
                    nnImage.checkpointUploadDone();
                }
                return null;
            }

            // We may have lost our ticket since the last time we tried to open
            // an http connection, so log in just in case.
            private UserGroupInformation reloginIfNecessary() throws IOException {
                // This method is only called on the NN, therefore it is safe to
                // use these key values.
                return UserGroupInformation.loginUserFromKeytabAndReturnUGI(
                        SecurityUtil.getServerPrincipal(conf.get(DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
                                NameNode.getAddress(conf).getHostName()),
                        conf.get(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY));
            }
        });
    } catch (Exception ie) {
        // NOTE(review): all failures — including client errors — map to 410 here;
        // historical behavior, confirm before changing.
        String errMsg = "GetImage failed. " + StringUtils.stringifyException(ie);
        response.sendError(HttpServletResponse.SC_GONE, errMsg);
        throw new IOException(errMsg);
    } finally {
        response.getOutputStream().close();
    }
}
From source file:com.streamsets.pipeline.stage.origin.sdcipctokafka.IpcToKafkaServlet.java
/**
 * Receives an SDC IPC payload, fragments it into messages, and forwards them to
 * Kafka.
 *
 * <p>Rejects requests while shutting down (410), with a wrong content type or
 * non-fragmentable encoding (400), with a mismatched appId (403), or with an
 * unsupported compression (415). All request/Kafka timings feed the servlet's
 * meters and timers.
 */
@Override
protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
    String requestor = req.getRemoteAddr() + ":" + req.getRemotePort();
    if (shuttingDown) {
        LOG.debug("Shutting down, discarding incoming request from '{}'", requestor);
        resp.setStatus(HttpServletResponse.SC_GONE);
    } else {
        String appId = req.getHeader(Constants.X_SDC_APPLICATION_ID_HEADER);
        String compression = req.getHeader(Constants.X_SDC_COMPRESSION_HEADER);
        String contentType = req.getContentType();
        String json1Fragmentable = req.getHeader(Constants.X_SDC_JSON1_FRAGMENTABLE_HEADER);
        if (!Constants.APPLICATION_BINARY.equals(contentType)) {
            invalidRequestMeter.mark();
            resp.sendError(HttpServletResponse.SC_BAD_REQUEST, Utils.format(
                    "Wrong content-type '{}', expected '{}'", contentType, Constants.APPLICATION_BINARY));
        } else if (!"true".equals(json1Fragmentable)) {
            invalidRequestMeter.mark();
            resp.sendError(HttpServletResponse.SC_BAD_REQUEST, Utils.format(
                    "RPC client is not using a fragmentable JSON1 encoding, client;s SDC must be upgraded"));
        } else if (!configs.appId.equals(appId)) {
            invalidRequestMeter.mark();
            LOG.warn("IPC from '{}' invalid appId '{}', rejected", requestor, appId);
            resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Invalid 'appId'");
        } else {
            long start = System.currentTimeMillis();
            LOG.debug("Request accepted from '{}'", requestor);
            try (InputStream in = req.getInputStream()) {
                InputStream is = in;
                boolean processRequest = true;
                if (compression != null) {
                    // Only Snappy framing is supported; anything else is 415.
                    switch (compression) {
                    case Constants.SNAPPY_COMPRESSION:
                        is = new SnappyFramedInputStream(is, true);
                        break;
                    default:
                        invalidRequestMeter.mark();
                        LOG.warn("Invalid compression '{}' in request from '{}', returning error", compression,
                                requestor);
                        resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE,
                                "Unsupported compression: " + compression);
                        processRequest = false;
                    }
                }
                if (processRequest) {
                    LOG.debug("Processing request from '{}'", requestor);
                    List<byte[]> messages = SdcStreamFragmenter.fragment(is, maxMessageSize, maxRpcRequestSize);
                    LOG.debug("Request from '{}' broken into '{}' messages", requestor, messages.size());
                    // kafkaTime accumulates every period spent inside the Kafka client,
                    // including producer acquisition/release.
                    long kStart = System.currentTimeMillis();
                    SdcKafkaProducer producer = getKafkaProducer();
                    long kafkaTime = System.currentTimeMillis() - kStart;
                    try {
                        for (byte[] message : messages) {
                            // we are using round robing partition strategy, partition key is ignored
                            kStart = System.currentTimeMillis();
                            producer.enqueueMessage(configs.topic, message, "");
                            kafkaTime += System.currentTimeMillis() - kStart;
                        }
                        kStart = System.currentTimeMillis();
                        producer.write();
                        kafkaTime += System.currentTimeMillis() - kStart;
                        resp.setStatus(HttpServletResponse.SC_OK);
                        requestMeter.mark();
                    } catch (StageException ex) {
                        LOG.warn("Kakfa producer error: {}", ex.toString(), ex);
                        // Queue the error so the origin stage can surface it.
                        errorQueue.offer(ex);
                        errorRequestMeter.mark();
                        LOG.warn("Error while reading payload from '{}': {}", requestor, ex.toString(), ex);
                        resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, ex.toString());
                    } finally {
                        kStart = System.currentTimeMillis();
                        releaseKafkaProducer(producer);
                        kafkaTime += System.currentTimeMillis() - kStart;
                    }
                    kafkaTimer.update(kafkaTime, TimeUnit.MILLISECONDS);
                    kafkaMessagesMeter.mark(messages.size());
                }
            } catch (Exception ex) {
                errorRequestMeter.mark();
                LOG.warn("Error while reading payload from '{}': {}", requestor, ex.toString(), ex);
                resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, ex.toString());
            } finally {
                requestTimer.update(System.currentTimeMillis() - start, TimeUnit.MILLISECONDS);
            }
        }
    }
}
From source file:cf.spring.servicebroker.ServiceBrokerHandler.java
/**
 * Entry point for Cloud Foundry service-broker REST calls.
 *
 * <p>Authenticates, validates the broker API version, then dispatches by HTTP
 * method: PUT provisions an instance (no bindingId in the URI) or creates a
 * binding; DELETE deprovisions/unbinds, answering 410 when the resource is
 * already gone. Conflicts map to 409, broker errors to their declared status
 * code, and anything else to 500 with a JSON error body.
 */
@Override
public void handleRequest(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {
    if (!authenticator.authenticate(request, response)) {
        // Authenticator has already written the failure response.
        return;
    }
    ApiVersionValidator.validateApiVersion(request);
    try {
        response.setContentType(Constants.JSON_CONTENT_TYPE);
        final Matcher matcher = URI_PATTERN.matcher(request.getRequestURI());
        if (!matcher.matches()) {
            throw new NotFoundException("Resource not found");
        }
        // Group 1 = service instance id; group 3 = binding id (null for
        // instance-level operations).
        final String instanceId = matcher.group(1);
        final String bindingId = matcher.group(3);
        if ("put".equalsIgnoreCase(request.getMethod())) {
            if (bindingId == null) {
                // Provision a new service instance.
                final ProvisionBody provisionBody = mapper.readValue(request.getInputStream(),
                        ProvisionBody.class);
                final String serviceId = provisionBody.getServiceId();
                final BrokerServiceAccessor accessor = getServiceAccessor(serviceId);
                final ProvisionRequest provisionRequest = new ProvisionRequest(UUID.fromString(instanceId),
                        provisionBody.getPlanId(), provisionBody.getOrganizationGuid(),
                        provisionBody.getSpaceGuid());
                final ProvisionResponse provisionResponse = accessor.provision(provisionRequest);
                if (provisionResponse.isCreated()) {
                    response.setStatus(HttpServletResponse.SC_CREATED);
                }
                mapper.writeValue(response.getOutputStream(), provisionResponse);
            } else {
                // Bind an application to an existing instance.
                final BindBody bindBody = mapper.readValue(request.getInputStream(), BindBody.class);
                final String serviceId = bindBody.getServiceId();
                final BrokerServiceAccessor accessor = getServiceAccessor(serviceId);
                final BindRequest bindRequest = new BindRequest(UUID.fromString(instanceId),
                        UUID.fromString(bindingId), bindBody.applicationGuid, bindBody.getPlanId());
                final BindResponse bindResponse = accessor.bind(bindRequest);
                if (bindResponse.isCreated()) {
                    response.setStatus(HttpServletResponse.SC_CREATED);
                }
                mapper.writeValue(response.getOutputStream(), bindResponse);
            }
        } else if ("delete".equalsIgnoreCase(request.getMethod())) {
            final String serviceId = request.getParameter(SERVICE_ID_PARAM);
            final String planId = request.getParameter(PLAN_ID_PARAM);
            final BrokerServiceAccessor accessor = getServiceAccessor(serviceId);
            try {
                if (bindingId == null) {
                    // Deprovision
                    final DeprovisionRequest deprovisionRequest = new DeprovisionRequest(
                            UUID.fromString(instanceId), planId);
                    accessor.deprovision(deprovisionRequest);
                } else {
                    // Unbind
                    final UnbindRequest unbindRequest = new UnbindRequest(UUID.fromString(bindingId),
                            UUID.fromString(instanceId), planId);
                    accessor.unbind(unbindRequest);
                }
            } catch (MissingResourceException e) {
                // Per the broker API, deleting something already gone is 410.
                response.setStatus(HttpServletResponse.SC_GONE);
            }
            response.getWriter().write("{}");
        } else {
            response.setStatus(HttpServletResponse.SC_METHOD_NOT_ALLOWED);
        }
    } catch (ConflictException e) {
        response.setStatus(HttpServletResponse.SC_CONFLICT);
        response.getWriter().write("{}");
    } catch (ServiceBrokerException e) {
        LOGGER.warn("An error occurred processing a service broker request", e);
        response.setStatus(e.getHttpResponseCode());
        mapper.writeValue(response.getOutputStream(), new ErrorBody(e.getMessage()));
    } catch (Throwable e) {
        LOGGER.error(e.getMessage(), e);
        response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
        mapper.writeValue(response.getOutputStream(), new ErrorBody(e.getMessage()));
    }
}