List of usage examples for java.io.BufferedReader.ready()
public boolean ready() throws IOException
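Before the full examples below, here is a minimal, self-contained sketch of the typical pattern (the file name input.txt is a placeholder): ready() reports whether the next read is guaranteed not to block, so it is usually used to poll for available input, not to detect end of stream.

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;

public class ReadyExample {
    public static void main(String[] args) throws IOException {
        // Hypothetical input file; any readable text file works here.
        try (BufferedReader reader = new BufferedReader(new FileReader("input.txt"))) {
            // ready() only tells us the next read() will not block;
            // it is NOT an end-of-stream test, so readLine() may still return null.
            while (reader.ready()) {
                String line = reader.readLine();
                if (line == null) {
                    break; // end of stream reached
                }
                System.out.println(line);
            }
        }
    }
}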
From source file:com.datafibers.kafka.connect.SchemaedFileSourceTask.java
private List<SourceRecord> pollFromStream() throws InterruptedException {
    // Unfortunately we can't just use readLine() because it blocks
    // in an uninterruptible way. Instead we have to manage
    // splitting lines ourselves, using simple backoff when
    // no new data is available.
    try {
        final BufferedReader readerCopy;
        synchronized (this) {
            readerCopy = reader;
        }
        if (readerCopy == null)
            return null;

        ArrayList<SourceRecord> records = null;

        int nread = 0;
        while (readerCopy.ready()) {
            nread = readerCopy.read(buffer, offset, buffer.length - offset);
            log.trace("Read {} bytes from {}", nread, logFilename());

            if (nread > 0) {
                offset += nread;
                if (offset == buffer.length) {
                    char[] newbuf = new char[buffer.length * 2];
                    System.arraycopy(buffer, 0, newbuf, 0, buffer.length);
                    buffer = newbuf;
                }

                String line;
                do {
                    line = extractLine();
                    if (line != null) {
                        log.trace("Read a line from {}", logFilename());
                        if (records == null)
                            records = new ArrayList<>();
                        records.add(new SourceRecord(offsetKey(filename), offsetValue(streamOffset), topic,
                                Schema.STRING_SCHEMA, line));
                    }
                } while (line != null);
            }
        }

        if (nread <= 0)
            synchronized (this) {
                this.wait(1000);
            }

        return records;
    } catch (IOException e) {
        // Underlying stream was killed, probably as a result
        // of calling stop. Allow to return null, and driving
        // thread will handle any shutdown if necessary.
    }
    return null;
}
From source file:com.datafibers.kafka.connect.FileGenericSourceTask.java
@Override
public List<SourceRecord> poll() throws InterruptedException {
    if (!inProgressPaths.isEmpty()) {
        try {
            Path currentPath = inProgressPaths.remove(0);
            processedPaths.add(currentPath);
            filename = currentPath.getFileName().toString();

            fileInProcessing = FileUtils.getFile(currentPath.toString() + FILENAME_EXT_PROCESSING);
            fileProcessed = FileUtils.getFile(currentPath.toString() + FILENAME_EXT_PROCESSED);
            FileUtils.moveFile(FileUtils.getFile(currentPath.toString()), fileInProcessing);
            stream = new FileInputStream(fileInProcessing);

            Map<String, Object> offset = context.offsetStorageReader()
                    .offset(Collections.singletonMap(FILENAME_FIELD, filename));
            if (offset != null && !overwrite) {
                log.info("Found previous offset, will not process {}", filename);
                return null;
            } else
                streamOffset = 0L;
            reader = new BufferedReader(new InputStreamReader(stream));
            log.info("Opened {} for reading", filename);
        } catch (IOException e) {
            throw new ConnectException(String.format("Unable to open file %s", filename), e);
        }
    } else {
        log.warn("********* Waiting for file that meets the glob criteria! *********");
        synchronized (this) {
            this.wait(interval);
            findMatch();
        }
        return null;
    }

    ArrayList<SourceRecord> records = new ArrayList<SourceRecord>();
    try {
        final BufferedReader readerCopy;
        synchronized (this) {
            readerCopy = reader;
        }
        if (readerCopy == null)
            return null;

        int nread = 0;
        while (readerCopy.ready()) {
            nread = readerCopy.read(buffer, offset, buffer.length - offset);
            log.trace("Read {} bytes from {}", nread, filename);

            if (nread > 0) {
                offset += nread;
                if (offset == buffer.length) {
                    char[] newbuf = new char[buffer.length * 2];
                    System.arraycopy(buffer, 0, newbuf, 0, buffer.length);
                    buffer = newbuf;
                }

                String line;
                do {
                    line = extractLine();
                    if (line != null) {
                        line = line.trim();
                        log.trace("Read a line from {}", filename);
                        if (schemaValidate) {
                            records.add(new SourceRecord(offsetKey(filename), offsetValue(streamOffset), topic,
                                    dataSchema, structDecodingRoute(line, filename)));
                        } else {
                            log.info("STRING SCHEMA Processing");
                            records.add(new SourceRecord(offsetKey(filename), offsetValue(streamOffset), topic,
                                    Schema.STRING_SCHEMA, line));
                        }
                    }
                } while (line != null);
            }
        }

        // Finish processing and rename as processed.
        FileUtils.moveFile(fileInProcessing, fileProcessed);

        if (nread <= 0)
            synchronized (this) {
                this.wait(1000);
            }

        return records;
    } catch (IOException e) {
        throw new ConnectException(String.format("Unable to read file %s", filename), e);
    }
}
From source file:de.huberlin.wbi.hiway.common.Client.java
/**
 * copy and replace input ports
 */
private Path preProcessGalaxyWorkflow(Path wfSource, Path wfTemp) throws IOException {
    List<String> lines = new ArrayList<>();
    try (BufferedReader reader = new BufferedReader(
            new FileReader(wfTemp == null ? wfSource.toString() : wfTemp.toString()))) {
        String line;
        while ((line = reader.readLine()) != null) {
            if (line.contains("\"name\": \"Input dataset\"")) {
                String inputLine = lines.get(lines.size() - 3);
                String portName = inputLine.substring(inputLine.indexOf("\"name\": \"") + 9,
                        inputLine.lastIndexOf("\""));
                System.out.println("Enter file location in HDFS for Galaxy workflow input port \"" + portName
                        + "\". Press return or wait 30 seconds to use default value \"" + portName + "\".");
                BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
                long startTime = System.currentTimeMillis();
                // wait 30s
                while ((System.currentTimeMillis() - startTime) < 30 * 1000 && !in.ready()) {
                }
                if (in.ready()) {
                    String newPortName = in.readLine();
                    if (newPortName.length() > 0) {
                        inputLine = inputLine.replace(portName, newPortName);
                        lines.set(lines.size() - 3, inputLine);
                    }
                }
            }
            lines.add(line);
        }
    }
    wfTemp = new Path("./." + wfSource.getName());
    try (BufferedWriter writer = new BufferedWriter(new FileWriter(wfTemp.toString()))) {
        for (String line : lines) {
            writer.write(line);
            writer.newLine();
        }
    }
    return wfTemp;
}
From source file:xc.mst.manager.processingDirective.DefaultServicesService.java
/**
 * Given a BufferedReader for a File, skip to the next line in the file
 * which is neither a comment nor whitespace
 *
 * @param in
 *            The BufferedReader for the file
 * @return The first non-comment non-whitespace line in the file, or null if we reached the end of the file
 * @throws IOException
 *             If an error occurred while reading the file
 */
private String consumeCommentsAndWhitespace(BufferedReader in) throws IOException {
    while (in.ready()) {
        String line = in.readLine(); // A line from the configuration file

        // If the line is a valid line, return it
        if (!line.startsWith("#") && line.trim().length() > 0)
            return line.trim();
    }

    // If we got here we reached the end of the file, so return null
    return null;
}
From source file:distribuidos.MyThread.java
private void aumentarFichero(int cantidad) {
    try {
        FileInputStream fis = new FileInputStream(this.fichero);
        BufferedReader br = new BufferedReader(new InputStreamReader(fis));
        String linea;
        int numero = 0;
        if (br.ready()) {
            linea = br.readLine();
            numero = Integer.parseInt(linea);
        }
        br.close();
        fis.close();

        numero += cantidad;

        FileOutputStream fos = new FileOutputStream(this.fichero, false);
        BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(fos));
        bw.write("" + numero);
        bw.close();
        fos.close();
    } catch (IOException ex) {
        System.out.println("Error de fichero");
    }
}
From source file:edu.du.penrose.systems.fedoraApp.util.MetsBatchFileSplitter.java
/**
 * Split the batchIngest command file. If the batch contains additions put the results into the 'mets/new' directory. If updates put the
 * files into the 'mets/updates' directory. The ingest command (only one per batchfile!!) is saved in a comment prior to the <mets:mets>
 * element for each split file. This means the batch can only contain files of one type ie adds or updates.
 * <br><br>
 * If an error occurs we will try to remove any generated output file and then throw an exception.
 * <br>
 * Once the ingest command line is found ie "<ingestControl command="A" type="pidInOBJID" />" ALL other command lines are ignored!! After that
 * point we only look for <mets:mets> and </mets:mets> to split the file.
 *
 * NOTE Since this method may need to get fedora pids the following libraries are needed...
 * wsdl4j-1.5.1.jar
 * commons-discovery.jar
 * fcrepo-server-3.4-utilities-main.jar
 * jaxrpc.jar
 * logback-core-0.9.18.jar
 * logback-classic-0.9.18.jar
 * trippi-1.1.2-core.jar
 * fcrepo-common-3.4.jar
 * fcrepo-client-admin-3.4.jar
 * jdom.jar
 *
 * @param ingestOptions set batchIngestDate, batchDescription,
 * @param threadStatus can be null.
 * @param inFile batch file to split
 * @param metsNewDirectory
 * @param metsUpdatesDirectory
 * @param nameFileFromOBJID create xml files that are named the same as their OBJID element. This seems like a good idea but if the file
 *            already exists you will get an error, killing the entire ingest.
 * @param fedoraUser used for a replyWithPid ingest. If null we will pull from the batchIngest.properties file.
 * @param fedoraPassword used for a replyWithPid ingest. If null we will pull from the batchIngest.properties file.
 *
 * @return IF the batch file is an add of type 'replyWithPid' return a map of OBJID and PIDs otherwise return null. NOTE: if the <mets:mets OBJID> element is
 *         empty in the batch file, both the key and the value of the returned map will contain the pid.
 *
 * @throws Exception
 */
static public Map<String, String> splitMetsBatchFile_version_2(BatchIngestOptions ingestOptions,
        ThreadStatusMsg threadStatus, File inFile, String metsNewDirectory, String metsUpdatesDirectory,
        boolean nameFileFromOBJID, String fedoraHost, String fedoraPort, String fedoraUser,
        String fedoraPassword) throws Exception {

    Map<String, String> pidMap = null;

    FileInputStream batchFileInputStream;
    try {
        batchFileInputStream = new FileInputStream(inFile);
    } catch (FileNotFoundException e) {
        throw new FatalException(e.getMessage());
    }
    DataInputStream batchFileDataInputStream = new DataInputStream(batchFileInputStream);
    BufferedReader batchFileBufferedReader = new BufferedReader(
            new InputStreamReader(batchFileDataInputStream));

    String oneLine = null;
    String ingestControlLine = null;
    int fileCount = 0;
    String documentType = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>";
    String batchCreationDate = null;
    String batchDescription = null;
    String metsDirectory = null; // will get set to either the new directory or the updates directory.
    File outFile = null;
    FileOutputStream metsFileOutputStream = null;
    BufferedWriter metsBufferedWriter = null;
    boolean headerFoundLookOnlyForMetsNow = false;

    while (batchFileBufferedReader.ready()) {
        oneLine = batchFileBufferedReader.readLine();

        if (!headerFoundLookOnlyForMetsNow) {
            if (oneLine.contains("<?xml version") && oneLine.trim().startsWith("<")) {
                documentType = oneLine;
            }

            // LOOK FOR BATCH DESCRIPTION
            if (oneLine.contains(BATCH_DESCRIPTION_ELEMENT_MARKER) && oneLine.indexOf("<!") == -1) {
                int tempLocation1 = oneLine.indexOf("batchCreationDate=" + QUOTE);
                if (tempLocation1 == -1) {
                    tempLocation1 = oneLine.indexOf("batchCreationDate=" + APOST);
                }
                int tempLocation2 = oneLine.indexOf(QUOTE, tempLocation1 + 19);
                if (tempLocation2 == -1) {
                    tempLocation2 = oneLine.indexOf(APOST, tempLocation1 + 19);
                }
                batchCreationDate = oneLine.substring(tempLocation1 + 19, tempLocation2);
                ingestOptions.setBatchIngestDate(batchCreationDate);

                oneLine = batchFileBufferedReader.readLine();
                if (!oneLine.contains("<literal>")) {
                    throw new FatalException("Invalid batchDescription");
                }
                StringBuffer tempBatchDescription = new StringBuffer();
                boolean endBatchDescription = false;
                do {
                    tempBatchDescription.append(oneLine);
                    if (oneLine.contains("</literal>")) {
                        endBatchDescription = true;
                        batchDescription = tempBatchDescription.toString();
                        batchDescription = batchDescription.replace("<literal>", ""); // it may all be on one line.
                        batchDescription = batchDescription.replace("</literal>", "");
                    }
                    oneLine = batchFileBufferedReader.readLine();
                } while (!endBatchDescription);
                ingestOptions.setBatchDescription(batchDescription.trim());
            }

            // look for batch command at the top of the file, prior to the first <mets:mets>
            if (oneLine.contains((INGEST_CONTROL_ELEMENT_MARKER)) && oneLine.indexOf("<!") == -1) {
                if ((!oneLine.contains("command")) || (!oneLine.contains("type"))) {
                    throw new FatalException(
                            "The batch control element must have both command and type attributes");
                }
                ingestControlLine = oneLine.trim();

                boolean validCommandLine = parseCommandLine(ingestOptions, ingestControlLine);
                if (!validCommandLine) {
                    throw new Exception("ERROR: Invalid command line found in batch ingest file:" + inFile
                            + " , " + ingestControlLine);
                }
                headerFoundLookOnlyForMetsNow = true;

                switch (ingestOptions.getIngestCommand()) {
                case UPDATE:
                    metsDirectory = metsUpdatesDirectory;
                    break;
                case ADD:
                    metsDirectory = metsNewDirectory;
                    break;
                default:
                    throw new Exception("ERROR: Invalid ingest command");
                }
            } // if line is ingestControl (command line)
        } else // if-else headerFoundLookOnlyForMetsNow
        {
            if (oneLine.contains((INGEST_CONTROL_ELEMENT_MARKER)) && oneLine.indexOf("<!") == -1) {
                logger.warn("More than one ingest control line found in batch file! extras will be ignored:"
                        + inFile);
            }

            // look for <mets:mets> and get complete <mets:mets> element
            if (oneLine.contains("<mets:mets") && oneLine.indexOf("<!") == -1) {
                boolean haveEntireMetsLine = false;
                while (!haveEntireMetsLine) {
                    StringBuffer tempBuffer = new StringBuffer(oneLine);
                    String moreOfMetsLine = null;
                    if (!oneLine.contains(">")) {
                        moreOfMetsLine = batchFileBufferedReader.readLine();
                        tempBuffer.append(moreOfMetsLine);
                        if (moreOfMetsLine.contains(">")) {
                            haveEntireMetsLine = true;
                            oneLine = tempBuffer.toString();
                        } else {
                            oneLine = tempBuffer.toString();
                        }
                    } else {
                        haveEntireMetsLine = true;
                    }
                }

                // process everything up to </mets:mets>
                String objID = MetsBatchFileSplitter.getObjID(oneLine);

                if (nameFileFromOBJID) {
                    outFile = new File(metsDirectory + objID + ".xml");
                    logger.info("outputSplitFile METS file: " + metsDirectory + objID + ".xml");
                    if (outFile.exists()) {
                        String errorMsg = "file already exists:" + outFile.getName();
                        System.out.println(errorMsg);
                        logger.error(errorMsg);
                        throw new FatalException(errorMsg);
                    }
                } else {
                    switch (ingestOptions.getIngestThreadType()) {
                    case BACKGROUND: // TBD this is probably an error
                    case MANUAL:
                        outFile = new File(metsDirectory
                                + edu.du.penrose.systems.util.FileUtil.getDateTimeMilliSecondEnsureUnique()
                                + ".xml");
                        break;
                    case REMOTE:
                        outFile = new File(metsDirectory
                                + edu.du.penrose.systems.util.FileUtil.getDateTimeMilliSecondEnsureUnique()
                                + FedoraAppConstants.REMOTE_TASK_NAME_SUFFIX + ".xml");
                        break;
                    }
                }

                // oneLine now contains the entire <mets:mets....> line
                logger.info("outputSplitFile METS file: " + outFile.toString() + "\n\n");

                boolean errorOccurred = false;
                try {
                    metsFileOutputStream = new FileOutputStream(outFile);
                    metsBufferedWriter = new BufferedWriter(
                            new OutputStreamWriter(metsFileOutputStream, "UTF-8"));
                    metsBufferedWriter.write(documentType);
                    metsBufferedWriter.newLine();

                    switch (ingestOptions.getIngestCommand()) {
                    case ADD:
                        switch (ingestOptions.getAddCommandType()) {
                        case REPLY_WITH_PID:
                            // we get one pid at a time, write it to the <mets:mets> line OBJID value and add it to the pidMap
                            String[] tempPids = null;
                            if (fedoraPassword == null || fedoraUser == null || fedoraHost == null
                                    || fedoraPort == null) {
                                tempPids = FedoraAppUtil.getPIDs(ingestOptions.getInstitution(),
                                        new NonNegativeInteger("1"));
                            } else {
                                tempPids = FedoraAppUtil.getPIDs(fedoraHost, Integer.valueOf(fedoraPort),
                                        fedoraUser, fedoraPassword, ingestOptions.getInstitution(),
                                        new NonNegativeInteger("1"));
                            }
                            String reservedPid = tempPids[0];
                            metsBufferedWriter.write("<!--" + ingestControlLine + "-->\n");

                            if (pidMap == null) {
                                pidMap = new LinkedHashMap<String, String>();
                            }
                            oneLine = putPidInMetsLineOBJID(oneLine, reservedPid);
                            if (objID.contentEquals("")) {
                                pidMap.put(reservedPid, reservedPid);
                            } else {
                                pidMap.put(objID, reservedPid);
                            }
                            break;
                        case PID_IN_OBJID:
                        case NORMAL:
                        default:
                            metsBufferedWriter.write("<!--" + ingestControlLine + "-->\n");
                        }
                        break;
                    case UPDATE:
                        metsBufferedWriter.write("<!--" + ingestControlLine + "-->\n");
                        break;
                    case NOT_SET:
                    default:
                        throw new Exception("ERROR: Invalid ingest command");
                    }

                    // read lines from batch file and write to the new mets file until </mets:mets> line
                    while (!oneLine.contains("</mets:mets")) { // null pointer on premature end of file.
                        metsBufferedWriter.write(oneLine);
                        metsBufferedWriter.newLine();
                        oneLine = batchFileBufferedReader.readLine();
                        if (oneLine == null) {
                            throw new FatalException("Error spliting batch file, missing </mets:mets>");
                        }
                    }
                    metsBufferedWriter.write(oneLine);
                    metsBufferedWriter.newLine();
                    metsBufferedWriter.close();
                } catch (Exception e) {
                    errorOccurred = true; // for cleanup, see below.
                    throw new Exception(e);
                } finally {
                    metsBufferedWriter.close();
                    if (errorOccurred) {
                        outFile.delete(); // clean up.
                    }
                }

                fileCount++;
                if (threadStatus != null) {
                    threadStatus.setStatus("Spliting XML file #: " + fileCount);
                }
            } // if <mets:mets> found (look for another mets section now).
        } // if-else ! headerFoundLookOnlyForMetsNow
    } // while

    return pidMap; // may be null
}
From source file:be.docarch.odt2braille.PEF.java
/**
 * maxPages: -1 = infinity
 */
private int addPagesToSection(Document document, Element sectionElement, File brailleFile, int maxRows,
        int maxCols, int maxPages) throws IOException, Exception {

    int pageCount = 0;
    FileInputStream fileInputStream = new FileInputStream(brailleFile);
    InputStreamReader inputStreamReader = new InputStreamReader(fileInputStream, "UTF-8");
    BufferedReader bufferedReader = new BufferedReader(inputStreamReader);

    Element pageElement;
    Element rowElement;
    Node textNode;
    String line;

    boolean nextPage = bufferedReader.ready() && (maxPages > pageCount || maxPages == -1);

    try {
        while (nextPage) {
            pageElement = document.createElementNS(pefNS, "page");
            for (int i = 0; i < maxRows; i++) {
                line = bufferedReader.readLine();
                if (line == null) {
                    throw new Exception("number of rows < " + maxRows);
                }
                line = line.replaceAll("\u2800", "\u0020").replaceAll("\u00A0", "\u0020")
                        .replaceAll("\uE00F", "\u002D").replaceAll("\uE000", "\u0020");
                if (line.length() > maxCols) {
                    throw new Exception("line length > " + maxCols);
                }
                rowElement = document.createElementNS(pefNS, "row");
                textNode = document.createTextNode(liblouisTable.toBraille(line));
                rowElement.appendChild(textNode);
                pageElement.appendChild(rowElement);
                if (IS_WINDOWS) {
                    bufferedReader.readLine();
                }
            }
            sectionElement.appendChild(pageElement);
            pageCount++;
            if (bufferedReader.read() != '\f') {
                throw new Exception("unexpected character, should be form feed");
            }
            nextPage = bufferedReader.ready() && (maxPages > pageCount || maxPages == -1);
        }
    } finally {
        if (bufferedReader != null) {
            bufferedReader.close();
            inputStreamReader.close();
            fileInputStream.close();
        }
    }

    return pageCount;
}
From source file:petascope.wcps.server.test.FullTestsOnline.java
/**
 * Send a request to the WCPS server.
 * Returns a message on error and null otherwise.
 */
public String runOneTest(String param, String query, String method) throws MalformedURLException, IOException {
    // connect to the servlet
    URL servlet = new URL(PetascopeURL);
    HttpURLConnection conn = (HttpURLConnection) servlet.openConnection();

    // inform the connection that we will send output and accept input
    conn.setDoInput(true);
    conn.setDoOutput(true);

    // Don't use a cached version of URL connection.
    conn.setUseCaches(false);
    conn.setDefaultUseCaches(false);

    // Default method is GET
    if (method.equals("POST")) {
        conn.setRequestMethod(method);
    }

    // For POST: We send binary data
    if (method.equals("POST")) {
        // conn.setRequestProperty("Content-Type", "application/x-www-form-urlencoded");
        // query = URLEncoder.encode(query, "UTF-8");
    }

    if (param.equals("") == false) {
        param += "=";
    }
    String data = param + query;

    DataOutputStream out = new DataOutputStream(conn.getOutputStream());
    out.writeBytes(data);
    out.flush();
    out.close();

    BufferedReader cgiOutput = new BufferedReader(new InputStreamReader(conn.getInputStream()));
    String line1 = cgiOutput.readLine();
    String line2 = cgiOutput.readLine();
    String line3 = cgiOutput.readLine();

    System.out.println("\t" + line1);
    System.out.println("\t" + line2);
    System.out.println("\t" + line3);

    if ((line1 != null) && (line2 != null) && (line3 != null)) {
        StringBuffer output = new StringBuffer(line1 + line2 + line3);
        while (cgiOutput.ready()) {
            output.append(cgiOutput.readLine());
        }

        // Try to parse error message
        if (line1.startsWith("<")) {
            DocumentBuilder builder = null;
            try {
                DocumentBuilderFactory domFactory = DocumentBuilderFactory.newInstance();
                domFactory.setNamespaceAware(true); // never forget this!
                builder = domFactory.newDocumentBuilder();
                Document doc = builder.parse(IOUtils.toInputStream(output.toString()));
                Element rootElem = doc.getDocumentElement();
                if (rootElem.getNodeName().endsWith("ExceptionReport")) {
                    return output.toString();
                }
            } catch (Exception e) {
            }
        }
    }
    return null;
}
From source file:fr.eolya.extraction.tika.TikaWrapper.java
private void processWithPdfToText(InputStream input) {
    File tempFile = null;
    File tempFile2 = null;
    try {
        if (input != null && pdfToTextPath != null && !"".equals(pdfToTextPath)) {

            // Get a local copy of the file
            tempFile = createTempFile("tmp", ".pdf", tmpPath);
            if (!writeToFile(tempFile, input))
                return;

            meta2 = new HashMap<String, String>();
            meta2.put(META_CONTENTSIZE, String.valueOf(tempFile.length()));

            tempFile2 = createTempFile("tmp", ".html", tmpPath);

            Shell sh = new Shell();

            // Convert with PDFTOTEXT - pdftotext -enc UTF-8 -raw -q -htmlmeta -eol unix in.pdf out.html
            sh.exec(pdfToTextPath, "-enc", "UTF-8", "-raw", "-q", "-htmlmeta", "-eol", "unix",
                    tempFile.getAbsolutePath(), tempFile2.getAbsolutePath()).consumeAsString();
            tempFile.delete();

            // Load in string and add the <meta http-equiv='Content-Type' content='text/html; charset=utf-8'> line
            InputStreamReader fr1 = new InputStreamReader(new FileInputStream(tempFile2), "UTF-8");
            BufferedReader br1 = new BufferedReader(fr1);
            StringBuilder sb = new StringBuilder();
            while (br1.ready()) {
                String line = br1.readLine();
                sb.append(line).append("\n");
                if ("</head>".equals(line)) {
                    sb.append("<meta http-equiv='Content-Type' content='text/html; charset=utf-8'>")
                            .append("\n");
                }
            }
            br1.close();
            tempFile2.delete();

            meta2.put(META_CONTENTTYPE, CONTENT_TYPE_PDF);
            text = sb.toString();

            Document doc = Jsoup.parse(text);
            if (doc != null) {
                meta2.put(META_TITLE, doc.select("title").text());
                meta2.put(META_AUTHOR, getMetaContent(doc, "Author"));

                String creationDate = getMetaContent(doc, "CreationDate");
                if (creationDate != null) {
                    // 20130322143113Z00'00' -> 2013-03-22T14:31:13Z
                    Pattern p = Pattern.compile("[0-9]{14}Z[0-9]{2}'[0-9]{2}'");
                    Matcher m = p.matcher(creationDate);
                    if (m.find()) {
                        String value = String.format("%1$s-%2$s-%3$sT%4$s:%5$s:%6$sZ",
                                creationDate.substring(0, 4), creationDate.substring(4, 6),
                                creationDate.substring(6, 8), creationDate.substring(8, 10),
                                creationDate.substring(10, 12), creationDate.substring(12, 14));
                        meta2.put(META_CREATED, value);
                    } else {
                        // 20130322143113+02'00' -> 2013-03-22T14:31:13Z
                        p = Pattern.compile("[0-9]{14}\\+[0-9]{2}'[0-9]{2}'");
                        m = p.matcher(creationDate);
                        if (m.find()) {
                            String value = String.format("%1$s-%2$s-%3$sT%4$s:%5$s:%6$sZ",
                                    creationDate.substring(0, 4), creationDate.substring(4, 6),
                                    creationDate.substring(6, 8), creationDate.substring(8, 10),
                                    creationDate.substring(10, 12), creationDate.substring(12, 14));
                            meta2.put(META_CREATED, value);
                        }
                    }
                }

                if (OUTPUT_FORMAT_TEXT.equals(outputFormat)) {
                    Document doc2 = new Cleaner(Whitelist.basic()).clean(doc);
                    text = doc2.body().text();
                }
            }
        }
    } catch (Exception e) {
        if (tempFile != null && tempFile.exists())
            tempFile.delete();
        if (tempFile2 != null && tempFile2.exists())
            tempFile2.delete();
        e.printStackTrace();
        text = null;
        meta2 = null;
    }
}
From source file:fm.last.android.player.StreamProxy.java
private HttpRequest readRequest(Socket client) {
    HttpRequest request = null;
    InputStream is;
    String firstLine;
    String range = null;
    String ua = null;
    try {
        is = client.getInputStream();
        BufferedReader reader = new BufferedReader(new InputStreamReader(is), 8192);
        firstLine = reader.readLine();
        String line = null;
        do {
            line = reader.readLine();
            if (line != null && line.toLowerCase().startsWith("range: ")) {
                range = line.substring(7);
            }
            if (line != null && line.toLowerCase().startsWith("user-agent: ")) {
                ua = line.substring(12);
            }
        } while (line != null && !"".equals(line) && reader.ready());
    } catch (IOException e) {
        Log.e(LOG_TAG, "Error parsing request", e);
        return request;
    }

    if (firstLine == null) {
        Log.i(LOG_TAG, "Proxy client closed connection without a request.");
        return request;
    }

    StringTokenizer st = new StringTokenizer(firstLine);
    String method = st.nextToken();
    String uri = st.nextToken();
    Log.d(LOG_TAG, uri);
    String realUri = uri.substring(1);
    Log.d(LOG_TAG, realUri);

    request = new BasicHttpRequest(method, realUri, new ProtocolVersion("HTTP", 1, 1));
    if (range != null)
        request.addHeader("Range", range);
    if (ua != null)
        request.addHeader("User-Agent", ua);
    return request;
}