List of usage examples for java.io.DataOutputStream.writeBytes
public final void writeBytes(String s) throws IOException
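writeBytes(String s) writes each character of the string as a single byte, discarding the high eight bits of each char; it performs no character-set encoding, so it is only safe for ASCII text (use writeUTF or a Writer otherwise). Below is a minimal, self-contained sketch of typical usage; the file name "example.txt" and the written text are hypothetical, not taken from the examples that follow:

import java.io.DataOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;

public class WriteBytesExample {
  public static void main(String[] args) throws IOException {
    // try-with-resources closes the stream even if a write fails
    try (DataOutputStream out = new DataOutputStream(new FileOutputStream("example.txt"))) {
      // each char is truncated to its low 8 bits; no charset encoding is applied
      out.writeBytes("Transaction ID\t42\n");
      out.flush();
    }
  }
}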
From source file: com.jaspersoft.studio.statistics.UsageManager.java
/**
 * Send the statistics to the defined server. They are read from the properties file and converted into a JSON
 * string. Then this string is sent to the server as a post parameter named data
 */
protected void sendStatistics() {
  BufferedReader responseReader = null;
  DataOutputStream postWriter = null;
  try {
    if (!STATISTICS_SERVER_URL.trim().isEmpty()) {
      URL obj = new URL(STATISTICS_SERVER_URL);
      HttpURLConnection con = (HttpURLConnection) obj.openConnection();

      // add request header
      con.setRequestMethod("POST"); //$NON-NLS-1$
      con.setRequestProperty("User-Agent", "Mozilla/5.0"); //$NON-NLS-1$ //$NON-NLS-2$
      con.setRequestProperty("Accept-Language", "en-US,en;q=0.5"); //$NON-NLS-1$ //$NON-NLS-2$

      // Read and convert the statistics into a JSON string
      UsagesContainer container = new UsagesContainer(getAppDataFolder().getName());
      boolean fileChanged = false;
      synchronized (UsageManager.this) {
        Properties prop = getStatisticsContainer();
        for (Object key : new ArrayList<Object>(prop.keySet())) {
          try {
            String[] id_category = key.toString().split(Pattern.quote(ID_CATEGORY_SEPARATOR));
            String value = prop.getProperty(key.toString(), "0"); //$NON-NLS-1$
            int usageNumber = Integer.parseInt(value);
            String version = getVersion();
            // Check if the id contains the version
            if (id_category.length == 3) {
              version = id_category[2];
            } else {
              // Old structure, remove the old entry and insert the new fixed one
              // this is a really limit case and should almost never happen
              prop.remove(key);
              String fixed_key = id_category[0] + ID_CATEGORY_SEPARATOR + id_category[1]
                  + ID_CATEGORY_SEPARATOR + version;
              prop.setProperty(fixed_key, value);
              fileChanged = true;
            }
            container.addStat(new UsageStatistic(id_category[0], id_category[1], version, usageNumber));
          } catch (Exception ex) {
            // if a key is invalid remove it
            ex.printStackTrace();
            prop.remove(key);
            fileChanged = true;
          }
        }
      }
      if (fileChanged) {
        // The statistics file was changed, maybe a fix or an invalid property removed
        // write it corrected on the disk
        writeStatsToDisk.cancel();
        writeStatsToDisk.setPriority(Job.SHORT);
        writeStatsToDisk.schedule(MINIMUM_WAIT_TIME);
      }
      ObjectMapper mapper = new ObjectMapper();
      String serializedData = mapper.writeValueAsString(container);

      // Send post request with the JSON string as the data parameter
      String urlParameters = "data=" + serializedData; //$NON-NLS-1$
      con.setDoOutput(true);
      postWriter = new DataOutputStream(con.getOutputStream());
      postWriter.writeBytes(urlParameters);
      postWriter.flush();
      int responseCode = con.getResponseCode();

      responseReader = new BufferedReader(new InputStreamReader(con.getInputStream()));
      String inputLine;
      StringBuffer response = new StringBuffer();
      while ((inputLine = responseReader.readLine()) != null) {
        response.append(inputLine);
      }

      // Update the upload time
      if (responseCode == 200 && ModelUtils.safeEquals(response.toString(), "ok")) {
        setInstallationInfo(TIMESTAMP_INFO, String.valueOf(getCurrentTime()));
      } else {
        // print result
        System.out.println("Response error: " + response.toString());
      }
    }
  } catch (Exception ex) {
    ex.printStackTrace();
    JaspersoftStudioPlugin.getInstance().logError(Messages.UsageManager_errorStatUpload, ex);
  } finally {
    FileUtils.closeStream(postWriter);
    FileUtils.closeStream(responseReader);
  }
}
From source file: ffx.crystal.CCP4MapWriter.java
/**
 * write data to file, does not normalize
 *
 * @param data map data to write out
 * @param norm should the data be normalized by mean/sd?
 */
public void write(double data[], boolean norm) {
  ByteOrder b = ByteOrder.nativeOrder();
  FileOutputStream fos;
  DataOutputStream dos;

  double min = Double.POSITIVE_INFINITY;
  double max = Double.NEGATIVE_INFINITY;
  double mean = 0.0;
  double sd = 0.0;
  int n = 0;
  for (int k = 0; k < extz; k++) {
    for (int j = 0; j < exty; j++) {
      for (int i = 0; i < extx; i++) {
        int index = stride * (i + extx * (j + exty * k));
        // int index = k * (exty * (extx + 2)) + j * (extx + 2) + i;
        n++;
        if (data[index] < min) {
          min = data[index];
        }
        if (data[index] > max) {
          max = data[index];
        }
        mean += (data[index] - mean) / n;
      }
    }
  }

  n = 0;
  for (int k = 0; k < extz; k++) {
    for (int j = 0; j < exty; j++) {
      for (int i = 0; i < extx; i++) {
        int index = stride * (i + extx * (j + exty * k));
        // int index = k * (exty * (extx + 2)) + j * (extx + 2) + i;
        sd += pow(data[index] - mean, 2.0);
        n++;
      }
    }
  }
  sd = sqrt(sd / n);

  if (norm) {
    for (int k = 0; k < extz; k++) {
      for (int j = 0; j < exty; j++) {
        for (int i = 0; i < extx; i++) {
          int index = stride * (i + extx * (j + exty * k));
          data[index] = (data[index] - mean) / sd;
        }
      }
    }
    // recurse
    write(data, false);
  }

  try {
    if (logger.isLoggable(Level.INFO)) {
      StringBuilder sb = new StringBuilder();
      sb.append(String.format("\nwriting CCP4 map file: \"%s\"\n", filename));
      sb.append(String.format("map min: %g max: %g mean: %g standard dev.: %g", min, max, mean, sd));
      logger.info(sb.toString());
    }

    fos = new FileOutputStream(filename);
    dos = new DataOutputStream(fos);

    byte bytes[] = new byte[2048];
    int offset = 0;
    int imapdata;
    float fmapdata;
    String mapstr;

    // header
    ByteBuffer bb = ByteBuffer.wrap(bytes);
    bb.order(b).putInt(extx);
    bb.order(b).putInt(exty);
    bb.order(b).putInt(extz);

    // mode (2 = reals, only one we accept)
    bb.order(b).putInt(2);

    bb.order(b).putInt(orix);
    bb.order(b).putInt(oriy);
    bb.order(b).putInt(oriz);
    bb.order(b).putInt(nx);
    bb.order(b).putInt(ny);
    bb.order(b).putInt(nz);

    bb.order(b).putFloat((float) crystal.a);
    bb.order(b).putFloat((float) crystal.b);
    bb.order(b).putFloat((float) crystal.c);
    bb.order(b).putFloat((float) crystal.alpha);
    bb.order(b).putFloat((float) crystal.beta);
    bb.order(b).putFloat((float) crystal.gamma);

    bb.order(b).putInt(1);
    bb.order(b).putInt(2);
    bb.order(b).putInt(3);

    bb.order(b).putFloat((float) min);
    bb.order(b).putFloat((float) max);
    bb.order(b).putFloat((float) mean);

    bb.order(b).putInt(crystal.spaceGroup.number);
    // bb.order(b).putInt(1);

    // symmetry bytes - should set this up at some point
    // imapdata = swap ? ByteSwap.swap(320) : 320;
    bb.order(b).putInt(80);

    bb.order(b).putInt(0);

    for (int i = 0; i < 12; i++) {
      bb.order(b).putFloat(0.0f);
    }
    for (int i = 0; i < 15; i++) {
      bb.order(b).putInt(0);
    }

    dos.write(bytes, offset, 208);
    bb.rewind();

    mapstr = "MAP ";
    dos.writeBytes(mapstr);

    // machine code: double, float, int, uchar
    // 0x4441 for LE, 0x1111 for BE
    if (ByteOrder.nativeOrder().equals(ByteOrder.LITTLE_ENDIAN)) {
      imapdata = 0x4441;
    } else {
      imapdata = 0x1111;
    }
    bb.order(b).putInt(imapdata);

    bb.order(b).putFloat((float) sd);
    bb.order(b).putInt(1);
    dos.write(bytes, offset, 12);

    StringBuilder sb = new StringBuilder();
    sb.append("map data from ffx");
    while (sb.length() < 80) {
      sb.append(" ");
    }
    dos.writeBytes(sb.toString());

    sb = new StringBuilder();
    while (sb.length() < 80) {
      sb.append(" ");
    }
    for (int i = 0; i < 9; i++) {
      dos.writeBytes(sb.toString());
    }

    sb = new StringBuilder();
    sb.append("x,y,z");
    while (sb.length() < 80) {
      sb.append(" ");
    }
    dos.writeBytes(sb.toString());

    bb.rewind();
    for (int k = 0; k < extz; k++) {
      for (int j = 0; j < exty; j++) {
        for (int i = 0; i < extx; i++) {
          int index = stride * (i + extx * (j + exty * k));
          // int index = k * (exty * (extx + 2)) + j * (extx + 2) + i;
          fmapdata = (float) data[index];
          bb.order(b).putFloat(fmapdata);
          if (!bb.hasRemaining()) {
            dos.write(bytes);
            bb.rewind();
          }
        }
      }
    }
    if (bb.position() > 0) {
      dos.write(bytes);
      bb.rewind();
    }

    dos.close();
  } catch (Exception e) {
    String message = "Fatal exception evaluating structure factors.\n";
    logger.log(Level.SEVERE, message, e);
    System.exit(-1);
  }
}
From source file: org.apache.hadoop.hive.ql.exec.DDLTask.java
private int showTxns(Hive db, ShowTxnsDesc desc) throws HiveException {
  // Call the metastore to get the currently open transactions.
  GetOpenTxnsInfoResponse rsp = db.showTransactions();

  // Write the results into the file
  DataOutputStream os = getOutputStream(desc.getResFile());
  try {
    // Write a header
    os.writeBytes("Transaction ID");
    os.write(separator);
    os.writeBytes("Transaction State");
    os.write(separator);
    os.writeBytes("Started Time");
    os.write(separator);
    os.writeBytes("Last Heartbeat Time");
    os.write(separator);
    os.writeBytes("User");
    os.write(separator);
    os.writeBytes("Hostname");
    os.write(terminator);

    for (TxnInfo txn : rsp.getOpen_txns()) {
      os.writeBytes(Long.toString(txn.getId()));
      os.write(separator);
      os.writeBytes(txn.getState().toString());
      os.write(separator);
      os.writeBytes(Long.toString(txn.getStartedTime()));
      os.write(separator);
      os.writeBytes(Long.toString(txn.getLastHeartbeatTime()));
      os.write(separator);
      os.writeBytes(txn.getUser());
      os.write(separator);
      os.writeBytes(txn.getHostname());
      os.write(terminator);
    }
  } catch (IOException e) {
    LOG.warn("show transactions: " + stringifyException(e));
    return 1;
  } finally {
    IOUtils.closeStream(os);
  }
  return 0;
}
From source file: org.apache.hadoop.hive.ql.exec.DDLTask.java
private int showCompactions(Hive db, ShowCompactionsDesc desc) throws HiveException {
  // Call the metastore to get the status of all known compactions (completed get purged eventually)
  ShowCompactResponse rsp = db.showCompactions();

  // Write the results into the file
  final String noVal = " --- ";
  DataOutputStream os = getOutputStream(desc.getResFile());
  try {
    // Write a header
    os.writeBytes("Database");
    os.write(separator);
    os.writeBytes("Table");
    os.write(separator);
    os.writeBytes("Partition");
    os.write(separator);
    os.writeBytes("Type");
    os.write(separator);
    os.writeBytes("State");
    os.write(separator);
    os.writeBytes("Worker");
    os.write(separator);
    os.writeBytes("Start Time");
    os.write(separator);
    os.writeBytes("Duration(ms)");
    os.write(separator);
    os.writeBytes("HadoopJobId");
    os.write(terminator);

    if (rsp.getCompacts() != null) {
      for (ShowCompactResponseElement e : rsp.getCompacts()) {
        os.writeBytes(e.getDbname());
        os.write(separator);
        os.writeBytes(e.getTablename());
        os.write(separator);
        String part = e.getPartitionname();
        os.writeBytes(part == null ? noVal : part);
        os.write(separator);
        os.writeBytes(e.getType().toString());
        os.write(separator);
        os.writeBytes(e.getState());
        os.write(separator);
        String wid = e.getWorkerid();
        os.writeBytes(wid == null ? noVal : wid);
        os.write(separator);
        os.writeBytes(e.isSetStart() ? Long.toString(e.getStart()) : noVal);
        os.write(separator);
        os.writeBytes(e.isSetEndTime() ? Long.toString(e.getEndTime() - e.getStart()) : noVal);
        os.write(separator);
        os.writeBytes(e.isSetHadoopJobId() ? e.getHadoopJobId() : noVal);
        os.write(terminator);
      }
    }
  } catch (IOException e) {
    LOG.warn("show compactions: " + stringifyException(e));
    return 1;
  } finally {
    IOUtils.closeStream(os);
  }
  return 0;
}
From source file: org.apache.hadoop.hive.ql.metadata.formatting.TextMetaDataFormatter.java
private void writeFileSystemStats(DataOutputStream outStream, HiveConf conf, List<Path> locations,
    Path tblPath, boolean partSpecified, int indent) throws IOException {
  long totalFileSize = 0;
  long maxFileSize = 0;
  long minFileSize = Long.MAX_VALUE;
  long lastAccessTime = 0;
  long lastUpdateTime = 0;
  int numOfFiles = 0;

  boolean unknown = false;
  FileSystem fs = tblPath.getFileSystem(conf);
  // in case all files in locations do not exist
  try {
    FileStatus tmpStatus = fs.getFileStatus(tblPath);
    lastAccessTime = tmpStatus.getAccessTime();
    lastUpdateTime = tmpStatus.getModificationTime();
    if (partSpecified) {
      // check whether the part exists or not in fs
      tmpStatus = fs.getFileStatus(locations.get(0));
    }
  } catch (IOException e) {
    LOG.warn("Cannot access File System. File System status will be unknown: ", e);
    unknown = true;
  }

  if (!unknown) {
    for (Path loc : locations) {
      try {
        FileStatus status = fs.getFileStatus(tblPath);
        FileStatus[] files = fs.listStatus(loc);
        long accessTime = status.getAccessTime();
        long updateTime = status.getModificationTime();
        // no matter loc is the table location or part location, it must be a
        // directory.
        if (!status.isDir()) {
          continue;
        }
        if (accessTime > lastAccessTime) {
          lastAccessTime = accessTime;
        }
        if (updateTime > lastUpdateTime) {
          lastUpdateTime = updateTime;
        }
        for (FileStatus currentStatus : files) {
          if (currentStatus.isDir()) {
            continue;
          }
          numOfFiles++;
          long fileLen = currentStatus.getLen();
          totalFileSize += fileLen;
          if (fileLen > maxFileSize) {
            maxFileSize = fileLen;
          }
          if (fileLen < minFileSize) {
            minFileSize = fileLen;
          }
          accessTime = currentStatus.getAccessTime();
          updateTime = currentStatus.getModificationTime();
          if (accessTime > lastAccessTime) {
            lastAccessTime = accessTime;
          }
          if (updateTime > lastUpdateTime) {
            lastUpdateTime = updateTime;
          }
        }
      } catch (IOException e) {
        // ignore
      }
    }
  }
  String unknownString = "unknown";

  for (int k = 0; k < indent; k++) {
    outStream.write(Utilities.INDENT.getBytes("UTF-8"));
  }
  outStream.write("totalNumberFiles:".getBytes("UTF-8"));
  outStream.write((unknown ? unknownString : "" + numOfFiles).getBytes("UTF-8"));
  outStream.write(terminator);

  for (int k = 0; k < indent; k++) {
    outStream.write(Utilities.INDENT.getBytes("UTF-8"));
  }
  outStream.write("totalFileSize:".getBytes("UTF-8"));
  outStream.write((unknown ? unknownString : "" + totalFileSize).getBytes("UTF-8"));
  outStream.write(terminator);

  for (int k = 0; k < indent; k++) {
    outStream.write(Utilities.INDENT.getBytes("UTF-8"));
  }
  outStream.write("maxFileSize:".getBytes("UTF-8"));
  outStream.write((unknown ? unknownString : "" + maxFileSize).getBytes("UTF-8"));
  outStream.write(terminator);

  for (int k = 0; k < indent; k++) {
    outStream.write(Utilities.INDENT.getBytes("UTF-8"));
  }
  outStream.write("minFileSize:".getBytes("UTF-8"));
  if (numOfFiles > 0) {
    outStream.write((unknown ? unknownString : "" + minFileSize).getBytes("UTF-8"));
  } else {
    outStream.write((unknown ? unknownString : "" + 0).getBytes("UTF-8"));
  }
  outStream.write(terminator);

  for (int k = 0; k < indent; k++) {
    outStream.write(Utilities.INDENT.getBytes("UTF-8"));
  }
  outStream.write("lastAccessTime:".getBytes("UTF-8"));
  outStream.writeBytes((unknown || lastAccessTime < 0) ? unknownString : "" + lastAccessTime);
  outStream.write(terminator);

  for (int k = 0; k < indent; k++) {
    outStream.write(Utilities.INDENT.getBytes("UTF-8"));
  }
  outStream.write("lastUpdateTime:".getBytes("UTF-8"));
  outStream.write((unknown ? unknownString : "" + lastUpdateTime).getBytes("UTF-8"));
  outStream.write(terminator);
}
From source file: org.apache.hadoop.hive.ql.exec.DDLTask.java
public int showColumns(Hive db, ShowColumnsDesc showCols) throws HiveException {

  Table table = db.getTable(showCols.getTableName());

  // write the results in the file
  DataOutputStream outStream = getOutputStream(showCols.getResFile());
  try {
    List<FieldSchema> cols = table.getCols();
    cols.addAll(table.getPartCols());
    // In case the query is served by HiveServer2, don't pad it with spaces,
    // as HiveServer2 output is consumed by JDBC/ODBC clients.
    boolean isOutputPadded = !SessionState.get().isHiveServerQuery();
    outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation(cols, false, isOutputPadded, null));
  } catch (IOException e) {
    throw new HiveException(e, ErrorMsg.GENERIC_ERROR);
  } finally {
    IOUtils.closeStream(outStream);
  }
  return 0;
}
From source file: org.apache.hadoop.hive.ql.exec.DDLTask.java
/**
 * Write a list of the user defined functions to a file.
 * @param db
 *
 * @param showFuncs
 *          are the functions we're interested in.
 * @return Returns 0 when execution succeeds and above 0 if it fails.
 * @throws HiveException
 *           Throws this exception if an unexpected error occurs.
 */
private int showFunctions(Hive db, ShowFunctionsDesc showFuncs) throws HiveException {
  // get the functions for the desired pattern - populate the output stream
  Set<String> funcs = null;
  if (showFuncs.getPattern() != null) {
    LOG.info("pattern: " + showFuncs.getPattern());
    if (showFuncs.getIsLikePattern()) {
      funcs = FunctionRegistry.getFunctionNamesByLikePattern(showFuncs.getPattern());
    } else {
      console.printInfo("SHOW FUNCTIONS is deprecated, please use SHOW FUNCTIONS LIKE instead.");
      funcs = FunctionRegistry.getFunctionNames(showFuncs.getPattern());
    }
    LOG.info("results : " + funcs.size());
  } else {
    funcs = FunctionRegistry.getFunctionNames();
  }

  // write the results in the file
  DataOutputStream outStream = getOutputStream(showFuncs.getResFile());
  try {
    SortedSet<String> sortedFuncs = new TreeSet<String>(funcs);
    // To remove the primitive types
    sortedFuncs.removeAll(serdeConstants.PrimitiveTypes);
    Iterator<String> iterFuncs = sortedFuncs.iterator();

    while (iterFuncs.hasNext()) {
      // create a row per function name
      outStream.writeBytes(iterFuncs.next());
      outStream.write(terminator);
    }
  } catch (FileNotFoundException e) {
    LOG.warn("show function: " + stringifyException(e));
    return 1;
  } catch (IOException e) {
    LOG.warn("show function: " + stringifyException(e));
    return 1;
  } catch (Exception e) {
    throw new HiveException(e);
  } finally {
    IOUtils.closeStream(outStream);
  }
  return 0;
}
From source file: org.apache.hadoop.hive.ql.exec.DDLTask.java
/**
 * Shows a description of a function.
 * @param db
 *
 * @param descFunc
 *          is the function we are describing
 * @throws HiveException
 */
private int describeFunction(Hive db, DescFunctionDesc descFunc) throws HiveException, SQLException {
  String funcName = descFunc.getName();

  // write the results in the file
  DataOutputStream outStream = getOutputStream(descFunc.getResFile());
  try {
    // get the function documentation
    Description desc = null;
    Class<?> funcClass = null;
    FunctionInfo functionInfo = FunctionRegistry.getFunctionInfo(funcName);
    if (functionInfo != null) {
      funcClass = functionInfo.getFunctionClass();
    }
    if (funcClass != null) {
      desc = AnnotationUtils.getAnnotation(funcClass, Description.class);
    }
    if (desc != null) {
      outStream.writeBytes(desc.value().replace("_FUNC_", funcName));
      if (descFunc.isExtended()) {
        Set<String> synonyms = FunctionRegistry.getFunctionSynonyms(funcName);
        if (synonyms.size() > 0) {
          outStream.writeBytes("\nSynonyms: " + join(synonyms, ", "));
        }
        if (desc.extended().length() > 0) {
          outStream.writeBytes("\n" + desc.extended().replace("_FUNC_", funcName));
        }
      }
    } else {
      if (funcClass != null) {
        outStream.writeBytes("There is no documentation for function '" + funcName + "'");
      } else {
        outStream.writeBytes("Function '" + funcName + "' does not exist.");
      }
    }
    outStream.write(terminator);

    if (descFunc.isExtended()) {
      if (funcClass != null) {
        outStream.writeBytes("Function class:" + funcClass.getName() + "\n");
      }
      if (functionInfo != null) {
        outStream.writeBytes("Function type:" + functionInfo.getFunctionType() + "\n");
        FunctionResource[] resources = functionInfo.getResources();
        if (resources != null) {
          for (FunctionResource resource : resources) {
            outStream.writeBytes("Resource:" + resource.getResourceURI() + "\n");
          }
        }
      }
    }
  } catch (FileNotFoundException e) {
    LOG.warn("describe function: " + stringifyException(e));
    return 1;
  } catch (IOException e) {
    LOG.warn("describe function: " + stringifyException(e));
    return 1;
  } catch (Exception e) {
    throw new HiveException(e);
  } finally {
    IOUtils.closeStream(outStream);
  }
  return 0;
}
From source file: org.apache.hadoop.hive.ql.exec.DDLTask.java
/**
 * Write a list of the current locks to a file.
 * @param db
 *
 * @param showLocks
 *          the locks we're interested in.
 * @return Returns 0 when execution succeeds and above 0 if it fails.
 * @throws HiveException
 *           Throws this exception if an unexpected error occurs.
 */
private int showLocks(Hive db, ShowLocksDesc showLocks) throws HiveException {
  Context ctx = driverContext.getCtx();
  HiveTxnManager txnManager = ctx.getHiveTxnManager();
  HiveLockManager lockMgr = txnManager.getLockManager();

  if (txnManager.useNewShowLocksFormat())
    return showLocksNewFormat(showLocks, lockMgr);

  boolean isExt = showLocks.isExt();
  if (lockMgr == null) {
    throw new HiveException("show Locks LockManager not specified");
  }

  // write the results in the file
  DataOutputStream outStream = getOutputStream(showLocks.getResFile());
  try {
    List<HiveLock> locks = null;

    if (showLocks.getTableName() == null) {
      // TODO should be doing security check here. Users should not be
      // able to see each other's locks.
      locks = lockMgr.getLocks(false, isExt);
    } else {
      locks = lockMgr.getLocks(
          HiveLockObject.createFrom(db, showLocks.getTableName(), showLocks.getPartSpec()), true, isExt);
    }

    Collections.sort(locks, new Comparator<HiveLock>() {

      @Override
      public int compare(HiveLock o1, HiveLock o2) {
        int cmp = o1.getHiveLockObject().getName().compareTo(o2.getHiveLockObject().getName());
        if (cmp == 0) {
          if (o1.getHiveLockMode() == o2.getHiveLockMode()) {
            return cmp;
          }
          // EXCLUSIVE locks occur before SHARED locks
          if (o1.getHiveLockMode() == HiveLockMode.EXCLUSIVE) {
            return -1;
          }
          return +1;
        }
        return cmp;
      }

    });

    Iterator<HiveLock> locksIter = locks.iterator();

    while (locksIter.hasNext()) {
      HiveLock lock = locksIter.next();
      outStream.writeBytes(lock.getHiveLockObject().getDisplayName());
      outStream.write(separator);
      outStream.writeBytes(lock.getHiveLockMode().toString());
      if (isExt) {
        HiveLockObjectData lockData = lock.getHiveLockObject().getData();
        if (lockData != null) {
          outStream.write(terminator);
          outStream.writeBytes("LOCK_QUERYID:" + lockData.getQueryId());
          outStream.write(terminator);
          outStream.writeBytes("LOCK_TIME:" + lockData.getLockTime());
          outStream.write(terminator);
          outStream.writeBytes("LOCK_MODE:" + lockData.getLockMode());
          outStream.write(terminator);
          outStream.writeBytes("LOCK_QUERYSTRING:" + lockData.getQueryStr());
        }
      }
      outStream.write(terminator);
    }
  } catch (FileNotFoundException e) {
    LOG.warn("show locks: " + stringifyException(e));
    return 1;
  } catch (IOException e) {
    LOG.warn("show locks: " + stringifyException(e));
    return 1;
  } catch (Exception e) {
    throw new HiveException(e.toString(), e);
  } finally {
    IOUtils.closeStream(outStream);
  }
  return 0;
}