List of usage examples for java.lang.Float.NEGATIVE_INFINITY
public static final float NEGATIVE_INFINITY: the constant holding the negative infinity of type float, equal to Float.intBitsToFloat(0xff800000).
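NEGATIVE_INFINITY behaves as IEEE 754 negative infinity: it compares below every finite float, is produced by overflow and by division by zero, and is the natural seed for a running maximum. A minimal, self-contained sketch of these properties (plain JDK, independent of the examples below):

// Minimal sketch of Float.NEGATIVE_INFINITY semantics.
public class NegativeInfinityDemo {
    public static void main(String[] args) {
        float negInf = Float.NEGATIVE_INFINITY;

        // Compares below every finite float, so it is a safe "running max" seed.
        float max = negInf;
        for (float v : new float[] { -3.5f, 0f, 2.25f }) {
            max = Math.max(max, v);
        }
        System.out.println(max);                      // 2.25

        // Arithmetic follows IEEE 754: overflow and division both produce it.
        System.out.println(-Float.MAX_VALUE * 2f);    // -Infinity
        System.out.println(-1f / 0f);                 // -Infinity
        System.out.println(Float.isInfinite(negInf)); // true

        // Ordered comparisons with NaN are always false; test NaN with Float.isNaN.
        System.out.println(negInf < Float.NaN);       // false
    }
}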
From source file:com.xqdev.jam.MLJAM.java
private static void sendXQueryResponse(HttpServletResponse res, Object o) throws IOException {
    // Make sure to leave the status code alone. It defaults to 200, but sometimes
    // callers of this method will have set it to a custom code.
    res.setContentType("x-marklogic/xquery; charset=UTF-8");
    //res.setContentType("text/plain");

    Writer writer = res.getWriter(); // care to handle errors later?

    if (o == null) {
        writer.write("()");
    } else if (o instanceof byte[]) {
        writer.write("binary {'");
        writer.write(hexEncode((byte[]) o));
        writer.write("'}");
    } else if (o instanceof Object[]) {
        Object[] arr = (Object[]) o;
        writer.write("(");
        for (int i = 0; i < arr.length; i++) {
            sendXQueryResponse(res, arr[i]);
            if (i + 1 < arr.length)
                writer.write(", ");
        }
        writer.write(")");
    } else if (o instanceof String) {
        writer.write("'");
        writer.write(escapeSingleQuotes(o.toString()));
        writer.write("'");
    } else if (o instanceof Integer) {
        writer.write("xs:int(");
        writer.write(o.toString());
        writer.write(")");
    } else if (o instanceof Long) {
        writer.write("xs:integer(");
        writer.write(o.toString());
        writer.write(")");
    } else if (o instanceof Float) {
        Float flt = (Float) o;
        writer.write("xs:float(");
        if (flt.equals(Float.POSITIVE_INFINITY)) {
            writer.write("'INF'");
        } else if (flt.equals(Float.NEGATIVE_INFINITY)) {
            writer.write("'-INF'");
        } else if (flt.equals(Float.NaN)) {
            writer.write("fn:number(())"); // poor man's way to write NaN
        } else {
            writer.write(o.toString());
        }
        writer.write(")");
    } else if (o instanceof Double) {
        Double dbl = (Double) o;
        writer.write("xs:double(");
        if (dbl.equals(Double.POSITIVE_INFINITY)) {
            writer.write("'INF'");
        } else if (dbl.equals(Double.NEGATIVE_INFINITY)) {
            writer.write("'-INF'");
        } else if (dbl.equals(Double.NaN)) {
            writer.write("fn:number(())"); // poor man's way to write NaN
        } else {
            writer.write(o.toString());
        }
        writer.write(")");
    } else if (o instanceof Boolean) {
        writer.write("xs:boolean('");
        writer.write(o.toString());
        writer.write("')");
    } else if (o instanceof BigDecimal) {
        writer.write("xs:decimal(");
        writer.write(o.toString());
        writer.write(")");
    } else if (o instanceof Date) {
        // We want something like: 2006-04-30T01:28:30.499-07:00
        // We format to get:       2006-04-30T01:28:30.499-0700
        // Then we add in the colon
        writer.write("xs:dateTime('");
        String d = dateFormat.format((Date) o);
        writer.write(d.substring(0, d.length() - 2));
        writer.write(":");
        writer.write(d.substring(d.length() - 2));
        writer.write("')");
    } else if (o instanceof XMLGregorianCalendar) {
        XMLGregorianCalendar greg = (XMLGregorianCalendar) o;
        QName type = greg.getXMLSchemaType();
        if (type.equals(DatatypeConstants.DATETIME)) {
            writer.write("xs:dateTime('");
        } else if (type.equals(DatatypeConstants.DATE)) {
            writer.write("xs:date('");
        } else if (type.equals(DatatypeConstants.TIME)) {
            writer.write("xs:time('");
        } else if (type.equals(DatatypeConstants.GYEARMONTH)) {
            writer.write("xs:gYearMonth('");
        } else if (type.equals(DatatypeConstants.GMONTHDAY)) {
            writer.write("xs:gMonthDay('");
        } else if (type.equals(DatatypeConstants.GYEAR)) {
            writer.write("xs:gYear('");
        } else if (type.equals(DatatypeConstants.GMONTH)) {
            writer.write("xs:gMonth('");
        } else if (type.equals(DatatypeConstants.GDAY)) {
            writer.write("xs:gDay('");
        }
        writer.write(greg.toXMLFormat());
        writer.write("')");
    } else if (o instanceof Duration) {
        Duration dur = (Duration) o;
        /*
        // The following fails on Xerces
        QName type = dur.getXMLSchemaType();
        if (type.equals(DatatypeConstants.DURATION)) {
            writer.write("xs:duration('");
        } else if (type.equals(DatatypeConstants.DURATION_DAYTIME)) {
            writer.write("xdt:dayTimeDuration('");
        } else if (type.equals(DatatypeConstants.DURATION_YEARMONTH)) {
            writer.write("xdt:yearMonthDuration('");
        }
        */
        // If no years or months, must be DURATION_DAYTIME
        if (dur.getYears() == 0 && dur.getMonths() == 0) {
            writer.write("xdt:dayTimeDuration('");
        }
        // If has years or months but nothing else, must be DURATION_YEARMONTH
        else if (dur.getDays() == 0 && dur.getHours() == 0 && dur.getMinutes() == 0
                && dur.getSeconds() == 0) {
            writer.write("xdt:yearMonthDuration('");
        } else {
            writer.write("xs:duration('");
        }
        writer.write(dur.toString());
        writer.write("')");
    } else if (o instanceof org.jdom.Element) {
        org.jdom.Element elt = (org.jdom.Element) o;
        writer.write("xdmp:unquote('");
        // Because "&lt;" in XQuery is the same as "<", I need to double escape any ampersands
        writer.write(new org.jdom.output.XMLOutputter().outputString(elt)
                .replaceAll("&", "&amp;").replaceAll("'", "''"));
        writer.write("')/*"); // make sure to return the root elt
    } else if (o instanceof org.jdom.Document) {
        org.jdom.Document doc = (org.jdom.Document) o;
        writer.write("xdmp:unquote('");
        writer.write(new org.jdom.output.XMLOutputter().outputString(doc)
                .replaceAll("&", "&amp;").replaceAll("'", "''"));
        writer.write("')");
    } else if (o instanceof org.jdom.Text) {
        org.jdom.Text text = (org.jdom.Text) o;
        writer.write("text {'");
        writer.write(escapeSingleQuotes(text.getText()));
        writer.write("'}");
    } else if (o instanceof org.jdom.Attribute) {
        // <fake xmlns:pref="http://uri.com" pref:attrname="attrvalue"/>/@*:attrname
        // <fake xmlns="http://uri.com" attrname="attrvalue"/>/@*:attrname
        org.jdom.Attribute attr = (org.jdom.Attribute) o;
        writer.write("<fake xmlns");
        if ("".equals(attr.getNamespacePrefix())) {
            writer.write("=\"");
        } else {
            writer.write(":" + attr.getNamespacePrefix() + "=\"");
        }
        writer.write(attr.getNamespaceURI());
        writer.write("\" ");
        writer.write(attr.getQualifiedName());
        writer.write("=\"");
        writer.write(escapeSingleQuotes(attr.getValue()));
        writer.write("\"/>/@*:");
        writer.write(attr.getName());
    } else if (o instanceof org.jdom.Comment) {
        org.jdom.Comment com = (org.jdom.Comment) o;
        writer.write("comment {'");
        writer.write(escapeSingleQuotes(com.getText()));
        writer.write("'}");
    } else if (o instanceof org.jdom.ProcessingInstruction) {
        org.jdom.ProcessingInstruction pi = (org.jdom.ProcessingInstruction) o;
        writer.write("processing-instruction ");
        writer.write(pi.getTarget());
        writer.write(" {'");
        writer.write(escapeSingleQuotes(pi.getData()));
        writer.write("'}");
    } else if (o instanceof QName) {
        QName q = (QName) o;
        writer.write("fn:expanded-QName('");
        writer.write(escapeSingleQuotes(q.getNamespaceURI()));
        writer.write("','");
        writer.write(q.getLocalPart());
        writer.write("')");
    } else {
        writer.write("error('XQuery tried to retrieve unsupported type: "
                + o.getClass().getName() + "')");
    }
    writer.flush();
}
From source file:edu.umd.windmemory.RunPersonalizedPageRankBasic.java
private float[] phase1(int i, int j, String basePath, int numNodes, String sources) throws Exception {
    Job job = Job.getInstance(getConf());
    job.setJobName("PageRank:Basic:iteration" + j + ":Phase1");
    job.setJarByClass(RunPersonalizedPageRankBasic.class);

    String in = basePath + "/iter" + formatter.format(i);
    String out = basePath + "/iter" + formatter.format(j) + "t";
    String outm = out + "-mass";

    // We need to actually count the number of part files to get the number of
    // partitions (because the directory might contain _log).
    int numPartitions = 0;
    for (FileStatus s : FileSystem.get(getConf()).listStatus(new Path(in))) {
        if (s.getPath().getName().contains("part-"))
            numPartitions++;
    }

    LOG.info("PageRank: iteration " + j + ": Phase1");
    LOG.info(" - input: " + in);
    LOG.info(" - output: " + out);
    LOG.info(" - nodeCnt: " + numNodes);
    LOG.info(" - source nodes: " + sources);
    // LOG.info(" - useInmapCombiner: " + useInMapperCombiner);
    LOG.info("computed number of partitions: " + numPartitions);

    int numReduceTasks = numPartitions;

    job.getConfiguration().setInt("NodeCount", numNodes);
    job.getConfiguration().setBoolean("mapred.map.tasks.speculative.execution", false);
    job.getConfiguration().setBoolean("mapred.reduce.tasks.speculative.execution", false);
    //job.getConfiguration().set("mapred.child.java.opts", "-Xmx2048m");
    job.getConfiguration().set("PageRankMassPath", outm);
    job.getConfiguration().set("source.nodes", sources);

    job.setNumReduceTasks(numReduceTasks);

    FileInputFormat.setInputPaths(job, new Path(in));
    FileOutputFormat.setOutputPath(job, new Path(out));

    job.setInputFormatClass(NonSplitableSequenceFileInputFormat.class);
    job.setOutputFormatClass(SequenceFileOutputFormat.class);

    job.setMapOutputKeyClass(IntWritable.class);
    job.setMapOutputValueClass(PageRankNode.class);

    job.setOutputKeyClass(IntWritable.class);
    job.setOutputValueClass(PageRankNode.class);

    job.setMapperClass(MapClass.class);
    job.setReducerClass(ReduceClass.class);

    FileSystem.get(getConf()).delete(new Path(out), true);
    FileSystem.get(getConf()).delete(new Path(outm), true);

    long startTime = System.currentTimeMillis();
    job.waitForCompletion(true);
    System.out.println("Job Finished in " + (System.currentTimeMillis() - startTime) / 1000.0 + " seconds");

    // Accumulate the PageRank mass written by the reducers in log space, one slot
    // per source node; Float.NEGATIVE_INFINITY is log(0), the additive identity.
    int num = sources.split(",").length;
    float[] mass = new float[num];
    for (int m = 0; m < num; m++) {
        mass[m] = Float.NEGATIVE_INFINITY;
    }
    FileSystem fs = FileSystem.get(getConf());
    for (FileStatus f : fs.listStatus(new Path(outm))) {
        FSDataInputStream fin = fs.open(f.getPath());
        for (int m = 0; m < num; m++) {
            mass[m] = sumLogProbs(mass[m], fin.readFloat());
        }
        fin.close();
    }

    return mass;
}
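The sumLogProbs helper is not included in the listing. A plausible reconstruction is sketched below, assuming it performs the standard numerically stable log-space addition log(e^a + e^b) that its call sites and the NEGATIVE_INFINITY (= log 0) initialization suggest; the name and signature come from the code above, the body is an assumption. The same identity underlies the logAdd calls in the translation-table example further down.

// Hypothetical reconstruction of the sumLogProbs helper: adds two log-domain
// probabilities, computing log(exp(a) + exp(b)) without overflow or underflow.
private static float sumLogProbs(float a, float b) {
    // NEGATIVE_INFINITY represents log(0): adding it returns the other operand.
    if (a == Float.NEGATIVE_INFINITY)
        return b;
    if (b == Float.NEGATIVE_INFINITY)
        return a;
    // Factor out the larger exponent so the exp() argument is always <= 0.
    if (a > b) {
        return (float) (a + Math.log1p(Math.exp(b - a)));
    }
    return (float) (b + Math.log1p(Math.exp(a - b)));
}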
From source file:org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService.java
@Override
public AllocateResponse allocate(AllocateRequest request) throws YarnException, IOException {

    AMRMTokenIdentifier amrmTokenIdentifier = YarnServerSecurityUtils.authorizeRequest();

    ApplicationAttemptId appAttemptId = amrmTokenIdentifier.getApplicationAttemptId();
    ApplicationId applicationId = appAttemptId.getApplicationId();

    this.amLivelinessMonitor.receivedPing(appAttemptId);

    /* check if its in cache */
    AllocateResponseLock lock = responseMap.get(appAttemptId);
    if (lock == null) {
        String message = "Application attempt " + appAttemptId
                + " doesn't exist in ApplicationMasterService cache.";
        LOG.error(message);
        throw new ApplicationAttemptNotFoundException(message);
    }
    synchronized (lock) {
        AllocateResponse lastResponse = lock.getAllocateResponse();
        if (!hasApplicationMasterRegistered(appAttemptId)) {
            String message = "AM is not registered for known application attempt: " + appAttemptId
                    + " or RM had restarted after AM registered . AM should re-register.";
            LOG.info(message);
            RMAuditLogger.logFailure(
                    this.rmContext.getRMApps().get(appAttemptId.getApplicationId()).getUser(),
                    AuditConstants.AM_ALLOCATE, "", "ApplicationMasterService", message,
                    applicationId, appAttemptId);
            throw new ApplicationMasterNotRegisteredException(message);
        }

        if ((request.getResponseId() + 1) == lastResponse.getResponseId()) {
            /* old heartbeat */
            return lastResponse;
        } else if (request.getResponseId() + 1 < lastResponse.getResponseId()) {
            String message = "Invalid responseId in AllocateRequest from application attempt: "
                    + appAttemptId + ", expect responseId to be "
                    + (lastResponse.getResponseId() + 1);
            throw new InvalidApplicationMasterRequestException(message);
        }

        // filter illegal progress values
        float filteredProgress = request.getProgress();
        if (Float.isNaN(filteredProgress) || filteredProgress == Float.NEGATIVE_INFINITY
                || filteredProgress < 0) {
            request.setProgress(0);
        } else if (filteredProgress > 1 || filteredProgress == Float.POSITIVE_INFINITY) {
            request.setProgress(1);
        }

        // Send the status update to the appAttempt.
        this.rmContext.getDispatcher().getEventHandler()
                .handle(new RMAppAttemptStatusupdateEvent(appAttemptId, request.getProgress()));

        List<ResourceRequest> ask = request.getAskList();
        List<ContainerId> release = request.getReleaseList();

        ResourceBlacklistRequest blacklistRequest = request.getResourceBlacklistRequest();
        List<String> blacklistAdditions = (blacklistRequest != null)
                ? blacklistRequest.getBlacklistAdditions()
                : Collections.EMPTY_LIST;
        List<String> blacklistRemovals = (blacklistRequest != null)
                ? blacklistRequest.getBlacklistRemovals()
                : Collections.EMPTY_LIST;
        RMApp app = this.rmContext.getRMApps().get(applicationId);

        // set label expression for Resource Requests if resourceName=ANY
        ApplicationSubmissionContext asc = app.getApplicationSubmissionContext();
        for (ResourceRequest req : ask) {
            if (null == req.getNodeLabelExpression()
                    && ResourceRequest.ANY.equals(req.getResourceName())) {
                req.setNodeLabelExpression(asc.getNodeLabelExpression());
            }
        }

        Resource maximumCapacity = rScheduler.getMaximumResourceCapability();

        // sanity check
        try {
            RMServerUtils.normalizeAndValidateRequests(ask, maximumCapacity, app.getQueue(),
                    rScheduler, rmContext);
        } catch (InvalidResourceRequestException e) {
            LOG.warn("Invalid resource ask by application " + appAttemptId, e);
            throw e;
        }

        try {
            RMServerUtils.validateBlacklistRequest(blacklistRequest);
        } catch (InvalidResourceBlacklistRequestException e) {
            LOG.warn("Invalid blacklist request by application " + appAttemptId, e);
            throw e;
        }

        // In the case of work-preserving AM restart, it's possible for the
        // AM to release containers from the earlier attempt.
        if (!app.getApplicationSubmissionContext().getKeepContainersAcrossApplicationAttempts()) {
            try {
                RMServerUtils.validateContainerReleaseRequest(release, appAttemptId);
            } catch (InvalidContainerReleaseException e) {
                LOG.warn("Invalid container release by application " + appAttemptId, e);
                throw e;
            }
        }

        // Split Update Resource Requests into increase and decrease.
        // No Exceptions are thrown here. All update errors are aggregated
        // and returned to the AM.
        List<UpdateContainerRequest> increaseResourceReqs = new ArrayList<>();
        List<UpdateContainerRequest> decreaseResourceReqs = new ArrayList<>();
        List<UpdateContainerError> updateContainerErrors =
                RMServerUtils.validateAndSplitUpdateResourceRequests(rmContext, request,
                        maximumCapacity, increaseResourceReqs, decreaseResourceReqs);

        // Send new requests to appAttempt.
        Allocation allocation;
        RMAppAttemptState state = app.getRMAppAttempt(appAttemptId).getAppAttemptState();
        if (state.equals(RMAppAttemptState.FINAL_SAVING) || state.equals(RMAppAttemptState.FINISHING)
                || app.isAppFinalStateStored()) {
            LOG.warn(appAttemptId + " is in " + state + " state, ignore container allocate request.");
            allocation = EMPTY_ALLOCATION;
        } else {
            allocation = this.rScheduler.allocate(appAttemptId, ask, release, blacklistAdditions,
                    blacklistRemovals, increaseResourceReqs, decreaseResourceReqs);
        }

        if (!blacklistAdditions.isEmpty() || !blacklistRemovals.isEmpty()) {
            LOG.info("blacklist are updated in Scheduler." + "blacklistAdditions: "
                    + blacklistAdditions + ", " + "blacklistRemovals: " + blacklistRemovals);
        }
        RMAppAttempt appAttempt = app.getRMAppAttempt(appAttemptId);
        AllocateResponse allocateResponse = recordFactory.newRecordInstance(AllocateResponse.class);
        if (!allocation.getContainers().isEmpty()) {
            allocateResponse.setNMTokens(allocation.getNMTokens());
        }

        // Notify the AM of container update errors
        if (!updateContainerErrors.isEmpty()) {
            allocateResponse.setUpdateErrors(updateContainerErrors);
        }

        // update the response with the deltas of node status changes
        List<RMNode> updatedNodes = new ArrayList<RMNode>();
        if (app.pullRMNodeUpdates(updatedNodes) > 0) {
            List<NodeReport> updatedNodeReports = new ArrayList<NodeReport>();
            for (RMNode rmNode : updatedNodes) {
                SchedulerNodeReport schedulerNodeReport = rScheduler.getNodeReport(rmNode.getNodeID());
                Resource used = BuilderUtils.newResource(0, 0);
                int numContainers = 0;
                if (schedulerNodeReport != null) {
                    used = schedulerNodeReport.getUsedResource();
                    numContainers = schedulerNodeReport.getNumContainers();
                }
                NodeId nodeId = rmNode.getNodeID();
                NodeReport report = BuilderUtils.newNodeReport(nodeId, rmNode.getState(),
                        rmNode.getHttpAddress(), rmNode.getRackName(), used,
                        rmNode.getTotalCapability(), numContainers, rmNode.getHealthReport(),
                        rmNode.getLastHealthReportTime(), rmNode.getNodeLabels());

                updatedNodeReports.add(report);
            }
            allocateResponse.setUpdatedNodes(updatedNodeReports);
        }

        allocateResponse.setAllocatedContainers(allocation.getContainers());
        allocateResponse.setCompletedContainersStatuses(appAttempt.pullJustFinishedContainers());
        allocateResponse.setResponseId(lastResponse.getResponseId() + 1);
        allocateResponse.setAvailableResources(allocation.getResourceLimit());

        // Handling increased/decreased containers
        List<UpdatedContainer> updatedContainers = new ArrayList<>();
        if (allocation.getIncreasedContainers() != null) {
            for (Container c : allocation.getIncreasedContainers()) {
                updatedContainers.add(
                        UpdatedContainer.newInstance(ContainerUpdateType.INCREASE_RESOURCE, c));
            }
        }
        if (allocation.getDecreasedContainers() != null) {
            for (Container c : allocation.getDecreasedContainers()) {
                updatedContainers.add(
                        UpdatedContainer.newInstance(ContainerUpdateType.DECREASE_RESOURCE, c));
            }
        }
        allocateResponse.setUpdatedContainers(updatedContainers);

        allocateResponse.setNumClusterNodes(this.rScheduler.getNumClusterNodes());

        // add preemption to the allocateResponse message (if any)
        allocateResponse.setPreemptionMessage(generatePreemptionMessage(allocation));

        // Set application priority
        allocateResponse.setApplicationPriority(app.getApplicationSubmissionContext().getPriority());

        // update AMRMToken if the token is rolled-up
        MasterKeyData nextMasterKey = this.rmContext.getAMRMTokenSecretManager().getNextMasterKeyData();

        if (nextMasterKey != null
                && nextMasterKey.getMasterKey().getKeyId() != amrmTokenIdentifier.getKeyId()) {
            RMAppAttemptImpl appAttemptImpl = (RMAppAttemptImpl) appAttempt;
            Token<AMRMTokenIdentifier> amrmToken = appAttempt.getAMRMToken();
            if (nextMasterKey.getMasterKey().getKeyId() != appAttemptImpl.getAMRMTokenKeyId()) {
                LOG.info("The AMRMToken has been rolled-over. Send new AMRMToken back"
                        + " to application: " + applicationId);
                amrmToken = rmContext.getAMRMTokenSecretManager().createAndGetAMRMToken(appAttemptId);
                appAttemptImpl.setAMRMToken(amrmToken);
            }
            allocateResponse.setAMRMToken(org.apache.hadoop.yarn.api.records.Token.newInstance(
                    amrmToken.getIdentifier(), amrmToken.getKind().toString(),
                    amrmToken.getPassword(), amrmToken.getService().toString()));
        }

        /*
         * As we are updating the response inside the lock object so we don't
         * need to worry about unregister call occurring in between (which
         * removes the lock object).
         */
        lock.setAllocateResponse(allocateResponse);
        return allocateResponse;
    }
}
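The progress filter above is the NEGATIVE_INFINITY usage of interest: NaN fails every ordered comparison, so it must be caught with Float.isNaN before the range checks, while the explicit infinity tests spell out what the < 0 and > 1 comparisons would otherwise cover implicitly. The same logic as a standalone sketch (clampProgress is an illustrative name, not part of the YARN API):

// Illustrative helper (not part of YARN): clamp a reported progress value to
// [0, 1], mapping NaN and negative infinity to 0 and positive infinity to 1,
// mirroring the filter inside allocate() above.
static float clampProgress(float progress) {
    if (Float.isNaN(progress) || progress == Float.NEGATIVE_INFINITY || progress < 0f) {
        return 0f; // NaN fails every ordered comparison, so test it first
    }
    if (progress == Float.POSITIVE_INFINITY || progress > 1f) {
        return 1f;
    }
    return progress;
}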
From source file:nl.uva.illc.dataselection.InvitationModel.java
public static void updateTranslationTable(final int src[][], final int trg[][],
        final TranslationTable ttable, final float sPD[]) {
    jobs.execute(new Runnable() {
        @Override
        public void run() {
            log.info("Updating translation table ... ");
            TranslationTable counts = new TranslationTable();
            HashIntFloatMap totals = HashIntFloatMaps.newMutableMap();
            for (int sent = 0; sent < src.length; sent++) {
                if (sent % 100000 == 0)
                    log.debug("Sentence " + sent);
                if (ignore.containsKey(sent))
                    continue;
                if (sPD[sent] < CONF_THRESHOLD)
                    continue;
                int ssent[] = src[sent];
                int tsent[] = trg[sent];
                HashIntFloatMap s_total = HashIntFloatMaps.newMutableMap();
                // calculating normalization
                for (int t = 1; t < tsent.length; t++) {
                    int tw = tsent[t];
                    for (int s = 0; s < ssent.length; s++) {
                        int sw = ssent[s];
                        s_total.put(tw, logAdd(s_total.getOrDefault(tw, Float.NEGATIVE_INFINITY),
                                ttable.get(tw, sw, p)));
                    }
                }
                // collect counts
                for (int t = 1; t < tsent.length; t++) {
                    int tw = tsent[t];
                    for (int s = 0; s < ssent.length; s++) {
                        int sw = ssent[s];
                        float in_count = sPD[sent] + (ttable.get(tw, sw, p) - s_total.get(tw));
                        counts.put(tw, sw, logAdd(counts.get(tw, sw, Float.NEGATIVE_INFINITY), in_count));
                        totals.put(sw, logAdd(totals.getOrDefault(sw, Float.NEGATIVE_INFINITY), in_count));
                    }
                }
            }
            // maximization
            for (int tw : counts.ttable.keySet()) {
                HashIntFloatMap tMap = counts.ttable.get(tw);
                for (int sw : tMap.keySet()) {
                    float newProb = counts.get(tw, sw) - totals.get(sw);
                    ttable.put(tw, sw, newProb);
                }
            }
            log.info("Updating translation table DONE");
            InvitationModel.latch.countDown();
        }
    });
}
From source file:it.uniroma2.sag.kelp.learningalgorithm.classification.passiveaggressive.BudgetedPassiveAggressiveClassification.java
private void updateNearestNeighbors(int changedSvIndex) {
    List<SupportVector> svs = this.getPredictionFunction().getModel().getSupportVectors();
    Example newSv = svs.get(changedSvIndex).getInstance();
    float currentSimilarity;
    int changedSvNN = -1;
    // NEGATIVE_INFINITY guarantees the first computed similarity becomes the maximum.
    float maxSimilarityChangedSv = Float.NEGATIVE_INFINITY;
    for (int i = 0; i < this.budget; i++) {
        if (i == changedSvIndex) {
            continue;
        }
        Example currentExample = svs.get(i).getInstance();
        currentSimilarity = kernel.innerProduct(newSv, currentExample);
        if (currentSimilarity > maxSimilarityChangedSv) {
            maxSimilarityChangedSv = currentSimilarity;
            changedSvNN = i;
        }
        if (this.nearestNeighbors[i] == changedSvIndex) {
            // the nearest neighbor of the current one has been removed
            int nn2 = -1;
            float maxSimilarity2 = Float.NEGATIVE_INFINITY;
            for (int j = 0; j < this.budget; j++) {
                if (j == i) {
                    continue;
                }
                float currentSimilarity2 = kernel.innerProduct(svs.get(j).getInstance(), currentExample);
                if (currentSimilarity2 > maxSimilarity2) {
                    maxSimilarity2 = currentSimilarity2;
                    nn2 = j;
                }
            }
            this.nearestNeighbors[i] = nn2;
            this.nearestNeighborsSimilarity[i] = maxSimilarity2;
        } else if (currentSimilarity > this.nearestNeighborsSimilarity[i]) {
            // checking whether the new SV is closer than the old NN
            this.nearestNeighborsSimilarity[i] = currentSimilarity;
            this.nearestNeighbors[i] = changedSvIndex;
        }
    }
    this.nearestNeighbors[changedSvIndex] = changedSvNN;
    this.nearestNeighborsSimilarity[changedSvIndex] = maxSimilarityChangedSv;
}
From source file:RunPersonalizedPageRankBasic.java
private float phase1(int i, int j, String basePath, int numNodes, boolean useCombiner, String sources)
        throws Exception {
    Job job = Job.getInstance(getConf());
    job.setJobName("PageRank:Basic:iteration" + j + ":Phase1");
    job.setJarByClass(RunPersonalizedPageRankBasic.class);

    job.getConfiguration().setStrings("sources", sources);

    String in = basePath + "/iter" + formatter.format(i);
    String out = basePath + "/iter" + formatter.format(j) + "t";
    String outm = out + "-mass";

    // We need to actually count the number of part files to get the number of
    // partitions (because the directory might contain _log).
    int numPartitions = 0;
    for (FileStatus s : FileSystem.get(getConf()).listStatus(new Path(in))) {
        if (s.getPath().getName().contains("part-"))
            numPartitions++;
    }

    LOG.info("PageRank: iteration " + j + ": Phase1");
    LOG.info(" - input: " + in);
    LOG.info(" - output: " + out);
    LOG.info(" - nodeCnt: " + numNodes);
    LOG.info(" - useCombiner: " + useCombiner);
    LOG.info("computed number of partitions: " + numPartitions);

    int numReduceTasks = numPartitions;

    job.getConfiguration().setInt("NodeCount", numNodes);
    job.getConfiguration().setBoolean("mapred.map.tasks.speculative.execution", false);
    job.getConfiguration().setBoolean("mapred.reduce.tasks.speculative.execution", false);
    //job.getConfiguration().set("mapred.child.java.opts", "-Xmx2048m");
    job.getConfiguration().set("PageRankMassPath", outm);

    job.setNumReduceTasks(numReduceTasks);

    FileInputFormat.setInputPaths(job, new Path(in));
    FileOutputFormat.setOutputPath(job, new Path(out));

    job.setInputFormatClass(NonSplitableSequenceFileInputFormat.class);
    job.setOutputFormatClass(SequenceFileOutputFormat.class);

    job.setMapOutputKeyClass(IntWritable.class);
    job.setMapOutputValueClass(PageRankNodeExtended.class);

    job.setOutputKeyClass(IntWritable.class);
    job.setOutputValueClass(PageRankNodeExtended.class);

    job.setMapperClass(MapClass.class);
    if (useCombiner) {
        // job.setCombinerClass(CombineClass.class);
    }
    job.setReducerClass(ReduceClass.class);

    FileSystem.get(getConf()).delete(new Path(out), true);
    FileSystem.get(getConf()).delete(new Path(outm), true);

    long startTime = System.currentTimeMillis();
    job.waitForCompletion(true);
    System.out.println("Job Finished in " + (System.currentTimeMillis() - startTime) / 1000.0 + " seconds");

    // Combine the PageRank mass written by each reducer, accumulating in log
    // space; Float.NEGATIVE_INFINITY is log(0), the additive identity.
    float mass = Float.NEGATIVE_INFINITY;
    FileSystem fs = FileSystem.get(getConf());
    for (FileStatus f : fs.listStatus(new Path(outm))) {
        FSDataInputStream fin = fs.open(f.getPath());
        mass = sumLogProbs(mass, fin.readFloat());
        fin.close();
    }

    return mass;
}
From source file:RunPageRankBasic.java
private float phase1(int i, int j, String basePath, int numNodes, boolean useCombiner,
        boolean useInMapperCombiner) throws Exception {
    Job job = Job.getInstance(getConf());
    job.setJobName("PageRank:Basic:iteration" + j + ":Phase1");
    job.setJarByClass(RunPageRankBasic.class);

    String in = basePath + "/iter" + formatter.format(i);
    String out = basePath + "/iter" + formatter.format(j) + "t";
    String outm = out + "-mass";

    // We need to actually count the number of part files to get the number of
    // partitions (because the directory might contain _log).
    int numPartitions = 0;
    for (FileStatus s : FileSystem.get(getConf()).listStatus(new Path(in))) {
        if (s.getPath().getName().contains("part-"))
            numPartitions++;
    }

    LOG.info("PageRank: iteration " + j + ": Phase1");
    LOG.info(" - input: " + in);
    LOG.info(" - output: " + out);
    LOG.info(" - nodeCnt: " + numNodes);
    LOG.info(" - useCombiner: " + useCombiner);
    LOG.info(" - useInmapCombiner: " + useInMapperCombiner);
    LOG.info("computed number of partitions: " + numPartitions);

    int numReduceTasks = numPartitions;

    job.getConfiguration().setInt("NodeCount", numNodes);
    job.getConfiguration().setBoolean("mapred.map.tasks.speculative.execution", false);
    job.getConfiguration().setBoolean("mapred.reduce.tasks.speculative.execution", false);
    //job.getConfiguration().set("mapred.child.java.opts", "-Xmx2048m");
    job.getConfiguration().set("PageRankMassPath", outm);

    job.setNumReduceTasks(numReduceTasks);

    FileInputFormat.setInputPaths(job, new Path(in));
    FileOutputFormat.setOutputPath(job, new Path(out));

    job.setInputFormatClass(NonSplitableSequenceFileInputFormat.class);
    job.setOutputFormatClass(SequenceFileOutputFormat.class);

    job.setMapOutputKeyClass(IntWritable.class);
    job.setMapOutputValueClass(PageRankNode.class);

    job.setOutputKeyClass(IntWritable.class);
    job.setOutputValueClass(PageRankNode.class);

    job.setMapperClass(useInMapperCombiner ? MapWithInMapperCombiningClass.class : MapClass.class);
    if (useCombiner) {
        job.setCombinerClass(CombineClass.class);
    }
    job.setReducerClass(ReduceClass.class);

    FileSystem.get(getConf()).delete(new Path(out), true);
    FileSystem.get(getConf()).delete(new Path(outm), true);

    long startTime = System.currentTimeMillis();
    job.waitForCompletion(true);
    System.out.println("Job Finished in " + (System.currentTimeMillis() - startTime) / 1000.0 + " seconds");

    float mass = Float.NEGATIVE_INFINITY;
    FileSystem fs = FileSystem.get(getConf());
    for (FileStatus f : fs.listStatus(new Path(outm))) {
        FSDataInputStream fin = fs.open(f.getPath());
        mass = sumLogProbs(mass, fin.readFloat());
        fin.close();
    }

    return mass;
}
From source file:edu.umd.shrawanraina.RunPageRankBasic.java
private float phase1(int i, int j, String basePath, int numNodes, boolean useCombiner,
        boolean useInMapperCombiner) throws Exception {
    Job job = Job.getInstance(getConf());
    job.setJobName("PageRank:Basic:iteration" + j + ":Phase1");
    job.setJarByClass(RunPageRankBasic.class);

    String in = basePath + "/iter" + formatter.format(i);
    String out = basePath + "/iter" + formatter.format(j) + "t";
    String outm = out + "-mass";

    // We need to actually count the number of part files to get the number
    // of partitions (because the directory might contain _log).
    int numPartitions = 0;
    for (FileStatus s : FileSystem.get(getConf()).listStatus(new Path(in))) {
        if (s.getPath().getName().contains("part-"))
            numPartitions++;
    }

    LOG.info("PageRank: iteration " + j + ": Phase1");
    LOG.info(" - input: " + in);
    LOG.info(" - output: " + out);
    LOG.info(" - nodeCnt: " + numNodes);
    LOG.info(" - useCombiner: " + useCombiner);
    LOG.info(" - useInmapCombiner: " + useInMapperCombiner);
    LOG.info("computed number of partitions: " + numPartitions);

    int numReduceTasks = numPartitions;

    job.getConfiguration().setInt("NodeCount", numNodes);
    job.getConfiguration().setBoolean("mapred.map.tasks.speculative.execution", false);
    job.getConfiguration().setBoolean("mapred.reduce.tasks.speculative.execution", false);
    // job.getConfiguration().set("mapred.child.java.opts", "-Xmx2048m");
    job.getConfiguration().set("PageRankMassPath", outm);

    job.setNumReduceTasks(numReduceTasks);

    FileInputFormat.setInputPaths(job, new Path(in));
    FileOutputFormat.setOutputPath(job, new Path(out));

    job.setInputFormatClass(NonSplitableSequenceFileInputFormat.class);
    job.setOutputFormatClass(SequenceFileOutputFormat.class);

    job.setMapOutputKeyClass(IntWritable.class);
    job.setMapOutputValueClass(PageRankNode.class);

    job.setOutputKeyClass(IntWritable.class);
    job.setOutputValueClass(PageRankNode.class);

    job.setMapperClass(useInMapperCombiner ? MapWithInMapperCombiningClass.class : MapClass.class);
    if (useCombiner) {
        job.setCombinerClass(CombineClass.class);
    }
    job.setReducerClass(ReduceClass.class);

    FileSystem.get(getConf()).delete(new Path(out), true);
    FileSystem.get(getConf()).delete(new Path(outm), true);

    long startTime = System.currentTimeMillis();
    job.waitForCompletion(true);
    System.out.println("Job Finished in " + (System.currentTimeMillis() - startTime) / 1000.0 + " seconds");

    float mass = Float.NEGATIVE_INFINITY;
    FileSystem fs = FileSystem.get(getConf());
    for (FileStatus f : fs.listStatus(new Path(outm))) {
        FSDataInputStream fin = fs.open(f.getPath());
        mass = sumLogProbs(mass, fin.readFloat());
        fin.close();
    }

    return mass;
}
From source file:at.knowcenter.wag.egov.egiz.pdf.PDFPage.java
public static float findMaxX(Pos[] coordinates) {
    float max = Float.NEGATIVE_INFINITY;
    for (int i = 0; i < coordinates.length; i++) {
        if (coordinates[i].x > max) {
            max = coordinates[i].x;
        }
    }
    return max;
}
From source file:org.proteosuite.FastScatterPlot.java
/**
 * Calculates the Y data range.
 *
 * @param data  the data.
 *
 * @return The range.
 */
private Range calculateYDataRange(float[][] data) {
    Range result = null;
    if (data != null) {
        float lowest = Float.POSITIVE_INFINITY;
        float highest = Float.NEGATIVE_INFINITY;
        for (int i = 0; i < data[0].length; i++) {
            float v = data[1][i];
            if (v < lowest) {
                lowest = v;
            }
            if (v > highest) {
                highest = v;
            }
        }
        if (lowest <= highest) {
            result = new Range(lowest, highest);
        }
    }
    return result;
}
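Seeding lowest with POSITIVE_INFINITY and highest with NEGATIVE_INFINITY makes the final lowest <= highest test double as an emptiness check: with no data points (or only NaN values, which fail every comparison) the seeds never cross, and the method returns null. A small sketch of that behavior, using illustrative local variables rather than the FastScatterPlot API:

// Illustrative check: with no data points, the infinite seeds never cross,
// so a lowest <= highest guard rejects the would-be range.
float lowest = Float.POSITIVE_INFINITY;
float highest = Float.NEGATIVE_INFINITY;
for (float v : new float[0]) {         // empty series: loop body never runs
    lowest = Math.min(lowest, v);
    highest = Math.max(highest, v);
}
System.out.println(lowest <= highest); // false -> calculateYDataRange returns null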