Usage examples for java.util.ArrayList.toString()
public String toString()
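Before the examples, a minimal self-contained sketch of the behavior they all rely on: ArrayList inherits toString() from AbstractCollection, which renders the elements in iteration order, separated by ", " and enclosed in square brackets, converting each element with String.valueOf. The class name and values below are illustrative only.

import java.util.ArrayList;
import java.util.List;

public class ToStringDemo {
    public static void main(String[] args) {
        List<String> roles = new ArrayList<>();
        roles.add("access");
        roles.add("maintain");
        // AbstractCollection.toString() renders the list as "[access, maintain]"
        System.out.println(roles.toString());
        // An empty list renders as "[]"
        System.out.println(new ArrayList<Integer>());
    }
}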
From source file:org.sakaiproject.webservices.SakaiScript.java
/**
 * Copies a role from one authzgroup to another. Useful for mass population/synchronisation.
 *
 * The sessionid argument must be a valid session for a super user ONLY, otherwise it will fail.
 * The authzgroup arguments for sites should start with /site/SITEID, likewise for site groups etc.
 *
 * @param sessionid        the sessionid of a valid session for the admin user
 * @param authzgroupid1    the authzgroupid of the site you want to copy the role FROM
 * @param authzgroupid2    the authzgroupid of the site you want to copy the role TO
 * @param roleid           the id of the role you want to copy
 * @param description      the description of the new role
 * @param removeBeforeSync if synchronising roles, whether or not to remove the functions from the
 *                         target role before adding the set in. This means no additional permissions
 *                         will remain if they already exist in that role.
 * @return success or RuntimeException
 * @throws RuntimeException if not a super user, if the new role cannot be created, or if functions
 *                          differ after the new role is made
 */
@WebMethod
@Path("/copyRole2")
@Produces("text/plain")
@GET
public String copyRole2(
        @WebParam(name = "sessionid", partName = "sessionid") @QueryParam("sessionid") String sessionid,
        @WebParam(name = "authzgroupid1", partName = "authzgroupid1") @QueryParam("authzgroupid1") String authzgroupid1,
        @WebParam(name = "authzgroupid2", partName = "authzgroupid2") @QueryParam("authzgroupid2") String authzgroupid2,
        @WebParam(name = "roleid", partName = "roleid") @QueryParam("roleid") String roleid,
        @WebParam(name = "description", partName = "description") @QueryParam("description") String description,
        @WebParam(name = "removeBeforeSync", partName = "removeBeforeSync") @QueryParam("removeBeforeSync") boolean removeBeforeSync) {
    Session session = establishSession(sessionid);
    Set existingfunctions;
    Set newfunctions;
    Set existingroles;
    ArrayList existingroleids;
    Iterator iRoles;
    boolean createRole = false;
    Role role2;

    // check that ONLY super users are accessing this
    if (!securityService.isSuperUser(session.getUserId())) {
        LOG.warn("WS copyRole2(): Permission denied. Restricted to super users.");
        throw new RuntimeException("WS copyRole2(): Permission denied. Restricted to super users.");
    }

    try {
        // open authzgroup1
        AuthzGroup authzgroup1 = authzGroupService.getAuthzGroup(authzgroupid1);

        // get the role that we want to copy
        Role role1 = authzgroup1.getRole(roleid);

        // get the functions that are in this role
        existingfunctions = role1.getAllowedFunctions();
        LOG.warn("WS copyRole2(): existing functions in role " + roleid + " in " + authzgroupid1 + ": "
                + new ArrayList(existingfunctions).toString());

        // open authzgroup2
        AuthzGroup authzgroup2 = authzGroupService.getAuthzGroup(authzgroupid2);

        // get the roles in authzgroup2
        existingroles = authzgroup2.getRoles();
        existingroleids = new ArrayList();

        // iterate over the roles, get the roleId from each role, add to an arraylist for checking
        for (iRoles = existingroles.iterator(); iRoles.hasNext();) {
            Role existingrole = (Role) iRoles.next();
            existingroleids.add(existingrole.getId());
        }
        LOG.warn("WS copyRole2(): existing roles in " + authzgroupid2 + ": " + existingroleids.toString());

        // if this roleid already exists in the authzgroup...
        if (existingroleids.contains(roleid)) {
            LOG.warn("WS copyRole2(): role " + roleid + " exists in " + authzgroupid2
                    + ". This role will be updated.");
        } else {
            LOG.warn("WS copyRole2(): role " + roleid + " does not exist in " + authzgroupid2
                    + ". This role will be created.");

            // create this role in authzgroup2
            role2 = authzgroup2.addRole(roleid);

            // save authzgroup change
            authzGroupService.save(authzgroup2);

            // reopen authzgroup2 for checking
            authzgroup2 = authzGroupService.getAuthzGroup(authzgroupid2);

            // check the role was actually created by getting the set again and iterating
            existingroles = authzgroup2.getRoles();
            existingroleids = new ArrayList();
            for (iRoles = existingroles.iterator(); iRoles.hasNext();) {
                Role existingrole = (Role) iRoles.next();
                existingroleids.add(existingrole.getId());
            }
            LOG.warn("WS copyRole2(): existing roles in " + authzgroupid2 + " after addition: "
                    + existingroleids.toString());

            // if the role now exists, ok; else fault
            if (existingroleids.contains(roleid)) {
                LOG.warn("WS copyRole2(): role " + roleid + " was created in " + authzgroupid2 + ".");
            } else {
                LOG.warn("WS copyRole2(): role " + roleid + " could not be created in " + authzgroupid2 + ".");
                throw new RuntimeException(
                        "WS copyRole2(): role " + roleid + " could not be created in " + authzgroupid2 + ".");
            }
        }

        // get this role
        role2 = authzgroup2.getRole(roleid);

        // if removing permissions before syncing (SAK-18019)
        if (removeBeforeSync) {
            role2.disallowAll();
        }

        // add the Set of functions to this role
        role2.allowFunctions(existingfunctions);

        // set description
        role2.setDescription(description);

        // save authzgroup change
        authzGroupService.save(authzgroup2);

        // reopen authzgroup2 for checking
        authzgroup2 = authzGroupService.getAuthzGroup(authzgroupid2);

        // get the role we want to check
        role2 = authzgroup2.getRole(roleid);

        // get the Set of functions that are now in this role
        newfunctions = role2.getAllowedFunctions();

        // compare existingfunctions with newfunctions to see that they match
        if (newfunctions.containsAll(existingfunctions)) {
            LOG.warn("WS copyRole2(): functions added successfully to role " + roleid + " in "
                    + authzgroupid2 + ".");
        } else {
            LOG.warn("WS copyRole2(): functions in roles differ after addition.");
            throw new RuntimeException("WS copyRole2(): functions in roles differ after addition.");
        }
    } catch (Exception e) {
        return e.getClass().getName() + " : " + e.getMessage();
    }
    return "success";
}
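The toString() pattern worth noting above is new ArrayList(existingfunctions).toString(): a Set is copied into an ArrayList before logging. A small sketch of that pattern with hypothetical permission names; note that java.util sets already inherit the same bracketed toString() from AbstractCollection, so the copy mainly takes a stable snapshot of the set's contents at log time.

import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.Set;

public class LogSetDemo {
    public static void main(String[] args) {
        Set<String> functions = new LinkedHashSet<>();
        functions.add("site.visit");    // illustrative permission names
        functions.add("content.read");
        // Copying into an ArrayList snapshots the set; toString() then
        // renders "[site.visit, content.read]"
        System.out.println(new ArrayList<>(functions).toString());
    }
}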
From source file:Anaphora_Resolution.ParseAllXMLDocuments.java
public static Tree HobbsResolve(Tree pronoun, ArrayList<Tree> forest) {
    Tree wholetree = forest.get(forest.size() - 1); // The last one is the one I am going to start from
    ArrayList<Tree> candidates = new ArrayList<Tree>();
    List<Tree> path = wholetree.pathNodeToNode(wholetree, pronoun);
    System.out.println(path);

    // Step 1
    Tree ancestor = pronoun.parent(wholetree); // This locates the NP the pronoun is in, so we need one more "parenting"!

    // Step 2
    ancestor = ancestor.parent(wholetree);
    while (!ancestor.label().value().equals("NP") && !ancestor.label().value().equals("S"))
        ancestor = ancestor.parent(wholetree);
    Tree X = ancestor;
    path = X.pathNodeToNode(wholetree, pronoun);
    System.out.println(path);

    // Step 3
    for (Tree relative : X.children()) {
        for (Tree candidate : relative) {
            if (candidate.contains(pronoun))
                break; // only consider nodes to the LEFT of (i.e. coming before) the path leading to X; contains <-> in the path
            if ((candidate.parent(wholetree) != X) && (candidate.parent(wholetree).label().value().equals("NP")
                    || candidate.parent(wholetree).label().value().equals("S")))
                if (candidate.label().value().equals("NP"))
                    // "Propose as the antecedent any NP node that is encountered which has an NP or S node between it and X"
                    candidates.add(candidate);
        }
    }

    // Step 9 is a GOTO step 4, hence steps 4 to 8 are enveloped inside a while statement.
    while (true) { // It is NOT an infinite loop.
        // Step 4
        if (X.parent(wholetree) == wholetree) {
            for (int q = 1; q < MAXPREVSENTENCES; ++q) { // look for the previous sentences (hence we start with 1)
                if (forest.size() - 1 < q)
                    break; // If we don't have it, break
                Tree prevTree = forest.get(forest.size() - 1 - q); // go to the previous tree
                // Look at each S subtree, in order of recency (right-to-left, the opposite order of .children()).
                ArrayList<Tree> backlist = new ArrayList<Tree>();
                for (Tree child : prevTree.children()) {
                    for (Tree subtree : child) {
                        if (subtree.label().value().equals("S")) {
                            backlist.add(child);
                            break;
                        }
                    }
                }
                for (int i = backlist.size() - 1; i >= 0; --i) {
                    Tree Treetovisit = backlist.get(i);
                    for (Tree relative : Treetovisit.children()) {
                        for (Tree candidate : relative) {
                            if (candidate.contains(pronoun))
                                continue; // only consider nodes to the LEFT of the path leading to X
                            if (candidate.label().value().equals("NP")) {
                                // "Propose as the antecedent any NP node that you find"
                                if (!candidates.contains(candidate))
                                    candidates.add(candidate);
                            }
                        }
                    }
                }
            }
            break; // It will always come here eventually
        }

        // Step 5
        ancestor = X.parent(wholetree);
        while (!ancestor.label().value().equals("NP") && !ancestor.label().value().equals("S"))
            ancestor = ancestor.parent(wholetree);
        X = ancestor;

        // Step 6
        if (X.label().value().equals("NP")) { // If X is an NP
            for (Tree child : X.children()) { // Find the nominal nodes that X directly dominates
                if (child.label().value().equals("NN") || child.label().value().equals("NNS")
                        || child.label().value().equals("NNP") || child.label().value().equals("NNPS"))
                    if (!child.contains(pronoun))
                        candidates.add(X); // If one of them is not in the path between X and the pronoun, add X to the antecedents
            }
        }

        // Step 7
        for (Tree relative : X.children()) {
            for (Tree candidate : relative) {
                if (candidate.contains(pronoun))
                    continue; // only consider nodes to the LEFT of the path leading to X
                if (candidate.label().value().equals("NP")) { // "Propose as the antecedent any NP node that you find"
                    boolean contains = false;
                    for (Tree oldercandidate : candidates) {
                        if (oldercandidate.contains(candidate)) {
                            contains = true;
                            break;
                        }
                    }
                    if (!contains)
                        candidates.add(candidate);
                }
            }
        }

        // Step 8
        if (X.label().value().equals("S")) {
            boolean right = false; // Now we want all branches to the RIGHT of the path pronoun -> X.
            for (Tree relative : X.children()) {
                if (relative.contains(pronoun)) {
                    right = true;
                    continue;
                }
                if (!right)
                    continue;
                for (Tree child : relative) { // Go in, but do not go below any NP or S node. Go below the rest.
                    if (child.label().value().equals("NP")) {
                        candidates.add(child);
                        break; // avoid going below this NP but continue with the rest of the non-NP children (DFS)
                    }
                    if (child.label().value().equals("S"))
                        break; // Same
                }
            }
        }
    } // Step 9 is a GOTO, so we use a while.

    System.out.println(pronoun + ": CHAIN IS " + candidates.toString());

    ArrayList<Integer> scores = new ArrayList<Integer>();
    for (int j = 0; j < candidates.size(); ++j) {
        Tree candidate = candidates.get(j);
        Tree parent = null;
        int parent_index = 0;
        for (Tree tree : forest) {
            if (tree.contains(candidate)) {
                parent = tree;
                break;
            }
            ++parent_index;
        }
        scores.add(0);
        if (parent_index == 0)
            scores.set(j, scores.get(j) + 100); // If in the last sentence, +100 points
        scores.set(j, scores.get(j) + syntacticScore(candidate, parent));
        if (existentialEmphasis(candidate)) // Example: "There was a dog standing outside"
            scores.set(j, scores.get(j) + 70);
        if (!adverbialEmphasis(candidate, parent))
            scores.set(j, scores.get(j) + 50);
        if (headNounEmphasis(candidate, parent))
            scores.set(j, scores.get(j) + 80);
        int sz = forest.size() - 1;
        // pronoun is in sentence sz; candidate is in sentence parent_index
        int dividend = 1;
        for (int u = 0; u < sz - parent_index; ++u)
            dividend *= 2;
        scores.set(j, scores.get(j) / dividend);
        System.out.println(candidate + " -> " + scores.get(j));
    }

    int max = -1;
    int max_index = -1;
    for (int i = 0; i < scores.size(); ++i) {
        if (scores.get(i) > max) {
            max_index = i;
            max = scores.get(i);
        }
    }
    Tree final_candidate = candidates.get(max_index);
    System.out.println("My decision for " + pronoun + " is: " + final_candidate);

    // Decide the candidate, with both gender resolution and Lappin and Leass ranking.
    Tree pronounparent = pronoun.parent(wholetree).parent(wholetree); // one parent gives me the NP of the pronoun
    int pos = 0;
    for (Tree sibling : pronounparent.children()) {
        System.out.println("Sibling " + pos + ": " + sibling);
        if (sibling.contains(pronoun))
            break;
        ++pos;
    }
    System.out.println("Before setchild: " + pronounparent);
    @SuppressWarnings("unused")
    Tree returnval = pronounparent.setChild(pos, final_candidate);
    System.out.println("After setchild: " + pronounparent);

    return wholetree; // wholetree is already modified, since it contains pronounparent
}
From source file:edu.fullerton.viewerplugin.SpectrumPlot.java
/**
 * Use the external program genPlot.py to generate the graph.
 * @param dbufs input spectra
 * @param compact minimize labels because the output image will be small
 * @return list of image IDs added to the database
 */
private ArrayList<Integer> makeAddPlotFiles(ArrayList<ChanDataBuffer> dbufs, boolean compact)
        throws WebUtilException, LdvTableException {
    ExternalProgramManager epm = new ExternalProgramManager();
    ArrayList<Integer> ret = new ArrayList<>();
    float fsMax = 0;
    try {
        ArrayList<String> cmd = new ArrayList<>();
        ArrayList<genPlotInfo> spectra = new ArrayList<>();
        File tempDir = epm.getTempDir("sp_");
        File outFile = epm.getTempFile("sp_plot_", ".png");

        // we need to know for labeling
        boolean sameChannel = true;
        boolean sameTime = true;
        boolean multiPlot = dbufs.size() > 1;
        Set<ChanInfo> cis = new TreeSet<>();
        Set<TimeInterval> tis = new TreeSet<>();
        if (multiPlot) {
            for (ChanDataBuffer buf : dbufs) {
                cis.add(buf.getChanInfo());
                tis.add(buf.getTimeInterval());
            }
            sameChannel = cis.size() == 1;
            sameTime = tis.size() == 1;
        }

        for (ChanDataBuffer buf : dbufs) {
            double[][] spectrum = calcSpectrum(buf);
            File spFile = epm.writeTempCSV("Spectrum_", spectrum);
            String fTitle = "";
            String fLegend = "";
            float fs = buf.getChanInfo().getRate();
            fsMax = Math.max(fsMax, fs);
            if (!sameChannel) {
                String chanName = buf.getChanInfo().getChanName();
                String chanFs;
                if (fs >= 1) {
                    chanFs = String.format("%1$.0f Hz", fs);
                } else {
                    chanFs = String.format("%1$.3f Hz", fs);
                }
                if (fTitle.length() > 0) {
                    fTitle += ", ";
                }
                fLegend += String.format("%1$s at %2$s ", chanName, chanFs);
            }
            if (!sameTime) {
                long gps = buf.getTimeInterval().getStartGps();
                String utc = TimeAndDate.gpsAsUtcString(gps);
                fLegend += String.format("%1$s (%2$d)", utc, gps);
            }
            spectra.add(new genPlotInfo(spFile, fTitle, fLegend));
        }

        File outImg = epm.getTempFile("spPlot_", ".png");
        cmd.add("/usr/local/ldvw/bin/genPlot.py");

        // add input files
        for (genPlotInfo gpi : spectra) {
            File f = gpi.spFile;
            cmd.add("--infile");
            cmd.add(f.getCanonicalPath());
            if (!sameChannel || !sameTime) {
                if (gpi.title.length() > 0) {
                    cmd.add("--title");
                    cmd.add(gpi.title);
                }
                if (gpi.legend.length() > 0) {
                    cmd.add("--legend");
                    cmd.add(gpi.legend);
                }
            }
        }

        // add the output file
        cmd.add("--out");
        cmd.add(outFile.getCanonicalPath());

        // add options
        if (parameterMap.containsKey("sp_logy")) {
            cmd.add("--logy");
        }
        if (parameterMap.containsKey("sp_logx")) {
            cmd.add("--logx");
        }
        if (height > 100 && width > 100) {
            cmd.add("--geometry");
            cmd.add(String.format("%1$dx%2$d", width, height));
        }
        if (fmin > 0) {
            cmd.add("--xmin");
            cmd.add(String.format("%1$.2f", fmin));
        }
        if (fmax < fsMax && fmax > 0) {
            cmd.add("--xmax");
            cmd.add(String.format("%1$.2f", fmax));
        }

        // add the super title
        String supTitle = "Spectrum plot";
        long dur = dbufs.get(0).getTimeInterval().getDuration();
        String durStr = String.format("%1$,d s", dur);
        if (dur >= 3600) {
            durStr = TimeAndDate.hrTime(dur);
        }
        if (!multiPlot) {
            supTitle = getTitle(dbufs, false);
        } else if (!sameChannel && sameTime) {
            long gps = dbufs.get(0).getTimeInterval().getStartGps();
            supTitle = String.format("%1$s (%2$d) t = %3$s", TimeAndDate.gpsAsUtcString(gps), gps, durStr);
        } else if (sameChannel && !sameTime) {
            String chanFs;
            float fs = dbufs.get(0).getChanInfo().getRate();
            if (fs > 1) {
                chanFs = String.format("%1$.0f", fs);
            } else {
                chanFs = String.format("%1$.3f", fs);
            }
            supTitle = String.format("%1$s at %2$s Hz, t=%3$s", dbufs.get(0).getChanInfo().getChanName(),
                    chanFs, durStr);
        } else if (!sameChannel && !sameTime) {
            supTitle = "Spectrum plot";
        }
        cmd.add("--suptitle");
        cmd.add(supTitle);

        // axis labels
        DecimalFormat dform = new DecimalFormat("0.0###");
        float bw = 1 / secperfft;
        cmd.add("--xlabel");
        cmd.add(String.format("Frequency Hz, bw: %1$s, " + "fft: %2$,d, s/fft: %3$.2f, ov: %4$.2f",
                dform.format(bw), nfft, secperfft, overlap));
        pwrScale.setTex(true);
        cmd.add("--ylabel");
        cmd.add(pwrScale.toString());

        if (epm.runExternalProgram(cmd)) {
            int imgId = epm.addImg2Db(outFile, db, vuser.getCn());
            ret.add(imgId);
        } else {
            vpage.add("Problem generating plot of spectrum.");
            vpage.addBlankLines(2);
            vpage.add("Command line: ");
            vpage.addBlankLines(1);
            vpage.add(new PageItemString(cmd.toString(), false));
            vpage.addBlankLines(2);
            vpage.add("Stderr:");
            vpage.addBlankLines(1);
            vpage.add(new PageItemString(epm.getStderr(), false));
            vpage.addBlankLines(2);
            vpage.add("Stdout:");
            vpage.addBlankLines(1);
            vpage.add(new PageItemString(epm.getStdout(), false));
            vpage.addBlankLines(2);
        }
    } catch (IOException ex) {
        throw new WebUtilException("Spectrum plot (genPlot.py):", ex);
    } finally {
        epm.removeTemps();
    }
    return ret;
}
From source file:com.all4tec.sa.maven.proguard.ProGuardMojo.java
public void execute() throws MojoExecutionException, MojoFailureException {
    log = getLog();
    if (skip) {
        log.info("Bypass ProGuard processing because \"proguard.skip=true\"");
        return;
    }
    boolean mainIsJar = mavenProject.getPackaging().equals("jar");
    boolean mainIsPom = mavenProject.getPackaging().equals("pom");

    File inJarFile = new File(outputDirectory, injar);
    if (mainIsJar && (!inJarFile.exists())) {
        if (injarNotExistsSkip) {
            log.info("Bypass ProGuard processing because \"injar\" does not exist");
            return;
        }
        throw new MojoFailureException("Can't find file " + inJarFile);
    }
    if (mainIsPom && (!inJarFile.exists()) && injarNotExistsSkip) {
        log.info("Bypass ProGuard processing because \"injar\" does not exist");
        return;
    }

    if (!outputDirectory.exists()) {
        if (!outputDirectory.mkdirs()) {
            throw new MojoFailureException("Can't create " + outputDirectory);
        }
    }

    File outJarFile;
    boolean sameArtifact;

    if (attach) {
        outjar = nameNoType(injar);
        if (useArtifactClassifier()) {
            outjar += "-" + attachArtifactClassifier;
        }
        outjar += "." + attachArtifactType;
    }

    if ((outjar != null) && (!outjar.equals(injar))) {
        sameArtifact = false;
        outJarFile = (new File(outputDirectory, outjar)).getAbsoluteFile();
        if (outJarFile.exists()) {
            if (!deleteFileOrDirectory(outJarFile)) {
                throw new MojoFailureException("Can't delete " + outJarFile);
            }
        }
    } else {
        sameArtifact = true;
        outJarFile = inJarFile.getAbsoluteFile();
        File baseFile;
        if (inJarFile.isDirectory()) {
            baseFile = new File(outputDirectory, nameNoType(injar) + "_proguard_base");
        } else {
            baseFile = new File(outputDirectory, nameNoType(injar) + "_proguard_base.jar");
        }
        if (baseFile.exists()) {
            if (!deleteFileOrDirectory(baseFile)) {
                throw new MojoFailureException("Can't delete " + baseFile);
            }
        }
        if (inJarFile.exists()) {
            if (!inJarFile.renameTo(baseFile)) {
                throw new MojoFailureException("Can't rename " + inJarFile);
            }
        }
        inJarFile = baseFile;
    }

    ArrayList<String> args = new ArrayList<String>();

    if (log.isDebugEnabled()) {
        List dependency = mavenProject.getCompileArtifacts();
        for (Iterator i = dependency.iterator(); i.hasNext();) {
            Artifact artifact = (Artifact) i.next();
            log.debug("--- compile artifact " + artifact.getGroupId() + ":" + artifact.getArtifactId() + ":"
                    + artifact.getType() + ":" + artifact.getClassifier() + " Scope:" + artifact.getScope());
        }
        for (Iterator i = mavenProject.getArtifacts().iterator(); i.hasNext();) {
            Artifact artifact = (Artifact) i.next();
            log.debug("--- artifact " + artifact.getGroupId() + ":" + artifact.getArtifactId() + ":"
                    + artifact.getType() + ":" + artifact.getClassifier() + " Scope:" + artifact.getScope());
        }
        for (Iterator i = mavenProject.getDependencies().iterator(); i.hasNext();) {
            Dependency artifact = (Dependency) i.next();
            log.debug("--- dependency " + artifact.getGroupId() + ":" + artifact.getArtifactId() + ":"
                    + artifact.getType() + ":" + artifact.getClassifier() + " Scope:" + artifact.getScope());
        }
    }

    Set inPath = new HashSet();
    boolean hasInclusionLibrary = false;
    if (assembly != null) {
        for (Iterator iter = assembly.inclusions.iterator(); iter.hasNext();) {
            Inclusion inc = (Inclusion) iter.next();
            if (!inc.library) {
                File file = getClasspathElement(getDependancy(inc, mavenProject), mavenProject);
                inPath.add(file.toString());
                log.debug("--- ADD injars:" + inc.artifactId);
                StringBuffer filter = new StringBuffer(fileToString(file));
                filter.append("(!META-INF/MANIFEST.MF");
                if (!addMavenDescriptor) {
                    filter.append(",");
                    filter.append("!META-INF/maven/**");
                }
                if (inc.filter != null) {
                    filter.append(",").append(inc.filter);
                }
                filter.append(")");
                args.add("-injars");
                args.add(filter.toString());
            } else {
                hasInclusionLibrary = true;
                log.debug("--- ADD libraryjars:" + inc.artifactId);
                // This may not be CompileArtifacts, maven 2.0.6 bug
                File file = getClasspathElement(getDependancy(inc, mavenProject), mavenProject);
                inPath.add(file.toString());
                args.add("-libraryjars");
                args.add(fileToString(file));
            }
        }
    }

    if ((!mainIsPom) && inJarFile.exists()) {
        args.add("-injars");
        StringBuffer filter = new StringBuffer(fileToString(inJarFile));
        if ((inFilter != null) || (!addMavenDescriptor)) {
            filter.append("(");
            boolean comma = false;
            if (!addMavenDescriptor) {
                comma = true;
                filter.append("!META-INF/maven/**");
            }
            if (inFilter != null) {
                if (comma) {
                    filter.append(",");
                }
                filter.append(inFilter);
            }
            filter.append(")");
        }
        args.add(filter.toString());
    }

    args.add("-outjars");
    args.add(fileToString(outJarFile));

    if (!obfuscate) {
        args.add("-dontobfuscate");
    }

    if (proguardInclude != null) {
        if (proguardInclude.exists()) {
            args.add("-include");
            args.add(fileToString(proguardInclude));
            log.debug("proguardInclude " + proguardInclude);
        } else {
            log.debug("proguardInclude config does not exist " + proguardInclude);
        }
    }

    if (includeDependency) {
        List dependency = this.mavenProject.getCompileArtifacts();
        for (Iterator i = dependency.iterator(); i.hasNext();) {
            Artifact artifact = (Artifact) i.next();
            // dependency filter
            if (isExclusion(artifact)) {
                continue;
            }
            File file = getClasspathElement(artifact, mavenProject);
            if (inPath.contains(file.toString())) {
                log.debug("--- ignore libraryjars since one in injar:" + artifact.getArtifactId());
                continue;
            }
            log.debug("--- ADD libraryjars:" + artifact.getArtifactId());
            args.add("-libraryjars");
            args.add(fileToString(file));
        }
    }

    if (libs != null) {
        for (Iterator i = libs.iterator(); i.hasNext();) {
            Object lib = i.next();
            args.add("-libraryjars");
            args.add(fileNameToString(lib.toString()));
        }
    }

    args.add("-printmapping");
    args.add(fileToString((new File(outputDirectory, "proguard_map.txt").getAbsoluteFile())));

    args.add("-printseeds");
    args.add(fileToString((new File(outputDirectory, "proguard_seeds.txt").getAbsoluteFile())));

    if (log.isDebugEnabled()) {
        args.add("-verbose");
    }

    if (options != null) {
        for (int i = 0; i < options.length; i++) {
            args.add(options[i]);
        }
    }

    // Check whether the args should be moved into a ProGuard configuration file:
    // if the total size of the args is more than 32k, the process launch will fail.
    File vTempFile = null;
    if (writeCommandLineToFile) {
        log.info("Transform command line in file configuration");
        vTempFile = createFileConfiguration(args, mavenProject, outputDirectory);
        // Remove all args, and add just the path to the ProGuard configuration file just created
        args.clear();
        args.add("@" + vTempFile.getAbsolutePath());
        log.info("Configuration file created : " + vTempFile.getAbsolutePath());
    }

    log.info("execute ProGuard " + args.toString());
    proguardMain(getProguardJar(this), args, this);

    if ((assembly != null) && (hasInclusionLibrary)) {
        log.info("creating assembly");
        File baseFile = new File(outputDirectory, nameNoType(injar) + "_proguard_result.jar");
        if (baseFile.exists()) {
            if (!baseFile.delete()) {
                throw new MojoFailureException("Can't delete " + baseFile);
            }
        }
        File archiverFile = outJarFile.getAbsoluteFile();
        if (!outJarFile.renameTo(baseFile)) {
            throw new MojoFailureException("Can't rename " + outJarFile);
        }
        MavenArchiver archiver = new MavenArchiver();
        archiver.setArchiver(jarArchiver);
        archiver.setOutputFile(archiverFile);
        archive.setAddMavenDescriptor(addMavenDescriptor);
        try {
            jarArchiver.addArchivedFileSet(baseFile);
            for (Iterator iter = assembly.inclusions.iterator(); iter.hasNext();) {
                Inclusion inc = (Inclusion) iter.next();
                if (inc.library) {
                    File file;
                    Artifact artifact = getDependancy(inc, mavenProject);
                    file = getClasspathElement(artifact, mavenProject);
                    if (file.isDirectory()) {
                        getLog().info("merge project: " + artifact.getArtifactId() + " " + file);
                        jarArchiver.addDirectory(file);
                    } else {
                        getLog().info("merge artifact: " + artifact.getArtifactId());
                        jarArchiver.addArchivedFileSet(file);
                    }
                }
            }
            archiver.createArchive(mavenProject, archive);
        } catch (Exception e) {
            throw new MojoExecutionException("Unable to create jar", e);
        }
    }

    if (attach && !sameArtifact) {
        if (useArtifactClassifier()) {
            projectHelper.attachArtifact(mavenProject, attachArtifactType, attachArtifactClassifier, outJarFile);
        } else {
            projectHelper.attachArtifact(mavenProject, attachArtifactType, null, outJarFile);
        }
    }
}
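The log line above, log.info("execute ProGuard " + args.toString()), prints the ArrayList<String> of arguments in bracketed form ([-injars, ..., -outjars, ...]). That rendering is for display only; the list elements themselves are what get handed to ProGuard. A small sketch of the difference, using made-up argument values:

import java.util.ArrayList;
import java.util.List;

public class ArgsLogDemo {
    public static void main(String[] args) {
        List<String> proguardArgs = new ArrayList<>();
        proguardArgs.add("-injars");
        proguardArgs.add("app.jar"); // hypothetical jar name
        proguardArgs.add("-dontobfuscate");
        // For logging: prints "execute ProGuard [-injars, app.jar, -dontobfuscate]"
        System.out.println("execute ProGuard " + proguardArgs.toString());
        // For an actual command line, join the elements instead:
        System.out.println(String.join(" ", proguardArgs));
    }
}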
From source file:org.apache.sysml.lops.compile.Dag.java
/**
 * Method to generate MapReduce job instructions from a given set of nodes.
 *
 * @param execNodes list of exec nodes
 * @param inst list of instructions
 * @param writeinst list of write instructions
 * @param deleteinst list of delete instructions
 * @param rmvarinst list of rmvar instructions
 * @param jt job type
 * @throws LopsException if LopsException occurs
 * @throws DMLRuntimeException if DMLRuntimeException occurs
 */
private void generateMapReduceInstructions(ArrayList<Lop> execNodes, ArrayList<Instruction> inst,
        ArrayList<Instruction> writeinst, ArrayList<Instruction> deleteinst, ArrayList<Instruction> rmvarinst,
        JobType jt) throws LopsException, DMLRuntimeException {
    ArrayList<Byte> resultIndices = new ArrayList<Byte>();
    ArrayList<String> inputs = new ArrayList<String>();
    ArrayList<String> outputs = new ArrayList<String>();
    ArrayList<InputInfo> inputInfos = new ArrayList<InputInfo>();
    ArrayList<OutputInfo> outputInfos = new ArrayList<OutputInfo>();
    ArrayList<Long> numRows = new ArrayList<Long>();
    ArrayList<Long> numCols = new ArrayList<Long>();
    ArrayList<Long> numRowsPerBlock = new ArrayList<Long>();
    ArrayList<Long> numColsPerBlock = new ArrayList<Long>();
    ArrayList<String> mapperInstructions = new ArrayList<String>();
    ArrayList<String> randInstructions = new ArrayList<String>();
    ArrayList<String> recordReaderInstructions = new ArrayList<String>();
    int numReducers = 0;
    int replication = 1;
    ArrayList<String> inputLabels = new ArrayList<String>();
    ArrayList<String> outputLabels = new ArrayList<String>();
    ArrayList<Instruction> renameInstructions = new ArrayList<Instruction>();
    ArrayList<Instruction> variableInstructions = new ArrayList<Instruction>();
    ArrayList<Instruction> postInstructions = new ArrayList<Instruction>();
    ArrayList<Integer> MRJobLineNumbers = null;
    if (DMLScript.ENABLE_DEBUG_MODE) {
        MRJobLineNumbers = new ArrayList<Integer>();
    }
    ArrayList<Lop> inputLops = new ArrayList<Lop>();
    boolean cellModeOverride = false;

    /* Find the nodes that produce an output */
    ArrayList<Lop> rootNodes = new ArrayList<Lop>();
    getOutputNodes(execNodes, rootNodes, jt);
    if (LOG.isTraceEnabled())
        LOG.trace("# of root nodes = " + rootNodes.size());

    /* Remove transient writes that are simple copies of transient reads */
    if (jt == JobType.GMR || jt == JobType.GMRCELL) {
        ArrayList<Lop> markedNodes = new ArrayList<Lop>();
        // only keep data nodes that are results of some computation
        for (Lop rnode : rootNodes) {
            if (rnode.getExecLocation() == ExecLocation.Data && ((Data) rnode).isTransient()
                    && ((Data) rnode).getOperationType() == OperationTypes.WRITE
                    && ((Data) rnode).getDataType() == DataType.MATRIX) {
                // no computation, just a copy
                if (rnode.getInputs().get(0).getExecLocation() == ExecLocation.Data
                        && ((Data) rnode.getInputs().get(0)).isTransient()
                        && rnode.getOutputParameters().getLabel()
                                .equals(rnode.getInputs().get(0).getOutputParameters().getLabel())) {
                    markedNodes.add(rnode);
                }
            }
        }
        // delete marked nodes
        rootNodes.removeAll(markedNodes);
        markedNodes.clear();
        if (rootNodes.isEmpty())
            return;
    }

    // structure that maps nodes to the indices that will be used in the instructions
    HashMap<Lop, Integer> nodeIndexMapping = new HashMap<Lop, Integer>();

    /* Determine all input data files */
    for (Lop rnode : rootNodes) {
        getInputPathsAndParameters(rnode, execNodes, inputs, inputInfos, numRows, numCols, numRowsPerBlock,
                numColsPerBlock, nodeIndexMapping, inputLabels, inputLops, MRJobLineNumbers);
    }

    // In case of a RAND job, instructions are defined in the input file
    if (jt == JobType.DATAGEN)
        randInstructions = inputs;

    int[] start_index = new int[1];
    start_index[0] = inputs.size();

    /* Get RecordReader Instructions */
    // currently, recordreader instructions are allowed only in GMR jobs
    if (jt == JobType.GMR || jt == JobType.GMRCELL) {
        for (Lop rnode : rootNodes) {
            getRecordReaderInstructions(rnode, execNodes, inputs, recordReaderInstructions, nodeIndexMapping,
                    start_index, inputLabels, inputLops, MRJobLineNumbers);
            if (recordReaderInstructions.size() > 1)
                throw new LopsException("MapReduce job can only have a single recordreader instruction: "
                        + recordReaderInstructions.toString());
        }
    }

    /*
     * Handle cases when the job's output is FORCED to be cell format.
     * - If there exists a cell input, then the output can not be blocked.
     *   The only exceptions are jobType = REBLOCK/CSVREBLOCK (for obvious reasons)
     *   and jobType = RAND, since RandJob takes a special input file,
     *   whose format should not be used to dictate the output format.
     * - If there exists a recordReader instruction.
     * - If jobtype = GroupedAgg. This job can only run in cell mode.
     */
    if (jt != JobType.REBLOCK && jt != JobType.CSV_REBLOCK && jt != JobType.DATAGEN
            && jt != JobType.TRANSFORM) {
        for (int i = 0; i < inputInfos.size(); i++)
            if (inputInfos.get(i) == InputInfo.BinaryCellInputInfo
                    || inputInfos.get(i) == InputInfo.TextCellInputInfo)
                cellModeOverride = true;
    }

    if (!recordReaderInstructions.isEmpty() || jt == JobType.GROUPED_AGG)
        cellModeOverride = true;

    /* Get Mapper Instructions */
    for (int i = 0; i < rootNodes.size(); i++) {
        getMapperInstructions(rootNodes.get(i), execNodes, inputs, mapperInstructions, nodeIndexMapping,
                start_index, inputLabels, inputLops, MRJobLineNumbers);
    }

    if (LOG.isTraceEnabled()) {
        LOG.trace("    Input strings: " + inputs.toString());
        if (jt == JobType.DATAGEN)
            LOG.trace("    Rand instructions: " + getCSVString(randInstructions));
        if (jt == JobType.GMR)
            LOG.trace("    RecordReader instructions: " + getCSVString(recordReaderInstructions));
        LOG.trace("    Mapper instructions: " + getCSVString(mapperInstructions));
    }

    /* Get Shuffle and Reducer Instructions */
    ArrayList<String> shuffleInstructions = new ArrayList<String>();
    ArrayList<String> aggInstructionsReducer = new ArrayList<String>();
    ArrayList<String> otherInstructionsReducer = new ArrayList<String>();

    for (Lop rn : rootNodes) {
        int resultIndex = getAggAndOtherInstructions(rn, execNodes, shuffleInstructions, aggInstructionsReducer,
                otherInstructionsReducer, nodeIndexMapping, start_index, inputLabels, inputLops,
                MRJobLineNumbers);
        if (resultIndex == -1)
            throw new LopsException("Unexpected error in piggybacking!");

        if (rn.getExecLocation() == ExecLocation.Data
                && ((Data) rn).getOperationType() == Data.OperationTypes.WRITE && ((Data) rn).isTransient()
                && rootNodes.contains(rn.getInputs().get(0))) {
            // Both rn (a transient write) and its input are root nodes.
            // Instead of creating two copies of the data, simply generate a cpvar instruction
            NodeOutput out = setupNodeOutputs(rn, ExecType.MR, cellModeOverride, true);
            writeinst.addAll(out.getLastInstructions());
        } else {
            resultIndices.add(Byte.valueOf((byte) resultIndex));

            // setup output filenames and outputInfos and generate related instructions
            NodeOutput out = setupNodeOutputs(rn, ExecType.MR, cellModeOverride, false);
            outputLabels.add(out.getVarName());
            outputs.add(out.getFileName());
            outputInfos.add(out.getOutInfo());
            if (LOG.isTraceEnabled()) {
                LOG.trace("    Output Info: " + out.getFileName() + ";"
                        + OutputInfo.outputInfoToString(out.getOutInfo()) + ";" + out.getVarName());
            }
            renameInstructions.addAll(out.getLastInstructions());
            variableInstructions.addAll(out.getPreInstructions());
            postInstructions.addAll(out.getPostInstructions());
        }
    }

    /* Determine if the output dimensions are known */
    byte[] resultIndicesByte = new byte[resultIndices.size()];
    for (int i = 0; i < resultIndicesByte.length; i++) {
        resultIndicesByte[i] = resultIndices.get(i).byteValue();
    }

    if (LOG.isTraceEnabled()) {
        LOG.trace("    Shuffle Instructions: " + getCSVString(shuffleInstructions));
        LOG.trace("    Aggregate Instructions: " + getCSVString(aggInstructionsReducer));
        LOG.trace("    Other instructions: " + getCSVString(otherInstructionsReducer));
        LOG.trace("    Output strings: " + outputs.toString());
        LOG.trace("    ResultIndices = " + resultIndices.toString());
    }

    /* Prepare the MapReduce job instruction */
    MRJobInstruction mr = new MRJobInstruction(jt);

    // check if this is a map-only job; if not, set the number of reducers
    if (!shuffleInstructions.isEmpty() || !aggInstructionsReducer.isEmpty()
            || !otherInstructionsReducer.isEmpty())
        numReducers = total_reducers;

    // set inputs, outputs, and other properties for the job
    mr.setInputOutputLabels(inputLabels.toArray(new String[0]), outputLabels.toArray(new String[0]));
    mr.setOutputs(resultIndicesByte);
    mr.setDimsUnknownFilePrefix(getFilePath());
    mr.setNumberOfReducers(numReducers);
    mr.setReplication(replication);

    // set instructions for recordReader and mapper
    mr.setRecordReaderInstructions(getCSVString(recordReaderInstructions));
    mr.setMapperInstructions(getCSVString(mapperInstructions));

    // compute and set mapper memory requirements (for consistency of runtime piggybacking)
    if (jt == JobType.GMR) {
        double mem = 0;
        for (Lop n : execNodes)
            mem += computeFootprintInMapper(n);
        mr.setMemoryRequirements(mem);
    }

    if (jt == JobType.DATAGEN)
        mr.setRandInstructions(getCSVString(randInstructions));

    // set shuffle instructions
    mr.setShuffleInstructions(getCSVString(shuffleInstructions));

    // set reducer instructions
    mr.setAggregateInstructionsInReducer(getCSVString(aggInstructionsReducer));
    mr.setOtherInstructionsInReducer(getCSVString(otherInstructionsReducer));
    if (DMLScript.ENABLE_DEBUG_MODE) {
        // set line number information for each MR instruction
        mr.setMRJobInstructionsLineNumbers(MRJobLineNumbers);
    }

    /* Add the prepared instructions to the output set */
    inst.addAll(variableInstructions);
    inst.add(mr);
    inst.addAll(postInstructions);
    deleteinst.addAll(renameInstructions);

    for (Lop l : inputLops) {
        if (DMLScript.ENABLE_DEBUG_MODE) {
            processConsumers(l, rmvarinst, deleteinst, l);
        } else {
            processConsumers(l, rmvarinst, deleteinst, null);
        }
    }
}
From source file:com.bop.web.ssj.powerlist.PowerList.java
@Action
public String identifyInfo(String selected) {
    ArrayList<JSONObject> errorLs = new ArrayList<JSONObject>();
    MultipartHttpServletRequest request = (MultipartHttpServletRequest) ActionContext.getActionContext()
            .getHttpServletRequest();
    // input name
    String[] arr = request.getFileFields();
    String title = arr[0];
    FileInputStream fs;
    Workbook designer = null;
    try {
        fs = (FileInputStream) request.getFileInputStream(title);
        designer = new Workbook(fs);
    } catch (IOException e) {
        log.error("IO error while reading the uploaded file");
        log.error(e.getMessage().toString());
        e.printStackTrace();
    } catch (Exception e) {
        log.error("Error opening the uploaded workbook");
        e.printStackTrace();
        log.error(e.getMessage().toString());
    }
    Cells cells = designer.getWorksheets().get(0).getCells();
    int rows = cells.getMaxDataRow() + 1;
    int cols = cells.getMaxDataColumn() + 1;
    if (log.isDebugEnabled()) {
        log.debug("rows: " + rows + "; cols: " + cols);
    }
    if (cols != 12) {
        return "error";
    }
    // quote the selected ids for use in SQL
    selected = "'" + selected.replaceAll(",", "','") + "'";
    if (selected.contains(",")) { // multiple items selected
        // the item-name column of the Excel sheet must be empty
        for (int x = 1; x < rows; x++) {
            String itemNameByExcel = cells.get(x, 0).getValue() == null ? ""
                    : cells.get(x, 0).getStringValue().toString();
            if (!"".equals(itemNameByExcel)) {
                return "error1";
            }
        }
    } else { // single item selected
        // look up the item name by id; every row in the Excel sheet must then match it
        List<Map<String, Object>> ndlist = this.getItemName(selected);
        String itemName = "";
        if (ndlist.size() > 0) {
            itemName = ndlist.get(0).get("Item0101") == null ? "" : ndlist.get(0).get("Item0101").toString();
        } else {
            return "sysError";
        }
        for (int x = 1; x < rows; x++) {
            String itemNameByExcel = cells.get(x, 0).getValue() == null ? ""
                    : cells.get(x, 0).getValue().toString();
            if (!itemName.equals(itemNameByExcel)) {
                return "error2";
            }
        }
    }
    // validate each data row
    int errorNum = 0;
    for (int i = 1; i < rows; i++) {
        int m = 0;
        int n = 0;
        int z = 0;
        // organization code
        String orgCode = cells.get(i, 1).getStringValue() == null ? "" : cells.get(i, 1).getStringValue().trim();
        orgCode = orgCode.replace("-", "");
        // organization name
        String orgName = cells.get(i, 2).getStringValue() == null ? "" : cells.get(i, 2).getStringValue().trim();
        // registered address code
        String orgAddressCode = cells.get(i, 4).getStringValue() == null ? ""
                : cells.get(i, 4).getStringValue().trim();
        // city
        String city = cells.get(i, 5).getStringValue() == null ? "" : cells.get(i, 5).getStringValue().trim();
        // production address code
        String yieldlyCode = cells.get(i, 7).getStringValue() == null ? ""
                : cells.get(i, 7).getStringValue().trim();
        // check the organization code
        if (StringUtils.isNotBlank(orgCode) && (orgCode.length() == 9 || orgCode.length() == 18)) {
            if (orgCode.length() == 18) {
                orgCode = orgCode.substring(8, 17);
            }
            int orgSize = this.jdbcTemplate
                    .queryForInt("select count(*)from Org01 where ORG_CODE='" + orgCode + "'");
            if (orgSize < 1) {
                m = 1;
            }
        } else {
            m = 1;
        }
        // check the registered address code
        int zcSize = this.jdbcTemplate
                .queryForInt("select count(*) from DM_CODETABLE_DATA where codetablename='DB064' and cid='"
                        + orgAddressCode + "'");
        if (zcSize < 1) {
            n = 1;
        }
        // check the production address code
        int scSize = this.jdbcTemplate
                .queryForInt("select count(*) from DM_CODETABLE_DATA where codetablename='DB064' and cid='"
                        + yieldlyCode + "'");
        if (scSize < 1) {
            z = 1;
        }
        if (this.jude(m, n, z)) {
            errorNum++;
            HashMap<String, String> sub = new HashMap<String, String>();
            sub.put("errorIndex", String.valueOf(errorNum));
            sub.put("orgName", orgName);
            sub.put("city", city);
            sub.put("code", orgCode);
            sub.put("orgAddressCode", orgAddressCode);
            sub.put("yieldlyCode", yieldlyCode);
            String error = "";
            if (m == 1) {
                sub.put("code", "<label style=\"color:red\";>" + orgCode + "</label>");
                error += "invalid organization code;";
            }
            if (n == 1) {
                sub.put("orgAddressCode", "<label style=\"color:red\";>" + orgAddressCode + "</label>");
                error += "invalid registered address code;";
            }
            if (z == 1) {
                sub.put("yieldlyCode", "<label style=\"color:red\";>" + yieldlyCode + "</label>");
                error += "invalid production address code;";
            }
            sub.put("errorInfo", error);
            JSONObject jsonObject = JSONObject.fromObject(sub);
            errorLs.add(jsonObject);
        }
    }
    Double s = Double.valueOf(errorNum) / Double.valueOf(rows - 1);
    DecimalFormat df = new DecimalFormat("0.00");
    HashMap<String, String> mp = new HashMap<String, String>();
    mp.put("content", errorLs.toString());
    mp.put("percent", df.format(s * 100) + "%");
    mp.put("errorNum", String.valueOf(errorNum));
    JSONObject jsonObject = JSONObject.fromObject(mp);
    return jsonObject.toString();
}
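In the example above, errorLs.toString() yields usable output only because each element (a JSONObject) renders itself as JSON text; ArrayList.toString() simply delegates to each element's toString(). A self-contained sketch of that delegation, using a stand-in class with an overridden toString() (the class, fields, and values are illustrative, not from the example):

import java.util.ArrayList;
import java.util.List;

public class DelegationDemo {
    // Stand-in for an element type with a meaningful toString(), like JSONObject
    static final class ErrorRow {
        final int index;
        final String message;
        ErrorRow(int index, String message) { this.index = index; this.message = message; }
        @Override
        public String toString() {
            return "{\"errorIndex\":" + index + ",\"errorInfo\":\"" + message + "\"}";
        }
    }

    public static void main(String[] args) {
        List<ErrorRow> errors = new ArrayList<>();
        errors.add(new ErrorRow(1, "bad org code"));
        errors.add(new ErrorRow(2, "bad region code"));
        // ArrayList.toString() wraps each element's own rendering in [ ... ]:
        // [{"errorIndex":1,"errorInfo":"bad org code"}, {"errorIndex":2,"errorInfo":"bad region code"}]
        System.out.println(errors.toString());
    }
}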
From source file:com.androidaq.AndroiDAQTCPAdapter.java
public void setProp() {
    // Set the inputs/outputs, states etc., and get readings from inputs.
    chsAreInputs = getInputChannels(); // Inputs are read
    ArrayList<String> isInputPulsed = getPulsedInputs(chsAreInputs); // which inputs are pulse reads
    ArrayList<String> chsAreOutputs = getOutputChannels(); // Outputs can be digital or frequency
    ArrayList<String> isOutputDigital = getDigitalOutputs(chsAreOutputs); // which output channels are digital
    ArrayList<String> digitalOutState = getDigitalOutputState(isOutputDigital); // desired output state
    ArrayList<String> isOutputPulsed = getPulsedOutputs(chsAreOutputs); // which output channels are pulsed
    ArrayList<String> desiredFreq = getDesiredFreqs(isOutputPulsed); // for non-digital channels, the desired frequency
    ArrayList<String> desiredDuty = getDesiredDuty(isOutputPulsed); // for non-digital channels, the desired duty

    ((AndroiDAQTCPMain) context).sendMessage("00\r"); // Send the AndroiDAQ menu command and array(s) of information.

    String[] temp = new String[16];
    Arrays.fill(temp, "NS"); // NS = Not Set
    ArrayList<String> SendToProp = new ArrayList<String>(Arrays.asList(temp));

    // set input channels:
    // for each channel in chsAreInputs, put zero in the SendToProp channel location
    int x0 = chsAreInputs.size();
    for (int i = 0; i < x0; i++) {
        String channel = chsAreInputs.get(i);
        SendToProp.set(Integer.valueOf(channel), "0");
    }

    // set input channels that are pulsed
    int x1 = isInputPulsed.size();
    for (int i = 0; i < x1; i++) {
        String channel = isInputPulsed.get(i);
        SendToProp.set(Integer.valueOf(channel), "1");
    }

    // set output channels that are pulsed
    int x2 = isOutputPulsed.size();
    for (int i = 0; i < x2; i++) {
        String channel = isOutputPulsed.get(i);
        String freq = desiredFreq.get(i);
        String duty = desiredDuty.get(i);
        SendToProp.set(Integer.valueOf(channel), "4-" + freq + "_" + duty);
    }

    // set output channels that are not pulsed to their appropriate state
    int x3 = Math.abs(chsAreOutputs.size() - isOutputPulsed.size());
    for (int i = 0; i < x3; i++) {
        String channel = isOutputDigital.get(i);
        String state = digitalOutState.get(i);
        if ("true".equals(state)) { // compare by value, not by reference
            SendToProp.set(Integer.valueOf(channel), "3");
        } else {
            SendToProp.set(Integer.valueOf(channel), "2");
        }
    }
    Log.e("test", "SendToProp after insert is:" + SendToProp.toString());

    // create the string to send to the Propeller
    int x4 = SendToProp.size();
    String propMessage = "";
    for (int i = 0; i < x4; i++) {
        String whatIsString = SendToProp.get(i);
        if (whatIsString.contains("4-")) {
            String[] freqString = whatIsString.split("-");
            String[] dutyString = freqString[1].split("_");
            Log.e("test", "freqString[0] is:" + freqString[0]);
            Log.e("test", "freqString[1] is:" + freqString[1]);
            Log.e("test", "dutyString[0] is:" + dutyString[0]);
            Log.e("test", "dutyString[1] is:" + dutyString[1]);
            propMessage = propMessage.concat(
                    "0" + freqString[0] + "\r" + "0" + dutyString[0] + "\r" + "0" + dutyString[1] + "\r");
        } else {
            propMessage = propMessage + "0" + whatIsString + "\r";
        }
    }
    Log.e("test", "propMessage after insert is:" + propMessage);

    // break the message into sub-messages so data isn't lost in transmission
    String[] subMessages = propMessage.split("\\r");
    int numOfArrays = subMessages.length;
    for (int i = 0; i < numOfArrays; i++) {
        Log.e("test", "subMessages after insert is:" + subMessages[i]);
        ((AndroiDAQTCPMain) context).sendMessage(subMessages[i] + "\r");
        try {
            Thread.sleep(9);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }

    Log.e("test", "runVolts is:" + runVolts);
    Log.e("test", "runInputs is:" + runInputs);
    Log.e("test", "runAll is:" + runAll);
    if (sendOutputs) {
        sendOutputs = false;
        ((AndroiDAQTCPMain) context).setOutputs();
    }
    if (runVolts) {
        ((AndroiDAQTCPMain) context).getVolts(true);
        runVolts = false;
    }
    if (runInputs) {
        ((AndroiDAQTCPMain) context).getInputs(true);
        runInputs = false;
    }
    if (runContInputs) {
        ((AndroiDAQTCPMain) context).getInputsCont(true);
    }
    if (runAll) {
        ((AndroiDAQTCPMain) context).setOutputs();
        try {
            Thread.sleep(250);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        ((AndroiDAQTCPMain) context).getAll(true);
    }
}
From source file:com.linkedpipes.plugin.loader.dcatAp11ToDkanBatch.DcatAp11ToDkanBatch.java
@Override
public void execute() throws LpException {
    apiURI = configuration.getApiUri();
    // Used to retry HTTP requests that fail with "failed to respond".
    boolean responded = false;
    if (apiURI == null || apiURI.isEmpty()
            || configuration.getUsername() == null || configuration.getUsername().isEmpty()
            || configuration.getPassword() == null || configuration.getPassword().isEmpty()) {
        throw exceptionFactory.failure("Missing required settings.");
    }
    Map<String, String> groups = getGroups();
    LOG.debug("Querying metadata for datasets");
    LinkedList<String> datasets = new LinkedList<>();
    for (Map<String, Value> map : executeSelectQuery(
            "SELECT ?d WHERE {?d a <" + DcatAp11ToDkanBatchVocabulary.DCAT_DATASET_CLASS + ">}")) {
        datasets.add(map.get("d").stringValue());
    }
    int current = 0;
    int total = datasets.size();
    LOG.info("Found " + total + " datasets");
    progressReport.start(total);
    token = getToken(configuration.getUsername(), configuration.getPassword());

    for (String datasetURI : datasets) {
        current++;
        CloseableHttpResponse queryResponse = null;
        LOG.info("Processing dataset " + current + "/" + total + ": " + datasetURI);
        String publisher_uri = executeSimpleSelectQuery(
                "SELECT ?publisher_uri WHERE {<" + datasetURI + "> <" + DCTERMS.PUBLISHER
                        + "> ?publisher_uri }", "publisher_uri");
        String publisher_name = executeSimpleSelectQuery(
                "SELECT ?publisher_name WHERE {<" + datasetURI + "> <" + DCTERMS.PUBLISHER + ">/<"
                        + FOAF.NAME + "> ?publisher_name FILTER(LANGMATCHES(LANG(?publisher_name), \""
                        + configuration.getLoadLanguage() + "\"))}", "publisher_name");

        if (!groups.containsKey(publisher_uri)) {
            LOG.debug("Creating group " + publisher_uri);
            if (publisher_name == null || publisher_name.isEmpty()) {
                throw exceptionFactory.failure("Publisher has no name: " + publisher_uri);
            }
            HttpPost httpPost = new HttpPost(apiURI + "/node");
            httpPost.addHeader(new BasicHeader("Accept", "application/json"));
            httpPost.addHeader(new BasicHeader("X-CSRF-Token", token));
            ArrayList<NameValuePair> postParameters = new ArrayList<>();
            postParameters.add(new BasicNameValuePair("type", "group"));
            postParameters.add(new BasicNameValuePair("title", publisher_name));
            postParameters.add(new BasicNameValuePair("body[und][0][value]", publisher_uri));
            try {
                UrlEncodedFormEntity form = new UrlEncodedFormEntity(postParameters, "UTF-8");
                httpPost.setEntity(form);
            } catch (UnsupportedEncodingException e) {
                LOG.error("Unexpected encoding issue");
            }
            CloseableHttpResponse response = null;
            responded = false;
            // Retry loop: repeat the POST until the server responds.
            do {
                try {
                    response = postClient.execute(httpPost);
                    if (response.getStatusLine().getStatusCode() == 200) {
                        LOG.debug("Group created OK");
                        String orgID = new JSONObject(EntityUtils.toString(response.getEntity()))
                                .getString("nid");
                        groups.put(publisher_uri, orgID);
                    } else {
                        String ent = EntityUtils.toString(response.getEntity());
                        LOG.error("Group:" + ent);
                        //throw exceptionFactory.failed("Error creating group: " + ent);
                    }
                    responded = true;
                } catch (Exception e) {
                    LOG.error(e.getLocalizedMessage(), e);
                } finally {
                    if (response != null) {
                        try {
                            response.close();
                        } catch (IOException e) {
                            LOG.error(e.getLocalizedMessage(), e);
                            throw exceptionFactory.failure("Error creating group");
                        }
                    }
                }
            } while (!responded);
        }

        ArrayList<NameValuePair> datasetFields = new ArrayList<>();
        datasetFields.add(new BasicNameValuePair("type", "dataset"));

        LinkedList<String> keywords = new LinkedList<>();
        for (Map<String, Value> map : executeSelectQuery(
                "SELECT ?keyword WHERE {<" + datasetURI + "> <"
                        + DcatAp11ToDkanBatchVocabulary.DCAT_KEYWORD
                        + "> ?keyword FILTER(LANGMATCHES(LANG(?keyword), \""
                        + configuration.getLoadLanguage() + "\"))}")) {
            keywords.add(map.get("keyword").stringValue());
        }
        String concatTags = "";
        for (String keyword : keywords) {
            String safekeyword = fixKeyword(keyword);
            if (safekeyword.length() >= 2) {
                concatTags += "\"\"" + safekeyword + "\"\" ";
            }
        }
        if (!concatTags.isEmpty()) {
            datasetFields.add(new BasicNameValuePair("field_tags[und][value_field]", concatTags));
        }

        String title = executeSimpleSelectQuery(
                "SELECT ?title WHERE {<" + datasetURI + "> <" + DCTERMS.TITLE
                        + "> ?title FILTER(LANGMATCHES(LANG(?title), \""
                        + configuration.getLoadLanguage() + "\"))}", "title");
        if (!title.isEmpty()) {
            datasetFields.add(new BasicNameValuePair("title", title));
        }
        String description = executeSimpleSelectQuery(
                "SELECT ?description WHERE {<" + datasetURI + "> <" + DCTERMS.DESCRIPTION
                        + "> ?description FILTER(LANGMATCHES(LANG(?description), \""
                        + configuration.getLoadLanguage() + "\"))}", "description");
        if (!description.isEmpty()) {
            datasetFields.add(new BasicNameValuePair("body[und][0][value]", description));
        } else if (configuration.getProfile()
                .equals(DcatAp11ToDkanBatchVocabulary.PROFILES_NKOD.stringValue())) {
            // Description is mandatory in NKOD. If missing, add at least the title.
            datasetFields.add(new BasicNameValuePair("body[und][0][value]", title));
        }
        String issued = executeSimpleSelectQuery(
                "SELECT ?issued WHERE {<" + datasetURI + "> <" + DCTERMS.ISSUED + "> ?issued }",
                "issued");
        if (!issued.isEmpty()) {
            //long unixTime = System.currentTimeMillis() / 1000L;
            datasetFields.add(new BasicNameValuePair("created", issued));
        }
        String modified = executeSimpleSelectQuery(
                "SELECT ?modified WHERE {<" + datasetURI + "> <" + DCTERMS.MODIFIED + "> ?modified }",
                "modified");
        if (!modified.isEmpty()) {
            datasetFields.add(new BasicNameValuePair("changed", modified));
        }
        if (!publisher_uri.isEmpty()) {
            datasetFields.add(new BasicNameValuePair("og_group_ref[und][target_id]",
                    groups.get(publisher_uri)));
        }

        if (configuration.getProfile().equals(DcatAp11ToDkanBatchVocabulary.PROFILES_NKOD.stringValue())) {
            String contactPoint = executeSimpleSelectQuery(
                    "SELECT ?contact WHERE {<" + datasetURI + "> <"
                            + DcatAp11ToDkanBatchVocabulary.DCAT_CONTACT_POINT + ">/<"
                            + DcatAp11ToDkanBatchVocabulary.VCARD_HAS_EMAIL + "> ?contact }", "contact");
            if (!contactPoint.isEmpty()) {
                datasetFields.add(new BasicNameValuePair("field_maintainer_email[und][0][value]",
                        contactPoint));
            }
            String curatorName = executeSimpleSelectQuery(
                    "SELECT ?name WHERE {<" + datasetURI + "> <"
                            + DcatAp11ToDkanBatchVocabulary.DCAT_CONTACT_POINT + ">/<"
                            + DcatAp11ToDkanBatchVocabulary.VCARD_FN + "> ?name }", "name");
            if (!curatorName.isEmpty()) {
                datasetFields.add(new BasicNameValuePair("field_maintainer[und][0][value]", curatorName));
            }
            if (!publisher_uri.isEmpty()) {
                datasetFields.add(new BasicNameValuePair("field_publisher_uri[und][0][value]",
                        publisher_uri));
            }
            if (!publisher_name.isEmpty()) {
                datasetFields.add(new BasicNameValuePair("field_publisher_name[und][0][value]",
                        publisher_name));
            }
            String periodicity = executeSimpleSelectQuery(
                    "SELECT ?periodicity WHERE {<" + datasetURI + "> <" + DCTERMS.ACCRUAL_PERIODICITY
                            + "> ?periodicity }", "periodicity");
            if (!periodicity.isEmpty()) {
                datasetFields.add(new BasicNameValuePair("field_frequency_ods[und][0][value]",
                        periodicity));
            } else {
                // Mandatory in NKOD.
                datasetFields.add(new BasicNameValuePair("field_frequency_ods[und][0][value]",
                        "http://publications.europa.eu/resource/authority/frequency/UNKNOWN"));
            }
            String temporalStart = executeSimpleSelectQuery(
                    "SELECT ?temporalStart WHERE {<" + datasetURI + "> <" + DCTERMS.TEMPORAL + ">/<"
                            + DcatAp11ToDkanBatchVocabulary.SCHEMA_STARTDATE + "> ?temporalStart }",
                    "temporalStart");
            if (!temporalStart.isEmpty()) {
                datasetFields.add(new BasicNameValuePair("field_temporal_start[und][0][value]",
                        temporalStart));
            }
            String temporalEnd = executeSimpleSelectQuery(
                    "SELECT ?temporalEnd WHERE {<" + datasetURI + "> <" + DCTERMS.TEMPORAL + ">/<"
                            + DcatAp11ToDkanBatchVocabulary.SCHEMA_ENDDATE + "> ?temporalEnd }",
                    "temporalEnd");
            if (!temporalEnd.isEmpty()) {
                datasetFields.add(new BasicNameValuePair("field_temporal_end[und][0][value]",
                        temporalEnd));
            }
            String schemaURL = executeSimpleSelectQuery(
                    "SELECT ?schema WHERE {<" + datasetURI + "> <" + FOAF.PAGE + "> ?schema }", "schema");
            if (!schemaURL.isEmpty()) {
                datasetFields.add(new BasicNameValuePair("field_schema[und][0][value]", schemaURL));
            }
            String spatial = executeSimpleSelectQuery(
                    "SELECT ?spatial WHERE {<" + datasetURI + "> <" + DCTERMS.SPATIAL + "> ?spatial }",
                    "spatial");
            if (!spatial.isEmpty()) {
                datasetFields.add(new BasicNameValuePair("field_spatial[und][0][value]", spatial));
                if (spatial.matches("http:\\/\\/ruian.linked.opendata.cz\\/resource\\/.*")) {
                    String type = spatial.replaceAll(
                            "http:\\/\\/ruian.linked.opendata.cz\\/resource\\/([^\\/]+)\\/(.*)", "$1");
                    String code = spatial.replaceAll(
                            "http:\\/\\/ruian.linked.opendata.cz\\/resource\\/([^\\/]+)\\/(.*)", "$2");
                    String typ;
                    // We should not parse IRIs; however, here we have no choice.
                    switch (type) {
                    case "vusc":
                        typ = "VC";
                        break;
                    case "obce":
                        typ = "OB";
                        break;
                    case "kraje":
                        typ = "KR";
                        break;
                    case "orp":
                        typ = "OP";
                        break;
                    case "momc":
                        typ = "MC";
                        break;
                    case "pou":
                        typ = "PU";
                        break;
                    default:
                        typ = "ST";
                    }
                    datasetFields.add(new BasicNameValuePair("field_ruian_type[und][0][value]", typ));
                    datasetFields.add(new BasicNameValuePair("field_ruian_code[und][0][value]", code));
                } else {
                    // RUIAN type and code are mandatory in NKOD.
                    datasetFields.add(new BasicNameValuePair("field_ruian_type[und][0][value]", "ST"));
                    datasetFields.add(new BasicNameValuePair("field_ruian_code[und][0][value]", "1"));
                }
            } else {
                // RUIAN type and code are mandatory in NKOD.
                datasetFields.add(new BasicNameValuePair("field_ruian_type[und][0][value]", "ST"));
                datasetFields.add(new BasicNameValuePair("field_ruian_code[und][0][value]", "1"));
            }

            // DCAT-AP v1.1: has to be an IRI from http://publications.europa.eu/mdr/authority/file-type/index.html
            LinkedList<String> themes = new LinkedList<>();
            for (Map<String, Value> map : executeSelectQuery(
                    "SELECT ?theme WHERE {<" + datasetURI + "> <"
                            + DcatAp11ToDkanBatchVocabulary.DCAT_THEME + "> ?theme }")) {
                themes.add(map.get("theme").stringValue());
            }
            String concatThemes = "";
            for (String theme : themes) {
                concatThemes += theme + " ";
            }
            if (!concatThemes.isEmpty()) {
                datasetFields.add(new BasicNameValuePair("field_theme[und][0][value]", concatThemes));
            }
        }

        // Distributions
        LinkedList<String> distributions = new LinkedList<>();
        for (Map<String, Value> map : executeSelectQuery(
                "SELECT ?distribution WHERE {<" + datasetURI + "> <"
                        + DcatAp11ToDkanBatchVocabulary.DCAT_DISTRIBUTION + "> ?distribution }")) {
            distributions.add(map.get("distribution").stringValue());
        }
        for (int d = 0; d < distributions.size(); d++) {
            String distribution = distributions.get(d);
            ArrayList<NameValuePair> distroFields = new ArrayList<>();
            distroFields.add(new BasicNameValuePair("type", "resource"));
            String dtitle = executeSimpleSelectQuery(
                    "SELECT ?title WHERE {<" + distribution + "> <" + DCTERMS.TITLE
                            + "> ?title FILTER(LANGMATCHES(LANG(?title), \""
                            + configuration.getLoadLanguage() + "\"))}", "title");
            if (dtitle.isEmpty()) {
                // Distribution title is mandatory in DKAN.
                dtitle = title.isEmpty() ? "Resource" : title;
            }
            distroFields.add(new BasicNameValuePair("title", dtitle));
            String ddescription = executeSimpleSelectQuery(
                    "SELECT ?description WHERE {<" + distribution + "> <" + DCTERMS.DESCRIPTION
                            + "> ?description FILTER(LANGMATCHES(LANG(?description), \""
                            + configuration.getLoadLanguage() + "\"))}", "description");
            if (!ddescription.isEmpty()) {
                distroFields.add(new BasicNameValuePair("body[und][0][value]", ddescription));
            }
            /*String dformat = executeSimpleSelectQuery("SELECT ?format WHERE {<" + distribution
                    + "> <" + DCTERMS.FORMAT + "> ?format }", "format");
            if (!dformat.isEmpty() && codelists != null) {
                String formatlabel = executeSimpleCodelistSelectQuery("SELECT ?formatlabel WHERE {<"
                        + dformat + "> <" + SKOS.PREF_LABEL
                        + "> ?formatlabel FILTER(LANGMATCHES(LANG(?formatlabel), \"en\"))}",
                        "formatlabel");
                if (!formatlabel.isEmpty()) {
                    distroFields.add(new BasicNameValuePair("field_format[und][0][value]", formatlabel));
                }
            }*/
            String dmimetype = executeSimpleSelectQuery(
                    "SELECT ?format WHERE {<" + distribution + "> <"
                            + DcatAp11ToDkanBatchVocabulary.DCAT_MEDIATYPE + "> ?format }", "format");
            if (!dmimetype.isEmpty()) {
                distroFields.add(new BasicNameValuePair("field_link_remote_file[und][0][filemime]",
                        dmimetype.replaceAll(".*\\/([^\\/]+\\/[^\\/]+)", "$1")));
            }
            String dwnld = executeSimpleSelectQuery(
                    "SELECT ?dwnld WHERE {<" + distribution + "> <"
                            + DcatAp11ToDkanBatchVocabulary.DCAT_DOWNLOADURL + "> ?dwnld }", "dwnld");
            String access = executeSimpleSelectQuery(
                    "SELECT ?acc WHERE {<" + distribution + "> <"
                            + DcatAp11ToDkanBatchVocabulary.DCAT_ACCESSURL + "> ?acc }", "acc");
            // We prefer downloadURL, but only accessURL is mandatory.
            if (dwnld == null || dwnld.isEmpty()) {
                dwnld = access;
                if (dwnld == null || dwnld.isEmpty()) {
                    LOG.warn("Empty download and access URLs: " + datasetURI);
                    continue;
                }
            }
            if (!dwnld.isEmpty()) {
                distroFields.add(new BasicNameValuePair(
                        "field_link_remote_file[und][0][filefield_remotefile][url]", dwnld));
            }
            /*if (!distribution.isEmpty()) {
                distro.put("distro_url", distribution);
            }*/
            String dissued = executeSimpleSelectQuery(
                    "SELECT ?issued WHERE {<" + distribution + "> <" + DCTERMS.ISSUED + "> ?issued }",
                    "issued");
            if (!dissued.isEmpty()) {
                distroFields.add(new BasicNameValuePair("created", dissued));
            }
            String dmodified = executeSimpleSelectQuery(
                    "SELECT ?modified WHERE {<" + distribution + "> <" + DCTERMS.MODIFIED
                            + "> ?modified }", "modified");
            if (!dmodified.isEmpty()) {
                distroFields.add(new BasicNameValuePair("changed", dmodified));
            }

            if (configuration.getProfile().equals(DcatAp11ToDkanBatchVocabulary.PROFILES_NKOD.stringValue())) {
                String dtemporalStart = executeSimpleSelectQuery(
                        "SELECT ?temporalStart WHERE {<" + distribution + "> <" + DCTERMS.TEMPORAL
                                + ">/<" + DcatAp11ToDkanBatchVocabulary.SCHEMA_STARTDATE
                                + "> ?temporalStart }", "temporalStart");
                if (!dtemporalStart.isEmpty()) {
                    distroFields.add(new BasicNameValuePair("field_temporal_start[und][0][value]",
                            dtemporalStart));
                }
                String dtemporalEnd = executeSimpleSelectQuery(
                        "SELECT ?temporalEnd WHERE {<" + distribution + "> <" + DCTERMS.TEMPORAL
                                + ">/<" + DcatAp11ToDkanBatchVocabulary.SCHEMA_ENDDATE
                                + "> ?temporalEnd }", "temporalEnd");
                if (!dtemporalEnd.isEmpty()) {
                    distroFields.add(new BasicNameValuePair("field_temporal_end[und][0][value]",
                            dtemporalEnd));
                }
                String dschemaURL = executeSimpleSelectQuery(
                        "SELECT ?schema WHERE {<" + distribution + "> <" + DCTERMS.CONFORMS_TO
                                + "> ?schema }", "schema");
                if (!dschemaURL.isEmpty()) {
                    distroFields.add(new BasicNameValuePair("field_described_by[und][0][value]",
                            dschemaURL));
                }
                String dlicense = executeSimpleSelectQuery(
                        "SELECT ?license WHERE {<" + distribution + "> <" + DCTERMS.LICENSE
                                + "> ?license }", "license");
                if (dlicense.isEmpty()) {
                    // This is mandatory in NKOD and the DKAN extension.
                    dlicense = "http://joinup.ec.europa.eu/category/licence/unknown-licence";
                }
                distroFields.add(new BasicNameValuePair("field_licence[und][0][value]", dlicense));
                if (dmimetype.isEmpty()) {
                    //! field_format => mimetype
                    // This is mandatory in NKOD and the DKAN extension.
                    dmimetype = "http://www.iana.org/assignments/media-types/application/octet-stream";
                }
                distroFields.add(new BasicNameValuePair("field_mimetype[und][0][value]",
                        dmimetype.replaceAll(".*\\/([^\\/]+\\/[^\\/]+)", "$1")));
            }

            // POST the distribution.
            LOG.debug("Creating resource " + distribution);
            HttpPost httpPost = new HttpPost(apiURI + "/node");
            httpPost.addHeader(new BasicHeader("Accept", "application/json"));
            httpPost.addHeader(new BasicHeader("X-CSRF-Token", token));
            try {
                UrlEncodedFormEntity form = new UrlEncodedFormEntity(distroFields, "UTF-8");
                httpPost.setEntity(form);
            } catch (UnsupportedEncodingException e) {
                LOG.error("Unexpected encoding issue");
            }
            CloseableHttpResponse response = null;
            String resID = null;
            responded = false;
            do {
                try {
                    LOG.debug("POSTing resource " + distribution);
                    response = postClient.execute(httpPost);
                    if (response.getStatusLine().getStatusCode() == 200) {
                        String resp = EntityUtils.toString(response.getEntity());
                        LOG.debug("Resource created OK: " + resp);
                        try {
                            resID = new JSONObject(resp).getString("nid");
                            datasetFields.add(new BasicNameValuePair(
                                    "field_resources[und][" + d + "][target_id]",
                                    dtitle + " (" + resID + ")"));
                        } catch (JSONException e) {
                            LOG.error(e.getLocalizedMessage(), e);
                            LOG.error("Request: " + distroFields.toString());
                            LOG.error("Response: " + resp);
                        }
                    } else {
                        String ent = EntityUtils.toString(response.getEntity());
                        LOG.error("Resource:" + ent);
                        //throw exceptionFactory.failed("Error creating resource: " + ent);
                    }
                    responded = true;
                } catch (NoHttpResponseException e) {
                    LOG.error(e.getLocalizedMessage(), e);
                } catch (IOException e) {
                    LOG.error(e.getLocalizedMessage(), e);
                } finally {
                    if (response != null) {
                        try {
                            response.close();
                        } catch (IOException e) {
                            LOG.error(e.getLocalizedMessage(), e);
                            //throw exceptionFactory.failed("Error creating resource");
                        }
                    }
                }
            } while (!responded);
        }

        LOG.debug("Creating dataset " + datasetURI);
        HttpPost httpPost = new HttpPost(apiURI + "/node");
        httpPost.addHeader(new BasicHeader("Accept", "application/json"));
        httpPost.addHeader(new BasicHeader("X-CSRF-Token", token));
        try {
            UrlEncodedFormEntity form = new UrlEncodedFormEntity(datasetFields, "UTF-8");
            httpPost.setEntity(form);
        } catch (UnsupportedEncodingException e) {
            LOG.error("Unexpected encoding issue");
        }
        CloseableHttpResponse response = null;
        responded = false;
        do {
            try {
                LOG.debug("POSTing dataset " + datasetURI);
                response = postClient.execute(httpPost);
                if (response.getStatusLine().getStatusCode() == 200) {
                    LOG.debug("Dataset created OK");
                } else {
                    String ent = EntityUtils.toString(response.getEntity());
                    LOG.error("Dataset:" + ent);
                    //throw exceptionFactory.failed("Error creating dataset: " + ent);
                }
                responded = true;
            } catch (NoHttpResponseException e) {
                LOG.error(e.getLocalizedMessage(), e);
            } catch (IOException e) {
                LOG.error(e.getLocalizedMessage(), e);
            } finally {
                if (response != null) {
                    try {
                        response.close();
                    } catch (IOException e) {
                        LOG.error(e.getLocalizedMessage(), e);
                        throw exceptionFactory.failure("Error creating dataset");
                    }
                }
            }
        } while (!responded);
        progressReport.entryProcessed();
    }

    try {
        queryClient.close();
        createClient.close();
        postClient.close();
    } catch (IOException e) {
        LOG.error(e.getLocalizedMessage(), e);
    }
    progressReport.done();
}
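The one ArrayList.toString() call in this loader is the diagnostic LOG.error("Request: " + distroFields.toString()) above, which dumps the form fields of a failed resource POST. The following is a minimal, self-contained sketch of that logging pattern; the class name and sample fields are illustrative, not part of the original plugin.

import java.util.ArrayList;

import org.apache.http.NameValuePair;
import org.apache.http.message.BasicNameValuePair;

public class FormFieldLogging {
    public static void main(String[] args) {
        // Hypothetical form fields, mirroring the datasetFields/distroFields lists above.
        ArrayList<NameValuePair> fields = new ArrayList<>();
        fields.add(new BasicNameValuePair("type", "dataset"));
        fields.add(new BasicNameValuePair("title", "Example dataset"));
        // BasicNameValuePair.toString() renders "name=value", so ArrayList.toString()
        // prints: Request: [type=dataset, title=Example dataset]
        System.out.println("Request: " + fields.toString());
    }
}

Relying on toString() like this is fine for log output, but the element format is a convenience of BasicNameValuePair, not a stable wire format, so it should not be parsed back.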
From source file:com.virtusa.isq.vtaf.runtime.SeleniumTestBase.java
/**
 * Checks whether the given expected data is present in a table.
 *
 * @param element
 *            the table element
 * @param objectName
 *            the object name
 * @param expectedvale
 *            the expected value: a comma-separated list of cell values
 *            (a comma inside a value can be escaped as "\,")
 * @param fail
 *            whether a mismatch should fail the test
 * @param customError
 *            optional custom error message parts; may be null
 */
private void compareTableData(final WebElement element, final String objectName,
        final String expectedvale, final boolean fail, final Object[] customError) {
    ArrayList<String> htmlTable;
    ArrayList<String> inputTable;
    try {
        htmlTable = getAppTable(element);
        // Split on commas that are not escaped with a backslash, then unescape.
        inputTable = new ArrayList<String>(Arrays.asList(expectedvale.split("(?<!\\\\),")));
        ArrayList<String> tempInputTable = new ArrayList<String>();
        for (String inputVal : inputTable) {
            String formattedValue = inputVal.replaceAll("\\\\,", ",");
            tempInputTable.add(formattedValue);
        }
        inputTable = tempInputTable;

        String inputTableStr = StringUtils.join(inputTable, "|");
        String actualTableStr = StringUtils.join(htmlTable, "|");
        if (actualTableStr.contains(inputTableStr)) {
            reportresult(true, "CHECK TABLE :TABLE DATA ", "PASSED",
                    objectName + " :Input Value = " + expectedvale);
        } else {
            String inputTableString = inputTable.toString();
            String htmlTableString = htmlTable.toString();
            if (customError != null
                    && !(customError[0].equals("null") || customError[0].equals(""))) {
                reportresult(fail, "CHECK TABLE :TABLE DATA ", "FAILED",
                        " Custom Error :" + generateCustomError(customError)
                                + " System generated Error : " + objectName
                                + "'s TABLEDATA is not as expected " + inputTableString
                                + ": Actual :" + htmlTableString);
                checkTrue(false, fail,
                        objectName + " Custom Error :" + generateCustomError(customError)
                                + " System generated Error : " + objectName
                                + "'s TABLEDATA is not as expected " + inputTableString
                                + ": Actual :" + htmlTableString);
            } else {
                reportresult(fail, "CHECK TABLE :TABLE DATA ", "FAILED",
                        objectName + "'s TABLEDATA is not as expected " + inputTableString
                                + ": Actual :" + htmlTableString);
                checkTrue(false, fail,
                        objectName + "'s TABLEDATA is not as expected " + inputTableString
                                + ": Actual :" + htmlTableString);
            }
        }
    } catch (Exception e) {
        String errorString = e.getMessage();
        reportresult(fail, "CHECK TABLE :TABLE DATA", "FAILED", errorString);
        checkTrue(false, fail, errorString);
    }
}
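The failure branch above uses ArrayList.toString() (inputTable.toString() and htmlTable.toString()) to embed both the expected and the actual cell lists in the report message. Below is a minimal sketch of that compare-and-report pattern, assuming hard-coded table data in place of getAppTable() and plain String.join instead of StringUtils; all names here are illustrative.

import java.util.ArrayList;
import java.util.Arrays;

public class TableDataCheck {
    public static void main(String[] args) {
        // Stand-ins for the scraped table and the expected comma-separated input.
        ArrayList<String> htmlTable = new ArrayList<>(Arrays.asList("a", "b", "c"));
        ArrayList<String> inputTable =
                new ArrayList<>(Arrays.asList("a,x".split("(?<!\\\\),")));
        String actualTableStr = String.join("|", htmlTable);
        String inputTableStr = String.join("|", inputTable);
        if (!actualTableStr.contains(inputTableStr)) {
            // ArrayList.toString() gives readable failure output, e.g.
            // TABLEDATA is not as expected [a, x]: Actual :[a, b, c]
            System.out.println("TABLEDATA is not as expected "
                    + inputTable.toString() + ": Actual :" + htmlTable.toString());
        }
    }
}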