List of usage examples for the java.nio.file Path.toString() method
String toString();
From source file:edu.jhu.hlt.concrete.ingesters.simple.DoubleLineBreakFileIngester.java
/** * See usage string./*from www. j a v a 2 s. c o m*/ * * @param args */ public static void main(String[] args) { if (args.length != 4) { System.err.println("This program converts a character-based file to a .concrete file."); System.err.println("The text file must contain UTF-8 encoded characters."); System.err.println( "If the file contains any double-newlines, the file will be split into sections where those double-newlines occur."); System.err.println( "The .concrete file will share the same name as the input file, including the extension."); System.err.println("This program takes 4 arguments."); System.err.println("Argument 1: path/to/a/character/based/file"); System.err.println("Argument 2: type of Communication to generate [e.g., tweet]"); System.err.println("Argument 3: type of Sections to generate [e.g., passage]"); System.err.println("Argument 4: path/to/out/concrete/file"); System.err.println("Example usage: " + CompleteFileIngester.class.getName() + " /my/text/file story passage /my/output/folder"); System.exit(1); } String inPathStr = args[0]; Path inPath = Paths.get(inPathStr); try { ExistingNonDirectoryFile ef = new ExistingNonDirectoryFile(inPath); Optional<String> commType = Optional.ofNullable(args[1]); Optional<String> sectionType = Optional.ofNullable(args[2]); Optional<String> outPathStr = Optional.ofNullable(args[3]); Path ep = ef.getPath(); String fn = ef.getName(); Path outPath = Paths.get(outPathStr.get()); Path outFile = outPath.resolve(fn + ".concrete"); // Output directory exists, or it doesn't. // Try to create if it does not. if (!Files.exists(outPath)) { try { Files.createDirectories(outPath); } catch (IOException e) { logger.error("Caught exception when making output directories.", e); } // if it does, check to make sure it's a directory. } else { if (!Files.isDirectory(outPath)) { logger.error("Output path exists but is not a directory."); System.exit(1); } else { // check to make sure the output file won't be overwritten. 
if (Files.exists(outFile)) { logger.warn("Output file {} exists; not overwriting.", outFile.toString()); System.exit(1); } } } try { UTF8FileIngester ing = new DoubleLineBreakFileIngester(commType.get(), sectionType.get()); Communication comm = ing.fromCharacterBasedFile(ep); new WritableCommunication(comm).writeToFile(outFile, false); } catch (IngestException e) { logger.error("Caught exception during ingest.", e); System.exit(1); } catch (ConcreteException e) { logger.error("Caught exception writing output.", e); } } catch (NoSuchFileException e) { logger.error("Path {} does not exist.", inPathStr); System.exit(1); } catch (NotFileException e) { logger.error("Path {} is a directory.", inPathStr); System.exit(1); } }
From source file:org.eclipse.swt.snippets.SnippetExplorer.java
/** * SnippetExplorer main method.//www .j av a 2s.c o m * * @param args does not parse any arguments */ public static void main(String[] args) throws Exception { final String os = System.getProperty("os.name"); multiDisplaySupport = (os != null && os.toLowerCase().contains("windows")); if (canRunCommand("java")) { javaCommand = "java"; } else { final String javaHome = System.getProperty("java.home"); if (javaHome != null) { final Path java = Paths.get(javaHome, "bin", "java"); java.normalize(); if (canRunCommand(java.toString())) { javaCommand = java.toString(); } } } snippets = loadSnippets(); snippets.sort((a, b) -> { int cmp = Integer.compare(a.snippetNum, b.snippetNum); if (cmp == 0) { cmp = a.snippetName.compareTo(b.snippetName); } return cmp; }); new SnippetExplorer().open(); }
From source file:edu.usc.goffish.gofs.tools.GoFSDeployGraph.java
@SuppressWarnings("deprecation") public static void main(String[] args) throws IOException { if (args.length < REQUIRED_ARGS) { PrintUsageAndQuit(null);//from www . j av a2 s . c o m } if (args.length == 1 && args[0].equals("-help")) { PrintUsageAndQuit(null); } // optional arguments boolean overwriteGraph = false; PartitionerMode partitionerMode = PartitionerMode.METIS; ComponentizerMode componentizerMode = ComponentizerMode.WCC; MapperMode mapperMode = MapperMode.ROUNDROBIN; PartitionedFileMode partitionedFileMode = PartitionedFileMode.DEFAULT; DistributerMode distributerMode = DistributerMode.SCP; int instancesGroupingSize = 1; int numSubgraphBins = -1; // optional sub arguments Path metisBinaryPath = null; String[] extraMetisOptions = null; Path partitioningPath = null; Path partitionedGMLFilePath = null; // parse optional arguments int i = 0; OptArgLoop: for (i = 0; i < args.length - REQUIRED_ARGS; i++) { switch (args[i]) { case "-overwriteGraph": overwriteGraph = true; break; case "-partitioner": i++; if (args[i].equals("stream")) { partitionerMode = PartitionerMode.STREAM; } else if (args[i].startsWith("metis")) { String[] subargs = parseSubArgs('=', args[i]); if (subargs[0].equals("metis")) { partitionerMode = PartitionerMode.METIS; if (subargs.length > 1) { try { metisBinaryPath = Paths.get(subargs[1]); if (!metisBinaryPath.isAbsolute()) { throw new InvalidPathException(metisBinaryPath.toString(), "metis binary path must be absolute"); } } catch (InvalidPathException e) { PrintUsageAndQuit("metis binary - " + e.getMessage()); } if (subargs.length > 2) { extraMetisOptions = parseSubArgs(' ', subargs[2]); } } } else { PrintUsageAndQuit(null); } } else if (args[i].startsWith("predefined")) { String[] subargs = parseSubArgs('=', args[i]); if (subargs[0].equals("predefined")) { partitionerMode = PartitionerMode.PREDEFINED; if (subargs.length < 2) { PrintUsageAndQuit(null); } try { partitioningPath = Paths.get(subargs[1]); } catch (InvalidPathException e) { 
PrintUsageAndQuit("partitioning file - " + e.getMessage()); } } else { PrintUsageAndQuit(null); } } else { PrintUsageAndQuit(null); } break; case "-intermediategml": if (args[i + 1].startsWith("save")) { i++; String[] subargs = parseSubArgs('=', args[i]); if (subargs[0].equals("save")) { if (subargs.length < 2) { PrintUsageAndQuit(null); } partitionedFileMode = PartitionedFileMode.SAVE; try { partitionedGMLFilePath = Paths.get(subargs[1]); } catch (InvalidPathException e) { PrintUsageAndQuit("partitioned gml file - " + e.getMessage()); } } } else { partitionedFileMode = PartitionedFileMode.READ; } break; case "-componentizer": i++; switch (args[i]) { case "single": componentizerMode = ComponentizerMode.SINGLE; break; case "wcc": componentizerMode = ComponentizerMode.WCC; break; default: PrintUsageAndQuit(null); } break; case "-distributer": i++; switch (args[i]) { case "scp": distributerMode = DistributerMode.SCP; break; case "write": distributerMode = DistributerMode.WRITE; break; default: PrintUsageAndQuit(null); } break; case "-mapper": i++; if (args[i].equalsIgnoreCase("roundrobin")) { mapperMode = MapperMode.ROUNDROBIN; } else { PrintUsageAndQuit(null); } break; case "-serializer:instancegroupingsize": i++; try { if (args[i].equalsIgnoreCase("ALL")) { instancesGroupingSize = Integer.MAX_VALUE; } else { instancesGroupingSize = Integer.parseInt(args[i]); if (instancesGroupingSize < 1) { PrintUsageAndQuit("Serialization instance grouping size must be greater than zero"); } } } catch (NumberFormatException e) { PrintUsageAndQuit("Serialization instance grouping size - " + e.getMessage()); } break; case "-serializer:numsubgraphbins": i++; try { numSubgraphBins = Integer.parseInt(args[i]); if (instancesGroupingSize < 1) { PrintUsageAndQuit("Serialization number of subgraph bins must be greater than zero"); } } catch (NumberFormatException e) { PrintUsageAndQuit("Serialization number of subgraph bins - " + e.getMessage()); } break; default: break OptArgLoop; } } if 
(args.length - i < REQUIRED_ARGS) { PrintUsageAndQuit(null); } // required arguments IInternalNameNode nameNode = null; Class<? extends IInternalNameNode> nameNodeType = null; URI nameNodeLocation = null; String graphId = null; int numPartitions = 0; Path gmlTemplatePath = null; List<Path> gmlInstancePaths = new LinkedList<>(); // parse required arguments try { nameNodeType = NameNodeProvider.loadNameNodeType(args[i]); i++; } catch (ReflectiveOperationException e) { PrintUsageAndQuit("name node type - " + e.getMessage()); } try { nameNodeLocation = new URI(args[i]); i++; } catch (URISyntaxException e) { PrintUsageAndQuit("name node location - " + e.getMessage()); } try { nameNode = NameNodeProvider.loadNameNode(nameNodeType, nameNodeLocation); } catch (ReflectiveOperationException e) { PrintUsageAndQuit("error loading name node - " + e.getMessage()); } graphId = args[i++]; try { numPartitions = Integer.parseInt(args[i]); i++; } catch (NumberFormatException e) { PrintUsageAndQuit("number of partitions - " + e.getMessage()); } Path gmlInputFile = null; try { gmlInputFile = Paths.get(args[i]); i++; } catch (InvalidPathException e) { PrintUsageAndQuit(e.getMessage()); } // finished parsing args if (i < args.length) { PrintUsageAndQuit("Unrecognized argument \"" + args[i] + "\""); } // ensure name node is available if (!nameNode.isAvailable()) { throw new IOException("Name node at " + nameNode.getURI() + " is not available"); } // ensure there are data nodes available Set<URI> dataNodes = nameNode.getDataNodes(); if (dataNodes == null || dataNodes.isEmpty()) { throw new IllegalArgumentException("name node does not have any data nodes available for deployment"); } // ensure graph id does not exist (unless to be overwritten) IntCollection partitions = nameNode.getPartitionDirectory().getPartitions(graphId); if (partitions != null) { if (!overwriteGraph) { throw new IllegalArgumentException( "graph id \"" + graphId + "\" already exists in name node partition directory"); } 
else { for (int partitionId : partitions) { nameNode.getPartitionDirectory().removePartitionMapping(graphId, partitionId); } } } IGraphLoader loader = null; IPartitioner partitioner = null; if (partitionedFileMode != PartitionedFileMode.READ) { XMLConfiguration configuration; try { configuration = new XMLConfiguration(gmlInputFile.toFile()); configuration.setDelimiterParsingDisabled(true); //read the template property gmlTemplatePath = Paths.get(configuration.getString("template")); //read the instance property for (Object instance : configuration.getList("instances.instance")) { gmlInstancePaths.add(Paths.get(instance.toString())); } } catch (ConfigurationException | InvalidPathException e) { PrintUsageAndQuit("gml input file - " + e.getMessage()); } // create loader loader = new GMLGraphLoader(gmlTemplatePath); // create partitioner switch (partitionerMode) { case METIS: if (metisBinaryPath == null) { partitioner = new MetisPartitioner(); } else { partitioner = new MetisPartitioner(metisBinaryPath, extraMetisOptions); } break; case STREAM: partitioner = new StreamPartitioner(new LDGObjectiveFunction()); break; case PREDEFINED: partitioner = new PredefinedPartitioner( MetisPartitioning.read(Files.newInputStream(partitioningPath))); break; default: PrintUsageAndQuit(null); } } // create componentizer IGraphComponentizer graphComponentizer = null; switch (componentizerMode) { case SINGLE: graphComponentizer = new SingleComponentizer(); break; case WCC: graphComponentizer = new WCCComponentizer(); break; default: PrintUsageAndQuit(null); } // create mapper IPartitionMapper partitionMapper = null; switch (mapperMode) { case ROUNDROBIN: partitionMapper = new RoundRobinPartitionMapper(nameNode.getDataNodes()); break; default: PrintUsageAndQuit(null); } // create serializer ISliceSerializer serializer = nameNode.getSerializer(); if (serializer == null) { throw new IOException("name node at " + nameNode.getURI() + " returned null serializer"); } // create distributer 
IPartitionDistributer partitionDistributer = null; switch (distributerMode) { case SCP: partitionDistributer = new SCPPartitionDistributer(serializer, instancesGroupingSize, numSubgraphBins); break; case WRITE: partitionDistributer = new DirectWritePartitionDistributer(serializer, instancesGroupingSize, numSubgraphBins); break; } GMLPartitionBuilder partitionBuilder = null; try { System.out.print("Executing command: DeployGraph"); for (String arg : args) { System.out.print(" " + arg); } System.out.println(); // perform deployment long time = System.currentTimeMillis(); switch (partitionedFileMode) { case DEFAULT: partitionBuilder = new GMLPartitionBuilder(graphComponentizer, gmlTemplatePath, gmlInstancePaths); deploy(nameNode.getPartitionDirectory(), graphId, numPartitions, loader, partitioner, partitionBuilder, null, partitionMapper, partitionDistributer); break; case SAVE: //save partitioned gml files partitionBuilder = new GMLPartitionBuilder(partitionedGMLFilePath, graphComponentizer, gmlTemplatePath, gmlInstancePaths); //partitioned gml input file name format as graphid_numpartitions_paritioningtype_serializer String intermediateGMLInputFile = new StringBuffer().append(graphId).append("_") .append(numPartitions).append("_").append(partitionerMode.name().toLowerCase()).append("_") .append(serializer.getClass().getSimpleName().toLowerCase()).toString(); deploy(nameNode.getPartitionDirectory(), graphId, numPartitions, loader, partitioner, partitionBuilder, intermediateGMLInputFile, partitionMapper, partitionDistributer); break; case READ: //read partitioned gml files partitionBuilder = new GMLPartitionBuilder(graphComponentizer); partitionBuilder.new XMLConfigurationBuilder(gmlInputFile.toFile().getAbsolutePath()) .readIntermediateGMLFile(); deploy(nameNode.getPartitionDirectory(), graphId, numPartitions, partitionBuilder, partitionMapper, partitionDistributer); break; } System.out.println("finished [total " + (System.currentTimeMillis() - time) + "ms]"); } 
finally { if (partitionBuilder != null) partitionBuilder.close(); } }
From source file:com.curecomp.primefaces.migrator.PrimefacesMigration.java
public static void main(String[] args) throws Exception { // Let's use some colors :) // AnsiConsole.systemInstall(); CommandLineParser cliParser = new BasicParser(); CommandLine cli = null;/*from w ww. j a v a 2s . c o m*/ try { cli = cliParser.parse(OPTIONS, args); } catch (ParseException e) { printHelp(); } if (!cli.hasOption("s")) { printHelp(); } String sourcePattern; if (cli.hasOption("p")) { sourcePattern = cli.getOptionValue("p"); } else { sourcePattern = DEFAULT_SOURCE_PATTERN; } String defaultAnswer; if (cli.hasOption("default-answer")) { defaultAnswer = cli.getOptionValue("default-answer"); } else { defaultAnswer = DEFAULT_DEFAULT_PROMPT_ANSWER; } boolean defaultAnswerYes = defaultAnswer.equalsIgnoreCase("y"); boolean quiet = cli.hasOption("q"); boolean testWrite = cli.hasOption("t"); Path sourceDirectory = Paths.get(cli.getOptionValue("s")).toAbsolutePath(); // Since we use IO we will have some blocking threads hanging around int threadCount = Runtime.getRuntime().availableProcessors() * 2; ThreadPoolExecutor threadPool = new ThreadPoolExecutor(threadCount, threadCount, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>()); BlockingQueue<WidgetVarLocation> foundUsages = new LinkedBlockingQueue<>(); BlockingQueue<WidgetVarLocation> unusedOrAmbiguous = new LinkedBlockingQueue<>(); BlockingQueue<WidgetVarLocation> skippedUsages = new LinkedBlockingQueue<>(); List<Future<?>> futures = new ArrayList<>(); findWidgetVars(sourceDirectory, sourcePattern, threadPool).forEach(widgetVarLocation -> { // We can't really find usages of widget vars that use EL expressions :( if (widgetVarLocation.widgetVar.contains("#")) { unusedOrAmbiguous.add(widgetVarLocation); return; } try { FileActionVisitor visitor = new FileActionVisitor(sourceDirectory, sourcePattern, sourceFile -> futures.add(threadPool.submit((Callable<?>) () -> { findWidgetVarUsages(sourceFile, widgetVarLocation, foundUsages, skippedUsages, unusedOrAmbiguous); return null; }))); 
Files.walkFileTree(sourceDirectory, visitor); } catch (IOException ex) { throw new RuntimeException(ex); } }); awaitAll(futures); new TreeSet<>(skippedUsages).forEach(widgetUsage -> { int startIndex = widgetUsage.columnNr; int endIndex = startIndex + widgetUsage.widgetVar.length(); String relativePath = widgetUsage.location.toAbsolutePath().toString() .substring(sourceDirectory.toString().length()); String previous = replace(widgetUsage.line, startIndex, endIndex, Ansi.ansi().bold().fg(Ansi.Color.RED).a(widgetUsage.widgetVar).reset().toString()); System.out.println("Skipped " + relativePath + " at line " + widgetUsage.lineNr + " and col " + widgetUsage.columnNr + " for widgetVar '" + widgetUsage.widgetVar + "'"); System.out.println("\t" + previous); }); Map<WidgetVarLocation, List<WidgetVarLocation>> written = new HashMap<>(); new TreeSet<>(foundUsages).forEach(widgetUsage -> { WidgetVarLocation key = new WidgetVarLocation(null, widgetUsage.location, widgetUsage.lineNr, -1, null); List<WidgetVarLocation> writtenList = written.get(key); int existing = writtenList == null ? 0 : writtenList.size(); int startIndex = widgetUsage.columnNr; int endIndex = startIndex + widgetUsage.widgetVar.length(); String relativePath = widgetUsage.location.toAbsolutePath().toString() .substring(sourceDirectory.toString().length()); String next = replace(widgetUsage.line, startIndex, endIndex, Ansi.ansi().bold().fg(Ansi.Color.RED) .a("PF('" + widgetUsage.widgetVar + "')").reset().toString()); System.out .println(relativePath + " at line " + widgetUsage.lineNr + " and col " + widgetUsage.columnNr); System.out.println("\t" + next); System.out.print("Replace (Y/N)? [" + (defaultAnswerYes ? 
"Y" : "N") + "]: "); String input; if (quiet) { input = ""; System.out.println(); } else { try { do { input = in.readLine(); } while (input != null && !input.isEmpty() && !"y".equalsIgnoreCase(input) && !"n".equalsIgnoreCase(input)); } catch (IOException ex) { throw new RuntimeException(ex); } } if (input == null) { System.out.println("Aborted!"); } else if (input.isEmpty() && defaultAnswerYes || !input.isEmpty() && !"n".equalsIgnoreCase(input)) { System.out.println("Replaced!"); System.out.print("\t"); if (writtenList == null) { writtenList = new ArrayList<>(); written.put(key, writtenList); } writtenList.add(widgetUsage); List<String> lines; try { lines = Files.readAllLines(widgetUsage.location); } catch (IOException ex) { throw new RuntimeException(ex); } try (OutputStream os = testWrite ? new ByteArrayOutputStream() : Files.newOutputStream(widgetUsage.location); PrintWriter pw = new PrintWriter(new OutputStreamWriter(os, StandardCharsets.UTF_8))) { String line; for (int i = 0; i < lines.size(); i++) { int lineNr = i + 1; line = lines.get(i); if (lineNr == widgetUsage.lineNr) { int begin = widgetUsage.columnNr + (testWrite ? 
0 : existing * 6); int end = begin + widgetUsage.widgetVar.length(); String newLine = replace(line, begin, end, "PF('" + widgetUsage.widgetVar + "')", false); if (testWrite) { System.out.println(newLine); } else { pw.println(newLine); } } else { if (!testWrite) { pw.println(line); } } } } catch (IOException ex) { throw new RuntimeException(ex); } } else { System.out.println("Skipped!"); } }); new TreeSet<>(unusedOrAmbiguous).forEach(widgetUsage -> { int startIndex = widgetUsage.columnNr; int endIndex = startIndex + widgetUsage.widgetVar.length(); String relativePath = widgetUsage.location.toAbsolutePath().toString() .substring(sourceDirectory.toString().length()); String previous = replace(widgetUsage.line, startIndex, endIndex, Ansi.ansi().bold().fg(Ansi.Color.RED).a(widgetUsage.widgetVar).reset().toString()); System.out.println("Skipped unused or ambiguous " + relativePath + " at line " + widgetUsage.lineNr + " and col " + widgetUsage.columnNr); System.out.println("\t" + previous); }); threadPool.shutdown(); }
From source file:com.thesmartweb.swebrank.Main.java
/** * @param args the command line arguments *//*from w w w . j av a2 s. co m*/ public static void main(String[] args) { Path input_path = Paths.get("//mnt//var//DBs//inputsL10//nba//");//input directory String output_parent_directory = "//mnt//var//DBs//outputsConfL10//nba//";//output directory String config_path = "//mnt//var//DBs//config//";//input directory //---Disable apache log manually---- //System.setProperty("org.apache.commons.logging.Log","org.apache.commons.logging.impl.NoOpLog"); System.setProperty("org.apache.commons.logging.Log", "org.apache.commons.logging.impl.Log4JLogger"); //--------------Domain that is searched---------- String domain = ""; //------------------search engine related options---------------------- List<String> queries = null; int results_number = 0;//the number of results that are returned from each search engine List<Boolean> enginechoice = null; //list element #0. True/False Bing //list element #1. True/False Google //list element #2. True/False Yahoo! //list element #3. True/False Merged //-----------Moz options--------------------- List<Boolean> mozMetrics = null; //The list is going to contain the moz related input in the following order //list element #1. True/False, True we use Moz API, false not //list element #2. True if we use Domain Authority //list element #3. True if we use External MozRank //list element #4. True if we use MozRank //list element #5. True if we use MozTrust //list element #6. True if we use Subdomain MozRank //list element #7. 
True if we use Page Authority //only one is used (the first to be set to true) boolean moz_threshold_option = false;//set to true we use the threshold Double moz_threshold = 0.0;//if we want to have a threshold in moz int top_count_moz = 0;//if we want to get the moz top-something results //---------------Semantic Analysis method---------------- List<Boolean> ContentSemantics = null; int SensebotConcepts = 0;//define the amount of concepts that sensebot is going to recognize List<Double> SWebRankSettings = null; //------(string)directory is going to be used later----- String output_child_directory; //-------we get all the paths of the txt (input) files from the input directory------- DataManipulation getfiles = new DataManipulation();//class responsible for the extraction of paths Collection<File> inputs_files;//array to include the paths of the txt files inputs_files = getfiles.getinputfiles(input_path.toString(), "txt");//method to retrieve all the path of the input documents //------------read the txt files------------ for (File input : inputs_files) { ReadInput ri = new ReadInput();//function to read the input boolean check_reading_input = ri.perform(input); if (check_reading_input) { domain = ri.domain; //---------- queries = ri.queries; results_number = ri.results_number; enginechoice = ri.enginechoice; //------------ mozMetrics = ri.mozMetrics; moz_threshold_option = ri.moz_threshold_option; moz_threshold = ri.moz_threshold.doubleValue(); //--------------- ContentSemantics = ri.ContentSemantics; SWebRankSettings = ri.SWebRankSettings; } int top_visible = 0;//option to set the amount of results you can get in the merged search engine //------if we choose to use a Moz metric or Visibility score for our ranking, we need to set the results_number for the search engines to its max which is 50 //-----we set the top results number for moz or Visibility rank---- if (mozMetrics.get(0) || enginechoice.get(3)) { if (mozMetrics.get(0)) { top_count_moz = results_number; 
} //if moz is true, top_count_moz gets the value of result number if (enginechoice.get(3)) { top_visible = results_number; } //if merged engine is true, top_visible gets the value of result number results_number = 50;//this is the max amount of results that you can get from the search engine APIs } //-----if we want to use Moz we should check first if it works if (mozMetrics.get(0)) { Moz Moz = new Moz(); //---if it works, moz remains true, otherwise it is set to false mozMetrics.add(0, Moz.check(config_path)); //if it is false and we have chosen to use Visibility score with Moz, we reset back to the standard settings (ranking and not merged) //therefore, we reset the number of results from 50 to the top_count_moz which contained the original number of results if (!mozMetrics.get(0)) { if (!enginechoice.get(3)) { results_number = top_count_moz; } } } //----------we set the wordLists that we are going to use--------------------- List<String> finalList = new ArrayList<String>();//finalList is going to contain all the content in the end Total_analysis ta = new Total_analysis();//we call total analysis int iteration_counter = 0;//the iteration_counter is used in order to count the number of iterations of the algorithm and to be checked with perf_limit //this list of arraylists is going to contain all the wordLists that are produced for every term of the String[] query, //in order to calculate the NGD scores between every term of the wordList and the term that was used as query in order to produce the spesific wordList List<ArrayList<String>> array_wordLists = new ArrayList<>(); List<String> wordList_previous = new ArrayList<>(); List<String> wordList_new = new ArrayList<>(); double convergence = 0;//we create the convergence percentage and initialize it String conv_percentages = "";//string that contains all the convergence percentages DataManipulation wordsmanipulation = new DataManipulation();//method to manipulate various word data (String, list<String>, etc) do { 
//if we run the algorithm for the 1st time we already have the query so we skip the loop below that produces the new array of query if (iteration_counter != 0) { wordList_previous = wordList_new; //we add the previous wordList to the finalList finalList = wordsmanipulation.AddAList(wordList_previous, finalList); List<String> query_new_list_total = new ArrayList<>(); int iteration_previous = iteration_counter - 1; Combinations_Engine cn = new Combinations_Engine();//call the class to combine the terms produced for (String query : queries) { List<String> ids = new ArrayList<>(); if (enginechoice.get(0)) { String id = domain + "/" + query + "/bing" + "/" + iteration_previous; ids.add(id); } if (enginechoice.get(1)) { String id = domain + "/" + query + "/google" + "/" + iteration_previous; ids.add(id); } if (enginechoice.get(2)) { String id = domain + "/" + query + "/yahoo" + "/" + iteration_previous; ids.add(id); } ElasticGetWordList ESget = new ElasticGetWordList();//we call this class to get the wordlist from the Elastic Search List<String> maxWords = ESget.getMaxWords(ids, SWebRankSettings.get(9).intValue(), config_path);//we are going to get a max amount of words int query_index = queries.indexOf(query); int size_query_new = SWebRankSettings.get(10).intValue();//the amount of new queries we are willing to create //we create the new queries for every query of the previous round by combining the words produced from this query List<String> query_new_list = cn.perform(maxWords, SWebRankSettings.get(7), queries, SWebRankSettings.get(6), query_index, size_query_new, config_path); //we add the list of new queries to the total list that containas all the new queries query_new_list_total.addAll(query_new_list); System.out.println("query pointer=" + query_index + ""); } //---------------------the following cleans a list from null and duplicates query_new_list_total = wordsmanipulation.clearListString(query_new_list_total); //--------------we create the new directory that 
our files are going to be saved String txt_directory = FilenameUtils.getBaseName(input.getName()); output_child_directory = output_parent_directory + txt_directory + "_level_" + iteration_counter + "//"; //----------------append the wordlist to a file------------------ wordsmanipulation.AppendWordList(query_new_list_total, output_child_directory + "queries_" + iteration_counter + ".txt"); if (query_new_list_total.size() < 1) { break; } //if we don't create new queries we end the while loop //total analysis' function is going to do all the work and return back what we need ta = new Total_analysis(); ta.perform(wordList_previous, iteration_counter, output_child_directory, domain, enginechoice, query_new_list_total, results_number, top_visible, mozMetrics, moz_threshold_option, moz_threshold.doubleValue(), top_count_moz, ContentSemantics, SensebotConcepts, SWebRankSettings, config_path); //we get the array of wordlists array_wordLists = ta.getarray_wordLists(); //get the wordlist that includes all the new queries wordList_new = ta.getwordList_total(); //---------------------the following cleans a list from null and duplicates------------- wordList_new = wordsmanipulation.clearListString(wordList_new); //----------------append the wordlist to a file-------------------- wordsmanipulation.AppendWordList(wordList_new, output_child_directory + "wordList.txt"); //the concergence percentage of this iteration convergence = ta.getConvergence();//we are going to use convergence score to check the convergence //a string that contains all the convergence percentage for each round separated by \n character conv_percentages = conv_percentages + "\n" + convergence; //a file that is going to include the convergence percentages wordsmanipulation.AppendString(conv_percentages, output_child_directory + "convergence_percentage.txt"); //we add the new wordList to the finalList finalList = wordsmanipulation.AddAList(wordList_new, finalList); //we set the query array to be equal to the 
query new total that we have created queries = query_new_list_total; //we increment the iteration_counter in order to count the iterations of the algorithm and to use the perf_limit iteration_counter++; } else {//the following source code is performed on the 1st run of the loop //------------we extract the parent path of the file String txt_directory = FilenameUtils.getBaseName(input.getName()); //----------we create a string that is going to be used for the corresponding directory of outputs output_child_directory = output_parent_directory + txt_directory + "_level_" + iteration_counter + "//"; //we call total analysis function performOld ta.perform(wordList_new, iteration_counter, output_child_directory, domain, enginechoice, queries, results_number, top_visible, mozMetrics, moz_threshold_option, moz_threshold.doubleValue(), top_count_moz, ContentSemantics, SensebotConcepts, SWebRankSettings, config_path); //we get the array of wordlists array_wordLists = ta.getarray_wordLists(); //get the wordlist that includes all the new queries wordList_new = ta.getwordList_total(); //---------------------the following cleans a list from null and duplicates wordList_new = wordsmanipulation.clearListString(wordList_new); //----------------append the wordlist to a file wordsmanipulation.AppendWordList(wordList_new, output_child_directory + "wordList.txt"); //----------------------------------------- iteration_counter++;//increase the iteration_counter that counts the iterations of the algorithm } } while (convergence < SWebRankSettings.get(5).doubleValue() && iteration_counter < SWebRankSettings.get(8).intValue());//while the convergence percentage is below the limit and the iteration_counter below the performance limit if (iteration_counter == 1) { finalList = wordsmanipulation.AddAList(wordList_new, finalList); } //--------------------content List---------------- if (!finalList.isEmpty()) { //---------------------the following cleans the final list from null and duplicates 
finalList = wordsmanipulation.clearListString(finalList); //write the keywords to a file boolean flag_file = false;//boolean flag to declare successful write to file flag_file = wordsmanipulation.AppendWordList(finalList, output_parent_directory + "total_content.txt"); if (!flag_file) { System.out.print("can not create the content file for: " + output_parent_directory + "total_content.txt"); } } //we are going to save the total content with its convergence on the ElasticSearch cluster in a separated index //Node node = nodeBuilder().client(true).clusterName("lshrankldacluster").node(); //Client client = node.client(); //get the elastic search indexes in a list List<String> elasticIndexes = ri.GetKeyFile(config_path, "elasticSearchIndexes"); Settings settings = ImmutableSettings.settingsBuilder().put("cluster.name", "lshrankldacluster") .build(); Client client = new TransportClient(settings) .addTransportAddress(new InetSocketTransportAddress("localhost", 9300)); JSONObject objEngineLevel = new JSONObject(); objEngineLevel.put("TotalContent", finalList);//we save the total content objEngineLevel.put("Convergences", conv_percentages);//we save the convergence percentages IndexRequest indexReq = new IndexRequest(elasticIndexes.get(0), "content", domain);//we save also the domain indexReq.source(objEngineLevel); IndexResponse indexRes = client.index(indexReq).actionGet(); //node.close(); client.close(); //----------------------convergence percentages writing to file--------------- //use the conv_percentages string if (conv_percentages.length() != 0) { boolean flag_file = false;//boolean flag to declare successful write to file flag_file = wordsmanipulation.AppendString(conv_percentages, output_parent_directory + "convergence_percentages.txt"); if (!flag_file) { System.out.print("can not create the convergence file for: " + output_parent_directory + "convergence_percentages.txt"); } } } }
From source file:ch.ifocusit.livingdoc.plugin.utils.AsciidocUtil.java
/**
 * Checks whether the given path refers to an AsciiDoc file.
 *
 * @param path the file path to test (must not be {@code null})
 * @return {@code true} if the path's string form is recognized as AsciiDoc
 *         by {@link #isAdoc(String)}
 */
public static boolean isAdoc(Path path) {
    // Delegate to the String-based overload, which holds the actual check.
    String pathAsString = path.toString();
    return isAdoc(pathAsString);
}
From source file:adalid.util.velocity.Matchmaker.java
private static void match(String path1, String path2) { MetaFolderSql meta1 = new MetaFolderSql(path1); MetaFolderSql meta2 = new MetaFolderSql(path2); boolean read1 = meta1.read(); boolean read2 = meta2.read(); if (read1 && read2) { String key1;//from ww w . j ava2 s . co m String sep1 = meta1.getMetaFolderPath().toString() + separator; logger.info("folder = " + sep1); for (Path path : meta1.getFiles().keySet()) { key1 = StringUtils.substringAfter(path.toString(), sep1); keys1.add(key1); } String key2; String sep2 = meta2.getMetaFolderPath().toString() + separator; logger.info("folder = " + sep2); for (Path path : meta2.getFiles().keySet()) { key2 = StringUtils.substringAfter(path.toString(), sep2); if (keys1.contains(key2)) { logger.info("file = " + key2); } } } }
From source file:com.basistech.rosette.apimodel.NonNullTest.java
@Parameterized.Parameters(name = "{0}") public static Collection<Object[]> data() throws URISyntaxException, IOException { File dir = new File("src/test/data"); Collection<Object[]> params = new ArrayList<>(); try (DirectoryStream<Path> paths = Files.newDirectoryStream(dir.toPath())) { for (Path file : paths) { if (file.toString().endsWith(".json")) { String className = file.getFileName().toString().replace(".json", ""); params.add(new Object[] { NonNullTest.class.getPackage().getName() + "." + className, file.toFile() }); }//from ww w . ja v a 2s . c o m } } return params; }
From source file:com.wavemaker.commons.util.WMFileUtils.java
/**
 * Walks the file tree rooted at {@code basePath} and returns the relative
 * paths of all files matching the given pattern.
 *
 * @param pattern  the file pattern to match (semantics defined by
 *                 {@code FilePatternMatchVisitor})
 * @param basePath root directory of the walk; matches are relative to it
 * @return the matched paths as strings
 * @throws WMRuntimeException if the tree walk fails with an I/O error
 */
public static Collection<String> findMatchedRelativePaths(String pattern, String basePath) {
    FilePatternMatchVisitor visitor = new FilePatternMatchVisitor(pattern, basePath);
    try {
        Files.walkFileTree(Paths.get(basePath), visitor);
        Collection<Path> matched = visitor.getMatchedPaths();
        // Presize the result list to avoid intermediate growth.
        List<String> relativePaths = new ArrayList<>(matched.size());
        for (Path matchedPath : matched) {
            relativePaths.add(matchedPath.toString());
        }
        return relativePaths;
    } catch (IOException e) {
        throw new WMRuntimeException("Failed to find matched ignore patterns for " + pattern, e);
    }
}
From source file:com.pixlabs.web.utils.Mp3Finder.java
/** * @param path Path of the directory that should be looked into. * @return a linkedlist containing all the Mp3 files found in the directory and subdirectories. */// w w w . ja v a2 s. c o m public static LinkedList<Mp3FileAdvanced> mp3InDirectories(Path path) { Iterator it = FileUtils.iterateFiles(new File(path.toString()), new String[] { "mp3" }, true); LinkedList<Mp3FileAdvanced> mp3List = new LinkedList<>(); while (it.hasNext()) { File file = (File) it.next(); try { mp3List.add(new Mp3FileAdvanced(file)); } catch (InvalidDataException | IOException | UnsupportedTagException e) { e.printStackTrace(); } } return mp3List; }