List of usage examples for java.lang.Thread.NORM_PRIORITY
int NORM_PRIORITY
The default priority that is assigned to a thread. Thread also defines MIN_PRIORITY (1) and MAX_PRIORITY (10); NORM_PRIORITY is 5.
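Priorities are usually expressed relative to these named constants rather than as bare numbers, which is the pattern in every example below (NORM_PRIORITY - 2, NORM_PRIORITY - 1, and so on). A minimal, self-contained sketch of that pattern (the class and its worker Runnable are illustrative, not from any of the projects listed):

public class PriorityDemo {
  public static void main(String[] args) throws InterruptedException {
    // A new thread inherits the priority of its creator; for the main
    // thread that default is NORM_PRIORITY (5).
    Thread worker = new Thread(() -> System.out.println(
        "worker running at priority " + Thread.currentThread().getPriority()));

    // Nudge the priority relative to the named constants instead of
    // hard-coding a number, clamping to the legal MIN..MAX range (1..10).
    worker.setPriority(Math.max(Thread.MIN_PRIORITY, Thread.NORM_PRIORITY - 2));
    worker.start();
    worker.join();
  }
}

Note that priority is only a hint to the scheduler; the examples below use it to keep background log writers and polling threads from competing with interactive work.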
From source file:com.google.dart.compiler.metrics.Tracer.java
private BlockingQueue<TraceEvent> openLogWriter(final Writer writer, final String fileName) {
  try {
    if (outputFormat.equals(Format.HTML)) {
      writer.write("<HTML isdump=\"true\"><body>"
          + "<style>body {font-family:Helvetica; margin-left:15px;}</style>"
          + "<h2>Performance dump from GWT</h2>"
          + "<div>This file contains data that can be viewed with the "
          + "<a href=\"http://code.google.com/speedtracer\">SpeedTracer</a> "
          + "extension under the <a href=\"http://chrome.google.com/\">"
          + "Chrome</a> browser.</div><p><span id=\"info\">"
          + "(You must install the SpeedTracer extension to open this file)</span></p>"
          + "<div style=\"display: none\" id=\"traceData\" version=\"0.17\">\n");
    }
  } catch (IOException e) {
    System.err.println("Unable to write to dart.speedtracerlog '"
        + (fileName == null ? "" : fileName) + "'");
    e.printStackTrace();
    return null;
  }

  final BlockingQueue<TraceEvent> eventQueue = new LinkedBlockingQueue<TraceEvent>();

  Runtime.getRuntime().addShutdownHook(new Thread() {
    @Override
    public void run() {
      try {
        // Wait for the other thread to drain the queue.
        eventQueue.add(shutDownSentinel);
        shutDownLatch.await();
      } catch (InterruptedException e) {
        // Ignored
      }
    }
  });

  // Background thread to write SpeedTracer events to log
  Thread logWriterWorker = new LogWriterThread(writer, fileName, eventQueue);

  // Lower than normal priority.
  logWriterWorker.setPriority((Thread.MIN_PRIORITY + Thread.NORM_PRIORITY) / 2);

  /*
   * This thread must be daemon, otherwise shutdown hooks would never begin to
   * run, and an app wouldn't finish.
   */
  logWriterWorker.setDaemon(true);
  logWriterWorker.setName("SpeedTracerLogger writer");
  logWriterWorker.start();
  return eventQueue;
}
From source file:JNLPAppletLauncher.java
public void start() {
  if (DEBUG) {
    System.err.println("Applet.start");
  }

  if (isInitOk) {
    if (firstStart) { // first time
      firstStart = false;

      Thread startupThread = new Thread() {
        public void run() {
          initAndStartApplet();
        }
      };
      startupThread.setName("AppletLauncher-Startup");
      startupThread.setPriority(Thread.NORM_PRIORITY - 1);
      startupThread.start();
    } else if (appletStarted) {
      checkNoDDrawAndUpdateDeploymentProperties();

      // We have to start the applet again (start can be called multiple
      // times, e.g. once per tabbed browsing session).
      subApplet.start();
    }
  }
}
From source file:lenscorrection.Distortion_Correction.java
static protected void extractSIFTPointsThreaded(final int index, final List<Feature>[] siftFeatures,
    final List<PointMatch>[] inliers, final AbstractAffineModel2D<?>[] models) {
  // save all matching candidates
  final List<PointMatch>[] candidates = new List[siftFeatures.length - 1];

  final Thread[] threads = MultiThreading.newThreads();
  final AtomicInteger ai = new AtomicInteger(0); // start at second slice

  for (int ithread = 0; ithread < threads.length; ++ithread) {
    threads[ithread] = new Thread() {
      @Override
      public void run() {
        setPriority(Thread.NORM_PRIORITY);

        for (int j = ai.getAndIncrement(); j < candidates.length; j = ai.getAndIncrement()) {
          final int i = (j < index ? j : j + 1);
          candidates[j] = FloatArray2DSIFT.createMatches(siftFeatures[index], siftFeatures[i], 1.5f,
              null, Float.MAX_VALUE, 0.5f);
        }
      }
    };
  }
  MultiThreading.startAndJoin(threads);

  // get rid of the outliers and save the rigid transformations to match
  // the inliers
  final AtomicInteger ai2 = new AtomicInteger(0);
  for (int ithread = 0; ithread < threads.length; ++ithread) {
    threads[ithread] = new Thread() {
      @Override
      public void run() {
        setPriority(Thread.NORM_PRIORITY);

        for (int i = ai2.getAndIncrement(); i < candidates.length; i = ai2.getAndIncrement()) {
          final List<PointMatch> tmpInliers = new ArrayList<PointMatch>();
          // RigidModel2D m =
          // RigidModel2D.estimateBestModel(candidates.get(i),
          // tmpInliers, sp.min_epsilon, sp.max_epsilon,
          // sp.min_inlier_ratio);

          final AbstractAffineModel2D<?> m;
          switch (sp.expectedModelIndex) {
          case 0:
            m = new TranslationModel2D();
            break;
          case 1:
            m = new RigidModel2D();
            break;
          case 2:
            m = new SimilarityModel2D();
            break;
          case 3:
            m = new AffineModel2D();
            break;
          default:
            return;
          }

          boolean modelFound = false;
          try {
            modelFound = m.filterRansac(candidates[i], tmpInliers, 1000, sp.maxEpsilon,
                sp.minInlierRatio, 10);
          } catch (final NotEnoughDataPointsException e) {
            modelFound = false;
          }

          if (modelFound)
            IJ.log("Model found:\n " + candidates[i].size() + " candidates\n " + tmpInliers.size()
                + " inliers\n " + String.format("%.2f", m.getCost()) + "px average displacement");
          else
            IJ.log("No Model found.");

          inliers[index * (sp.numberOfImages - 1) + i] = tmpInliers;
          models[index * (sp.numberOfImages - 1) + i] = m;
          // System.out.println("**** MODEL ADDED: " +
          // (index*(sp.numberOfImages-1)+i));
        }
      }
    };
  }
  MultiThreading.startAndJoin(threads);
}
From source file:org.apache.hadoop.hbase.client.TestAdmin.java
void splitTest(byte[] splitPoint, byte[][] familyNames, int[] rowCounts, int numVersions, int blockSize)
    throws Exception {
  TableName tableName = TableName.valueOf("testForceSplit");
  StringBuilder sb = new StringBuilder();
  // Add tail to String so can see better in logs where a test is running.
  for (int i = 0; i < rowCounts.length; i++) {
    sb.append("_").append(Integer.toString(rowCounts[i]));
  }
  assertFalse(admin.tableExists(tableName));
  final HTable table = TEST_UTIL.createTable(tableName, familyNames, numVersions, blockSize);

  int rowCount = 0;
  byte[] q = new byte[0];

  // insert rows into column families. The number of rows that have values
  // in a specific column family is decided by rowCounts[familyIndex]
  for (int index = 0; index < familyNames.length; index++) {
    ArrayList<Put> puts = new ArrayList<Put>(rowCounts[index]);
    for (int i = 0; i < rowCounts[index]; i++) {
      byte[] k = Bytes.toBytes(i);
      Put put = new Put(k);
      put.add(familyNames[index], q, k);
      puts.add(put);
    }
    table.put(puts);

    if (rowCount < rowCounts[index]) {
      rowCount = rowCounts[index];
    }
  }

  // get the initial layout (should just be one region)
  Map<HRegionInfo, ServerName> m = table.getRegionLocations();
  LOG.info("Initial regions (" + m.size() + "): " + m);
  assertTrue(m.size() == 1);

  // Verify row count
  Scan scan = new Scan();
  ResultScanner scanner = table.getScanner(scan);
  int rows = 0;
  for (@SuppressWarnings("unused") Result result : scanner) {
    rows++;
  }
  scanner.close();
  assertEquals(rowCount, rows);

  // Have an outstanding scan going on to make sure we can scan over splits.
  scan = new Scan();
  scanner = table.getScanner(scan);
  // Scan first row so we are into first region before split happens.
  scanner.next();

  // Split the table
  this.admin.split(tableName.getName(), splitPoint);

  final AtomicInteger count = new AtomicInteger(0);
  Thread t = new Thread("CheckForSplit") {
    public void run() {
      for (int i = 0; i < 45; i++) {
        try {
          sleep(1000);
        } catch (InterruptedException e) {
          continue;
        }
        // check again, using a fresh handle (the outer 'table' is final,
        // so it cannot be reassigned from this anonymous class)
        Map<HRegionInfo, ServerName> regions = null;
        try {
          HTable checkTable = new HTable(conf, tableName);
          regions = checkTable.getRegionLocations();
        } catch (IOException e) {
          e.printStackTrace();
        }
        if (regions == null)
          continue;
        count.set(regions.size());
        if (count.get() >= 2) {
          LOG.info("Found: " + regions);
          break;
        }
        LOG.debug("Cycle waiting on split");
      }
      LOG.debug("CheckForSplit thread exited, current region count: " + count.get());
    }
  };
  t.setPriority(Thread.NORM_PRIORITY - 2);
  t.start();
  t.join();

  // Verify row count
  rows = 1; // We counted one row above.
  for (@SuppressWarnings("unused") Result result : scanner) {
    rows++;
    if (rows > rowCount) {
      scanner.close();
      assertTrue("Scanned more than expected (" + rowCount + ")", false);
    }
  }
  scanner.close();
  assertEquals(rowCount, rows);

  Map<HRegionInfo, ServerName> regions = null;
  try {
    regions = table.getRegionLocations();
  } catch (IOException e) {
    e.printStackTrace();
  }
  assertEquals(2, regions.size());
  Set<HRegionInfo> hRegionInfos = regions.keySet();
  HRegionInfo[] r = hRegionInfos.toArray(new HRegionInfo[hRegionInfos.size()]);
  if (splitPoint != null) {
    // make sure the split point matches our explicit configuration
    assertEquals(Bytes.toString(splitPoint), Bytes.toString(r[0].getEndKey()));
    assertEquals(Bytes.toString(splitPoint), Bytes.toString(r[1].getStartKey()));
    LOG.debug("Properly split on " + Bytes.toString(splitPoint));
  } else {
    if (familyNames.length > 1) {
      int splitKey = Bytes.toInt(r[0].getEndKey());
      // check if splitKey is based on the largest column family
      // in terms of its store size
      int deltaForLargestFamily = Math.abs(rowCount / 2 - splitKey);
      LOG.debug("SplitKey=" + splitKey + "&deltaForLargestFamily=" + deltaForLargestFamily
          + ", r=" + r[0]);
      for (int index = 0; index < familyNames.length; index++) {
        int delta = Math.abs(rowCounts[index] / 2 - splitKey);
        if (delta < deltaForLargestFamily) {
          assertTrue("Delta " + delta + " for family " + index
              + " should be at least deltaForLargestFamily " + deltaForLargestFamily, false);
        }
      }
    }
  }
  TEST_UTIL.deleteTable(tableName);
  table.close();
}
From source file:ffx.ui.ModelingPanel.java
/**
 * Launch the active command on the active system in the specified
 * directory.
 *
 * @param command The command to be executed.
 * @param dir     The directory to execute the command in.
 * @return a {@link ffx.ui.FFXExec} object.
 */
public FFXExec launch(String command, String dir) {
  logger.log(Level.INFO, "Command: {0}\nDirectory: {1}", new Object[] { command, dir });
  synchronized (this) {
    // Check that the TINKER *.exe exists in TINKER/bin
    String path = MainPanel.ffxDir.getAbsolutePath();
    File exe = new File(path + File.separator + activeCommand.toLowerCase());
    if (!exe.exists()) {
      exe = new File(exe.getAbsolutePath() + ".exe");
      if (!exe.exists()) {
        String message = "The " + activeCommand + " executable was not found in " + path
            + ". Please use the 'Set TINKER...' dialog to change the TINKER directory.";
        JOptionPane.showMessageDialog(null, message, "Could not launch " + activeCommand,
            JOptionPane.ERROR_MESSAGE);
        return null;
      }
    }
    // Check that the directory to execute the command in is valid
    File dirf = new File(dir);
    if (!dirf.exists()) {
      logger.log(Level.WARNING, "Directory doesn''t exist: {0}", dirf.getAbsolutePath());
      return null;
    }
    // Check if we need a key file
    if (!commandFileTypes.contains(FileType.ANY)) {
      if (activeSystem == null) {
        return null;
      }
      activeFileType = FileType.XYZ;
      // Check that the TINKER command executes on this file type
      if (!commandFileTypes.contains(activeFileType)) {
        String message = activeCommand.toUpperCase() + " does not execute on " + activeFileType
            + " files.";
        JOptionPane.showMessageDialog(null, message, "Could not launch " + activeCommand,
            JOptionPane.ERROR_MESSAGE);
        return null;
      }
      // Check that a key file exists or prompt to create one.
      if (activeSystem.getKeyFile() == null) {
        mainPanel.createKeyFile(activeSystem);
        // Give up if the key file is null.
        if (activeSystem.getKeyFile() == null) {
          return null;
        }
      }
    } else {
      // Determine names to use for the output of Protein/Nucleic
      command = createCommandInput();
      String structureName = commandTextArea.getText().trim();
      if (!structureName.equalsIgnoreCase("")) {
        structureName = (structureName.split("\n"))[0];
        if (structureName != null) {
          structureName = structureName.trim();
          int dot = structureName.lastIndexOf(".");
          if (dot > 0) {
            structureName = structureName.substring(0, dot);
          }
        }
      }
      // If the above fails, just use the name of the executable
      // (protein or nucleic)
      if (structureName == null) {
        structureName = activeCommand.toLowerCase();
      }
      File file = new File(dir + File.separator + structureName + ".xyz");
      file = SystemFilter.version(file);
      activeSystem = new FFXSystem(file, null, Keyword.loadProperties(file));
      File logFile = new File(file.getParent() + File.separator + structureName + ".log");
      activeSystem.setLogFile(logFile);
      loadLogSettings();
      activeFileType = FileType.ANY;
      // Need to have a parameter file chosen.
      mainPanel.openKey(activeSystem, true);
      if (activeSystem.getKeyFile() == null) {
        return null;
      }
    }
    // Decide on a Log file
    if (((String) logSettings.getSelectedItem()).startsWith("Create")) {
      File newLog = SystemFilter.version(activeSystem.getLogFile());
      activeSystem.setLogFile(newLog);
    }
    String logName = activeSystem.getLogFile().getAbsolutePath();
    // Determine the command string
    command = createCommandInput();
    // If a new structure file will be created, determine what the name
    // will be.
    File newFile = null;
    if (commandActions.toUpperCase().contains("LOAD")) {
      File oldFile = activeSystem.getFile();
      if (commandActions.toUpperCase().contains("LOADXYZ")) {
        String fileName = oldFile.getAbsolutePath();
        int dot = fileName.lastIndexOf(".");
        if (dot > 0) {
          fileName = fileName.substring(0, dot) + ".xyz";
        }
        oldFile = new File(fileName);
      } else if (commandActions.toUpperCase().contains("LOADINT")) {
        String fileName = oldFile.getAbsolutePath();
        int dot = fileName.lastIndexOf(".");
        if (dot > 0) {
          fileName = fileName.substring(0, dot) + ".int";
        }
        oldFile = new File(fileName);
      } else if (commandActions.toUpperCase().contains("LOADPDB")) {
        String fileName = oldFile.getAbsolutePath();
        int dot = fileName.lastIndexOf(".");
        if (dot > 0) {
          fileName = fileName.substring(0, dot) + ".pdb";
        }
        oldFile = new File(fileName);
      }
      newFile = SystemFilter.version(oldFile);
    }
    // Save any changes that have been made to the key file
    mainPanel.getKeywordPanel().saveChanges();
    // Remove any TINKER *.END files
    removeEnd();
    // Create the input file
    String commandInput = commandTextArea.getText();
    if (commandInput != null && !commandInput.trim().equalsIgnoreCase("")) {
      File inputFile = new File(dir + File.separator + activeCommand.toLowerCase() + ".in");
      inputFile.deleteOnExit();
      try {
        FileWriter fw = new FileWriter(inputFile);
        fw.write(commandInput);
        fw.close();
      } catch (Exception e) {
        logger.info(e.toString());
        return null;
      }
    }
    // If the job progressively modifies coordinates, open a copy of it.
    boolean openOnto = false;
    if (commandActions.toUpperCase().contains("CONNECT")) {
      // If a version file is created, open it onto the structure used
      // to display the job.
      if (newFile != null) {
        openOnto = true;
      }
      mainPanel.open(activeSystem.getFile(), activeCommand);
      try {
        while (mainPanel.isOpening()) {
          wait(10);
        }
      } catch (Exception e) {
        logger.info(e.toString());
        return null;
      }
      activeSystem = mainPanel.getHierarchy().getActive();
    }
    // Finally, create and execute the command in a new thread.
    FFXExec tinkerExec = new FFXExec(activeSystem, logName, command, dir, mainPanel, newFile, openOnto);
    Thread tinkerThread = new Thread(tinkerExec);
    tinkerThread.setPriority(Thread.NORM_PRIORITY);
    tinkerThread.setName(logName);
    // If the job progressively modifies coordinates, connect to it.
    if (commandActions.toUpperCase().contains("CONNECT")) {
      mainPanel.connectToTINKER(activeSystem, tinkerThread);
    } else {
      tinkerThread.start();
    }
    // If some action should be taken when the job finishes,
    // add it to the Modeling Jobs Vector
    if (!commandActions.equalsIgnoreCase("NONE")) {
      executingCommands.add(tinkerThread);
      // mainPanel.getLogPanel().refreshStatus();
    }
    return tinkerExec;
  }
}
From source file:com.septrivium.augeo.ui.SipHome.java
public ImageLoaderConfiguration.Builder getUILConfig() {
  ImageLoaderConfiguration.Builder config = new ImageLoaderConfiguration.Builder(this);
  config.threadPriority(Thread.NORM_PRIORITY - 2);
  config.denyCacheImageMultipleSizesInMemory();
  config.diskCacheFileNameGenerator(new Md5FileNameGenerator());
  config.diskCacheSize(50 * 1024 * 1024); // 50 MiB
  config.tasksProcessingOrder(QueueProcessingType.LIFO);
  config.writeDebugLogs(); // Remove for release app
  return config;
}
From source file:org.apache.hadoop.hbase.client.TestAdmin1.java
void splitTest(byte[] splitPoint, byte[][] familyNames, int[] rowCounts, int numVersions, int blockSize)
    throws Exception {
  TableName tableName = TableName.valueOf("testForceSplit");
  StringBuilder sb = new StringBuilder();
  // Add tail to String so can see better in logs where a test is running.
  for (int i = 0; i < rowCounts.length; i++) {
    sb.append("_").append(Integer.toString(rowCounts[i]));
  }
  assertFalse(admin.tableExists(tableName));
  try (final Table table = TEST_UTIL.createTable(tableName, familyNames, numVersions, blockSize);
      final RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
    int rowCount = 0;
    byte[] q = new byte[0];

    // insert rows into column families. The number of rows that have values
    // in a specific column family is decided by rowCounts[familyIndex]
    for (int index = 0; index < familyNames.length; index++) {
      ArrayList<Put> puts = new ArrayList<Put>(rowCounts[index]);
      for (int i = 0; i < rowCounts[index]; i++) {
        byte[] k = Bytes.toBytes(i);
        Put put = new Put(k);
        put.addColumn(familyNames[index], q, k);
        puts.add(put);
      }
      table.put(puts);

      if (rowCount < rowCounts[index]) {
        rowCount = rowCounts[index];
      }
    }

    // get the initial layout (should just be one region)
    List<HRegionLocation> m = locator.getAllRegionLocations();
    LOG.info("Initial regions (" + m.size() + "): " + m);
    assertTrue(m.size() == 1);

    // Verify row count
    Scan scan = new Scan();
    ResultScanner scanner = table.getScanner(scan);
    int rows = 0;
    for (@SuppressWarnings("unused") Result result : scanner) {
      rows++;
    }
    scanner.close();
    assertEquals(rowCount, rows);

    // Have an outstanding scan going on to make sure we can scan over splits.
    scan = new Scan();
    scanner = table.getScanner(scan);
    // Scan first row so we are into first region before split happens.
    scanner.next();

    // Split the table
    this.admin.split(tableName, splitPoint);

    final AtomicInteger count = new AtomicInteger(0);
    Thread t = new Thread("CheckForSplit") {
      @Override
      public void run() {
        for (int i = 0; i < 45; i++) {
          try {
            sleep(1000);
          } catch (InterruptedException e) {
            continue;
          }
          // check again
          List<HRegionLocation> regions = null;
          try {
            regions = locator.getAllRegionLocations();
          } catch (IOException e) {
            e.printStackTrace();
          }
          if (regions == null)
            continue;
          count.set(regions.size());
          if (count.get() >= 2) {
            LOG.info("Found: " + regions);
            break;
          }
          LOG.debug("Cycle waiting on split");
        }
        LOG.debug("CheckForSplit thread exited, current region count: " + count.get());
      }
    };
    t.setPriority(Thread.NORM_PRIORITY - 2);
    t.start();
    t.join();

    // Verify row count
    rows = 1; // We counted one row above.
    for (@SuppressWarnings("unused") Result result : scanner) {
      rows++;
      if (rows > rowCount) {
        scanner.close();
        assertTrue("Scanned more than expected (" + rowCount + ")", false);
      }
    }
    scanner.close();
    assertEquals(rowCount, rows);

    List<HRegionLocation> regions = null;
    try {
      regions = locator.getAllRegionLocations();
    } catch (IOException e) {
      e.printStackTrace();
    }
    assertEquals(2, regions.size());
    if (splitPoint != null) {
      // make sure the split point matches our explicit configuration
      assertEquals(Bytes.toString(splitPoint),
          Bytes.toString(regions.get(0).getRegionInfo().getEndKey()));
      assertEquals(Bytes.toString(splitPoint),
          Bytes.toString(regions.get(1).getRegionInfo().getStartKey()));
      LOG.debug("Properly split on " + Bytes.toString(splitPoint));
    } else {
      if (familyNames.length > 1) {
        int splitKey = Bytes.toInt(regions.get(0).getRegionInfo().getEndKey());
        // check if splitKey is based on the largest column family
        // in terms of its store size
        int deltaForLargestFamily = Math.abs(rowCount / 2 - splitKey);
        LOG.debug("SplitKey=" + splitKey + "&deltaForLargestFamily=" + deltaForLargestFamily
            + ", r=" + regions.get(0).getRegionInfo());
        for (int index = 0; index < familyNames.length; index++) {
          int delta = Math.abs(rowCounts[index] / 2 - splitKey);
          if (delta < deltaForLargestFamily) {
            assertTrue("Delta " + delta + " for family " + index + " should be at least "
                + "deltaForLargestFamily " + deltaForLargestFamily, false);
          }
        }
      }
    }
    TEST_UTIL.deleteTable(tableName);
  }
}
From source file:com.codename1.impl.android.AndroidImplementation.java
/**
 * Returns the platform EDT thread priority.
 */
public int getEDTThreadPriority() {
  return Thread.NORM_PRIORITY;
}
From source file:org.apache.lens.server.query.QueryExecutionServiceImpl.java
private void startQueryCancellationPool() {
  ThreadFactory factory = new BasicThreadFactory.Builder()
      .namingPattern("query-cancellation-pool-Thread-%d")
      .priority(Thread.NORM_PRIORITY).build();
  // Using fixed values for the pool: corePoolSize = maximumPoolSize = 3 and keepAliveTime = 60 secs
  queryCancellationPool = new ThreadPoolExecutor(3, 3, 60, TimeUnit.SECONDS,
      new LinkedBlockingQueue(), factory);
}
From source file:org.apache.lens.server.query.QueryExecutionServiceImpl.java
private void startQueryExpirer() {
  ThreadFactory factory = new BasicThreadFactory.Builder().namingPattern("QueryExpirer-%d").daemon(true)
      .priority(Thread.NORM_PRIORITY).build();
  queryExpirer = Executors.newSingleThreadScheduledExecutor(factory);
  long expiryRunInterval = conf.getLong(QUERY_EXPIRY_INTERVAL_MILLIS, DEFAULT_QUERY_EXPIRY_INTERVAL_MILLIS);
  queryExpirer.scheduleWithFixedDelay(new Runnable() {
    @Override
    public void run() {
      try {
        expireQueries();
      } catch (Exception e) {
        incrCounter(QUERY_EXPIRY_FAILURE_COUNTER);
        log.error("Unable to expire queries", e);
      }
    }
  }, expiryRunInterval, expiryRunInterval, TimeUnit.MILLISECONDS);
  log.info("Enabled periodic expiry of queries at {} millis interval", expiryRunInterval);
}