List of usage examples for java.nio.channels FileLock release
public abstract void release() throws IOException;
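Before the full examples below, a minimal sketch of the basic acquire/release pattern, assuming an invented file name and payload: take an exclusive lock on the whole file, do the work, and release the lock in a finally block so it is freed even when the write throws.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.charset.StandardCharsets;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class FileLockReleaseSketch {
    public static void main(String[] args) throws IOException {
        // File name and contents are placeholders for illustration only
        try (FileChannel channel = FileChannel.open(Paths.get("example.cfg"),
                StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
            FileLock lock = channel.lock(); // blocks until the exclusive lock is granted
            try {
                channel.write(ByteBuffer.wrap("updated contents\n".getBytes(StandardCharsets.UTF_8)));
            } finally {
                lock.release(); // always release, even if the write fails
            }
        }
    }
}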
From source file:com.thoughtworks.go.config.GoConfigDataSource.java
public synchronized GoConfigSaveResult writeWithLock(UpdateConfigCommand updatingCommand, GoConfigHolder configHolder) {
    FileChannel channel = null;
    FileOutputStream outputStream = null;
    FileLock lock = null;
    try {
        RandomAccessFile randomAccessFile = new RandomAccessFile(fileLocation(), "rw");
        channel = randomAccessFile.getChannel();
        lock = channel.lock();
        // Need to convert to xml before we try to write it to the config file.
        // If our cruiseConfig fails XSD validation, we don't want to write it incorrectly.
        String configAsXml = getModifiedConfig(updatingCommand, configHolder);
        randomAccessFile.seek(0);
        randomAccessFile.setLength(0);
        outputStream = new FileOutputStream(randomAccessFile.getFD());
        LOGGER.info(String.format("[Configuration Changed] Saving updated configuration."));
        IOUtils.write(configAsXml, outputStream);
        ConfigSaveState configSaveState = shouldMergeConfig(updatingCommand, configHolder) ? ConfigSaveState.MERGED
                : ConfigSaveState.UPDATED;
        return new GoConfigSaveResult(internalLoad(configAsXml, getConfigUpdatingUser(updatingCommand)), configSaveState);
    } catch (ConfigFileHasChangedException e) {
        LOGGER.warn("Configuration file could not be merged successfully after a concurrent edit: " + e.getMessage(), e);
        throw e;
    } catch (GoConfigInvalidException e) {
        LOGGER.warn("Configuration file is invalid: " + e.getMessage(), e);
        throw bomb(e.getMessage(), e);
    } catch (Exception e) {
        LOGGER.error("Configuration file is not valid: " + e.getMessage(), e);
        throw bomb(e.getMessage(), e);
    } finally {
        if (channel != null && lock != null) {
            try {
                lock.release();
                channel.close();
                IOUtils.closeQuietly(outputStream);
            } catch (IOException e) {
                LOGGER.error("Error occurred when releasing file lock and closing file.", e);
            }
        }
        LOGGER.debug("[Config Save] Done writing with lock");
    }
}
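The example above releases the lock by hand in the finally block. Since FileLock implements AutoCloseable (Java 7+), the same release can also be expressed with try-with-resources; a minimal sketch, with the file name invented for illustration:

try (FileChannel channel = new RandomAccessFile("cruise-config.xml", "rw").getChannel();
     FileLock lock = channel.lock()) {
    // ... rewrite the file while holding the exclusive lock ...
}   // lock.release() and channel.close() happen automatically here, in reverse order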
From source file:com.concursive.connect.config.ApplicationPrefs.java
/**
 * Initializes preferences
 *
 * @param context ServletContext
 */
public void initializePrefs(ServletContext context) {
    LOG.info("Initializing...");
    // Load the application node name, if any
    try {
        Properties instanceProperties = new Properties();
        instanceProperties.load(context.getResourceAsStream("/WEB-INF/instance.property"));
        node = instanceProperties.getProperty("node", DEFAULT_NODE);
        LOG.info("Node: " + node);
    } catch (Exception e) {
        LOG.info("Default Node: " + DEFAULT_NODE);
        node = DEFAULT_NODE;
    }
    // Determine the file library
    String fileLibrary = retrieveFileLibraryLocation(context);
    if (fileLibrary != null) {
        loadProperties(fileLibrary);
        this.add(FILE_LIBRARY_PATH, fileLibrary);
        configureDebug();
        verifyKey(context, fileLibrary);
        configureConnectionPool(context);
        configureFreemarker(context);
        configureWebdavManager(context);
        configureSystemSettings(context);
        configureCache(context);
        if (isConfigured()) {
            if (ApplicationVersion.isOutOfDate(this)) {
                LOG.info("Upgrade triggered... obtaining lock to continue");
                // Use a lock file to start upgrading
                File upgradeLockFile = new File(fileLibrary + "upgrade.lock");
                FileChannel fileChannel = null;
                FileLock fileLock = null;
                try {
                    // Configure the file for locking
                    fileChannel = new RandomAccessFile(upgradeLockFile, "rw").getChannel();
                    // Use fileChannel.lock which blocks until the lock is obtained
                    fileLock = fileChannel.lock();
                    // Reload the prefs to make sure the upgrade isn't already complete
                    loadProperties(fileLibrary);
                    if (ApplicationVersion.isOutOfDate(this)) {
                        // The application needs an update
                        LOG.info("Installed version " + ApplicationVersion.getInstalledVersion(this)
                                + " will be upgraded to " + ApplicationVersion.VERSION);
                        performUpgrade(context);
                    }
                } catch (Exception e) {
                    LOG.error("initializePrefs-> performUpgrade", e);
                } finally {
                    try {
                        if (fileLock != null) {
                            fileLock.release();
                        }
                        if (fileChannel != null) {
                            fileChannel.close();
                        }
                    } catch (Exception eclose) {
                        LOG.error("initializePrefs-> lock", eclose);
                    }
                }
            }
            if (!ApplicationVersion.isOutOfDate(this)) {
                // Start the services now that everything is ready
                initializeServices(context);
            }
        }
    }
    configureDefaultBehavior(context);
    loadApplicationDictionaries(context);
}
From source file:com.clustercontrol.agent.job.PublicKeyThread.java
/**
 * Appends the given public key to the authorized_keys file.
 *
 * @param publicKey
 * @return
 */
private synchronized boolean addKey(String publicKey) {
    m_log.debug("add key start");
    if (SKIP_KEYFILE_UPDATE) {
        m_log.info("skipped appending publicKey");
        return true;
    }
    // Resolve the authorized_keys file path
    String fileName = AgentProperties.getProperty(execUser.toLowerCase() + AUTHORIZED_KEY_PATH);
    m_log.debug("fileName" + fileName);
    if (fileName == null || fileName.length() == 0)
        return false;
    // Open the file
    File fi = new File(fileName);
    RandomAccessFile randomAccessFile = null;
    FileChannel channel = null;
    FileLock lock = null;
    boolean add = false;
    try {
        // Create a RandomAccessFile
        randomAccessFile = new RandomAccessFile(fi, "rw");
        // Get the FileChannel
        channel = randomAccessFile.getChannel();
        // Try to acquire the lock, retrying until the timeout expires
        for (int i = 0; i < (FILELOCK_TIMEOUT / FILELOCK_WAIT); i++) {
            if (null != (lock = channel.tryLock())) {
                break;
            }
            m_log.info("waiting for locked file... [" + (i + 1) + "/" + (FILELOCK_TIMEOUT / FILELOCK_WAIT)
                    + " : " + fileName + "]");
            Thread.sleep(FILELOCK_WAIT);
        }
        if (null == lock) {
            m_log.warn("file locking timeout.");
            return false;
        }
        // Append the key (guarded section)
        synchronized (authKeyLock) {
            // Move to the end of the file
            channel.position(channel.size());
            // Data to append
            String writeData = "\n" + publicKey;
            // m_log.debug("add key : " + writeData);
            // Write the data through a byte buffer
            ByteBuffer buffer = ByteBuffer.allocate(512);
            buffer.clear();
            buffer.put(writeData.getBytes());
            buffer.flip();
            channel.write(buffer);
        }
        add = true;
    } catch (Exception e) {
        m_log.error(e);
    } finally {
        try {
            if (channel != null) {
                channel.close();
            }
            if (randomAccessFile != null) {
                randomAccessFile.close();
            }
            if (lock != null) {
                // Release the file lock
                lock.release();
            }
        } catch (Exception e) {
            // ignore errors during cleanup
        }
    }
    return add;
}
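Distilled from the pattern above: poll tryLock() in a bounded loop instead of blocking in lock(), and release in finally. This is a sketch only; the constant values and method name are assumptions for illustration, not taken from the original source.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;

class BoundedTryLockSketch {
    static final long FILELOCK_TIMEOUT_MS = 10_000;  // assumed timeout
    static final long FILELOCK_WAIT_MS = 500;        // assumed retry interval

    static boolean appendWithLock(FileChannel channel, ByteBuffer data)
            throws IOException, InterruptedException {
        FileLock lock = null;
        for (long waited = 0; waited < FILELOCK_TIMEOUT_MS; waited += FILELOCK_WAIT_MS) {
            lock = channel.tryLock();          // returns null if another process holds the lock
            if (lock != null) {
                break;
            }
            Thread.sleep(FILELOCK_WAIT_MS);    // back off and retry
        }
        if (lock == null) {
            return false;                      // gave up after the timeout
        }
        try {
            channel.position(channel.size());  // append at the end of the file
            channel.write(data);
            return true;
        } finally {
            lock.release();                    // free the lock before the caller closes the channel
        }
    }
}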
From source file:net.modsec.ms.connector.ConnRequestHandler.java
/**
 * Writes the modified modsecurity configurations to configuration file.
 * @param json contains modsecurity configurations as json object.
 */
@SuppressWarnings("unchecked")
public static void onWriteMSConfig(JSONObject json) {
    log.info("onWriteMSConfig called.. : " + json.toJSONString());
    MSConfig serviceCfg = MSConfig.getInstance();
    JSONObject jsonResp = new JSONObject();
    String fileName = serviceCfg.getConfigMap().get("MSConfigFile");
    String modifiedStr = "";
    InputStream ins = null;
    FileOutputStream out = null;
    BufferedReader br = null;
    try {
        File file = new File(fileName);
        DataInputStream in;
        @SuppressWarnings("resource")
        FileChannel channel = new RandomAccessFile(file, "rw").getChannel();
        FileLock lock = channel.lock();
        try {
            ins = new FileInputStream(file);
            in = new DataInputStream(ins);
            br = new BufferedReader(new InputStreamReader(in));
            String line = "";
            boolean check;
            while ((line = br.readLine()) != null) {
                check = true;
                //log.info("Line :" + line);
                for (ModSecConfigFields field : ModSecConfigFields.values()) {
                    if (line.startsWith(field.toString())) {
                        if (line.trim().split(" ")[0].equals(field.toString())) {
                            if (json.containsKey(field.toString())) {
                                if (((String) json.get(field.toString())).equals("")
                                        || json.get(field.toString()) == null) {
                                    log.info("---------- Log Empty value ----:"
                                            + (String) json.get(field.toString()));
                                    json.remove(field.toString());
                                    check = false;
                                    continue;
                                } else {
                                    modifiedStr += field.toString() + " " + json.remove(field.toString()) + "\n";
                                    check = false;
                                }
                            }
                        }
                    }
                }
                if (check) {
                    modifiedStr += line + "\n";
                }
            }
            for (ModSecConfigFields field : ModSecConfigFields.values()) {
                if (json.containsKey(field.toString())) {
                    if (json.get(field.toString()) == null || ((String) json.get(field.toString())).equals("")) {
                        log.info("---------- Log Empty value ----:" + (String) json.get(field.toString()));
                        json.remove(field.toString());
                        check = false;
                        continue;
                    } else {
                        modifiedStr += field.toString() + " " + json.remove(field.toString()) + "\n";
                    }
                }
            }
            // Write the modified string to the modsecurity configuration file
            log.info("Writing File :" + modifiedStr);
            out = new FileOutputStream(fileName);
            out.write(modifiedStr.getBytes());
            log.info("ModSecurity configurations written ... ");
        } finally {
            lock.release();
        }
        br.close();
        in.close();
        ins.close();
        out.close();
        // Restart modsecurity so that the modified configuration is applied
        JSONObject restartJson = new JSONObject();
        restartJson.put("action", "restart");
        String cmd = serviceCfg.getConfigMap().get("MSRestart");
        executeShScript(cmd, restartJson);
        jsonResp.put("action", "writeMSConfig");
        jsonResp.put("status", "0");
        jsonResp.put("message", "Configurations updated!");
    } catch (FileNotFoundException e1) {
        jsonResp.put("action", "writeMSConfig");
        jsonResp.put("status", "1");
        jsonResp.put("message", "Internal Service is down!");
        e1.printStackTrace();
    } catch (IOException | NullPointerException e) {
        jsonResp.put("action", "writeMSConfig");
        jsonResp.put("status", "0");
        jsonResp.put("message", "Unable to modify configurations. Sorry for the inconvenience");
        e.printStackTrace();
    }
    log.info("Sending Json :" + jsonResp.toJSONString());
    ConnectorService.getConnectorProducer().send(jsonResp.toJSONString());
    jsonResp.clear();
}
From source file:org.commoncrawl.service.listcrawler.CrawlList.java
/**
 * Queue uncrawled urls via the CrawlQueueLoader
 *
 * @param loader
 */
public void queueUnCrawledItems(CrawlQueueLoader loader) throws IOException {
    _queueState = QueueState.QUEUEING;
    int metadataVersion = getMetadata().getVersion();
    synchronized (_metadata) {
        // reset metadata PERIOD
        int urlCount = _metadata.getUrlCount();
        _metadata.clear();
        _metadata.setUrlCount(urlCount);
    }
    RandomAccessFile fixedDataReader = new RandomAccessFile(_fixedDataFile, "rw");
    RandomAccessFile stringDataReader = new RandomAccessFile(_variableDataFile, "rw");
    try {
        OnDiskCrawlHistoryItem item = new OnDiskCrawlHistoryItem();
        URLFP fingerprint = new URLFP();
        while (fixedDataReader.getFilePointer() != fixedDataReader.length()) {
            long position = fixedDataReader.getFilePointer();
            //LOG.info("*** TRYING READ LOCK FOR OFFSET:" + position);
            while (true) {
                // get read lock on position ...
                try {
                    FileLock lock = fixedDataReader.getChannel().tryLock(position,
                            OnDiskCrawlHistoryItem.ON_DISK_SIZE, false);
                    try {
                        //LOG.info("*** GOT READ LOCK FOR OFFSET:" + position);
                        item.deserialize(fixedDataReader);
                        break;
                    } finally {
                        lock.release();
                        //LOG.info("*** RELEASED READ LOCK FOR OFFSET:" + position);
                    }
                } catch (OverlappingFileLockException e) {
                    LOG.error("*** LOCK CONTENTION AT:" + position + " Exception:"
                            + CCStringUtils.stringifyException(e));
                }
            }
            // seek to string data
            stringDataReader.seek(item._stringsOffset);
            // and skip buffer length
            WritableUtils.readVInt(stringDataReader);
            // and read primary string
            String url = stringDataReader.readUTF();
            // setup fingerprint
            fingerprint.setDomainHash(item._domainHash);
            fingerprint.setUrlHash(item._urlFingerprint);
            // first, if it has not been crawled ever, crawl it no matter what ...
            boolean crawlItem = !item.isFlagSet(OnDiskCrawlHistoryItem.FLAG_HAS_CRAWL_STATUS);
            // if it has been crawled ... check list metadata version ...
            if (!crawlItem && metadataVersion >= 1) {
                // ok this is newer version of the list ...
                // check refresh time if specified ...
                int refreshIntervalInSeconds = DEFAULT_REFRESH_INTERVAL_IN_SECS;
                if (getMetadata().getRefreshInterval() != 0) {
                    refreshIntervalInSeconds = getMetadata().getRefreshInterval();
                }
                if (item._updateTimestamp > 0) {
                    long timeSinceLastCrawl = item._updateTimestamp;
                    if (System.currentTimeMillis() - timeSinceLastCrawl >= (refreshIntervalInSeconds * 1000)) {
                        crawlItem = true;
                    }
                }
            }
            if (crawlItem) {
                loader.queueURL(fingerprint, url);
                synchronized (_metadata) {
                    // update queued item count
                    _metadata.setQueuedItemCount(_metadata.getQueuedItemCount() + 1);
                }
            } else {
                updateMetadata(item, _metadata, 0);
            }
            // ok update subdomain stats
            updateSubDomainMetadataForItemDuringLoad(item, url, fingerprint, crawlItem);
        }
        flushCachedSubDomainMetadata();
        loader.flush();
        _queueState = QueueState.QUEUED;
    } catch (IOException e) {
        LOG.error("Encountered Exception Queueing Items for List:" + _listId + " Exception:"
                + CCStringUtils.stringifyException(e));
        _queueState = QueueState.ERROR;
    } finally {
        fixedDataReader.close();
        stringDataReader.close();
    }
}
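The example above locks only the byte range of the record it is reading rather than the whole file. A minimal sketch of that region-locking idea, assuming invented names and a simplified retry strategy:

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileLock;
import java.nio.channels.OverlappingFileLockException;

class RegionLockSketch {
    static byte[] readRecord(RandomAccessFile file, long offset, int recordSize) throws IOException {
        while (true) {
            try {
                // lock only the bytes of this record (exclusive lock: shared = false)
                FileLock lock = file.getChannel().tryLock(offset, recordSize, false);
                if (lock == null) {
                    Thread.yield();   // the region is held by another process; retry
                    continue;
                }
                try {
                    file.seek(offset);
                    byte[] record = new byte[recordSize];
                    file.readFully(record);
                    return record;
                } finally {
                    lock.release();   // release the region as soon as the read is done
                }
            } catch (OverlappingFileLockException e) {
                // another thread in this JVM holds an overlapping lock; retry
            }
        }
    }
}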
From source file:org.spf4j.perf.tsdb.TimeSeriesDatabase.java
/**
 * Read measurements from table.
 *
 * @param tableName
 * @param startTime start time including
 * @param endTime end time including
 * @return
 * @throws IOException
 */
private TimeSeries read(final long startTime, final long endTime, final long startAtFragment,
        final long endAtFragment, final boolean skipFirst) throws IOException {
    synchronized (path) {
        TLongArrayList timeStamps = new TLongArrayList();
        List<long[]> data = new ArrayList<>();
        if (startAtFragment > 0) {
            FileLock lock = ch.lock(0, Long.MAX_VALUE, true);
            try {
                DataFragment frag;
                long nextFragmentLocation = startAtFragment;
                boolean last = false;
                boolean psFirst = skipFirst;
                do {
                    if (nextFragmentLocation == endAtFragment) {
                        last = true;
                    }
                    file.seek(nextFragmentLocation);
                    frag = new DataFragment(file);
                    if (psFirst) {
                        psFirst = false;
                    } else {
                        long fragStartTime = frag.getStartTimeMillis();
                        if (fragStartTime >= startTime) {
                            TIntArrayList fragTimestamps = frag.getTimestamps();
                            int nr = 0;
                            for (int i = 0; i < fragTimestamps.size(); i++) {
                                long ts = fragStartTime + fragTimestamps.get(i);
                                if (ts <= endTime) {
                                    timeStamps.add(ts);
                                    nr++;
                                } else {
                                    break;
                                }
                            }
                            int i = 0;
                            for (long[] d : frag.getData()) {
                                if (i < nr) {
                                    data.add(d);
                                } else {
                                    break;
                                }
                                nr++;
                            }
                            if (fragTimestamps.size() > nr) {
                                break;
                            }
                        }
                    }
                    nextFragmentLocation = frag.getNextDataFragment();
                } while (nextFragmentLocation > 0 && !last);
            } catch (IOException | RuntimeException e) {
                try {
                    lock.release();
                    throw e;
                } catch (IOException ex) {
                    ex.addSuppressed(e);
                    throw ex;
                }
            }
            lock.release();
        }
        return new TimeSeries(timeStamps.toArray(), data.toArray(new long[data.size()][]));
    }
}
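The reader above takes a shared lock (third argument true), so multiple readers can hold the lock at once while writers, which need an exclusive lock, are kept out. A minimal sketch of a shared whole-file lock for reading, with the class and file name invented for illustration:

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;

class SharedReadLockSketch {
    static ByteBuffer readAll(String path) throws IOException {
        try (RandomAccessFile raf = new RandomAccessFile(path, "r");
             FileChannel ch = raf.getChannel()) {
            // shared = true: other readers may lock the same region concurrently
            FileLock lock = ch.lock(0, Long.MAX_VALUE, true);
            try {
                ByteBuffer buf = ByteBuffer.allocate((int) ch.size());
                while (buf.hasRemaining() && ch.read(buf) >= 0) {
                    // keep reading until the whole file is buffered
                }
                buf.flip();
                return buf;
            } finally {
                lock.release();   // release before the channel is closed
            }
        }
    }
}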
From source file:org.commoncrawl.service.listcrawler.CrawlList.java
/**
 * update list state of a recently crawled item
 *
 * @param fingerprint - the fingerprint of the updated item
 * @param newData - the updated crawl history data for the given item
 * @throws IOException
 */
@Override
public void updateItemState(URLFP fingerprint, ProxyCrawlHistoryItem newData) throws IOException {
    if (_listState == LoadState.LOADED) {
        // check for membership ...
        if (_bloomFilter.isPresent(fingerprint)) {
            //LOG.info("UpdateItemState Called for URL:" + newData.getOriginalURL() + " List:" + getListId());
            //LOG.info("UpdateItemState Loading OnDisk Item for URL:" + newData.getOriginalURL() + " List:" + getListId());
            // extract existing item from disk
            OnDiskCrawlHistoryItem originalItem = loadOnDiskItemForURLFP(fingerprint);
            // if present (null if false cache hit)
            if (originalItem != null) {
                // build an on disk item data structure for any potential changes ...
                OnDiskCrawlHistoryItem newItem = onDiskItemFromHistoryItem(fingerprint, newData);
                // set initial offset information
                newItem._fileOffset = originalItem._fileOffset;
                newItem._stringsOffset = originalItem._stringsOffset;
                // LOG.info("UpdateItemState Comparing OnDisk Item to New Item for URL:" + newData.getOriginalURL() + " List:" + getListId());
                // compare the two items ...
                if (!newItem.equals(originalItem)) {
                    //LOG.info("UpdateItemState Items Don't Match for URL:" + newData.getOriginalURL() + " List:" + getListId());
                    // ok items do not match ... figure out if strings are different ...
                    if (newItem._stringsCRC != originalItem._stringsCRC) {
                        RandomAccessFile stringsFile = new RandomAccessFile(_variableDataFile, "rw");
                        try {
                            // seek to end
                            stringsFile.seek(stringsFile.length());
                            // update offset info
                            newItem._stringsOffset = stringsFile.length();
                            // write out string data length
                            WritableUtils.writeVInt(stringsFile, _stringBuffer1.getLength());
                            // write strings to log file
                            stringsFile.write(_stringBuffer1.getData(), 0, _stringBuffer1.getLength());
                        } finally {
                            stringsFile.close();
                        }
                    }
                    // otherwise take the offset from old item
                    else {
                        newItem._stringsOffset = originalItem._stringsOffset;
                    }
                    //LOG.info("Opening Data File for OnDiskItem load for Fingerprint:" + newItem._urlFingerprint);
                    // ok, different paths depending on whether this is an in memory update or not ...
                    boolean wroteToMemory = false;
                    synchronized (this) {
                        if (_tempFixedDataBuffer != null) {
                            wroteToMemory = true;
                            // reset output buffer
                            _tempOutputBuffer.reset();
                            // serialize to output buffer
                            newItem.serialize(_tempOutputBuffer);
                            // copy to appropriate location
                            System.arraycopy(_tempOutputBuffer.getData(), 0, _tempFixedDataBuffer,
                                    (int) originalItem._fileOffset, OnDiskCrawlHistoryItem.ON_DISK_SIZE);
                        }
                    }
                    if (!wroteToMemory) {
                        // write to disk
                        RandomAccessFile file = new RandomAccessFile(_fixedDataFile, "rw");
                        try {
                            while (true) {
                                try {
                                    //LOG.info("*** TRYING UPDATE LOCK FOR OFFSET:" + originalItem._fileOffset);
                                    FileLock lock = file.getChannel().tryLock(originalItem._fileOffset,
                                            OnDiskCrawlHistoryItem.ON_DISK_SIZE, false);
                                    try {
                                        //LOG.info("*** GOT UPDATE LOCK FOR OFFSET:" + originalItem._fileOffset);
                                        file.seek(originalItem._fileOffset);
                                        newItem.serialize(file);
                                        //LOG.info("Updated Data File for OnDiskItem for Fingerprint:" + originalItem._urlFingerprint);
                                        break;
                                    } finally {
                                        //LOG.info("*** RELEASED UPDATE LOCK FOR OFFSET:" + originalItem._fileOffset);
                                        lock.release();
                                    }
                                } catch (OverlappingFileLockException e) {
                                    LOG.error("###LockConflict(RETRY):" + CCStringUtils.stringifyException(e));
                                }
                            }
                        } finally {
                            file.close();
                        }
                    }
                    // ok now update metadata ...
                    synchronized (_metadata) {
                        int updateFlags = calculateUpdateFlags(originalItem, newItem);
                        if (updateFlags != 0) {
                            int metadataDirtyFlags = updateMetadata(newItem, _metadata, 0);
                            // only write metadata to disk if temp data buffer is null
                            if (metadataDirtyFlags != 0 && !wroteToMemory) {
                                if ((metadataDirtyFlags & MetadataUpdateFlag_ModifiedCrawlStatus) != 0) {
                                    _metadata.setQueuedItemCount(_metadata.getQueuedItemCount() - 1);
                                }
                                writeMetadataToDisk();
                            }
                            // if not writing to memory then update subdomain metadata
                            if (!wroteToMemory) {
                                synchronized (_subDomainMetadataFile) {
                                    CrawlListMetadata subDomainMetadata = getSubDomainMetadataByURL(
                                            newData.getOriginalURL());
                                    int subDomainMetadataDirtyFlags = updateMetadata(newItem, subDomainMetadata,
                                            processFileOffsets);
                                    if (subDomainMetadataDirtyFlags != 0 && !wroteToMemory) {
                                        if ((subDomainMetadataDirtyFlags & MetadataUpdateFlag_ModifiedCrawlStatus) != 0) {
                                            subDomainMetadata.setQueuedItemCount(
                                                    subDomainMetadata.getQueuedItemCount() - 1);
                                        }
                                        writeSubDomainMetadataToDisk(subDomainMetadata);
                                    }
                                }
                            }
                        }
                    }
                    synchronized (this) {
                        if (_eventListener != null) {
                            _eventListener.itemUpdated(fingerprint);
                        }
                    }
                }
            }
        }
    }
}
From source file:JNLPAppletLauncher.java
/**
 * This method is called by the static initializer to create / initialize
 * the temp root directory that will hold the temp directories for this
 * instance of the JVM. This is done as follows:
 *
 * 1. Synchronize on a global lock. Note that for this purpose we will
 *    use System.out in the absence of a true global lock facility.
 *    We are careful not to hold this lock too long.
 *
 * 2. Check for the existence of the "jnlp.applet.launcher.tmproot"
 *    system property.
 *
 *    a. If set, then some other thread in a different ClassLoader has
 *       already created the tmprootdir, so we just need to
 *       use it. The remaining steps are skipped.
 *
 *    b. If not set, then we are the first thread in this JVM to run,
 *       and we need to create the tmprootdir.
 *
 * 3. Create the tmprootdir, along with the appropriate locks.
 *    Note that we perform the operations in the following order,
 *    prior to creating tmprootdir itself, to work around the fact that
 *    the file creation and file lock steps are not atomic, and we need
 *    to ensure that a newly-created tmprootdir isn't reaped by a
 *    concurrently running JVM.
 *
 *        create jlnNNNN.tmp using File.createTempFile()
 *        lock jlnNNNN.tmp
 *        create jlnNNNN.lck while holding the lock on the .tmp file
 *        lock jlnNNNN.lck
 *
 *    Since the Reaper thread will enumerate the list of *.lck files
 *    before starting, we can guarantee that if there exists a *.lck file
 *    for an active process, then the corresponding *.tmp file is locked
 *    by that active process. This guarantee lets us avoid reaping an
 *    active process' files.
 *
 * 4. Set the "jnlp.applet.launcher.tmproot" system property.
 *
 * 5. Add a shutdown hook to cleanup jlnNNNN.lck and jlnNNNN.tmp. We
 *    don't actually expect that this shutdown hook will ever be called,
 *    but the act of doing this, ensures that the locks never get
 *    garbage-collected, which is necessary for correct behavior when
 *    the first ClassLoader is later unloaded, while subsequent Applets
 *    are still running.
 *
 * 6. Start the Reaper thread to cleanup old installations.
 */
private static void initTmpRoot() throws IOException {
    if (VERBOSE) {
        System.err.println("---------------------------------------------------");
    }
    synchronized (System.out) {
        // Get the name of the tmpbase directory.
        String tmpBaseName = System.getProperty("java.io.tmpdir") + File.separator + "jnlp-applet";
        tmpBaseDir = new File(tmpBaseName);
        // Get the value of the tmproot system property
        final String tmpRootPropName = "jnlp.applet.launcher.tmproot";
        tmpRootPropValue = System.getProperty(tmpRootPropName);
        if (tmpRootPropValue == null) {
            // Create the tmpbase directory if it doesn't already exist
            tmpBaseDir.mkdir();
            if (!tmpBaseDir.isDirectory()) {
                throw new IOException("Cannot create directory " + tmpBaseDir);
            }
            // Create ${tmpbase}/jlnNNNN.tmp then lock the file
            File tmpFile = File.createTempFile("jln", ".tmp", tmpBaseDir);
            if (VERBOSE) {
                System.err.println("tmpFile = " + tmpFile.getAbsolutePath());
            }
            final FileOutputStream tmpOut = new FileOutputStream(tmpFile);
            final FileChannel tmpChannel = tmpOut.getChannel();
            final FileLock tmpLock = tmpChannel.lock();
            // Strip off the ".tmp" to get the name of the tmprootdir
            String tmpFileName = tmpFile.getAbsolutePath();
            String tmpRootName = tmpFileName.substring(0, tmpFileName.lastIndexOf(".tmp"));
            // Create ${tmpbase}/jlnNNNN.lck then lock the file
            String lckFileName = tmpRootName + ".lck";
            File lckFile = new File(lckFileName);
            if (VERBOSE) {
                System.err.println("lckFile = " + lckFile.getAbsolutePath());
            }
            lckFile.createNewFile();
            final FileOutputStream lckOut = new FileOutputStream(lckFile);
            final FileChannel lckChannel = lckOut.getChannel();
            final FileLock lckLock = lckChannel.lock();
            // Create tmprootdir
            tmpRootDir = new File(tmpRootName);
            if (DEBUG) {
                System.err.println("tmpRootDir = " + tmpRootDir.getAbsolutePath());
            }
            if (!tmpRootDir.mkdir()) {
                throw new IOException("Cannot create " + tmpRootDir);
            }
            // Add shutdown hook to cleanup the OutputStream, FileChannel,
            // and FileLock for the jlnNNNN.tmp and jlnNNNN.lck files.
            // We do this so that the locks never get garbage-collected.
            Runtime.getRuntime().addShutdownHook(new Thread() {
                /* @Override */
                public void run() {
                    // NOTE: we don't really expect that this code will ever
                    // be called. If it does, we will close the output
                    // stream, which will in turn close the channel.
                    // We will then release the lock.
                    try {
                        tmpOut.close();
                        tmpLock.release();
                        lckOut.close();
                        lckLock.release();
                    } catch (IOException ex) {
                        // Do nothing
                    }
                }
            });
            // Set the system property...
            tmpRootPropValue = tmpRootName.substring(tmpRootName.lastIndexOf(File.separator) + 1);
            System.setProperty(tmpRootPropName, tmpRootPropValue);
            if (VERBOSE) {
                System.err.println("Setting " + tmpRootPropName + "=" + tmpRootPropValue);
            }
            // Start a new Reaper thread to do stuff...
            Thread reaperThread = new Thread() {
                /* @Override */
                public void run() {
                    deleteOldTempDirs();
                }
            };
            reaperThread.setName("AppletLauncher-Reaper");
            reaperThread.start();
        } else {
            // Make sure that the property is not set to an illegal value
            if (tmpRootPropValue.indexOf('/') >= 0
                    || tmpRootPropValue.indexOf(File.separatorChar) >= 0) {
                throw new IOException("Illegal value of: " + tmpRootPropName);
            }
            // Set tmpRootDir = ${tmpbase}/${jnlp.applet.launcher.tmproot}
            if (VERBOSE) {
                System.err.println("Using existing value of: " + tmpRootPropName + "=" + tmpRootPropValue);
            }
            tmpRootDir = new File(tmpBaseDir, tmpRootPropValue);
            if (DEBUG) {
                System.err.println("tmpRootDir = " + tmpRootDir.getAbsolutePath());
            }
            if (!tmpRootDir.isDirectory()) {
                throw new IOException("Cannot access " + tmpRootDir);
            }
        }
    }
}
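The launcher above deliberately holds its locks for the lifetime of the JVM and only releases them from a shutdown hook. A stripped-down sketch of that lock-file idea, with the file name and class invented for illustration:

import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;

class ProcessLockFileSketch {
    public static void main(String[] args) throws IOException {
        FileOutputStream out = new FileOutputStream("myapp.lck");
        FileChannel channel = out.getChannel();
        final FileLock lock = channel.lock();   // held for the life of this process
        // Keep strong references so the lock is never garbage-collected,
        // and release it only when the JVM shuts down.
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            try {
                lock.release();
                out.close();
            } catch (IOException e) {
                // nothing useful to do at shutdown
            }
        }));
        // ... the rest of the application runs while the lock is held ...
    }
}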
From source file:MyZone.Settings.java
public byte[] readXML(String filename) {
    byte[] readIn = null;
    FileChannel channel = null;
    FileLock lock = null;
    FileInputStream fis = null;
    ByteArrayOutputStream baos = null;
    try {
        File file = new File(filename);
        if (!file.exists()) {
            return null;
        }
        fis = new FileInputStream(file);
        channel = fis.getChannel();
        while ((lock = channel.tryLock(0L, Long.MAX_VALUE, true)) == null) {
            Thread.yield();
        }
        baos = new ByteArrayOutputStream();
        byte[] b = new byte[1024];
        ByteBuffer buf = ByteBuffer.wrap(b);
        int count = 0;
        long fileLength = file.length();
        while (fileLength > 0) {
            count = channel.read(buf);
            if (count >= 0) {
                fileLength -= count;
                baos.write(b, 0, count);
                buf.rewind();
            }
        }
        readIn = baos.toByteArray();
    } catch (Exception e) {
        if (DEBUG) {
            e.printStackTrace();
        }
        readIn = null;
    } finally {
        try {
            if (lock != null) {
                lock.release();
            }
            if (channel != null) {
                channel.close();
            }
            if (fis != null) {
                fis.close();
            }
            if (baos != null) {
                baos.close();
            }
        } catch (Exception e) {
            if (DEBUG) {
                e.printStackTrace();
            }
            readIn = null;
        }
    }
    return readIn;
}
From source file:com.clustercontrol.agent.job.PublicKeyThread.java
/**
 * Removes the given public key from the authorized_keys file.
 *
 * @param publicKey
 * @return true if the key was removed; false otherwise
 */
private synchronized boolean deleteKey(String publicKey) {
    m_log.debug("delete key start");
    if (SKIP_KEYFILE_UPDATE) {
        m_log.info("skipped deleting publicKey");
        return true;
    }
    Charset charset = Charset.forName("UTF-8");
    CharsetEncoder encoder = charset.newEncoder();
    CharsetDecoder decoder = charset.newDecoder();
    // Resolve the authorized_keys file path
    String fileName = AgentProperties.getProperty(execUser.toLowerCase() + AUTHORIZED_KEY_PATH);
    if (fileName == null || fileName.length() == 0)
        return false;
    // Open the file
    File fi = new File(fileName);
    RandomAccessFile randomAccessFile = null;
    FileChannel channel = null;
    FileLock lock = null;
    boolean delete = false;
    try {
        // Create a RandomAccessFile
        randomAccessFile = new RandomAccessFile(fi, "rw");
        // Get the FileChannel
        channel = randomAccessFile.getChannel();
        // Try to acquire the lock, retrying until the timeout expires
        for (int i = 0; i < (FILELOCK_TIMEOUT / FILELOCK_WAIT); i++) {
            if (null != (lock = channel.tryLock())) {
                break;
            }
            m_log.info("waiting for locked file... [" + (i + 1) + "/" + (FILELOCK_TIMEOUT / FILELOCK_WAIT)
                    + " : " + fileName + "]");
            Thread.sleep(FILELOCK_WAIT);
        }
        if (null == lock) {
            m_log.warn("file locking timeout.");
            return false;
        }
        // Rewrite the file without the given key (guarded section)
        synchronized (authKeyLock) {
            // Read the whole file into a buffer
            ByteBuffer buffer = ByteBuffer.allocate((int) channel.size());
            channel.read(buffer);
            // Flip the buffer so it can be decoded from position 0
            buffer.flip();
            // Decode the contents
            String contents = decoder.decode(buffer).toString();
            m_log.debug("contents " + contents.length() + " : " + contents);
            // Split the contents into individual key lines
            List<String> keyCheck = new ArrayList<String>();
            StringTokenizer tokenizer = new StringTokenizer(contents, "\n");
            while (tokenizer.hasMoreTokens()) {
                keyCheck.add(tokenizer.nextToken());
            }
            // Remove the matching key, if present
            int s = keyCheck.lastIndexOf(publicKey);
            if (s != -1) {
                m_log.debug("remove key : " + keyCheck.get(s));
                keyCheck.remove(s);
            }
            // Re-encode the remaining keys
            encoder.reset();
            buffer.clear();
            int i;
            if (keyCheck.size() > 0) {
                for (i = 0; i < keyCheck.size() - 1; i++) {
                    encoder.encode(CharBuffer.wrap(keyCheck.get(i) + "\n"), buffer, false);
                }
                encoder.encode(CharBuffer.wrap(keyCheck.get(i)), buffer, true);
            }
            // Truncate the file and write the new contents
            buffer.flip();
            channel.truncate(0);
            channel.position(0);
            channel.write(buffer);
        }
        delete = true;
    } catch (IOException e) {
        m_log.error(e.getMessage(), e);
    } catch (RuntimeException e) {
        m_log.error(e.getMessage(), e);
    } catch (InterruptedException e) {
        m_log.error(e.getMessage(), e);
    } finally {
        try {
            if (channel != null) {
                channel.close();
            }
            if (randomAccessFile != null) {
                randomAccessFile.close();
            }
            // Release the file lock
            if (lock != null) {
                lock.release();
            }
        } catch (Exception e) {
            // ignore errors during cleanup
        }
    }
    return delete;
}