Example usage for org.apache.commons.io FileUtils touch

List of usage examples for org.apache.commons.io FileUtils touch

Introduction

On this page you can find example usages of org.apache.commons.io FileUtils.touch, collected from open-source projects.

Prototype

public static void touch(File file) throws IOException 

Document

Implements the same behaviour as the "touch" utility on Unix.
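
Beyond that one-line summary: touch creates a new zero-length file when the target does not exist, and otherwise only updates the file's last-modified time; since Commons IO 1.3 it also creates missing parent directories. A minimal self-contained sketch (the path is arbitrary):

import java.io.File;
import java.io.IOException;
import org.apache.commons.io.FileUtils;

public class TouchDemo {
    public static void main(String[] args) throws IOException {
        File file = new File("build/demo/touch-demo.txt"); // "build/demo" need not exist yet

        FileUtils.touch(file); // creates the missing directories and an empty file
        long created = file.lastModified();

        FileUtils.touch(file); // file exists now, so only its timestamp is updated
        System.out.println("empty: " + (file.length() == 0)
                + ", timestamp moved: " + (file.lastModified() >= created));
    }
}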

Usage

From source file:com.github.jrh3k5.mojo.flume.AbstractFlumeAgentsMojoTest.java

/**
 * Test the removal of libraries from the Flume installation.
 *
 * @throws Exception
 *             If any errors occur during the test run.
 */
@Test
public void testRemoveLibs() throws Exception {
    final File testDirectory = createTestDirectory();
    final File libDir = new File(testDirectory, "lib");
    FileUtils.forceMkdir(libDir);

    final File toRemove = new File(libDir, "toremove.jar");
    FileUtils.touch(toRemove);
    final File toKeep = new File(libDir, "tokeep.jar");
    FileUtils.touch(toKeep);

    final Libs libs = new Libs();
    libs.setRemovals(Collections.singletonList(toRemove.getName()));
    when(agent.getLibs()).thenReturn(libs);

    // The mojo should remove only the configured library
    mojo.removeLibs(agent, testDirectory);

    assertThat(toRemove).doesNotExist();
    assertThat(toKeep).exists();
}

From source file:com.linkedin.helix.store.file.FilePropertyStore.java

@Override
public void setProperty(String key, T value) throws PropertyStoreException {
    String path = getPath(key);
    File file = new File(path);
    FileOutputStream fout = null;

    try {
        _readWriteLock.writeLock().lock();
        if (!file.exists()) {
            FileUtils.touch(file);
        }

        fout = new FileOutputStream(file);
        byte[] bytes = _serializer.serialize(value);
        fout.write(bytes);
    } catch (IOException e) {
        logger.error("fail to set property, key: " + key + ", value: " + value, e);
    } finally {
        _readWriteLock.writeLock().unlock();
        try {
            if (fout != null) {
                fout.close();
            }
        } catch (IOException e) {
            logger.error("fail to close file, key: " + key, e);
        }
    }
}
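
Worth noting for this pattern: since Commons IO 1.3, FileUtils.touch creates missing parent directories before creating the file itself, so the exists-guarded touch above also takes care of recursive directory setup. A small sketch (the path is arbitrary):

File nested = new File("store/deep/nested/key.bin"); // none of these directories exist yet
FileUtils.touch(nested); // creates store/deep/nested/ and an empty key.bin in one call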

From source file:edu.kit.dama.transfer.client.impl.AbstractTransferClient.java

/**
 * Create the temporary transfer directory and a .lock file to avoid multiple
 * transfers for the same DOID. Calling this method is part of the internal
 * transfer preparation.
 *
 * @return TRUE if the directory could be created and locked.
 */
private boolean createAndLockTransfer() {
    boolean result = false;
    try {
        String identifier = transferContainer.getUniqueTransferIdentifier();
        LOGGER.debug("Try to create temp transfer directory for transfer ID {}", identifier);
        String tempDir = StagingUtils.getTempDir(transferContainer);
        LOGGER.debug(" - Temp transfer directory: {}", tempDir);
        FileUtils.touch(new File(tempDir + File.separator + ".lock"));
        LOGGER.debug("Created and locked temp transfer directory");
        result = true;
    } catch (IOException ioe) {
        LOGGER.error("Failed to lock transfer directory", ioe);
    }
    return result;
}
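
One caveat about this lock-file pattern: FileUtils.touch succeeds even when the .lock file already exists, so the call itself cannot detect a competing transfer. When atomic acquisition matters, java.io.File#createNewFile is the usual alternative, since it atomically creates the file and returns false if it was already there. A sketch (tempDir as in the method above; note that createNewFile does not create parent directories):

File lockFile = new File(tempDir, ".lock");
if (lockFile.createNewFile()) {
    // lock acquired: this process owns the transfer
} else {
    // lock already present: another transfer for this identifier is running
}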

From source file:com.ikanow.aleph2.data_import.services.TestHarvestContext.java

@Test
public void test_fileLocations() throws InstantiationException, IllegalAccessException, ClassNotFoundException,
        InterruptedException, ExecutionException {
    _logger.info("running test_fileLocations");

    try {
        final HarvestContext test_context = _app_injector.getInstance(HarvestContext.class);

        //All we can do here is test the trivial eclipse specific path:
        try {
            File f = new File(test_context._globals.local_root_dir() + "/lib/aleph2_test_file_locations.jar");
            FileUtils.forceMkdir(f.getParentFile());
            FileUtils.touch(f);
        } catch (Exception e) {
            // probably already exists
        }

        final DataBucketBean test_empty_bucket = BeanTemplateUtils.build(DataBucketBean.class)
                .with(DataBucketBean::_id, "test")
                .with(DataBucketBean::harvest_technology_name_or_id, "test_harvest_tech_id").done().get();

        final SharedLibraryBean htlib1 = BeanTemplateUtils.build(SharedLibraryBean.class)
                .with(SharedLibraryBean::_id, "test_harvest_tech_id")
                .with(SharedLibraryBean::path_name, "test_harvest_tech_name.jar").done().get();

        test_context.setBucket(test_empty_bucket);
        test_context.setTechnologyConfig(htlib1);

        final List<String> lib_paths = test_context.getHarvestContextLibraries(Optional.empty());

        //(this doesn't work very well when run in test mode because it's all being found from file)
        assertTrue("Finds some libraries", !lib_paths.isEmpty());
        lib_paths.stream().forEach(lib -> assertTrue("No external libraries: " + lib, lib.contains("aleph2")));

        assertTrue(
                "Can find the test JAR or the data model: "
                        + lib_paths.stream().collect(Collectors.joining(";")),
                lib_paths.stream().anyMatch(lib -> lib.contains("aleph2_test_file_locations"))
                        || lib_paths.stream().anyMatch(lib -> lib.contains("aleph2_data_model")));

        // Now get the various shared libs

        final HarvestControlMetadataBean harvest_module1 = BeanTemplateUtils
                .build(HarvestControlMetadataBean.class)
                .with(HarvestControlMetadataBean::library_names_or_ids, Arrays.asList("id1", "name2.zip"))
                .done().get();

        final HarvestControlMetadataBean harvest_module2 = BeanTemplateUtils
                .build(HarvestControlMetadataBean.class).with(HarvestControlMetadataBean::library_names_or_ids,
                        Arrays.asList("id1", "name3.test", "test_harvest_tech_id"))
                .done().get();

        final DataBucketBean test_bucket = BeanTemplateUtils.build(DataBucketBean.class)
                .with(DataBucketBean::_id, "test")
                .with(DataBucketBean::harvest_technology_name_or_id, "test_harvest_tech_id")
                .with(DataBucketBean::harvest_configs, Arrays.asList(harvest_module1, harvest_module2)).done()
                .get();

        final SharedLibraryBean htmod1 = BeanTemplateUtils.build(SharedLibraryBean.class)
                .with(SharedLibraryBean::_id, "id1").with(SharedLibraryBean::path_name, "name1.jar").done()
                .get();

        final SharedLibraryBean htmod2 = BeanTemplateUtils.build(SharedLibraryBean.class)
                .with(SharedLibraryBean::_id, "id2").with(SharedLibraryBean::path_name, "name2.zip").done()
                .get();

        final SharedLibraryBean htmod3 = BeanTemplateUtils.build(SharedLibraryBean.class)
                .with(SharedLibraryBean::_id, "id3").with(SharedLibraryBean::path_name, "name3.test").done()
                .get();

        test_context._service_context.getService(IManagementDbService.class, Optional.empty()).get()
                .getSharedLibraryStore().storeObjects(Arrays.asList(htlib1, htmod1, htmod2, htmod3)).get();

        Map<String, String> mods = test_context.getHarvestLibraries(Optional.of(test_bucket)).get();
        assertTrue("name1:" + mods,
                mods.containsKey("name1.jar") && mods.get("name1.jar").endsWith("id1.cache.jar"));
        assertTrue("name2:" + mods,
                mods.containsKey("name2.zip") && mods.get("name2.zip").endsWith("id2.cache.zip"));
        assertTrue("name3:" + mods,
                mods.containsKey("name3.test") && mods.get("name3.test").endsWith("id3.cache.misc.test"));
        assertTrue("test_harvest_tech_name:" + mods, mods.containsKey("test_harvest_tech_name.jar")
                && mods.get("test_harvest_tech_name.jar").endsWith("test_harvest_tech_id.cache.jar"));
    } catch (Exception e) {
        try {
            e.printStackTrace();
        } catch (Exception ee) {
            System.out.println(ErrorUtils.getLongForm("{1}: {0}", e, e.getClass()));
        }
        fail("Threw exception");
    }

}

From source file:de.tudarmstadt.ukp.dkpro.core.io.web1t.Web1TFormatWriter.java

private Map<Integer, BufferedWriter> initializeWriters(int min, int max)
        throws ResourceInitializationException {
    Map<Integer, BufferedWriter> writers = new HashMap<Integer, BufferedWriter>();
    for (int level = min; level <= max; level++) {
        try {
            File outputFile = new File(outputPath, level + ".txt");

            if (outputFile.exists()) {
                outputFile.delete();
            }
            FileUtils.touch(outputFile);

            writers.put(level, new BufferedWriter(
                    new OutputStreamWriter(new FileOutputStream(outputFile), outputEncoding)));
        } catch (IOException e) {
            throw new ResourceInitializationException(e);
        }
    }
    return writers;
}
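
Two details in this writer setup: new FileOutputStream(outputFile) creates and truncates the file on its own, so the delete/touch pair mainly matters for the parent directory, which FileOutputStream will not create but FileUtils.touch (since Commons IO 1.3) will. Reduced to the essential calls:

File outputFile = new File(outputPath, level + ".txt");
FileUtils.touch(outputFile); // creates outputPath/ if missing, along with the file
OutputStream out = new FileOutputStream(outputFile); // truncates any previous content itself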

From source file:de.unileipzig.ub.indexer.App.java

public static void main(String[] args) throws IOException {

    // create Options object
    Options options = new Options();

    options.addOption("h", "help", false, "display this help");

    options.addOption("f", "filename", true, "name of the JSON file whose content should be indexed");
    options.addOption("i", "index", true, "the name of the target index");
    options.addOption("d", "doctype", true, "the name of the doctype (title, local, ...)");

    options.addOption("t", "host", true, "elasticsearch hostname (default: 0.0.0.0)");
    options.addOption("p", "port", true, "transport port (that's NOT the http port, default: 9300)");
    options.addOption("c", "cluster", true, "cluster name (default: elasticsearch_mdma)");

    options.addOption("b", "bulksize", true, "number of docs sent in one request (default: 3000)");
    options.addOption("v", "verbose", false, "show processing speed while indexing");
    options.addOption("s", "status", false, "only show status of index for file");

    options.addOption("r", "repair", false, "attempt to repair recoverable inconsistencies on the go");
    options.addOption("e", "debug", false, "set logging level to debug");
    options.addOption("l", "logfile", true, "logfile - in not specified only log to stdout");

    options.addOption("m", "memcached", true, "host and port of memcached (default: localhost:11211)");
    options.addOption("z", "latest-flag-on", true,
            "enable latest flag according to field (within content, e.g. 001)");
    options.addOption("a", "flat", false, "flat-mode: do not check for inconsistencies");

    CommandLineParser parser = new PosixParser();
    CommandLine cmd = null;

    try {
        cmd = parser.parse(options, args);
    } catch (ParseException ex) {
        logger.error(ex);
        System.exit(1);
    }

    // setup logging
    Properties systemProperties = System.getProperties();
    systemProperties.put("net.spy.log.LoggerImpl", "net.spy.memcached.compat.log.Log4JLogger");
    System.setProperties(systemProperties);
    Logger.getLogger("net.spy.memcached").setLevel(Level.ERROR);

    Properties props = new Properties();
    props.load(props.getClass().getResourceAsStream("/log4j.properties"));

    if (cmd.hasOption("debug")) {
        props.setProperty("log4j.logger.de.unileipzig", "DEBUG");
    }

    if (cmd.hasOption("logfile")) {
        props.setProperty("log4j.rootLogger", "INFO, stdout, F");
        props.setProperty("log4j.appender.F", "org.apache.log4j.FileAppender");
        props.setProperty("log4j.appender.F.File", cmd.getOptionValue("logfile"));
        props.setProperty("log4j.appender.F.layout", "org.apache.log4j.PatternLayout");
        props.setProperty("log4j.appender.F.layout.ConversionPattern", "%5p | %d | %F | %L | %m%n");
    }

    PropertyConfigurator.configure(props);

    InetAddress addr = InetAddress.getLocalHost();
    String memcachedHostAndPort = addr.getHostAddress() + ":11211";
    if (cmd.hasOption("m")) {
        memcachedHostAndPort = cmd.getOptionValue("m");
    }

    // setup caching
    try {
        if (memcachedClient == null) {
            memcachedClient = new MemcachedClient(
                    new ConnectionFactoryBuilder().setFailureMode(FailureMode.Cancel).build(),
                    AddrUtil.getAddresses("0.0.0.0:11211"));
            try {
                // give client and server 500ms
                Thread.sleep(300);
            } catch (InterruptedException ex) {
            }

            Collection availableServers = memcachedClient.getAvailableServers();
            logger.info(availableServers);
            if (availableServers.size() == 0) {
                logger.info("no memcached servers found");
                memcachedClient.shutdown();
                memcachedClient = null;
            } else {
                logger.info(availableServers.size() + " memcached server(s) detected, fine.");
            }
        }
    } catch (IOException ex) {
        logger.warn("couldn't create a connection, bailing out: " + ex.getMessage());
    }

    // process options

    if (cmd.hasOption("h")) {
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp("indexer", options, true);
        quit(0);
    }

    boolean verbose = false;
    if (cmd.hasOption("verbose")) {
        verbose = true;
    }

    // ES options
    String[] hosts = new String[] { "0.0.0.0" };
    int port = 9300;
    String clusterName = "elasticsearch_mdma";
    int bulkSize = 3000;

    if (cmd.hasOption("host")) {
        hosts = cmd.getOptionValues("host");
    }
    if (cmd.hasOption("port")) {
        port = Integer.parseInt(cmd.getOptionValue("port"));
    }
    if (cmd.hasOption("cluster")) {
        clusterName = cmd.getOptionValue("cluster");
    }
    if (cmd.hasOption("bulksize")) {
        bulkSize = Integer.parseInt(cmd.getOptionValue("bulksize"));
        if (bulkSize < 1 || bulkSize > 100000) {
            logger.error("bulksize must be between 1 and 100,000");
            quit(1);
        }
    }

    // ES Client
    final Settings settings = ImmutableSettings.settingsBuilder().put("cluster.name", clusterName)
            .build();
    final TransportClient client = new TransportClient(settings);
    for (String host : hosts) {
        client.addTransportAddress(new InetSocketTransportAddress(host, port));
    }

    if (cmd.hasOption("filename") && cmd.hasOption("index") && cmd.hasOption("doctype")) {

        final String filename = cmd.getOptionValue("filename");

        final File _file = new File(filename);
        if (_file.length() == 0) {
            logger.info(_file.getAbsolutePath() + " is empty, skipping");
            quit(0); // file is empty
        }

        // for flat mode: leave a stampfile beside the json to 
        // indicate previous successful processing
        File directory = new File(filename).getParentFile();
        File stampfile = new File(directory, DigestUtils.shaHex(filename) + ".indexed");

        long start = System.currentTimeMillis();
        long lineCount = 0;

        final String indexName = cmd.getOptionValue("index");
        final String docType = cmd.getOptionValue("doctype");
        BulkRequestBuilder bulkRequest = client.prepareBulk();

        try {
            if (cmd.hasOption("flat")) {
                // flat mode
                // .........
                if (stampfile.exists()) {
                    logger.info("SKIPPING, since it seems this file has already " + "been imported (found: "
                            + stampfile.getAbsolutePath() + ")");
                    quit(0);
                }
            } else {

                final String srcSHA1 = extractSrcSHA1(filename);

                logger.debug(filename + " srcsha1: " + srcSHA1);

                long docsInIndex = getIndexedRecordCount(client, indexName, srcSHA1);
                logger.debug(filename + " indexed: " + docsInIndex);

                long docsInFile = getLineCount(filename);
                logger.debug(filename + " lines: " + docsInFile);

                // in non-flat-mode, indexing would take care
                // of inconsistencies
                if (docsInIndex == docsInFile) {
                    logger.info("UP-TO DATE: " + filename + " (" + docsInIndex + ", " + srcSHA1 + ")");
                    client.close();
                    quit(0);
                }

                if (docsInIndex > 0) {
                    logger.warn("INCONSISTENCY DETECTED: " + filename + ": indexed:" + docsInIndex + " lines:"
                            + docsInFile);

                    if (!cmd.hasOption("r")) {
                        logger.warn(
                                "Please re-run indexer with --repair flag or delete residues first with: $ curl -XDELETE "
                                        + hosts[0] + ":9200/" + indexName
                                        + "/_query -d ' {\"term\" : { \"meta.srcsha1\" : \"" + srcSHA1
                                        + "\" }}'");
                        client.close();
                        quit(1);
                    } else {
                        logger.info("Attempting to clear residues...");
                        // attempt to repair once
                        DeleteByQueryResponse dbqr = client.prepareDeleteByQuery(indexName)
                                .setQuery(termQuery("meta.srcsha1", srcSHA1)).execute().actionGet();

                        Iterator<IndexDeleteByQueryResponse> it = dbqr.iterator();
                        long deletions = 0;
                        while (it.hasNext()) {
                            IndexDeleteByQueryResponse response = it.next();
                            deletions += 1;
                        }
                        logger.info("Deleted residues of " + filename);
                        logger.info("Refreshing [" + indexName + "]");
                        RefreshResponse refreshResponse = client.admin().indices()
                                .refresh(new RefreshRequest(indexName)).actionGet();

                        long indexedAfterDelete = getIndexedRecordCount(client, indexName, srcSHA1);
                        logger.info(indexedAfterDelete + " docs remained");
                        if (indexedAfterDelete > 0) {
                            logger.warn("Not all residues cleaned. Try to fix this manually: $ curl -XDELETE "
                                    + hosts[0] + ":9200/" + indexName
                                    + "/_query -d ' {\"term\" : { \"meta.srcsha1\" : \"" + srcSHA1 + "\" }}'");
                            quit(1);
                        } else {
                            logger.info("Residues are gone. Now trying to reindex: " + filename);
                        }
                    }
                }
            }

            logger.info("INDEXING-REQUIRED: " + filename);
            if (cmd.hasOption("status")) {
                quit(0);
            }

            HashSet<String> idsInBatch = new HashSet<String>();

            String idField = null;
            if (cmd.hasOption("z")) {
                idField = cmd.getOptionValue("z");
            }

            final FileReader fr = new FileReader(filename);
            final BufferedReader br = new BufferedReader(fr);

            String line;
            // one line is one document
            while ((line = br.readLine()) != null) {

                // "Latest-Flag" machine
                // This gets obsolete with a "flat" index
                if (cmd.hasOption("z")) {
                    // flag that indicates, whether the document
                    // about to be indexed will be the latest
                    boolean willBeLatest = true;

                    // check if there is a previous (lower meta.timestamp) document with 
                    // the same identifier (whatever that may be - queried under "content")
                    final String contentIdentifier = getContentIdentifier(line, idField);
                    idsInBatch.add(contentIdentifier);

                    // assumed in meta.timestamp
                    final Long timestamp = Long.parseLong(getTimestamp(line));

                    logger.debug("Checking whether record is latest (line: " + lineCount + ")");
                    logger.debug(contentIdentifier + ", " + timestamp);

                    // get all docs, which match the contentIdentifier
                    // by filter, which doesn't score
                    final TermFilterBuilder idFilter = new TermFilterBuilder("content." + idField,
                            contentIdentifier);
                    final TermFilterBuilder kindFilter = new TermFilterBuilder("meta.kind", docType);
                    final AndFilterBuilder afb = new AndFilterBuilder();
                    afb.add(idFilter).add(kindFilter);
                    final FilteredQueryBuilder fb = filteredQuery(matchAllQuery(), afb);

                    final SearchResponse searchResponse = client.prepareSearch(indexName)
                            .setSearchType(SearchType.DFS_QUERY_THEN_FETCH).setQuery(fb).setFrom(0)
                            .setSize(1200) // 3 years and 105 days assuming daily updates at the most
                            .setExplain(false).execute().actionGet();

                    final SearchHits searchHits = searchResponse.getHits();

                    logger.debug("docs with this id in the index: " + searchHits.getTotalHits());

                    for (final SearchHit hit : searchHits.getHits()) {
                        final String docId = hit.id();
                        final Map<String, Object> source = hit.sourceAsMap();
                        final Map meta = (Map) source.get("meta");
                        final Long docTimestamp = Long.parseLong(meta.get("timestamp").toString());
                        // if the indexed doc timestamp is lower than the current one,
                        // remove any latest flag
                        if (timestamp >= docTimestamp) {
                            source.remove("latest");
                            final ObjectMapper mapper = new ObjectMapper();
                            // put the updated doc back
                            // IndexResponse response = 
                            client.prepareIndex(indexName, docType).setCreate(false).setId(docId)
                                    .setSource(mapper.writeValueAsBytes(source))
                                    .execute(new ActionListener<IndexResponse>() {
                                        public void onResponse(IndexResponse rspns) {
                                            logger.debug("Removed latest flag from " + contentIdentifier + ", "
                                                    + docTimestamp + ", " + hit.id() + " since (" + timestamp
                                                    + " > " + docTimestamp + ")");
                                        }

                                        public void onFailure(Throwable thrwbl) {
                                            logger.error("Could not remove flag from " + hit.id() + ", "
                                                    + contentIdentifier);
                                        }
                                    });
                            // .execute()
                            //.actionGet();
                        } else {
                            logger.debug("Doc " + hit.id() + " is newer (" + docTimestamp + ")");
                            willBeLatest = false;
                        }
                    }

                    if (willBeLatest) {
                        line = setLatestFlag(line);
                        logger.info("Setting latest flag on " + contentIdentifier + ", " + timestamp);
                    }

                    // end of latest-flag machine
                    // beware - this will be correct as long as there
                    // are no dups within one bulk!
                }

                bulkRequest.add(client.prepareIndex(indexName, docType).setSource(line));
                lineCount++;
                logger.debug("Added line " + lineCount + " to BULK");
                logger.debug(line);

                if (lineCount % bulkSize == 0) {

                    if (idsInBatch.size() != bulkSize && cmd.hasOption("z")) {
                        logger.error(
                                "This batch has duplications in the ID. That's not bad for the index, just makes the latest flag fuzzy");
                        logger.error(
                                "Bulk size was: " + bulkSize + ", but " + idsInBatch.size() + " IDs (only)");
                    }
                    idsInBatch.clear();

                    logger.debug("Issuing BULK request");

                    final long actionCount = bulkRequest.numberOfActions();
                    final BulkResponse bulkResponse = bulkRequest.execute().actionGet();
                    final long tookInMillis = bulkResponse.getTookInMillis();

                    if (bulkResponse.hasFailures()) {
                        logger.fatal("FAILED, bulk not indexed. exiting now.");
                        Iterator<BulkItemResponse> it = bulkResponse.iterator();
                        while (it.hasNext()) {
                            BulkItemResponse bir = it.next();
                            if (bir.isFailed()) {
                                Failure failure = bir.getFailure();
                                logger.fatal("id: " + failure.getId() + ", message: " + failure.getMessage()
                                        + ", type: " + failure.getType() + ", index: " + failure.getIndex());
                            }
                        }
                        quit(1);
                    } else {
                        if (verbose) {
                            final double elapsed = System.currentTimeMillis() - start;
                            final double speed = (lineCount / elapsed * 1000);
                            logger.info("OK (" + filename + ") " + lineCount + " docs indexed (" + actionCount
                                    + "/" + tookInMillis + "ms" + "/" + String.format("%.2f", speed) + "r/s)");
                        }
                    }
                    bulkRequest = client.prepareBulk();
                }
            }

            // handle the remaining items
            final long actionCount = bulkRequest.numberOfActions();
            if (actionCount > 0) {
                final BulkResponse bulkResponse = bulkRequest.execute().actionGet();
                final long tookInMillis = bulkResponse.getTookInMillis();

                if (bulkResponse.hasFailures()) {
                    logger.fatal("FAILED, bulk not indexed. exiting now.");
                    Iterator<BulkItemResponse> it = bulkResponse.iterator();
                    while (it.hasNext()) {
                        BulkItemResponse bir = it.next();
                        if (bir.isFailed()) {
                            Failure failure = bir.getFailure();
                            logger.fatal("id: " + failure.getId() + ", message: " + failure.getMessage()
                                    + ", type: " + failure.getType() + ", index: " + failure.getIndex());
                        }
                    }
                    quit(1);
                } else {

                    // trigger update now
                    RefreshResponse refreshResponse = client.admin().indices()
                            .refresh(new RefreshRequest(indexName)).actionGet();

                    if (verbose) {
                        final double elapsed = System.currentTimeMillis() - start;
                        final double speed = (lineCount / elapsed * 1000);
                        logger.info("OK (" + filename + ") " + lineCount + " docs indexed (" + actionCount + "/"
                                + tookInMillis + "ms" + "/" + String.format("%.2f", speed) + "r/s)");
                    }

                }

            }

            br.close();
            client.close();
            final double elapsed = (System.currentTimeMillis() - start) / 1000.0;
            final double speed = (lineCount / elapsed);
            logger.info("indexing (" + filename + ") " + lineCount + " docs took " + elapsed + "s (speed: "
                    + String.format("%.2f", speed) + "r/s)");
            if (cmd.hasOption("flat")) {
                try {
                    FileUtils.touch(stampfile);
                } catch (IOException ioe) {
                    logger.warn(".indexed files not created. Will reindex everything everytime.");
                }
            }
        } catch (IOException e) {
            client.close();
            logger.error(e);
            quit(1);
        } finally {
            client.close();
        }
    }
    quit(0);
}
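
The flat-mode stamp file in this example is a compact idempotency idiom: a zero-byte marker, created with touch only after the work succeeded, whose mere existence tells later runs to skip the input. Reduced to its core (paths and the processing step are illustrative):

File input = new File("/data/records.json"); // illustrative input file
File stamp = new File(input.getParentFile(), DigestUtils.shaHex(input.getPath()) + ".indexed");

if (stamp.exists()) {
    return; // a previous run already indexed this file
}
indexFile(input);       // hypothetical processing step
FileUtils.touch(stamp); // mark success only after the work completed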

From source file:com.liferay.portal.util.FileImpl.java

public void touch(File file) throws IOException {
    FileUtils.touch(file);
}

From source file:com.izforge.izpack.event.RegistryInstallerListener.java

/**
 * Registers the uninstaller.
 *
 * @throws NativeLibException for any native library exception
 * @throws InstallerException for any other error
 */
private void registerUninstallKey() throws NativeLibException {
    String uninstallName = registry.getUninstallName();
    if (uninstallName == null) {
        return;
    }
    InstallData installData = getInstallData();
    String keyName = RegistryHandler.UNINSTALL_ROOT + uninstallName;
    String uninstallerPath = IoHelper.translatePath(installData.getInfo().getUninstallerPath(),
            installData.getVariables());
    String cmd = "\"" + installData.getVariable("JAVA_HOME") + "\\bin\\javaw.exe\" -jar \"" + uninstallerPath
            + "\\" + installData.getInfo().getUninstallerName() + "\"";
    String appVersion = installData.getVariable("APP_VER");
    String appUrl = installData.getVariable("APP_URL");

    try {
        registry.setRoot(RegistryHandler.HKEY_LOCAL_MACHINE);
        registry.setValue(keyName, "DisplayName", uninstallName);
    } catch (NativeLibException exception) { // Users without administrative rights should be able to install the app for themselves
        logger.warning("Failed to register uninstaller in HKEY_LOCAL_MACHINE hive, trying HKEY_CURRENT_USER: "
                + exception.getMessage());
        registry.setRoot(RegistryHandler.HKEY_CURRENT_USER);
        registry.setValue(keyName, "DisplayName", uninstallName);
    }
    registry.setValue(keyName, "UninstallString", cmd);
    registry.setValue(keyName, "DisplayVersion", appVersion);
    if (appUrl != null && appUrl.length() > 0) {
        registry.setValue(keyName, "HelpLink", appUrl);
    }
    // Try to write the uninstaller icon out.
    InputStream in = null;
    FileOutputStream out = null;
    try {
        in = resources.getInputStream(UNINSTALLER_ICON);
        String iconPath = installData.getVariable("INSTALL_PATH") + File.separator + "Uninstaller"
                + File.separator + "UninstallerIcon.ico";

        // make sure the 'Uninstaller' directory exists
        File uninstallerIcon = new File(iconPath);
        FileUtils.touch(uninstallerIcon);

        out = new FileOutputStream(uninstallerIcon);
        IOUtils.copy(in, out);
        out.flush();
        out.close();
        registry.setValue(keyName, "DisplayIcon", iconPath);
    } catch (ResourceNotFoundException exception) {
        // No icon resource defined; ignore it
        logger.warning("The configured uninstaller icon was not found: " + exception.getMessage());
    } catch (IOException exception) {
        throw new InstallerException(exception);
    } finally {
        IOUtils.closeQuietly(in);
        IOUtils.closeQuietly(out);
    }
}
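
The closeQuietly bookkeeping above predates try-with-resources; the same copy can be written so both streams close automatically even when it fails, while touch keeps its role of creating the missing Uninstaller directory. A sketch using the same names:

FileUtils.touch(uninstallerIcon); // also creates the "Uninstaller" directory if absent
try (InputStream icon = resources.getInputStream(UNINSTALLER_ICON);
        OutputStream out = new FileOutputStream(uninstallerIcon)) {
    IOUtils.copy(icon, out); // streams are closed automatically, even on failure
}
registry.setValue(keyName, "DisplayIcon", iconPath);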

From source file:com.ikanow.aleph2.harvest.logstash.services.LogstashHarvestService.java

/** Creates the logstash restart file, which the v1 version of logstash periodically checks to decide whether to restart
 * @param bucket
 * @param config
 * @param globals
 * @throws IOException
 */
protected void createLogstashRestartCommand(final DataBucketBean bucket, final LogstashBucketConfigBean config,
        final LogstashHarvesterConfigBean globals) throws IOException {
    FileUtils.touch(new File(globals.restart_file()));
}
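
Here touch is one half of a file-based signaling channel: the harvester bumps the restart file's timestamp, and a watcher restarts Logstash when it notices a newer one. The watching side is not part of this source; a rough, hypothetical sketch of it (the method name and poll interval are assumptions):

void watchRestartFile(File restartFile, long pollIntervalMillis) throws InterruptedException {
    long lastSeen = restartFile.lastModified(); // 0 if the file does not exist yet
    while (!Thread.currentThread().isInterrupted()) {
        long current = restartFile.lastModified();
        if (current > lastSeen) {
            lastSeen = current;
            restartLogstash(); // hypothetical restart hook
        }
        Thread.sleep(pollIntervalMillis);
    }
}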

From source file:com.linkedin.helix.store.file.FilePropertyStore.java

@Override
public void updatePropertyUntilSucceed(String key, DataUpdater<T> updater, boolean createIfAbsent) {
    String path = getPath(key);
    File file = new File(path);
    RandomAccessFile raFile = null;
    FileLock fLock = null;

    try {
        _readWriteLock.writeLock().lock();
        if (!file.exists()) {
            FileUtils.touch(file);
        }

        raFile = new RandomAccessFile(file, "rw");
        FileChannel fChannel = raFile.getChannel();
        fLock = fChannel.lock();

        T current = getProperty(key);
        T update = updater.update(current);
        setProperty(key, update);
    } catch (Exception e) {
        logger.error("fail to updatePropertyUntilSucceed, path:" + path, e);
    } finally {
        _readWriteLock.writeLock().unlock();
        try {
            if (fLock != null && fLock.isValid()) {
                fLock.release();
            }

            if (raFile != null) {
                raFile.close();
            }
        } catch (IOException e) {
            logger.error("fail to close file, path:" + path, e);
        }
    }
}
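
A closing note on this last example: touch guarantees the file and its parent directories exist before locking; opening a RandomAccessFile in "rw" mode would create the file too, but fails when the directory is missing. The release/close choreography in the finally block can also be expressed with try-with-resources, because RandomAccessFile is Closeable and FileLock has been AutoCloseable (close releases the lock) since Java 7. A sketch:

try (RandomAccessFile raFile = new RandomAccessFile(file, "rw");
        FileLock fLock = raFile.getChannel().lock()) {
    T current = getProperty(key);
    setProperty(key, updater.update(current));
} // lock released, then file closed, automatically and in reverse order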