Example usage for java.util.concurrent TimeUnit DAYS

List of usage examples for java.util.concurrent TimeUnit DAYS

Introduction

This page lists usage examples for java.util.concurrent TimeUnit DAYS, collected from the source files shown below.

Prototype

TimeUnit DAYS

Document

A time unit representing twenty-four hours.
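
As a quick illustration, here is a minimal, self-contained sketch (not taken from any of the projects below) of the conversions DAYS supports:

import java.util.concurrent.TimeUnit;

public class TimeUnitDaysDemo {
    public static void main(String[] args) {
        // one day expressed in smaller units
        System.out.println(TimeUnit.DAYS.toHours(1)); // 24
        System.out.println(TimeUnit.DAYS.toMillis(1)); // 86400000

        // convert 48 hours into days
        System.out.println(TimeUnit.DAYS.convert(48, TimeUnit.HOURS)); // 2
    }
}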

Usage

From source file: com.foudroyantfactotum.mod.fousarchive.utility.midi.FileSupporter.java

public static void main(String[] args) throws InterruptedException, IOException {
    for (int i = 0; i < noOfWorkers; ++i)
        pool.submit(new ConMidiDetailsPuller());

    final File sourceDir = new File(source);
    final File outputDir = new File(output);

    Logger.info(UserLogger.GENERAL, "source directory: " + sourceDir.getAbsolutePath());
    Logger.info(UserLogger.GENERAL, "output directory: " + outputDir.getAbsolutePath());
    Logger.info(UserLogger.GENERAL, "processing midi files using " + noOfWorkers + " cores");

    FileUtils.deleteDirectory(outputDir);
    FileUtils.touch(new File(outputDir + "/master.json.gz"));

    for (File sfile : sourceDir.listFiles()) {
        recFile(sfile, files);
    }

    for (int i = 0; i < noOfWorkers; ++i)
        files.put(TERMINATOR);

    pool.shutdown();
    pool.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);//just get all the work done first.

    try (final OutputStream fstream = new FileOutputStream(outputDir + "/master.json.gz")) {
        try (final GZIPOutputStream gzstream = new GZIPOutputStream(fstream)) {
            final OutputStreamWriter osw = new OutputStreamWriter(gzstream);

            osw.write(JSON.toJson(processedMidiFiles));
            osw.flush();
        }
    } catch (IOException e) {
        Logger.info(UserLogger.GENERAL, e.toString());
    }

    Logger.info(UserLogger.GENERAL, "Processed " + processedMidiFiles.size() + " midi files out of " + fileCount
            + " files. " + (fileCount - processedMidiFiles.size()) + " removed");
}

From source file: com.mapr.PurchaseLog.java

public static void main(String[] args) throws IOException {
    Options opts = new Options();
    CmdLineParser parser = new CmdLineParser(opts);
    try {
        parser.parseArgument(args);
    } catch (CmdLineException e) {
        System.err.println("Usage: -count <number>G|M|K [ -users number ]  log-file user-profiles");
        return;
    }

    Joiner withTab = Joiner.on("\t");

    // first generate lots of user definitions
    SchemaSampler users = new SchemaSampler(
            Resources.asCharSource(Resources.getResource("user-schema.txt"), Charsets.UTF_8).read());
    File userFile = File.createTempFile("user", "tsv");
    BufferedWriter out = Files.newBufferedWriter(userFile.toPath(), Charsets.UTF_8);
    for (int i = 0; i < opts.users; i++) {
        out.write(withTab.join(users.sample()));
        out.newLine();
    }
    out.close();

    // now generate a session for each user
    Splitter onTabs = Splitter.on("\t");
    Splitter onComma = Splitter.on(",");

    Random gen = new Random();
    SchemaSampler intermediate = new SchemaSampler(
            Resources.asCharSource(Resources.getResource("hit_step.txt"), Charsets.UTF_8).read());

    final int COUNTRY = users.getFieldNames().indexOf("country");
    final int CAMPAIGN = intermediate.getFieldNames().indexOf("campaign_list");
    final int SEARCH_TERMS = intermediate.getFieldNames().indexOf("search_keywords");
    Preconditions.checkState(COUNTRY >= 0, "Need country field in user schema");
    Preconditions.checkState(CAMPAIGN >= 0, "Need campaign_list field in step schema");
    Preconditions.checkState(SEARCH_TERMS >= 0, "Need search_keywords field in step schema");

    out = Files.newBufferedWriter(new File(opts.out).toPath(), Charsets.UTF_8);

    for (String line : Files.readAllLines(userFile.toPath(), Charsets.UTF_8)) {
        // random start time within a 30-day window, in milliseconds
        long t = (long) (TimeUnit.MILLISECONDS.convert(30, TimeUnit.DAYS) * gen.nextDouble());
        List<String> user = Lists.newArrayList(onTabs.split(line));

        // pick session length
        int n = (int) Math.floor(-30 * Math.log(gen.nextDouble()));

        for (int i = 0; i < n; i++) {
            // time on page
            int dt = (int) Math.floor(-20000 * Math.log(gen.nextDouble()));
            t += dt;

            // hit specific values
            JsonNode step = intermediate.sample();

            // check for purchase
            double p = 0.01;
            List<String> campaigns = Lists.newArrayList(onComma.split(step.get("campaign_list").asText()));
            List<String> keywords = Lists.newArrayList(onComma.split(step.get("search_keywords").asText()));
            if ((user.get(COUNTRY).equals("us") && campaigns.contains("5"))
                    || (user.get(COUNTRY).equals("jp") && campaigns.contains("7")) || keywords.contains("homer")
                    || keywords.contains("simpson")) {
                p = 0.5;
            }

            String events = gen.nextDouble() < p ? "1" : "-";

            out.write(Long.toString(t));
            out.write("\t");
            out.write(line);
            out.write("\t");
            out.write(withTab.join(step));
            out.write("\t");
            out.write(events);
            out.write("\n");
        }
    }
    out.close();
}

From source file: com.alertlogic.aws.kinesis.test1.StreamWriter.java

/**
 * Start a number of threads and send randomly generated {@link HttpReferrerPair}s to a Kinesis Stream until the
 * program is terminated.
 *
 * @param args Expecting 3 arguments: the number of threads to use to send data to Kinesis, the name of
 *        the stream to send records to, and the AWS region in which these resources exist or should be
 *        created.
 * @throws InterruptedException If this application is interrupted while sending records to Kinesis.
 */
public static void main(String[] args) throws InterruptedException {
    if (args.length != 3) {
        System.err.println(
                "Usage: " + StreamWriter.class.getSimpleName() + " <number of threads> <stream name> <region>");
        System.exit(1);
    }

    int numberOfThreads = Integer.parseInt(args[0]);
    String streamName = args[1];
    Region region = SampleUtils.parseRegion(args[2]);

    AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
    ClientConfiguration clientConfig = SampleUtils.configureUserAgentForSample(new ClientConfiguration());
    AmazonKinesis kinesis = new AmazonKinesisClient(credentialsProvider, clientConfig);
    kinesis.setRegion(region);

    // The more resources we declare the higher write IOPS we need on our DynamoDB table.
    // We write a record for each resource every interval.
    // If interval = 500ms, resource count = 7 we need: (1000/500 * 7) = 14 write IOPS minimum.
    List<String> resources = new ArrayList<>();
    resources.add("/index.html");

    // These are the possible referrers to use when generating pairs
    List<String> referrers = new ArrayList<>();
    referrers.add("http://www.amazon.com");
    referrers.add("http://www.google.com");
    referrers.add("http://www.yahoo.com");
    referrers.add("http://www.bing.com");
    referrers.add("http://www.stackoverflow.com");
    referrers.add("http://www.reddit.com");

    HttpReferrerPairFactory pairFactory = new HttpReferrerPairFactory(resources, referrers);

    // Creates a stream to write to with 2 shards if it doesn't exist
    StreamUtils streamUtils = new StreamUtils(kinesis);
    streamUtils.createStreamIfNotExists(streamName, 2);
    LOG.info(String.format("%s stream is ready for use", streamName));

    final HttpReferrerKinesisPutter putter = new HttpReferrerKinesisPutter(pairFactory, kinesis, streamName);

    ExecutorService es = Executors.newCachedThreadPool();

    Runnable pairSender = new Runnable() {
        @Override
        public void run() {
            try {
                putter.sendPairsIndefinitely(DELAY_BETWEEN_RECORDS_IN_MILLIS, TimeUnit.MILLISECONDS);
            } catch (Exception ex) {
                LOG.warn(
                        "Thread encountered an error while sending records. Records will no longer be put by this thread.",
                        ex);
            }
        }
    };

    for (int i = 0; i < numberOfThreads; i++) {
        es.submit(pairSender);
    }

    LOG.info(String.format("Sending pairs with a %dms delay between records with %d thread(s).",
            DELAY_BETWEEN_RECORDS_IN_MILLIS, numberOfThreads));

    es.shutdown();
    es.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS); // effectively wait forever for the senders to finish
}

From source file: com.amazonaws.services.kinesis.samples.datavis.HttpReferrerStreamWriter.java

/**
 * Start a number of threads and send randomly generated {@link HttpReferrerPair}s to a Kinesis Stream until the
 * program is terminated.
 *
 * @param args Expecting 3 arguments: the number of threads to use to send data to Kinesis, the name of
 *        the stream to send records to, and the AWS region in which these resources exist or should be
 *        created.
 * @throws InterruptedException If this application is interrupted while sending records to Kinesis.
 */
public static void main(String[] args) throws InterruptedException {
    if (args.length != 3) {
        System.err.println("Usage: " + HttpReferrerStreamWriter.class.getSimpleName()
                + " <number of threads> <stream name> <region>");
        System.exit(1);
    }

    int numberOfThreads = Integer.parseInt(args[0]);
    String streamName = args[1];
    Region region = SampleUtils.parseRegion(args[2]);

    AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
    ClientConfiguration clientConfig = SampleUtils.configureUserAgentForSample(new ClientConfiguration());
    AmazonKinesis kinesis = new AmazonKinesisClient(credentialsProvider, clientConfig);
    kinesis.setRegion(region);

    // The more resources we declare the higher write IOPS we need on our DynamoDB table.
    // We write a record for each resource every interval.
    // If interval = 500ms, resource count = 7 we need: (1000/500 * 7) = 14 write IOPS minimum.
    List<String> resources = new ArrayList<>();
    resources.add("/index.html");

    // These are the possible referrers to use when generating pairs
    List<String> referrers = new ArrayList<>();
    referrers.add("http://www.amazon.com");
    referrers.add("http://www.google.com");
    referrers.add("http://www.yahoo.com");
    referrers.add("http://www.bing.com");
    referrers.add("http://www.stackoverflow.com");
    referrers.add("http://www.reddit.com");

    HttpReferrerPairFactory pairFactory = new HttpReferrerPairFactory(resources, referrers);

    // Creates a stream to write to with 2 shards if it doesn't exist
    StreamUtils streamUtils = new StreamUtils(kinesis);
    streamUtils.createStreamIfNotExists(streamName, 2);
    LOG.info(String.format("%s stream is ready for use", streamName));

    final HttpReferrerKinesisPutter putter = new HttpReferrerKinesisPutter(pairFactory, kinesis, streamName);

    ExecutorService es = Executors.newCachedThreadPool();

    Runnable pairSender = new Runnable() {
        @Override
        public void run() {
            try {
                putter.sendPairsIndefinitely(DELAY_BETWEEN_RECORDS_IN_MILLIS, TimeUnit.MILLISECONDS);
            } catch (Exception ex) {
                LOG.warn(
                        "Thread encountered an error while sending records. Records will no longer be put by this thread.",
                        ex);
            }
        }
    };

    for (int i = 0; i < numberOfThreads; i++) {
        es.submit(pairSender);
    }

    LOG.info(String.format("Sending pairs with a %dms delay between records with %d thread(s).",
            DELAY_BETWEEN_RECORDS_IN_MILLIS, numberOfThreads));

    es.shutdown();
    es.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS); // effectively wait forever for the senders to finish
}

From source file: com.linkedin.pinot.common.utils.SchemaUtils.java

/**
 * An example on how to use this utility class.
 */
public static void main(String[] args) {
    Schema schema = new Schema.SchemaBuilder().setSchemaName("testSchema")
            .addSingleValueDimension("dimension", FieldSpec.DataType.DOUBLE)
            .addMetric("metric", FieldSpec.DataType.INT).addTime("time", TimeUnit.DAYS, FieldSpec.DataType.INT)
            .build();
    System.out.println(postSchema("localhost", 8100, schema));
    Schema fetchedSchema = getSchema("localhost", 8100, "testSchema");
    Preconditions.checkNotNull(fetchedSchema);
    System.out.println(fetchedSchema);
    System.out.println(equalsIgnoreVersion(schema, fetchedSchema));
    System.out.println(deleteSchema("localhost", 8100, "testSchema"));
}

From source file: com.linkedin.pinot.tools.admin.command.GenerateDataCommand.java

public static void main(String[] args) throws JsonGenerationException, JsonMappingException, IOException {
    SchemaBuilder schemaBuilder = new SchemaBuilder();

    schemaBuilder.addSingleValueDimension("name", DataType.STRING);
    schemaBuilder.addSingleValueDimension("age", DataType.INT);
    schemaBuilder.addMetric("percent", DataType.FLOAT);
    schemaBuilder.addTime("days", TimeUnit.DAYS, DataType.LONG);

    Schema schema = schemaBuilder.build();
    ObjectMapper objectMapper = new ObjectMapper();
    System.out.println(objectMapper.writeValueAsString(schema));
}

From source file: org.dllearner.algorithms.qtl.operations.lgg.LGGGeneratorRDFS.java

public static void main(String[] args) throws Exception {
    StringRenderer.setRenderer(Rendering.DL_SYNTAX);
    // knowledge base
    SparqlEndpoint endpoint = SparqlEndpoint.getEndpointDBpedia();
    endpoint = SparqlEndpoint.create("http://sake.informatik.uni-leipzig.de:8890/sparql", "http://dbpedia.org");
    QueryExecutionFactory qef = FluentQueryExecutionFactory
            .http(endpoint.getURL().toString(), endpoint.getDefaultGraphURIs()).config()
            .withCache(CacheUtilsH2.createCacheFrontend("/tmp/cache", false, TimeUnit.DAYS.toMillis(60))) // 60-day cache lifetime
            .withPagination(10000).withDelay(50, TimeUnit.MILLISECONDS).end().create();

    // tree generation
    ConciseBoundedDescriptionGenerator cbdGenerator = new ConciseBoundedDescriptionGeneratorImpl(qef);
    int maxDepth = 2;

    QueryTreeFactory treeFactory = new QueryTreeFactoryBase();
    treeFactory.setMaxDepth(maxDepth);
    treeFactory.addDropFilters(new PredicateDropStatementFilter(StopURIsDBpedia.get()),
            new PredicateDropStatementFilter(StopURIsRDFS.get()),
            new PredicateDropStatementFilter(StopURIsOWL.get()),
            new ObjectDropStatementFilter(StopURIsOWL.get()),
            new PredicateDropStatementFilter(StopURIsSKOS.get()),
            new ObjectDropStatementFilter(StopURIsSKOS.get()),
            new NamespaceDropStatementFilter(
                    Sets.newHashSet("http://dbpedia.org/property/", "http://purl.org/dc/terms/",
                            "http://dbpedia.org/class/yago/", "http://www.w3.org/2003/01/geo/wgs84_pos#",
                            "http://www.georss.org/georss/", FOAF.getURI())));
    List<RDFResourceTree> trees = new ArrayList<>();
    List<String> resources = Lists.newArrayList("http://dbpedia.org/resource/Leipzig",
            "http://dbpedia.org/resource/Berlin");
    for (String resource : resources) {
        try {
            System.out.println(resource);
            Model model = cbdGenerator.getConciseBoundedDescription(resource, maxDepth);
            RDFResourceTree tree = treeFactory.getQueryTree(ResourceFactory.createResource(resource), model);
            System.out.println(tree.getStringRepresentation());
            trees.add(tree);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    // LGG computation
    SPARQLReasoner reasoner = new SPARQLReasoner(qef);
    reasoner.setPrecomputeClassHierarchy(true);
    reasoner.setPrecomputeObjectPropertyHierarchy(true);
    reasoner.setPrecomputeDataPropertyHierarchy(true);
    reasoner.init();
    reasoner.precomputePropertyDomains();
    reasoner.precomputeObjectPropertyRanges();
    LGGGenerator lggGen = new LGGGeneratorRDFS(reasoner);
    RDFResourceTree lgg = lggGen.getLGG(trees);

    System.out.println("LGG");
    System.out.println(lgg.getStringRepresentation());
    System.out.println(QueryTreeUtils.toSPARQLQueryString(lgg));
    System.out.println(QueryTreeUtils.toOWLClassExpression(lgg));
}

From source file: Main.java

public static boolean isCacheAvailable(long createTime, int availableDays) {
    return System.currentTimeMillis() <= createTime + TimeUnit.DAYS.toMillis(availableDays);
}
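
A hypothetical call (assuming the helper above), checking a three-day-old cache entry against a seven-day lifetime:

long createTime = System.currentTimeMillis() - TimeUnit.DAYS.toMillis(3);
boolean fresh = isCacheAvailable(createTime, 7); // true: 3 days old, still valid for 7 days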

From source file: Main.java

public static void shutdownAndWaitForever(ExecutorService threadPool) {
    shutdownAndWait(threadPool, 100500L, TimeUnit.DAYS); // 100500 days: an effectively unbounded wait
}
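
The shutdownAndWait helper is not shown in this snippet; a minimal sketch of what it presumably does (an assumption, not the project's actual code):

public static void shutdownAndWait(ExecutorService threadPool, long timeout, TimeUnit unit) {
    threadPool.shutdown(); // stop accepting new tasks
    try {
        // block until all tasks finish or the (very long) timeout elapses
        threadPool.awaitTermination(timeout, unit);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore the interrupt flag
    }
}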

From source file: Main.java

public static String getHumanReadableTime(long ms) {
    final long days = TimeUnit.MILLISECONDS.toDays(ms);
    ms -= TimeUnit.DAYS.toMillis(days);
    final long hours = TimeUnit.MILLISECONDS.toHours(ms);
    ms -= TimeUnit.HOURS.toMillis(hours);
    final long minutes = TimeUnit.MILLISECONDS.toMinutes(ms);
    ms -= TimeUnit.MINUTES.toMillis(minutes);
    final long seconds = TimeUnit.MILLISECONDS.toSeconds(ms);

    final StringBuilder sb = new StringBuilder();
    sb.append(days);
    sb.append("d ");
    sb.append(hours);
    sb.append("h ");
    sb.append(minutes);
    sb.append("m ");
    sb.append(seconds);
    sb.append('s');

    return (sb.toString());
}
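
For example, a sample call with 90061000 ms, i.e. one day, one hour, one minute and one second:

System.out.println(getHumanReadableTime(90061000L)); // prints "1d 1h 1m 1s"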