Example usage for java.lang System nanoTime

List of usage examples for java.lang System nanoTime

Introduction

On this page you can find example usage of java.lang.System.nanoTime().

Prototype

@HotSpotIntrinsicCandidate
public static native long nanoTime();

Document

Returns the current value of the running Java Virtual Machine's high-resolution time source, in nanoseconds.
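
The returned value is only meaningful when compared with another nanoTime reading taken in the same running JVM; it is not related to wall-clock time. As a minimal illustration (a hypothetical example, not taken from any of the projects listed below), a typical elapsed-time measurement looks like this:

import java.util.concurrent.TimeUnit;

// Hypothetical example: measure elapsed time between two nanoTime() readings
public class NanoTimeExample {
    public static void main(String[] args) throws InterruptedException {
        final long start = System.nanoTime();

        Thread.sleep(250); // stand-in for the work being measured

        final long elapsed = System.nanoTime() - start;
        System.out.println("Elapsed: " + TimeUnit.NANOSECONDS.toMillis(elapsed) + " ms");
    }
}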

Usage

From source file:com.netflix.genie.core.jobs.workflow.impl.InitialSetupTask.java

/**
 * {@inheritDoc}
 */
@Override
public void executeTask(@NotNull final Map<String, Object> context) throws GenieException, IOException {
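    // Record the start time so the finally block can publish the task duration to the metrics timer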
    final long start = System.nanoTime();
    final Map<String, String> tags = MetricsUtils.newSuccessTagsMap();
    try {
        final JobExecutionEnvironment jobExecEnv = (JobExecutionEnvironment) context
                .get(JobConstants.JOB_EXECUTION_ENV_KEY);
        final String jobWorkingDirectory = jobExecEnv.getJobWorkingDir().getCanonicalPath();
        final Writer writer = (Writer) context.get(JobConstants.WRITER_KEY);
        final String jobId = jobExecEnv.getJobRequest().getId()
                .orElseThrow(() -> new GeniePreconditionException("No job id found. Unable to continue"));
        log.info("Starting Initial Setup Task for job {}", jobId);

        this.createJobDirStructure(jobWorkingDirectory);

        // set the env variables in the launcher script
        this.createJobDirEnvironmentVariables(writer, jobWorkingDirectory);
        this.createApplicationEnvironmentVariables(writer);

        // create environment variables for the command
        final Command command = jobExecEnv.getCommand();
        this.createCommandEnvironmentVariables(writer, command);

        // create environment variables for the cluster
        final Cluster cluster = jobExecEnv.getCluster();
        this.createClusterEnvironmentVariables(writer, cluster);

        // create environment variable for the job itself
        this.createJobEnvironmentVariables(writer, jobId, jobExecEnv.getJobRequest().getName(),
                jobExecEnv.getMemory());

        //Export the Genie Version
        writer.write(GENIE_VERSION_EXPORT);
        writer.write(LINE_SEPARATOR);
        writer.write(LINE_SEPARATOR);

        log.info("Finished Initial Setup Task for job {}", jobId);
    } catch (Throwable t) {
        MetricsUtils.addFailureTagsWithException(tags, t);
        throw t;
    } finally {
        this.getRegistry().timer(timerId.withTags(tags)).record(System.nanoTime() - start,
                TimeUnit.NANOSECONDS);
    }
}

From source file:com.aimluck.eip.util.ALTimelineUtils.java

private static void uploadTimelineImage(ServletContext servletContext, int uid, int index, String title,
        String filePath, String sFilePath, EipTTimeline timeline) throws FileNotFoundException, IOException {
    Date now = new Date();
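    // nanoTime() is used here as a practically unique suffix for the generated file name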
    String filename = index + "_" + String.valueOf(System.nanoTime());
    File tmpFile = new File(servletContext.getRealPath(sFilePath));
    byte[] imageInBytes = IOUtils.toByteArray(new FileInputStream(tmpFile));

    EipTTimelineFile file = Database.create(EipTTimelineFile.class);
    file.setOwnerId(uid);
    file.setFileName(title);
    file.setFilePath(getRelativePath(filename));
    file.setFileThumbnail(imageInBytes);
    file.setEipTTimeline(timeline);
    file.setCreateDate(now);
    file.setUpdateDate(now);

    Database.commit();

    tmpFile = new File(servletContext.getRealPath(filePath));
    imageInBytes = IOUtils.toByteArray(new FileInputStream(tmpFile));

    ALStorageService.createNewFile(new ByteArrayInputStream(imageInBytes),
            JetspeedResources.getString("aipo.filedir", "") + ALStorageService.separator()
                    + Database.getDomainName() + ALStorageService.separator()
                    + JetspeedResources.getString("aipo.timeline.categorykey", "")
                    + ALStorageService.separator() + uid + ALStorageService.separator() + filename);
}

From source file:com.ibm.bi.dml.runtime.controlprogram.parfor.RemoteParForMR.java

/**
 * @param pfid
 * @param program
 * @param taskFile
 * @param resultFile
 * @param colocatedDPMatrixObj
 * @param enableCPCaching
 * @param numMappers
 * @param replication
 * @param max_retry
 * @param minMem
 * @param jvmReuse
 * @return
 * @throws DMLRuntimeException
 */
public static RemoteParForJobReturn runJob(long pfid, String program, String taskFile, String resultFile,
        MatrixObject colocatedDPMatrixObj, //inputs
        boolean enableCPCaching, int numMappers, int replication, int max_retry, long minMem, boolean jvmReuse) //opt params
        throws DMLRuntimeException {
    RemoteParForJobReturn ret = null;
    String jobname = "ParFor-EMR";
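    // Capture a nanoTime() start stamp only when statistics collection is enabled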
    long t0 = DMLScript.STATISTICS ? System.nanoTime() : 0;

    JobConf job;
    job = new JobConf(RemoteParForMR.class);
    job.setJobName(jobname + pfid);

    //maintain dml script counters
    Statistics.incrementNoOfCompiledMRJobs();

    try {
        /////
        //configure the MR job

        //set arbitrary CP program blocks that will be executed in the mapper
        MRJobConfiguration.setProgramBlocks(job, program);

        //enable/disable caching
        MRJobConfiguration.setParforCachingConfig(job, enableCPCaching);

        //set mappers, reducers, combiners
        job.setMapperClass(RemoteParWorkerMapper.class); //map-only

        //set input format (one split per row, NLineInputFormat default N=1)
        if (ParForProgramBlock.ALLOW_DATA_COLOCATION && colocatedDPMatrixObj != null) {
            job.setInputFormat(RemoteParForColocatedNLineInputFormat.class);
            MRJobConfiguration.setPartitioningFormat(job, colocatedDPMatrixObj.getPartitionFormat());
            MatrixCharacteristics mc = colocatedDPMatrixObj.getMatrixCharacteristics();
            MRJobConfiguration.setPartitioningBlockNumRows(job, mc.getRowsPerBlock());
            MRJobConfiguration.setPartitioningBlockNumCols(job, mc.getColsPerBlock());
            MRJobConfiguration.setPartitioningFilename(job, colocatedDPMatrixObj.getFileName());
        } else //default case 
        {
            job.setInputFormat(NLineInputFormat.class);
        }

        //set the input path and output path 
        FileInputFormat.setInputPaths(job, new Path(taskFile));

        //set output format
        job.setOutputFormat(SequenceFileOutputFormat.class);

        //set output path
        MapReduceTool.deleteFileIfExistOnHDFS(resultFile);
        FileOutputFormat.setOutputPath(job, new Path(resultFile));

        //set the output key, value schema
        job.setMapOutputKeyClass(LongWritable.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(LongWritable.class);
        job.setOutputValueClass(Text.class);

        //////
        //set optimization parameters

        //set the number of mappers and reducers 
        job.setNumMapTasks(numMappers); //numMappers
        job.setNumReduceTasks(0);
        //job.setInt("mapred.map.tasks.maximum", 1); //system property
        //job.setInt("mapred.tasktracker.tasks.maximum",1); //system property
        //job.setInt("mapred.jobtracker.maxtasks.per.job",1); //system property

        //use FLEX scheduler configuration properties
        if (ParForProgramBlock.USE_FLEX_SCHEDULER_CONF) {
            job.setInt("flex.priority", 0); //highest

            job.setInt("flex.map.min", 0);
            job.setInt("flex.map.max", numMappers);
            job.setInt("flex.reduce.min", 0);
            job.setInt("flex.reduce.max", numMappers);
        }

        //set jvm memory size (if required)
        String memKey = "mapred.child.java.opts";
        if (minMem > 0 && minMem > InfrastructureAnalyzer.extractMaxMemoryOpt(job.get(memKey))) {
            InfrastructureAnalyzer.setMaxMemoryOpt(job, memKey, minMem);
            LOG.warn("Forcing '" + memKey + "' to -Xmx" + minMem / (1024 * 1024) + "M.");
        }

        //disable automatic tasks timeouts and speculative task exec
        job.setInt("mapred.task.timeout", 0);
        job.setMapSpeculativeExecution(false);

        //set up map/reduce memory configurations (if in AM context)
        DMLConfig config = ConfigurationManager.getConfig();
        DMLAppMasterUtils.setupMRJobRemoteMaxMemory(job, config);

        //enable JVM reuse (multiple MR tasks per JVM)
        if (jvmReuse)
            job.setNumTasksToExecutePerJvm(-1); //unlimited

        //set sort io buffer (reduce unnecessary large io buffer, guaranteed memory consumption)
        job.setInt("io.sort.mb", 8); //8MB

        //set the replication factor for the results
        job.setInt("dfs.replication", replication);

        //set the max number of retries per map task
        //  disabled job-level configuration to respect cluster configuration
        //  note: this refers to hadoop2, hence it never had effect on mr1
        //job.setInt("mapreduce.map.maxattempts", max_retry);

        //set unique working dir
        MRJobConfiguration.setUniqueWorkingDir(job);

        /////
        // execute the MR job         
        RunningJob runjob = JobClient.runJob(job);

        // Process different counters 
        Statistics.incrementNoOfExecutedMRJobs();
        Group pgroup = runjob.getCounters().getGroup(ParForProgramBlock.PARFOR_COUNTER_GROUP_NAME);
        int numTasks = (int) pgroup.getCounter(Stat.PARFOR_NUMTASKS.toString());
        int numIters = (int) pgroup.getCounter(Stat.PARFOR_NUMITERS.toString());
        if (DMLScript.STATISTICS && !InfrastructureAnalyzer.isLocalMode()) {
            Statistics.incrementJITCompileTime(pgroup.getCounter(Stat.PARFOR_JITCOMPILE.toString()));
            Statistics.incrementJVMgcCount(pgroup.getCounter(Stat.PARFOR_JVMGC_COUNT.toString()));
            Statistics.incrementJVMgcTime(pgroup.getCounter(Stat.PARFOR_JVMGC_TIME.toString()));
            Group cgroup = runjob.getCounters().getGroup(CacheableData.CACHING_COUNTER_GROUP_NAME.toString());
            CacheStatistics
                    .incrementMemHits((int) cgroup.getCounter(CacheStatistics.Stat.CACHE_HITS_MEM.toString()));
            CacheStatistics.incrementFSBuffHits(
                    (int) cgroup.getCounter(CacheStatistics.Stat.CACHE_HITS_FSBUFF.toString()));
            CacheStatistics
                    .incrementFSHits((int) cgroup.getCounter(CacheStatistics.Stat.CACHE_HITS_FS.toString()));
            CacheStatistics.incrementHDFSHits(
                    (int) cgroup.getCounter(CacheStatistics.Stat.CACHE_HITS_HDFS.toString()));
            CacheStatistics.incrementFSBuffWrites(
                    (int) cgroup.getCounter(CacheStatistics.Stat.CACHE_WRITES_FSBUFF.toString()));
            CacheStatistics.incrementFSWrites(
                    (int) cgroup.getCounter(CacheStatistics.Stat.CACHE_WRITES_FS.toString()));
            CacheStatistics.incrementHDFSWrites(
                    (int) cgroup.getCounter(CacheStatistics.Stat.CACHE_WRITES_HDFS.toString()));
            CacheStatistics
                    .incrementAcquireRTime(cgroup.getCounter(CacheStatistics.Stat.CACHE_TIME_ACQR.toString()));
            CacheStatistics
                    .incrementAcquireMTime(cgroup.getCounter(CacheStatistics.Stat.CACHE_TIME_ACQM.toString()));
            CacheStatistics
                    .incrementReleaseTime(cgroup.getCounter(CacheStatistics.Stat.CACHE_TIME_RLS.toString()));
            CacheStatistics
                    .incrementExportTime(cgroup.getCounter(CacheStatistics.Stat.CACHE_TIME_EXP.toString()));
        }

        // read all files of result variables and prepare for return
        LocalVariableMap[] results = readResultFile(job, resultFile);

        ret = new RemoteParForJobReturn(runjob.isSuccessful(), numTasks, numIters, results);
    } catch (Exception ex) {
        throw new DMLRuntimeException(ex);
    } finally {
        // remove created files 
        try {
            MapReduceTool.deleteFileIfExistOnHDFS(new Path(taskFile), job);
            MapReduceTool.deleteFileIfExistOnHDFS(new Path(resultFile), job);
        } catch (IOException ex) {
            throw new DMLRuntimeException(ex);
        }
    }

    if (DMLScript.STATISTICS) {
        long t1 = System.nanoTime();
        Statistics.maintainCPHeavyHitters("MR-Job_" + jobname, t1 - t0);
    }

    return ret;
}

From source file:com.flexive.shared.TimestampRecorder.java

/**
 * Add a timestamp with the given name. The execution time of a timestamp
 * is the time elapsed since the last call to timestamp, or to begin if no
 * timestamp has been recorded yet.
 *
 * @param name  Timestamp name which will be included in the string representation
 * @return      the time taken for the last task, in nanoseconds
 */
public long timestamp(String name) {
    final long nanos = System.nanoTime() - totalNanos - startNanos;
    timestamps.add(new Pair<String, Long>(name, nanos));
    totalNanos += nanos;
    return nanos;
}

From source file:de.javakaffee.web.msm.serializer.xstream.XStreamTranscoderTest.java

@Test
public void testReadValueIntoObject() throws Exception {
    final MemcachedBackupSessionManager manager = new MemcachedBackupSessionManager();
    manager.setContainer(new StandardContext());
    final XStreamTranscoder transcoder = new XStreamTranscoder(manager);

    final StandardSession session = manager.createEmptySession();
    session.setValid(true);
    session.setCreationTime(System.currentTimeMillis());
    getField(StandardSession.class, "lastAccessedTime").set(session, System.currentTimeMillis() + 100);
    session.setMaxInactiveInterval(600);

    session.setId("foo");

    session.setAttribute("person1",
            createPerson("foo bar", Gender.MALE, "foo.bar@example.org", "foo.bar@example.com"));
    session.setAttribute("person2",
            createPerson("bar baz", Gender.FEMALE, "bar.baz@example.org", "bar.baz@example.com"));

    // Time each serialization run; the nanoTime() delta divided by 1000 is printed in microseconds
    final long start1 = System.nanoTime();
    transcoder.serialize(session);
    System.out.println("xstream-ser took " + (System.nanoTime() - start1) / 1000);

    final long start2 = System.nanoTime();
    transcoder.serialize(session);
    System.out.println("xstream-ser took " + (System.nanoTime() - start2) / 1000);

    final long start3 = System.nanoTime();
    final byte[] json = transcoder.serialize(session);
    final StandardSession readJSONValue = (StandardSession) transcoder.deserialize(json);
    System.out.println("xstream-round took " + (System.nanoTime() - start3) / 1000);

    //System.out.println( "Have json: " + new String(json) );
    assertEquals(readJSONValue, session);

    final long start4 = System.nanoTime();
    final StandardSession readJavaValue = javaRoundtrip(session, manager);
    System.out.println("java-round took " + (System.nanoTime() - start4) / 1000);
    assertEquals(readJavaValue, session);

    assertEquals(readJSONValue, readJavaValue);

    System.out.println(ToStringBuilder.reflectionToString(session));
    System.out.println(ToStringBuilder.reflectionToString(readJSONValue));
    System.out.println(ToStringBuilder.reflectionToString(readJavaValue));

}

From source file:com.esri.geoevent.test.tools.RunTcpInBdsOutTest.java

public void send(String server, Integer port, Long numEvents, Integer rate, String data_file) {

    BufferedReader br = null;
    ArrayList<String> lines = new ArrayList<>();
    LocalDateTime st = null;

    try {

        // Read the file into String array
        br = new BufferedReader(new FileReader(data_file));

        String line = null;
        while ((line = br.readLine()) != null) {
            lines.add(line);
        }

        Socket sckt = new Socket(server, port);
        OutputStream os = sckt.getOutputStream();

        Integer cnt = 0;

        st = LocalDateTime.now();

        Double ns_delay = 1000000000.0 / (double) rate;

        long ns = ns_delay.longValue();
        if (ns < 0) {
            ns = 0;
        }

        int i = 0;
        int j = 0;

        while (i < numEvents) {
            i++;
            j++;
            if (j >= lines.size()) {
                j = 0;
            }
            line = lines.get(j) + "\n";

            final long stime = System.nanoTime();

            // Busy-wait on nanoTime() until the per-event delay has elapsed, throttling output to the requested rate
            long etime = 0;
            do {
                etime = System.nanoTime();
            } while (stime + ns >= etime);

            os.write(line.getBytes());
            os.flush();

        }

        LocalDateTime et = LocalDateTime.now();
        if (st != null) {
            et = LocalDateTime.now();

            Duration delta = Duration.between(st, et);

            Double elapsed_seconds = (double) delta.getSeconds() + delta.getNano() / 1000000000.0;

            send_rate = (double) numEvents / elapsed_seconds;
        }

        sckt.close();
        os = null;

    } catch (Exception e) {
        System.err.println(e.getMessage());
        send_rate = -1.0;
    } finally {
        try {
            br.close();
        } catch (Exception e) {
            //
        }

        this.send_rate = send_rate;

    }

}

From source file:net.sourceforge.subsonic.dao.AbstractDao.java

private void log(String sql, long startTimeNano) {
    long millis = (System.nanoTime() - startTimeNano) / 1000000L;

    // Log queries that take more than 2 seconds.
    if (millis > TimeUnit.SECONDS.toMillis(2L)) {
        LOG.debug(millis + " ms:  " + sql);
    }
}

From source file:com.clutch.ClutchSync.java

public static void sync(ClutchStats clutchStats) {
    if (thisIsHappening) {
        return;
    }
    thisIsHappening = true;
    if (pendingReload) {
        pendingReload = false;
        for (ClutchView clutchView : clutchViews) {
            clutchView.contentChanged();
        }
    }
    ClutchAPIClient.callMethod("sync", null, new ClutchAPIResponseHandler() {
        @Override
        public void onSuccess(JSONObject response) {
            final AssetManager mgr = context.getAssets();

            File parentCacheDir = context.getCacheDir();
            final File tempDir;
            try {
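                // nanoTime() provides a unique-enough suffix for the temporary directory name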
                tempDir = File.createTempFile("clutchtemp", Long.toString(System.nanoTime()), parentCacheDir);
                if (!tempDir.delete()) {
                    Log.e(TAG, "Could not delete temp file: " + tempDir.getAbsolutePath());
                    return;
                }
                if (!tempDir.mkdir()) {
                    Log.e(TAG, "Could not create temp directory: " + tempDir.getAbsolutePath());
                    return;
                }
            } catch (IOException e) {
                Log.e(TAG, "Could not create temp file");
                return;
            }

            File cacheDir = getCacheDir();
            if (cacheDir == null) {
                try {
                    if (!copyAssetDir(mgr, tempDir)) {
                        return;
                    }
                } catch (IOException e) {
                    Log.e(TAG, "Couldn't copy the asset dir files to the temp dir: " + e);
                    return;
                }
            } else {
                try {
                    if (!copyDir(cacheDir, tempDir)) {
                        return;
                    }
                } catch (IOException e) {
                    Log.e(TAG, "Couldn't copy the cache dir files to the temp dir: " + e);
                    return;
                }
            }

            conf = response.optJSONObject("conf");
            String version = "" + conf.optInt("_version");
            newFilesDownloaded = false;
            try {
                JSONCompareResult confCompare = JSONCompare.compareJSON(ClutchConf.getConf(), conf,
                        JSONCompareMode.NON_EXTENSIBLE);
                if (confCompare.failed()) {
                    newFilesDownloaded = true;
                    // This is where in the ObjC version we write out the conf, but I don't think we need to anymore
                }
            } catch (JSONException e1) {
                Log.i(TAG, "Couldn't compare the conf file with the cached conf file: " + e1);
            }

            File cachedFiles = new File(tempDir, "__files.json");
            JSONObject cached = null;
            if (cachedFiles.exists()) {
                StringBuffer strContent = new StringBuffer("");
                try {
                    FileInputStream in = new FileInputStream(cachedFiles);
                    int ch;
                    while ((ch = in.read()) != -1) {
                        strContent.append((char) ch);
                    }
                    in.close();
                    cached = new JSONObject(new JSONTokener(strContent.toString()));
                } catch (IOException e) {
                    Log.e(TAG, "Could not read __files.json from cache file: " + e);
                } catch (JSONException e) {
                    Log.e(TAG, "Could not parse __files.json from cache file: " + e);
                }
            }
            if (cached == null) {
                cached = new JSONObject();
            }

            final JSONObject files = response.optJSONObject("files");
            try {
                JSONCompareResult filesCompare = JSONCompare.compareJSON(cached, files,
                        JSONCompareMode.NON_EXTENSIBLE);
                if (filesCompare.passed()) {
                    complete(tempDir, files);
                    return;
                }
            } catch (JSONException e1) {
                Log.i(TAG, "Couldn't compare the file hash list with the cached file hash list: " + e1);
            }

            try {
                BufferedWriter bw = new BufferedWriter(new FileWriter(cachedFiles));
                bw.write(files.toString());
                bw.flush();
                bw.close();
            } catch (FileNotFoundException e) {
            } catch (IOException e) {
            }

            currentFile = 0;
            final int numFiles = files.length();
            Iterator<?> it = files.keys();
            while (it.hasNext()) {
                final String fileName = (String) it.next();
                final String hash = files.optString(fileName);
                final String prevHash = cached.optString(fileName);

                // If they equal, then just continue
                if (hash.equals(prevHash)) {
                    if (++currentFile == numFiles) {
                        complete(tempDir, files);
                        return;
                    }
                    continue;
                }

                // Looks like we've seen a new file, so we should reload when this is all done
                newFilesDownloaded = true;

                // Otherwise we need to download the new file
                ClutchAPIClient.downloadFile(fileName, version, new ClutchAPIDownloadResponseHandler() {
                    @Override
                    public void onSuccess(String response) {
                        try {
                            File fullFile = new File(tempDir, fileName);
                            fullFile.getParentFile().mkdirs();
                            fullFile.createNewFile();
                            BufferedWriter bw = new BufferedWriter(new FileWriter(fullFile));
                            bw.write(response);
                            bw.flush();
                            bw.close();
                        } catch (IOException e) {
                            final Writer result = new StringWriter();
                            final PrintWriter printWriter = new PrintWriter(result);
                            e.printStackTrace(printWriter);
                            Log.e(TAG, "Tried, but could not write file: " + fileName + " : " + result);
                        }

                        if (++currentFile == numFiles) {
                            complete(tempDir, files);
                            return;
                        }
                    }

                    @Override
                    public void onFailure(Throwable e, String content) {
                        final Writer result = new StringWriter();
                        final PrintWriter printWriter = new PrintWriter(result);
                        e.printStackTrace(printWriter);
                        Log.e(TAG, "Error downloading file from server: " + fileName + " " + result + " "
                                + content);
                        if (++currentFile == numFiles) {
                            complete(tempDir, files);
                            return;
                        }
                    }
                });
            }
        }

        @Override
        public void onFailure(Throwable e, JSONObject errorResponse) {
            Log.e(TAG, "Failed to sync with the Clutch server: " + errorResponse);
        }
    });
    background(clutchStats);
}

From source file:com.boundary.camel.component.url.UrlClient.java

public void connect(UrlConfiguration configuration) {
    // Set the configuration
    this.configuration = configuration;

    // Start the timer
    long start = System.nanoTime();

    // Connect to the URL and retrieve the contents
    connect();

    // Stop the timer
    long stop = System.nanoTime();

    // compute the elapsed time
    setResponseTime((long) ((stop - start) * NANO_SECONDS_TO_MILLI_SECONDS));
}

From source file:com.almende.eve.state.mongo.MongoState.java

/**
 * The constructor used on creation of a new state in the database.
 *
 * @param agentId
 *            the agent id
 */
public MongoState(final String agentId) {
    super(agentId);
    timestamp = System.nanoTime();
    agentType = null;
}