Example usage for java.util.concurrent.ThreadLocalRandom.current()

Introduction

This page lists usage examples for java.util.concurrent.ThreadLocalRandom.current().

Prototype

public static ThreadLocalRandom current() 

Document

Returns the current thread's ThreadLocalRandom.
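
A minimal, self-contained sketch of the typical call pattern follows; the bounds below are arbitrary and chosen only for illustration.

import java.util.concurrent.ThreadLocalRandom;

public class ThreadLocalRandomExample {
    public static void main(String[] args) {
        // current() returns the ThreadLocalRandom bound to the calling thread;
        // do not share the instance across threads or call setSeed on it.
        ThreadLocalRandom rng = ThreadLocalRandom.current();

        int percent = rng.nextInt(100);   // uniform int in [0, 100)
        int die = rng.nextInt(1, 7);      // uniform int in [1, 6]; the upper bound is exclusive
        long token = rng.nextLong();      // uniform long over the full 64-bit range

        System.out.println(percent + " " + die + " " + token);
    }
}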

Usage

From source file: org.apache.druid.security.kerberos.KerberosAuthenticator.java

@Override
public Filter getFilter() {
    return new AuthenticationFilter() {
        private Signer mySigner;

        @Override
        public void init(FilterConfig filterConfig) throws ServletException {
            ClassLoader prevLoader = Thread.currentThread().getContextClassLoader();
            try {
                // AuthenticationHandler is created during AuthenticationFilter.init using reflection with the thread context class loader.
                // Since Druid loads this class as an extension and filter init is done in the main thread,
                // we need to set the classloader explicitly to the extension class loader.
                Thread.currentThread().setContextClassLoader(AuthenticationFilter.class.getClassLoader());
                super.init(filterConfig);
                String configPrefix = filterConfig.getInitParameter(CONFIG_PREFIX);
                configPrefix = (configPrefix != null) ? configPrefix + "." : "";
                Properties config = getConfiguration(configPrefix, filterConfig);
                String signatureSecret = config.getProperty(configPrefix + SIGNATURE_SECRET);
                if (signatureSecret == null) {
                    signatureSecret = Long.toString(ThreadLocalRandom.current().nextLong());
                    log.warn("'signature.secret' configuration not set, using a random value as secret");
                }
                final byte[] secretBytes = StringUtils.toUtf8(signatureSecret);
                SignerSecretProvider signerSecretProvider = new SignerSecretProvider() {
                    @Override
                    public void init(Properties config, ServletContext servletContext, long tokenValidity) {

                    }

                    @Override
                    public byte[] getCurrentSecret() {
                        return secretBytes;
                    }

                    @Override
                    public byte[][] getAllSecrets() {
                        return new byte[][] { secretBytes };
                    }
                };
                mySigner = new Signer(signerSecretProvider);
            } finally {
                Thread.currentThread().setContextClassLoader(prevLoader);
            }
        }

        // Copied from hadoop-auth's AuthenticationFilter, to allow us to change error response handling in doFilterSuper
        @Override
        protected AuthenticationToken getToken(HttpServletRequest request) throws AuthenticationException {
            AuthenticationToken token = null;
            String tokenStr = null;
            Cookie[] cookies = request.getCookies();
            if (cookies != null) {
                for (Cookie cookie : cookies) {
                    if (cookie.getName().equals(AuthenticatedURL.AUTH_COOKIE)) {
                        tokenStr = cookie.getValue();
                        try {
                            tokenStr = mySigner.verifyAndExtract(tokenStr);
                        } catch (SignerException ex) {
                            throw new AuthenticationException(ex);
                        }
                        break;
                    }
                }
            }
            if (tokenStr != null) {
                token = AuthenticationToken.parse(tokenStr);
                if (!token.getType().equals(getAuthenticationHandler().getType())) {
                    throw new AuthenticationException("Invalid AuthenticationToken type");
                }
                if (token.isExpired()) {
                    throw new AuthenticationException("AuthenticationToken expired");
                }
            }
            return token;
        }

        @Override
        public void doFilter(ServletRequest request, ServletResponse response, FilterChain filterChain)
                throws IOException, ServletException {
            HttpServletRequest httpReq = (HttpServletRequest) request;

            // If there's already an auth result, then we have authenticated already, skip this.
            if (request.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT) != null) {
                filterChain.doFilter(request, response);
                return;
            }

            if (loginContext == null) {
                initializeKerberosLogin();
            }

            String path = ((HttpServletRequest) request).getRequestURI();
            if (isExcluded(path)) {
                filterChain.doFilter(request, response);
            } else {
                String clientPrincipal = null;
                try {
                    Cookie[] cookies = httpReq.getCookies();
                    if (cookies == null) {
                        clientPrincipal = getPrincipalFromRequestNew((HttpServletRequest) request);
                    } else {
                        clientPrincipal = null;
                        for (Cookie cookie : cookies) {
                            if ("hadoop.auth".equals(cookie.getName())) {
                                Matcher matcher = HADOOP_AUTH_COOKIE_REGEX.matcher(cookie.getValue());
                                if (matcher.matches()) {
                                    clientPrincipal = matcher.group(1);
                                    break;
                                }
                            }
                        }
                    }
                } catch (Exception ex) {
                    clientPrincipal = null;
                }

                if (clientPrincipal != null) {
                    request.setAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT,
                            new AuthenticationResult(clientPrincipal, authorizerName, name, null));
                }
            }

            doFilterSuper(request, response, filterChain);
        }

        // Copied from hadoop-auth's AuthenticationFilter, to allow us to change error response handling
        private void doFilterSuper(ServletRequest request, ServletResponse response, FilterChain filterChain)
                throws IOException, ServletException {
            boolean unauthorizedResponse = true;
            int errCode = HttpServletResponse.SC_UNAUTHORIZED;
            AuthenticationException authenticationEx = null;
            HttpServletRequest httpRequest = (HttpServletRequest) request;
            HttpServletResponse httpResponse = (HttpServletResponse) response;
            boolean isHttps = "https".equals(httpRequest.getScheme());
            try {
                boolean newToken = false;
                AuthenticationToken token;
                try {
                    token = getToken(httpRequest);
                } catch (AuthenticationException ex) {
                    log.warn("AuthenticationToken ignored: " + ex.getMessage());
                    // will be sent back in a 401 unless filter authenticates
                    authenticationEx = ex;
                    token = null;
                }
                if (getAuthenticationHandler().managementOperation(token, httpRequest, httpResponse)) {
                    if (token == null) {
                        if (log.isDebugEnabled()) {
                            log.debug("Request [{%s}] triggering authentication", getRequestURL(httpRequest));
                        }
                        token = getAuthenticationHandler().authenticate(httpRequest, httpResponse);
                        if (token != null && token.getExpires() != 0
                                && token != AuthenticationToken.ANONYMOUS) {
                            token.setExpires(System.currentTimeMillis() + getValidity() * 1000);
                        }
                        newToken = true;
                    }
                    if (token != null) {
                        unauthorizedResponse = false;
                        if (log.isDebugEnabled()) {
                            log.debug("Request [{%s}] user [{%s}] authenticated", getRequestURL(httpRequest),
                                    token.getUserName());
                        }
                        final AuthenticationToken authToken = token;
                        httpRequest = new HttpServletRequestWrapper(httpRequest) {

                            @Override
                            public String getAuthType() {
                                return authToken.getType();
                            }

                            @Override
                            public String getRemoteUser() {
                                return authToken.getUserName();
                            }

                            @Override
                            public Principal getUserPrincipal() {
                                return (authToken != AuthenticationToken.ANONYMOUS) ? authToken : null;
                            }
                        };
                        if (newToken && !token.isExpired() && token != AuthenticationToken.ANONYMOUS) {
                            String signedToken = mySigner.sign(token.toString());
                            tokenToAuthCookie(httpResponse, signedToken, getCookieDomain(), getCookiePath(),
                                    token.getExpires(), !token.isExpired() && token.getExpires() > 0, isHttps);
                            request.setAttribute(SIGNED_TOKEN_ATTRIBUTE,
                                    tokenToCookieString(signedToken, getCookieDomain(), getCookiePath(),
                                            token.getExpires(), !token.isExpired() && token.getExpires() > 0,
                                            isHttps));
                        }
                        // Since this request is validated, also set DRUID_AUTHENTICATION_RESULT
                        request.setAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT,
                                new AuthenticationResult(token.getName(), authorizerName, name, null));
                        doFilter(filterChain, httpRequest, httpResponse);
                    }
                } else {
                    unauthorizedResponse = false;
                }
            } catch (AuthenticationException ex) {
                // exception from the filter itself is fatal
                errCode = HttpServletResponse.SC_FORBIDDEN;
                authenticationEx = ex;
                if (log.isDebugEnabled()) {
                    log.debug(ex, "Authentication exception: " + ex.getMessage());
                } else {
                    log.warn("Authentication exception: " + ex.getMessage());
                }
            }
            if (unauthorizedResponse) {
                if (!httpResponse.isCommitted()) {
                    tokenToAuthCookie(httpResponse, "", getCookieDomain(), getCookiePath(), 0, false, isHttps);
                    // If the response code is 401, the WWW-Authenticate header should be
                    // present; reset to 403 if it is not found.
                    if ((errCode == HttpServletResponse.SC_UNAUTHORIZED) && (!httpResponse.containsHeader(
                            org.apache.hadoop.security.authentication.client.KerberosAuthenticator.WWW_AUTHENTICATE))) {
                        errCode = HttpServletResponse.SC_FORBIDDEN;
                    }
                    if (authenticationEx == null) {
                        // Don't send an error response here, unlike the base AuthenticationFilter implementation.
                        // This request did not use Kerberos auth.
                        // Instead, we will send an error response in PreResponseAuthorizationCheckFilter to allow
                        // other Authenticator implementations to check the request.
                        filterChain.doFilter(request, response);
                    } else {
                        // Do send an error response here, we attempted Kerberos authentication and failed.
                        httpResponse.sendError(errCode, authenticationEx.getMessage());
                    }
                }
            }
        }
    };
}

From source file: org.apache.hadoop.hdfs.server.datanode.TestDatanodeProtocolRetryPolicy.java

/**
 * Verify the following scenario.
 * 1. The initial DatanodeProtocol.registerDatanode succeeds.
 * 2. DN starts heartbeat process.
 * 3. In the first heartbeat, NN asks DN to reregister.
 * 4. DN calls DatanodeProtocol.registerDatanode.
 * 5. DatanodeProtocol.registerDatanode throws EOFException.
 * 6. DN retries.
 * 7. DatanodeProtocol.registerDatanode succeeds.
 */
@Test(timeout = 60000)
public void testDatanodeRegistrationRetry() throws Exception {
    final DatanodeProtocolClientSideTranslatorPB namenode = mock(DatanodeProtocolClientSideTranslatorPB.class);

    Mockito.doAnswer(new Answer<DatanodeRegistration>() {
        int i = 0;

        @Override
        public DatanodeRegistration answer(InvocationOnMock invocation) throws Throwable {
            i++;
            if (i > 1 && i < 5) {
                LOG.info("mockito exception " + i);
                throw new EOFException("TestDatanodeProtocolRetryPolicy");
            } else {
                DatanodeRegistration dr = (DatanodeRegistration) invocation.getArguments()[0];
                datanodeRegistration = new DatanodeRegistration(dr.getDatanodeUuid(), dr);
                LOG.info("mockito succeeded " + datanodeRegistration);
                return datanodeRegistration;
            }
        }
    }).when(namenode).registerDatanode(Mockito.any(DatanodeRegistration.class));

    when(namenode.versionRequest()).thenReturn(new NamespaceInfo(1, CLUSTER_ID, POOL_ID, 1L));

    Mockito.doAnswer(new Answer<HeartbeatResponse>() {
        int i = 0;

        @Override
        public HeartbeatResponse answer(InvocationOnMock invocation) throws Throwable {
            i++;
            HeartbeatResponse heartbeatResponse;
            if (i == 1) {
                LOG.info("mockito heartbeatResponse registration " + i);
                heartbeatResponse = new HeartbeatResponse(new DatanodeCommand[] { RegisterCommand.REGISTER },
                        new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1), null,
                        ThreadLocalRandom.current().nextLong() | 1L);
            } else {
                LOG.info("mockito heartbeatResponse " + i);
                heartbeatResponse = new HeartbeatResponse(new DatanodeCommand[0],
                        new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1), null,
                        ThreadLocalRandom.current().nextLong() | 1L);
            }
            return heartbeatResponse;
        }
    }).when(namenode).sendHeartbeat(Mockito.any(DatanodeRegistration.class), Mockito.any(StorageReport[].class),
            Mockito.anyLong(), Mockito.anyLong(), Mockito.anyInt(), Mockito.anyInt(), Mockito.anyInt(),
            Mockito.any(VolumeFailureSummary.class), Mockito.anyBoolean());

    dn = new DataNode(conf, locations, null) {
        @Override
        DatanodeProtocolClientSideTranslatorPB connectToNN(InetSocketAddress nnAddr) throws IOException {
            Assert.assertEquals(NN_ADDR, nnAddr);
            return namenode;
        }
    };

    // Trigger a heartbeat so that it acknowledges the NN as active.
    dn.getAllBpOs().get(0).triggerHeartbeatForTests();

    waitForBlockReport(namenode);
}

From source file: grakn.core.server.session.computer.GraknSparkComputer.java

@SuppressWarnings("PMD.UnusedFormalParameter")
private Future<ComputerResult> submitWithExecutor() {
    jobGroupId = Integer.toString(ThreadLocalRandom.current().nextInt(Integer.MAX_VALUE));
    String jobDescription = this.vertexProgram == null ? this.mapReducers.toString()
            : this.vertexProgram + "+" + this.mapReducers;

    // Use different output locations
    this.sparkConfiguration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION,
            this.sparkConfiguration.getString(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION) + "/" + jobGroupId);

    updateConfigKeys(sparkConfiguration);

    final Future<ComputerResult> result = computerService.submit(() -> {
        final long startTime = System.currentTimeMillis();

        //////////////////////////////////////////////////
        /////// PROCESS SHIM AND SYSTEM PROPERTIES ///////
        //////////////////////////////////////////////////
        final String shimService = KryoSerializer.class.getCanonicalName()
                .equals(this.sparkConfiguration.getString(Constants.SPARK_SERIALIZER, null))
                        ? UnshadedKryoShimService.class.getCanonicalName()
                        : HadoopPoolShimService.class.getCanonicalName();
        this.sparkConfiguration.setProperty(KryoShimServiceLoader.KRYO_SHIM_SERVICE, shimService);
        ///////////
        final StringBuilder params = new StringBuilder();
        this.sparkConfiguration.getKeys().forEachRemaining(key -> {
            if (KEYS_PASSED_IN_JVM_SYSTEM_PROPERTIES.contains(key)) {
                params.append(" -D").append("tinkerpop.").append(key).append("=")
                        .append(this.sparkConfiguration.getProperty(key));
                System.setProperty("tinkerpop." + key, this.sparkConfiguration.getProperty(key).toString());
            }
        });
        if (params.length() > 0) {
            this.sparkConfiguration.setProperty(SparkLauncher.EXECUTOR_EXTRA_JAVA_OPTIONS,
                    (this.sparkConfiguration.getString(SparkLauncher.EXECUTOR_EXTRA_JAVA_OPTIONS, "")
                            + params.toString()).trim());
            this.sparkConfiguration.setProperty(SparkLauncher.DRIVER_EXTRA_JAVA_OPTIONS,
                    (this.sparkConfiguration.getString(SparkLauncher.DRIVER_EXTRA_JAVA_OPTIONS, "")
                            + params.toString()).trim());
        }
        KryoShimServiceLoader.applyConfiguration(this.sparkConfiguration);
        //////////////////////////////////////////////////
        //////////////////////////////////////////////////
        //////////////////////////////////////////////////
        // apache and hadoop configurations that are used throughout the graph computer computation
        final org.apache.commons.configuration.Configuration graphComputerConfiguration = new HadoopConfiguration(
                this.sparkConfiguration);
        if (!graphComputerConfiguration.containsKey(Constants.SPARK_SERIALIZER)) {
            graphComputerConfiguration.setProperty(Constants.SPARK_SERIALIZER,
                    KryoSerializer.class.getCanonicalName());
            if (!graphComputerConfiguration.containsKey(Constants.SPARK_KRYO_REGISTRATOR)) {
                graphComputerConfiguration.setProperty(Constants.SPARK_KRYO_REGISTRATOR,
                        GryoRegistrator.class.getCanonicalName());
            }
        }
        graphComputerConfiguration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_WRITER_HAS_EDGES,
                this.persist.equals(GraphComputer.Persist.EDGES));

        final Configuration hadoopConfiguration = ConfUtil.makeHadoopConfiguration(graphComputerConfiguration);

        final Storage fileSystemStorage = FileSystemStorage.open(hadoopConfiguration);
        final boolean inputFromHDFS = FileInputFormat.class.isAssignableFrom(
                hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_READER, Object.class));
        final boolean inputFromSpark = PersistedInputRDD.class.isAssignableFrom(
                hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_READER, Object.class));
        final boolean outputToHDFS = FileOutputFormat.class.isAssignableFrom(
                hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_WRITER, Object.class));
        final boolean outputToSpark = PersistedOutputRDD.class.isAssignableFrom(
                hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_WRITER, Object.class));
        final boolean skipPartitioner = graphComputerConfiguration
                .getBoolean(Constants.GREMLIN_SPARK_SKIP_PARTITIONER, false);
        final boolean skipPersist = graphComputerConfiguration
                .getBoolean(Constants.GREMLIN_SPARK_SKIP_GRAPH_CACHE, false);

        if (inputFromHDFS) {
            String inputLocation = Constants
                    .getSearchGraphLocation(hadoopConfiguration.get(Constants.GREMLIN_HADOOP_INPUT_LOCATION),
                            fileSystemStorage)
                    .orElse(null);
            if (null != inputLocation) {
                try {
                    graphComputerConfiguration.setProperty(Constants.MAPREDUCE_INPUT_FILEINPUTFORMAT_INPUTDIR,
                            FileSystem.get(hadoopConfiguration).getFileStatus(new Path(inputLocation)).getPath()
                                    .toString());
                    hadoopConfiguration.set(Constants.MAPREDUCE_INPUT_FILEINPUTFORMAT_INPUTDIR,
                            FileSystem.get(hadoopConfiguration).getFileStatus(new Path(inputLocation)).getPath()
                                    .toString());
                } catch (final IOException e) {
                    throw new IllegalStateException(e.getMessage(), e);
                }
            }
        }

        final InputRDD inputRDD;
        final OutputRDD outputRDD;
        final boolean filtered;
        try {
            inputRDD = InputRDD.class.isAssignableFrom(
                    hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_READER, Object.class))
                            ? hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_READER,
                                    InputRDD.class, InputRDD.class).newInstance()
                            : InputFormatRDD.class.newInstance();
            outputRDD = OutputRDD.class.isAssignableFrom(
                    hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_WRITER, Object.class))
                            ? hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_WRITER,
                                    OutputRDD.class, OutputRDD.class).newInstance()
                            : OutputFormatRDD.class.newInstance();

            // if the input class can filter on load, then set the filters
            if (inputRDD instanceof InputFormatRDD
                    && GraphFilterAware.class.isAssignableFrom(hadoopConfiguration.getClass(
                            Constants.GREMLIN_HADOOP_GRAPH_READER, InputFormat.class, InputFormat.class))) {
                GraphFilterAware.storeGraphFilter(graphComputerConfiguration, hadoopConfiguration,
                        this.graphFilter);
                filtered = false;
            } else if (inputRDD instanceof GraphFilterAware) {
                ((GraphFilterAware) inputRDD).setGraphFilter(this.graphFilter);
                filtered = false;
            } else
                filtered = this.graphFilter.hasFilter();
        } catch (final InstantiationException | IllegalAccessException e) {
            throw new IllegalStateException(e.getMessage(), e);
        }

        // create the spark context from the graph computer configuration
        final JavaSparkContext sparkContext = new JavaSparkContext(Spark.create(hadoopConfiguration));
        final Storage sparkContextStorage = SparkContextStorage.open();

        sparkContext.setJobGroup(jobGroupId, jobDescription);

        GraknSparkMemory memory = null;
        // delete output location
        final String outputLocation = hadoopConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, null);
        if (null != outputLocation) {
            if (outputToHDFS && fileSystemStorage.exists(outputLocation)) {
                fileSystemStorage.rm(outputLocation);
            }
            if (outputToSpark && sparkContextStorage.exists(outputLocation)) {
                sparkContextStorage.rm(outputLocation);
            }
        }

        // the Spark application name will always be set by SparkContextStorage,
        // thus, INFO the name to make it easier to debug
        logger.debug(Constants.GREMLIN_HADOOP_SPARK_JOB_PREFIX
                + (null == this.vertexProgram ? "No VertexProgram" : this.vertexProgram) + "["
                + this.mapReducers + "]");

        // add the project jars to the cluster
        this.loadJars(hadoopConfiguration, sparkContext);
        updateLocalConfiguration(sparkContext, hadoopConfiguration);

        // create a message-passing friendly rdd from the input rdd
        boolean partitioned = false;
        JavaPairRDD<Object, VertexWritable> loadedGraphRDD = inputRDD.readGraphRDD(graphComputerConfiguration,
                sparkContext);

        // if there are vertex or edge filters, filter the loaded graph rdd prior to partitioning and persisting
        if (filtered) {
            this.logger.debug("Filtering the loaded graphRDD: " + this.graphFilter);
            loadedGraphRDD = GraknSparkExecutor.applyGraphFilter(loadedGraphRDD, this.graphFilter);
        }
        // if the loaded graph RDD is already partitioned use that partitioner,
        // else partition it with HashPartitioner
        if (loadedGraphRDD.partitioner().isPresent()) {
            this.logger.debug("Using the existing partitioner associated with the loaded graphRDD: "
                    + loadedGraphRDD.partitioner().get());
        } else {
            if (!skipPartitioner) {
                final Partitioner partitioner = new HashPartitioner(
                        this.workersSet ? this.workers : loadedGraphRDD.partitions().size());
                this.logger.debug("Partitioning the loaded graphRDD: " + partitioner);
                loadedGraphRDD = loadedGraphRDD.partitionBy(partitioner);
                partitioned = true;
                assert loadedGraphRDD.partitioner().isPresent();
            } else {
                // no easy way to test this with a test case
                assert skipPartitioner == !loadedGraphRDD.partitioner().isPresent();

                this.logger.debug("Partitioning has been skipped for the loaded graphRDD via "
                        + Constants.GREMLIN_SPARK_SKIP_PARTITIONER);
            }
        }
        // if the loaded graphRDD was already partitioned previously,
        // then this coalesce/repartition will not take place
        if (this.workersSet) {
            // ensures that the loaded graphRDD does not have more partitions than workers
            if (loadedGraphRDD.partitions().size() > this.workers) {
                loadedGraphRDD = loadedGraphRDD.coalesce(this.workers);
            } else {
                // ensures that the loaded graphRDD does not have fewer partitions than workers
                if (loadedGraphRDD.partitions().size() < this.workers) {
                    loadedGraphRDD = loadedGraphRDD.repartition(this.workers);
                }
            }
        }
        // persist the vertex program loaded graph as specified by configuration
        // or else use default cache() which is MEMORY_ONLY
        if (!skipPersist && (!inputFromSpark || partitioned || filtered)) {
            loadedGraphRDD = loadedGraphRDD.persist(StorageLevel.fromString(
                    hadoopConfiguration.get(Constants.GREMLIN_SPARK_GRAPH_STORAGE_LEVEL, "MEMORY_ONLY")));
        }
        // final graph with view
        // (for persisting and/or mapReducing -- may be null and thus, possible to save space/time)
        JavaPairRDD<Object, VertexWritable> computedGraphRDD = null;
        try {
            ////////////////////////////////
            // process the vertex program //
            ////////////////////////////////
            if (null != this.vertexProgram) {
                memory = new GraknSparkMemory(this.vertexProgram, this.mapReducers, sparkContext);
                /////////////////
                // if there is a registered VertexProgramInterceptor, use it to bypass the GraphComputer semantics
                if (graphComputerConfiguration
                        .containsKey(Constants.GREMLIN_HADOOP_VERTEX_PROGRAM_INTERCEPTOR)) {
                    try {
                        final GraknSparkVertexProgramInterceptor<VertexProgram> interceptor = (GraknSparkVertexProgramInterceptor) Class
                                .forName(graphComputerConfiguration
                                        .getString(Constants.GREMLIN_HADOOP_VERTEX_PROGRAM_INTERCEPTOR))
                                .newInstance();
                        computedGraphRDD = interceptor.apply(this.vertexProgram, loadedGraphRDD, memory);
                    } catch (final ClassNotFoundException | IllegalAccessException | InstantiationException e) {
                        throw new IllegalStateException(e.getMessage());
                    }
                } else {
                    // standard GraphComputer semantics
                    // get a configuration that will be propagated to all workers
                    final HadoopConfiguration vertexProgramConfiguration = new HadoopConfiguration();
                    this.vertexProgram.storeState(vertexProgramConfiguration);
                    // set up the vertex program and wire up configurations
                    this.vertexProgram.setup(memory);
                    JavaPairRDD<Object, ViewIncomingPayload<Object>> viewIncomingRDD = null;
                    memory.broadcastMemory(sparkContext);
                    // execute the vertex program
                    while (true) {
                        if (Thread.interrupted()) {
                            sparkContext.cancelAllJobs();
                            throw new TraversalInterruptedException();
                        }
                        memory.setInExecute(true);
                        viewIncomingRDD = GraknSparkExecutor.executeVertexProgramIteration(loadedGraphRDD,
                                viewIncomingRDD, memory, graphComputerConfiguration,
                                vertexProgramConfiguration);
                        memory.setInExecute(false);
                        if (this.vertexProgram.terminate(memory)) {
                            break;
                        } else {
                            memory.incrIteration();
                            memory.broadcastMemory(sparkContext);
                        }
                    }
                    // if the graph will continue to be used (persisted or mapReduced),
                    // then generate a view+graph
                    if ((null != outputRDD && !this.persist.equals(Persist.NOTHING))
                            || !this.mapReducers.isEmpty()) {
                        computedGraphRDD = GraknSparkExecutor.prepareFinalGraphRDD(loadedGraphRDD,
                                viewIncomingRDD, this.vertexProgram.getVertexComputeKeys());
                        assert null != computedGraphRDD && computedGraphRDD != loadedGraphRDD;
                    } else {
                        // ensure that the computedGraphRDD was not created
                        assert null == computedGraphRDD;
                    }
                }
                /////////////////
                memory.complete(); // drop all transient memory keys
                // write the computed graph to the respective output (rdd or output format)
                if (null != outputRDD && !this.persist.equals(Persist.NOTHING)) {
                    // the logic holds that a computedGraphRDD must be created at this point
                    assert null != computedGraphRDD;

                    outputRDD.writeGraphRDD(graphComputerConfiguration, computedGraphRDD);
                }
            }

            final boolean computedGraphCreated = computedGraphRDD != null && computedGraphRDD != loadedGraphRDD;
            if (!computedGraphCreated) {
                computedGraphRDD = loadedGraphRDD;
            }

            final Memory.Admin finalMemory = null == memory ? new MapMemory() : new MapMemory(memory);

            //////////////////////////////
            // process the map reducers //
            //////////////////////////////
            if (!this.mapReducers.isEmpty()) {
                // create a mapReduceRDD for executing the map reduce jobs on
                JavaPairRDD<Object, VertexWritable> mapReduceRDD = computedGraphRDD;
                if (computedGraphCreated && !outputToSpark) {
                    // drop all the edges of the graph as they are not used in mapReduce processing
                    mapReduceRDD = computedGraphRDD.mapValues(vertexWritable -> {
                        vertexWritable.get().dropEdges(Direction.BOTH);
                        return vertexWritable;
                    });
                    // if there is only one MapReduce to execute, don't bother wasting the clock cycles.
                    if (this.mapReducers.size() > 1) {
                        mapReduceRDD = mapReduceRDD.persist(StorageLevel.fromString(hadoopConfiguration
                                .get(Constants.GREMLIN_SPARK_GRAPH_STORAGE_LEVEL, "MEMORY_ONLY")));
                    }
                }

                for (final MapReduce mapReduce : this.mapReducers) {
                    // execute the map reduce job
                    final HadoopConfiguration newApacheConfiguration = new HadoopConfiguration(
                            graphComputerConfiguration);
                    mapReduce.storeState(newApacheConfiguration);
                    // map
                    final JavaPairRDD mapRDD = GraknSparkExecutor.executeMap(mapReduceRDD, mapReduce,
                            newApacheConfiguration);
                    // combine
                    final JavaPairRDD combineRDD = mapReduce.doStage(MapReduce.Stage.COMBINE)
                            ? GraknSparkExecutor.executeCombine(mapRDD, newApacheConfiguration)
                            : mapRDD;
                    // reduce
                    final JavaPairRDD reduceRDD = mapReduce.doStage(MapReduce.Stage.REDUCE)
                            ? GraknSparkExecutor.executeReduce(combineRDD, mapReduce, newApacheConfiguration)
                            : combineRDD;
                    // write the map reduce output back to disk and computer result memory
                    if (null != outputRDD) {
                        mapReduce.addResultToMemory(finalMemory, outputRDD.writeMemoryRDD(
                                graphComputerConfiguration, mapReduce.getMemoryKey(), reduceRDD));
                    }
                }
                // if the mapReduceRDD is not simply the computed graph, unpersist the mapReduceRDD
                if (computedGraphCreated && !outputToSpark) {
                    assert loadedGraphRDD != computedGraphRDD;
                    assert mapReduceRDD != computedGraphRDD;
                    mapReduceRDD.unpersist();
                } else {
                    assert mapReduceRDD == computedGraphRDD;
                }
            }

            // unpersist the loaded graph if it will not be used again (no PersistedInputRDD)
            // if the graphRDD was loaded from Spark, but then partitioned or filtered, it's a different RDD
            if (!inputFromSpark || partitioned || filtered) {
                loadedGraphRDD.unpersist();
            }
            // unpersist the computed graph if it will not be used again (no PersistedOutputRDD)
            // if the computed graph is the loadedGraphRDD because it was not mutated and not-unpersisted,
            // then don't unpersist the computedGraphRDD/loadedGraphRDD
            if ((!outputToSpark || this.persist.equals(GraphComputer.Persist.NOTHING))
                    && computedGraphCreated) {
                computedGraphRDD.unpersist();
            }
            // delete any file system or rdd data if persist nothing
            if (null != outputLocation && this.persist.equals(GraphComputer.Persist.NOTHING)) {
                if (outputToHDFS) {
                    fileSystemStorage.rm(outputLocation);
                }
                if (outputToSpark) {
                    sparkContextStorage.rm(outputLocation);
                }
            }
            // update runtime and return the newly computed graph
            finalMemory.setRuntime(System.currentTimeMillis() - startTime);
            // clear properties that should not be propagated in an OLAP chain
            graphComputerConfiguration.clearProperty(Constants.GREMLIN_HADOOP_GRAPH_FILTER);
            graphComputerConfiguration.clearProperty(Constants.GREMLIN_HADOOP_VERTEX_PROGRAM_INTERCEPTOR);
            graphComputerConfiguration.clearProperty(Constants.GREMLIN_SPARK_SKIP_GRAPH_CACHE);
            graphComputerConfiguration.clearProperty(Constants.GREMLIN_SPARK_SKIP_PARTITIONER);
            return new DefaultComputerResult(InputOutputHelper.getOutputGraph(graphComputerConfiguration,
                    this.resultGraph, this.persist), finalMemory.asImmutable());
        } catch (Exception e) {
            // So it throws the same exception as tinker does
            throw new RuntimeException(e);
        }
    });
    computerService.shutdown();
    return result;
}

From source file: com.google.gerrit.server.mail.send.SmtpEmailSender.java

public static String generateMultipartBoundary(String textBody, String htmlBody) throws EmailException {
    byte[] bytes = new byte[8];
    ThreadLocalRandom rng = ThreadLocalRandom.current();

    // The probability of the boundary being valid is approximately
    // (2^64 - len(message)) / 2^64.
    //
    // The message is much shorter than 2^64 bytes, so if two tries don't
    // suffice, something is seriously wrong.
    for (int i = 0; i < 2; i++) {
        rng.nextBytes(bytes);
        String boundary = BaseEncoding.base64().encode(bytes);
        String encBoundary = "--" + boundary;
        if (textBody.contains(encBoundary) || htmlBody.contains(encBoundary)) {
            continue;
        }
        return boundary;
    }
    throw new EmailException("Gave up generating unique MIME boundary");
}
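
The core ThreadLocalRandom idiom above is nextBytes, which fills a byte array with random bits. Below is a minimal standalone sketch of the same boundary generation; it substitutes java.util.Base64 for Guava's BaseEncoding, which is an assumption of this sketch rather than what the Gerrit code uses, and it omits the collision-retry loop.

import java.util.Base64;
import java.util.concurrent.ThreadLocalRandom;

public class BoundarySketch {
    public static void main(String[] args) {
        // Draw 64 random bits; with 2^64 possible values, a collision with
        // text already present in the message body is practically impossible.
        byte[] bytes = new byte[8];
        ThreadLocalRandom.current().nextBytes(bytes);

        // java.util.Base64 stands in here for Guava's BaseEncoding.base64()
        String boundary = Base64.getEncoder().encodeToString(bytes);
        System.out.println("--" + boundary);
    }
}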

From source file: org.apache.flink.table.codegen.SortCodeGeneratorTest.java

private Object[] generateValues(InternalType type) {

    Random rnd = new Random();

    int seedNum = RECORD_NUM / 5;
    Object[] seeds = new Object[seedNum];
    seeds[0] = null;
    seeds[1] = value1(type, rnd);
    seeds[2] = value2(type, rnd);
    seeds[3] = value3(type, rnd);
    for (int i = 4; i < seeds.length; i++) {
        if (type.equals(InternalTypes.BOOLEAN)) {
            seeds[i] = rnd.nextBoolean();
        } else if (type.equals(InternalTypes.BYTE)) {
            seeds[i] = (byte) rnd.nextLong();
        } else if (type.equals(InternalTypes.SHORT)) {
            seeds[i] = (short) rnd.nextLong();
        } else if (type.equals(InternalTypes.INT)) {
            seeds[i] = rnd.nextInt();
        } else if (type.equals(InternalTypes.LONG)) {
            seeds[i] = rnd.nextLong();
        } else if (type.equals(InternalTypes.FLOAT)) {
            seeds[i] = rnd.nextFloat() * rnd.nextLong();
        } else if (type.equals(InternalTypes.DOUBLE)) {
            seeds[i] = rnd.nextDouble() * rnd.nextLong();
        } else if (type.equals(InternalTypes.STRING)) {
            seeds[i] = BinaryString.fromString(RandomStringUtils.random(rnd.nextInt(20)));
        } else if (type instanceof DecimalType) {
            DecimalType decimalType = (DecimalType) type;
            BigDecimal decimal = new BigDecimal(rnd.nextInt()).divide(
                    new BigDecimal(ThreadLocalRandom.current().nextInt(1, 256)),
                    ThreadLocalRandom.current().nextInt(1, 30), BigDecimal.ROUND_HALF_EVEN);
            seeds[i] = Decimal.fromBigDecimal(decimal, decimalType.precision(), decimalType.scale());
        } else if (type instanceof ArrayType || type.equals(InternalTypes.BINARY)) {
            byte[] bytes = new byte[rnd.nextInt(16) + 1];
            rnd.nextBytes(bytes);
            seeds[i] = type.equals(InternalTypes.BINARY) ? bytes : BinaryArray.fromPrimitiveArray(bytes);
        } else if (type instanceof RowType) {
            RowType rowType = (RowType) type;
            if (rowType.getTypeAt(0).equals(InternalTypes.INT)) {
                seeds[i] = GenericRow.of(rnd.nextInt());
            } else {
                seeds[i] = GenericRow.of(GenericRow.of(rnd.nextInt()));
            }
        } else if (type instanceof GenericType) {
            seeds[i] = new BinaryGeneric<>(rnd.nextInt(), IntSerializer.INSTANCE);
        } else {
            throw new RuntimeException("Not support!");
        }
    }

    // result values
    Object[] results = new Object[RECORD_NUM];
    for (int i = 0; i < RECORD_NUM; i++) {
        results[i] = seeds[rnd.nextInt(seedNum)];
    }
    return results;
}

From source file: io.hops.hopsworks.api.tensorflow.TfServingService.java

@POST
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@AllowedProjectRoles({ AllowedProjectRoles.DATA_OWNER, AllowedProjectRoles.DATA_SCIENTIST })
public Response createTfServing(TfServing tfServing, @Context SecurityContext sc,
        @Context HttpServletRequest req) throws AppException {

    if (projectId == null) {
        throw new AppException(Response.Status.BAD_REQUEST.getStatusCode(), "Incomplete request!");
    }
    String hdfsUser = getHdfsUser(sc);
    if (hdfsUser == null) {
        throw new AppException(Response.Status.INTERNAL_SERVER_ERROR.getStatusCode(),
                "Could not find your username. Report a bug.");
    }

    try {
        HdfsUsers user = hdfsUsersFacade.findByName(hdfsUser);

        if (user == null) {
            throw new AppException(Response.Status.INTERNAL_SERVER_ERROR.getStatusCode(),
                    "Possible inconsistency - could not find your user.");
        }

        String modelPath = tfServing.getHdfsModelPath();

        if (modelPath.startsWith("hdfs://")) {
            int projectsIndex = modelPath.indexOf("/Projects");
            modelPath = modelPath.substring(projectsIndex, modelPath.length());
        }

        if (!inodes.existsPath(modelPath)) {
            throw new AppException(Response.Status.NOT_FOUND.getStatusCode(),
                    "Could not find .pb file in the path " + modelPath);
        }

        if (modelPath.equals("")) {
            throw new AppException(Response.Status.INTERNAL_SERVER_ERROR.getStatusCode(),
                    "Select your .pb file corresponding to the model to be served in the Models dataset.");
        }

        tfServing.setHdfsUserId(user.getId());

        String secret = DigestUtils.sha256Hex(Integer.toString(ThreadLocalRandom.current().nextInt()));
        tfServing.setSecret(secret);

        tfServing.setModelName(getModelName(modelPath));
        int version = -1;
        String basePath = null;
        try {
            version = getVersion(modelPath);
            basePath = getModelBasePath(modelPath);
        } catch (Exception e) {
            throw new AppException(Response.Status.INTERNAL_SERVER_ERROR.getStatusCode(),
                    ".pb file should be located in Models/{model_name}/{version}");
        }

        String email = sc.getUserPrincipal().getName();

        tfServing.setVersion(version);
        tfServing.setProject(project);
        tfServing.setHdfsModelPath(basePath);
        tfServing.setStatus(TfServingStatusEnum.CREATED);
        tfServing.setCreator(userFacade.findByEmail(email));
        tfServingFacade.persist(tfServing);

    } catch (DatabaseException dbe) {
        throw new AppException(Response.Status.INTERNAL_SERVER_ERROR.getStatusCode(), dbe.getMessage());
    }
    return noCacheResponse.getNoCacheResponseBuilder(Response.Status.CREATED).entity(tfServing).build();
}

From source file: io.github.microcks.service.TestService.java

private void waitSomeRandomMS(int min, int max) {
    Object semaphore = new Object();
    long timeout = ThreadLocalRandom.current().nextInt(min, max + 1);
    synchronized (semaphore) {
        try {
            semaphore.wait(timeout);
        } catch (Exception e) {
            log.debug("waitSomeRandomMS semaphore was interrupted");
        }
    }
}

From source file: com.pushtechnology.consulting.SessionCreator.java

/**
 * Returns a random number between 1 (inclusive) and <code>max</code>
 * (inclusive).
 *
 * @param max
 * @return random {@link Integer}
 */
private int getRandomReconnect(int max) {
    return ThreadLocalRandom.current().nextInt(1, max + 1);
}
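
Note on the pattern above: nextInt(origin, bound) excludes the bound, so passing max + 1 is what makes max inclusive. A small sketch that checks which values are reachable, using a hypothetical max of 3:

import java.util.TreeSet;
import java.util.concurrent.ThreadLocalRandom;

public class InclusiveBoundCheck {
    public static void main(String[] args) {
        int max = 3; // illustrative value only
        TreeSet<Integer> seen = new TreeSet<>();
        for (int i = 0; i < 10_000; i++) {
            // bound is exclusive, so max + 1 makes max itself reachable
            seen.add(ThreadLocalRandom.current().nextInt(1, max + 1));
        }
        System.out.println(seen); // after this many draws, prints [1, 2, 3]
    }
}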

From source file: org.apache.tez.runtime.library.common.sort.impl.dflt.TestDefaultSorter.java

@Test
@Ignore
/**
 * Disabling this, as this would need 2047 MB io.sort.mb for testing.
 * Provide > 2GB to JVM when running this test to avoid OOM in string generation.
 *
 * Set DefaultSorter.MAX_IO_SORT_MB = 2047 for running this.
 */
public void testSortLimitsWithLargeRecords() throws IOException {
    OutputContext context = createTezOutputContext();

    doReturn(2800 * 1024 * 1024l).when(context).getTotalMemoryAvailableToTask();

    //Setting IO_SORT_MB to 2047 MB
    conf.setInt(TezRuntimeConfiguration.TEZ_RUNTIME_IO_SORT_MB, 2047);
    context.requestInitialMemory(
            ExternalSorter.getInitialMemoryRequirement(conf, context.getTotalMemoryAvailableToTask()),
            new MemoryUpdateCallbackHandler());

    DefaultSorter sorter = new DefaultSorter(context, conf, 2, 2047 << 20);

    int i = 0;
    /**
     * If io.sort.mb is not capped to 1800, this would end up throwing
     * "java.lang.ArrayIndexOutOfBoundsException" after many spills.
     * Intentionally written as an infinite loop.
     */
    while (true) {
        Text key = new Text(i + "");
        // Generate a random size between 1 MB and 100 MB.
        int valSize = ThreadLocalRandom.current().nextInt(1 * 1024 * 1024, 100 * 1024 * 1024);
        String val = StringInterner.weakIntern(StringUtils.repeat("v", valSize));
        sorter.write(key, new Text(val));
        i = (i + 1) % 10;
    }
}

From source file: org.xwiki.extension.script.ExtensionHistoryScriptService.java

private ReplayRequest createReplayRequest(List<ExtensionJobHistoryRecord> records) {
    ReplayRequest request = new ReplayRequest();
    String suffix = new Date().getTime() + "-" + ThreadLocalRandom.current().nextInt(100, 1000);
    request.setId(getReplayJobId(suffix));
    // There may be questions for which there isn't a specified answer.
    request.setInteractive(true);
    request.setRecords(records);

    // Provide information on what started the job.
    request.setProperty(PROPERTY_CONTEXT_WIKI, this.xcontextProvider.get().getWikiId());
    request.setProperty(PROPERTY_CONTEXT_ACTION, this.xcontextProvider.get().getAction());

    setRightsProperties(request);

    return request;
}