Example usage for java.util.concurrent.atomic AtomicLong get

List of usage examples for java.util.concurrent.atomic AtomicLong get

Introduction

On this page you can find example usages of java.util.concurrent.atomic.AtomicLong#get().

Prototype

public final long get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
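
Before the project examples below, a minimal self-contained sketch (not taken from any of the listed projects) of what get() does: it reads the current value with volatile semantics and never modifies it.

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongGetExample {
    public static void main(String[] args) throws InterruptedException {
        AtomicLong counter = new AtomicLong(0L);

        // A writer thread updates the counter; because get() has volatile
        // read semantics, the main thread sees the updates once join() returns.
        Thread writer = new Thread(() -> {
            for (int i = 0; i < 1_000; i++) {
                counter.incrementAndGet();
            }
        });
        writer.start();
        writer.join();

        // get() only reads; the stored value stays 1000.
        System.out.println("Current value: " + counter.get());
    }
}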

Usage

From source file:com.linkedin.pinot.tools.perf.QueryRunner.java

/**
 * Use multiple threads to run query at a target QPS.
 * <p>Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
 * queue at the target QPS, and start <code>numThreads</code> worker threads to fetch queries from the queue and send
 * them.
 * <p>The main thread is responsible for collecting and logging statistics periodically.
 * <p>Queries are picked sequentially from the query file.
 * <p>The query runner stops when all queries in the query file have been executed the configured number of times.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numTimesToRunQueries number of times to run all queries in the query file, 0 means infinite times.
 * @param numThreads number of threads sending queries.
 * @param startQPS start QPS (target QPS).
 * @param reportIntervalMs report interval in milliseconds.
 * @param numIntervalsToReportAndClearStatistics number of report intervals to report detailed statistics and clear
 *                                               them, 0 means never.
 * @throws Exception
 */
public static void targetQPSQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        int numTimesToRunQueries, int numThreads, double startQPS, int reportIntervalMs,
        int numIntervalsToReportAndClearStatistics) throws Exception {
    List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
    AtomicInteger numQueriesExecuted = new AtomicInteger(0);
    AtomicLong totalBrokerTime = new AtomicLong(0L);
    AtomicLong totalClientTime = new AtomicLong(0L);
    List<Statistics> statisticsList = Collections.singletonList(new Statistics(CLIENT_TIME_STATISTICS));

    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Worker(driver, queryQueue, numQueriesExecuted, totalBrokerTime,
                totalClientTime, statisticsList));
    }
    executorService.shutdown();

    int queryIntervalMs = (int) (MILLIS_PER_SECOND / startQPS);
    long startTime = System.currentTimeMillis();
    long reportStartTime = startTime;
    int numReportIntervals = 0;
    int numTimesExecuted = 0;
    while (numTimesToRunQueries == 0 || numTimesExecuted < numTimesToRunQueries) {
        if (executorService.isTerminated()) {
            LOGGER.error("All threads got exception and already dead.");
            return;
        }

        for (String query : queries) {
            queryQueue.add(query);
            Thread.sleep(queryIntervalMs);

            long currentTime = System.currentTimeMillis();
            if (currentTime - reportStartTime >= reportIntervalMs) {
                long timePassed = currentTime - startTime;
                int numQueriesExecutedInt = numQueriesExecuted.get();
                LOGGER.info(
                        "Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                                + "Average Broker Time: {}ms, Average Client Time: {}ms, Queries Queued: {}.",
                        startQPS, timePassed, numQueriesExecutedInt,
                        numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                        totalBrokerTime.get() / (double) numQueriesExecutedInt,
                        totalClientTime.get() / (double) numQueriesExecutedInt, queryQueue.size());
                reportStartTime = currentTime;
                numReportIntervals++;

                if ((numIntervalsToReportAndClearStatistics != 0)
                        && (numReportIntervals == numIntervalsToReportAndClearStatistics)) {
                    numReportIntervals = 0;
                    startTime = currentTime;
                    reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                            statisticsList);
                }
            }
        }
        numTimesExecuted++;
    }

    // Wait for all queries to finish executing.
    while (queryQueue.size() != 0) {
        Thread.sleep(1);
    }
    executorService.shutdownNow();
    while (!executorService.isTerminated()) {
        Thread.sleep(1);
    }

    long timePassed = System.currentTimeMillis() - startTime;
    int numQueriesExecutedInt = numQueriesExecuted.get();
    LOGGER.info("--------------------------------------------------------------------------------");
    LOGGER.info("FINAL REPORT:");
    LOGGER.info(
            "Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                    + "Average Broker Time: {}ms, Average Client Time: {}ms.",
            startQPS, timePassed, numQueriesExecutedInt,
            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
            totalBrokerTime.get() / (double) numQueriesExecutedInt,
            totalClientTime.get() / (double) numQueriesExecutedInt);
    for (Statistics statistics : statisticsList) {
        statistics.report();
    }
}
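
The Worker class submitted above is part of the same QueryRunner file but is not reproduced on this page. The following is only a hypothetical sketch, with assumed names and a placeholder for the actual query call, of how such a worker could drain the queue and feed the shared AtomicLong accumulators that the main thread reads with get():

import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

// Hypothetical sketch only: drains queries and adds per-query timings to the
// shared counters; the real Pinot Worker also issues the query via the driver.
class WorkerSketch implements Runnable {
    private final ConcurrentLinkedQueue<String> queryQueue;
    private final AtomicInteger numQueriesExecuted;
    private final AtomicLong totalBrokerTime;
    private final AtomicLong totalClientTime;

    WorkerSketch(ConcurrentLinkedQueue<String> queryQueue, AtomicInteger numQueriesExecuted,
            AtomicLong totalBrokerTime, AtomicLong totalClientTime) {
        this.queryQueue = queryQueue;
        this.numQueriesExecuted = numQueriesExecuted;
        this.totalBrokerTime = totalBrokerTime;
        this.totalClientTime = totalClientTime;
    }

    @Override
    public void run() {
        while (!Thread.currentThread().isInterrupted()) {
            String query = queryQueue.poll();
            if (query == null) {
                continue; // queue is empty, keep polling
            }
            long start = System.currentTimeMillis();
            long brokerTimeMs = 0L; // placeholder: the real worker reads this from the query response
            long clientTimeMs = System.currentTimeMillis() - start;
            totalBrokerTime.addAndGet(brokerTimeMs);
            totalClientTime.addAndGet(clientTimeMs);
            numQueriesExecuted.incrementAndGet();
        }
    }
}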

From source file:org.apache.nifi.processors.standard.AbstractQueryDatabaseTable.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSessionFactory sessionFactory)
        throws ProcessException {
    // Fetch the column/table info once
    if (!setupComplete.get()) {
        super.setup(context);
    }
    ProcessSession session = sessionFactory.createSession();
    final List<FlowFile> resultSetFlowFiles = new ArrayList<>();

    final ComponentLog logger = getLogger();

    final DBCPService dbcpService = context.getProperty(DBCP_SERVICE).asControllerService(DBCPService.class);
    final DatabaseAdapter dbAdapter = dbAdapters.get(context.getProperty(DB_TYPE).getValue());
    final String tableName = context.getProperty(TABLE_NAME).evaluateAttributeExpressions().getValue();
    final String columnNames = context.getProperty(COLUMN_NAMES).evaluateAttributeExpressions().getValue();
    final String sqlQuery = context.getProperty(SQL_QUERY).evaluateAttributeExpressions().getValue();
    final String maxValueColumnNames = context.getProperty(MAX_VALUE_COLUMN_NAMES)
            .evaluateAttributeExpressions().getValue();
    final String customWhereClause = context.getProperty(WHERE_CLAUSE).evaluateAttributeExpressions()
            .getValue();
    final Integer fetchSize = context.getProperty(FETCH_SIZE).evaluateAttributeExpressions().asInteger();
    final Integer maxRowsPerFlowFile = context.getProperty(MAX_ROWS_PER_FLOW_FILE)
            .evaluateAttributeExpressions().asInteger();
    final Integer outputBatchSizeField = context.getProperty(OUTPUT_BATCH_SIZE).evaluateAttributeExpressions()
            .asInteger();
    final int outputBatchSize = outputBatchSizeField == null ? 0 : outputBatchSizeField;
    final Integer maxFragments = context.getProperty(MAX_FRAGMENTS).isSet()
            ? context.getProperty(MAX_FRAGMENTS).evaluateAttributeExpressions().asInteger()
            : 0;

    SqlWriter sqlWriter = configureSqlWriter(session, context);

    final StateManager stateManager = context.getStateManager();
    final StateMap stateMap;

    try {
        stateMap = stateManager.getState(Scope.CLUSTER);
    } catch (final IOException ioe) {
        getLogger().error("Failed to retrieve observed maximum values from the State Manager. Will not perform "
                + "query until this is accomplished.", ioe);
        context.yield();
        return;
    }
    // Make a mutable copy of the current state property map. This will be updated by the result row callback, and eventually
    // set as the current state map (after the session has been committed)
    final Map<String, String> statePropertyMap = new HashMap<>(stateMap.toMap());

    //If an initial max value for column(s) has been specified using properties, and this column is not in the state manager, sync them to the state property map
    for (final Map.Entry<String, String> maxProp : maxValueProperties.entrySet()) {
        String maxPropKey = maxProp.getKey().toLowerCase();
        String fullyQualifiedMaxPropKey = getStateKey(tableName, maxPropKey, dbAdapter);
        if (!statePropertyMap.containsKey(fullyQualifiedMaxPropKey)) {
            String newMaxPropValue;
            // If we can't find the value at the fully-qualified key name, it is possible (under a previous scheme)
            // the value has been stored under a key that is only the column name. Fall back to check the column name,
            // but store the new initial max value under the fully-qualified key.
            if (statePropertyMap.containsKey(maxPropKey)) {
                newMaxPropValue = statePropertyMap.get(maxPropKey);
            } else {
                newMaxPropValue = maxProp.getValue();
            }
            statePropertyMap.put(fullyQualifiedMaxPropKey, newMaxPropValue);

        }
    }

    List<String> maxValueColumnNameList = StringUtils.isEmpty(maxValueColumnNames) ? null
            : Arrays.asList(maxValueColumnNames.split("\\s*,\\s*"));
    final String selectQuery = getQuery(dbAdapter, tableName, sqlQuery, columnNames, maxValueColumnNameList,
            customWhereClause, statePropertyMap);
    final StopWatch stopWatch = new StopWatch(true);
    final String fragmentIdentifier = UUID.randomUUID().toString();

    try (final Connection con = dbcpService.getConnection(Collections.emptyMap());
            final Statement st = con.createStatement()) {

        if (fetchSize != null && fetchSize > 0) {
            try {
                st.setFetchSize(fetchSize);
            } catch (SQLException se) {
                // Not all drivers support this, just log the error (at debug level) and move on
                logger.debug("Cannot set fetch size to {} due to {}",
                        new Object[] { fetchSize, se.getLocalizedMessage() }, se);
            }
        }

        String jdbcURL = "DBCPService";
        try {
            DatabaseMetaData databaseMetaData = con.getMetaData();
            if (databaseMetaData != null) {
                jdbcURL = databaseMetaData.getURL();
            }
        } catch (SQLException se) {
            // Ignore and use default JDBC URL. This shouldn't happen unless the driver doesn't implement getMetaData() properly
        }

        final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).evaluateAttributeExpressions()
                .asTimePeriod(TimeUnit.SECONDS).intValue();
        st.setQueryTimeout(queryTimeout); // timeout in seconds
        if (logger.isDebugEnabled()) {
            logger.debug("Executing query {}", new Object[] { selectQuery });
        }
        try (final ResultSet resultSet = st.executeQuery(selectQuery)) {
            int fragmentIndex = 0;
            // Max values will be updated in the state property map by the callback
            final MaxValueResultSetRowCollector maxValCollector = new MaxValueResultSetRowCollector(tableName,
                    statePropertyMap, dbAdapter);

            while (true) {
                final AtomicLong nrOfRows = new AtomicLong(0L);

                FlowFile fileToProcess = session.create();
                try {
                    fileToProcess = session.write(fileToProcess, out -> {
                        try {
                            nrOfRows.set(
                                    sqlWriter.writeResultSet(resultSet, out, getLogger(), maxValCollector));
                        } catch (Exception e) {
                            throw new ProcessException("Error during database query or conversion of records.",
                                    e);
                        }
                    });
                } catch (ProcessException e) {
                    // Add flowfile to results before rethrowing so it will be removed from session in outer catch
                    resultSetFlowFiles.add(fileToProcess);
                    throw e;
                }

                if (nrOfRows.get() > 0) {
                    // set attributes
                    final Map<String, String> attributesToAdd = new HashMap<>();
                    attributesToAdd.put(RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));
                    attributesToAdd.put(RESULT_TABLENAME, tableName);

                    if (maxRowsPerFlowFile > 0) {
                        attributesToAdd.put(FRAGMENT_ID, fragmentIdentifier);
                        attributesToAdd.put(FRAGMENT_INDEX, String.valueOf(fragmentIndex));
                    }

                    attributesToAdd.putAll(sqlWriter.getAttributesToAdd());
                    fileToProcess = session.putAllAttributes(fileToProcess, attributesToAdd);
                    sqlWriter.updateCounters(session);

                    logger.info("{} contains {} records; transferring to 'success'",
                            new Object[] { fileToProcess, nrOfRows.get() });

                    session.getProvenanceReporter().receive(fileToProcess, jdbcURL,
                            stopWatch.getElapsed(TimeUnit.MILLISECONDS));
                    resultSetFlowFiles.add(fileToProcess);
                    // If we've reached the batch size, send out the flow files
                    if (outputBatchSize > 0 && resultSetFlowFiles.size() >= outputBatchSize) {
                        session.transfer(resultSetFlowFiles, REL_SUCCESS);
                        session.commit();
                        resultSetFlowFiles.clear();
                    }
                } else {
                    // If there were no rows returned, don't send the flowfile
                    session.remove(fileToProcess);
                    // If no rows and this was first FlowFile, yield
                    if (fragmentIndex == 0) {
                        context.yield();
                    }
                    break;
                }

                fragmentIndex++;
                if (maxFragments > 0 && fragmentIndex >= maxFragments) {
                    break;
                }

                // If we aren't splitting up the data into flow files or fragments, then the result set has been entirely fetched so don't loop back around
                if (maxFragments == 0 && maxRowsPerFlowFile == 0) {
                    break;
                }

                // If we are splitting up the data into flow files, don't loop back around if we've gotten all results
                if (maxRowsPerFlowFile > 0 && nrOfRows.get() < maxRowsPerFlowFile) {
                    break;
                }
            }

            // Apply state changes from the Max Value tracker
            maxValCollector.applyStateChanges();

            // Even though the maximum value and total count are known at this point, to maintain consistent behavior if Output Batch Size is set, do not store the attributes
            if (outputBatchSize == 0) {
                for (int i = 0; i < resultSetFlowFiles.size(); i++) {
                    // Add maximum values as attributes
                    for (Map.Entry<String, String> entry : statePropertyMap.entrySet()) {
                        // Get just the column name from the key
                        String key = entry.getKey();
                        String colName = key
                                .substring(key.lastIndexOf(NAMESPACE_DELIMITER) + NAMESPACE_DELIMITER.length());
                        resultSetFlowFiles.set(i, session.putAttribute(resultSetFlowFiles.get(i),
                                "maxvalue." + colName, entry.getValue()));
                    }

                    //set count on all FlowFiles
                    if (maxRowsPerFlowFile > 0) {
                        resultSetFlowFiles.set(i, session.putAttribute(resultSetFlowFiles.get(i),
                                FRAGMENT_COUNT, Integer.toString(fragmentIndex)));
                    }
                }
            }
        } catch (final SQLException e) {
            throw e;
        }

        session.transfer(resultSetFlowFiles, REL_SUCCESS);

    } catch (final ProcessException | SQLException e) {
        logger.error("Unable to execute SQL select query {} due to {}", new Object[] { selectQuery, e });
        if (!resultSetFlowFiles.isEmpty()) {
            session.remove(resultSetFlowFiles);
        }
        context.yield();
    } finally {
        session.commit();
        try {
            // Update the state
            stateManager.setState(statePropertyMap, Scope.CLUSTER);
        } catch (IOException ioe) {
            getLogger().error("{} failed to update State Manager, maximum observed values will not be recorded",
                    new Object[] { this, ioe });
        }
    }
}

From source file:org.apache.hadoop.hbase.regionserver.wal.TestHLog.java

/**
 * Simulates HLog append ops for a region and tests
 * {@link FSHLog#areAllRegionsFlushed(Map, Map, Map)} API.
 * It compares the region sequenceIds with oldestFlushing and oldestUnFlushed entries.
 * If a region's entries are larger than min of (oldestFlushing, oldestUnFlushed), then the
 * region should be flushed before archiving this WAL.
 */
@Test
public void testAllRegionsFlushed() {
    LOG.debug("testAllRegionsFlushed");
    Map<byte[], Long> oldestFlushingSeqNo = new HashMap<byte[], Long>();
    Map<byte[], Long> oldestUnFlushedSeqNo = new HashMap<byte[], Long>();
    Map<byte[], Long> seqNo = new HashMap<byte[], Long>();
    // create a table
    TableName t1 = TableName.valueOf("t1");
    // create a region
    HRegionInfo hri1 = new HRegionInfo(t1, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    // variables to mock region sequenceIds
    final AtomicLong sequenceId1 = new AtomicLong(1);
    // test empty map
    assertTrue(FSHLog.areAllRegionsFlushed(seqNo, oldestFlushingSeqNo, oldestUnFlushedSeqNo));
    // add entries in the region
    seqNo.put(hri1.getEncodedNameAsBytes(), sequenceId1.incrementAndGet());
    oldestUnFlushedSeqNo.put(hri1.getEncodedNameAsBytes(), sequenceId1.get());
    // should say region1 is not flushed.
    assertFalse(FSHLog.areAllRegionsFlushed(seqNo, oldestFlushingSeqNo, oldestUnFlushedSeqNo));
    // test with entries in oldestFlushing map.
    oldestUnFlushedSeqNo.clear();
    oldestFlushingSeqNo.put(hri1.getEncodedNameAsBytes(), sequenceId1.get());
    assertFalse(FSHLog.areAllRegionsFlushed(seqNo, oldestFlushingSeqNo, oldestUnFlushedSeqNo));
    // simulate region flush, i.e., clear oldestFlushing and oldestUnflushed maps
    oldestFlushingSeqNo.clear();
    oldestUnFlushedSeqNo.clear();
    assertTrue(FSHLog.areAllRegionsFlushed(seqNo, oldestFlushingSeqNo, oldestUnFlushedSeqNo));
    // insert some large values for region1
    oldestUnFlushedSeqNo.put(hri1.getEncodedNameAsBytes(), 1000L);
    seqNo.put(hri1.getEncodedNameAsBytes(), 1500L);
    assertFalse(FSHLog.areAllRegionsFlushed(seqNo, oldestFlushingSeqNo, oldestUnFlushedSeqNo));

    // tests when oldestUnFlushed/oldestFlushing contains larger value.
    // It means region is flushed.
    oldestFlushingSeqNo.put(hri1.getEncodedNameAsBytes(), 1200L);
    oldestUnFlushedSeqNo.clear();
    seqNo.put(hri1.getEncodedNameAsBytes(), 1199L);
    assertTrue(FSHLog.areAllRegionsFlushed(seqNo, oldestFlushingSeqNo, oldestUnFlushedSeqNo));
}
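
Here the AtomicLong is only a convenient mutable source of mocked region sequence ids. A trimmed-down sketch of the incrementAndGet()/get() pairing the test relies on:

import java.util.concurrent.atomic.AtomicLong;

public class SequenceIdSketch {
    public static void main(String[] args) {
        AtomicLong sequenceId = new AtomicLong(1);
        // incrementAndGet() hands out the next sequence id...
        long assigned = sequenceId.incrementAndGet(); // 2
        // ...while get() re-reads the latest id without advancing it, which is
        // how the test records the same id in both the seqNo and oldest* maps.
        long sameAgain = sequenceId.get(); // still 2
        System.out.println(assigned + " == " + sameAgain);
    }
}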

From source file:org.apache.nifi.processors.kite.ConvertCSVToAvro.java

@Override
public void onTrigger(ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile incomingCSV = session.get();
    if (incomingCSV == null) {
        return;
    }

    CSVProperties props = new CSVProperties.Builder()
            .charset(context.getProperty(CHARSET).evaluateAttributeExpressions(incomingCSV).getValue())
            .delimiter(context.getProperty(DELIMITER).evaluateAttributeExpressions(incomingCSV).getValue())
            .quote(context.getProperty(QUOTE).evaluateAttributeExpressions(incomingCSV).getValue())
            .escape(context.getProperty(ESCAPE).evaluateAttributeExpressions(incomingCSV).getValue())
            .hasHeader(context.getProperty(HAS_HEADER).evaluateAttributeExpressions(incomingCSV).asBoolean())
            .linesToSkip(
                    context.getProperty(LINES_TO_SKIP).evaluateAttributeExpressions(incomingCSV).asInteger())
            .build();

    String schemaProperty = context.getProperty(SCHEMA).evaluateAttributeExpressions(incomingCSV).getValue();
    final Schema schema;
    try {
        schema = getSchema(schemaProperty, DefaultConfiguration.get());
    } catch (SchemaNotFoundException e) {
        getLogger().error("Cannot find schema: " + schemaProperty);
        session.transfer(incomingCSV, FAILURE);
        return;
    }

    try (final DataFileWriter<Record> writer = new DataFileWriter<>(
            AvroUtil.newDatumWriter(schema, Record.class))) {
        writer.setCodec(getCodecFactory(context.getProperty(COMPRESSION_TYPE).getValue()));

        try {
            final AtomicLong written = new AtomicLong(0L);
            final FailureTracker failures = new FailureTracker();

            FlowFile badRecords = session.clone(incomingCSV);
            FlowFile outgoingAvro = session.write(incomingCSV, new StreamCallback() {
                @Override
                public void process(InputStream in, OutputStream out) throws IOException {
                    try (CSVFileReader<Record> reader = new CSVFileReader<>(in, props, schema, Record.class)) {
                        reader.initialize();
                        try (DataFileWriter<Record> w = writer.create(schema, out)) {
                            while (reader.hasNext()) {
                                try {
                                    Record record = reader.next();
                                    w.append(record);
                                    written.incrementAndGet();
                                } catch (DatasetRecordException e) {
                                    failures.add(e);
                                }
                            }
                        }
                    }
                }
            });

            long errors = failures.count();

            session.adjustCounter("Converted records", written.get(),
                    false /* update only if file transfer is successful */);
            session.adjustCounter("Conversion errors", errors,
                    false /* update only if file transfer is successful */);

            if (written.get() > 0L) {
                session.transfer(outgoingAvro, SUCCESS);

                if (errors > 0L) {
                    getLogger().warn("Failed to convert {}/{} records from CSV to Avro",
                            new Object[] { errors, errors + written.get() });
                    badRecords = session.putAttribute(badRecords, "errors", failures.summary());
                    session.transfer(badRecords, INCOMPATIBLE);
                } else {
                    session.remove(badRecords);
                }

            } else {
                session.remove(outgoingAvro);

                if (errors > 0L) {
                    getLogger().warn("Failed to convert {}/{} records from CSV to Avro",
                            new Object[] { errors, errors });
                    badRecords = session.putAttribute(badRecords, "errors", failures.summary());
                } else {
                    badRecords = session.putAttribute(badRecords, "errors", "No incoming records");
                }

                session.transfer(badRecords, FAILURE);
            }

        } catch (ProcessException | DatasetIOException e) {
            getLogger().error("Failed reading or writing", e);
            session.transfer(incomingCSV, FAILURE);
        } catch (DatasetException e) {
            getLogger().error("Failed to read FlowFile", e);
            session.transfer(incomingCSV, FAILURE);
        }
    } catch (final IOException ioe) {
        throw new RuntimeException("Unable to close Avro Writer", ioe);
    }
}
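
written is an AtomicLong because local variables captured by the anonymous StreamCallback must be effectively final; the reference stays final while the count inside it changes. A stand-alone sketch of that counting pattern (hypothetical callback, not NiFi code):

import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;

public class CountInsideCallback {
    public static void main(String[] args) {
        // A plain long could not be incremented inside the callback, but the
        // AtomicLong reference stays final while its value changes.
        AtomicLong written = new AtomicLong(0L);

        Consumer<String> recordCallback = record -> written.incrementAndGet();

        recordCallback.accept("record-1");
        recordCallback.accept("record-2");

        // get() reads the total accumulated inside the callback.
        System.out.println("Records written: " + written.get());
    }
}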

From source file:com.github.rinde.datgen.pdptw.DatasetGenerator.java

Dataset<GeneratedScenario> doGenerate() {

    final ListeningExecutorService service = MoreExecutors
            .listeningDecorator(Executors.newFixedThreadPool(builder.numThreads));
    final Dataset<GeneratedScenario> dataset = Dataset.naturalOrder();

    final List<ScenarioCreator> jobs = new ArrayList<>();

    final RandomGenerator rng = new MersenneTwister(builder.randomSeed);
    final Map<GeneratorSettings, IdSeedGenerator> rngMap = new LinkedHashMap<>();

    for (final Long urgency : builder.urgencyLevels) {
        for (final Double scale : builder.scaleLevels) {
            for (final Entry<TimeSeriesType, Collection<Range<Double>>> dynLevel : builder.dynamismLevels
                    .asMap().entrySet()) {

                final int reps = builder.numInstances * dynLevel.getValue().size();

                final long urg = urgency * 60 * 1000L;
                // The office hours is the period in which new orders are accepted,
                // it is defined as [0,officeHoursLength).
                final long officeHoursLength;
                if (urg < halfDiagTT) {
                    officeHoursLength = builder.scenarioLengthMs - twoDiagTT - PICKUP_DURATION
                            - DELIVERY_DURATION;
                } else {
                    officeHoursLength = builder.scenarioLengthMs - urg - oneAndHalfDiagTT - PICKUP_DURATION
                            - DELIVERY_DURATION;
                }

                final int numOrders = DoubleMath.roundToInt(scale * numOrdersPerScale,
                        RoundingMode.UNNECESSARY);

                final ImmutableMap.Builder<String, String> props = ImmutableMap.builder();

                props.put("expected_num_orders", Integer.toString(numOrders));
                props.put("pickup_duration", Long.toString(PICKUP_DURATION));
                props.put("delivery_duration", Long.toString(DELIVERY_DURATION));
                props.put("width_height", String.format("%1.1fx%1.1f", AREA_WIDTH, AREA_WIDTH));

                // TODO store this in TimeSeriesType?
                final RangeSet<Double> rset = TreeRangeSet.create();
                for (final Range<Double> r : dynLevel.getValue()) {
                    rset.add(r);
                }

                // createTimeSeriesGenerator(dynLevel.getKey(), officeHoursLength,
                // numOrders, numOrdersPerScale, props);

                final GeneratorSettings set = GeneratorSettings.builder().setDayLength(builder.scenarioLengthMs)
                        .setOfficeHours(officeHoursLength).setTimeSeriesType(dynLevel.getKey())
                        .setDynamismRangeCenters(builder.dynamismRangeMap.subRangeMap(rset.span()))
                        .setUrgency(urg).setScale(scale).setNumOrders(numOrders).setProperties(props.build())
                        .build();

                final IdSeedGenerator isg = new IdSeedGenerator(rng.nextLong());
                rngMap.put(set, isg);

                for (int i = 0; i < reps; i++) {
                    final LocationGenerator lg = Locations.builder().min(0d).max(AREA_WIDTH).buildUniform();

                    final TimeSeriesGenerator tsg2 = createTimeSeriesGenerator(dynLevel.getKey(),
                            officeHoursLength, numOrders, numOrdersPerScale,
                            ImmutableMap.<String, String>builder());
                    final ScenarioGenerator gen = createGenerator(officeHoursLength, urg, scale, tsg2,
                            set.getDynamismRangeCenters(), lg, builder, numOrdersPerScale);

                    jobs.add(ScenarioCreator.create(isg.next(), set, gen));
                }
            }
        }
    }

    final AtomicLong currentJobs = new AtomicLong(0L);
    final AtomicLong datasetSize = new AtomicLong(0L);

    LOGGER.info(" - Submitting " + jobs.size() + " Jobs");
    for (final ScenarioCreator job : jobs) {
        submitJob(currentJobs, service, job, builder.numInstances, dataset, rngMap, datasetSize);
    }

    final long targetSize = builder.numInstances * builder.dynamismLevels.values().size()
            * builder.scaleLevels.size() * builder.urgencyLevels.size();
    while (datasetSize.get() < targetSize || dataset.size() < targetSize) {
        try {
            // LOGGER.info(" - Waiting, current size ==" + dataset.size());
            Thread.sleep(THREAD_SLEEP_DURATION);
        } catch (final InterruptedException e) {
            throw new IllegalStateException(e);
        }
    }

    LOGGER.info(" - Shutdown Service, Awaiting Termination");
    service.shutdown();
    try {
        service.awaitTermination(1L, TimeUnit.HOURS);
    } catch (final InterruptedException e) {
        throw new IllegalStateException(e);
    }

    LOGGER.info(" - Returning dataset");

    return dataset;
}
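
The AtomicLong usage to note here is the busy-wait on datasetSize.get(). Below is a minimal sketch of the same wait-until-target pattern, assuming the submitted jobs simply bump a shared counter (which is not how submitJob works internally):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

public class WaitUntilTarget {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService service = Executors.newFixedThreadPool(4);
        AtomicLong completed = new AtomicLong(0L);
        long targetSize = 20;

        for (int i = 0; i < targetSize; i++) {
            service.execute(completed::incrementAndGet);
        }

        // Poll get() until every job has reported completion, mirroring the
        // datasetSize.get() < targetSize loop above.
        while (completed.get() < targetSize) {
            Thread.sleep(10);
        }

        service.shutdown();
        service.awaitTermination(1L, TimeUnit.MINUTES);
        System.out.println("All " + completed.get() + " jobs finished");
    }
}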

From source file:com.linkedin.pinot.tools.perf.QueryRunner.java

/**
 * Use multiple threads to run queries as fast as possible.
 * <p>Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
 * queue whenever the queue length is low, and start <code>numThreads</code> worker threads to fetch queries from the
 * queue and send them.
 * <p>The main thread is responsible for collecting and logging statistics periodically.
 * <p>Queries are picked sequentially from the query file.
 * <p>The query runner stops when all queries in the query file have been executed the configured number of times.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numTimesToRunQueries number of times to run all queries in the query file, 0 means infinite times.
 * @param numThreads number of threads sending queries.
 * @param reportIntervalMs report interval in milliseconds.
 * @param numIntervalsToReportAndClearStatistics number of report intervals to report detailed statistics and clear
 *                                               them, 0 means never.
 * @throws Exception
 */
public static void multiThreadedQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        int numTimesToRunQueries, int numThreads, int reportIntervalMs,
        int numIntervalsToReportAndClearStatistics) throws Exception {
    List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
    AtomicInteger numQueriesExecuted = new AtomicInteger(0);
    AtomicLong totalBrokerTime = new AtomicLong(0L);
    AtomicLong totalClientTime = new AtomicLong(0L);
    List<Statistics> statisticsList = Collections.singletonList(new Statistics(CLIENT_TIME_STATISTICS));

    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Worker(driver, queryQueue, numQueriesExecuted, totalBrokerTime,
                totalClientTime, statisticsList));
    }
    executorService.shutdown();

    long startTime = System.currentTimeMillis();
    long reportStartTime = startTime;
    int numReportIntervals = 0;
    int numTimesExecuted = 0;
    while (numTimesToRunQueries == 0 || numTimesExecuted < numTimesToRunQueries) {
        if (executorService.isTerminated()) {
            LOGGER.error("All threads got exception and already dead.");
            return;
        }

        for (String query : queries) {
            queryQueue.add(query);

            // Keep 20 queries inside the query queue.
            while (queryQueue.size() == 20) {
                Thread.sleep(1);

                long currentTime = System.currentTimeMillis();
                if (currentTime - reportStartTime >= reportIntervalMs) {
                    long timePassed = currentTime - startTime;
                    int numQueriesExecutedInt = numQueriesExecuted.get();
                    LOGGER.info(
                            "Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, Average Broker Time: {}ms, "
                                    + "Average Client Time: {}ms.",
                            timePassed, numQueriesExecutedInt,
                            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                            totalBrokerTime.get() / (double) numQueriesExecutedInt,
                            totalClientTime.get() / (double) numQueriesExecutedInt);
                    reportStartTime = currentTime;
                    numReportIntervals++;

                    if ((numIntervalsToReportAndClearStatistics != 0)
                            && (numReportIntervals == numIntervalsToReportAndClearStatistics)) {
                        numReportIntervals = 0;
                        startTime = currentTime;
                        reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                                statisticsList);
                    }
                }
            }
        }
        numTimesExecuted++;
    }

    // Wait for all queries to finish executing.
    while (queryQueue.size() != 0) {
        Thread.sleep(1);
    }
    executorService.shutdownNow();
    while (!executorService.isTerminated()) {
        Thread.sleep(1);
    }

    long timePassed = System.currentTimeMillis() - startTime;
    int numQueriesExecutedInt = numQueriesExecuted.get();
    LOGGER.info("--------------------------------------------------------------------------------");
    LOGGER.info("FINAL REPORT:");
    LOGGER.info(
            "Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, Average Broker Time: {}ms, "
                    + "Average Client Time: {}ms.",
            timePassed, numQueriesExecutedInt,
            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
            totalBrokerTime.get() / (double) numQueriesExecutedInt,
            totalClientTime.get() / (double) numQueriesExecutedInt);
    for (Statistics statistics : statisticsList) {
        statistics.report();
    }
}

From source file:org.apache.hadoop.hbase.wal.TestWALSplit.java

/**
 * Simulates splitting a WAL out from under a regionserver that is still trying to write it.
 * Ensures we do not lose edits.
 * @throws IOException
 * @throws InterruptedException
 */
@Test(timeout = 300000)
public void testLogCannotBeWrittenOnceParsed() throws IOException, InterruptedException {
    final AtomicLong counter = new AtomicLong(0);
    AtomicBoolean stop = new AtomicBoolean(false);
    // Region we'll write edits too and then later examine to make sure they all made it in.
    final String region = REGIONS.get(0);
    final int numWriters = 3;
    Thread zombie = new ZombieLastLogWriterRegionServer(counter, stop, region, numWriters);
    try {
        long startCount = counter.get();
        zombie.start();
        // Wait till writer starts going.
        while (startCount == counter.get())
            Threads.sleep(1);
        // Give it a second to write a few appends.
        Threads.sleep(1000);
        final Configuration conf2 = HBaseConfiguration.create(this.conf);
        final User robber = User.createUserForTesting(conf2, ROBBER, GROUP);
        int count = robber.runAs(new PrivilegedExceptionAction<Integer>() {
            @Override
            public Integer run() throws Exception {
                StringBuilder ls = new StringBuilder("Contents of WALDIR (").append(WALDIR).append("):\n");
                for (FileStatus status : fs.listStatus(WALDIR)) {
                    ls.append("\t").append(status.toString()).append("\n");
                }
                LOG.debug(ls);
                LOG.info("Splitting WALs out from under zombie. Expecting " + numWriters + " files.");
                WALSplitter.split(HBASEDIR, WALDIR, OLDLOGDIR, fs, conf2, wals);
                LOG.info("Finished splitting out from under zombie.");
                Path[] logfiles = getLogForRegion(HBASEDIR, TABLE_NAME, region);
                assertEquals("wrong number of split files for region", numWriters, logfiles.length);
                int count = 0;
                for (Path logfile : logfiles) {
                    count += countWAL(logfile);
                }
                return count;
            }
        });
        LOG.info("zombie=" + counter.get() + ", robber=" + count);
        assertTrue(
                "The log file could have at most 1 extra log entry, but can't have less. "
                        + "Zombie could write " + counter.get() + " and logfile had only " + count,
                counter.get() == count || counter.get() + 1 == count);
    } finally {
        stop.set(true);
        zombie.interrupt();
        Threads.threadDumpingIsAlive(zombie);
    }
}
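
The counter is shared with the zombie writer thread, and the test polls counter.get() until the value moves past its initial snapshot to confirm the writer is running. A condensed sketch of that handshake, with the writer reduced to a plain loop:

import java.util.concurrent.atomic.AtomicLong;

public class WaitForProgress {
    public static void main(String[] args) throws InterruptedException {
        AtomicLong counter = new AtomicLong(0);
        long startCount = counter.get();

        Thread writer = new Thread(() -> {
            for (int i = 0; i < 5; i++) {
                counter.incrementAndGet();
                try {
                    Thread.sleep(50);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    return;
                }
            }
        });
        writer.start();

        // Poll get() until the value moves past the snapshot, i.e. the writer
        // has started making progress, as the test above does before splitting.
        while (counter.get() == startCount) {
            Thread.sleep(1);
        }
        System.out.println("writer is running, count=" + counter.get());
        writer.join();
    }
}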

From source file:org.apache.pulsar.client.impl.ConsumerImpl.java

CompletableFuture<MessageId> getLastMessageIdAsync() {
    if (getState() == State.Closing || getState() == State.Closed) {
        return FutureUtil
                .failedFuture(new PulsarClientException.AlreadyClosedException("Consumer was already closed"));
    }

    AtomicLong opTimeoutMs = new AtomicLong(client.getConfiguration().getOperationTimeoutMs());
    Backoff backoff = new Backoff(100, TimeUnit.MILLISECONDS, opTimeoutMs.get() * 2, TimeUnit.MILLISECONDS, 0,
            TimeUnit.MILLISECONDS);
    CompletableFuture<MessageId> getLastMessageIdFuture = new CompletableFuture<>();

    internalGetLastMessageIdAsync(backoff, opTimeoutMs, getLastMessageIdFuture);
    return getLastMessageIdFuture;
}
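
opTimeoutMs is wrapped in an AtomicLong so the remaining timeout budget can be read and reduced inside internalGetLastMessageIdAsync, which is not shown here. The following is a hypothetical sketch of carrying such a budget through asynchronous retries (Java 9+, simulated RPC), not the Pulsar implementation:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

public class RetryBudgetSketch {
    // Each attempt spends part of the shared budget; get() decides whether a
    // further retry is still allowed. The "RPC" is simulated with a random flag.
    static void attempt(AtomicLong remainingMs, CompletableFuture<String> result) {
        if (remainingMs.get() <= 0) {
            result.completeExceptionally(new RuntimeException("operation timed out"));
            return;
        }
        if (ThreadLocalRandom.current().nextInt(10) < 3) { // stand-in for a successful call
            result.complete("ok");
            return;
        }
        long backoffMs = 100;
        remainingMs.addAndGet(-backoffMs); // spend part of the budget before retrying
        CompletableFuture.delayedExecutor(backoffMs, TimeUnit.MILLISECONDS)
                .execute(() -> attempt(remainingMs, result));
    }

    public static void main(String[] args) {
        CompletableFuture<String> result = new CompletableFuture<>();
        attempt(new AtomicLong(1_000L), result);
        System.out.println(result.handle((ok, err) -> ok != null ? ok : err.toString()).join());
    }
}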

From source file:io.warp10.continuum.egress.EgressFetchHandler.java

/**
 * Output a tab separated version of fetched data. Deduplication is done on the fly so we don't decode twice.
 */
private static void tsvDump(PrintWriter pw, GTSDecoderIterator iter, long now, long timespan, boolean raw,
        boolean dedup, boolean signed, AtomicReference<Metadata> lastMeta, AtomicLong lastCount,
        boolean sortMeta) throws IOException {

    String name = null;
    Map<String, String> labels = null;

    StringBuilder classSB = new StringBuilder();
    StringBuilder labelsSB = new StringBuilder();
    StringBuilder attributesSB = new StringBuilder();
    StringBuilder valueSB = new StringBuilder();

    Metadata lastMetadata = lastMeta.get();
    long currentCount = lastCount.get();

    while (iter.hasNext()) {
        GTSDecoder decoder = iter.next();

        if (!decoder.next()) {
            continue;
        }

        long toDecodeCount = Long.MAX_VALUE;

        if (timespan < 0) {
            Metadata meta = decoder.getMetadata();
            if (!meta.equals(lastMetadata)) {
                lastMetadata = meta;
                currentCount = 0;
            }
            toDecodeCount = Math.max(0, -timespan - currentCount);
        }

        //
        // Only display the class + labels if they have changed since the previous GTS
        //

        Map<String, String> lbls = decoder.getLabels();

        //
        // Compute the new name
        //

        boolean displayName = false;

        if (null == name || (!name.equals(decoder.getName()) || !labels.equals(lbls))) {
            displayName = true;
            name = decoder.getName();
            labels = lbls;
            classSB.setLength(0);
            GTSHelper.encodeName(classSB, name);
            labelsSB.setLength(0);
            attributesSB.setLength(0);
            boolean first = true;

            if (sortMeta) {
                lbls = new TreeMap<String, String>(lbls);
            }
            for (Entry<String, String> entry : lbls.entrySet()) {
                //
                // Skip owner/producer labels and any other 'private' labels
                //
                if (!signed) {
                    if (Constants.PRODUCER_LABEL.equals(entry.getKey())) {
                        continue;
                    }
                    if (Constants.OWNER_LABEL.equals(entry.getKey())) {
                        continue;
                    }
                }

                if (!first) {
                    labelsSB.append(",");
                }
                GTSHelper.encodeName(labelsSB, entry.getKey());
                labelsSB.append("=");
                GTSHelper.encodeName(labelsSB, entry.getValue());
                first = false;
            }

            first = true;
            if (decoder.getMetadata().getAttributesSize() > 0) {

                if (sortMeta) {
                    decoder.getMetadata()
                            .setAttributes(new TreeMap<String, String>(decoder.getMetadata().getAttributes()));
                }

                for (Entry<String, String> entry : decoder.getMetadata().getAttributes().entrySet()) {
                    if (!first) {
                        attributesSB.append(",");
                    }
                    GTSHelper.encodeName(attributesSB, entry.getKey());
                    attributesSB.append("=");
                    GTSHelper.encodeName(attributesSB, entry.getValue());
                    first = false;
                }
            }

        }

        long timestamp = 0L;
        long location = GeoTimeSerie.NO_LOCATION;
        long elevation = GeoTimeSerie.NO_ELEVATION;
        Object value = null;

        boolean dup = true;

        long decoded = 0;

        do {

            if (toDecodeCount == decoded) {
                break;
            }

            //
            // Filter out any value not in the time range
            //

            long newTimestamp = decoder.getTimestamp();

            if (newTimestamp > now || (timespan >= 0 && newTimestamp <= (now - timespan))) {
                continue;
            }

            //
            // TODO(hbs): filter out values with no location or outside the selected geozone when a geozone was set
            //

            long newLocation = decoder.getLocation();
            long newElevation = decoder.getElevation();
            Object newValue = decoder.getValue();

            dup = true;

            if (dedup) {
                if (location != newLocation || elevation != newElevation) {
                    dup = false;
                } else {
                    if (null == newValue) {
                        // Consider nulls as duplicates (can't happen!)
                        dup = false;
                    } else if (newValue instanceof Number) {
                        if (!((Number) newValue).equals(value)) {
                            dup = false;
                        }
                    } else if (newValue instanceof String) {
                        if (!((String) newValue).equals(value)) {
                            dup = false;
                        }
                    } else if (newValue instanceof Boolean) {
                        if (!((Boolean) newValue).equals(value)) {
                            dup = false;
                        }
                    }
                }
            }

            decoded++;

            location = newLocation;
            elevation = newElevation;
            timestamp = newTimestamp;
            value = newValue;

            if (raw) {
                if (!dedup || !dup) {
                    pw.print(classSB);
                    pw.print('\t');
                    pw.print(labelsSB);
                    pw.print('\t');
                    pw.print(attributesSB);
                    pw.print('\t');

                    pw.print(timestamp);
                    pw.print('\t');

                    if (GeoTimeSerie.NO_LOCATION != location) {
                        double[] latlon = GeoXPLib.fromGeoXPPoint(location);
                        pw.print(latlon[0]);
                        pw.print('\t');
                        pw.print(latlon[1]);
                    } else {
                        pw.print('\t');
                    }

                    pw.print('\t');

                    if (GeoTimeSerie.NO_ELEVATION != elevation) {
                        pw.print(elevation);
                    }
                    pw.print('\t');

                    valueSB.setLength(0);
                    GTSHelper.encodeValue(valueSB, value);
                    pw.println(valueSB);
                }
            } else {
                // Display the name only if we have at least one value to display
                // We force 'dup' to be false when we must show the name
                if (displayName) {
                    pw.print("# ");
                    pw.print(classSB);
                    pw.print("{");
                    pw.print(labelsSB);
                    pw.print("}");
                    pw.print("{");
                    pw.print(attributesSB);
                    pw.println("}");
                    displayName = false;
                    dup = false;
                }

                if (!dedup || !dup) {
                    pw.print(timestamp);
                    pw.print('\t');
                    if (GeoTimeSerie.NO_LOCATION != location) {
                        double[] latlon = GeoXPLib.fromGeoXPPoint(location);
                        pw.print(latlon[0]);
                        pw.print('\t');
                        pw.print(latlon[1]);
                    } else {
                        pw.print('\t');
                    }

                    pw.print('\t');

                    if (GeoTimeSerie.NO_ELEVATION != elevation) {
                        pw.print(elevation);
                    }
                    pw.print('\t');

                    valueSB.setLength(0);
                    GTSHelper.encodeValue(valueSB, value);
                    pw.println(valueSB);
                }
            }
        } while (decoder.next());

        // Update currentcount
        if (timespan < 0) {
            currentCount += decoded;
        }

        // Print any remaining value
        if (dedup && dup) {
            if (raw) {
                pw.print(classSB);
                pw.print('\t');
                pw.print(labelsSB);
                pw.print('\t');
                pw.print(attributesSB);
                pw.print('\t');

                pw.print(timestamp);
                pw.print('\t');

                if (GeoTimeSerie.NO_LOCATION != location) {
                    double[] latlon = GeoXPLib.fromGeoXPPoint(location);
                    pw.print(latlon[0]);
                    pw.print('\t');
                    pw.print(latlon[1]);
                } else {
                    pw.print('\t');
                }

                pw.print('\t');

                if (GeoTimeSerie.NO_ELEVATION != elevation) {
                    pw.print(elevation);
                }
                pw.print('\t');

                valueSB.setLength(0);
                GTSHelper.encodeValue(valueSB, value);
                pw.println(valueSB);
            } else {
                pw.print(timestamp);
                pw.print('\t');
                if (GeoTimeSerie.NO_LOCATION != location) {
                    double[] latlon = GeoXPLib.fromGeoXPPoint(location);
                    pw.print(latlon[0]);
                    pw.print('\t');
                    pw.print(latlon[1]);
                } else {
                    pw.print('\t');
                }

                pw.print('\t');

                if (GeoTimeSerie.NO_ELEVATION != elevation) {
                    pw.print(elevation);
                }
                pw.print('\t');

                valueSB.setLength(0);
                GTSHelper.encodeValue(valueSB, value);
                pw.println(valueSB);
            }

        }

        //
        // If displayName is still true it means we should have displayed the name but no value matched,
        // so set name to null so we correctly display the name for the next decoder if it has values
        //

        if (displayName) {
            name = null;
        }
    }

    lastMeta.set(lastMetadata);
    lastCount.set(currentCount);
}
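
lastMeta and lastCount behave as in/out parameters: the caller seeds them, tsvDump reads the previous values with get() and writes the updated ones back before returning. A small sketch of that pattern in isolation (hypothetical names):

import java.util.concurrent.atomic.AtomicLong;

public class InOutParameter {
    // The AtomicLong lets the method both read the previous running count and
    // hand the updated count back to the caller, like lastCount in tsvDump.
    static void dumpChunk(int decoded, AtomicLong lastCount) {
        long currentCount = lastCount.get();
        currentCount += decoded;
        lastCount.set(currentCount);
    }

    public static void main(String[] args) {
        AtomicLong lastCount = new AtomicLong(0L);
        dumpChunk(3, lastCount);
        dumpChunk(4, lastCount);
        System.out.println("Total decoded so far: " + lastCount.get()); // 7
    }
}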

From source file:org.lendingclub.mercator.docker.SwarmScanner.java

protected long saveTask(JsonNode it) {

    ObjectNode n = flattenTask(it);

    n.put("swarmClusterId", getSwarmClusterId().get());

    String taskId = n.get("taskId").asText();
    String serviceId = n.path("serviceId").asText();
    String swarmNodeId = n.path("swarmNodeId").asText();
    checkNotEmpty(taskId, "taskId");
    checkNotEmpty(serviceId, "serviceId");
    checkNotEmpty(swarmNodeId, "swarmNodeId");

    AtomicLong timestamp = new AtomicLong(Long.MAX_VALUE);
    dockerScanner.getNeoRxClient()
            .execCypher(
                    "merge (x:DockerTask {taskId:{taskId}}) set x+={props}, x.updateTs=timestamp() return x",
                    "taskId", taskId, "props", n)
            .forEach(tt -> {

                timestamp.set(tt.path("updateTs").asLong(Long.MAX_VALUE));

                removeDockerLabels("DockerTask", "taskId", taskId, n, it);
            });

    {
        // it might be worth it to select these relationships and only
        // update if they are missing
        dockerScanner.getNeoRxClient().execCypher(
                "match (s:DockerService {serviceId:{serviceId}}),(t:DockerTask{taskId:{taskId}}) merge (s)-[x:CONTAINS]->(t) set x.updateTs=timestamp() return t,s",
                "serviceId", serviceId, "taskId", taskId);

        dockerScanner.getNeoRxClient().execCypher(
                "match (h:DockerHost {swarmNodeId:{swarmNodeId}}), (t:DockerTask {swarmNodeId:{swarmNodeId}}) merge (h)-[x:RUNS]->(t) set x.updateTs=timestamp()",
                "swarmNodeId", swarmNodeId);
    }
    return timestamp.get();
}
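
The AtomicLong timestamp acts as a holder that lets the lambda passed to forEach publish a value back to the enclosing method, which then returns timestamp.get(). A minimal stand-alone sketch of that capture-and-return pattern (hypothetical data, not NeoRx code):

import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

public class CaptureFromLambda {
    static long latestUpdateTs(List<Long> rows) {
        // Local variables used inside a lambda must be effectively final, so the
        // result is written into an AtomicLong and read back with get().
        AtomicLong timestamp = new AtomicLong(Long.MAX_VALUE);
        rows.forEach(ts -> timestamp.set(ts));
        return timestamp.get();
    }

    public static void main(String[] args) {
        System.out.println(latestUpdateTs(List.of(1L, 2L, 42L))); // 42
    }
}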