Example usage for org.apache.hadoop.conf Configuration getLong

Introduction

On this page you can find example usages of org.apache.hadoop.conf.Configuration.getLong, collected from open-source projects.

Prototype

public long getLong(String name, long defaultValue) 

Document

Get the value of the name property as a long. If no such property exists, the provided default value is returned.
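
A minimal, self-contained sketch of this fallback behavior, assuming only a stock Hadoop client on the classpath (the property name example.timeout.ms is invented for illustration):

import org.apache.hadoop.conf.Configuration;

public class GetLongDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Property not set: the provided default is returned.
        long unset = conf.getLong("example.timeout.ms", 30000L); // 30000
        // Property set: the stored value wins over the default.
        conf.setLong("example.timeout.ms", 60000L);
        long set = conf.getLong("example.timeout.ms", 30000L); // 60000
        System.out.println(unset + " " + set);
    }
}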

Usage

From source file:org.apache.parquet.hadoop.ParquetOutputFormat.java

License:Apache License

public static long getLongBlockSize(Configuration configuration) {
    return configuration.getLong(BLOCK_SIZE, DEFAULT_BLOCK_SIZE);
}
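
A caller would typically set this key on the job configuration before writing; a brief sketch (BLOCK_SIZE is the ParquetOutputFormat constant read above, and the 128 MB value is purely illustrative):

Configuration conf = new Configuration();
conf.setLong(ParquetOutputFormat.BLOCK_SIZE, 128L * 1024 * 1024); // 128 MB row groups
long blockSize = ParquetOutputFormat.getLongBlockSize(conf); // falls back to DEFAULT_BLOCK_SIZE when unset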

From source file:org.apache.parquet.hadoop.ParquetOutputFormat.java

License:Apache License

public RecordWriter<Void, T> getRecordWriter(Configuration conf, Path file, CompressionCodecName codec)
        throws IOException, InterruptedException {
    final WriteSupport<T> writeSupport = getWriteSupport(conf);

    CodecFactory codecFactory = new CodecFactory(conf);
    long blockSize = getLongBlockSize(conf);
    if (INFO)
        LOG.info("Parquet block size to " + blockSize);
    int pageSize = getPageSize(conf);
    if (INFO)
        LOG.info("Parquet page size to " + pageSize);
    int dictionaryPageSize = getDictionaryPageSize(conf);
    if (INFO)
        LOG.info("Parquet dictionary page size to " + dictionaryPageSize);
    boolean enableDictionary = getEnableDictionary(conf);
    if (INFO)
        LOG.info("Dictionary is " + (enableDictionary ? "on" : "off"));
    boolean validating = getValidation(conf);
    if (INFO)
        LOG.info("Validation is " + (validating ? "on" : "off"));
    WriterVersion writerVersion = getWriterVersion(conf);
    if (INFO)
        LOG.info("Writer version is: " + writerVersion);
    int maxPaddingSize = getMaxPaddingSize(conf);
    if (INFO)
        LOG.info("Maximum row group padding size is " + maxPaddingSize + " bytes");

    WriteContext init = writeSupport.init(conf);
    ParquetFileWriter w = new ParquetFileWriter(conf, init.getSchema(), file, Mode.CREATE, blockSize,
            maxPaddingSize);
    w.start();

    float maxLoad = conf.getFloat(ParquetOutputFormat.MEMORY_POOL_RATIO,
            MemoryManager.DEFAULT_MEMORY_POOL_RATIO);
    long minAllocation = conf.getLong(ParquetOutputFormat.MIN_MEMORY_ALLOCATION,
            MemoryManager.DEFAULT_MIN_MEMORY_ALLOCATION);
    if (memoryManager == null) {
        memoryManager = new MemoryManager(maxLoad, minAllocation);
    } else if (memoryManager.getMemoryPoolRatio() != maxLoad) {
        LOG.warn("The configuration " + MEMORY_POOL_RATIO + " has been set. It should not "
                + "be reset by the new value: " + maxLoad);
    }

    return new ParquetRecordWriter<T>(w, writeSupport, init.getSchema(), init.getExtraMetaData(), blockSize,
            pageSize, codecFactory.getCompressor(codec, pageSize), dictionaryPageSize, enableDictionary,
            validating, writerVersion, memoryManager);
}
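
The memory-manager knobs read near the end of this method can be tuned the same way; a short sketch (MEMORY_POOL_RATIO and MIN_MEMORY_ALLOCATION are the constants used above; the values are illustrative, not recommendations):

Configuration conf = new Configuration();
conf.setFloat(ParquetOutputFormat.MEMORY_POOL_RATIO, 0.9f); // fraction of heap shared across writers
conf.setLong(ParquetOutputFormat.MIN_MEMORY_ALLOCATION, 1048576L); // floor of 1 MB per writer allocation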

From source file:org.apache.phoenix.cache.aggcache.SpillableGroupByCache.java

License:Apache License

/**
 * Instantiates a loading LRU cache that stores key / aggregator[] tuples used for group by queries.
 *
 * @param env the region coprocessor environment
 * @param tenantId the tenant identifier used to look up the tenant cache
 * @param aggs the server aggregators
 * @param estSizeNum the estimated number of elements to cache
 */
public SpillableGroupByCache(final RegionCoprocessorEnvironment env, ImmutableBytesPtr tenantId,
        ServerAggregators aggs, final int estSizeNum) {
    totalNumElements = 0;
    this.aggregators = aggs;
    this.env = env;

    final int estValueSize = aggregators.getEstimatedByteSize();
    final TenantCache tenantCache = GlobalCache.getTenantCache(env, tenantId);

    // Compute the initial map size
    final Configuration conf = env.getConfiguration();
    final long maxCacheSizeConf = conf.getLong(GROUPBY_MAX_CACHE_SIZE_ATTRIB, DEFAULT_GROUPBY_MAX_CACHE_MAX);
    final int numSpillFilesConf = conf.getInt(GROUPBY_SPILL_FILES_ATTRIB, DEFAULT_GROUPBY_SPILL_FILES);

    final int maxSizeNum = (int) (maxCacheSizeConf / estValueSize);
    final int minSizeNum = (SPGBY_CACHE_MIN_SIZE / estValueSize);

    // use upper and lower bounds for the cache size
    final int maxCacheSize = Math.max(minSizeNum, Math.min(maxSizeNum, estSizeNum));
    final long estSize = GroupedAggregateRegionObserver.sizeOfUnorderedGroupByMap(maxCacheSize, estValueSize);
    try {
        this.chunk = tenantCache.getMemoryManager().allocate(estSize);
    } catch (InsufficientMemoryException ime) {
        logger.error("Requested Map size exceeds memory limit, please decrease max size via config paramter: "
                + GROUPBY_MAX_CACHE_SIZE_ATTRIB);
        throw ime;
    }

    if (logger.isDebugEnabled()) {
        logger.debug("Instantiating LRU groupby cache of element size: " + maxCacheSize);
    }

    // LRU cache implemented as LinkedHashMap with access order
    cache = new LinkedHashMap<ImmutableBytesWritable, Aggregator[]>(maxCacheSize, 0.75f, true) {
        boolean spill = false;
        int cacheSize = maxCacheSize;

        @Override
        protected boolean removeEldestEntry(Map.Entry<ImmutableBytesWritable, Aggregator[]> eldest) {
            if (!spill && size() > cacheSize) { // increase allocation
                cacheSize *= 1.5f;
                long estSize = GroupedAggregateRegionObserver.sizeOfUnorderedGroupByMap(cacheSize,
                        estValueSize);
                try {
                    chunk.resize(estSize);
                } catch (InsufficientMemoryException im) {
                    // Cannot extend Map anymore, start spilling
                    spill = true;
                }
            }

            if (spill) {
                try {
                    if (spillManager == null) {
                        // Lazy instantiation of spillable data
                        // structures
                        //
                        // Only create spill data structs if LRU
                        // cache is too small
                        spillManager = new SpillManager(numSpillFilesConf, aggregators, env.getConfiguration(),
                                new QueryCache());
                    }
                    spillManager.spill(eldest.getKey(), eldest.getValue());
                } catch (IOException ioe) {
                    // Ensure that we always close and delete the temp files
                    try {
                        throw new RuntimeException(ioe);
                    } finally {
                        Closeables.closeQuietly(SpillableGroupByCache.this);
                    }
                }
                return true;
            }

            return false;
        }
    };
}

From source file:org.apache.phoenix.cache.GlobalCache.java

License:Apache License

private static long getMaxMemorySize(Configuration config) {
    long maxSize = Runtime.getRuntime().maxMemory()
            * config.getInt(MAX_MEMORY_PERC_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MEMORY_PERC) / 100;
    maxSize = Math.min(maxSize, config.getLong(MAX_MEMORY_SIZE_ATTRIB, Long.MAX_VALUE));
    return maxSize;
}
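
This method combines a relative bound with an absolute one: a percentage of the JVM heap, capped by an optional byte limit from configuration. A standalone sketch of the same idiom, with hypothetical property names:

Configuration config = new Configuration();
long pctOfHeap = Runtime.getRuntime().maxMemory() * config.getInt("example.max.memory.perc", 15) / 100;
long maxSize = Math.min(pctOfHeap, config.getLong("example.max.memory.size", Long.MAX_VALUE));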

From source file:org.apache.phoenix.coprocessor.MetaDataRegionObserver.java

License:Apache License

@Override
public void start(CoprocessorEnvironment env) throws IOException {
    // Sleep a little to compensate for clock skew when SYSTEM.CATALOG moves
    // among region servers, because we rely on the server time of the RS that
    // is hosting SYSTEM.CATALOG
    Configuration config = env.getConfiguration();
    long sleepTime = config.getLong(QueryServices.CLOCK_SKEW_INTERVAL_ATTRIB,
            QueryServicesOptions.DEFAULT_CLOCK_SKEW_INTERVAL);
    try {
        if (sleepTime > 0) {
            Thread.sleep(sleepTime);
        }
    } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
    }
    enableRebuildIndex = config.getBoolean(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_ATTRIB,
            QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD);
    rebuildIndexTimeInterval = config.getLong(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_INTERVAL_ATTRIB,
            QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_INTERVAL);
    initialRebuildTaskDelay = config.getLong(QueryServices.INDEX_REBUILD_TASK_INITIAL_DELAY,
            QueryServicesOptions.DEFAULT_INDEX_REBUILD_TASK_INITIAL_DELAY);
}

From source file:org.apache.phoenix.coprocessor.MetaDataRegionObserver.java

License:Apache License

@VisibleForTesting
public static synchronized void initRebuildIndexConnectionProps(Configuration config) {
    if (rebuildIndexConnectionProps == null) {
        Properties props = new Properties();
        long indexRebuildQueryTimeoutMs = config.getLong(QueryServices.INDEX_REBUILD_QUERY_TIMEOUT_ATTRIB,
                QueryServicesOptions.DEFAULT_INDEX_REBUILD_QUERY_TIMEOUT);
        long indexRebuildRPCTimeoutMs = config.getLong(QueryServices.INDEX_REBUILD_RPC_TIMEOUT_ATTRIB,
                QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_TIMEOUT);
        long indexRebuildClientScannerTimeOutMs = config.getLong(
                QueryServices.INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT_ATTRIB,
                QueryServicesOptions.DEFAULT_INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT);
        int indexRebuildRpcRetriesCounter = config.getInt(QueryServices.INDEX_REBUILD_RPC_RETRIES_COUNTER,
                QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER);
        // Set various phoenix and hbase level timeouts and rpc retries
        props.setProperty(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, Long.toString(indexRebuildQueryTimeoutMs));
        props.setProperty(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,
                Long.toString(indexRebuildClientScannerTimeOutMs));
        props.setProperty(HConstants.HBASE_RPC_TIMEOUT_KEY, Long.toString(indexRebuildRPCTimeoutMs));
        props.setProperty(HConstants.HBASE_CLIENT_RETRIES_NUMBER, Long.toString(indexRebuildRpcRetriesCounter));
        // don't run a second index population upsert select
        props.setProperty(QueryServices.INDEX_POPULATION_SLEEP_TIME, "0");
        rebuildIndexConnectionProps = PropertiesUtil.combineProperties(props, config);
    }
}

From source file:org.apache.phoenix.coprocessor.TaskRegionObserver.java

License:Apache License

@Override
public void start(CoprocessorEnvironment env) throws IOException {
    Configuration config = env.getConfiguration();
    timeInterval = config.getLong(QueryServices.TASK_HANDLING_INTERVAL_MS_ATTRIB,
            QueryServicesOptions.DEFAULT_TASK_HANDLING_INTERVAL_MS);
    timeMaxInterval = config.getLong(QueryServices.TASK_HANDLING_MAX_INTERVAL_MS_ATTRIB,
            QueryServicesOptions.DEFAULT_TASK_HANDLING_MAX_INTERVAL_MS);
    initialDelay = config.getLong(QueryServices.TASK_HANDLING_INITIAL_DELAY_MS_ATTRIB,
            QueryServicesOptions.DEFAULT_TASK_HANDLING_INITIAL_DELAY_MS);
}

From source file:org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.java

License:Apache License

@Override
protected RegionScanner doPostScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
        final Scan scan, final RegionScanner s) throws IOException, SQLException {
    RegionCoprocessorEnvironment env = c.getEnvironment();
    Region region = env.getRegion();
    long ts = scan.getTimeRange().getMax();
    boolean localIndexScan = ScanUtil.isLocalIndex(scan);
    if (ScanUtil.isAnalyzeTable(scan)) {
        byte[] gp_width_bytes = scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_WIDTH_BYTES);
        byte[] gp_per_region_bytes = scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_PER_REGION);
        // Let this throw, as this scan is being done for the sole purpose of collecting stats
        StatisticsCollector statsCollector = StatisticsCollectorFactory.createStatisticsCollector(env,
                region.getRegionInfo().getTable().getNameAsString(), ts, gp_width_bytes, gp_per_region_bytes);
        return collectStats(s, statsCollector, region, scan, env.getConfiguration());
    } else if (ScanUtil.isIndexRebuild(scan)) {
        return rebuildIndices(s, region, scan, env.getConfiguration());
    }
    int offsetToBe = 0;
    if (localIndexScan) {
        /*
         * For local indexes, we need to set an offset on row key expressions to skip
         * the region start key.
         */
        offsetToBe = region.getRegionInfo().getStartKey().length != 0
                ? region.getRegionInfo().getStartKey().length
                : region.getRegionInfo().getEndKey().length;
        ScanUtil.setRowKeyOffset(scan, offsetToBe);
    }
    final int offset = offsetToBe;

    PTable projectedTable = null;
    PTable writeToTable = null;
    byte[][] values = null;
    byte[] descRowKeyTableBytes = scan.getAttribute(UPGRADE_DESC_ROW_KEY);
    boolean isDescRowKeyOrderUpgrade = descRowKeyTableBytes != null;
    if (isDescRowKeyOrderUpgrade) {
        logger.debug("Upgrading row key for " + region.getRegionInfo().getTable().getNameAsString());
        projectedTable = deserializeTable(descRowKeyTableBytes);
        try {
            writeToTable = PTableImpl.makePTable(projectedTable, true);
        } catch (SQLException e) {
            ServerUtil.throwIOException("Upgrade failed", e); // Impossible
        }
        values = new byte[projectedTable.getPKColumns().size()][];
    }
    boolean useProto = false;
    byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD_PROTO);
    useProto = localIndexBytes != null;
    if (localIndexBytes == null) {
        localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
    }
    List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null
            : IndexMaintainer.deserialize(localIndexBytes, useProto);
    MutationList indexMutations = localIndexBytes == null ? new MutationList() : new MutationList(1024);

    RegionScanner theScanner = s;

    byte[] replayMutations = scan.getAttribute(BaseScannerRegionObserver.REPLAY_WRITES);
    byte[] indexUUID = scan.getAttribute(PhoenixIndexCodec.INDEX_UUID);
    byte[] txState = scan.getAttribute(BaseScannerRegionObserver.TX_STATE);
    List<Expression> selectExpressions = null;
    byte[] upsertSelectTable = scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_TABLE);
    boolean isUpsert = false;
    boolean isDelete = false;
    byte[] deleteCQ = null;
    byte[] deleteCF = null;
    byte[] emptyCF = null;
    HTable targetHTable = null;
    boolean isPKChanging = false;
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    if (upsertSelectTable != null) {
        isUpsert = true;
        projectedTable = deserializeTable(upsertSelectTable);
        targetHTable = new HTable(upsertSelectConfig, projectedTable.getPhysicalName().getBytes());
        selectExpressions = deserializeExpressions(
                scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_EXPRS));
        values = new byte[projectedTable.getPKColumns().size()][];
        isPKChanging = ExpressionUtil.isPkPositionChanging(new TableRef(projectedTable), selectExpressions);
    } else {
        byte[] isDeleteAgg = scan.getAttribute(BaseScannerRegionObserver.DELETE_AGG);
        isDelete = isDeleteAgg != null && Bytes.compareTo(PDataType.TRUE_BYTES, isDeleteAgg) == 0;
        if (!isDelete) {
            deleteCF = scan.getAttribute(BaseScannerRegionObserver.DELETE_CF);
            deleteCQ = scan.getAttribute(BaseScannerRegionObserver.DELETE_CQ);
        }
        emptyCF = scan.getAttribute(BaseScannerRegionObserver.EMPTY_CF);
    }
    TupleProjector tupleProjector = null;
    byte[][] viewConstants = null;
    ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
    final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
    final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
    boolean useQualifierAsIndex = EncodedColumnsUtil
            .useQualifierAsIndex(EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan));
    if ((localIndexScan && !isDelete && !isDescRowKeyOrderUpgrade) || (j == null && p != null)) {
        if (dataColumns != null) {
            tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
            viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
        }
        ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
        theScanner = getWrappedScanner(c, theScanner, offset, scan, dataColumns, tupleProjector, region,
                indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr,
                useQualifierAsIndex);
    }

    if (j != null) {
        theScanner = new HashJoinRegionScanner(theScanner, p, j, ScanUtil.getTenantId(scan), env,
                useQualifierAsIndex, useNewValueColumnQualifier);
    }

    int maxBatchSize = 0;
    long maxBatchSizeBytes = 0L;
    MutationList mutations = new MutationList();
    boolean needToWrite = false;
    Configuration conf = env.getConfiguration();
    long flushSize = region.getTableDesc().getMemStoreFlushSize();

    if (flushSize <= 0) {
        flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
                HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
    }

    /**
     * Slow down writes if the memstore size is more than
     * (hbase.hregion.memstore.block.multiplier - 1) times hbase.hregion.memstore.flush.size
     * bytes. This avoids a flush storm to HDFS in cases like index building, where reads and
     * writes happen across all the table regions on the server.
     */
    final long blockingMemStoreSize = flushSize * (conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER,
            HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER) - 1);

    boolean buildLocalIndex = indexMaintainers != null && dataColumns == null && !localIndexScan;
    if (buildLocalIndex) {
        checkForLocalIndexColumnFamilies(region, indexMaintainers);
    }
    if (isDescRowKeyOrderUpgrade || isDelete || isUpsert || (deleteCQ != null && deleteCF != null)
            || emptyCF != null || buildLocalIndex) {
        needToWrite = true;
        maxBatchSize = conf.getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
        mutations = new MutationList(Ints.saturatedCast(maxBatchSize + maxBatchSize / 10));
        maxBatchSizeBytes = conf.getLong(MUTATE_BATCH_SIZE_BYTES_ATTRIB,
                QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE_BYTES);
    }
    Aggregators aggregators = ServerAggregators
            .deserialize(scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS), conf);
    Aggregator[] rowAggregators = aggregators.getAggregators();
    boolean hasMore;
    boolean hasAny = false;
    Pair<Integer, Integer> minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan);
    Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
    if (logger.isDebugEnabled()) {
        logger.debug(LogUtil.addCustomAnnotations(
                "Starting ungrouped coprocessor scan " + scan + " " + region.getRegionInfo(),
                ScanUtil.getCustomAnnotations(scan)));
    }
    int rowCount = 0;
    final RegionScanner innerScanner = theScanner;
    boolean useIndexProto = true;
    byte[] indexMaintainersPtr = scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD);
    // for backward compatibility, fall back to looking it up by the old attribute
    if (indexMaintainersPtr == null) {
        indexMaintainersPtr = scan.getAttribute(PhoenixIndexCodec.INDEX_MD);
        useIndexProto = false;
    }
    boolean acquiredLock = false;
    boolean incrScanRefCount = false;
    try {
        if (needToWrite) {
            synchronized (lock) {
                if (isRegionClosingOrSplitting) {
                    throw new IOException(
                            "Temporarily unable to write from scan because region is closing or splitting");
                }
                scansReferenceCount++;
                incrScanRefCount = true;
                lock.notifyAll();
            }
        }
        region.startRegionOperation();
        acquiredLock = true;
        synchronized (innerScanner) {
            do {
                List<Cell> results = useQualifierAsIndex
                        ? new EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(),
                                minMaxQualifiers.getSecond(), encodingScheme)
                        : new ArrayList<Cell>();
                // Results are potentially returned even when the return value of s.next is false
                // since this is an indication of whether or not there are more values after the
                // ones returned
                hasMore = innerScanner.nextRaw(results);
                if (!results.isEmpty()) {
                    rowCount++;
                    result.setKeyValues(results);
                    if (isDescRowKeyOrderUpgrade) {
                        Arrays.fill(values, null);
                        Cell firstKV = results.get(0);
                        RowKeySchema schema = projectedTable.getRowKeySchema();
                        int maxOffset = schema.iterator(firstKV.getRowArray(), firstKV.getRowOffset() + offset,
                                firstKV.getRowLength(), ptr);
                        for (int i = 0; i < schema.getFieldCount(); i++) {
                            Boolean hasValue = schema.next(ptr, i, maxOffset);
                            if (hasValue == null) {
                                break;
                            }
                            Field field = schema.getField(i);
                            if (field.getSortOrder() == SortOrder.DESC) {
                                // Special case for re-writing DESC ARRAY, as the actual byte value needs to change in this case
                                if (field.getDataType().isArrayType()) {
                                    field.getDataType().coerceBytes(ptr, null, field.getDataType(),
                                            field.getMaxLength(), field.getScale(), field.getSortOrder(),
                                            field.getMaxLength(), field.getScale(), field.getSortOrder(), true); // force to use correct separator byte
                                }
                                // Special case for re-writing DESC CHAR or DESC BINARY, to force the re-writing of trailing space characters
                                else if (field.getDataType() == PChar.INSTANCE
                                        || field.getDataType() == PBinary.INSTANCE) {
                                    int len = ptr.getLength();
                                    while (len > 0
                                            && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
                                        len--;
                                    }
                                    ptr.set(ptr.get(), ptr.getOffset(), len);
                                    // Special case for re-writing DESC FLOAT and DOUBLE, as they're not inverted like they should be (PHOENIX-2171)
                                } else if (field.getDataType() == PFloat.INSTANCE
                                        || field.getDataType() == PDouble.INSTANCE) {
                                    byte[] invertedBytes = SortOrder.invert(ptr.get(), ptr.getOffset(),
                                            ptr.getLength());
                                    ptr.set(invertedBytes);
                                }
                            } else if (field.getDataType() == PBinary.INSTANCE) {
                                // Remove trailing space characters so that the setValues call below will replace them
                                // with the correct zero byte character. Note this is somewhat dangerous as these
                                // could be legit, but I don't know what the alternative is.
                                int len = ptr.getLength();
                                while (len > 0
                                        && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
                                    len--;
                                }
                                ptr.set(ptr.get(), ptr.getOffset(), len);
                            }
                            values[i] = ptr.copyBytes();
                        }
                        writeToTable.newKey(ptr, values);
                        if (Bytes.compareTo(firstKV.getRowArray(), firstKV.getRowOffset() + offset,
                                firstKV.getRowLength(), ptr.get(), ptr.getOffset() + offset,
                                ptr.getLength()) == 0) {
                            continue;
                        }
                        byte[] newRow = ByteUtil.copyKeyBytesIfNecessary(ptr);
                        if (offset > 0) { // for local indexes (prepend region start key)
                            byte[] newRowWithOffset = new byte[offset + newRow.length];
                            System.arraycopy(firstKV.getRowArray(), firstKV.getRowOffset(), newRowWithOffset, 0,
                                    offset);
                            System.arraycopy(newRow, 0, newRowWithOffset, offset, newRow.length);
                            newRow = newRowWithOffset;
                        }
                        byte[] oldRow = Bytes.copy(firstKV.getRowArray(), firstKV.getRowOffset(),
                                firstKV.getRowLength());
                        for (Cell cell : results) {
                            // Copy existing cell but with new row key
                            Cell newCell = new KeyValue(newRow, 0, newRow.length, cell.getFamilyArray(),
                                    cell.getFamilyOffset(), cell.getFamilyLength(), cell.getQualifierArray(),
                                    cell.getQualifierOffset(), cell.getQualifierLength(), cell.getTimestamp(),
                                    KeyValue.Type.codeToType(cell.getTypeByte()), cell.getValueArray(),
                                    cell.getValueOffset(), cell.getValueLength());
                            switch (KeyValue.Type.codeToType(cell.getTypeByte())) {
                            case Put:
                                // If Put, point delete old Put
                                Delete del = new Delete(oldRow);
                                del.addDeleteMarker(new KeyValue(cell.getRowArray(), cell.getRowOffset(),
                                        cell.getRowLength(), cell.getFamilyArray(), cell.getFamilyOffset(),
                                        cell.getFamilyLength(), cell.getQualifierArray(),
                                        cell.getQualifierOffset(), cell.getQualifierLength(),
                                        cell.getTimestamp(), KeyValue.Type.Delete, ByteUtil.EMPTY_BYTE_ARRAY, 0,
                                        0));
                                mutations.add(del);

                                Put put = new Put(newRow);
                                put.add(newCell);
                                mutations.add(put);
                                break;
                            case Delete:
                            case DeleteColumn:
                            case DeleteFamily:
                            case DeleteFamilyVersion:
                                Delete delete = new Delete(newRow);
                                delete.addDeleteMarker(newCell);
                                mutations.add(delete);
                                break;
                            }
                        }
                    } else if (buildLocalIndex) {
                        for (IndexMaintainer maintainer : indexMaintainers) {
                            if (!results.isEmpty()) {
                                result.getKey(ptr);
                                ValueGetter valueGetter = maintainer.createGetterFromKeyValues(
                                        ImmutableBytesPtr.copyBytesIfNecessary(ptr), results);
                                Put put = maintainer.buildUpdateMutation(kvBuilder, valueGetter, ptr,
                                        results.get(0).getTimestamp(),
                                        env.getRegion().getRegionInfo().getStartKey(),
                                        env.getRegion().getRegionInfo().getEndKey());
                                indexMutations.add(put);
                            }
                        }
                        result.setKeyValues(results);
                    } else if (isDelete) {
                        // FIXME: the version of the Delete constructor without the lock
                        // args was introduced in 0.94.4, thus if we try to use it here
                        // we can no longer use the 0.94.2 version of the client.
                        Cell firstKV = results.get(0);
                        Delete delete = new Delete(firstKV.getRowArray(), firstKV.getRowOffset(),
                                firstKV.getRowLength(), ts);
                        if (replayMutations != null) {
                            delete.setAttribute(REPLAY_WRITES, replayMutations);
                        }
                        mutations.add(delete);
                        // force Tephra to ignore this delete
                        delete.setAttribute(PhoenixTransactionContext.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
                    } else if (isUpsert) {
                        Arrays.fill(values, null);
                        int bucketNumOffset = 0;
                        if (projectedTable.getBucketNum() != null) {
                            values[0] = new byte[] { 0 };
                            bucketNumOffset = 1;
                        }
                        int i = bucketNumOffset;
                        List<PColumn> projectedColumns = projectedTable.getColumns();
                        for (; i < projectedTable.getPKColumns().size(); i++) {
                            Expression expression = selectExpressions.get(i - bucketNumOffset);
                            if (expression.evaluate(result, ptr)) {
                                values[i] = ptr.copyBytes();
                                // If SortOrder from expression in SELECT doesn't match the
                                // column being projected into then invert the bits.
                                if (expression.getSortOrder() != projectedColumns.get(i).getSortOrder()) {
                                    SortOrder.invert(values[i], 0, values[i], 0, values[i].length);
                                }
                            } else {
                                values[i] = ByteUtil.EMPTY_BYTE_ARRAY;
                            }
                        }
                        projectedTable.newKey(ptr, values);
                        PRow row = projectedTable.newRow(kvBuilder, ts, ptr, false);
                        for (; i < projectedColumns.size(); i++) {
                            Expression expression = selectExpressions.get(i - bucketNumOffset);
                            if (expression.evaluate(result, ptr)) {
                                PColumn column = projectedColumns.get(i);
                                if (!column.getDataType().isSizeCompatible(ptr, null, expression.getDataType(),
                                        expression.getSortOrder(), expression.getMaxLength(),
                                        expression.getScale(), column.getMaxLength(), column.getScale())) {
                                    throw new DataExceedsCapacityException(column.getDataType(),
                                            column.getMaxLength(), column.getScale(),
                                            column.getName().getString(), ptr);
                                }
                                column.getDataType().coerceBytes(ptr, null, expression.getDataType(),
                                        expression.getMaxLength(), expression.getScale(),
                                        expression.getSortOrder(), column.getMaxLength(), column.getScale(),
                                        column.getSortOrder(), projectedTable.rowKeyOrderOptimizable());
                                byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
                                row.setValue(column, bytes);
                            }
                        }
                        for (Mutation mutation : row.toRowMutations()) {
                            if (replayMutations != null) {
                                mutation.setAttribute(REPLAY_WRITES, replayMutations);
                            }
                            mutations.add(mutation);
                        }
                        for (i = 0; i < selectExpressions.size(); i++) {
                            selectExpressions.get(i).reset();
                        }
                    } else if (deleteCF != null && deleteCQ != null) {
                        // No need to search for the delete column, since we project only it
                        // when no empty key value is being set
                        if (emptyCF == null || result.getValue(deleteCF, deleteCQ) != null) {
                            Delete delete = new Delete(results.get(0).getRowArray(),
                                    results.get(0).getRowOffset(), results.get(0).getRowLength());
                            delete.deleteColumns(deleteCF, deleteCQ, ts);
                            // force Tephra to ignore this delete
                            delete.setAttribute(PhoenixTransactionContext.TX_ROLLBACK_ATTRIBUTE_KEY,
                                    new byte[0]);
                            mutations.add(delete);
                        }
                    }
                    if (emptyCF != null) {
                        /*
                         * If we've specified an emptyCF, then we need to insert an empty
                         * key value "retroactively" for any key value that is visible at
                         * the timestamp that the DDL was issued. Key values that are not
                         * visible at this timestamp will not ever be projected up to
                         * scans past this timestamp, so don't need to be considered.
                         * We insert one empty key value per row per timestamp.
                         */
                        Set<Long> timeStamps = Sets.newHashSetWithExpectedSize(results.size());
                        for (Cell kv : results) {
                            long kvts = kv.getTimestamp();
                            if (!timeStamps.contains(kvts)) {
                                Put put = new Put(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength());
                                put.add(emptyCF, QueryConstants.EMPTY_COLUMN_BYTES, kvts,
                                        ByteUtil.EMPTY_BYTE_ARRAY);
                                mutations.add(put);
                            }
                        }
                    }
                    if (ServerUtil.readyToCommit(mutations.size(), mutations.byteSize(), maxBatchSize,
                            maxBatchSizeBytes)) {
                        commit(region, mutations, indexUUID, blockingMemStoreSize, indexMaintainersPtr, txState,
                                targetHTable, useIndexProto, isPKChanging);
                        mutations.clear();
                    }
                    // Commit in batches based on UPSERT_BATCH_SIZE_BYTES_ATTRIB in config

                    if (ServerUtil.readyToCommit(indexMutations.size(), indexMutations.byteSize(), maxBatchSize,
                            maxBatchSizeBytes)) {
                        setIndexAndTransactionProperties(indexMutations, indexUUID, indexMaintainersPtr,
                                txState, useIndexProto);
                        commitBatch(region, indexMutations, blockingMemStoreSize);
                        indexMutations.clear();
                    }
                    aggregators.aggregate(rowAggregators, result);
                    hasAny = true;
                }
            } while (hasMore);
            if (!mutations.isEmpty()) {
                commit(region, mutations, indexUUID, blockingMemStoreSize, indexMaintainersPtr, txState,
                        targetHTable, useIndexProto, isPKChanging);
                mutations.clear();
            }

            if (!indexMutations.isEmpty()) {
                commitBatch(region, indexMutations, blockingMemStoreSize);
                indexMutations.clear();
            }
        }
    } finally {
        if (needToWrite && incrScanRefCount) {
            synchronized (lock) {
                scansReferenceCount--;
                if (scansReferenceCount < 0) {
                    logger.warn(
                            "Scan reference count went below zero. Something isn't correct. Resetting it back to zero");
                    scansReferenceCount = 0;
                }
                lock.notifyAll();
            }
        }
        try {
            if (targetHTable != null) {
                targetHTable.close();
            }
        } finally {
            try {
                innerScanner.close();
            } finally {
                if (acquiredLock)
                    region.closeRegionOperation();
            }
        }
    }
    if (logger.isDebugEnabled()) {
        logger.debug(LogUtil.addCustomAnnotations(
                "Finished scanning " + rowCount + " rows for ungrouped coprocessor scan " + scan,
                ScanUtil.getCustomAnnotations(scan)));
    }

    final boolean hadAny = hasAny;
    KeyValue keyValue = null;
    if (hadAny) {
        byte[] value = aggregators.toBytes(rowAggregators);
        keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN,
                AGG_TIMESTAMP, value, 0, value.length);
    }
    final KeyValue aggKeyValue = keyValue;

    RegionScanner scanner = new BaseRegionScanner(innerScanner) {
        private boolean done = !hadAny;

        @Override
        public boolean isFilterDone() {
            return done;
        }

        @Override
        public boolean next(List<Cell> results) throws IOException {
            if (done)
                return false;
            done = true;
            results.add(aggKeyValue);
            return false;
        }

        @Override
        public long getMaxResultSize() {
            return scan.getMaxResultSize();
        }
    };
    return scanner;

}

From source file:org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.java

License:Apache License

private RegionScanner rebuildIndices(final RegionScanner innerScanner, final Region region, final Scan scan,
        Configuration config) throws IOException {
    byte[] indexMetaData = scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD);
    boolean useProto = true;
    // for backward compatibility, fall back to looking it up by the old attribute
    if (indexMetaData == null) {
        useProto = false;
        indexMetaData = scan.getAttribute(PhoenixIndexCodec.INDEX_MD);
    }
    boolean hasMore;
    int rowCount = 0;
    try {
        int maxBatchSize = config.getInt(MUTATE_BATCH_SIZE_ATTRIB,
                QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
        long maxBatchSizeBytes = config.getLong(MUTATE_BATCH_SIZE_BYTES_ATTRIB,
                QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE_BYTES);
        MutationList mutations = new MutationList(maxBatchSize);
        region.startRegionOperation();
        byte[] uuidValue = ServerCacheClient.generateId();
        synchronized (innerScanner) {
            do {
                List<Cell> results = new ArrayList<Cell>();
                hasMore = innerScanner.nextRaw(results);
                if (!results.isEmpty()) {
                    Put put = null;
                    Delete del = null;
                    for (Cell cell : results) {

                        if (KeyValue.Type.codeToType(cell.getTypeByte()) == KeyValue.Type.Put) {
                            if (put == null) {
                                put = new Put(CellUtil.cloneRow(cell));
                                put.setAttribute(useProto ? PhoenixIndexCodec.INDEX_PROTO_MD
                                        : PhoenixIndexCodec.INDEX_MD, indexMetaData);
                                put.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                                put.setAttribute(REPLAY_WRITES, REPLAY_ONLY_INDEX_WRITES);
                                mutations.add(put);
                                // Since we're replaying existing mutations, it makes no sense to write them to the wal
                                put.setDurability(Durability.SKIP_WAL);
                            }
                            put.add(cell);
                        } else {
                            if (del == null) {
                                del = new Delete(CellUtil.cloneRow(cell));
                                del.setAttribute(useProto ? PhoenixIndexCodec.INDEX_PROTO_MD
                                        : PhoenixIndexCodec.INDEX_MD, indexMetaData);
                                del.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                                del.setAttribute(REPLAY_WRITES, REPLAY_ONLY_INDEX_WRITES);
                                mutations.add(del);
                                // Since we're replaying existing mutations, it makes no sense to write them to the wal
                                del.setDurability(Durability.SKIP_WAL);
                            }
                            del.addDeleteMarker(cell);
                        }
                    }
                    if (ServerUtil.readyToCommit(mutations.size(), mutations.byteSize(), maxBatchSize,
                            maxBatchSizeBytes)) {
                        region.batchMutate(mutations.toArray(new Mutation[mutations.size()]),
                                HConstants.NO_NONCE, HConstants.NO_NONCE);
                        uuidValue = ServerCacheClient.generateId();
                        mutations.clear();
                    }
                    rowCount++;
                }

            } while (hasMore);
            if (!mutations.isEmpty()) {
                region.batchMutate(mutations.toArray(new Mutation[mutations.size()]), HConstants.NO_NONCE,
                        HConstants.NO_NONCE);
            }
        }
    } catch (IOException e) {
        logger.error("IOException during rebuilding: " + Throwables.getStackTraceAsString(e));
        throw e;
    } finally {
        region.closeRegionOperation();
    }
    byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(rowCount));
    final KeyValue aggKeyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
            SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);

    RegionScanner scanner = new BaseRegionScanner(innerScanner) {
        @Override
        public HRegionInfo getRegionInfo() {
            return region.getRegionInfo();
        }

        @Override
        public boolean isFilterDone() {
            return true;
        }

        @Override
        public void close() throws IOException {
            innerScanner.close();
        }

        @Override
        public boolean next(List<Cell> results) throws IOException {
            results.add(aggKeyValue);
            return false;
        }

        @Override
        public long getMaxResultSize() {
            return scan.getMaxResultSize();
        }
    };
    return scanner;
}

From source file:org.apache.phoenix.hbase.index.Indexer.java

License:Apache License

/**
 * Extracts the slow call threshold values from the configuration.
 */
private void setSlowThresholds(Configuration c) {
    slowIndexPrepareThreshold = c.getLong(INDEXER_INDEX_WRITE_SLOW_THRESHOLD_KEY,
            INDEXER_INDEX_WRITE_SLOW_THRESHOLD_DEFAULT);
    slowIndexWriteThreshold = c.getLong(INDEXER_INDEX_PREPARE_SLOW_THRESHOLD_KEY,
            INDEXER_INDEX_PREPARE_SLOW_THREHSOLD_DEFAULT);
    slowPreWALRestoreThreshold = c.getLong(INDEXER_PRE_WAL_RESTORE_SLOW_THRESHOLD_KEY,
            INDEXER_PRE_WAL_RESTORE_SLOW_THRESHOLD_DEFAULT);
    slowPostOpenThreshold = c.getLong(INDEXER_POST_OPEN_SLOW_THRESHOLD_KEY,
            INDEXER_POST_OPEN_SLOW_THRESHOLD_DEFAULT);
    slowPreIncrementThreshold = c.getLong(INDEXER_PRE_INCREMENT_SLOW_THRESHOLD_KEY,
            INDEXER_PRE_INCREMENT_SLOW_THRESHOLD_DEFAULT);
}
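
Thresholds like these are usually compared against measured call durations; a minimal sketch of that consumption pattern (the property name, doIndexWrite(), and LOG are placeholders, not Phoenix API):

long threshold = conf.getLong("example.slow.call.threshold.ms", 2000L);
long start = System.currentTimeMillis();
doIndexWrite(); // hypothetical slow call being timed
long duration = System.currentTimeMillis() - start;
if (duration > threshold) {
    LOG.warn("Call took " + duration + " ms, over the " + threshold + " ms threshold");
}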