Example usage for java.util NavigableMap entrySet

List of usage examples for java.util NavigableMap entrySet

Introduction

On this page you can find example usage for java.util.NavigableMap.entrySet().

Prototype

Set<Map.Entry<K, V>> entrySet();

Document

Returns a Set view of the mappings contained in this map.
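
Before the longer real-world examples below, here is a minimal, self-contained sketch (class name and sample values are illustrative, not taken from any of the sources below). It shows that iterating entrySet() on a NavigableMap such as TreeMap visits the mappings in the map's key order, and that the same applies to navigable views such as descendingMap().

import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

public class NavigableMapEntrySetExample {
    public static void main(String[] args) {
        // TreeMap is the standard java.util implementation of NavigableMap.
        NavigableMap<String, Integer> scores = new TreeMap<>();
        scores.put("bravo", 2);
        scores.put("charlie", 3);
        scores.put("alpha", 1);

        // entrySet() returns a Set view of the mappings; for a NavigableMap
        // the iteration order follows the map's key ordering.
        for (Map.Entry<String, Integer> e : scores.entrySet()) {
            System.out.println(e.getKey() + " -> " + e.getValue());
        }
        // Prints: alpha -> 1, bravo -> 2, charlie -> 3

        // Navigable views are NavigableMaps too, so the same iteration works
        // on them, here in descending key order.
        for (Map.Entry<String, Integer> e : scores.descendingMap().entrySet()) {
            System.out.println(e.getKey() + " -> " + e.getValue());
        }
    }
}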

Usage

From source file:com.alibaba.wasp.master.handler.ServerShutdownHandler.java

@Override
public void process() throws IOException {
    final ServerName serverName = this.serverName;
    try {
        NavigableMap<EntityGroupInfo, Result> egis = null;
        while (!this.server.isStopped()) {
            try {
                egis = FMetaReader.getServerUserEntityGroups(server.getConfiguration(), this.serverName);
                break;
            } catch (IOException ioe) {
                LOG.info("Received exception accessing META during server shutdown of " + serverName
                        + ", retrying META read", ioe);
            }
        }
        if (this.server.isStopped()) {
            throw new IOException("Server is stopped");
        }

        // Clean out anything in entityGroups in transition. Being conservative
        // and doing this after log splitting. Could do some states before -- OPENING?
        // OFFLINE? -- and then others after, like CLOSING, that depend on log splitting.
        List<EntityGroupState> entityGroupsInTransition = assignmentManager.processServerShutdown(serverName);
        LOG.info("Reassigning " + ((egis == null) ? 0 : egis.size()) + " entityGroup(s) that "
                + (serverName == null ? "null" : serverName) + " was carrying (skipping "
                + entityGroupsInTransition.size() + " entityGroup(s) that are already in transition)");

        // Iterate entityGroups that were on this server and assign them
        if (egis != null) {
            EntityGroupStates entityGroupStates = assignmentManager.getEntityGroupStates();
            List<EntityGroupInfo> toAssignEntityGroups = new ArrayList<EntityGroupInfo>();
            for (Map.Entry<EntityGroupInfo, Result> e : egis.entrySet()) {
                EntityGroupInfo egi = e.getKey();
                EntityGroupState egit = entityGroupStates.getEntityGroupTransitionState(egi);
                if (processDeadEntityGroup(egi, e.getValue(), assignmentManager, server)) {
                    ServerName addressFromAM = entityGroupStates.getFServerOfEntityGroup(egi);
                    if (addressFromAM != null && !addressFromAM.equals(this.serverName)) {
                        // If this entityGroup is in transition on the dead server, it must be
                        // opening or pending_open, which is covered by AM#processServerShutdown
                        LOG.debug("Skip assigning entityGroup " + egi.getEntityGroupNameAsString()
                                + " because it has been opened in " + addressFromAM.getServerName());
                        continue;
                    }
                    if (egit != null) {
                        if (!egit.isOnServer(serverName) || egit.isClosed() || egit.isOpened()
                                || egit.isSplit()) {
                            // Skip entityGroups that are in transition on another server,
                            // or in state closed/opened/split
                            LOG.info("Skip assigning entityGroup " + egit);
                            continue;
                        }
                        try {
                            // clean zk node
                            LOG.info("Reassigning entityGroup with eg = " + egit
                                    + " and deleting zk node if exists");
                            ZKAssign.deleteNodeFailSilent(server.getZooKeeper(), egi);
                        } catch (KeeperException ke) {
                            this.server.abort("Unexpected ZK exception deleting unassigned node " + egi, ke);
                            return;
                        }
                    }
                    toAssignEntityGroups.add(egi);
                } else if (egit != null) {
                    if (egit.isSplitting() || egit.isSplit()) {
                        // This will happen when the FServer went down and the callback for
                        // the SPLITTING or SPLIT node Deleted event has not yet happened.
                        // In that case, if the EntityGroup was actually split but the FServer
                        // had gone down before completing the split process, then we will not
                        // try to assign the parent EntityGroup again. In that case we should
                        // make the EntityGroup offline and also delete the EntityGroup from EGIT.
                        assignmentManager.entityGroupOffline(egi);
                    } else if ((egit.isClosing() || egit.isPendingClose()) && assignmentManager.getZKTable()
                            .isDisablingOrDisabledTable(egi.getTableNameAsString())) {
                        // If the table was partially disabled and the FServer went down,
                        // we should clear the EGIT and remove the node for the EntityGroup.
                        // The egit that we use may be stale in case the table was in DISABLING
                        // state, but though we did assign, we will not be clearing the znode
                        // in CLOSING state. Doing this will have no harm. See HBASE-5927
                        assignmentManager.deleteClosingOrClosedNode(egi);
                        assignmentManager.entityGroupOffline(egi);
                    } else {
                        LOG.warn("THIS SHOULD NOT HAPPEN: unexpected entityGroup in transition " + egit
                                + " not to be assigned by SSH of server " + serverName);
                    }
                }
            }
            try {
                assignmentManager.assign(toAssignEntityGroups);
            } catch (InterruptedException ie) {
                LOG.error("Caught " + ie + " during round-robin assignment");
                throw new IOException(ie);
            }
        }
    } finally {
        this.deadServers.finish(serverName);
    }
    LOG.info("Finished processing of shutdown of " + serverName);
}

From source file:com.palantir.atlasdb.transaction.impl.SerializableTransaction.java

private void verifyRanges(Transaction ro) {
    // verify each set of reads to ensure they are the same.
    for (String table : rangeEndByTable.keySet()) {
        for (Entry<RangeRequest, byte[]> e : rangeEndByTable.get(table).entrySet()) {
            RangeRequest range = e.getKey();
            byte[] rangeEnd = e.getValue();
            if (rangeEnd.length != 0 && !RangeRequests.isTerminalRow(range.isReverse(), rangeEnd)) {
                range = range.getBuilder()
                        .endRowExclusive(RangeRequests.getNextStartRow(range.isReverse(), rangeEnd)).build();
            }

            final ConcurrentNavigableMap<Cell, byte[]> writes = writesByTable.get(table);
            BatchingVisitableView<RowResult<byte[]>> bv = BatchingVisitableView.of(ro.getRange(table, range));
            NavigableMap<Cell, ByteBuffer> readsInRange = Maps.transformValues(getReadsInRange(table, e, range),
                    new Function<byte[], ByteBuffer>() {
                        @Override
                        public ByteBuffer apply(byte[] input) {
                            return ByteBuffer.wrap(input);
                        }
                    });
            boolean isEqual = bv
                    .transformBatch(new Function<List<RowResult<byte[]>>, List<Entry<Cell, ByteBuffer>>>() {
                        @Override
                        public List<Entry<Cell, ByteBuffer>> apply(List<RowResult<byte[]>> input) {
                            List<Entry<Cell, ByteBuffer>> ret = Lists.newArrayList();
                            for (RowResult<byte[]> row : input) {
                                for (Entry<Cell, byte[]> cell : row.getCells()) {

                                    // NB: We filter our write set out here because our normal SI checking handles this case to ensure the value hasn't changed.
                                    if (writes == null || !writes.containsKey(cell.getKey())) {
                                        ret.add(Maps.immutableEntry(cell.getKey(),
                                                ByteBuffer.wrap(cell.getValue())));
                                    }
                                }
                            }
                            return ret;
                        }
                    }).isEqual(readsInRange.entrySet());
            if (!isEqual) {
                throw TransactionSerializableConflictException.create(table, getTimestamp(),
                        System.currentTimeMillis() - timeCreated);
            }
        }
    }
}

From source file:org.apache.hadoop.hbase.security.access.TestAccessController.java

@Test
public void testGlobalAuthorizationForNewRegisteredRS() throws Exception {
    LOG.debug("Test for global authorization for a new registered RegionServer.");
    MiniHBaseCluster hbaseCluster = TEST_UTIL.getHBaseCluster();

    // Since each RegionServer runs as a different user, add global
    // permissions for the new user.
    String currentUser = User.getCurrent().getShortName();
    String activeUserForNewRs = currentUser + ".hfs." + hbaseCluster.getLiveRegionServerThreads().size();
    grantGlobal(TEST_UTIL, activeUserForNewRs, Permission.Action.ADMIN, Permission.Action.CREATE,
            Permission.Action.READ, Permission.Action.WRITE);

    final HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
    HTableDescriptor htd = new HTableDescriptor(TEST_TABLE2);
    htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
    admin.createTable(htd);

    // Starting a new RegionServer.
    JVMClusterUtil.RegionServerThread newRsThread = hbaseCluster.startRegionServer();
    final HRegionServer newRs = newRsThread.getRegionServer();

    // Move region to the new RegionServer.
    final HTable table = new HTable(TEST_UTIL.getConfiguration(), TEST_TABLE2);
    try {
        NavigableMap<HRegionInfo, ServerName> regions = table.getRegionLocations();
        final Map.Entry<HRegionInfo, ServerName> firstRegion = regions.entrySet().iterator().next();

        AccessTestAction moveAction = new AccessTestAction() {
            @Override
            public Object run() throws Exception {
                admin.move(firstRegion.getKey().getEncodedNameAsBytes(),
                        Bytes.toBytes(newRs.getServerName().getServerName()));
                return null;
            }
        };
        SUPERUSER.runAs(moveAction);

        final int RETRIES_LIMIT = 10;
        int retries = 0;
        while (newRs.getOnlineRegions(TEST_TABLE2).size() < 1 && retries < RETRIES_LIMIT) {
            LOG.debug("Waiting for region to be opened. Already retried " + retries + " times.");
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
            }
            retries++;
            if (retries == RETRIES_LIMIT - 1) {
                fail("Retry exhaust for waiting region to be opened.");
            }
        }
        // Verify write permission for user "admin2" who has the global
        // permissions.
        AccessTestAction putAction = new AccessTestAction() {
            @Override
            public Object run() throws Exception {
                Put put = new Put(Bytes.toBytes("test"));
                put.add(TEST_FAMILY, Bytes.toBytes("qual"), Bytes.toBytes("value"));
                table.put(put);
                return null;
            }
        };
        USER_ADMIN.runAs(putAction);
    } finally {
        table.close();
    }
}

From source file:org.apache.hadoop.hive.llap.tezplugins.LlapTaskSchedulerService.java

private void preemptTasks(int forPriority, int numTasksToPreempt, String[] potentialHosts) {
    Set<String> preemptHosts = null;
    writeLock.lock();
    List<TaskInfo> preemptedTaskList = null;
    try {
        NavigableMap<Integer, TreeSet<TaskInfo>> orderedMap = runningTasks.descendingMap();
        Iterator<Entry<Integer, TreeSet<TaskInfo>>> iterator = orderedMap.entrySet().iterator();
        int preemptedCount = 0;
        while (iterator.hasNext() && preemptedCount < numTasksToPreempt) {
            Entry<Integer, TreeSet<TaskInfo>> entryAtPriority = iterator.next();
            if (entryAtPriority.getKey() > forPriority) {
                if (potentialHosts != null && preemptHosts == null) {
                    preemptHosts = Sets.newHashSet(potentialHosts);
                }
                Iterator<TaskInfo> taskInfoIterator = entryAtPriority.getValue().iterator();
                while (taskInfoIterator.hasNext() && preemptedCount < numTasksToPreempt) {
                    TaskInfo taskInfo = taskInfoIterator.next();
                    if (preemptHosts == null || preemptHosts.contains(taskInfo.assignedNode.getHost())) {
                        // Candidate for preemption.
                        preemptedCount++;
                        LOG.info("preempting {} for task at priority {} with potentialHosts={}", taskInfo,
                                forPriority, potentialHosts == null ? "" : Arrays.toString(potentialHosts));
                        taskInfo.setPreemptedInfo(clock.getTime());
                        if (preemptedTaskList == null) {
                            preemptedTaskList = new LinkedList<>();
                        }
                        dagStats.registerTaskPreempted(taskInfo.assignedNode.getHost());
                        preemptedTaskList.add(taskInfo);
                        registerPendingPreemption(taskInfo.assignedNode.getHost());
                        // Remove from the runningTaskList
                        taskInfoIterator.remove();
                    }
                }

                // Remove entire priority level if it's been emptied.
                if (entryAtPriority.getValue().isEmpty()) {
                    iterator.remove();
                }
            } else {
                // No tasks qualify as preemptable
                LOG.debug("No tasks qualify as killable to schedule tasks at priority {}. Current priority={}",
                        forPriority, entryAtPriority.getKey());
                break;
            }
        }
    } finally {
        writeLock.unlock();
    }
    // Send out the preempted request outside of the lock.
    if (preemptedTaskList != null) {
        for (TaskInfo taskInfo : preemptedTaskList) {
            LOG.info("Preempting task {}", taskInfo);
            getContext().preemptContainer(taskInfo.containerId);
            // Preemption will finally be registered as a deallocateTask as a result of preemptContainer
            // That resets preemption info and allows additional tasks to be pre-empted if required.
        }
    }
    // The schedule loop will be triggered again when the deallocateTask request comes in for the
    // preempted task.
}

From source file:org.apache.hadoop.hbase.extended.loadbalance.strategies.hotspot.HotSpotLoadBalancer.java

@Override
public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterState) {
    initParameters();
    /**
     * <pre>
     * We need at least two priority queues:
     * a) HotSpot regions with their load as the moving criterion (max priority queue)
     * b) Non-hotspot regions with their loads (min priority queue)
     * 
     * Further, we need to iterate over these queues and decrease the load, so we
     * need a data structure to build these queues, and lastly we need to return
     * the region plan.
     * </pre>
     */

    LOG.debug("#################Came in the new Balancer Code and the cluster status is = " + this.status);
    long startTime = System.currentTimeMillis();
    int numServers = clusterState.size();
    if (numServers == 0) {
        LOG.info("numServers=0 so skipping load balancing");
        return null;
    }

    NavigableMap<HotSpotServerAndLoad, List<HotSpotRegionLoad>> regionServerAndServerLoadMap = new TreeMap<HotSpotServerAndLoad, List<HotSpotRegionLoad>>();
    PriorityQueue<HotSpotServerAndLoad> hotspotRegionServers = new PriorityQueue<HotSpotServerAndLoad>(
            numServers, HotSpotServerAndLoad.DESC_LOAD);
    PriorityQueue<HotSpotServerAndLoad> nonHotspotRegionServers = new PriorityQueue<HotSpotServerAndLoad>(
            numServers, HotSpotServerAndLoad.ASC_LOAD);
    HashBiMap<HRegionInfo, HotSpotRegionLoad> allRegionsLoadBiMap = HashBiMap.create();
    LOG.debug("#################clusterState=" + clusterState);
    double normalisedTotalLoadOfAllRegions = initRegionLoadMapsBasedOnInput(clusterState,
            regionServerAndServerLoadMap, allRegionsLoadBiMap);
    LOG.debug("#################normalisedTotalLoadOfAllRegions=" + normalisedTotalLoadOfAllRegions);
    // Check if we even need to do any load balancing
    double average = normalisedTotalLoadOfAllRegions / numServers; // for logging
    // HBASE-3681 check sloppiness first
    LOG.debug("######################## final regionServerAndServerLoadMap == " + regionServerAndServerLoadMap);
    if (!loadBalancingNeeded(numServers, regionServerAndServerLoadMap, normalisedTotalLoadOfAllRegions,
            average)) {
        // we do not need load balancing
        return null;
    }
    double minLoad = normalisedTotalLoadOfAllRegions / numServers;
    double maxLoad = normalisedTotalLoadOfAllRegions % numServers == 0 ? minLoad : minLoad + 1;
    // as we now have to balance stuff, init PQ's
    LOG.debug(String.format("#################minLoad =%s,maxLoad= %s", minLoad, maxLoad));
    for (Map.Entry<HotSpotServerAndLoad, List<HotSpotRegionLoad>> item : regionServerAndServerLoadMap
            .entrySet()) {
        HotSpotServerAndLoad serverLoad = item.getKey();
        if (serverLoad.isHotSpot()) {

            hotspotRegionServers.add(serverLoad);
        } else {
            if (serverLoad.getLoad() < maxLoad) {
                nonHotspotRegionServers.add(serverLoad);
            }
        }
    }
    // Using to check balance result.
    StringBuilder strBalanceParam = new StringBuilder();
    strBalanceParam.append("Balance parameter: numRegions=").append(normalisedTotalLoadOfAllRegions)
            .append(", numServers=").append(numServers).append(", max=").append(maxLoad).append(", min=")
            .append(minLoad);
    LOG.debug(strBalanceParam.toString());
    List<RegionPlan> regionsToReturn = new ArrayList<RegionPlan>();

    while (hotspotRegionServers.size() > 0 && nonHotspotRegionServers.size() > 0) {
        HotSpotServerAndLoad serverToBalance = hotspotRegionServers.poll();
        LOG.debug(String.format("#################serverToBalance =%s",
                serverToBalance.getServerName().getServerName()));
        // get the least loaded non-hotspot regions of this server
        List<HotSpotRegionLoad> regionList = regionServerAndServerLoadMap.get(serverToBalance);
        // assume it to be sorted asc.
        if (regionList.size() > 0) {
            HotSpotRegionLoad regionToMove = regionList.remove(0);
            HRegionInfo regionMoveInfo = allRegionsLoadBiMap.inverse().get(regionToMove);

            /*
             * regionMoveInfo can be null in case the load map returns us
             * the root and meta regions along with the movable regions. But
             * as the clusterState which is passed to us does not contain
             * these regions, we can have a situation where
             * regionServerAndServerLoadMap contains some regions which are
             * not present in the allRegionsLoadBiMap.
             */
            if (regionMoveInfo != null && !regionMoveInfo.isMetaRegion() && !regionMoveInfo.isRootRegion()
                    && !regionMoveInfo.isMetaTable() && regionToMove.isRegionHotspot()) {
                LOG.debug(String.format(
                        "#################Came to move the region regionMoveInfo=%s;; regionToMove=%s ",
                        regionMoveInfo, regionToMove));
                // move out.
                HotSpotServerAndLoad destinationServer = nonHotspotRegionServers.poll();

                RegionPlan rpl = new RegionPlan(allRegionsLoadBiMap.inverse().get(regionToMove),
                        serverToBalance.getServerName(), destinationServer.getServerName());
                regionsToReturn.add(rpl);
                serverToBalance.modifyLoad(regionToMove.getLoad());
                destinationServer.modifyLoad(-1 * regionToMove.getLoad());
                // re-enter them into the queues if they satisfy the conditions
                if (serverToBalance.getLoad() > minLoad) {
                    hotspotRegionServers.offer(serverToBalance);
                }
                if (destinationServer.getLoad() < maxLoad) {
                    nonHotspotRegionServers.offer(destinationServer);
                }
            }
        }
    }
    LOG.info("Total Time taken to balance = " + (System.currentTimeMillis() - startTime));
    LOG.info(String.format("#################regionsToReturn=%s ", regionsToReturn));
    return regionsToReturn;
}

From source file:com.eucalyptus.tests.awssdk.S3ListMpuTests.java

@Test
public void keyMarkerUploadIdMarker() throws Exception {
    testInfo(this.getClass().getSimpleName() + " - keyMarkerUploadIdMarker");

    try {
        int numKeys = 3 + random.nextInt(3); // 3-5 keys
        int numUploads = 3 + random.nextInt(3); // 3-5 uploads

        print("Number of keys: " + numKeys);
        print("Number of uploads per key: " + numUploads);

        // Generate some mpus
        TreeMap<String, List<String>> keyUploadIdMap = initiateMpusForMultipleKeys(s3ClientA, accountA, numKeys,
                numUploads, new String());

        // Starting with every key and upload ID in ascending order, list the mpus using the pair and verify the results.
        for (Map.Entry<String, List<String>> mapEntry : keyUploadIdMap.entrySet()) {

            // Compute what the sorted mpus should look like
            NavigableMap<String, List<String>> tailMap = keyUploadIdMap.tailMap(mapEntry.getKey(), false);

            for (int i = 0; i < numUploads; i++) {
                // Compute what the sorted uploadIds should look like for this key
                List<String> tailList = mapEntry.getValue().subList(i + 1, numUploads);

                // List mpus using the key marker and upload ID marker and verify
                MultipartUploadListing listing = listMpu(s3ClientA, accountA, bucketName, mapEntry.getKey(),
                        mapEntry.getValue().get(i), null, null, null, false);
                assertTrue(
                        "Expected " + ((tailMap.size() * numUploads) + (numUploads - i - 1))
                                + " mpu listings, but got " + listing.getMultipartUploads().size(),
                        ((tailMap.size() * numUploads) + (numUploads - i - 1)) == listing.getMultipartUploads()
                                .size());

                Iterator<MultipartUpload> mpuIterator = listing.getMultipartUploads().iterator();

                for (String uploadId : tailList) {
                    MultipartUpload mpu = mpuIterator.next();
                    assertTrue("Expected key to be " + mapEntry.getKey() + ", but got " + mpu.getKey(),
                            mpu.getKey().equals(mapEntry.getKey()));
                    assertTrue("Expected upload ID to be " + uploadId + ", but got " + mpu.getUploadId(),
                            mpu.getUploadId().equals(uploadId));
                    verifyCommonElements(mpu);
                }

                for (Entry<String, List<String>> tailMapEntry : tailMap.entrySet()) {
                    for (String uploadId : tailMapEntry.getValue()) {
                        MultipartUpload mpu = mpuIterator.next();
                        assertTrue("Expected key to be " + tailMapEntry.getKey() + ", but got " + mpu.getKey(),
                                mpu.getKey().equals(tailMapEntry.getKey()));
                        assertTrue("Expected upload ID to be " + uploadId + ", but got " + mpu.getUploadId(),
                                mpu.getUploadId().equals(uploadId));
                        verifyCommonElements(mpu);
                    }
                }

                assertTrue("Expected mpu iterator to be empty", !mpuIterator.hasNext());
            }
        }
    } catch (AmazonServiceException ase) {
        printException(ase);
        assertThat(false, "Failed to run keyMarkerUploadIdMarker");
    }
}

From source file:org.voltdb.compiler.VoltCompiler.java

/**
 * Common code for schema loading shared by loadSchema and compileDatabaseNode
 *
 * @param db the database entry in the catalog
 * @param hsql an interface to the hsql frontend, initialized and potentially reused by the caller.
 * @param voltDdlTracker non-standard VoltDB schema annotations, initially those from a project file
 * @param schemaReaders the ddl input files
 * @param export optional export connector configuration (from the project file)
 * @param classDependencies optional additional jar files required by procedures
 * @param whichProcs indicates which ddl-defined procedures to load: none, single-statement, or all
 * @param jarOutput The in-memory jar to populate or null if the caller doesn't provide one.
 */
private void compileDatabase(Database db, HSQLInterface hsql, VoltDDLElementTracker voltDdlTracker,
        VoltCompilerReader cannonicalDDLIfAny, Database previousDBIfAny, List<VoltCompilerReader> schemaReaders,
        ExportType export, Collection<Class<?>> classDependencies, DdlProceduresToLoad whichProcs,
        InMemoryJarfile jarOutput) throws VoltCompilerException {
    // Actually parse and handle all the DDL
    // DDLCompiler also provides partition descriptors for DDL PARTITION
    // and REPLICATE statements.
    final DDLCompiler ddlcompiler = new DDLCompiler(this, hsql, voltDdlTracker, m_classLoader);

    if (cannonicalDDLIfAny != null) {
        // add the file object's path to the list of files for the jar
        m_ddlFilePaths.put(cannonicalDDLIfAny.getName(), cannonicalDDLIfAny.getPath());

        ddlcompiler.loadSchema(cannonicalDDLIfAny, db, whichProcs);
    }

    m_dirtyTables.clear();

    for (final VoltCompilerReader schemaReader : schemaReaders) {
        String origFilename = m_currentFilename;
        try {
            if (m_currentFilename == null || m_currentFilename.equals(NO_FILENAME))
                m_currentFilename = schemaReader.getName();

            // add the file object's path to the list of files for the jar
            m_ddlFilePaths.put(schemaReader.getName(), schemaReader.getPath());

            ddlcompiler.loadSchema(schemaReader, db, whichProcs);
        } finally {
            m_currentFilename = origFilename;
        }
    }

    ddlcompiler.compileToCatalog(db);

    // Actually parse and handle all the partitions
    // this needs to happen before procedures are compiled
    String msg = "In database, ";
    final CatalogMap<Table> tables = db.getTables();
    for (Table table : tables) {
        String tableName = table.getTypeName();

        if (voltDdlTracker.m_partitionMap.containsKey(tableName.toLowerCase())) {
            String colName = voltDdlTracker.m_partitionMap.get(tableName.toLowerCase());
            // A null column name indicates a replicated table. Ignore it here
            // because it defaults to replicated in the catalog.
            if (colName != null) {
                assert (tables.getIgnoreCase(tableName) != null);
                if (table.getMaterializer() != null) {
                    msg += "the materialized view is automatically partitioned based on its source table. "
                            + "Invalid PARTITION statement on view table " + tableName + ".";
                    throw new VoltCompilerException(msg);
                }

                final Column partitionCol = table.getColumns().getIgnoreCase(colName);
                // make sure the column exists
                if (partitionCol == null) {
                    msg += "PARTITION has unknown COLUMN '" + colName + "'";
                    throw new VoltCompilerException(msg);
                }
                // make sure the column is marked not-nullable
                if (partitionCol.getNullable() == true) {
                    msg += "Partition column '" + tableName + "." + colName + "' is nullable. "
                            + "Partition columns must be constrained \"NOT NULL\".";
                    throw new VoltCompilerException(msg);
                }
                // verify that the partition column is a supported type
                VoltType pcolType = VoltType.get((byte) partitionCol.getType());
                switch (pcolType) {
                case TINYINT:
                case SMALLINT:
                case INTEGER:
                case BIGINT:
                case STRING:
                case VARBINARY:
                    break;
                default:
                    msg += "Partition column '" + tableName + "." + colName + "' is not a valid type. "
                            + "Partition columns must be an integer or varchar type.";
                    throw new VoltCompilerException(msg);
                }

                table.setPartitioncolumn(partitionCol);
                table.setIsreplicated(false);

                // Check valid indexes, whether they contain the partition column or not.
                for (Index index : table.getIndexes()) {
                    checkValidPartitionTableIndex(index, partitionCol, tableName);
                }
                // Set the partitioning of destination tables of associated views.
                // If a view's source table is replicated, then a full scan of the
                // associated view is single-sited. If the source is partitioned,
                // a full scan of the view must be distributed, unless it is filtered
                // by the original table's partitioning key, which, to be filtered,
                // must also be a GROUP BY key.
                final CatalogMap<MaterializedViewInfo> views = table.getViews();
                for (final MaterializedViewInfo mvi : views) {
                    mvi.getDest().setIsreplicated(false);
                    setGroupedTablePartitionColumn(mvi, partitionCol);
                }
            }
        }
    }

    // add database estimates info
    addDatabaseEstimatesInfo(m_estimates, db);

    // Process DDL exported tables
    NavigableMap<String, NavigableSet<String>> exportTables = voltDdlTracker.getExportedTables();
    for (Entry<String, NavigableSet<String>> e : exportTables.entrySet()) {
        String targetName = e.getKey();
        for (String tableName : e.getValue()) {
            addExportTableToConnector(targetName, tableName, db);
        }
    }

    // Process and add exports and connectors to the catalog
    // Must do this before compiling procedures to deny updates
    // on append-only tables.
    if (export != null) {
        // currently, only a single connector is allowed
        compileExport(export, db);
    }

    // process DRed tables
    for (Entry<String, String> drNode : voltDdlTracker.getDRedTables().entrySet()) {
        compileDRTable(drNode, db);
    }

    if (whichProcs != DdlProceduresToLoad.NO_DDL_PROCEDURES) {
        Collection<ProcedureDescriptor> allProcs = voltDdlTracker.getProcedureDescriptors();
        CatalogMap<Procedure> previousProcsIfAny = null;
        if (previousDBIfAny != null) {
            previousProcsIfAny = previousDBIfAny.getProcedures();
        }
        compileProcedures(db, hsql, allProcs, classDependencies, whichProcs, previousProcsIfAny, jarOutput);
    }

    // add extra classes from the DDL
    m_addedClasses = voltDdlTracker.m_extraClassses.toArray(new String[0]);
    // Also, grab the IMPORT CLASS lines so we can add them to the
    // generated DDL
    m_importLines = voltDdlTracker.m_importLines.toArray(new String[0]);
    addExtraClasses(jarOutput);

    compileRowLimitDeleteStmts(db, hsql, ddlcompiler.getLimitDeleteStmtToXmlEntries());
}

From source file:org.apache.hadoop.hive.metastore.hbase.HBaseReadWrite.java

private String printOnePartition(Result result) throws IOException, TException {
    byte[] key = result.getRow();
    HBaseUtils.StorageDescriptorParts sdParts = HBaseUtils.deserializePartition(key,
            result.getValue(CATALOG_CF, CATALOG_COL), this);
    StringBuilder builder = new StringBuilder();
    builder.append(dumpThriftObject(sdParts.containingPartition)).append(" sdHash: ")
            .append(Base64.encodeBase64URLSafeString(sdParts.sdHash)).append(" stats:");
    NavigableMap<byte[], byte[]> statsCols = result.getFamilyMap(STATS_CF);
    for (Map.Entry<byte[], byte[]> statsCol : statsCols.entrySet()) {
        builder.append(" column ").append(new String(statsCol.getKey(), HBaseUtils.ENCODING)).append(": ");
        ColumnStatistics pcs = buildColStats(key, false);
        ColumnStatisticsObj cso = HBaseUtils.deserializeStatsForOneColumn(pcs, statsCol.getValue());
        builder.append(dumpThriftObject(cso));
    }
    return builder.toString();
}

From source file:org.apache.hadoop.hive.metastore.hbase.HBaseReadWrite.java

private String printOneTable(Result result) throws IOException, TException {
    byte[] key = result.getRow();
    HBaseUtils.StorageDescriptorParts sdParts = HBaseUtils.deserializeTable(key,
            result.getValue(CATALOG_CF, CATALOG_COL));
    StringBuilder builder = new StringBuilder();
    builder.append(dumpThriftObject(sdParts.containingTable)).append(" sdHash: ")
            .append(Base64.encodeBase64URLSafeString(sdParts.sdHash)).append(" stats:");
    NavigableMap<byte[], byte[]> statsCols = result.getFamilyMap(STATS_CF);
    for (Map.Entry<byte[], byte[]> statsCol : statsCols.entrySet()) {
        builder.append(" column ").append(new String(statsCol.getKey(), HBaseUtils.ENCODING)).append(": ");
        ColumnStatistics pcs = buildColStats(key, true);
        ColumnStatisticsObj cso = HBaseUtils.deserializeStatsForOneColumn(pcs, statsCol.getValue());
        builder.append(dumpThriftObject(cso));
    }
    // Add the primary key
    List<SQLPrimaryKey> pk = getPrimaryKey(sdParts.containingTable.getDbName(),
            sdParts.containingTable.getTableName());
    if (pk != null && pk.size() > 0) {
        builder.append(" primary key: ");
        for (SQLPrimaryKey pkcol : pk)
            builder.append(dumpThriftObject(pkcol));
    }

    // Add any foreign keys
    List<SQLForeignKey> fks = getForeignKeys(sdParts.containingTable.getDbName(),
            sdParts.containingTable.getTableName());
    if (fks != null && fks.size() > 0) {
        builder.append(" foreign keys: ");
        for (SQLForeignKey fkcol : fks)
            builder.append(dumpThriftObject(fkcol));

    }
    return builder.toString();
}

From source file:umich.ms.batmass.gui.viewers.map2d.components.BaseMap2D.java

/**
 * Fills the map given a scan collection.
 * @param scans
 * @return True, if filling was done successfully.
 *         False if something bad happened, e.g. scanCollection didn't contain any scans between rtStart & rtEnd
 */
public boolean fillMapFromScans(IScanCollection scans) {
    int pixelsVertical = availableHeight;
    height = pixelsVertical;
    width = availableWidth;

    NavigableMap<Integer, IScan> scansByRtSpanAtMsLevel = scans.getScansByRtSpanAtMsLevel(rtLo, rtHi, msLevel);
    if (!precursorMzRange.equals(Map2DPanel.OPT_DISPLAY_ALL_MZ_REGIONS)) {
        // if only scans from specific precursor m/z window were requested
        IntervalST<Double, TreeMap<Integer, IScan>> precursorRanges = scans.getMapMsLevel2rangeGroups()
                .get(msLevel);
        if (precursorRanges != null) {
            IntervalST.Node<Double, TreeMap<Integer, IScan>> node = precursorRanges.get(precursorMzRange);
            if (node != null) {
                // these are all the scans at proper MS level and in proper precursor m/z range
                TreeMap<Integer, IScan> scansInMzRange = node.getValue();
                // now filter this TreeMap to only leave scans that are in our RT range
                Integer numLo = scansByRtSpanAtMsLevel.firstKey();
                Integer numHi = scansByRtSpanAtMsLevel.lastKey();
                numLo = scansInMzRange.ceilingKey(numLo);
                numHi = scansInMzRange.floorKey(numHi);
                scansByRtSpanAtMsLevel = scansInMzRange.subMap(numLo, true, numHi, true);
            }
        }
    }
    if (scansByRtSpanAtMsLevel == null || scansByRtSpanAtMsLevel.size() == 0) {
        initErrorFillingState();
        return false;
    }
    scanNumLo = scansByRtSpanAtMsLevel.firstKey();
    scanNumHi = scansByRtSpanAtMsLevel.lastKey();

    // compare the number of scans to available vertical pixels
    int scanCount = scansByRtSpanAtMsLevel.size();
    this.map = new double[height][width];
    this.mapRaw = new double[height][width];
    this.maxValInFullRow = new double[height];

    IScan scan;
    TreeMap<Integer, IScan> mapNum2scan = scans.getMapNum2scan();
    IScan[] scansToAverage = new IScan[4];
    ISpectrum spectrum;
    Integer mzIdxLo, mzIdxHi;
    int x, y;
    boolean hasProfile = true;
    double[] masses, intensities;
    filledRowIds = new int[scansByRtSpanAtMsLevel.size()];
    int idx = 0;
    double denoisingTimeCounter = 0;
    for (Map.Entry<Integer, IScan> num2scan : scansByRtSpanAtMsLevel.entrySet()) {
        scan = num2scan.getValue();
        if (doProfileModeGapFilling && !scan.isCentroided()) {
            hasProfile = true;
        }
        spectrum = null;
        try {
            spectrum = scan.fetchSpectrum();
        } catch (FileParsingException ex) {
            Exceptions.printStackTrace(ex);
        }
        if (spectrum == null) {
            continue;
        }

        y = extrapolateRtToY(scan.getRt());
        filledRowIds[idx] = y;
        idx++;
        if (y > this.map.length - 1) {
            OutputWndPrinter.printErr("DEBUG", String.format(
                    "BaseMap2D: (y > this.map.length-1) for scan #%d.\n" + "\ty=%d, len-1=%d, height=%d\n"
                            + "\trt=%.20f, rtStart=%.20f, rtEnd=%.20f, rtSpan=%.20f",
                    scan.getNum(), y, this.map.length - 1, height, scan.getRt(), rtLo, rtHi, rtSpan));
        }
        masses = spectrum.getMZs();
        intensities = spectrum.getIntensities();

        mzIdxLo = spectrum.findMzIdxCeiling(mzLo);
        mzIdxHi = spectrum.findMzIdxFloor(mzHi);
        if (mzIdxLo == null || mzIdxHi == null) {
            OutputWndPrinter.printErr("DEBUG", String.format(
                    "BaseMap2D: mzIdxLo or mzIdxHi were null for scan #%d. " + "Not filling the map from them.",
                    scan.getNum()));
            continue;
        }
        if (mzIdxLo < 0 || mzIdxLo > masses.length - 1) {
            OutputWndPrinter.printErr("DEBUG", String.format(
                    "BaseMap2D: (mzIdxLo < 0 || mzIdxLo > masses.length-1) for scan #%d", scan.getNum()));
        }
        if (mzIdxHi < 0 || mzIdxHi > masses.length - 1) {
            OutputWndPrinter.printErr("DEBUG", String.format(
                    "BaseMap2D: (mzIdxHi < 0 || mzIdxHi > masses.length-1) for scan #%d", scan.getNum()));
        }

        double denoiseThreshold = Double.NaN;
        boolean applyDenoise = isDoDenoise();
        if (applyDenoise) {
            long start = System.nanoTime();
            denoiseThreshold = findDenoiseThreshold(masses, intensities);
            double denoisingTime = (System.nanoTime() - start) / 1e6;
            denoisingTimeCounter = denoisingTimeCounter + denoisingTime;
            if (Double.isNaN(denoiseThreshold)) {
                applyDenoise = false;
            }
        }

        double maxInt = spectrum.getMaxInt();
        for (int i = mzIdxLo; i <= mzIdxHi; i++) {

            x = extrapolateMzToX(masses[i]);
            addPeakRaw(x, y, intensities[i]);

            if (applyDenoise && intensities[i] < denoiseThreshold) {
                continue;
            }
            if (x > this.map[0].length - 1) {
                OutputWndPrinter.printErr("DEBUG", String.format(
                        "BaseMap2D: (x > this.map[0].length-1) for scan #%d.\n"
                                + "\tx=%d, len-1=%d, width=%d,\n"
                                + "\ti=%d, masses[i]=%.20f, mzStart=%.20f, mzEnd=%.20f, mzSpan=%.20f",
                        scan.getNum(), x, this.map[0].length - 1, width, i, masses[i], mzLo, mzHi, mzSpan));
            }

            // boost if present in previous/next scan
            //                double curIntensity = intensities[i];
            //                final int maxScanSpan = 2000;
            //                int numScansDisplayed = scansByRtSpanAtMsLevel.size();
            //                if (false && numScansDisplayed <= maxScanSpan) {
            //                    double maxIntInVicinity;
            //                    double intensityUpdateFactor = 1;
            //                    double dm, dmPpm, dmUpdateFactor;
            //                    int maxIntIdx;
            //                    double[] curInts, curMzs;
            //
            //                    final int scanNumShift = 1;
            //                    final double ppmTolerance = 15d;
            //
            //                    if (scan.getNum() % 1000 == 0) {
            //                        System.out.printf("Averaging for scan %d\n", scan.getNum());
            //                    }
            //                    scansToAverage[0] = mapNum2scan.get(scan.getNum() - scanNumShift*2);
            //                    scansToAverage[1] = mapNum2scan.get(scan.getNum() - scanNumShift);
            //                    scansToAverage[2] = mapNum2scan.get(scan.getNum() + scanNumShift);
            //                    scansToAverage[3] = mapNum2scan.get(scan.getNum() + scanNumShift*2);
            //                    double curMass = masses[i];
            //
            //                    for (IScan avgScan : scansToAverage) {                        
            //                        if (avgScan == null) {
            //                            continue;
            //                        }
            //                        ISpectrum s = avgScan.getSpectrum();
            //                        if (s == null) {
            //                            continue;
            //                        }
            //                        int[] mzIdxs = s.findMzIdxsWithinPpm(curMass, ppmTolerance);
            //                        dm = Double.NEGATIVE_INFINITY;
            //                        dmUpdateFactor = 1;
            //                        intensityUpdateFactor = 1;
            //                        if (mzIdxs != null) {
            //                            curInts = s.getIntensities();
            //                            curMzs = s.getMZs();
            //                            maxIntIdx = -1;
            //                            maxIntInVicinity = Double.NEGATIVE_INFINITY;
            //                            for (int j = mzIdxs[0]; j <= mzIdxs[1]; j++) {
            //                                if (curInts[j] > maxIntInVicinity) {
            //                                    maxIntIdx = j;
            //                                }
            //                            }
            //                            if (maxIntIdx != -1) {
            //                                intensityUpdateFactor = curInts[maxIntIdx];
            //                                dm = Math.abs(curMass - curMzs[maxIntIdx]);
            //
            //                                dmPpm = dm / (curMass / 1e6d);
            //                                if (dmPpm > ppmTolerance) {
            //                                    dmUpdateFactor = 0d;
            //                                    throw new IllegalStateException("dmUpdateFactor set to zero, should not happen");
            //                                } else {
            //                                    dmUpdateFactor = (1 - Math.pow(dmPpm / ppmTolerance, 2d));
            //                                }
            //                            } else {
            //                                throw new IllegalStateException("Strange condition, should never be triggered");
            //                            }
            //                        } else {
            //                            // if masses in the vicinity not found, then penalize
            //                            // TODO: this should be dependent on the chosen distribution for mass deviations
            //                            //       see dmFactor
            //                            intensityUpdateFactor = 1;
            //                            dmUpdateFactor = (1 - Math.pow(0.5d, 2d));
            //                        }
            //                        
            //                        curIntensity = curIntensity * (intensityUpdateFactor * dmUpdateFactor);
            //                    }
            //                }

            //                addPeak(x, y, curIntensity);
            addPeak(x, y, intensities[i]);
            maxValInFullRow[y] = maxInt;
            //                if (curIntensity > 1e6) {
            //                    addPeak(x, y, curIntensity);
            //                }

        }

        if (hasProfile && doProfileModeGapFilling) {
            double pixelSizeMz = getMzSpan() / availableWidth;
            if (pixelSizeMz < 0.05) {
                fillProfileGaps(0, y, pixelSizeMz);
            }
        }
    }
    if (isDoDenoise()) {
        OutputWndPrinter.printErr("DEBUG", String.format("Denoising took on average: %.2fms (%d scans)\n",
                (denoisingTimeCounter) / scansByRtSpanAtMsLevel.size(), scansByRtSpanAtMsLevel.size()));
    }

    if (hasProfile) { // profile mode spectrum
        if (!doProfileModeGapFilling && doMzCloseZoomGapFilling) {
            applySavitzkyGolay(map);
        }
    } else { // !hasProfile => centroided spectrum
        if (doMzCloseZoomGapFilling) {
            applySavitzkyGolay(map);
        }
    }

    findMinMaxIntensities();

    // if we created the full-sized version of the map, then a lot of rows might
    // be zero, because no scan actually mapped to this row of pixels
    // so we just fill it with the same pixels as in the previous filled row.
    if (doInterpRt) {
        for (int filledRowIdx = 0; filledRowIdx < filledRowIds.length - 1; filledRowIdx++) {
            int rowLo = filledRowIds[filledRowIdx];
            int rowHi = filledRowIds[filledRowIdx + 1];
            for (int rowToFillIdx = rowLo + 1; rowToFillIdx < rowHi; rowToFillIdx++) {
                System.arraycopy(map[rowLo], 0, map[rowToFillIdx], 0, width);
                maxValInFullRow[rowToFillIdx] = maxValInFullRow[rowLo];
            }
        }
    }

    // Add a tiny bit to the total intensity so we don't have to care about
    // edge values when mapping intensities to colors.
    // Adding a small epsilon, as totalIntensityMax should be a value > 1.0
    totalIntensityMax += 1e-8;
    return true;
}