Example usage for java.util Arrays hashCode

Introduction

On this page you can find example usage for java.util.Arrays.hashCode, collected from open-source projects.

Prototype

public static int hashCode(Object a[]) 

Document

Returns a hash code based on the contents of the specified array.
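
As a quick standalone illustration of that contract (not taken from any of the projects below), the sketch here shows that the hash is content-based, so two distinct arrays with equal contents hash identically, and that a null array hashes to 0:

import java.util.Arrays;

public class ArraysHashCodeDemo {
    public static void main(String[] args) {
        Integer[] a = { 1, 2, 3 };
        Integer[] b = { 1, 2, 3 };

        // Content-based: equal contents yield equal hash codes, unlike
        // a.hashCode(), which is the identity-based Object.hashCode().
        System.out.println(Arrays.hashCode(a) == Arrays.hashCode(b)); // true
        System.out.println(a.hashCode() == b.hashCode());             // almost certainly false

        // A null array argument hashes to 0 by specification.
        System.out.println(Arrays.hashCode((Object[]) null));         // 0
    }
}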

Usage

From source file: org.bimserver.GeometryGenerator.java

private int hash(GeometryData geometryData) {
    int hashCode = 0;
    if (geometryData.getIndices() != null) {
        hashCode += Arrays.hashCode(geometryData.getIndices());
    }
    if (geometryData.getVertices() != null) {
        hashCode += Arrays.hashCode(geometryData.getVertices());
    }
    if (geometryData.getNormals() != null) {
        hashCode += Arrays.hashCode(geometryData.getNormals());
    }
    if (geometryData.getMaterialIndices() != null) {
        hashCode += Arrays.hashCode(geometryData.getMaterialIndices());
    }
    if (geometryData.getMaterials() != null) {
        hashCode += Arrays.hashCode(geometryData.getMaterials());
    }
    return hashCode;
}
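
The method above combines the per-array hashes by plain addition, which is order-insensitive: swapping, say, the vertex data and the normal data would yield the same combined hash. The standalone sketch below (not BIMserver code) contrasts that with the usual 31-multiplier combination most of the hashCode() overrides on this page use; note also that Arrays.hashCode already returns 0 for a null array, so the null checks above are defensive rather than required.

import java.util.Arrays;

public class CombineHashesDemo {
    // Summing per-array hashes is order-insensitive: swapping the two
    // arrays yields the same combined value.
    static int sumCombine(int[] a, int[] b) {
        return Arrays.hashCode(a) + Arrays.hashCode(b);
    }

    // Multiplying by a prime between contributions makes the order matter,
    // which is the pattern most of the hashCode() overrides below follow.
    static int primeCombine(int[] a, int[] b) {
        int result = 1;
        result = 31 * result + Arrays.hashCode(a);
        result = 31 * result + Arrays.hashCode(b);
        return result;
    }

    public static void main(String[] args) {
        int[] x = { 1, 2, 3 };
        int[] y = { 4, 5, 6 };
        System.out.println(sumCombine(x, y) == sumCombine(y, x));     // true  (same hash either way)
        System.out.println(primeCombine(x, y) == primeCombine(y, x)); // false (order now matters)
    }
}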

From source file: com.opengamma.analytics.math.cube.InterpolatedFromSurfacesDoublesCube.java

@Override
public int hashCode() {
    final int prime = 31;
    int result = super.hashCode();
    result = prime * result + _interpolator.hashCode();
    result = prime * result + _plane.hashCode();
    result = prime * result + Arrays.hashCode(_points);
    result = prime * result + Arrays.hashCode(_surfaces);
    return result;
}

From source file: nlmt.topicmodels.HierarchicalLDAModel.java

@Override
public int hashCode() {
    int result;
    long temp;
    result = maxDepth;
    temp = Double.doubleToLongBits(gamma);
    result = 31 * result + (int) (temp ^ (temp >>> 32));
    result = 31 * result + Arrays.hashCode(eta);
    temp = Double.doubleToLongBits(m);
    result = 31 * result + (int) (temp ^ (temp >>> 32));
    temp = Double.doubleToLongBits(pi);
    result = 31 * result + (int) (temp ^ (temp >>> 32));
    result = 31 * result + (documents != null ? Arrays.hashCode(documents) : 0);
    result = 31 * result + vocabulary.hashCode();
    result = 31 * result + nodeMapper.hashCode();
    result = 31 * result + (documentPaths != null ? Arrays.hashCode(documentPaths) : 0);
    result = 31 * result + (rootNode != null ? rootNode.hashCode() : 0);
    return result;
}

From source file: com.android.sdklib.internal.repository.packages.Package.java

@Override
public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + Arrays.hashCode(mArchives);
    result = prime * result + ((mObsolete == null) ? 0 : mObsolete.hashCode());
    result = prime * result + getRevision().hashCode();
    result = prime * result + ((mSource == null) ? 0 : mSource.hashCode());
    return result;
}

From source file: android.databinding.tool.store.SetterStore.java

private static int mergedHashCode(Object... objects) {
    return Arrays.hashCode(objects);
}
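
This varargs wrapper folds an arbitrary list of key components into a single content-based hash, which is essentially what java.util.Objects.hash(Object...) is specified to do. A small sketch of the idea (the key components here are made up for illustration):

import java.util.Arrays;
import java.util.Objects;

public class MergedHashCodeDemo {
    // Same idea as mergedHashCode above: varargs boxes the components
    // into an Object[], and Arrays.hashCode hashes its contents.
    private static int mergedHashCode(Object... objects) {
        return Arrays.hashCode(objects);
    }

    public static void main(String[] args) {
        int viaArrays  = mergedHashCode("android:text", String.class, 2);
        int viaObjects = Objects.hash("android:text", String.class, 2);
        // Objects.hash is specified to delegate to Arrays.hashCode,
        // so the two values agree.
        System.out.println(viaArrays == viaObjects); // true
    }
}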

From source file: ffx.potential.parameters.MultipoleType.java

/**
 * {@inheritDoc}
 */
@Override
public int hashCode() {
    int hash = 7;
    hash = 29 * hash + Arrays.hashCode(frameAtomTypes);
    return hash;
}

From source file: org.sufficientlysecure.keychain.provider.KeyWritableRepository.java

/**
 * Save a public keyring into the database.
 * <p>
 * This is a high-level method which takes care of merging all new information into the old
 * keyring and keeps the public and secret keyrings in sync.
 * <p>
 * If you want to merge keys in memory only and not save them to the database, set skipSave=true.
 */
public SaveKeyringResult savePublicKeyRing(UncachedKeyRing publicRing, byte[] expectedFingerprint,
        ArrayList<CanonicalizedKeyRing> canKeyRings, boolean forceRefresh, boolean skipSave) {

    try {
        long masterKeyId = publicRing.getMasterKeyId();
        log(LogType.MSG_IP, KeyFormattingUtils.convertKeyIdToHex(masterKeyId));
        mIndent += 1;

        if (publicRing.isSecret()) {
            log(LogType.MSG_IP_BAD_TYPE_SECRET);
            return new SaveKeyringResult(SaveKeyringResult.RESULT_ERROR, mLog, null);
        }

        CanonicalizedPublicKeyRing canPublicRing;
        boolean alreadyExists = false;

        // If there is an old keyring, merge it
        try {
            UncachedKeyRing oldPublicRing = getCanonicalizedPublicKeyRing(masterKeyId).getUncachedKeyRing();
            alreadyExists = true;

            // Merge data from new public ring into the old one
            log(LogType.MSG_IP_MERGE_PUBLIC);
            publicRing = oldPublicRing.merge(publicRing, mLog, mIndent);

            // If this is null, there is an error in the log so we can just return
            if (publicRing == null) {
                return new SaveKeyringResult(SaveKeyringResult.RESULT_ERROR, mLog, null);
            }

            // Canonicalize this keyring, to assert a number of assumptions made about it.
            canPublicRing = (CanonicalizedPublicKeyRing) publicRing.canonicalize(mLog, mIndent);
            if (canPublicRing == null) {
                return new SaveKeyringResult(SaveKeyringResult.RESULT_ERROR, mLog, null);
            }
            if (canKeyRings != null)
                canKeyRings.add(canPublicRing);

            // Early breakout if nothing changed
            if (!forceRefresh && Arrays.hashCode(publicRing.getEncoded()) == Arrays
                    .hashCode(oldPublicRing.getEncoded())) {
                log(LogType.MSG_IP_SUCCESS_IDENTICAL);
                return new SaveKeyringResult(SaveKeyringResult.UPDATED, mLog, null);
            }
        } catch (NotFoundException e) {
            // Not an issue, just means we are dealing with a new keyring.

            // Canonicalize this keyring, to assert a number of assumptions made about it.
            canPublicRing = (CanonicalizedPublicKeyRing) publicRing.canonicalize(mLog, mIndent);
            if (canPublicRing == null) {
                return new SaveKeyringResult(SaveKeyringResult.RESULT_ERROR, mLog, null);
            }
            if (canKeyRings != null)
                canKeyRings.add(canPublicRing);
        }

        // If there is a secret key, merge new data (if any) and save the key for later
        CanonicalizedSecretKeyRing canSecretRing;
        try {
            UncachedKeyRing secretRing = getCanonicalizedSecretKeyRing(publicRing.getMasterKeyId())
                    .getUncachedKeyRing();

            // Merge data from new public ring into secret one
            log(LogType.MSG_IP_MERGE_SECRET);
            secretRing = secretRing.merge(publicRing, mLog, mIndent);
            if (secretRing == null) {
                return new SaveKeyringResult(SaveKeyringResult.RESULT_ERROR, mLog, null);
            }
            // This has always been a secret key ring, so this is a safe cast
            canSecretRing = (CanonicalizedSecretKeyRing) secretRing.canonicalize(mLog, mIndent);
            if (canSecretRing == null) {
                return new SaveKeyringResult(SaveKeyringResult.RESULT_ERROR, mLog, null);
            }

        } catch (NotFoundException e) {
            // No secret key available (this is what happens most of the time)
            canSecretRing = null;
        }

        // If we have an expected fingerprint, make sure it matches
        if (expectedFingerprint != null) {
            if (!canPublicRing.containsBoundSubkey(expectedFingerprint)) {
                log(LogType.MSG_IP_FINGERPRINT_ERROR);
                return new SaveKeyringResult(SaveKeyringResult.RESULT_ERROR, mLog, null);
            } else {
                log(LogType.MSG_IP_FINGERPRINT_OK);
            }
        }

        int result;
        if (skipSave) {
            // skip save method, set fixed result
            result = SaveKeyringResult.SAVED_PUBLIC | (alreadyExists ? SaveKeyringResult.UPDATED : 0);
        } else {
            result = saveCanonicalizedPublicKeyRing(canPublicRing, canSecretRing != null);
        }

        // Save the secret keyring (if any)
        if (canSecretRing != null) {
            int secretResult;
            if (skipSave) {
                // skip save method, set fixed result
                secretResult = SaveKeyringResult.SAVED_SECRET;
            } else {
                secretResult = saveCanonicalizedSecretKeyRing(canSecretRing);
            }

            if ((secretResult & SaveKeyringResult.RESULT_ERROR) != SaveKeyringResult.RESULT_ERROR) {
                result |= SaveKeyringResult.SAVED_SECRET;
            }
        }

        return new SaveKeyringResult(result, mLog, canSecretRing);
    } catch (IOException e) {
        log(LogType.MSG_IP_ERROR_IO_EXC);
        return new SaveKeyringResult(SaveKeyringResult.RESULT_ERROR, mLog, null);
    } finally {
        mIndent -= 1;
    }
}
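
A side note on the early-breakout check above, which treats equal Arrays.hashCode values of the two encodings as "nothing changed": hash equality is only a heuristic, because distinct byte arrays can collide on a 32-bit hash, whereas Arrays.equals compares the contents definitively. The standalone sketch below (not part of KeyWritableRepository) shows a concrete collision:

import java.util.Arrays;

public class EncodedComparisonNote {
    // Heuristic: equal hashes usually, but not always, mean equal contents.
    static boolean probablyUnchanged(byte[] oldEncoded, byte[] newEncoded) {
        return Arrays.hashCode(oldEncoded) == Arrays.hashCode(newEncoded);
    }

    // Definitive: element-by-element comparison.
    static boolean definitelyUnchanged(byte[] oldEncoded, byte[] newEncoded) {
        return Arrays.equals(oldEncoded, newEncoded);
    }

    public static void main(String[] args) {
        // Different contents, same 32-bit hash (both hash to 992).
        byte[] p = { 0, 31 };
        byte[] q = { 1, 0 };
        System.out.println(probablyUnchanged(p, q));   // true  (collision)
        System.out.println(definitelyUnchanged(p, q)); // false
    }
}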

From source file: org.sufficientlysecure.keychain.provider.KeyWritableRepository.java

public SaveKeyringResult saveSecretKeyRing(UncachedKeyRing secretRing,
        ArrayList<CanonicalizedKeyRing> canKeyRings, boolean skipSave) {

    try {
        long masterKeyId = secretRing.getMasterKeyId();
        log(LogType.MSG_IS, KeyFormattingUtils.convertKeyIdToHex(masterKeyId));
        mIndent += 1;

        if (!secretRing.isSecret()) {
            log(LogType.MSG_IS_BAD_TYPE_PUBLIC);
            return new SaveKeyringResult(SaveKeyringResult.RESULT_ERROR, mLog, null);
        }

        CanonicalizedSecretKeyRing canSecretRing;
        boolean alreadyExists = false;

        // If there is an old secret key, merge it.
        try {
            UncachedKeyRing oldSecretRing = getCanonicalizedSecretKeyRing(masterKeyId).getUncachedKeyRing();
            alreadyExists = true;

            // Merge data from new secret ring into old one
            log(LogType.MSG_IS_MERGE_SECRET);
            secretRing = secretRing.merge(oldSecretRing, mLog, mIndent);

            // If this is null, there is an error in the log so we can just return
            if (secretRing == null) {
                return new SaveKeyringResult(SaveKeyringResult.RESULT_ERROR, mLog, null);
            }

            // Canonicalize this keyring, to assert a number of assumptions made about it.
            // This is a safe cast, because we made sure this is a secret ring above
            canSecretRing = (CanonicalizedSecretKeyRing) secretRing.canonicalize(mLog, mIndent);
            if (canSecretRing == null) {
                return new SaveKeyringResult(SaveKeyringResult.RESULT_ERROR, mLog, null);
            }
            if (canKeyRings != null)
                canKeyRings.add(canSecretRing);

            // Early breakout if nothing changed
            if (Arrays.hashCode(secretRing.getEncoded()) == Arrays.hashCode(oldSecretRing.getEncoded())) {
                log(LogType.MSG_IS_SUCCESS_IDENTICAL, KeyFormattingUtils.convertKeyIdToHex(masterKeyId));
                return new SaveKeyringResult(SaveKeyringResult.UPDATED, mLog, null);
            }
        } catch (NotFoundException e) {
            // Not an issue, just means we are dealing with a new keyring

            // Canonicalize this keyring, to assert a number of assumptions made about it.
            // This is a safe cast, because we made sure this is a secret ring above
            canSecretRing = (CanonicalizedSecretKeyRing) secretRing.canonicalize(mLog, mIndent);
            if (canSecretRing == null) {

                // Special case: If keyring canonicalization failed, try again after adding
                // all self-certificates from the public key.
                try {
                    log(LogType.MSG_IS_MERGE_SPECIAL);
                    UncachedKeyRing oldPublicRing = getCanonicalizedPublicKeyRing(masterKeyId)
                            .getUncachedKeyRing();
                    secretRing = secretRing.merge(oldPublicRing, mLog, mIndent);
                    canSecretRing = (CanonicalizedSecretKeyRing) secretRing.canonicalize(mLog, mIndent);
                } catch (NotFoundException e2) {
                    // nothing, this is handled right in the next line
                }

                if (canSecretRing == null) {
                    return new SaveKeyringResult(SaveKeyringResult.RESULT_ERROR, mLog, null);
                }
            }
            if (canKeyRings != null)
                canKeyRings.add(canSecretRing);
        }

        // Merge new data into public keyring as well, if there is any
        UncachedKeyRing publicRing;
        try {
            UncachedKeyRing oldPublicRing = getCanonicalizedPublicKeyRing(masterKeyId).getUncachedKeyRing();

            // Merge data from new secret ring into public one
            log(LogType.MSG_IS_MERGE_PUBLIC);
            publicRing = oldPublicRing.merge(secretRing, mLog, mIndent);
            if (publicRing == null) {
                return new SaveKeyringResult(SaveKeyringResult.RESULT_ERROR, mLog, null);
            }

        } catch (NotFoundException e) {
            log(LogType.MSG_IS_PUBRING_GENERATE);
            publicRing = secretRing.extractPublicKeyRing();
        }

        CanonicalizedPublicKeyRing canPublicRing = (CanonicalizedPublicKeyRing) publicRing.canonicalize(mLog,
                mIndent);
        if (canPublicRing == null) {
            return new SaveKeyringResult(SaveKeyringResult.RESULT_ERROR, mLog, null);
        }

        int publicResult;
        if (skipSave) {
            // skip save method, set fixed result
            publicResult = SaveKeyringResult.SAVED_PUBLIC;
        } else {
            publicResult = saveCanonicalizedPublicKeyRing(canPublicRing, true);
        }

        if ((publicResult & SaveKeyringResult.RESULT_ERROR) == SaveKeyringResult.RESULT_ERROR) {
            return new SaveKeyringResult(SaveKeyringResult.RESULT_ERROR, mLog, null);
        }

        int result;
        if (skipSave) {
            // skip save method, set fixed result
            result = SaveKeyringResult.SAVED_SECRET | (alreadyExists ? SaveKeyringResult.UPDATED : 0);
        } else {
            result = saveCanonicalizedSecretKeyRing(canSecretRing);
        }

        return new SaveKeyringResult(result, mLog, canSecretRing);
    } catch (IOException e) {
        log(LogType.MSG_IS_ERROR_IO_EXC);
        return new SaveKeyringResult(SaveKeyringResult.RESULT_ERROR, mLog, null);
    } finally {
        mIndent -= 1;
    }
}

From source file: edu.brown.hstore.BatchPlanner.java

/**
 * @param txn_id
 * @param client_handle
 * @param base_partition
 * @param predict_partitions
 * @param touched_partitions
 * @param batchArgs
 * @return
 */
public BatchPlan plan(Long txn_id, long client_handle, Integer base_partition,
        Collection<Integer> predict_partitions, boolean predict_singlepartitioned,
        Histogram<Integer> touched_partitions, ParameterSet[] batchArgs) {
    if (this.enable_profiling)
        time_plan.start();
    if (d)
        LOG.debug(String.format("Constructing a new %s BatchPlan for %s txn #%d", this.catalog_proc.getName(),
                (predict_singlepartitioned ? "single-partition" : "distributed"), txn_id));

    boolean cache_isSinglePartition[] = null;

    // OPTIMIZATION: Check whether we can use a cached single-partition BatchPlan
    if (this.force_singlePartition || this.enable_caching) {
        boolean is_allSinglePartition = true;
        cache_isSinglePartition = new boolean[this.batchSize];

        // OPTIMIZATION: Skip all of this if we know that we're always
        //               supposed to be single-partitioned
        if (this.force_singlePartition == false) {
            for (int stmt_index = 0; stmt_index < this.batchSize; stmt_index++) {
                if (cache_fastLookups[stmt_index] == null) {
                    if (d)
                        LOG.debug(String.format(
                                "[#%d-%02d] No fast look-ups for %s. Cache is marked as not single-partitioned",
                                txn_id, stmt_index, this.catalog_stmts[stmt_index].fullName()));
                    cache_isSinglePartition[stmt_index] = false;
                } else {
                    if (d)
                        LOG.debug(String.format("[#%d-%02d] Using fast-lookup caching for %s: %s", txn_id,
                                stmt_index, this.catalog_stmts[stmt_index].fullName(),
                                Arrays.toString(cache_fastLookups[stmt_index])));
                    Object params[] = batchArgs[stmt_index].toArray();
                    cache_isSinglePartition[stmt_index] = true;
                    for (int idx : cache_fastLookups[stmt_index]) {
                        if (hasher.hash(params[idx]) != base_partition.intValue()) {
                            cache_isSinglePartition[stmt_index] = false;
                            break;
                        }
                    } // FOR
                }
                if (d)
                    LOG.debug(String.format("[#%d-%02d] cache_isSinglePartition[%s] = %s", txn_id, stmt_index,
                            this.catalog_stmts[stmt_index].fullName(), cache_isSinglePartition[stmt_index]));
                is_allSinglePartition = is_allSinglePartition && cache_isSinglePartition[stmt_index];
            } // FOR (Statement)
        }
        if (t)
            LOG.trace(String.format("[#%d] is_allSinglePartition=%s", txn_id, is_allSinglePartition));

        // If all of the Statements are single-partition, then we can use
        // the cached BatchPlan if we already have one.
        // This saves a lot of trouble
        if (is_allSinglePartition && cache_singlePartitionPlans[base_partition.intValue()] != null) {
            if (d)
                LOG.debug(String.format("[#%d] Using cached BatchPlan at partition #%02d: %s", txn_id,
                        base_partition, Arrays.toString(this.catalog_stmts)));
            if (this.enable_profiling)
                time_plan.stop();
            return (cache_singlePartitionPlans[base_partition.intValue()]);
        }
    }

    // Otherwise we have to construct a new BatchPlan
    plan.init(client_handle, base_partition);

    // ----------------------
    // DEBUG DUMP
    // ----------------------
    if (t) {
        Map<String, Object> m = new ListOrderedMap<String, Object>();
        m.put("Batch Size", this.batchSize);
        for (int i = 0; i < this.batchSize; i++) {
            m.put(String.format("[%02d] %s", i, this.catalog_stmts[i].getName()),
                    Arrays.toString(batchArgs[i].toArray()));
        }
        LOG.trace("\n" + StringUtil.formatMapsBoxed(m));
    }

    // Only maintain the histogram of what partitions were touched if we
    // know that we're going to throw a MispredictionException
    Histogram<Integer> mispredict_h = null;
    boolean mispredict = false;

    for (int stmt_index = 0; stmt_index < this.batchSize; stmt_index++) {
        final Statement catalog_stmt = this.catalog_stmts[stmt_index];
        assert (catalog_stmt != null) : "The Statement at index " + stmt_index + " is null for "
                + this.catalog_proc;
        final Object params[] = batchArgs[stmt_index].toArray();
        if (t)
            LOG.trace(String.format("[#%d-%02d] Calculating touched partitions plans for %s", txn_id,
                    stmt_index, catalog_stmt.fullName()));

        Map<PlanFragment, Set<Integer>> frag_partitions = plan.frag_partitions[stmt_index];
        Set<Integer> stmt_all_partitions = plan.stmt_partitions[stmt_index];

        boolean has_singlepartition_plan = catalog_stmt.getHas_singlesited();
        boolean is_replicated_only = this.stmt_is_replicatedonly[stmt_index];
        boolean is_read_only = this.stmt_is_readonly[stmt_index];
        // boolean stmt_localFragsAreNonTransactional =
        // plan.localFragsAreNonTransactional;
        boolean is_singlepartition = has_singlepartition_plan;
        boolean is_local = true;
        CatalogMap<PlanFragment> fragments = null;

        // AbstractPlanNode node =
        // PlanNodeUtil.getRootPlanNodeForStatement(catalog_stmt, false);
        // LOG.info(PlanNodeUtil.debug(node));

        // OPTIMIZATION: Fast partition look-up caching
        // OPTIMIZATION: Read-only queries on replicated tables always just
        //               go to the local partition
        // OPTIMIZATION: If we're forced to be single-partitioned, pretend
        //               that the table is replicated
        if (cache_isSinglePartition[stmt_index] || (is_replicated_only && is_read_only)
                || this.force_singlePartition) {
            if (t) {
                if (cache_isSinglePartition[stmt_index]) {
                    LOG.trace(String.format("[#%d-%02d] Using fast-lookup for %s. Skipping PartitionEstimator",
                            txn_id, stmt_index, catalog_stmt.fullName()));
                } else {
                    LOG.trace(String.format(
                            "[#%d-%02d] %s is read-only and replicate-only. Skipping PartitionEstimator",
                            txn_id, stmt_index, catalog_stmt.fullName()));
                }
            }
            assert (has_singlepartition_plan);

            if (this.cache_singlePartitionFragmentPartitions == null) {
                this.cache_singlePartitionFragmentPartitions = CACHED_FRAGMENT_PARTITION_MAPS[base_partition
                        .intValue()];
            }
            Map<PlanFragment, Set<Integer>> cached_frag_partitions = this.cache_singlePartitionFragmentPartitions
                    .get(catalog_stmt);
            if (cached_frag_partitions == null) {
                cached_frag_partitions = new HashMap<PlanFragment, Set<Integer>>();
                Set<Integer> p = CACHED_SINGLE_PARTITION_SETS[base_partition.intValue()];
                for (PlanFragment catalog_frag : catalog_stmt.getFragments().values()) {
                    cached_frag_partitions.put(catalog_frag, p);
                } // FOR
                this.cache_singlePartitionFragmentPartitions.put(catalog_stmt, cached_frag_partitions);
            }
            if (plan.stmt_partitions_swap[stmt_index] == null) {
                plan.stmt_partitions_swap[stmt_index] = plan.stmt_partitions[stmt_index];
                plan.frag_partitions_swap[stmt_index] = plan.frag_partitions[stmt_index];
            }
            stmt_all_partitions = plan.stmt_partitions[stmt_index] = CACHED_SINGLE_PARTITION_SETS[base_partition
                    .intValue()];
            frag_partitions = plan.frag_partitions[stmt_index] = cached_frag_partitions;
        }

        // Otherwise figure out whether the query can execute as
        // single-partitioned or not
        else {
            if (t)
                LOG.trace(String.format(
                        "[#%d-%02d] Computing touched partitions %s in txn #%d with the PartitionEstimator",
                        txn_id, stmt_index, catalog_stmt.fullName(), txn_id));

            if (plan.stmt_partitions_swap[stmt_index] != null) {
                stmt_all_partitions = plan.stmt_partitions[stmt_index] = plan.stmt_partitions_swap[stmt_index];
                plan.stmt_partitions_swap[stmt_index] = null;
                stmt_all_partitions.clear();

                frag_partitions = plan.frag_partitions[stmt_index] = plan.frag_partitions_swap[stmt_index];
                plan.frag_partitions_swap[stmt_index] = null;
            }

            try {
                // OPTIMIZATION: If we were told that the transaction is supposed to be 
                // single-partitioned, then we will throw the single-partitioned PlanFragments 
                // at the PartitionEstimator to get back what partitions each PlanFragment 
                // will need to go to. If we get multiple partitions, then we know that we 
                // mispredicted and we should throw a MispredictionException
                // If we originally didn't predict that it was single-partitioned, then we 
                // actually still need to check whether the query should be single-partitioned or not.
                // This is because a query may actually just want to execute on just one 
                // partition (note that it could be a local partition or the remote partition).
                // We'll assume that it's single-partition <<--- Can we cache that??
                while (true) {
                    if (is_singlepartition == false)
                        stmt_all_partitions.clear();
                    fragments = (is_singlepartition ? catalog_stmt.getFragments()
                            : catalog_stmt.getMs_fragments());

                    // PARTITION ESTIMATOR
                    if (this.enable_profiling)
                        ProfileMeasurement.swap(this.time_plan, this.time_partitionEstimator);
                    this.p_estimator.getAllFragmentPartitions(frag_partitions, stmt_all_partitions,
                            fragments.values(), params, base_partition);
                    if (this.enable_profiling)
                        ProfileMeasurement.swap(this.time_partitionEstimator, this.time_plan);

                    int stmt_all_partitions_size = stmt_all_partitions.size();
                    if (is_singlepartition && stmt_all_partitions_size > 1) {
                        // If this was supposed to be multi-partitioned, then
                        // we want to stop right here!!
                        if (predict_singlepartitioned) {
                            if (t)
                                LOG.trace(String.format("Mispredicted txn #%d - Multiple Partitions"));
                            mispredict = true;
                            break;
                        }
                        // Otherwise we can let it wrap back around and
                        // construct the fragment mapping for the
                        // multi-partition PlanFragments
                        is_singlepartition = false;
                        continue;
                    }
                    is_local = (stmt_all_partitions_size == 1 && stmt_all_partitions.contains(base_partition));
                    if (is_local == false && predict_singlepartitioned) {
                        // Again, this is not what was supposed to happen!
                        if (t)
                            LOG.trace(String.format("Mispredicted txn #%d - Remote Partitions %s", txn_id,
                                    stmt_all_partitions));
                        mispredict = true;
                        break;
                    } else if (predict_partitions.containsAll(stmt_all_partitions) == false) {
                        // Again, this is not what was supposed to happen!
                        if (t)
                            LOG.trace(String.format("Mispredicted txn #%d - Unallocated Partitions %s / %s",
                                    txn_id, stmt_all_partitions, predict_partitions));
                        mispredict = true;
                        break;
                    }
                    // Score! We have a plan that works!
                    break;
                } // WHILE
                  // Bad Mojo!
            } catch (Exception ex) {
                String msg = "";
                for (int i = 0; i < this.batchSize; i++) {
                    msg += String.format("[#%d-%02d] %s %s\n%5s\n", txn_id, i, catalog_stmt.fullName(),
                            catalog_stmt.getSqltext(), Arrays.toString(batchArgs[i].toArray()));
                } // FOR
                LOG.fatal("\n" + msg);
                throw new RuntimeException("Unexpected error when planning " + catalog_stmt.fullName(), ex);
            }
        }
        if (d)
            LOG.debug(String.format("[#%d-%02d] is_singlepartition=%s, partitions=%s", txn_id, stmt_index,
                    is_singlepartition, stmt_all_partitions));

        // Get a sorted list of the PlanFragments that we need to execute
        // for this query
        if (is_singlepartition) {
            if (this.sorted_singlep_fragments[stmt_index] == null) {
                this.sorted_singlep_fragments[stmt_index] = PlanNodeUtil.getSortedPlanFragments(catalog_stmt,
                        true);
            }
            plan.frag_list[stmt_index] = this.sorted_singlep_fragments[stmt_index];

            // Only mark that we touched these partitions if the Statement
            // is not on a replicated table
            if (is_replicated_only == false) {
                touched_partitions.putAll(stmt_all_partitions);
            }

        } else {
            if (this.sorted_multip_fragments[stmt_index] == null) {
                this.sorted_multip_fragments[stmt_index] = PlanNodeUtil.getSortedPlanFragments(catalog_stmt,
                        false);
            }
            plan.frag_list[stmt_index] = this.sorted_multip_fragments[stmt_index];

            // Always mark that we are touching these partitions
            touched_partitions.putAll(stmt_all_partitions);
        }

        plan.readonly = plan.readonly && catalog_stmt.getReadonly();
        // plan.localFragsAreNonTransactional =
        // plan.localFragsAreNonTransactional ||
        // stmt_localFragsAreNonTransactional;
        plan.all_singlepartitioned = plan.all_singlepartitioned && is_singlepartition;
        plan.all_local = plan.all_local && is_local;

        // Keep track of whether the current query in the batch was
        // single-partitioned or not
        plan.singlepartition_bitmap[stmt_index] = is_singlepartition;

        // Misprediction!!
        if (mispredict) {
            // If this is the first Statement in the batch that hits the mispredict, 
            // then we need to create the histogram and populate it with the 
            // partitions from the previous queries
            int start_idx = stmt_index;
            if (mispredict_h == null) {
                mispredict_h = new Histogram<Integer>();
                start_idx = 0;
            }
            for (int i = start_idx; i <= stmt_index; i++) {
                if (d)
                    LOG.debug(String.format(
                            "Pending mispredict for txn #%d. Checking whether to add partitions for batch statement %02d",
                            txn_id, i));

                // Make sure that we don't count the local partition if it
                // was reading a replicated table.
                if (this.stmt_is_replicatedonly[i] == false
                        || (this.stmt_is_replicatedonly[i] && this.stmt_is_readonly[i] == false)) {
                    if (t)
                        LOG.trace(String.format(
                                "%s touches non-replicated table. Including %d partitions in mispredict histogram for txn #%d",
                                this.catalog_stmts[i].fullName(), plan.stmt_partitions[i].size(), txn_id));
                    mispredict_h.putAll(plan.stmt_partitions[i]);
                }
            } // FOR
            continue;
        }

        // ----------------------
        // DEBUG DUMP
        // ----------------------
        if (d) {
            Map<?, ?> maps[] = new Map[fragments.size() + 1];
            int ii = 0;
            for (PlanFragment catalog_frag : fragments) {
                Map<String, Object> m = new ListOrderedMap<String, Object>();
                Set<Integer> p = plan.frag_partitions[stmt_index].get(catalog_frag);
                boolean frag_local = (p.size() == 1 && p.contains(base_partition));
                m.put(String.format("[%02d] Fragment", ii), catalog_frag.fullName());
                m.put(String.format("     Partitions"), p);
                m.put(String.format("     IsLocal"), frag_local);
                ii++;
                maps[ii] = m;
            } // FOR

            Map<String, Object> header = new ListOrderedMap<String, Object>();
            header.put("Batch Statement#", String.format("%02d / %02d", stmt_index, this.batchSize));
            header.put("Catalog Statement", catalog_stmt.fullName());
            header.put("Statement SQL", catalog_stmt.getSqltext());
            header.put("All Partitions", plan.stmt_partitions[stmt_index]);
            header.put("Local Partition", base_partition);
            header.put("IsSingledSited", is_singlepartition);
            header.put("IsStmtLocal", is_local);
            header.put("IsReplicatedOnly", is_replicated_only);
            header.put("IsBatchLocal", plan.all_local);
            header.put("Fragments", fragments.size());
            maps[0] = header;

            LOG.debug("\n" + StringUtil.formatMapsBoxed(maps));
        }
    } // FOR (Statement)

    // Check whether a graph already exists for this batch configuration.
    // This is the only place where we need to synchronize
    int bitmap_hash = Arrays.hashCode(plan.singlepartition_bitmap);
    PlanGraph graph = this.plan_graphs.get(bitmap_hash);
    if (graph == null) { // assume fast case
        graph = this.buildPlanGraph(plan);
        this.plan_graphs.put(bitmap_hash, graph);
    }
    plan.graph = graph;
    plan.rounds_length = graph.num_rounds;

    if (this.enable_profiling)
        time_plan.stop();

    // Create the MispredictException if any Statement in the loop above hit
    // it. We don't want to throw it because whoever called us may want to look
    // at the plan first
    if (mispredict_h != null) {
        plan.mispredict = new MispredictionException(txn_id, mispredict_h);
    }
    // If this is a single-partition plan and we have caching enabled, we'll
    // add this to our cached listing. We'll mark it as cached so that it is never
    // returned back to the BatchPlan object pool
    else if (this.enable_caching && cache_singlePartitionPlans[base_partition.intValue()] == null
            && plan.isSingledPartitionedAndLocal()) {
        cache_singlePartitionPlans[base_partition.intValue()] = plan;
        plan.cached = true;
        plan = new BatchPlan(this.maxRoundSize);
        return cache_singlePartitionPlans[base_partition.intValue()];
    }

    if (d)
        LOG.debug("Created BatchPlan:\n" + plan.toString());
    return (plan);
}
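
The single Arrays.hashCode call in this planner is near the end, where the contents of the boolean[] single-partition bitmap are hashed into an int that keys the PlanGraph cache. A minimal standalone sketch of that content-keyed caching pattern (hypothetical names, not H-Store code):

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

public class BitmapKeyedCache {
    private final Map<Integer, String> cache = new HashMap<>();

    // Key the cache on the contents of the bitmap rather than its identity.
    // This trusts the 32-bit hash; a collision-proof variant would wrap the
    // array (or a copy of it) in a key object that also implements equals().
    String planFor(boolean[] singlePartitionBitmap) {
        int key = Arrays.hashCode(singlePartitionBitmap);
        return cache.computeIfAbsent(key,
                k -> "plan for " + Arrays.toString(singlePartitionBitmap));
    }

    public static void main(String[] args) {
        BitmapKeyedCache c = new BitmapKeyedCache();
        boolean[] first  = { true, false, true };
        boolean[] second = { true, false, true }; // different array, same contents
        // Both lookups hit the same cache entry because the key is
        // content-based, not identity-based.
        System.out.println(c.planFor(first).equals(c.planFor(second))); // true
    }
}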

From source file: org.pentaho.di.trans.steps.loadfileinput.LoadFileInputMeta.java

@Override
public int hashCode() {
    int result = fileName != null ? Arrays.hashCode(fileName) : 0;
    result = 31 * result + (fileMask != null ? Arrays.hashCode(fileMask) : 0);
    result = 31 * result + (excludeFileMask != null ? Arrays.hashCode(excludeFileMask) : 0);
    result = 31 * result + (includeFilename ? 1 : 0);
    result = 31 * result + (filenameField != null ? filenameField.hashCode() : 0);
    result = 31 * result + (includeRowNumber ? 1 : 0);
    result = 31 * result + (rowNumberField != null ? rowNumberField.hashCode() : 0);
    result = 31 * result + (int) (rowLimit ^ (rowLimit >>> 32));
    result = 31 * result + (inputFields != null ? Arrays.hashCode(inputFields) : 0);
    result = 31 * result + (encoding != null ? encoding.hashCode() : 0);
    result = 31 * result + (DynamicFilenameField != null ? DynamicFilenameField.hashCode() : 0);
    result = 31 * result + (fileinfield ? 1 : 0);
    result = 31 * result + (addresultfile ? 1 : 0);
    result = 31 * result + (fileRequired != null ? Arrays.hashCode(fileRequired) : 0);
    result = 31 * result + (IsIgnoreEmptyFile ? 1 : 0);
    result = 31 * result + (includeSubFolders != null ? Arrays.hashCode(includeSubFolders) : 0);
    result = 31 * result + (shortFileFieldName != null ? shortFileFieldName.hashCode() : 0);
    result = 31 * result + (pathFieldName != null ? pathFieldName.hashCode() : 0);
    result = 31 * result + (hiddenFieldName != null ? hiddenFieldName.hashCode() : 0);
    result = 31 * result
            + (lastModificationTimeFieldName != null ? lastModificationTimeFieldName.hashCode() : 0);
    result = 31 * result + (uriNameFieldName != null ? uriNameFieldName.hashCode() : 0);
    result = 31 * result + (rootUriNameFieldName != null ? rootUriNameFieldName.hashCode() : 0);
    result = 31 * result + (extensionFieldName != null ? extensionFieldName.hashCode() : 0);
    return result;
}
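
One caveat that applies to the Object[] overload used in several examples above: Arrays.hashCode consults only the elements' own hashCode() methods, so nested arrays fall back to identity hashing, while Arrays.deepHashCode recurses into them. A standalone sketch of the difference:

import java.util.Arrays;

public class DeepHashCodeDemo {
    public static void main(String[] args) {
        int[][] a = { { 1, 2 }, { 3, 4 } };
        int[][] b = { { 1, 2 }, { 3, 4 } };

        // Arrays.hashCode treats each inner int[] as an opaque Object, so
        // equal nested contents still produce different (identity-based) hashes.
        System.out.println(Arrays.hashCode(a) == Arrays.hashCode(b));         // almost certainly false

        // Arrays.deepHashCode recurses into nested arrays.
        System.out.println(Arrays.deepHashCode(a) == Arrays.deepHashCode(b)); // true
    }
}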