Example usage for the java.lang.IllegalAccessError(String) constructor

Introduction

This page collects example usages of the java.lang.IllegalAccessError(String) constructor, drawn from real-world source files.

Prototype

public IllegalAccessError(String s) 

Document

Constructs an IllegalAccessError with the specified detail message.
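
Before the collected examples, here is a minimal self-contained sketch of what the constructor does; the class and guard flag below are hypothetical, chosen only for illustration. Note that IllegalAccessError extends Error (via IncompatibleClassChangeError), not Exception, so it is unchecked and is normally thrown rather than caught.

public class Demo {
    private boolean initialized = false; // hypothetical guard flag

    public void start() {
        if (!initialized) {
            // The String argument becomes the detail message returned by getMessage().
            throw new IllegalAccessError("start() called before initialization");
        }
    }

    public static void main(String[] args) {
        try {
            new Demo().start();
        } catch (IllegalAccessError e) {
            System.out.println(e.getMessage()); // prints: start() called before initialization
        }
    }
}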

Usage

From source file:com.baidu.cc.web.rpc.ConfigServerServiceImpl.java

/**
 * Checks whether the given tag matches the checksum of the specified version.
 * Throws an IllegalAccessError if no version with the given id exists.
 * 
 * @param user
 *            user name
 * @param password
 *            password
 * @param version
 *            version id
 * @param tag
 *            tag value
 * @return false if the tag is blank or does not match the version checksum
 */
@Override
public boolean checkVersionTag(String user, String password, Long version, String tag) {
    User u = authenticate(user, password);

    if (StringUtils.isBlank(tag)) {
        if (LOGGER.isInfoEnabled()) {
            LOGGER.info("To check tag value is blank will always return false.");
        }
        return false;
    }

    Version v = versionService.findById(version);
    if (v == null) {
        throw new IllegalAccessError("No version id: '" + version + "' found.");
    }

    authorizeProject(u, v.getProjectId());

    return tag.equals(v.getCheckSum());
}
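
Worth noting in the example above: a missing version is signaled by throwing IllegalAccessError, which is an Error rather than an Exception, so it propagates unchecked across the RPC boundary. A hypothetical caller that prefers to translate the failure into a boolean might look like this sketch; the wrapper and the service reference are assumptions, not part of the original source.

// Hypothetical client-side wrapper around the RPC call above.
public boolean safeCheckVersionTag(ConfigServerService service, String user, String password,
        Long version, String tag) {
    try {
        return service.checkVersionTag(user, password, version, tag);
    } catch (IllegalAccessError e) {
        // Thrown when the version id does not exist or authorization fails.
        return false;
    }
}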

From source file:sf.net.experimaestro.scheduler.Resource.java

/**
 * Returns the main output file for this resource
 */
public Path outputFile() throws IOException {
    throw new IllegalAccessError("No output file for resources of type " + this.getClass());
}

From source file:de.tudarmstadt.lt.lm.lucenebased.CountingStringLM.java

public Document getNgramLuceneDoc(List<String> ngram) {
    if (ngram == null)
        throw new IllegalAccessError("Ngram is null.");
    if (ngram.isEmpty())
        return _UNKOWN_NGRAM_LUCENE_DOCUMENT;
    String ngram_str = StringUtils.join(ngram, ' ');
    return getNgramLuceneDoc(ngram_str);
}

From source file:de.tudarmstadt.lt.lm.lucenebased.CountingStringLM.java

public Document getNgramLuceneDoc(String ngram_str) {
    if (ngram_str == null)
        throw new IllegalAccessError("Ngram is null.");
    if (ngram_str.isEmpty())
        return _UNKOWN_NGRAM_LUCENE_DOCUMENT;
    Query query = new TermQuery(new Term("ngram", ngram_str));
    try {
        Document doc = null;
        ScoreDoc[] hits = _searcher_ngram.search(query, 2).scoreDocs;
        if (hits.length >= 1) {
            if (hits.length > 1)
                LOG.warn("Found more than one entry for '{}', expected only one.", ngram_str);
            doc = _searcher_ngram.doc(hits[0].doc);
            return doc;
        }
    } catch (IOException e) {
        LOG.error("Could not get ngram {}. Luceneindex failed.", ngram_str, e);
    }
    return _UNKOWN_NGRAM_LUCENE_DOCUMENT;
}
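
A detail worth calling out: the lookup asks Lucene for up to two hits even though exactly one is expected, purely so that accidental duplicates in the index can be detected and logged. A hypothetical call site, where the lm instance and the n-gram are illustrative assumptions:

// Illustrative usage of the lookup above; 'lm' is an assumed CountingStringLM instance.
Document doc = lm.getNgramLuceneDoc(Arrays.asList("new", "york"));
System.out.println(doc.get("ngram")); // "new york" if indexed, otherwise the unknown-ngram sentinel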

From source file:com.baidu.cc.web.rpc.ConfigServerServiceImpl.java

/**
 * import configuration items to the configuration center server.
 *
 * @param user
 *            user name
 * @param password
 *            password
 * @param version
 *            the target version id to import
 * @param configItems
 *            imported configuration items
 */
@Override
public void importConfigItems(String user, String password, Long version, Map<String, String> configItems) {

    if (MapUtils.isEmpty(configItems)) {
        if (LOGGER.isInfoEnabled()) {
            LOGGER.info("import a empty map will be ingored");
        }
        return;
    }

    authenticate(user, password);

    // check version exist
    Version v = versionService.findById(version);
    if (v == null) {
        throw new IllegalAccessError("import config failed. No version id '" + version + "' found.");
    }

    List<ConfigGroup> groups = configGroupService.findByVersionId(version);
    if (CollectionUtils.isNotEmpty(groups)) {
        throw new IllegalAccessError("import config failed. version id '" + version + "' is not empty.");
    }

    ConfigGroup group = ConfigGroup.newGroup();
    group.setVersionId(v.getId());
    configGroupService.saveEntity(group);

    // get configuration group id
    List<ConfigItem> items = group.newItems(configItems);
    for (ConfigItem configItem : items) {
        configItemService.saveEntity(configItem);
    }

}
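
A hypothetical invocation of the import, with assumed credentials, version id, and configuration keys; all values below are illustrative only:

// Illustrative call; 'configServerService' is an assumed reference to the service above.
Map<String, String> items = new HashMap<>();
items.put("db.url", "jdbc:mysql://localhost:3306/app");
items.put("db.user", "app");
configServerService.importConfigItems("admin", "secret", 42L, items);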

From source file:biz.wolschon.finance.jgnucash.mysql.impl.GnucashDatabase.java

/**
 * {@inheritDoc}
 * @see biz.wolschon.fileformats.gnucash.GnucashWritableFile#getRootElement()
 */
@Override
public GncV2 getRootElement() {
    LOG.info("getRootElement()");
    throw new IllegalAccessError("getRootElement() is not implemented yet!");
    //        Collection<? extends GnucashWritableAccount> writableRootAccounts = getWritableRootAccounts();
    //        if (writableRootAccounts.size() == 1) {
    //            return writableRootAccounts.iterator().next();
    //        }
    //        LOG.severe("we have " + writableRootAccounts.size() + " root-accounts!");
    //        return null;
}

From source file:org.apache.hadoop.hbase.regionserver.HStoreFile.java

/**
 * Opens reader on this store file. Called by Constructor.
 * @throws IOException
 * @see #closeReader(boolean)
 */
private void open() throws IOException {
    if (this.reader != null) {
        throw new IllegalAccessError("Already open");
    }

    // Open the StoreFile.Reader
    this.reader = fileInfo.open(this.fs, this.cacheConf, false, noReadahead ? 0L : -1L, primaryReplica,
            refCount, true);

    // Load up indices and fileinfo. This also loads Bloom filter type.
    metadataMap = Collections.unmodifiableMap(this.reader.loadFileInfo());

    // Read in our metadata.
    byte[] b = metadataMap.get(MAX_SEQ_ID_KEY);
    if (b != null) {
        // By convention, if this is a half HFile, the top half has a sequence
        // number greater than the bottom half, which is why we add one below.
        // It's done in case the two halves are ever merged back together (rare).
        // Without it, on open of the store, since store files are distinguished
        // by sequence id, one half would subsume the other.
        this.sequenceid = Bytes.toLong(b);
        if (fileInfo.isTopReference()) {
            this.sequenceid += 1;
        }
    }

    if (isBulkLoadResult()) {
        // generate the sequenceId from the fileName
        // fileName is of the form <randomName>_SeqId_<id-when-loaded>_
        String fileName = this.getPath().getName();
        // Use lastIndexOf() to get the last, most recent bulk load seqId.
        int startPos = fileName.lastIndexOf("SeqId_");
        if (startPos != -1) {
            this.sequenceid = Long
                    .parseLong(fileName.substring(startPos + 6, fileName.indexOf('_', startPos + 6)));
            // Handle reference files as done above.
            if (fileInfo.isTopReference()) {
                this.sequenceid += 1;
            }
        }
        // SKIP_RESET_SEQ_ID only works in bulk loaded file.
        // In mob compaction, the hfile where the cells contain the path of a new mob file is bulk
        // loaded to hbase, these cells have the same seqIds with the old ones. We do not want
        // to reset new seqIds for them since this might make a mess of the visibility of cells that
        // have the same row key but different seqIds.
        boolean skipResetSeqId = isSkipResetSeqId(metadataMap.get(SKIP_RESET_SEQ_ID));
        if (skipResetSeqId) {
            // increase the seqId when it is a bulk loaded file from mob compaction.
            this.sequenceid += 1;
        }
        this.reader.setSkipResetSeqId(skipResetSeqId);
        this.reader.setBulkLoaded(true);
    }
    this.reader.setSequenceID(this.sequenceid);

    b = metadataMap.get(HFile.Writer.MAX_MEMSTORE_TS_KEY);
    if (b != null) {
        this.maxMemstoreTS = Bytes.toLong(b);
    }

    b = metadataMap.get(MAJOR_COMPACTION_KEY);
    if (b != null) {
        boolean mc = Bytes.toBoolean(b);
        if (this.majorCompaction == null) {
            this.majorCompaction = new AtomicBoolean(mc);
        } else {
            this.majorCompaction.set(mc);
        }
    } else {
        // Presume it is not major compacted if it doesn't explicitly say so.
        // HFileOutputFormat explicitly sets the major compacted key.
        this.majorCompaction = new AtomicBoolean(false);
    }

    b = metadataMap.get(EXCLUDE_FROM_MINOR_COMPACTION_KEY);
    this.excludeFromMinorCompaction = (b != null && Bytes.toBoolean(b));

    BloomType hfileBloomType = reader.getBloomFilterType();
    if (cfBloomType != BloomType.NONE) {
        reader.loadBloomfilter(BlockType.GENERAL_BLOOM_META);
        if (hfileBloomType != cfBloomType) {
            LOG.info("HFile Bloom filter type for " + reader.getHFileReader().getName() + ": " + hfileBloomType
                    + ", but " + cfBloomType + " specified in column family " + "configuration");
        }
    } else if (hfileBloomType != BloomType.NONE) {
        LOG.info("Bloom filter turned off by CF config for " + reader.getHFileReader().getName());
    }

    // load delete family bloom filter
    reader.loadBloomfilter(BlockType.DELETE_FAMILY_BLOOM_META);

    try {
        this.reader.timeRange = TimeRangeTracker.getTimeRange(metadataMap.get(TIMERANGE_KEY));
    } catch (IllegalArgumentException e) {
        LOG.error("Error reading timestamp range data from meta -- " + "proceeding without", e);
        this.reader.timeRange = null;
    }
    // initialize so we can reuse them after reader closed.
    firstKey = reader.getFirstKey();
    lastKey = reader.getLastKey();
    comparator = reader.getComparator();
}
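
The bulk-load naming convention used in open() above (file names of the form <randomName>_SeqId_<id-when-loaded>_) can be exercised in isolation. A minimal sketch with a hypothetical helper that mirrors the lastIndexOf-based parsing; it is not part of HBase itself:

// Hypothetical standalone version of the sequence-id parsing in open().
static long parseBulkLoadSeqId(String fileName, long fallback) {
    int startPos = fileName.lastIndexOf("SeqId_");
    if (startPos == -1) {
        return fallback; // not a bulk-loaded file name
    }
    int idStart = startPos + "SeqId_".length();
    // The id runs from just after the marker up to the next '_'.
    return Long.parseLong(fileName.substring(idStart, fileName.indexOf('_', idStart)));
}

// Example: parseBulkLoadSeqId("a1b2c3_SeqId_42_", -1L) returns 42.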

From source file:de.tudarmstadt.lt.lm.lucenebased.CountingStringLM.java

public Document getWordLuceneDoc(String word) {
    if (word == null)
        throw new IllegalAccessError("Word is null.");
    if (de.tudarmstadt.lt.utilities.StringUtils.trim(word).isEmpty())
        return _UNKOWN_WORD_LUCENE_DOCUMENT;
    Query query = new TermQuery(new Term("word", word));
    try {
        Document doc = null;
        ScoreDoc[] hits = _searcher_vocab.search(query, 2).scoreDocs;
        if (hits.length >= 1) {
            if (hits.length > 1)
                LOG.warn("Found more than one entry for '{}', expected only one.", word);
            doc = _searcher_vocab.doc(hits[0].doc);
            return doc;
        }
    } catch (IOException e) {
        LOG.error("Could not get word {}. Luceneindex failed.", word, e);
    }
    return _UNKOWN_WORD_LUCENE_DOCUMENT;
}

From source file:net.grandcentrix.thirtyinch.TiPresenter.java

/**
 * The view is now attached and ready to receive events.
 *
 * @see #onDetachView()
 * @see #attachView(TiView)
 */
protected void onAttachView(@NonNull V view) {
    if (mCalled) {
        throw new IllegalAccessError("don't call #onAttachView(TiView) directly, call #attachView(TiView)");
    }
    mCalled = true;
}
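
The mCalled flag above implements a call-through guard: the framework's attachView(TiView) is expected to reset the flag, invoke onAttachView(TiView), and then verify that every override in the chain called up to this base implementation. A simplified sketch of how such a guard might be wired, with assumed class and method names rather than the actual ThirtyInch internals:

// Simplified sketch of the guard pattern; names are assumptions, not ThirtyInch source.
abstract class GuardedPresenter<V> {
    private boolean mCalled;

    final void attachView(V view) {
        mCalled = false;
        onAttachView(view);
        if (!mCalled) {
            // An override forgot to call super.onAttachView(view).
            throw new IllegalStateException("super.onAttachView(view) was not called");
        }
    }

    protected void onAttachView(V view) {
        if (mCalled) {
            // Reached when onAttachView is invoked directly instead of through attachView.
            throw new IllegalAccessError("don't call #onAttachView(TiView) directly, call #attachView(TiView)");
        }
        mCalled = true;
    }
}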

From source file:org.apache.hadoop.hbase.regionserver.StoreFile.java

/**
 * Opens reader on this store file.  Called by Constructor.
 * @return Reader for the store file.
 * @throws IOException
 * @see #closeReader(boolean)
 */
private Reader open() throws IOException {
    if (this.reader != null) {
        throw new IllegalAccessError("Already open");
    }

    // Open the StoreFile.Reader
    this.reader = fileInfo.open(this.fs, this.cacheConf);

    // Load up indices and fileinfo. This also loads Bloom filter type.
    metadataMap = Collections.unmodifiableMap(this.reader.loadFileInfo());

    // Read in our metadata.
    byte[] b = metadataMap.get(MAX_SEQ_ID_KEY);
    if (b != null) {
        // By convention, if this is a half HFile, the top half has a sequence
        // number greater than the bottom half, which is why we add one below.
        // It's done in case the two halves are ever merged back together (rare).
        // Without it, on open of the store, since store files are distinguished
        // by sequence id, one half would subsume the other.
        this.sequenceid = Bytes.toLong(b);
        if (fileInfo.isTopReference()) {
            this.sequenceid += 1;
        }
    }

    if (isBulkLoadResult()) {
        // generate the sequenceId from the fileName
        // fileName is of the form <randomName>_SeqId_<id-when-loaded>_
        String fileName = this.getPath().getName();
        int startPos = fileName.indexOf("SeqId_");
        if (startPos != -1) {
            this.sequenceid = Long
                    .parseLong(fileName.substring(startPos + 6, fileName.indexOf('_', startPos + 6)));
            // Handle reference files as done above.
            if (fileInfo.isTopReference()) {
                this.sequenceid += 1;
            }
        }
    }
    this.reader.setSequenceID(this.sequenceid);

    b = metadataMap.get(HFileWriterV2.MAX_MEMSTORE_TS_KEY);
    if (b != null) {
        this.maxMemstoreTS = Bytes.toLong(b);
    }

    b = metadataMap.get(MAJOR_COMPACTION_KEY);
    if (b != null) {
        boolean mc = Bytes.toBoolean(b);
        if (this.majorCompaction == null) {
            this.majorCompaction = new AtomicBoolean(mc);
        } else {
            this.majorCompaction.set(mc);
        }
    } else {
        // Presume it is not major compacted if it doesn't explicitly say so.
        // HFileOutputFormat explicitly sets the major compacted key.
        this.majorCompaction = new AtomicBoolean(false);
    }

    b = metadataMap.get(EXCLUDE_FROM_MINOR_COMPACTION_KEY);
    this.excludeFromMinorCompaction = (b != null && Bytes.toBoolean(b));

    BloomType hfileBloomType = reader.getBloomFilterType();
    if (cfBloomType != BloomType.NONE) {
        reader.loadBloomfilter(BlockType.GENERAL_BLOOM_META);
        if (hfileBloomType != cfBloomType) {
            LOG.info("HFile Bloom filter type for " + reader.getHFileReader().getName() + ": " + hfileBloomType
                    + ", but " + cfBloomType + " specified in column family " + "configuration");
        }
    } else if (hfileBloomType != BloomType.NONE) {
        LOG.info("Bloom filter turned off by CF config for " + reader.getHFileReader().getName());
    }

    // load delete family bloom filter
    reader.loadBloomfilter(BlockType.DELETE_FAMILY_BLOOM_META);

    try {
        byte[] timerangeBytes = metadataMap.get(TIMERANGE_KEY);
        if (timerangeBytes != null) {
            this.reader.timeRangeTracker = new TimeRangeTracker();
            Writables.copyWritable(timerangeBytes, this.reader.timeRangeTracker);
        }
    } catch (IllegalArgumentException e) {
        LOG.error("Error reading timestamp range data from meta -- " + "proceeding without", e);
        this.reader.timeRangeTracker = null;
    }
    return this.reader;
}