List of usage examples for java.math BigInteger compareTo
public int compareTo(BigInteger val)
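Before the project-sourced examples below, a minimal standalone sketch (not from any of the listed sources) of the method's contract: compareTo returns -1, 0, or 1 as this BigInteger is numerically less than, equal to, or greater than val, so callers usually test the sign of the result rather than a specific value.

import java.math.BigInteger;

public class CompareToDemo {
    public static void main(String[] args) {
        BigInteger a = new BigInteger("12345678901234567890");
        BigInteger b = BigInteger.TEN;

        System.out.println(a.compareTo(b)); // 1  (a > b)
        System.out.println(b.compareTo(a)); // -1 (b < a)
        System.out.println(a.compareTo(a)); // 0  (equal)

        // Typical usage: test the sign, e.g. a range check.
        BigInteger max = BigInteger.valueOf(Long.MAX_VALUE);
        if (a.compareTo(BigInteger.ZERO) < 0 || a.compareTo(max) > 0) {
            throw new IllegalArgumentException("value out of [0, Long.MAX_VALUE]");
        }
    }
}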
From source file:org.rifidi.emulator.reader.thingmagic.database.impl.row.DBTagIDRow.java
@Override
public int compareToValue(String key, String testValue) {
    // TODO Auto-generated method stub
    // TODO: add a check to see if the format is correct for ID.
    if (key.equals(ID)) {
        BigInteger id = new BigInteger(tag.toString().replace(" ", ""), 16);
        BigInteger id2 = new BigInteger(testValue.substring(2), 16);
        return id.compareTo(id2);
    }
    if (key.equals(PROTOCOL_ID)) {
        int protocolID = 0;
        switch (tag.getTag().getTagGeneration()) {
        case GEN1:
            protocolID = 1;
            break;
        case GEN2:
            protocolID = 12;
        }
        int protocolIDTest = 0;
        if (testValue.equalsIgnoreCase("'GEN2'")) {
            protocolIDTest = 12;
        } else if (testValue.equalsIgnoreCase("'EPC1'")) {
            protocolIDTest = 1;
        } else {
            // TODO: Deal with when this fails.
            try {
                protocolIDTest = Integer.valueOf(testValue);
            } catch (NumberFormatException e) {
                // TODO: Deal with this.
                logger.debug(e);
            }
        }
        return protocolID - protocolIDTest;
    }
    logger.debug("Value: " + get(key));
    logger.debug("Test Value: " + testValue);
    return Integer.valueOf(get(key)) - Integer.valueOf(testValue);
}
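The example above compares hex-encoded tag IDs numerically rather than as strings. A minimal sketch (values invented for illustration) of why that matters: BigInteger comparison tolerates case and leading-zero differences that a plain String comparison does not.

import java.math.BigInteger;

public class HexIdCompareSketch {
    public static void main(String[] args) {
        BigInteger id1 = new BigInteger("00AF3C", 16);
        BigInteger id2 = new BigInteger("af3c", 16);

        System.out.println("00AF3C".equals("af3c")); // false: strings differ
        System.out.println(id1.compareTo(id2));      // 0: numerically equal
    }
}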
From source file:org.apache.accumulo.pig.Bytes.java
/**
 * Iterate over keys within the passed inclusive range.
 */
public static Iterable<byte[]> iterateOnSplits(final byte[] a, final byte[] b, final int num) {
    byte[] aPadded;
    byte[] bPadded;
    if (a.length < b.length) {
        aPadded = padTail(a, b.length - a.length);
        bPadded = b;
    } else if (b.length < a.length) {
        aPadded = a;
        bPadded = padTail(b, a.length - b.length);
    } else {
        aPadded = a;
        bPadded = b;
    }
    if (compareTo(aPadded, bPadded) >= 0) {
        throw new IllegalArgumentException("b <= a");
    }
    if (num <= 0) {
        throw new IllegalArgumentException("num cannot be <= 0");
    }
    byte[] prependHeader = { 1, 0 };
    final BigInteger startBI = new BigInteger(add(prependHeader, aPadded));
    final BigInteger stopBI = new BigInteger(add(prependHeader, bPadded));
    final BigInteger diffBI = stopBI.subtract(startBI);
    final BigInteger splitsBI = BigInteger.valueOf(num + 1);
    if (diffBI.compareTo(splitsBI) < 0) {
        return null;
    }
    final BigInteger intervalBI;
    try {
        intervalBI = diffBI.divide(splitsBI);
    } catch (Exception e) {
        LOG.error("Exception caught during division", e);
        return null;
    }
    final Iterator<byte[]> iterator = new Iterator<byte[]>() {
        private int i = -1;

        @Override
        public boolean hasNext() {
            return i < num + 1;
        }

        @Override
        public byte[] next() {
            i++;
            if (i == 0) {
                return a;
            }
            if (i == num + 1) {
                return b;
            }
            BigInteger curBI = startBI.add(intervalBI.multiply(BigInteger.valueOf(i)));
            byte[] padded = curBI.toByteArray();
            if (padded[1] == 0) {
                padded = tail(padded, padded.length - 2);
            } else {
                padded = tail(padded, padded.length - 1);
            }
            return padded;
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    };
    return new Iterable<byte[]>() {
        @Override
        public Iterator<byte[]> iterator() {
            return iterator;
        }
    };
}
From source file:org.openspotlight.graph.internal.NodeAndLinkSupport.java
@SuppressWarnings("unchecked")
public static <T extends Node> T createNode(final PartitionFactory factory, final StorageSession session,
        final String contextId, final String parentId, final Class<T> clazz, final String name,
        final boolean needsToVerifyType, final Iterable<Class<? extends Link>> linkTypesForLinkDeletion,
        final Iterable<Class<? extends Link>> linkTypesForLinkedNodeDeletion) {
    final Map<String, Class<? extends Serializable>> propertyTypes = newHashMap();
    final Map<String, Serializable> propertyValues = newHashMap();
    final PropertyDescriptor[] descriptors = PropertyUtils.getPropertyDescriptors(clazz);
    StorageNode node = null;
    if (contextId == null) {
        throw new IllegalStateException();
    }
    final Partition partition = factory.getPartition(contextId);
    NodeKey internalNodeKey;
    final Class<? extends Node> targetNodeType = findTargetClass(clazz);
    if (session != null) {
        internalNodeKey = session.withPartition(partition).createNodeKeyWithType(targetNodeType.getName())
                .withSimpleKey(NAME, name).andCreate();
        node = session.withPartition(partition).createCriteria().withUniqueKey(internalNodeKey)
                .buildCriteria().andSearchUnique(session);
    } else {
        internalNodeKey = new NodeKeyBuilderImpl(targetNodeType.getName(), partition)
                .withSimpleKey(NAME, name).andCreate();
    }
    for (final PropertyDescriptor d : descriptors) {
        if (d.getName().equals("class")) {
            continue;
        }
        propertyTypes.put(d.getName(),
                (Class<? extends Serializable>) Reflection.findClassWithoutPrimitives(d.getPropertyType()));
        final Object rawValue = node != null ? node.getPropertyValueAsString(session, d.getName()) : null;
        final Serializable value = (Serializable) (rawValue != null
                ? Conversion.convert(rawValue, d.getPropertyType()) : null);
        propertyValues.put(d.getName(), value);
    }
    int weigthValue;
    final Set<String> stNodeProperties = node != null ? node.getPropertyNames(session)
            : Collections.<String>emptySet();
    if (stNodeProperties.contains(WEIGTH_VALUE)) {
        weigthValue = Conversion.convert(node.getPropertyValueAsString(session, WEIGTH_VALUE), Integer.class);
    } else {
        weigthValue = findInitialWeight(clazz);
    }
    Class<? extends Node> savedClass = null;
    if (stNodeProperties.contains(CORRECT_CLASS)) {
        savedClass = Conversion.convert(node.getPropertyValueAsString(session, CORRECT_CLASS), Class.class);
    }
    final BigInteger savedClassNumericType = savedClass != null ? findNumericType(savedClass) : null;
    final BigInteger proposedClassNumericType = findNumericType(clazz);
    final Class<? extends Node> classToUse = savedClassNumericType != null
            && savedClassNumericType.compareTo(proposedClassNumericType) > 0 ? savedClass : clazz;
    final NodeImpl internalNode = new NodeImpl(name, classToUse, internalNodeKey.getKeyAsString(),
            propertyTypes, propertyValues, parentId, contextId, weigthValue);
    if (node != null) {
        internalNode.cachedEntry = new WeakReference<StorageNode>(node);
        if (needsToVerifyType) {
            fixTypeData(session, classToUse, node);
        }
        final String captionAsString = node.getPropertyValueAsString(session, CAPTION);
        if (captionAsString != null) {
            internalNode.setCaption(captionAsString);
        }
    }
    final Enhancer e = new Enhancer();
    e.setSuperclass(classToUse);
    e.setInterfaces(new Class<?>[] { PropertyContainerMetadata.class });
    e.setCallback(new PropertyContainerInterceptor(internalNode));
    return (T) e.create(new Class[0], new Object[0]);
}
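The ternary around compareTo above is a max-by-key selection: keep whichever class has the larger numeric type. A minimal sketch of the same idiom on plain values (the numbers are invented), including BigInteger.max, which does this directly when no keyed object is involved:

import java.math.BigInteger;

public class MaxByCompareToSketch {
    public static void main(String[] args) {
        BigInteger saved = BigInteger.valueOf(42);
        BigInteger proposed = BigInteger.valueOf(17);

        // Same shape as createNode(): keep the side with the larger key.
        BigInteger winner = saved.compareTo(proposed) > 0 ? saved : proposed;
        System.out.println(winner); // 42

        // For the values themselves, BigInteger.max does this directly.
        System.out.println(saved.max(proposed)); // 42
    }
}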
From source file:co.rsk.validators.ProofOfWorkRule.java
@Override
public boolean isValid(BlockHeader header) {
    // TODO: refactor this and move it to another class. Change the global ProofOfWorkRule to AuthenticationRule.
    // TODO: Make ProofOfWorkRule one of the classes that inherits from AuthenticationRule.
    if (isFallbackMiningPossibleAndBlockSigned(header)) {
        boolean isValidFallbackSignature = validFallbackBlockSignature(constants, header,
                header.getBitcoinMergedMiningHeader());
        if (!isValidFallbackSignature) {
            logger.warn("Fallback signature failed. Header {}", header.getShortHash());
        }
        return isValidFallbackSignature;
    }
    co.rsk.bitcoinj.core.NetworkParameters bitcoinNetworkParameters = bridgeConstants.getBtcParams();
    MerkleProofValidator mpValidator;
    try {
        if (blockchainConfig.getConfigForBlock(header.getNumber()).isRskip92()) {
            mpValidator = new Rskip92MerkleProofValidator(header.getBitcoinMergedMiningMerkleProof());
        } else {
            mpValidator = new GenesisMerkleProofValidator(bitcoinNetworkParameters,
                    header.getBitcoinMergedMiningMerkleProof());
        }
    } catch (RuntimeException ex) {
        logger.warn("Merkle proof can't be validated. Header {}", header.getShortHash(), ex);
        return false;
    }
    byte[] bitcoinMergedMiningCoinbaseTransactionCompressed = header
            .getBitcoinMergedMiningCoinbaseTransaction();
    if (bitcoinMergedMiningCoinbaseTransactionCompressed == null) {
        logger.warn("Compressed coinbase transaction does not exist. Header {}", header.getShortHash());
        return false;
    }
    if (header.getBitcoinMergedMiningHeader() == null) {
        logger.warn("Bitcoin merged mining header does not exist. Header {}", header.getShortHash());
        return false;
    }
    BtcBlock bitcoinMergedMiningBlock = bitcoinNetworkParameters.getDefaultSerializer()
            .makeBlock(header.getBitcoinMergedMiningHeader());
    BigInteger target = DifficultyUtils.difficultyToTarget(header.getDifficulty());
    BigInteger bitcoinMergedMiningBlockHashBI = bitcoinMergedMiningBlock.getHash().toBigInteger();
    if (bitcoinMergedMiningBlockHashBI.compareTo(target) > 0) {
        logger.warn("Hash {} is higher than target {}", bitcoinMergedMiningBlockHashBI.toString(16),
                target.toString(16));
        return false;
    }
    byte[] bitcoinMergedMiningCoinbaseTransactionMidstate = new byte[RskMiningConstants.MIDSTATE_SIZE];
    System.arraycopy(bitcoinMergedMiningCoinbaseTransactionCompressed, 0,
            bitcoinMergedMiningCoinbaseTransactionMidstate, 8, RskMiningConstants.MIDSTATE_SIZE_TRIMMED);
    byte[] bitcoinMergedMiningCoinbaseTransactionTail = new byte[bitcoinMergedMiningCoinbaseTransactionCompressed.length
            - RskMiningConstants.MIDSTATE_SIZE_TRIMMED];
    System.arraycopy(bitcoinMergedMiningCoinbaseTransactionCompressed,
            RskMiningConstants.MIDSTATE_SIZE_TRIMMED, bitcoinMergedMiningCoinbaseTransactionTail, 0,
            bitcoinMergedMiningCoinbaseTransactionTail.length);
    byte[] expectedCoinbaseMessageBytes = org.bouncycastle.util.Arrays.concatenate(RskMiningConstants.RSK_TAG,
            header.getHashForMergedMining());
    List<Byte> bitcoinMergedMiningCoinbaseTransactionTailAsList = Arrays
            .asList(ArrayUtils.toObject(bitcoinMergedMiningCoinbaseTransactionTail));
    List<Byte> expectedCoinbaseMessageBytesAsList = Arrays
            .asList(ArrayUtils.toObject(expectedCoinbaseMessageBytes));
    int rskTagPosition = Collections.lastIndexOfSubList(bitcoinMergedMiningCoinbaseTransactionTailAsList,
            expectedCoinbaseMessageBytesAsList);
    if (rskTagPosition == -1) {
        logger.warn(
                "bitcoin coinbase transaction tail message does not contain expected RSKBLOCK:RskBlockHeaderHash. Expected: {} . Actual: {} .",
                Arrays.toString(expectedCoinbaseMessageBytes),
                Arrays.toString(bitcoinMergedMiningCoinbaseTransactionTail));
        return false;
    }
    /*
     * We check that there is no other block before the rsk tag, to avoid a possible malleability attack:
     * if we have a midstate with 10 blocks plus the rsk tag, we can also have another midstate with
     * 9 blocks, 64 bytes + the rsk tag, giving us two blocks with different hashes but the same SPV proof.
     */
    if (rskTagPosition >= 64) {
        logger.warn("bitcoin coinbase transaction tag position is bigger than expected 64. Actual: {}.",
                Integer.toString(rskTagPosition));
        return false;
    }
    List<Byte> rskTagAsList = Arrays.asList(ArrayUtils.toObject(RskMiningConstants.RSK_TAG));
    int lastTag = Collections.lastIndexOfSubList(bitcoinMergedMiningCoinbaseTransactionTailAsList,
            rskTagAsList);
    if (rskTagPosition != lastTag) {
        logger.warn("The valid RSK tag is not the last RSK tag. Tail: {}.",
                Arrays.toString(bitcoinMergedMiningCoinbaseTransactionTail));
        return false;
    }
    int remainingByteCount = bitcoinMergedMiningCoinbaseTransactionTail.length - rskTagPosition
            - RskMiningConstants.RSK_TAG.length - RskMiningConstants.BLOCK_HEADER_HASH_SIZE;
    if (remainingByteCount > RskMiningConstants.MAX_BYTES_AFTER_MERGED_MINING_HASH) {
        logger.warn("More than 128 bytes after RSK tag");
        return false;
    }
    // TODO: test
    long byteCount = Pack.bigEndianToLong(bitcoinMergedMiningCoinbaseTransactionMidstate, 8);
    long coinbaseLength = bitcoinMergedMiningCoinbaseTransactionTail.length + byteCount;
    if (coinbaseLength <= 64) {
        logger.warn("Coinbase transaction must always be greater than 64 bytes long. But it was: {}",
                coinbaseLength);
        return false;
    }
    SHA256Digest digest = new SHA256Digest(bitcoinMergedMiningCoinbaseTransactionMidstate);
    digest.update(bitcoinMergedMiningCoinbaseTransactionTail, 0,
            bitcoinMergedMiningCoinbaseTransactionTail.length);
    byte[] bitcoinMergedMiningCoinbaseTransactionOneRoundOfHash = new byte[32];
    digest.doFinal(bitcoinMergedMiningCoinbaseTransactionOneRoundOfHash, 0);
    Sha256Hash bitcoinMergedMiningCoinbaseTransactionHash = Sha256Hash
            .wrapReversed(Sha256Hash.hash(bitcoinMergedMiningCoinbaseTransactionOneRoundOfHash));
    if (!mpValidator.isValid(bitcoinMergedMiningBlock.getMerkleRoot(),
            bitcoinMergedMiningCoinbaseTransactionHash)) {
        logger.warn("bitcoin merkle branch doesn't match coinbase and state root");
        return false;
    }
    return true;
}
From source file:org.apache.tajo.querymaster.Repartitioner.java
public static void scheduleRangeShuffledFetches(TaskSchedulerContext schedulerContext, MasterPlan masterPlan,
        Stage stage, DataChannel channel, int maxNum) throws IOException {
    ExecutionBlock execBlock = stage.getBlock();
    ScanNode scan = execBlock.getScanNodes()[0];
    ExecutionBlock sampleChildBlock = masterPlan.getChild(stage.getId(), 0);
    SortNode sortNode = PlannerUtil.findTopNode(sampleChildBlock.getPlan(), NodeType.SORT);
    SortSpec[] sortSpecs = sortNode.getSortKeys();
    Schema sortSchema = SchemaBuilder.builder().addAll(channel.getShuffleKeys()).build();
    TupleRange[] ranges;
    int determinedTaskNum;
    // Calculate the maximum number of query ranges.
    TableStats totalStat = computeChildBlocksStats(stage.getContext(), masterPlan, stage.getId());
    // If there is an empty table in an inner join, it should return zero rows.
    if (totalStat.getNumBytes() == 0 && totalStat.getColumnStats().size() == 0) {
        return;
    }
    TupleRange mergedRange = TupleUtil.columnStatToRange(sortSpecs, sortSchema, totalStat.getColumnStats(),
            false);
    if (sortNode.getSortPurpose() == SortPurpose.STORAGE_SPECIFIED) {
        String dataFormat = PlannerUtil.getDataFormat(masterPlan.getLogicalPlan());
        CatalogService catalog = stage.getContext().getQueryMasterContext().getWorkerContext().getCatalog();
        LogicalRootNode rootNode = masterPlan.getLogicalPlan().getRootBlock().getRoot();
        TableDesc tableDesc = null;
        try {
            tableDesc = PlannerUtil.getTableDesc(catalog, rootNode.getChild());
        } catch (UndefinedTableException e) {
            throw new IOException("Can't get table meta data from catalog: "
                    + PlannerUtil.getStoreTableName(masterPlan.getLogicalPlan()));
        }
        Tablespace space = TablespaceManager.getAnyByScheme(dataFormat).get();
        ranges = space.getInsertSortRanges(stage.getContext().getQueryContext(), tableDesc,
                sortNode.getInSchema(), sortSpecs, mergedRange);
        determinedTaskNum = ranges.length;
    } else {
        RangePartitionAlgorithm partitioner = new UniformRangePartition(mergedRange, sortSpecs);
        BigInteger card = partitioner.getTotalCardinality();
        // If the range cardinality is less than the desired number of tasks,
        // set the number of tasks to the range cardinality.
        if (card.compareTo(BigInteger.valueOf(maxNum)) < 0) {
            LOG.info(stage.getId() + ", The range cardinality (" + card
                    + ") is less than the desired number of tasks (" + maxNum + ")");
            determinedTaskNum = card.intValue();
        } else {
            determinedTaskNum = maxNum;
        }
        LOG.info(stage.getId() + ", Try to divide " + mergedRange + " into " + determinedTaskNum
                + " sub ranges (total units: " + determinedTaskNum + ")");
        ranges = partitioner.partition(determinedTaskNum);
        if (ranges == null) {
            throw new NullPointerException("ranges is null on " + stage.getId() + " stage.");
        }
        if (ranges.length == 0) {
            LOG.warn(stage.getId() + " no range infos.");
        }
        TupleUtil.setMaxRangeIfNull(sortSpecs, sortSchema, totalStat.getColumnStats(), ranges);
        if (LOG.isDebugEnabled()) {
            for (TupleRange eachRange : ranges) {
                LOG.debug(stage.getId() + " range: " + eachRange.getStart() + " ~ " + eachRange.getEnd());
            }
        }
    }
    // TODO - We should remove the dummy fragment.
    FileFragment dummyFragment = new FileFragment(scan.getTableName(), new Path("/dummy"), 0, 0,
            new String[] { UNKNOWN_HOST });
    Stage.scheduleFragment(stage, dummyFragment);
    Map<Pair<PullHost, ExecutionBlockId>, FetchImpl> fetches = new HashMap<>();
    List<ExecutionBlock> childBlocks = masterPlan.getChilds(stage.getId());
    for (ExecutionBlock childBlock : childBlocks) {
        Stage childExecSM = stage.getContext().getStage(childBlock.getId());
        for (Task qu : childExecSM.getTasks()) {
            for (IntermediateEntry p : qu.getIntermediateData()) {
                Pair<PullHost, ExecutionBlockId> key = new Pair<>(p.getPullHost(), childBlock.getId());
                if (fetches.containsKey(key)) {
                    fetches.get(key).addPart(p.getTaskId(), p.getAttemptId());
                } else {
                    FetchImpl fetch = new FetchImpl(scan.getTableName(), p.getPullHost(), RANGE_SHUFFLE,
                            childBlock.getId(), 0);
                    fetch.addPart(p.getTaskId(), p.getAttemptId());
                    fetches.put(key, fetch);
                }
            }
        }
    }
    SortedMap<TupleRange, Collection<FetchProto>> map = new TreeMap<>();
    Set<FetchProto> fetchSet;
    RowStoreUtil.RowStoreEncoder encoder = RowStoreUtil.createEncoder(sortSchema);
    for (int i = 0; i < ranges.length; i++) {
        fetchSet = new HashSet<>();
        RangeParam rangeParam = new RangeParam(ranges[i], i == (ranges.length - 1), encoder);
        for (FetchImpl fetch : fetches.values()) {
            FetchImpl copy = null;
            try {
                copy = fetch.clone();
            } catch (CloneNotSupportedException e) {
                throw new RuntimeException(e);
            }
            copy.setRangeParams(rangeParam);
            fetchSet.add(copy.getProto());
        }
        map.put(ranges[i], fetchSet);
    }
    scheduleFetchesByRoundRobin(stage, map, scan.getTableName(), determinedTaskNum);
    schedulerContext.setEstimatedTaskNum(determinedTaskNum);
}
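The cardinality check above clamps the task count: BigInteger.intValue() is only safe there because the preceding compareTo proved the value is below an int-sized bound. A minimal sketch of the same shape (method name and values invented):

import java.math.BigInteger;

public class TaskCountClampSketch {
    // Same shape as the Repartitioner logic above: never schedule more
    // tasks than the range cardinality allows.
    static int determineTaskNum(BigInteger cardinality, int maxNum) {
        if (cardinality.compareTo(BigInteger.valueOf(maxNum)) < 0) {
            return cardinality.intValue(); // safe: cardinality < maxNum <= Integer.MAX_VALUE
        }
        return maxNum;
    }

    public static void main(String[] args) {
        System.out.println(determineTaskNum(BigInteger.valueOf(3), 10));     // 3
        System.out.println(determineTaskNum(new BigInteger("1000000"), 10)); // 10
    }
}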
From source file:org.limewire.mojito.util.DHTSizeEstimator.java
/**
 * Adds the approximate DHT size as returned by a remote Node.
 * The average of the remote DHT sizes is incorporated into
 * our local computation.
 */
public synchronized void addEstimatedRemoteSize(BigInteger remoteSize) {
    if (!ContextSettings.COUNT_REMOTE_SIZE.getValue()) {
        // Clear the list of remotely estimated DHT sizes as they're
        // no longer needed.
        remoteSizeHistory.clear();
        return;
    }
    if (remoteSize.compareTo(BigInteger.ZERO) == 0) {
        return;
    }
    if (remoteSize.compareTo(BigInteger.ZERO) < 0 || remoteSize.compareTo(MAXIMUM) > 0) {
        if (LOG.isWarnEnabled()) {
            LOG.warn(remoteSize + " is an illegal argument");
        }
        return;
    }
    remoteSizeHistory.add(remoteSize);
    // Adjust the size of the List. The Setting is SIMPP-able
    // and may change!
    int maxRemoteHistorySize = ContextSettings.MAX_REMOTE_HISTORY_SIZE.getValue();
    while (remoteSizeHistory.size() > maxRemoteHistorySize && !remoteSizeHistory.isEmpty()) {
        remoteSizeHistory.remove(0);
    }
}
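A side note on the zero and sign tests above: BigInteger.signum() returns -1, 0, or 1 and can replace compareTo(BigInteger.ZERO) in both checks. A minimal sketch of the equivalent predicate; MAXIMUM below is a stand-in for the estimator's upper-bound constant, not the actual Mojito value:

import java.math.BigInteger;

public class SignumSketch {
    // Stand-in for the estimator's upper bound.
    private static final BigInteger MAXIMUM = BigInteger.valueOf(2).pow(160);

    static boolean isUsableRemoteSize(BigInteger remoteSize) {
        // remoteSize.compareTo(BigInteger.ZERO) == 0  <=>  remoteSize.signum() == 0
        if (remoteSize.signum() == 0) {
            return false; // zero estimates carry no information
        }
        // remoteSize.compareTo(BigInteger.ZERO) < 0   <=>  remoteSize.signum() < 0
        return remoteSize.signum() > 0 && remoteSize.compareTo(MAXIMUM) <= 0;
    }

    public static void main(String[] args) {
        System.out.println(isUsableRemoteSize(BigInteger.ZERO));          // false
        System.out.println(isUsableRemoteSize(BigInteger.valueOf(-5)));   // false
        System.out.println(isUsableRemoteSize(BigInteger.valueOf(1000))); // true
    }
}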
From source file:org.apache.hadoop.hive.ql.optimizer.optiq.translator.RexNodeConverter.java
protected RexNode convert(ExprNodeConstantDesc literal) throws OptiqSemanticException {
    RexBuilder rexBuilder = cluster.getRexBuilder();
    RelDataTypeFactory dtFactory = rexBuilder.getTypeFactory();
    PrimitiveTypeInfo hiveType = (PrimitiveTypeInfo) literal.getTypeInfo();
    RelDataType optiqDataType = TypeConverter.convert(hiveType, dtFactory);
    PrimitiveCategory hiveTypeCategory = hiveType.getPrimitiveCategory();
    ConstantObjectInspector coi = literal.getWritableObjectInspector();
    Object value = ObjectInspectorUtils.copyToStandardJavaObject(coi.getWritableConstantValue(), coi);
    RexNode optiqLiteral = null;
    // TODO: Verify if we need to use ConstantObjectInspector to unwrap data
    switch (hiveTypeCategory) {
    case BOOLEAN:
        optiqLiteral = rexBuilder.makeLiteral(((Boolean) value).booleanValue());
        break;
    case BYTE:
        byte[] byteArray = new byte[] { (Byte) value };
        ByteString bs = new ByteString(byteArray);
        optiqLiteral = rexBuilder.makeBinaryLiteral(bs);
        break;
    case SHORT:
        optiqLiteral = rexBuilder.makeExactLiteral(new BigDecimal((Short) value), optiqDataType);
        break;
    case INT:
        optiqLiteral = rexBuilder.makeExactLiteral(new BigDecimal((Integer) value));
        break;
    case LONG:
        optiqLiteral = rexBuilder.makeBigintLiteral(new BigDecimal((Long) value));
        break;
    // TODO: is Decimal an exact numeric or approximate numeric?
    case DECIMAL:
        if (value instanceof HiveDecimal) {
            value = ((HiveDecimal) value).bigDecimalValue();
        } else if (value instanceof Decimal128) {
            value = ((Decimal128) value).toBigDecimal();
        }
        if (value == null) {
            // We have found an invalid decimal value while enforcing precision and scale.
            // Ideally, we would replace it with null here, which is what Hive does. However,
            // we need to plumb this thru up somehow, because otherwise having a different
            // expression type in the AST causes the plan generation to fail after CBO,
            // probably due to some residual state in SA/QB. For now, we will not run CBO
            // in the presence of invalid decimal literals.
            throw new OptiqSemanticException(
                    "Expression " + literal.getExprString() + " is not a valid decimal");
            // TODO: return createNullLiteral(literal);
        }
        BigDecimal bd = (BigDecimal) value;
        BigInteger unscaled = bd.unscaledValue();
        if (unscaled.compareTo(MIN_LONG_BI) >= 0 && unscaled.compareTo(MAX_LONG_BI) <= 0) {
            optiqLiteral = rexBuilder.makeExactLiteral(bd);
        } else {
            // CBO doesn't support unlimited-precision decimals. In practice, this will work...
            // An alternative would be to throw CboSemanticException and fall back to no CBO.
            RelDataType relType = cluster.getTypeFactory().createSqlType(SqlTypeName.DECIMAL, bd.scale(),
                    unscaled.toString().length());
            optiqLiteral = rexBuilder.makeExactLiteral(bd, relType);
        }
        break;
    case FLOAT:
        optiqLiteral = rexBuilder.makeApproxLiteral(new BigDecimal((Float) value), optiqDataType);
        break;
    case DOUBLE:
        optiqLiteral = rexBuilder.makeApproxLiteral(new BigDecimal((Double) value), optiqDataType);
        break;
    case CHAR:
        if (value instanceof HiveChar) {
            value = ((HiveChar) value).getValue();
        }
        optiqLiteral = rexBuilder.makeLiteral((String) value);
        break;
    case VARCHAR:
        if (value instanceof HiveVarchar) {
            value = ((HiveVarchar) value).getValue();
        }
        optiqLiteral = rexBuilder.makeLiteral((String) value);
        break;
    case STRING:
        optiqLiteral = rexBuilder.makeLiteral((String) value);
        break;
    case DATE:
        Calendar cal = new GregorianCalendar();
        cal.setTime((Date) value);
        optiqLiteral = rexBuilder.makeDateLiteral(cal);
        break;
    case TIMESTAMP:
        optiqLiteral = rexBuilder.makeTimestampLiteral((Calendar) value, RelDataType.PRECISION_NOT_SPECIFIED);
        break;
    case BINARY:
    case VOID:
    case UNKNOWN:
    default:
        throw new RuntimeException("Unsupported literal");
    }
    return optiqLiteral;
}
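The unscaled-value range check above decides whether a BigDecimal literal can be backed by a long. A minimal standalone sketch of that check; MIN_LONG_BI and MAX_LONG_BI are defined locally here as the obvious Long.MIN_VALUE/Long.MAX_VALUE constants the source presumably uses:

import java.math.BigDecimal;
import java.math.BigInteger;

public class UnscaledRangeSketch {
    // Stand-ins for the MIN_LONG_BI / MAX_LONG_BI constants referenced above.
    private static final BigInteger MIN_LONG_BI = BigInteger.valueOf(Long.MIN_VALUE);
    private static final BigInteger MAX_LONG_BI = BigInteger.valueOf(Long.MAX_VALUE);

    static boolean unscaledFitsInLong(BigDecimal bd) {
        BigInteger unscaled = bd.unscaledValue();
        return unscaled.compareTo(MIN_LONG_BI) >= 0 && unscaled.compareTo(MAX_LONG_BI) <= 0;
    }

    public static void main(String[] args) {
        System.out.println(unscaledFitsInLong(new BigDecimal("123.45")));                 // true
        System.out.println(unscaledFitsInLong(new BigDecimal("99999999999999999999.9"))); // false
    }
}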
From source file:com.chinamobile.bcbsp.util.Bytes.java
/**
 * Split passed range. Expensive operation relatively. Uses BigInteger math.
 * Useful splitting ranges for MapReduce jobs.
 *
 * @param a
 *          Beginning of range
 * @param b
 *          End of range
 * @param num
 *          Number of times to split range. Pass 1 if you want to split the
 *          range in two; i.e. one split.
 * @return Array of dividing values
 */
public static byte[][] split(final byte[] a, final byte[] b, final int num) {
    byte[] aPadded;
    byte[] bPadded;
    if (a.length < b.length) {
        aPadded = padTail(a, b.length - a.length);
        bPadded = b;
    } else if (b.length < a.length) {
        aPadded = a;
        bPadded = padTail(b, a.length - b.length);
    } else {
        aPadded = a;
        bPadded = b;
    }
    if (compareTo(aPadded, bPadded) >= 0) {
        throw new IllegalArgumentException("b <= a");
    }
    if (num <= 0) {
        throw new IllegalArgumentException("num cannot be <= 0");
    }
    byte[] prependHeader = { 1, 0 };
    BigInteger startBI = new BigInteger(add(prependHeader, aPadded));
    BigInteger stopBI = new BigInteger(add(prependHeader, bPadded));
    BigInteger diffBI = stopBI.subtract(startBI);
    BigInteger splitsBI = BigInteger.valueOf(num + 1);
    if (diffBI.compareTo(splitsBI) < 0) {
        return null;
    }
    BigInteger intervalBI;
    try {
        intervalBI = diffBI.divide(splitsBI);
    } catch (Exception e) {
        LOG.error("Exception caught during division", e);
        return null;
    }
    byte[][] result = new byte[num + 2][];
    result[0] = a;
    for (int i = 1; i <= num; i++) {
        BigInteger curBI = startBI.add(intervalBI.multiply(BigInteger.valueOf(i)));
        byte[] padded = curBI.toByteArray();
        if (padded[1] == 0) {
            padded = tail(padded, padded.length - 2);
        } else {
            padded = tail(padded, padded.length - 1);
        }
        result[i] = padded;
    }
    result[num + 1] = b;
    return result;
}
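A minimal sketch of the BigInteger midpoint math this method (and iterateOnSplits above) is built on, with tiny invented byte ranges so the arithmetic is easy to follow; the {1, 0} header keeps the value positive regardless of the leading data byte:

import java.math.BigInteger;
import java.util.Arrays;

public class SplitSketch {
    public static void main(String[] args) {
        byte[] a = { 0x10 };
        byte[] b = { 0x30 };
        int num = 1; // one split point between a and b

        byte[] header = { 1, 0 };
        BigInteger start = new BigInteger(concat(header, a));
        BigInteger stop = new BigInteger(concat(header, b));
        BigInteger interval = stop.subtract(start).divide(BigInteger.valueOf(num + 1));

        BigInteger mid = start.add(interval);
        byte[] padded = mid.toByteArray();
        // Strip the prepended header bytes, mirroring tail() in the sources above.
        byte[] split = Arrays.copyOfRange(padded, padded.length - a.length, padded.length);
        System.out.println(Arrays.toString(split)); // [32] == 0x20, the midpoint
    }

    private static byte[] concat(byte[] x, byte[] y) {
        byte[] out = Arrays.copyOf(x, x.length + y.length);
        System.arraycopy(y, 0, out, x.length, y.length);
        return out;
    }
}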
From source file:org.o3project.optsdn.don.NetworkInformation.java
/**
 * Get information from the IDEx file (idex.txt):
 * - DP ID
 * - OF Port
 *
 * @param filepath The IDEx file path
 * @throws Exception File read failed
 */
private void parseIdExFile(String filepath) throws Exception {
    String idexJson = readIdexAsJson(filepath);
    ObjectMapper mapper = new ObjectMapper();
    @SuppressWarnings("unchecked")
    Map<String, Map<String, String>> value = mapper.readValue(idexJson, Map.class);
    Set<Entry<String, Map<String, String>>> entrySet = value.entrySet();
    dpidMap = new HashMap<String, Long>();
    Map<String, Integer> ofPortMap = new HashMap<String, Integer>();
    for (Entry<String, Map<String, String>> entry : entrySet) {
        Map<String, String> params = entry.getValue();
        BigInteger bigintDpid = new BigInteger(params.get(DPID));
        if (bigintDpid.compareTo(BigInteger.valueOf(0)) < 0
                || bigintDpid.compareTo(BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
            throw new Exception("DP ID is out of boundary. (DP ID valid between 0 and 2^63-1)");
        }
        long dpid = bigintDpid.longValue();
        String informationModelId = entry.getKey();
        Port port = new Port(informationModelId);
        String neId = port.getNeId();
        Long existDpid = dpidMap.get(neId);
        if (existDpid == null) {
            dpidMap.put(neId, dpid);
        } else if (!existDpid.equals(dpid)) {
            logger.warn("Fail to add DP ID[" + dpid + "]. The DP ID of NE[" + neId
                    + "] is already set (exist DP ID[" + existDpid + "]).");
        }
        int ofPortId = Integer.valueOf(params.get(PORT));
        Integer existOfPortId = ofPortMap.get(informationModelId);
        if (existOfPortId != null) {
            if (!existOfPortId.equals(ofPortId)) {
                logger.warn("Fail to add OpenFlow Port ID[" + ofPortId
                        + "]. The OpenFlow Port ID of Port[" + informationModelId
                        + "] is already set (exist OpenFlow Port ID[" + existOfPortId + "]).");
            }
        } else {
            if (ofPortId < 0) {
                throw new Exception("OpenFlow Port ID is out of boundary. "
                        + "(OpenFlow Port ID valid between 0 and 2^31-1)");
            }
            ofPortMap.put(informationModelId, ofPortId);
        }
    }
    for (Port port : portSet) {
        Integer openFlowPortId = ofPortMap.get(port.getInformationModelId());
        if (openFlowPortId == null) {
            continue;
        }
        port.setOpenFlowPortId(openFlowPortId);
    }
    for (List<List<Port>> linkList : omsConnectionInfoListMap.values()) {
        for (List<Port> link : linkList) {
            Port port1 = link.get(0);
            Integer openFlowPortId1 = ofPortMap.get(port1.getInformationModelId());
            if (openFlowPortId1 != null) {
                port1.setOpenFlowPortId(openFlowPortId1);
            }
            Port port2 = link.get(1);
            Integer openFlowPortId2 = ofPortMap.get(port2.getInformationModelId());
            if (openFlowPortId2 != null) {
                port2.setOpenFlowPortId(openFlowPortId2);
            }
        }
    }
}
From source file:cc.mintcoin.wallet.ui.SendCoinsFragment.java
private static Payment createPaymentMessage(@Nonnull final Transaction transaction,
        @Nullable final Address refundAddress, @Nullable final BigInteger refundAmount,
        @Nullable final String memo, @Nullable final byte[] merchantData) {
    final Protos.Payment.Builder builder = Protos.Payment.newBuilder();
    builder.addTransactions(ByteString.copyFrom(transaction.unsafeBitcoinSerialize()));
    if (refundAddress != null) {
        if (refundAmount.compareTo(BigInteger.valueOf(Long.MAX_VALUE)) > 0)
            throw new IllegalArgumentException("refund amount too big for protobuf: " + refundAmount);
        final Protos.Output.Builder refundOutput = Protos.Output.newBuilder();
        refundOutput.setAmount(refundAmount.longValue());
        refundOutput
                .setScript(ByteString.copyFrom(ScriptBuilder.createOutputScript(refundAddress).getProgram()));
        builder.addRefundTo(refundOutput);
    }
    if (memo != null)
        builder.setMemo(memo);
    if (merchantData != null)
        builder.setMerchantData(ByteString.copyFrom(merchantData));
    return builder.build();
}
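The compareTo guard above is the standard way to verify that a BigInteger fits in a long before calling longValue(), which would otherwise silently truncate. On Java 8+, longValueExact() performs the same range check and throws ArithmeticException instead; a minimal sketch of both idioms:

import java.math.BigInteger;

public class LongRangeGuardSketch {
    public static void main(String[] args) {
        BigInteger amount = new BigInteger("9223372036854775808"); // Long.MAX_VALUE + 1

        // Idiom used in createPaymentMessage(): explicit compareTo guard.
        if (amount.compareTo(BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
            System.out.println("amount too big for a long: " + amount);
        }

        // Java 8+ alternative: let BigInteger do the range check.
        try {
            long value = amount.longValueExact();
            System.out.println("fits: " + value);
        } catch (ArithmeticException e) {
            System.out.println("does not fit in a long");
        }
    }
}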