List of usage examples for java.math.BigDecimal.toPlainString()
public String toPlainString()
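toPlainString() returns the value of a BigDecimal with no exponent field, whereas toString() may fall back to scientific notation when the adjusted exponent is large or small. A minimal, self-contained sketch of the difference (the class name ToPlainStringDemo is illustrative):

import java.math.BigDecimal;

public class ToPlainStringDemo {
    public static void main(String[] args) {
        // toString() switches to scientific notation for small or large adjusted exponents
        BigDecimal tiny = new BigDecimal("1E-8");
        System.out.println(tiny.toString());       // 1E-8
        System.out.println(tiny.toPlainString());  // 0.00000001

        BigDecimal big = new BigDecimal("1E+3");
        System.out.println(big.toString());        // 1E+3
        System.out.println(big.toPlainString());   // 1000

        // For ordinary monetary values the two forms agree
        BigDecimal price = new BigDecimal("19.99");
        System.out.println(price.toPlainString()); // 19.99
    }
}

This is why the examples below call toPlainString() whenever the decimal is rendered for display, written into an offset string, or re-parsed by code that does not accept exponent notation.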
From source file: net.vexelon.myglob.fragments.InvoiceFragment.java
private void updateInvoiceView(User user, List<Map<String, String>> results) {
    if (Defs.LOG_ENABLED)
        Log.v(Defs.LOG_TAG, "Updating invoice for: " + user.getPhoneNumber());

    View v = getView();
    boolean found = false;

    for (Map<String, String> map : results) {
        if (map.containsKey(GLBInvoiceXMLParser.TAG_MSISDN)) {
            String value = map.get(GLBInvoiceXMLParser.TAG_MSISDN);
            // String userPhone = user.getPhoneNumber();
            // if (value.endsWith(userPhone.substring(userPhone.length() - 6, userPhone.length()))) {
            if (value.trim().length() == 0) {
                // invoice info
                setText(v, R.id.tv_invoice_num, map.get(GLBInvoiceXMLParser.TAG_INVNUM));

                Calendar calendar = Calendar.getInstance();
                calendar.setTimeInMillis(Long.parseLong(map.get(GLBInvoiceXMLParser.TAG_DATE)));
                setText(v, R.id.tv_invoice_date, Defs.globalDateFormat.format(calendar.getTime()));

                // costs
                BigDecimal servicesCharge = new BigDecimal("0.00");
                BigDecimal discounts = new BigDecimal("0.00");
                try {
                    // solve discounts amount
                    BigDecimal discount = valOrZero(map.get(GLBInvoiceXMLParser.TAG_DISCOUNT));
                    BigDecimal discountPackage = valOrZero(map.get(GLBInvoiceXMLParser.TAG_DISCOUNT_PACKAGE));
                    BigDecimal discountLoyality = valOrZero(map.get(GLBInvoiceXMLParser.TAG_DISCOUNT_LOYALITY));
                    BigDecimal discountUBB = valOrZero(map.get(GLBInvoiceXMLParser.TAG_DISCOUNT_GLOBUL_UBB));
                    discounts = discounts.add(discount).add(discountPackage).add(discountLoyality)
                            .add(discountUBB);

                    // solve services costs
                    BigDecimal fixedCharge = valOrZero(map.get(GLBInvoiceXMLParser.TAG_FIXED_CHARGE));
                    // BigDecimal discounts = new BigDecimal(map.get(GLBInvoiceXMLParser.TAG_DISCOUNT));
                    BigDecimal totalNoVAT = valOrZero(map.get(GLBInvoiceXMLParser.TAG_TOTAL_NO_VAT));
                    servicesCharge = totalNoVAT.subtract(discounts).subtract(fixedCharge);
                } catch (Exception e) {
                    Log.e(Defs.LOG_TAG, "Failed to get decimal prices info!", e);
                    /*
                     * XXX
                     * It would be better to throw an exception at this point!
                     */
                    discounts = new BigDecimal(map.get(GLBInvoiceXMLParser.TAG_DISCOUNT));
                }

                setText(v, R.id.tv_invoice_services, servicesCharge.toPlainString());
                setText(v, R.id.tv_invoice_fixed_charge, map.get(GLBInvoiceXMLParser.TAG_FIXED_CHARGE));
                setText(v, R.id.tv_invoice_discount, discounts.toPlainString());

                // totals
                setText(v, R.id.tv_invoice_tot_no_vat, map.get(GLBInvoiceXMLParser.TAG_TOTAL_NO_VAT));
                setText(v, R.id.tv_invoice_vat, map.get(GLBInvoiceXMLParser.TAG_VAT));
                setText(v, R.id.tv_invoice_totvat, map.get(GLBInvoiceXMLParser.TAG_TOTALVAT));

                // amount dues
                setText(v, R.id.tv_invoice_prev_amountdue, map.get(GLBInvoiceXMLParser.TAG_PREV_AMOUNTDUE));
                setText(v, R.id.tv_invoice_paied_amountdue, map.get(GLBInvoiceXMLParser.TAG_PAID_AMOUNTDUE));
                setText(v, R.id.tv_invoice_total_dueamount, map.get(GLBInvoiceXMLParser.TAG_TOTAL_DUEAMOUNT));

                found = true;
                break;
            }
        }
    }

    if (!found) {
        // empty MSISDN was not found!
        setText(v, R.id.tv_invoice_status_nodata, R.string.text_invoice_invalid);
    } else {
        TextView tv = (TextView) v.findViewById(R.id.tv_invoice_status_nodata);
        tv.setVisibility(View.GONE);

        TableLayout table_invoice = (TableLayout) v.findViewById(R.id.table_invoice);
        table_invoice.setVisibility(View.VISIBLE);
    }

    setUpdated(true);
}
From source file: com.nimrodtechs.ipc.ZeroMQRmiClient.java
@Override
public String listCallMetrics() {
    StringBuffer sb = new StringBuffer();
    for (Map.Entry<String, CallingMetric> entry : callingMetrics.entrySet()) {
        if (entry.getValue().callCount.get() > 0) {
            BigDecimal avgTrip = new BigDecimal(
                    entry.getValue().cummulativeRoundTripTime.get() / entry.getValue().callCount.get())
                            .setScale(3, BigDecimal.ROUND_HALF_UP).movePointLeft(6)
                            .setScale(6, BigDecimal.ROUND_HALF_UP);
            BigDecimal avgServ = new BigDecimal(
                    entry.getValue().cummulativeServerExecutionTime.get() / entry.getValue().callCount.get())
                            .setScale(3, BigDecimal.ROUND_HALF_UP).movePointLeft(6)
                            .setScale(6, BigDecimal.ROUND_HALF_UP);
            sb.append(entry.getValue().currentServiceAndMethodName + " " + entry.getValue().callCount.get()
                    + " avgTrip " + avgTrip.toPlainString()
                    + " avgSerX " + avgServ.toPlainString()
                    + " minTrip " + new BigDecimal(entry.getValue().minRoundTripTime)
                            .movePointLeft(6).setScale(6, BigDecimal.ROUND_HALF_UP).toPlainString()
                    + " maxTrip " + new BigDecimal(entry.getValue().maxRoundTripTime)
                            .movePointLeft(6).setScale(6, BigDecimal.ROUND_HALF_UP).toPlainString()
                    + " minSerX " + new BigDecimal(entry.getValue().minServerExecutionTime)
                            .movePointLeft(6).setScale(6, BigDecimal.ROUND_HALF_UP).toPlainString()
                    + " maxSerX " + new BigDecimal(entry.getValue().maxServerExecutionTime)
                            .movePointLeft(6).setScale(6, BigDecimal.ROUND_HALF_UP).toPlainString()
                    + "\n");
        }
    }
    return sb.toString();
}
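The movePointLeft(6) calls above shift the decimal point six places, i.e. divide by 10^6; the timing counters appear to be recorded in microseconds, so this yields seconds (an assumption based on the six-place shift, not stated in the source). A small sketch of the same conversion with an illustrative value:

BigDecimal micros = new BigDecimal(1234567); // 1,234,567 microseconds (illustrative value)
String seconds = micros.movePointLeft(6)
        .setScale(6, BigDecimal.ROUND_HALF_UP)
        .toPlainString();                    // "1.234567"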
From source file: org.multibit.utils.CSMiscUtils.java
public static BigInteger getRawUnitsFromDisplayString(CSAsset asset, String display) {
    BigDecimal result = null;
    try {
        //System.out.println("Start to get raw units from: " + display);
        result = new BigDecimal(display);
    } catch (NumberFormatException nfe) {
        nfe.printStackTrace();
        return null;
    }

    // Reverse apply the multiple
    int decimalPlaces = CSMiscUtils.getNumberOfDisplayDecimalPlaces(asset);
    if (decimalPlaces != 0) {
        result = result.movePointRight(decimalPlaces);
    }

    // FIXME: what if multiple is 0.0? ignore? error?
    // double multiple = asset.getMultiple();
    // BigDecimal m = new BigDecimal(String.valueOf(multiple));
    // result = result.divide(m, MathContext.DECIMAL32);
    //System.out.println("multiplier=" + m + ", removed multiplier =" + display);

    double interestRate = asset.getInterestRate();
    BigDecimal rate = new BigDecimal(String.valueOf(interestRate));
    rate = rate.divide(new BigDecimal(100));
    rate = rate.add(BigDecimal.ONE);

    Date issueDate = asset.getIssueDate();
    DateTime d1 = new DateTime(issueDate);
    DateTime d2 = new DateTime();
    int seconds = Math.abs(Seconds.secondsBetween(d1, d2).getSeconds());
    //System.out.println("...Number of seconds difference: " + seconds);

    BigDecimal elapsedSeconds = new BigDecimal(seconds);
    BigDecimal elapsedYears = elapsedSeconds.divide(new BigDecimal(COINSPARK_SECONDS_IN_YEAR),
            MathContext.DECIMAL32);
    //System.out.println("...Number of years difference: " + elapsedYears.toPlainString());

    double base = elapsedSeconds.doubleValue();
    double exp = elapsedYears.doubleValue();
    //System.out.println("...base=" + base + " exponent=" + exp);

    double interestMultiplier = Math.pow(rate.doubleValue(), elapsedYears.doubleValue());
    //System.out.println("interest multiplier =" + interestMultiplier);

    result = result.divide(new BigDecimal(String.valueOf(interestMultiplier)), MathContext.DECIMAL32);
    //System.out.println("result = " + result.toPlainString());

    result = result.setScale(0, RoundingMode.DOWN);
    result = result.stripTrailingZeros();
    //System.out.println("result floored = " + result.toPlainString());

    String resultString = result.toPlainString();
    return new BigInteger(resultString);
}
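Here movePointRight(decimalPlaces) reverses the display scaling before the interest adjustment, and toPlainString() then guarantees a digits-only string, which matters because BigInteger's String constructor rejects exponent notation. An illustrative one-liner for the scaling step:

new BigDecimal("12.34").movePointRight(2).toPlainString(); // "1234"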
From source file: com.mparticle.MParticle.java
/**
 * Logs an increase in the lifetime value of a user. This will signify an increase
 * in the revenue assigned to this user for service providers that support revenue tracking.
 *
 * @param valueIncreased The currency value by which to increase the current user's LTV (required)
 * @param eventName      An event name to be associated with this increase in LTV (optional)
 * @param contextInfo    An MPProduct or any set of data to associate with this increase in LTV (optional)
 */
public void logLtvIncrease(BigDecimal valueIncreased, String eventName, Map<String, String> contextInfo) {
    if (valueIncreased == null) {
        ConfigManager.log(LogLevel.ERROR, "ValueIncreased must not be null.");
        return;
    }
    if (contextInfo == null) {
        contextInfo = new HashMap<String, String>();
    }
    contextInfo.put(MessageKey.RESERVED_KEY_LTV, valueIncreased.toPlainString());
    contextInfo.put(Constants.MethodName.METHOD_NAME, Constants.MethodName.LOG_LTV);
    logEvent(eventName == null ? "Increase LTV" : eventName, EventType.Transaction, contextInfo);
}
From source file: com.streamsets.pipeline.stage.origin.jdbc.cdc.oracle.OracleCDCSource.java
private void startGeneratorThread(String lastSourceOffset) throws StageException, SQLException {
    Offset offset = null;
    LocalDateTime startTimestamp;
    try {
        startLogMnrForRedoDict();
        if (!StringUtils.isEmpty(lastSourceOffset)) {
            offset = new Offset(lastSourceOffset);
            if (lastSourceOffset.startsWith("v3")) {
                if (!useLocalBuffering) {
                    throw new StageException(JDBC_82);
                }
                startTimestamp = offset.timestamp.minusSeconds(configBean.txnWindow);
            } else {
                if (useLocalBuffering) {
                    throw new StageException(JDBC_83);
                }
                startTimestamp = getDateForSCN(new BigDecimal(offset.scn));
            }
            offset.timestamp = startTimestamp;
            adjustStartTimeAndStartLogMnr(startTimestamp);
        } else {
            // reset the start date only if it is not set
            if (configBean.startValue != StartValues.SCN) {
                LocalDateTime startDate;
                if (configBean.startValue == StartValues.DATE) {
                    startDate = LocalDateTime.parse(configBean.startDate, dateTimeColumnHandler.dateFormatter);
                } else {
                    startDate = nowAtDBTz();
                }
                startDate = adjustStartTimeAndStartLogMnr(startDate);
                offset = new Offset(version, startDate, ZERO, 0, "");
            } else {
                BigDecimal startCommitSCN = new BigDecimal(configBean.startSCN);
                startLogMnrSCNToDate.setBigDecimal(1, startCommitSCN);
                final LocalDateTime start = getDateForSCN(startCommitSCN);
                LocalDateTime endTime = getEndTimeForStartTime(start);
                startLogMnrSCNToDate.setString(2, endTime.format(dateTimeColumnHandler.dateFormatter));
                startLogMnrSCNToDate.execute();
                offset = new Offset(version, start, startCommitSCN.toPlainString(), 0, "");
            }
        }
    } catch (SQLException ex) {
        LOG.error("SQLException while trying to setup record generator thread", ex);
        generationStarted = false;
        throw new StageException(JDBC_52, ex);
    }
    final Offset os = offset;
    final PreparedStatement select = selectFromLogMnrContents;
    generationExecutor.submit(() -> {
        try {
            generateRecords(os, select);
        } catch (Throwable ex) {
            LOG.error("Error while producing records", ex);
            generationStarted = false;
        }
    });
}
From source file: com.streamsets.pipeline.stage.origin.jdbc.cdc.oracle.OracleCDCSource.java
private LocalDateTime getDateForSCN(BigDecimal commitSCN) throws SQLException {
    startLogMinerUsingGivenSCNs(commitSCN, getEndingSCN());
    getTimestampsFromLogMnrContents.setMaxRows(1);
    try (ResultSet rs = getTimestampsFromLogMnrContents.executeQuery()) {
        if (rs.next()) {
            LocalDateTime date = rs.getTimestamp(1).toLocalDateTime();
            LOG.debug(START_DATE_REFRESHED_TO, date);
            return date;
        }
    }
    throw new IllegalStateException(Utils.format(
            "SCN: '{}' is not valid and cannot be found in LogMiner logs", commitSCN.toPlainString()));
}
From source file: com.streamsets.pipeline.stage.origin.jdbc.cdc.oracle.OracleCDCSource.java
@Override
public List<ConfigIssue> init() {
    List<ConfigIssue> issues = super.init();
    errorRecordHandler = new DefaultErrorRecordHandler(getContext());
    useLocalBuffering = !getContext().isPreview() && configBean.bufferLocally;

    if (!hikariConfigBean.driverClassName.isEmpty()) {
        try {
            Class.forName(hikariConfigBean.driverClassName);
        } catch (ClassNotFoundException e) {
            LOG.error("Hikari Driver class not found.", e);
            issues.add(getContext().createConfigIssue(Groups.LEGACY.name(), DRIVER_CLASSNAME,
                    JdbcErrors.JDBC_28, e.toString()));
        }
    }
    issues = hikariConfigBean.validateConfigs(getContext(), issues);

    if (connection == null) { // For tests, we set a mock connection
        try {
            dataSource = jdbcUtil.createDataSourceForRead(hikariConfigBean);
            connection = dataSource.getConnection();
            connection.setAutoCommit(false);
        } catch (StageException | SQLException e) {
            LOG.error("Error while connecting to DB", e);
            issues.add(getContext().createConfigIssue(Groups.JDBC.name(), CONNECTION_STR, JDBC_00,
                    e.toString()));
            return issues;
        }
    }

    recordQueue = new LinkedBlockingQueue<>(2 * configBean.baseConfigBean.maxBatchSize);
    String container = configBean.pdb;
    List<SchemaAndTable> schemasAndTables;

    try {
        initializeStatements();
        alterSession();
    } catch (SQLException ex) {
        LOG.error("Error while creating statement", ex);
        issues.add(getContext().createConfigIssue(Groups.JDBC.name(), CONNECTION_STR, JDBC_00,
                hikariConfigBean.getConnectionString()));
    }

    zoneId = ZoneId.of(configBean.dbTimeZone);
    dateTimeColumnHandler = new DateTimeColumnHandler(zoneId);
    String commitScnField;
    BigDecimal scn = null;
    try {
        scn = getEndingSCN();
        switch (configBean.startValue) {
        case SCN:
            if (new BigDecimal(configBean.startSCN).compareTo(scn) > 0) {
                issues.add(getContext().createConfigIssue(CDC.name(), "oracleCDCConfigBean.startSCN", JDBC_47,
                        scn.toPlainString()));
            }
            break;
        case LATEST:
            // If LATEST is used, use now() as the startDate and proceed as if a startDate was specified
            configBean.startDate = nowAtDBTz().format(dateTimeColumnHandler.dateFormatter);
            // fall-through
        case DATE:
            try {
                LocalDateTime startDate = dateTimeColumnHandler.getDate(configBean.startDate);
                if (startDate.isAfter(nowAtDBTz())) {
                    issues.add(getContext().createConfigIssue(CDC.name(), "oracleCDCConfigBean.startDate",
                            JDBC_48));
                }
            } catch (DateTimeParseException ex) {
                LOG.error("Invalid date", ex);
                issues.add(getContext().createConfigIssue(CDC.name(), "oracleCDCConfigBean.startDate", JDBC_49));
            }
            break;
        default:
            throw new IllegalStateException("Unknown start value!");
        }
    } catch (SQLException ex) {
        LOG.error("Error while getting SCN", ex);
        issues.add(getContext().createConfigIssue(CREDENTIALS.name(), USERNAME, JDBC_42));
    }

    try (Statement reusedStatement = connection.createStatement()) {
        int majorVersion = getDBVersion(issues);
        // If version is 12+, then the check for table presence must be done in an alternate container!
        if (majorVersion == -1) {
            return issues;
        }
        if (majorVersion >= 12) {
            if (!StringUtils.isEmpty(container)) {
                String switchToPdb = "ALTER SESSION SET CONTAINER = " + configBean.pdb;
                try {
                    reusedStatement.execute(switchToPdb);
                } catch (SQLException ex) {
                    LOG.error("Error while switching to container: " + container, ex);
                    issues.add(getContext().createConfigIssue(Groups.CREDENTIALS.name(), USERNAME, JDBC_40,
                            container));
                    return issues;
                }
                containerized = true;
            }
        }
        schemasAndTables = new ArrayList<>();
        for (SchemaTableConfigBean tables : configBean.baseConfigBean.schemaTableConfigs) {
            tables.schema = configBean.baseConfigBean.caseSensitive ? tables.schema : tables.schema.toUpperCase();
            tables.table = configBean.baseConfigBean.caseSensitive ? tables.table : tables.table.toUpperCase();
            if (tables.excludePattern != null) {
                tables.excludePattern = configBean.baseConfigBean.caseSensitive ? tables.excludePattern
                        : tables.excludePattern.toUpperCase();
            }
            Pattern p = StringUtils.isEmpty(tables.excludePattern) ? null
                    : Pattern.compile(tables.excludePattern);
            try (ResultSet rs = jdbcUtil.getTableAndViewMetadata(connection, tables.schema, tables.table)) {
                while (rs.next()) {
                    String schemaName = rs.getString(TABLE_METADATA_TABLE_SCHEMA_CONSTANT);
                    String tableName = rs.getString(TABLE_METADATA_TABLE_NAME_CONSTANT);
                    if (p == null || !p.matcher(tableName).matches()) {
                        schemaName = schemaName.trim();
                        tableName = tableName.trim();
                        schemasAndTables.add(new SchemaAndTable(schemaName, tableName));
                    }
                }
            }
        }
        validateTablePresence(reusedStatement, schemasAndTables, issues);
        if (!issues.isEmpty()) {
            return issues;
        }
        for (SchemaAndTable schemaAndTable : schemasAndTables) {
            try {
                tableSchemas.put(schemaAndTable, getTableSchema(schemaAndTable));
                if (scn != null) {
                    tableSchemaLastUpdate.put(schemaAndTable, scn);
                }
            } catch (SQLException ex) {
                LOG.error("Error while switching to container: " + container, ex);
                issues.add(getContext().createConfigIssue(Groups.CREDENTIALS.name(), USERNAME, JDBC_50));
            }
        }
        container = CDB_ROOT;
        if (majorVersion >= 12) {
            try {
                switchContainer.execute();
                LOG.info("Switched to CDB$ROOT to start LogMiner.");
            } catch (SQLException ex) {
                // Fatal only if we switched to a PDB earlier
                if (containerized) {
                    LOG.error("Error while switching to container: " + container, ex);
                    issues.add(getContext().createConfigIssue(Groups.CREDENTIALS.name(), USERNAME, JDBC_40,
                            container));
                    return issues;
                }
                // Log it anyway
                LOG.info("Switching containers failed, ignoring since there was no PDB switch", ex);
            }
        }
        commitScnField = majorVersion >= 11 ? "COMMIT_SCN" : "CSCN";
    } catch (SQLException ex) {
        LOG.error("Error while creating statement", ex);
        issues.add(getContext().createConfigIssue(Groups.JDBC.name(), CONNECTION_STR, JDBC_00,
                hikariConfigBean.getConnectionString()));
        return issues;
    }

    final String ddlTracking = shouldTrackDDL ? " + DBMS_LOGMNR.DDL_DICT_TRACKING" : "";
    final String readCommitted = useLocalBuffering ? "" : "+ DBMS_LOGMNR.COMMITTED_DATA_ONLY";

    this.logMinerProcedure = "BEGIN"
            + " DBMS_LOGMNR.START_LOGMNR("
            + " {},"
            + " {},"
            + " OPTIONS => DBMS_LOGMNR." + configBean.dictionary.name()
            + " + DBMS_LOGMNR.CONTINUOUS_MINE" + readCommitted
            + " + DBMS_LOGMNR.NO_SQL_DELIMITER" + ddlTracking
            + ");"
            + " END;";

    final String base = "SELECT SCN, USERNAME, OPERATION_CODE, TIMESTAMP, SQL_REDO, TABLE_NAME, " + commitScnField
            + ", SEQUENCE#, CSF, XIDUSN, XIDSLT, XIDSQN, RS_ID, SSN, SEG_OWNER, ROLLBACK, ROW_ID "
            + " FROM V$LOGMNR_CONTENTS" + " WHERE ";

    final String tableCondition = getListOfSchemasAndTables(schemasAndTables);

    final String commitRollbackCondition = Utils.format("OPERATION_CODE = {} OR OPERATION_CODE = {}",
            COMMIT_CODE, ROLLBACK_CODE);

    final String operationsCondition = "OPERATION_CODE IN (" + getSupportedOperations() + ")";

    final String restartNonBufferCondition = Utils.format("((" + commitScnField + " = ? AND SEQUENCE# > ?) OR "
            + commitScnField + " > ?)" + (shouldTrackDDL ? " OR (OPERATION_CODE = {} AND SCN > ?)" : ""),
            DDL_CODE);

    if (useLocalBuffering) {
        selectString = String.format("%s ((%s AND (%s)) OR (%s))", base, tableCondition, operationsCondition,
                commitRollbackCondition);
    } else {
        selectString = base + " (" + tableCondition + " AND (" + operationsCondition + "))" + "AND ("
                + restartNonBufferCondition + ")";
    }

    try {
        initializeLogMnrStatements();
    } catch (SQLException ex) {
        LOG.error("Error while creating statement", ex);
        issues.add(getContext().createConfigIssue(Groups.JDBC.name(), CONNECTION_STR, JDBC_00,
                hikariConfigBean.getConnectionString()));
    }

    if (configBean.dictionary == DictionaryValues.DICT_FROM_REDO_LOGS) {
        try {
            startLogMnrForRedoDict();
        } catch (Exception ex) {
            LOG.warn("Error while attempting to start LogMiner to load dictionary", ex);
            issues.add(getContext().createConfigIssue(Groups.CDC.name(), "oracleCDCConfigBean.dictionary",
                    JDBC_44, ex));
        }
    }

    if (useLocalBuffering && configBean.bufferLocation == BufferingValues.ON_DISK) {
        File tmpDir = new File(System.getProperty("java.io.tmpdir"));
        String relativePath = getContext().getSdcId() + "/" + getContext().getPipelineId() + "/"
                + getContext().getStageInfo().getInstanceName();
        this.txnBufferLocation = new File(tmpDir, relativePath);
        try {
            if (txnBufferLocation.exists()) {
                FileUtils.deleteDirectory(txnBufferLocation);
                LOG.info("Deleted " + txnBufferLocation.toString());
            }
            Files.createDirectories(txnBufferLocation.toPath());
            LOG.info("Created " + txnBufferLocation.toString());
        } catch (IOException ex) {
            Throwables.propagate(ex);
        }
    }

    if (configBean.bufferLocally) {
        if (configBean.parseQuery) {
            parsingExecutor = Executors.newFixedThreadPool(configBean.parseThreadPoolSize,
                    new ThreadFactoryBuilder().setNameFormat("Oracle CDC Origin Parse Thread - %d").build());
        } else {
            parsingExecutor = Executors.newSingleThreadExecutor(
                    new ThreadFactoryBuilder().setNameFormat("Oracle CDC Origin Parse Thread - %d").build());
        }
    }

    if (configBean.txnWindow >= configBean.logminerWindow) {
        issues.add(getContext().createConfigIssue(Groups.CDC.name(), "oracleCDCConfigBean.logminerWindow",
                JDBC_81));
    }
    version = useLocalBuffering ? VERSION_UNCOMMITTED : VERSION_STR;
    delay = getContext().createGauge("Read Lag (seconds)");
    return issues;
}
From source file: org.voltdb.compiler.DDLCompiler.java
void addColumnToCatalog(Table table, VoltXMLElement node, SortedMap<Integer, VoltType> columnTypes)
        throws VoltCompilerException {
    assert node.name.equals("column");

    String name = node.attributes.get("name");
    String typename = node.attributes.get("valuetype");
    String nullable = node.attributes.get("nullable");
    String sizeString = node.attributes.get("size");
    int index = Integer.valueOf(node.attributes.get("index"));
    String defaultvalue = null;
    String defaulttype = null;
    int defaultFuncID = -1;

    // Default Value
    for (VoltXMLElement child : node.children) {
        if (child.name.equals("default")) {
            for (VoltXMLElement inner_child : child.children) {
                // Value
                if (inner_child.name.equals("value")) {
                    assert (defaulttype == null); // There should be only one default value/type.
                    defaultvalue = inner_child.attributes.get("value");
                    defaulttype = inner_child.attributes.get("valuetype");
                    assert (defaulttype != null);
                } else if (inner_child.name.equals("function")) {
                    assert (defaulttype == null); // There should be only one default value/type.
                    defaultFuncID = Integer.parseInt(inner_child.attributes.get("function_id"));
                    defaultvalue = inner_child.attributes.get("name");
                    defaulttype = inner_child.attributes.get("valuetype");
                    assert (defaulttype != null);
                }
            }
        }
    }
    if (defaulttype != null) {
        // fyi: Historically, VoltType class initialization errors get reported on this line (?).
        defaulttype = Integer.toString(VoltType.typeFromString(defaulttype).getValue());
    }

    // replace newlines in default values
    if (defaultvalue != null) {
        defaultvalue = defaultvalue.replace('\n', ' ');
        defaultvalue = defaultvalue.replace('\r', ' ');
    }

    // fyi: Historically, VoltType class initialization errors get reported on this line (?).
    VoltType type = VoltType.typeFromString(typename);
    columnTypes.put(index, type);

    if (defaultFuncID == -1) {
        if (defaultvalue != null && (type == VoltType.DECIMAL || type == VoltType.NUMERIC)) {
            // Until we support deserializing scientific notation in the EE, we'll
            // coerce default values to plain notation here. See ENG-952 for more info.
            BigDecimal temp = new BigDecimal(defaultvalue);
            defaultvalue = temp.toPlainString();
        }
    } else {
        // Concat function name and function id, format: NAME:ID
        // Used by PlanAssembler:getNextInsertPlan().
        defaultvalue = defaultvalue + ":" + String.valueOf(defaultFuncID);
    }

    Column column = table.getColumns().add(name);
    // need to set other column data here (default, nullable, etc)
    column.setName(name);
    column.setIndex(index);
    column.setType(type.getValue());
    column.setNullable(Boolean.valueOf(nullable));

    int size = type.getMaxLengthInBytes();
    boolean inBytes = false;
    if (node.attributes.containsKey("bytes")) {
        inBytes = Boolean.valueOf(node.attributes.get("bytes"));
    }

    // Require a valid length if variable length is supported for a type
    if (type == VoltType.STRING || type == VoltType.VARBINARY) {
        if (sizeString == null) {
            // An unspecified size for a VARCHAR/VARBINARY column should be
            // for a materialized view column whose type is derived from a
            // function or expression of variable-length type.
            // Defaulting these to MAX_VALUE_LENGTH tends to cause them to overflow the
            // allowed MAX_ROW_SIZE when there are more than one in a view.
            // It's not clear what benefit, if any, we derive from limiting MAX_ROW_SIZE
            // based on worst-case length for variable fields, but we comply for now by
            // arbitrarily limiting these matview column sizes such that
            // the max number of columns of this size would still fit.
            size = MAX_ROW_SIZE / MAX_COLUMNS;
        } else {
            int userSpecifiedSize = Integer.parseInt(sizeString);
            if (userSpecifiedSize < 0 || (inBytes && userSpecifiedSize > VoltType.MAX_VALUE_LENGTH)) {
                String msg = type.toSQLString() + " column " + name + " in table " + table.getTypeName()
                        + " has unsupported length " + sizeString;
                throw m_compiler.new VoltCompilerException(msg);
            }
            if (!inBytes && type == VoltType.STRING) {
                if (userSpecifiedSize > VoltType.MAX_VALUE_LENGTH_IN_CHARACTERS) {
                    String msg = String.format("The size of VARCHAR column %s in table %s greater than %d "
                            + "will be enforced as byte counts rather than UTF8 character counts. "
                            + "To eliminate this warning, specify \"VARCHAR(%d BYTES)\"", name,
                            table.getTypeName(), VoltType.MAX_VALUE_LENGTH_IN_CHARACTERS, userSpecifiedSize);
                    m_compiler.addWarn(msg);
                    inBytes = true;
                }
            }
            if (userSpecifiedSize > 0) {
                size = userSpecifiedSize;
            } else {
                // A 0 from the user was already caught
                // -- so any 0 at this point was NOT user-specified.
                // It must have been generated by mistake.
                // We should just stop doing that. It's just noise.
                // Treating it as a synonym for sizeString == null.
                size = MAX_ROW_SIZE / MAX_COLUMNS;
            }
        }
    }
    column.setInbytes(inBytes);
    column.setSize(size);
    column.setDefaultvalue(defaultvalue);
    if (defaulttype != null)
        column.setDefaulttype(Integer.parseInt(defaulttype));

    columnMap.put(name, column);
}
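The coercion mentioned in the ENG-952 comment works because toPlainString() expands scientific notation into plain digits. An illustrative example of what the EE would receive for a default written in exponent form:

new BigDecimal("1.5E2").toPlainString(); // "150" rather than "1.5E+2"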
From source file: org.gbif.ipt.model.Resource.java
/**
 * Construct the resource citation from various parts for the version specified.
 * <br/>
 * The citation format is:
 * Creators (PublicationYear): Title. Version. Publisher. ResourceType. Identifier
 *
 * @param version  resource version to use in citation
 * @param homepage homepage URI
 *
 * @return generated resource citation string
 */
public String generateResourceCitation(@NotNull BigDecimal version, @NotNull URI homepage) {
    StringBuilder sb = new StringBuilder();

    // make list of verified authors (having first and last name)
    List<String> verifiedAuthorList = Lists.newArrayList();
    for (Agent creator : getEml().getCreators()) {
        String authorName = getAuthorName(creator);
        if (authorName != null) {
            verifiedAuthorList.add(authorName);
        }
    }

    // add comma-separated authors
    Iterator<String> iter = verifiedAuthorList.iterator();
    while (iter.hasNext()) {
        sb.append(iter.next());
        if (iter.hasNext()) {
            sb.append(", ");
        }
    }

    // add year resource was first published (captured in EML dateStamp)
    int publicationYear = getPublicationYear(getEml().getDateStamp());
    if (publicationYear > 0) {
        sb.append(" (");
        sb.append(publicationYear);
        sb.append("): ");
    }

    // add title
    sb.append((StringUtils.trimToNull(getTitle()) == null) ? getShortname() : StringUtils.trim(getTitle()));
    sb.append(". ");

    // add version
    sb.append("v");
    sb.append(version.toPlainString());
    sb.append(". ");

    // add publisher
    String publisher = (getOrganisation() == null) ? null : StringUtils.trimToNull(getOrganisation().getName());
    if (publisher != null) {
        sb.append(publisher);
        sb.append(". ");
    }

    // add ResourceTypeGeneral/ResourceType, e.g. Dataset/Occurrence, Dataset/Checklist
    sb.append("Dataset");
    if (getCoreType() != null) {
        sb.append("/");
        sb.append(StringUtils.capitalize(getCoreType().toLowerCase()));
    }
    sb.append(". ");

    // add DOI as the identifier. DataCite recommends using a linkable, permanent URL
    if (getDoi() != null) {
        sb.append(getDoi().getUrl());
    }
    // otherwise add the citation identifier instead
    else if (getEml().getCitation() != null && !Strings.isNullOrEmpty(getEml().getCitation().getIdentifier())) {
        sb.append(getEml().getCitation().getIdentifier());
    }
    // otherwise use its IPT homepage as the identifier
    else {
        sb.append(homepage.toString());
    }
    return sb.toString();
}
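With the format above, a call such as generateResourceCitation(new BigDecimal("1.0"), homepage) produces a citation along the lines of (all names illustrative): Smith J, Doe A (2016): Example Occurrence Dataset. v1.0. Example Organisation. Dataset/Occurrence. https://doi.org/10.xxxx/xxxxx. Because the version is a BigDecimal, toPlainString() keeps the "v1.0" segment free of exponent notation.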
From source file: com.streamsets.pipeline.stage.origin.jdbc.cdc.oracle.OracleCDCSource.java
private void generateRecords(Offset startingOffset, PreparedStatement selectChanges) {
    // When this is called the first time, LogMiner was started either from an SCN or from a start date,
    // so we just keep track of the start date etc.
    LOG.info("Attempting to generate records");
    boolean error;
    StringBuilder query = new StringBuilder();
    BigDecimal lastCommitSCN = new BigDecimal(startingOffset.scn);
    int sequenceNumber = startingOffset.sequence;
    LocalDateTime startTime = adjustStartTime(startingOffset.timestamp);
    String lastTxnId = startingOffset.txnId;
    LocalDateTime endTime = getEndTimeForStartTime(startTime);
    ResultSet resultSet = null;
    while (!getContext().isStopped()) {
        error = false;
        generationStarted = true;
        try {
            recordQueue.put(new RecordOffset(dummyRecord,
                    new Offset(version, startTime, lastCommitSCN.toPlainString(), sequenceNumber, lastTxnId)));
            selectChanges = getSelectChangesStatement();
            if (!useLocalBuffering) {
                selectChanges.setBigDecimal(1, lastCommitSCN);
                selectChanges.setInt(2, sequenceNumber);
                selectChanges.setBigDecimal(3, lastCommitSCN);
                if (shouldTrackDDL) {
                    selectChanges.setBigDecimal(4, lastCommitSCN);
                }
            }
            selectChanges.setFetchSize(configBean.jdbcFetchSize);
            resultSet = selectChanges.executeQuery();
            while (resultSet.next() && !getContext().isStopped()) {
                String queryFragment = resultSet.getString(5);
                BigDecimal scnDecimal = resultSet.getBigDecimal(1);
                String scn = scnDecimal.toPlainString();
                String xidUsn = String.valueOf(resultSet.getLong(10));
                String xidSlt = String.valueOf(resultSet.getString(11));
                String xidSqn = String.valueOf(resultSet.getString(12));
                String xid = xidUsn + "." + xidSlt + "." + xidSqn;
                // Query fragment is not null -> we need to process.
                // Query fragment is null AND the query string buffered from previous rows due to CSF == 0
                // is null -> nothing to do, go to next row.
                // Query fragment is null, but there is previously buffered data in the query -> go ahead
                // and process.
                if (queryFragment != null) {
                    query.append(queryFragment);
                } else if (queryFragment == null && query.length() == 0) {
                    LOG.debug(READ_NULL_QUERY_FROM_ORACLE, scn, xid);
                    continue;
                }

                // CSF is 1 if the query is incomplete, so read the next row before parsing.
                // CSF being 0 means the query is complete, generate the record.
                if (resultSet.getInt(9) == 0) {
                    if (query.length() == 0) {
                        LOG.debug(READ_NULL_QUERY_FROM_ORACLE, scn, xid);
                        continue;
                    }
                    String queryString = query.toString();
                    query.setLength(0);
                    String username = resultSet.getString(2);
                    short op = resultSet.getShort(3);
                    String timestamp = resultSet.getString(4);
                    LocalDateTime tsDate = Timestamp.valueOf(timestamp).toLocalDateTime();
                    delay.getValue().put("delay", getDelay(tsDate));
                    String table = resultSet.getString(6);
                    BigDecimal commitSCN = resultSet.getBigDecimal(7);
                    int seq = resultSet.getInt(8);
                    String rsId = resultSet.getString(13);
                    Object ssn = resultSet.getObject(14);
                    String schema = String.valueOf(resultSet.getString(15));
                    int rollback = resultSet.getInt(16);
                    String rowId = resultSet.getString(17);
                    SchemaAndTable schemaAndTable = new SchemaAndTable(schema, table);
                    TransactionIdKey key = new TransactionIdKey(xid);
                    bufferedRecordsLock.lock();
                    try {
                        if (useLocalBuffering && bufferedRecords.containsKey(key) && bufferedRecords.get(key)
                                .contains(new RecordSequence(null, null, 0, 0, rsId, ssn, null))) {
                            continue;
                        }
                    } finally {
                        bufferedRecordsLock.unlock();
                    }
                    Offset offset = null;
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Commit SCN = {}, SCN = {}, Operation = {}, Txn Id = {}, Timestamp = {}, "
                                + "Row Id = {}, Redo SQL = {}",
                                commitSCN, scn, op, xid, tsDate, rowId, queryString);
                    }

                    if (op != DDL_CODE && op != COMMIT_CODE && op != ROLLBACK_CODE) {
                        if (!useLocalBuffering) {
                            offset = new Offset(version, tsDate, commitSCN.toPlainString(), seq, xid);
                        }
                        Map<String, String> attributes = new HashMap<>();
                        attributes.put(SCN, scn);
                        attributes.put(USER, username);
                        attributes.put(TIMESTAMP_HEADER, timestamp);
                        attributes.put(TABLE, table);
                        attributes.put(SEQ, String.valueOf(seq));
                        attributes.put(XID, xid);
                        attributes.put(RS_ID, rsId);
                        attributes.put(SSN, ssn.toString());
                        attributes.put(SCHEMA, schema);
                        attributes.put(ROLLBACK, String.valueOf(rollback));
                        attributes.put(ROWID_KEY, rowId);
                        if (!useLocalBuffering || getContext().isPreview()) {
                            if (commitSCN.compareTo(lastCommitSCN) < 0
                                    || (commitSCN.compareTo(lastCommitSCN) == 0 && seq < sequenceNumber)) {
                                continue;
                            }
                            lastCommitSCN = commitSCN;
                            sequenceNumber = seq;
                            if (configBean.keepOriginalQuery) {
                                attributes.put(QUERY_KEY, queryString);
                            }
                            try {
                                Record record = generateRecord(queryString, attributes, op);
                                if (record != null && record.getEscapedFieldPaths().size() > 0) {
                                    recordQueue.put(new RecordOffset(record, offset));
                                }
                            } catch (UnparseableSQLException ex) {
                                LOG.error("Parsing failed", ex);
                                unparseable.offer(queryString);
                            }
                        } else {
                            bufferedRecordsLock.lock();
                            try {
                                HashQueue<RecordSequence> records = bufferedRecords.computeIfAbsent(key, x -> {
                                    x.setTxnStartTime(tsDate);
                                    return createTransactionBuffer(key.txnId);
                                });
                                int nextSeq = records.isEmpty() ? 1 : records.tail().seq + 1;
                                RecordSequence node = new RecordSequence(attributes, queryString, nextSeq, op,
                                        rsId, ssn, tsDate);
                                records.add(node);
                            } finally {
                                bufferedRecordsLock.unlock();
                            }
                        }
                    } else if (!getContext().isPreview() && useLocalBuffering
                            && (op == COMMIT_CODE || op == ROLLBACK_CODE)) {
                        // If this commit was previously processed, or it is a rollback, we don't care.
                        if (op == ROLLBACK_CODE || scnDecimal.compareTo(lastCommitSCN) < 0) {
                            bufferedRecordsLock.lock();
                            try {
                                bufferedRecords.remove(key);
                            } finally {
                                bufferedRecordsLock.unlock();
                            }
                        } else {
                            bufferedRecordsLock.lock();
                            try {
                                HashQueue<RecordSequence> records =
                                        bufferedRecords.getOrDefault(key, EMPTY_LINKED_HASHSET);
                                if (lastCommitSCN.equals(scnDecimal) && xid.equals(lastTxnId)) {
                                    removeProcessedRecords(records, sequenceNumber);
                                }
                                int bufferedRecordsToBeRemoved = records.size();
                                LOG.debug(FOUND_RECORDS_IN_TRANSACTION, bufferedRecordsToBeRemoved, xid);
                                lastCommitSCN = scnDecimal;
                                lastTxnId = xid;
                                sequenceNumber = addRecordsToQueue(tsDate, scn, xid);
                            } finally {
                                bufferedRecordsLock.unlock();
                            }
                        }
                    } else {
                        offset = new Offset(version, tsDate, scn, 0, xid);
                        boolean sendSchema = false;
                        // Commit/rollback in preview will also end up here, so don't really do any of the
                        // following in preview. Don't bother with DDL events here.
                        if (!getContext().isPreview()) {
                            // Event is sent on every DDL, but schema is not always sent.
                            // Schema sending logic:
                            // CREATE/ALTER: Schema is sent if the schema after the ALTER is newer than the
                            // cached schema (which we would have sent as an event earlier, at the last alter).
                            // DROP/TRUNCATE: Schema is not sent, since they don't change schema.
                            DDL_EVENT type = getDdlType(queryString);
                            if (type == DDL_EVENT.ALTER || type == DDL_EVENT.CREATE) {
                                sendSchema = refreshSchema(scnDecimal, new SchemaAndTable(schema, table));
                            }
                            recordQueue.put(new RecordOffset(createEventRecord(type, queryString, schemaAndTable,
                                    offset.toString(), sendSchema, timestamp), offset));
                        }
                    }
                    query.setLength(0);
                }
            }
        } catch (SQLException ex) {
            error = true; // force a restart from the same timestamp
            if (ex.getErrorCode() == MISSING_LOG_FILE) {
                LOG.warn("SQL Exception while retrieving records", ex);
                addToStageExceptionsQueue(new StageException(JDBC_86, ex));
            } else if (ex.getErrorCode() != RESULTSET_CLOSED_AS_LOGMINER_SESSION_CLOSED) {
                LOG.warn("SQL Exception while retrieving records", ex);
            } else if (ex.getErrorCode() == QUERY_TIMEOUT) {
                LOG.warn("LogMiner select query timed out");
            } else if (ex.getErrorCode() == LOGMINER_START_MUST_BE_CALLED) {
                LOG.warn("Last LogMiner session did not start successfully. Will retry", ex);
            } else {
                LOG.error("Error while reading data", ex);
                addToStageExceptionsQueue(new StageException(JDBC_52, ex));
            }
        } catch (StageException e) {
            LOG.error("Error while reading data", e);
            error = true;
            addToStageExceptionsQueue(e);
        } catch (InterruptedException ex) {
            LOG.error("Interrupted while waiting to add data");
            Thread.currentThread().interrupt();
        } catch (Exception ex) {
            LOG.error("Error while reading data", ex);
            error = true;
            addToStageExceptionsQueue(new StageException(JDBC_52, ex));
        } finally {
            // If an incomplete batch is seen, it means we are going to move the window forward.
            // Ending this session and starting a new one helps reduce PGA memory usage.
            try {
                if (resultSet != null && !resultSet.isClosed()) {
                    resultSet.close();
                }
                if (selectChanges != null && !selectChanges.isClosed()) {
                    selectChanges.close();
                }
            } catch (SQLException ex) {
                LOG.warn("Error while attempting to close SQL statements", ex);
            }
            try {
                endLogMnr.execute();
            } catch (SQLException ex) {
                LOG.warn("Error while trying to close LogMiner session", ex);
            }
            try {
                if (error) {
                    resetConnectionsQuietly();
                } else {
                    discardOldUncommitted(startTime);
                    startTime = adjustStartTime(endTime);
                    endTime = getEndTimeForStartTime(startTime);
                }
                startLogMinerUsingGivenDates(startTime.format(dateTimeColumnHandler.dateFormatter),
                        endTime.format(dateTimeColumnHandler.dateFormatter));
            } catch (SQLException ex) {
                LOG.error("Error while attempting to start LogMiner", ex);
                addToStageExceptionsQueue(new StageException(JDBC_52, ex));
            } catch (StageException ex) {
                LOG.error("Error while attempting to start LogMiner for redo log dictionary", ex);
                addToStageExceptionsQueue(ex);
            }
        }
    }
}