List of usage examples for java.lang RuntimeException toString
public String toString()
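toString() is inherited from java.lang.Throwable: it returns the fully qualified class name of the exception, followed by ": " and the detail message when one is present, or the class name alone when there is none. A minimal sketch of that behavior (the exception types and message text here are illustrative, not taken from the projects below):

public class RuntimeExceptionToStringDemo {
    public static void main(String[] args) {
        // With a detail message:
        // prints "java.lang.IllegalStateException: cache not ready"
        System.out.println(new IllegalStateException("cache not ready").toString());

        // Without a detail message: prints "java.lang.RuntimeException"
        System.out.println(new RuntimeException().toString());
    }
}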
From source file:de.unihannover.se.processSimulation.interactive.ServerMain.java
private void printParseError(PrintWriter w, String t, final RuntimeException e) {
    // "Fehler beim Parsen von Parameter" is German for "Error parsing parameter"
    w.println("<b>Fehler beim Parsen von Parameter " + t + "</b><br/>");
    w.println(e.toString());
    w.println("<br/>");
}
From source file:org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupMergeJob.java
@Override
public void run(String[] backupIds) throws IOException {
    String bulkOutputConfKey;

    // TODO : run player on remote cluster
    player = new MapReduceHFileSplitterJob();
    bulkOutputConfKey = MapReduceHFileSplitterJob.BULK_OUTPUT_CONF_KEY;
    // Player reads all files in arbitrary directory structure and creates
    // a Map task for each file
    String bids = StringUtils.join(backupIds, ",");
    if (LOG.isDebugEnabled()) {
        LOG.debug("Merge backup images " + bids);
    }
    List<Pair<TableName, Path>> processedTableList = new ArrayList<>();
    boolean finishedTables = false;
    Connection conn = ConnectionFactory.createConnection(getConf());
    BackupSystemTable table = new BackupSystemTable(conn);
    FileSystem fs = FileSystem.get(getConf());
    try {
        // Get exclusive lock on backup system
        table.startBackupExclusiveOperation();
        // Start merge operation
        table.startMergeOperation(backupIds);

        // Select most recent backup id
        String mergedBackupId = BackupUtils.findMostRecentBackupId(backupIds);

        TableName[] tableNames = getTableNamesInBackupImages(backupIds);

        BackupInfo bInfo = table.readBackupInfo(backupIds[0]);
        String backupRoot = bInfo.getBackupRootDir();

        for (int i = 0; i < tableNames.length; i++) {
            LOG.info("Merge backup images for " + tableNames[i]);

            // Find input directories for table
            Path[] dirPaths = findInputDirectories(fs, backupRoot, tableNames[i], backupIds);
            String dirs = StringUtils.join(dirPaths, ",");

            Path bulkOutputPath = BackupUtils.getBulkOutputDir(
                    BackupUtils.getFileNameCompatibleString(tableNames[i]), getConf(), false);
            // Delete content if exists
            if (fs.exists(bulkOutputPath)) {
                if (!fs.delete(bulkOutputPath, true)) {
                    LOG.warn("Can not delete: " + bulkOutputPath);
                }
            }
            Configuration conf = getConf();
            conf.set(bulkOutputConfKey, bulkOutputPath.toString());
            String[] playerArgs = { dirs, tableNames[i].getNameAsString() };

            player.setConf(getConf());
            int result = player.run(playerArgs);
            if (!succeeded(result)) {
                throw new IOException("Can not merge backup images for " + dirs
                        + " (check Hadoop/MR and HBase logs). Player return code =" + result);
            }
            // Add to processed table list
            processedTableList.add(new Pair<>(tableNames[i], bulkOutputPath));
            LOG.debug("Merge Job finished:" + result);
        }
        List<TableName> tableList = toTableNameList(processedTableList);
        table.updateProcessedTablesForMerge(tableList);
        finishedTables = true;

        // PHASE 2 (modification of a backup file system)
        // Move existing mergedBackupId data into tmp directory;
        // we will need it later in case of a failure
        Path tmpBackupDir = HBackupFileSystem.getBackupTmpDirPathForBackupId(backupRoot, mergedBackupId);
        Path backupDirPath = HBackupFileSystem.getBackupPath(backupRoot, mergedBackupId);
        if (!fs.rename(backupDirPath, tmpBackupDir)) {
            throw new IOException("Failed to rename " + backupDirPath + " to " + tmpBackupDir);
        } else {
            LOG.debug("Renamed " + backupDirPath + " to " + tmpBackupDir);
        }
        // Move new data into backup dest
        for (Pair<TableName, Path> tn : processedTableList) {
            moveData(fs, backupRoot, tn.getSecond(), tn.getFirst(), mergedBackupId);
        }
        // Update backup manifest
        List<String> backupsToDelete = getBackupIdsToDelete(backupIds, mergedBackupId);
        updateBackupManifest(tmpBackupDir.getParent().toString(), mergedBackupId, backupsToDelete);
        // Copy meta files back from tmp to backup dir
        copyMetaData(fs, tmpBackupDir, backupDirPath);
        // Delete tmp dir (Rename back during repair)
        if (!fs.delete(tmpBackupDir, true)) {
            // WARN and ignore
            LOG.warn("Could not delete tmp dir: " + tmpBackupDir);
        }
        // Delete old data
        deleteBackupImages(backupsToDelete, conn, fs, backupRoot);
        // Finish merge session
        table.finishMergeOperation();
        // Release lock
        table.finishBackupExclusiveOperation();
    } catch (RuntimeException e) {
        throw e;
    } catch (Exception e) {
        LOG.error(e.toString(), e);
        if (!finishedTables) {
            // cleanup bulk directories and finish merge;
            // merge MUST be repeated (no need for repair)
            cleanupBulkLoadDirs(fs, toPathList(processedTableList));
            table.finishMergeOperation();
            table.finishBackupExclusiveOperation();
            throw new IOException("Backup merge operation failed, you should try it again", e);
        } else {
            // backup repair must be run
            throw new IOException(
                    "Backup merge operation failed, run backup repair tool to restore system's integrity", e);
        }
    } finally {
        table.close();
        conn.close();
    }
}
From source file:org.dancres.blitz.jini.lockmgr.VotingAdapter.java
/**
 * This method performs voting on the specific decree between all
 * local voteListeners.
 */
public VoteResult localVote(Object decree) {
    VoteResult voteResult = new VoteResult();

    for (int i = 0; i < listeners.length; i++) {
        VotingListener listener = listeners[i];

        try {
            voteResult.addVote(listener.vote(decree));
        } catch (VoteException vex) {
            // do nothing here.
        } catch (RuntimeException ex) {
            if (log.isErrorEnabled())
                log.error(ex.toString());

            // if we are here, then listener
            // had thrown a RuntimeException
            return new FailureVoteResult(ex.getMessage());
        }
    }

    if (log.isDebugEnabled())
        log.debug("Voting on decree " + decree.toString() + " : " + voteResult.toString());

    return voteResult;
}
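Worth noting in this example: the log line uses ex.toString(), which is never null (it always contains at least the exception's class name), while the returned FailureVoteResult is built from ex.getMessage(), which is null when the exception was created without a detail message. A minimal sketch of the difference, using a bare RuntimeException:

RuntimeException ex = new RuntimeException();
System.out.println(ex.toString());   // prints "java.lang.RuntimeException"
System.out.println(ex.getMessage()); // prints "null"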
From source file:org.apache.pig.backend.hadoop.executionengine.physicalLayer.PhysicalOperator.java
/**
 * Implementations that call into the different versions of getNext are often
 * identical, differing only in the signature of the getNext() call they make.
 * This method allows implementations to cut down on some of that copy-and-paste.
 *
 * @param dataType Describes the type of obj; a byte from DataType.
 * @return result Result of applying this Operator to the Object.
 * @throws ExecException
 */
public Result getNext(byte dataType) throws ExecException {
    try {
        switch (dataType) {
        case DataType.BAG:
            return getNextDataBag();
        case DataType.BOOLEAN:
            return getNextBoolean();
        case DataType.BYTEARRAY:
            return getNextDataByteArray();
        case DataType.CHARARRAY:
            return getNextString();
        case DataType.DOUBLE:
            return getNextDouble();
        case DataType.FLOAT:
            return getNextFloat();
        case DataType.INTEGER:
            return getNextInteger();
        case DataType.LONG:
            return getNextLong();
        case DataType.BIGINTEGER:
            return getNextBigInteger();
        case DataType.BIGDECIMAL:
            return getNextBigDecimal();
        case DataType.DATETIME:
            return getNextDateTime();
        case DataType.MAP:
            return getNextMap();
        case DataType.TUPLE:
            return getNextTuple();
        default:
            throw new ExecException("Unsupported type for getNext: " + DataType.findTypeName(dataType));
        }
    } catch (RuntimeException e) {
        throw new ExecException("Exception while executing " + this.toString() + ": " + e.toString(), e);
    }
}
From source file:org.kuali.kfs.module.purap.service.PurapAccountingServiceTest.java
@ConfigureContext(session = appleton, shouldCommitTransactions = true)
public void testGenerateAccountDistributionForProrationWithZeroTotal_OneAcct() {
    PurapAccountingServiceFixture fixture = PurapAccountingServiceFixture.PREQ_PRORATION_ONE_ACCOUNT_ZERO_TOTAL;
    PurchasingAccountsPayableDocument preq = fixture.generatePaymentRequestDocument_OneItem();
    List<PurApAccountingLine> distributedAccounts = null;
    try {
        distributedAccounts = purapAccountingService.generateAccountDistributionForProrationWithZeroTotal(preq);
    } catch (RuntimeException re) {
        fail(re.toString());
    }
    List<BigDecimal> correctPercents = new ArrayList<BigDecimal>();
    correctPercents.add(0, new BigDecimal("100"));
    assertEquals(distributedAccounts.size(), correctPercents.size());
    comparePercentages(distributedAccounts, correctPercents);
}
From source file:org.kuali.kfs.module.purap.service.PurapAccountingServiceTest.java
@ConfigureContext(session = appleton, shouldCommitTransactions = true)
public void testGenerateAccountDistributionForProrationWithZeroTotal_TwoAcct() {
    PurapAccountingServiceFixture fixture = PurapAccountingServiceFixture.PREQ_PRORATION_TWO_ACCOUNTS_ZERO_TOTAL;
    PurchasingAccountsPayableDocument preq = fixture.generatePaymentRequestDocument_OneItem();
    List<PurApAccountingLine> distributedAccounts = null;
    try {
        distributedAccounts = purapAccountingService.generateAccountDistributionForProrationWithZeroTotal(preq);
    } catch (RuntimeException re) {
        fail(re.toString());
    }
    List<BigDecimal> correctPercents = new ArrayList<BigDecimal>();
    correctPercents.add(0, new BigDecimal("50"));
    correctPercents.add(1, new BigDecimal("50"));
    assertEquals(distributedAccounts.size(), correctPercents.size());
    comparePercentages(distributedAccounts, correctPercents);
}
From source file:org.kuali.kfs.module.purap.service.PurapAccountingServiceTest.java
@ConfigureContext(session = appleton, shouldCommitTransactions = true)
public void testGenerateAccountDistributionForProrationWithZeroTotal_ThreeAccount() {
    PurapAccountingServiceFixture fixture = PurapAccountingServiceFixture.PREQ_PRORATION_THIRDS_ZERO_TOTAL;
    PurchasingAccountsPayableDocument preq = fixture.generatePaymentRequestDocument_OneItem();
    List<PurApAccountingLine> distributedAccounts = null;
    try {
        distributedAccounts = purapAccountingService.generateAccountDistributionForProrationWithZeroTotal(preq);
    } catch (RuntimeException re) {
        fail(re.toString());
    }
    List<BigDecimal> correctPercents = new ArrayList<BigDecimal>();
    correctPercents.add(0, new BigDecimal("33.33"));
    correctPercents.add(1, new BigDecimal("33.33"));
    correctPercents.add(2, new BigDecimal("33.34"));
    assertEquals(distributedAccounts.size(), correctPercents.size());
    comparePercentages(distributedAccounts, correctPercents);
}
From source file:org.nuxeo.runtime.model.impl.RegistrationInfoImpl.java
protected ComponentInstance createComponentInstance() {
    try {
        return new ComponentInstanceImpl(this);
    } catch (RuntimeException e) {
        String msg = "Failed to instantiate component: " + implementation;
        log.error(msg, e);
        msg += " (" + e.toString() + ')';
        Framework.getRuntime().getWarnings().add(msg);
        Framework.handleDevError(e);
        throw e;
    }
}
From source file:org.commonjava.cartographer.graph.spi.neo4j.FileNeo4jConnectionFactory.java
@Override
public synchronized RelationshipGraphConnection openConnection(final String workspaceId, final boolean create)
        throws RelationshipGraphConnectionException {
    final File db = new File(dbBaseDirectory, workspaceId);
    if (!db.exists()) {
        if (!create) {
            throw new RelationshipGraphConnectionException("Workspace does not exist: %s.", workspaceId);
        } else if (!db.mkdirs()) {
            throw new RelationshipGraphConnectionException(
                    "Failed to create workspace directory for: %s. (dir: %s)", workspaceId, db);
        }
    }

    FileNeo4JGraphConnection conn = openConnections.get(workspaceId);
    if (conn == null || !conn.isOpen()) {
        conn = null;
        int attempt = 0;
        while (conn == null) {
            attempt++;
            try {
                conn = new FileNeo4JGraphConnection(workspaceId, db, useShutdownHook, storageBatchSize, this);
            } catch (RuntimeException ex) {
                // Retry (up to three attempts) when the store is still locked by a
                // connection that has not finished closing yet.
                if (ex.getCause() instanceof LifecycleException
                        && ex.getCause().getCause() instanceof StoreLockException
                        && ex.getCause().getCause().getCause() instanceof OverlappingFileLockException
                        && attempt < 3) {
                    logger.warn("Tried to connect to DB which is not closed (yet). {} Retrying in 5s.",
                            ex.toString());
                    try {
                        Thread.sleep(5000);
                    } catch (InterruptedException ez) {
                        logger.error("The wait delay was interrupted.", ez);
                    }
                } else {
                    throw ex;
                }
            }
        }
        openConnections.put(workspaceId, conn);
    }
    return conn;
}
From source file:com.versacomllc.audit.network.sync.SyncAdapter.java
/**
 * Called by the Android system in response to a request to run the sync
 * adapter. The work required to read data from the network, parse it, and
 * store it in the content provider is done here. Extending
 * AbstractThreadedSyncAdapter ensures that all methods within SyncAdapter
 * run on a background thread. For this reason, blocking I/O and other
 * long-running tasks can be run <em>in situ</em>, and you don't have to set
 * up a separate thread for them.
 *
 * <p>
 * This is where we actually perform any work required to perform a sync.
 * {@link AbstractThreadedSyncAdapter} guarantees that this will be called
 * on a non-UI thread, so it is safe to perform blocking I/O here.
 *
 * <p>
 * The syncResult argument allows you to pass information back to the method
 * that triggered the sync.
 */
@Override
public void onPerformSync(Account account, Bundle extras, String authority, ContentProviderClient provider,
        SyncResult syncResult) {
    Log.i(TAG, "Beginning network synchronization");
    try {
        Log.i(TAG, "Streaming data from network: ");

        /** Should sync when Internet connection is available */
        if (Utils.isOnline(getContext())) {
            // Sync customer
            this.loadCustomerList(getContext());
            // Add site work types
            this.loadSiteWorkTypesList(getContext());
            this.loadEmployeeList(getContext());
            this.loadDefectList(getContext());
            this.synchronizeAuditRecords(getContext());
        }
    } catch (RuntimeException e) {
        Log.e(TAG, "Error updating database: " + e.toString());
        syncResult.databaseError = true;
        return;
    }
    Log.i(TAG, "Network synchronization complete");
}
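A pattern worth noting across these examples: passing only e.toString() to a logger (as in the SyncAdapter above) records the exception's class name and message but drops the stack trace, while passing the exception itself as the last argument (as RegistrationInfoImpl and MapReduceBackupMergeJob do with log.error(msg, e)) lets the logging framework print the full trace. A minimal sketch of the difference, assuming an SLF4J-style logger named log:

try {
    throw new IllegalArgumentException("bad input");
} catch (RuntimeException e) {
    // Logs a single line: "Sync failed: java.lang.IllegalArgumentException: bad input"
    log.error("Sync failed: " + e.toString());

    // Logs the same message plus the full stack trace
    log.error("Sync failed", e);
}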