Error Handling and Edge Cases
Learning Objectives
Categorize XRPL errors by type (transport, protocol, transaction, application)
Implement appropriate retry strategies for each error category
Handle edge cases including partial failures, race conditions, and state inconsistencies
Build comprehensive error logging and alerting systems
Design graceful degradation patterns for production resilience
ERROR LAYERS:
1. TRANSPORT ERRORS
2. PROTOCOL ERRORS
3. XRPL ERRORS
4. APPLICATION ERRORS
class TransportErrorHandler {
  /**
   * Heuristically detect transport-level failures (socket, DNS,
   * WebSocket) by matching well-known substrings in the error message.
   * @param {Error} error
   * @returns {boolean} true when the message looks transport-related
   */
  static isTransportError(error) {
    const haystack = error.message?.toLowerCase() || ''
    return [
      'ECONNREFUSED',
      'ECONNRESET',
      'ETIMEDOUT',
      'ENOTFOUND',
      'WebSocket',
      'connection',
      'network'
    ].some(pattern => haystack.includes(pattern.toLowerCase()))
  }
  /**
   * Exponential backoff capped at 30s, plus up to 1s of random jitter
   * so concurrent clients do not retry in lockstep.
   * @param {Error} error - unused; kept for interface symmetry
   * @param {number} attempt - zero-based attempt counter
   * @returns {number} delay in milliseconds
   */
  static getRetryDelay(error, attempt) {
    const capped = Math.min(1000 * Math.pow(2, attempt), 30000)
    return capped + Math.random() * 1000
  }
  /**
   * Retry only transport errors, and only while under the attempt cap.
   */
  static shouldRetry(error, attempt, maxAttempts = 5) {
    return attempt < maxAttempts && this.isTransportError(error)
  }
}
```
// Known rippled error codes mapped to retryability plus a human-readable
// message. Used by classifyError() below.
// BUG FIX: the original object declared `noNetwork` twice; the second
// entry ("Server not synced") silently overwrote the first. That entry
// corresponds to rippled's `notSynced` error code and is renamed here.
const XRPLErrors = {
  // Not found errors
  actNotFound: { retryable: false, message: 'Account does not exist' },
  lgrNotFound: { retryable: true, message: 'Ledger not available' },
  txnNotFound: { retryable: true, message: 'Transaction not found yet' },
  // Resource errors
  tooBusy: { retryable: true, message: 'Server overloaded' },
  slowDown: { retryable: true, message: 'Rate limited' },
  noNetwork: { retryable: true, message: 'Not connected to network' },
  // Validation errors
  invalidParams: { retryable: false, message: 'Invalid request parameters' },
  unknownCmd: { retryable: false, message: 'Unknown command' },
  // State errors
  noCurrent: { retryable: true, message: 'No current ledger' },
  notSynced: { retryable: true, message: 'Server not synced' }
}
/**
 * Map a raw error to { code, retryable, message } using the XRPLErrors
 * lookup table. The code is taken from error.data.error (rippled
 * responses) or, failing that, the error message. Unknown codes are
 * treated as non-retryable.
 * @param {Error} error
 * @returns {{code: string, retryable: boolean, message: string}}
 */
function classifyError(error) {
  const code = error.data?.error || error.message
  const entry = XRPLErrors[code]
  if (!entry) {
    return { code: 'unknown', retryable: false, message: error.message }
  }
  return { ...entry, code }
}
```
class RetryHandler {
  /**
   * Retries an async operation with exponential backoff and jitter.
   * Transport errors and a configurable set of retryable XRPL error
   * codes are retried; everything else fails fast.
   * @param {object} [options]
   * @param {number} [options.maxRetries=3]
   * @param {number} [options.baseDelay=1000] base backoff in ms
   * @param {number} [options.maxDelay=30000] backoff cap in ms
   */
  constructor(options = {}) {
    this.maxRetries = options.maxRetries || 3
    this.baseDelay = options.baseDelay || 1000
    this.maxDelay = options.maxDelay || 30000
    this.retryableErrors = new Set([
      'tooBusy', 'slowDown', 'lgrNotFound',
      'txnNotFound', 'noCurrent', 'noNetwork'
    ])
  }
  /**
   * Run `operation`, retrying retryable failures up to maxRetries times.
   * @param {() => Promise<any>} operation
   * @param {object} [context] metadata attached to the thrown error
   * @returns {Promise<any>} the operation's result
   * @throws the last error, enhanced with context/attempts/timestamp
   */
  async execute(operation, context = {}) {
    let lastError
    for (let attempt = 0; attempt <= this.maxRetries; attempt++) {
      try {
        return await operation()
      } catch (error) {
        lastError = error
        if (!this.shouldRetry(error, attempt)) {
          throw this.enhanceError(error, context, attempt)
        }
        const delay = this.calculateDelay(attempt, error)
        // BUG FIX: the log line was a bare template string (missing
        // backticks — a syntax error), and the original called an
        // undefined `sleep` helper; the delay is now inlined.
        console.log(`Retry ${attempt + 1}/${this.maxRetries} after ${delay}ms: ${error.message}`)
        await new Promise(resolve => setTimeout(resolve, delay))
      }
    }
    throw this.enhanceError(lastError, context, this.maxRetries)
  }
  /** Retry transport errors and known retryable XRPL error codes. */
  shouldRetry(error, attempt) {
    if (attempt >= this.maxRetries) return false
    // Transport errors (socket/DNS/WebSocket) are always retryable
    if (TransportErrorHandler.isTransportError(error)) return true
    // XRPL protocol errors: retry only whitelisted codes
    const code = error.data?.error
    return this.retryableErrors.has(code)
  }
  /**
   * Exponential backoff capped at maxDelay, honoring a server-supplied
   * Retry-After (seconds), with ±20% jitter to avoid thundering herds.
   */
  calculateDelay(attempt, error) {
    let delay = this.baseDelay * Math.pow(2, attempt)
    // Respect Retry-After header if present (seconds → ms)
    if (error.retryAfter) {
      delay = Math.max(delay, error.retryAfter * 1000)
    }
    delay = Math.min(delay, this.maxDelay)
    delay *= 0.8 + Math.random() * 0.4
    return Math.floor(delay)
  }
  /** Attach context, total attempt count, and timestamp for logging. */
  enhanceError(error, context, attempts) {
    error.context = context
    error.attempts = attempts + 1
    error.timestamp = new Date().toISOString()
    return error
  }
}
// Usage
// NOTE(review): assumes `client` (a connected xrpl.js Client), `address`,
// and top-level await (ES module or async context) are in scope — confirm.
const retry = new RetryHandler({ maxRetries: 5 })
const result = await retry.execute(
  () => client.request({ command: 'account_info', account: address }),
  { operation: 'account_info', account: address }
)
```
class CircuitBreaker {
  /**
   * Classic circuit breaker: trips OPEN after `failureThreshold`
   * consecutive failures, moves to HALF_OPEN after `resetTimeout` ms,
   * and CLOSES again after three consecutive half-open successes.
   * @param {object} [options]
   * @param {number} [options.failureThreshold=5]
   * @param {number} [options.resetTimeout=60000] ms before probing again
   */
  constructor(options = {}) {
    this.failureThreshold = options.failureThreshold || 5
    this.resetTimeout = options.resetTimeout || 60000
    this.state = 'CLOSED' // CLOSED, OPEN, HALF_OPEN
    this.failures = 0
    this.lastFailure = null
    this.successCount = 0
  }
  /**
   * Run `operation` through the breaker; rejects immediately while OPEN
   * and the reset timeout has not yet elapsed.
   */
  async execute(operation) {
    if (this.state === 'OPEN') {
      const elapsed = Date.now() - this.lastFailure
      if (elapsed <= this.resetTimeout) {
        throw new Error('Circuit breaker is OPEN')
      }
      // Timeout elapsed: allow probe traffic
      this.state = 'HALF_OPEN'
      this.successCount = 0
    }
    try {
      const outcome = await operation()
      this.onSuccess()
      return outcome
    } catch (err) {
      this.onFailure()
      throw err
    }
  }
  /** Reset the failure count; close the breaker after 3 half-open successes. */
  onSuccess() {
    this.failures = 0
    if (this.state !== 'HALF_OPEN') return
    this.successCount += 1
    if (this.successCount >= 3) {
      this.state = 'CLOSED'
      console.log('Circuit breaker CLOSED')
    }
  }
  /** Record a failure and trip the breaker at the threshold. */
  onFailure() {
    this.failures += 1
    this.lastFailure = Date.now()
    if (this.failures >= this.failureThreshold) {
      this.state = 'OPEN'
      console.log('Circuit breaker OPEN')
    }
  }
  /** Snapshot of breaker state for monitoring/health endpoints. */
  getState() {
    const { state, failures, lastFailure } = this
    return { state, failures, lastFailure }
  }
}
```
// Problem: Checking balance then sending payment
// Between check and send, balance could change
// BAD: Race condition
// Pedagogical anti-example: the check-then-act pattern below is a TOCTOU
// race — do NOT use. Kept intentionally broken for contrast with the
// GOOD variant.
// NOTE(review): `getBalance` and `sendPayment` are assumed helpers not
// shown in this file — confirm they exist where this example is used.
async function sendIfSufficient_BAD(client, wallet, destination, amount) {
  const balance = await getBalance(client, wallet.address)
  if (balance >= amount) {
    // Another transaction could drain balance here!
    await sendPayment(client, wallet, destination, amount)
  }
}
// GOOD: Atomic operation with proper error handling
/**
 * Submit the payment and let the ledger enforce sufficiency atomically:
 * an unfunded payment surfaces as tecUNFUNDED_PAYMENT instead of racing
 * a separate balance check.
 * @returns {Promise<{success: boolean, result?: object, reason?: string}>}
 * @throws any error other than an unfunded-payment result
 */
async function sendIfSufficient_GOOD(client, wallet, destination, amount) {
  const payment = {
    TransactionType: 'Payment',
    Account: wallet.address,
    Destination: destination,
    Amount: xrpl.xrpToDrops(amount)
  }
  try {
    const result = await client.submitAndWait(payment, { wallet })
    return { success: true, result }
  } catch (error) {
    const engineResult = error.data?.engine_result
    if (engineResult === 'tecUNFUNDED_PAYMENT') {
      return { success: false, reason: 'insufficient_funds' }
    }
    throw error
  }
}
```
// Resolves sequence-number conflicts (tefPAST_SEQ / terPRE_SEQ) for a
// single account by inspecting recent account_tx history.
class SequenceConflictHandler {
  constructor(client, account) {
    this.client = client
    this.account = account
    this.pendingSequences = new Map()
  }
  // Decide how to proceed after a sequence-related submission error.
  // Returns a status object; rethrows any non-sequence error unchanged.
  // NOTE(review): assumes `transaction` carries an `expectedHash`
  // property set by the caller when the tx was first signed — confirm.
  async handleSequenceError(error, transaction) {
    const engineResult = error.data?.engine_result
    if (engineResult === 'tefPAST_SEQ') {
      // Sequence already used
      // Check if our transaction succeeded with this sequence
      const existingTx = await this.findTransactionBySequence(
        transaction.Sequence
      )
      if (existingTx?.hash === transaction.expectedHash) {
        // Our transaction, already processed
        return { status: 'already_succeeded', tx: existingTx }
      }
      // Different transaction used the sequence
      // Resync and retry with new sequence
      return { status: 'retry_with_new_sequence' }
    }
    if (engineResult === 'terPRE_SEQ') {
      // Waiting for earlier sequence
      // Earlier transaction pending - wait and retry
      return { status: 'wait_and_retry', delay: 5000 }
    }
    throw error
  }
  // Scan the 20 most recent account transactions for one with the given
  // sequence. NOTE(review): only looks at the latest 20 entries — an
  // older match would be missed; also assumes the account_tx response
  // nests the tx under `tx.tx` (older xrpl.js shape) — verify against
  // the library version in use.
  async findTransactionBySequence(sequence) {
    const response = await this.client.request({
      command: 'account_tx',
      account: this.account,
      limit: 20
    })
    return response.result.transactions.find(
      tx => tx.tx.Sequence === sequence
    )
  }
}
```
// Handle partial failures in multi-step operations
class TransactionalOperation {
  /**
   * Saga-style multi-step operation: each step may register a
   * compensation that undoes it. On the first failure, previously
   * completed steps are rolled back in reverse order and the error is
   * rethrown.
   */
  constructor() {
    this.steps = []
    this.completedSteps = []
    this.compensations = new Map()
  }
  /**
   * Register a step.
   * @param {string} name unique step name (used as the compensation key)
   * @param {() => Promise<any>} operation
   * @param {(result: any) => Promise<void>} [compensation] undo handler,
   *   invoked with the step's result during rollback
   */
  addStep(name, operation, compensation) {
    this.steps.push({ name, operation })
    if (compensation) {
      this.compensations.set(name, compensation)
    }
  }
  /**
   * Run all steps in order.
   * @returns {Promise<Array<{name: string, result: any}>>} completed steps
   * @throws the first step error, after rollback completes
   */
  async execute() {
    for (const step of this.steps) {
      try {
        const result = await step.operation()
        this.completedSteps.push({ name: step.name, result })
      } catch (error) {
        // BUG FIX: the log strings in this class were bare template
        // strings (missing backticks), which is a syntax error.
        console.error(`Step ${step.name} failed:`, error)
        await this.rollback()
        throw error
      }
    }
    return this.completedSteps
  }
  /** Run compensations for completed steps, newest first (best-effort). */
  async rollback() {
    console.log('Rolling back completed steps...')
    // Rollback in reverse order
    for (const step of [...this.completedSteps].reverse()) {
      const compensation = this.compensations.get(step.name)
      if (!compensation) continue
      try {
        await compensation(step.result)
        console.log(`Rolled back: ${step.name}`)
      } catch (error) {
        console.error(`Rollback failed for ${step.name}:`, error)
        // Swallowed deliberately: rollback is best-effort; a failed
        // compensation is logged for manual intervention.
      }
    }
  }
}
```
class ErrorLogger {
  /**
   * Structured error logger with service/environment tagging plus a
   * simple alerting policy for XRPL errors.
   * @param {object} [options]
   * @param {string} [options.serviceName='xrpl-service']
   * @param {string} [options.environment='development']
   */
  constructor(options = {}) {
    this.serviceName = options.serviceName || 'xrpl-service'
    this.environment = options.environment || 'development'
  }
  /**
   * Emit one structured JSON log line for `error` and forward the entry
   * to the monitoring hook.
   * @returns {object} the log entry, for inspection by callers
   */
  logError(error, context = {}) {
    const errorDetails = {
      message: error.message,
      code: error.data?.error || error.code,
      stack: error.stack
    }
    const xrplDetails = {
      engineResult: error.data?.engine_result,
      engineResultCode: error.data?.engine_result_code,
      engineResultMessage: error.data?.engine_result_message
    }
    const logEntry = {
      timestamp: new Date().toISOString(),
      level: 'error',
      service: this.serviceName,
      environment: this.environment,
      error: errorDetails,
      context: {
        ...context,
        attempts: error.attempts,
        operation: context.operation
      },
      xrpl: xrplDetails
    }
    // One JSON object per line, friendly to log aggregation pipelines
    console.error(JSON.stringify(logEntry))
    // Fire-and-forget: monitoring must not block the caller
    this.sendToMonitoring(logEntry)
    return logEntry
  }
  /** Hook for monitoring integrations (DataDog, Sentry, etc.). */
  async sendToMonitoring(logEntry) {
  }
  /**
   * Alert on non-retryable errors, repeated retry exhaustion, or
   * tec-class on-ledger failures.
   */
  shouldAlert(error) {
    if (!classifyError(error).retryable) return true
    if (error.attempts >= 3) return true
    return Boolean(error.data?.engine_result?.startsWith('tec'))
  }
}
```
class HealthChecker {
  /**
   * Aggregates connection, server-sync, and ledger-freshness checks for
   * an XRPL client into one health report.
   * @param {object} client xrpl.js Client (needs isConnected + request)
   */
  constructor(client) {
    this.client = client
    this.lastCheck = null
    this.status = 'unknown'
    this.errors = []
  }
  /**
   * Run all checks and compute overall status ('healthy' only when
   * every individual check passes).
   * @returns {Promise<{status: string, timestamp: string, checks: object}>}
   */
  async check() {
    // The three checks are independent, so run them concurrently
    const [connection, serverSync, recentLedger] = await Promise.all([
      this.checkConnection(),
      this.checkServerSync(),
      this.checkRecentLedger()
    ])
    const checks = { connection, serverSync, recentLedger }
    this.lastCheck = Date.now()
    this.status = Object.values(checks).every(c => c.healthy)
      ? 'healthy'
      : 'unhealthy'
    return {
      status: this.status,
      timestamp: new Date().toISOString(),
      checks
    }
  }
  /** Verify the WebSocket connection is up. */
  async checkConnection() {
    try {
      const connected = this.client.isConnected()
      return {
        name: 'connection',
        healthy: connected,
        message: connected ? 'Connected' : 'Disconnected'
      }
    } catch (error) {
      return { name: 'connection', healthy: false, message: error.message }
    }
  }
  /** Verify the server reports a fully-synced state. */
  async checkServerSync() {
    try {
      const info = await this.client.request({ command: 'server_info' })
      const state = info.result.info.server_state
      const healthy = ['full', 'proposing', 'validating'].includes(state)
      return {
        name: 'serverSync',
        healthy,
        // BUG FIX: message was a bare template string (missing backticks)
        message: `Server state: ${state}`,
        details: { state }
      }
    } catch (error) {
      return { name: 'serverSync', healthy: false, message: error.message }
    }
  }
  /** Verify the latest validated ledger is fresh (under 30s old). */
  async checkRecentLedger() {
    try {
      const info = await this.client.request({ command: 'server_info' })
      // Missing age is treated as stale (999s)
      const age = info.result.info.validated_ledger?.age || 999
      const healthy = age < 30
      return {
        name: 'recentLedger',
        healthy,
        // BUG FIX: message was a bare template string (missing backticks)
        message: `Ledger age: ${age}s`,
        details: { age }
      }
    } catch (error) {
      return { name: 'recentLedger', healthy: false, message: error.message }
    }
  }
}
```
class ResilientClient {
  /**
   * XRPL client wrapper with per-server circuit breakers and automatic
   * failover across a primary and fallback server list.
   * @param {string} primaryUrl
   * @param {string[]} [fallbackUrls]
   */
  constructor(primaryUrl, fallbackUrls = []) {
    this.servers = [primaryUrl, ...fallbackUrls]
    this.currentIndex = 0
    this.client = null
    // One breaker per server so a flapping host is skipped individually
    this.circuitBreakers = this.servers.map(() => new CircuitBreaker())
  }
  /**
   * Connect to the first reachable server whose breaker is not OPEN,
   * starting from the current index.
   * @throws {Error} when every server is unavailable
   */
  async connect() {
    for (let i = 0; i < this.servers.length; i++) {
      const index = (this.currentIndex + i) % this.servers.length
      try {
        if (this.circuitBreakers[index].state === 'OPEN') continue
        // NOTE(review): any previous client is not disconnected before
        // being replaced — confirm whether explicit cleanup is needed.
        this.client = new xrpl.Client(this.servers[index])
        await this.client.connect()
        this.currentIndex = index
        // BUG FIX: log lines in this class were bare template strings
        // (missing backticks), which is a syntax error.
        console.log(`Connected to ${this.servers[index]}`)
        return
      } catch (error) {
        this.circuitBreakers[index].onFailure()
        console.error(`Failed to connect to ${this.servers[index]}`)
      }
    }
    throw new Error('All servers unavailable')
  }
  /**
   * Issue a request through the current server's breaker; on an OPEN
   * breaker, fail over to another server and retry once per failover.
   * @throws {Error} when every server is unavailable
   */
  async request(command) {
    const breaker = this.circuitBreakers[this.currentIndex]
    try {
      return await breaker.execute(() => this.client.request(command))
    } catch (error) {
      if (error.message === 'Circuit breaker is OPEN') {
        // Try next server; failover() throws if none remain, which
        // bounds the recursion here
        await this.failover()
        return this.request(command)
      }
      throw error
    }
  }
  /** Switch to the next non-OPEN server, or throw if none connect. */
  async failover() {
    const startIndex = this.currentIndex
    for (let i = 1; i < this.servers.length; i++) {
      const index = (startIndex + i) % this.servers.length
      if (this.circuitBreakers[index].state !== 'OPEN') {
        try {
          this.client = new xrpl.Client(this.servers[index])
          await this.client.connect()
          this.currentIndex = index
          console.log(`Failover to ${this.servers[index]}`)
          return
        } catch (error) {
          this.circuitBreakers[index].onFailure()
        }
      }
    }
    throw new Error('All servers unavailable')
  }
}
```
Build a comprehensive error handling system with retry logic, circuit breakers, structured logging, and health checks. Test with simulated failures.
Time Investment: 3-4 hours
End of Lesson 12
Key Takeaways
Classify errors by type:
Different categories need different handling strategies.
Retry intelligently:
Exponential backoff with jitter; respect rate limits.
Use circuit breakers:
Prevent cascade failures when services are down.
Handle race conditions:
Design for concurrent access; use atomic operations.
Log comprehensively:
Structured logs with context enable debugging.