Performance Optimization | XRPL APIs & Integration | XRP Academy
3 free lessons remaining this month

Free preview access resets monthly

Upgrade for Unlimited
Skip to main content
Advanced · 55 min

Performance Optimization

Learning Objectives

Implement connection pooling for high-throughput applications

Batch requests to reduce round-trips and improve efficiency

Design caching strategies appropriate for XRPL data characteristics

Optimize queries for minimal latency and server load

Monitor and tune performance metrics in production

class XRPLConnectionPool {
  /**
   * Fixed-size pool of persistent xrpl.Client WebSocket connections,
   * distributed round-robin across the given server URLs.
   *
   * @param {string[]} servers - WebSocket endpoint URLs.
   * @param {Object} [options]
   * @param {number} [options.poolSize=5] - Number of connections to keep open.
   * @param {number} [options.healthCheckInterval=30000] - Ping period in ms.
   */
  constructor(servers, options = {}) {
    this.servers = servers
    this.poolSize = options.poolSize || 5
    this.connections = []  // every connection wrapper, healthy or not
    this.available = []    // wrappers not currently checked out
    this.waiting = []      // resolver callbacks for queued acquire() calls
    this.healthCheckInterval = options.healthCheckInterval || 30000
    this.healthCheckTimer = null  // BUG FIX: keep handle so checks can be stopped
  }

  /** Open all connections in parallel, then start periodic health checks. */
  async initialize() {
    const connectPromises = []

    for (let i = 0; i < this.poolSize; i++) {
      // Round-robin assignment of connections to servers
      const serverIndex = i % this.servers.length
      connectPromises.push(this.createConnection(serverIndex))
    }

    this.connections = await Promise.all(connectPromises)
    this.available = [...this.connections]

    this.startHealthChecks()

    // BUG FIX: the original passed a bare `Pool initialized ...` expression
    // with no backticks — a syntax error; template literals require them.
    console.log(`Pool initialized with ${this.poolSize} connections`)
  }

  /** Connect one client and wrap it with pool bookkeeping metadata. */
  async createConnection(serverIndex) {
    const client = new xrpl.Client(this.servers[serverIndex])
    await client.connect()
    return {
      client,
      serverIndex,
      requestCount: 0,
      lastUsed: Date.now(),
      healthy: true
    }
  }

  /**
   * Check out a connection, waiting up to timeoutMs if none is free.
   * @param {number} [timeoutMs=5000]
   * @returns {Promise<Object>} a pool connection wrapper
   * @throws {Error} when no connection frees up within the timeout
   */
  async acquire(timeoutMs = 5000) {
    // Fast path: a connection is idle right now.
    if (this.available.length > 0) {
      const conn = this.available.shift()
      conn.lastUsed = Date.now()
      return conn
    }

    // Slow path: queue a resolver that release() will call later.
    return new Promise((resolve, reject) => {
      const resolver = (conn) => {
        clearTimeout(timeout)
        resolve(conn)
      }

      const timeout = setTimeout(() => {
        // Give up: remove ourselves from the wait queue so a later release()
        // does not hand a connection to a dead promise.
        const index = this.waiting.indexOf(resolver)
        if (index > -1) this.waiting.splice(index, 1)
        reject(new Error('Connection pool timeout'))
      }, timeoutMs)

      this.waiting.push(resolver)
    })
  }

  /** Return a connection to the pool, or hand it straight to a waiter. */
  release(conn) {
    conn.requestCount++

    // Give to waiting request if any
    if (this.waiting.length > 0) {
      const resolver = this.waiting.shift()
      conn.lastUsed = Date.now()
      resolver(conn)
      return
    }

    // Return to pool
    this.available.push(conn)
  }

  /** Acquire, run one request, and always release — even on failure. */
  async request(requestData) {
    const conn = await this.acquire()

    try {
      return await conn.client.request(requestData)
    } finally {
      this.release(conn)
    }
  }

  /** Ping every connection periodically; replace any that fail. */
  startHealthChecks() {
    this.healthCheckTimer = setInterval(async () => {
      for (const conn of this.connections) {
        try {
          await conn.client.request({ command: 'ping' })
          conn.healthy = true
        } catch (error) {
          conn.healthy = false
          // BUG FIX: the original left this promise floating; a failed
          // reconnect became an unhandled rejection (fatal on modern Node).
          this.replaceConnection(conn).catch((err) =>
            console.error('Failed to replace connection:', err)
          )
        }
      }
    }, this.healthCheckInterval)
  }

  /** Stop periodic health checks (call before shutting the pool down). */
  stopHealthChecks() {
    if (this.healthCheckTimer) {
      clearInterval(this.healthCheckTimer)
      this.healthCheckTimer = null
    }
  }

  /** Tear down a failed connection and open a fresh one to the same server. */
  async replaceConnection(oldConn) {
    const index = this.connections.indexOf(oldConn)
    if (index === -1) return

    try {
      await oldConn.client.disconnect()
    } catch (e) {
      // Best effort — the connection is already broken.
    }

    const newConn = await this.createConnection(oldConn.serverIndex)
    this.connections[index] = newConn

    // Replace in available pool if present
    const availIndex = this.available.indexOf(oldConn)
    if (availIndex > -1) {
      this.available[availIndex] = newConn
    }
  }

  /** Snapshot of pool usage for monitoring dashboards. */
  getStats() {
    return {
      total: this.connections.length,
      available: this.available.length,
      waiting: this.waiting.length,
      healthy: this.connections.filter(c => c.healthy).length,
      totalRequests: this.connections.reduce((sum, c) => sum + c.requestCount, 0)
    }
  }
}
```


/**
 * Coalesces individual XRPL requests into parallel batches: queued requests
 * are dispatched together once the queue reaches batchSize, or after
 * batchDelayMs has elapsed — whichever comes first.
 */
class RequestBatcher {
  constructor(pool, options = {}) {
    this.pool = pool
    this.batchSize = options.batchSize || 10
    this.batchDelayMs = options.batchDelayMs || 50
    this.pendingRequests = []
    this.batchTimer = null
  }

  /** Queue one request; resolves with that request's individual response. */
  async request(requestData) {
    return new Promise((resolve, reject) => {
      this.pendingRequests.push({ requestData, resolve, reject })

      const queueFull = this.pendingRequests.length >= this.batchSize
      if (queueFull) {
        this.flush()
        return
      }
      // Only arm one timer at a time for the partially-filled queue.
      if (this.batchTimer === null) {
        this.batchTimer = setTimeout(() => this.flush(), this.batchDelayMs)
      }
    })
  }

  /** Dispatch up to batchSize queued requests in parallel. */
  async flush() {
    if (this.batchTimer !== null) {
      clearTimeout(this.batchTimer)
      this.batchTimer = null
    }

    const batch = this.pendingRequests.splice(0, this.batchSize)
    if (batch.length === 0) return

    // allSettled so one failure never blocks the rest of the batch.
    const outcomes = await Promise.allSettled(
      batch.map(({ requestData }) => this.pool.request(requestData))
    )

    // Settle each caller's promise with its own outcome.
    outcomes.forEach((outcome, i) => {
      const { resolve, reject } = batch[i]
      if (outcome.status === 'fulfilled') {
        resolve(outcome.value)
      } else {
        reject(outcome.reason)
      }
    })
  }
}

// Usage
// NOTE(review): top-level await requires an ES module context (or wrap this
// in an async main()) — confirm how the lesson expects this to be run.
const pool = new XRPLConnectionPool(['wss://s1.ripple.com:51233'])
await pool.initialize()

// Dispatch up to 20 queued requests at once, or whatever has accumulated
// within 100 ms — whichever happens first.
const batcher = new RequestBatcher(pool, { batchSize: 20, batchDelayMs: 100 })

// These requests will be batched together
// (the 'rAddr1...' values are placeholders — substitute real r... addresses)
const results = await Promise.all([
batcher.request({ command: 'account_info', account: 'rAddr1...' }),
batcher.request({ command: 'account_info', account: 'rAddr2...' }),
batcher.request({ command: 'account_info', account: 'rAddr3...' }),
// ... many more
])
```

/**
 * Fetch validated-ledger balances for many accounts in bounded-parallel
 * batches of PARALLEL_LIMIT, pausing briefly between batches to avoid
 * hammering the server.
 *
 * @param {{request: Function}} pool - Pool/client exposing request().
 * @param {string[]} accounts - Account addresses to look up.
 * @returns {Promise<Map<string, {balance: number, sequence: number}|{error: string}>>}
 *   Per-account result: XRP balance + sequence, or the failure message.
 */
async function bulkGetAccountBalances(pool, accounts) {
  const PARALLEL_LIMIT = 20
  const results = new Map()

  // BUG FIX: sleep() was called below but never defined anywhere in this
  // file, so the multi-batch path threw a ReferenceError at runtime.
  const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms))

  for (let i = 0; i < accounts.length; i += PARALLEL_LIMIT) {
    const batch = accounts.slice(i, i + PARALLEL_LIMIT)

    // allSettled so one bad account doesn't abort the whole batch.
    const batchResults = await Promise.allSettled(
      batch.map(account =>
        pool.request({
          command: 'account_info',
          account: account,
          ledger_index: 'validated'
        })
      )
    )

    batchResults.forEach((result, index) => {
      const account = batch[index]
      if (result.status === 'fulfilled') {
        results.set(account, {
          // Balance arrives as a decimal string of drops; 1 XRP = 1,000,000
          // drops. Number() instead of radix-less parseInt.
          balance: Number(result.value.result.account_data.Balance) / 1_000_000,
          sequence: result.value.result.account_data.Sequence
        })
      } else {
        results.set(account, { error: result.reason.message })
      }
    })

    // Brief pause between batches
    if (i + PARALLEL_LIMIT < accounts.length) {
      await sleep(100)
    }
  }

  return results
}
```


class XRPLCache {
  /**
   * Two-tier (in-memory + optional Redis) cache for XRPL request results,
   * with per-command TTLs tuned to how quickly each data type goes stale.
   *
   * @param {{redis?: Object}} [options] - Optional Redis client (setex/get).
   */
  constructor(options = {}) {
    this.memoryCache = new Map()
    this.redis = options.redis  // Optional Redis client
    this.stats = { hits: 0, misses: 0 }

    // TTL configuration (ms) by data type
    this.ttlConfig = {
      account_info: 5000,        // 5 seconds - balances change
      account_lines: 30000,      // 30 seconds
      server_info: 60000,        // 1 minute
      ledger_validated: 300000,  // 5 minutes - historical
      transaction: Infinity,     // Forever - immutable
      book_offers: 2000          // 2 seconds - volatile
    }
  }

  /**
   * Deterministic cache key for a request.
   * BUG FIX: the original returned a bare `xrpl:${...}` expression with no
   * backticks — a syntax error. Params are also sorted by key so requests
   * that differ only in property order share one cache entry.
   */
  getCacheKey(request) {
    const { command, ...params } = request
    const stableParams = Object.fromEntries(
      Object.entries(params).sort(([a], [b]) => a.localeCompare(b))
    )
    return `xrpl:${command}:${JSON.stringify(stableParams)}`
  }

  /** TTL in ms for this request's command (default 10 s for unknown ones). */
  getTTL(request) {
    return this.ttlConfig[request.command] || 10000
  }

  /**
   * Look up a cached result: memory first, then Redis (which also
   * repopulates the memory tier on hit).
   * @returns {Promise<Object|null>} cached data, or null on a miss.
   */
  async get(request) {
    const key = this.getCacheKey(request)

    // Check memory cache first
    const memResult = this.memoryCache.get(key)
    if (memResult) {
      if (Date.now() < memResult.expires) {
        this.stats.hits++
        return memResult.data
      }
      // Evict stale entries eagerly so they don't linger until cleanup.
      this.memoryCache.delete(key)
    }

    // Check Redis if available
    if (this.redis) {
      const redisResult = await this.redis.get(key)
      if (redisResult) {
        const data = JSON.parse(redisResult)
        // Populate memory cache so the next hit skips Redis.
        this.memoryCache.set(key, {
          data,
          expires: Date.now() + this.getTTL(request)
        })
        this.stats.hits++
        return data
      }
    }

    this.stats.misses++
    return null
  }

  /** Store a result in both tiers (Redis skipped for Infinity TTLs). */
  async set(request, data) {
    const key = this.getCacheKey(request)
    const ttl = this.getTTL(request)

    // Set in memory (Date.now() + Infinity === Infinity, i.e. never expires)
    this.memoryCache.set(key, {
      data,
      expires: Date.now() + ttl
    })

    // Set in Redis if available; setex needs a finite TTL in seconds.
    if (this.redis && ttl !== Infinity) {
      await this.redis.setex(key, Math.ceil(ttl / 1000), JSON.stringify(data))
    }

    // Cleanup old memory entries once the map grows large.
    if (this.memoryCache.size > 10000) {
      this.cleanupMemoryCache()
    }
  }

  /** Drop every expired entry from the memory tier. */
  cleanupMemoryCache() {
    const now = Date.now()
    for (const [key, value] of this.memoryCache) {
      if (now > value.expires) {
        this.memoryCache.delete(key)
      }
    }
  }

  /** Invalidate memory entries whose key contains the given substring. */
  invalidate(pattern) {
    for (const key of this.memoryCache.keys()) {
      if (key.includes(pattern)) {
        this.memoryCache.delete(key)
      }
    }
  }

  /** Hit/miss counters plus derived hit rate and memory-tier size. */
  getStats() {
    const total = this.stats.hits + this.stats.misses
    return {
      ...this.stats,
      hitRate: total > 0 ? (this.stats.hits / total * 100).toFixed(2) + '%' : 'N/A',
      memorySize: this.memoryCache.size
    }
  }
}
```

/**
 * Client wrapper that serves read requests from a cache when possible,
 * falling back to — and repopulating from — the underlying pool.
 */
class CachedXRPLClient {
  constructor(pool, cache) {
    this.pool = pool
    this.cache = cache
  }

  /** Resolve a request, preferring a cached response for read commands. */
  async request(requestData) {
    const cacheable = this.isCacheable(requestData)

    if (cacheable) {
      const hit = await this.cache.get(requestData)
      if (hit) {
        return hit
      }
    }

    // Cache miss (or uncacheable command): go to the network.
    const response = await this.pool.request(requestData)

    if (cacheable) {
      await this.cache.set(requestData, response)
    }

    return response
  }

  /** Writes and subscriptions must always reach the server. */
  isCacheable(request) {
    switch (request.command) {
      case 'submit':
      case 'subscribe':
      case 'unsubscribe':
        return false
      default:
        return true
    }
  }
}
```


class OptimizedQueries {
// Paired BAD/GOOD examples contrasting wasteful XRPL query patterns with
// efficient equivalents. The BAD variants are intentionally kept as-is for
// teaching purposes — do not "fix" them.

// BAD: Fetches too much data
// queue/signer_lists force the server to assemble extra objects the caller
// then throws away.
static async getBalanceBad(client, account) {
const info = await client.request({
command: 'account_info',
account: account,
ledger_index: 'validated',
queue: true, // Not needed for balance
signer_lists: true // Not needed for balance
})
return info.result.account_data.Balance
}

// GOOD: Minimal request
// Same result, but the server only returns the base account_data object.
// Balance is returned as a string of drops (1 XRP = 1,000,000 drops).
static async getBalanceGood(client, account) {
const info = await client.request({
command: 'account_info',
account: account,
ledger_index: 'validated'
// Only request what you need
})
return info.result.account_data.Balance
}

// BAD: Fetches all transactions then filters
// Pulls up to 1000 transactions over the wire, then discards all but 10
// client-side — wasted bandwidth and server load.
static async getRecentPaymentsBad(client, account) {
const result = await client.request({
command: 'account_tx',
account: account,
limit: 1000 // Way too many
})
return result.result.transactions.filter(
tx => tx.tx.TransactionType === 'Payment'
).slice(0, 10)
}

// GOOD: Use server-side limits
// Pages in chunks of 50 via the marker cursor and stops as soon as 10
// Payments are collected, so worst case is a few small round-trips.
static async getRecentPaymentsGood(client, account) {
let payments = []
let marker = undefined

while (payments.length < 10) {
const result = await client.request({
command: 'account_tx',
account: account,
limit: 50,
marker: marker
})

for (const tx of result.result.transactions) {
if (tx.tx.TransactionType === 'Payment') {
payments.push(tx)
if (payments.length >= 10) break
}
}

// marker is absent once the server has no more pages.
marker = result.result.marker
if (!marker) break
}

return payments.slice(0, 10)
}

// GOOD: Use validated ledger for consistency
// Pinning all three queries to one ledger index guarantees the balance,
// trust lines, and offers describe the same moment in ledger history.
static async getConsistentAccountState(client, account) {
const ledgerResult = await client.request({
command: 'ledger',
ledger_index: 'validated'
})
const validatedLedger = ledgerResult.result.ledger_index

// All queries use same ledger; they are independent, so run in parallel.
const [info, lines, offers] = await Promise.all([
client.request({
command: 'account_info',
account: account,
ledger_index: validatedLedger
}),
client.request({
command: 'account_lines',
account: account,
ledger_index: validatedLedger
}),
client.request({
command: 'account_offers',
account: account,
ledger_index: validatedLedger
})
])

return {
ledger: validatedLedger,
balance: info.result.account_data.Balance,
trustLines: lines.result.lines,
offers: offers.result.offers
}
}
}
```


class PerformanceMonitor {
  /** Collects request counts, error counts, and latency percentiles. */
  constructor() {
    this.metrics = {
      requests: 0,
      totalLatency: 0,
      errors: 0,
      cacheHits: 0,
      cacheMisses: 0,
      byCommand: new Map()
    }
    this.latencyHistogram = []
    // BUG FIX: the histogram previously grew without bound — a memory leak
    // in any long-running process. Cap it and keep only recent samples.
    this.maxHistogramSize = 10000
  }

  /**
   * Record one completed request.
   * @param {string} command - XRPL command name.
   * @param {number} latencyMs - Wall-clock duration of the request.
   * @param {boolean} [error=false] - Whether the request failed.
   */
  recordRequest(command, latencyMs, error = false) {
    this.metrics.requests++
    this.metrics.totalLatency += latencyMs

    this.latencyHistogram.push(latencyMs)
    if (this.latencyHistogram.length > this.maxHistogramSize) {
      // Drop the oldest half so percentiles track recent traffic.
      this.latencyHistogram = this.latencyHistogram.slice(-this.maxHistogramSize / 2)
    }

    if (error) this.metrics.errors++

    // Per-command stats
    if (!this.metrics.byCommand.has(command)) {
      this.metrics.byCommand.set(command, { count: 0, totalLatency: 0 })
    }
    const cmdStats = this.metrics.byCommand.get(command)
    cmdStats.count++
    cmdStats.totalLatency += latencyMs
  }

  /**
   * Aggregate statistics snapshot.
   * BUG FIX: the original divided by zero and indexed an empty array when
   * called before any request was recorded, yielding 'NaN%' and
   * 'undefinedms'; those fields now report 'N/A' instead.
   */
  getStats() {
    const { requests, errors, totalLatency } = this.metrics
    const sorted = [...this.latencyHistogram].sort((a, b) => a - b)
    const percentile = (p) =>
      sorted.length > 0
        ? sorted[Math.min(sorted.length - 1, Math.floor(sorted.length * p))] + 'ms'
        : 'N/A'

    return {
      requests,
      errors,
      errorRate: requests > 0 ? (errors / requests * 100).toFixed(2) + '%' : 'N/A',
      avgLatency: requests > 0 ? (totalLatency / requests).toFixed(2) + 'ms' : 'N/A',
      p50Latency: percentile(0.5),
      p95Latency: percentile(0.95),
      p99Latency: percentile(0.99),
      byCommand: Object.fromEntries(
        [...this.metrics.byCommand].map(([cmd, stats]) => [
          cmd,
          {
            count: stats.count,
            avgLatency: (stats.totalLatency / stats.count).toFixed(2) + 'ms'
          }
        ])
      )
    }
  }
}

// Instrumented client
/**
 * Pool wrapper that reports every request's command, latency, and
 * success/failure to a PerformanceMonitor — including failed requests.
 */
class InstrumentedClient {
  constructor(pool, monitor) {
    this.pool = pool
    this.monitor = monitor
  }

  /** Forward the request, timing it and recording the outcome either way. */
  async request(requestData) {
    const startedAt = Date.now()
    let failed = false

    try {
      const response = await this.pool.request(requestData)
      return response
    } catch (err) {
      failed = true
      throw err
    } finally {
      // finally guarantees the metric is recorded on success AND failure.
      this.monitor.recordRequest(requestData.command, Date.now() - startedAt, failed)
    }
  }
}
```



  • Connection pooling with health checks
  • Multi-tier caching (memory + Redis)
  • Request batching for bulk operations
  • Performance monitoring with metrics
  • Benchmark showing improvement over naive implementation

Time Investment: 4-5 hours


End of Lesson 13

Key Takeaways

1

Pool connections:

Reuse connections instead of creating new ones per request.

2

Batch requests:

Parallel requests with reasonable limits (10-20 concurrent).

3

Cache appropriately:

Long TTL for immutable data, short for volatile.

4

Monitor performance:

Track latency percentiles, not just averages.

5

Optimize queries:

Request only needed data; use validated ledger for consistency. ---