Skip to main content

⚡ Performance Optimization Guide

Maximize your Dubhe application’s performance with proven optimization strategies

Prerequisites: Understanding of ECS architecture, Move language, and basic performance concepts

🎯 Performance Optimization Philosophy

Performance in blockchain applications involves multiple layers: smart contract efficiency, indexer throughput, frontend responsiveness, and network optimization. Each layer requires specific strategies.

Smart Contracts

Gas optimization and execution efficiency

Indexer Service

Data processing and query optimization

Frontend

UI responsiveness and real-time updates

📊 Performance Metrics & Monitoring

Key Performance Indicators

// Performance monitoring for contracts
// Snapshot of contract-side KPIs. All fields are raw numbers; the sampling
// window and units are defined by whoever populates the structure.
interface ContractMetrics {
  // Gas consumed per operation category
  gasUsage: {
    entityCreation: number;
    componentAdd: number;
    systemExecution: number;
    batchOperations: number;
  };
  // Chain latency / throughput figures
  executionTime: {
    averageBlockTime: number;
    transactionThroughput: number;
    confirmationTime: number;
  };
  // On-chain state footprint
  stateSize: {
    totalEntities: number;
    componentsPerEntity: number;
    storageUtilization: number;
  };
}

// Gas usage tracking
// Accumulates per-operation gas samples and reports aggregate statistics.
export class GasTracker {
  private metrics: Map<string, number[]> = new Map();
  
  /** Record one gas measurement for the named operation. */
  track(operation: string, gasUsed: number) {
    if (!this.metrics.has(operation)) {
      this.metrics.set(operation, []);
    }
    this.metrics.get(operation)!.push(gasUsed);
  }
  
  /**
   * Mean gas usage for the operation.
   * Returns 0 for unknown/empty operations (the original divided 0/0 and
   * returned NaN).
   */
  getAverage(operation: string): number {
    const values = this.metrics.get(operation) || [];
    if (values.length === 0) return 0;
    return values.reduce((sum, val) => sum + val, 0) / values.length;
  }
  
  /**
   * Gas usage at the given percentile (0-100).
   * Sorts a copy so the stored insertion-ordered samples are not mutated
   * (the original sorted in place, corrupting the time-ordered history),
   * and clamps the index so percentile=100 returns the maximum sample
   * instead of reading past the end and returning 0.
   */
  getPercentile(operation: string, percentile: number): number {
    const values = [...(this.metrics.get(operation) || [])].sort((a, b) => a - b);
    if (values.length === 0) return 0;
    const index = Math.min(values.length - 1, Math.floor(values.length * (percentile / 100)));
    return values[index];
  }
}

🚀 Smart Contract Optimization

Component Design Optimization

// ✅ Optimized: Pack related data together
struct OptimizedHealthComponent has store, drop {
    // Pack current and maximum health into single u64
    // Bits 0-31: current health (max 4.3B)
    // Bits 32-63: maximum health (max 4.3B)
    packed_health: u64,
}

/// Current health lives in the low 32 bits.
/// Fix: Move requires cast expressions to be fully parenthesized — `(e as T)`.
public fun get_current_health(health: &OptimizedHealthComponent): u32 {
    ((health.packed_health & 0xFFFFFFFF) as u32)
}

/// Maximum health lives in the high 32 bits.
public fun get_maximum_health(health: &OptimizedHealthComponent): u32 {
    ((health.packed_health >> 32) as u32)
}

/// Overwrite the current-health half while preserving the maximum-health half.
public fun set_current_health(health: &mut OptimizedHealthComponent, value: u32) {
    let max_health = (get_maximum_health(health) as u64);
    health.packed_health = (max_health << 32) | (value as u64);
}

// ✅ Optimized: Use bit flags for boolean states
struct OptimizedStateComponent has store, drop {
    // Bit flags: 0=alive, 1=poisoned, 2=stunned, 3=frozen, etc.
    status_flags: u8,
    
    // Pack multiple small values
    // Bits 0-7: level (max 255)
    // Bits 8-15: class (max 255) 
    // Bits 16-31: experience_multiplier (fixed point)
    packed_stats: u32,
}

const STATUS_ALIVE: u8 = 1 << 0;
const STATUS_POISONED: u8 = 1 << 1;
const STATUS_STUNNED: u8 = 1 << 2;

/// True when the alive bit (bit 0) is set.
public fun is_alive(state: &OptimizedStateComponent): bool {
    (state.status_flags & STATUS_ALIVE) != 0
}

/// Set or clear the poisoned flag (bit 1).
public fun set_poisoned(state: &mut OptimizedStateComponent, poisoned: bool) {
    if (poisoned) {
        state.status_flags = state.status_flags | STATUS_POISONED;
    } else {
        // Fix: Move has no bitwise NOT (`!` applies only to bool), so the
        // original `& !STATUS_POISONED` does not compile. Clear the bit by
        // AND-ing with the XOR-complement of the flag mask.
        state.status_flags = state.status_flags & (STATUS_POISONED ^ 0xFF);
    };
}
// ❌ Bad: Monolithic component (expensive to read/write)
// Every system touching any single field must load and store the whole struct.
struct PlayerData has store, drop {
    health: u64,
    mana: u64,
    position_x: u64,
    position_y: u64,
    velocity_x: u64,
    velocity_y: u64,
    inventory_items: vector<u64>,
    equipment: Equipment,
    stats: Stats,
    buffs: vector<Buff>,
}

// ✅ Good: Granular components (load only what's needed)
struct CoreStatsComponent has store, drop {
    health: u64,
    mana: u64,
}

struct PositionComponent has store, drop {
    x: u64,
    y: u64,
}

struct MovementComponent has store, drop {
    velocity_x: u64,
    velocity_y: u64,
    speed_multiplier: u16, // Packed as fixed-point
}

// Load only required components for each system
public entry fun movement_system(world: &mut World) {
    // Only loads Position + Movement components, not entire player data
    let moving_entities = world::query_with<PositionComponent, MovementComponent>(world);
    // Process movement...
}

System Optimization Patterns

// ✅ Optimized: Process multiple entities in single transaction
// Fix: the original body read `ctx` without declaring it; entry functions
// receive the transaction context as their trailing parameter.
public entry fun batch_heal_system(
    world: &mut World,
    targets: vector<u64>,
    heal_amounts: vector<u64>,
    ctx: &TxContext
) {
    let len = vector::length(&targets);
    // Parallel vectors must line up, and the batch stays bounded so the
    // transaction cannot blow past gas limits.
    assert!(len == vector::length(&heal_amounts), EArrayLengthMismatch);
    assert!(len <= MAX_BATCH_SIZE, EBatchTooLarge);
    
    let i = 0;
    while (i < len) {
        let target = *vector::borrow(&targets, i);
        let heal_amount = *vector::borrow(&heal_amounts, i);
        
        // Skip entities without a HealthComponent instead of aborting the batch.
        if (world::has_component<HealthComponent>(world, target)) {
            let health = world::get_mut_component<HealthComponent>(world, target);
            // Clamp at maximum so healing cannot overflow the cap.
            health.current = math::min(health.current + heal_amount, health.maximum);
        };
        
        i = i + 1;
    };
    
    // Single event for entire batch — O(1) emission cost per call.
    event::emit(BatchHealEvent {
        targets,
        heal_amounts,
        timestamp: tx_context::epoch_timestamp_ms(ctx),
    });
}

// ✅ Optimized: Conditional system execution
// Fix: the original body read `ctx` without declaring it as a parameter.
public entry fun smart_ai_system(world: &mut World, ctx: &TxContext) {
    // Early exit if no AI entities exist
    if (!world::has_entities_with_component<AIComponent>(world)) {
        return
    };
    
    let ai_entities = world::query_with<AIComponent>(world);
    
    // Hoisted out of the loop: the timestamp is loop-invariant within one tx.
    let now = tx_context::epoch_timestamp_ms(ctx);
    
    // Process only entities that need updates
    let i = 0;
    while (i < vector::length(&ai_entities)) {
        let entity = *vector::borrow(&ai_entities, i);
        let ai = world::get_component<AIComponent>(world, entity);
        
        // Skip entities whose next scheduled update is still in the future.
        if (ai.next_update_time > now) {
            i = i + 1;
            continue
        };
        
        process_ai_entity(world, entity);
        i = i + 1;
    };
}

Gas Optimization Techniques

1

Minimize Storage Operations

// ❌ Bad: Multiple storage writes — each component lookup is a separate
// storage read/write pair.
public fun update_player_bad(world: &mut World, player: u64, new_health: u64, new_mana: u64) {
    let health = world::get_mut_component<HealthComponent>(world, player);
    health.current = new_health;
    
    let mana = world::get_mut_component<ManaComponent>(world, player);
    mana.current = new_mana;
}

// ✅ Good: Batch updates in single component — co-locate fields that are
// always updated together so one lookup covers both writes.
struct VitalStatsComponent has store, drop {
    health_current: u64,
    health_maximum: u64,
    mana_current: u64,
    mana_maximum: u64,
}

public fun update_player_good(
    world: &mut World, 
    player: u64, 
    new_health: u64, 
    new_mana: u64
) {
    let vitals = world::get_mut_component<VitalStatsComponent>(world, player);
    vitals.health_current = new_health;
    vitals.mana_current = new_mana;
    // Single storage write
}
2

Optimize Loops and Iterations

// ✅ Optimized: Use indices and break early
/// Return the closest enemy within `max_range` of `searcher_pos`, or none.
/// Exits immediately on a perfect (distance 0) match.
public fun find_target_optimized(
    world: &World, 
    searcher_pos: &PositionComponent,
    max_range: u64
): Option<u64> {
    let enemy_entities = world::query_with<EnemyTag, PositionComponent>(world);
    let len = vector::length(&enemy_entities);
    
    if (len == 0) return option::none();
    
    // Track the best candidate seen so far. Using the Option itself as the
    // "nothing found yet" marker (instead of the original `max_range + 1`
    // sentinel) avoids an arithmetic abort when max_range == u64 max.
    let closest_distance = 0;
    let closest_entity = option::none<u64>();
    
    let i = 0;
    while (i < len) {
        let entity = *vector::borrow(&enemy_entities, i);
        let pos = world::get_component<PositionComponent>(world, entity);
        
        let distance = calculate_distance(searcher_pos, pos);
        
        // Early exit if perfect match found — cannot do better than 0.
        if (distance == 0) {
            return option::some(entity)
        };
        
        if (distance <= max_range && (option::is_none(&closest_entity) || distance < closest_distance)) {
            closest_distance = distance;
            closest_entity = option::some(entity);
        };
        
        i = i + 1;
    };
    
    closest_entity
}
3

Use Native Operations

// ✅ Despawn every entity tagged DeadTag, walking the result set from the
// back so earlier removals cannot shift indices we still need to visit.
public fun remove_dead_entities_optimized(world: &mut World) {
    let doomed = world::query_with<DeadTag>(world);

    let remaining = vector::length(&doomed);
    while (remaining > 0) {
        remaining = remaining - 1;
        world::despawn_entity(world, *vector::borrow(&doomed, remaining));
    };
}

// ✅ Batch vector operations
// Two-phase AoE: a read-only pass collects every entity inside the radius,
// then one batched call applies the damage — keeping the immutable reads
// separate from the mutable writes.
public fun apply_area_damage(
    world: &mut World,
    center: &PositionComponent,
    radius: u64,
    damage: u64
) {
    let all_entities = world::query_with<PositionComponent, HealthComponent>(world);
    let targets = vector::empty<u64>();
    
    // First pass: collect targets
    let i = 0;
    while (i < vector::length(&all_entities)) {
        let entity = *vector::borrow(&all_entities, i);
        let pos = world::get_component<PositionComponent>(world, entity);
        
        if (calculate_distance(center, pos) <= radius) {
            vector::push_back(&mut targets, entity);
        };
        
        i = i + 1;
    };
    
    // Second pass: apply damage to all targets
    batch_damage_entities(world, targets, damage);
}

πŸ—„οΈ Indexer Performance Optimization

Database Optimization

-- Optimize for common query patterns
-- NOTE(review): CREATE INDEX CONCURRENTLY cannot run inside a transaction
-- block — execute each statement separately with autocommit on.

-- 1. Entity-component lookups (most frequent)
CREATE INDEX CONCURRENTLY idx_entity_components_lookup 
ON entity_components(entity_id, component_type);

-- 2. Component queries by type (system queries)
-- INCLUDE makes this a covering index so lookups can skip the heap.
CREATE INDEX CONCURRENTLY idx_components_by_type 
ON entity_components(component_type) 
INCLUDE (entity_id, component_data);

-- 3. Recent events (timeline queries)
CREATE INDEX CONCURRENTLY idx_events_recent 
ON blockchain_events(timestamp DESC, event_type);

-- 4. Player-specific queries
-- NOTE(review): functions used in a partial-index predicate must be
-- IMMUTABLE; if has_component() reads other rows, Postgres will reject
-- this — confirm its volatility before deploying.
CREATE INDEX CONCURRENTLY idx_players_active 
ON entities(id) 
WHERE has_component('PlayerComponent');

-- 5. Partial index for alive entities (common filter)
CREATE INDEX CONCURRENTLY idx_entities_alive 
ON entities(id) 
WHERE NOT has_component('DeadTag');

-- 6. Spatial queries for position-based systems
-- NOTE(review): GiST over plain int expressions requires the btree_gist
-- extension (or a point-typed column) — verify the extension is installed.
CREATE INDEX CONCURRENTLY idx_positions_spatial 
ON entity_components 
USING GIST ((component_data->>'x')::int, (component_data->>'y')::int)
WHERE component_type = 'PositionComponent';

-- Analyze query performance
EXPLAIN (ANALYZE, BUFFERS, FORMAT JSON) 
SELECT entity_id 
FROM entity_components 
WHERE component_type = 'HealthComponent' 
AND (component_data->>'current')::int > 0;

Event Processing Optimization

// High-throughput event processing
import { Transform, pipeline } from 'stream';
import { promisify } from 'util';

// Batches incoming blockchain events (by size or timeout) and processes each
// batch grouped by event type. Batch execution is serialized through
// `processingQueue` so two flushes never run concurrently.
class OptimizedEventProcessor {
  // Flush as soon as this many events are buffered.
  private batchSize = 100;
  private batchTimeout = 1000; // 1 second
  private currentBatch: BlockchainEvent[] = [];
  // Promise chain that serializes batch processing.
  private processingQueue: Promise<void> = Promise.resolve();
  
  constructor(private db: OptimizedDatabase) {
    this.startBatchProcessor();
  }
  
  // Buffer one event; flushes immediately once the batch is full.
  async processEvent(event: BlockchainEvent) {
    this.currentBatch.push(event);
    
    // Process batch when full or after timeout
    if (this.currentBatch.length >= this.batchSize) {
      await this.processBatch();
    }
  }
  
  // Timer-driven flush so sparse traffic still gets processed within
  // batchTimeout. NOTE(review): this interval is never cleared — add a
  // shutdown path if the processor can be disposed.
  private startBatchProcessor() {
    setInterval(async () => {
      if (this.currentBatch.length > 0) {
        await this.processBatch();
      }
    }, this.batchTimeout);
  }
  
  // Take ownership of the buffered events (splice empties the buffer in
  // place) and chain the work onto the serialization queue.
  private async processBatch() {
    const batch = this.currentBatch.splice(0);
    if (batch.length === 0) return;
    
    // Queue processing to avoid concurrent batch conflicts
    this.processingQueue = this.processingQueue.then(async () => {
      await this.processBatchInternal(batch);
    });
    
    await this.processingQueue;
  }
  
  // Group a batch by event type, process the groups in parallel, record timing.
  private async processBatchInternal(events: BlockchainEvent[]) {
    const startTime = performance.now();
    
    try {
      // Group events by type for efficient processing
      const eventsByType = new Map<string, BlockchainEvent[]>();
      
      events.forEach(event => {
        if (!eventsByType.has(event.type)) {
          eventsByType.set(event.type, []);
        }
        eventsByType.get(event.type)!.push(event);
      });
      
      // Process each event type in parallel
      const processingPromises = Array.from(eventsByType.entries()).map(
        ([eventType, eventBatch]) => this.processEventType(eventType, eventBatch)
      );
      
      await Promise.all(processingPromises);
      
      // Update metrics
      const processingTime = performance.now() - startTime;
      this.updateMetrics(events.length, processingTime);
      
      console.log(`Processed ${events.length} events in ${processingTime}ms`);
      
    } catch (error) {
      console.error('Error processing batch:', error);
      // Implement retry logic or dead letter queue
      await this.handleBatchError(events, error);
    }
  }
  
  // Dispatch one homogeneous group of events to its type-specific handler.
  // NOTE(review): processComponentUpdatedEvents / processEntitySpawnedEvents /
  // updateMetrics / handleBatchError are referenced but not shown here —
  // presumably defined elsewhere in the file; confirm.
  private async processEventType(eventType: string, events: BlockchainEvent[]) {
    switch (eventType) {
      case 'ComponentAdded':
        return this.processComponentAddedEvents(events);
      case 'ComponentUpdated':
        return this.processComponentUpdatedEvents(events);
      case 'EntitySpawned':
        return this.processEntitySpawnedEvents(events);
      default:
        console.warn(`Unknown event type: ${eventType}`);
    }
  }
  
  // Bulk-insert ComponentAdded events, then invalidate caches and push
  // realtime updates to subscribed clients.
  private async processComponentAddedEvents(events: BlockchainEvent[]) {
    // Batch insert components
    const updates = events.map(event => ({
      entityId: event.data.entity,
      componentType: event.data.componentType,
      data: event.data.componentData,
      blockNumber: event.blockNumber,
      transactionHash: event.transactionHash,
    }));
    
    await this.db.batchInsertComponents(updates);
    
    // Invalidate related caches
    const entityIds = events.map(e => e.data.entity);
    await this.invalidateEntityCaches(entityIds);
    
    // Emit WebSocket updates
    await this.emitWebSocketUpdates(events);
  }
}

WebSocket Optimization

// Optimized WebSocket server
import WebSocket from 'ws';
import { EventEmitter } from 'events';

// WebSocket fan-out server with topic subscriptions, single-serialization
// broadcast, and periodic dead-connection cleanup.
// NOTE(review): generateConnectionId / handleDisconnection / unsubscribe /
// joinGroup / send(connectionId, obj) are referenced but not defined in this
// snippet — presumably elsewhere in the file; confirm.
class OptimizedWebSocketServer extends EventEmitter {
  private wss: WebSocket.Server;
  private connections: Map<string, WebSocketConnection> = new Map();
  private subscriptions: Map<string, Set<string>> = new Map(); // topic -> connectionIds
  private connectionGroups: Map<string, Set<string>> = new Map(); // group -> connectionIds
  
  constructor(port: number) {
    super();
    
    this.wss = new WebSocket.Server({ 
      port,
      perMessageDeflate: false, // Disable compression for lower latency
      maxPayload: 1024 * 1024,  // 1MB max message size
    });
    
    this.wss.on('connection', this.handleConnection.bind(this));
    this.startCleanupTimer();
  }
  
  // Register a new socket: track it, wire message/close/error handlers, and
  // confirm the connection to the client.
  private handleConnection(ws: WebSocket, request: any) {
    const connectionId = this.generateConnectionId();
    const connection = new WebSocketConnection(connectionId, ws);
    
    this.connections.set(connectionId, connection);
    
    ws.on('message', (data: Buffer) => {
      this.handleMessage(connectionId, data);
    });
    
    ws.on('close', () => {
      this.handleDisconnection(connectionId);
    });
    
    ws.on('error', (error) => {
      console.error(`WebSocket error for ${connectionId}:`, error);
      this.handleDisconnection(connectionId);
    });
    
    // Send connection confirmation
    this.send(connectionId, {
      type: 'connected',
      connectionId,
      timestamp: Date.now(),
    });
  }
  
  // Parse and dispatch one client message; malformed JSON is logged, not fatal.
  private handleMessage(connectionId: string, data: Buffer) {
    try {
      const message = JSON.parse(data.toString());
      
      switch (message.type) {
        case 'subscribe':
          this.subscribe(connectionId, message.topic, message.filters);
          break;
        case 'unsubscribe':
          this.unsubscribe(connectionId, message.topic);
          break;
        case 'join_group':
          this.joinGroup(connectionId, message.group);
          break;
        case 'ping':
          this.send(connectionId, { type: 'pong', timestamp: Date.now() });
          break;
      }
    } catch (error) {
      console.error(`Error parsing message from ${connectionId}:`, error);
    }
  }
  
  // Add the connection to a topic's subscriber set, remember its filters,
  // and acknowledge the subscription.
  private subscribe(connectionId: string, topic: string, filters?: any) {
    if (!this.subscriptions.has(topic)) {
      this.subscriptions.set(topic, new Set());
    }
    
    this.subscriptions.get(topic)!.add(connectionId);
    
    const connection = this.connections.get(connectionId);
    if (connection) {
      connection.subscriptions.set(topic, filters || {});
    }
    
    this.send(connectionId, {
      type: 'subscribed',
      topic,
      timestamp: Date.now(),
    });
  }
  
  // Optimized broadcast with filtering
  // The payload is serialized once and the same string shared by every send.
  broadcast(topic: string, data: any, filters?: (connectionId: string) => boolean) {
    const subscribers = this.subscriptions.get(topic);
    if (!subscribers) return;
    
    const message = JSON.stringify({
      type: 'update',
      topic,
      data,
      timestamp: Date.now(),
    });
    
    // Batch send for better performance
    const sends: Promise<void>[] = [];
    
    for (const connectionId of subscribers) {
      if (filters && !filters(connectionId)) continue;
      
      const connection = this.connections.get(connectionId);
      if (connection?.isAlive()) {
        sends.push(connection.send(message));
      }
    }
    
    // Don't wait for all sends to complete
    // NOTE(review): Promise.allSettled never rejects, so this .catch is dead
    // code — individual failures live in the settled results.
    Promise.allSettled(sends).catch(console.error);
  }
  
  // Efficient cleanup of dead connections
  private startCleanupTimer() {
    setInterval(() => {
      for (const [connectionId, connection] of this.connections) {
        if (!connection.isAlive()) {
          this.handleDisconnection(connectionId);
        }
      }
    }, 30000); // Check every 30 seconds
  }
}

// One tracked client connection: per-topic subscription filters, promisified
// sends, and ping/pong liveness.
class WebSocketConnection {
  public subscriptions: Map<string, any> = new Map();
  private lastPing: number = Date.now();
  // Handle kept so the ping loop can be stopped when the socket closes.
  // Fix: the original setInterval was never cleared, leaking one timer per
  // connection for the lifetime of the process.
  private pingTimer: NodeJS.Timeout;
  
  constructor(
    public id: string, 
    private ws: WebSocket
  ) {
    // Start ping/pong for connection health; pong replies refresh lastPing.
    this.pingTimer = setInterval(() => {
      if (this.isAlive()) {
        this.ws.ping();
      }
    }, 30000);
    
    this.ws.on('pong', () => {
      this.lastPing = Date.now();
    });
    
    // Stop pinging once the underlying socket is gone.
    this.ws.on('close', () => clearInterval(this.pingTimer));
  }
  
  /** Send a raw message; rejects if the socket is not OPEN or the write fails. */
  async send(message: string): Promise<void> {
    return new Promise((resolve, reject) => {
      if (this.ws.readyState !== WebSocket.OPEN) {
        reject(new Error('Connection not open'));
        return;
      }
      
      this.ws.send(message, (error) => {
        if (error) reject(error);
        else resolve();
      });
    });
  }
  
  /** Open socket that has answered a ping within the last minute. */
  isAlive(): boolean {
    return this.ws.readyState === WebSocket.OPEN && 
           (Date.now() - this.lastPing) < 60000; // 1 minute timeout
  }
}

🖥️ Frontend Performance Optimization

React Optimization Strategies

// Optimized React components for real-time updates
import React, { useMemo, useCallback, memo } from 'react';
import { useDubheClient } from '@0xobelisk/dubhe-react';

// Memoized entity component to prevent unnecessary re-renders
// Re-renders only when its props change (memo) or the subscribed component
// data for this entity updates.
const EntityDisplay = memo<{ entityId: string; componentTypes: string[] }>(({ 
  entityId, 
  componentTypes 
}) => {
  const client = useDubheClient();
  
  // Use optimized hook that batches component queries
  // NOTE(review): useEntityComponents is not defined in this snippet —
  // presumably provided by the app's hooks layer; confirm its import.
  const components = useEntityComponents(entityId, componentTypes);
  
  // Memoize expensive calculations so they rerun only when the underlying
  // component data actually changes.
  const computedStats = useMemo(() => {
    if (!components.health || !components.stats) return null;
    
    return {
      healthPercentage: (components.health.current / components.health.maximum) * 100,
      totalAttack: components.stats.base_attack + (components.equipment?.weapon_bonus || 0),
      totalDefense: components.stats.base_defense + (components.equipment?.armor_bonus || 0),
    };
  }, [components.health, components.stats, components.equipment]);
  
  // Memoized event handlers — a stable identity keeps child ActionButtons
  // from re-rendering on every parent render.
  const handleAttack = useCallback(async (targetId: string) => {
    await client.tx.combatSystem.attack({
      attacker: entityId,
      target: targetId,
    });
  }, [client, entityId]);
  
  if (!computedStats) return <div>Loading...</div>;
  
  return (
    <div className="entity-display">
      <HealthBar 
        current={components.health.current} 
        maximum={components.health.maximum}
        percentage={computedStats.healthPercentage}
      />
      <StatsDisplay stats={computedStats} />
      <ActionButtons onAttack={handleAttack} />
    </div>
  );
});

// Virtualized list for large entity collections
import { FixedSizeList as List } from 'react-window';

// Renders only the rows visible inside the 600px viewport, so thousands of
// entities remain cheap to display.
const EntityList: React.FC<{ entityIds: string[] }> = ({ entityIds }) => {
  // react-window row renderer; `style` positions the row absolutely and must
  // be forwarded for virtualization to work.
  const renderEntity = useCallback(({ index, style }: any) => {
    const entityId = entityIds[index];
    
    return (
      <div style={style}>
        <EntityDisplay 
          entityId={entityId} 
          componentTypes={['HealthComponent', 'StatsComponent']}
        />
      </div>
    );
  }, [entityIds]);
  
  return (
    <List
      height={600}
      itemCount={entityIds.length}
      itemSize={80}
      width="100%"
    >
      {renderEntity}
    </List>
  );
};

🔧 Advanced Optimization Techniques

Memory Management

// Generic object pool: recycles instances to avoid per-call allocations in
// hot paths. Objects handed out via acquire() must be returned via release().
class ObjectPool<T> {
  private idle: T[] = [];
  private leased: Set<T> = new Set();
  private create: () => T;
  private recycle: (obj: T) => void;
  
  constructor(factory: () => T, reset: (obj: T) => void, initialSize = 10) {
    this.create = factory;
    this.recycle = reset;
    
    // Warm the pool so the first `initialSize` acquires are allocation-free.
    for (let i = 0; i < initialSize; i++) {
      this.idle.push(factory());
    }
  }
  
  /** Hand out a pooled instance, allocating fresh only when the pool is empty. */
  acquire(): T {
    let obj = this.idle.pop();
    if (!obj) {
      obj = this.create();
    }
    this.leased.add(obj);
    return obj;
  }
  
  /** Reset and return an instance to the pool; ignored if it wasn't leased from here. */
  release(obj: T) {
    if (!this.leased.delete(obj)) return;
    this.recycle(obj);
    this.idle.push(obj);
  }
  
  /** Snapshot of pool occupancy for diagnostics. */
  getPoolSize() {
    return {
      available: this.idle.length,
      inUse: this.leased.size,
      total: this.idle.length + this.leased.size,
    };
  }
}

// Usage examples
interface Vector2D {
  x: number;
  y: number;
}

// Shared scratch-vector pool for hot-path math.
const vectorPool = new ObjectPool<Vector2D>(
  () => ({ x: 0, y: 0 }),
  (vec) => { vec.x = 0; vec.y = 0; }
);

// Euclidean distance between two points using a pooled scratch vector.
// Fix: try/finally guarantees the scratch vector is returned to the pool even
// if an exception is thrown mid-computation — the original leaked it on throw.
function calculateDistance(pos1: Vector2D, pos2: Vector2D): number {
  const diff = vectorPool.acquire();
  try {
    diff.x = pos2.x - pos1.x;
    diff.y = pos2.y - pos1.y;
    return Math.sqrt(diff.x * diff.x + diff.y * diff.y);
  } finally {
    vectorPool.release(diff);
  }
}

Caching Strategies

// Hierarchical caching system
// Read-through cache: L1 (process memory) -> L2 (Redis) -> L3 (database or
// caller-supplied fetcher). Hits are promoted into faster levels; writes
// populate all three levels with the L3 write fire-and-forget.
class CacheManager {
  private l1Cache: Map<string, CacheEntry> = new Map(); // In-memory
  private l2Cache: Redis; // Redis cache
  private l3Cache: Database; // Database
  
  private readonly L1_TTL = 30_000;  // 30 seconds
  private readonly L2_TTL = 300_000; // 5 minutes
  
  constructor(redis: Redis, database: Database) {
    this.l2Cache = redis;
    this.l3Cache = database;
    
    // Cleanup expired L1 entries
    // NOTE(review): this interval is never cleared — add a dispose path if
    // CacheManager instances can be torn down.
    setInterval(() => this.cleanupL1(), 10_000);
  }
  
  // Look the key up level by level. `fetcher`, when supplied, replaces the
  // L3 database read on a full miss. Returns null when nothing is found.
  async get<T>(key: string, fetcher?: () => Promise<T>): Promise<T | null> {
    // L1 Cache (memory)
    const l1Entry = this.l1Cache.get(key);
    if (l1Entry && !this.isExpired(l1Entry)) {
      return l1Entry.value;
    }
    
    // L2 Cache (Redis)
    const l2Value = await this.l2Cache.get(key);
    if (l2Value) {
      const parsed = JSON.parse(l2Value);
      this.setL1(key, parsed);
      return parsed;
    }
    
    // L3 Cache (Database) or fetcher
    let value: T | null = null;
    
    if (fetcher) {
      value = await fetcher();
    } else {
      value = await this.l3Cache.get(key);
    }
    
    if (value !== null) {
      // Store in all cache levels
      await this.setAll(key, value);
    }
    
    return value;
  }
  
  // Write-through: populate every cache level.
  async set<T>(key: string, value: T): Promise<void> {
    await this.setAll(key, value);
  }
  
  // Install/refresh the in-memory entry; expiry is timestamp-based (isExpired).
  private setL1<T>(key: string, value: T) {
    this.l1Cache.set(key, {
      value,
      timestamp: Date.now(),
    });
  }
  
  private async setAll<T>(key: string, value: T) {
    // L1 Cache
    this.setL1(key, value);
    
    // L2 Cache
    // setex takes seconds, hence the ms -> s conversion.
    await this.l2Cache.setex(key, this.L2_TTL / 1000, JSON.stringify(value));
    
    // L3 Cache (database) - async, don't wait
    this.l3Cache.set(key, value).catch(console.error);
  }
  
  // An L1 entry is stale once it has lived past L1_TTL.
  private isExpired(entry: CacheEntry): boolean {
    return Date.now() - entry.timestamp > this.L1_TTL;
  }
  
  // Periodic sweep evicting expired L1 entries.
  private cleanupL1() {
    for (const [key, entry] of this.l1Cache.entries()) {
      if (this.isExpired(entry)) {
        this.l1Cache.delete(key);
      }
    }
  }
}

// Entry stored in the in-memory L1 cache; expiry is computed from
// `timestamp` plus the manager's L1 TTL.
interface CacheEntry {
  value: any;
  timestamp: number; // epoch milliseconds at insertion
}

📈 Performance Monitoring Dashboard

// Performance metrics aggregation
// In-memory metrics store with tag support, bounded per-series history
// (1000 points), and threshold alerts evaluated on every sample.
class PerformanceDashboard {
  private metrics: Map<string, MetricSeries> = new Map();
  private alerts: AlertRule[] = [];
  
  constructor() {
    this.startMetricsCollection();
  }
  
  // Append one data point to the series keyed by (name, tags), then run
  // alert checks against the new value.
  recordMetric(name: string, value: number, tags: Record<string, string> = {}) {
    // NOTE(review): JSON.stringify of tags is key-order sensitive — the same
    // tag set with different key order produces distinct series; confirm
    // callers pass tags consistently.
    const key = `${name}:${JSON.stringify(tags)}`;
    
    if (!this.metrics.has(key)) {
      this.metrics.set(key, {
        name,
        tags,
        values: [],
        timestamps: [],
      });
    }
    
    const series = this.metrics.get(key)!;
    series.values.push(value);
    series.timestamps.push(Date.now());
    
    // Keep only last 1000 data points
    if (series.values.length > 1000) {
      series.values.shift();
      series.timestamps.shift();
    }
    
    this.checkAlerts(name, value, tags);
  }
  
  // Return every series matching `name`, trimmed to the given time window
  // (default: the last hour). The stored series are copied, not mutated.
  getMetrics(name: string, timeRange: number = 3600000): MetricSeries[] {
    const cutoff = Date.now() - timeRange;
    const results: MetricSeries[] = [];
    
    for (const [key, series] of this.metrics) {
      if (series.name === name) {
        // Filter by time range
        const filtered = {
          ...series,
          values: [] as number[],
          timestamps: [] as number[],
        };
        
        for (let i = 0; i < series.timestamps.length; i++) {
          if (series.timestamps[i] >= cutoff) {
            filtered.values.push(series.values[i]);
            filtered.timestamps.push(series.timestamps[i]);
          }
        }
        
        results.push(filtered);
      }
    }
    
    return results;
  }
  
  // Register a threshold rule evaluated on every recordMetric call.
  addAlert(rule: AlertRule) {
    this.alerts.push(rule);
  }
  
  // Fire every alert whose metric name and tags match and whose condition holds.
  // NOTE(review): matchesTags is not defined in this snippet — presumably
  // defined elsewhere in the file; confirm.
  private checkAlerts(metricName: string, value: number, tags: Record<string, string>) {
    for (const alert of this.alerts) {
      if (alert.metric === metricName && this.matchesTags(alert.tags, tags)) {
        if (alert.condition(value)) {
          this.triggerAlert(alert, value);
        }
      }
    }
  }
  
  // Log the alert and forward it to the alerting endpoint (fire-and-forget).
  private triggerAlert(alert: AlertRule, value: number) {
    console.warn(`ALERT: ${alert.name} - Current value: ${value}`);
    
    // Send to monitoring system (Slack, PagerDuty, etc.)
    fetch('/api/alerts', {
      method: 'POST',
      body: JSON.stringify({
        alertName: alert.name,
        metric: alert.metric,
        value,
        severity: alert.severity,
        timestamp: Date.now(),
      }),
    }).catch(console.error);
  }
  
  // Sample process + application metrics every 5 seconds.
  // NOTE(review): getActiveConnections/getCacheHitRate are not defined in
  // this snippet — presumably defined elsewhere; confirm.
  private startMetricsCollection() {
    // Collect system metrics
    setInterval(() => {
      this.recordMetric('memory_usage', process.memoryUsage().heapUsed);
      this.recordMetric('cpu_usage', process.cpuUsage().user + process.cpuUsage().system);
      
      // Custom application metrics
      this.recordMetric('active_connections', this.getActiveConnections());
      this.recordMetric('cache_hit_rate', this.getCacheHitRate());
    }, 5000);
  }
}

// One time-series of samples for a (metric name, tag set) pair.
interface MetricSeries {
  name: string;
  tags: Record<string, string>;
  values: number[];     // sample values, parallel to `timestamps`
  timestamps: number[]; // epoch milliseconds
}

// Threshold rule evaluated on each recorded sample of `metric`.
interface AlertRule {
  name: string;
  metric: string;
  tags: Record<string, string>;
  condition: (value: number) => boolean; // true => fire the alert
  severity: 'low' | 'medium' | 'high' | 'critical';
}

// Usage
const dashboard = new PerformanceDashboard();

// Add performance alerts
// Fire when the Node heap exceeds 1 GiB.
dashboard.addAlert({
  name: 'High Memory Usage',
  metric: 'memory_usage',
  tags: {},
  condition: (value) => value > 1024 * 1024 * 1024, // 1GB
  severity: 'high',
});

// Fire when fewer than 80% of lookups hit the cache.
dashboard.addAlert({
  name: 'Low Cache Hit Rate',
  metric: 'cache_hit_rate',
  tags: {},
  condition: (value) => value < 0.8, // Below 80%
  severity: 'medium',
});

🎯 Performance Testing Tools

Load Testing

Artillery.js and k6 for testing system limits

Profiling Tools

Chrome DevTools, Node.js profiler, and Move analyzer

Benchmarking

Automated performance regression testing

Monitoring

Production monitoring with Grafana and Prometheus

🔥 Real-World Case Studies

Case Study 1: MMO Battle Royale Optimization

A 100-player battle royale game faced severe performance issues:
  • Gas costs: 0.5 SUI per player action (unsustainable)
  • Latency: 3-5 second transaction confirmations
  • Throughput: Only 10 TPS during peak battles
  • Memory: Frontend consuming 500MB+ for player states

Case Study 2: DeFi Trading Game Optimization

Challenge: Processing 1000+ trades per second with complex market mechanics. Key Optimizations:
  1. Merkle Tree State Compression
// Compressed market state: a merkle root commits to the full order book
// instead of storing every order on-chain.
struct MarketState has store {
    // Instead of storing all orders individually
    orders_merkle_root: vector<u8>,
    total_volume: u64,
    last_update: u64,
}

// Validate trades against merkle proof
// NOTE(review): illustrative skeleton — `trades` and `merkle_proofs` are
// intentionally unused here; a real implementation verifies each trade
// against `orders_merkle_root` before applying it.
public fun execute_trade_batch(
    market: &mut MarketState,
    trades: vector<Trade>,
    merkle_proofs: vector<MerkleProof>
) {
    // Process entire batch in single transaction
}
  2. Event Aggregation
// Single event for entire trading session
// Aggregating per-session (instead of per-trade) slashes event-emission cost.
struct TradingSessionEvent has copy, drop {
    session_id: u64,
    total_trades: u64,
    volume_by_pair: vector<PairVolume>,
    price_changes: vector<PriceChange>,
    // Compressed event data
}
Results: Achieved 1000+ TPS with 90% cost reduction

πŸ› οΈ Optimization Toolkit

Performance Testing Framework

# Comprehensive performance testing suite

# 1. Contract gas analysis
dubhe analyze --gas-report contracts/

# 2. Load testing with Artillery
artillery run load-test-config.yml

# 3. Database performance analysis
# Fix: the statements below are SQL, not shell — run them through psql
# instead of pasting them directly into the terminal, and reset the stats
# view with SELECT (pg_stat_statements_reset is a function).
psql -c "SELECT pg_stat_statements_reset();"
# Run your queries, then inspect the slowest statements:
psql -c "SELECT query, calls, mean_time FROM pg_stat_statements ORDER BY mean_time DESC;"

# 4. Frontend profiling
lighthouse --only-categories=performance http://localhost:3000

Optimization Checklist

Pre-deployment Checklist:
Gas Optimization:
  • Components are minimal and focused
  • Systems use batch operations where possible
  • Loops are bounded and optimized
  • Storage operations are minimized
  • Event emission is efficient
Architecture Review:
  • ECS patterns are properly implemented
  • Component granularity is appropriate
  • Systems have single responsibilities
  • Error handling is comprehensive
  • Access control is properly implemented

🚀 Advanced Optimization Strategies

Hybrid On-Chain/Off-Chain Architecture

// Split game world into optimized shards
class WorldShardManager {
  private shards: Map<string, WorldShard> = new Map();
  
  /** Resolve the shard an entity lives on, derived from game logic. */
  getShardForEntity(entityId: string): WorldShard {
    // Determine shard based on game logic
    const shardId = this.calculateShardId(entityId);
    return this.shards.get(shardId)!;
  }
  
  /**
   * Atomically move an entity between shards.
   * Fix: the original redeclared `fromShard`/`toShard` consts shadowing the
   * string parameters and looked them up with undefined `fromShardId` /
   * `toShardId` — a compile error. Use distinct local names keyed by the
   * shard-id parameters.
   */
  async moveEntityBetweenShards(
    entityId: string, 
    fromShard: string, 
    toShard: string
  ) {
    // Atomic cross-shard transfer
    const source = this.shards.get(fromShard)!;
    const target = this.shards.get(toShard)!;
    
    await this.atomicTransfer(entityId, source, target);
  }
}

Predictive Performance Scaling

// Intelligent performance scaling
// Scales infrastructure (database, WebSocket fleet, Redis cluster) from
// predicted needs derived from live game metrics.
class PerformanceScaler {
  // Thresholds that motivate a scaling evaluation — tune per deployment.
  private readonly SCALE_TRIGGERS = {
    HIGH_PLAYER_DENSITY: 50,
    COMBAT_EVENT_RATE: 100,
    MARKET_ACTIVITY_SPIKE: 1000,
  };
  
  // Evaluate predicted needs and apply each scaling action independently.
  // NOTE(review): scaleDatabase/scaleWebSocketServers/scaleRedisCluster are
  // referenced but not defined in this snippet — confirm they exist elsewhere.
  async scaleBasedOnGameState(gameMetrics: GameMetrics) {
    const predictions = this.predictPerformanceNeeds(gameMetrics);
    
    if (predictions.needsDatabaseScaling) {
      await this.scaleDatabase(predictions.dbCpuTarget);
    }
    
    if (predictions.needsWebSocketScaling) {
      await this.scaleWebSocketServers(predictions.wsInstanceCount);
    }
    
    if (predictions.needsCacheScaling) {
      await this.scaleRedisCluster(predictions.cacheMemoryTarget);
    }
  }
  
  private predictPerformanceNeeds(metrics: GameMetrics) {
    // ML-based performance prediction
    // NOTE(review): `performanceModel` is not declared on this class in this
    // snippet — it must be injected/initialized elsewhere; confirm.
    return this.performanceModel.predict(metrics);
  }
}

🎯 Next Steps

Start Performance Testing

Immediate Actions:
  • Set up monitoring dashboard
  • Run baseline performance tests
  • Identify top 3 bottlenecks
  • Create optimization plan

Implementation Roadmap

Week 1-2: Smart contract optimizations. Week 3-4: Database and indexing improvements.
Week 5-6: Frontend performance enhancements. Week 7-8: Integration testing and monitoring.
Performance optimization is an iterative process. Don’t try to implement all optimizations at once. Focus on the bottlenecks that impact user experience the most, measure the results, and iterate.