A fast comment system improves user experience and reduces infrastructure costs. This chapter covers performance optimization strategies.
Target metrics for a good comment system:
| Metric | Target | Acceptable |
|---|---|---|
| Time to First Comment | < 500ms | < 1s |
| Comment Load Time | < 200ms | < 500ms |
| Submit Response | < 300ms | < 1s |
| Time to Interactive | < 1s | < 2s |
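A rough way to spot-check the read-path targets against a running instance (a sketch; httpx is assumed to be installed, and the URL and page_id are placeholders for your own deployment):

import statistics
import time

import httpx

URL = "http://localhost:8000/api/comments"  # placeholder deployment

def measure(n: int = 20) -> None:
    timings_ms = []
    with httpx.Client() as client:
        for _ in range(n):
            start = time.perf_counter()
            client.get(URL, params={"page_id": "/blog/test-post"})
            timings_ms.append((time.perf_counter() - start) * 1000)
    timings_ms.sort()
    p95 = timings_ms[int(len(timings_ms) * 0.95) - 1]
    print(f"median={statistics.median(timings_ms):.0f}ms  p95={p95:.0f}ms")

if __name__ == "__main__":
    measure()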
┌──────────────────────────────────────────────────┐
│                 CACHE HIERARCHY                  │
├──────────────────────────────────────────────────┤
│                                                  │
│  Layer 1: Browser Cache                          │
│  └── Cache-Control headers, Service Worker       │
│      TTL: 1-5 minutes                            │
│                                                  │
│  Layer 2: CDN/Edge Cache                         │
│  └── Cloudflare, Fastly, Vercel Edge             │
│      TTL: 5-60 minutes                           │
│                                                  │
│  Layer 3: Application Cache (Redis)              │
│  └── Serialized responses, computed data         │
│      TTL: 5-15 minutes                           │
│                                                  │
│  Layer 4: Database Query Cache                   │
│  └── Prepared statements, connection pooling     │
│                                                  │
└──────────────────────────────────────────────────┘
Layers 1 and 2 are driven by the HTTP caching headers the API sends:

import hashlib
from datetime import datetime

from fastapi import Response

@app.get("/api/comments")
async def get_comments(page_id: str, response: Response):
    comments = await fetch_comments(page_id)

    # Derive cache validators from the newest comment on the page
    last_modified = max(c.updated_at for c in comments) if comments else datetime.utcnow()
    etag = hashlib.md5(f"{page_id}:{last_modified}".encode()).hexdigest()

    # Cache for 60s; serve stale for up to 5 minutes while revalidating
    response.headers["Cache-Control"] = "public, max-age=60, stale-while-revalidate=300"
    response.headers["ETag"] = f'"{etag}"'
    response.headers["Last-Modified"] = last_modified.strftime("%a, %d %b %Y %H:%M:%S GMT")
    return {"data": comments}
Conditional requests let returning clients reuse their cached copy and receive an empty 304 response instead of the full body:

from datetime import datetime

from fastapi import Header, Response

@app.get("/api/comments")
async def get_comments(
    page_id: str,
    if_none_match: str | None = Header(None),
    if_modified_since: str | None = Header(None),
):
    # Check the ETag first
    current_etag = await get_etag(page_id)
    if if_none_match and if_none_match.strip('"') == current_etag:
        return Response(status_code=304)  # Not Modified, no body

    # Fall back to Last-Modified
    last_modified = await get_last_modified(page_id)
    if if_modified_since:
        client_time = datetime.strptime(if_modified_since, "%a, %d %b %Y %H:%M:%S GMT")
        if last_modified <= client_time:
            return Response(status_code=304)

    return await fetch_comments(page_id)
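A quick way to exercise the conditional path, assuming the endpoint both sets the ETag header (as in the first example) and honors If-None-Match (as above); the base URL and page_id are placeholders:

import httpx

with httpx.Client(base_url="http://localhost:8000") as client:
    first = client.get("/api/comments", params={"page_id": "/blog/test-post"})
    etag = first.headers["etag"]

    second = client.get(
        "/api/comments",
        params={"page_id": "/blog/test-post"},
        headers={"If-None-Match": etag},
    )
    assert second.status_code == 304  # client keeps using its cached copy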
Layer 3 is an application-level cache in Redis:

import json

from redis.asyncio import Redis

class CommentCache:
    def __init__(self, redis: Redis, default_ttl: int = 300):
        self.redis = redis
        self.default_ttl = default_ttl

    async def get_comments(self, page_id: str) -> list | None:
        key = f"comments:v1:{page_id}"
        cached = await self.redis.get(key)
        if cached:
            return json.loads(cached)
        return None

    async def set_comments(self, page_id: str, comments: list, ttl: int | None = None):
        key = f"comments:v1:{page_id}"
        await self.redis.setex(
            key,
            ttl or self.default_ttl,
            json.dumps(comments, default=str)
        )

    async def invalidate(self, page_id: str):
        """Invalidate cache when comments change"""
        key = f"comments:v1:{page_id}"
        await self.redis.delete(key)

    async def get_or_fetch(self, page_id: str, fetch_fn) -> list:
        """Get from cache or fetch and cache"""
        cached = await self.get_comments(page_id)
        if cached is not None:
            return cached
        comments = await fetch_fn(page_id)
        await self.set_comments(page_id, comments)
        return comments
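A sketch of wiring the cache into the read endpoint, assuming redis is an already-connected redis.asyncio client and fetch_comments is the database helper used earlier:

cache = CommentCache(redis)

@app.get("/api/comments")
async def get_comments(page_id: str):
    # Serve from Redis when possible, fall back to the database otherwise
    comments = await cache.get_or_fetch(page_id, fetch_comments)
    return {"data": comments}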
# Invalidate on new comment
async def create_comment(data: CommentCreate):
    comment = await db_create_comment(data)

    # Invalidate cache
    await cache.invalidate(data.page_id)

    # Also invalidate any parent page cache
    if data.parent_id:
        parent = await get_comment(data.parent_id)
        await cache.invalidate(parent.page_id)

    return comment

# Invalidate on moderation
async def moderate_comment(comment_id: str, action: str):
    comment = await get_comment(comment_id)

    # Perform action
    await update_comment_status(comment_id, action)

    # Invalidate
    await cache.invalidate(comment.page_id)
from sqlalchemy import select, text

# Bad: N+1 query problem
async def get_comments_with_replies_bad(page_id: str):
    result = await db.execute(
        select(Comment).where(Comment.page_id == page_id)
    )
    comments = result.scalars().all()
    for comment in comments:
        # This runs one additional query per comment!
        replies = await db.execute(
            select(Comment).where(Comment.parent_id == comment.id)
        )
        comment.replies = replies.scalars().all()
    return comments

# Good: a single query that aggregates replies with a join
async def get_comments_with_replies_good(page_id: str):
    query = """
        SELECT
            c.*,
            array_agg(
                json_build_object(
                    'id', r.id,
                    'content', r.content,
                    'author_name', r.author_name,
                    'created_at', r.created_at
                )
            ) FILTER (WHERE r.id IS NOT NULL) AS replies
        FROM comments c
        LEFT JOIN comments r ON r.parent_id = c.id AND r.status = 'approved'
        WHERE c.page_id = :page_id
          AND c.status = 'approved'
          AND c.parent_id IS NULL
        GROUP BY c.id
        ORDER BY c.created_at DESC
    """
    return await db.execute(text(query), {"page_id": page_id})
-- Essential indexes
CREATE INDEX idx_comments_page_status ON comments(page_id, status);
CREATE INDEX idx_comments_parent ON comments(parent_id) WHERE parent_id IS NOT NULL;
CREATE INDEX idx_comments_created ON comments(created_at DESC);
-- Partial index for approved comments (smaller, faster)
CREATE INDEX idx_comments_approved ON comments(page_id, created_at DESC)
WHERE status = 'approved';
-- Covering index (includes all needed columns)
CREATE INDEX idx_comments_list ON comments(page_id, status, created_at DESC)
INCLUDE (id, author_name, content, parent_id);
from sqlalchemy.ext.asyncio import create_async_engine

# The async engine already uses an async-compatible queue pool by default,
# so only the pool sizing needs tuning (the sync QueuePool class cannot be
# passed to an asyncio engine).
engine = create_async_engine(
    DATABASE_URL,
    pool_size=5,        # Maintain 5 connections
    max_overflow=10,    # Allow up to 15 total
    pool_timeout=30,    # Wait up to 30s for a free connection
    pool_recycle=1800,  # Recycle connections after 30 minutes
)
Load the widget only when the reader scrolls near the comment section:

class LazyComments {
  constructor(container) {
    this.container = container;
    this.pageId = container.dataset.pageId;  // from data-page-id on the container
    this.loaded = false;
    this.observer = new IntersectionObserver(
      entries => this.onIntersect(entries),
      { rootMargin: '200px' }  // Start loading 200px before visible
    );
    this.observer.observe(container);
  }

  onIntersect(entries) {
    if (entries[0].isIntersecting && !this.loaded) {
      this.loaded = true;
      this.loadComments();
      this.observer.disconnect();
    }
  }

  async loadComments() {
    this.container.innerHTML = '<p>Loading comments...</p>';
    const response = await fetch(`/api/comments?page_id=${encodeURIComponent(this.pageId)}`);
    this.renderComments(await response.json());
  }
}

// Initialize when DOM ready
document.querySelectorAll('[data-lazy-comments]').forEach(el => {
  new LazyComments(el);
});
For very long threads, render only the comments that are on screen. The container is assumed to have a fixed height with overflow: auto and position: relative.

class VirtualCommentList {
  constructor(container, comments, itemHeight = 150) {
    this.container = container;
    this.comments = comments;
    this.itemHeight = itemHeight;
    // Number of items that fit in the viewport, plus a small buffer
    this.visibleCount = Math.ceil(container.clientHeight / itemHeight) + 2;
    this.setupScroll();
  }

  setupScroll() {
    // Create spacer for scroll height
    this.spacer = document.createElement('div');
    this.spacer.style.height = `${this.comments.length * this.itemHeight}px`;
    this.container.appendChild(this.spacer);

    // Absolutely positioned content window that holds the visible items
    this.content = document.createElement('div');
    this.content.style.position = 'absolute';
    this.content.style.width = '100%';
    this.container.appendChild(this.content);

    // Listen for scroll
    this.container.addEventListener('scroll', () => this.onScroll());
    this.render();
  }

  onScroll() {
    requestAnimationFrame(() => this.render());
  }

  render() {
    const scrollTop = this.container.scrollTop;
    const startIndex = Math.floor(scrollTop / this.itemHeight);
    const endIndex = Math.min(startIndex + this.visibleCount, this.comments.length);

    this.content.style.top = `${startIndex * this.itemHeight}px`;
    this.content.innerHTML = this.comments
      .slice(startIndex, endIndex)
      .map(c => this.renderComment(c))
      .join('');
  }
}
async function submitComment(data) {
  // Create temporary comment with pending state
  const tempId = `temp-${Date.now()}`;
  const tempComment = {
    id: tempId,
    ...data,
    status: 'pending',
    created_at: new Date().toISOString()
  };

  // Immediately add to UI
  addCommentToUI(tempComment);

  try {
    // Submit to server
    const response = await fetch('/api/comments', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(data)
    });
    if (!response.ok) throw new Error('Failed');

    const realComment = await response.json();

    // Replace temp with real comment
    replaceComment(tempId, realComment.data);
    showMessage('Comment submitted for review!');
  } catch (error) {
    // Remove temp comment on failure
    removeComment(tempId);
    showMessage('Failed to submit comment', 'error');
  }
}
// Split the widget for faster initial load:
//   main.js - core functionality (< 10KB)
//   full.js - full features (< 30KB)

// Load the minimal bundle first
const script = document.createElement('script');
script.src = '/comments/main.min.js';
script.async = true;

// Load the full bundle when the browser is idle
script.onload = () => {
  const loadFull = () => import('/comments/full.min.js');
  if ('requestIdleCallback' in window) {
    requestIdleCallback(loadFull);
  } else {
    setTimeout(loadFull, 2000);  // Fallback for browsers without requestIdleCallback
  }
};
document.head.appendChild(script);
// Cloudflare Worker for smart caching
addEventListener('fetch', event => {
  event.respondWith(handleRequest(event));
});

async function handleRequest(event) {
  const request = event.request;

  // Only cache GET requests
  if (request.method !== 'GET') {
    return fetch(request);
  }

  // Check the edge cache first
  const cache = caches.default;
  let response = await cache.match(request);

  if (!response) {
    response = await fetch(request);

    // Cache successful responses
    if (response.ok) {
      const headers = new Headers(response.headers);
      headers.set('Cache-Control', 'public, max-age=60');
      response = new Response(response.body, {
        status: response.status,
        headers
      });
      event.waitUntil(cache.put(request, response.clone()));
    }
  }
  return response;
}
// vercel.json
{
  "headers": [
    {
      "source": "/api/comments",
      "headers": [
        {
          "key": "Cache-Control",
          "value": "s-maxage=60, stale-while-revalidate=300"
        }
      ]
    }
  ]
}
import time

from fastapi import Request
from prometheus_client import Counter, Histogram

# Response time histogram
request_duration = Histogram(
    'request_duration_seconds',
    'Request duration in seconds',
    ['endpoint', 'method'],
    buckets=[.01, .025, .05, .1, .25, .5, 1, 2.5, 5]
)

# Database query time
db_query_duration = Histogram(
    'db_query_duration_seconds',
    'Database query duration',
    ['query_type'],
    buckets=[.001, .005, .01, .025, .05, .1, .25, .5]
)

# Cache metrics
cache_operations = Counter(
    'cache_operations_total',
    'Cache operations',
    ['operation', 'result']  # e.g. get/set, hit/miss
)

# Middleware to record request timings
@app.middleware("http")
async def timing_middleware(request: Request, call_next):
    start = time.perf_counter()
    response = await call_next(request)
    duration = time.perf_counter() - start

    request_duration.labels(
        endpoint=request.url.path,
        method=request.method
    ).observe(duration)
    response.headers["X-Response-Time"] = f"{duration:.3f}s"
    return response
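The cache_operations counter only helps if something increments it. A minimal sketch, wrapping the CommentCache from earlier (the label values are illustrative):

async def get_or_fetch_instrumented(cache: CommentCache, page_id: str, fetch_fn) -> list:
    # Record a hit or miss for every lookup
    cached = await cache.get_comments(page_id)
    if cached is not None:
        cache_operations.labels(operation="get", result="hit").inc()
        return cached
    cache_operations.labels(operation="get", result="miss").inc()

    comments = await fetch_fn(page_id)
    await cache.set_comments(page_id, comments)
    cache_operations.labels(operation="set", result="ok").inc()
    return comments

Tracking the hit rate over time shows whether the TTLs chosen earlier are actually doing their job.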
# Simple load test with locust
from locust import HttpUser, task, between

class CommentUser(HttpUser):
    wait_time = between(1, 3)

    @task(10)
    def get_comments(self):
        self.client.get("/api/comments?page_id=/blog/test-post")

    @task(1)
    def post_comment(self):
        self.client.post("/api/comments", json={
            "page_id": "/blog/test-post",
            "author_name": "Load Tester",
            "content": "This is a test comment"
        })
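Saved as locustfile.py, the test can be run headless with something like:

locust -f locustfile.py --host http://localhost:8000 --users 50 --spawn-rate 5 --run-time 2m --headless

The host, user count, and run time are placeholders for your own setup.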
| Optimization | Impact | Complexity |
|---|---|---|
| HTTP Caching | High | Low |
| Redis Caching | High | Medium |
| Database Indexes | High | Low |
| Lazy Loading | Medium | Low |
| Virtual Scrolling | Medium | High |
| CDN | High | Low |
Performance checklist:

- [ ] Cache-Control, ETag, and Last-Modified headers set on comment reads
- [ ] Conditional requests answered with 304 Not Modified
- [ ] Redis cache in front of the database, invalidated on create and moderation
- [ ] Replies loaded with a single join (no N+1 queries)
- [ ] Indexes on (page_id, status), parent_id, and created_at
- [ ] Connection pooling configured
- [ ] Comment widget lazy-loaded; virtual scrolling for very long threads
- [ ] Optimistic UI on submit
- [ ] Edge/CDN caching with stale-while-revalidate
- [ ] Response times, query times, and cache hit rate monitored
- [ ] Load tested before launch