# Debugging Tools Cheat Sheet

Commands, flags, and workflows for debugging, profiling, and performance analysis.

# Drop into debugger at this line (Python 3.7+)
breakpoint()
# Older equivalent
import pdb; pdb.set_trace()
# Post-mortem: debug after an unhandled exception
import pdb; pdb.pm()
Terminal window
# Run script under pdb from the start
python -m pdb script.py
# Post-mortem on crash (drops into pdb on exception)
python -m pdb -c continue script.py
| Command      | Short | Effect                                  |
|--------------|-------|-----------------------------------------|
| next         | n     | Execute next line (step over)           |
| step         | s     | Step into function call                 |
| continue     | c     | Continue until next breakpoint          |
| return       | r     | Continue until current function returns |
| break N      | b N   | Set breakpoint at line N                |
| break fn     | b fn  | Set breakpoint at function              |
| tbreak N     |       | Temporary breakpoint (fires once)       |
| clear N      | cl N  | Clear breakpoint number N               |
| list         | l     | Show source around current line         |
| longlist     | ll    | Show full source of current function    |
| print expr   | p     | Evaluate and print expression           |
| pp expr      |       | Pretty-print expression                 |
| display expr |       | Watch expression (print on change)      |
| undisplay    |       | Remove watched expression               |
| where        | w     | Print stack trace                       |
| up           | u     | Move up one stack frame                 |
| down         | d     | Move down one stack frame               |
| quit         | q     | Exit debugger                           |
(Pdb) b 42, x > 100 # Break at line 42 only when x > 100
(Pdb) b utils.py:10, len(items) == 0
Terminal window
# ipdb — pdb with IPython features (tab completion, syntax highlighting)
pip install ipdb
python -m ipdb script.py
# Use ipdb as the default breakpoint handler
import os
os.environ["PYTHONBREAKPOINT"] = "ipdb.set_trace"
breakpoint() # Now opens ipdb
Terminal window
# pudb — full TUI debugger with variable inspector
pip install pudb
python -m pudb script.py
Terminal window
# Trace all system calls
strace ./program
# Trace specific syscall categories
strace -e trace=network ./program
strace -e trace=file ./program
strace -e trace=open,read,write ./program
# Attach to running process
strace -p 1234
# Count syscalls and show summary
strace -c ./program
# Write output to file (stderr is normal output)
strace -o trace.log ./program
# Timestamp each syscall
strace -t ./program # Wall clock
strace -T ./program # Time spent in each call
Terminal window
# macOS equivalent of strace (requires SIP adjustments)
sudo dtruss ./program
# Trace running process
sudo dtruss -p 1234
# Trace specific syscalls
sudo dtruss -f -t open ./program
Terminal window
# Count syscalls by process name
sudo dtrace -n 'syscall:::entry { @[execname] = count(); }'
# Trace file opens by a specific process
sudo dtrace -n 'syscall::open*:entry /execname == "python3"/ {
printf("%s", copyinstr(arg0));
}'
# Profile user-space stacks at 99 Hz
sudo dtrace -n 'profile-99 /pid == 1234/ {
@[ustack()] = count();
}'
Terminal window
# Launch program under debugger
lldb ./program
lldb -- ./program --flag arg
# Attach to running process
lldb -p 1234
lldb -n process_name
(lldb) breakpoint set -f main.c -l 42 # Break at file:line
(lldb) b main # Break at function
(lldb) br list # List breakpoints
(lldb) run # Start execution
(lldb) next # Step over
(lldb) step # Step into
(lldb) continue # Continue
(lldb) bt # Backtrace
(lldb) frame variable # Show local variables
(lldb) p expression # Evaluate expression
(lldb) memory read 0x1000 # Examine memory
(lldb) register read # Show registers
(lldb) watchpoint set variable x # Break on write to x
(lldb) quit
(gdb) break main.c:42 # Set breakpoint
(gdb) run # Start program
(gdb) next / step / continue # Navigation
(gdb) bt # Backtrace
(gdb) info locals # Show local variables
(gdb) print expr # Evaluate expression
(gdb) watch variable # Watchpoint
(gdb) x/16xw 0x1000 # Examine 16 words at address
| Task            | lldb                      | gdb         |
|-----------------|---------------------------|-------------|
| Set breakpoint  | b main                    | break main  |
| Run             | run                       | run         |
| Backtrace       | bt                        | bt          |
| Print variable  | p var                     | print var   |
| Local variables | frame variable            | info locals |
| Examine memory  | memory read addr          | x addr      |
| Watch variable  | watchpoint set variable x | watch x     |
| Attach to PID   | process attach -p 1234    | attach 1234 |
Terminal window
# Install
pip install py-spy
# Live top-like view of a running process
py-spy top --pid 1234
# Record a flame graph (SVG output)
py-spy record -o flame.svg --pid 1234
py-spy record -o flame.svg -- python script.py
# Record with specific format
py-spy record --format flamegraph -o flame.svg -- python script.py
py-spy record --format speedscope -o profile.json -- python script.py
# Sample rate (default 100 Hz)
py-spy record --rate 250 -o flame.svg --pid 1234
# Include native C extensions
py-spy record --native -o flame.svg --pid 1234
# Profile subprocess too
py-spy record --subprocesses -o flame.svg -- python script.py
Terminal window
# Run profiler and save stats
python -m cProfile -o profile.prof script.py
# Sort by cumulative time (direct output)
python -m cProfile -s cumtime script.py
# Visualize with snakeviz (opens browser)
pip install snakeviz
snakeviz profile.prof
# Profile a specific section
import cProfile
import pstats
with cProfile.Profile() as pr:
expensive_function()
stats = pstats.Stats(pr)
stats.sort_stats("cumulative")
stats.print_stats(20) # Top 20 functions
Terminal window
pip install line_profiler
# Decorate functions to profile
@profile
def slow_function():
total = sum(range(1000000))
return total
Terminal window
# Run with kernprof
kernprof -l -v script.py
# -l line-by-line profiling
# -v show results immediately
Terminal window
pip install memory_profiler
from memory_profiler import profile
@profile
def memory_hungry():
a = [1] * 1000000
b = [2] * 2000000
del b
return a
Terminal window
# Run and show line-by-line memory usage
python -m memory_profiler script.py
# Track memory over time (generates plot data)
mprof run script.py
mprof plot # Opens matplotlib graph
Terminal window
# Basic benchmark
hyperfine 'sleep 0.3'
# Compare two commands
hyperfine 'fd . /tmp' 'find /tmp'
# Warmup runs (prime caches)
hyperfine --warmup 3 'command'
# Exact number of runs
hyperfine --runs 50 'command'
# Parameter sweep
hyperfine --parameter-scan threads 1 8 \
'sort --parallel={threads} data.txt'
# Parameter list
hyperfine --parameter-list lang python3,ruby,node \
'{lang} fib.py'
# Export results
hyperfine --export-json results.json 'command'
hyperfine --export-markdown results.md 'command'
# Preparation command (run before each timing)
hyperfine --prepare 'sync; echo 3 | sudo tee /proc/sys/vm/drop_caches' \
'cat largefile'
# Show output of command
hyperfine --show-output 'echo hello'
/usr/bin/time
# Bash builtin (real/user/sys)
time sleep 1
/usr/bin/time -l ./program # macOS: includes memory stats
/usr/bin/time -v ./program # Linux: verbose resource usage
# GNU time output format
/usr/bin/time -f "%e real, %U user, %S sys, %M maxRSS(KB)" ./program
Terminal window
# From command line
python -m timeit 'sum(range(1000))'
python -m timeit -n 10000 -r 5 'sum(range(1000))'
# -n number of executions per run
# -r number of runs (best of r is reported)
import timeit
# Time a statement
elapsed = timeit.timeit('sum(range(1000))', number=10000)
# Time with setup
elapsed = timeit.timeit(
'sorted(data)',
setup='import random; data = random.sample(range(10000), 1000)',
number=1000
)
width = time spent (wider = more time)
depth = call stack (bottom = entry point, top = leaf function)
color = arbitrary (usually random or by category)
Look for:
- Wide bars at top → hot functions (optimize these)
- Tall narrow towers → deep call stacks (check recursion)
- Plateaus → single function dominating runtime
Terminal window
py-spy record --format flamegraph -o flame.svg -- python script.py
# Open flame.svg in a browser (interactive: click to zoom)
Terminal window
# Record CPU profile
perf record -g ./program
# Convert to flame graph (Brendan Gregg's scripts)
perf script | stackcollapse-perf.pl | flamegraph.pl > flame.svg
Terminal window
# Profile from command line using xctrace
xcrun xctrace record --template 'Time Profiler' \
--launch ./program --output profile.trace
# Open in Instruments GUI
open profile.trace

Instruments provides a native flame graph (“Call Tree” view inverted) plus memory allocations, disk I/O, and energy impact profilers.

Terminal window
# Capture all traffic on default interface
sudo tcpdump
# Specific interface and port
sudo tcpdump -i en0 port 443
# Filter by host
sudo tcpdump host 192.168.1.1
# Show packet contents in ASCII
sudo tcpdump -A port 80
# Save to file for Wireshark analysis
sudo tcpdump -w capture.pcap
# Read saved capture
tcpdump -r capture.pcap
# Common filters
sudo tcpdump 'tcp port 80 and host example.com'
sudo tcpdump 'udp and port 53' # DNS only
sudo tcpdump -n 'icmp' # Ping/ICMP only
Terminal window
# Show request/response headers
curl -v https://example.com
# Full trace (hex + ASCII)
curl --trace trace.log https://example.com
curl --trace-ascii trace.log https://example.com
# Show only timing info
curl -o /dev/null -s -w "\
DNS:     %{time_namelookup}s\n\
Connect: %{time_connect}s\n\
TLS:     %{time_appconnect}s\n\
TTFB:    %{time_starttransfer}s\n\
Total:   %{time_total}s\n" \
https://example.com
# Follow redirects with verbose
curl -vL https://short.url/abc
Terminal window
# nslookup — simple DNS query
nslookup example.com
nslookup -type=MX example.com
# dig — detailed DNS query
dig example.com
dig example.com MX
dig +short example.com # Just the answer
dig +trace example.com # Full delegation chain
dig @8.8.8.8 example.com # Query specific nameserver
# host — concise DNS lookup
host example.com
host -t AAAA example.com # IPv6 records
import logging
import json
# Basic configuration
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s %(levelname)s %(name)s %(message)s"
)
logger = logging.getLogger(__name__)
# Structured output with extra fields
logger.info("Request processed", extra={
"user_id": 42,
"duration_ms": 150,
"status": 200
})
# JSON log formatter for machine parsing
class JSONFormatter(logging.Formatter):
def format(self, record):
log_entry = {
"time": self.formatTime(record),
"level": record.levelname,
"message": record.getMessage(),
"logger": record.name,
}
if hasattr(record, "user_id"):
log_entry["user_id"] = record.user_id
return json.dumps(log_entry)
Terminal window
# Filter log lines
grep "ERROR" app.log
grep -i "timeout" app.log
grep -C 3 "Exception" app.log # 3 lines context
# Count error types
grep -oP 'ERROR: \K[^:]+' app.log | sort | uniq -c | sort -rn
# Parse JSON logs with jq
cat app.log | jq 'select(.level == "ERROR")'
cat app.log | jq 'select(.duration_ms > 1000) | {time, message}'
cat app.log | jq -r '[.time, .level, .message] | @tsv'
# Group errors by message
cat app.log | jq -r 'select(.level == "ERROR") | .message' \
| sort | uniq -c | sort -rn
Terminal window
# Follow log file in real time
tail -f app.log
# Follow and filter
tail -f app.log | grep --line-buffered "ERROR"
# Follow multiple files
tail -f *.log
# Follow with highlighting (grep color)
tail -f app.log | grep --line-buffered --color=always -E "ERROR|WARNING|"
# Empty final alternative matches all lines but colors matches
| I need to…                        | Tool            | Command                                     |
|-----------------------------------|-----------------|---------------------------------------------|
| Debug Python interactively        | pdb             | breakpoint() in code                        |
| Debug with better UI              | pudb            | python -m pudb script.py                    |
| Profile Python CPU usage          | py-spy          | py-spy top --pid 1234                       |
| Generate a flame graph            | py-spy          | py-spy record -o flame.svg -- python app.py |
| Profile function call counts      | cProfile        | python -m cProfile -s cumtime script.py     |
| Profile line-by-line              | line_profiler   | kernprof -l -v script.py                    |
| Profile memory usage              | memory_profiler | python -m memory_profiler script.py         |
| Benchmark shell commands          | hyperfine       | hyperfine 'cmd1' 'cmd2'                     |
| Benchmark Python snippets         | timeit          | python -m timeit 'expr'                     |
| Trace system calls (Linux)        | strace          | strace -e trace=file ./program              |
| Trace system calls (macOS)        | dtruss          | sudo dtruss ./program                       |
| Debug native binary (macOS)       | lldb            | lldb ./program                              |
| Debug native binary (Linux)       | gdb             | gdb ./program                               |
| Capture network traffic           | tcpdump         | sudo tcpdump -i en0 port 443                |
| Debug HTTP requests               | curl            | curl -v https://example.com                 |
| Debug DNS resolution              | dig             | dig +trace example.com                      |
| Find which commit broke something | git bisect      | git bisect start && git bisect bad          |
| Watch logs in real time           | tail            | tail -f app.log \| grep ERROR               |
| Parse JSON logs                   | jq              | jq 'select(.level == "ERROR")' app.log      |