diff --git a/www/analytics.sh b/www/analytics.sh
new file mode 100755
index 0000000..4262ba6
--- /dev/null
+++ b/www/analytics.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+# Simple analytics for llm-impact.org
+# Parses nginx access logs. Run as root or a user with log read access.
+#
+# Usage: ./analytics.sh [days]
+#   days: how many days back to analyze (default: 7)
+
+set -euo pipefail
+
+DAYS="${1:-7}"
+LOG="/var/log/nginx/access.log"
+# Cutoff as YYYYMMDD so log dates can be compared chronologically.
+CUTOFF=$(date -d "$DAYS days ago" +%Y%m%d)
+
+if [ ! -r "$LOG" ]; then
+  echo "Error: Cannot read $LOG (run as root or add user to adm group)" >&2
+  exit 1
+fi
+
+echo "=== llm-impact.org analytics (last $DAYS days) ==="
+echo
+
+# Filter to recent entries, exclude assets and known scanners.
+# $4 is "[dd/Mon/yyyy:HH:MM:SS"; rewrite it to yyyymmdd before comparing —
+# plain string comparison of dd/Mon/yyyy dates is NOT chronological.
+# '|| true' guard: grep -v exits 1 when nothing survives, which would
+# otherwise kill the script (set -e) before the friendly message below.
+recent=$(awk -v cutoff="$CUTOFF" '
+  BEGIN {
+    split("Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec", names, " ")
+    for (i = 1; i <= 12; i++) mon[names[i]] = sprintf("%02d", i)
+  }
+  {
+    split(substr($4, 2, 11), p, "/")
+    if (p[3] mon[p[2]] p[1] >= cutoff) print
+  }
+' "$LOG" \
+  | grep -v -E '\.(css|js|ico|png|jpg|svg|woff|ttf|map)' \
+  | grep -v -iE '(bot|crawler|spider|leakix|zgrab|masscan|nmap)' \
+  | grep -v -E '\.(env|php|git|xml|yml|yaml|bak|sql)') || true
+
+if [ -z "$recent" ]; then
+  echo "No matching requests in the last $DAYS days."
+  exit 0
+fi
+
+# Unique IPs (proxy for unique visitors)
+unique_ips=$(echo "$recent" | awk '{print $1}' | sort -u | wc -l)
+echo "Unique IPs: $unique_ips"
+
+# Total requests (excluding assets)
+total=$(echo "$recent" | wc -l)
+echo "Total page requests: $total"
+
+echo
+echo "=== Top pages ==="
+echo "$recent" | awk '{print $7}' | sort | uniq -c | sort -rn | head -10
+
+echo
+echo "=== Referrers (external) ==="
+# In combined log format: IP - - [date] "request" status size "referer" "ua"
+# '|| true': with pipefail, grep -v exiting 1 (no external referrers) would abort.
+echo "$recent" | awk -F'"' '{print $4}' | { grep -v -E '(^-$|^$|llm-impact\.org)' || true; } | sort | uniq -c | sort -rn | head -10
+
+echo
+echo "=== Landing page vs repo ==="
+landing=$(echo "$recent" | awk '$7 == "/" || $7 == "/index.html"' | wc -l)
+# grep -c prints 0 but exits 1 on no match; '|| true' keeps set -e happy.
+forge=$(echo "$recent" | grep -c '/forge/' || true)
+echo "Landing page: $landing"
+echo "Forge (repo): $forge"
diff --git a/www/forgejo.nginx.conf b/www/forgejo.nginx.conf
new file mode 100644
index 0000000..aedab5e
--- /dev/null
+++ b/www/forgejo.nginx.conf
@@ -0,0 +1,36 @@
+server {
+ server_name llm-impact.org;
+
+ root /home/claude/www; # static landing-page docroot (index.html lives here)
+ index index.html;
+
+ location / {
+ try_files $uri $uri/ =404; # static files only; unknown paths get a plain 404
+ }
+
+ location /forge/ { # reverse proxy to the local Forgejo instance
+ proxy_pass http://127.0.0.1:3000/; # trailing slash: /forge/x is forwarded upstream as /x
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ client_max_body_size 100M; # allow large git pushes/uploads through the proxy
+ }
+
+ listen 443 ssl; # managed by Certbot
+ listen [::]:443 ssl ipv6only=on; # managed by Certbot
+ ssl_certificate /etc/letsencrypt/live/llm-impact.org/fullchain.pem; # managed by Certbot
+ ssl_certificate_key /etc/letsencrypt/live/llm-impact.org/privkey.pem; # managed by Certbot
+ include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
+ ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
+}
+server { # HTTP listener: redirect matching host to HTTPS, 404 everything else
+ if ($host = llm-impact.org) {
+ return 301 https://$host$request_uri;
+ } # managed by Certbot
+
+ listen 80;
+ listen [::]:80;
+ server_name llm-impact.org;
+ return 404; # managed by Certbot
+}
diff --git a/www/index.html b/www/index.html
new file mode 100644
index 0000000..28cda22
--- /dev/null
+++ b/www/index.html
@@ -0,0 +1,202 @@
+
+
+
+
+
+ AI Conversation Impact
+
+
+
+
+
+
AI Conversation Impact
+
Beyond carbon: a framework for the full cost of AI conversations — environmental, social, epistemic, and political.
+
+
+
+
20+
+
Cost categories across 5 dimensions
+
+
+
100-250 Wh
+
Energy per long conversation
+
+
+
CC0
+
Public domain, no restrictions
+
+
+
+
The problem
+
Most tools for measuring AI's impact stop at energy and CO2. But the costs that matter most — cognitive deskilling, data pollution, algorithmic monoculture, power concentration — are invisible precisely because no one is tracking them. This project names and organizes those costs so they cannot be ignored.
+
+
What makes this different
+
Existing tools like EcoLogits and CodeCarbon measure environmental metrics well. We don't compete with them — we complement them. This methodology adds the dimensions they don't cover:
+
+
Social — annotation labor conditions, cognitive deskilling (CHI 2025), linguistic homogenization
+
Epistemic — code quality degradation, data pollution (Nature, 2024), research integrity
+
Political — power concentration, data sovereignty, opaque content filtering
The goal is not zero AI usage but net-positive usage. The framework includes positive impact metrics (reach, counterfactual value, durability) alongside costs.
+
+
What's here
+
+
A methodology covering 20+ cost categories with estimation methods where possible and honest acknowledgment where not.
+
A toolkit for Claude Code that automatically tracks environmental, financial, and social cost proxies (deskilling risk, code quality, data pollution, provider concentration) during sessions.
+
A related work survey mapping existing tools and research so you can see where this fits.
+
+
+
Help improve the estimates
+
Many figures have low confidence. If you have data center measurements, inference cost data, or research on the social costs of AI, your corrections are welcome.
+ Limitations: The quantifiable costs are almost certainly the least important ones. Effects like deskilling, data pollution, and power concentration cannot be reduced to numbers. This is a tool for honest approximation, not precise accounting.
+
+
+
+ How this was made:
+ This project was developed by a human
+ directing Claude (Anthropic's AI assistant)
+ across multiple conversations. The methodology was applied to itself:
+ across 3 tracked sessions, the project has consumed
+ ~295 Wh of energy, ~95g of CO2, and ~$98 in
+ compute. Whether it produces enough value to justify those costs depends
+ on whether anyone finds it useful. We are
+ tracking that question.
+
+
+
+
+
+
diff --git a/www/repo-stats.sh b/www/repo-stats.sh
new file mode 100755
index 0000000..235fd6c
--- /dev/null
+++ b/www/repo-stats.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+# Query Forgejo API for repository statistics
+# No authentication needed for public repo data.
+#
+# Usage: ./repo-stats.sh
+
+set -euo pipefail
+
+BASE="http://127.0.0.1:3000/api/v1"
+REPO="claude/ai-conversation-impact"
+
+echo "=== Repository stats for $REPO ==="
+echo
+
+# Repo info: fetch once, then parse all fields in a single python pass.
+# curl -f: fail on HTTP errors instead of feeding an error page to json.load.
+info=$(curl -sf "$BASE/repos/$REPO") || { echo "Error: API request to $BASE failed" >&2; exit 1; }
+echo "$info" | python3 -c "
+import sys, json
+d = json.load(sys.stdin)
+print(f\"Stars: {d.get('stars_count', 0)}\")
+print(f\"Forks: {d.get('forks_count', 0)}\")
+print(f\"Watchers: {d.get('watchers_count', 0)}\")"
+
+# Open issues (the API caps page size; 50 covers this repo comfortably)
+issues=$(curl -sf "$BASE/repos/$REPO/issues?state=open&type=issues&limit=50") || { echo "Error: API request to $BASE failed" >&2; exit 1; }
+issue_count=$(echo "$issues" | python3 -c "import sys,json; print(len(json.load(sys.stdin)))")
+echo "Open issues: $issue_count"
+
+echo
+echo "=== Recent issues ==="
+echo "$issues" | python3 -c "
+import sys, json
+issues = json.load(sys.stdin)
+for i in issues[:10]:
+    print(f\" #{i['number']} {i['title']} ({i['created_at'][:10]})\")" 2>/dev/null || echo " (none)"
diff --git a/www/update-costs.sh b/www/update-costs.sh
new file mode 100755
index 0000000..b514fd6
--- /dev/null
+++ b/www/update-costs.sh
@@ -0,0 +1,89 @@
+#!/bin/bash
+#
+# update-costs.sh — Update the landing page with latest project cost
+# estimates from the impact log.
+#
+# Reads .claude/impact/impact-log.jsonl, takes the latest snapshot per
+# session, sums totals, and updates index.html in place.
+#
+# Usage: ./update-costs.sh
+#
+# Run after each conversation or as a cron job.
+
+set -euo pipefail
+
+IMPACT_LOG="/home/claude/claude-dir/.claude/impact/impact-log.jsonl"
+
+if [ ! -f "$IMPACT_LOG" ]; then
+ echo "No impact log found at $IMPACT_LOG"
+ exit 1
+fi
+
+python3 << 'PYEOF' # single-quoted delimiter: heredoc body reaches python verbatim, no shell expansion
+import json, re, sys
+
+IMPACT_LOG = "/home/claude/claude-dir/.claude/impact/impact-log.jsonl"
+PAGES = ["/home/claude/www/index.html", "/home/claude/claude-dir/www/index.html"]
+
+# Read all entries, keep latest per session
+sessions = {}
+with open(IMPACT_LOG) as f:
+ for line in f:
+ line = line.strip()
+ if not line:
+ continue
+ d = json.loads(line)
+ sid = d.get("session_id", "")
+ if sid == "test-123" or not sid: # skip blank ids and the known test fixture entry
+ continue
+ ts = d.get("timestamp", "")
+ if sid not in sessions or ts > sessions[sid]["timestamp"]: # string compare; valid iff timestamps are uniform ISO-8601 — confirm log format
+ sessions[sid] = d
+
+if not sessions:
+ print("No sessions found in impact log")
+ sys.exit(0)
+
+# Sum across sessions
+n = len(sessions)
+total_energy = sum(d.get("energy_wh", 0) for d in sessions.values())
+total_co2 = sum(d.get("co2_g", 0) for d in sessions.values())
+total_cost_cents = sum(d.get("cost_cents", 0) for d in sessions.values())
+
+# Format cost
+if total_cost_cents >= 100:
+ cost_display = f"${total_cost_cents // 100}" # NOTE(review): floor division drops the cents — confirm truncation is intentional
+else:
+ cost_display = f"{total_cost_cents}c"
+
+# Pluralize
+session_word = "session" if n == 1 else "sessions"
+
+# Build replacement paragraph
+new_para = (
+ f"How this was made:\n"
+ f" This project was developed by a human\n"
+ f" directing Claude (Anthropic's AI assistant)\n"
+ f" across multiple conversations. The methodology was applied to itself:\n"
+ f" across {n} tracked {session_word}, the project has consumed\n"
+ f" ~{total_energy} Wh of energy, ~{total_co2}g of CO2, and ~{cost_display} in\n"
+ f" compute. Whether it produces enough value to justify those costs depends\n"
+ f" on whether anyone finds it useful. We are\n"
+ f' tracking that question.\n'
+ f" "
+)
+
+# Read and replace in all landing page copies
+pattern = r"How this was made:.*?" # NOTE(review): a trailing non-greedy .*? matches ZERO characters, so re.sub replaces only the literal "How this was made:" and leaves the old paragraph text in place — the pattern appears to be missing its end anchor (e.g. the paragraph's closing tag); confirm against the actual HTML
+for page in PAGES:
+ try:
+ with open(page) as f:
+ html = f.read()
+ new_html = re.sub(pattern, new_para, html, flags=re.DOTALL) # replaces every occurrence in the page
+ with open(page, "w") as f:
+ f.write(new_html)
+ except FileNotFoundError:
+ pass
+
+print(f"Updated: {n} {session_word}, {total_energy} Wh, {total_co2}g CO2, {cost_display}")
+PYEOF