Files
aaronAI/scripts/api.py
T

498 lines
18 KiB
Python

# Aaron AI API server: FastAPI app exposing a RAG chat pipeline backed by a
# chroma vector store, a sqlite conversation log, and a markdown memory file.
import os
import json
import sqlite3
import subprocess
import hashlib
from pathlib import Path
from datetime import datetime
from dotenv import load_dotenv
import chromadb
from sentence_transformers import SentenceTransformer
import anthropic
from fastapi import FastAPI, Request
from fastapi.responses import FileResponse, JSONResponse
# NOTE(review): StaticFiles is imported but never used in this file.
from fastapi.staticfiles import StaticFiles
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
# Secrets (ANTHROPIC_API_KEY) come from ~/aaronai/.env.
load_dotenv(Path.home() / "aaronai" / ".env")
# All local state lives under ~/aaronai/.
MEMORY_PATH = Path.home() / "aaronai" / "memory.md"
DB_PATH = str(Path.home() / "aaronai" / "db")
CONVERSATIONS_DB = str(Path.home() / "aaronai" / "conversations.db")
SETTINGS_PATH = Path.home() / "aaronai" / "settings.json"
WATCHER_LOG = str(Path.home() / "aaronai" / "watcher.log")
WATCHER_STATE = str(Path.home() / "aaronai" / "watcher_state.json")
# Document corpus root that the watcher/ingest pipeline indexes.
NEXTCLOUD_PATH = "/home/aaron/nextcloud/data/data/aaron/files"
INGEST_SCRIPT = str(Path.home() / "aaronai" / "scripts" / "ingest.py")
PYTHON = str(Path.home() / "aaronai" / "venv" / "bin" / "python3")
# UI settings used when settings.json is missing or unreadable.
DEFAULT_SETTINGS = {
"theme": "light",
"font_size": "medium",
"web_search": True,
"show_sources": True,
}
print("Loading Aaron AI...")
# Embedding model, vector store and Anthropic client are module-level
# singletons, initialized once at import time.
embedder = SentenceTransformer("all-MiniLM-L6-v2")
chroma_client = chromadb.PersistentClient(path=DB_PATH)
collection = chroma_client.get_or_create_collection(
name="aaronai",
metadata={"hnsw:space": "cosine"}
)
anthropic_client = anthropic.Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
# System prompt sent with every Anthropic request: identity, voice, and
# context-handling rules. This is a runtime string — edit content, not format.
SYSTEM_PROMPT = """You are the personal AI assistant of Aaron Nelson — computational
designer, fabrication researcher, program builder, and creative
practitioner based in the Hudson Valley.
Aaron's work sits at the intersection of computational geometry,
additive manufacturing, and physical making. He resolves complex
systems into physical reality — from Grasshopper definitions to
large-scale steel structures, from archival photographs to 3D-printed
architectural restorations, from product concepts to manufactured
goods. This throughline — computation resolving into physical form —
defines his practice across academic, consulting, and creative contexts.
He built the Hudson Valley Additive Manufacturing Center (HVAMC) from
nothing and has directed it since 2016, alongside the DDF academic
program at SUNY New Paltz. He has the skills of a founder — equipment
selection, policy creation, client development, grant writing,
curriculum design — operating within an academic structure. He
consults with IBM, Braskem, Selux and others through FWN3D. He runs
Mossygear as a product business. He makes large-scale fabricated art.
His communication style is direct, precise, and intolerant of padding
or overclaiming. He flags inaccuracies immediately and expects the
same standard from you. When helping him write, match his voice —
economical, specific, never performative. When answering questions,
cite sources and acknowledge uncertainty rather than filling gaps with
plausible-sounding content.
You have access to his complete document corpus, conversation history,
and a persistent memory file that carries his current context. Treat
the memory file as ground truth for his present situation. Use web
search automatically when current information is needed. Never
re-brief on context that's already in memory or documents."""
# Source filenames whose chunks get pinned into context for career queries.
CV_SOURCES = ["Aaron Nelson CV 2024.pdf", "Aaron Nelson CV 2025.pdf", "Aaron Nelson - CV.docx"]
def init_conversations_db():
    """Create the conversations/messages tables if they do not exist.

    Runs once at import time so every handler can assume the schema exists.
    The connection is closed in a finally block so a failed DDL statement
    does not leak the handle (the original closed only on success).
    """
    conn = sqlite3.connect(CONVERSATIONS_DB)
    try:
        c = conn.cursor()
        c.execute('''CREATE TABLE IF NOT EXISTS conversations (
            id TEXT PRIMARY KEY,
            title TEXT NOT NULL,
            created_at TEXT NOT NULL,
            updated_at TEXT NOT NULL,
            model TEXT DEFAULT 'claude-sonnet-4-6',
            message_count INTEGER DEFAULT 0
        )''')
        c.execute('''CREATE TABLE IF NOT EXISTS messages (
            id TEXT PRIMARY KEY,
            conversation_id TEXT NOT NULL,
            role TEXT NOT NULL,
            content TEXT NOT NULL,
            sources TEXT DEFAULT '[]',
            timestamp TEXT NOT NULL,
            FOREIGN KEY (conversation_id) REFERENCES conversations(id)
        )''')
        conn.commit()
    finally:
        conn.close()


init_conversations_db()
def load_settings():
    """Return persisted UI settings merged over DEFAULT_SETTINGS.

    A missing, unreadable, or malformed settings file falls back to a copy
    of the defaults instead of raising.
    """
    if SETTINGS_PATH.exists():
        try:
            stored = json.loads(SETTINGS_PATH.read_text())
            return {**DEFAULT_SETTINGS, **stored}
        except (OSError, ValueError, TypeError):
            # Narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit). ValueError covers
            # JSONDecodeError; TypeError covers a non-dict JSON payload.
            pass
    return DEFAULT_SETTINGS.copy()
def save_settings(settings):
    """Persist *settings* to SETTINGS_PATH as pretty-printed JSON."""
    serialized = json.dumps(settings, indent=2)
    SETTINGS_PATH.write_text(serialized)
def load_memory():
    """Return the persistent memory file's text, or "" when absent."""
    if not MEMORY_PATH.exists():
        return ""
    return MEMORY_PATH.read_text(encoding="utf-8")
def save_memory(content):
    """Overwrite the persistent memory file with *content* (UTF-8)."""
    MEMORY_PATH.write_text(content, encoding="utf-8")
def add_to_memory(item):
    """Append *item* as a dated bullet, ensuring a "## Notes" heading exists."""
    stamp = datetime.now().strftime("%Y-%m-%d")
    current = load_memory()
    if "## Notes" not in current:
        current += "\n\n## Notes"
    save_memory(current + f"\n- [{stamp}] {item}")
def remove_from_memory(item):
    """Drop every memory line containing *item* (case-insensitive).

    Returns the number of lines removed.
    """
    needle = item.lower()
    before = load_memory().split("\n")
    kept = [line for line in before if needle not in line.lower()]
    save_memory("\n".join(kept))
    return len(before) - len(kept)
def get_pinned_cv_context():
    """Fetch every indexed chunk whose source is one of the CV documents.

    Returns (documents, metadatas); any retrieval failure yields two empty
    lists so CV pinning degrades silently instead of breaking chat.
    """
    try:
        results = collection.get(
            where={"source": {"$in": CV_SOURCES}},
            include=["documents", "metadatas"]
        )
        return results["documents"], results["metadatas"]
    except Exception:
        # Narrowed from a bare `except:`, which also caught
        # KeyboardInterrupt/SystemExit. Best-effort is intentional here.
        return [], []
def is_professional_query(query):
    """Heuristic: does *query* look like a question about Aaron's career?

    Used to decide whether CV chunks should be pinned into the context.
    """
    lowered = query.lower()
    keywords = ("grant", "publication", "exhibition", "award", "fellowship",
                "experience", "position", "job", "career", "cv", "resume",
                "research", "work history", "accomplishment", "teaching",
                "course", "client", "consultation", "presentation", "workshop",
                "education", "degree", "institution", "service", "committee")
    for keyword in keywords:
        if keyword in lowered:
            return True
    return False
def retrieve_context(query, n_results=8):
    """Vector-search the corpus for *query*; return (chunks, sources).

    Career-sounding queries additionally get all CV chunks pinned ahead of
    the similarity hits; CV sources are then excluded from the similarity
    portion so they are never duplicated.
    """
    embedding = embedder.encode([query]).tolist()
    hits = collection.query(
        query_embeddings=embedding,
        n_results=n_results,
        include=["documents", "metadatas", "distances"]
    )
    chunks = []
    chunk_sources = []
    if is_professional_query(query):
        cv_docs, cv_metas = get_pinned_cv_context()
        for document, metadata in zip(cv_docs, cv_metas):
            chunks.append(f"[CV] {document}")
            chunk_sources.append(metadata.get("source", "CV"))
    docs, metas, dists = hits["documents"][0], hits["metadatas"][0], hits["distances"][0]
    for document, metadata, distance in zip(docs, metas, dists):
        # Cosine distance -> similarity; keep only reasonably close,
        # non-CV matches.
        if (1 - distance) > 0.3 and metadata.get("source") not in CV_SOURCES:
            chunks.append(document)
            chunk_sources.append(metadata.get("source", "unknown"))
    return chunks, chunk_sources
def get_conversation_history(conversation_id, limit=20):
    """Return the last *limit* messages of a conversation, oldest first.

    Each entry is an Anthropic-style {"role": ..., "content": ...} dict.
    """
    conn = sqlite3.connect(CONVERSATIONS_DB)
    try:
        c = conn.cursor()
        c.execute('''SELECT role, content FROM messages
                     WHERE conversation_id = ?
                     ORDER BY timestamp DESC LIMIT ?''', (conversation_id, limit))
        rows = c.fetchall()
    finally:
        # Close even on query failure so the handle is not leaked.
        conn.close()
    # Rows come back newest-first; reverse into chronological order.
    return [{"role": role, "content": content} for role, content in reversed(rows)]
def save_message(conversation_id, role, content, sources=None):
    """Insert one message row and bump the conversation's update metadata.

    *sources* is an optional list of document names, stored as a JSON array.
    """
    timestamp = datetime.now().isoformat()
    # Salt the id with random bytes: hashing only (conversation, role, time)
    # could collide for two messages written in the same microsecond.
    msg_id = hashlib.md5(
        f"{conversation_id}{role}{timestamp}{os.urandom(8).hex()}".encode()
    ).hexdigest()
    conn = sqlite3.connect(CONVERSATIONS_DB)
    try:
        c = conn.cursor()
        c.execute('''INSERT INTO messages (id, conversation_id, role, content, sources, timestamp)
                     VALUES (?, ?, ?, ?, ?, ?)''',
                  (msg_id, conversation_id, role, content,
                   json.dumps(sources or []), timestamp))
        c.execute('''UPDATE conversations SET updated_at = ?, message_count = message_count + 1
                     WHERE id = ?''', (timestamp, conversation_id))
        conn.commit()
    finally:
        conn.close()
def create_conversation(title="New conversation"):
    """Insert a new conversation row and return its 16-char id."""
    now = datetime.now().isoformat()
    # Salt with random bytes: an id derived from the timestamp alone can
    # collide when two conversations are created in the same microsecond.
    conv_id = hashlib.md5(f"{now}{os.urandom(8).hex()}".encode()).hexdigest()[:16]
    conn = sqlite3.connect(CONVERSATIONS_DB)
    try:
        c = conn.cursor()
        c.execute('''INSERT INTO conversations (id, title, created_at, updated_at)
                     VALUES (?, ?, ?, ?)''', (conv_id, title, now, now))
        conn.commit()
    finally:
        conn.close()
    return conv_id
def chat(user_message, conversation_id, settings):
    """Run one RAG-augmented exchange with Claude.

    Builds a prompt from persistent memory, retrieved document chunks, and
    recent conversation history, then loops on the Anthropic API until a
    non-tool-use response arrives. Returns (assistant_text, unique_sources).
    """
    memory = load_memory()
    context_pieces, sources = retrieve_context(user_message)
    history = get_conversation_history(conversation_id)

    sections = []
    if memory:
        sections.append(f"Aaron's persistent memory:\n\n{memory}")
    if context_pieces:
        joined = "\n\n---\n\n".join(context_pieces)
        unique_sources = list(set(sources))
        sections.append(
            f"Relevant excerpts from Aaron's documents:\n\n{joined}\n\nSources: {', '.join(unique_sources)}"
        )
    if sections:
        preamble = "\n\n====\n\n".join(sections) + "\n\n---\n\n"
    else:
        preamble = ""
    messages = history + [{"role": "user", "content": preamble + user_message}]

    use_search = settings.get("web_search", True)
    tools = [{"type": "web_search_20250305", "name": "web_search"}] if use_search else []

    while True:
        request = {
            "model": "claude-sonnet-4-6",
            "max_tokens": 2048,
            "system": SYSTEM_PROMPT,
            "messages": messages
        }
        if tools:
            request["tools"] = tools
        response = anthropic_client.messages.create(**request)
        if response.stop_reason != "tool_use":
            # Terminal response: concatenate all text blocks and finish.
            text = "".join(b.text for b in response.content if hasattr(b, "text"))
            return text, list(set(sources))
        # Tool round-trip: echo the assistant turn, then acknowledge each
        # tool_use block so the conversation can continue.
        messages.append({"role": "assistant", "content": response.content})
        acknowledgements = [
            {"type": "tool_result", "tool_use_id": b.id, "content": "Search completed"}
            for b in response.content
            if b.type == "tool_use"
        ]
        messages.append({"role": "user", "content": acknowledgements})
# NOTE(review): CORS is wide open (all origins/methods/headers) — acceptable
# for a single-user service, but revisit before exposing beyond localhost/LAN.
app = FastAPI()
app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_methods=["*"], allow_headers=["*"])
@app.get("/", response_class=FileResponse)
async def index():
    """Serve the single-page UI."""
    return FileResponse("/home/aaron/aaronai/static/index.html")
@app.get("/api/settings")
async def get_settings():
    """Return the current UI settings (defaults merged with saved values)."""
    current = load_settings()
    return JSONResponse(current)
@app.post("/api/settings")
async def update_settings(request: Request):
    """Merge the posted keys into the saved settings; return the result."""
    incoming = await request.json()
    merged = load_settings()
    merged.update(incoming)
    save_settings(merged)
    return JSONResponse(merged)
@app.get("/api/conversations")
async def list_conversations():
    """Return up to 100 conversations, most recently updated first."""
    conn = sqlite3.connect(CONVERSATIONS_DB)
    try:
        c = conn.cursor()
        c.execute('''SELECT id, title, created_at, updated_at, message_count
                     FROM conversations ORDER BY updated_at DESC LIMIT 100''')
        rows = c.fetchall()
    finally:
        # Close even on query failure so the handle is not leaked.
        conn.close()
    return JSONResponse([{
        "id": cid, "title": title, "created_at": created,
        "updated_at": updated, "message_count": count
    } for cid, title, created, updated, count in rows])
@app.post("/api/conversations")
async def new_conversation(request: Request):
    """Create a conversation; the body may carry an optional "title"."""
    body = await request.json()
    title = body.get("title", "New conversation")
    return JSONResponse({"id": create_conversation(title), "title": title})
@app.get("/api/conversations/{conv_id}/messages")
async def get_messages(conv_id: str):
    """Return every message of a conversation in chronological order."""
    conn = sqlite3.connect(CONVERSATIONS_DB)
    try:
        c = conn.cursor()
        c.execute('''SELECT role, content, sources, timestamp FROM messages
                     WHERE conversation_id = ? ORDER BY timestamp ASC''', (conv_id,))
        rows = c.fetchall()
    finally:
        # Close even on query failure so the handle is not leaked.
        conn.close()
    return JSONResponse([{
        "role": role, "content": content,
        "sources": json.loads(sources), "timestamp": ts
    } for role, content, sources, ts in rows])
@app.patch("/api/conversations/{conv_id}")
async def rename_conversation(conv_id: str, request: Request):
    """Set a conversation's title; 400 when the title is empty."""
    data = await request.json()
    title = data.get("title", "")
    if not title:
        return JSONResponse({"error": "Title required"}, status_code=400)
    conn = sqlite3.connect(CONVERSATIONS_DB)
    try:
        c = conn.cursor()
        c.execute("UPDATE conversations SET title = ? WHERE id = ?", (title, conv_id))
        conn.commit()
    finally:
        # Close even if the update raises so the handle is not leaked.
        conn.close()
    return JSONResponse({"id": conv_id, "title": title})
@app.delete("/api/conversations/{conv_id}")
async def delete_conversation(conv_id: str):
    """Delete one conversation and all of its messages."""
    conn = sqlite3.connect(CONVERSATIONS_DB)
    try:
        c = conn.cursor()
        # Delete children first; the messages FK has no ON DELETE CASCADE.
        c.execute("DELETE FROM messages WHERE conversation_id = ?", (conv_id,))
        c.execute("DELETE FROM conversations WHERE id = ?", (conv_id,))
        conn.commit()
    finally:
        conn.close()
    return JSONResponse({"deleted": conv_id})
@app.post("/api/chat")
async def chat_endpoint(request: Request):
    """Main chat entry point.

    Handles the memory meta-commands ("show memory", "remember: ...",
    "forget: ...") locally without calling the model; everything else goes
    through the RAG chat() pipeline. Creates a conversation on the fly when
    none is supplied.
    """
    data = await request.json()
    user_message = data.get("message", "").strip()
    conversation_id = data.get("conversation_id", "")
    settings = load_settings()
    if not user_message:
        # NOTE(review): returned with HTTP 200; the frontend presumably keys
        # off the "error" field, so the status code is left unchanged.
        return JSONResponse({"error": "Empty message"})
    if not conversation_id:
        conversation_id = create_conversation("New conversation")
    stripped = user_message.strip().lower()
    # --- Memory meta-commands ---
    if stripped == "show memory":
        return JSONResponse({"response": load_memory(), "sources": [], "conversation_id": conversation_id})
    if stripped.startswith("remember:"):
        item = user_message[len("remember:"):].strip()
        add_to_memory(item)
        save_message(conversation_id, "user", user_message)
        save_message(conversation_id, "assistant", f"Saved to memory: '{item}'")
        return JSONResponse({"response": f"Saved to memory: '{item}'", "sources": [], "conversation_id": conversation_id})
    if stripped.startswith("forget:"):
        item = user_message[len("forget:"):].strip()
        removed = remove_from_memory(item)
        msg = f"Removed {removed} line(s) containing '{item}'" if removed else f"Nothing found containing '{item}'"
        save_message(conversation_id, "user", user_message)
        save_message(conversation_id, "assistant", msg)
        return JSONResponse({"response": msg, "sources": [], "conversation_id": conversation_id})
    # --- Normal chat turn ---
    save_message(conversation_id, "user", user_message)
    _maybe_autotitle(conversation_id, user_message)
    response, sources = chat(user_message, conversation_id, settings)
    shown_sources = sources if settings.get("show_sources") else []
    save_message(conversation_id, "assistant", response, shown_sources)
    return JSONResponse({
        "response": response,
        "sources": shown_sources,
        "conversation_id": conversation_id
    })


def _maybe_autotitle(conversation_id, user_message):
    """Title a still-untitled conversation from its first user message.

    Uses a single connection with try/finally (the original opened two
    separate connections, and leaked one if the SELECT raised).
    """
    conn = sqlite3.connect(CONVERSATIONS_DB)
    try:
        c = conn.cursor()
        c.execute("SELECT message_count, title FROM conversations WHERE id = ?", (conversation_id,))
        row = c.fetchone()
        if row and row[0] <= 1 and row[1] == "New conversation":
            auto_title = user_message[:60] + ("..." if len(user_message) > 60 else "")
            c.execute("UPDATE conversations SET title = ? WHERE id = ?", (auto_title, conversation_id))
            conn.commit()
    finally:
        conn.close()
@app.get("/api/memory")
async def get_memory():
    """Return the raw persistent-memory markdown."""
    content = load_memory()
    return JSONResponse({"content": content})
@app.post("/api/memory")
async def update_memory(request: Request):
    """Replace the whole persistent-memory file with the posted content."""
    body = await request.json()
    save_memory(body.get("content", ""))
    return JSONResponse({"saved": True})
@app.get("/api/status")
async def get_status():
    """Aggregate health and statistics for the dashboard.

    Each sub-probe is best-effort: a failure leaves its field at the
    default value instead of failing the whole endpoint. Bare `except:`
    clauses were narrowed to `except Exception` so Ctrl-C still works.
    """
    chunk_count = collection.count()

    # Watcher heartbeat: "running" only if its status file is under 30s old.
    # NOTE(review): hard-coded path — the other watcher files use WATCHER_*
    # constants; confirm this one is intentional.
    watcher_running = False
    last_indexed = "Unknown"
    try:
        import time
        status_path = Path("/home/aaron/aaronai/watcher_status.json")
        if status_path.exists():
            status = json.loads(status_path.read_text())
            age = time.time() - status.get("timestamp", 0)
            watcher_running = status.get("running", False) and age < 30
    except Exception:
        pass

    # Timestamp of the last successful ingest, scraped from the watcher log.
    try:
        log_path = Path(WATCHER_LOG)
        if log_path.exists():
            lines = log_path.read_text().strip().split("\n")
            for line in reversed(lines):
                if "Ingestion complete" in line:
                    last_indexed = line.split(" - ")[0].strip()
                    break
    except Exception:
        pass

    # Indexed-file count: prefer the watcher's state file, else count
    # ingestible files directly under the Nextcloud tree.
    file_count = 0
    try:
        state_path = Path(WATCHER_STATE)
        if state_path.exists():
            file_count = len(json.loads(state_path.read_text()))
        else:
            nc_path = Path(NEXTCLOUD_PATH)
            if nc_path.exists():
                file_count = sum(1 for f in nc_path.rglob("*")
                                 if f.is_file() and f.suffix.lower() in {'.pdf', '.docx', '.pptx', '.txt', '.md'})
    except Exception:
        pass

    conn = sqlite3.connect(CONVERSATIONS_DB)
    try:
        c = conn.cursor()
        c.execute("SELECT COUNT(*) FROM conversations")
        conv_count = c.fetchone()[0]
    finally:
        # Close even on query failure so the handle is not leaked.
        conn.close()

    return JSONResponse({
        "aaron_ai": "running",
        "watcher": "running" if watcher_running else "stopped",
        "chunk_count": chunk_count,
        "file_count": file_count,
        "last_indexed": last_indexed,
        "conversation_count": conv_count,
        "model": "claude-sonnet-4-6",
        "nextcloud_path": NEXTCLOUD_PATH
    })
@app.post("/api/reindex")
async def trigger_reindex():
    """Kick off the ingest script in the background (fire-and-forget)."""
    try:
        subprocess.Popen([PYTHON, INGEST_SCRIPT, NEXTCLOUD_PATH])
    except Exception as e:
        return JSONResponse({"started": False, "error": str(e)})
    return JSONResponse({"started": True, "message": "Re-indexing started in background"})
@app.delete("/api/conversations")
async def clear_all_conversations():
    """Wipe every conversation and message."""
    conn = sqlite3.connect(CONVERSATIONS_DB)
    try:
        c = conn.cursor()
        # Children first; the messages FK has no ON DELETE CASCADE.
        c.execute("DELETE FROM messages")
        c.execute("DELETE FROM conversations")
        conn.commit()
    finally:
        # Close even if a delete raises so the handle is not leaked.
        conn.close()
    return JSONResponse({"cleared": True})
if __name__ == "__main__":
    # Bind on all interfaces so other machines on the network can reach the UI.
    uvicorn.run(app, host="0.0.0.0", port=8000)