diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..0b2f909 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,13 @@ +__pycache__/ +*.pyc +*.pyo +*.pyd +.Python +env/ +venv/ +.venv/ +build/ +dist/ +*.egg-info/ +discordMessages.json +memory/ diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..62a8a6b --- /dev/null +++ b/.env.example @@ -0,0 +1,16 @@ +OPENROUTER_API_KEY=sk-or-REPLACE_ME +DISCORD_BOT_TOKEN=REPLACE_ME +TARGET_USER_ID=REPLACE_ME +DISCORD_WEBHOOK_URL= +PROMPT_CATEGORY=general +PROMPT_NAME=welcome +PROMPT_CONTEXT=Container test run +NTFY_BASE_URL=https://ntfy.example.com +NTFY_TOPIC_TEMPLATE=adhdbot-{userId} +NTFY_AUTH_TOKEN= +AGENTIC_CATEGORY=agentic +AGENTIC_PROMPT_NAME=hourly_review +AGENTIC_MODE_HINT=Agentic review +AGENTIC_NOTES_LIMIT=5 +AGENTIC_OPERATOR_HINT= +AGENTIC_INTERVAL_SECONDS=3600 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..0482891 --- /dev/null +++ b/.gitignore @@ -0,0 +1,9 @@ +.env +.env.* +!.env.example +.venv/ +__pycache__/ +*.pyc +memory/ +discordMessages.json +.DS_Store diff --git a/AIInteraction.py b/AIInteraction.py new file mode 100644 index 0000000..d5b226b --- /dev/null +++ b/AIInteraction.py @@ -0,0 +1,150 @@ +import json +import os +from datetime import datetime, timezone +import urllib.error +import urllib.request +from typing import Any, Dict, List, Optional + +from Memory import MemoryManager +from AppConfig import readToolInstructions +from Notification import NotificationDispatcher + +openRouterEndpoint = "https://openrouter.ai/api/v1/chat/completions" +openRouterModel = "anthropic/claude-haiku-4.5" + +baseSystemPrompt = """You are Chelsea, an ADHD-focused executive-function coach who helps users +get unstuck with empathy, tiny actionable steps, and supportive reminder workflows. + +Principles: +- Sound warm, concise, and encouraging. Mirror the user's tone gently. +- Always clarify the desired outcome before suggesting steps. +- Break plans into 2-5 observable actions with relaxed estimates ("~5 min", "1 song"). +- Offer an initiation nudge ("Want me to save this plan or set a reminder?"). +- Never hallucinate capabilities outside notes/reminders/task breakdowns. + +- Structured actions: +- If the user clearly wants to capture a thought, emit one ```json block with + {"action":"take_note","note":""} and keep conversational text short. +- If the user wants to save a plan, emit the `store_task` JSON (title, steps, next_step, context, status). +- If the user confirms reminder details (task, timing, delivery), emit exactly ONE ```json block in this simplified shape: + { + "action": "schedule_reminder", + "reminder": { + "message": "short friendly text", + "topic": "adhdbot-", + "trigger": { + "value": "ISO 8601 timestamp" + } + } + } +- Keep the conversational reply outside the JSON block. +- When the user gives relative timing ("in 10 minutes", "tomorrow at 9"), convert it to a specific ISO 8601 timestamp + using the current UTC time provided in system context. +- Only output a JSON block after the user explicitly agrees or gives all required info. +- Outside of JSON blocks, stay conversational; never mix multiple JSON blocks in one reply. + +Whenever you emit `schedule_reminder`, assume another service will fan out push notifications, +so keep the natural language summary clear and mention timing explicitly. 
+""" + + +class AIInteraction: + """Keeps high-level AI steps together so Discord plumbing stays focused.""" + + @staticmethod + def callAI(userId, category, promptName, context, history=None, modeHint=None): + history = history or [] + userText = context or "" + messages = AIInteraction.composeMessages(userText, history, modeHint) + AIInteraction.logPrompt(messages, userId) + response = AIInteraction.requestCompletion(messages) + if response: + MemoryManager.parseAiResponse(userId, response) + NotificationDispatcher.handleAiResponse(userId, response) + return response + + @staticmethod + def composeMessages(latestUserText: str, history, modeHint: Optional[str]): + systemContent = AIInteraction.buildSystemPrompt() + current_time = datetime.now(timezone.utc).replace(microsecond=0).isoformat() + messages: List[Dict[str, str]] = [ + {"role": "system", "content": systemContent}, + {"role": "system", "content": f"Current UTC time: {current_time}"}, + ] + + if modeHint: + messages.append({ + "role": "system", + "content": f"Mode hint: {modeHint}. Blend this focus into your reply while honoring all instructions.", + }) + + allowed_roles = {"user", "assistant"} + trimmed_history = history[-12:] + for turn in trimmed_history: + role = (turn.get("role") or "").strip().lower() + content = (turn.get("content") or "").strip() + if role not in allowed_roles or not content: + continue + messages.append({"role": role, "content": content}) + + if latestUserText: + messages.append({"role": "user", "content": latestUserText}) + return messages + + @staticmethod + def buildSystemPrompt(): + instructions = readToolInstructions() + if instructions: + return f"{baseSystemPrompt}\n\nTooling contract (mandatory):\n{instructions}" + return baseSystemPrompt + + @staticmethod + def logPrompt(messages: List[Dict[str, str]], userId): + if not os.getenv("LOG_PROMPTS", "1"): + return + header = f"[ai] prompt (user={userId})" + divider = "-" * len(header) + formatted = "\n".join([f"{msg['role']}: {msg['content']}" for msg in messages]) + print(f"{header}\n{divider}\n{formatted}\n{divider}") + + @staticmethod + def requestCompletion(messages: List[Dict[str, str]]): + apiKey = os.getenv("OPENROUTER_API_KEY") + fallback = messages[-1]["content"] if messages else "" + if not apiKey: + return f"(offline mode) You said: {fallback}" + + payload = { + "model": openRouterModel, + "messages": messages, + } + encoded = json.dumps(payload).encode("utf-8") + request = urllib.request.Request( + openRouterEndpoint, + data=encoded, + method="POST", + headers={ + "Content-Type": "application/json", + "Authorization": f"Bearer {apiKey}", + }, + ) + try: + with urllib.request.urlopen(request, timeout=20) as response: + body = response.read().decode("utf-8") + except (urllib.error.URLError, urllib.error.HTTPError): + return fallback + + try: + data = json.loads(body) + choices = data.get("choices") or [] + firstChoice = choices[0] if choices else {} + message = (firstChoice.get("message") or {}).get("content") + return message or fallback + except (json.JSONDecodeError, IndexError): + return fallback + + @staticmethod + def takeNote(userId, noteContent): + if not noteContent: + return + MemoryManager.recordNote(userId, noteContent, {"source": "manual"}) diff --git a/AgenticWorkflow.py b/AgenticWorkflow.py new file mode 100644 index 0000000..e87b48c --- /dev/null +++ b/AgenticWorkflow.py @@ -0,0 +1,78 @@ +import json +import os +from typing import Any, Dict, List + +from Memory import MemoryManager +from Runner import Runner + + +def 
_env(name: str, default: str) -> str: + value = os.getenv(name) + if value is None or value.strip() == "": + return default + return value + + +def _env_int(name: str, default: int) -> int: + value = os.getenv(name) + if not value: + return default + try: + return max(int(value), 1) + except ValueError: + return default + + +class AgenticWorkflow: + """Builds the hourly context packet and routes it through the agentic prompt.""" + + @staticmethod + def buildReviewPacket(userId: str, noteLimit: int | None = None) -> Dict[str, Any]: + limit = noteLimit or _env_int("AGENTIC_NOTES_LIMIT", 5) + summaries, notes = MemoryManager.buildContextPacket(userId, noteLimit=limit) + action_items = MemoryManager.listActionItems(userId) + condensed = [] + for action in action_items: + entry = { + "id": action.get("id"), + "title": action.get("title"), + "details": action.get("details"), + "cadence": action.get("cadence"), + "interval_minutes": action.get("interval_minutes"), + "updated_at": action.get("updated_at"), + "last_progress": (action.get("progress") or [])[-1:] or [], + } + entry["recent_progress"] = (action.get("progress") or [])[-3:] + condensed.append(entry) + return { + "notes": notes, + "summaries": summaries, + "action_items": condensed, + } + + @staticmethod + def formatPacket(packet: Dict[str, Any], operatorHint: str | None = None) -> str: + sections: List[str] = [] + sections.append("Agentic sweep payload:") + sections.append(json.dumps(packet, indent=2, ensure_ascii=False)) + if operatorHint: + sections.append(f"Operator hint: {operatorHint}") + return "\n\n".join(sections) + + @staticmethod + def runHourlyReview(userId: str, operatorHint: str | None = None, history=None): + if not userId: + raise ValueError("userId is required for agentic review") + packet = AgenticWorkflow.buildReviewPacket(userId) + context = AgenticWorkflow.formatPacket(packet, operatorHint) + category = _env("AGENTIC_CATEGORY", "agentic") + prompt_name = _env("AGENTIC_PROMPT_NAME", "hourly_review") + mode_hint = _env("AGENTIC_MODE_HINT", "Agentic review") + return Runner.run( + userId, + category, + prompt_name, + context, + history=history or [], + modeHint=mode_hint, + ) diff --git a/AppConfig.py b/AppConfig.py new file mode 100644 index 0000000..730610c --- /dev/null +++ b/AppConfig.py @@ -0,0 +1,112 @@ +import json +import os + + +promptsFolderPath = os.path.join(os.path.dirname(__file__), "prompts") +schemaTemplateName = "prompt_schema.template.json" +defaultPromptsName = "defaultPrompts.json" +toolInstructionsName = "tool_instructions.md" +toolInstructionsPath = os.path.join(promptsFolderPath, toolInstructionsName) +discordLogPath = os.path.join(os.path.dirname(__file__), "discordMessages.json") +discordLogLimit = 500 + +schemaTemplateData = { + "category": "general", + "name": "welcome", + "description": "Explain what this prompt should accomplish for the ADHD assistant.", + "variables": ["user", "user_firstname", "context"], + "template": "Hey {user_firstname}! Let's work through this together. You mentioned: {context}", +} + +defaultPromptRecords = [ + { + "category": "general", + "name": "welcome", + "description": "First interaction focused on ADHD-friendly support.", + "variables": ["user", "user_firstname", "context"], + "template": "Hey {user_firstname}! I'm your ADHD-focused executive function assistant. You mentioned: {context}. Tell me where you feel stuck—reminding yourself, planning something, or getting started—and I'll help you pick a light next action. 
If you ever want me to save a plan or schedule a reminder, just say so and I'll include the right JSON payload. When you explicitly say to take or log a note (including inside the context), close with one ```json block containing {\"action\": \"take_note\", \"note\": \"\"}.", + }, + { + "category": "general", + "name": "fallback", + "description": "Backup voice when a prompt is missing.", + "variables": ["user", "context"], + "template": "Still working on that, {user}. Let's keep momentum by focusing on the next doable step from this context: {context}", + }, + { + "category": "planning", + "name": "breakdown", + "description": "Helps the user break a task into ADHD-friendly chunks and optionally store it.", + "variables": ["user", "user_firstname", "context"], + "template": "You are an executive function coach for {user_firstname}. Use the context to:\\n1. Reflect empathy in one short sentence.\\n2. Identify the desired outcome.\\n3. Break the work into 2-5 tiny, observable steps with relaxing estimates (\"~5 min\", \"1 song\" etc.).\\n4. Offer a prompt that nudges initiation (\"Want me to save this plan or set a reminder?\").\\n\\nIf the user clearly wants to save the plan, append a single ```json block with:\\n{\\n \"action\": \"store_task\",\\n \"task\": {\\n \"title\": \"short label\",\\n \"steps\": [\\n {\"order\": 1, \"description\": \"step detail\", \"duration\": \"~5 min\"}\\n ],\\n \"next_step\": \"first step text\",\\n \"context\": \"{context}\",\\n \"status\": \"not_started\"\\n }\\n}\\nOnly include the JSON when explicitly requested or confirmed; otherwise stay conversational.", + }, + { + "category": "reminders", + "name": "schedule", + "description": "Collaboratively schedules reminders for the user.", + "variables": ["user", "user_firstname", "context"], + "template": "You help {user_firstname} set ADHD-friendly reminders. Confirm the task, timing, and delivery preference. Summarize the reminder in natural language and invite any tweaks.\\n\\nWhen the user gives enough detail or explicitly says to schedule it, append one ```json block with:\\n{\\n \"action\": \"schedule_reminder\",\\n \"reminder\": {\\n \"title\": \"short label\",\\n \"details\": \"context summary\",\\n \"trigger\": {\\n \"type\": \"datetime | relative | habit\",\\n \"value\": \"ISO timestamp or human-friendly string\"\\n },\\n \"follow_up\": \"check-in question\",\\n \"metadata\": {\\n \"user\": \"{user}\",\\n \"source\": \"prompt\"\\n }\\n }\\n}\\nSkip the JSON when the reminder details are incomplete—keep the conversation going instead.", + }, +] + +defaultToolInstructions = """Tooling and JSON actions\n\n1. Only emit JSON when the user confirms they want an action performed.\n2. Wrap every payload in a single fenced ```json block.\n3. Supported payloads today: take_note, store_task, schedule_reminder.\n4. 
Keep conversational text before or after the block short and clear.\n\nWhen logging a note, output exactly:\n```json\n{\n \"action\": \"take_note\",\n \"note\": \"\"\n}\n```\nSwap in the user's wording (including emojis or punctuation) for the placeholder.\n""" + + +def ensurePromptAssets(): + if not os.path.isdir(promptsFolderPath): + os.makedirs(promptsFolderPath, exist_ok=True) + schemaPath = os.path.join(promptsFolderPath, schemaTemplateName) + if not os.path.exists(schemaPath): + with open(schemaPath, "w", encoding="utf-8") as schemaFile: + json.dump(schemaTemplateData, schemaFile, indent=2) + defaultPromptsPath = os.path.join(promptsFolderPath, defaultPromptsName) + if not os.path.exists(defaultPromptsPath): + with open(defaultPromptsPath, "w", encoding="utf-8") as defaultFile: + json.dump(defaultPromptRecords, defaultFile, indent=2) + ensureToolInstructions() + + +def ensureToolInstructions(): + if os.path.exists(toolInstructionsPath): + return + with open(toolInstructionsPath, "w", encoding="utf-8") as handle: + handle.write(defaultToolInstructions) + + +def ensureDiscordLog(): + if not os.path.exists(discordLogPath): + with open(discordLogPath, "w", encoding="utf-8") as logFile: + json.dump([], logFile) + + +def readDiscordLog(): + if not os.path.exists(discordLogPath): + return [] + try: + with open(discordLogPath, "r", encoding="utf-8") as logFile: + records = json.load(logFile) + except (json.JSONDecodeError, OSError): + return [] + if not isinstance(records, list): + return [] + return records + + +def writeDiscordLog(records): + trimmed = records[-discordLogLimit:] + with open(discordLogPath, "w", encoding="utf-8") as logFile: + json.dump(trimmed, logFile, indent=2) + + +def readToolInstructions(): + if not os.path.exists(toolInstructionsPath): + ensureToolInstructions() + try: + with open(toolInstructionsPath, "r", encoding="utf-8") as handle: + return handle.read().strip() + except OSError: + return "" + + +ensurePromptAssets() +ensureDiscordLog() diff --git a/DiscordGateway.py b/DiscordGateway.py new file mode 100644 index 0000000..6f0c5db --- /dev/null +++ b/DiscordGateway.py @@ -0,0 +1,155 @@ +import json +import os +import urllib.error +import urllib.parse +import urllib.request + +from AppConfig import readDiscordLog, writeDiscordLog + + +discordApiBase = "https://discord.com/api/v10" +discordWebhookUrl = os.getenv("DISCORD_WEBHOOK_URL") + + +class DiscordGatewayError(Exception): + pass + + +class DiscordGateway: + """Handles Discord IO while staying easy to stub.""" + + dmChannelCache = {} + + @staticmethod + def getMessages(userId, howMany, oldestFirst=False): + if DiscordGateway.canUseApi() and userId: + try: + channelId = DiscordGateway.ensureDmChannel(userId) + apiMessages = DiscordGateway.fetchChannelMessages(channelId, howMany) + if oldestFirst: + apiMessages = list(reversed(apiMessages)) + return [DiscordGateway.describeApiMessage(message) for message in apiMessages] + except DiscordGatewayError as error: + print(f"[discord] API getMessages failed: {error}") + # fallback to local log + records = readDiscordLog() + if userId: + records = [record for record in records if record.get("userId") == userId] + if not oldestFirst: + records = list(reversed(records)) + limited = records[:howMany] + return [DiscordGateway.describeLogRecord(record) for record in limited] + + @staticmethod + def sendMessage(userId, content): + if discordWebhookUrl: + DiscordGateway.postViaWebhook(content) + DiscordGateway.persistLog(userId or "webhook", content) + print("[discord] Sent 
message via webhook") + return + if DiscordGateway.canUseApi(): + try: + channelId = DiscordGateway.ensureDmChannel(userId) + DiscordGateway.postChannelMessage(channelId, content) + DiscordGateway.persistLog(userId, content) + print(f"[discord] Sent DM via API to {userId}") + return + except DiscordGatewayError as error: + print(f"[discord] API sendMessage failed: {error}") + DiscordGateway.persistLog(userId, content) + print(f"[discord] (log) DM to {userId}: {content}") + + @staticmethod + def canUseApi(): + return bool(os.getenv("DISCORD_BOT_TOKEN")) + + @staticmethod + def ensureDmChannel(userId): + if not userId: + raise DiscordGatewayError("User ID is required for DM channel") + cacheKey = str(userId) + if cacheKey in DiscordGateway.dmChannelCache: + return DiscordGateway.dmChannelCache[cacheKey] + payload = json.dumps({"recipient_id": cacheKey}).encode("utf-8") + data = DiscordGateway.apiRequest("POST", "/users/@me/channels", payload) + channelId = data.get("id") + if not channelId: + raise DiscordGatewayError("Discord API did not return a channel id") + DiscordGateway.dmChannelCache[cacheKey] = channelId + return channelId + + @staticmethod + def fetchChannelMessages(channelId, limit): + params = urllib.parse.urlencode({"limit": limit}) + path = f"/channels/{channelId}/messages?{params}" + return DiscordGateway.apiRequest("GET", path) + + @staticmethod + def postChannelMessage(channelId, content): + payload = json.dumps({"content": content}).encode("utf-8") + DiscordGateway.apiRequest("POST", f"/channels/{channelId}/messages", payload) + + @staticmethod + def apiRequest(method, path, payload=None): + token = os.getenv("DISCORD_BOT_TOKEN") + if not token: + raise DiscordGatewayError("DISCORD_BOT_TOKEN is not set") + url = f"{discordApiBase}{path}" + headers = { + "Authorization": f"Bot {token}", + } + if payload is not None: + headers["Content-Type"] = "application/json" + request = urllib.request.Request(url, data=payload, method=method, headers=headers) + try: + with urllib.request.urlopen(request, timeout=20) as response: + body = response.read().decode("utf-8") + if not body: + return {} + return json.loads(body) + except urllib.error.HTTPError as error: + responseBody = error.read().decode("utf-8") if hasattr(error, "read") else "" + raise DiscordGatewayError(f"HTTP {error.code}: {responseBody or str(error)}") + except urllib.error.URLError as error: + raise DiscordGatewayError(str(error)) + + @staticmethod + def describeApiMessage(message): + author = message.get("author") or {} + authorName = author.get("username") or author.get("id") or "unknown" + content = message.get("content") or "" + return f"{authorName}: {content}" + + @staticmethod + def describeLogRecord(record): + userPart = record.get("userId") or "unknown" + contentPart = record.get("content") or "" + return f"{userPart}: {contentPart}" + + @staticmethod + def persistLog(userId, content): + entry = { + "userId": userId, + "content": content, + } + records = readDiscordLog() + records.append(entry) + writeDiscordLog(records) + + @staticmethod + def postViaWebhook(content): + payload = json.dumps({"content": content}).encode("utf-8") + request = urllib.request.Request( + discordWebhookUrl, + data=payload, + method="POST", + headers={"Content-Type": "application/json"}, + ) + try: + with urllib.request.urlopen(request, timeout=20): + return + except urllib.error.HTTPError as error: + body = error.read().decode("utf-8") if hasattr(error, "read") else "" + raise DiscordGatewayError(f"Webhook HTTP {error.code}: {body or 
str(error)}") + except urllib.error.URLError as error: + raise DiscordGatewayError(f"Webhook error: {error}") diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..c1a6608 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,13 @@ +FROM python:3.11-slim + +ENV PYTHONUNBUFFERED=1 \ + PYTHONDONTWRITEBYTECODE=1 + +WORKDIR /app + +COPY requirements.txt ./ +RUN pip install --no-cache-dir -r requirements.txt + +COPY . . + +CMD ["uvicorn", "api:app", "--host", "0.0.0.0", "--port", "8000"] diff --git a/Memory.py b/Memory.py new file mode 100644 index 0000000..ffb9d50 --- /dev/null +++ b/Memory.py @@ -0,0 +1,252 @@ +import json +import os +from datetime import datetime +from uuid import uuid4 + +memoryFolderPath = os.path.join(os.path.dirname(__file__), "memory") +defaultTriggers = { + "triggerField": "action", + "triggerValue": "take_note", + "noteField": "note", +} + + +def ensureMemoryFolder(): + if not os.path.isdir(memoryFolderPath): + os.makedirs(memoryFolderPath, exist_ok=True) + + +ensureMemoryFolder() + + +class MemoryManager: + """Stores raw notes plus summary scaffolding for long-term context.""" + + @staticmethod + def parseAiResponse(userId, aiResponseText, triggers=None): + triggerConfig = triggers or defaultTriggers + payload = MemoryManager.extractJsonPayload(aiResponseText) + if not payload: + return None + + triggerField = triggerConfig.get("triggerField", "action") + triggerValue = triggerConfig.get("triggerValue", "take_note") + noteField = triggerConfig.get("noteField", "note") + + if payload.get(triggerField) != triggerValue: + return None + + noteText = payload.get(noteField) + if not noteText: + return None + + MemoryManager.recordNote(userId, noteText, payload) + print(f"[memory] Recorded note for {userId}: {noteText}") + return {"noteRecorded": True, "noteText": noteText} + + @staticmethod + def recordNote(userId, noteText, metadata=None): + memory = MemoryManager.loadUserMemory(userId) + notes = memory.get("notes") + if notes is None: + notes = [] + memory["notes"] = notes + entry = { + "timestamp": datetime.utcnow().isoformat() + "Z", + "note": noteText, + "metadata": metadata or {}, + } + notes.append(entry) + MemoryManager.saveUserMemory(userId, memory) + + @staticmethod + def buildContextPacket(userId, mode="summary", noteLimit=5): + memory = MemoryManager.loadUserMemory(userId) + summaries = memory.get("summaries") or [] + if mode == "raw": + return summaries, memory["notes"][-noteLimit:] + return MemoryManager.ensureSummaryHierarchy(userId, memory), memory["notes"][-noteLimit:] + + @staticmethod + def ensureSummaryHierarchy(userId, memory): + summaries = memory.get("summaries") or [] + if not summaries: + summaries.append({ + "level": 0, + "summary": "No summaries yet. 
AI should synthesize one when ready.", + "children": [], + }) + memory["summaries"] = summaries + MemoryManager.saveUserMemory(userId, memory) + return summaries + + @staticmethod + def extractJsonPayload(aiResponseText): + candidates = MemoryManager.collectJsonCandidates(aiResponseText) + for candidate in candidates: + try: + return json.loads(candidate) + except json.JSONDecodeError: + continue + return None + + @staticmethod + def collectJsonCandidates(aiResponseText): + candidates = [] + trimmed = aiResponseText.strip() + if trimmed.startswith("{") and trimmed.endswith("}"): + candidates.append(trimmed) + for line in aiResponseText.splitlines(): + stripped = line.strip() + if stripped.startswith("{") and stripped.endswith("}"): + candidates.append(stripped) + parts = aiResponseText.split("```") + for part in parts: + stripped = part.strip() + if stripped.lower().startswith("json"): + stripped = stripped[4:].lstrip() + if stripped.startswith("{") and stripped.endswith("}"): + candidates.append(stripped) + return candidates + + @staticmethod + def loadUserMemory(userId): + MemoryManager.ensureHandle() + path = MemoryManager.userMemoryPath(userId) + if not os.path.exists(path): + return {"notes": [], "summaries": [], "action_items": []} + try: + with open(path, "r", encoding="utf-8") as handle: + return json.load(handle) + except (json.JSONDecodeError, OSError): + return {"notes": [], "summaries": [], "action_items": []} + + @staticmethod + def saveUserMemory(userId, memory): + MemoryManager.ensureHandle() + path = MemoryManager.userMemoryPath(userId) + with open(path, "w", encoding="utf-8") as handle: + json.dump(memory, handle, indent=2) + + @staticmethod + def ensureHandle(): + ensureMemoryFolder() + + @staticmethod + def userMemoryPath(userId): + safeUserId = userId or "global" + return os.path.join(memoryFolderPath, f"{safeUserId}_memory.json") + + @staticmethod + def timestamp(): + return datetime.utcnow().isoformat() + "Z" + + @staticmethod + def ensureActionList(memory): + actions = memory.get("action_items") + if actions is None: + actions = [] + memory["action_items"] = actions + return actions + + @staticmethod + def listActionItems(userId): + memory = MemoryManager.loadUserMemory(userId) + return memory.get("action_items") or [] + + @staticmethod + def createActionItem(userId, title, cadence="daily", intervalMinutes=None, details=None): + cleaned_title = (title or "").strip() + if not cleaned_title: + return None + memory = MemoryManager.loadUserMemory(userId) + actions = MemoryManager.ensureActionList(memory) + now = MemoryManager.timestamp() + action = { + "id": str(uuid4()), + "title": cleaned_title, + "details": (details or "").strip(), + "cadence": cadence or "daily", + "interval_minutes": MemoryManager.normalizeInterval(intervalMinutes), + "created_at": now, + "updated_at": now, + "progress": [], + } + actions.append(action) + MemoryManager.saveUserMemory(userId, memory) + return action + + @staticmethod + def updateActionItem(userId, actionId, updates): + if not actionId: + return None + memory = MemoryManager.loadUserMemory(userId) + actions = MemoryManager.ensureActionList(memory) + target = None + for action in actions: + if action.get("id") == actionId: + target = action + break + if not target: + return None + if "title" in updates and updates["title"]: + target["title"] = updates["title"].strip() + if "details" in updates: + target["details"] = (updates["details"] or "").strip() + if "cadence" in updates and updates["cadence"]: + target["cadence"] = 
updates["cadence"] + if "interval_minutes" in updates: + target["interval_minutes"] = MemoryManager.normalizeInterval(updates["interval_minutes"]) + target["updated_at"] = MemoryManager.timestamp() + MemoryManager.saveUserMemory(userId, memory) + return target + + @staticmethod + def deleteActionItem(userId, actionId): + if not actionId: + return False + memory = MemoryManager.loadUserMemory(userId) + actions = MemoryManager.ensureActionList(memory) + original_len = len(actions) + actions[:] = [action for action in actions if action.get("id") != actionId] + if len(actions) == original_len: + return False + MemoryManager.saveUserMemory(userId, memory) + return True + + @staticmethod + def recordActionProgress(userId, actionId, status, note=None): + if not actionId: + return None + memory = MemoryManager.loadUserMemory(userId) + actions = MemoryManager.ensureActionList(memory) + target = None + for action in actions: + if action.get("id") == actionId: + target = action + break + if not target: + return None + progress_list = target.get("progress") + if progress_list is None: + progress_list = [] + target["progress"] = progress_list + entry = { + "timestamp": MemoryManager.timestamp(), + "status": (status or "update").strip() or "update", + "note": (note or "").strip(), + } + progress_list.append(entry) + target["updated_at"] = entry["timestamp"] + MemoryManager.saveUserMemory(userId, memory) + return entry + + @staticmethod + def normalizeInterval(value): + if value is None or value == "": + return None + try: + parsed = int(value) + except (ValueError, TypeError): + return None + return max(parsed, 0) diff --git a/Notification.py b/Notification.py new file mode 100644 index 0000000..ee4ed4b --- /dev/null +++ b/Notification.py @@ -0,0 +1,127 @@ +import json +import os +from datetime import datetime, timezone +from typing import Any, Dict + +import requests + +from Memory import MemoryManager + + +ntfyBaseUrl = os.getenv("NTFY_BASE_URL") +ntfyTopicTemplate = os.getenv("NTFY_TOPIC_TEMPLATE", "adhdbot-{userId}") +ntfyAuthToken = os.getenv("NTFY_AUTH_TOKEN") + + +class NotificationDispatcher: + """Lightweight bridge to ntfy for reminder payloads.""" + + @staticmethod + def handleAiResponse(userId: str, aiResponseText: str): + if not ntfyBaseUrl: + return + candidates = MemoryManager.collectJsonCandidates(aiResponseText) + for candidate in candidates: + try: + payload = json.loads(candidate) + except json.JSONDecodeError: + continue + if payload.get("action") != "schedule_reminder": + continue + reminder = payload.get("reminder") or {} + NotificationDispatcher.sendReminder(userId, reminder) + + @staticmethod + def sendReminder(userId: str, reminder: Dict[str, Any]): + metadata = reminder.get("metadata") or {} + actualUser = userId or reminder.get("user") or metadata.get("user") or "user" + topic = reminder.get("topic") or metadata.get("topic") + if not topic: + topic = ntfyTopicTemplate.format(userId=actualUser) + url = NotificationDispatcher.buildTopicUrl(topic) + if not url: + return + + message_text = str(reminder.get("message") or reminder.get("details") or "Reminder from ADHDbot").strip() + if not message_text: + message_text = "Reminder from ADHDbot" + + due_text = NotificationDispatcher.normalizeTrigger(reminder) + body_lines = [message_text] + if due_text: + body_lines.append(f"Due: {due_text}") + body = "\n".join(body_lines) + + headers = NotificationDispatcher.buildHeaders(message_text) + payload = body.encode("utf-8") + + try: + NotificationDispatcher.postToNtfy(url, payload, headers) 
+ print(f"[notify] Sent reminder to ntfy topic '{topic}'") + except UnicodeEncodeError: + safe_headers = NotificationDispatcher.stripUnicodeHeaders(headers) + try: + NotificationDispatcher.postToNtfy(url, payload, safe_headers) + print(f"[notify] Sent reminder to ntfy topic '{topic}' (header sanitized)") + except (UnicodeEncodeError, requests.RequestException) as error: + print(f"[notify] Failed to send reminder after sanitizing headers: {error}") + except requests.RequestException as error: + print(f"[notify] Failed to send reminder: {error}") + + @staticmethod + def buildTopicUrl(topic: str) -> str: + if not topic: + return "" + base = ntfyBaseUrl.rstrip("/") + topicSlug = topic.lstrip("/") + return f"{base}/{topicSlug}" + + @staticmethod + def buildHeaders(message_text: str) -> Dict[str, str]: + headers = {"Title": NotificationDispatcher.buildTitleHeader(message_text)} + if ntfyAuthToken: + headers["Authorization"] = f"Bearer {ntfyAuthToken}" + return headers + + @staticmethod + def stripUnicodeHeaders(headers: Dict[str, str]) -> Dict[str, str]: + safe_headers: Dict[str, str] = {} + for key, value in headers.items(): + if key.lower() == "title": + safe_headers[key] = NotificationDispatcher.buildTitleHeader(value or "") + continue + safe_headers[key] = value + safe_headers.setdefault("Title", "Reminder from ADHDbot") + return safe_headers + + @staticmethod + def postToNtfy(url: str, payload: bytes, headers: Dict[str, str]): + response = requests.post(url, data=payload, headers=headers, timeout=10) + response.raise_for_status() + + @staticmethod + def buildTitleHeader(text: str) -> str: + snippet = (text or "").strip()[:120] + if not snippet: + snippet = "Reminder from ADHDbot" + ascii_only = snippet.encode("ascii", "ignore").decode("ascii").strip() + return ascii_only or "Reminder from ADHDbot" + + @staticmethod + def normalizeTrigger(reminder: Dict[str, Any]): + trigger = reminder.get("trigger") or {} + value = trigger.get("value") + if not value: + return "" + try: + text = str(value).strip() + if text.endswith("Z"): + text = text[:-1] + "+00:00" + parsed = datetime.fromisoformat(text) + if not parsed.tzinfo: + parsed = parsed.replace(tzinfo=timezone.utc) + trigger["value"] = parsed.isoformat() + reminder["trigger"] = trigger + return parsed.isoformat() + except ValueError: + return str(value) diff --git a/PROMPTS.md b/PROMPTS.md new file mode 100644 index 0000000..041f590 --- /dev/null +++ b/PROMPTS.md @@ -0,0 +1,85 @@ +# Prompt Reference + +This project now ships with ADHD-friendly prompts that tell the assistant how to +handle reminders, break tasks down, and kick off work sessions. All prompts live +in `prompts/defaultPrompts.json` (auto-generated from `AppConfig.defaultPromptRecords` +when missing) and are loaded through `PromptLibrary`. + +## Prompt Catalog + +| Category | Name | Purpose | +|-----------|------------|---------| +| `general` | `welcome` | Greets the user, reminds them they can ask for planning help or reminders, and references the incoming context. | +| `general` | `fallback` | Keeps momentum if a prompt lookup fails. | +| `planning`| `breakdown`| Acts as an executive-function coach: empathize, define the outcome, split work into tiny observable steps, and gently ask whether to save the plan or set a reminder. | +| `reminders` | `schedule` | Confirms reminder details, summarizes them, and—when the user approves—emits a JSON payload that downstream services can consume. 
| +| `agentic` | `hourly_review` | Autonomous sweep that reviews notes plus the persistent action list to decide whether to trigger reminders, notes, or other workflows. | + +You can add more prompts by dropping additional JSON records into `prompts/`. + +## Structured JSON Hooks + +The prompts guide the model to output placeholder JSON, similar to how the +memory subsystem listens for `take_note` actions. Downstream services can +inspect responses for these payloads and act on them. + +### Store a Task Plan + +Triggered from the `planning/breakdown` prompt when the user explicitly asks to +save the plan. + +```json +{ + "action": "store_task", + "task": { + "title": "short label", + "steps": [ + {"order": 1, "description": "step detail", "duration": "~5 min"} + ], + "next_step": "first step text", + "context": "", + "status": "not_started" + } +} +``` + +### Schedule a Reminder + +Triggered from the `reminders/schedule` prompt once timing and content are +locked in with the user. + +```json +{ + "action": "schedule_reminder", + "reminder": { + "title": "short label", + "details": "context summary", + "trigger": { + "type": "datetime | relative | habit", + "value": "ISO timestamp or human-friendly string" + }, + "follow_up": "check-in question", + "metadata": { + "user": "", + "source": "prompt" + } + } +} +``` + +### Notes + +- Only output one JSON block per response, wrapped in ```json fences, when the + user confirms they want the assistant to act. +- If details are incomplete, stay conversational and gather what you need. +- Additional actions can be added later by extending the prompt instructions and + listening for new `action` values just like `take_note`. + +### Agentic Hourly Review + +The `agentic/hourly_review` prompt consumes the JSON blob produced by +`AgenticWorkflow.buildReviewPacket`. It summarizes recent notes plus the +modifiable action list (cadence, intervals, latest progress) and decides if +anything needs escalation. When it does, it reuses the same structured actions +(`take_note`, `store_task`, `schedule_reminder`) so downstream tooling can react +consistently. When nothing is due it simply acknowledges the sweep. diff --git a/PromptLibrary.py b/PromptLibrary.py new file mode 100644 index 0000000..5512378 --- /dev/null +++ b/PromptLibrary.py @@ -0,0 +1,51 @@ +import json +import os + +from AppConfig import promptsFolderPath, schemaTemplateName + + +class PromptLibrary: + """Simple prompt storage; swap with DB/API later without touching callers.""" + + promptCatalog = {} + + @staticmethod + def reloadCatalog(): + catalog = {} + if os.path.isdir(promptsFolderPath): + for fileName in os.listdir(promptsFolderPath): + if not fileName.endswith(".json"): + continue + if fileName == schemaTemplateName: + continue + filePath = os.path.join(promptsFolderPath, fileName) + try: + with open(filePath, "r", encoding="utf-8") as promptFile: + records = json.load(promptFile) + except (json.JSONDecodeError, OSError): + continue + if not isinstance(records, list): + records = [records] + for record in records: + category = record.get("category") + name = record.get("name") + template = record.get("template") + if not category or not name or not template: + continue + categoryPrompts = catalog.setdefault(category, {}) + categoryPrompts[name] = template + generalPrompts = catalog.setdefault("general", {}) + if "fallback" not in generalPrompts: + generalPrompts["fallback"] = "Still working on that, {user}. 
Here's what I can do next: {context}" + PromptLibrary.promptCatalog = catalog + + @staticmethod + def fetch(category, promptName): + return PromptLibrary.promptCatalog.get(category, {}).get(promptName) + + @staticmethod + def fallback(): + return PromptLibrary.promptCatalog["general"]["fallback"] + + +PromptLibrary.reloadCatalog() diff --git a/README.md b/README.md index 6286e25..87c862d 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,14 @@ # ADHDbot +ADHDbot is a FastAPI + Discord assistant that captures ADHD-friendly notes, breaks work into tiny steps, and pipes confirmed reminders into ntfy so your phone vibrates when it matters. The repo also bundles an hourly “agentic sweep” worker and a lightweight web console for experimenting with prompts and action items. + +## At a glance +- Opinionated system prompt + tooling contract wired through OpenRouter (Claude Haiku 4.5 by default). +- FastAPI surface area for chat runs, notes, and persistent action items—served by Docker or a bare Python venv. +- Notification bridge that turns `schedule_reminder` JSON into ntfy pushes (bring your own topic + auth). +- Hourly agentic workflow that summarizes memory + actions, then nudges the user via a dedicated prompt. +- Static React console (`web_App.tsx`) for local demos: send prompts, review transcripts, and edit action items without curl. + ## Quick Start 1. Copy the example environment file and fill in your secrets: @@ -13,12 +22,39 @@ ``` - `./memory` is bind-mounted into the container (`./memory:/app/memory`), so any saved notes appear in the repo directly. - `.env` is auto-loaded and the FastAPI service is exposed on `http://localhost:8000`. + - The compose stack now launches two services: `adhdbot` (the FastAPI/Discord gateway) and `agentic_worker`, a companion process that runs the hourly sweep loop. 3. Or build/run manually if you prefer the raw Docker commands: ```bash docker build -t adhdbot . docker run --rm -p 8000:8000 --env-file .env -v "$PWD/memory:/app/memory" adhdbot ``` +## Local development (no Docker) + +1. Create a virtual environment and install deps: + ```bash + python3 -m venv .venv + source .venv/bin/activate + pip install -r requirements.txt + ``` +2. Copy `.env.example` to `.env` and fill in the same secrets the container expects. +3. Launch the API with reload and rich logs: + ```bash + uvicorn api:app --reload --port 8000 + ``` +4. (Optional) start the hourly worker in another shell to mirror the compose setup: + ```bash + AGENTIC_INTERVAL_SECONDS=900 python agentic_worker.py + ``` +5. Run one-off prompts without FastAPI by calling the helper scripts: + ```bash + # Runs the main conversational prompt (uses env defaults for category/name/context) + python main.py + + # Forces the hourly sweep packet through the agentic prompt once + python agentic_review.py + ``` + ### API usage Once the container is running, hit the API to trigger a prompt flow: @@ -28,21 +64,90 @@ curl -X POST http://localhost:8000/run \ -H "Content-Type: application/json" \ -d '{ "userId": "chelsea", - "category": "general", - "promptName": "welcome", - "context": "Take a note that the user is testing the system you're being called from" + "context": "Remind me in 10 minutes to stretch.", + "history": [ + {"role": "user", "content": "Hi"}, + {"role": "assistant", "content": "Hello!"} + ], + "modeHint": "Reminder" }' ``` Endpoints: - `GET /health` – simple liveness check. -- `POST /run` – triggers `Runner.run`; pass `userId`, `category`, `promptName`, and `context` to override defaults from `.env`. 
+- `POST /run` – conversational entry point. Fields: + - `userId` (optional) – defaults to `TARGET_USER_ID`. + - `context` – the latest user message. + - `history` (optional) – array of `{role:"user"|"assistant", content:"..."}` representing prior turns (most recent last). + - `modeHint` (optional) – short string that nudges tone/behavior ("Planning", "Reminder", etc.). + - `category` / `promptName` remain for backward compatibility but no longer swap entire templates. +- `GET /users/{userId}/notes?limit=10` – fetch the most recent notes (limit defaults to 10, use `limit=0` for all). +- `POST /users/{userId}/notes` – persist a note manually by posting `{ "note": "text", "metadata": { ... } }`. +- `GET /users/{userId}/memory` – full summaries + notes payload for the user. +- `GET /users/{userId}/actions` – list the modifiable daily/periodic action items tied to that user's memory. +- `POST /users/{userId}/actions` – create a new action item (`title`, optional `details`, `cadence`, `interval_minutes`). +- `PUT /users/{userId}/actions/{actionId}` – update an item in-place (title, cadence, interval, or details). +- `DELETE /users/{userId}/actions/{actionId}` – remove it entirely. +- `POST /users/{userId}/actions/{actionId}/progress` – append a progress entry (`status`, optional `note`) so the hourly sweep knows the latest state. +- `POST /users/{userId}/notes/test` – quick QA helper that reuses the welcome prompt with a custom `context` JSON body. +- `GET /prompts` – inspect the currently loaded prompt catalog. +- `POST /prompts/reload` – force a reload from the `prompts/` folder. Environment variables of interest (see `.env.example`): - `OPENROUTER_API_KEY` – OpenRouter key used by `AIInteraction`. - `DISCORD_BOT_TOKEN` / `TARGET_USER_ID` / `DISCORD_WEBHOOK_URL` – Discord plumbing. - `PROMPT_CATEGORY`, `PROMPT_NAME`, `PROMPT_CONTEXT` – defaults for the `/run` endpoint. - `LOG_PROMPTS` (default `1`) – when truthy, every outgoing prompt is logged to stdout so you can audit the final instructions sent to the LLM. +- `NTFY_BASE_URL` – when set (e.g., `https://ntfy.scorpi.us`), reminder payloads with `action: schedule_reminder` will be POSTed to ntfy. +- `NTFY_TOPIC_TEMPLATE` – optional format string for topics (default `adhdbot-{userId}`); override per reminder via `reminder.metadata.topic`. +- `NTFY_AUTH_TOKEN` – optional bearer token if your ntfy server requires auth. +- `AGENTIC_CATEGORY` / `AGENTIC_PROMPT_NAME` / `AGENTIC_MODE_HINT` – control which prompt handles the hourly agentic sweep (defaults: `agentic/hourly_review`, hint "Agentic review"). +- `AGENTIC_NOTES_LIMIT` – how many of the most recent notes to include in the sweep payload (default `5`). +- `AGENTIC_OPERATOR_HINT` – optional text passed to `agentic_review.py` so you can bias the sweep for a given run (cron, manual nudge, etc.). +- `AGENTIC_INTERVAL_SECONDS` – cadence for the always-on worker loop (defaults to 3600 seconds/1 hour). + +### Frontend console + +- `web_App.tsx` + `web_App.css` describe a quick React shell that talks directly to `/api/run` and the action endpoints. Drop the file into any Vite/CRA sandbox or use it as design reference for your own console. +- The UI stores chat history in `localStorage`, mirrors the three built-in prompts (“general”, “planning”, “reminders”), and exposes an action-item panel with CRUD + progress logging—so you can test the API without Postman. +- When hosting the FastAPI server, make sure it serves static assets or proxy `/api/*` so the console can fetch without CORS gymnastics. 
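For a non-curl check of the action-item endpoints listed above, here is a minimal Python sketch; it assumes the API is reachable at `http://localhost:8000` and uses `chelsea` as a stand-in user id (any `TARGET_USER_ID` works).

```python
import requests

BASE = "http://localhost:8000"  # local uvicorn/docker-compose instance (assumption)
USER = "chelsea"                # example user id

# Create a daily action item; only "title" is required.
created = requests.post(
    f"{BASE}/users/{USER}/actions",
    json={"title": "Water the plants", "cadence": "daily", "interval_minutes": 1440},
    timeout=10,
)
created.raise_for_status()
# The endpoint returns the full list; the newly created item is appended last.
action_id = created.json()["action_items"][-1]["id"]

# Log progress so the hourly agentic sweep sees the latest state.
requests.post(
    f"{BASE}/users/{USER}/actions/{action_id}/progress",
    json={"status": "done", "note": "watered before lunch"},
    timeout=10,
).raise_for_status()

# Read everything back, including the rolling progress history.
print(requests.get(f"{BASE}/users/{USER}/actions", timeout=10).json())
```

The same calls work under the `/api` prefix if you go through the static console's proxy.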
+ +### Reminder payloads + +When the assistant schedules a reminder it emits a single JSON block: + +```json +{ + "action": "schedule_reminder", + "reminder": { + "message": "short friendly text", + "topic": "adhdbot-", + "trigger": { + "value": "2025-11-11T02:41:42+00:00" + } + } +} +``` + +The backend automatically converts relative phrases ("in 10 minutes") into the ISO timestamp above and POSTs the message to the ntfy topic (default `https://ntfy.scorpi.us/adhdbot-`), so subscribing to that topic on your phone is all you need for push notifications. + +### Daily / Periodic Action List + Hourly Agentic Sweep + +- Action items share the same storage as notes inside `memory/_memory.json` under the `action_items` key. Each entry tracks `title`, `cadence`, optional `interval_minutes`, `details`, and a rolling `progress` history. +- Use the action API endpoints (above) to add/remove/edit entries or append `status` updates—CLI, scripts, or the UI can call them exactly like the note endpoints. The bundled `web_App.tsx` (served by the static UI) now surfaces a lightweight management panel to create actions, log progress, and delete entries without touching curl. +- `AgenticWorkflow.buildReviewPacket` compiles the latest notes plus the action list into a JSON blob and feeds it into the `agentic/hourly_review` prompt. The new helper script `agentic_review.py` calls this flow; point a cron/systemd timer at it (hourly) so the autopilot can look for overdue habits or opportunities. +- `agentic_worker.py` wraps the same helper in a persistent loop. The `agentic_worker` service defined in `docker-compose.yml` runs it with the same `.env` file, so deploying the stack automatically keeps the hourly sweep online. Adjust cadence via `AGENTIC_INTERVAL_SECONDS` or stop the service if you prefer to trigger sweeps manually. +- Customize the autopilot without code by editing `prompts/defaultPrompts.json` (or adding a sibling file) to adjust `agentic/hourly_review`, then reload prompts or rebuild the container. + +## Architecture cheat sheet + +- **`api.py` (FastAPI)**: exposes chat, memory, action-item, and prompt-catalog routes. It uses Pydantic models for validation and wraps every handler with `ensureUserId`/`MemoryManager` helpers so non-FastAPI callers stay lean. +- **`Runner.py` + `AIInteraction.py`**: glue between your request and OpenRouter. `Runner` is a thin façade; `AIInteraction` composes the system prompt, trims chat history, logs prompts when `LOG_PROMPTS=1`, and post-processes responses for memory + notifications. +- **`Memory.py`**: owns all persistence under `memory/_memory.json` (notes, summaries, and `action_items`). JSON blocks emitted by the model (`take_note`, `schedule_reminder`, etc.) land here before any downstream automations run. +- **`Notification.py`**: watches the same responses for `schedule_reminder` payloads and relays them to ntfy with sanitized titles + timestamps. Leave `NTFY_BASE_URL` unset to disable the bridge without touching code. +- **`AgenticWorkflow.py` + `agentic_worker.py`**: build the hourly sweep packet (latest notes, summaries, action progress) and push it through `agentic/hourly_review`. Run `agentic_worker` via Docker Compose or your own cron/systemd timer for 24/7 coverage. +- **`DiscordGateway.py`**: optional DM/webhook plumbing so every assistant reply can bounce straight into Discord when `DISCORD_BOT_TOKEN` or `DISCORD_WEBHOOK_URL` is configured. 
+- **Prompts folder**: `prompts/defaultPrompts.json` ships with sane defaults; drop additional JSON files in the same folder and call `POST /prompts/reload` to hot-swap templates. Tooling/JSON contract lives in `prompts/tool_instructions.md`. ## Prompt + tooling customization diff --git a/Runner.py b/Runner.py new file mode 100644 index 0000000..62dfc4e --- /dev/null +++ b/Runner.py @@ -0,0 +1,25 @@ +from AIInteraction import AIInteraction + + +class Runner: + """Small façade so other modules only import one thing.""" + + @staticmethod + def run(userId, category, promptName, context, history=None, modeHint=None): + message = AIInteraction.callAI( + userId, + category, + promptName, + context, + history=history, + modeHint=modeHint, + ) + return message + + @staticmethod + def sendToDiscord(userId, message): + if not userId: + return + from DiscordGateway import DiscordGateway # lazy import to avoid dependency in simple runs + + DiscordGateway.sendMessage(userId, message) diff --git a/agentic_review.py b/agentic_review.py new file mode 100644 index 0000000..ce5939f --- /dev/null +++ b/agentic_review.py @@ -0,0 +1,14 @@ +import os + +from AgenticWorkflow import AgenticWorkflow + + +def main(): + user_id = os.getenv("TARGET_USER_ID") + operator_hint = os.getenv("AGENTIC_OPERATOR_HINT") + response = AgenticWorkflow.runHourlyReview(user_id, operator_hint) + print(f"[agentic] response: {response}") + + +if __name__ == "__main__": + main() diff --git a/agentic_worker.py b/agentic_worker.py new file mode 100644 index 0000000..6af63e6 --- /dev/null +++ b/agentic_worker.py @@ -0,0 +1,34 @@ +import os +import time + +from AgenticWorkflow import AgenticWorkflow + + +def wait_seconds() -> int: + raw = os.getenv("AGENTIC_INTERVAL_SECONDS", "3600") + try: + value = int(raw) + except ValueError: + return 3600 + return max(value, 60) + + +def main(): + user_id = os.getenv("TARGET_USER_ID") + operator_hint = os.getenv("AGENTIC_OPERATOR_HINT") + interval = wait_seconds() + if not user_id: + raise SystemExit("TARGET_USER_ID is required for agentic_worker") + + print(f"[agentic] worker booted (interval={interval}s, user={user_id})") + while True: + try: + response = AgenticWorkflow.runHourlyReview(user_id, operator_hint) + print(f"[agentic] sweep result: {response}") + except Exception as error: # pragma: no cover + print(f"[agentic] sweep failed: {error}") + time.sleep(interval) + + +if __name__ == "__main__": + main() diff --git a/api.py b/api.py new file mode 100644 index 0000000..5ed333e --- /dev/null +++ b/api.py @@ -0,0 +1,258 @@ +import os +from typing import Any, Dict, List, Optional + +from fastapi import APIRouter, FastAPI, HTTPException, Response +from pydantic import BaseModel, Field + +from Runner import Runner +from Memory import MemoryManager +from PromptLibrary import PromptLibrary + +app = FastAPI(title="ADHDbot API") +router = APIRouter() + + +class ChatTurn(BaseModel): + role: str + content: str + + +class RunRequest(BaseModel): + userId: Optional[str] = None + category: str = "general" + promptName: str = "welcome" + context: str = "API triggered run" + history: List[ChatTurn] = Field(default_factory=list) + modeHint: Optional[str] = None + + +class RunResponse(BaseModel): + userId: Optional[str] + category: str + promptName: str + context: str + message: str + + +class NoteCreate(BaseModel): + note: str = Field(..., min_length=1, description="Raw text of the note") + metadata: Dict[str, Any] = Field(default_factory=dict) + + +class NotesResponse(BaseModel): + userId: str + notes: 
List[Dict[str, Any]] + + +class MemoryResponse(BaseModel): + userId: str + summaries: List[Dict[str, Any]] + notes: List[Dict[str, Any]] + + +class PromptCatalogResponse(BaseModel): + catalog: Dict[str, Dict[str, str]] + + +class ContextRequest(BaseModel): + context: str = Field(..., min_length=1) + + +class ActionItemRecord(BaseModel): + id: str + title: str + cadence: str + details: Optional[str] = None + interval_minutes: Optional[int] = None + created_at: str + updated_at: str + progress: List[Dict[str, Any]] = Field(default_factory=list) + + +class ActionItemCreate(BaseModel): + title: str = Field(..., min_length=1) + cadence: str = Field(default="daily") + details: Optional[str] = None + interval_minutes: Optional[int] = Field(default=None, ge=0) + + +class ActionItemUpdate(BaseModel): + title: Optional[str] = Field(default=None, min_length=1) + cadence: Optional[str] = None + details: Optional[str] = None + interval_minutes: Optional[int] = Field(default=None, ge=0) + + +class ActionItemProgressCreate(BaseModel): + status: str = Field(default="update", min_length=1) + note: Optional[str] = None + + +class ActionItemsResponse(BaseModel): + userId: str + action_items: List[ActionItemRecord] + + +def defaultUserId(): + return os.getenv("TARGET_USER_ID") + + +@router.get("/health") +def health(): + return {"status": "ok"} + + +@router.post("/run", response_model=RunResponse) +def run_bot(request: RunRequest): + userId = ensureUserId(request.userId) + message = Runner.run( + userId, + request.category, + request.promptName, + request.context, + history=[turn.model_dump() for turn in request.history], + modeHint=request.modeHint, + ) + return RunResponse( + userId=userId, + category=request.category, + promptName=request.promptName, + context=request.context, + message=message, + ) + + +@router.get("/users/{userId}/notes", response_model=NotesResponse) +def get_user_notes(userId: str, limit: int = 10): + memory = MemoryManager.loadUserMemory(userId) + notes = memory.get("notes", []) + if limit > 0: + notes = notes[-limit:] + return NotesResponse(userId=userId, notes=notes) + + +@router.post("/users/{userId}/notes", response_model=NotesResponse, status_code=201) +def create_user_note(userId: str, payload: NoteCreate): + cleaned_metadata = payload.metadata or {} + MemoryManager.recordNote(userId, payload.note, cleaned_metadata) + updated = MemoryManager.loadUserMemory(userId) + return NotesResponse(userId=userId, notes=updated.get("notes", [])) + + +@router.get("/users/{userId}/memory", response_model=MemoryResponse) +def get_user_memory(userId: str): + memory = MemoryManager.loadUserMemory(userId) + return MemoryResponse( + userId=userId, + notes=memory.get("notes", []), + summaries=memory.get("summaries", []), + ) + + +@router.get("/users/{userId}/actions", response_model=ActionItemsResponse) +def list_user_actions(userId: str): + actions = MemoryManager.listActionItems(userId) + return ActionItemsResponse(userId=userId, action_items=actions) + + +@router.post("/users/{userId}/actions", response_model=ActionItemsResponse, status_code=201) +def create_user_action(userId: str, payload: ActionItemCreate): + created = MemoryManager.createActionItem( + userId, + payload.title, + cadence=payload.cadence, + intervalMinutes=payload.interval_minutes, + details=payload.details, + ) + if not created: + raise HTTPException(status_code=400, detail="title is required") + actions = MemoryManager.listActionItems(userId) + return ActionItemsResponse(userId=userId, action_items=actions) + + 
+@router.put("/users/{userId}/actions/{actionId}", response_model=ActionItemRecord) +def update_user_action(userId: str, actionId: str, payload: ActionItemUpdate): + updated = MemoryManager.updateActionItem( + userId, + actionId, + { + "title": payload.title, + "details": payload.details, + "cadence": payload.cadence, + "interval_minutes": payload.interval_minutes, + }, + ) + if not updated: + raise HTTPException(status_code=404, detail="Action item not found") + return ActionItemRecord.model_validate(updated) + + +@router.delete("/users/{userId}/actions/{actionId}", status_code=204) +def delete_user_action(userId: str, actionId: str): + deleted = MemoryManager.deleteActionItem(userId, actionId) + if not deleted: + raise HTTPException(status_code=404, detail="Action item not found") + return Response(status_code=204) + + +@router.post( + "/users/{userId}/actions/{actionId}/progress", + response_model=ActionItemRecord, + status_code=201, +) +def add_action_progress(userId: str, actionId: str, payload: ActionItemProgressCreate): + recorded = MemoryManager.recordActionProgress( + userId, + actionId, + status=payload.status, + note=payload.note, + ) + if not recorded: + raise HTTPException(status_code=404, detail="Action item not found") + action = resolve_action_or_404(userId, actionId) + return ActionItemRecord.model_validate(action) + + +@router.post("/prompts/reload", response_model=PromptCatalogResponse) +def reload_prompts(): + PromptLibrary.reloadCatalog() + return PromptCatalogResponse(catalog=PromptLibrary.promptCatalog) + + +@router.get("/prompts", response_model=PromptCatalogResponse) +def list_prompts(): + if not PromptLibrary.promptCatalog: + PromptLibrary.reloadCatalog() + return PromptCatalogResponse(catalog=PromptLibrary.promptCatalog) + + +@router.post("/users/{userId}/notes/test", response_model=RunResponse) +def force_note_capture(userId: str, payload: ContextRequest): + """Helper endpoint for QA to trigger the welcome prompt with a custom context.""" + message = Runner.run(userId, "general", "welcome", payload.context) + return RunResponse( + userId=userId, + category="general", + promptName="welcome", + context=payload.context, + message=message, + ) + + +def ensureUserId(userId: Optional[str]) -> str: + resolved = userId or defaultUserId() + if not resolved: + raise HTTPException(status_code=400, detail="userId is required") + return resolved + + +def resolve_action_or_404(userId: str, actionId: str) -> Dict[str, Any]: + actions = MemoryManager.listActionItems(userId) + for action in actions: + if action.get("id") == actionId: + return action + raise HTTPException(status_code=404, detail="Action item not found") + + +app.include_router(router) +app.include_router(router, prefix="/api") diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..a97e5ef --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,22 @@ +version: "3.8" + +services: + adhdbot: + build: . + env_file: + - .env + volumes: + - ./memory:/app/memory + ports: + - "8000:8000" + restart: unless-stopped + agentic_worker: + build: . + command: ["python", "agentic_worker.py"] + env_file: + - .env + volumes: + - ./memory:/app/memory + restart: unless-stopped + depends_on: + - adhdbot diff --git a/import Discord.py b/import Discord.py new file mode 100644 index 0000000..4d7a1e4 --- /dev/null +++ b/import Discord.py @@ -0,0 +1,23 @@ +""" +Coding notes for future me: +- Everything stays in flat, single-purpose classes used as namespaces, not OOP. 
+- Methods and variables use camelCase everywhere; no snake_case or underscores. +- Type hints stay out of the file to keep things lightweight and editable inside the IDE. +- Placeholder behavior that you haven't implemented yet should be explicit via `pass`. +- Keep each class focused on one concern (prompts, building text, AI orchestration, Discord IO, runner). +- Favor plain dicts and strings so the code reads like pseudo code and is easy to swap out later. +""" + +from AIInteraction import AIInteraction # noqa: F401 +from AgenticWorkflow import AgenticWorkflow # noqa: F401 +from DiscordGateway import DiscordGateway # noqa: F401 +from PromptLibrary import PromptLibrary # noqa: F401 +from Runner import Runner # noqa: F401 + +__all__ = [ + "AIInteraction", + "AgenticWorkflow", + "DiscordGateway", + "PromptLibrary", + "Runner", +] diff --git a/intranet/index.html b/intranet/index.html new file mode 100644 index 0000000..f89a51e --- /dev/null +++ b/intranet/index.html @@ -0,0 +1,193 @@ + + + + + + Service Inventory + + + + + + +
+
+

Internal inventory

+

Intranet Services

+

Live snapshot of nginx vhosts and Docker workloads across Chelsea's network.

+
+
+
+
+

Los Angeles

+

la.chelseawoodruff.net

+
nginx · FRP relay
+
+

nginx sites

  • chat.scorpi.us, chat.scorpi.us – Public chat frontend with HTTPS redirect
  • _

Docker

Name | Image | Ports
frps
Snowdreamtech FRP reverse proxy
snowdreamtech/frps:latest
+
+
+
+

Virginia

+

virginia.chelseawoodruff.net

+
nginx · Git · Vikunja · Dokuwiki · Custom apps
+
+

nginx sites

  • blocked.scorpi.us, blocked.scorpi.us – Block page mirror
  • wiki.scorpi.us – Dokuwiki knowledge base
  • git.scorpi.us, git.scorpi.us – Gitea instance
  • hightimesfrom.space, hightimesfrom.space – Personal landing page
  • pm.scorpi.us, leantime.scorpi.us, pm.scorpi.us, leantime.scorpi.us – Leantime / PM suite
  • news.scorpi.us, news.scorpi.us – Pseudo news clone
  • youtube.scorpi.us, youtube.scorpi.us – PseudoTube experiment
  • reddit.scorpi.us, reddit.scorpi.us – Reddit proxy (forced HTTPS)
  • requests.scorpi.us, requests.scorpi.us – Requesty helper

Docker

Name | Image | Ports
claude-proxy
Claude proxy forwarder
claude-proxy:latest | 0.0.0.0:45000->45000/tcp, :::45000->45000/tcp
solar_420
Solartime demo app
solartime420-solar420 | 0.0.0.0:5150->5000/tcp, [::]:5150->5000/tcp
frps
Snowdreamtech FRP relay
snowdreamtech/frps:latest
balanceboard_app
Balanceboard UI
balanceboard-app | 0.0.0.0:5021->5021/tcp, :::5021->5021/tcp
balanceboard_postgres
Balanceboard DB
postgres:15 | 0.0.0.0:5433->5432/tcp, [::]:5433->5432/tcp
vikunja_vikunja_1
Vikunja task manager
vikunja/vikunja | 0.0.0.0:3456->3456/tcp, :::3456->3456/tcp
vikunja_db_1
Vikunja Postgres
postgres:13-alpine | 5432/tcp
gitea
Gitea service
gitea/gitea:latest | 0.0.0.0:3000->3000/tcp, :::3000->3000/tcp, 0.0.0.0:222->22/tcp, [::]:222->22/tcp
gitea_db_1
Gitea Postgres
postgres:14 | 5432/tcp
dokuwiki
Dokuwiki container
lscr.io/linuxserver/dokuwiki:latest | 443/tcp, 0.0.0.0:8081->80/tcp, [::]:8081->80/tcp
requesty
Requesty API
docker_requesty | 0.0.0.0:5000->5000/tcp, :::5000->5000/tcp
psuedo-tube
PseudoTube UI
docker_psuedo-tube | 0.0.0.0:8082->80/tcp, [::]:8082->80/tcp
block-page
Block-page helper
docker_block-page | 0.0.0.0:8083->80/tcp, [::]:8083->80/tcp
+
+
+
+

Chicago

+

chicago.scorpi.us

+
nginx · ADHDbot · ntfy · IRC
+
+

nginx sites

  • adhd.scorpi.us, adhd.scorpi.us – ADHDbot UI/API
  • matrix.scorpi.us, matrix.scorpi.us, matrix.scorpi.us – Matrix homeserver proxy
  • ntfy.scorpi.us, ntfy.scorpi.us – ntfy notification hub

Docker

Name | Image | Ports
adhdbot-app_adhdbot_1
ADHDbot FastAPI stack
adhdbot-app_adhdbot | 0.0.0.0:8000->8000/tcp
ntfy
ntfy topic server
binwiederhier/ntfy | 0.0.0.0:8081->80/tcp
inspircd
InspIRCd daemon
inspircd/inspircd-docker:latest | 0.0.0.0:6667->6667/tcp, 0.0.0.0:6697->6697/tcp, 7000-7001/tcp
+
+
+
+

Dallas

+

dallas.scorpi.us

+
nginx · Automation
+
+

nginx sites

  • n8n.dallas.scorpi.us, n8n.dallas.scorpi.us – n8n low-code automation suite

Docker

Name | Image | Ports
n8n_n8n_1
n8n worker (localhost only)
docker.n8n.io/n8nio/n8n | 127.0.0.1:5678->5678/tcp
+
+
+
+

Phoenix

+

phoenix.scorpi.us

+
Discourse · Discord bridge
+
+

nginx sites

No vhosts detected.

Docker

Name | Image | Ports
discord-discourse-bridge
Discord ↔ Discourse bridge
syncbot_discord-discourse-bridge
app
Discourse stack
local_discourse/app | 0.0.0.0:80->80/tcp, :::80->80/tcp, 0.0.0.0:443->443/tcp, :::443->443/tcp
+
+
+
+ + \ No newline at end of file diff --git a/main.py b/main.py new file mode 100644 index 0000000..7f0a80d --- /dev/null +++ b/main.py @@ -0,0 +1,24 @@ +import os + +from Runner import Runner + + +def getenv(name, default=None): + value = os.getenv(name) + if value is None or value == "": + return default + return value + + +def main(): + userId = getenv("TARGET_USER_ID") + category = getenv("PROMPT_CATEGORY", "general") + promptName = getenv("PROMPT_NAME", "welcome") + context = getenv("PROMPT_CONTEXT", "container test run") + + message = Runner.run(userId, category, promptName, context) + print(f"[runner] response: {message}") + + +if __name__ == "__main__": + main() diff --git a/prompts/defaultPrompts.json b/prompts/defaultPrompts.json new file mode 100644 index 0000000..c422534 --- /dev/null +++ b/prompts/defaultPrompts.json @@ -0,0 +1,55 @@ +[ + { + "category": "general", + "name": "welcome", + "description": "First interaction focused on ADHD-friendly support.", + "variables": [ + "user", + "user_firstname", + "context" + ], + "template": "Hey {user_firstname}! I'm your ADHD-focused executive function assistant. You mentioned: {context}. Tell me where you feel stuck—reminding yourself, planning something, or getting started—and I'll help you pick a light next action. If you ever want me to save a plan or schedule a reminder, just say so and I'll include the right JSON payload. When you explicitly say to take or log a note (including in the context), close with one ```json block containing {\"action\": \"take_note\", \"note\": \"\"}." + }, + { + "category": "general", + "name": "fallback", + "description": "Backup voice when a prompt is missing.", + "variables": [ + "user", + "context" + ], + "template": "Still working on that, {user}. Let's keep momentum by focusing on the next doable step from this context: {context}" + }, + { + "category": "planning", + "name": "breakdown", + "description": "Helps the user break a task into ADHD-friendly chunks and optionally store it.", + "variables": [ + "user", + "user_firstname", + "context" + ], + "template": "You are an executive function coach for {user_firstname}. Use the context to:\n1. Reflect empathy in one short sentence.\n2. Identify the desired outcome.\n3. Break the work into 2-5 tiny, observable steps with relaxing estimates (\"~5 min\", \"1 song\" etc.).\n4. Offer a prompt that nudges initiation (\"Want me to save this plan or set a reminder?\").\n\nIf the user clearly wants to save the plan, append a single ```json block with:\n{\n \"action\": \"store_task\",\n \"task\": {\n \"title\": \"short label\",\n \"steps\": [\n {\"order\": 1, \"description\": \"step detail\", \"duration\": \"~5 min\"}\n ],\n \"next_step\": \"first step text\",\n \"context\": \"{context}\",\n \"status\": \"not_started\"\n }\n}\nOnly include the JSON when explicitly requested or confirmed; otherwise stay conversational." + }, + { + "category": "reminders", + "name": "schedule", + "description": "Collaboratively schedules reminders for the user.", + "variables": [ + "user", + "user_firstname", + "context" + ], + "template": "You help {user_firstname} set ADHD-friendly reminders. Confirm the task, timing, and delivery preference. 
Summarize the reminder in natural language and invite any tweaks.\n\nWhen the user gives enough detail or explicitly says to schedule it, append one ```json block with:\n{\n \"action\": \"schedule_reminder\",\n \"reminder\": {\n \"title\": \"short label\",\n \"details\": \"context summary\",\n \"trigger\": {\n \"type\": \"datetime | relative | habit\",\n \"value\": \"ISO timestamp or human-friendly string\"\n },\n \"follow_up\": \"check-in question\",\n \"metadata\": {\n \"user\": \"{user}\",\n \"source\": \"prompt\"\n }\n }\n}\nSkip the JSON when the reminder details are incomplete—keep the conversation going instead." + }, + { + "category": "agentic", + "name": "hourly_review", + "description": "Scans notes and action lists to trigger autonomous workflows.", + "variables": [ + "user", + "context" + ], + "template": "You are the agentic autopilot for {user}. The context contains a JSON blob with:\n- `notes`: latest note entries.\n- `action_items`: recurring or periodic tasks with cadence, interval estimates, and recent progress.\n- `summaries`: high-level memory summaries.\n\nEvery hour you must:\n1. Parse the JSON to understand what the user captured recently and which action items might be due.\n2. Decide whether to trigger a follow-up. You can communicate with the user or create reminders/tasks using the same structured JSON actions (`take_note`, `store_task`, `schedule_reminder`).\n3. When nothing needs attention, briefly acknowledge the review and end the run.\n\nIf you trigger anything, clearly explain why (\"Dishwasher reset is overdue by 1 day; sending reminder\") before emitting the relevant ```json block. Keep the tone concise and operational." + } +] diff --git a/prompts/prompt_schema.template.json b/prompts/prompt_schema.template.json new file mode 100644 index 0000000..878a107 --- /dev/null +++ b/prompts/prompt_schema.template.json @@ -0,0 +1,10 @@ +{ + "category": "general", + "name": "welcome", + "description": "Explain what this prompt should accomplish.", + "variables": [ + "user", + "context" + ], + "template": "Hello {user}, context: {context}" +} diff --git a/prompts/tool_instructions.md b/prompts/tool_instructions.md new file mode 100644 index 0000000..4224f09 --- /dev/null +++ b/prompts/tool_instructions.md @@ -0,0 +1,15 @@ +# Tooling and JSON actions + +1. Only emit JSON when the user confirms they want an action performed. +2. Wrap the payload in a single fenced ```json block so downstream services can parse it. +3. Supported payloads today: `take_note`, `store_task`, `schedule_reminder`. +4. Keep conversational guidance before/after the block short and clear. + +When logging a note, output exactly: +```json +{ + "action": "take_note", + "note": "" +} +``` +Swap in the user's wording (including emojis or punctuation) for the placeholder and keep the block on its own. 
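+
+For orientation, here is a minimal sketch of how a downstream consumer might pull
+the fenced block out of a reply and dispatch on `action`. This is illustrative
+only; the helper name and regex are assumptions, not the bot's actual parser.
+
+```python
+import json
+import re
+
+# Matches the first fenced ```json block in an assistant reply.
+FENCED_JSON = re.compile(r"```json\s*(\{.*?\})\s*```", re.DOTALL)
+
+SUPPORTED_ACTIONS = {"take_note", "store_task", "schedule_reminder"}
+
+
+def extract_action(reply):
+    """Return (action, payload) from the reply, or (None, None) when absent or invalid."""
+    match = FENCED_JSON.search(reply)
+    if not match:
+        return None, None
+    try:
+        payload = json.loads(match.group(1))
+    except json.JSONDecodeError:
+        return None, None
+    action = payload.get("action")
+    return (action, payload) if action in SUPPORTED_ACTIONS else (None, None)
+```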
diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..b542655 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,3 @@ +fastapi==0.111.0 +uvicorn==0.30.1 +requests==2.32.3 diff --git a/scripts/generate_intranet.py b/scripts/generate_intranet.py new file mode 100644 index 0000000..3b8aa62 --- /dev/null +++ b/scripts/generate_intranet.py @@ -0,0 +1,478 @@ +#!/usr/bin/env python3 +"""Regenerate intranet/index.html by polling each server over SSH.""" + +from __future__ import annotations + +import base64 +import json +import pathlib +import shlex +import subprocess +import textwrap +from dataclasses import dataclass +from typing import Any, Dict, List, Optional + +REPO_ROOT = pathlib.Path(__file__).resolve().parents[1] +OUTPUT_PATH = REPO_ROOT / "intranet" / "index.html" + + +@dataclass +class HostConfig: + slug: str + ssh: str + title: str + subtitle: str + tags: List[str] + vhost_notes: Dict[str, str] + docker_notes: Dict[str, str] + + +HOSTS: List[HostConfig] = [ + HostConfig( + slug="la", + ssh="root@la.chelseawoodruff.net", + title="Los Angeles", + subtitle="la.chelseawoodruff.net", + tags=["nginx", "FRP relay"], + vhost_notes={ + "chat.scorpi.us": "Public chat frontend with HTTPS redirect", + }, + docker_notes={ + "frps": "Snowdreamtech FRP reverse proxy", + }, + ), + HostConfig( + slug="virginia", + ssh="root@virginia.chelseawoodruff.net", + title="Virginia", + subtitle="virginia.chelseawoodruff.net", + tags=["nginx", "Git", "Vikunja", "Dokuwiki", "Custom apps"], + vhost_notes={ + "hightimesfrom.space": "Personal landing page", + "pm.scorpi.us": "Leantime / PM suite", + "wiki.scorpi.us": "Dokuwiki knowledge base", + "news.scorpi.us": "Pseudo news clone", + "git.scorpi.us": "Gitea instance", + "reddit.scorpi.us": "Reddit proxy (forced HTTPS)", + "blocked.scorpi.us": "Block page mirror", + "requests.scorpi.us": "Requesty helper", + "youtube.scorpi.us": "PseudoTube experiment", + }, + docker_notes={ + "claude-proxy": "Claude proxy forwarder", + "solar_420": "Solartime demo app", + "frps": "Snowdreamtech FRP relay", + "balanceboard_app": "Balanceboard UI", + "balanceboard_postgres": "Balanceboard DB", + "vikunja_vikunja_1": "Vikunja task manager", + "vikunja_db_1": "Vikunja Postgres", + "gitea": "Gitea service", + "gitea_db_1": "Gitea Postgres", + "dokuwiki": "Dokuwiki container", + "requesty": "Requesty API", + "psuedo-tube": "PseudoTube UI", + "block-page": "Block-page helper", + }, + ), + HostConfig( + slug="chicago", + ssh="root@chicago.scorpi.us", + title="Chicago", + subtitle="chicago.scorpi.us", + tags=["nginx", "ADHDbot", "ntfy", "IRC"], + vhost_notes={ + "adhd.scorpi.us": "ADHDbot UI/API", + "matrix.scorpi.us": "Matrix homeserver proxy", + "ntfy.scorpi.us": "ntfy notification hub", + }, + docker_notes={ + "adhdbot-app_adhdbot_1": "ADHDbot FastAPI stack", + "ntfy": "ntfy topic server", + "inspircd": "InspIRCd daemon", + }, + ), + HostConfig( + slug="dallas", + ssh="root@dallas.scorpi.us", + title="Dallas", + subtitle="dallas.scorpi.us", + tags=["nginx", "Automation"], + vhost_notes={ + "n8n.dallas.scorpi.us": "n8n low-code automation suite", + }, + docker_notes={ + "n8n_n8n_1": "n8n worker (localhost only)", + }, + ), + HostConfig( + slug="phoenix", + ssh="root@phoenix.scorpi.us", + title="Phoenix", + subtitle="phoenix.scorpi.us", + tags=["Discourse", "Discord bridge"], + vhost_notes={}, + docker_notes={ + "app": "Discourse stack", + "discord-discourse-bridge": "Discord ↔ Discourse bridge", + }, + ), +] + + +REMOTE_PY = textwrap.dedent( + """ + import glob 
+ import json + import os + import subprocess + + + def collect_nginx(): + base = "/etc/nginx/sites-enabled" + sites = [] + if not os.path.isdir(base): + return sites + for path in sorted(glob.glob(os.path.join(base, "*"))): + if not os.path.isfile(path): + continue + try: + with open(path, "r", encoding="utf-8") as handle: + lines = handle.readlines() + except OSError: + continue + server_names = [] + for line in lines: + stripped = line.strip() + if not stripped or stripped.startswith("#"): + continue + if stripped.startswith("server_name"): + content = stripped.split(None, 1)[1] if " " in stripped else "" + content = content.split("#", 1)[0].strip() + if content.endswith(";"): + content = content[:-1].strip() + if content: + server_names.extend([token for token in content.split() if token]) + sites.append({"filename": os.path.basename(path), "server_names": server_names}) + return sites + + + def collect_docker(): + try: + output = subprocess.check_output( + ["docker", "ps", "--format", "{{json .}}"], text=True, timeout=10 + ) + except Exception: + return [] + containers = [] + for line in output.splitlines(): + line = line.strip() + if not line: + continue + try: + data = json.loads(line) + except json.JSONDecodeError: + continue + containers.append( + { + "name": data.get("Names"), + "image": data.get("Image"), + "ports": data.get("Ports"), + } + ) + return containers + + + payload = { + "hostname": os.uname().nodename, + "nginx": collect_nginx(), + "docker": collect_docker(), + } + print("{{JSON}}" + json.dumps(payload) + "{{/JSON}}") + """ +).strip() + + +def run_remote_script(target: str) -> Dict[str, Any]: + encoded = base64.b64encode(REMOTE_PY.encode("utf-8")).decode("ascii") + remote_cmd = ( + "python3 -c " + + shlex.quote( + "import base64, json; exec(base64.b64decode({}))".format(repr(encoded)) + ) + ) + ssh_cmd = [ + "ssh", + "-o", + "BatchMode=yes", + "-o", + "StrictHostKeyChecking=no", + target, + remote_cmd, + ] + completed = subprocess.run( + ssh_cmd, + capture_output=True, + text=True, + check=False, + ) + if completed.returncode != 0: + raise RuntimeError(completed.stderr.strip() or completed.stdout.strip()) + + marker_start = "{{JSON}}" + marker_end = "{{/JSON}}" + stdout = completed.stdout + start = stdout.find(marker_start) + end = stdout.find(marker_end) + if start == -1 or end == -1: + raise RuntimeError("Could not locate JSON payload in ssh output") + json_text = stdout[start + len(marker_start) : end] + return json.loads(json_text) + + +def format_ports(ports: Optional[str]) -> str: + ports = (ports or "").strip() + return ports if ports else "—" + + +def render_host_card(config: HostConfig, payload: Optional[Dict[str, Any]], error: Optional[str]) -> str: + tag_html = "".join(f'{tag}' for tag in config.tags) + if error: + body = f'

Unable to load data: {error}

' + nginx_html = "" + docker_html = "" + else: + nginx_entries = payload.get("nginx", []) if payload else [] + docker_entries = payload.get("docker", []) if payload else [] + + if nginx_entries: + items = [] + for entry in nginx_entries: + names = entry.get("server_names") or [entry.get("filename", "unknown")] + label = ", ".join(names) + note = config.vhost_notes.get(names[0]) or config.vhost_notes.get(entry.get("filename", "")) + if note: + items.append(f"
  • {label} – {note}
  • ") + else: + items.append(f"
  • {label}
  • ") + nginx_html = "

    nginx sites

      " + "".join(items) + "
    " + else: + nginx_html = "

    nginx sites

    No vhosts detected.

    " + + if docker_entries: + rows = [] + for entry in docker_entries: + name = entry.get("name") or "unknown" + image = entry.get("image") or "?" + ports = format_ports(entry.get("ports")) + note = config.docker_notes.get(name) + note_span = f'
    {note}' if note else "" + rows.append( + "" + f"{name}{note_span}" + f"{image}" + f"{ports}" + "" + ) + docker_html = ( + "

    Docker

    " + "" + "" + + "".join(rows) + + "
    NameImagePorts
    " + ) + else: + docker_html = "

    Docker

    No containers running.

    " + body = nginx_html + docker_html + + return textwrap.dedent( + f""" +
    +
    +

    {config.title}

    +

    {config.subtitle}

    +
    {tag_html}
    +
    + {body} +
    + """ + ).strip() + + +def render_html(reports: List[str]) -> str: + cards_html = "\n".join(reports) + return textwrap.dedent( + f""" + + + + + + Service Inventory + + + + + + +
    +
    +

    Internal inventory

    +

    Intranet Services

    +

    Live snapshot of nginx vhosts and Docker workloads across Chelsea's network.

    +
    +
    + {cards_html} +
    +
    + + + """ + ).strip() + + +def main() -> None: + reports: List[str] = [] + for host in HOSTS: + try: + payload = run_remote_script(host.ssh) + reports.append(render_host_card(host, payload, error=None)) + except Exception as exc: # pragma: no cover - network dependent + reports.append(render_host_card(host, payload=None, error=str(exc))) + html = render_html(reports) + OUTPUT_PATH.parent.mkdir(parents=True, exist_ok=True) + OUTPUT_PATH.write_text(html, encoding="utf-8") + print(f"Wrote {OUTPUT_PATH}") + + +if __name__ == "__main__": + main() diff --git a/web_App.css b/web_App.css new file mode 100644 index 0000000..5de98fa --- /dev/null +++ b/web_App.css @@ -0,0 +1,627 @@ +:root { + color: #e2e8f0; + font-family: "Inter", "Segoe UI", system-ui, -apple-system, BlinkMacSystemFont, sans-serif; + background: radial-gradient(circle at top left, rgba(14, 165, 233, 0.35), transparent 45%), + radial-gradient(circle at bottom right, rgba(217, 70, 239, 0.25), transparent 40%), #030712; + min-height: 100%; +} + +* { + box-sizing: border-box; +} + +body { + margin: 0; + min-height: 100vh; + background: transparent; +} + +button, +input, +textarea, +select { + font: inherit; + color: inherit; +} + +.glass { + background: rgba(15, 23, 42, 0.65); + border: 1px solid rgba(226, 232, 240, 0.08); + box-shadow: 0 30px 80px rgba(2, 6, 23, 0.55); + border-radius: 24px; + backdrop-filter: blur(24px); +} + +.chat-shell { + min-height: 100vh; + padding: clamp(1rem, 3vw, 2.5rem); + display: flex; + flex-direction: column; + gap: 1.25rem; + max-width: 1200px; + margin: 0 auto; +} + +.chat-header { + display: flex; + flex-wrap: wrap; + gap: 1.25rem 2rem; + padding: clamp(1.25rem, 3vw, 2rem); + align-items: center; + justify-content: space-between; +} + +.chat-header h1 { + margin: 0.2rem 0; + font-size: clamp(1.8rem, 4vw, 2.6rem); +} + +.status-row { + display: flex; + align-items: center; + gap: 0.75rem; + margin-top: 0.25rem; +} + +.status-chip { + display: inline-flex; + align-items: center; + gap: 0.35rem; + font-size: 0.85rem; + border-radius: 999px; + padding: 0.2rem 0.65rem; + border: 1px solid rgba(148, 163, 184, 0.35); + background: rgba(15, 23, 42, 0.5); +} + +.status-chip.online { + border-color: rgba(34, 197, 94, 0.5); + background: rgba(16, 185, 129, 0.15); +} + +.status-chip.offline { + border-color: rgba(248, 113, 113, 0.5); + background: rgba(248, 113, 113, 0.15); +} + +.status-dot { + width: 8px; + height: 8px; + border-radius: 999px; + background: currentColor; + display: inline-block; +} + +.status-chip.online .status-dot { + background: #34d399; +} + +.status-chip.offline .status-dot { + background: #f87171; +} + +.eyebrow { + text-transform: uppercase; + letter-spacing: 0.18em; + font-size: 0.78rem; + margin: 0; + color: #a5b4fc; +} + +.muted { + color: rgba(226, 232, 240, 0.75); +} + +.muted.danger { + color: #fca5a5; +} + +.tiny { + font-size: 0.85rem; +} + +.header-controls { + display: flex; + flex-direction: column; + gap: 0.4rem; + min-width: 260px; +} + +.header-controls label { + display: flex; + flex-direction: column; + gap: 0.25rem; + font-size: 0.85rem; +} + +.header-controls input, +.header-controls select { + border: 1px solid rgba(148, 163, 184, 0.4); + border-radius: 999px; + padding: 0.45rem 0.95rem; + background: rgba(2, 6, 23, 0.4); +} + +.reset-chat { + border-radius: 999px; + border: 1px solid rgba(148, 163, 184, 0.4); + padding: 0.35rem 1rem; + background: rgba(2, 6, 23, 0.35); + cursor: pointer; +} + +.stat-row { + display: grid; + grid-template-columns: 
repeat(auto-fit, minmax(180px, 1fr)); + gap: 1rem; +} + +.stat-card { + padding: 1rem 1.25rem; + display: flex; + flex-direction: column; + gap: 0.35rem; +} + +.stat-label { + text-transform: uppercase; + letter-spacing: 0.15em; + font-size: 0.72rem; + margin: 0; + color: rgba(226, 232, 240, 0.65); +} + +.stat-card strong { + font-size: 1.6rem; +} + +.mode-switch { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(220px, 1fr)); + gap: 0.75rem; +} + +.mode-pill { + border-radius: 20px; + border: 1px solid rgba(148, 163, 184, 0.35); + background: rgba(15, 23, 42, 0.4); + padding: 0.75rem 1rem; + text-align: left; + cursor: pointer; + transition: border-color 0.2s ease, transform 0.2s ease; +} + +.mode-pill.active { + border-color: rgba(248, 250, 252, 0.85); + background: rgba(51, 65, 85, 0.6); + transform: translateY(-2px); +} + +.mode-pill__label { + display: flex; + align-items: center; + gap: 0.45rem; + font-weight: 600; +} + +.mode-pill__dot { + width: 10px; + height: 10px; + border-radius: 50%; + box-shadow: 0 0 10px currentColor; +} + +.mode-pill small { + display: block; + margin-top: 0.4rem; + color: rgba(226, 232, 240, 0.75); +} + +.dashboard-grid { + display: grid; + grid-template-columns: minmax(0, 1.8fr) minmax(260px, 1fr); + gap: 1.5rem; + align-items: flex-start; +} + +.conversation-stack { + display: flex; + flex-direction: column; + gap: 1rem; + padding: clamp(1rem, 2vw, 1.5rem); +} + +.conversation-header { + display: flex; + justify-content: space-between; + align-items: flex-start; + gap: 1rem; +} + +.chat-main { + flex: 1; + padding: clamp(1rem, 2vw, 1.25rem); + overflow-y: auto; + display: flex; + flex-direction: column; + gap: 1rem; + border-radius: 20px; + border: 1px solid rgba(226, 232, 240, 0.05); + background: rgba(2, 6, 23, 0.4); +} + +.bubble { + padding: 0.85rem 1.1rem; + border-radius: 18px; + border: 1px solid transparent; + background: rgba(15, 23, 42, 0.55); + box-shadow: inset 0 0 0 1px rgba(255, 255, 255, 0.03); + animation: fadeIn 0.25s ease; +} + +.bubble.user { + align-self: flex-end; + background: rgba(59, 130, 246, 0.18); + border-color: rgba(59, 130, 246, 0.35); +} + +.bubble.assistant { + align-self: flex-start; + background: rgba(129, 140, 248, 0.18); + border-color: rgba(99, 102, 241, 0.3); +} + +.bubble.system { + align-self: center; + background: rgba(45, 212, 191, 0.18); + border-color: rgba(16, 185, 129, 0.35); +} + +.bubble.ghost { + opacity: 0.8; +} + +.meta { + display: flex; + justify-content: space-between; + font-size: 0.78rem; + color: rgba(226, 232, 240, 0.65); + gap: 0.5rem; +} + +.bubble p { + margin: 0.35rem 0 0; + line-height: 1.55; +} + +.pill { + display: inline-flex; + align-items: center; + gap: 0.35rem; + border-radius: 999px; + padding: 0.15rem 0.7rem; + font-size: 0.75rem; + text-transform: uppercase; + letter-spacing: 0.08em; + border: 1px solid rgba(148, 163, 184, 0.35); + background: rgba(148, 163, 184, 0.12); +} + +.pill--pending { + border-color: rgba(14, 165, 233, 0.5); + background: rgba(14, 165, 233, 0.15); +} + +.pill--error { + border-color: rgba(248, 113, 113, 0.5); + background: rgba(248, 113, 113, 0.2); +} + +.typing-dots { + display: flex; + gap: 0.3rem; +} + +.typing-dots span { + width: 8px; + height: 8px; + background: rgba(255, 255, 255, 0.7); + border-radius: 50%; + animation: pulse 1.2s infinite ease-in-out; +} + +.typing-dots span:nth-child(2) { + animation-delay: 0.2s; +} + +.typing-dots span:nth-child(3) { + animation-delay: 0.4s; +} + +.composer-wrapper { + padding: 1rem 1.25rem 
1.25rem; + display: flex; + flex-direction: column; + gap: 0.6rem; + border-radius: 20px; + border: 1px solid rgba(226, 232, 240, 0.05); + background: rgba(2, 6, 23, 0.45); +} + +.composer-form { + display: flex; + gap: 0.75rem; + align-items: flex-end; +} + +.composer-form textarea { + flex: 1; + border-radius: 18px; + border: 1px solid rgba(148, 163, 184, 0.4); + background: rgba(3, 7, 18, 0.75); + padding: 0.75rem 1rem; + resize: none; + min-height: 56px; + max-height: 240px; + line-height: 1.4; +} + +.composer-form button { + border: none; + border-radius: 999px; + padding: 0.85rem 1.8rem; + background: linear-gradient(135deg, rgba(59, 130, 246, 0.95), rgba(236, 72, 153, 0.95)); + color: #fff; + font-weight: 600; + cursor: pointer; + box-shadow: 0 15px 30px rgba(14, 165, 233, 0.35); + transition: transform 0.2s ease, box-shadow 0.2s ease; +} + +.composer-form button:disabled { + opacity: 0.55; + cursor: not-allowed; + box-shadow: none; +} + +.composer-form button:not(:disabled):hover { + transform: translateY(-2px); +} + +.composer-meta { + display: flex; + justify-content: space-between; + font-size: 0.82rem; + color: rgba(226, 232, 240, 0.65); +} + +.suggestion-row { + display: flex; + flex-wrap: wrap; + gap: 0.4rem; +} + +.suggestion-row button { + border-radius: 999px; + border: 1px solid rgba(148, 163, 184, 0.4); + padding: 0.3rem 0.9rem; + background: rgba(2, 6, 23, 0.35); + cursor: pointer; + font-size: 0.85rem; +} + +.actions-panel { + padding: clamp(1rem, 2vw, 1.5rem); + display: flex; + flex-direction: column; + gap: 1rem; +} + +.actions-panel__header { + display: flex; + justify-content: space-between; + align-items: center; + gap: 1rem; +} + +.refresh-button { + border-radius: 999px; + border: 1px solid rgba(148, 163, 184, 0.4); + padding: 0.45rem 1.2rem; + background: rgba(2, 6, 23, 0.35); + cursor: pointer; +} + +.new-action-form { + display: flex; + flex-direction: column; + gap: 0.75rem; +} + +.new-action-form label { + display: flex; + flex-direction: column; + gap: 0.35rem; + font-size: 0.9rem; +} + +.new-action-form input, +.new-action-form select, +.new-action-form textarea { + border: 1px solid rgba(148, 163, 184, 0.35); + border-radius: 14px; + background: rgba(2, 6, 23, 0.35); + padding: 0.5rem 0.75rem; +} + +.new-action-form button { + align-self: flex-start; + border: none; + border-radius: 999px; + padding: 0.6rem 1.4rem; + background: rgba(129, 140, 248, 0.85); + color: #fff; + cursor: pointer; +} + +.field-cluster { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(160px, 1fr)); + gap: 0.8rem; +} + +.action-list { + list-style: none; + margin: 0; + padding: 0; + display: flex; + flex-direction: column; + gap: 1rem; +} + +.action-card { + padding: 1rem; + border-radius: 24px; + border: 1px solid rgba(226, 232, 240, 0.08); +} + +.action-card__title-row { + display: flex; + align-items: center; + gap: 0.6rem; + margin-bottom: 0.35rem; +} + +.action-card__body { + display: flex; + justify-content: space-between; + gap: 1rem; + flex-wrap: wrap; +} + +.action-card__body h3 { + margin: 0; +} + +.action-card__body p { + margin: 0; + max-width: 480px; +} + +.action-card__body dl { + display: flex; + gap: 1rem; + margin: 0; + flex-wrap: wrap; +} + +.action-card__body dl div { + min-width: 120px; +} + +.action-card__body dt { + font-size: 0.75rem; + text-transform: uppercase; + letter-spacing: 0.08em; + color: rgba(226, 232, 240, 0.65); +} + +.action-card__body dd { + margin: 0.25rem 0 0; + font-weight: 600; +} + +.action-card__progress { + margin-top: 
0.75rem; + display: flex; + flex-direction: column; + gap: 0.5rem; +} + +.action-card__progress form { + display: flex; + flex-wrap: wrap; + gap: 0.5rem; +} + +.action-card__progress select, +.action-card__progress input { + border: 1px solid rgba(148, 163, 184, 0.35); + border-radius: 999px; + padding: 0.4rem 0.8rem; + background: rgba(2, 6, 23, 0.35); +} + +.action-card__progress button { + border-radius: 999px; + border: none; + padding: 0.45rem 1.1rem; + background: rgba(59, 130, 246, 0.8); + color: #fff; + cursor: pointer; +} + +.action-card__progress button.ghost { + background: rgba(239, 68, 68, 0.2); + border: 1px solid rgba(239, 68, 68, 0.4); +} + +.error-banner { + margin: 0; + padding: 0.6rem 0.9rem; + border-radius: 14px; + border: 1px solid rgba(239, 68, 68, 0.35); + background: rgba(248, 113, 113, 0.18); + font-size: 0.9rem; +} + +@keyframes fadeIn { + from { + opacity: 0; + transform: translateY(6px); + } + to { + opacity: 1; + transform: translateY(0); + } +} + +@keyframes pulse { + 0%, 80%, 100% { + transform: scale(0.8); + opacity: 0.6; + } + 40% { + transform: scale(1); + opacity: 1; + } +} + +@media (max-width: 960px) { + .dashboard-grid { + grid-template-columns: 1fr; + } +} + +@media (max-width: 768px) { + .chat-header { + flex-direction: column; + align-items: flex-start; + } + .composer-form { + flex-direction: column; + } + .composer-form textarea { + width: 100%; + } + .composer-form button { + width: 100%; + text-align: center; + } + .action-card__body { + flex-direction: column; + } + .action-card__progress form { + flex-direction: column; + } +} diff --git a/web_App.tsx b/web_App.tsx new file mode 100644 index 0000000..078d9fe --- /dev/null +++ b/web_App.tsx @@ -0,0 +1,804 @@ +import { useCallback, useEffect, useMemo, useRef, useState, type FormEvent, type KeyboardEvent } from "react"; +import "./App.css"; + +type MessageRole = "user" | "assistant" | "system"; +type MessageStatus = "sent" | "pending" | "error"; + +type Message = { + id: string; + role: MessageRole; + text: string; + timestamp: number; + status?: MessageStatus; +}; + +type ProgressEntry = { + timestamp: string; + status: string; + note?: string | null; +}; + +type ActionItem = { + id: string; + title: string; + cadence: string; + details?: string | null; + interval_minutes?: number | null; + created_at: string; + updated_at: string; + progress: ProgressEntry[]; +}; + +type ProgressDraft = { + status: string; + note: string; +}; + +type PromptOption = { + label: string; + description: string; + category: string; + promptName: string; + accent: string; +}; + +const promptOptions: PromptOption[] = [ + { + label: "General support", + description: "Quick welcome / encouragement.", + category: "general", + promptName: "welcome", + accent: "#38bdf8", + }, + { + label: "Plan a thing", + description: "Break work into small steps.", + category: "planning", + promptName: "breakdown", + accent: "#f472b6", + }, + { + label: "Schedule reminder", + description: "Confirm timing + emit JSON.", + category: "reminders", + promptName: "schedule", + accent: "#c084fc", + }, +]; + +const contextSuggestions = [ + "Take a note that I'm experimenting with DeepSeek.", + "Help me plan my inbox zero session for 30 min.", + "Remind me in 10 minutes to stand up and stretch.", + "Break down cleaning my kitchen tonight.", + "Draft a gentle check-in for future-me about therapy homework.", +]; + +const storageKey = "adhd-conversation-cache"; +const maxContextLength = 1500; +const defaultProgress: ProgressDraft = { status: 
"update", note: "" }; + +const safeId = () => { + if (typeof crypto !== "undefined" && typeof crypto.randomUUID === "function") { + return crypto.randomUUID(); + } + return `id-${Math.random().toString(36).slice(2)}`; +}; + +const createSystemMessage = (): Message => ({ + id: safeId(), + role: "system", + timestamp: Date.now(), + text: "✨ Fresh chat. Pick a mode above, type anything below, and I'll relay it to ADHDbot.", +}); + +const shortDateFormatter = new Intl.DateTimeFormat("en", { + month: "short", + day: "numeric", + hour: "numeric", + minute: "2-digit", +}); + +function App() { + const [userId, setUserId] = useState("chelsea"); + const [modeIndex, setModeIndex] = useState(0); + const [context, setContext] = useState(""); + const [isSending, setIsSending] = useState(false); + const [error, setError] = useState(null); + const [actions, setActions] = useState([]); + const [actionsLoading, setActionsLoading] = useState(false); + const [actionsError, setActionsError] = useState(null); + const [newActionTitle, setNewActionTitle] = useState(""); + const [newActionCadence, setNewActionCadence] = useState("daily"); + const [newActionInterval, setNewActionInterval] = useState(""); + const [newActionDetails, setNewActionDetails] = useState(""); + const [progressDrafts, setProgressDrafts] = useState>({}); + const [messages, setMessages] = useState(() => { + if (typeof window === "undefined") { + return [createSystemMessage()]; + } + try { + const cached = window.localStorage.getItem(storageKey); + if (cached) { + const parsed = JSON.parse(cached) as Message[]; + if (Array.isArray(parsed) && parsed.length) { + return parsed; + } + } + } catch { + /* ignore */ + } + return [createSystemMessage()]; + }); + const [isOnline, setIsOnline] = useState(() => (typeof navigator === "undefined" ? true : navigator.onLine)); + const [lastRefreshedAt, setLastRefreshedAt] = useState(null); + + const selectedPrompt = useMemo(() => promptOptions[modeIndex], [modeIndex]); + const conversationRef = useRef(null); + const composerRef = useRef(null); + + const charCount = context.length; + const isOverLimit = charCount > maxContextLength; + const canSend = Boolean(context.trim()) && !isSending && !isOverLimit; + + const loadProgressDraft = useCallback( + (actionId: string): ProgressDraft => { + return progressDrafts[actionId] ?? defaultProgress; + }, + [progressDrafts], + ); + + const fetchActions = useCallback( + async (targetUserId: string) => { + if (!targetUserId) { + return; + } + setActionsLoading(true); + setActionsError(null); + try { + const response = await fetch(`/api/users/${targetUserId}/actions`, { + credentials: "include", + }); + if (!response.ok) { + throw new Error(`Failed to fetch actions: ${response.status}`); + } + const data = (await response.json()) as { action_items: ActionItem[] }; + setActions(data.action_items ?? []); + setProgressDrafts({}); + setLastRefreshedAt(Date.now()); + } catch (err) { + console.error(err); + setActionsError("Couldn't load action items."); + } finally { + setActionsLoading(false); + } + }, + [], + ); + + const actionSummary = useMemo(() => { + if (!actions.length) { + return { activeCount: 0, totalProgress: 0, lastUpdated: null as number | null }; + } + let lastUpdated: number | null = null; + let totalProgress = 0; + actions.forEach((action) => { + const updated = Date.parse(action.updated_at); + if (!Number.isNaN(updated) && (lastUpdated === null || updated > lastUpdated)) { + lastUpdated = updated; + } + totalProgress += action.progress?.length ?? 
0; + }); + return { activeCount: actions.length, totalProgress, lastUpdated }; + }, [actions]); + + const conversationSummary = useMemo(() => { + const assistantTurns = messages.filter((msg) => msg.role === "assistant").length; + const userTurns = messages.filter((msg) => msg.role === "user").length; + const lastReply = messages.length ? messages[messages.length - 1]?.timestamp : null; + return { + turns: assistantTurns + userTurns, + lastReply, + }; + }, [messages]); + + const formattedActionUpdate = actionSummary.lastUpdated ? shortDateFormatter.format(actionSummary.lastUpdated) : "No updates yet"; + const formattedRefresh = lastRefreshedAt ? shortDateFormatter.format(lastRefreshedAt) : "Not synced yet"; + const formattedLastReply = conversationSummary.lastReply ? shortDateFormatter.format(conversationSummary.lastReply) : "—"; + + useEffect(() => { + if (typeof window === "undefined") { + return; + } + window.localStorage.setItem(storageKey, JSON.stringify(messages)); + }, [messages]); + + useEffect(() => { + conversationRef.current?.scrollTo({ + top: conversationRef.current.scrollHeight, + behavior: "smooth", + }); + }, [messages, isSending]); + + useEffect(() => { + if ("serviceWorker" in navigator) { + navigator.serviceWorker.register("/sw.js").catch(() => undefined); + } + document.cookie = "adhd_auth=1; Path=/; Max-Age=31536000; SameSite=Lax"; + }, []); + + useEffect(() => { + fetchActions(userId); + }, [userId, fetchActions]); + + useEffect(() => { + const handleOnline = () => setIsOnline(true); + const handleOffline = () => setIsOnline(false); + window.addEventListener("online", handleOnline); + window.addEventListener("offline", handleOffline); + return () => { + window.removeEventListener("online", handleOnline); + window.removeEventListener("offline", handleOffline); + }; + }, []); + + useEffect(() => { + if (!composerRef.current) { + return; + } + const element = composerRef.current; + element.style.height = "auto"; + const maxHeight = 240; + element.style.height = `${Math.min(element.scrollHeight, maxHeight)}px`; + }, [context]); + + useEffect(() => { + composerRef.current?.focus(); + }, [modeIndex]); + + useEffect(() => { + document.title = `ADHDbot • ${selectedPrompt.label}`; + }, [selectedPrompt.label]); + + const sendMessage = useCallback(async () => { + if (!context.trim() || isSending) { + return; + } + const trimmedContext = context.trim(); + const historyPayload = messages + .filter((msg) => msg.role !== "system") + .map((msg) => ({ role: msg.role, content: msg.text })); + + const payload = { + userId, + category: selectedPrompt.category, + promptName: selectedPrompt.promptName, + context: trimmedContext, + history: historyPayload, + modeHint: selectedPrompt.label, + }; + + const userMessage: Message = { + id: safeId(), + role: "user", + timestamp: Date.now(), + text: trimmedContext, + status: "sent", + }; + setMessages((prev) => [...prev, userMessage]); + setContext(""); + setIsSending(true); + setError(null); + + try { + const response = await fetch("/api/run", { + method: "POST", + headers: { "Content-Type": "application/json" }, + credentials: "include", + body: JSON.stringify(payload), + }); + if (!response.ok) { + throw new Error(`Request failed: ${response.status}`); + } + const data = await response.json(); + const botMessage: Message = { + id: safeId(), + role: "assistant", + timestamp: Date.now(), + text: data.message ?? 
"(No response returned)", + status: "sent", + }; + setMessages((prev) => [...prev, botMessage]); + } catch (err) { + console.error(err); + setError("Something went sideways. Double-check the API and try again."); + const errorMessage: Message = { + id: safeId(), + role: "system", + timestamp: Date.now(), + text: "⚠️ Message failed. Please verify the API service is reachable.", + status: "error", + }; + setMessages((prev) => [...prev, errorMessage]); + } finally { + setIsSending(false); + } + }, [context, isSending, messages, selectedPrompt.category, selectedPrompt.label, selectedPrompt.promptName, userId]); + + const handleSubmit = (event: FormEvent) => { + event.preventDefault(); + void sendMessage(); + }; + + const handleComposerKeyDown = (event: KeyboardEvent) => { + if ((event.metaKey || event.ctrlKey) && event.key === "Enter") { + event.preventDefault(); + void sendMessage(); + } + }; + + const applySuggestion = (suggestion: string) => { + setContext((prev) => (prev ? `${prev}\n${suggestion}` : suggestion)); + composerRef.current?.focus(); + }; + + const handleNewAction = async (event: FormEvent) => { + event.preventDefault(); + const payload = { + title: newActionTitle.trim(), + cadence: newActionCadence, + interval_minutes: newActionInterval ? Number(newActionInterval) : null, + details: newActionDetails.trim() || undefined, + }; + if (!payload.title) { + return; + } + try { + const response = await fetch(`/api/users/${userId}/actions`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + credentials: "include", + body: JSON.stringify(payload), + }); + if (!response.ok) { + throw new Error(`Action create failed: ${response.status}`); + } + setNewActionTitle(""); + setNewActionDetails(""); + setNewActionInterval(""); + setNewActionCadence("daily"); + await fetchActions(userId); + } catch (err) { + console.error(err); + setActionsError("Couldn't save the new action."); + } + }; + + const handleDeleteAction = async (actionId: string) => { + if (!window.confirm("Remove this action item? This only affects local memory.")) { + return; + } + try { + const response = await fetch(`/api/users/${userId}/actions/${actionId}`, { + method: "DELETE", + credentials: "include", + }); + if (!response.ok && response.status !== 204) { + throw new Error(`Delete failed: ${response.status}`); + } + await fetchActions(userId); + } catch (err) { + console.error(err); + setActionsError("Couldn't delete that action."); + } + }; + + const handleProgressDraftChange = (actionId: string, field: keyof ProgressDraft, value: string) => { + setProgressDrafts((prev) => { + const base = prev[actionId] ?? defaultProgress; + return { + ...prev, + [actionId]: { + ...base, + [field]: value, + }, + }; + }); + }; + + const handleProgressSubmit = async (actionId: string) => { + const draft = loadProgressDraft(actionId); + try { + const response = await fetch(`/api/users/${userId}/actions/${actionId}/progress`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + credentials: "include", + body: JSON.stringify({ + status: draft.status || "update", + note: draft.note || undefined, + }), + }); + if (!response.ok) { + throw new Error(`Progress failed: ${response.status}`); + } + setProgressDrafts((prev) => ({ + ...prev, + [actionId]: defaultProgress, + })); + await fetchActions(userId); + } catch (err) { + console.error(err); + setActionsError("Couldn't log progress."); + } + }; + + const handleResetConversation = () => { + if (!window.confirm("Start a fresh chat? 
This only clears your local history.")) { + return; + } + const fresh = createSystemMessage(); + setMessages([fresh]); + if (typeof window !== "undefined") { + window.localStorage.setItem(storageKey, JSON.stringify([fresh])); + } + }; + + return ( +
    +
    +
    +

    ADHD Coach Console

    +

    Instant Messaging

    +

    One thread for prompts, plans, notes, and reminders.

    +
    + + +
    +
    +
    + + + {selectedPrompt.description} +
    +
    + +
    + + + + +
    + + + +
    +
    +
    +
    +

    Live thread

    +

    {selectedPrompt.label}

    +

    {selectedPrompt.description}

    +
    + User ID: {userId || "—"} +
    +
    + {messages.map((message) => ( + + ))} + {isSending && } +
    + +
    +
    +