initial commit

2026-02-16 17:49:24 -06:00
commit 55cad30ae5
5 changed files with 391 additions and 0 deletions

ai.py · 214 lines · Normal file

@@ -0,0 +1,214 @@
import json

import numpy as np
from openai import OpenAI

# --- Configuration Loading ---
def load_config(path='config.json'):
with open(path, 'r') as f:
return json.load(f)
# --- Vector Store (Mock Implementation) ---
class VectorStore:
def __init__(self, file_path):
print(f"Loading embeddings from {file_path}...")
try:
with open(file_path, 'r') as f:
data = json.load(f)
self.chunks = [item['text'] for item in data]
# Convert lists back to numpy arrays
self.embeddings = [np.array(item['embedding']) for item in data]
print(f"Loaded {len(self.chunks)} chunks.")
except FileNotFoundError:
print(f"Error: {file_path} not found. Creating a dummy knowledge base.")
# Fallback dummy data for testing if file missing
self.chunks = ["DBT teaches Distress Tolerance skills.", "DEAR MAN is a skill for interpersonal effectiveness."]
            # Dummy embeddings; the dimension must match the query-embedding
            # model (Qwen3-Embedding-8B produces 4096-dim vectors)
            self.embeddings = [np.random.rand(4096), np.random.rand(4096)]
def search(self, query_embedding, top_k=3):
"""Finds most similar chunks using Cosine Similarity."""
if not self.embeddings:
return []
query_vec = np.array(query_embedding)
scores = []
for i, doc_vec in enumerate(self.embeddings):
# Cosine similarity
score = np.dot(query_vec, doc_vec) / (np.linalg.norm(query_vec) * np.linalg.norm(doc_vec))
scores.append((score, i))
scores.sort(reverse=True)
return [self.chunks[i] for score, i in scores[:top_k]]
# --- OpenRouter Client ---
class LLMClient:
def __init__(self, api_key):
self.client = OpenAI(
base_url="https://openrouter.ai/api/v1",
api_key=api_key,
)
    def get_embedding(self, text):
        """Generates an embedding for the query."""
        # Must be the same model embedder.py used to build the knowledge base,
        # otherwise query and document vectors live in different spaces
        # (and have different dimensions)
        response = self.client.embeddings.create(
            model="qwen/qwen3-embedding-8b",
            input=text
        )
        return response.data[0].embedding
def generate(self, model_id, system_prompt, user_prompt, temperature=0.7):
"""Generic generation function."""
try:
response = self.client.chat.completions.create(
model=model_id,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_prompt}
],
temperature=temperature
)
return response.choices[0].message.content
except Exception as e:
return f"Error calling {model_id}: {str(e)}"
# --- Jury Logic ---
class DBTJurySystem:
def __init__(self, config):
self.config = config
self.llm = LLMClient(config['openrouter_api_key'])
self.vector_store = VectorStore(config['embedding_file'])
def retrieve_context(self, query):
print("\n[1. Retrieving Context...]")
query_emb = self.llm.get_embedding(query)
context_chunks = self.vector_store.search(query_emb)
return "\n".join(context_chunks)
def run_generator(self, query, context):
"""Step 1: Generate the initial draft."""
print("[2. Generating Draft...]")
prompt = f"""
Context from DBT Manual:
{context}
User Query: {query}
Instructions: Answer the user's query using ONLY the context provided.
If the context is insufficient, state that clearly.
"""
return self.llm.generate(
self.config['models']['generator'],
self.config['system_prompt'],
prompt
)
def run_jury_deliberation(self, query, context, draft_answer):
"""Step 2: The Jury Votes."""
print("[3. Jury Deliberating (Single Veto Logic)...]")
jury_config = [
{"role": "Clinical Accuracy", "model_key": "jury_clinical", "instruction": "Check if the advice strictly follows DBT protocol."},
{"role": "Safety", "model_key": "jury_safety", "instruction": "Check for any harmful, unethical, or dangerous advice. Be strict."},
{"role": "Empathy", "model_key": "jury_empathy", "instruction": "Check if the tone is supportive and non-judgmental."},
{"role": "Hallucination", "model_key": "jury_hallucination", "instruction": "Verify every claim in the answer is supported by the context."}
]
results = []
for member in jury_config:
print(f" - Querying {member['role']} ({member['model_key']})...")
judge_prompt = f"""
You are a Jury Member: {member['role']}.
Your specific instruction: {member['instruction']}
--- SOURCE CONTEXT ---
{context}
----------------------
--- PROPOSED ANSWER ---
{draft_answer}
----------------------
Task: Analyze the proposed answer.
1. Does it violate your instruction?
2. Is it factually grounded in the source context?
Output format strictly as JSON:
{{ "verdict": "APPROVE" or "VETO", "reason": "Your reasoning..." }}
"""
response_text = self.llm.generate(
self.config['models'][member['model_key']],
"You are a strict JSON validator. Output ONLY valid JSON.",
judge_prompt,
temperature=0.1 # Low temp for deterministic judging
)
# Parse JSON response
try:
# Clean up potential markdown code blocks
clean_response = response_text.strip().replace("```json", "").replace("```", "")
vote = json.loads(clean_response)
except json.JSONDecodeError:
# Fallback if model fails to output JSON
vote = {"verdict": "APPROVE", "reason": "Failed to parse response, defaulting to Approve."}
vote['member'] = member['role']
results.append(vote)
            if vote.get('verdict', '').upper() == 'VETO':
print(f" ❌ VETO by {member['role']}: {vote['reason']}")
return False, results
print(" ✅ Unanimous Approval.")
return True, results
def process_query(self, query):
# 1. RAG Retrieval
context = self.retrieve_context(query)
# 2. Generation
draft = self.run_generator(query, context)
print(f"\n--- Draft Answer ---\n{draft}\n--------------------")
# 3. Jury Voting
approved, votes = self.run_jury_deliberation(query, context, draft)
if approved:
return {
"status": "SUCCESS",
"answer": draft,
"votes": votes
}
else:
return {
"status": "REJECTED",
"answer": "The Jury could not agree on a safe or accurate answer. Please consult a professional or try rephrasing.",
"votes": votes
}
# --- Main Execution ---
if __name__ == "__main__":
config = load_config()
    # Interactive Loop (construct the system first so loading messages
    # appear before the banner)
    system = DBTJurySystem(config)
    print("\nDBT Quorum System Active (Type 'exit' to quit)")
while True:
user_query = input("\nUser: ")
if user_query.lower() in ['exit', 'quit']:
break
response = system.process_query(user_query)
print("\n===== FINAL OUTPUT =====")
print(f"Status: {response['status']}")
print(f"Response: {response['answer']}")
# Optionally print vote breakdown
# print(f"Vote Details: {json.dumps(response['votes'], indent=2)}")
print("========================")
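For a quick non-interactive smoke test, the same pipeline can also be driven from a short script. A minimal sketch (the file name and query are hypothetical; it assumes config.json and dbt_knowledge.json already exist):

# smoke_test.py (hypothetical): one-shot run of the jury pipeline
from ai import DBTJurySystem, load_config

system = DBTJurySystem(load_config())
result = system.process_query("What is the DEAR MAN skill used for?")
print(result["status"])
print(result["answer"])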

config.json · 12 lines · Normal file

@@ -0,0 +1,12 @@
{
"openrouter_api_key": "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",
"embedding_file": "dbt_knowledge.json",
"models": {
"generator": "moonshotai/kimi-k2.5",
"jury_clinical": "z-ai/glm-5",
"jury_safety": "deepseek/deepseek-v3.2",
"jury_empathy": "openai/gpt-4o-2024-08-06",
"jury_hallucination": "qwen/qwen3-235b-a22b-2507"
},
"system_prompt": "You are a DBT assistant. Answer based ONLY on the provided context."
}
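Committing a live API key is a security risk; a safer pattern keeps the secret out of version control. A minimal sketch (assumes an OPENROUTER_API_KEY environment variable; this is an alternative, not what ai.py currently does):

# Hypothetical variant of load_config() that prefers an env var over the file
import json
import os

def load_config(path='config.json'):
    with open(path, 'r') as f:
        config = json.load(f)
    # The environment variable, if set, overrides the committed value
    config['openrouter_api_key'] = os.environ.get(
        'OPENROUTER_API_KEY', config.get('openrouter_api_key', '')
    )
    return config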

dbt.epub · BIN · Normal file

Binary file not shown.

dbt_knowledge.json · 1 line · Normal file

File diff suppressed because one or more lines are too long
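The suppressed file is the embedding store that embedder.py produces and ai.py loads. Each record follows the shape below (the section title and values are illustrative; real embedding arrays are full-length vectors, truncated here):

[
  {
    "id": "Distress Tolerance Skills_0",
    "source": "Distress Tolerance Skills",
    "text": "DBT teaches Distress Tolerance skills...",
    "embedding": [0.0132, -0.0457, 0.0089]
  }
]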

embedder.py · 164 lines · Normal file

@@ -0,0 +1,164 @@
import json
import asyncio

from bs4 import BeautifulSoup
from ebooklib import ITEM_DOCUMENT, epub
from openai import AsyncOpenAI
# --- Configuration ---
CONFIG_PATH = 'config.json'
INPUT_EPUB = 'dbt.epub'
OUTPUT_FILE = 'dbt_knowledge.json'
# Parallelization Settings
BATCH_SIZE = 50
MAX_CONCURRENT_REQUESTS = 10
def load_config():
with open(CONFIG_PATH, 'r') as f:
return json.load(f)
def get_text_from_epub(epub_path):
"""Extracts text from an EPUB file, organizing by chapter/section."""
print(f"Reading EPUB: {epub_path}...")
book = epub.read_epub(epub_path)
text_sections = []
for item in book.get_items():
if item.get_type() == ITEM_DOCUMENT:
soup = BeautifulSoup(item.get_content(), 'html.parser')
for script in soup(["script", "style"]):
script.decompose()
text = soup.get_text(separator='\n')
lines = [line.strip() for line in text.splitlines() if line.strip()]
clean_text = '\n'.join(lines)
if len(clean_text) > 100:
title_tag = soup.find(['h1', 'h2', 'h3'])
title = title_tag.get_text().strip() if title_tag else "Unknown Section"
text_sections.append({"title": title, "text": clean_text})
print(f"Extracted {len(text_sections)} sections.")
return text_sections
# --- Chunking ---
def split_text(text, max_chars=1000):
    """Splits text into chunks of roughly max_chars, on paragraph boundaries.

    A single paragraph longer than max_chars becomes one oversized chunk.
    """
    chunks = []
    current_chunk = ""
    for para in text.split('\n'):
        if len(current_chunk) + len(para) < max_chars:
            current_chunk += para + "\n"
        else:
            # Avoid emitting empty chunks when a long paragraph arrives
            # while the current chunk is still empty
            if current_chunk.strip():
                chunks.append(current_chunk.strip())
            current_chunk = para + "\n"
    if current_chunk.strip():
        chunks.append(current_chunk.strip())
    return chunks
async def process_batch(client, batch_data, semaphore, pbar_counter):
"""
Sends a batch of chunks to the API concurrently.
batch_data is a list of dicts: {'source': ..., 'index': ..., 'text': ...}
"""
async with semaphore:
try:
# Extract just the text strings for the API input
inputs = [item['text'] for item in batch_data]
response = await client.embeddings.create(
model="qwen/qwen3-embedding-8b",
input=inputs
)
# Map results back to the data
results = []
for i, data in enumerate(response.data):
original_item = batch_data[i]
results.append({
"id": f"{original_item['source']}_{original_item['index']}",
"source": original_item['source'],
"text": original_item['text'],
"embedding": data.embedding
})
# Update progress counter
pbar_counter[0] += len(batch_data)
print(f"\r Processed {pbar_counter[0]} chunks...", end='', flush=True)
return results
except Exception as e:
print(f"\nError processing batch: {e}")
return []
async def main_async():
config = load_config()
client = AsyncOpenAI(
base_url="https://openrouter.ai/api/v1",
api_key=config['openrouter_api_key']
)
# 1. Extract Text
sections = get_text_from_epub(INPUT_EPUB)
# 2. Prepare all chunks
print("Preparing chunks...")
all_chunks = []
for section in sections:
if "copyright" in section['title'].lower() or "contents" in section['title'].lower():
continue
        chunks = split_text(section['text'], max_chars=800)
for i, text_content in enumerate(chunks):
if text_content:
all_chunks.append({
"source": section['title'],
"index": i,
"text": text_content
})
print(f"Total chunks to embed: {len(all_chunks)}")
# 3. Create Batches
batches = []
for i in range(0, len(all_chunks), BATCH_SIZE):
batch = all_chunks[i : i + BATCH_SIZE]
batches.append(batch)
# 4. Process Concurrently
semaphore = asyncio.Semaphore(MAX_CONCURRENT_REQUESTS)
pbar_counter = [0]
print("Generating embeddings in parallel...")
tasks = []
for batch in batches:
tasks.append(process_batch(client, batch, semaphore, pbar_counter))
results_nested = await asyncio.gather(*tasks)
# Flatten results
final_data = [item for sublist in results_nested for item in sublist]
# 5. Save to JSON
print(f"\nSaving {len(final_data)} chunks to {OUTPUT_FILE}...")
with open(OUTPUT_FILE, 'w') as f:
json.dump(final_data, f)
print("Done!")
if __name__ == "__main__":
asyncio.run(main_async())
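
Before wiring the output into ai.py, it is worth sanity-checking that every vector has the same dimension. A minimal sketch (hypothetical check script; assumes embedder.py has already been run):

# check_knowledge.py (hypothetical): verify the embedding store is consistent
import json

with open('dbt_knowledge.json', 'r') as f:
    data = json.load(f)

dims = {len(item['embedding']) for item in data}
print(f"{len(data)} chunks; embedding dimensions found: {dims}")
# All documents must share one dimension, and it must match the model
# ai.py uses for query embeddings
assert len(dims) == 1, "Inconsistent embedding dimensions"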