feat(CF-1189): Add auto-rename, fix system prompt, load room settings
- Add `!ai auto-rename on/off` command to auto-name rooms based on conversation topic
- Persist auto-rename setting via room state event (`ai.agiliton.auto_rename`)
- Generate short title via LLM after first AI response, set as `m.room.name`
- Load persisted model and auto-rename settings lazily from room state
- Strengthen system prompt: prohibit asking about document storage, file locations
- Fix bot suggesting `!ai` commands and admin contact to users

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
114
bot.py
114
bot.py
@@ -31,6 +31,7 @@ from livekit import api
|
|||||||
BOT_DEVICE_ID = "AIBOT"
|
BOT_DEVICE_ID = "AIBOT"
|
||||||
CALL_MEMBER_TYPE = "org.matrix.msc3401.call.member"
|
CALL_MEMBER_TYPE = "org.matrix.msc3401.call.member"
|
||||||
MODEL_STATE_TYPE = "ai.agiliton.model"
|
MODEL_STATE_TYPE = "ai.agiliton.model"
|
||||||
|
RENAME_STATE_TYPE = "ai.agiliton.auto_rename"
|
||||||
|
|
||||||
logger = logging.getLogger("matrix-ai-bot")
|
logger = logging.getLogger("matrix-ai-bot")
|
||||||
|
|
||||||
@@ -52,13 +53,23 @@ WILDFILES_ORG = os.environ.get("WILDFILES_ORG", "")
|
|||||||
|
|
||||||
SYSTEM_PROMPT = """You are a helpful AI assistant in a Matrix chat room.
|
SYSTEM_PROMPT = """You are a helpful AI assistant in a Matrix chat room.
|
||||||
Keep answers concise but thorough. Use markdown formatting when helpful.
|
Keep answers concise but thorough. Use markdown formatting when helpful.
|
||||||
If document context is provided, use it to inform your answers."""
|
|
||||||
|
IMPORTANT RULES — FOLLOW THESE STRICTLY:
|
||||||
|
- When document context is provided below, use it to answer. Always include any links.
|
||||||
|
- NEVER tell the user to run commands or type anything special. No commands exist.
|
||||||
|
- NEVER mention "!ai", "!ai search", "!ai read", or any slash/bang commands.
|
||||||
|
- NEVER say you cannot access files, documents, or links.
|
||||||
|
- NEVER ask the user where documents are stored, how they were uploaded, or under what filename.
|
||||||
|
- NEVER suggest contacting an administrator, using a web interface, or checking another system.
|
||||||
|
- NEVER ask follow-up questions about document storage or file locations.
|
||||||
|
- If no relevant documents were found, simply say you don't have information on that topic and ask if you can help with something else. Do NOT speculate about why or suggest the user look elsewhere."""
|
||||||
|
|
||||||
HELP_TEXT = """**AI Bot Commands**
|
HELP_TEXT = """**AI Bot Commands**
|
||||||
- `!ai help` — Show this help
|
- `!ai help` — Show this help
|
||||||
- `!ai models` — List available models
|
- `!ai models` — List available models
|
||||||
- `!ai set-model <model>` — Set model for this room
|
- `!ai set-model <model>` — Set model for this room
|
||||||
- `!ai search <query>` — Search documents (WildFiles)
|
- `!ai search <query>` — Search documents (WildFiles)
|
||||||
|
- `!ai auto-rename on|off` — Auto-rename room based on conversation topic
|
||||||
- **@mention the bot** or start with `!ai` for a regular AI response"""
|
- **@mention the bot** or start with `!ai` for a regular AI response"""
|
||||||
|
|
||||||
|
|
||||||
@@ -77,7 +88,7 @@ class DocumentRAG:
|
|||||||
async with httpx.AsyncClient(timeout=5.0) as client:
|
async with httpx.AsyncClient(timeout=5.0) as client:
|
||||||
resp = await client.post(
|
resp = await client.post(
|
||||||
f"{self.base_url}/api/v1/rag/search",
|
f"{self.base_url}/api/v1/rag/search",
|
||||||
json={"query": query, "org": self.org, "top_k": top_k},
|
json={"query": query, "organization": self.org, "limit": top_k},
|
||||||
)
|
)
|
||||||
resp.raise_for_status()
|
resp.raise_for_status()
|
||||||
return resp.json().get("results", [])
|
return resp.json().get("results", [])
|
||||||
@@ -88,11 +99,24 @@ class DocumentRAG:
|
|||||||
def format_context(self, results: list[dict]) -> str:
|
def format_context(self, results: list[dict]) -> str:
|
||||||
if not results:
|
if not results:
|
||||||
return ""
|
return ""
|
||||||
parts = ["**Relevant documents:**"]
|
parts = ["The following documents were found in our document archive:"]
|
||||||
for r in results:
|
for r in results:
|
||||||
|
doc_id = r.get("id", "")
|
||||||
title = r.get("title", r.get("filename", "Untitled"))
|
title = r.get("title", r.get("filename", "Untitled"))
|
||||||
snippet = r.get("content", r.get("text", ""))[:500]
|
filename = r.get("metadata", {}).get("original_filename", "")
|
||||||
parts.append(f"- **{title}**: {snippet}")
|
category = r.get("category", "")
|
||||||
|
date = r.get("detected_date", "")
|
||||||
|
link = r.get("metadata", {}).get("source_url", "")
|
||||||
|
parts.append(f"- Title: {title}")
|
||||||
|
if filename:
|
||||||
|
parts.append(f" Filename: {filename}")
|
||||||
|
if category:
|
||||||
|
parts.append(f" Category: {category}")
|
||||||
|
if date:
|
||||||
|
parts.append(f" Date: {date}")
|
||||||
|
if link:
|
||||||
|
parts.append(f" Link: {link}")
|
||||||
|
parts.append("\nUse this information to answer the user. Always include document links when referencing documents.")
|
||||||
return "\n".join(parts)
|
return "\n".join(parts)
|
||||||
|
|
||||||
|
|
||||||
@@ -116,6 +140,9 @@ class Bot:
|
|||||||
self.rag = DocumentRAG(WILDFILES_BASE_URL, WILDFILES_ORG)
|
self.rag = DocumentRAG(WILDFILES_BASE_URL, WILDFILES_ORG)
|
||||||
self.llm = AsyncOpenAI(base_url=LITELLM_URL, api_key=LITELLM_KEY) if LITELLM_URL else None
|
self.llm = AsyncOpenAI(base_url=LITELLM_URL, api_key=LITELLM_KEY) if LITELLM_URL else None
|
||||||
self.room_models: dict[str, str] = {} # room_id -> model name
|
self.room_models: dict[str, str] = {} # room_id -> model name
|
||||||
|
self.auto_rename_rooms: set[str] = set() # rooms with auto-rename enabled
|
||||||
|
self.renamed_rooms: set[str] = set() # rooms already renamed this session
|
||||||
|
self._loaded_rooms: set[str] = set() # rooms where we've loaded state
|
||||||
self._sync_token_received = False
|
self._sync_token_received = False
|
||||||
self._verifications: dict[str, dict] = {} # txn_id -> verification state
|
self._verifications: dict[str, dict] = {} # txn_id -> verification state
|
||||||
|
|
||||||
@@ -269,6 +296,26 @@ class Bot:
|
|||||||
except Exception:
|
except Exception:
|
||||||
logger.exception("Failed to leave call in %s", room_id)
|
logger.exception("Failed to leave call in %s", room_id)
|
||||||
|
|
||||||
|
async def _load_room_settings(self, room_id: str):
|
||||||
|
"""Load persisted model and auto-rename settings from room state."""
|
||||||
|
if room_id in self._loaded_rooms:
|
||||||
|
return
|
||||||
|
self._loaded_rooms.add(room_id)
|
||||||
|
for state_type, target in [
|
||||||
|
(MODEL_STATE_TYPE, "model"),
|
||||||
|
(RENAME_STATE_TYPE, "rename"),
|
||||||
|
]:
|
||||||
|
try:
|
||||||
|
resp = await self.client.room_get_state_event(room_id, state_type, "")
|
||||||
|
if hasattr(resp, "content"):
|
||||||
|
content = resp.content
|
||||||
|
if target == "model" and "model" in content:
|
||||||
|
self.room_models[room_id] = content["model"]
|
||||||
|
elif target == "rename" and content.get("enabled"):
|
||||||
|
self.auto_rename_rooms.add(room_id)
|
||||||
|
except Exception:
|
||||||
|
pass # State event doesn't exist yet
|
||||||
|
|
||||||
async def on_text_message(self, room, event: RoomMessageText):
|
async def on_text_message(self, room, event: RoomMessageText):
|
||||||
"""Handle text messages: commands and AI responses."""
|
"""Handle text messages: commands and AI responses."""
|
||||||
if event.sender == BOT_USER:
|
if event.sender == BOT_USER:
|
||||||
@@ -280,6 +327,7 @@ class Bot:
|
|||||||
if time.time() - server_ts > 30:
|
if time.time() - server_ts > 30:
|
||||||
return
|
return
|
||||||
|
|
||||||
|
await self._load_room_settings(room.room_id)
|
||||||
body = event.body.strip()
|
body = event.body.strip()
|
||||||
|
|
||||||
# Command handling
|
# Command handling
|
||||||
@@ -347,6 +395,26 @@ class Bot:
|
|||||||
logger.debug("Could not persist model to room state", exc_info=True)
|
logger.debug("Could not persist model to room state", exc_info=True)
|
||||||
await self._send_text(room.room_id, f"Model set to `{model}` for this room.")
|
await self._send_text(room.room_id, f"Model set to `{model}` for this room.")
|
||||||
|
|
||||||
|
elif cmd.startswith("auto-rename "):
|
||||||
|
setting = cmd[12:].strip().lower()
|
||||||
|
if setting not in ("on", "off"):
|
||||||
|
await self._send_text(room.room_id, "Usage: `!ai auto-rename on|off`")
|
||||||
|
return
|
||||||
|
enabled = setting == "on"
|
||||||
|
if enabled:
|
||||||
|
self.auto_rename_rooms.add(room.room_id)
|
||||||
|
else:
|
||||||
|
self.auto_rename_rooms.discard(room.room_id)
|
||||||
|
try:
|
||||||
|
await self.client.room_put_state(
|
||||||
|
room.room_id, RENAME_STATE_TYPE,
|
||||||
|
{"enabled": enabled}, state_key="",
|
||||||
|
)
|
||||||
|
except Exception:
|
||||||
|
logger.debug("Could not persist auto-rename to room state", exc_info=True)
|
||||||
|
status = "enabled" if enabled else "disabled"
|
||||||
|
await self._send_text(room.room_id, f"Auto-rename **{status}** for this room.")
|
||||||
|
|
||||||
elif cmd.startswith("search "):
|
elif cmd.startswith("search "):
|
||||||
query = cmd[7:].strip()
|
query = cmd[7:].strip()
|
||||||
if not query:
|
if not query:
|
||||||
@@ -379,7 +447,10 @@ class Bot:
|
|||||||
doc_results = await self.rag.search(user_message)
|
doc_results = await self.rag.search(user_message)
|
||||||
doc_context = self.rag.format_context(doc_results)
|
doc_context = self.rag.format_context(doc_results)
|
||||||
if doc_context:
|
if doc_context:
|
||||||
|
logger.info("RAG found %d docs for: %s", len(doc_results), user_message[:50])
|
||||||
messages.append({"role": "system", "content": doc_context})
|
messages.append({"role": "system", "content": doc_context})
|
||||||
|
else:
|
||||||
|
logger.info("RAG found 0 docs for: %s", user_message[:50])
|
||||||
|
|
||||||
# Fetch last N messages from room via API
|
# Fetch last N messages from room via API
|
||||||
try:
|
try:
|
||||||
@@ -406,10 +477,43 @@ class Bot:
|
|||||||
)
|
)
|
||||||
reply = resp.choices[0].message.content
|
reply = resp.choices[0].message.content
|
||||||
await self._send_text(room.room_id, reply)
|
await self._send_text(room.room_id, reply)
|
||||||
|
# Auto-rename room after first AI response
|
||||||
|
if (room.room_id in self.auto_rename_rooms
|
||||||
|
and room.room_id not in self.renamed_rooms):
|
||||||
|
await self._auto_rename_room(room, user_message, reply)
|
||||||
except Exception:
|
except Exception:
|
||||||
logger.exception("LLM call failed")
|
logger.exception("LLM call failed")
|
||||||
await self._send_text(room.room_id, "Sorry, I couldn't generate a response.")
|
await self._send_text(room.room_id, "Sorry, I couldn't generate a response.")
|
||||||
|
|
||||||
|
async def _auto_rename_room(self, room, user_message: str, ai_reply: str):
|
||||||
|
"""Generate a short topic title and set it as the room name."""
|
||||||
|
try:
|
||||||
|
resp = await self.llm.chat.completions.create(
|
||||||
|
model=self.room_models.get(room.room_id, DEFAULT_MODEL),
|
||||||
|
messages=[
|
||||||
|
{"role": "system", "content": (
|
||||||
|
"Generate a very short room title (3-6 words, no quotes) "
|
||||||
|
"that captures the topic of this conversation. "
|
||||||
|
"Reply with ONLY the title, nothing else."
|
||||||
|
)},
|
||||||
|
{"role": "user", "content": user_message},
|
||||||
|
{"role": "assistant", "content": ai_reply[:200]},
|
||||||
|
{"role": "user", "content": "What is a good short title for this conversation?"},
|
||||||
|
],
|
||||||
|
max_tokens=30,
|
||||||
|
)
|
||||||
|
title = resp.choices[0].message.content.strip().strip('"\'')
|
||||||
|
if not title or len(title) > 80:
|
||||||
|
return
|
||||||
|
await self.client.room_put_state(
|
||||||
|
room.room_id, "m.room.name",
|
||||||
|
{"name": title}, state_key="",
|
||||||
|
)
|
||||||
|
self.renamed_rooms.add(room.room_id)
|
||||||
|
logger.info("Auto-renamed room %s to: %s", room.room_id, title)
|
||||||
|
except Exception:
|
||||||
|
logger.debug("Auto-rename failed", exc_info=True)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _md_to_html(text: str) -> str:
|
def _md_to_html(text: str) -> str:
|
||||||
"""Minimal markdown to HTML for Matrix formatted_body."""
|
"""Minimal markdown to HTML for Matrix formatted_body."""
|
||||||
|
|||||||
Reference in New Issue
Block a user