chore(MAT-13): Switch chunk summarization from claude-haiku to gemini-flash

Reduces cost for conversation chunk summarization in live indexing.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
Author: Christian Gick
Date:   2026-03-01 18:27:43 +02:00
Parent: 9d2e2ddcf7
Commit: fecf99ef60

bot.py (2 changed lines)

@@ -1231,7 +1231,7 @@ class Bot:
         chunk_text = f"User: {user_message}\nAssistant: {ai_reply}"
         try:
             resp = await self.llm.chat.completions.create(
-                model="claude-haiku",
+                model="gemini-flash",
                 messages=[
                     {"role": "system", "content": (
                         "Summarize this conversation exchange in 1-2 sentences for search indexing. "