fix(llm): use text-embedding-3-small with dimensions=1024 to match TM column

LiteLLM doesn't expose mxbai-embed-large; text-embedding-3-small with the
`dimensions` param produces 1024-dim vectors that fit translation_memory.embedding.

Refs: CF-3125
This commit is contained in:
Christian Gick
2026-04-14 16:33:57 +03:00
parent 220d8cfae8
commit e356cd93b5

View File

@@ -5,7 +5,10 @@
// LiteLLM proxy connection settings; every value is overridable via environment.
const LITELLM_URL = process.env.LITELLM_URL ?? 'http://llm:4000';
const LITELLM_KEY = process.env.LITELLM_API_KEY ?? '';
const TRANSLATE_MODEL = process.env.TRANSLATE_MODEL ?? 'gemini-2.5-flash';
const EMBED_MODEL = process.env.TRANSLATE_EMBED_MODEL ?? 'text-embedding-3-small';
// SmartTranslate translation_memory.embedding is vector(1024); text-embedding-3-*
// supports runtime dimension reduction via the `dimensions` parameter.
// Number.parseInt with an explicit radix — the bare global parseInt without a
// radix is an idiom violation (and NaN on a bad env value surfaces loudly downstream).
const EMBED_DIMENSIONS = Number.parseInt(process.env.TRANSLATE_EMBED_DIMENSIONS ?? '1024', 10);
function headers() { function headers() {
const h = { 'Content-Type': 'application/json' }; const h = { 'Content-Type': 'application/json' };
@@ -17,7 +20,7 @@ export async function embed(text) {
const r = await fetch(`${LITELLM_URL}/v1/embeddings`, { const r = await fetch(`${LITELLM_URL}/v1/embeddings`, {
method: 'POST', method: 'POST',
headers: headers(), headers: headers(),
body: JSON.stringify({ model: EMBED_MODEL, input: text }), body: JSON.stringify({ model: EMBED_MODEL, input: text, dimensions: EMBED_DIMENSIONS }),
}); });
if (!r.ok) throw new Error(`embed ${r.status}: ${await r.text()}`); if (!r.ok) throw new Error(`embed ${r.status}: ${await r.text()}`);
const j = await r.json(); const j = await r.json();