feat: initial learning-mcp HTTP server (CF-3094)
This commit is contained in:
3
.gitignore
vendored
Normal file
3
.gitignore
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
node_modules/
|
||||
*.log
|
||||
.env
|
||||
19
Dockerfile
Normal file
19
Dockerfile
Normal file
@@ -0,0 +1,19 @@
|
||||
FROM node:20-alpine

WORKDIR /app

# Install production deps first so this layer is cached until the manifest
# changes. The lockfile is optional (package-lock.json*), so `npm install`
# is used instead of `npm ci`, which hard-fails without a lockfile.
COPY package.json package-lock.json* ./
RUN npm install --omit=dev --no-audit --no-fund

COPY src ./src

ENV NODE_ENV=production \
    MCP_HTTP_HOST=0.0.0.0 \
    MCP_HTTP_PORT=9221

EXPOSE 9221

# --start-period gives the server time to boot before failed probes count
# toward the retry limit.
HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
  CMD wget -qO- http://127.0.0.1:9221/health >/dev/null || exit 1

# Drop root: the server only reads /app and listens on 9221.
USER node

CMD ["node", "src/http-server.js"]
48
README.md
Normal file
48
README.md
Normal file
@@ -0,0 +1,48 @@
|
||||
# learning-mcp
|
||||
|
||||
HTTP MCP server exposing the pgvector learnings DB. Replaces direct `psql`/`ssh` access used by `rag-query`, `learn-seed`, `learn-inject`, `learn-context`.
|
||||
|
||||
- Transport: streamable HTTP (`/mcp`)
|
||||
- Port: `9221`
|
||||
- Backend: `learnings_embeddings` on the infra postgres container
|
||||
- Embeddings: LiteLLM `/v1/embeddings` (`text-embedding-3-small`, 1536 dims)
|
||||
|
||||
## Tools
|
||||
|
||||
| Tool | Replaces |
|
||||
|---|---|
|
||||
| `learning_query` | `rag-query` |
|
||||
| `learning_search_fts` | `pg_search_fts` in `pglearn.sh` |
|
||||
| `learning_inject` | `learn-inject` |
|
||||
| `learning_context` | `learn-context` |
|
||||
| `learning_add` | `pg_insert_learning` |
|
||||
| `learning_mark_applied` | `pg_mark_applied` |
|
||||
|
||||
## Env
|
||||
|
||||
```
|
||||
PGHOST=postgres # docker service name on infra
|
||||
PGPORT=5432
|
||||
PGUSER=agiliton
|
||||
PGPASSWORD=... # from vault ag.postgres.agiliton_password
|
||||
PGDATABASE=agiliton
|
||||
LITELLM_URL=https://llm.agiliton.cloud
|
||||
LITELLM_API_KEY=... # vault litellm.ai_coder_key or similar
|
||||
EMBEDDING_MODEL=text-embedding-3-small
|
||||
SENTRY_DSN= # optional
|
||||
```
|
||||
|
||||
## Local dev
|
||||
|
||||
```
|
||||
npm install
|
||||
LITELLM_API_KEY=$(vault get litellm.ai_coder_key) \
|
||||
PGHOST=localhost PGPORT=5433 \
|
||||
node src/http-server.js
|
||||
|
||||
curl -s http://127.0.0.1:9221/health
|
||||
```
|
||||
|
||||
## Deploy
|
||||
|
||||
Built as `gitea.agiliton.internal:3000/christian/learning-mcp:latest` and attached to the infra docker-compose stack with the postgres network. Registered in `/opt/apps/litellm/config.yaml` as a streamable-HTTP MCP server (CF-3081 pattern).
|
||||
2595
package-lock.json
generated
Normal file
2595
package-lock.json
generated
Normal file
File diff suppressed because it is too large
Load Diff
17
package.json
Normal file
17
package.json
Normal file
@@ -0,0 +1,17 @@
|
||||
{
|
||||
"name": "learning-mcp",
|
||||
"version": "1.0.0",
|
||||
"type": "module",
|
||||
"main": "src/server.js",
|
||||
"scripts": {
|
||||
"start": "node src/http-server.js",
|
||||
"start:http": "node src/http-server.js"
|
||||
},
|
||||
"dependencies": {
|
||||
"@modelcontextprotocol/sdk": "^1.0.4",
|
||||
"@sentry/node": "^10.39.0",
|
||||
"express": "^4.19.2",
|
||||
"pg": "^8.13.0",
|
||||
"pgvector": "^0.2.0"
|
||||
}
|
||||
}
|
||||
42
src/db.js
Normal file
42
src/db.js
Normal file
@@ -0,0 +1,42 @@
|
||||
#!/usr/bin/env node
/**
 * Postgres connection + pgvector helpers.
 * Expects to run on the infra VM where postgres is reachable.
 */
import pg from 'pg';
import pgvector from 'pgvector/pg';

const { Pool } = pg;

const pool = new Pool({
  host: process.env.PGHOST ?? 'postgres',
  port: Number.parseInt(process.env.PGPORT ?? '5432', 10), // always pass a radix
  user: process.env.PGUSER ?? 'agiliton',
  password: process.env.PGPASSWORD ?? '',
  database: process.env.PGDATABASE ?? 'agiliton',
  max: 5,
  idleTimeoutMillis: 30_000,
  connectionTimeoutMillis: 5_000,
});

// node-postgres emits 'error' on the pool for idle clients (e.g. a backend
// restart); without a listener that is an unhandled 'error' event and the
// process crashes.
pool.on('error', (err) => {
  console.error('[learning-mcp] idle pg client error:', err.message);
});

// Register pgvector type parsers on every new connection. The listener is
// async, so catch failures explicitly instead of leaking an unhandled
// rejection.
pool.on('connect', async (client) => {
  try {
    await pgvector.registerTypes(client);
  } catch (err) {
    console.error('[learning-mcp] pgvector.registerTypes failed:', err.message);
  }
});

/**
 * Run a parameterized SQL statement and return its rows.
 * @param {string} sql - statement with $1..$n placeholders
 * @param {Array} [params] - positional parameters
 * @returns {Promise<Array<object>>} result rows
 */
export async function query(sql, params = []) {
  const client = await pool.connect();
  try {
    const res = await client.query(sql, params);
    return res.rows;
  } finally {
    client.release(); // always return the client, even when the query throws
  }
}

/** Serialize a JS number[] into pgvector's SQL text representation. */
export function toVector(arr) {
  return pgvector.toSql(arr);
}

/** Drain and close the pool (graceful shutdown). */
export async function close() {
  await pool.end();
}
26
src/embeddings.js
Normal file
26
src/embeddings.js
Normal file
@@ -0,0 +1,26 @@
|
||||
#!/usr/bin/env node
/**
 * Embeddings via LiteLLM (agiliton gateway).
 * Model: text-embedding-3-small (1536 dims).
 */
const LITELLM_URL = process.env.LITELLM_URL ?? 'https://llm.agiliton.cloud';
const EMBEDDING_MODEL = process.env.EMBEDDING_MODEL ?? 'text-embedding-3-small';

/**
 * Embed a single text via the LiteLLM /v1/embeddings endpoint.
 * @param {string} text - text to embed
 * @returns {Promise<number[]>} embedding vector
 * @throws {Error} when LITELLM_API_KEY is unset, the HTTP call fails, or the
 *   response carries no embedding. (Previously a malformed response made this
 *   return undefined, which crashed later inside pgvector with a confusing
 *   error instead of failing here.)
 */
export async function embed(text) {
  const key = process.env.LITELLM_API_KEY;
  if (!key) throw new Error('LITELLM_API_KEY not set');
  const res = await fetch(`${LITELLM_URL}/v1/embeddings`, {
    method: 'POST',
    headers: {
      'content-type': 'application/json',
      authorization: `Bearer ${key}`,
    },
    body: JSON.stringify({ model: EMBEDDING_MODEL, input: text }),
  });
  if (!res.ok) {
    const body = await res.text();
    throw new Error(`Embeddings failed ${res.status}: ${body}`);
  }
  const json = await res.json();
  const embedding = json.data?.[0]?.embedding;
  if (!Array.isArray(embedding)) {
    throw new Error('Embeddings response contained no data[0].embedding');
  }
  return embedding;
}
61
src/http-server.js
Normal file
61
src/http-server.js
Normal file
@@ -0,0 +1,61 @@
|
||||
#!/usr/bin/env node
/**
 * Streamable-HTTP transport for the learning MCP server.
 *
 * One StreamableHTTPServerTransport + Server pair is created per MCP session;
 * sessions are keyed by the `mcp-session-id` header the SDK assigns during
 * initialize.
 */
import express from 'express';
import { randomUUID } from 'crypto';
import { StreamableHTTPServerTransport } from '@modelcontextprotocol/sdk/server/streamableHttp.js';
import { createServer } from './server.js';

const PORT = Number.parseInt(process.env.MCP_HTTP_PORT ?? '9221', 10); // radix 10
const HOST = process.env.MCP_HTTP_HOST ?? '0.0.0.0';

const transports = new Map();     // session id -> transport
const sessionServers = new Map(); // session id -> Server, kept alive per session

const app = express();
app.use(express.json({ limit: '2mb' }));

app.post('/mcp', async (req, res) => {
  try {
    const sid = req.headers['mcp-session-id'];
    if (sid && transports.has(sid)) {
      // Existing session: route the message to its transport.
      await transports.get(sid).handleRequest(req, res, req.body);
      return;
    }
    // New session (initialize request): create a transport + server pair.
    const transport = new StreamableHTTPServerTransport({
      sessionIdGenerator: () => randomUUID(),
      onsessioninitialized: (newSid) => { transports.set(newSid, transport); },
    });
    transport.onclose = () => {
      const closedSid = transport.sessionId;
      if (closedSid) { transports.delete(closedSid); sessionServers.delete(closedSid); }
    };
    const srv = createServer();
    await srv.connect(transport);
    const newSid = transport.sessionId;
    if (newSid) sessionServers.set(newSid, srv);
    await transport.handleRequest(req, res, req.body);
  } catch (err) {
    console.error('[learning-mcp] POST /mcp error:', err);
    if (!res.headersSent) res.status(500).json({ error: 'Internal' });
  }
});

// GET (SSE stream) and DELETE (session teardown) share the same session
// lookup, so register one handler for both instead of duplicating it.
const handleSessionRequest = async (req, res) => {
  const sid = req.headers['mcp-session-id'];
  if (!sid || !transports.has(sid)) { res.status(400).json({ error: 'bad session' }); return; }
  await transports.get(sid).handleRequest(req, res);
};
app.get('/mcp', handleSessionRequest);
app.delete('/mcp', handleSessionRequest);

app.get('/health', (_req, res) =>
  res.json({ status: 'ok', server: 'learning-mcp', activeSessions: transports.size }));

// Log to stderr so stdout stays clean (MCP convention).
app.listen(PORT, HOST, () =>
  console.error(`learning-mcp: HTTP on http://${HOST}:${PORT}/mcp`));

process.on('SIGINT', () => process.exit(0));
process.on('SIGTERM', () => process.exit(0));
358
src/server.js
Normal file
358
src/server.js
Normal file
@@ -0,0 +1,358 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Learning MCP Server — pgvector-backed learnings + anti-patterns
|
||||
*
|
||||
* Tools:
|
||||
* learning_query — semantic (vector) search over learnings_embeddings
|
||||
* learning_search_fts — PostgreSQL full-text search fallback
|
||||
* learning_inject — get top learnings for task injection (by category/task)
|
||||
* learning_context — project-scoped learnings (Project + Related + Anti-patterns)
|
||||
* learning_add — insert a new learning (auto-embeds)
|
||||
* learning_mark_applied — increment applied_count by id
|
||||
*
|
||||
* Replaces direct psql access used by rag-query, learn-seed, learn-inject, learn-context.
|
||||
*/
|
||||
import * as Sentry from '@sentry/node';
|
||||
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
|
||||
import { CallToolRequestSchema, ListToolsRequestSchema } from '@modelcontextprotocol/sdk/types.js';
|
||||
|
||||
import { query, toVector } from './db.js';
|
||||
import { embed } from './embeddings.js';
|
||||
|
||||
// Sentry is opt-in: initialise only when a DSN is configured via env.
const sentryDsn = process.env.SENTRY_DSN;
if (sentryDsn) {
  Sentry.init({
    dsn: sentryDsn,
    environment: process.env.SENTRY_ENVIRONMENT ?? 'production',
    tracesSampleRate: 0.1, // sample 10% of transactions
  });
}
|
||||
// ---------------------------------------------------------------------------
|
||||
// Data access
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
 * Vector-similarity search over learnings_embeddings.
 * Embeds the query text, ranks by cosine distance, and drops rows below
 * minSimilarity. Only approved (or unreviewed), non-deprecated rows with an
 * embedding are considered.
 * @returns {Promise<Array<object>>} rows with a `similarity` column (0..1)
 */
async function semanticSearch({ text, limit = 5, category, minSimilarity = 0.3, project }) {
  const embedding = await embed(text);
  const params = [toVector(embedding), limit];
  const filters = [
    `(review_status = 'approved' OR review_status IS NULL)`,
    'embedding IS NOT NULL',
    'deprecated_at IS NULL',
  ];
  if (category) {
    params.push(category);
    filters.push(`category = $${params.length}`);
  }
  if (project) {
    // Project-scoped rows plus globally applicable (NULL-project) rows.
    params.push(project);
    filters.push(`(project = $${params.length} OR project IS NULL)`);
  }
  const sql = `
    SELECT id, learning_id, learning, context, category, project,
           is_anti_pattern, effectiveness_score, applied_count, source_file,
           1 - (embedding <=> $1::vector) AS similarity
      FROM learnings_embeddings
     WHERE ${filters.join('\n       AND ')}
     ORDER BY embedding <=> $1::vector
     LIMIT $2`;
  const rows = await query(sql, params);
  return rows.filter((row) => row.similarity >= minSimilarity);
}
||||
|
||||
/**
 * PostgreSQL full-text search fallback (no embeddings required).
 * Extracts up to 10 keywords (alphanumeric runs of length >= 4) from the
 * query text and OR-combines them into a tsquery.
 * @returns {Promise<Array<object>>} rows ranked by ts_rank, then effectiveness
 */
async function ftsSearch({ text, limit = 10, category }) {
  const keywords = text
    .split(/[^A-Za-z0-9]+/)
    .filter((word) => word.length >= 4)
    .slice(0, 10);
  if (keywords.length === 0) return [];

  const params = [keywords.join(' | '), limit];
  let categoryClause = '';
  if (category) {
    params.push(category);
    categoryClause = `AND category = $${params.length}`;
  }
  const sql = `
    SELECT id, learning_id, learning, category, source_file, effectiveness_score,
           is_anti_pattern,
           ts_rank(to_tsvector('english', learning || ' ' || COALESCE(context,'')),
                   to_tsquery('english', $1)) AS rank
      FROM learnings_embeddings
     WHERE to_tsvector('english', learning || ' ' || COALESCE(context,''))
           @@ to_tsquery('english', $1)
       AND (review_status = 'approved' OR review_status IS NULL)
       AND deprecated_at IS NULL
       ${categoryClause}
     ORDER BY rank DESC, effectiveness_score DESC
     LIMIT $2`;
  return await query(sql, params);
}
||||
|
||||
/**
 * Project-scoped context: top regular learnings plus top anti-patterns for a
 * project (matched on `project` or a substring of `project_tags`).
 * @returns {Promise<{primary: Array, antiPatterns: Array}>}
 */
async function getProjectContext({ project, limit = 5 }) {
  const primary = await query(
    `SELECT id, learning, category, is_anti_pattern, effectiveness_score, applied_count
       FROM learnings_embeddings
      WHERE (project = $1 OR project_tags ILIKE '%' || $1 || '%')
        AND is_anti_pattern = false
        AND (review_status = 'approved' OR review_status IS NULL)
        AND deprecated_at IS NULL
      ORDER BY effectiveness_score DESC, applied_count DESC
      LIMIT $2`,
    [project, limit],
  );
  // NOTE(review): anti-patterns are capped at 5 regardless of `limit` —
  // presumably intentional; confirm before unifying with the primary limit.
  const antiPatterns = await query(
    `SELECT id, learning, category, effectiveness_score
       FROM learnings_embeddings
      WHERE (project = $1 OR project_tags ILIKE '%' || $1 || '%')
        AND is_anti_pattern = true
        AND (review_status = 'approved' OR review_status IS NULL)
        AND deprecated_at IS NULL
      ORDER BY effectiveness_score DESC
      LIMIT 5`,
    [project],
  );
  return { primary, antiPatterns };
}
||||
|
||||
/**
 * Select learnings to inject into a task context.
 * With a task description: one semantic search, partitioned into regular
 * learnings and anti-patterns. With only a category: two direct SQL lookups.
 * @returns {Promise<{primary: Array, antiPatterns: Array}>}
 */
async function injectForTask({ category, task, compact = false, project }) {
  const nPrimary = compact ? 3 : 5;
  const nAnti = compact ? 2 : 3;
  let primary = [];
  let anti = [];
  if (task) {
    // One embedding/search round-trip, then partition by is_anti_pattern.
    // The previous version ran two identical searches (double embedding cost)
    // and post-filtered the second top-nAnti list for anti-patterns, which
    // usually yielded none — while primary could still contain anti-patterns.
    const hits = await semanticSearch({
      text: task, limit: nPrimary + nAnti, category, project, minSimilarity: 0.3,
    });
    primary = hits.filter((r) => !r.is_anti_pattern).slice(0, nPrimary);
    anti = hits.filter((r) => r.is_anti_pattern).slice(0, nAnti);
  } else if (category) {
    primary = await query(
      `SELECT id, learning_id, learning, category, effectiveness_score, applied_count, is_anti_pattern
         FROM learnings_embeddings
        WHERE category = $1 AND is_anti_pattern = false
          AND (review_status = 'approved' OR review_status IS NULL)
          AND deprecated_at IS NULL
        ORDER BY effectiveness_score DESC, applied_count DESC LIMIT $2`, [category, nPrimary]);
    anti = await query(
      `SELECT id, learning_id, learning, category, effectiveness_score
         FROM learnings_embeddings
        WHERE category = $1 AND is_anti_pattern = true
          AND (review_status = 'approved' OR review_status IS NULL)
          AND deprecated_at IS NULL
        ORDER BY effectiveness_score DESC LIMIT $2`, [category, nAnti]);
  }
  return { primary, antiPatterns: anti };
}
||||
|
||||
/**
 * Insert one learning row, auto-embedding the text via LiteLLM.
 * Embedding failures are non-fatal: the row is stored with a NULL embedding.
 * @param {object} args - learning (required) plus optional metadata fields
 * @returns {Promise<{id: number, learning_id: number}>} inserted row keys
 * @throws {Error} when `learning` is missing
 */
async function addLearning(args) {
  const {
    learning, context = '', project = '', category = 'GENERAL',
    is_anti_pattern = false, effectiveness_score = 0.7,
    source_file = '', session_id = '', review_status = 'approved',
  } = args;
  if (!learning) throw new Error('learning is required');

  // NOTE(review): MAX+1 is racy under concurrent inserts — presumably fine at
  // this write volume; confirm, or switch to a sequence if that changes.
  const [idRow] = await query('SELECT COALESCE(MAX(learning_id), 0) + 1 AS n FROM learnings_embeddings');
  const nextId = idRow?.n ?? 1;

  let embedding = null;
  try {
    embedding = await embed(learning);
  } catch (e) {
    console.error('embed failed:', e.message); // best-effort: store without a vector
  }

  const insertSql = `
    INSERT INTO learnings_embeddings
      (learning_id, learning, context, category, project, is_anti_pattern,
       effectiveness_score, source_file, session_id, review_status, embedding)
    VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11::vector)
    RETURNING id, learning_id`;
  const inserted = await query(insertSql, [
    nextId, learning, context, category, project, is_anti_pattern,
    effectiveness_score, source_file, session_id, review_status,
    embedding ? toVector(embedding) : null,
  ]);
  return inserted[0];
}
||||
|
||||
/**
 * Record one successful application of a learning: bump applied_count and
 * refresh last_applied / updated_at.
 * @param {number} id - primary key in learnings_embeddings
 * @returns {Promise<{ok: boolean, id: number}>}
 */
async function markApplied(id) {
  const sql = `UPDATE learnings_embeddings
      SET applied_count = applied_count + 1,
          last_applied = CURRENT_TIMESTAMP,
          updated_at = CURRENT_TIMESTAMP
    WHERE id = $1`;
  await query(sql, [id]);
  return { ok: true, id };
}
||||
|
||||
// ---------------------------------------------------------------------------
// Formatting
// ---------------------------------------------------------------------------

/**
 * Render rows as one bullet line each: marker, category, id, optional score,
 * learning text. Shows similarity % when present (and showScore), otherwise
 * the effectiveness score.
 * @param {Array<object>|null|undefined} rows
 * @param {{showScore?: boolean}} [opts]
 * @returns {string}
 */
function formatRows(rows, { showScore = true } = {}) {
  if (!rows?.length) return 'No learnings found';
  const lines = [];
  for (const row of rows) {
    const marker = row.is_anti_pattern ? '⚠️ ANTI' : '✓';
    let score = '';
    if (showScore && row.similarity != null) {
      score = ` (${(row.similarity * 100).toFixed(0)}%)`;
    } else if (row.effectiveness_score != null) {
      score = ` [eff=${Number(row.effectiveness_score).toFixed(2)}]`;
    }
    lines.push(` ${marker} [${row.category}] #${row.id ?? row.learning_id}${score}: ${row.learning}`);
  }
  return lines.join('\n');
}

/**
 * Render a project context block (markdown) with learnings, anti-patterns,
 * and a trailing HTML comment listing the row ids (machine-readable).
 * @param {string} project
 * @param {{primary: Array, antiPatterns: Array}} ctx
 * @returns {string}
 */
function formatContext(project, { primary, antiPatterns }) {
  const lines = [`## Project Context: ${project}`, ''];
  const pushSection = (title, rows) => {
    if (!rows.length) return;
    lines.push(title);
    lines.push(formatRows(rows, { showScore: false }));
    lines.push('');
  };
  pushSection('### Project Learnings', primary);
  pushSection('### Anti-Patterns', antiPatterns);
  const ids = [...primary, ...antiPatterns].map((row) => row.id).filter(Boolean);
  if (ids.length) lines.push(`<!-- LEARNING_IDS: ${ids.join(',')} -->`);
  return lines.join('\n');
}
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// MCP server
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Build one MCP tool descriptor (name + description + JSON-schema input). */
const tool = (name, description, inputSchema) => ({ name, description, inputSchema });

// Tool catalog returned by tools/list; dispatched by name in createServer().
const TOOLS = [
  tool(
    'learning_query',
    'Semantic search over the pgvector learnings DB. Returns learnings most relevant to a natural-language task description.',
    {
      type: 'object',
      properties: {
        query: { type: 'string', description: 'Free-text query / task description' },
        limit: { type: 'integer', default: 5 },
        category: { type: 'string', description: 'Optional category filter (SWIFT, PYTHON, INFRASTRUCTURE, AI, ...)' },
        project: { type: 'string', description: 'Optional project scope (e.g., CF, LLB, WHMCS)' },
        min_similarity: { type: 'number', default: 0.3 },
      },
      required: ['query'],
    },
  ),
  tool(
    'learning_search_fts',
    'PostgreSQL full-text search over learnings (no embeddings needed). Useful when semantic search returns poor matches.',
    {
      type: 'object',
      properties: {
        query: { type: 'string' },
        limit: { type: 'integer', default: 10 },
        category: { type: 'string' },
      },
      required: ['query'],
    },
  ),
  tool(
    'learning_inject',
    'Return top learnings for injection into a task context, scored for relevance. Provide either a task description or a category.',
    {
      type: 'object',
      properties: {
        task: { type: 'string', description: 'Task description (semantic match)' },
        category: { type: 'string', description: 'Category filter' },
        project: { type: 'string' },
        compact: { type: 'boolean', default: false },
      },
    },
  ),
  tool(
    'learning_context',
    'Get all learnings + anti-patterns for a project. Used at session start to surface project-scoped knowledge.',
    {
      type: 'object',
      properties: {
        project: { type: 'string', description: 'Project key (e.g., CF, LLB)' },
        limit: { type: 'integer', default: 5 },
      },
      required: ['project'],
    },
  ),
  tool(
    'learning_add',
    'Insert a new learning into pgvector (auto-embeds via LiteLLM). Use sparingly — prefer learn-from-session for bulk.',
    {
      type: 'object',
      properties: {
        learning: { type: 'string' },
        context: { type: 'string' },
        project: { type: 'string' },
        category: { type: 'string', default: 'GENERAL' },
        is_anti_pattern: { type: 'boolean', default: false },
        effectiveness_score: { type: 'number', default: 0.7 },
        source_file: { type: 'string' },
        session_id: { type: 'string' },
      },
      required: ['learning'],
    },
  ),
  tool(
    'learning_mark_applied',
    'Increment applied_count for a learning id (call when a learning was successfully used).',
    {
      type: 'object',
      properties: { id: { type: 'integer' } },
      required: ['id'],
    },
  ),
];
||||
|
||||
/**
 * Build a fresh MCP Server instance wired with the learning tools.
 * Called once per HTTP session by src/http-server.js.
 * @returns {Server}
 */
export function createServer() {
  const server = new Server(
    { name: 'learning-mcp', version: '1.0.0' },
    { capabilities: { tools: {} } }
  );

  server.setRequestHandler(ListToolsRequestSchema, async () => ({ tools: TOOLS }));

  // Wrap a string in the MCP text-content envelope.
  const text = (t) => ({ content: [{ type: 'text', text: t }] });

  // One async handler per tool name, replacing a long switch statement.
  const handlers = {
    async learning_query(args) {
      const rows = await semanticSearch({
        text: args.query,
        limit: args.limit ?? 5,
        category: args.category,
        project: args.project,
        minSimilarity: args.min_similarity ?? 0.3,
      });
      return text(rows.length
        ? formatRows(rows)
        : 'No learnings match (try lower min_similarity or different phrasing)');
    },
    async learning_search_fts(args) {
      const rows = await ftsSearch({
        text: args.query, limit: args.limit ?? 10, category: args.category,
      });
      return text(formatRows(rows));
    },
    async learning_inject(args) {
      const { primary, antiPatterns } = await injectForTask(args);
      const parts = [];
      if (primary.length) {
        parts.push('### Relevant Learnings', formatRows(primary, { showScore: false }));
      }
      if (antiPatterns.length) {
        parts.push('', '### Anti-Patterns', formatRows(antiPatterns, { showScore: false }));
      }
      if (!parts.length) parts.push('No learnings found for this task');
      return text(parts.join('\n'));
    },
    async learning_context(args) {
      const ctx = await getProjectContext({ project: args.project, limit: args.limit ?? 5 });
      return text(formatContext(args.project, ctx));
    },
    async learning_add(args) {
      const row = await addLearning(args);
      return text(`Inserted learning id=${row.id} learning_id=${row.learning_id}`);
    },
    async learning_mark_applied(args) {
      await markApplied(args.id);
      return text(`Marked applied: id=${args.id}`);
    },
  };

  server.setRequestHandler(CallToolRequestSchema, async (req) => {
    const name = req.params.name;
    const args = req.params.arguments ?? {};
    if (!Object.hasOwn(handlers, name)) {
      return { content: [{ type: 'text', text: `Unknown tool: ${name}` }], isError: true };
    }
    try {
      return await handlers[name](args);
    } catch (err) {
      Sentry.captureException?.(err);
      console.error(`[learning-mcp] ${name}:`, err);
      return { content: [{ type: 'text', text: `Error: ${err.message}` }], isError: true };
    }
  });

  return server;
}
||||
Reference in New Issue
Block a user