Compare commits

...

26 Commits

Author SHA1 Message Date
Infra
18defcc9d5 feat: add HTTP transport (CF-3081) 2026-04-13 09:34:39 +00:00
1349f3b0ce chore: migrate vault keys to ag.* namespace (CF-2942)
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-07 06:25:29 +00:00
Christian Gick
ad13a26168 feat(CF-2885): Add Jira as timeline source — pulls issue history via REST API
Extends timeline() with Jira integration since sessions moved to Jira-only in
CF-836 (2026-02-08) and the session-mcp tables are empty by design.

Adds to services/jira.ts:
- getIssueWithHistory(key): fetches issue with expand=changelog + comments
- searchIssueKeys(jql): JQL search returning minimal issue keys
- ADF → plain text extractor for comment bodies

Timeline now yields Jira events: issue_created, field_change:{status,assignee,
resolution,priority,labels}, comment. Events are time-filtered client-side
against the since/until window. For Jira-key subjects, also searches for
linked session-tracking issues and merges their events.

Tested against CF-2872 (audit task) and CF-2885 (this ticket) — shows full
lifecycle from creation through transitions to resolution.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-05 08:00:25 +03:00
Christian Gick
2ed6e68686 feat(CF-2885): Add timeline MCP tool for unified chronological event view
New `timeline(subject, since, until)` tool stitches events across session-mcp
sources (sessions, notes, commits, plans, task-commit links) into a single
time-ordered stream for LLM consumption.

Phase 1 of CF-2885: read-time stitching only, no business-system changes.
Per CF-2830 heise article thesis (Event Sourcing + MCP for LLMs).

Subject accepts Jira key, session id, or project key. Optional sources filter,
since/until window (supports relative -7d shorthand), limit.

Returns markdown-formatted timeline grouped by date with per-source icons.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-05 07:40:47 +03:00
Christian Gick
0fad29801e feat(CF-2394): Add session_transcript_search MCP tool
Hybrid (vector + keyword + rerank) search over indexed session transcripts.
Enables context recovery from past sessions without re-reading JSONL files.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-18 08:37:19 +02:00
Christian Gick
3613e2aa52 feat(CF-2136): Add Sentry structured logging support
Enable Sentry.logger API (enableLogs, beforeSendLog) and add
logInfo/logWarn/logError helpers. Bump @sentry/node ^9.47.1 → ^10.39.0,
@sentry/profiling-node → ^10.39.0.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-04 10:22:08 +02:00
Christian Gick
9dae176fc2 fix(CF-1316): Use LiteLLM model alias claude-haiku-4.5
The full Anthropic model ID is not registered in LiteLLM.
Use the LiteLLM alias instead.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 18:11:43 +02:00
Christian Gick
ece0e81ae9 feat(CF-1316): Add LLM metadata extraction at embedding time
Extract structured metadata (topics, decisions, blockers, tools_used,
projects, issue_keys) from session summaries using Haiku at session end.
Metadata stored in JSONB column with GIN index for filtered retrieval.
session_semantic_search now accepts optional metadata filters.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 18:09:00 +02:00
Christian Gick
ef74d7912e feat: Add cross-encoder re-ranking after hybrid search (CF-1317)
Add rerank() function calling LiteLLM /v1/rerank endpoint (Cohere-compatible).
Plugged into all 3 search functions (sessions, session-docs, archives) after
RRF merge. Disabled by default via RERANK_ENABLED env var. Graceful fallback
to RRF-only ranking on API failure.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 16:36:24 +02:00
Christian Gick
0150575713 feat(CF-1351): Remove unused memory_* tools from session-mcp
Auto memory (MEMORY.md + topic files) is the sole memory system.
The session-mcp memory_* tools (PostgreSQL + pgvector) had zero entries
after months of use — removing dead code to simplify the server.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-18 12:52:48 +02:00
Christian Gick
27548f5c51 chore: Add .claude-session/ to gitignore
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-18 12:39:46 +02:00
Christian Gick
02c009a551 feat(CF-1354): Add withSentryTransaction to tool handlers
Wrap CallToolRequest handler with withSentryTransaction for
per-tool tracing. Remove broken $(vault) DSN from .env.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-18 12:20:53 +02:00
Christian Gick
4f8996cd82 feat(CF-1315): Hybrid search with tsvector + RRF
Add PostgreSQL full-text search alongside pgvector for exact matches
on Jira keys, error messages, file paths. Merge results with
Reciprocal Rank Fusion. Default mode: hybrid, with graceful
degradation to keyword-only when embeddings unavailable.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-18 08:46:39 +02:00
Christian Gick
1f499bd926 feat(CF-1314): Content hashing to prevent duplicate embeddings
SHA-256 hash check before embedding API call eliminates ~60-80% of
redundant embedding requests. Consolidates dual INSERT paths to single
INSERT with nullable embedding column.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-18 08:28:11 +02:00
Christian Gick
77097ac65f feat(API-11): Route API calls through AgilitonAPI gateway
Add gateway-first pattern: when AGILITON_API_KEY is set, route all
external API calls through the gateway with X-API-Key auth. Falls back
to direct API access when gateway is unavailable.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-11 19:05:19 +02:00
Christian Gick
6b53fb9168 chore(CF-838): Remove deployment/build migration files
Tables migrated to Jira tracking. PostgreSQL tables will be dropped separately.
Archived in s3://macbookdev/db-archive/agiliton-db-2026-02-08.sql.gz

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-10 07:55:05 +02:00
Christian Gick
c0c6918e2c chore: Use per-product Sentry DSN (CF-835)
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-08 18:46:06 +02:00
Christian Gick
507e98ef8e fix: Use actual project key for session Jira issues instead of hardcoded CF
The project parameter was passed to createSessionIssue() but ignored -
all sessions were created in the CF Jira project regardless of the
actual session project (ST, LLB, GB, etc.).

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-08 17:09:34 +02:00
Christian Gick
9042bf0878 fix(CF-816): Self-healing session number sequence to prevent drift
session_sequences table fell behind when sessions were inserted with
explicit session_number (e.g., retro imports), causing duplicate key
violations on next auto-assigned number. Function now syncs forward
by checking MAX(session_number) before assigning.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-08 07:48:54 +02:00
Christian Gick
3ca40d9100 revert: Keep CF project for session-tracking Jira issues
CU is a separate project. Session tracking stays in CF.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-08 07:32:21 +02:00
Christian Gick
c57f9c6a75 fix(CF-762): Use CU project for session-tracking Jira issues
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-08 07:31:59 +02:00
Christian Gick
63cba97b56 feat(CF-762): Add Jira integration for session tracking
Sessions now auto-create CF Jira issues on start and post full session
output as comments on end, transitioning the issue to Done.

- Add src/services/jira.ts with createSessionIssue, addComment, transitionToDone
- Update session_start to create CF Jira issue and store key in sessions table
- Update session_end to post session output and close Jira issue
- Add migration 031 to archive local task tables (moved to Jira Cloud)
- Update .env.example with Jira Cloud env vars

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-08 07:23:18 +02:00
Christian Gick
1227e5b339 feat(CF-762): Complete Jira migration - consolidate projects, cleanup
- Remove task CRUD/epic/search/relation/version tools (moved to Jira)
- Add migration scripts: migrate-tasks-to-jira, jira-admin, prepare-all-projects
- Add consolidate-projects.ts for merging duplicate Jira projects
- Add validate-migration.ts for post-migration integrity checks
- Add jira_issue_key columns migration (030)
- Consolidate 11 duplicate projects (LIT→LITE, CARD→CS, etc.)
- Delete 92 placeholder issues, 11 empty source projects
- Remove SG project completely
- 2,798 tasks migrated across 46 Jira projects

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-07 12:33:49 +02:00
Christian Gick
bd5d95beff fix: Fallback to cached session ID in session_note_add
When session_id is not provided, falls back to getSessionId() which
reads from CLAUDE_SESSION_ID env or ~/.cache/session-memory/current_session.
Fixes NOT NULL constraint violation on session_notes.session_id.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-02-04 21:02:12 +02:00
Christian Gick
6cbb5ce6cb feat(CF-314): Add planning_mode_required field and smart planning mode support
Adds DB column, TypeScript types, MCP tool schemas, and CRUD handlers
for planning_mode_required (NULL=auto-detect, true=always, false=never).

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-02-04 20:30:36 +02:00
Christian Gick
baec42810c fix(CF-635): Fix task_list returning empty for non-completed tasks
- Remove invalid 'pending' status from task_list and task_update enums
- Default to excluding completed tasks when no status filter provided
- Previously, task_list(status=open) missed in_progress/blocked/testing tasks

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-02-02 07:00:39 +02:00
48 changed files with 4915 additions and 7041 deletions

5
.dockerignore Normal file
View File

@@ -0,0 +1,5 @@
node_modules
dist
.env
.git
*.log

38
.env Normal file
View File

@@ -0,0 +1,38 @@
# ===========================================
# SENTRY ERROR TRACKING (CF-536)
# ===========================================
# Get DSN from vault: vault get sentry.dsn.mcp-servers
# Phase 2: Node.js MCP Servers Integration
#
# Features:
# - Automatic error capture with PII scrubbing
# - Performance tracing and profiling
# - MCP protocol-aware filtering
# - PostgreSQL integration for database error tracking
#
# Created: 2026-01-29
# SENTRY_DSN provided via ~/.claude.json env (dotenv can't expand shell commands)
SENTRY_ENVIRONMENT=production
SENTRY_TRACE_SAMPLE_RATE=0.1
SENTRY_PROFILE_SAMPLE_RATE=0.01
APP_VERSION=1.0.0
# ===========================================
# Task MCP Environment Variables
# ===========================================
# PostgreSQL connection via pgbouncer
POSTGRES_HOST=postgres.agiliton.internal
POSTGRES_PORT=6432
# Embedding service configuration
LLM_API_URL=https://api.agiliton.cloud/llm
LLM_API_KEY=sk-c02d41a118ce8330c428100afaa816c8
# AgilitonAPI Gateway (API-11: centralized API access)
AGILITON_API_KEY=gw_92399e154f02730ebadec65ddbde9426c9378ec77093d1c9
AGILITON_API_URL=https://api.agiliton.cloud
# Jira Cloud (fallback if gateway unavailable)
JIRA_URL=https://agiliton.atlassian.net
JIRA_USERNAME=christian.gick@agiliton.eu
JIRA_API_TOKEN=ATATT3xFfGF0tpaJTS4nJklW587McubEw-1SYbLWqfovkxI5320NdbFc-3fgHlw0HGTLOikgV082m9N-SIsYVZveGXa553_1LAyOevV6Qples93xF4hIExWGAvwvXPy_4pW2tH5FNusN5ieMca5_-YUP0i69SIN0RLIMQjfqDmQyhZXbkIvrm-I=A8A2A1FC

View File

@@ -1,9 +1,21 @@
# Task MCP Environment Variables
# Session MCP Environment Variables (forked from task-mcp, CF-762)
# PostgreSQL connection via pgbouncer
POSTGRES_HOST=infra.agiliton.internal
POSTGRES_HOST=postgres.agiliton.internal
POSTGRES_PORT=6432
# Embedding service configuration
LLM_API_URL=https://api.agiliton.cloud/llm
LLM_API_KEY=your_llm_api_key_here
# LLM metadata extraction at embedding time (CF-1316)
METADATA_EXTRACTION_MODEL=claude-haiku-4.5
# Cross-encoder re-ranking (CF-1317)
RERANK_ENABLED=false
RERANK_MODEL=rerank-v3.5
# Jira Cloud (session tracking)
JIRA_URL=https://agiliton.atlassian.net
JIRA_USERNAME=your_email@agiliton.eu
JIRA_API_TOKEN=your_jira_api_token

1
.gitignore vendored
View File

@@ -2,3 +2,4 @@ node_modules/
dist/
*.log
.env
.claude-session/

15
Dockerfile Normal file
View File

@@ -0,0 +1,15 @@
# Build stage: compile TypeScript to dist/ with dev dependencies available.
FROM node:20-alpine AS build
WORKDIR /app
COPY package*.json tsconfig.json ./
RUN npm install
COPY src ./src
RUN npm run build
# Runtime stage: production dependencies only; compiled output copied over.
FROM node:20-alpine
WORKDIR /app
ENV NODE_ENV=production
COPY package*.json ./
RUN npm install --omit=dev && npm cache clean --force
COPY --from=build /app/dist ./dist
# Drop privileges: run as the unprivileged 'node' user shipped with the base image.
USER node
EXPOSE 9216
CMD ["node", "dist/http-server.js"]

View File

@@ -1,46 +0,0 @@
-- Migration 012: Builds table for CI/CD tracking
-- Purpose: Track builds and link them to sessions and versions
-- Dependencies: 001_base_schema.sql (versions table), 010_sessions.sql (sessions table)
-- Builds table: Store build information linked to sessions and versions
CREATE TABLE IF NOT EXISTS builds (
id SERIAL PRIMARY KEY,
session_id TEXT REFERENCES sessions(id) ON DELETE SET NULL,
version_id TEXT REFERENCES versions(id) ON DELETE CASCADE,
build_number INTEGER NOT NULL,
status TEXT DEFAULT 'pending' CHECK (status IN ('pending', 'running', 'success', 'failed')),
-- Build metadata
git_commit_sha TEXT,
git_branch TEXT,
build_log_url TEXT,
artifacts_url TEXT,
-- Timing
started_at TIMESTAMP WITH TIME ZONE NOT NULL,
finished_at TIMESTAMP WITH TIME ZONE,
duration_seconds INTEGER GENERATED ALWAYS AS
(EXTRACT(EPOCH FROM (finished_at - started_at))) STORED,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);
-- Indexes for efficient querying
CREATE INDEX idx_builds_session ON builds(session_id);
CREATE INDEX idx_builds_version ON builds(version_id);
CREATE INDEX idx_builds_status ON builds(status);
CREATE INDEX idx_builds_started ON builds(started_at DESC);
CREATE INDEX idx_builds_commit ON builds(git_commit_sha);
-- Unique constraint: one build number per version
CREATE UNIQUE INDEX idx_builds_version_number ON builds(version_id, build_number)
WHERE version_id IS NOT NULL;
-- Comments for documentation
COMMENT ON TABLE builds IS 'CI/CD build tracking linked to sessions and versions';
COMMENT ON COLUMN builds.session_id IS 'Optional link to session that triggered the build';
COMMENT ON COLUMN builds.version_id IS 'Link to version being built';
COMMENT ON COLUMN builds.duration_seconds IS 'Auto-calculated build duration in seconds';
COMMENT ON COLUMN builds.build_log_url IS 'URL to build logs (e.g., GitHub Actions run)';
COMMENT ON COLUMN builds.artifacts_url IS 'URL to build artifacts (e.g., app binary, Docker image)';

View File

@@ -1,96 +0,0 @@
-- Migration 018: Deployments tracking for deployment centralization
-- Purpose: Track all deployments (Docker, MCP, iOS/macOS, services) with logs
-- Dependencies: 001_base_schema.sql (tasks table), 010_sessions.sql (sessions table)
-- Deployments table: Store deployment information linked to sessions and tasks
CREATE TABLE IF NOT EXISTS deployments (
id SERIAL PRIMARY KEY,
session_id TEXT REFERENCES sessions(id) ON DELETE SET NULL,
task_id TEXT REFERENCES tasks(id) ON DELETE SET NULL,
-- Project identification
project_name VARCHAR(255) NOT NULL,
project_path TEXT NOT NULL,
-- Deployment type and method
deployment_type VARCHAR(50) NOT NULL CHECK (deployment_type IN (
'docker-compose',
'mcp-server',
'ios-macos-app',
'python-service',
'node-service'
)),
deployment_method VARCHAR(50) NOT NULL CHECK (deployment_method IN (
'doco-cd',
'agiliton-build',
'direct',
'manual'
)),
-- Status tracking
status VARCHAR(50) NOT NULL DEFAULT 'pending' CHECK (status IN (
'pending',
'running',
'success',
'failed',
'cancelled'
)),
-- Git integration
commit_sha VARCHAR(40),
git_branch TEXT,
-- Timing
started_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
completed_at TIMESTAMP WITH TIME ZONE,
duration_seconds INTEGER GENERATED ALWAYS AS
(EXTRACT(EPOCH FROM (completed_at - started_at))) STORED,
-- Error tracking
error_message TEXT,
-- Extra deployment-specific data (JSON)
metadata JSONB DEFAULT '{}'::jsonb,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);
-- Deployment logs table: Store deployment log messages
CREATE TABLE IF NOT EXISTS deployment_logs (
id SERIAL PRIMARY KEY,
deployment_id INT NOT NULL REFERENCES deployments(id) ON DELETE CASCADE,
timestamp TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
level VARCHAR(20) NOT NULL CHECK (level IN ('debug', 'info', 'warn', 'error')),
message TEXT NOT NULL,
-- Optional structured metadata
metadata JSONB DEFAULT '{}'::jsonb
);
-- Indexes for efficient querying
CREATE INDEX idx_deployments_project ON deployments(project_name);
CREATE INDEX idx_deployments_session ON deployments(session_id);
CREATE INDEX idx_deployments_task ON deployments(task_id);
CREATE INDEX idx_deployments_status ON deployments(status);
CREATE INDEX idx_deployments_started ON deployments(started_at DESC);
CREATE INDEX idx_deployments_type ON deployments(deployment_type);
CREATE INDEX idx_deployments_commit ON deployments(commit_sha);
CREATE INDEX idx_deployment_logs_deployment ON deployment_logs(deployment_id);
CREATE INDEX idx_deployment_logs_timestamp ON deployment_logs(timestamp DESC);
CREATE INDEX idx_deployment_logs_level ON deployment_logs(level);
-- Comments for documentation
COMMENT ON TABLE deployments IS 'Deployment tracking for all project types (Docker, MCP, iOS/macOS, services)';
COMMENT ON COLUMN deployments.project_name IS 'Human-readable project name';
COMMENT ON COLUMN deployments.project_path IS 'Absolute filesystem path to project';
COMMENT ON COLUMN deployments.deployment_type IS 'Type of deployment (docker-compose, mcp-server, ios-macos-app, etc.)';
COMMENT ON COLUMN deployments.deployment_method IS 'Method used for deployment (doco-cd, agiliton-build, direct, manual)';
COMMENT ON COLUMN deployments.status IS 'Current deployment status';
COMMENT ON COLUMN deployments.duration_seconds IS 'Auto-calculated deployment duration in seconds';
COMMENT ON COLUMN deployments.metadata IS 'Extra deployment-specific data (runtime, host, build number, etc.)';
COMMENT ON TABLE deployment_logs IS 'Deployment log messages for debugging and audit trail';
COMMENT ON COLUMN deployment_logs.level IS 'Log level (debug, info, warn, error)';
COMMENT ON COLUMN deployment_logs.metadata IS 'Optional structured log metadata (source, context, etc.)';

View File

@@ -0,0 +1,7 @@
-- Migration 028: Add unique index for session checkpoint upserts (CF-572)
-- Ensures at most ONE checkpoint row per session+note_type.
-- Normal session_note_add calls (recovered_from IS NULL) are unaffected.
CREATE UNIQUE INDEX IF NOT EXISTS uq_session_checkpoint
ON session_notes (session_id, note_type)
WHERE recovered_from = 'checkpoint';

View File

@@ -0,0 +1,3 @@
-- CF-314: Add planning_mode_required flag for smart planning mode auto-detection
-- NULL = auto-detect (scoring algorithm), true = always plan, false = never plan
ALTER TABLE tasks ADD COLUMN IF NOT EXISTS planning_mode_required BOOLEAN DEFAULT NULL;

View File

@@ -0,0 +1,9 @@
-- Migration 030: Add jira_issue_key to sessions for Jira Cloud linking (CF-762)
-- Links sessions to Jira issues after task management moved to Jira Cloud
ALTER TABLE sessions ADD COLUMN IF NOT EXISTS jira_issue_key TEXT;
CREATE INDEX IF NOT EXISTS idx_sessions_jira_issue_key ON sessions (jira_issue_key) WHERE jira_issue_key IS NOT NULL;
-- Also add to task_activity for historical audit trail linking
ALTER TABLE task_activity ADD COLUMN IF NOT EXISTS jira_issue_key TEXT;

View File

@@ -0,0 +1,50 @@
-- Migration 031: Archive task tables after Jira Cloud migration (CF-762)
-- Task management moved to Jira Cloud. Archive local task tables for historical reference.
-- Session, memory, archive, and infrastructure tables remain active.
BEGIN;
-- 1. Archive task tables (rename with archived_ prefix)
ALTER TABLE IF EXISTS tasks RENAME TO archived_tasks;
ALTER TABLE IF EXISTS task_checklist RENAME TO archived_task_checklist;
ALTER TABLE IF EXISTS task_links RENAME TO archived_task_links;
ALTER TABLE IF EXISTS task_activity RENAME TO archived_task_activity;
ALTER TABLE IF EXISTS task_sequences RENAME TO archived_task_sequences;
-- 2. Add archived_at timestamp to archived tables
ALTER TABLE IF EXISTS archived_tasks ADD COLUMN IF NOT EXISTS archived_at TIMESTAMP WITH TIME ZONE DEFAULT NOW();
ALTER TABLE IF EXISTS archived_task_checklist ADD COLUMN IF NOT EXISTS archived_at TIMESTAMP WITH TIME ZONE DEFAULT NOW();
ALTER TABLE IF EXISTS archived_task_links ADD COLUMN IF NOT EXISTS archived_at TIMESTAMP WITH TIME ZONE DEFAULT NOW();
ALTER TABLE IF EXISTS archived_task_activity ADD COLUMN IF NOT EXISTS archived_at TIMESTAMP WITH TIME ZONE DEFAULT NOW();
ALTER TABLE IF EXISTS archived_task_sequences ADD COLUMN IF NOT EXISTS archived_at TIMESTAMP WITH TIME ZONE DEFAULT NOW();
-- 3. Drop tables that are fully replaced by Jira (data already migrated)
DROP TABLE IF EXISTS epics CASCADE;
DROP TABLE IF EXISTS epic_sequences CASCADE;
DROP TABLE IF EXISTS versions CASCADE;
-- 4. Keep these tables (still referenced by session tools):
-- - task_commits (git commit ↔ Jira issue linking)
-- - task_migration_map (maps old local IDs → Jira keys)
-- - task_delegations (code delegation tracking)
-- 5. Update task_commits to remove FK constraint on archived_tasks
-- (commits now reference Jira issue keys, not local task IDs)
ALTER TABLE IF EXISTS task_commits DROP CONSTRAINT IF EXISTS task_commits_task_id_fkey;
-- 6. Update task_delegations to remove FK constraint on archived_tasks
ALTER TABLE IF EXISTS task_delegations DROP CONSTRAINT IF EXISTS task_delegations_task_id_fkey;
-- 7. Drop unused indexes on archived tables (save space, they're read-only now)
DROP INDEX IF EXISTS idx_tasks_status;
DROP INDEX IF EXISTS idx_tasks_type;
DROP INDEX IF EXISTS idx_tasks_priority;
DROP INDEX IF EXISTS idx_tasks_epic;
DROP INDEX IF EXISTS idx_tasks_version;
DROP INDEX IF EXISTS idx_tasks_embedding;
-- 8. Record migration
INSERT INTO schema_migrations (version, applied_at) VALUES ('031_archive_task_tables', NOW())
ON CONFLICT DO NOTHING;
COMMIT;

View File

@@ -0,0 +1,57 @@
-- Migration 032: Fix session_sequences drift (CF-816)
-- Problem: Retro-imported sessions with explicit session_number bypass the trigger,
-- leaving session_sequences.next_number behind the actual MAX(session_number).
-- Next auto-assigned number then collides with the unique index.
-- Fix: Make get_next_session_number() self-healing by always checking actual max.
-- Step 1: Replace the function with a self-healing version
-- (the previously declared v_seq_number was never used — removed)
CREATE OR REPLACE FUNCTION get_next_session_number(p_project TEXT)
RETURNS INTEGER AS $$
DECLARE
  v_max_number INTEGER;
  v_number INTEGER;
BEGIN
  -- Insert project if doesn't exist
  INSERT INTO projects (key, name) VALUES (p_project, p_project)
  ON CONFLICT (key) DO NOTHING;
  -- Insert sequence if doesn't exist
  INSERT INTO session_sequences (project, next_number)
  VALUES (p_project, 1)
  ON CONFLICT (project) DO NOTHING;
  -- Get the actual max session_number for this project (handles external inserts)
  SELECT COALESCE(MAX(session_number), 0) INTO v_max_number
  FROM sessions
  WHERE project = p_project;
  -- Sync sequence forward if it fell behind (self-healing)
  UPDATE session_sequences
  SET next_number = GREATEST(next_number, v_max_number + 1),
      last_updated = NOW()
  WHERE project = p_project;
  -- Now atomically increment and return
  UPDATE session_sequences
  SET next_number = next_number + 1,
      last_updated = NOW()
  WHERE project = p_project
  RETURNING next_number - 1 INTO v_number;
  RETURN v_number;
END;
$$ LANGUAGE plpgsql;
-- Step 2: Sync all existing sequences to match actual data
UPDATE session_sequences sq
SET next_number = GREATEST(sq.next_number, sub.actual_max + 1),
    last_updated = NOW()
FROM (
  SELECT project, COALESCE(MAX(session_number), 0) AS actual_max
  FROM sessions
  WHERE project IS NOT NULL
  GROUP BY project
) sub
WHERE sq.project = sub.project
  AND sq.next_number <= sub.actual_max;

View File

@@ -0,0 +1,20 @@
-- CF-1314: Content hashing to prevent duplicate embeddings
-- Adds content_hash column to all embedding tables for dedup before API call
-- Adds source_id columns for future CF-1315 hybrid search
ALTER TABLE project_archives ADD COLUMN IF NOT EXISTS content_hash TEXT;
ALTER TABLE project_archives ADD COLUMN IF NOT EXISTS source_id TEXT;
ALTER TABLE memories ADD COLUMN IF NOT EXISTS content_hash TEXT;
ALTER TABLE memories ADD COLUMN IF NOT EXISTS source_id TEXT;
ALTER TABLE session_notes ADD COLUMN IF NOT EXISTS content_hash TEXT;
ALTER TABLE session_plans ADD COLUMN IF NOT EXISTS content_hash TEXT;
ALTER TABLE sessions ADD COLUMN IF NOT EXISTS content_hash TEXT;
CREATE INDEX IF NOT EXISTS idx_archives_content_hash ON project_archives(content_hash);
CREATE INDEX IF NOT EXISTS idx_memories_content_hash ON memories(content_hash);
CREATE INDEX IF NOT EXISTS idx_session_notes_content_hash ON session_notes(content_hash);
CREATE INDEX IF NOT EXISTS idx_session_plans_content_hash ON session_plans(content_hash);
CREATE INDEX IF NOT EXISTS idx_sessions_content_hash ON sessions(content_hash);
CREATE INDEX IF NOT EXISTS idx_archives_source_id ON project_archives(source_id);
CREATE INDEX IF NOT EXISTS idx_memories_source_id ON memories(source_id);

View File

@@ -0,0 +1,53 @@
-- CF-1315: Hybrid search - tsvector columns, GIN indexes, triggers
-- 1. Add search_vector columns
ALTER TABLE project_archives ADD COLUMN IF NOT EXISTS search_vector tsvector;
ALTER TABLE memories ADD COLUMN IF NOT EXISTS search_vector tsvector;
ALTER TABLE sessions ADD COLUMN IF NOT EXISTS search_vector tsvector;
-- 2. GIN indexes for fast full-text search
CREATE INDEX IF NOT EXISTS idx_archives_search_vector ON project_archives USING gin(search_vector);
CREATE INDEX IF NOT EXISTS idx_memories_search_vector ON memories USING gin(search_vector);
CREATE INDEX IF NOT EXISTS idx_sessions_search_vector ON sessions USING gin(search_vector);
-- 3. Triggers to auto-populate search_vector on INSERT/UPDATE
CREATE OR REPLACE FUNCTION update_archives_search_vector() RETURNS TRIGGER AS $$
BEGIN
NEW.search_vector := to_tsvector('english', coalesce(NEW.title, '') || ' ' || coalesce(NEW.content, ''));
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION update_memories_search_vector() RETURNS TRIGGER AS $$
BEGIN
NEW.search_vector := to_tsvector('english', coalesce(NEW.title, '') || ' ' || coalesce(NEW.content, ''));
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION update_sessions_search_vector() RETURNS TRIGGER AS $$
BEGIN
NEW.search_vector := to_tsvector('english', coalesce(NEW.summary, ''));
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
DROP TRIGGER IF EXISTS trg_archives_search_vector ON project_archives;
CREATE TRIGGER trg_archives_search_vector
BEFORE INSERT OR UPDATE OF title, content ON project_archives
FOR EACH ROW EXECUTE FUNCTION update_archives_search_vector();
DROP TRIGGER IF EXISTS trg_memories_search_vector ON memories;
CREATE TRIGGER trg_memories_search_vector
BEFORE INSERT OR UPDATE OF title, content ON memories
FOR EACH ROW EXECUTE FUNCTION update_memories_search_vector();
DROP TRIGGER IF EXISTS trg_sessions_search_vector ON sessions;
CREATE TRIGGER trg_sessions_search_vector
BEFORE INSERT OR UPDATE OF summary ON sessions
FOR EACH ROW EXECUTE FUNCTION update_sessions_search_vector();
-- 4. Backfill existing rows (no-op if tables empty, safe to re-run)
UPDATE project_archives SET search_vector = to_tsvector('english', coalesce(title, '') || ' ' || coalesce(content, '')) WHERE search_vector IS NULL;
UPDATE memories SET search_vector = to_tsvector('english', coalesce(title, '') || ' ' || coalesce(content, '')) WHERE search_vector IS NULL;
UPDATE sessions SET search_vector = to_tsvector('english', coalesce(summary, '')) WHERE search_vector IS NULL AND summary IS NOT NULL;

View File

@@ -0,0 +1,7 @@
-- CF-1316: Add LLM-extracted metadata JSONB column for filtered retrieval
-- Schema: { topics: string[], decisions: string[], blockers: string[], tools_used: string[], projects: string[], issue_keys: string[] }
ALTER TABLE sessions ADD COLUMN IF NOT EXISTS extracted_metadata JSONB;
-- GIN index for fast JSONB containment queries (@>)
CREATE INDEX IF NOT EXISTS idx_sessions_extracted_metadata ON sessions USING GIN(extracted_metadata);

3554
package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,26 +1,29 @@
{
"name": "task-mcp",
"version": "1.0.0",
"description": "MCP server for task management with PostgreSQL/pgvector backend",
"name": "session-mcp",
"version": "1.1.0",
"description": "MCP server for session/memory/archive management with PostgreSQL/pgvector. Forked from task-mcp (CF-762).",
"main": "dist/index.js",
"type": "module",
"scripts": {
"build": "tsc",
"start": "node dist/index.js",
"dev": "tsx src/index.ts",
"clean": "rm -rf dist"
"clean": "rm -rf dist",
"start:http": "node dist/http-server.js"
},
"dependencies": {
"@modelcontextprotocol/sdk": "^1.0.4",
"@sentry/node": "^9.47.1",
"@sentry/profiling-node": "^10.37.0",
"@sentry/node": "^10.39.0",
"@sentry/profiling-node": "^10.39.0",
"dotenv": "^17.2.3",
"pg": "^8.11.3"
"pg": "^8.11.3",
"express": "^4.19.2"
},
"devDependencies": {
"@types/node": "^20.11.0",
"@types/pg": "^8.10.9",
"tsx": "^4.7.0",
"typescript": "^5.3.3"
"typescript": "^5.3.3",
"@types/express": "^4.17.21"
}
}

View File

@@ -5,7 +5,7 @@ const { Pool } = pg;
// Database configuration
const pool = new Pool({
host: 'infra.agiliton.internal',
host: 'postgres.agiliton.internal',
port: 5432,
database: 'agiliton',
user: 'agiliton',

View File

@@ -12,7 +12,7 @@ const { Pool } = pg;
// Configuration - Direct WireGuard connection to INFRA VM PostgreSQL
const config = {
host: process.env.POSTGRES_HOST || 'infra.agiliton.internal',
host: process.env.POSTGRES_HOST || 'postgres.agiliton.internal',
port: 5432,
database: 'agiliton',
user: 'agiliton',

View File

@@ -16,7 +16,7 @@ const { Pool } = pg;
// Database configuration
const pool = new Pool({
host: process.env.POSTGRES_HOST || 'infra.agiliton.internal',
host: process.env.POSTGRES_HOST || 'postgres.agiliton.internal',
port: 5432,
database: 'agiliton',
user: 'agiliton',

2
run.sh
View File

@@ -1,6 +1,6 @@
#!/bin/bash
echo "task-mcp: run.sh executing with database connection" >&2
export DB_HOST="infra.agiliton.internal"
export DB_HOST="postgres.agiliton.internal"
export DB_PORT="5432"
export DB_NAME="agiliton"
export DB_USER="agiliton"

View File

@@ -0,0 +1,490 @@
#!/usr/bin/env npx tsx
/**
* Consolidate/merge Jira projects after CF-762 migration.
*
* Uses Jira Cloud Bulk Move API (POST /rest/api/3/bulk/issues/move)
* to move all issues from SOURCE to TARGET project, then updates
* task_migration_map and tasks table in PostgreSQL.
*
* Usage:
* npx tsx scripts/consolidate-projects.ts --from LIT --to LITE [--dry-run] [--delete-source]
* npx tsx scripts/consolidate-projects.ts --batch tier1 [--dry-run] [--delete-source]
* npx tsx scripts/consolidate-projects.ts --batch all [--dry-run] [--delete-source]
*/
import pg from 'pg';
import dotenv from 'dotenv';
import { dirname, join } from 'path';
import { fileURLToPath } from 'url';
// Load .env from the repo root (one level above scripts/), overriding any
// values already present in the shell environment.
const __dirname = dirname(fileURLToPath(import.meta.url));
dotenv.config({ path: join(__dirname, '..', '.env'), override: true });
// Jira Cloud connection: Basic auth header built from username + API token.
const JIRA_URL = process.env.JIRA_URL || 'https://agiliton.atlassian.net';
const JIRA_USER = process.env.JIRA_USERNAME || '';
const JIRA_TOKEN = process.env.JIRA_API_TOKEN || '';
const JIRA_AUTH = Buffer.from(`${JIRA_USER}:${JIRA_TOKEN}`).toString('base64');
// PostgreSQL pool for updating task_migration_map / tasks after issue moves.
// NOTE(review): DB password is hardcoded in source — should come from env or
// vault like the Jira credentials above; confirm and rotate.
const pool = new pg.Pool({
  host: process.env.POSTGRES_HOST || 'postgres.agiliton.internal',
  port: parseInt(process.env.POSTGRES_PORT || '5432'),
  database: 'agiliton',
  user: 'agiliton',
  password: 'QtqiwCOAUpQNF6pjzOMAREzUny2bY8V1',
  max: 3,
});
// CLI parsing: --from/--to for a single project merge, --batch for tiers.
const args = process.argv.slice(2);
const DRY_RUN = args.includes('--dry-run');
const DELETE_SOURCE = args.includes('--delete-source');
const FROM = args.find((_, i) => args[i - 1] === '--from') || '';
const TO = args.find((_, i) => args[i - 1] === '--to') || '';
const BATCH = args.find((_, i) => args[i - 1] === '--batch') || '';
// Pacing and retry tuning for Jira API calls.
const DELAY_MS = 700;          // minimum gap before each Jira request
const MAX_RETRIES = 5;         // retry budget for 429/5xx responses
const POLL_INTERVAL_MS = 2000; // presumably the bulk-move status poll interval — confirm against code below
const POLL_TIMEOUT_MS = 120000; // presumably the bulk-move poll deadline — confirm against code below
// Batch definitions — LIT already moved manually during testing
const TIER1: Array<[string, string]> = [
['CARD', 'CS'],
['TES', 'TS'],
['DA', 'DB'],
['AF', 'AFNE'],
];
const TIER2: Array<[string, string]> = [
['RUBI', 'RUB'],
['ET', 'TG'],
['ZORK', 'ZOS'],
];
const TIER3: Array<[string, string]> = [
['IS', 'INFR'],
['CLN', 'INFR'],
['TOOLS', 'INFR'],
];
interface JiraIssue {
key: string;
id: string;
fields: {
summary: string;
issuetype: { id: string; name: string };
status: { name: string };
};
}
// Promise-based sleep helper.
function delay(ms: number): Promise<void> {
  return new Promise<void>((resolve) => {
    setTimeout(() => resolve(), ms);
  });
}
// Thin fetch wrapper that roots `path` at the Jira v3 REST API and attaches
// basic-auth + JSON headers. Caller-supplied headers win on conflicts.
async function jiraFetch(path: string, options: RequestInit = {}): Promise<Response> {
  const headers = {
    'Authorization': `Basic ${JIRA_AUTH}`,
    'Content-Type': 'application/json',
    'Accept': 'application/json',
    ...options.headers,
  };
  return fetch(`${JIRA_URL}/rest/api/3${path}`, { ...options, headers });
}
// Rate-limited fetch with retry: waits DELAY_MS before EVERY attempt (global
// request pacing), then retries up to MAX_RETRIES times on 429/5xx, honouring
// the server's Retry-After header when present, otherwise exponential backoff.
async function jiraFetchWithRetry(path: string, options: RequestInit = {}): Promise<Response> {
  let lastResponse: Response | null = null;
  for (let attempt = 0; attempt <= MAX_RETRIES; attempt++) {
    await delay(DELAY_MS); // pace all requests, including the first
    const response = await jiraFetch(path, options);
    lastResponse = response;
    if (response.status === 429 || response.status >= 500) {
      if (attempt < MAX_RETRIES) {
        const retryAfter = response.headers.get('Retry-After');
        const backoffMs = retryAfter
          ? parseInt(retryAfter) * 1000          // server-directed wait (seconds → ms)
          : DELAY_MS * Math.pow(2, attempt + 1); // exponential fallback
        console.warn(` [RETRY] ${response.status}, attempt ${attempt + 1}/${MAX_RETRIES}, waiting ${backoffMs}ms`);
        await delay(backoffMs);
        continue;
      }
      // Retries exhausted: fall through and return the failing response.
    }
    return response;
  }
  // Unreachable in practice (the loop always returns); satisfies the compiler.
  return lastResponse!;
}
// Resolve a Jira project key (e.g. "CF") to its numeric project id,
// or null when the project does not exist or the request fails.
async function getProjectId(key: string): Promise<string | null> {
  const response = await jiraFetchWithRetry(`/project/${key}`);
  if (response.ok) {
    const { id } = await response.json() as { id: string };
    return id;
  }
  return null;
}
// Get all issues in a project (v3 GET /search/jql)
// Pages 100 at a time, requesting only the fields the consolidation needs.
// NOTE(review): pagination sends startAt, but the newer /search/jql endpoint
// documents cursor paging via nextPageToken — confirm startAt/total are
// honoured here; the isLast / empty-page guards below are the safety net
// against an infinite loop.
async function getAllIssues(projectKey: string): Promise<JiraIssue[]> {
  const issues: JiraIssue[] = [];
  let startAt = 0;
  while (true) {
    const jql = encodeURIComponent(`project="${projectKey}" ORDER BY key ASC`);
    const res = await jiraFetchWithRetry(`/search/jql?jql=${jql}&maxResults=100&startAt=${startAt}&fields=summary,issuetype,status`);
    if (!res.ok) {
      console.error(` Failed to search ${projectKey}: ${res.status} ${await res.text()}`);
      break;
    }
    const data = await res.json() as { total?: number; issues: JiraIssue[]; isLast?: boolean };
    issues.push(...data.issues);
    startAt += data.issues.length;
    // Stop on: explicit last page, reported total reached, or an empty page.
    if (data.isLast || (data.total !== undefined && startAt >= data.total) || data.issues.length === 0) break;
  }
  return issues;
}
// Build a map of issue-type name → issue-type id available in the given
// project (via /project/{key}/statuses). Returns an empty map on failure.
async function getProjectIssueTypes(projectKey: string): Promise<Map<string, string>> {
  const response = await jiraFetchWithRetry(`/project/${projectKey}/statuses`);
  if (!response.ok) {
    return new Map();
  }
  const issueTypes = await response.json() as Array<{ id: string; name: string }>;
  const byName = new Map<string, string>();
  for (const issueType of issueTypes) {
    byName.set(issueType.name, issueType.id);
  }
  return byName;
}
// Bulk move issues using Jira Cloud API
// Key format: "targetProjectId,targetIssueTypeId"
// Submits an asynchronous bulk-move job for a batch of issues that all share
// one target issue type; returns the job's taskId (poll with pollTask) or
// null on failure.
async function bulkMoveIssues(
  issueKeys: string[],
  targetProjectId: string,
  targetIssueTypeId: string,
): Promise<{ taskId: string } | null> {
  // The mapping key encodes the destination: "<projectId>,<issueTypeId>".
  const mappingKey = `${targetProjectId},${targetIssueTypeId}`;
  const body = {
    sendBulkNotification: false, // don't spam watchers during migration
    targetToSourcesMapping: {
      [mappingKey]: {
        // Let Jira pick target defaults for fields / status / subtask type.
        inferFieldDefaults: true,
        inferStatusDefaults: true,
        inferSubtaskTypeDefault: true,
        issueIdsOrKeys: issueKeys,
      },
    },
  };
  const res = await jiraFetchWithRetry('/bulk/issues/move', {
    method: 'POST',
    body: JSON.stringify(body),
  });
  if (!res.ok) {
    const errorBody = await res.text();
    console.error(` FAIL bulk move: ${res.status} ${errorBody}`);
    return null;
  }
  const data = await res.json() as { taskId: string };
  return data;
}
// Poll a Jira async task until complete
// Checks every POLL_INTERVAL_MS (plus the retry helper's own DELAY_MS pacing)
// until the task reports COMPLETE, FAILED/CANCELLED, or POLL_TIMEOUT_MS
// elapses. Returns the moved issue ids and the failure map on success,
// null on failure/timeout.
async function pollTask(taskId: string): Promise<{ success: number[]; failed: Record<string, unknown> } | null> {
  const start = Date.now();
  while (Date.now() - start < POLL_TIMEOUT_MS) {
    await delay(POLL_INTERVAL_MS);
    const res = await jiraFetchWithRetry(`/task/${taskId}`);
    if (!res.ok) {
      console.error(` FAIL poll task ${taskId}: ${res.status}`);
      return null;
    }
    const data = await res.json() as {
      status: string;
      progress: number;
      result?: { successfulIssues: number[]; failedIssues: Record<string, unknown>; totalIssueCount: number };
    };
    if (data.status === 'COMPLETE') {
      // Missing result fields are treated as "nothing moved / nothing failed".
      return {
        success: data.result?.successfulIssues || [],
        failed: data.result?.failedIssues || {},
      };
    }
    if (data.status === 'FAILED' || data.status === 'CANCELLED') {
      console.error(` Task ${taskId} ${data.status}`);
      return null;
    }
    // Still running
    if (data.progress > 0) {
      // \r keeps the progress on one console line; the caller clears it.
      process.stdout.write(`\r Task ${taskId}: ${data.progress}%`);
    }
  }
  console.error(` Task ${taskId} timed out after ${POLL_TIMEOUT_MS / 1000}s`);
  return null;
}
// Look up the current issue key for a numeric issue id. Keys change after a
// cross-project move; numeric ids are stable, so this resolves the new key.
async function getIssueKey(issueId: number): Promise<string | null> {
  const response = await jiraFetchWithRetry(`/issue/${issueId}?fields=key`);
  if (response.ok) {
    const { key } = await response.json() as { key: string };
    return key;
  }
  return null;
}
// Delete a Jira project
// Permanent deletion: enableUndo=false skips the trash/restore window.
// NOTE(review): uses raw jiraFetch, not jiraFetchWithRetry — so no rate-limit
// delay and no 429/5xx retry, unlike every other call in this script. Possibly
// deliberate (avoid re-issuing a destructive DELETE); confirm.
async function deleteProject(key: string): Promise<boolean> {
  if (DRY_RUN) {
    console.log(` [DRY] Would delete project ${key}`);
    return true;
  }
  const res = await jiraFetch(`/project/${key}?enableUndo=false`, { method: 'DELETE' });
  return res.status === 204; // Jira responds 204 No Content on success
}
// Consolidate one pair: bulk-move every issue from project `from` into `to`
// via Jira's async bulk-move API, then rewrite PostgreSQL references
// (task_migration_map, tasks, epics, key-based FK tables) from old keys to
// new keys. Honours DRY_RUN (no writes) and DELETE_SOURCE (drop the emptied
// source project). Returns moved/failed issue counts.
async function consolidate(from: string, to: string): Promise<{ moved: number; failed: number }> {
  console.log(`\n=== Consolidating ${from}${to} ===`);
  // Get project IDs
  const fromProjectId = await getProjectId(from);
  const toProjectId = await getProjectId(to);
  if (!fromProjectId) {
    console.error(` Source project ${from} does not exist in Jira. Skipping.`);
    return { moved: 0, failed: 0 };
  }
  if (!toProjectId) {
    console.error(` Target project ${to} does not exist in Jira. Skipping.`);
    return { moved: 0, failed: 0 };
  }
  // Get target project issue types
  const targetTypes = await getProjectIssueTypes(to);
  console.log(` Target ${to} (id=${toProjectId}) issue types: ${Array.from(targetTypes.entries()).map(([n, id]) => `${n}=${id}`).join(', ')}`);
  // Get all issues from source
  const issues = await getAllIssues(from);
  console.log(` Found ${issues.length} issues in ${from}`);
  if (issues.length === 0) {
    console.log(` Nothing to move.`);
    if (DELETE_SOURCE) {
      console.log(` Deleting empty source project ${from}...`);
      const deleted = await deleteProject(from);
      console.log(` ${deleted ? 'Deleted' : 'FAILED to delete'} ${from}`);
    }
    return { moved: 0, failed: 0 };
  }
  if (DRY_RUN) {
    console.log(` [DRY] Would move ${issues.length} issues:`);
    for (const issue of issues) {
      console.log(` ${issue.key} [${issue.fields.issuetype.name}] ${issue.fields.status.name}: ${issue.fields.summary.substring(0, 60)}`);
    }
    // Still do DB updates in dry run? No.
    // Dry run reports the would-be moves as "moved" in the summary counts.
    return { moved: issues.length, failed: 0 };
  }
  // Build old issue ID → old key map (for tracking after move)
  const idToOldKey = new Map<number, string>();
  for (const issue of issues) {
    idToOldKey.set(parseInt(issue.id), issue.key);
  }
  // Group issues by issue type for bulk move (the bulk API moves one
  // target issue type per mapping entry).
  const byType = new Map<string, { typeId: string; typeName: string; keys: string[] }>();
  for (const issue of issues) {
    const typeName = issue.fields.issuetype.name;
    const targetTypeId = targetTypes.get(typeName);
    if (!targetTypeId) {
      // Fall back to Task if type doesn't exist in target
      const fallbackId = targetTypes.get('Task');
      if (!fallbackId) {
        console.error(` No matching type for ${typeName} in ${to}, and no Task fallback. Skipping ${issue.key}`);
        continue;
      }
      console.warn(` [WARN] ${issue.key} type ${typeName} not in target, using Task (${fallbackId})`);
      const group = byType.get('Task') || { typeId: fallbackId, typeName: 'Task', keys: [] };
      group.keys.push(issue.key);
      byType.set('Task', group);
    } else {
      const group = byType.get(typeName) || { typeId: targetTypeId, typeName, keys: [] };
      group.keys.push(issue.key);
      byType.set(typeName, group);
    }
  }
  let totalMoved = 0;
  let totalFailed = 0;
  const keyMapping = new Map<string, string>(); // old key → new key
  // Move each type group
  for (const [typeName, group] of byType) {
    console.log(` Moving ${group.keys.length} ${typeName} issues...`);
    const result = await bulkMoveIssues(group.keys, toProjectId, group.typeId);
    if (!result) {
      // Whole group failed to submit.
      totalFailed += group.keys.length;
      continue;
    }
    console.log(` Waiting for task ${result.taskId}...`);
    const taskResult = await pollTask(result.taskId);
    process.stdout.write('\r'); // clear pollTask's in-place progress line
    if (!taskResult) {
      totalFailed += group.keys.length;
      continue;
    }
    const failedCount = Object.keys(taskResult.failed).length;
    console.log(` Task complete: ${taskResult.success.length} moved, ${failedCount} failed`);
    totalMoved += taskResult.success.length;
    totalFailed += failedCount;
    // Resolve new keys for moved issues (ids are stable across the move)
    for (const movedId of taskResult.success) {
      const oldKey = idToOldKey.get(movedId);
      if (!oldKey) continue;
      const newKey = await getIssueKey(movedId);
      if (newKey) {
        keyMapping.set(oldKey, newKey);
      }
    }
  }
  console.log(` Total moved: ${totalMoved}, failed: ${totalFailed}`);
  console.log(` Key mappings resolved: ${keyMapping.size}`);
  // Log all mappings
  for (const [oldKey, newKey] of keyMapping) {
    console.log(` ${oldKey}${newKey}`);
  }
  // Update PostgreSQL
  if (totalMoved > 0) {
    console.log(` Updating PostgreSQL...`);
    // 1. Update task_migration_map with new Jira keys
    let mapUpdated = 0;
    for (const [oldKey, newKey] of keyMapping) {
      const res = await pool.query(
        `UPDATE task_migration_map SET jira_issue_key = $1, migrated_at = NOW()
         WHERE jira_issue_key = $2`,
        [newKey, oldKey]
      );
      if ((res.rowCount || 0) > 0) {
        mapUpdated++;
      } else {
        // Try where old_task_id matches (identity mapping case)
        const res2 = await pool.query(
          `UPDATE task_migration_map SET jira_issue_key = $1, migrated_at = NOW()
           WHERE old_task_id = $2`,
          [newKey, oldKey]
        );
        if ((res2.rowCount || 0) > 0) mapUpdated++;
      }
    }
    console.log(` task_migration_map: ${mapUpdated} entries updated`);
    // 2. Update tasks table: change project from SOURCE to TARGET
    const taskUpdate = await pool.query(
      `UPDATE tasks SET project = $1 WHERE project = $2`,
      [to, from]
    );
    console.log(` tasks: ${taskUpdate.rowCount} rows (project ${from}${to})`);
    // 3. Update epics table
    try {
      const epicUpdate = await pool.query(
        `UPDATE epics SET project = $1 WHERE project = $2`,
        [to, from]
      );
      console.log(` epics: ${epicUpdate.rowCount} rows`);
    } catch { /* epics may not reference this project */ }
    // 4. Update FK references that use Jira keys — each best-effort; a missing
    // table/column is silently skipped by design.
    for (const [oldKey, newKey] of keyMapping) {
      try { await pool.query(`UPDATE memories SET jira_issue_key = $1 WHERE jira_issue_key = $2`, [newKey, oldKey]); } catch {}
      try { await pool.query(`UPDATE session_context SET jira_issue_key = $1 WHERE jira_issue_key = $2`, [newKey, oldKey]); } catch {}
      try { await pool.query(`UPDATE sessions SET jira_issue_key = $1 WHERE jira_issue_key = $2`, [newKey, oldKey]); } catch {}
      try { await pool.query(`UPDATE task_commits SET jira_issue_key = $1 WHERE jira_issue_key = $2`, [newKey, oldKey]); } catch {}
      try { await pool.query(`UPDATE deployments SET jira_issue_key = $1 WHERE jira_issue_key = $2`, [newKey, oldKey]); } catch {}
    }
    console.log(` FK references updated`);
    // 5. Update projects table references (drop archive rows for the source)
    try {
      await pool.query(`DELETE FROM project_archives WHERE project_key = $1`, [from]);
    } catch {}
  }
  // Delete source project if requested — only if it is verifiably empty.
  if (DELETE_SOURCE) {
    const remaining = await getAllIssues(from);
    if (remaining.length === 0) {
      console.log(` Deleting empty source project ${from}...`);
      const deleted = await deleteProject(from);
      console.log(` ${deleted ? 'Deleted' : 'FAILED to delete'} ${from}`);
    } else {
      console.log(` Source ${from} still has ${remaining.length} issues, not deleting.`);
    }
  }
  return { moved: totalMoved, failed: totalFailed };
}
// Entry point: validate credentials, resolve the CLI selection (single
// --from/--to pair or a named --batch) into a list of pairs, consolidate
// each sequentially, and print a summary.
async function main() {
  console.log('=== Project Consolidation (CF-762 Post-Migration) ===');
  console.log(`Mode: ${DRY_RUN ? 'DRY RUN' : 'LIVE'}`);
  console.log(`Delete source: ${DELETE_SOURCE ? 'yes' : 'no'}`);
  console.log('');
  if (!JIRA_USER || !JIRA_TOKEN) {
    console.error('Missing JIRA_USERNAME or JIRA_API_TOKEN');
    process.exit(1);
  }
  let pairs: Array<[string, string]> = [];
  if (BATCH) {
    switch (BATCH) {
      case 'tier1': pairs = TIER1; break;
      case 'tier2': pairs = TIER2; break;
      case 'tier3': pairs = TIER3; break;
      case 'all': pairs = [...TIER1, ...TIER2, ...TIER3]; break;
      default:
        console.error(`Unknown batch: ${BATCH}. Use: tier1, tier2, tier3, all`);
        process.exit(1);
    }
  } else if (FROM && TO) {
    pairs = [[FROM, TO]];
  } else {
    console.error('Usage:');
    console.error(' npx tsx scripts/consolidate-projects.ts --from LIT --to LITE [--dry-run] [--delete-source]');
    console.error(' npx tsx scripts/consolidate-projects.ts --batch tier1|tier2|tier3|all [--dry-run] [--delete-source]');
    process.exit(1);
  }
  console.log(`Pairs to consolidate (${pairs.length}):`);
  for (const [from, to] of pairs) {
    console.log(` ${from}${to}`);
  }
  console.log('');
  // Process pairs strictly sequentially to respect Jira rate limits.
  let totalMoved = 0;
  let totalFailed = 0;
  for (const [from, to] of pairs) {
    const result = await consolidate(from, to);
    totalMoved += result.moved;
    totalFailed += result.failed;
  }
  console.log('\n=== Consolidation Summary ===');
  console.log(`Total moved: ${totalMoved}`);
  console.log(`Total failed: ${totalFailed}`);
  console.log(`Mode: ${DRY_RUN ? 'DRY RUN' : 'LIVE'}`);
  await pool.end(); // release DB connections so the process can exit cleanly
}
// Top-level error handler: any unhandled rejection aborts with exit code 1.
main().catch((err: unknown) => {
  console.error('Consolidation failed:', err);
  process.exit(1);
});

213
scripts/jira-admin.ts Normal file
View File

@@ -0,0 +1,213 @@
#!/usr/bin/env npx tsx
/**
* Jira admin helper for migration (CF-762)
* Usage:
* npx tsx scripts/jira-admin.ts get-project CF
* npx tsx scripts/jira-admin.ts delete-project CF
* npx tsx scripts/jira-admin.ts create-project CF "Claude Framework"
* npx tsx scripts/jira-admin.ts count-issues CF
* npx tsx scripts/jira-admin.ts delete-all-issues CF
*/
import dotenv from 'dotenv';
import { dirname, join } from 'path';
import { fileURLToPath } from 'url';
// Resolve repo-root .env relative to this script (ESM has no __dirname).
const __dirname = dirname(fileURLToPath(import.meta.url));
dotenv.config({ path: join(__dirname, '..', '.env'), override: true });
// Jira Cloud connection — basic auth with username/email + API token.
const JIRA_URL = process.env.JIRA_URL || 'https://agiliton.atlassian.net';
const JIRA_USER = process.env.JIRA_USERNAME || process.env.JIRA_EMAIL || '';
const JIRA_TOKEN = process.env.JIRA_API_TOKEN || '';
const JIRA_AUTH = Buffer.from(`${JIRA_USER}:${JIRA_TOKEN}`).toString('base64');
// Fetch wrapper for the Jira v3 REST API. Absolute URLs pass through
// untouched; bare paths are rooted at ${JIRA_URL}/rest/api/3. Attaches
// basic-auth + JSON headers; caller-supplied headers win on conflicts.
async function jiraFetch(path: string, options: RequestInit = {}): Promise<Response> {
  const target = path.startsWith('http') ? path : `${JIRA_URL}/rest/api/3${path}`;
  const headers = {
    'Authorization': `Basic ${JIRA_AUTH}`,
    'Content-Type': 'application/json',
    'Accept': 'application/json',
    ...options.headers,
  };
  return fetch(target, { ...options, headers });
}
// Promise-based sleep helper.
function delay(ms: number): Promise<void> {
  return new Promise<void>((resolve) => {
    setTimeout(() => resolve(), ms);
  });
}
// CLI parsing: first argument is the subcommand, the rest are its arguments.
const [command, ...cmdArgs] = process.argv.slice(2);
// Dispatch the CLI subcommand. Each case is self-contained: it reads its
// arguments from cmdArgs, calls the Jira REST API, and prints results.
// Unknown commands print the command list.
async function main() {
  switch (command) {
    // Dump a single project's raw JSON.
    case 'get-project': {
      const key = cmdArgs[0];
      const res = await jiraFetch(`/project/${key}`);
      if (!res.ok) {
        console.error(`Failed: ${res.status} ${await res.text()}`);
        return;
      }
      const data = await res.json() as Record<string, unknown>;
      console.log(JSON.stringify(data, null, 2));
      break;
    }
    // List every project visible to the authenticated user.
    case 'list-projects': {
      const res = await jiraFetch('/project');
      if (!res.ok) {
        console.error(`Failed: ${res.status} ${await res.text()}`);
        return;
      }
      const projects = await res.json() as Array<{ key: string; name: string; id: string; projectTypeKey: string }>;
      console.log(`Total: ${projects.length} projects`);
      for (const p of projects) {
        console.log(` ${p.key}: ${p.name} (id=${p.id}, type=${p.projectTypeKey})`);
      }
      break;
    }
    // Count issues via a 1-result JQL search.
    // NOTE(review): relies on `total` in the /search/jql response — the newer
    // endpoint may not return it; verify against the instance.
    case 'count-issues': {
      const key = cmdArgs[0];
      const res = await jiraFetch(`/search/jql`, {
        method: 'POST',
        body: JSON.stringify({ jql: `project="${key}"`, maxResults: 1 }),
      });
      if (!res.ok) {
        console.error(`Failed: ${res.status} ${await res.text()}`);
        return;
      }
      const data = await res.json() as { total: number };
      console.log(`${key}: ${data.total} issues`);
      break;
    }
    // Show the first N issues of a project (default 20).
    case 'list-issues': {
      const key = cmdArgs[0];
      const max = parseInt(cmdArgs[1] || '20');
      const res = await jiraFetch(`/search/jql`, {
        method: 'POST',
        body: JSON.stringify({ jql: `project="${key}" ORDER BY key ASC`, maxResults: max, fields: ['key', 'summary', 'issuetype', 'status'] }),
      });
      if (!res.ok) {
        console.error(`Failed: ${res.status} ${await res.text()}`);
        return;
      }
      const data = await res.json() as { total: number; issues: Array<{ key: string; fields: { summary: string; issuetype: { name: string }; status: { name: string } } }> };
      console.log(`${key}: ${data.total} total issues (showing ${data.issues.length})`);
      for (const i of data.issues) {
        console.log(` ${i.key} [${i.fields.issuetype.name}] ${i.fields.status.name}: ${i.fields.summary.substring(0, 60)}`);
      }
      break;
    }
    // Collect all issue keys in a project, then delete them one at a time
    // (300ms pacing between DELETE calls).
    case 'delete-all-issues': {
      const key = cmdArgs[0];
      if (!key) { console.error('Usage: delete-all-issues <PROJECT_KEY>'); return; }
      // Get all issues
      let startAt = 0;
      const allKeys: string[] = [];
      while (true) {
        const res = await jiraFetch(`/search/jql`, {
          method: 'POST',
          body: JSON.stringify({ jql: `project="${key}" ORDER BY key ASC`, maxResults: 100, startAt, fields: ['key'] }),
        });
        if (!res.ok) { console.error(`Failed: ${res.status} ${await res.text()}`); return; }
        const data = await res.json() as { total: number; issues: Array<{ key: string }> };
        if (data.issues.length === 0) break;
        allKeys.push(...data.issues.map(i => i.key));
        startAt += data.issues.length;
        if (startAt >= data.total) break;
      }
      console.log(`Found ${allKeys.length} issues to delete in ${key}`);
      for (let i = 0; i < allKeys.length; i++) {
        await delay(300);
        const res = await jiraFetch(`/issue/${allKeys[i]}`, { method: 'DELETE' });
        if (!res.ok) {
          console.error(` FAIL delete ${allKeys[i]}: ${res.status}`);
        }
        // Progress heartbeat every 10 deletions.
        if (i % 10 === 0) console.log(` [${i + 1}/${allKeys.length}] Deleted ${allKeys[i]}`);
      }
      console.log(`Deleted ${allKeys.length} issues from ${key}`);
      break;
    }
    // Permanently delete a project (skips the trash/restore window).
    case 'delete-project': {
      const key = cmdArgs[0];
      if (!key) { console.error('Usage: delete-project <PROJECT_KEY>'); return; }
      // enableUndo=false for permanent deletion
      const res = await jiraFetch(`/project/${key}?enableUndo=false`, { method: 'DELETE' });
      if (res.status === 204) {
        console.log(`Project ${key} deleted permanently`);
      } else {
        console.error(`Failed: ${res.status} ${await res.text()}`);
      }
      break;
    }
    // Create a 'business'-type project led by the authenticated user.
    case 'create-project': {
      const key = cmdArgs[0];
      const name = cmdArgs[1] || key;
      if (!key) { console.error('Usage: create-project <KEY> <NAME>'); return; }
      // Get current user account ID for lead
      const meRes = await jiraFetch('/myself');
      const me = await meRes.json() as { accountId: string };
      const body = {
        key,
        name,
        projectTypeKey: 'business',
        leadAccountId: me.accountId,
        assigneeType: 'UNASSIGNED',
      };
      const res = await jiraFetch('/project', {
        method: 'POST',
        body: JSON.stringify(body),
      });
      // res.ok already covers 201; the extra check is belt-and-braces.
      if (res.ok || res.status === 201) {
        const data = await res.json() as { id: string; key: string };
        console.log(`Project created: ${data.key} (id=${data.id})`);
      } else {
        console.error(`Failed: ${res.status} ${await res.text()}`);
      }
      break;
    }
    // Inspect a project's type/style plus its issue types and their statuses.
    case 'get-schemes': {
      const key = cmdArgs[0];
      // Get issue type scheme for project
      const res = await jiraFetch(`/project/${key}`);
      if (!res.ok) {
        console.error(`Failed: ${res.status} ${await res.text()}`);
        return;
      }
      const data = await res.json() as Record<string, unknown>;
      console.log('Project type:', (data as any).projectTypeKey);
      console.log('Style:', (data as any).style);
      // Get issue types
      const itRes = await jiraFetch(`/project/${key}/statuses`);
      if (itRes.ok) {
        const itData = await itRes.json() as Array<{ name: string; id: string; statuses: Array<{ name: string }> }>;
        console.log('\nIssue types and statuses:');
        for (const it of itData) {
          console.log(` ${it.name} (id=${it.id}): ${it.statuses.map(s => s.name).join(', ')}`);
        }
      }
      break;
    }
    default:
      console.log('Commands: list-projects, get-project, count-issues, list-issues, delete-all-issues, delete-project, create-project, get-schemes');
  }
}
// Abort with exit code 1 on any unhandled error.
main().catch((err: unknown) => {
  console.error(err);
  process.exit(1);
});

View File

@@ -0,0 +1,887 @@
#!/usr/bin/env npx tsx
/**
* Migrate tasks from task-mcp PostgreSQL to Jira Cloud (CF-762)
* EXACT KEY MATCHING: CF-1 in task-mcp → CF-1 in Jira
*
* Strategy:
* 1. Create tasks in strict numeric order (1..maxId), filling gaps with placeholders
* 2. After all tasks, create epics (they get keys after maxId)
* 3. Then create session plans as epics
* 4. Link tasks to their epics via parent field update
* 5. Create issue links, retry cross-project ones
* 6. Store mapping and update FK references
*
* IMPORTANT: The Jira project must be empty (counter at 1) for key matching to work.
* Delete and recreate the project before running this script.
*
* Usage:
* npx tsx scripts/migrate-tasks-to-jira.ts [--dry-run] [--project CF] [--open-only] [--limit 5] [--batch-size 50]
*
* Requires env vars (from .env or shell):
* JIRA_URL, JIRA_USERNAME, JIRA_API_TOKEN
* POSTGRES_HOST (defaults to postgres.agiliton.internal)
*/
import pg from 'pg';
import dotenv from 'dotenv';
import { fileURLToPath } from 'url';
import { dirname, join } from 'path';
// Resolve repo-root .env relative to this script (ESM has no __dirname).
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
dotenv.config({ path: join(__dirname, '..', '.env'), override: true });
const { Pool } = pg;
// --- Config ---
// Jira Cloud connection — basic auth with username/email + API token.
const JIRA_URL = process.env.JIRA_URL || 'https://agiliton.atlassian.net';
const JIRA_USER = process.env.JIRA_USERNAME || process.env.JIRA_EMAIL || '';
const JIRA_TOKEN = process.env.JIRA_API_TOKEN || '';
const JIRA_AUTH = Buffer.from(`${JIRA_USER}:${JIRA_TOKEN}`).toString('base64');
// PostgreSQL pool for the task-mcp database.
// SECURITY: the database password used to be hard-coded here (a committed
// credential). It now comes from the environment — set POSTGRES_PASSWORD in
// .env (loaded above) and ROTATE the previously committed secret.
const pool = new Pool({
  host: process.env.POSTGRES_HOST || 'postgres.agiliton.internal',
  port: parseInt(process.env.POSTGRES_PORT || '5432'),
  database: 'agiliton',
  user: 'agiliton',
  password: process.env.POSTGRES_PASSWORD || '',
  max: 3, // small pool — the migration runs mostly sequentially
});
// --- CLI args ---
const args = process.argv.slice(2);
const DRY_RUN = args.includes('--dry-run');     // log intended actions, write nothing
const OPEN_ONLY = args.includes('--open-only'); // migrate only open tasks
// Flag values: the element following each flag (e.g. "--project CF" → 'CF').
const PROJECT_FILTER = args.find((a, i) => args[i - 1] === '--project') || null;
const LIMIT = parseInt(args.find((a, i) => args[i - 1] === '--limit') || '0') || 0;
const BATCH_SIZE = parseInt(args.find((a, i) => args[i - 1] === '--batch-size') || '50') || 50;
const SKIP_PREFLIGHT = args.includes('--skip-preflight');
// Herocoders Checklist for Jira custom field
const CHECKLIST_FIELD = 'customfield_10091';
// Rate limit: Jira Cloud allows ~100 req/min for basic auth
// 700ms delay = ~85 req/min (safe margin)
const DELAY_MS = 700;
const MAX_RETRIES = 5;
const BATCH_PAUSE_MS = 5000; // 5s pause between batches
// ADF max size (Jira limit)
const ADF_MAX_BYTES = 32_000;
// Promise-based sleep helper.
function delay(ms: number): Promise<void> {
  return new Promise<void>((resolve) => {
    setTimeout(() => resolve(), ms);
  });
}
// --- Mappings ---
// task-mcp priority codes → Jira priority names.
const PRIORITY_MAP: Record<string, string> = {
  P0: 'Highest',
  P1: 'High',
  P2: 'Medium',
  P3: 'Low',
};
// task-mcp task types → Jira issue types (everything non-bug becomes Task).
const TYPE_MAP: Record<string, string> = {
  task: 'Task',
  bug: 'Bug',
  feature: 'Task',
  debt: 'Task',
  investigation: 'Task',
};
// task-mcp statuses → Jira workflow statuses (all terminal states map to Done).
const STATUS_MAP: Record<string, string> = {
  open: 'To Do',
  pending: 'To Do',
  in_progress: 'In Progress',
  testing: 'In Progress',
  blocked: 'To Do',
  done: 'Done',
  completed: 'Done',
  abandoned: 'Done',
};
// task-mcp link types → Jira issue-link type names.
// NOTE(review): 'blocks', 'depends_on' and 'needs' all map to the Jira
// 'Blocks' link type although their natural directions differ — confirm the
// inward/outward direction is handled where links are created.
const LINK_TYPE_MAP: Record<string, string> = {
  blocks: 'Blocks',
  relates_to: 'Relates',
  duplicates: 'Duplicate',
  depends_on: 'Blocks',
  implements: 'Relates',
  fixes: 'Relates',
  causes: 'Relates',
  needs: 'Blocks',
  subtask_of: 'Relates',
};
// Valid Jira project key shape: 2-5 uppercase letters, e.g. "CF".
const VALID_PROJECT_KEY = /^[A-Z]{2,5}$/;
// Track migration mapping: old task_id → Jira issue key
const migrationMap: Map<string, string> = new Map();
// Jira project keys that exist on the instance.
const jiraProjects: Set<string> = new Set();
// Links whose creation failed (e.g. cross-project); kept for a later retry.
const failedLinks: Array<{ from: string; to: string; type: string }> = [];
// Track epic old_id → Jira key (assigned after tasks)
const epicJiraKeys: Map<string, string> = new Map();
// Tasks that need parent (epic) link set after epics are created
const pendingParentLinks: Array<{ taskJiraKey: string; epicOldId: string }> = [];
// --- Jira REST API helpers ---
// Thin fetch wrapper that roots `path` at the Jira v3 REST API and attaches
// basic-auth + JSON headers. Caller-supplied headers win on conflicts.
async function jiraFetch(path: string, options: RequestInit = {}): Promise<Response> {
  const headers = {
    'Authorization': `Basic ${JIRA_AUTH}`,
    'Content-Type': 'application/json',
    'Accept': 'application/json',
    ...options.headers,
  };
  return fetch(`${JIRA_URL}/rest/api/3${path}`, { ...options, headers });
}
// Rate-limited fetch with retry: waits DELAY_MS before EVERY attempt (global
// request pacing), then retries up to MAX_RETRIES times on 429/5xx, honouring
// the server's Retry-After header when present, otherwise exponential backoff.
async function jiraFetchWithRetry(path: string, options: RequestInit = {}): Promise<Response> {
  let lastResponse: Response | null = null;
  for (let attempt = 0; attempt <= MAX_RETRIES; attempt++) {
    await delay(DELAY_MS); // pace all requests, including the first
    const response = await jiraFetch(path, options);
    lastResponse = response;
    if (response.status === 429 || response.status >= 500) {
      if (attempt < MAX_RETRIES) {
        const retryAfter = response.headers.get('Retry-After');
        const backoffMs = retryAfter
          ? parseInt(retryAfter) * 1000          // server-directed wait (seconds → ms)
          : DELAY_MS * Math.pow(2, attempt + 1); // exponential fallback
        console.warn(` [RETRY] ${response.status} on ${path}, attempt ${attempt + 1}/${MAX_RETRIES}, waiting ${backoffMs}ms`);
        await delay(backoffMs);
        continue;
      }
      console.error(` [FAIL] ${response.status} on ${path} after ${MAX_RETRIES} retries`);
    }
    return response;
  }
  // Unreachable in practice (the loop always returns); satisfies the compiler.
  return lastResponse!;
}
// Same retry/backoff pattern as jiraFetchWithRetry, but against the v2 REST
// API (used for plain-text custom fields like the Herocoders checklist).
// Differences vs the v3 helper: does not honour Retry-After, and throws
// instead of returning the last response when every attempt hits 429/5xx.
async function jiraFetchV2WithRetry(path: string, options: RequestInit = {}): Promise<Response> {
  const url = `${JIRA_URL}/rest/api/2${path}`;
  for (let attempt = 0; attempt <= MAX_RETRIES; attempt++) {
    await delay(DELAY_MS); // pace all requests, including the first
    const response = await fetch(url, {
      ...options,
      headers: {
        'Authorization': `Basic ${JIRA_AUTH}`,
        'Content-Type': 'application/json',
        'Accept': 'application/json',
        ...options.headers,
      },
    });
    if (response.status === 429 || response.status >= 500) {
      if (attempt < MAX_RETRIES) {
        const backoffMs = DELAY_MS * Math.pow(2, attempt + 1); // exponential backoff
        console.warn(` [RETRY] v2 ${response.status} on ${path}, attempt ${attempt + 1}/${MAX_RETRIES}, waiting ${backoffMs}ms`);
        await delay(backoffMs);
        continue;
      }
    }
    return response;
  }
  throw new Error(`jiraFetchV2WithRetry: exhausted retries for ${path}`);
}
// --- ADF helpers ---
// Convert plain text to a minimal Atlassian Document Format (ADF) doc.
// Line endings are normalized to '\n'; blank lines delimit paragraphs, and
// consecutive non-blank lines stay joined with '\n' inside one paragraph.
// Oversized input is shrunk in 10% steps to stay under Jira's ~32KB ADF
// limit, with a truncation marker appended. Whitespace-only input yields a
// single '(empty)' paragraph so Jira never receives an empty doc.
function textToAdf(text: string): Record<string, unknown> {
  let body = text.replace(/\r\n/g, '\n').replace(/\r/g, '\n');

  // Shrink until comfortably below the limit (500-byte margin covers the
  // appended marker and ADF envelope overhead).
  const budget = ADF_MAX_BYTES - 500;
  if (Buffer.byteLength(body, 'utf8') > budget) {
    while (Buffer.byteLength(body, 'utf8') > budget) {
      body = body.substring(0, Math.floor(body.length * 0.9));
    }
    body += '\n\n[...truncated - description exceeded 32KB limit]';
  }

  const content: Array<Record<string, unknown>> = [];
  let buffer = '';
  const flush = () => {
    if (buffer.trim()) {
      content.push({
        type: 'paragraph',
        content: [{ type: 'text', text: buffer.trim() }],
      });
    }
    buffer = '';
  };
  for (const line of body.split('\n')) {
    if (line.trim() === '') {
      flush(); // blank line ends the current paragraph
    } else {
      buffer += (buffer ? '\n' : '') + line;
    }
  }
  flush();

  if (content.length === 0) {
    content.push({
      type: 'paragraph',
      content: [{ type: 'text', text: text.trim() || '(empty)' }],
    });
  }
  return { type: 'doc', version: 1, content };
}
// --- API operations ---
// List all project keys visible to the authenticated user.
// Logs and returns an empty array on failure.
async function getJiraProjects(): Promise<string[]> {
  const response = await jiraFetchWithRetry('/project');
  if (!response.ok) {
    console.error('Failed to list Jira projects:', response.status, await response.text());
    return [];
  }
  const projects = await response.json() as Array<{ key: string }>;
  const keys: string[] = [];
  for (const project of projects) {
    keys.push(project.key);
  }
  return keys;
}
// Count issues in a project via a 1-result JQL search; returns 0 on failure.
// NOTE(review): falls back to the returned page length (0 or 1) when the
// response omits `total` — in that case the value only distinguishes
// "empty" from "non-empty", not an exact count. Confirm against the
// instance's /search/jql response shape.
async function countJiraIssues(projectKey: string): Promise<number> {
  const res = await jiraFetchWithRetry('/search/jql', {
    method: 'POST',
    body: JSON.stringify({ jql: `project="${projectKey}"`, maxResults: 1, fields: ['summary'] }),
  });
  if (!res.ok) return 0;
  const data = await res.json() as { total?: number; issues?: unknown[] };
  return data.total ?? data.issues?.length ?? 0;
}
// Create a Jira issue from a prebuilt fields payload; returns the new issue
// key, or null on failure. In DRY_RUN, logs the summary and returns a
// synthetic "<PROJECT>-DRY" key without calling the API.
async function createJiraIssue(fields: Record<string, unknown>): Promise<string | null> {
  if (DRY_RUN) {
    const key = `${(fields.project as Record<string, string>).key}-DRY`;
    console.log(` [DRY] Would create: ${(fields.summary as string).substring(0, 60)}`);
    return key;
  }
  const res = await jiraFetchWithRetry('/issue', {
    method: 'POST',
    body: JSON.stringify({ fields }),
  });
  if (!res.ok) {
    const body = await res.text();
    console.error(` FAIL create issue: ${res.status} ${body}`);
    return null;
  }
  const data = await res.json() as { key: string };
  return data.key;
}
// Move an issue to the given workflow status by finding a matching transition:
// exact name match first, then a fuzzy fallback (substring / common aliases
// such as "Start Progress" for "In Progress"). Returns true on success; in
// DRY_RUN always true. Logs available transitions when none match.
async function transitionIssue(issueKey: string, targetStatus: string): Promise<boolean> {
  if (DRY_RUN) return true;
  // Fetch the transitions currently legal for this issue's workflow state.
  const res = await jiraFetchWithRetry(`/issue/${issueKey}/transitions`);
  if (!res.ok) return false;
  const data = await res.json() as { transitions: Array<{ id: string; name: string }> };
  const transition = data.transitions.find(t =>
    t.name.toLowerCase() === targetStatus.toLowerCase()
  );
  if (!transition) {
    // Try partial match (e.g., "In Progress" matches "Start Progress")
    const partialMatch = data.transitions.find(t =>
      t.name.toLowerCase().includes(targetStatus.toLowerCase()) ||
      // Map common alternative names
      (targetStatus === 'In Progress' && t.name.toLowerCase().includes('progress')) ||
      (targetStatus === 'Done' && t.name.toLowerCase().includes('done'))
    );
    if (partialMatch) {
      const transRes = await jiraFetchWithRetry(`/issue/${issueKey}/transitions`, {
        method: 'POST',
        body: JSON.stringify({ transition: { id: partialMatch.id } }),
      });
      return transRes.ok;
    }
    console.warn(` [WARN] No transition to "${targetStatus}" for ${issueKey}. Available: ${data.transitions.map(t => t.name).join(', ')}`);
    return false;
  }
  const transRes = await jiraFetchWithRetry(`/issue/${issueKey}/transitions`, {
    method: 'POST',
    body: JSON.stringify({ transition: { id: transition.id } }),
  });
  return transRes.ok;
}
// Write checklist items into the Herocoders checklist custom field using its
// markdown-ish "* [x] item" syntax. Uses the v2 API because the field takes
// plain text, not ADF. No-op in DRY_RUN or for an empty item list.
async function writeChecklist(issueKey: string, items: Array<{ item: string; checked: boolean }>): Promise<void> {
  if (DRY_RUN || items.length === 0) return;
  const lines: string[] = [];
  for (const entry of items) {
    lines.push(`* [${entry.checked ? 'x' : ' '}] ${entry.item}`);
  }
  const res = await jiraFetchV2WithRetry(`/issue/${issueKey}`, {
    method: 'PUT',
    body: JSON.stringify({ fields: { [CHECKLIST_FIELD]: lines.join('\n') } }),
  });
  if (!res.ok) {
    const body = await res.text();
    console.error(` FAIL checklist for ${issueKey}: ${res.status} ${body}`);
  }
}
// Attach an issue to a parent (epic) via the v3 parent field.
// Failures are logged but not thrown; no-op in DRY_RUN.
async function setParent(issueKey: string, parentKey: string): Promise<void> {
  if (DRY_RUN) return;
  const payload = { fields: { parent: { key: parentKey } } };
  const res = await jiraFetchWithRetry(`/issue/${issueKey}`, {
    method: 'PUT',
    body: JSON.stringify(payload),
  });
  if (res.ok) return;
  const body = await res.text();
  console.error(` FAIL set parent ${parentKey} for ${issueKey}: ${res.status} ${body}`);
}
// Create a typed link between two issues (inward -[type]-> outward).
// Returns true on success; logs and returns false on failure. In DRY_RUN,
// logs the would-be link and reports success.
async function createIssueLink(inwardKey: string, outwardKey: string, linkType: string): Promise<boolean> {
  if (DRY_RUN) {
    console.log(` [DRY] Would link: ${inwardKey} -[${linkType}]-> ${outwardKey}`);
    return true;
  }
  const payload = {
    type: { name: linkType },
    inwardIssue: { key: inwardKey },
    outwardIssue: { key: outwardKey },
  };
  const res = await jiraFetchWithRetry('/issueLink', {
    method: 'POST',
    body: JSON.stringify(payload),
  });
  if (res.ok) return true;
  const body = await res.text();
  console.error(` FAIL link ${inwardKey}->${outwardKey}: ${res.status} ${body}`);
  return false;
}
// Best-effort issue deletion through the rate-limited helper; the response
// is intentionally ignored.
async function deleteIssue(issueKey: string): Promise<void> {
  await jiraFetchWithRetry(`/issue/${issueKey}`, { method: 'DELETE' });
}
// --- Pre-flight check ---
// Intentionally performs NO in-project check: creating a test issue would
// consume key #1 and break the exact-key-matching requirement (see header).
// Always returns true; it exists to print the warning below.
async function preflightWorkflowCheck(projectKey: string): Promise<boolean> {
  console.log(`\nPre-flight workflow check on ${projectKey}...`);
  if (DRY_RUN || SKIP_PREFLIGHT) {
    console.log(` [${DRY_RUN ? 'DRY' : 'SKIP'}] Skipping pre-flight check`);
    return true;
  }
  // IMPORTANT: pre-flight consumes a key number!
  // We must account for this. The test issue will be key #1,
  // then we delete it, but the counter stays at 2.
  // So we CANNOT do pre-flight on the same project if we want exact keys.
  // Instead, use a different project for pre-flight.
  console.log(' WARNING: Pre-flight check would consume issue key #1.');
  console.log(' Skipping in-project pre-flight to preserve key sequence.');
  console.log(' Use --skip-preflight explicitly if already verified.');
  return true;
}
// --- Migration: exact key ordering ---
/**
 * Migrate one project's tasks into Jira with EXACT key matching: issues are
 * created in strict numeric order 1..effectiveMax so each DB task
 * `<PROJECT>-n` receives the identical Jira key `<PROJECT>-n`. Gaps in the
 * numeric sequence (deleted tasks) are filled with placeholder issues that
 * are immediately transitioned to Done, keeping the key counter aligned.
 *
 * Side effects: fills migrationMap (old task id → Jira key) and queues
 * pendingParentLinks for later epic linking. Aborts the whole process on any
 * create failure or key mismatch — a broken key sequence is unrecoverable.
 *
 * @param projectKey Jira/DB project key (e.g. "CF")
 * @returns an empty map; retained for interface compatibility with callers.
 */
async function migrateTasksExactKeys(projectKey: string): Promise<Map<string, string>> {
  const epicMap = new Map<string, string>();
  // 1. Load all tasks for this project, indexed by numeric ID
  const tasks = await pool.query(
    `SELECT id, title, description, type, status, priority, epic_id, created_at
     FROM tasks WHERE project = $1 ORDER BY id`,
    [projectKey]
  );
  // Build a map of numeric ID → task row
  const taskById = new Map<number, (typeof tasks.rows)[0]>();
  let maxNum = 0;
  for (const task of tasks.rows) {
    const m = task.id.match(new RegExp(`^${projectKey}-(\\d+)$`));
    if (m) {
      const num = parseInt(m[1]);
      taskById.set(num, task);
      if (num > maxNum) maxNum = num;
    }
  }
  if (maxNum === 0) {
    console.log(' No numeric task IDs found, skipping.');
    return epicMap;
  }
  const effectiveMax = LIMIT > 0 ? Math.min(maxNum, LIMIT) : maxNum;
  // Count real tasks vs gaps within 1..effectiveMax.
  // (FIX: removed a dead `gapCount` computation that allocated an O(n) array
  // and was unused — it was immediately superseded by this loop.)
  let realTasks = 0;
  let gaps = 0;
  for (let n = 1; n <= effectiveMax; n++) {
    if (taskById.has(n)) realTasks++;
    else gaps++;
  }
  console.log(` Creating ${effectiveMax} issues (${realTasks} real tasks + ${gaps} placeholders)...`);
  // 2. Create issues 1..effectiveMax in strict order
  for (let n = 1; n <= effectiveMax; n++) {
    const task = taskById.get(n);
    const taskId = `${projectKey}-${n}`;
    const expectedJiraKey = `${projectKey}-${n}`;
    if (task) {
      // Real task: map DB type/status onto labels for traceability
      const labels: string[] = ['migrated-from-task-mcp'];
      if (task.type === 'feature') labels.push('feature');
      if (task.type === 'debt') labels.push('tech-debt');
      if (task.type === 'investigation') labels.push('investigation');
      if (task.status === 'blocked') labels.push('blocked');
      if (task.status === 'abandoned') labels.push('abandoned');
      const fields: Record<string, unknown> = {
        project: { key: projectKey },
        summary: task.title.substring(0, 255),
        issuetype: { name: TYPE_MAP[task.type] || 'Task' },
        priority: { name: PRIORITY_MAP[task.priority] || 'Medium' },
        labels,
      };
      if (task.description) {
        fields.description = textToAdf(task.description);
      }
      // Don't set parent here — epics don't exist yet. Queue for later.
      const jiraKey = await createJiraIssue(fields);
      if (!jiraKey) {
        console.error(` FATAL: Failed to create ${taskId}, key sequence broken!`);
        process.exit(1);
      }
      // Verify the Jira key matches the expected exact key
      if (!DRY_RUN && jiraKey !== expectedJiraKey) {
        console.error(` FATAL: Key mismatch! Expected ${expectedJiraKey}, got ${jiraKey}. Aborting.`);
        process.exit(1);
      }
      migrationMap.set(task.id, jiraKey);
      // Transition to the mapped workflow status (To Do needs no transition)
      const targetStatus = STATUS_MAP[task.status] || 'To Do';
      if (targetStatus !== 'To Do') {
        await transitionIssue(jiraKey, targetStatus);
      }
      // Checklist items, if any
      const checklist = await pool.query(
        'SELECT item, checked FROM task_checklist WHERE task_id = $1 ORDER BY position',
        [task.id]
      );
      if (checklist.rows.length > 0) {
        await writeChecklist(jiraKey, checklist.rows);
      }
      // Queue parent link for later (epics are created after tasks)
      if (task.epic_id) {
        pendingParentLinks.push({ taskJiraKey: jiraKey, epicOldId: task.epic_id });
      }
    } else {
      // Gap — create placeholder to keep the key counter aligned
      const fields: Record<string, unknown> = {
        project: { key: projectKey },
        summary: `[Placeholder] Deleted task ${taskId}`,
        issuetype: { name: 'Task' },
        labels: ['migration-placeholder', 'migrated-from-task-mcp'],
      };
      const jiraKey = await createJiraIssue(fields);
      if (!jiraKey) {
        console.error(` FATAL: Failed to create placeholder for ${taskId}, key sequence broken!`);
        process.exit(1);
      }
      if (!DRY_RUN && jiraKey !== expectedJiraKey) {
        console.error(` FATAL: Key mismatch! Expected ${expectedJiraKey}, got ${jiraKey}. Aborting.`);
        process.exit(1);
      }
      // Transition placeholder to Done so it drops out of active boards
      await transitionIssue(jiraKey, 'Done');
    }
    // Progress logging (FIX: separator between label and key was missing)
    if (n % 10 === 0 || n === effectiveMax) {
      const label = task ? task.id : 'gap → placeholder';
      console.log(` [${n}/${effectiveMax}] ${label} → ${projectKey}-${n}`);
    }
    // Batch pause to stay under Jira Cloud rate limits
    if (n > 0 && n % BATCH_SIZE === 0) {
      console.log(` [BATCH PAUSE] ${n}/${effectiveMax}, pausing ${BATCH_PAUSE_MS / 1000}s...`);
      await delay(BATCH_PAUSE_MS);
    }
  }
  return epicMap;
}
/**
 * Create this project's DB epics as Jira Epics AFTER all tasks, so epic keys
 * land above the migrated task key range. Populates epicJiraKeys
 * (old epic id → Jira key) and transitions each epic to match its DB status.
 */
async function migrateEpicsAfterTasks(projectKey: string): Promise<void> {
  const epics = await pool.query(
    'SELECT id, title, description, status FROM epics WHERE project = $1 ORDER BY id',
    [projectKey]
  );
  if (epics.rows.length === 0) return;
  console.log(` Creating ${epics.rows.length} epics (after task range)...`);
  for (let i = 0; i < epics.rows.length; i++) {
    const epic = epics.rows[i];
    const labels: string[] = ['migrated-from-task-mcp'];
    const fields: Record<string, unknown> = {
      project: { key: projectKey },
      summary: epic.title.substring(0, 255),
      // undefined is dropped by JSON.stringify, so empty descriptions are omitted
      description: epic.description ? textToAdf(epic.description) : undefined,
      issuetype: { name: 'Epic' },
      labels,
    };
    const jiraKey = await createJiraIssue(fields);
    if (jiraKey) {
      epicJiraKeys.set(epic.id, jiraKey);
      // FIX: separator between the old epic id and the new Jira key was missing
      console.log(` [${i + 1}/${epics.rows.length}] Epic ${epic.id} → ${jiraKey}: ${epic.title.substring(0, 50)}`);
      if (epic.status === 'completed' || epic.status === 'done') {
        await transitionIssue(jiraKey, 'Done');
      } else if (epic.status === 'in_progress') {
        await transitionIssue(jiraKey, 'In Progress');
      }
    }
  }
}
/**
 * Migrate session plans as Jira Epics, created after the task range so their
 * keys cannot collide with exact task keys. Plan metadata (file name, status)
 * is encoded as labels; executed/abandoned plans are closed, approved ones
 * moved to In Progress. Keys are recorded in epicJiraKeys as `plan-<id>`.
 */
async function migrateSessionPlansAfterTasks(projectKey: string): Promise<void> {
  const plans = await pool.query(
    `SELECT sp.id, sp.session_id, sp.plan_file_name, sp.plan_content, sp.status
     FROM session_plans sp
     JOIN sessions s ON sp.session_id = s.id
     WHERE s.project = $1`,
    [projectKey]
  );
  if (plans.rows.length === 0) return;
  console.log(` Creating ${plans.rows.length} session plans as Epics...`);
  for (let i = 0; i < plans.rows.length; i++) {
    const plan = plans.rows[i];
    const labels: string[] = ['migrated-from-task-mcp', 'session-plan'];
    if (plan.plan_file_name) {
      // Sanitize: Jira labels cannot contain spaces/special characters
      labels.push(`plan:${plan.plan_file_name.replace(/[^a-zA-Z0-9._-]/g, '_').substring(0, 50)}`);
    }
    if (plan.status) {
      labels.push(`plan-status:${plan.status}`);
    }
    const fields: Record<string, unknown> = {
      project: { key: projectKey },
      summary: `[Session Plan] ${plan.plan_file_name || `Plan from session ${plan.session_id}`}`.substring(0, 255),
      description: plan.plan_content ? textToAdf(plan.plan_content) : undefined,
      issuetype: { name: 'Epic' },
      labels,
    };
    const jiraKey = await createJiraIssue(fields);
    if (jiraKey) {
      epicJiraKeys.set(`plan-${plan.id}`, jiraKey);
      // FIX: separator between the plan id and the new Jira key was missing
      console.log(` [${i + 1}/${plans.rows.length}] Plan ${plan.id} → ${jiraKey}`);
      if (plan.status === 'executed' || plan.status === 'abandoned') {
        await transitionIssue(jiraKey, 'Done');
      } else if (plan.status === 'approved') {
        await transitionIssue(jiraKey, 'In Progress');
      }
    }
  }
}
/**
 * Resolve the queued task→epic parent links now that epics exist in Jira.
 * Links whose epic never got a Jira key are silently skipped.
 */
async function linkTasksToEpics(): Promise<void> {
  const total = pendingParentLinks.length;
  if (total === 0) return;
  console.log(` Setting parent (epic) for ${total} tasks...`);
  let linked = 0;
  for (const pending of pendingParentLinks) {
    const epicJiraKey = epicJiraKeys.get(pending.epicOldId);
    if (!epicJiraKey) continue;
    await setParent(pending.taskJiraKey, epicJiraKey);
    linked += 1;
    if (linked % 20 === 0) {
      console.log(` [${linked}/${total}] parent links set`);
    }
    // Periodic pause to stay under rate limits
    if (linked % BATCH_SIZE === 0) {
      console.log(` [BATCH PAUSE] ${linked}/${total}, pausing...`);
      await delay(BATCH_PAUSE_MS);
    }
  }
  console.log(` Linked ${linked} tasks to epics`);
}
/**
 * Recreate task-to-task links in Jira for one project. Links whose endpoints
 * are not yet in migrationMap (cross-project links) are pushed onto
 * failedLinks for a later retry pass.
 */
async function migrateLinks(projectKey: string): Promise<void> {
  const links = await pool.query(
    `SELECT tl.from_task_id, tl.to_task_id, tl.link_type
     FROM task_links tl
     JOIN tasks t1 ON tl.from_task_id = t1.id
     JOIN tasks t2 ON tl.to_task_id = t2.id
     WHERE t1.project = $1 OR t2.project = $1`,
    [projectKey]
  );
  if (links.rows.length === 0) return;
  console.log(` Migrating ${links.rows.length} links...`);
  let created = 0;
  let skipped = 0;
  for (const link of links.rows) {
    const fromKey = migrationMap.get(link.from_task_id);
    const toKey = migrationMap.get(link.to_task_id);
    if (!fromKey || !toKey) {
      failedLinks.push({ from: link.from_task_id, to: link.to_task_id, type: link.link_type });
      skipped += 1;
      continue;
    }
    const jiraLinkType = LINK_TYPE_MAP[link.link_type] || 'Relates';
    // depends_on / needs links point the opposite direction in Jira
    const reversed = link.link_type === 'depends_on' || link.link_type === 'needs';
    const ok = reversed
      ? await createIssueLink(toKey, fromKey, jiraLinkType)
      : await createIssueLink(fromKey, toKey, jiraLinkType);
    if (ok) created += 1;
  }
  console.log(` Created ${created} links, ${skipped} deferred for cross-project retry`);
}
/**
 * Second pass over links deferred by migrateLinks(): after every project has
 * been processed, both endpoints may now resolve through migrationMap.
 */
async function retryFailedLinks(): Promise<void> {
  if (failedLinks.length === 0) return;
  console.log(`\nRetrying ${failedLinks.length} deferred cross-project links...`);
  let created = 0;
  let failed = 0;
  for (const link of failedLinks) {
    const fromKey = migrationMap.get(link.from);
    const toKey = migrationMap.get(link.to);
    if (!fromKey || !toKey) {
      failed += 1;
      continue;
    }
    const jiraLinkType = LINK_TYPE_MAP[link.type] || 'Relates';
    // depends_on / needs links point the opposite direction in Jira
    const reversed = link.type === 'depends_on' || link.type === 'needs';
    const ok = reversed
      ? await createIssueLink(toKey, fromKey, jiraLinkType)
      : await createIssueLink(fromKey, toKey, jiraLinkType);
    if (ok) created += 1;
    else failed += 1;
  }
  console.log(` Retry results: ${created} created, ${failed} failed`);
}
// --- Post-migration ---
/**
 * Persist the old-task-id → Jira-key mapping into task_migration_map so
 * other tables can later be re-pointed at Jira issue keys.
 *
 * FIX: previously the CREATE TABLE ran even in dry-run mode, contradicting
 * the "DRY RUN (no changes made)" promise in the final summary — all DB
 * writes are now guarded by DRY_RUN.
 */
async function updateSessionMappings(): Promise<void> {
  console.log('\nStoring migration mappings...');
  if (!DRY_RUN) {
    await pool.query(`
      CREATE TABLE IF NOT EXISTS task_migration_map (
        old_task_id TEXT PRIMARY KEY,
        jira_issue_key TEXT NOT NULL,
        migrated_at TIMESTAMPTZ DEFAULT NOW()
      )
    `);
    for (const [oldId, jiraKey] of migrationMap) {
      // Upsert so re-runs refresh the key and timestamp
      await pool.query(
        `INSERT INTO task_migration_map (old_task_id, jira_issue_key)
         VALUES ($1, $2)
         ON CONFLICT (old_task_id) DO UPDATE SET jira_issue_key = $2, migrated_at = NOW()`,
        [oldId, jiraKey]
      );
    }
  }
  console.log(` Stored ${migrationMap.size} mappings`);
}
/**
 * Back-fill a jira_issue_key column onto tables that reference old task ids
 * (memories, session_context, deployments, task_commits, sessions), joining
 * through task_migration_map. Skipped entirely in dry-run mode.
 * All failures are logged as warnings; nothing is thrown.
 */
async function updateForeignKeyReferences(): Promise<void> {
  console.log('\nUpdating FK references with Jira issue keys...');
  if (DRY_RUN) {
    console.log(' [DRY] Skipping FK reference updates');
    return;
  }
  // Add the jira_issue_key column where missing. Table/column names are
  // hard-coded constants, so string-built DDL is safe here.
  const alterStatements = [
    'ALTER TABLE memories ADD COLUMN IF NOT EXISTS jira_issue_key TEXT',
    'ALTER TABLE session_context ADD COLUMN IF NOT EXISTS jira_issue_key TEXT',
    'ALTER TABLE deployments ADD COLUMN IF NOT EXISTS jira_issue_key TEXT',
    'ALTER TABLE task_commits ADD COLUMN IF NOT EXISTS jira_issue_key TEXT',
  ];
  for (const sql of alterStatements) {
    try { await pool.query(sql); }
    catch (e: unknown) {
      const msg = e instanceof Error ? e.message : String(e);
      // "does not exist" here means the whole table is absent — tolerated
      if (!msg.includes('does not exist')) console.warn(` [WARN] ${sql}: ${msg}`);
    }
  }
  // Join each table's old task-id FK against the migration map; only fill
  // rows not already populated (idempotent on re-run).
  const updates = [
    { table: 'memories', fk: 'task_id', desc: 'memories' },
    { table: 'session_context', fk: 'current_task_id', desc: 'session_context' },
    { table: 'deployments', fk: 'task_id', desc: 'deployments' },
    { table: 'task_commits', fk: 'task_id', desc: 'task_commits' },
  ];
  for (const { table, fk, desc } of updates) {
    try {
      const result = await pool.query(
        `UPDATE ${table} SET jira_issue_key = m.jira_issue_key
         FROM task_migration_map m
         WHERE ${table}.${fk} = m.old_task_id
         AND ${table}.jira_issue_key IS NULL`
      );
      console.log(` ${desc}: ${result.rowCount} rows updated`);
    } catch (e: unknown) {
      const msg = e instanceof Error ? e.message : String(e);
      console.warn(` [WARN] ${desc}: ${msg}`);
    }
  }
  // sessions has no direct task FK — resolve via session_context's
  // current_task_id instead.
  try {
    const result = await pool.query(
      `UPDATE sessions SET jira_issue_key = m.jira_issue_key
       FROM task_migration_map m, session_context sc
       WHERE sc.session_id = sessions.id
       AND sc.current_task_id = m.old_task_id
       AND sessions.jira_issue_key IS NULL`
    );
    console.log(` sessions: ${result.rowCount} rows updated`);
  } catch (e: unknown) {
    const msg = e instanceof Error ? e.message : String(e);
    console.warn(` [WARN] sessions: ${msg}`);
  }
}
// --- Main ---
/**
 * Orchestrates the full migration: per-project exact-key task creation,
 * epics, session plans, parent links and issue links, then cross-project
 * link retries, mapping persistence, and FK back-fill.
 * Exits non-zero when Jira credentials are missing.
 */
async function main() {
  console.log('=== task-mcp → Jira Cloud Migration (EXACT KEY MATCHING) ===');
  console.log(`Jira: ${JIRA_URL}`);
  console.log(`User: ${JIRA_USER}`);
  console.log(`Mode: ${DRY_RUN ? 'DRY RUN' : 'LIVE'}`);
  console.log(`Filter: ${PROJECT_FILTER || 'all valid projects'}`);
  console.log(`Scope: ${OPEN_ONLY ? 'open tasks only' : 'all tasks'}`);
  console.log(`Limit: ${LIMIT || 'none'}`);
  console.log(`Batch: ${BATCH_SIZE} (${BATCH_PAUSE_MS / 1000}s pause)`);
  console.log(`Rate: ${DELAY_MS}ms delay, ${MAX_RETRIES} retries`);
  console.log('');
  if (!JIRA_USER || !JIRA_TOKEN) {
    console.error('Missing JIRA_USERNAME or JIRA_API_TOKEN');
    process.exit(1);
  }
  const existingProjects = await getJiraProjects();
  existingProjects.forEach(p => jiraProjects.add(p));
  console.log(`Existing Jira projects: ${existingProjects.join(', ')}`);
  // Only short uppercase keys qualify as Jira-compatible project keys
  const dbProjects = await pool.query(
    'SELECT key, name FROM projects WHERE key ~ $1 ORDER BY key',
    ['^[A-Z]{2,5}$']
  );
  const projectsToMigrate = dbProjects.rows.filter(p => {
    if (PROJECT_FILTER && p.key !== PROJECT_FILTER) return false;
    if (!VALID_PROJECT_KEY.test(p.key)) return false;
    return true;
  });
  console.log(`Projects to migrate: ${projectsToMigrate.map(p => p.key).join(', ')}`);
  // Projects must be created in Jira manually first — warn, never auto-create
  const missing = projectsToMigrate.filter(p => !jiraProjects.has(p.key));
  if (missing.length > 0) {
    console.log(`\nWARNING: These projects don't exist in Jira yet (will be skipped):`);
    missing.forEach(p => console.log(` ${p.key} - ${p.name}`));
    console.log('Create them in Jira first, then re-run migration.\n');
  }
  // Migrate each project
  for (const project of projectsToMigrate) {
    if (!jiraProjects.has(project.key)) {
      console.log(`\nSkipping ${project.key} (not in Jira)`);
      continue;
    }
    console.log(`\n--- Migrating project: ${project.key} (${project.name}) ---`);
    // Check if project already has issues (already migrated); exact-key
    // creation only works on a completely empty project
    const existingCount = await countJiraIssues(project.key);
    if (existingCount > 0) {
      console.log(` Skipping: already has ${existingCount} issues in Jira`);
      continue;
    }
    // Clear per-project state
    pendingParentLinks.length = 0;
    // 1. Tasks in exact numeric order (with gap placeholders)
    await migrateTasksExactKeys(project.key);
    // 2. Epics (after tasks, so they get keys after maxTaskId)
    await migrateEpicsAfterTasks(project.key);
    // 3. Session plans as epics
    await migrateSessionPlansAfterTasks(project.key);
    // 4. Link tasks to their parent epics (now that epics exist)
    await linkTasksToEpics();
    // 5. Issue links
    await migrateLinks(project.key);
    // Summary
    // NOTE(review): epicJiraKeys is never reset between projects, so this
    // epic count accumulates across projects — confirm whether a per-project
    // figure was intended.
    const taskCount = Array.from(migrationMap.values()).filter(v => v.startsWith(`${project.key}-`)).length;
    console.log(` Done: ${epicJiraKeys.size} epics, ${taskCount} tasks migrated`);
  }
  // 6. Retry cross-project links
  await retryFailedLinks();
  // 7. Store mapping
  await updateSessionMappings();
  // 8. Update FK references
  await updateForeignKeyReferences();
  // Final summary
  console.log('\n=== Migration Summary ===');
  console.log(`Total issues migrated: ${migrationMap.size}`);
  console.log(`Epics created: ${epicJiraKeys.size}`);
  console.log(`Failed links: ${failedLinks.filter(l => !migrationMap.has(l.from) || !migrationMap.has(l.to)).length}`);
  console.log(`Mode: ${DRY_RUN ? 'DRY RUN (no changes made)' : 'LIVE'}`);
  await pool.end();
}
// Entry point: any uncaught failure aborts the process with a non-zero exit code.
main().catch((err: unknown) => {
  console.error('Migration failed:', err);
  process.exit(1);
});

View File

@@ -0,0 +1,221 @@
#!/usr/bin/env npx tsx
/**
 * Prepare all projects for exact-key migration (CF-762)
 * For each project: delete → recreate → assign shared issue type scheme
 * Then the migration script can run for all projects at once.
 *
 * Usage:
 * npx tsx scripts/prepare-all-projects.ts [--dry-run] [--exclude CF]
 */
import pg from 'pg';
import dotenv from 'dotenv';
import { dirname, join } from 'path';
import { fileURLToPath } from 'url';
const __dirname = dirname(fileURLToPath(import.meta.url));
dotenv.config({ path: join(__dirname, '..', '.env'), override: true });
const { Pool } = pg;
const JIRA_URL = process.env.JIRA_URL || 'https://agiliton.atlassian.net';
const JIRA_USER = process.env.JIRA_USERNAME || process.env.JIRA_EMAIL || '';
const JIRA_TOKEN = process.env.JIRA_API_TOKEN || '';
const JIRA_AUTH = Buffer.from(`${JIRA_USER}:${JIRA_TOKEN}`).toString('base64');
const SHARED_SCHEME_ID = '10329'; // Agiliton Software Issue Type Scheme
// SECURITY FIX: the Postgres password was hard-coded here (and is now in git
// history — rotate that credential). All connection settings are read from
// the environment (.env is loaded above); the previous literal defaults for
// host/db/user are kept as fallbacks for backward compatibility.
const pool = new Pool({
  host: process.env.POSTGRES_HOST || 'postgres.agiliton.internal',
  port: parseInt(process.env.POSTGRES_PORT || '5432', 10),
  database: process.env.POSTGRES_DB || 'agiliton',
  user: process.env.POSTGRES_USER || 'agiliton',
  password: process.env.POSTGRES_PASSWORD || '',
  max: 3,
});
const args = process.argv.slice(2);
const DRY_RUN = args.includes('--dry-run');
// --exclude takes a comma-separated list of project keys to skip
const excludeIdx = args.indexOf('--exclude');
const EXCLUDE = excludeIdx >= 0 ? args[excludeIdx + 1]?.split(',') || [] : [];
/** Promise-based sleep: resolves after the given number of milliseconds. */
function delay(ms: number): Promise<void> {
  return new Promise<void>((resolve) => {
    setTimeout(resolve, ms);
  });
}
/**
 * Thin wrapper over fetch for the Jira v3 REST API: prefixes the base URL and
 * injects basic-auth + JSON headers. Caller-supplied headers win on conflict.
 */
async function jiraFetch(path: string, options: RequestInit = {}): Promise<Response> {
  const headers = {
    Authorization: `Basic ${JIRA_AUTH}`,
    'Content-Type': 'application/json',
    Accept: 'application/json',
    ...options.headers,
  };
  return fetch(`${JIRA_URL}/rest/api/3${path}`, { ...options, headers });
}
/** List all Jira projects; returns an empty array on any HTTP failure. */
async function getJiraProjects(): Promise<Array<{ key: string; name: string; id: string }>> {
  const res = await jiraFetch('/project');
  if (!res.ok) return [];
  const projects = await res.json() as Array<{ key: string; name: string; id: string }>;
  return projects;
}
/** Permanently delete a Jira project (undo disabled). True only on HTTP 204. */
async function deleteProject(key: string): Promise<boolean> {
  const res = await jiraFetch(`/project/${key}?enableUndo=false`, { method: 'DELETE' });
  const ok = res.status === 204;
  return ok;
}
/**
 * Create a business-type Jira project with the given key/name/lead.
 * Returns the new project id, or null on failure (error is logged).
 */
async function createProject(key: string, name: string, leadAccountId: string): Promise<string | null> {
  const payload = {
    key,
    name,
    projectTypeKey: 'business',
    leadAccountId,
    assigneeType: 'UNASSIGNED',
  };
  const res = await jiraFetch('/project', {
    method: 'POST',
    body: JSON.stringify(payload),
  });
  if (!res.ok && res.status !== 201) {
    console.error(` FAIL create ${key}: ${res.status} ${await res.text()}`);
    return null;
  }
  const data = await res.json() as { id: string };
  return data.id;
}
/** Attach the shared issue type scheme (SHARED_SCHEME_ID) to a project. */
async function assignScheme(projectId: string): Promise<boolean> {
  const body = JSON.stringify({
    issueTypeSchemeId: SHARED_SCHEME_ID,
    projectId,
  });
  const res = await jiraFetch('/issuetypescheme/project', { method: 'PUT', body });
  return res.ok || res.status === 204;
}
/** Confirm the project's /statuses listing exposes Epic, Task and Bug entries. */
async function verifyScheme(key: string): Promise<boolean> {
  const res = await jiraFetch(`/project/${key}/statuses`);
  if (!res.ok) return false;
  const entries = await res.json() as Array<{ name: string }>;
  const names = new Set(entries.map(e => e.name));
  return ['Epic', 'Task', 'Bug'].every(required => names.has(required));
}
/**
 * For every DB project that still exists in Jira: delete the Jira project,
 * recreate it empty (which resets the issue key counter to 1), attach the
 * shared issue type scheme, and verify Epic/Task/Bug are available.
 * Prints a summary table first; --dry-run stops after the table.
 */
async function main() {
  console.log('=== Prepare Projects for Migration ===');
  console.log(`Mode: ${DRY_RUN ? 'DRY RUN' : 'LIVE'}`);
  console.log(`Exclude: ${EXCLUDE.length > 0 ? EXCLUDE.join(', ') : 'none'}`);
  console.log('');
  // Get current user for project lead
  const meRes = await jiraFetch('/myself');
  const me = await meRes.json() as { accountId: string };
  // Get existing Jira projects
  const jiraProjects = await getJiraProjects();
  const jiraProjectMap = new Map(jiraProjects.map(p => [p.key, p]));
  console.log(`Jira projects: ${jiraProjects.length}`);
  // Get DB projects with tasks; max_id is the highest numeric task suffix,
  // which determines how many exact-key issues the migration will create
  const dbProjects = await pool.query(
    `SELECT p.key, p.name, COUNT(t.id) as task_count,
     MAX(CAST(REGEXP_REPLACE(t.id, '^' || p.key || '-', '') AS INTEGER)) as max_id
     FROM projects p
     JOIN tasks t ON t.project = p.key
     WHERE p.key ~ '^[A-Z]{2,5}$'
     GROUP BY p.key, p.name
     ORDER BY p.key`
  );
  console.log(`DB projects with tasks: ${dbProjects.rows.length}`);
  console.log('');
  // Filter: must exist in Jira, not excluded
  const toProcess = dbProjects.rows.filter((p: any) => {
    if (EXCLUDE.includes(p.key)) return false;
    if (!jiraProjectMap.has(p.key)) return false;
    return true;
  });
  console.log(`Projects to prepare: ${toProcess.length}`);
  console.log('');
  // Summary table
  console.log('Project | Tasks | Max ID | Placeholders | Status');
  console.log('--------|-------|--------|-------------|-------');
  let totalTasks = 0;
  let totalPlaceholders = 0;
  for (const p of toProcess) {
    // NOTE(review): task_count comes from COUNT(); node-pg commonly returns
    // bigint aggregates as strings, relying on numeric coercion in the
    // subtraction below — confirm max_id/task_count types.
    const placeholders = p.max_id - p.task_count;
    totalTasks += parseInt(p.task_count);
    totalPlaceholders += placeholders;
    console.log(`${p.key.padEnd(7)} | ${String(p.task_count).padStart(5)} | ${String(p.max_id).padStart(6)} | ${String(placeholders).padStart(11)} | pending`);
  }
  console.log(`TOTAL | ${String(totalTasks).padStart(5)} | ${String(totalTasks + totalPlaceholders).padStart(6)} | ${String(totalPlaceholders).padStart(11)} |`);
  console.log('');
  if (DRY_RUN) {
    console.log('[DRY RUN] Would process above projects. Run without --dry-run to execute.');
    await pool.end();
    return;
  }
  // Process each project
  let success = 0;
  let failed = 0;
  for (let i = 0; i < toProcess.length; i++) {
    const p = toProcess[i];
    const jiraProject = jiraProjectMap.get(p.key)!;
    console.log(`[${i + 1}/${toProcess.length}] ${p.key} (${p.name})...`);
    // 1. Delete (brief pause first — rate limiting)
    await delay(1000);
    const deleted = await deleteProject(p.key);
    if (!deleted) {
      console.error(` FAIL delete ${p.key}`);
      failed++;
      continue;
    }
    console.log(` Deleted`);
    // 2. Wait a bit for Jira to process
    await delay(2000);
    // 3. Recreate (empty project → issue keys restart at 1)
    const newId = await createProject(p.key, jiraProject.name || p.name, me.accountId);
    if (!newId) {
      console.error(` FAIL recreate ${p.key}`);
      failed++;
      continue;
    }
    console.log(` Recreated (id=${newId})`);
    // 4. Assign shared scheme
    await delay(1000);
    const schemeOk = await assignScheme(newId);
    if (!schemeOk) {
      console.error(` FAIL assign scheme for ${p.key}`);
      failed++;
      continue;
    }
    // 5. Verify the scheme actually exposes the needed issue types
    const verified = await verifyScheme(p.key);
    if (!verified) {
      console.error(` FAIL verify scheme for ${p.key} (missing Epic/Task/Bug)`);
      failed++;
      continue;
    }
    console.log(` Scheme OK (Epic/Task/Bug)`);
    success++;
  }
  console.log(`\n=== Preparation Summary ===`);
  console.log(`Success: ${success}`);
  console.log(`Failed: ${failed}`);
  console.log(`\nRun migration: npx tsx scripts/migrate-tasks-to-jira.ts --skip-preflight`);
  await pool.end();
}
main().catch(err => { console.error(err); process.exit(1); });

View File

@@ -0,0 +1,232 @@
#!/usr/bin/env npx tsx
/**
 * Validate CF-762 migration integrity.
 * Checks: Jira issue counts vs DB, statuses, checklists, epic links, FK references.
 *
 * Usage: npx tsx scripts/validate-migration.ts [--project CF] [--verbose]
 */
import pg from 'pg';
import dotenv from 'dotenv';
import { dirname, join } from 'path';
import { fileURLToPath } from 'url';
const __dirname = dirname(fileURLToPath(import.meta.url));
dotenv.config({ path: join(__dirname, '..', '.env'), override: true });
const JIRA_URL = process.env.JIRA_URL || 'https://agiliton.atlassian.net';
const JIRA_USER = process.env.JIRA_USERNAME || '';
const JIRA_TOKEN = process.env.JIRA_API_TOKEN || '';
const JIRA_AUTH = Buffer.from(`${JIRA_USER}:${JIRA_TOKEN}`).toString('base64');
// SECURITY FIX: the Postgres password was hard-coded here (and is now in git
// history — rotate that credential). Connection settings come from the
// environment (.env is loaded above); non-secret defaults are kept as fallbacks.
const pool = new pg.Pool({
  host: process.env.POSTGRES_HOST || 'postgres.agiliton.internal',
  port: parseInt(process.env.POSTGRES_PORT || '5432', 10),
  database: process.env.POSTGRES_DB || 'agiliton',
  user: process.env.POSTGRES_USER || 'agiliton',
  password: process.env.POSTGRES_PASSWORD || '',
  max: 3,
});
const args = process.argv.slice(2);
// The value immediately following "--project", if present (e.g. --project CF)
const PROJECT_FILTER = args.find((_, i) => args[i - 1] === '--project') || '';
const VERBOSE = args.includes('--verbose');
// Pause between Jira API calls (rate limiting)
const DELAY_MS = 700;
/** Sleep helper used to rate-limit Jira API calls. */
function delay(ms: number): Promise<void> {
  return new Promise<void>(resolve => {
    setTimeout(resolve, ms);
  });
}
/** Rate-limited GET against the Jira v3 REST API: waits DELAY_MS before each call. */
async function jiraFetch(path: string): Promise<Response> {
  await delay(DELAY_MS);
  const headers = {
    'Authorization': `Basic ${JIRA_AUTH}`,
    'Accept': 'application/json',
  };
  return fetch(`${JIRA_URL}/rest/api/3${path}`, { headers });
}
// v3 search/jql uses cursor pagination, no total. Count by paging through.
async function jiraIssueCount(projectKey: string): Promise<number> {
  const jql = encodeURIComponent(`project="${projectKey}"`);
  let total = 0;
  let cursor: string | undefined;
  for (;;) {
    let url = `/search/jql?jql=${jql}&maxResults=100&fields=summary`;
    if (cursor) url += `&nextPageToken=${encodeURIComponent(cursor)}`;
    const res = await jiraFetch(url);
    if (!res.ok) return -1; // -1 signals "count unavailable"
    const page = await res.json() as { issues: unknown[]; nextPageToken?: string; isLast?: boolean };
    total += page.issues.length;
    // Stop on explicit last page, missing cursor, or an empty page
    if (page.isLast || !page.nextPageToken || page.issues.length === 0) return total;
    cursor = page.nextPageToken;
  }
}
/**
 * Count issues labeled "migration-placeholder" across all projects.
 *
 * BUG FIX: the v3 /search/jql endpoint uses cursor pagination and does not
 * return a `total` field (see the comment on jiraIssueCount above), so the
 * previous `maxResults=0` + `data.total` approach always yielded -1. Page
 * through the results and count instead. Returns -1 on HTTP failure.
 */
async function jiraPlaceholderCount(): Promise<number> {
  const jql = encodeURIComponent(`labels = "migration-placeholder"`);
  let count = 0;
  let nextPageToken: string | undefined;
  while (true) {
    let url = `/search/jql?jql=${jql}&maxResults=100&fields=summary`;
    if (nextPageToken) url += `&nextPageToken=${encodeURIComponent(nextPageToken)}`;
    const res = await jiraFetch(url);
    if (!res.ok) return -1;
    const data = await res.json() as { issues: unknown[]; nextPageToken?: string; isLast?: boolean };
    count += data.issues.length;
    if (data.isLast || !data.nextPageToken || data.issues.length === 0) break;
    nextPageToken = data.nextPageToken;
  }
  return count;
}
/**
 * Sample the first few migrated issues of a project and count how many carry
 * a non-empty checklist custom field (customfield_10091).
 */
async function spotCheckChecklists(projectKey: string): Promise<{ total: number; withChecklist: number }> {
  const jql = encodeURIComponent(`project="${projectKey}" AND labels = "migrated-from-task-mcp" ORDER BY key ASC`);
  const res = await jiraFetch(`/search/jql?jql=${jql}&maxResults=3&fields=summary,customfield_10091`);
  if (!res.ok) return { total: 0, withChecklist: 0 };
  const data = await res.json() as { issues: Array<{ key: string; fields: Record<string, unknown> }> };
  const withChecklist = data.issues.filter(issue => Boolean(issue.fields.customfield_10091)).length;
  return { total: data.issues.length, withChecklist };
}
/** Tally status names across up to 100 migrated issues of a project. */
async function spotCheckStatuses(projectKey: string): Promise<Record<string, number>> {
  const counts: Record<string, number> = {};
  const jql = encodeURIComponent(`project="${projectKey}" AND labels = "migrated-from-task-mcp"`);
  const res = await jiraFetch(`/search/jql?jql=${jql}&maxResults=100&fields=status`);
  if (!res.ok) return counts;
  const data = await res.json() as { issues: Array<{ fields: { status: { name: string } } }> };
  for (const issue of data.issues) {
    const name = issue.fields.status.name;
    counts[name] = (counts[name] ?? 0) + 1;
  }
  return counts;
}
/**
 * Sample a few migrated non-Epic issues and count how many already have a
 * parent (epic) set.
 */
async function spotCheckEpicLinks(projectKey: string): Promise<{ total: number; withParent: number }> {
  const jql = encodeURIComponent(`project="${projectKey}" AND issuetype != Epic AND labels = "migrated-from-task-mcp" ORDER BY key ASC`);
  const res = await jiraFetch(`/search/jql?jql=${jql}&maxResults=5&fields=parent`);
  if (!res.ok) return { total: 0, withParent: 0 };
  const data = await res.json() as { issues: Array<{ key: string; fields: Record<string, unknown> }> };
  const withParent = data.issues.filter(issue => Boolean(issue.fields?.parent)).length;
  return { total: data.issues.length, withParent };
}
/**
 * Run all migration validation checks and print a human-readable report:
 *  1. per-project issue counts (Jira vs DB)
 *  2-4. spot checks: checklists, status distribution, epic parent links
 *  5. NULL FK references   6. mapping total   7. placeholder count
 *  8. deleted source projects   9. remaining Jira projects with counts
 */
async function main() {
  console.log('=== CF-762 Migration Validation ===\n');
  // 1. Per-project Jira vs DB counts
  console.log('1. Per-project issue counts (Jira vs DB):');
  console.log(' Project | Jira | DB Tasks | DB Migration Map | Match');
  console.log(' --------|------|----------|-----------------|------');
  // FIX: PROJECT_FILTER comes from argv — bind it as a query parameter
  // instead of interpolating it into the SQL text (injection/quoting risk).
  const dbProjects = await pool.query(
    `SELECT p.key, COUNT(DISTINCT t.id) as task_count, COUNT(DISTINCT m.old_task_id) as map_count
     FROM projects p
     LEFT JOIN tasks t ON t.project = p.key
     LEFT JOIN task_migration_map m ON m.old_task_id = t.id
     WHERE p.key ~ '^[A-Z]{2,5}$'
     ${PROJECT_FILTER ? 'AND p.key = $1' : ''}
     GROUP BY p.key
     HAVING COUNT(t.id) > 0
     ORDER BY p.key`,
    PROJECT_FILTER ? [PROJECT_FILTER] : []
  );
  let mismatches = 0;
  for (const row of dbProjects.rows) {
    const jiraCount = await jiraIssueCount(row.key);
    // Jira may legitimately hold MORE issues than DB tasks (placeholders,
    // epics) — only fewer-than-DB counts as a mismatch.
    const match = jiraCount >= parseInt(row.task_count) ? 'OK' : 'MISMATCH';
    if (match !== 'OK') mismatches++;
    console.log(` ${row.key.padEnd(7)} | ${String(jiraCount).padStart(4)} | ${String(row.task_count).padStart(8)} | ${String(row.map_count).padStart(15)} | ${match}`);
  }
  console.log(`\n Mismatches: ${mismatches}\n`);
  // 2. Spot-check checklists (3 projects)
  console.log('2. Checklist spot-check:');
  const checkProjects = PROJECT_FILTER ? [PROJECT_FILTER] : ['CF', 'OWUI', 'WHMCS'];
  for (const pk of checkProjects) {
    const result = await spotCheckChecklists(pk);
    console.log(` ${pk}: ${result.withChecklist}/${result.total} issues have checklists`);
  }
  console.log('');
  // 3. Status distribution spot-check
  console.log('3. Status distribution spot-check:');
  const statusProjects = PROJECT_FILTER ? [PROJECT_FILTER] : ['CF', 'GB', 'RUB'];
  for (const pk of statusProjects) {
    const statuses = await spotCheckStatuses(pk);
    console.log(` ${pk}: ${Object.entries(statuses).map(([s, c]) => `${s}=${c}`).join(', ')}`);
  }
  console.log('');
  // 4. Epic→Task parent links
  console.log('4. Epic→Task parent links spot-check:');
  const epicProjects = PROJECT_FILTER ? [PROJECT_FILTER] : ['CF', 'RUB', 'OWUI'];
  for (const pk of epicProjects) {
    const result = await spotCheckEpicLinks(pk);
    console.log(` ${pk}: ${result.withParent}/${result.total} tasks have parent epic`);
  }
  console.log('');
  // 5. NULL FK references. Table/column names below are hard-coded constants,
  // so interpolating them into the SQL is safe.
  console.log('5. NULL FK references (should be from unmigrated/deleted projects):');
  const nullChecks = [
    { table: 'memories', col: 'jira_issue_key', fk: 'task_id' },
    { table: 'session_context', col: 'jira_issue_key', fk: 'current_task_id' },
    { table: 'task_commits', col: 'jira_issue_key', fk: 'task_id' },
  ];
  for (const { table, col, fk } of nullChecks) {
    try {
      const res = await pool.query(
        `SELECT COUNT(*) as cnt FROM ${table} WHERE ${fk} IS NOT NULL AND ${col} IS NULL`
      );
      const count = parseInt(res.rows[0].cnt);
      if (count > 0) {
        console.log(` ${table}: ${count} rows with task_id but no jira_issue_key`);
        if (VERBOSE) {
          const details = await pool.query(
            `SELECT ${fk} FROM ${table} WHERE ${fk} IS NOT NULL AND ${col} IS NULL LIMIT 5`
          );
          for (const d of details.rows) {
            console.log(` - ${d[fk]}`);
          }
        }
      } else {
        console.log(` ${table}: OK (0 NULL refs)`);
      }
    } catch (e: unknown) {
      // FIX: catch as unknown and narrow instead of `any`
      const msg = e instanceof Error ? e.message : String(e);
      console.log(` ${table}: ${msg}`);
    }
  }
  console.log('');
  // 6. Migration map total
  const mapTotal = await pool.query('SELECT COUNT(*) as cnt FROM task_migration_map');
  console.log(`6. Total migration mappings: ${mapTotal.rows[0].cnt}`);
  // 7. Placeholder count in Jira
  const placeholders = await jiraPlaceholderCount();
  console.log(`7. Placeholder issues in Jira (label=migration-placeholder): ${placeholders}`);
  // 8. Consolidated projects check — should no longer exist
  console.log('\n8. Deleted source projects (should be gone from Jira):');
  const deletedProjects = ['LIT', 'CARD', 'TES', 'DA', 'AF', 'RUBI', 'ET', 'ZORK', 'IS', 'CLN', 'TOOLS'];
  for (const pk of deletedProjects) {
    const res = await jiraFetch(`/project/${pk}`);
    const status = res.ok ? 'STILL EXISTS' : 'Gone';
    console.log(` ${pk}: ${status}`);
  }
  // 9. Remaining projects with per-project issue counts
  console.log('\n9. Current Jira projects:');
  const projRes = await jiraFetch('/project');
  if (projRes.ok) {
    const projects = await projRes.json() as Array<{ key: string; name: string }>;
    console.log(` Total: ${projects.length}`);
    for (const p of projects.sort((a, b) => a.key.localeCompare(b.key))) {
      const count = await jiraIssueCount(p.key);
      console.log(` ${p.key.padEnd(8)} ${String(count).padStart(4)} issues - ${p.name}`);
    }
  }
  await pool.end();
  console.log('\n=== Validation Complete ===');
}
main().catch(err => { console.error(err); process.exit(1); });

View File

@@ -1,5 +1,14 @@
// Embeddings via LiteLLM API
import { createHash } from 'crypto';
/**
 * Generate SHA-256 content hash for dedup before embedding API call (CF-1314)
 * @param text raw content to fingerprint
 * @returns lowercase hex digest (64 chars)
 */
export function generateContentHash(text: string): string {
  const hasher = createHash('sha256');
  hasher.update(text);
  return hasher.digest('hex');
}
interface EmbeddingResponse {
data: Array<{
embedding: number[];
@@ -58,3 +67,167 @@ export async function getEmbedding(text: string): Promise<number[] | null> {
/** Serialize an embedding vector as a bracketed, comma-joined string, e.g. "[1,2,3]". */
export function formatEmbedding(embedding: number[]): string {
  const joined = embedding.join(',');
  return '[' + joined + ']';
}
/**
 * Cross-encoder re-ranking via LiteLLM /rerank endpoint (CF-1317)
 * Calls Cohere-compatible rerank API to reorder candidates by relevance.
 * Returns null on failure (caller falls back to original order).
 */
export interface RerankResult {
  // Position of the document in the `documents` array passed to rerank()
  index: number;
  // Relevance score returned by the rerank model (as provided by the API)
  relevance_score: number;
}
/**
 * Re-rank `documents` against `query` via the LiteLLM /rerank endpoint (CF-1317).
 *
 * Feature-gated by RERANK_ENABLED=true. Returns the API's results (document
 * index + relevance score, best first), or null on any failure / when
 * disabled, so the caller can fall back to the original (e.g. RRF) order.
 *
 * @param query     Search query to rank against.
 * @param documents Candidate document texts; original order defines `index`.
 * @param topN      How many results to request; defaults to all documents.
 */
export async function rerank(
  query: string,
  documents: string[],
  topN?: number
): Promise<RerankResult[] | null> {
  // Feature flag off or nothing to rank: signal "use original order".
  if (process.env.RERANK_ENABLED !== 'true') return null;
  if (documents.length === 0) return null;
  const LLM_API_URL = process.env.LLM_API_URL || 'https://api.agiliton.cloud/llm';
  const LLM_API_KEY = process.env.LLM_API_KEY || '';
  const model = process.env.RERANK_MODEL || 'rerank-v3.5';
  if (!LLM_API_KEY) return null;
  try {
    const response = await fetch(`${LLM_API_URL}/v1/rerank`, {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${LLM_API_KEY}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model,
        query,
        documents,
        // ?? (not ||) so an explicitly provided topN is never clobbered.
        top_n: topN ?? documents.length,
      }),
    });
    if (!response.ok) {
      console.error('Rerank API error:', response.status, await response.text());
      return null;
    }
    const data = await response.json() as { results?: RerankResult[] };
    // Validate the payload shape — JSON from the network is untrusted and a
    // malformed body must trigger the caller's fallback, not a downstream crash.
    return Array.isArray(data.results) ? data.results : null;
  } catch (error) {
    // Network/parse failure: log and fall back rather than throwing.
    console.error('Rerank failed (falling back to RRF order):', error);
    return null;
  }
}
/**
 * Extracted metadata schema (CF-1316)
 */
export interface ExtractedMetadata {
  // Key technical topics discussed (normalized to at most 10 entries).
  topics: string[];
  // Architecture/design decisions made (at most 5).
  decisions: string[];
  // Issues or blockers encountered (at most 5).
  blockers: string[];
  // Tools or commands used during the session (at most 10).
  tools_used: string[];
  // Project keys mentioned, e.g. "CF", "BAB" (at most 5).
  projects: string[];
  // Jira issue keys mentioned, e.g. "CF-1307" (at most 10).
  issue_keys: string[];
}
/**
 * Extract structured metadata from session content using a fast LLM (CF-1316)
 * Uses first 8,000 chars of content for cost optimization.
 * Returns null on failure (non-blocking — don't break embedding pipeline).
 *
 * @param content Raw session text; only the first 8K chars are sent.
 * @returns Normalized ExtractedMetadata, or null on any error.
 */
export async function extractMetadata(content: string): Promise<ExtractedMetadata | null> {
  const LLM_API_URL = process.env.LLM_API_URL || 'https://api.agiliton.cloud/llm';
  const LLM_API_KEY = process.env.LLM_API_KEY || '';
  const model = process.env.METADATA_EXTRACTION_MODEL || 'claude-haiku-4.5';
  if (!LLM_API_KEY) return null;
  // Truncate to first 8K chars (cost optimization from Agentic RAG Module 4)
  const truncated = content.slice(0, 8000);
  const systemPrompt = `Extract structured metadata from this session content. Return a JSON object with these fields:
- topics: Key technical topics discussed (e.g., "pgvector", "deployment", "authentication"). Max 10.
- decisions: Architecture or design decisions made (e.g., "Use RRF for hybrid search"). Max 5.
- blockers: Issues or blockers encountered (e.g., "Firecrawl connection refused"). Max 5.
- tools_used: Tools or commands used (e.g., "agiliton-deploy", "jira_create_issue"). Max 10.
- projects: Project keys mentioned (e.g., "CF", "BAB", "WF"). Max 5.
- issue_keys: Jira issue keys mentioned (e.g., "CF-1307", "BAB-42"). Max 10.
Return ONLY valid JSON. If a field has no matches, use an empty array [].`;
  // Normalize one extracted field: keep at most `max` entries and drop
  // anything that is not a string (robustness against malformed LLM JSON).
  const take = (value: unknown, max: number): string[] =>
    Array.isArray(value)
      ? value.filter((v): v is string => typeof v === 'string').slice(0, max)
      : [];
  try {
    const response = await fetch(`${LLM_API_URL}/v1/chat/completions`, {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${LLM_API_KEY}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model,
        messages: [
          { role: 'system', content: systemPrompt },
          { role: 'user', content: truncated },
        ],
        max_tokens: 1024,
        // Deterministic extraction — we want stable metadata, not creativity.
        temperature: 0,
      }),
    });
    if (!response.ok) {
      console.error('Metadata extraction API error:', response.status, await response.text());
      return null;
    }
    const data = await response.json() as {
      choices: Array<{ message: { content: string } }>;
    };
    const raw = data.choices?.[0]?.message?.content;
    if (!raw) return null;
    // Parse JSON from response (handle markdown code blocks)
    const jsonStr = raw.replace(/```json\n?/g, '').replace(/```\n?/g, '').trim();
    const parsed = JSON.parse(jsonStr);
    // Validate and normalize every field (caps mirror the prompt above).
    return {
      topics: take(parsed.topics, 10),
      decisions: take(parsed.decisions, 5),
      blockers: take(parsed.blockers, 5),
      tools_used: take(parsed.tools_used, 10),
      projects: take(parsed.projects, 5),
      issue_keys: take(parsed.issue_keys, 10),
    };
  } catch (error) {
    console.error('Metadata extraction failed:', error);
    return null;
  }
}
/**
 * Reciprocal Rank Fusion — merge two ranked result lists (CF-1315).
 *
 * Each list contributes 1 / (k + rank + 1) per item; items appearing in
 * both lists accumulate both contributions.
 *
 * @param vectorResults IDs ranked by vector similarity (best first)
 * @param keywordResults IDs ranked by ts_rank (best first)
 * @param k RRF smoothing parameter (default 60, the standard value)
 * @returns Merged IDs with their RRF scores, sorted descending by score
 */
export function rrfMerge(
  vectorResults: (number | string)[],
  keywordResults: (number | string)[],
  k: number = 60
): { id: number | string; score: number }[] {
  const fused = new Map<number | string, number>();
  // Accumulate one list's reciprocal-rank contributions into the map.
  const accumulate = (ranked: (number | string)[]): void => {
    ranked.forEach((id, rank) => {
      fused.set(id, (fused.get(id) ?? 0) + 1 / (k + rank + 1));
    });
  };
  // Vector list first, then keyword list — preserves insertion order so
  // equal-score ties keep a deterministic (stable-sort) ordering.
  accumulate(vectorResults);
  accumulate(keywordResults);
  return [...fused.entries()]
    .map(([id, score]) => ({ id, score }))
    .sort((left, right) => right.score - left.score);
}

44
src/http-server.ts Normal file
View File

@@ -0,0 +1,44 @@
#!/usr/bin/env node
import dotenv from "dotenv";
import { fileURLToPath } from "url";
import { dirname, join } from "path";
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
dotenv.config({ path: join(__dirname, "..", ".env"), override: true });
import { initSentry } from "./sentry.js";
initSentry(process.env.SENTRY_ENVIRONMENT || "production");
import express from "express";
import { randomUUID } from "crypto";
import { StreamableHTTPServerTransport } from "@modelcontextprotocol/sdk/server/streamableHttp.js";
import type { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { testConnection, close } from "./db.js";
import { createServer } from "./server.js";
const PORT = parseInt(process.env.MCP_HTTP_PORT || "9216");
const HOST = process.env.MCP_HTTP_HOST || "0.0.0.0";
const transports = new Map<string, StreamableHTTPServerTransport>();
const sessionServers = new Map<string, Server>();
const app = express();
app.use(express.json());
app.post("/mcp", async (req, res) => {
try {
const sid0 = req.headers["mcp-session-id"] as string | undefined;
if (sid0 && transports.has(sid0)) { await transports.get(sid0)!.handleRequest(req, res, req.body); return; }
const transport: StreamableHTTPServerTransport = new StreamableHTTPServerTransport({
sessionIdGenerator: () => randomUUID(),
onsessioninitialized: (sid) => { transports.set(sid, transport); },
});
transport.onclose = () => { const sid = transport.sessionId; if (sid) { transports.delete(sid); sessionServers.delete(sid); } };
const ss = createServer(); await ss.connect(transport);
const sid = transport.sessionId; if (sid) sessionServers.set(sid, ss);
await transport.handleRequest(req, res, req.body);
} catch (err) { console.error("[session-mcp]", err); if (!res.headersSent) res.status(500).json({ error: "Internal" }); }
});
app.get("/mcp", async (req, res) => { const sid = req.headers["mcp-session-id"] as string|undefined; if(!sid||!transports.has(sid)){res.status(400).json({error:"bad"});return;} await transports.get(sid)!.handleRequest(req,res); });
app.delete("/mcp", async (req, res) => { const sid = req.headers["mcp-session-id"] as string|undefined; if(!sid||!transports.has(sid)){res.status(400).json({error:"bad"});return;} await transports.get(sid)!.handleRequest(req,res); });
app.get("/health", (_req, res) => res.json({ status: "ok", server: "session-mcp", activeSessions: transports.size }));
(async () => {
if (!(await testConnection())) { console.error("DB connect failed"); process.exit(1); }
app.listen(PORT, HOST, () => console.error(`session-mcp: HTTP on http://${HOST}:${PORT}/mcp`));
})();
process.on("SIGINT", async () => { await close(); process.exit(0); });
process.on("SIGTERM", async () => { await close(); process.exit(0); });

View File

@@ -1,751 +1,22 @@
#!/usr/bin/env node
/**
* Task MCP Server
*
* Exposes task management tools via Model Context Protocol.
* Uses PostgreSQL with pgvector for semantic search.
*
* Requires SSH tunnel to infra VM on port 5433:
* ssh -L 5433:localhost:5432 -i ~/.ssh/hetzner_mash_deploy root@46.224.188.157 -N &
*/
// Load environment variables from .env file
import dotenv from 'dotenv';
import { fileURLToPath } from 'url';
import { dirname, join } from 'path';
import dotenv from "dotenv";
import { fileURLToPath } from "url";
import { dirname, join } from "path";
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
// Resolve .env relative to the package root (one level above src/);
// override=true lets the file win over inherited shell environment.
const envPath = join(__dirname, '..', '.env');
const result = dotenv.config({ path: envPath, override: true });
// Initialize Sentry for error tracking (with MCP-aware filtering and PII scrubbing)
import { initSentry } from './sentry.js';
initSentry(process.env.SENTRY_ENVIRONMENT || 'production');
// Log environment loading status (goes to MCP server logs)
// console.error is used because stdout carries the MCP stdio protocol.
if (result.error) {
  console.error('Failed to load .env from:', envPath, result.error);
} else {
  console.error('Loaded .env from:', envPath);
  // Log presence only — never the key value itself.
  console.error('LLM_API_KEY present:', !!process.env.LLM_API_KEY);
  console.error('LLM_API_URL:', process.env.LLM_API_URL);
}
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import {
CallToolRequestSchema,
ListToolsRequestSchema,
} from '@modelcontextprotocol/sdk/types.js';
import { testConnection, close } from './db.js';
import { toolDefinitions } from './tools/index.js';
import { taskAdd, taskList, taskShow, taskClose, taskUpdate, taskInvestigate, taskMoveProject } from './tools/crud.js';
import { taskSimilar, taskContext, taskSessionContext } from './tools/search.js';
import { taskLink, checklistAdd, checklistToggle, taskResolveDuplicate } from './tools/relations.js';
import { epicAdd, epicList, epicShow, epicAssign, epicClose } from './tools/epics.js';
import { taskDelegations, taskDelegationQuery } from './tools/delegations.js';
import { projectLock, projectUnlock, projectLockStatus, projectContext } from './tools/locks.js';
import { versionAdd, versionList, versionShow, versionUpdate, versionRelease, versionAssignTask } from './tools/versions.js';
import { taskCommitAdd, taskCommitRemove, taskCommitsList, taskLinkCommits, sessionTasks } from './tools/commits.js';
import { changelogAdd, changelogSinceSession, changelogList } from './tools/changelog.js';
import {
componentRegister,
componentList,
componentAddDependency,
componentAddFile,
componentAddCheck,
impactAnalysis,
impactLearn,
componentGraph,
} from './tools/impact.js';
import { memoryAdd, memorySearch, memoryList, memoryContext } from './tools/memories.js';
import { toolDocAdd, toolDocSearch, toolDocGet, toolDocList, toolDocExport } from './tools/tool-docs.js';
import {
sessionStart,
sessionUpdate,
sessionEnd,
sessionList,
sessionSearch,
sessionContext,
buildRecord,
sessionCommitLink,
sessionRecoverOrphaned,
sessionRecoverTempNotes,
} from './tools/sessions.js';
import {
sessionNoteAdd,
sessionNotesList,
sessionPlanSave,
sessionPlanUpdateStatus,
sessionPlanList,
projectDocUpsert,
projectDocGet,
projectDocList,
sessionDocumentationGenerate,
sessionSemanticSearch,
sessionProductivityAnalytics,
sessionPatternDetection,
} from './tools/session-docs.js';
import { archiveAdd, archiveSearch, archiveList, archiveGet } from './tools/archives.js';
import { projectArchive } from './tools/project-archive.js';
// Create MCP server
// Declares the server identity and advertises the `tools` capability only.
const server = new Server(
  { name: 'task-mcp', version: '1.0.0' },
  { capabilities: { tools: {} } }
);
// Register tool list handler
// Returns the static tool catalog from tools/index.js on tools/list.
server.setRequestHandler(ListToolsRequestSchema, async () => ({
  tools: toolDefinitions,
}));
// Register tool call handler
// Dispatch table: tool name -> handler. Replaces a ~600-line switch so each
// tool is a single greppable line. Every handler receives the raw (untyped)
// MCP arguments object and resolves to the tool's text result; JSON-returning
// tools are serialized with JSON.stringify(..., null, 2) exactly as before.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
type ToolArgs = any;
const toolHandlers: Record<string, (a: ToolArgs) => Promise<string>> = {
  // CRUD
  task_add: (a) => taskAdd({ title: a.title, project: a.project, type: a.type, priority: a.priority, description: a.description }),
  task_list: (a) => taskList({ project: a.project, status: a.status, type: a.type, priority: a.priority, limit: a.limit }),
  task_show: (a) => taskShow(a.id),
  task_close: (a) => taskClose(a.id),
  task_update: (a) => taskUpdate({ id: a.id, status: a.status, priority: a.priority, type: a.type, title: a.title }),
  task_investigate: (a) => taskInvestigate({ title: a.title, project: a.project, priority: a.priority, description: a.description }),
  task_move_project: (a) => taskMoveProject({ id: a.id, target_project: a.target_project, reason: a.reason }),
  // Search
  task_similar: (a) => taskSimilar({ query: a.query, project: a.project, limit: a.limit }),
  task_context: (a) => taskContext({ description: a.description, project: a.project, limit: a.limit }),
  task_session_context: (a) => taskSessionContext({ id: a.id }),
  // Relations
  task_link: (a) => taskLink({ from_id: a.from_id, to_id: a.to_id, link_type: a.link_type }),
  task_checklist_add: (a) => checklistAdd({ task_id: a.task_id, item: a.item }),
  task_checklist_toggle: (a) => checklistToggle({ item_id: a.item_id, checked: a.checked }),
  task_resolve_duplicate: (a) => taskResolveDuplicate({ duplicate_id: a.duplicate_id, dominant_id: a.dominant_id }),
  // Epics
  epic_add: (a) => epicAdd({ title: a.title, project: a.project, description: a.description }),
  epic_list: (a) => epicList({ project: a.project, status: a.status, limit: a.limit }),
  epic_show: (a) => epicShow(a.id),
  epic_assign: (a) => epicAssign({ task_id: a.task_id, epic_id: a.epic_id }),
  epic_close: (a) => epicClose(a.id),
  // Delegations
  task_delegations: (a) => taskDelegations({ task_id: a.task_id }),
  task_delegation_query: (a) => taskDelegationQuery({ status: a.status, backend: a.backend, limit: a.limit }),
  // Project Locks
  project_lock: (a) => projectLock({ project: a.project, session_id: a.session_id, duration_minutes: a.duration_minutes, reason: a.reason }),
  project_unlock: (a) => projectUnlock({ project: a.project, session_id: a.session_id, force: a.force }),
  project_lock_status: (a) => projectLockStatus({ project: a.project }),
  project_context: () => projectContext(),
  // Versions
  version_add: (a) => versionAdd({ project: a.project, version: a.version, build_number: a.build_number, status: a.status, release_notes: a.release_notes }),
  version_list: (a) => versionList({ project: a.project, status: a.status, limit: a.limit }),
  version_show: (a) => versionShow(a.id),
  version_update: (a) => versionUpdate({ id: a.id, status: a.status, git_tag: a.git_tag, git_sha: a.git_sha, release_notes: a.release_notes, release_date: a.release_date }),
  version_release: (a) => versionRelease({ id: a.id, git_tag: a.git_tag }),
  version_assign_task: (a) => versionAssignTask({ task_id: a.task_id, version_id: a.version_id }),
  // Commits
  task_commit_add: (a) => taskCommitAdd({ task_id: a.task_id, commit_sha: a.commit_sha, repo: a.repo, source: a.source }),
  task_commit_remove: (a) => taskCommitRemove({ task_id: a.task_id, commit_sha: a.commit_sha }),
  task_commits_list: (a) => taskCommitsList(a.task_id),
  task_link_commits: (a) => taskLinkCommits({ repo: a.repo, commits: a.commits, dry_run: a.dry_run }),
  session_tasks: (a) => sessionTasks({ session_id: a.session_id, limit: a.limit }),
  // Infrastructure Changelog
  changelog_add: (a) => changelogAdd(a),
  changelog_since_session: (a) => changelogSinceSession(a),
  changelog_list: (a) => changelogList(a.days_back, a.limit),
  // Impact Analysis (object results, serialized for the MCP text channel)
  component_register: async (a) => JSON.stringify(await componentRegister(a.id, a.name, a.type, { path: a.path, repo: a.repo, description: a.description, health_check: a.health_check }), null, 2),
  component_list: async (a) => JSON.stringify(await componentList(a.type), null, 2),
  component_add_dependency: async (a) => JSON.stringify(await componentAddDependency(a.component_id, a.depends_on, a.dependency_type, a.description), null, 2),
  component_add_file: async (a) => JSON.stringify(await componentAddFile(a.component_id, a.file_pattern), null, 2),
  component_add_check: async (a) => JSON.stringify(await componentAddCheck(a.component_id, a.name, a.check_type, a.check_command, { expected_result: a.expected_result, timeout_seconds: a.timeout_seconds }), null, 2),
  impact_analysis: async (a) => JSON.stringify(await impactAnalysis(a.changed_files), null, 2),
  impact_learn: async (a) => JSON.stringify(await impactLearn(a.changed_component, a.affected_component, a.impact_description, { error_id: a.error_id, task_id: a.task_id }), null, 2),
  component_graph: async (a) => JSON.stringify(await componentGraph(a.component_id), null, 2),
  // Memories
  memory_add: (a) => memoryAdd({ category: a.category, title: a.title, content: a.content, context: a.context, project: a.project, session_id: a.session_id, task_id: a.task_id }),
  memory_search: (a) => memorySearch({ query: a.query, project: a.project, category: a.category, limit: a.limit }),
  memory_list: (a) => memoryList({ project: a.project, category: a.category, limit: a.limit }),
  memory_context: (a) => memoryContext(a.project, a.task_description),
  // Tool Documentation
  tool_doc_add: (a) => toolDocAdd({ tool_name: a.tool_name, category: a.category, title: a.title, description: a.description, usage_example: a.usage_example, parameters: a.parameters, notes: a.notes, tags: a.tags, source_file: a.source_file }),
  tool_doc_search: (a) => toolDocSearch({ query: a.query, category: a.category, tags: a.tags, limit: a.limit }),
  tool_doc_get: (a) => toolDocGet({ tool_name: a.tool_name }),
  tool_doc_list: (a) => toolDocList({ category: a.category, tag: a.tag, limit: a.limit }),
  tool_doc_export: () => toolDocExport(),
  // Sessions
  session_start: (a) => sessionStart({ session_id: a.session_id, project: a.project, working_directory: a.working_directory, git_branch: a.git_branch, initial_prompt: a.initial_prompt }),
  session_update: (a) => sessionUpdate({ session_id: a.session_id, message_count: a.message_count, token_count: a.token_count, tools_used: a.tools_used }),
  session_end: (a) => sessionEnd({ session_id: a.session_id, summary: a.summary, status: a.status }),
  session_list: (a) => sessionList({ project: a.project, status: a.status, since: a.since, limit: a.limit }),
  session_search: (a) => sessionSearch({ query: a.query, project: a.project, limit: a.limit }),
  session_context: (a) => sessionContext(a.session_id),
  build_record: (a) => buildRecord(a.session_id, a.version_id, a.build_number, a.git_commit_sha, a.status, a.started_at),
  session_commit_link: (a) => sessionCommitLink(a.session_id, a.commit_sha, a.repo, a.commit_message, a.committed_at),
  session_recover_orphaned: (a) => sessionRecoverOrphaned({ project: a.project }),
  session_recover_temp_notes: (a) => sessionRecoverTempNotes({ session_id: a.session_id, temp_file_path: a.temp_file_path }),
  // Session Documentation
  session_note_add: (a) => sessionNoteAdd({ session_id: a.session_id, note_type: a.note_type, content: a.content }),
  session_notes_list: async (a) => JSON.stringify(await sessionNotesList({ session_id: a.session_id, note_type: a.note_type }), null, 2),
  session_plan_save: (a) => sessionPlanSave({ session_id: a.session_id, plan_content: a.plan_content, plan_file_name: a.plan_file_name, status: a.status }),
  session_plan_update_status: (a) => sessionPlanUpdateStatus({ plan_id: a.plan_id, status: a.status }),
  session_plan_list: async (a) => JSON.stringify(await sessionPlanList({ session_id: a.session_id, status: a.status }), null, 2),
  project_doc_upsert: (a) => projectDocUpsert({ project: a.project, doc_type: a.doc_type, title: a.title, content: a.content, session_id: a.session_id }),
  project_doc_get: async (a) => JSON.stringify(await projectDocGet({ project: a.project, doc_type: a.doc_type }), null, 2),
  project_doc_list: async (a) => JSON.stringify(await projectDocList({ project: a.project }), null, 2),
  session_documentation_generate: (a) => sessionDocumentationGenerate({ session_id: a.session_id }),
  session_semantic_search: async (a) => JSON.stringify(await sessionSemanticSearch({ query: a.query, project: a.project, limit: a.limit }), null, 2),
  session_productivity_analytics: async (a) => JSON.stringify(await sessionProductivityAnalytics({ project: a.project, time_period: a.time_period }), null, 2),
  session_pattern_detection: async (a) => JSON.stringify(await sessionPatternDetection({ project: a.project, pattern_type: a.pattern_type }), null, 2),
  // Archives
  archive_add: (a) => archiveAdd({ project: a.project, archive_type: a.archive_type, title: a.title, content: a.content, original_path: a.original_path, file_size: a.file_size, archived_by_session: a.archived_by_session, metadata: a.metadata }),
  archive_search: (a) => archiveSearch({ query: a.query, project: a.project, archive_type: a.archive_type, limit: a.limit }),
  archive_list: (a) => archiveList({ project: a.project, archive_type: a.archive_type, since: a.since, limit: a.limit }),
  archive_get: (a) => archiveGet({ id: a.id }),
  // Project archival
  project_archive: (a) => projectArchive({ project_key: a.project_key, project_path: a.project_path, delete_local: a.delete_local, session_id: a.session_id }),
};
server.setRequestHandler(CallToolRequestSchema, async (request) => {
  const { name, arguments: args } = request.params;
  // hasOwnProperty guard: a tool name like "constructor" must not resolve
  // to an Object.prototype member.
  const handler = Object.prototype.hasOwnProperty.call(toolHandlers, name)
    ? toolHandlers[name]
    : undefined;
  try {
    if (!handler) throw new Error(`Unknown tool: ${name}`);
    const result = await handler(args);
    return {
      content: [{ type: 'text', text: result }],
    };
  } catch (error) {
    // Tool failures are reported in-band per MCP (isError), never thrown
    // up to the transport.
    const message = error instanceof Error ? error.message : String(error);
    return {
      content: [{ type: 'text', text: `Error: ${message}` }],
      isError: true,
    };
  }
});
// Main entry point
dotenv.config({ path: join(__dirname, "..", ".env"), override: true });
import { initSentry } from "./sentry.js";
initSentry(process.env.SENTRY_ENVIRONMENT || "production");
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { testConnection, close } from "./db.js";
import { createServer } from "./server.js";
// NOTE(review): this function appears to contain two merged generations of
// the startup code (old "task-mcp" flow and new "session-mcp" flow from this
// diff) — it connects a transport twice and tests the DB twice. Verify which
// half is intended before shipping.
async function main() {
  // Set up cleanup
  process.on('SIGINT', async () => {
    await close();
    process.exit(0);
  });
  process.on('SIGTERM', async () => {
    await close();
    process.exit(0);
  });
  // Start server FIRST - respond to MCP protocol immediately
  // This is critical: Claude Code sends initialize before we finish DB connection
  const transport = new StdioServerTransport();
  // NOTE(review): `server` here resolves to the `const server` declared LATER
  // in this function scope (temporal dead zone), so this line throws a
  // ReferenceError at runtime — it can no longer reach the module-level server.
  await server.connect(transport);
  console.error('task-mcp: Server started');
  // Test database connection in background (lazy - will connect on first tool call anyway)
  testConnection().then((connected) => {
    if (connected) {
      console.error('task-mcp: Connected to database');
    } else {
      console.error('task-mcp: Warning - database not reachable, will retry on tool calls');
    }
  });
  // NOTE(review): second, blocking DB check — contradicts the lazy check above.
  if (!(await testConnection())) { console.error("DB connect failed"); process.exit(1); }
  // Shadows any module-level `server`; see TDZ note above.
  const server = createServer();
  const t = new StdioServerTransport();
  await server.connect(t);
  console.error("session-mcp: stdio started");
}
main().catch((error) => {
  console.error('task-mcp: Fatal error:', error);
  process.exit(1);
});
// NOTE(review): duplicate entry point — main() is invoked a second time here,
// which would start two stdio transports. One of the two calls should go.
main().catch(e => { console.error(e); process.exit(1); });
// NOTE(review): SIGINT/SIGTERM handlers are also registered inside main(),
// so close()/exit run twice per signal. Keep only one registration site.
process.on("SIGINT", async () => { await close(); process.exit(0); });
process.on("SIGTERM", async () => { await close(); process.exit(0); });

View File

@@ -70,6 +70,11 @@ export function initSentry(environment: string = "development"): void {
return event;
},
enableLogs: true,
beforeSendLog(log) {
if (log.level === "debug") return null;
return log;
},
maxBreadcrumbs: 30,
attachStacktrace: true,
release: process.env.APP_VERSION || "unknown",
@@ -81,6 +86,18 @@ export function initSentry(environment: string = "development"): void {
);
}
/** Forward an info-level message (with optional structured data) to Sentry's logger. */
export function logInfo(msg: string, data?: Record<string, unknown>): void {
  Sentry.logger.info(msg, data);
}
/** Forward a warning-level message (with optional structured data) to Sentry's logger. */
export function logWarn(msg: string, data?: Record<string, unknown>): void {
  Sentry.logger.warn(msg, data);
}
/** Forward an error-level message (with optional structured data) to Sentry's logger. */
export function logError(msg: string, data?: Record<string, unknown>): void {
  Sentry.logger.error(msg, data);
}
/**
* Wrap MCP tool handler with Sentry transaction tracking.
*

521
src/server.ts Normal file
View File

@@ -0,0 +1,521 @@
#!/usr/bin/env node
/**
* Session MCP Server
*
* Forked from task-mcp (CF-762): Sessions, memory, archives, infrastructure.
* Task management now handled by Jira Cloud via mcp-atlassian.
*
* Uses PostgreSQL with pgvector for semantic search on sessions/memories.
*/
import { withSentryTransaction } from "./sentry.js";
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import {
CallToolRequestSchema,
ListToolsRequestSchema,
} from '@modelcontextprotocol/sdk/types.js';
import { testConnection, close } from './db.js';
import { toolDefinitions } from './tools/index.js';
// Kept tools (sessions, archives, infrastructure, docs, delegations, commits)
import { taskDelegations, taskDelegationQuery } from './tools/delegations.js';
import { projectLock, projectUnlock, projectLockStatus, projectContext } from './tools/locks.js';
import { taskCommitAdd, taskCommitRemove, taskCommitsList, taskLinkCommits, sessionTasks } from './tools/commits.js';
import { changelogAdd, changelogSinceSession, changelogList } from './tools/changelog.js';
import { timeline } from './tools/timeline.js';
import {
componentRegister,
componentList,
componentAddDependency,
componentAddFile,
componentAddCheck,
impactAnalysis,
impactLearn,
componentGraph,
} from './tools/impact.js';
import { toolDocAdd, toolDocSearch, toolDocGet, toolDocList, toolDocExport } from './tools/tool-docs.js';
import {
sessionStart,
sessionUpdate,
sessionEnd,
sessionList,
sessionSearch,
sessionContext,
buildRecord,
sessionCommitLink,
sessionRecoverOrphaned,
sessionRecoverTempNotes,
} from './tools/sessions.js';
import {
sessionNoteAdd,
sessionNotesList,
sessionPlanSave,
sessionPlanUpdateStatus,
sessionPlanList,
projectDocUpsert,
projectDocGet,
projectDocList,
sessionDocumentationGenerate,
sessionSemanticSearch,
sessionProductivityAnalytics,
sessionPatternDetection,
} from './tools/session-docs.js';
import { archiveAdd, archiveSearch, archiveList, archiveGet } from './tools/archives.js';
import { transcriptSearch } from './tools/transcripts.js';
import { projectArchive } from './tools/project-archive.js';
// Create MCP server
export function createServer(): Server {
const server = new Server(
{ name: 'session-mcp', version: '1.0.0' },
{ capabilities: { tools: {} } }
);
// Register tool list handler
server.setRequestHandler(ListToolsRequestSchema, async () => ({
tools: toolDefinitions,
}));
// Register tool call handler
server.setRequestHandler(CallToolRequestSchema, async (request) => {
const { name, arguments: args } = request.params;
return withSentryTransaction(name, async () => {
try {
let result: string;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const a = args as any;
switch (name) {
// Delegations
case 'task_delegations':
result = await taskDelegations({ task_id: a.task_id });
break;
case 'task_delegation_query':
result = await taskDelegationQuery({
status: a.status,
backend: a.backend,
limit: a.limit,
});
break;
// Project Locks
case 'project_lock':
result = await projectLock({
project: a.project,
session_id: a.session_id,
duration_minutes: a.duration_minutes,
reason: a.reason,
});
break;
case 'project_unlock':
result = await projectUnlock({
project: a.project,
session_id: a.session_id,
force: a.force,
});
break;
case 'project_lock_status':
result = await projectLockStatus({
project: a.project,
});
break;
case 'project_context':
result = await projectContext();
break;
// Commits
case 'task_commit_add':
result = await taskCommitAdd({
task_id: a.task_id,
commit_sha: a.commit_sha,
repo: a.repo,
source: a.source,
});
break;
case 'task_commit_remove':
result = await taskCommitRemove({
task_id: a.task_id,
commit_sha: a.commit_sha,
});
break;
case 'task_commits_list':
result = await taskCommitsList(a.task_id);
break;
case 'task_link_commits':
result = await taskLinkCommits({
repo: a.repo,
commits: a.commits,
dry_run: a.dry_run,
});
break;
case 'session_tasks':
result = await sessionTasks({
session_id: a.session_id,
limit: a.limit,
});
break;
// Infrastructure Changelog
case 'changelog_add':
result = await changelogAdd(a as any);
break;
case 'changelog_since_session':
result = await changelogSinceSession(a as any);
break;
case 'changelog_list':
result = await changelogList(a.days_back, a.limit);
break;
// Event Timeline (CF-2885)
case 'timeline':
result = await timeline(a as any);
break;
// Impact Analysis
case 'component_register':
result = JSON.stringify(await componentRegister(a.id, a.name, a.type, {
path: a.path,
repo: a.repo,
description: a.description,
health_check: a.health_check,
}), null, 2);
break;
case 'component_list':
result = JSON.stringify(await componentList(a.type), null, 2);
break;
case 'component_add_dependency':
result = JSON.stringify(await componentAddDependency(
a.component_id,
a.depends_on,
a.dependency_type,
a.description
), null, 2);
break;
case 'component_add_file':
result = JSON.stringify(await componentAddFile(a.component_id, a.file_pattern), null, 2);
break;
case 'component_add_check':
result = JSON.stringify(await componentAddCheck(a.component_id, a.name, a.check_type, a.check_command, {
expected_result: a.expected_result,
timeout_seconds: a.timeout_seconds,
}), null, 2);
break;
case 'impact_analysis':
result = JSON.stringify(await impactAnalysis(a.changed_files), null, 2);
break;
case 'impact_learn':
result = JSON.stringify(await impactLearn(
a.changed_component,
a.affected_component,
a.impact_description,
{ error_id: a.error_id, task_id: a.task_id }
), null, 2);
break;
case 'component_graph':
result = JSON.stringify(await componentGraph(a.component_id), null, 2);
break;
// Tool Documentation
case 'tool_doc_add':
result = await toolDocAdd({
tool_name: a.tool_name,
category: a.category,
title: a.title,
description: a.description,
usage_example: a.usage_example,
parameters: a.parameters,
notes: a.notes,
tags: a.tags,
source_file: a.source_file,
});
break;
case 'tool_doc_search':
result = await toolDocSearch({
query: a.query,
category: a.category,
tags: a.tags,
limit: a.limit,
});
break;
case 'tool_doc_get':
result = await toolDocGet({
tool_name: a.tool_name,
});
break;
case 'tool_doc_list':
result = await toolDocList({
category: a.category,
tag: a.tag,
limit: a.limit,
});
break;
case 'tool_doc_export':
result = await toolDocExport();
break;
// Sessions
case 'session_start':
result = await sessionStart({
session_id: a.session_id,
project: a.project,
working_directory: a.working_directory,
git_branch: a.git_branch,
initial_prompt: a.initial_prompt,
jira_issue_key: a.jira_issue_key,
});
break;
case 'session_update':
result = await sessionUpdate({
session_id: a.session_id,
message_count: a.message_count,
token_count: a.token_count,
tools_used: a.tools_used,
});
break;
case 'session_end':
result = await sessionEnd({
session_id: a.session_id,
summary: a.summary,
status: a.status,
});
break;
case 'session_list':
result = await sessionList({
project: a.project,
status: a.status,
since: a.since,
limit: a.limit,
});
break;
case 'session_search':
result = await sessionSearch({
query: a.query,
project: a.project,
limit: a.limit,
search_mode: a.search_mode,
});
break;
case 'session_context':
result = await sessionContext(a.session_id);
break;
case 'build_record':
result = await buildRecord(
a.session_id,
a.version_id,
a.build_number,
a.git_commit_sha,
a.status,
a.started_at
);
break;
case 'session_commit_link':
result = await sessionCommitLink(
a.session_id,
a.commit_sha,
a.repo,
a.commit_message,
a.committed_at
);
break;
case 'session_recover_orphaned':
result = await sessionRecoverOrphaned({
project: a.project,
});
break;
case 'session_recover_temp_notes':
result = await sessionRecoverTempNotes({
session_id: a.session_id,
temp_file_path: a.temp_file_path,
});
break;
// Session Documentation
case 'session_note_add':
result = await sessionNoteAdd({
session_id: a.session_id,
note_type: a.note_type,
content: a.content,
});
break;
case 'session_notes_list':
result = JSON.stringify(
await sessionNotesList({
session_id: a.session_id,
note_type: a.note_type,
}),
null,
2
);
break;
case 'session_plan_save':
result = await sessionPlanSave({
session_id: a.session_id,
plan_content: a.plan_content,
plan_file_name: a.plan_file_name,
status: a.status,
});
break;
case 'session_plan_update_status':
result = await sessionPlanUpdateStatus({
plan_id: a.plan_id,
status: a.status,
});
break;
case 'session_plan_list':
result = JSON.stringify(
await sessionPlanList({
session_id: a.session_id,
status: a.status,
}),
null,
2
);
break;
case 'project_doc_upsert':
result = await projectDocUpsert({
project: a.project,
doc_type: a.doc_type,
title: a.title,
content: a.content,
session_id: a.session_id,
});
break;
case 'project_doc_get':
result = JSON.stringify(
await projectDocGet({
project: a.project,
doc_type: a.doc_type,
}),
null,
2
);
break;
case 'project_doc_list':
result = JSON.stringify(
await projectDocList({
project: a.project,
}),
null,
2
);
break;
case 'session_documentation_generate':
result = await sessionDocumentationGenerate({
session_id: a.session_id,
});
break;
case 'session_semantic_search':
result = JSON.stringify(
await sessionSemanticSearch({
query: a.query,
project: a.project,
limit: a.limit,
search_mode: a.search_mode,
filter_topics: a.filter_topics,
filter_projects: a.filter_projects,
filter_issue_keys: a.filter_issue_keys,
}),
null,
2
);
break;
case 'session_productivity_analytics':
result = JSON.stringify(
await sessionProductivityAnalytics({
project: a.project,
time_period: a.time_period,
}),
null,
2
);
break;
case 'session_pattern_detection':
result = JSON.stringify(
await sessionPatternDetection({
project: a.project,
pattern_type: a.pattern_type,
}),
null,
2
);
break;
// Transcripts (CF-2394)
case 'session_transcript_search':
result = await transcriptSearch({
query: a.query,
project: a.project,
session_issue_key: a.session_issue_key,
limit: a.limit,
search_mode: a.search_mode,
});
break;
// Archives
case 'archive_add':
result = await archiveAdd({
project: a.project,
archive_type: a.archive_type,
title: a.title,
content: a.content,
original_path: a.original_path,
file_size: a.file_size,
archived_by_session: a.archived_by_session,
metadata: a.metadata,
});
break;
case 'archive_search':
result = await archiveSearch({
query: a.query,
project: a.project,
archive_type: a.archive_type,
limit: a.limit,
search_mode: a.search_mode,
});
break;
case 'archive_list':
result = await archiveList({
project: a.project,
archive_type: a.archive_type,
since: a.since,
limit: a.limit,
});
break;
case 'archive_get':
result = await archiveGet({
id: a.id,
});
break;
// Project archival
case 'project_archive':
result = await projectArchive({
project_key: a.project_key,
project_path: a.project_path,
delete_local: a.delete_local,
session_id: a.session_id,
});
break;
default:
throw new Error(`Unknown tool: ${name}`);
}
return {
content: [{ type: 'text', text: result }],
};
} catch (error) {
const message = error instanceof Error ? error.message : String(error);
return {
content: [{ type: 'text', text: `Error: ${message}` }],
isError: true,
};
}
});
});
return server;
}

439
src/services/jira.ts Normal file
View File

@@ -0,0 +1,439 @@
/**
* Jira Cloud REST API client — routes through AgilitonAPI gateway.
* Falls back to direct Jira access if AGILITON_API_KEY is not set.
*
* Gateway: AGILITON_API_KEY + AGILITON_API_URL
* Direct: JIRA_URL, JIRA_USERNAME, JIRA_API_TOKEN
*/
// Minimal projection of a Jira issue as returned by the create-issue endpoint.
interface JiraIssue {
  key: string;  // issue key, e.g. "CF-2885"
  id: string;   // Jira-internal issue id
  self: string; // REST self-URL of the issue
}
// One available workflow transition, as returned by the /transitions endpoint.
interface JiraTransition {
  id: string;   // transition id — posted back to execute the transition
  name: string; // human-readable transition name, matched case-insensitively below
}
// Gateway config — presence of the API key selects gateway routing (see useGateway()).
// Trailing slash is stripped so path concatenation below never doubles the '/'.
const GATEWAY_URL = (process.env.AGILITON_API_URL || 'https://api.agiliton.cloud').replace(/\/$/, '');
const GATEWAY_KEY = process.env.AGILITON_API_KEY || '';
// Direct config (fallback) — Basic-auth credentials for direct Jira Cloud access.
const JIRA_URL = process.env.JIRA_URL || 'https://agiliton.atlassian.net';
const JIRA_USERNAME = process.env.JIRA_USERNAME || '';
const JIRA_API_TOKEN = process.env.JIRA_API_TOKEN || '';
/** True when requests should be routed through the AgilitonAPI gateway. */
function useGateway(): boolean {
  return GATEWAY_KEY.length > 0;
}
/** True when either gateway routing or direct Jira credentials are available. */
function isConfigured(): boolean {
  return useGateway() || Boolean(JIRA_USERNAME && JIRA_API_TOKEN);
}
/**
 * Perform an HTTP request against Jira — via the AgilitonAPI gateway when an
 * API key is configured, otherwise directly against Jira Cloud with Basic auth.
 * Caller-supplied headers in `options.headers` override the defaults.
 */
async function jiraFetch(path: string, options: RequestInit = {}): Promise<Response> {
  const callerHeaders = options.headers as Record<string, string>;
  const jsonHeaders = {
    'Content-Type': 'application/json',
    'Accept': 'application/json',
  };
  if (useGateway()) {
    return fetch(`${GATEWAY_URL}/jira-cloud${path}`, {
      ...options,
      headers: {
        'X-API-Key': GATEWAY_KEY,
        ...jsonHeaders,
        ...callerHeaders,
      },
    });
  }
  const auth = Buffer.from(`${JIRA_USERNAME}:${JIRA_API_TOKEN}`).toString('base64');
  return fetch(`${JIRA_URL}/rest/api/3${path}`, {
    ...options,
    headers: {
      'Authorization': `Basic ${auth}`,
      ...jsonHeaders,
      ...callerHeaders,
    },
  });
}
/**
 * Create a Jira issue for session tracking in the given project.
 *
 * Builds a Task-type issue labelled `session-tracking`, with an ADF paragraph
 * description summarising the session context (project, branch, working
 * directory, parent task, start time). When `parentIssueKey` is given, the new
 * issue is linked to it ("relates to") after creation — best-effort, failures
 * are logged inside linkIssues().
 *
 * Returns `{ key }` of the new issue, or null when Jira is unconfigured or the
 * request fails (errors are logged, never thrown).
 */
export async function createSessionIssue(params: {
  sessionNumber: number | null;
  project: string;
  parentIssueKey?: string;
  branch?: string;
  workingDirectory?: string;
}): Promise<{ key: string } | null> {
  if (!isConfigured()) {
    console.error('session-mcp: Jira not configured, skipping issue creation');
    return null;
  }
  const { sessionNumber, project, parentIssueKey, branch, workingDirectory } = params;
  // Explicit null check: a plain truthiness test would mislabel session 0 as 'new'.
  const sessionLabel = sessionNumber != null ? `#${sessionNumber}` : 'new';
  const summary = `Session ${sessionLabel}: ${project}${parentIssueKey ? ` - ${parentIssueKey}` : ''}`;
  // Optional lines drop out via filter(Boolean) below.
  const descriptionParts = [
    `Automated session tracking issue.`,
    `Project: ${project}`,
    branch ? `Branch: ${branch}` : null,
    workingDirectory ? `Working directory: ${workingDirectory}` : null,
    parentIssueKey ? `Parent task: ${parentIssueKey}` : null,
    `Started: ${new Date().toISOString()}`,
  ].filter(Boolean);
  try {
    const response = await jiraFetch('/issue', {
      method: 'POST',
      body: JSON.stringify({
        fields: {
          project: { key: project },
          summary,
          // Jira Cloud v3 requires descriptions in Atlassian Document Format.
          description: {
            type: 'doc',
            version: 1,
            content: [{
              type: 'paragraph',
              content: [{
                type: 'text',
                text: descriptionParts.join('\n'),
              }],
            }],
          },
          issuetype: { name: 'Task' },
          labels: ['session-tracking', `project-${project.toLowerCase()}`],
        },
      }),
    });
    if (!response.ok) {
      const body = await response.text();
      console.error(`session-mcp: Jira create issue failed (${response.status}): ${body}`);
      return null;
    }
    const issue = await response.json() as JiraIssue;
    // Link to parent issue if provided (best-effort)
    if (parentIssueKey) {
      await linkIssues(issue.key, parentIssueKey, 'relates to');
    }
    return { key: issue.key };
  } catch (err) {
    console.error('session-mcp: Jira create issue error:', err);
    return null;
  }
}
/**
 * Add a comment to a Jira issue (used for session output).
 * The markdown body is wrapped in an ADF code block so the text is preserved
 * verbatim. Returns true on success; logs and returns false on any failure.
 */
export async function addComment(issueKey: string, markdownBody: string): Promise<boolean> {
  if (!isConfigured()) return false;
  const adfBody = {
    type: 'doc',
    version: 1,
    content: [{
      type: 'codeBlock',
      attrs: { language: 'markdown' },
      content: [{
        type: 'text',
        text: markdownBody,
      }],
    }],
  };
  try {
    const response = await jiraFetch(`/issue/${issueKey}/comment`, {
      method: 'POST',
      body: JSON.stringify({ body: adfBody }),
    });
    if (response.ok) return true;
    const body = await response.text();
    console.error(`session-mcp: Jira add comment failed (${response.status}): ${body}`);
    return false;
  } catch (err) {
    console.error('session-mcp: Jira add comment error:', err);
    return false;
  }
}
/**
 * Transition a Jira issue to "Done" status.
 * Fetches the issue's available workflow transitions, picks the one named
 * "done" or "resolve" (case-insensitive), and executes it. Returns false when
 * no matching transition exists or any request fails (all errors are logged).
 */
export async function transitionToDone(issueKey: string): Promise<boolean> {
  if (!isConfigured()) return false;
  try {
    const transResponse = await jiraFetch(`/issue/${issueKey}/transitions`);
    if (!transResponse.ok) {
      console.error(`session-mcp: Jira get transitions failed (${transResponse.status})`);
      return false;
    }
    const { transitions } = await transResponse.json() as { transitions: JiraTransition[] };
    const isDoneLike = (t: JiraTransition): boolean => {
      const lowered = t.name.toLowerCase();
      return lowered === 'done' || lowered === 'resolve';
    };
    const doneTrans = transitions.find(isDoneLike);
    if (!doneTrans) {
      console.error(`session-mcp: No "Done" transition found for ${issueKey}. Available: ${transitions.map(t => t.name).join(', ')}`);
      return false;
    }
    const response = await jiraFetch(`/issue/${issueKey}/transitions`, {
      method: 'POST',
      body: JSON.stringify({ transition: { id: doneTrans.id } }),
    });
    if (response.ok) return true;
    const body = await response.text();
    console.error(`session-mcp: Jira transition failed (${response.status}): ${body}`);
    return false;
  } catch (err) {
    console.error('session-mcp: Jira transition error:', err);
    return false;
  }
}
/**
 * Update a Jira issue description (used for final session summary).
 * The markdown text is wrapped in an ADF code block so it renders verbatim.
 * Returns true on success; logs and returns false on any failure.
 */
export async function updateIssueDescription(issueKey: string, description: string): Promise<boolean> {
  if (!isConfigured()) return false;
  const adfDescription = {
    type: 'doc',
    version: 1,
    content: [{
      type: 'codeBlock',
      attrs: { language: 'markdown' },
      content: [{
        type: 'text',
        text: description,
      }],
    }],
  };
  try {
    const response = await jiraFetch(`/issue/${issueKey}`, {
      method: 'PUT',
      body: JSON.stringify({ fields: { description: adfDescription } }),
    });
    if (response.ok) return true;
    const body = await response.text();
    console.error(`session-mcp: Jira update description failed (${response.status}): ${body}`);
    return false;
  } catch (err) {
    console.error('session-mcp: Jira update description error:', err);
    return false;
  }
}
// ---------- Read operations (CF-2885 timeline tool) ----------
// One flattened changelog item — a single field change within a history entry.
export interface JiraChangelogEntry {
  ts: string; // ISO8601
  author: string; // display name or email, 'unknown' when absent
  field: string; // e.g. "status", "assignee", "labels"
  from: string | null; // previous value (display string preferred over raw id)
  to: string | null;   // new value (display string preferred over raw id)
}
// A single issue comment with its ADF body flattened to plain text.
export interface JiraComment {
  id: string;
  ts: string;     // creation timestamp
  author: string; // display name or email, 'unknown' when absent
  body: string;   // plain-text extraction of the ADF comment body
}
// Aggregated history view of one issue, as assembled by getIssueWithHistory().
export interface JiraIssueHistory {
  key: string;
  summary: string;
  status: string;
  issueType: string;
  created: string;
  creator: string;
  labels: string[];
  parent?: string; // parent issue key, when present
  linkedIssues: Array<{ key: string; type: string; direction: 'in' | 'out' }>;
  changelog: JiraChangelogEntry[];
  comments: JiraComment[];
}
/**
 * Minimal Atlassian Document Format → plain text extractor.
 * Leaf text nodes are returned as-is; container nodes are flattened by
 * recursively joining their children with single spaces. Anything that is
 * not an object (or has neither text nor content) yields ''.
 */
function adfToPlainText(adf: unknown): string {
  if (typeof adf !== 'object' || adf === null) return '';
  const { text, content } = adf as { type?: string; text?: string; content?: unknown[] };
  if (text) return text;
  return Array.isArray(content)
    ? content.map(adfToPlainText).join(' ').trim()
    : '';
}
/**
 * Fetch a Jira issue with full changelog and comments. Returns null on failure.
 *
 * Makes two requests: the issue itself with expand=changelog plus a minimal
 * field list, and the comment endpoint ordered by creation time. Changelog
 * histories are flattened to one JiraChangelogEntry per changed field, and
 * ADF comment bodies are reduced to plain text via adfToPlainText(). All
 * errors are logged and swallowed — callers only ever see data or null.
 */
export async function getIssueWithHistory(issueKey: string): Promise<JiraIssueHistory | null> {
  if (!isConfigured()) return null;
  try {
    // Issue with changelog expansion
    const issueResp = await jiraFetch(`/issue/${issueKey}?expand=changelog&fields=summary,status,issuetype,created,creator,labels,parent,issuelinks`);
    if (!issueResp.ok) {
      console.error(`session-mcp: Jira get issue failed (${issueResp.status}) for ${issueKey}`);
      return null;
    }
    const issue = await issueResp.json() as any;
    // Comments (separate endpoint for full data)
    // Comment failure is non-fatal: we degrade to an empty comment list.
    const commentsResp = await jiraFetch(`/issue/${issueKey}/comment?orderBy=created`);
    const commentsJson = commentsResp.ok ? await commentsResp.json() as any : { comments: [] };
    // Parse changelog histories → flat entries
    const changelog: JiraChangelogEntry[] = [];
    const histories = issue.changelog?.histories || [];
    for (const h of histories) {
      // Author fallback chain: display name → email → 'unknown'.
      const author = h.author?.displayName || h.author?.emailAddress || 'unknown';
      const ts = h.created;
      for (const item of (h.items || [])) {
        changelog.push({
          ts,
          author,
          // Prefer the human-readable *String variants over raw ids.
          field: item.field,
          from: item.fromString || item.from || null,
          to: item.toString || item.to || null,
        });
      }
    }
    // Parse comments
    const comments: JiraComment[] = (commentsJson.comments || []).map((c: any) => ({
      id: c.id,
      ts: c.created,
      author: c.author?.displayName || c.author?.emailAddress || 'unknown',
      body: adfToPlainText(c.body),
    }));
    // Parse linked issues
    // Each Jira issue link carries either outwardIssue or inwardIssue, never both.
    const linkedIssues: Array<{ key: string; type: string; direction: 'in' | 'out' }> = [];
    for (const link of (issue.fields?.issuelinks || [])) {
      if (link.outwardIssue) {
        linkedIssues.push({
          key: link.outwardIssue.key,
          type: link.type?.outward || 'relates to',
          direction: 'out',
        });
      } else if (link.inwardIssue) {
        linkedIssues.push({
          key: link.inwardIssue.key,
          type: link.type?.inward || 'relates to',
          direction: 'in',
        });
      }
    }
    // Assemble the aggregate view; every field falls back to a benign default.
    return {
      key: issue.key,
      summary: issue.fields?.summary || '',
      status: issue.fields?.status?.name || '',
      issueType: issue.fields?.issuetype?.name || '',
      created: issue.fields?.created || '',
      creator: issue.fields?.creator?.displayName || issue.fields?.creator?.emailAddress || 'unknown',
      labels: issue.fields?.labels || [],
      parent: issue.fields?.parent?.key,
      linkedIssues,
      changelog,
      comments,
    };
  } catch (err) {
    console.error('session-mcp: Jira get issue history error:', err);
    return null;
  }
}
/**
 * Search for issues via JQL. Returns array of issue keys (minimal projection).
 * Returns [] when Jira is unconfigured or the request fails (errors logged).
 */
export async function searchIssueKeys(jql: string, limit: number = 50): Promise<string[]> {
  if (!isConfigured()) return [];
  // Only the summary field is requested — we just need the keys back.
  const qs = new URLSearchParams({
    jql,
    fields: 'summary',
    maxResults: String(limit),
  });
  try {
    const response = await jiraFetch(`/search/jql?${qs.toString()}`);
    if (!response.ok) {
      console.error(`session-mcp: Jira search failed (${response.status})`);
      return [];
    }
    const data = await response.json() as any;
    return (data.issues || []).map((i: any) => i.key);
  } catch (err) {
    console.error('session-mcp: Jira search error:', err);
    return [];
  }
}
// ---------- Write operations ----------
/**
 * Link two Jira issues with the given link type (default "relates to").
 * Returns true on success; logs and returns false on any failure.
 */
export async function linkIssues(
  inwardKey: string,
  outwardKey: string,
  linkType: string = 'relates to'
): Promise<boolean> {
  if (!isConfigured()) return false;
  const payload = {
    type: { name: linkType },
    inwardIssue: { key: inwardKey },
    outwardIssue: { key: outwardKey },
  };
  try {
    const response = await jiraFetch('/issueLink', {
      method: 'POST',
      body: JSON.stringify(payload),
    });
    if (response.ok) return true;
    const body = await response.text();
    console.error(`session-mcp: Jira link issues failed (${response.status}): ${body}`);
    return false;
  } catch (err) {
    console.error('session-mcp: Jira link issues error:', err);
    return false;
  }
}

View File

@@ -1,7 +1,7 @@
// Project archives operations for database-backed archival
import { query, queryOne, execute } from '../db.js';
import { getEmbedding, formatEmbedding } from '../embeddings.js';
import { getEmbedding, formatEmbedding, generateContentHash, rrfMerge, rerank } from '../embeddings.js';
type ArchiveType = 'session' | 'research' | 'audit' | 'investigation' | 'completed' | 'migration';
@@ -31,11 +31,14 @@ interface ArchiveAddArgs {
metadata?: Record<string, unknown>;
}
type SearchMode = 'hybrid' | 'vector' | 'keyword';
interface ArchiveSearchArgs {
query: string;
project?: string;
archive_type?: ArchiveType;
limit?: number;
search_mode?: SearchMode;
}
interface ArchiveListArgs {
@@ -72,142 +75,169 @@ export async function archiveAdd(args: ArchiveAddArgs): Promise<string> {
return `Error: Project not found: ${project}`;
}
// CF-1314: Hash content for dedup before embedding API call
const embedText = `${title}. ${content.substring(0, 1000)}`;
const contentHash = generateContentHash(embedText);
const existing = await queryOne<{ id: number }>(
'SELECT id FROM project_archives WHERE content_hash = $1 AND project_key = $2 LIMIT 1',
[contentHash, project]
);
if (existing) {
return `Archive already exists (id: ${existing.id}): [${archive_type}] ${title}`;
}
// Generate embedding for semantic search
const embedText = `${title}. ${content.substring(0, 1000)}`; // Limit content length for embedding
const embedding = await getEmbedding(embedText);
const embeddingValue = embedding ? formatEmbedding(embedding) : null;
if (embeddingValue) {
await execute(
`INSERT INTO project_archives
(project_key, archive_type, title, content, original_path, file_size, archived_by_session, metadata, embedding)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)`,
[
project,
archive_type,
title,
content,
original_path || null,
file_size || null,
archived_by_session || null,
JSON.stringify(metadata || {}),
embeddingValue
]
);
} else {
await execute(
`INSERT INTO project_archives
(project_key, archive_type, title, content, original_path, file_size, archived_by_session, metadata)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`,
[
project,
archive_type,
title,
content,
original_path || null,
file_size || null,
archived_by_session || null,
JSON.stringify(metadata || {})
]
);
}
await execute(
`INSERT INTO project_archives
(project_key, archive_type, title, content, original_path, file_size, archived_by_session, metadata, embedding, content_hash)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)`,
[
project,
archive_type,
title,
content,
original_path || null,
file_size || null,
archived_by_session || null,
JSON.stringify(metadata || {}),
embeddingValue,
contentHash
]
);
const sizeStr = file_size ? ` (${Math.round(file_size / 1024)}KB)` : '';
return `Archived: [${archive_type}] ${title}${sizeStr}`;
}
/**
* Search archives semantically
* Search archives with hybrid (vector + keyword), vector-only, or keyword-only mode (CF-1315)
*/
export async function archiveSearch(args: ArchiveSearchArgs): Promise<string> {
const { query: searchQuery, project, archive_type, limit = 5 } = args;
// Generate embedding for search
const embedding = await getEmbedding(searchQuery);
// Fallback to text search if embeddings unavailable
if (!embedding) {
console.warn('Embeddings unavailable, falling back to text search');
let whereClause = '(title ILIKE $1 OR content ILIKE $1)';
const params: unknown[] = [`%${searchQuery}%`];
let paramIndex = 2;
const { query: searchQuery, project, archive_type, limit = 5, search_mode = 'hybrid' } = args;
// Build shared filter clause
const buildFilter = (startIdx: number) => {
let where = '';
const params: unknown[] = [];
let idx = startIdx;
if (project) {
whereClause += ` AND project_key = $${paramIndex++}`;
where += ` AND project_key = $${idx++}`;
params.push(project);
}
if (archive_type) {
whereClause += ` AND archive_type = $${paramIndex++}`;
where += ` AND archive_type = $${idx++}`;
params.push(archive_type);
}
return { where, params, nextIdx: idx };
};
params.push(limit);
// Vector search
let vectorIds: number[] = [];
let vectorRows: Map<number, Archive & { similarity: number }> = new Map();
let embeddingFailed = false;
const archives = await query<Archive>(
if (search_mode !== 'keyword') {
const embedding = await getEmbedding(searchQuery);
if (embedding) {
const embeddingStr = formatEmbedding(embedding);
const filter = buildFilter(3);
const params: unknown[] = [embeddingStr, limit, ...filter.params];
const rows = await query<Archive & { similarity: number }>(
`SELECT id, archive_type, title, original_path, file_size,
to_char(archived_at, 'YYYY-MM-DD') as archived_at,
1 - (embedding <=> $1) as similarity
FROM project_archives
WHERE embedding IS NOT NULL${filter.where}
ORDER BY embedding <=> $1
LIMIT $2`,
params
);
vectorIds = rows.map(r => r.id);
for (const r of rows) vectorRows.set(r.id, r);
} else {
embeddingFailed = true;
if (search_mode === 'vector') {
return 'Error: Could not generate embedding for vector search';
}
}
}
// Keyword search
let keywordIds: number[] = [];
let keywordRows: Map<number, Archive & { rank: number }> = new Map();
if (search_mode !== 'vector') {
const filter = buildFilter(3);
const params: unknown[] = [searchQuery, limit, ...filter.params];
const rows = await query<Archive & { rank: number }>(
`SELECT id, archive_type, title, original_path, file_size,
to_char(archived_at, 'YYYY-MM-DD') as archived_at
to_char(archived_at, 'YYYY-MM-DD') as archived_at,
ts_rank(search_vector, plainto_tsquery('english', $1)) as rank
FROM project_archives
WHERE ${whereClause}
ORDER BY archived_at DESC
LIMIT $${paramIndex}`,
WHERE search_vector @@ plainto_tsquery('english', $1)${filter.where}
ORDER BY rank DESC
LIMIT $2`,
params
);
keywordIds = rows.map(r => r.id);
for (const r of rows) keywordRows.set(r.id, r);
}
if (archives.length === 0) {
return 'No relevant archives found';
// Merge results
let finalIds: number[];
let searchLabel: string;
let rerankScores: Map<number, number> | null = null;
if (search_mode === 'hybrid' && vectorIds.length > 0 && keywordIds.length > 0) {
const merged = rrfMerge(vectorIds, keywordIds);
finalIds = merged.map(m => m.id as number);
searchLabel = 'hybrid';
// Cross-encoder re-ranking (CF-1317)
const docs = finalIds.map(id => {
const r = vectorRows.get(id) || keywordRows.get(id);
return (r as any)?.title || '';
});
const reranked = await rerank(searchQuery, docs, limit);
if (reranked) {
rerankScores = new Map();
const reorderedIds = reranked.map(r => {
rerankScores!.set(finalIds[r.index], r.relevance_score);
return finalIds[r.index];
});
finalIds = reorderedIds;
searchLabel = 'hybrid+rerank';
} else {
finalIds = finalIds.slice(0, limit);
}
const lines = ['Relevant archives (text search - embeddings unavailable):\n'];
for (const a of archives) {
const sizeStr = a.file_size ? ` (${Math.round(a.file_size / 1024)}KB)` : '';
lines.push(`**[${a.archive_type}]** ${a.title}`);
lines.push(` Archived: ${a.archived_at}${sizeStr}`);
if (a.original_path) {
lines.push(` Path: ${a.original_path}`);
}
lines.push('');
}
return lines.join('\n');
}
// Semantic search with embeddings
const embeddingStr = formatEmbedding(embedding);
let whereClause = 'WHERE embedding IS NOT NULL';
const params: unknown[] = [embeddingStr, limit];
let paramIndex = 3;
if (project) {
whereClause += ` AND project_key = $${paramIndex++}`;
params.splice(params.length - 1, 0, project);
}
if (archive_type) {
whereClause += ` AND archive_type = $${paramIndex++}`;
params.splice(params.length - 1, 0, archive_type);
}
const archives = await query<Archive & { similarity: number }>(
`SELECT id, archive_type, title, original_path, file_size,
to_char(archived_at, 'YYYY-MM-DD') as archived_at,
1 - (embedding <=> $1) as similarity
FROM project_archives
${whereClause}
ORDER BY embedding <=> $1
LIMIT $2`,
params
);
if (archives.length === 0) {
} else if (vectorIds.length > 0) {
finalIds = vectorIds;
searchLabel = 'vector';
} else if (keywordIds.length > 0) {
finalIds = keywordIds;
searchLabel = embeddingFailed ? 'keyword (embedding unavailable)' : 'keyword';
} else {
return 'No relevant archives found';
}
const lines = ['Relevant archives:\n'];
for (const a of archives) {
const sim = Math.round(a.similarity * 100);
// Format output
const lines = [`Relevant archives (${searchLabel}):\n`];
for (const id of finalIds) {
const a = vectorRows.get(id) || keywordRows.get(id);
if (!a) continue;
const simParts: string[] = [];
if (vectorRows.has(id)) simParts.push(`${Math.round((vectorRows.get(id)!).similarity * 100)}% match`);
if (rerankScores?.has(id)) simParts.push(`rerank: ${rerankScores.get(id)!.toFixed(2)}`);
const scores = simParts.length > 0 ? ` (${simParts.join(', ')})` : '';
const sizeStr = a.file_size ? ` (${Math.round(a.file_size / 1024)}KB)` : '';
lines.push(`**[${a.archive_type}]** ${a.title} (${sim}% match)`);
lines.push(`**[${a.archive_type}]** ${a.title}${scores}`);
lines.push(` Archived: ${a.archived_at}${sizeStr}`);
if (a.original_path) {
lines.push(` Path: ${a.original_path}`);

View File

@@ -1,704 +0,0 @@
// CRUD operations for tasks
import { query, queryOne, execute, getNextTaskId, getProjectKey, detectProjectFromCwd, getClient } from '../db.js';
import { getEmbedding, formatEmbedding } from '../embeddings.js';
import type { Task, ChecklistItem, TaskLink } from '../types.js';
import { getRecentDelegations } from './delegations.js';
import { getTaskCommits } from './commits.js';
import { taskLink } from './relations.js';
import { sessionNoteAdd } from './session-docs.js';
import * as fs from 'fs';
import * as path from 'path';
import * as os from 'os';
/**
 * Get current session ID from environment or cache file.
 * Resolution order: CLAUDE_SESSION_ID env var → session-memory cache file →
 * a freshly generated timestamp-based id (when neither source yields one).
 */
function getSessionId(): string {
  // Environment variable takes precedence.
  const fromEnv = process.env.CLAUDE_SESSION_ID;
  if (fromEnv) return fromEnv;
  // Fall back to the session-memory cache file.
  const cacheFile = path.join(os.homedir(), '.cache', 'session-memory', 'current_session');
  try {
    const cached = fs.readFileSync(cacheFile, 'utf-8').trim();
    if (cached) return cached;
  } catch {
    // Missing or unreadable cache file — fall through to a fresh id.
  }
  // Generate a new session ID from the current UTC timestamp.
  const stamp = new Date().toISOString().replace(/[-:T]/g, '').slice(0, 15);
  return `session_${stamp}`;
}
/**
 * Record task activity for session tracking.
 * Best-effort: an insert failure is logged and never propagated, so the
 * calling task operation always succeeds regardless of activity tracking.
 */
async function recordActivity(
  task_id: string,
  activity_type: 'created' | 'updated' | 'status_change' | 'closed',
  old_value?: string,
  new_value?: string
): Promise<void> {
  const session_id = getSessionId();
  const insertSql = `INSERT INTO task_activity (task_id, session_id, activity_type, old_value, new_value)
     VALUES ($1, $2, $3, $4, $5)`;
  try {
    await execute(insertSql, [task_id, session_id, activity_type, old_value || null, new_value || null]);
  } catch {
    // Don't fail the main operation if activity tracking fails
    console.error('Failed to record task activity');
  }
}
// Arguments for taskAdd(). Only 'title' is required; taskAdd defaults the
// rest to project 'Unknown', type 'task', priority 'P2', empty description.
interface TaskAddArgs {
  title: string;
  project?: string;
  type?: string;
  priority?: string;
  description?: string;
}
// Filters for taskList(); 'limit' defaults to 20 in taskList.
interface TaskListArgs {
  project?: string;
  status?: string;
  type?: string;
  priority?: string;
  limit?: number;
}
// Fields a task-update operation may change on the task identified by 'id'.
// NOTE(review): the consuming function is outside this chunk — verify usage.
interface TaskUpdateArgs {
  id: string;
  status?: string;
  priority?: string;
  type?: string;
  title?: string;
}
/**
 * Create a new task.
 *
 * Flow:
 *  1. Resolve the project key and embed "title. description" for semantic
 *     duplicate detection (skipped when the embedding service is unavailable).
 *  2. Build a warning listing >70%-similar tasks — both open and completed
 *     (CF-450) — including a context snippet for completed ones.
 *  3. Insert the task with status 'open', tagged with the current session id.
 *  4. Best-effort extras that never block creation: activity record, session
 *     context note (CF-572), and auto-linking to investigation parent /
 *     current task / recent same-session tasks (CF-166).
 *
 * Returns a human-readable summary string (id, metadata, duplicate warnings,
 * auto-link report).
 */
export async function taskAdd(args: TaskAddArgs): Promise<string> {
  const { title, project = 'Unknown', type = 'task', priority = 'P2', description = '' } = args;
  // Get project key
  const projectKey = await getProjectKey(project);
  // Generate embedding for duplicate detection
  const embedText = description ? `${title}. ${description}` : title;
  const embedding = await getEmbedding(embedText);
  const embeddingValue = embedding ? formatEmbedding(embedding) : null;
  // Check for similar/duplicate tasks (only if embedding succeeded)
  // CF-450: Check both open AND completed tasks to avoid circular work
  let duplicateWarning = '';
  if (embeddingValue) {
    // <=> is the pgvector distance operator; 1 - distance ≈ cosine similarity.
    const similarTasks = await query<{ id: string; title: string; status: string; description: string; similarity: number }>(
      `SELECT id, title, status, description, 1 - (embedding <=> $1) as similarity
       FROM tasks
       WHERE project = $2 AND embedding IS NOT NULL
       ORDER BY embedding <=> $1
       LIMIT 5`,
      [embeddingValue, projectKey]
    );
    // Warn if highly similar tasks exist (>70% similarity)
    const highSimilarity = similarTasks.filter(t => t.similarity > 0.70);
    if (highSimilarity.length > 0) {
      duplicateWarning = '\n\n⚠ Similar tasks found:\n';
      const openTasks = highSimilarity.filter(t => t.status !== 'completed');
      const completedTasks = highSimilarity.filter(t => t.status === 'completed');
      if (openTasks.length > 0) {
        duplicateWarning += '\n**Open/In Progress:**\n';
        for (const t of openTasks) {
          const pct = Math.round(t.similarity * 100);
          duplicateWarning += ` - ${t.id}: ${t.title} (${pct}% match, ${t.status})\n`;
        }
      }
      if (completedTasks.length > 0) {
        duplicateWarning += '\n**Previously Completed:**\n';
        for (const t of completedTasks) {
          const pct = Math.round(t.similarity * 100);
          duplicateWarning += ` - ${t.id}: ${t.title} (${pct}% match)\n`;
          // Show snippet of solution/outcome from description
          if (t.description) {
            const snippet = t.description.substring(0, 150).replace(/\n/g, ' ').replace(/"/g, '\\"');
            const ellipsis = t.description.length > 150 ? '...' : '';
            duplicateWarning += ` Context: "${snippet}${ellipsis}"\n`;
          }
        }
        duplicateWarning += '\n 💡 Use "task show <id>" to see full solution before recreating work\n';
      }
      duplicateWarning += '\nConsider linking with: task link <new-id> <related-id> relates_to';
    }
  }
  // Get next task ID
  const taskId = await getNextTaskId(projectKey);
  // Get current session ID for linking
  const session_id = getSessionId();
  // Insert task with session_id
  // Two insert variants: with embedding column when one was generated, without otherwise.
  if (embeddingValue) {
    await execute(
      `INSERT INTO tasks (id, project, title, description, type, status, priority, session_id, embedding)
       VALUES ($1, $2, $3, $4, $5, 'open', $6, $7, $8)`,
      [taskId, projectKey, title, description, type, priority, session_id, embeddingValue]
    );
  } else {
    await execute(
      `INSERT INTO tasks (id, project, title, description, type, status, priority, session_id)
       VALUES ($1, $2, $3, $4, $5, 'open', $6, $7)`,
      [taskId, projectKey, title, description, type, priority, session_id]
    );
  }
  // Record activity for session tracking
  await recordActivity(taskId, 'created', undefined, 'open');
  // CF-572 Phase 3: Auto-capture conversation context as session note
  // Ensures task context is preserved even if session exits abnormally
  if (session_id) {
    try {
      const contextNote = description
        ? `Created task: ${title}\n\nDescription:\n${description}`
        : `Created task: ${title}`;
      await sessionNoteAdd({
        session_id,
        note_type: 'context',
        content: contextNote,
      });
    } catch (err) {
      // Silently fail context capture - don't block task creation
      console.error('Failed to capture task context for session:', err);
    }
  }
  // Enhanced auto-linking logic (CF-166)
  let autoLinkMessage = '';
  try {
    const sessionContext = await queryOne<{
      current_task_id: string | null;
      investigation_parent_id: string | null;
      auto_link_enabled: boolean;
    }>(
      `SELECT current_task_id, investigation_parent_id, auto_link_enabled
       FROM session_context WHERE session_id = $1`,
      [session_id]
    );
    // Auto-linking is opt-out: missing context (undefined) still links.
    if (sessionContext?.auto_link_enabled !== false) {
      const linkedTasks: string[] = [];
      // 1. Auto-link to investigation parent if this is created during an investigation
      if (sessionContext?.investigation_parent_id) {
        await execute(
          `INSERT INTO task_links (from_task_id, to_task_id, link_type, auto_linked)
           VALUES ($1, $2, 'relates_to', true)
           ON CONFLICT DO NOTHING`,
          [taskId, sessionContext.investigation_parent_id]
        );
        linkedTasks.push(`${sessionContext.investigation_parent_id} (investigation)`);
      }
      // 2. Auto-link to current working task if different from investigation parent
      if (sessionContext?.current_task_id &&
          sessionContext.current_task_id !== sessionContext?.investigation_parent_id) {
        await execute(
          `INSERT INTO task_links (from_task_id, to_task_id, link_type, auto_linked)
           VALUES ($1, $2, 'relates_to', true)
           ON CONFLICT DO NOTHING`,
          [taskId, sessionContext.current_task_id]
        );
        linkedTasks.push(`${sessionContext.current_task_id} (current task)`);
      }
      // 3. Time-based auto-linking: find tasks created within 1 hour in same session
      // Only used when neither explicit context link above applied.
      if (!sessionContext?.investigation_parent_id && !sessionContext?.current_task_id) {
        const recentTasks = await query<{ id: string; title: string }>(
          `SELECT id, title FROM tasks
           WHERE session_id = $1 AND id != $2
           AND created_at > NOW() - INTERVAL '1 hour'
           AND status != 'completed'
           ORDER BY created_at DESC
           LIMIT 3`,
          [session_id, taskId]
        );
        for (const task of recentTasks) {
          await execute(
            `INSERT INTO task_links (from_task_id, to_task_id, link_type, auto_linked)
             VALUES ($1, $2, 'relates_to', true)
             ON CONFLICT DO NOTHING`,
            [taskId, task.id]
          );
          linkedTasks.push(`${task.id} (recent)`);
        }
      }
      if (linkedTasks.length > 0) {
        autoLinkMessage = `\n\n🔗 Auto-linked to: ${linkedTasks.join(', ')}`;
      }
    }
  } catch (error) {
    // Log but don't fail if auto-linking fails
    console.error('Auto-linking failed:', error);
  }
  return `Created: ${taskId}\n Title: ${title}\n Type: ${type}\n Priority: ${priority}\n Project: ${projectKey}${embedding ? '\n (embedded for semantic search)' : ''}${duplicateWarning}${autoLinkMessage}`;
}
/**
* List tasks with filters
* Auto-detects project from CWD if not explicitly provided
*/
export async function taskList(args: TaskListArgs): Promise<string> {
const { project, status, type, priority, limit = 20 } = args;
let whereClause = 'WHERE 1=1';
const params: unknown[] = [];
let paramIndex = 1;
// Auto-detect project from CWD if not explicitly provided
const effectiveProject = project || detectProjectFromCwd();
if (effectiveProject) {
const projectKey = await getProjectKey(effectiveProject);
whereClause += ` AND project = $${paramIndex++}`;
params.push(projectKey);
}
if (status) {
whereClause += ` AND status = $${paramIndex++}`;
params.push(status);
}
if (type) {
whereClause += ` AND type = $${paramIndex++}`;
params.push(type);
}
if (priority) {
whereClause += ` AND priority = $${paramIndex++}`;
params.push(priority);
}
params.push(limit);
const tasks = await query<Task>(
`SELECT id, title, type, status, priority, project
FROM tasks
${whereClause}
ORDER BY
CASE priority WHEN 'P0' THEN 0 WHEN 'P1' THEN 1 WHEN 'P2' THEN 2 ELSE 3 END,
created_at DESC
LIMIT $${paramIndex}`,
params
);
if (tasks.length === 0) {
return `No tasks found${effectiveProject ? ` for project ${effectiveProject}` : ''}`;
}
const lines = tasks.map(t => {
const statusIcon = t.status === 'completed' ? '[x]' : t.status === 'in_progress' ? '[>]' : t.status === 'blocked' ? '[!]' : '[ ]';
const typeLabel = t.type !== 'task' ? ` [${t.type}]` : '';
return `${statusIcon} ${t.priority} ${t.id}: ${t.title}${typeLabel}`;
});
return `Tasks${effectiveProject ? ` (${effectiveProject})` : ''}:\n\n${lines.join('\n')}`;
}
/**
 * Show task details as a markdown report.
 *
 * Includes core fields, formatted timestamps, originating session,
 * description, checklist progress, dependency links (blocked-by / blocks /
 * related / duplicates), linked commits, and recent delegations.
 *
 * @param id Task ID (e.g. ST-1, VPN-45)
 * @returns Markdown report, or a "Task not found" message.
 */
export async function taskShow(id: string): Promise<string> {
  // The to_char() aliases come back as extra string columns; declare them on
  // the row type directly instead of re-casting through `unknown` at every
  // use site (the previous `as unknown as { created: string }` pattern).
  const task = await queryOne<Task & {
    session_id?: string;
    created: string;
    updated: string;
    completed: string | null;
  }>(
    `SELECT id, project, title, description, type, status, priority, session_id,
            to_char(created_at, 'YYYY-MM-DD HH24:MI') as created,
            to_char(updated_at, 'YYYY-MM-DD HH24:MI') as updated,
            to_char(completed_at, 'YYYY-MM-DD HH24:MI') as completed
     FROM tasks WHERE id = $1`,
    [id]
  );
  if (!task) {
    return `Task not found: ${id}`;
  }

  let output = `# ${task.id}\n\n`;
  output += `**Title:** ${task.title}\n`;
  output += `**Project:** ${task.project}\n`;
  output += `**Type:** ${task.type}\n`;
  output += `**Status:** ${task.status}\n`;
  output += `**Priority:** ${task.priority}\n`;
  output += `**Created:** ${task.created}\n`;
  output += `**Updated:** ${task.updated}\n`;
  if (task.completed) {
    output += `**Completed:** ${task.completed}\n`;
  }
  if (task.session_id) {
    output += `**Created in session:** ${task.session_id}\n`;
  }
  if (task.description) {
    output += `\n**Description:**\n${task.description}\n`;
  }

  // Checklist with done/total progress
  const checklist = await query<ChecklistItem>(
    `SELECT id, item, checked FROM task_checklist
     WHERE task_id = $1 ORDER BY position, id`,
    [id]
  );
  if (checklist.length > 0) {
    const done = checklist.filter(c => c.checked).length;
    output += `\n**Checklist:** (${done}/${checklist.length})\n`;
    for (const item of checklist) {
      output += ` ${item.checked ? '[x]' : '[ ]'} ${item.item} (#${item.id})\n`;
    }
  }

  // Dependencies: tasks blocking this one ('blocks' links pointing at us)...
  const blockedBy = await query<{ id: string; title: string }>(
    `SELECT t.id, t.title FROM task_links l
     JOIN tasks t ON t.id = l.from_task_id
     WHERE l.to_task_id = $1 AND l.link_type = 'blocks'`,
    [id]
  );
  // ...and tasks this one blocks (our outgoing 'blocks' links).
  const blocks = await query<{ id: string; title: string }>(
    `SELECT t.id, t.title FROM task_links l
     JOIN tasks t ON t.id = l.to_task_id
     WHERE l.from_task_id = $1 AND l.link_type = 'blocks'`,
    [id]
  );
  if (blockedBy.length > 0) {
    output += `\n**Blocked by:**\n`;
    for (const t of blockedBy) {
      output += ` - ${t.id}: ${t.title}\n`;
    }
  }
  if (blocks.length > 0) {
    output += `\n**Blocks:**\n`;
    for (const t of blocks) {
      output += ` - ${t.id}: ${t.title}\n`;
    }
  }

  // Related tasks: only the outgoing direction is queried; links are treated
  // as symmetric by the callers that create them.
  const relatesTo = await query<{ id: string; title: string }>(
    `SELECT t.id, t.title FROM task_links l
     JOIN tasks t ON t.id = l.to_task_id
     WHERE l.from_task_id = $1 AND l.link_type = 'relates_to'`,
    [id]
  );
  if (relatesTo.length > 0) {
    output += `\n**Related:**\n`;
    for (const t of relatesTo) {
      output += ` - ${t.id}: ${t.title}\n`;
    }
  }

  // Duplicate links (outgoing direction, same symmetry assumption)
  const duplicates = await query<{ id: string; title: string }>(
    `SELECT t.id, t.title FROM task_links l
     JOIN tasks t ON t.id = l.to_task_id
     WHERE l.from_task_id = $1 AND l.link_type = 'duplicates'`,
    [id]
  );
  if (duplicates.length > 0) {
    output += `\n**Duplicates:**\n`;
    for (const t of duplicates) {
      output += ` - ${t.id}: ${t.title}\n`;
    }
  }

  // Linked commits and recent delegation history (each returns a preformatted
  // markdown section, or a falsy value when there is nothing to show).
  const commitHistory = await getTaskCommits(id);
  if (commitHistory) {
    output += commitHistory;
  }
  const delegationHistory = await getRecentDelegations(id);
  if (delegationHistory) {
    output += delegationHistory;
  }
  return output;
}
/**
 * Close a task: mark it completed, stamp completed_at/updated_at, and
 * record a 'closed' activity with the previous status.
 */
export async function taskClose(id: string): Promise<string> {
  // Snapshot the current status first so the activity log can capture the
  // old -> new transition.
  const previous = await queryOne<{ status: string }>(`SELECT status FROM tasks WHERE id = $1`, [id]);

  const affected = await execute(
    `UPDATE tasks
     SET status = 'completed', completed_at = NOW(), updated_at = NOW()
     WHERE id = $1`,
    [id]
  );
  if (affected === 0) {
    return `Task not found: ${id}`;
  }

  await recordActivity(id, 'closed', previous?.status, 'completed');
  return `Closed: ${id}`;
}
/**
 * Update task fields (status, priority, type, title).
 *
 * Records a status_change activity when the status actually changed
 * (generic 'updated' otherwise) and keeps session_context in sync:
 * in_progress sets the current working task, completed clears it.
 */
export async function taskUpdate(args: TaskUpdateArgs): Promise<string> {
  const { id, status, priority, type, title } = args;

  // Read the current status up front for the activity log comparison.
  const task = await queryOne<{ status: string }>(`SELECT status FROM tasks WHERE id = $1`, [id]);
  if (!task) {
    return `Task not found: ${id}`;
  }

  const setClauses: string[] = [];
  const params: unknown[] = [];
  // Register one parameterized SET assignment.
  const addSet = (column: string, value: unknown): void => {
    params.push(value);
    setClauses.push(`${column} = $${params.length}`);
  };

  if (status) {
    addSet('status', status);
    // Completing a task also stamps its completion time.
    if (status === 'completed') {
      setClauses.push(`completed_at = NOW()`);
    }
  }
  if (priority) {
    addSet('priority', priority);
  }
  if (type) {
    addSet('type', type);
  }
  if (title) {
    addSet('title', title);
  }

  if (setClauses.length === 0) {
    return 'No updates specified';
  }
  setClauses.push('updated_at = NOW()');
  params.push(id);

  const affected = await execute(
    `UPDATE tasks SET ${setClauses.join(', ')} WHERE id = $${params.length}`,
    params
  );
  if (affected === 0) {
    return `Task not found: ${id}`;
  }

  // Activity log: explicit transition when status changed, generic otherwise.
  if (status && status !== task.status) {
    await recordActivity(id, 'status_change', task.status, status);
  } else {
    await recordActivity(id, 'updated');
  }

  // Best-effort session-context maintenance; the update itself already
  // succeeded, so context failures are swallowed.
  if (status) {
    const session_id = getSessionId();
    try {
      if (status === 'in_progress') {
        // This task becomes the session's current working task.
        await execute(
          `INSERT INTO session_context (session_id, current_task_id)
           VALUES ($1, $2)
           ON CONFLICT (session_id) DO UPDATE SET current_task_id = $2, updated_at = NOW()`,
          [session_id, id]
        );
      } else if (status === 'completed') {
        // Drop the context row only when it still points at this task.
        await execute(
          `DELETE FROM session_context
           WHERE session_id = $1 AND current_task_id = $2`,
          [session_id, id]
        );
      }
    } catch {
      // Silently fail if session context unavailable
    }
  }
  return `Updated: ${id}`;
}
/**
 * Start an investigation workflow (CF-166).
 *
 * Creates an 'investigation' task via taskAdd, then stores it in
 * session_context as both the investigation parent and current task so all
 * subsequently created tasks auto-link to it.
 */
export async function taskInvestigate(args: TaskAddArgs): Promise<string> {
  const { title, project, priority = 'P1', description = '' } = args;

  // Reuse taskAdd so ID allocation, embedding, duplicate detection, and
  // auto-linking behave exactly as for any other task.
  const createdMessage = await taskAdd({
    title,
    project,
    type: 'investigation',
    priority,
    description: description || 'Investigation task to coordinate related subtasks',
  });

  // taskAdd reports "Created: XX-123\n..."; recover the allocated ID.
  const match = createdMessage.match(/Created: ([\w-]+)/);
  if (!match) {
    return createdMessage; // Unexpected format - pass the message through
  }
  const investigationId = match[1];

  const session_id = getSessionId();
  try {
    await execute(
      `INSERT INTO session_context (session_id, current_task_id, investigation_parent_id)
       VALUES ($1, $2, $2)
       ON CONFLICT (session_id) DO UPDATE
       SET investigation_parent_id = $2, current_task_id = $2, updated_at = NOW()`,
      [session_id, investigationId]
    );
  } catch (error) {
    // Context setup is best-effort; the investigation task already exists.
    console.error('Failed to set investigation context:', error);
  }

  return createdMessage + '\n\n🔍 Investigation started! All new tasks will auto-link to this investigation.';
}
/** Arguments for taskMoveProject (CF-301). */
interface TaskMoveProjectArgs {
  id: string;              // Task ID to move (e.g., CF-295)
  target_project: string;  // Target project key (e.g., VPN, ST, GB)
  reason?: string;         // Optional human-readable reason, recorded in the activity log
}
/**
 * Move task to different project while preserving history (CF-301)
 * Creates new task with next ID in target project and transfers all related data.
 *
 * The whole move runs in one transaction: copy the task under its new ID,
 * re-point every related record, log the move, close the old task with a
 * pointer to the new one, and link the two as duplicates.
 *
 * @param args.id             Source task ID
 * @param args.target_project Target project key (must exist in projects)
 * @param args.reason         Optional reason recorded in the activity log
 * @returns Human-readable success or validation-failure message.
 * @throws Re-throws any DB error after rolling back the transaction.
 */
export async function taskMoveProject(args: TaskMoveProjectArgs): Promise<string> {
  const { id, target_project, reason } = args;
  // Validate source task exists
  const task = await queryOne<{ project: string; status: string }>(
    `SELECT project, status FROM tasks WHERE id = $1`,
    [id]
  );
  if (!task) {
    return `Task not found: ${id}`;
  }
  // No-op guard: nothing to do when already in the target project.
  if (task.project === target_project) {
    return `Task ${id} is already in project ${target_project}`;
  }
  // Validate target project exists
  const targetProj = await queryOne<{ key: string }>(
    `SELECT key FROM projects WHERE key = $1`,
    [target_project]
  );
  if (!targetProj) {
    return `Target project not found: ${target_project}`;
  }
  // Generate new ID using getNextTaskId
  const newId = await getNextTaskId(target_project);
  // Execute move in transaction
  const client = await getClient();
  try {
    await client.query('BEGIN');
    // Insert new task (copy of old). This MUST happen before the transfers
    // below so the new ID exists for any FK references being re-pointed.
    await client.query(`
      INSERT INTO tasks (id, project, title, description, type, status, priority,
                         version_id, epic_id, embedding, created_at, updated_at,
                         completed_at, session_id)
      SELECT $1, $2, title, description, type, status, priority,
             version_id, epic_id, embedding, created_at, NOW(), completed_at, session_id
      FROM tasks WHERE id = $3
    `, [newId, target_project, id]);
    // Transfer all related records ($1 = new ID, $2 = old ID in each statement)
    const transfers = [
      `UPDATE task_checklist SET task_id = $1 WHERE task_id = $2`,
      `UPDATE task_commits SET task_id = $1 WHERE task_id = $2`,
      `UPDATE task_delegations SET task_id = $1 WHERE task_id = $2`,
      `UPDATE task_activity SET task_id = $1 WHERE task_id = $2`,
      `UPDATE task_links SET from_task_id = $1 WHERE from_task_id = $2`,
      `UPDATE task_links SET to_task_id = $1 WHERE to_task_id = $2`,
      `UPDATE deployments SET task_id = $1 WHERE task_id = $2`,
      `UPDATE memories SET task_id = $1 WHERE task_id = $2`,
      `UPDATE session_context SET current_task_id = $1 WHERE current_task_id = $2`,
      `UPDATE session_context SET investigation_parent_id = $1 WHERE investigation_parent_id = $2`,
      `UPDATE task_learning_effectiveness SET task_id = $1 WHERE task_id = $2`,
    ];
    for (const sql of transfers) {
      await client.query(sql, [newId, id]);
    }
    // Record activity (attached to the NEW task id, since the old task's
    // activity rows were just transferred to it anyway)
    await client.query(`
      INSERT INTO task_activity (task_id, activity_type, old_value, new_value, note, created_at)
      VALUES ($1, 'project_moved', $2, $3, $4, NOW())
    `, [newId, task.project, target_project, reason || 'Moved via task_move_project']);
    // Update old task: close it and append a forwarding note to its description
    await client.query(`
      UPDATE tasks
      SET status = 'completed',
          completed_at = NOW(),
          updated_at = NOW(),
          description = COALESCE(description, '') || $1
      WHERE id = $2
    `, [`\n\n---\n**Moved to ${newId}**${reason ? ` (Reason: ${reason})` : ''}`, id]);
    // Add duplicate link (old -> new) so both IDs stay discoverable
    await client.query(`
      INSERT INTO task_links (from_task_id, to_task_id, link_type, created_at)
      VALUES ($1, $2, 'duplicates', NOW())
    `, [id, newId]);
    await client.query('COMMIT');
    // NOTE(review): this message renders the IDs/projects back-to-back with
    // no separator; it looks like an arrow glyph was lost — confirm intended text.
    return `Moved ${id}${newId} (project: ${task.project}${target_project})`;
  } catch (error) {
    await client.query('ROLLBACK');
    throw error;
  } finally {
    // Always return the client to the pool, commit or rollback alike.
    client.release();
  }
}

View File

@@ -1,241 +0,0 @@
// Epic operations for task management
import { query, queryOne, execute, getProjectKey, detectProjectFromCwd } from '../db.js';
import { getEmbedding, formatEmbedding } from '../embeddings.js';
import type { Epic, Task } from '../types.js';
/** Arguments for epicAdd. */
interface EpicAddArgs {
  title: string;        // Epic title (required)
  project?: string;     // Project key; callers may omit it (epicAdd defaults it)
  description?: string; // Optional scope description
}
/** Arguments for epicList. */
interface EpicListArgs {
  project?: string; // Filter by project key; auto-detected from CWD when omitted
  status?: string;  // Filter by epic status
  limit?: number;   // Max results (epicList defaults to 20)
}
/** Arguments for epicAssign. */
interface EpicAssignArgs {
  task_id: string; // Task to assign
  epic_id: string; // Epic to assign it to
}
/**
 * Get the next epic ID for a project (e.g. "VPN-E3").
 *
 * Uses an upsert on epic_sequences as an atomic per-project counter: the
 * first call seeds the row at 1, later calls increment and return it.
 */
async function getNextEpicId(projectKey: string): Promise<string> {
  const row = await queryOne<{ next_id: number }>(
    `INSERT INTO epic_sequences (project, next_id) VALUES ($1, 1)
     ON CONFLICT (project) DO UPDATE SET next_id = epic_sequences.next_id + 1
     RETURNING next_id`,
    [projectKey]
  );
  // RETURNING should always yield a row; fall back to 1 defensively.
  const sequence = row?.next_id || 1;
  return `${projectKey}-E${sequence}`;
}
/**
 * Create a new epic.
 *
 * Resolves the project key, allocates the next sequential epic ID, and
 * attaches a semantic-search embedding built from title (+description)
 * when the embedding service returns one.
 *
 * @returns Confirmation message including the new epic ID.
 */
export async function epicAdd(args: EpicAddArgs): Promise<string> {
  const { title, project = 'Unknown', description = '' } = args;
  // Get project key
  const projectKey = await getProjectKey(project);
  // Get next epic ID
  const epicId = await getNextEpicId(projectKey);
  // Generate embedding; getEmbedding may return nothing, in which case we
  // store NULL and skip the "(embedded ...)" suffix below.
  const embedText = description ? `${title}. ${description}` : title;
  const embedding = await getEmbedding(embedText);
  const embeddingValue = embedding ? formatEmbedding(embedding) : null;
  // Single parameterized INSERT: binding NULL for a missing embedding is
  // equivalent to omitting the nullable column, so the previous duplicated
  // two-branch INSERT collapses into one statement.
  await execute(
    `INSERT INTO epics (id, project, title, description, embedding)
     VALUES ($1, $2, $3, $4, $5)`,
    [epicId, projectKey, title, description, embeddingValue]
  );
  return `Created epic: ${epicId}\n Title: ${title}\n Project: ${projectKey}${embedding ? '\n (embedded for semantic search)' : ''}`;
}
/**
* List epics with filters
* Auto-detects project from CWD if not explicitly provided
*/
export async function epicList(args: EpicListArgs): Promise<string> {
const { project, status, limit = 20 } = args;
let whereClause = 'WHERE 1=1';
const params: unknown[] = [];
let paramIndex = 1;
// Auto-detect project from CWD if not explicitly provided
const effectiveProject = project || detectProjectFromCwd();
if (effectiveProject) {
const projectKey = await getProjectKey(effectiveProject);
whereClause += ` AND e.project = $${paramIndex++}`;
params.push(projectKey);
}
if (status) {
whereClause += ` AND e.status = $${paramIndex++}`;
params.push(status);
}
params.push(limit);
const epics = await query<Epic & { task_count: number; open_count: number }>(
`SELECT e.id, e.title, e.status, e.project,
COUNT(t.id) as task_count,
COUNT(t.id) FILTER (WHERE t.status != 'completed') as open_count
FROM epics e
LEFT JOIN tasks t ON t.epic_id = e.id
${whereClause}
GROUP BY e.id, e.title, e.status, e.project, e.created_at
ORDER BY
CASE e.status WHEN 'in_progress' THEN 0 WHEN 'open' THEN 1 ELSE 2 END,
e.created_at DESC
LIMIT $${paramIndex}`,
params
);
if (epics.length === 0) {
return `No epics found${effectiveProject ? ` for project ${effectiveProject}` : ''}`;
}
const lines = epics.map(e => {
const statusIcon = e.status === 'completed' ? '[x]' : e.status === 'in_progress' ? '[>]' : '[ ]';
const progress = e.task_count > 0 ? ` (${e.task_count - e.open_count}/${e.task_count} done)` : '';
return `${statusIcon} ${e.id}: ${e.title}${progress}`;
});
return `Epics${effectiveProject ? ` (${effectiveProject})` : ''}:\n\n${lines.join('\n')}`;
}
/**
 * Show epic details with its assigned tasks as a markdown report.
 *
 * Tasks are ordered in_progress → open → blocked → rest, then by priority.
 */
export async function epicShow(id: string): Promise<string> {
  const epic = await queryOne<Epic & { created: string }>(
    `SELECT id, project, title, description, status,
            to_char(created_at, 'YYYY-MM-DD HH24:MI') as created
     FROM epics WHERE id = $1`,
    [id]
  );
  if (!epic) {
    return `Epic not found: ${id}`;
  }

  // Assemble the report as parts joined at the end.
  const parts: string[] = [`# ${epic.id}\n\n`];
  parts.push(`**Title:** ${epic.title}\n`);
  parts.push(`**Project:** ${epic.project}\n`);
  parts.push(`**Status:** ${epic.status}\n`);
  parts.push(`**Created:** ${epic.created}\n`);
  if (epic.description) {
    parts.push(`\n**Description:**\n${epic.description}\n`);
  }

  const tasks = await query<Task>(
    `SELECT id, title, status, priority, type
     FROM tasks
     WHERE epic_id = $1
     ORDER BY
       CASE status WHEN 'in_progress' THEN 0 WHEN 'open' THEN 1 WHEN 'blocked' THEN 2 ELSE 3 END,
       CASE priority WHEN 'P0' THEN 0 WHEN 'P1' THEN 1 WHEN 'P2' THEN 2 ELSE 3 END`,
    [id]
  );
  if (tasks.length === 0) {
    parts.push(`\n**Tasks:** None assigned\n`);
  } else {
    const doneCount = tasks.filter(t => t.status === 'completed').length;
    parts.push(`\n**Tasks:** (${doneCount}/${tasks.length} done)\n`);
    const icons: Record<string, string> = { completed: '[x]', in_progress: '[>]', blocked: '[!]' };
    for (const t of tasks) {
      parts.push(` ${icons[t.status] ?? '[ ]'} ${t.priority} ${t.id}: ${t.title}\n`);
    }
  }
  return parts.join('');
}
/**
 * Assign a task to an epic.
 *
 * Verifies the epic exists before touching the task; reports which side
 * was missing on failure.
 */
export async function epicAssign(args: EpicAssignArgs): Promise<string> {
  const { task_id, epic_id } = args;

  // Reject assignments to nonexistent epics up front.
  const epicRow = await queryOne<{ id: string }>(`SELECT id FROM epics WHERE id = $1`, [epic_id]);
  if (!epicRow) {
    return `Epic not found: ${epic_id}`;
  }

  const affected = await execute(
    `UPDATE tasks SET epic_id = $1, updated_at = NOW() WHERE id = $2`,
    [epic_id, task_id]
  );
  return affected === 0
    ? `Task not found: ${task_id}`
    : `Assigned ${task_id} to epic ${epic_id}`;
}
/**
 * Unassign a task from its epic (clears tasks.epic_id).
 */
export async function epicUnassign(task_id: string): Promise<string> {
  const affected = await execute(
    `UPDATE tasks SET epic_id = NULL, updated_at = NOW() WHERE id = $1`,
    [task_id]
  );
  return affected === 0
    ? `Task not found: ${task_id}`
    : `Unassigned ${task_id} from its epic`;
}
/**
 * Close an epic (mark as completed).
 *
 * No-ops with a message when the epic is missing or already completed.
 */
export async function epicClose(id: string): Promise<string> {
  // Fetch title/status in one round trip: title for the success message,
  // status for the already-completed guard.
  const epicRow = await queryOne<{ id: string; title: string; status: string }>(
    `SELECT id, title, status FROM epics WHERE id = $1`,
    [id]
  );
  if (!epicRow) {
    return `Epic not found: ${id}`;
  }
  if (epicRow.status === 'completed') {
    return `Epic already completed: ${id}`;
  }

  await execute(
    `UPDATE epics SET status = 'completed', updated_at = NOW() WHERE id = $1`,
    [id]
  );
  return `Closed: ${id} (${epicRow.title})`;
}

View File

@@ -1,340 +1,16 @@
// Tool definitions for task-mcp
// Tool definitions for session-mcp
// Forked from task-mcp (CF-762): Removed task/epic/version/search/relations tools
// Those are now handled by Jira Cloud via mcp-atlassian
export const toolDefinitions = [
// CRUD Tools
{
name: 'task_add',
description: 'Create a new task with auto-generated ID and semantic embedding',
inputSchema: {
type: 'object',
properties: {
title: { type: 'string', description: 'Task title (required)' },
project: { type: 'string', description: 'Project key (e.g., ST, VPN). Auto-detected from CWD if not provided.' },
type: { type: 'string', enum: ['task', 'bug', 'feature', 'debt', 'investigation'], description: 'Task type (default: task)' },
priority: { type: 'string', enum: ['P0', 'P1', 'P2', 'P3'], description: 'Priority level (default: P2)' },
description: { type: 'string', description: 'Optional description' },
},
required: ['title'],
},
},
{
name: 'task_list',
description: 'List tasks with optional filters',
inputSchema: {
type: 'object',
properties: {
project: { type: 'string', description: 'Filter by project key' },
status: { type: 'string', enum: ['pending', 'open', 'in_progress', 'testing', 'blocked', 'completed'], description: 'Filter by status' },
type: { type: 'string', enum: ['task', 'bug', 'feature', 'debt', 'investigation'], description: 'Filter by type' },
priority: { type: 'string', enum: ['P0', 'P1', 'P2', 'P3'], description: 'Filter by priority' },
limit: { type: 'number', description: 'Max results (default: 20)' },
},
},
},
{
name: 'task_show',
description: 'Show task details including checklist and dependencies',
inputSchema: {
type: 'object',
properties: {
id: { type: 'string', description: 'Task ID (e.g., ST-1, VPN-45)' },
},
required: ['id'],
},
},
{
name: 'task_close',
description: 'Mark a task as completed',
inputSchema: {
type: 'object',
properties: {
id: { type: 'string', description: 'Task ID to close' },
},
required: ['id'],
},
},
{
name: 'task_update',
description: 'Update task fields (status, priority, type, title)',
inputSchema: {
type: 'object',
properties: {
id: { type: 'string', description: 'Task ID to update' },
status: { type: 'string', enum: ['pending', 'open', 'in_progress', 'testing', 'blocked', 'completed'], description: 'New status' },
priority: { type: 'string', enum: ['P0', 'P1', 'P2', 'P3'], description: 'New priority' },
type: { type: 'string', enum: ['task', 'bug', 'feature', 'debt', 'investigation'], description: 'New type' },
title: { type: 'string', description: 'New title' },
},
required: ['id'],
},
},
{
name: 'task_investigate',
description: 'Start an investigation workflow: creates an investigation task and auto-links all subsequent tasks to it. Use when beginning multi-step problem analysis.',
inputSchema: {
type: 'object',
properties: {
title: { type: 'string', description: 'Investigation title (required)' },
project: { type: 'string', description: 'Project key (e.g., ST, VPN). Auto-detected from CWD if not provided.' },
priority: { type: 'string', enum: ['P0', 'P1', 'P2', 'P3'], description: 'Priority level (default: P1)' },
description: { type: 'string', description: 'Optional description of investigation scope' },
},
required: ['title'],
},
},
{
name: 'task_move_project',
description: 'Move task to different project while preserving history',
inputSchema: {
type: 'object',
properties: {
id: { type: 'string', description: 'Task ID to move (e.g., CF-295)' },
target_project: { type: 'string', description: 'Target project key (e.g., VPN, ST, GB)' },
reason: { type: 'string', description: 'Optional reason for move' },
},
required: ['id', 'target_project'],
},
},
// Semantic Search Tools
{
name: 'task_similar',
description: 'Find semantically similar tasks using pgvector',
inputSchema: {
type: 'object',
properties: {
query: { type: 'string', description: 'Search query' },
project: { type: 'string', description: 'Filter by project (optional)' },
limit: { type: 'number', description: 'Max results (default: 5)' },
},
required: ['query'],
},
},
{
name: 'task_context',
description: 'Get related tasks for current work context (useful for delegations)',
inputSchema: {
type: 'object',
properties: {
description: { type: 'string', description: 'Description of current work' },
project: { type: 'string', description: 'Current project' },
limit: { type: 'number', description: 'Max related tasks (default: 3)' },
},
required: ['description'],
},
},
{
name: 'task_session_context',
description: 'Get session context for a task - retrieves notes, decisions, and related tasks from the session where the task was created. Use this to understand the original context and requirements.',
inputSchema: {
type: 'object',
properties: {
id: { type: 'string', description: 'Task ID (e.g., CF-570)' },
},
required: ['id'],
},
},
// Relation Tools
{
name: 'task_link',
description: 'Create dependency between tasks',
inputSchema: {
type: 'object',
properties: {
from_id: { type: 'string', description: 'Source task ID' },
to_id: { type: 'string', description: 'Target task ID' },
link_type: { type: 'string', enum: ['blocks', 'relates_to', 'duplicates', 'depends_on', 'needs', 'implements', 'fixes', 'causes', 'subtask_of'], description: 'Relationship type' },
},
required: ['from_id', 'to_id', 'link_type'],
},
},
{
name: 'task_checklist_add',
description: 'Add a checklist item to a task',
inputSchema: {
type: 'object',
properties: {
task_id: { type: 'string', description: 'Task ID' },
item: { type: 'string', description: 'Checklist item text' },
},
required: ['task_id', 'item'],
},
},
{
name: 'task_checklist_toggle',
description: 'Toggle a checklist item (check/uncheck)',
inputSchema: {
type: 'object',
properties: {
item_id: { type: 'number', description: 'Checklist item ID' },
checked: { type: 'boolean', description: 'New checked state' },
},
required: ['item_id', 'checked'],
},
},
{
name: 'task_resolve_duplicate',
description: 'Resolve a duplicate issue by closing it and linking to the dominant issue',
inputSchema: {
type: 'object',
properties: {
duplicate_id: { type: 'string', description: 'The duplicate task ID to close' },
dominant_id: { type: 'string', description: 'The dominant/original task ID to keep' },
},
required: ['duplicate_id', 'dominant_id'],
},
},
// Epic Tools
{
name: 'epic_add',
description: 'Create a new epic (session-scoped work bundle) with auto-generated ID',
inputSchema: {
type: 'object',
properties: {
title: { type: 'string', description: 'Epic title (required)' },
project: { type: 'string', description: 'Project key (e.g., VPN, ST). Auto-detected if not provided.' },
description: { type: 'string', description: 'Optional description of the epic scope' },
},
required: ['title'],
},
},
{
name: 'epic_list',
description: 'List epics with task counts and progress',
inputSchema: {
type: 'object',
properties: {
project: { type: 'string', description: 'Filter by project key' },
status: { type: 'string', enum: ['open', 'in_progress', 'completed'], description: 'Filter by status' },
limit: { type: 'number', description: 'Max results (default: 20)' },
},
},
},
{
name: 'epic_show',
description: 'Show epic details with all assigned tasks',
inputSchema: {
type: 'object',
properties: {
id: { type: 'string', description: 'Epic ID (e.g., VPN-E1, ST-E3)' },
},
required: ['id'],
},
},
{
name: 'epic_assign',
description: 'Assign a task to an epic',
inputSchema: {
type: 'object',
properties: {
task_id: { type: 'string', description: 'Task ID to assign' },
epic_id: { type: 'string', description: 'Epic ID to assign to' },
},
required: ['task_id', 'epic_id'],
},
},
{
name: 'epic_close',
description: 'Close an epic (mark as completed)',
inputSchema: {
type: 'object',
properties: {
id: { type: 'string', description: 'Epic ID to close (e.g., VPN-E1, ST-E3)' },
},
required: ['id'],
},
},
// Version Tools
{
name: 'version_add',
description: 'Create a new version/release for a project',
inputSchema: {
type: 'object',
properties: {
project: { type: 'string', description: 'Project key (e.g., VPN, ST)' },
version: { type: 'string', description: 'Version number (e.g., 1.0.0, 2.1.0-beta)' },
build_number: { type: 'number', description: 'Optional build number' },
status: { type: 'string', enum: ['planned', 'in_progress', 'released', 'archived'], description: 'Version status (default: planned)' },
release_notes: { type: 'string', description: 'Optional release notes' },
},
required: ['project', 'version'],
},
},
{
name: 'version_list',
description: 'List versions with optional filters',
inputSchema: {
type: 'object',
properties: {
project: { type: 'string', description: 'Filter by project key' },
status: { type: 'string', enum: ['planned', 'in_progress', 'released', 'archived'], description: 'Filter by status' },
limit: { type: 'number', description: 'Max results (default: 20)' },
},
},
},
{
name: 'version_show',
description: 'Show version details with assigned tasks and epics',
inputSchema: {
type: 'object',
properties: {
id: { type: 'string', description: 'Version ID (e.g., VPN-v1.0.0)' },
},
required: ['id'],
},
},
{
name: 'version_update',
description: 'Update version fields (status, git_tag, git_sha, release_notes)',
inputSchema: {
type: 'object',
properties: {
id: { type: 'string', description: 'Version ID to update' },
status: { type: 'string', enum: ['planned', 'in_progress', 'released', 'archived'], description: 'New status' },
git_tag: { type: 'string', description: 'Git tag name (e.g., v1.0.0)' },
git_sha: { type: 'string', description: 'Git commit SHA for this version' },
release_notes: { type: 'string', description: 'Release notes' },
release_date: { type: 'string', description: 'Release date (ISO format)' },
},
required: ['id'],
},
},
{
name: 'version_release',
description: 'Mark a version as released (sets status and release_date)',
inputSchema: {
type: 'object',
properties: {
id: { type: 'string', description: 'Version ID to release' },
git_tag: { type: 'string', description: 'Optional git tag to associate' },
},
required: ['id'],
},
},
{
name: 'version_assign_task',
description: 'Assign a task to a version',
inputSchema: {
type: 'object',
properties: {
task_id: { type: 'string', description: 'Task ID to assign' },
version_id: { type: 'string', description: 'Version ID to assign to' },
},
required: ['task_id', 'version_id'],
},
},
// Delegation Tools
// Delegation Tools (kept for tracking code generation jobs)
{
name: 'task_delegations',
description: 'List delegations for a specific task (quality scores, backends, status)',
description: 'List delegations for a specific Jira issue (quality scores, backends, status)',
inputSchema: {
type: 'object',
properties: {
task_id: { type: 'string', description: 'Task ID (e.g., ST-123)' },
task_id: { type: 'string', description: 'Jira issue key (e.g., CF-123)' },
},
required: ['task_id'],
},
@@ -352,14 +28,14 @@ export const toolDefinitions = [
},
},
// Commit Tools
// Commit Tools (kept for git-session linking)
{
name: 'task_commit_add',
description: 'Link a git commit to a task (SHA reference only, Gitea MCP has full commit data)',
description: 'Link a git commit to a Jira issue key (SHA reference only)',
inputSchema: {
type: 'object',
properties: {
task_id: { type: 'string', description: 'Task ID (e.g., VPN-123)' },
task_id: { type: 'string', description: 'Jira issue key (e.g., CF-123)' },
commit_sha: { type: 'string', description: 'Git commit SHA (full or short)' },
repo: { type: 'string', description: 'Repository (e.g., christian/VPN)' },
source: { type: 'string', enum: ['manual', 'parsed', 'pr_merge'], description: 'How the link was created (default: manual)' },
@@ -369,11 +45,11 @@ export const toolDefinitions = [
},
{
name: 'task_commit_remove',
description: 'Remove a commit link from a task',
description: 'Remove a commit link from a Jira issue',
inputSchema: {
type: 'object',
properties: {
task_id: { type: 'string', description: 'Task ID' },
task_id: { type: 'string', description: 'Jira issue key' },
commit_sha: { type: 'string', description: 'Commit SHA to unlink' },
},
required: ['task_id', 'commit_sha'],
@@ -381,18 +57,18 @@ export const toolDefinitions = [
},
{
name: 'task_commits_list',
description: 'List commits linked to a task',
description: 'List commits linked to a Jira issue',
inputSchema: {
type: 'object',
properties: {
task_id: { type: 'string', description: 'Task ID' },
task_id: { type: 'string', description: 'Jira issue key' },
},
required: ['task_id'],
},
},
{
name: 'task_link_commits',
description: 'Parse commit messages for task references and create links (batch operation)',
description: 'Parse commit messages for Jira issue references and create links (batch operation)',
inputSchema: {
type: 'object',
properties: {
@@ -416,11 +92,11 @@ export const toolDefinitions = [
},
{
name: 'session_tasks',
description: 'List tasks worked on in a session (from task_activity tracking)',
description: 'List Jira issues worked on in a session (from task_activity tracking)',
inputSchema: {
type: 'object',
properties: {
session_id: { type: 'string', description: 'Session ID (supports * wildcard, e.g., session_20260110_*)' },
session_id: { type: 'string', description: 'Session ID (supports * wildcard)' },
limit: { type: 'number', description: 'Max results (default: 20)' },
},
required: ['session_id'],
@@ -440,7 +116,7 @@ export const toolDefinitions = [
impact: { type: 'string', description: 'Effects on existing infrastructure' },
actions_required: { type: 'string', description: 'Steps developers need to take (optional)' },
session_id: { type: 'string', description: 'Session that implemented change (optional)' },
task_ids: { type: 'array', items: { type: 'string' }, description: 'Related task IDs (optional)' },
task_ids: { type: 'array', items: { type: 'string' }, description: 'Related Jira issue keys (optional)' },
},
required: ['date', 'title', 'change_description', 'impact'],
},
@@ -458,7 +134,7 @@ export const toolDefinitions = [
},
{
name: 'changelog_list',
description: 'List recent infrastructure changes by time period (fallback)',
description: 'List recent infrastructure changes by time period',
inputSchema: {
type: 'object',
properties: {
@@ -468,10 +144,31 @@ export const toolDefinitions = [
},
},
// Event Timeline (CF-2885)
{
name: 'timeline',
description: 'Unified chronological event timeline for a subject. Stitches sessions, notes, commits, plans, and task-commit links from all sessions touching the subject. Subject can be a Jira issue key (e.g., CF-2872), a session id, or a project key (e.g., CF). Returns events sorted oldest → newest.',
inputSchema: {
type: 'object',
properties: {
subject: { type: 'string', description: 'Jira issue key (CF-123), session id, or project key (CF)' },
since: { type: 'string', description: 'ISO8601 timestamp or relative "-7d"/"-24h"/"-30m" (default: -7d)' },
until: { type: 'string', description: 'ISO8601 timestamp (default: now)' },
sources: {
type: 'array',
items: { type: 'string', enum: ['session', 'note', 'commit', 'plan', 'task_commit', 'jira'] },
description: 'Optional filter: which event sources to include (default: all). Jira source pulls issue history (transitions, comments, field changes) via the AgilitonAPI gateway.',
},
limit: { type: 'number', description: 'Max events to return (default: 100)' },
},
required: ['subject'],
},
},
// Project Lock Tools
{
name: 'project_lock',
description: 'Lock a project for exclusive session access. Prevents other sessions from working on it.',
description: 'Lock a project for exclusive session access.',
inputSchema: {
type: 'object',
properties: {
@@ -508,7 +205,7 @@ export const toolDefinitions = [
},
{
name: 'project_context',
description: 'Get project context from current directory - returns detected project, open tasks, epics, and lock status. Use at session start.',
description: 'Get project context from current directory - returns detected project, lock status, recent sessions.',
inputSchema: {
type: 'object',
properties: {},
@@ -522,11 +219,11 @@ export const toolDefinitions = [
inputSchema: {
type: 'object',
properties: {
id: { type: 'string', description: 'Unique component ID (e.g., propertymap-scraper, gridbot-conductor)' },
id: { type: 'string', description: 'Unique component ID' },
name: { type: 'string', description: 'Human-readable name' },
type: { type: 'string', enum: ['service', 'script', 'config', 'database', 'api', 'ui', 'library'], description: 'Component type' },
path: { type: 'string', description: 'File system path or Docker container name' },
repo: { type: 'string', description: 'Git repository (e.g., christian/propertymap)' },
repo: { type: 'string', description: 'Git repository' },
description: { type: 'string', description: 'What this component does' },
health_check: { type: 'string', description: 'Command or URL to check health' },
},
@@ -550,7 +247,7 @@ export const toolDefinitions = [
type: 'object',
properties: {
component_id: { type: 'string', description: 'Source component ID' },
depends_on: { type: 'string', description: 'Target component ID (what source depends on)' },
depends_on: { type: 'string', description: 'Target component ID' },
dependency_type: { type: 'string', enum: ['hard', 'soft', 'config', 'data'], description: 'Type of dependency' },
description: { type: 'string', description: 'Description of the dependency' },
},
@@ -564,7 +261,7 @@ export const toolDefinitions = [
type: 'object',
properties: {
component_id: { type: 'string', description: 'Component ID' },
file_pattern: { type: 'string', description: 'File pattern (e.g., src/services/*.py, docker-compose.yml)' },
file_pattern: { type: 'string', description: 'File pattern (e.g., src/services/*.py)' },
},
required: ['component_id', 'file_pattern'],
},
@@ -576,7 +273,7 @@ export const toolDefinitions = [
type: 'object',
properties: {
component_id: { type: 'string', description: 'Component ID' },
name: { type: 'string', description: 'Check name (e.g., health-endpoint, container-running)' },
name: { type: 'string', description: 'Check name' },
check_type: { type: 'string', enum: ['command', 'http', 'tcp', 'file'], description: 'Type of check' },
check_command: { type: 'string', description: 'Command/URL to execute' },
expected_result: { type: 'string', description: 'Expected output or status' },
@@ -591,33 +288,29 @@ export const toolDefinitions = [
inputSchema: {
type: 'object',
properties: {
changed_files: {
type: 'array',
items: { type: 'string' },
description: 'List of changed file paths',
},
changed_files: { type: 'array', items: { type: 'string' }, description: 'List of changed file paths' },
},
required: ['changed_files'],
},
},
{
name: 'impact_learn',
description: 'Record a learned impact relationship (when we discover a missed dependency)',
description: 'Record a learned impact relationship',
inputSchema: {
type: 'object',
properties: {
changed_component: { type: 'string', description: 'Component that was changed' },
affected_component: { type: 'string', description: 'Component that was unexpectedly affected' },
impact_description: { type: 'string', description: 'What went wrong' },
error_id: { type: 'string', description: 'Related error ID from error memory' },
task_id: { type: 'string', description: 'Related task ID' },
error_id: { type: 'string', description: 'Related error ID' },
task_id: { type: 'string', description: 'Related Jira issue key' },
},
required: ['changed_component', 'affected_component', 'impact_description'],
},
},
{
name: 'component_graph',
description: 'Get component dependency graph (for visualization)',
description: 'Get component dependency graph',
inputSchema: {
type: 'object',
properties: {
@@ -626,62 +319,6 @@ export const toolDefinitions = [
},
},
// Memory Tools
{
name: 'memory_add',
description: 'Store a learning/memory for future sessions. Use at session end to persist insights.',
inputSchema: {
type: 'object',
properties: {
category: { type: 'string', enum: ['pattern', 'fix', 'preference', 'gotcha', 'architecture'], description: 'Memory category' },
title: { type: 'string', description: 'Short title for the memory' },
content: { type: 'string', description: 'The learning/insight to remember' },
context: { type: 'string', description: 'When/where this applies (optional)' },
project: { type: 'string', description: 'Project this relates to (optional)' },
session_id: { type: 'string', description: 'Session ID to link memory to (optional)' },
task_id: { type: 'string', description: 'Task ID to link memory to (optional)' },
},
required: ['category', 'title', 'content'],
},
},
{
name: 'memory_search',
description: 'Search memories semantically. Returns relevant learnings for current context.',
inputSchema: {
type: 'object',
properties: {
query: { type: 'string', description: 'Search query' },
project: { type: 'string', description: 'Filter by project (optional)' },
category: { type: 'string', enum: ['pattern', 'fix', 'preference', 'gotcha', 'architecture'], description: 'Filter by category (optional)' },
limit: { type: 'number', description: 'Max results (default: 5)' },
},
required: ['query'],
},
},
{
name: 'memory_list',
description: 'List stored memories (non-semantic)',
inputSchema: {
type: 'object',
properties: {
project: { type: 'string', description: 'Filter by project (optional)' },
category: { type: 'string', enum: ['pattern', 'fix', 'preference', 'gotcha', 'architecture'], description: 'Filter by category (optional)' },
limit: { type: 'number', description: 'Max results (default: 20)' },
},
},
},
{
name: 'memory_context',
description: 'Get memories relevant to current session context. Use at session start.',
inputSchema: {
type: 'object',
properties: {
project: { type: 'string', description: 'Current project' },
task_description: { type: 'string', description: 'Description of planned work (for semantic matching)' },
},
},
},
// Tool Documentation Tools
{
name: 'tool_doc_add',
@@ -692,10 +329,10 @@ export const toolDefinitions = [
tool_name: { type: 'string', description: 'Tool or command name' },
category: { type: 'string', enum: ['mcp', 'cli', 'script', 'internal', 'deprecated'], description: 'Tool category' },
title: { type: 'string', description: 'Short descriptive title' },
description: { type: 'string', description: 'Detailed description of what the tool does' },
description: { type: 'string', description: 'Detailed description' },
usage_example: { type: 'string', description: 'Usage example (optional)' },
parameters: { type: 'object', description: 'Parameter definitions (optional)' },
notes: { type: 'string', description: 'Additional notes, gotchas, tips (optional)' },
notes: { type: 'string', description: 'Additional notes (optional)' },
tags: { type: 'array', items: { type: 'string' }, description: 'Searchable tags (optional)' },
source_file: { type: 'string', description: 'Original source file (optional)' },
},
@@ -741,7 +378,7 @@ export const toolDefinitions = [
},
{
name: 'tool_doc_export',
description: 'Export all tool documentation as markdown (for backup/migration)',
description: 'Export all tool documentation as markdown',
inputSchema: {
type: 'object',
properties: {},
@@ -751,12 +388,13 @@ export const toolDefinitions = [
// Session Management Tools
{
name: 'session_start',
description: 'Start a new session with metadata tracking',
description: 'Start a new session with metadata tracking. Links to Jira issue key.',
inputSchema: {
type: 'object',
properties: {
session_id: { type: 'string', description: 'Session ID (auto-generated if not provided)' },
project: { type: 'string', description: 'Project key (e.g., CF, VPN)' },
jira_issue_key: { type: 'string', description: 'Jira issue key being worked on (e.g., CF-123)' },
working_directory: { type: 'string', description: 'Current working directory' },
git_branch: { type: 'string', description: 'Current git branch' },
initial_prompt: { type: 'string', description: 'First user message' },
@@ -773,11 +411,7 @@ export const toolDefinitions = [
session_id: { type: 'string', description: 'Session ID to update' },
message_count: { type: 'number', description: 'Number of messages exchanged' },
token_count: { type: 'number', description: 'Total tokens used' },
tools_used: {
type: 'array',
items: { type: 'string' },
description: 'Array of tool names used',
},
tools_used: { type: 'array', items: { type: 'string' }, description: 'Array of tool names used' },
},
required: ['session_id'],
},
@@ -810,20 +444,21 @@ export const toolDefinitions = [
},
{
name: 'session_search',
description: 'Find similar sessions using vector search',
description: 'Find similar sessions using hybrid (vector + keyword), vector-only, or keyword-only search.',
inputSchema: {
type: 'object',
properties: {
query: { type: 'string', description: 'Search query' },
project: { type: 'string', description: 'Filter by project (optional)' },
limit: { type: 'number', description: 'Max results (default: 5)' },
search_mode: { type: 'string', enum: ['hybrid', 'vector', 'keyword'], description: 'Search mode (default: hybrid)' },
},
required: ['query'],
},
},
{
name: 'session_context',
description: 'Get complete context: tasks, commits, builds, memories',
description: 'Get complete context: Jira issues, commits, builds, memories',
inputSchema: {
type: 'object',
properties: {
@@ -834,7 +469,7 @@ export const toolDefinitions = [
},
{
name: 'build_record',
description: 'Record build information linked to session and version',
description: 'Record build information linked to session',
inputSchema: {
type: 'object',
properties: {
@@ -850,13 +485,13 @@ export const toolDefinitions = [
},
{
name: 'session_commit_link',
description: 'Link a commit to a session (automatically called when commits are made)',
description: 'Link a commit to a session',
inputSchema: {
type: 'object',
properties: {
session_id: { type: 'string', description: 'Session ID' },
commit_sha: { type: 'string', description: 'Git commit SHA' },
repo: { type: 'string', description: 'Repository (e.g., christian/ClaudeFramework)' },
repo: { type: 'string', description: 'Repository' },
commit_message: { type: 'string', description: 'Commit message (optional)' },
committed_at: { type: 'string', description: 'Commit timestamp (ISO format, optional)' },
},
@@ -865,7 +500,7 @@ export const toolDefinitions = [
},
{
name: 'session_recover_orphaned',
description: 'Recover abandoned/orphaned sessions (CF-572). Detects sessions active for >2 hours and marks as abandoned',
description: 'Recover abandoned/orphaned sessions (active >2 hours)',
inputSchema: {
type: 'object',
properties: {
@@ -875,7 +510,7 @@ export const toolDefinitions = [
},
{
name: 'session_recover_temp_notes',
description: 'Recover notes from temp files for a specific session (CF-572)',
description: 'Recover notes from temp files for a specific session',
inputSchema: {
type: 'object',
properties: {
@@ -902,7 +537,7 @@ export const toolDefinitions = [
},
{
name: 'session_notes_list',
description: 'List all notes for a session, optionally filtered by type',
description: 'List all notes for a session',
inputSchema: {
type: 'object',
properties: {
@@ -920,7 +555,7 @@ export const toolDefinitions = [
properties: {
session_id: { type: 'string', description: 'Session ID' },
plan_content: { type: 'string', description: 'Plan content in markdown' },
plan_file_name: { type: 'string', description: 'Original filename (e.g., eloquent-yellow-cat.md) - optional' },
plan_file_name: { type: 'string', description: 'Original filename (optional)' },
status: { type: 'string', enum: ['draft', 'approved', 'executed', 'abandoned'], description: 'Plan status (default: draft)' },
},
required: ['session_id', 'plan_content'],
@@ -952,11 +587,11 @@ export const toolDefinitions = [
},
{
name: 'project_doc_upsert',
description: 'Create or update project documentation (replaces CLAUDE.md sections)',
description: 'Create or update project documentation',
inputSchema: {
type: 'object',
properties: {
project: { type: 'string', description: 'Project key (e.g., CF, VPN)' },
project: { type: 'string', description: 'Project key' },
doc_type: { type: 'string', enum: ['overview', 'architecture', 'guidelines', 'history', 'configuration', 'workflow'], description: 'Documentation type' },
title: { type: 'string', description: 'Document title' },
content: { type: 'string', description: 'Document content in markdown' },
@@ -990,7 +625,7 @@ export const toolDefinitions = [
},
{
name: 'session_documentation_generate',
description: 'Auto-generate full markdown documentation for a session (tasks, commits, notes, plans)',
description: 'Auto-generate full markdown documentation for a session',
inputSchema: {
type: 'object',
properties: {
@@ -1001,20 +636,24 @@ export const toolDefinitions = [
},
{
name: 'session_semantic_search',
description: 'Semantic search across all session documentation using vector similarity',
description: 'Search across all session documentation using hybrid (vector + keyword), vector-only, or keyword-only search. Supports optional metadata filters (topics, projects, issue_keys) — only use filters when the user explicitly mentions a topic/project. When unsure, search without filters.',
inputSchema: {
type: 'object',
properties: {
query: { type: 'string', description: 'Search query' },
project: { type: 'string', description: 'Filter by project (optional)' },
limit: { type: 'number', description: 'Max results (default: 10)' },
search_mode: { type: 'string', enum: ['hybrid', 'vector', 'keyword'], description: 'Search mode (default: hybrid)' },
filter_topics: { type: 'array', items: { type: 'string' }, description: 'Filter by extracted topics (e.g., ["pgvector", "deployment"]). Only use when user explicitly mentions topics.' },
filter_projects: { type: 'array', items: { type: 'string' }, description: 'Filter by extracted project keys (e.g., ["CF", "BAB"]). Only use when user explicitly mentions projects.' },
filter_issue_keys: { type: 'array', items: { type: 'string' }, description: 'Filter by extracted Jira issue keys (e.g., ["CF-1307"]). Only use when user explicitly mentions issue keys.' },
},
required: ['query'],
},
},
{
name: 'session_productivity_analytics',
description: 'Get productivity metrics (avg duration, tasks/commits per session, etc.)',
description: 'Get productivity metrics',
inputSchema: {
type: 'object',
properties: {
@@ -1025,30 +664,47 @@ export const toolDefinitions = [
},
{
name: 'session_pattern_detection',
description: 'Detect patterns across sessions (tool usage, task types)',
description: 'Detect patterns across sessions',
inputSchema: {
type: 'object',
properties: {
project: { type: 'string', description: 'Filter by project (optional)' },
pattern_type: { type: 'string', enum: ['tool_usage', 'task_types', 'error_frequency'], description: 'Type of pattern to detect (default: tool_usage)' },
pattern_type: { type: 'string', enum: ['tool_usage', 'task_types', 'error_frequency'], description: 'Type of pattern to detect' },
},
},
},
// Transcript Tools (CF-2394)
{
name: 'session_transcript_search',
description: 'Search session transcripts (JSONL) using hybrid (vector + keyword) search. Finds past sessions by content — commands run, decisions made, plans discussed. Use when recovering context from prior sessions.',
inputSchema: {
type: 'object',
properties: {
query: { type: 'string', description: 'Search query (e.g., "hetzner disk resize", "auth migration plan")' },
project: { type: 'string', description: 'Filter by project key (optional)' },
session_issue_key: { type: 'string', description: 'Filter by session Jira issue key (optional)' },
limit: { type: 'number', description: 'Max results (default: 10)' },
search_mode: { type: 'string', enum: ['hybrid', 'vector', 'keyword'], description: 'Search mode (default: hybrid)' },
},
required: ['query'],
},
},
// Archive Tools
{
name: 'archive_add',
description: 'Archive content to database with semantic embedding. Replaces filesystem archives.',
description: 'Archive content to database with semantic embedding.',
inputSchema: {
type: 'object',
properties: {
project: { type: 'string', description: 'Project key (e.g., CF, VPN)' },
project: { type: 'string', description: 'Project key' },
archive_type: { type: 'string', enum: ['session', 'research', 'audit', 'investigation', 'completed', 'migration'], description: 'Archive type' },
title: { type: 'string', description: 'Archive title' },
content: { type: 'string', description: 'Archive content (markdown)' },
original_path: { type: 'string', description: 'Original file path (optional)' },
file_size: { type: 'number', description: 'File size in bytes (optional)' },
archived_by_session: { type: 'string', description: 'Session ID that archived it (optional)' },
archived_by_session: { type: 'string', description: 'Session ID (optional)' },
metadata: { type: 'object', description: 'Additional metadata (optional)' },
},
required: ['project', 'archive_type', 'title', 'content'],
@@ -1056,7 +712,7 @@ export const toolDefinitions = [
},
{
name: 'archive_search',
description: 'Search archives using semantic similarity',
description: 'Search archives using hybrid (vector + keyword), vector-only, or keyword-only search.',
inputSchema: {
type: 'object',
properties: {
@@ -1064,6 +720,7 @@ export const toolDefinitions = [
project: { type: 'string', description: 'Filter by project (optional)' },
archive_type: { type: 'string', enum: ['session', 'research', 'audit', 'investigation', 'completed', 'migration'], description: 'Filter by archive type (optional)' },
limit: { type: 'number', description: 'Max results (default: 5)' },
search_mode: { type: 'string', enum: ['hybrid', 'vector', 'keyword'], description: 'Search mode (default: hybrid)' },
},
required: ['query'],
},
@@ -1096,11 +753,11 @@ export const toolDefinitions = [
// Project Archival
{
name: 'project_archive',
description: 'Archive complete project to S3 with database tracking. Creates tarball, uploads to s3://agiliton-archive/projects/, updates database, and optionally deletes local copy.',
description: 'Archive complete project to S3 with database tracking.',
inputSchema: {
type: 'object',
properties: {
project_key: { type: 'string', description: 'Project key (must exist in database)' },
project_key: { type: 'string', description: 'Project key' },
project_path: { type: 'string', description: 'Absolute path to project directory' },
delete_local: { type: 'boolean', description: 'Delete local project after successful archive (default: false)' },
session_id: { type: 'string', description: 'Session ID performing the archival (optional)' },

View File

@@ -1,275 +0,0 @@
// Session memory operations for persistent learnings
import { query, queryOne, execute } from '../db.js';
import { getEmbedding, formatEmbedding } from '../embeddings.js';
// Allowed memory categories — mirrors the enum exposed in the memory_* MCP
// tools' inputSchema definitions.
type MemoryCategory = 'pattern' | 'fix' | 'preference' | 'gotcha' | 'architecture';

// Row shape of the `memories` table as selected by the queries in this module.
interface Memory {
  id: number;
  category: MemoryCategory;
  title: string;
  content: string;
  context: string | null;     // when/where the learning applies (optional free text)
  project: string | null;     // project key, or NULL for global memories
  session_id: string | null;  // originating session; validated against sessions.id (CF-306)
  task_id: string | null;     // related task/issue id
  access_count: number;       // incremented each time memorySearch returns this row
  created_at: string;         // pre-formatted 'YYYY-MM-DD' via to_char in the SELECTs
}

// Arguments for memoryAdd().
interface MemoryAddArgs {
  category: MemoryCategory;
  title: string;
  content: string;
  context?: string;
  project?: string;
  session_id?: string;
  task_id?: string;
}

// Arguments for memorySearch().
interface MemorySearchArgs {
  query: string;
  project?: string;
  category?: MemoryCategory;
  limit?: number;  // default 5
}

// Arguments for memoryList().
interface MemoryListArgs {
  project?: string;
  category?: MemoryCategory;
  limit?: number;  // default 20
}
/**
 * Store a new memory/learning for future sessions.
 *
 * CF-306: validates that session_id exists before inserting, to prevent
 * foreign-key violations on memories.session_id; an unknown session is
 * downgraded to NULL with a warning rather than failing the insert.
 *
 * @param args category/title/content are required; context, project,
 *             session_id and task_id are optional metadata.
 * @returns confirmation string "Stored memory: [category] title"
 */
export async function memoryAdd(args: MemoryAddArgs): Promise<string> {
  const { category, title, content, context, project, session_id, task_id } = args;

  // CF-306: only reference sessions that actually exist.
  let validSessionId = session_id || null;
  if (session_id) {
    const sessionExists = await queryOne<{ exists: boolean }>(
      `SELECT EXISTS(SELECT 1 FROM sessions WHERE id = $1) as exists`,
      [session_id]
    );
    if (!sessionExists?.exists) {
      console.warn(`[CF-306] Session ${session_id} not found in database - using NULL instead`);
      validSessionId = null;
    }
  }

  // Embed "title. content" for semantic search; embedding generation may
  // fail/be unavailable, in which case the row is stored without a vector.
  const embedText = `${title}. ${content}`;
  const embedding = await getEmbedding(embedText);
  const embeddingValue = embedding ? formatEmbedding(embedding) : null;

  // Shared parameter list keeps the two INSERT variants in lockstep
  // (previously duplicated, which invited drift between the branches).
  const baseParams = [category, title, content, context || null, project || null, validSessionId, task_id || null];

  if (embeddingValue) {
    await execute(
      `INSERT INTO memories (category, title, content, context, project, session_id, task_id, embedding)
       VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`,
      [...baseParams, embeddingValue]
    );
  } else {
    await execute(
      `INSERT INTO memories (category, title, content, context, project, session_id, task_id)
       VALUES ($1, $2, $3, $4, $5, $6, $7)`,
      baseParams
    );
  }

  return `Stored memory: [${category}] ${title}`;
}
/**
 * Search memories semantically via pgvector cosine distance (`<=>`).
 *
 * BUG FIX: the previous version pre-seeded params as [embedding, limit] and
 * spliced optional filter values in *before* limit, while the SQL referenced
 * `LIMIT $2` and filters at `$3+`. With node-postgres, $n binds by array
 * position, so whenever `project` or `category` was supplied the limit and
 * filter values were swapped. Filters are now appended in order and the
 * LIMIT placeholder is computed last, so placeholders always match positions.
 *
 * Side effect: bumps access_count / last_accessed_at on every returned row.
 *
 * @returns markdown list of matches, or a "no results" / error message.
 */
export async function memorySearch(args: MemorySearchArgs): Promise<string> {
  const { query: searchQuery, project, category, limit = 5 } = args;

  // Generate embedding for the query text.
  const embedding = await getEmbedding(searchQuery);
  if (!embedding) {
    return 'Error: Could not generate embedding for search';
  }
  const embeddingStr = formatEmbedding(embedding);

  // $1 = embedding; optional filters appended in order; limit is last.
  let whereClause = 'WHERE embedding IS NOT NULL';
  const params: unknown[] = [embeddingStr];
  let paramIndex = 2;
  if (project) {
    whereClause += ` AND (project = $${paramIndex++} OR project IS NULL)`;
    params.push(project);
  }
  if (category) {
    whereClause += ` AND category = $${paramIndex++}`;
    params.push(category);
  }
  params.push(limit);

  const memories = await query<Memory & { similarity: number }>(
    `SELECT id, category, title, content, context, project, access_count,
            to_char(created_at, 'YYYY-MM-DD') as created_at,
            1 - (embedding <=> $1) as similarity
     FROM memories
     ${whereClause}
     ORDER BY embedding <=> $1
     LIMIT $${paramIndex}`,
    params
  );

  if (memories.length === 0) {
    return 'No relevant memories found';
  }

  // Track usage so frequently-useful memories rank higher in memoryContext.
  const ids = memories.map(m => m.id);
  await execute(
    `UPDATE memories SET access_count = access_count + 1, last_accessed_at = NOW() WHERE id = ANY($1)`,
    [ids]
  );

  const lines = ['Relevant memories:\n'];
  for (const m of memories) {
    const sim = Math.round(m.similarity * 100);
    const proj = m.project ? ` [${m.project}]` : '';
    lines.push(`**[${m.category}]${proj}** ${m.title} (${sim}% match)`);
    lines.push(`  ${m.content}`);
    if (m.context) {
      lines.push(`  _Context: ${m.context}_`);
    }
    lines.push('');
  }
  return lines.join('\n');
}
/**
 * List stored memories chronologically (no semantic ranking).
 * Optional project/category filters; project filter also includes
 * project-less (global) memories.
 */
export async function memoryList(args: MemoryListArgs): Promise<string> {
  const { project, category, limit = 20 } = args;

  // Collect filter clauses; placeholder numbers follow params.length so
  // positions and $n always agree.
  const conditions: string[] = ['1=1'];
  const params: unknown[] = [];
  if (project) {
    params.push(project);
    conditions.push(`(project = $${params.length} OR project IS NULL)`);
  }
  if (category) {
    params.push(category);
    conditions.push(`category = $${params.length}`);
  }
  params.push(limit);

  const memories = await query<Memory>(
    `SELECT id, category, title, content, context, project, access_count,
            to_char(created_at, 'YYYY-MM-DD') as created_at
     FROM memories
     WHERE ${conditions.join(' AND ')}
     ORDER BY created_at DESC
     LIMIT $${params.length}`,
    params
  );

  if (memories.length === 0) {
    return `No memories found${project ? ` for project ${project}` : ''}`;
  }

  const lines = [`Memories${project ? ` (${project})` : ''}:\n`];
  for (const m of memories) {
    const proj = m.project ? `[${m.project}] ` : '';
    const accessed = m.access_count > 0 ? ` (accessed ${m.access_count}x)` : '';
    lines.push(`• [${m.category}] ${proj}${m.title}${accessed}`);
    // Truncate long content to keep the listing scannable.
    lines.push(`  ${m.content.slice(0, 100)}${m.content.length > 100 ? '...' : ''}`);
  }
  return lines.join('\n');
}
/**
 * Delete a memory by its numeric id.
 * @returns confirmation, or a not-found message when no row was removed.
 */
export async function memoryDelete(id: number): Promise<string> {
  const deleted = await execute('DELETE FROM memories WHERE id = $1', [id]);
  return deleted === 0 ? `Memory not found: ${id}` : `Deleted memory: ${id}`;
}
/**
* Get memories relevant to current context (for session start)
*/
export async function memoryContext(project: string | null, taskDescription?: string): Promise<string> {
const lines: string[] = [];
// Get project-specific memories
if (project) {
const projectMemories = await query<Memory>(
`SELECT category, title, content FROM memories
WHERE project = $1
ORDER BY access_count DESC, created_at DESC
LIMIT 5`,
[project]
);
if (projectMemories.length > 0) {
lines.push(`**${project} Memories:**`);
for (const m of projectMemories) {
lines.push(`• [${m.category}] ${m.title}: ${m.content}`);
}
lines.push('');
}
}
// If task description provided, do semantic search
if (taskDescription) {
const embedding = await getEmbedding(taskDescription);
if (embedding) {
const relevant = await query<Memory>(
`SELECT category, title, content, project
FROM memories
WHERE embedding IS NOT NULL
ORDER BY embedding <=> $1
LIMIT 3`,
[formatEmbedding(embedding)]
);
if (relevant.length > 0) {
lines.push('**Relevant memories for this task:**');
for (const m of relevant) {
const proj = m.project ? `[${m.project}] ` : '';
lines.push(`${proj}${m.title}: ${m.content}`);
}
}
}
}
// Get recent gotchas (always useful)
const gotchas = await query<Memory>(
`SELECT title, content FROM memories
WHERE category = 'gotcha'
ORDER BY created_at DESC
LIMIT 3`,
[]
);
if (gotchas.length > 0) {
lines.push('\n**Recent gotchas:**');
for (const g of gotchas) {
lines.push(`⚠️ ${g.title}: ${g.content}`);
}
}
return lines.length > 0 ? lines.join('\n') : 'No memories to surface';
}

View File

@@ -43,9 +43,9 @@ async function getS3Credentials(): Promise<{
endpoint: string;
}> {
try {
const { stdout: accessKey } = await execAsync('vault get hetzner.s3_access_key');
const { stdout: secretKey } = await execAsync('vault get hetzner.s3_secret_key');
const { stdout: endpoint } = await execAsync('vault get hetzner.s3_endpoint');
const { stdout: accessKey } = await execAsync('vault get ag.org.s3.access_key');
const { stdout: secretKey } = await execAsync('vault get ag.org.s3.secret_key');
const { stdout: endpoint } = await execAsync('vault get ag.org.s3.endpoint');
return {
accessKey: accessKey.trim(),

View File

@@ -1,142 +0,0 @@
// Task relations: dependencies and checklists
import { query, queryOne, execute } from '../db.js';
// Arguments for taskLink(): create a typed link between two tasks.
interface TaskLinkArgs {
  from_id: string;
  to_id: string;
  link_type: string;  // e.g. 'blocks' (directed), 'relates_to'/'duplicates' (symmetric)
}

// Arguments for checklistAdd().
interface ChecklistAddArgs {
  task_id: string;
  item: string;  // checklist item text
}

// Arguments for checklistToggle().
interface ChecklistToggleArgs {
  item_id: number;   // task_checklist.id
  checked: boolean;  // new checked state
}
/**
 * Create a dependency between tasks.
 * - blocks: unidirectional (A blocks B)
 * - relates_to: bidirectional (A relates to B = B relates to A)
 * - duplicates: bidirectional (A duplicates B = B duplicates A)
 * Inserts are idempotent via ON CONFLICT DO NOTHING.
 */
export async function taskLink(args: TaskLinkArgs): Promise<string> {
  const { from_id, to_id, link_type } = args;
  try {
    const insertLink =
      `INSERT INTO task_links (from_task_id, to_task_id, link_type)
       VALUES ($1, $2, $3)
       ON CONFLICT (from_task_id, to_task_id, link_type) DO NOTHING`;

    // Forward link always; mirror link only for symmetric relationship types.
    const pairs: Array<[string, string]> = [[from_id, to_id]];
    if (link_type === 'relates_to' || link_type === 'duplicates') {
      pairs.push([to_id, from_id]);
    }
    for (const [src, dst] of pairs) {
      await execute(insertLink, [src, dst, link_type]);
    }

    return `Linked: ${from_id} ${link_type} ${to_id}`;
  } catch (error) {
    return `Error creating link: ${error}`;
  }
}
/**
 * Append a checklist item to a task, placed after the current last item.
 */
export async function checklistAdd(args: ChecklistAddArgs): Promise<string> {
  const { task_id, item } = args;

  // Next free slot = max existing position + 1 (COALESCE yields 1 for an
  // empty checklist).
  const slot = await queryOne<{ max: number }>(
    `SELECT COALESCE(MAX(position), 0) + 1 as max
     FROM task_checklist WHERE task_id = $1`,
    [task_id]
  );

  await execute(
    `INSERT INTO task_checklist (task_id, item, position)
     VALUES ($1, $2, $3)`,
    [task_id, item, slot?.max || 1]
  );

  return `Added to ${task_id}: ${item}`;
}
/**
 * Set a checklist item's checked state.
 * @returns confirmation, or a not-found message when the id matches no row.
 */
export async function checklistToggle(args: ChecklistToggleArgs): Promise<string> {
  const { item_id, checked } = args;

  const updated = await execute(
    `UPDATE task_checklist SET checked = $1 WHERE id = $2`,
    [checked, item_id]
  );
  if (updated === 0) {
    return `Checklist item not found: ${item_id}`;
  }

  return `${checked ? 'Checked' : 'Unchecked'}: item #${item_id}`;
}
// Arguments for taskResolveDuplicate().
interface ResolveDuplicateArgs {
  duplicate_id: string;  // task to close as a duplicate
  dominant_id: string;   // task that survives
}

/**
 * Resolve a duplicate issue by closing it and linking to the dominant issue.
 * - Closes the duplicate task (sets status to completed)
 * - Creates bidirectional "duplicates" link between the two tasks
 *
 * FIX: the result message previously concatenated the two ids with no
 * separator ("Resolved duplicate: A-1B-2"); it now renders "A-1 → B-2".
 */
export async function taskResolveDuplicate(args: ResolveDuplicateArgs): Promise<string> {
  const { duplicate_id, dominant_id } = args;
  try {
    // Close the duplicate task first; bail out if it doesn't exist.
    const closeResult = await execute(
      `UPDATE tasks
       SET status = 'completed', completed_at = NOW(), updated_at = NOW()
       WHERE id = $1`,
      [duplicate_id]
    );
    if (closeResult === 0) {
      return `Duplicate task not found: ${duplicate_id}`;
    }

    // Bidirectional "duplicates" link, idempotent in both directions.
    const insertLink =
      `INSERT INTO task_links (from_task_id, to_task_id, link_type)
       VALUES ($1, $2, 'duplicates')
       ON CONFLICT (from_task_id, to_task_id, link_type) DO NOTHING`;
    await execute(insertLink, [duplicate_id, dominant_id]);
    await execute(insertLink, [dominant_id, duplicate_id]);

    return `Resolved duplicate: ${duplicate_id} → ${dominant_id}\n  Closed: ${duplicate_id}\n  Linked: duplicates → ${dominant_id}`;
  } catch (error) {
    return `Error resolving duplicate: ${error}`;
  }
}

View File

@@ -1,245 +0,0 @@
// Semantic search operations
import { query, queryOne, getProjectKey } from '../db.js';
import { getEmbedding, formatEmbedding } from '../embeddings.js';
import type { SimilarTask } from '../types.js';
// Row shapes for the session-context queries below.
interface SessionNote {
  note_type: string;
  content: string;
  created_at: string; // formatted as HH24:MI by the query that fills it
}
interface SessionTask {
  id: string;
  title: string;
  status: string;
  priority: string;
}
interface SessionCommit {
  commit_hash: string;
  commit_message: string;
}
/** Arguments for taskSessionContext. */
interface TaskSessionContextArgs {
  id: string; // task id to look up
}
/**
 * Get session context for a task — retrieves notes, related tasks, and commits
 * from the session where the task was created, rendered as a markdown report.
 * @param args.id task id to look up
 * @returns markdown string, or a "not found" message when the task is missing
 */
export async function taskSessionContext(args: TaskSessionContextArgs): Promise<string> {
  const { id } = args;
  // Get task with session info
  const task = await queryOne<{
    id: string;
    title: string;
    description: string;
    session_id: string;
  }>(
    `SELECT t.id, t.title, t.description, t.session_id
     FROM tasks t
     WHERE t.id = $1`,
    [id]
  );
  if (!task) {
    return `Task not found: ${id}`;
  }
  if (!task.session_id) {
    // Pre-session-tracking tasks have no linkage; explain rather than fail.
    return `# Context for ${id}\n\n**Task:** ${task.title}\n\n⚠ No session linked to this task. Task was created before session tracking was implemented or via direct database insert.\n\n${task.description ? `**Description:**\n${task.description}` : ''}`;
  }
  // Get session info
  const session = await queryOne<{
    session_number: number;
    summary: string;
    started_at: string;
  }>(
    `SELECT session_number, summary, to_char(started_at, 'YYYY-MM-DD HH24:MI') as started_at
     FROM sessions
     WHERE id = $1`,
    [task.session_id]
  );
  let output = `# Context for ${id}\n\n`;
  output += `**Task:** ${task.title}\n`;
  if (session) {
    output += `**Created in Session:** #${session.session_number} (${session.started_at})\n`;
    if (session.summary) {
      output += `\n## Session Summary\n${session.summary}\n`;
    }
  } else {
    // Dangling session_id — session row deleted or never synced.
    output += `**Session ID:** ${task.session_id} (session record not found)\n`;
  }
  if (task.description) {
    output += `\n## Task Description\n${task.description}\n`;
  }
  // Get session notes
  const notes = await query<SessionNote>(
    `SELECT note_type, content, to_char(created_at, 'HH24:MI') as created_at
     FROM session_notes
     WHERE session_id = $1
     ORDER BY created_at`,
    [task.session_id]
  );
  if (notes.length > 0) {
    output += `\n## Session Notes\n`;
    for (const note of notes) {
      output += `- **[${note.note_type}]** ${note.content}\n`;
    }
  }
  // Get related tasks from same session
  const relatedTasks = await query<SessionTask>(
    `SELECT id, title, status, priority
     FROM tasks
     WHERE session_id = $1 AND id != $2
     ORDER BY created_at`,
    [task.session_id, id]
  );
  if (relatedTasks.length > 0) {
    output += `\n## Other Tasks from Same Session\n`;
    for (const t of relatedTasks) {
      // ✓ done, ▶ in progress, ○ anything else
      const statusIcon = t.status === 'completed' ? '✓' : t.status === 'in_progress' ? '▶' : '○';
      output += `- ${statusIcon} [${t.priority}] ${t.id}: ${t.title}\n`;
    }
  }
  // Get commits from session (deduped, newest first, capped at 10).
  // Fix: the previous query used SELECT DISTINCT with ORDER BY committed_at,
  // which Postgres rejects ("for SELECT DISTINCT, ORDER BY expressions must
  // appear in select list"). GROUP BY + MAX(committed_at) keeps the dedup
  // semantics AND recency ordering.
  const commits = await query<SessionCommit>(
    `SELECT commit_hash, commit_message
     FROM task_commits
     WHERE task_id IN (SELECT id FROM tasks WHERE session_id = $1)
     GROUP BY commit_hash, commit_message
     ORDER BY MAX(committed_at) DESC
     LIMIT 10`,
    [task.session_id]
  );
  if (commits.length > 0) {
    output += `\n## Commits from Session\n`;
    for (const c of commits) {
      output += `- \`${c.commit_hash}\` ${c.commit_message}\n`;
    }
  }
  return output;
}
/** Arguments for taskSimilar. */
interface TaskSimilarArgs {
  query: string; // free-text search query
  project?: string; // optional project filter
  limit?: number; // max results (default 5)
}
/** Arguments for taskContext. */
interface TaskContextArgs {
  description: string; // current work description to match against
  project?: string; // optional project filter
  limit?: number; // max results (default 3)
}
/**
* Find semantically similar tasks using pgvector
*/
export async function taskSimilar(args: TaskSimilarArgs): Promise<string> {
const { query: searchQuery, project, limit = 5 } = args;
// Generate embedding for the query
const embedding = await getEmbedding(searchQuery);
if (!embedding) {
return 'Error: Could not generate embedding for search query';
}
const embeddingStr = formatEmbedding(embedding);
let whereClause = 'WHERE embedding IS NOT NULL';
const params: unknown[] = [embeddingStr, limit];
let paramIndex = 3;
if (project) {
const projectKey = await getProjectKey(project);
whereClause += ` AND project = $${paramIndex}`;
params.push(projectKey);
}
const results = await query<SimilarTask>(
`SELECT id, title, type, status, priority,
1 - (embedding <=> $1) as similarity
FROM tasks
${whereClause}
ORDER BY embedding <=> $1
LIMIT $2`,
params
);
if (results.length === 0) {
return 'No similar tasks found';
}
const lines = results.map(t => {
const pct = Math.round(t.similarity * 100);
const statusIcon = t.status === 'completed' ? '[x]' : t.status === 'in_progress' ? '[>]' : '[ ]';
return `${statusIcon} ${pct}% ${t.id}: ${t.title} [${t.type}] [${t.priority}]`;
});
return `Similar tasks for "${searchQuery}":\n\n${lines.join('\n')}`;
}
/**
* Get related tasks for current work context
* Returns markdown suitable for injection into delegations
*/
export async function taskContext(args: TaskContextArgs): Promise<string> {
const { description, project, limit = 3 } = args;
// Generate embedding for the description
const embedding = await getEmbedding(description);
if (!embedding) {
return '';
}
const embeddingStr = formatEmbedding(embedding);
let whereClause = 'WHERE embedding IS NOT NULL AND status != \'completed\'';
const params: unknown[] = [embeddingStr, limit];
let paramIndex = 3;
if (project) {
const projectKey = await getProjectKey(project);
whereClause += ` AND project = $${paramIndex}`;
params.push(projectKey);
}
const results = await query<SimilarTask>(
`SELECT id, title, type, status, priority,
1 - (embedding <=> $1) as similarity
FROM tasks
${whereClause}
ORDER BY embedding <=> $1
LIMIT $2`,
params
);
if (results.length === 0) {
return '';
}
// Format as markdown for delegation context
let output = '## Related Tasks\n\n';
for (const t of results) {
const pct = Math.round(t.similarity * 100);
output += `- **${t.id}**: ${t.title} (${pct}% match, ${t.priority}, ${t.status})\n`;
}
return output;
}

View File

@@ -2,7 +2,8 @@
// Replaces file-based CLAUDE.md and plan files with database storage
import { query, queryOne, execute } from '../db.js';
import { getEmbedding, formatEmbedding } from '../embeddings.js';
import { getEmbedding, formatEmbedding, generateContentHash, rrfMerge, rerank } from '../embeddings.js';
import { getSessionId } from './session-id.js';
// ============================================================================
// SESSION NOTES
@@ -32,16 +33,28 @@ interface SessionNote {
* Auto-generates embedding for semantic search
*/
export async function sessionNoteAdd(args: SessionNoteAddArgs): Promise<string> {
const { session_id, note_type, content } = args;
const { session_id: providedSessionId, note_type, content } = args;
const session_id = providedSessionId || getSessionId();
// CF-1314: Hash content for dedup before embedding API call
const contentHash = generateContentHash(content);
const existing = await queryOne<{ id: number }>(
'SELECT id FROM session_notes WHERE content_hash = $1 AND session_id = $2 LIMIT 1',
[contentHash, session_id]
);
if (existing) {
return `Note already exists (id: ${existing.id}) in session ${session_id}`;
}
// Generate embedding for semantic search
const embedding = await getEmbedding(content);
const embeddingFormatted = embedding ? formatEmbedding(embedding) : null;
await execute(
`INSERT INTO session_notes (session_id, note_type, content, embedding)
VALUES ($1, $2, $3, $4)`,
[session_id, note_type, content, embeddingFormatted]
`INSERT INTO session_notes (session_id, note_type, content, embedding, content_hash)
VALUES ($1, $2, $3, $4, $5)`,
[session_id, note_type, content, embeddingFormatted, contentHash]
);
return `Note added to session ${session_id} (type: ${note_type})`;
@@ -111,15 +124,26 @@ interface SessionPlan {
export async function sessionPlanSave(args: SessionPlanSaveArgs): Promise<string> {
const { session_id, plan_content, plan_file_name, status = 'draft' } = args;
// CF-1314: Hash content for dedup before embedding API call
const contentHash = generateContentHash(plan_content);
const existing = await queryOne<{ id: number }>(
'SELECT id FROM session_plans WHERE content_hash = $1 AND session_id = $2 LIMIT 1',
[contentHash, session_id]
);
if (existing) {
return `Plan already exists (id: ${existing.id}) in session ${session_id}`;
}
// Generate embedding for semantic search
const embedding = await getEmbedding(plan_content);
const embeddingFormatted = embedding ? formatEmbedding(embedding) : null;
const result = await queryOne<{ id: number }>(
`INSERT INTO session_plans (session_id, plan_file_name, plan_content, status, embedding)
VALUES ($1, $2, $3, $4, $5)
`INSERT INTO session_plans (session_id, plan_file_name, plan_content, status, embedding, content_hash)
VALUES ($1, $2, $3, $4, $5, $6)
RETURNING id`,
[session_id, plan_file_name || null, plan_content, status, embeddingFormatted]
[session_id, plan_file_name || null, plan_content, status, embeddingFormatted, contentHash]
);
const planId = result?.id || 0;
@@ -427,10 +451,16 @@ export async function sessionDocumentationGenerate(args: SessionDocumentationGen
// SEMANTIC SEARCH & ANALYTICS
// ============================================================================
type SearchMode = 'hybrid' | 'vector' | 'keyword';
interface SessionSemanticSearchArgs {
query: string;
project?: string;
limit?: number;
search_mode?: SearchMode;
filter_topics?: string[];
filter_projects?: string[];
filter_issue_keys?: string[];
}
interface SessionSearchResult {
@@ -443,60 +473,120 @@ interface SessionSearchResult {
}
/**
* Semantic search across all session documentation
* Uses vector similarity to find related sessions
* Semantic search across all session documentation with hybrid/vector/keyword modes (CF-1315)
*/
export async function sessionSemanticSearch(args: SessionSemanticSearchArgs): Promise<SessionSearchResult[]> {
const { query: searchQuery, project, limit = 10 } = args;
const { query: searchQuery, project, limit = 10, search_mode = 'hybrid', filter_topics, filter_projects, filter_issue_keys } = args;
// Generate embedding for search query
const queryEmbedding = await getEmbedding(searchQuery);
// Build shared filter clause (CF-1316: metadata filters via JSONB @> containment)
const buildFilter = (startIdx: number) => {
let where = '';
const params: unknown[] = [];
let idx = startIdx;
if (project) {
where += ` AND s.project = $${idx++}`;
params.push(project);
}
if (filter_topics && filter_topics.length > 0) {
where += ` AND s.extracted_metadata->'topics' @> $${idx++}::jsonb`;
params.push(JSON.stringify(filter_topics));
}
if (filter_projects && filter_projects.length > 0) {
where += ` AND s.extracted_metadata->'projects' @> $${idx++}::jsonb`;
params.push(JSON.stringify(filter_projects));
}
if (filter_issue_keys && filter_issue_keys.length > 0) {
where += ` AND s.extracted_metadata->'issue_keys' @> $${idx++}::jsonb`;
params.push(JSON.stringify(filter_issue_keys));
}
return { where, params, nextIdx: idx };
};
if (!queryEmbedding) {
// Fallback to text search if embedding generation fails
let sql = `
SELECT
s.id as session_id,
s.session_number,
s.project,
s.summary,
s.started_at,
0.5 as similarity
FROM sessions s
WHERE s.summary IS NOT NULL
AND s.status = 'completed'
${project ? 'AND s.project = $1' : ''}
AND s.summary ILIKE $${project ? '2' : '1'}
ORDER BY s.started_at DESC
LIMIT $${project ? '3' : '2'}
`;
// Vector search
let vectorIds: string[] = [];
let vectorRows: Map<string, SessionSearchResult> = new Map();
let embeddingFailed = false;
const params: unknown[] = project ? [project, `%${searchQuery}%`, limit] : [`%${searchQuery}%`, limit];
const results = await query<SessionSearchResult>(sql, params);
return results;
if (search_mode !== 'keyword') {
const queryEmbedding = await getEmbedding(searchQuery);
if (queryEmbedding) {
const embeddingFormatted = formatEmbedding(queryEmbedding);
const filter = buildFilter(3);
const params: unknown[] = [embeddingFormatted, limit, ...filter.params];
const rows = await query<SessionSearchResult>(
`SELECT s.id as session_id, s.session_number, s.project, s.summary, s.started_at,
1 - (s.embedding <=> $1) as similarity
FROM sessions s
WHERE s.embedding IS NOT NULL AND s.status = 'completed'${filter.where}
ORDER BY s.embedding <=> $1
LIMIT $2`,
params
);
vectorIds = rows.map(r => r.session_id);
for (const r of rows) vectorRows.set(r.session_id, r);
} else {
embeddingFailed = true;
if (search_mode === 'vector') {
return [];
}
}
}
const embeddingFormatted = formatEmbedding(queryEmbedding);
// Keyword search
let keywordIds: string[] = [];
let keywordRows: Map<string, SessionSearchResult> = new Map();
// Vector similarity search
let sql = `
SELECT
s.id as session_id,
s.session_number,
s.project,
s.summary,
s.started_at,
1 - (s.embedding <=> $1) as similarity
FROM sessions s
WHERE s.embedding IS NOT NULL
${project ? 'AND s.project = $2' : ''}
AND s.status = 'completed'
ORDER BY s.embedding <=> $1
LIMIT $${project ? '3' : '2'}
`;
if (search_mode !== 'vector') {
const filter = buildFilter(3);
const params: unknown[] = [searchQuery, limit, ...filter.params];
const params: unknown[] = project ? [embeddingFormatted, project, limit] : [embeddingFormatted, limit];
const results = await query<SessionSearchResult>(sql, params);
const rows = await query<SessionSearchResult & { rank: number }>(
`SELECT s.id as session_id, s.session_number, s.project, s.summary, s.started_at,
ts_rank(s.search_vector, plainto_tsquery('english', $1)) as similarity
FROM sessions s
WHERE s.search_vector @@ plainto_tsquery('english', $1)
AND s.status = 'completed'${filter.where}
ORDER BY similarity DESC
LIMIT $2`,
params
);
keywordIds = rows.map(r => r.session_id);
for (const r of rows) keywordRows.set(r.session_id, r);
}
// Merge results
let finalIds: string[];
if (search_mode === 'hybrid' && vectorIds.length > 0 && keywordIds.length > 0) {
const merged = rrfMerge(vectorIds, keywordIds);
finalIds = merged.map(m => m.id as string);
// Cross-encoder re-ranking (CF-1317)
const docs = finalIds.map(id => {
const r = vectorRows.get(id) || keywordRows.get(id);
return r?.summary || '';
});
const reranked = await rerank(searchQuery, docs, limit);
if (reranked) {
finalIds = reranked.map(r => finalIds[r.index]);
} else {
finalIds = finalIds.slice(0, limit);
}
} else if (vectorIds.length > 0) {
finalIds = vectorIds;
} else if (keywordIds.length > 0) {
finalIds = keywordIds;
} else {
return [];
}
// Build final results preserving original similarity scores
const results: SessionSearchResult[] = [];
for (const id of finalIds) {
const r = vectorRows.get(id) || keywordRows.get(id);
if (r) results.push(r);
}
return results;
}

26
src/tools/session-id.ts Normal file
View File

@@ -0,0 +1,26 @@
/**
* Shared utility: get current session ID from environment or cache file.
* Extracted from crud.ts during task-mcp → session-mcp fork (CF-762).
*/
import * as fs from 'fs';
import * as path from 'path';
import * as os from 'os';
/**
 * Resolve the current session ID.
 * Priority: CLAUDE_SESSION_ID env var → session-memory cache file →
 * freshly generated timestamp id (session_YYYYMMDDHHMMSS).
 */
export function getSessionId(): string {
  if (process.env.CLAUDE_SESSION_ID) {
    return process.env.CLAUDE_SESSION_ID;
  }
  const cacheFile = path.join(os.homedir(), '.cache', 'session-memory', 'current_session');
  try {
    const sessionId = fs.readFileSync(cacheFile, 'utf-8').trim();
    if (sessionId) return sessionId;
  } catch {
    // Cache file missing or unreadable — fall through to a generated id.
  }
  // Fix: strip ALL non-digits before slicing. The old replace(/[-:T]/g, '')
  // left the millisecond '.' in the ISO string, so slice(0, 15) produced ids
  // like "session_20260413093439." with a trailing period.
  const timestamp = new Date().toISOString().replace(/\D/g, '').slice(0, 14);
  return `session_${timestamp}`;
}

View File

@@ -1,7 +1,9 @@
// Session management operations for database-driven session tracking
// Sessions auto-create CF Jira issues and post output on close (CF-762)
import { query, queryOne, execute } from '../db.js';
import { getEmbedding, formatEmbedding } from '../embeddings.js';
import { getEmbedding, formatEmbedding, generateContentHash, rrfMerge, rerank, extractMetadata } from '../embeddings.js';
import { createSessionIssue, addComment, transitionToDone, updateIssueDescription } from '../services/jira.js';
interface SessionStartArgs {
session_id?: string;
@@ -9,6 +11,7 @@ interface SessionStartArgs {
working_directory?: string;
git_branch?: string;
initial_prompt?: string;
jira_issue_key?: string;
}
interface SessionUpdateArgs {
@@ -31,10 +34,13 @@ interface SessionListArgs {
limit?: number;
}
type SearchMode = 'hybrid' | 'vector' | 'keyword';
interface SessionSearchArgs {
query: string;
project?: string;
limit?: number;
search_mode?: SearchMode;
}
interface Session {
@@ -52,23 +58,25 @@ interface Session {
token_count: number;
tools_used: string[] | null;
status: string;
jira_issue_key: string | null;
created_at: string;
}
/**
* Start a new session with metadata tracking
* Returns session_id and session_number
* Start a new session with metadata tracking.
* Auto-creates a CF Jira issue for session tracking.
* Returns session_id, session_number, and Jira issue key.
*/
export async function sessionStart(args: SessionStartArgs): Promise<string> {
const { session_id, project, working_directory, git_branch, initial_prompt } = args;
const { session_id, project, working_directory, git_branch, initial_prompt, jira_issue_key } = args;
// Generate session ID if not provided (fallback, should come from session-memory)
const id = session_id || `session_${Date.now()}_${Math.random().toString(36).substring(7)}`;
await execute(
`INSERT INTO sessions (id, project, started_at, working_directory, git_branch, initial_prompt, status)
VALUES ($1, $2, NOW(), $3, $4, $5, 'active')`,
[id, project, working_directory || null, git_branch || null, initial_prompt || null]
`INSERT INTO sessions (id, project, started_at, working_directory, git_branch, initial_prompt, jira_issue_key, status)
VALUES ($1, $2, NOW(), $3, $4, $5, $6, 'active')`,
[id, project, working_directory || null, git_branch || null, initial_prompt || null, jira_issue_key || null]
);
// Get the assigned session_number
@@ -79,7 +87,32 @@ export async function sessionStart(args: SessionStartArgs): Promise<string> {
const session_number = result?.session_number || null;
return `Session started: ${id} (${project} #${session_number})`;
// Auto-create CF Jira issue for session tracking (non-blocking)
let sessionJiraKey: string | null = jira_issue_key || null;
if (!sessionJiraKey) {
try {
const jiraResult = await createSessionIssue({
sessionNumber: session_number,
project,
parentIssueKey: jira_issue_key || undefined,
branch: git_branch || undefined,
workingDirectory: working_directory || undefined,
});
if (jiraResult) {
sessionJiraKey = jiraResult.key;
// Store the auto-created Jira issue key
await execute(
`UPDATE sessions SET jira_issue_key = $1 WHERE id = $2`,
[sessionJiraKey, id]
);
}
} catch (err) {
console.error('session-mcp: Failed to create session Jira issue:', err);
}
}
const jiraInfo = sessionJiraKey ? ` [${sessionJiraKey}]` : '';
return `Session started: ${id} (${project} #${session_number})${jiraInfo}`;
}
/**
@@ -121,41 +154,39 @@ export async function sessionUpdate(args: SessionUpdateArgs): Promise<string> {
}
/**
* End session and generate summary with embedding
* End session and generate summary with embedding.
* Posts full session output as Jira comment and transitions session issue to Done.
*/
export async function sessionEnd(args: SessionEndArgs): Promise<string> {
const { session_id, summary, status = 'completed' } = args;
// Generate embedding for semantic search
const embedding = await getEmbedding(summary);
const embeddingValue = embedding ? formatEmbedding(embedding) : null;
// CF-1314: Store content hash alongside embedding
const contentHash = generateContentHash(summary);
if (embeddingValue) {
await execute(
`UPDATE sessions
SET ended_at = NOW(),
summary = $1,
embedding = $2,
status = $3,
updated_at = NOW()
WHERE id = $4`,
[summary, embeddingValue, status, session_id]
);
} else {
await execute(
`UPDATE sessions
SET ended_at = NOW(),
summary = $1,
status = $2,
updated_at = NOW()
WHERE id = $3`,
[summary, status, session_id]
);
}
// Generate embedding + extract metadata in parallel (CF-1316)
const [embedding, metadata] = await Promise.all([
getEmbedding(summary),
extractMetadata(summary),
]);
const embeddingValue = embedding ? formatEmbedding(embedding) : null;
const metadataValue = metadata ? JSON.stringify(metadata) : null;
await execute(
`UPDATE sessions
SET ended_at = NOW(),
summary = $1,
embedding = $2,
status = $3,
content_hash = $4,
extracted_metadata = $5::jsonb,
updated_at = NOW()
WHERE id = $6`,
[summary, embeddingValue, status, contentHash, metadataValue, session_id]
);
// Get session details
const session = await queryOne<Session>(
`SELECT id, project, session_number, duration_minutes
const session = await queryOne<Session & { jira_issue_key: string | null }>(
`SELECT id, project, session_number, duration_minutes, jira_issue_key
FROM sessions WHERE id = $1`,
[session_id]
);
@@ -164,7 +195,100 @@ export async function sessionEnd(args: SessionEndArgs): Promise<string> {
return `Session ended: ${session_id}`;
}
return `Session ended: ${session.project} #${session.session_number} (${session.duration_minutes || 0}m)`;
// Post session output to Jira and close the session issue (non-blocking)
let jiraStatus = '';
if (session.jira_issue_key) {
try {
// Collect session output for Jira comment
const sessionOutput = await buildSessionOutput(session_id, session, summary);
// Post as comment
const commented = await addComment(session.jira_issue_key, sessionOutput);
// Update issue description with final summary
const descriptionUpdate = [
`## Session ${session.project} #${session.session_number}`,
`**Duration:** ${session.duration_minutes || 0} minutes`,
`**Status:** ${status}`,
`**Session ID:** ${session_id}`,
'',
`## Summary`,
summary,
].join('\n');
await updateIssueDescription(session.jira_issue_key, descriptionUpdate);
// Transition to Done
const transitioned = await transitionToDone(session.jira_issue_key);
jiraStatus = commented && transitioned
? ` [${session.jira_issue_key} → Done]`
: commented
? ` [${session.jira_issue_key} commented]`
: ` [${session.jira_issue_key} Jira update partial]`;
} catch (err) {
console.error('session-mcp: Failed to update session Jira issue:', err);
jiraStatus = ` [${session.jira_issue_key} Jira update failed]`;
}
}
return `Session ended: ${session.project} #${session.session_number} (${session.duration_minutes || 0}m)${jiraStatus}`;
}
/**
 * Assemble the full session output (summary, grouped notes, commits) as
 * markdown for posting to the session's Jira issue.
 */
async function buildSessionOutput(
  session_id: string,
  session: { project: string | null; session_number: number | null; duration_minutes: number | null },
  summary: string
): Promise<string> {
  const out: string[] = [
    `# Session ${session.project} #${session.session_number}`,
    `Duration: ${session.duration_minutes || 0} minutes`,
    '',
    `## Summary`,
    summary,
    '',
  ];
  // Session notes, grouped by note_type in first-seen order.
  const notes = await query<{ note_type: string; content: string }>(
    `SELECT note_type, content FROM session_notes WHERE session_id = $1 ORDER BY created_at`,
    [session_id]
  );
  if (notes.length > 0) {
    const byType = new Map<string, string[]>();
    for (const note of notes) {
      const bucket = byType.get(note.note_type);
      if (bucket) bucket.push(note.content);
      else byType.set(note.note_type, [note.content]);
    }
    for (const [noteType, items] of byType) {
      // "tech_debt" -> "Tech Debt"
      const heading = noteType.replace(/_/g, ' ').replace(/\b\w/g, c => c.toUpperCase());
      out.push(`## ${heading}`);
      for (const entry of items) {
        out.push(`- ${entry}`);
      }
      out.push('');
    }
  }
  // Commits, newest first; first line of each message only.
  const commits = await query<{ commit_sha: string; repo: string; commit_message: string | null }>(
    `SELECT commit_sha, repo, commit_message FROM session_commits WHERE session_id = $1 ORDER BY committed_at DESC`,
    [session_id]
  );
  if (commits.length > 0) {
    out.push(`## Commits (${commits.length})`);
    for (const c of commits) {
      const firstLine = c.commit_message ? c.commit_message.split('\n')[0] : 'No message';
      out.push(`- ${c.commit_sha.substring(0, 7)} (${c.repo}): ${firstLine}`);
    }
    out.push('');
  }
  return out.join('\n');
}
/**
@@ -220,49 +344,125 @@ export async function sessionList(args: SessionListArgs): Promise<string> {
}
/**
* Semantic search across sessions using vector similarity
* Search sessions with hybrid (vector + keyword), vector-only, or keyword-only mode (CF-1315)
*/
export async function sessionSearch(args: SessionSearchArgs): Promise<string> {
const { query: searchQuery, project, limit = 5 } = args;
const { query: searchQuery, project, limit = 5, search_mode = 'hybrid' } = args;
// Generate embedding for search
const embedding = await getEmbedding(searchQuery);
// Build shared filter clause
const buildFilter = (startIdx: number) => {
let where = '';
const params: unknown[] = [];
let idx = startIdx;
if (project) {
where += ` AND project = $${idx++}`;
params.push(project);
}
return { where, params, nextIdx: idx };
};
if (!embedding) {
return 'Error: Could not generate embedding for search';
// Vector search
let vectorIds: string[] = [];
let vectorRows: Map<string, Session & { similarity: number }> = new Map();
let embeddingFailed = false;
if (search_mode !== 'keyword') {
const embedding = await getEmbedding(searchQuery);
if (embedding) {
const embeddingStr = formatEmbedding(embedding);
const filter = buildFilter(3);
const params: unknown[] = [embeddingStr, limit, ...filter.params];
const rows = await query<Session & { similarity: number }>(
`SELECT id, project, session_number, started_at, duration_minutes, summary,
1 - (embedding <=> $1) as similarity
FROM sessions
WHERE embedding IS NOT NULL${filter.where}
ORDER BY embedding <=> $1
LIMIT $2`,
params
);
vectorIds = rows.map(r => r.id);
for (const r of rows) vectorRows.set(r.id, r);
} else {
embeddingFailed = true;
if (search_mode === 'vector') {
return 'Error: Could not generate embedding for vector search';
}
}
}
const embeddingStr = formatEmbedding(embedding);
// Keyword search
let keywordIds: string[] = [];
let keywordRows: Map<string, Session & { rank: number }> = new Map();
let whereClause = 'WHERE embedding IS NOT NULL';
const params: unknown[] = [embeddingStr, limit];
if (search_mode !== 'vector') {
const filter = buildFilter(3);
const params: unknown[] = [searchQuery, limit, ...filter.params];
if (project) {
whereClause += ` AND project = $3`;
params.splice(1, 0, project); // Insert before limit
params[2] = limit; // Adjust limit position
const rows = await query<Session & { rank: number }>(
`SELECT id, project, session_number, started_at, duration_minutes, summary,
ts_rank(search_vector, plainto_tsquery('english', $1)) as rank
FROM sessions
WHERE search_vector @@ plainto_tsquery('english', $1)${filter.where}
ORDER BY rank DESC
LIMIT $2`,
params
);
keywordIds = rows.map(r => r.id);
for (const r of rows) keywordRows.set(r.id, r);
}
const sessions = await query<Session & { similarity: number }>(
`SELECT id, project, session_number, started_at, duration_minutes, summary,
1 - (embedding <=> $1) as similarity
FROM sessions
${whereClause}
ORDER BY embedding <=> $1
LIMIT $${project ? '3' : '2'}`,
params
);
// Merge results
let finalIds: string[];
let searchLabel: string;
if (sessions.length === 0) {
let rerankScores: Map<string, number> | null = null;
if (search_mode === 'hybrid' && vectorIds.length > 0 && keywordIds.length > 0) {
const merged = rrfMerge(vectorIds, keywordIds);
finalIds = merged.map(m => m.id as string);
searchLabel = 'hybrid';
// Cross-encoder re-ranking (CF-1317)
const docs = finalIds.map(id => {
const r = vectorRows.get(id) || keywordRows.get(id);
return (r as any)?.summary || '';
});
const reranked = await rerank(searchQuery, docs, limit);
if (reranked) {
rerankScores = new Map();
const reorderedIds = reranked.map(r => {
rerankScores!.set(finalIds[r.index], r.relevance_score);
return finalIds[r.index];
});
finalIds = reorderedIds;
searchLabel = 'hybrid+rerank';
} else {
finalIds = finalIds.slice(0, limit);
}
} else if (vectorIds.length > 0) {
finalIds = vectorIds;
searchLabel = 'vector';
} else if (keywordIds.length > 0) {
finalIds = keywordIds;
searchLabel = embeddingFailed ? 'keyword (embedding unavailable)' : 'keyword';
} else {
return 'No relevant sessions found';
}
const lines = ['Similar sessions:\n'];
for (const s of sessions) {
const sim = Math.round(s.similarity * 100);
// Format output
const lines = [`Similar sessions (${searchLabel}):\n`];
for (const id of finalIds) {
const s = vectorRows.get(id) || keywordRows.get(id);
if (!s) continue;
const simParts: string[] = [];
if (vectorRows.has(id)) simParts.push(`${Math.round((vectorRows.get(id)!).similarity * 100)}% match`);
if (rerankScores?.has(id)) simParts.push(`rerank: ${rerankScores.get(id)!.toFixed(2)}`);
const scores = simParts.length > 0 ? ` (${simParts.join(', ')})` : '';
const num = s.session_number ? `#${s.session_number}` : '';
const duration = s.duration_minutes ? `(${s.duration_minutes}m)` : '';
lines.push(`**${s.project} ${num}** ${duration} (${sim}% match)`);
lines.push(`**${s.project} ${num}** ${duration}${scores}`);
lines.push(` ${s.summary || 'No summary'}`);
lines.push('');
}

530
src/tools/timeline.ts Normal file
View File

@@ -0,0 +1,530 @@
// CF-2885: Event Timeline — unified chronological view across session-mcp sources
// Stitches sessions, notes, commits, plans, task-commit links, and Jira history
// into a single time-ordered event stream for LLM consumption.
import { query } from '../db.js';
import { getIssueWithHistory, searchIssueKeys } from '../services/jira.js';
/** Origin of a timeline event — one per session-mcp table, plus Jira. */
export type EventSource = 'session' | 'note' | 'commit' | 'plan' | 'task_commit' | 'jira';
/** A single normalized event in the unified chronological stream. */
export interface TimelineEvent {
  ts: string; // ISO8601
  source: EventSource;
  type: string; // e.g. "session_start", "note:decision", "commit"
  subject: string; // Jira key | session id | repo
  summary: string; // 1-line human readable
  details: Record<string, unknown>;
  // Cross-references back to the originating records.
  links: {
    session?: string;
    jira?: string;
    commit?: { sha: string; repo: string };
  };
}
/** Arguments for the timeline() MCP tool. */
interface TimelineArgs {
  subject: string; // Jira key (CF-123) | session id | project key (CF)
  since?: string; // ISO8601 or relative like "-7d" (default: -7d)
  until?: string; // ISO8601 (default: now)
  sources?: EventSource[]; // optional filter
  limit?: number; // default: 100
}
// ---------- helpers ----------
// e.g. "CF-2885": 2-10 uppercase letters, dash, digits.
const JIRA_KEY_RE = /^[A-Z]{2,10}-\d+$/;
// Bare project key, e.g. "CF".
const PROJECT_KEY_RE = /^[A-Z]{2,10}$/;
/**
 * Translate a `since` argument into a SQL timestamp expression.
 * Accepts relative shorthand (-7d / -24h / -30m) or an ISO8601 timestamp;
 * defaults to 7 days ago.
 * Security fix: the returned fragment is interpolated directly into SQL, so
 * absolute timestamps are now validated against a strict ISO8601 pattern —
 * anything else throws instead of enabling SQL injection.
 * @throws Error when `since` is neither relative shorthand nor ISO8601-shaped
 */
function resolveSince(since?: string): string {
  if (!since) return 'NOW() - INTERVAL \'7 days\'';
  // Relative shorthand: -7d, -24h, -30m
  const rel = since.match(/^-(\d+)([dhm])$/);
  if (rel) {
    const n = rel[1];
    const unit = rel[2] === 'd' ? 'days' : rel[2] === 'h' ? 'hours' : 'minutes';
    return `NOW() - INTERVAL '${n} ${unit}'`;
  }
  // Absolute timestamp: allow only ISO8601-shaped input (no quotes/escapes),
  // because the value is spliced into the SQL string un-parameterized.
  if (!/^\d{4}-\d{2}-\d{2}([T ]\d{2}:\d{2}(:\d{2}(\.\d+)?)?(Z|[+-]\d{2}:?\d{2})?)?$/.test(since)) {
    throw new Error(`timeline: invalid 'since' value: ${since}`);
  }
  return `'${since}'::timestamptz`;
}
/**
 * Translate an `until` argument into a SQL timestamp expression (default NOW()).
 * Security fix: the value is interpolated un-parameterized into SQL, so it is
 * validated against a strict ISO8601 pattern; anything else throws instead of
 * enabling SQL injection.
 * @throws Error when `until` is not ISO8601-shaped
 */
function resolveUntil(until?: string): string {
  if (!until) return 'NOW()';
  if (!/^\d{4}-\d{2}-\d{2}([T ]\d{2}:\d{2}(:\d{2}(\.\d+)?)?(Z|[+-]\d{2}:?\d{2})?)?$/.test(until)) {
    throw new Error(`timeline: invalid 'until' value: ${until}`);
  }
  return `'${until}'::timestamptz`;
}
/** First 7 characters of a commit SHA (git short form). */
function shortSha(sha: string): string {
  return sha.slice(0, 7);
}
/** Clamp a string to at most n chars, ending with an ellipsis when cut; '' for nullish. */
function truncate(s: string | null | undefined, n: number): string {
  if (!s) return '';
  if (s.length <= n) return s;
  return `${s.substring(0, n - 1)}…`;
}
// ---------- subject classification ----------
/** What kind of thing a timeline subject names. */
type SubjectKind = 'jira' | 'session' | 'project' | 'unknown';
/**
 * Classify the subject string. Jira-key-shaped subjects may actually be
 * session ids (sessions often reuse the key format), so callers disambiguate
 * at query time by matching both columns.
 */
function classifySubject(subject: string): SubjectKind {
  if (JIRA_KEY_RE.test(subject)) return 'jira';
  if (PROJECT_KEY_RE.test(subject)) return 'project';
  return 'unknown';
}
// ---------- source queries ----------
/**
 * Session start/end events. Jira-shaped subjects match either the session id
 * or the linked issue key; project subjects match every session in the project.
 * `since`/`until` are pre-built SQL fragments (see resolveSince/resolveUntil).
 */
async function fetchSessionEvents(
  subject: string,
  kind: SubjectKind,
  since: string,
  until: string
): Promise<TimelineEvent[]> {
  let where: string;
  if (kind === 'jira') {
    where = '(s.id = $1 OR s.jira_issue_key = $1)';
  } else if (kind === 'project') {
    where = 's.project = $1';
  } else {
    where = '1=0'; // unknown subject kind: match nothing
  }
  const sessionRows = await query<{
    id: string;
    project: string;
    jira_issue_key: string | null;
    started_at: string;
    ended_at: string | null;
    summary: string | null;
    status: string | null;
    duration_minutes: number | null;
  }>(
    `SELECT id, project, jira_issue_key, started_at, ended_at, summary, status, duration_minutes
       FROM sessions s
      WHERE ${where}
        AND s.started_at >= ${since}
        AND s.started_at <= ${until}
      ORDER BY s.started_at DESC
      LIMIT 100`,
    [subject]
  );
  // One start event per session, plus an end event once the session closed.
  return sessionRows.flatMap((row) => {
    const started: TimelineEvent = {
      ts: row.started_at,
      source: 'session',
      type: 'session_start',
      subject: row.id,
      summary: `Session ${row.id} started${row.jira_issue_key ? ` on ${row.jira_issue_key}` : ''}`,
      details: { project: row.project, status: row.status },
      links: {
        session: row.id,
        jira: row.jira_issue_key || undefined,
      },
    };
    if (!row.ended_at) return [started];
    const ended: TimelineEvent = {
      ts: row.ended_at,
      source: 'session',
      type: 'session_end',
      subject: row.id,
      summary: `Session ${row.id} ended (${row.duration_minutes ?? '?'}min)${row.summary ? ': ' + truncate(row.summary, 120) : ''}`,
      details: { summary: row.summary, status: row.status },
      links: { session: row.id, jira: row.jira_issue_key || undefined },
    };
    return [started, ended];
  });
}
/**
 * Session notes as events. Notes hang off sessions, so matching reuses the
 * session linkage (id / jira key / project).
 */
async function fetchNoteEvents(
  subject: string,
  kind: SubjectKind,
  since: string,
  until: string
): Promise<TimelineEvent[]> {
  let matchClause: string;
  switch (kind) {
    case 'jira':
      matchClause = '(s.id = $1 OR s.jira_issue_key = $1)';
      break;
    case 'project':
      matchClause = 's.project = $1';
      break;
    default:
      matchClause = '1=0';
      break;
  }
  const noteRows = await query<{
    id: number;
    session_id: string;
    note_type: string;
    content: string;
    created_at: string;
    jira_issue_key: string | null;
  }>(
    `SELECT n.id, n.session_id, n.note_type, n.content, n.created_at, s.jira_issue_key
       FROM session_notes n
       JOIN sessions s ON s.id = n.session_id
      WHERE ${matchClause}
        AND n.created_at >= ${since}
        AND n.created_at <= ${until}
      ORDER BY n.created_at DESC
      LIMIT 200`,
    [subject]
  );
  return noteRows.map((note) => ({
    ts: note.created_at,
    source: 'note' as const,
    type: `note:${note.note_type}`,
    subject: note.session_id,
    summary: `[${note.note_type}] ${truncate(note.content, 140)}`,
    details: { full: note.content },
    links: { session: note.session_id, jira: note.jira_issue_key || undefined },
  }));
}
/**
 * Commits recorded against sessions (session_commits). The event timestamp
 * prefers the real commit time and falls back to the row's creation time.
 */
async function fetchCommitEvents(
  subject: string,
  kind: SubjectKind,
  since: string,
  until: string
): Promise<TimelineEvent[]> {
  let sessionFilter: string;
  switch (kind) {
    case 'jira':
      sessionFilter = '(s.id = $1 OR s.jira_issue_key = $1)';
      break;
    case 'project':
      sessionFilter = 's.project = $1';
      break;
    default:
      sessionFilter = '1=0';
      break;
  }
  const commitRows = await query<{
    commit_sha: string;
    repo: string;
    commit_message: string | null;
    committed_at: string | null;
    created_at: string;
    session_id: string;
    jira_issue_key: string | null;
  }>(
    `SELECT c.commit_sha, c.repo, c.commit_message, c.committed_at, c.created_at,
            c.session_id, s.jira_issue_key
       FROM session_commits c
       JOIN sessions s ON s.id = c.session_id
      WHERE ${sessionFilter}
        AND COALESCE(c.committed_at, c.created_at) >= ${since}
        AND COALESCE(c.committed_at, c.created_at) <= ${until}
      ORDER BY COALESCE(c.committed_at, c.created_at) DESC
      LIMIT 200`,
    [subject]
  );
  return commitRows.map((row) => {
    const sha7 = shortSha(row.commit_sha);
    return {
      ts: row.committed_at || row.created_at,
      source: 'commit' as const,
      type: 'commit',
      subject: `${row.repo}@${sha7}`,
      summary: `${sha7} (${row.repo}) ${truncate(row.commit_message, 100)}`,
      details: { full_message: row.commit_message },
      links: {
        session: row.session_id,
        jira: row.jira_issue_key || undefined,
        commit: { sha: row.commit_sha, repo: row.repo },
      },
    };
  });
}
/**
 * Direct Jira-issue ↔ commit links (task_commits), recorded independently of
 * sessions. Only meaningful for Jira-shaped subjects; otherwise returns [].
 */
async function fetchTaskCommitEvents(
  subject: string,
  kind: SubjectKind,
  since: string,
  until: string
): Promise<TimelineEvent[]> {
  if (kind !== 'jira') return [];
  const linkRows = await query<{
    task_id: string;
    commit_sha: string;
    repo: string;
    source: string | null;
    created_at: string;
  }>(
    `SELECT task_id, commit_sha, repo, source, created_at
       FROM task_commits
      WHERE task_id = $1
        AND created_at >= ${since}
        AND created_at <= ${until}
      ORDER BY created_at DESC
      LIMIT 200`,
    [subject]
  );
  return linkRows.map((link) => ({
    ts: link.created_at,
    source: 'task_commit' as const,
    type: 'commit_link',
    subject: link.task_id,
    summary: `${shortSha(link.commit_sha)} linked to ${link.task_id} (${link.repo}) [${link.source || 'manual'}]`,
    details: { source: link.source },
    links: {
      jira: link.task_id,
      commit: { sha: link.commit_sha, repo: link.repo },
    },
  }));
}
/**
 * Plan lifecycle events via session linkage: one `plan_created` per plan, plus
 * `plan_approved` / `plan_completed` when those timestamps are set.
 */
async function fetchPlanEvents(
  subject: string,
  kind: SubjectKind,
  since: string,
  until: string
): Promise<TimelineEvent[]> {
  let sessionFilter: string;
  if (kind === 'jira') {
    sessionFilter = '(s.id = $1 OR s.jira_issue_key = $1)';
  } else if (kind === 'project') {
    sessionFilter = 's.project = $1';
  } else {
    sessionFilter = '1=0';
  }
  const planRows = await query<{
    id: number;
    session_id: string;
    plan_file_name: string | null;
    status: string | null;
    created_at: string;
    approved_at: string | null;
    completed_at: string | null;
    jira_issue_key: string | null;
  }>(
    `SELECT p.id, p.session_id, p.plan_file_name, p.status,
            p.created_at, p.approved_at, p.completed_at, s.jira_issue_key
       FROM session_plans p
       JOIN sessions s ON s.id = p.session_id
      WHERE ${sessionFilter}
        AND p.created_at >= ${since}
        AND p.created_at <= ${until}
      ORDER BY p.created_at DESC
      LIMIT 50`,
    [subject]
  );
  return planRows.flatMap((plan) => {
    const label = plan.plan_file_name || `plan#${plan.id}`;
    const planEvents: TimelineEvent[] = [
      {
        ts: plan.created_at,
        source: 'plan',
        type: 'plan_created',
        subject: plan.session_id,
        summary: `Plan created: ${label}`,
        details: { status: plan.status },
        links: { session: plan.session_id, jira: plan.jira_issue_key || undefined },
      },
    ];
    if (plan.approved_at) {
      planEvents.push({
        ts: plan.approved_at,
        source: 'plan',
        type: 'plan_approved',
        subject: plan.session_id,
        summary: `Plan approved: ${label}`,
        details: {},
        links: { session: plan.session_id, jira: plan.jira_issue_key || undefined },
      });
    }
    if (plan.completed_at) {
      planEvents.push({
        ts: plan.completed_at,
        source: 'plan',
        type: 'plan_completed',
        subject: plan.session_id,
        summary: `Plan completed: ${label}`,
        details: {},
        links: { session: plan.session_id, jira: plan.jira_issue_key || undefined },
      });
    }
    return planEvents;
  });
}
// ---------- Jira source ----------
/** Keep only events whose timestamp lies inside [sinceISO, untilISO], inclusive. */
function filterByTimeWindow<T extends { ts: string }>(events: T[], sinceISO: string, untilISO: string): T[] {
  const lo = Date.parse(sinceISO);
  const hi = Date.parse(untilISO);
  return events.filter((event) => {
    const t = Date.parse(event.ts);
    return lo <= t && t <= hi;
  });
}
// Resolve relative "since" into ISO8601 for client-side filtering of Jira data
// (mirrors resolveSince, but produces a timestamp instead of a SQL fragment).
function resolveSinceISO(since?: string): string {
  if (!since) return new Date(Date.now() - 7 * 86400_000).toISOString();
  const rel = since.match(/^-(\d+)([dhm])$/);
  if (!rel) return since; // already an absolute timestamp — pass through
  const amount = parseInt(rel[1]);
  const unitMs = rel[2] === 'd' ? 86400_000 : rel[2] === 'h' ? 3600_000 : 60_000;
  return new Date(Date.now() - amount * unitMs).toISOString();
}
/** Resolve the `until` argument to ISO8601; defaults to the current time. */
function resolveUntilISO(until?: string): string {
  if (until) return until;
  return new Date().toISOString();
}
/**
 * Jira events for one issue: creation, notable field changes
 * (status/assignee/resolution/priority/labels), and comments.
 * Events are filtered client-side against [sinceISO, untilISO].
 */
async function fetchJiraEvents(
  subject: string,
  kind: SubjectKind,
  sinceISO: string,
  untilISO: string
): Promise<TimelineEvent[]> {
  if (kind !== 'jira') return [];
  const issue = await getIssueWithHistory(subject);
  if (!issue) return [];
  const events: TimelineEvent[] = [];
  // Creation event
  if (issue.created) {
    events.push({
      ts: issue.created,
      source: 'jira',
      type: 'issue_created',
      subject: issue.key,
      summary: `${issue.issueType} created: ${issue.summary}`,
      details: { labels: issue.labels, creator: issue.creator },
      links: { jira: issue.key },
    });
  }
  // Changelog entries (field changes) — only fields worth surfacing.
  // Hoisted out of the loop (was rebuilt per iteration).
  const notable = new Set(['status', 'assignee', 'resolution', 'priority', 'labels']);
  for (const ch of issue.changelog) {
    if (!notable.has(ch.field)) continue;
    const fromStr = ch.from ?? '∅';
    const toStr = ch.to ?? '∅';
    events.push({
      ts: ch.ts,
      source: 'jira',
      type: `field_change:${ch.field}`,
      // "from → to" — separator restored (the two values were concatenated).
      summary: `${ch.field}: ${fromStr} → ${toStr} by ${ch.author}`,
      details: { field: ch.field, from: ch.from, to: ch.to, author: ch.author },
      links: { jira: issue.key },
    });
  }
  // Comments
  for (const c of issue.comments) {
    events.push({
      ts: c.ts,
      source: 'jira',
      type: 'comment',
      subject: issue.key,
      summary: `💬 ${c.author}: ${truncate(c.body, 120)}`,
      details: { author: c.author, body: c.body },
      links: { jira: issue.key },
    });
  }
  return filterByTimeWindow(events, sinceISO, untilISO);
}
/**
 * For a Jira task issue, find session-tracking issues that mention it.
 * Best-effort enrichment: any Jira/search error yields an empty list.
 */
async function fetchLinkedSessionIssueKeys(subject: string, kind: SubjectKind): Promise<string[]> {
  if (kind !== 'jira') return [];
  // NOTE: a precise `issueFunction in linkedIssuesOf(...)` JQL would require
  // Script Runner; use a plain text match that works on vanilla Jira instead.
  // (Removed the unused Script-Runner JQL string that was dead code here.)
  const simpleJql = `labels = "session-tracking" AND text ~ "${subject}"`;
  try {
    return await searchIssueKeys(simpleJql, 20);
  } catch {
    // Linked-issue lookup is optional; never fail the whole timeline over it.
    return [];
  }
}
// ---------- main tool ----------
/**
 * `timeline` MCP tool: stitch events from all enabled sources into one
 * chronologically sorted, markdown-formatted stream grouped by date.
 *
 * @param args.subject Jira key (CF-123), session id, or project key (CF)
 * @param args.since   ISO8601 or relative "-7d"/"-24h"/"-30m" (default -7d)
 * @param args.until   ISO8601 (default now)
 * @param args.sources optional source filter (default: all sources)
 * @param args.limit   max events, keeping the most recent (default 100)
 * @returns markdown timeline, or an error/empty message string
 */
export async function timeline(args: TimelineArgs): Promise<string> {
  const subject = args.subject?.trim();
  if (!subject) {
    return 'Error: subject is required (Jira key, session id, or project key)';
  }
  const kind = classifySubject(subject);
  if (kind === 'unknown') {
    return `Error: could not classify subject "${subject}". Expected Jira key (CF-123), session id, or project key (CF).`;
  }
  // SQL fragments for Postgres-side filtering; ISO strings for client-side
  // filtering of Jira data.
  const since = resolveSince(args.since);
  const until = resolveUntil(args.until);
  const sinceISO = resolveSinceISO(args.since);
  const untilISO = resolveUntilISO(args.until);
  const limit = args.limit ?? 100;
  const sourceFilter = new Set<EventSource>(
    args.sources ?? ['session', 'note', 'commit', 'plan', 'task_commit', 'jira']
  );
  // For Jira subjects, also fetch any linked session-tracking issues and include their history
  const jiraTargets: string[] = [];
  if (kind === 'jira' && sourceFilter.has('jira')) {
    jiraTargets.push(subject);
    const linked = await fetchLinkedSessionIssueKeys(subject, kind);
    for (const k of linked) {
      if (k !== subject) jiraTargets.push(k);
    }
  }
  // Run all source queries in parallel.
  const [sessionEvents, noteEvents, commitEvents, planEvents, taskCommitEvents, ...jiraEventArrays] = await Promise.all([
    sourceFilter.has('session') ? fetchSessionEvents(subject, kind, since, until) : Promise.resolve([]),
    sourceFilter.has('note') ? fetchNoteEvents(subject, kind, since, until) : Promise.resolve([]),
    sourceFilter.has('commit') ? fetchCommitEvents(subject, kind, since, until) : Promise.resolve([]),
    sourceFilter.has('plan') ? fetchPlanEvents(subject, kind, since, until) : Promise.resolve([]),
    sourceFilter.has('task_commit') ? fetchTaskCommitEvents(subject, kind, since, until) : Promise.resolve([]),
    ...jiraTargets.map(k => fetchJiraEvents(k, 'jira', sinceISO, untilISO)),
  ]);
  const jiraEvents = jiraEventArrays.flat();
  const all = [...sessionEvents, ...noteEvents, ...commitEvents, ...planEvents, ...taskCommitEvents, ...jiraEvents];
  // Sort chronologically (oldest → newest for narrative reading);
  // slice(-limit) keeps the most recent events when over the cap.
  all.sort((a, b) => new Date(a.ts).getTime() - new Date(b.ts).getTime());
  const limited = all.slice(-limit);
  // "since → until" — separator restored (the two bounds were concatenated).
  const windowLabel = `${args.since || '-7d'} → ${args.until || 'now'}`;
  if (limited.length === 0) {
    return `📭 No events for ${subject} (${kind}) in window ${windowLabel}`;
  }
  // Format as markdown timeline
  let output = `📜 **Timeline: ${subject}** (${kind}, ${limited.length} events)\n`;
  output += `Window: ${windowLabel}\n\n`;
  let lastDate = '';
  for (const e of limited) {
    const d = new Date(e.ts);
    const dateStr = d.toISOString().substring(0, 10);
    const timeStr = d.toISOString().substring(11, 16);
    if (dateStr !== lastDate) {
      output += `\n**${dateStr}**\n`;
      lastDate = dateStr;
    }
    const icon = {
      session: '🗂️',
      note: '📝',
      commit: '🔨',
      plan: '📋',
      task_commit: '🔗',
      jira: '🎫',
    }[e.source];
    output += ` \`${timeStr}\` ${icon} \`${e.type}\` ${e.summary}\n`;
  }
  return output;
}

161
src/tools/transcripts.ts Normal file
View File

@@ -0,0 +1,161 @@
// Session transcript search (CF-2394)
import { query } from '../db.js';
import { getEmbedding, formatEmbedding, rrfMerge, rerank } from '../embeddings.js';
/** Arguments for the transcript search tool. */
interface TranscriptSearchArgs {
  query: string; // free-text search query
  project?: string; // restrict to one project key
  session_issue_key?: string; // restrict to one linked session issue
  limit?: number; // max results (default 10)
  search_mode?: 'hybrid' | 'vector' | 'keyword'; // default 'hybrid'
}
/** One session_transcripts row; score fields depend on the search path taken. */
interface TranscriptRow {
  id: number;
  session_uuid: string;
  session_issue_key: string | null;
  project_key: string;
  git_branch: string | null;
  message_count: number;
  tool_names: string[] | null;
  started_at: string | null; // pre-formatted 'YYYY-MM-DD HH24:MI' by the query
  similarity?: number; // vector path only: 1 - cosine distance
  rank?: number; // keyword path only: ts_rank score
  snippet?: string; // keyword path only: ts_headline excerpt
}
/**
 * Search session transcripts via vector similarity, Postgres full-text
 * keyword search, or both merged with RRF and optionally reranked.
 *
 * Falls back from hybrid to keyword-only when no embedding can be produced;
 * `search_mode: 'vector'` fails hard in that case.
 *
 * @returns markdown-formatted result list, or an error/empty message
 */
export async function transcriptSearch(args: TranscriptSearchArgs): Promise<string> {
  const { query: searchQuery, project, session_issue_key, limit = 10, search_mode = 'hybrid' } = args;
  // Shared WHERE-clause builder. startIdx is the first free placeholder index:
  // $1/$2 are taken by the search term and LIMIT in both branches below.
  const buildFilter = (startIdx: number) => {
    let where = '';
    const params: unknown[] = [];
    let idx = startIdx;
    if (project) {
      where += ` AND project_key = $${idx++}`;
      params.push(project);
    }
    if (session_issue_key) {
      where += ` AND session_issue_key = $${idx++}`;
      params.push(session_issue_key);
    }
    return { where, params, nextIdx: idx };
  };
  // --- Vector search ---
  let vectorIds: number[] = [];
  const vectorRows = new Map<number, TranscriptRow>(); // never reassigned — was `let`
  let embeddingFailed = false;
  if (search_mode !== 'keyword') {
    const embedding = await getEmbedding(searchQuery);
    if (embedding) {
      const embeddingStr = formatEmbedding(embedding);
      const filter = buildFilter(3);
      const params: unknown[] = [embeddingStr, limit, ...filter.params];
      const rows = await query<TranscriptRow>(
        `SELECT id, session_uuid, session_issue_key, project_key, git_branch,
                message_count, tool_names,
                to_char(started_at, 'YYYY-MM-DD HH24:MI') as started_at,
                1 - (embedding <=> $1) as similarity
           FROM session_transcripts
          WHERE embedding IS NOT NULL${filter.where}
          ORDER BY embedding <=> $1
          LIMIT $2`,
        params
      );
      vectorIds = rows.map(r => r.id);
      for (const r of rows) vectorRows.set(r.id, r);
    } else {
      embeddingFailed = true;
      if (search_mode === 'vector') {
        return 'Error: Could not generate embedding for vector search';
      }
    }
  }
  // --- Keyword search (Postgres FTS) ---
  let keywordIds: number[] = [];
  const keywordRows = new Map<number, TranscriptRow>(); // never reassigned — was `let`
  if (search_mode !== 'vector') {
    const filter = buildFilter(3);
    const params: unknown[] = [searchQuery, limit, ...filter.params];
    const rows = await query<TranscriptRow>(
      `SELECT id, session_uuid, session_issue_key, project_key, git_branch,
              message_count, tool_names,
              to_char(started_at, 'YYYY-MM-DD HH24:MI') as started_at,
              ts_rank(tsv, plainto_tsquery('english', $1)) as rank,
              ts_headline('english', searchable_content,
                plainto_tsquery('english', $1),
                'StartSel=**,StopSel=**,MaxWords=25,MinWords=8') as snippet
         FROM session_transcripts
        WHERE tsv @@ plainto_tsquery('english', $1)${filter.where}
        ORDER BY rank DESC
        LIMIT $2`,
      params
    );
    keywordIds = rows.map(r => r.id);
    for (const r of rows) keywordRows.set(r.id, r);
  }
  // --- Merge results ---
  let finalIds: number[];
  let searchLabel: string;
  if (search_mode === 'hybrid' && vectorIds.length > 0 && keywordIds.length > 0) {
    const merged = rrfMerge(vectorIds, keywordIds);
    finalIds = merged.map(m => m.id as number);
    searchLabel = 'hybrid';
    // Re-rank using snippets (falls back to the RRF order when unavailable).
    const docs = finalIds.map(id => {
      const r = keywordRows.get(id) || vectorRows.get(id);
      return r?.snippet || r?.session_issue_key || '';
    });
    const reranked = await rerank(searchQuery, docs, limit);
    if (reranked) {
      finalIds = reranked.map(r => finalIds[r.index]);
      searchLabel = 'hybrid+rerank';
    } else {
      finalIds = finalIds.slice(0, limit);
    }
  } else if (vectorIds.length > 0) {
    finalIds = vectorIds;
    searchLabel = 'vector';
  } else if (keywordIds.length > 0) {
    finalIds = keywordIds;
    searchLabel = embeddingFailed ? 'keyword (embedding unavailable)' : 'keyword';
  } else {
    return 'No matching transcripts found';
  }
  // --- Format output ---
  const lines = [`Session transcripts (${searchLabel}, ${finalIds.length} results):\n`];
  for (const id of finalIds) {
    const r = vectorRows.get(id) || keywordRows.get(id);
    if (!r) continue;
    const scoreParts: string[] = [];
    if (vectorRows.has(id)) scoreParts.push(`${Math.round(vectorRows.get(id)!.similarity! * 100)}% semantic`);
    if (keywordRows.has(id)) scoreParts.push(`rank: ${keywordRows.get(id)!.rank!.toFixed(3)}`);
    const scores = scoreParts.length > 0 ? ` (${scoreParts.join(', ')})` : '';
    const issueLink = r.session_issue_key
      ? `[${r.session_issue_key}](https://agiliton.atlassian.net/browse/${r.session_issue_key})`
      : 'unlinked';
    const tools = r.tool_names?.slice(0, 5).join(', ') || 'none';
    // Separator restored between issue link and project key (they were concatenated).
    lines.push(`**#${r.id}** ${issueLink} — ${r.project_key} (${r.git_branch || 'no-branch'})${scores}`);
    lines.push(` ${r.started_at || 'unknown date'} | ${r.message_count} msgs | Tools: ${tools}`);
    if (r.snippet) {
      lines.push(` > ${r.snippet.replace(/\n/g, ' ').substring(0, 150)}`);
    }
    lines.push('');
  }
  return lines.join('\n');
}

View File

@@ -1,306 +0,0 @@
// Version management operations for task-mcp
import { query, queryOne, execute, getProjectKey } from '../db.js';
import type { Version, Task } from '../types.js';
/** Arguments for creating a version. */
interface VersionAddArgs {
  project: string; // project key or alias (resolved via getProjectKey)
  version: string; // version string, with or without leading "v"
  build_number?: number;
  status?: string; // defaults to 'planned'
  release_notes?: string;
}
/** Filters for listing versions. */
interface VersionListArgs {
  project?: string;
  status?: string;
  limit?: number; // defaults to 20
}
/** Partial update for a version; undefined fields are left untouched. */
interface VersionUpdateArgs {
  id: string;
  status?: string;
  git_tag?: string;
  git_sha?: string;
  release_notes?: string;
  release_date?: string;
}
/**
 * Build the canonical version id: "<PROJECT>-v<version>".
 * A leading "v" on the incoming version string is stripped first, so both
 * "1.2.3" and "v1.2.3" produce the same id.
 */
function generateVersionId(projectKey: string, version: string): string {
  const bare = version.replace(/^v/, '');
  return `${projectKey}-v${bare}`;
}
/**
 * Create a new version. Refuses duplicates (same derived id) rather than
 * upserting; the project argument may be an alias resolved via getProjectKey.
 */
export async function versionAdd(args: VersionAddArgs): Promise<string> {
  const { project, version, build_number, status = 'planned', release_notes } = args;
  // Resolve the project to its canonical key, then derive the version id.
  const projectKey = await getProjectKey(project);
  const versionId = generateVersionId(projectKey, version);
  const duplicate = await queryOne<{ id: string }>(`SELECT id FROM versions WHERE id = $1`, [versionId]);
  if (duplicate) {
    return `Version already exists: ${versionId}`;
  }
  await execute(
    `INSERT INTO versions (id, project, version, build_number, status, release_notes)
     VALUES ($1, $2, $3, $4, $5, $6)`,
    [versionId, projectKey, version, build_number || null, status, release_notes || null]
  );
  const buildSuffix = build_number ? `\n Build: ${build_number}` : '';
  return `Created version: ${versionId}\n Version: ${version}\n Project: ${projectKey}\n Status: ${status}${buildSuffix}`;
}
/**
 * List versions with optional project/status filters.
 *
 * Output: one line per version with a status icon, done/total task counts,
 * git tag, and release date; in-progress versions sort first.
 */
export async function versionList(args: VersionListArgs): Promise<string> {
  const { project, status, limit = 20 } = args;
  let whereClause = 'WHERE 1=1';
  const params: unknown[] = [];
  let paramIndex = 1;
  if (project) {
    const projectKey = await getProjectKey(project);
    whereClause += ` AND v.project = $${paramIndex++}`;
    params.push(projectKey);
  }
  if (status) {
    whereClause += ` AND v.status = $${paramIndex++}`;
    params.push(status);
  }
  params.push(limit);
  // release_date is selected pre-formatted as text, so widen the row type here
  // instead of double-casting (`as unknown as`) per row below.
  const versions = await query<Version & { task_count: number; open_count: number; release_date: string | null }>(
    `SELECT v.id, v.version, v.status, v.project, v.build_number, v.git_tag,
            to_char(v.release_date, 'YYYY-MM-DD') as release_date,
            COUNT(t.id) as task_count,
            COUNT(t.id) FILTER (WHERE t.status != 'completed') as open_count
       FROM versions v
       LEFT JOIN tasks t ON t.version_id = v.id
     ${whereClause}
      GROUP BY v.id, v.version, v.status, v.project, v.build_number, v.git_tag, v.release_date, v.created_at
      ORDER BY
        CASE v.status WHEN 'in_progress' THEN 0 WHEN 'planned' THEN 1 WHEN 'released' THEN 2 ELSE 3 END,
        v.created_at DESC
      LIMIT $${paramIndex}`,
    params
  );
  if (versions.length === 0) {
    return `No versions found${project ? ` for project ${project}` : ''}`;
  }
  const lines = versions.map(v => {
    const statusIcon = v.status === 'released' ? '[R]' : v.status === 'in_progress' ? '[>]' : v.status === 'archived' ? '[A]' : '[ ]';
    // NOTE(review): COUNT() typically arrives from the pg driver as a string;
    // the comparison/arithmetic below relies on JS coercion — confirm typing.
    const progress = v.task_count > 0 ? ` (${v.task_count - v.open_count}/${v.task_count} tasks)` : '';
    const tag = v.git_tag ? ` [${v.git_tag}]` : '';
    const date = v.release_date ? ` - ${v.release_date}` : '';
    return `${statusIcon} ${v.id}: ${v.version}${tag}${progress}${date}`;
  });
  return `Versions${project ? ` (${project})` : ''}:\n\n${lines.join('\n')}`;
}
/**
 * Show version details plus its assigned tasks and any epics targeting it.
 */
export async function versionShow(id: string): Promise<string> {
  // Widen the row type with the formatted date aliases and the nullable git
  // fields so no per-field `as unknown as` casting is needed below.
  const version = await queryOne<Version & { created: string; released: string; git_tag: string | null; git_sha: string | null }>(
    `SELECT id, project, version, build_number, status, release_notes, git_tag, git_sha,
            to_char(created_at, 'YYYY-MM-DD HH24:MI') as created,
            to_char(release_date, 'YYYY-MM-DD') as released
       FROM versions WHERE id = $1`,
    [id]
  );
  if (!version) {
    return `Version not found: ${id}`;
  }
  let output = `# ${version.id}\n\n`;
  output += `**Version:** ${version.version}\n`;
  output += `**Project:** ${version.project}\n`;
  output += `**Status:** ${version.status}\n`;
  if (version.build_number) {
    output += `**Build:** ${version.build_number}\n`;
  }
  if (version.git_tag) {
    output += `**Git Tag:** ${version.git_tag}\n`;
  }
  if (version.git_sha) {
    output += `**Git SHA:** ${version.git_sha}\n`;
  }
  output += `**Created:** ${version.created}\n`;
  if (version.released) {
    output += `**Released:** ${version.released}\n`;
  }
  if (version.release_notes) {
    output += `\n**Release Notes:**\n${version.release_notes}\n`;
  }
  // Tasks assigned to this version: active first, then by priority.
  const tasks = await query<Task>(
    `SELECT id, title, status, priority, type
       FROM tasks
      WHERE version_id = $1
      ORDER BY
        CASE status WHEN 'in_progress' THEN 0 WHEN 'open' THEN 1 WHEN 'blocked' THEN 2 ELSE 3 END,
        CASE priority WHEN 'P0' THEN 0 WHEN 'P1' THEN 1 WHEN 'P2' THEN 2 ELSE 3 END`,
    [id]
  );
  if (tasks.length > 0) {
    const done = tasks.filter(t => t.status === 'completed').length;
    output += `\n**Tasks:** (${done}/${tasks.length} done)\n`;
    for (const t of tasks) {
      const statusIcon = t.status === 'completed' ? '[x]' : t.status === 'in_progress' ? '[>]' : t.status === 'blocked' ? '[!]' : '[ ]';
      output += ` ${statusIcon} ${t.priority} ${t.id}: ${t.title}\n`;
    }
  } else {
    output += `\n**Tasks:** None assigned\n`;
  }
  // Epics targeting this version.
  const epics = await query<{ id: string; title: string; status: string }>(
    `SELECT id, title, status FROM epics WHERE target_version_id = $1`,
    [id]
  );
  if (epics.length > 0) {
    output += `\n**Epics:**\n`;
    for (const e of epics) {
      const statusIcon = e.status === 'completed' ? '[x]' : e.status === 'in_progress' ? '[>]' : '[ ]';
      output += ` ${statusIcon} ${e.id}: ${e.title}\n`;
    }
  }
  return output;
}
/**
 * Partially update a version. Only provided fields are written; `status` and
 * `release_date` are skipped when falsy, the others only when undefined
 * (so empty strings can clear git_tag/git_sha/release_notes).
 */
export async function versionUpdate(args: VersionUpdateArgs): Promise<string> {
  const { id, status, git_tag, git_sha, release_notes, release_date } = args;
  const setClauses: string[] = [];
  const values: unknown[] = [];
  // Append one "col = $n" clause per provided field, numbering by position.
  const addClause = (column: string, value: unknown) => {
    values.push(value);
    setClauses.push(`${column} = $${values.length}`);
  };
  if (status) addClause('status', status);
  if (git_tag !== undefined) addClause('git_tag', git_tag);
  if (git_sha !== undefined) addClause('git_sha', git_sha);
  if (release_notes !== undefined) addClause('release_notes', release_notes);
  if (release_date) addClause('release_date', release_date);
  if (setClauses.length === 0) {
    return 'No updates specified';
  }
  values.push(id);
  const affected = await execute(
    `UPDATE versions SET ${setClauses.join(', ')} WHERE id = $${values.length}`,
    values
  );
  return affected === 0 ? `Version not found: ${id}` : `Updated: ${id}`;
}
/**
 * Mark a version as released: sets status + release_date, and optionally the
 * git tag. Refuses to re-release an already-released version.
 */
export async function versionRelease(args: { id: string; git_tag?: string }): Promise<string> {
  const { id, git_tag } = args;
  // Look up current state so we can refuse double-releases.
  const version = await queryOne<{ id: string; status: string; version: string }>(
    `SELECT id, status, version FROM versions WHERE id = $1`,
    [id]
  );
  if (!version) {
    return `Version not found: ${id}`;
  }
  if (version.status === 'released') {
    return `Version already released: ${id}`;
  }
  // status + release_date always change; git_tag only when provided.
  const setClauses = ['status = $1', 'release_date = NOW()'];
  const values: unknown[] = ['released'];
  if (git_tag) {
    values.push(git_tag);
    setClauses.push(`git_tag = $${values.length}`);
  }
  values.push(id);
  await execute(
    `UPDATE versions SET ${setClauses.join(', ')} WHERE id = $${values.length}`,
    values
  );
  const tagNote = git_tag ? ` tagged as ${git_tag}` : '';
  return `Released: ${id} (${version.version})${tagNote}`;
}
/**
 * Point a task at a version. The version must exist; the task row is touched
 * (updated_at) as part of the assignment.
 */
export async function versionAssignTask(args: { task_id: string; version_id: string }): Promise<string> {
  const { task_id, version_id } = args;
  // Validate the target version before mutating the task.
  const target = await queryOne<{ id: string }>(`SELECT id FROM versions WHERE id = $1`, [version_id]);
  if (!target) {
    return `Version not found: ${version_id}`;
  }
  const affected = await execute(
    `UPDATE tasks SET version_id = $1, updated_at = NOW() WHERE id = $2`,
    [version_id, task_id]
  );
  return affected === 0 ? `Task not found: ${task_id}` : `Assigned ${task_id} to version ${version_id}`;
}

View File

@@ -10,6 +10,7 @@ export interface Task {
priority: 'P0' | 'P1' | 'P2' | 'P3';
version_id?: string;
epic_id?: string;
planning_mode_required?: boolean | null;
created_at: Date;
updated_at: Date;
completed_at?: Date;

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env bash
# Wrapper script for task-mcp with hardcoded env vars
export DB_HOST="infra.agiliton.internal"
export DB_HOST="postgres.agiliton.internal"
export DB_PORT="5432"
export DB_NAME="agiliton"
export DB_USER="agiliton"