feat: add pipeline engine with approval flow and file triggers
Sequential step executor (script, claude_prompt, approval, api_call, template, skyvern placeholder), reaction-based approvals, file upload trigger matching, portal API state sync. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
32
pipelines/steps/claude_prompt.py
Normal file
32
pipelines/steps/claude_prompt.py
Normal file
@@ -0,0 +1,32 @@
|
||||
"""Claude prompt step — call LLM via LiteLLM proxy."""

import logging

# Module-level logger per stdlib convention. NOTE(review): not referenced by
# any code visible in this module — presumably reserved for future step-level
# logging; confirm before removing.
logger = logging.getLogger(__name__)
async def execute_claude_prompt(
    config: dict,
    llm=None,
    default_model: str = "claude-haiku",
    escalation_model: str = "claude-sonnet",
    **_kwargs,
) -> str:
    """Send a single-turn prompt to Claude and return the reply text.

    Args:
        config: Step configuration. Requires a non-empty ``"prompt"`` value.
            An optional ``"model"`` key selects ``escalation_model`` when set
            to ``"escalation"``; any other value (or absence) selects
            ``default_model``.
        llm: OpenAI-compatible async client (e.g. a LiteLLM proxy client).
            Must be provided and truthy.
        default_model: Model name used unless escalation is requested.
        escalation_model: Model name used when ``config["model"] == "escalation"``.
        **_kwargs: Ignored; absorbs extra executor-supplied arguments.

    Returns:
        The assistant message content, or ``""`` when the content is falsy.

    Raises:
        RuntimeError: If no LLM client was supplied.
        ValueError: If ``config`` lacks a ``"prompt"`` value.
    """
    # Guard clauses: fail fast on missing client or missing prompt.
    if not llm:
        raise RuntimeError("LLM client not configured")

    prompt = config.get("prompt", "")
    if not prompt:
        raise ValueError("claude_prompt step requires 'prompt' field")

    # Only the literal "escalation" selector routes to the stronger model;
    # every other value falls back to the default.
    if config.get("model", "default") == "escalation":
        chosen_model = escalation_model
    else:
        chosen_model = default_model

    completion = await llm.chat.completions.create(
        model=chosen_model,
        messages=[{"role": "user", "content": prompt}],
        max_tokens=4096,
    )

    reply = completion.choices[0].message.content
    # Normalize falsy content (None / empty) to an empty string.
    return reply or ""
|
||||
Reference in New Issue
Block a user