Sequential step executor (script, claude_prompt, approval, api_call, template, skyvern placeholder), reaction-based approvals, file upload trigger matching, portal API state sync. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
33 lines
889 B
Python
33 lines
889 B
Python
"""Claude prompt step — call LLM via LiteLLM proxy."""
|
|
|
|
import logging
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
async def execute_claude_prompt(
    config: dict,
    llm=None,
    default_model: str = "claude-haiku",
    escalation_model: str = "claude-sonnet",
    **_kwargs,
) -> str:
    """Send a prompt to Claude and return the response.

    Args:
        config: Step configuration. Must contain a non-empty ``"prompt"``.
            An optional ``"model"`` key equal to ``"escalation"`` routes the
            call to the stronger model; any other value (including the
            implicit ``"default"``) uses the default model.
        llm: OpenAI-compatible async client (e.g. a LiteLLM proxy client).
        default_model: Model name used unless escalation is requested.
        escalation_model: Model name used when ``config["model"]`` is
            ``"escalation"``.
        **_kwargs: Extra executor keywords, accepted and ignored.

    Returns:
        The assistant message text, or ``""`` when the response is empty.

    Raises:
        RuntimeError: If no LLM client was supplied.
        ValueError: If ``config`` lacks a usable ``"prompt"`` field.
    """
    if not llm:
        raise RuntimeError("LLM client not configured")

    prompt = config.get("prompt", "")
    if not prompt:
        raise ValueError("claude_prompt step requires 'prompt' field")

    # Only the literal tier "escalation" selects the stronger model; every
    # other value falls through to the default.
    wants_escalation = config.get("model", "default") == "escalation"
    chosen_model = escalation_model if wants_escalation else default_model

    completion = await llm.chat.completions.create(
        model=chosen_model,
        messages=[{"role": "user", "content": prompt}],
        max_tokens=4096,
    )

    # The provider may return None for an empty message; normalize to "".
    text = completion.choices[0].message.content
    return text or ""
|