"""Claude prompt step — call LLM via LiteLLM proxy."""

import logging

logger = logging.getLogger(__name__)

# Values the step's "model" config field is expected to take.
_KNOWN_MODEL_NAMES = {"default", "escalation"}


async def execute_claude_prompt(
    config: dict,
    llm=None,
    default_model: str = "claude-haiku",
    escalation_model: str = "claude-sonnet",
    **_kwargs,
) -> str:
    """Send a prompt to Claude and return the response text.

    Args:
        config: Step configuration. Required key ``prompt`` (non-empty
            string). Optional keys: ``model`` ("default" or "escalation";
            any other value falls back to the default model, with a
            warning) and ``max_tokens`` (int, default 4096).
        llm: Async OpenAI-compatible chat client (e.g. LiteLLM proxy).
        default_model: Model used unless escalation is requested.
        escalation_model: Model used when ``config["model"] == "escalation"``.
        **_kwargs: Ignored; accepted so the step runner may pass extras.

    Returns:
        The assistant message content, or "" when the response content
        is empty/None.

    Raises:
        RuntimeError: If no LLM client is configured.
        ValueError: If ``config`` lacks a non-empty "prompt" field.
    """
    if not llm:
        raise RuntimeError("LLM client not configured")

    prompt = config.get("prompt", "")
    if not prompt:
        raise ValueError("claude_prompt step requires 'prompt' field")

    model_name = config.get("model", "default")
    if model_name not in _KNOWN_MODEL_NAMES:
        # Unknown names were previously coerced to the default model in
        # silence; keep that fallback but surface the misconfiguration.
        logger.warning("Unknown model name %r; using default model", model_name)
    model = escalation_model if model_name == "escalation" else default_model

    # Previously hard-coded at 4096; allow a per-step override.
    max_tokens = config.get("max_tokens", 4096)

    logger.debug("claude_prompt: model=%s max_tokens=%s", model, max_tokens)
    response = await llm.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        max_tokens=max_tokens,
    )
    # content may be None for an empty completion; normalize to "".
    return response.choices[0].message.content or ""