feat: add pitrader_script step type + image vision for pipeline triggers
Add a pitrader_script executor for running PITrader scripts (pi-scan,
playbook, execute_trades) as pipeline steps, with vault credential
injection and JSON output capture. Extend the claude_prompt step with
vision support (image_b64 in the trigger context). Add an image pipeline
trigger to the on_image_message handler.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
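For context, a pipeline definition wiring the new pieces together might look like the sketch below. This is a hypothetical illustration, not a schema confirmed by this commit: the type/config step layout and the vault_path, capture_json, and image_message names are assumptions; only pitrader_script, claude_prompt, the three script names, and image_b64 come from the commit itself.

    # Hypothetical pipeline exercising both new features.
    # Field names flagged as assumed are illustrative only.
    PIPELINE = {
        "trigger": "image_message",  # assumed name for the on_image_message trigger
        "steps": [
            {
                "type": "claude_prompt",
                "config": {
                    # image_b64 flows in from the trigger context, so the
                    # step config only needs the prompt itself
                    "prompt": "Describe the chart in this screenshot.",
                },
            },
            {
                "type": "pitrader_script",
                "config": {
                    "script": "pi-scan",  # also: playbook, execute_trades
                    "vault_path": "secret/pitrader",  # assumed key for credential injection
                    "capture_json": True,  # assumed flag for JSON output capture
                },
            },
        ],
    }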
@@ -7,12 +7,17 @@ logger = logging.getLogger(__name__)
 async def execute_claude_prompt(
     config: dict,
     context: dict | None = None,
     llm=None,
     default_model: str = "claude-haiku",
     escalation_model: str = "claude-sonnet",
     **_kwargs,
 ) -> str:
-    """Send a prompt to Claude and return the response."""
+    """Send a prompt to Claude and return the response.
+
+    Supports vision: if config contains 'image_b64' or trigger context has
+    'image_b64', the image is included as a vision content block.
+    """
     if not llm:
         raise RuntimeError("LLM client not configured")
 
@@ -23,9 +28,34 @@ async def execute_claude_prompt(
     model_name = config.get("model", "default")
     model = escalation_model if model_name == "escalation" else default_model
 
+    # Check for image data (from config or trigger context)
+    image_b64 = config.get("image_b64", "")
+    image_mime = config.get("image_mime", "image/png")
+    if not image_b64 and context:
+        trigger = context.get("trigger", {})
+        image_b64 = trigger.get("image_b64", "")
+        image_mime = trigger.get("mime_type", "image/png")
+
+    # Build message content
+    if image_b64:
+        content = [
+            {
+                "type": "image_url",
+                "image_url": {
+                    "url": f"data:{image_mime};base64,{image_b64}",
+                },
+            },
+            {
+                "type": "text",
+                "text": prompt,
+            },
+        ]
+    else:
+        content = prompt
+
     response = await llm.chat.completions.create(
         model=model,
-        messages=[{"role": "user", "content": prompt}],
+        messages=[{"role": "user", "content": content}],
         max_tokens=4096,
     )
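As a usage sketch for the vision path, the snippet below calls execute_claude_prompt with an image-bearing trigger context. The module import path and the OpenAI-compatible AsyncOpenAI client are assumptions; the diff only guarantees that llm exposes chat.completions.create and that the trigger carries image_b64 and mime_type.

    import asyncio
    import base64

    from openai import AsyncOpenAI  # assumption: any OpenAI-compatible async client

    # from pipeline.steps import execute_claude_prompt  # assumed module path

    async def main() -> None:
        llm = AsyncOpenAI(base_url="http://localhost:8000/v1", api_key="unused")

        with open("chart.png", "rb") as f:
            image_b64 = base64.b64encode(f.read()).decode("ascii")

        # Shape mirrors what the executor reads from context["trigger"]
        context = {"trigger": {"image_b64": image_b64, "mime_type": "image/png"}}
        config = {"prompt": "Summarize this chart.", "model": "escalation"}

        reply = await execute_claude_prompt(config, context=context, llm=llm)
        print(reply)

    asyncio.run(main())

Note that a config-level image_b64 takes precedence over the trigger context (the executor only falls back to the trigger when config carries no image), and packing the image as a data: URL inside an image_url block keeps the payload compatible with OpenAI-style multimodal APIs rather than a provider-specific format.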