from haliosai import HaliosGuard
from openai import AsyncOpenAI

async def process_with_guardrails():
    async with HaliosGuard(agent_id="your-agent-id") as guard:
        client = AsyncOpenAI()
        messages = [{"role": "user", "content": "Hello!"}]

        # Evaluate the request before the LLM call
        req_result = await guard.evaluate(messages, "request")
        if req_result.get("guardrails_triggered", 0) > 0:
            return "Request blocked by guardrails"

        # Proceed with the LLM call
        response = await client.chat.completions.create(
            model="gpt-4o-mini",
            messages=messages,
            max_tokens=200
        )
        assistant_message = response.choices[0].message.content

        # Evaluate the response after the LLM call
        resp_messages = messages + [{"role": "assistant", "content": assistant_message}]
        resp_result = await guard.evaluate(resp_messages, "response")
        if resp_result.get("guardrails_triggered", 0) > 0:
            return "Response blocked by guardrails"

        return assistant_message
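
# A minimal sketch of running the coroutine above; assumes this module is
# executed as a script and that your OpenAI and HaliosAI credentials are
# already configured in the environment.
import asyncio

if __name__ == "__main__":
    print(asyncio.run(process_with_guardrails()))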