Skip to content

feat: Add Programmatic Configuration Support for Tracing Decorators #495

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 3 commits into from
Jul 31, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
141 changes: 141 additions & 0 deletions examples/tracing/programmatic_configuration.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,141 @@
"""
Example: Programmatic Configuration for Openlayer Tracing

This example demonstrates how to configure Openlayer tracing programmatically
using the configure() function, instead of relying on environment variables.
"""

import os
import openai
from openlayer.lib import configure, trace, trace_openai


def example_environment_variables():
    """Demonstrate the traditional, environment-variable-driven setup."""
    print("=== Environment Variables Approach ===")

    # Provide every credential through the process environment (the
    # traditional approach): Openlayer reads the first two, OpenAI the third.
    os.environ.update(
        {
            "OPENLAYER_API_KEY": "your_openlayer_api_key_here",
            "OPENLAYER_INFERENCE_PIPELINE_ID": "your_pipeline_id_here",
            "OPENAI_API_KEY": "your_openai_api_key_here",
        }
    )

    @trace()
    def generate_response(query: str) -> str:
        """Generate a response using OpenAI."""
        # Wrap the OpenAI client so its completion calls are traced too.
        client = trace_openai(openai.OpenAI())
        completion = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": query}],
            max_tokens=100,
        )
        return completion.choices[0].message.content

    # Exercise the traced function once.
    result = generate_response("What is machine learning?")
    print(f"Response: {result}")


def example_programmatic_configuration():
    """Demonstrate the configure()-based setup (no Openlayer env vars)."""
    print("\n=== Programmatic Configuration Approach ===")

    # Point the tracer at Openlayer in code instead of via the environment.
    configure(
        api_key="your_openlayer_api_key_here",
        inference_pipeline_id="your_pipeline_id_here",
        # base_url="https://api.openlayer.com/v1"  # Optional: custom base URL
    )

    # OpenAI itself still reads its key from the environment.
    os.environ["OPENAI_API_KEY"] = "your_openai_api_key_here"

    @trace()
    def generate_response_programmatic(query: str) -> str:
        """Generate a response using OpenAI with programmatic configuration."""
        # Wrap the OpenAI client so its completion calls are traced too.
        client = trace_openai(openai.OpenAI())
        completion = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": query}],
            max_tokens=100,
        )
        return completion.choices[0].message.content

    # Exercise the traced function once.
    answer = generate_response_programmatic("What is deep learning?")
    print(f"Response: {answer}")


def example_per_decorator_override():
    """Demonstrate overriding the pipeline ID on a per-decorator basis."""
    print("\n=== Per-Decorator Pipeline ID Override ===")

    # Establish module-wide defaults first.
    configure(
        api_key="your_openlayer_api_key_here",
        inference_pipeline_id="default_pipeline_id",
    )

    @trace()
    def default_pipeline_function(query: str) -> str:
        # Inherits the default pipeline ID set by configure() above.
        return f"Response to: {query}"

    @trace(inference_pipeline_id="specific_pipeline_id")
    def specific_pipeline_function(query: str) -> str:
        # The decorator argument takes precedence over the configured default.
        return f"Specific response to: {query}"

    # Run each variant once; they report to different pipelines.
    for traced_fn, question in (
        (default_pipeline_function, "Question 1"),
        (specific_pipeline_function, "Question 2"),
    ):
        traced_fn(question)

    print("Both functions executed with different pipeline IDs")


def example_mixed_configuration():
    """Demonstrate combining environment-variable and programmatic setup."""
    print("\n=== Mixed Configuration Approach ===")

    # API key comes from the environment; the pipeline ID is set in code.
    os.environ["OPENLAYER_API_KEY"] = "your_openlayer_api_key_here"
    configure(inference_pipeline_id="programmatic_pipeline_id")

    @trace()
    def mixed_config_function(query: str) -> str:
        """Function using mixed configuration."""
        return f"Mixed config response to: {query}"

    # Exercise the traced function once.
    reply = mixed_config_function("What is the best approach?")
    print(f"Response: {reply}")


if __name__ == "__main__":
    print("Openlayer Tracing Configuration Examples")
    print("=" * 50)

    # These examples ship with placeholder credentials and will raise until
    # real keys and pipeline IDs are substituted.
    print("Note: Replace placeholder API keys and pipeline IDs with real values before running.")
    print()

    demos = (
        example_environment_variables,
        example_programmatic_configuration,
        example_per_decorator_override,
        example_mixed_configuration,
    )
    try:
        for demo in demos:
            demo()
    except Exception as e:
        # Expected path when running with the placeholder credentials above.
        print(f"Example failed (expected with placeholder keys): {e}")
        print("\nTo run this example successfully:")
        print("1. Replace placeholder API keys with real values")
        print("2. Replace pipeline IDs with real Openlayer pipeline IDs")
        print("3. Ensure you have valid OpenAI and Openlayer accounts")
15 changes: 5 additions & 10 deletions src/openlayer/lib/__init__.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""Openlayer lib."""

__all__ = [
"configure",
"trace",
"trace_anthropic",
"trace_openai",
Expand All @@ -15,6 +16,7 @@
# ---------------------------------- Tracing --------------------------------- #
from .tracing import tracer

configure = tracer.configure
trace = tracer.trace
trace_async = tracer.trace_async

Expand Down Expand Up @@ -93,18 +95,11 @@ def trace_bedrock(client):
try:
import boto3
except ImportError:
raise ImportError(
"boto3 is required for Bedrock tracing. Install with: pip install boto3"
)
raise ImportError("boto3 is required for Bedrock tracing. Install with: pip install boto3")

from .integrations import bedrock_tracer

# Check if it's a boto3 client for bedrock-runtime service
if (
not hasattr(client, "_service_model")
or client._service_model.service_name != "bedrock-runtime"
):
raise ValueError(
"Invalid client. Please provide a boto3 bedrock-runtime client."
)
if not hasattr(client, "_service_model") or client._service_model.service_name != "bedrock-runtime":
raise ValueError("Invalid client. Please provide a boto3 bedrock-runtime client.")
return bedrock_tracer.trace_bedrock(client)
99 changes: 71 additions & 28 deletions src/openlayer/lib/tracing/tracer.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,11 +23,50 @@
TRUE_LIST = ["true", "on", "1"]

_publish = utils.get_env_variable("OPENLAYER_DISABLE_PUBLISH") not in TRUE_LIST
_verify_ssl = (
utils.get_env_variable("OPENLAYER_VERIFY_SSL") or "true"
).lower() in TRUE_LIST
_verify_ssl = (utils.get_env_variable("OPENLAYER_VERIFY_SSL") or "true").lower() in TRUE_LIST
_client = None

# Configuration variables for programmatic setup
_configured_api_key: Optional[str] = None
_configured_pipeline_id: Optional[str] = None
_configured_base_url: Optional[str] = None


def configure(
    api_key: Optional[str] = None,
    inference_pipeline_id: Optional[str] = None,
    base_url: Optional[str] = None,
) -> None:
    """Configure the Openlayer tracer with custom settings.

    This function allows you to programmatically set the API key, inference pipeline ID,
    and base URL for the Openlayer client, instead of relying on environment variables.

    Note:
        All three module-level settings are overwritten on every call, so any
        argument left as ``None`` clears a value set by a previous ``configure()``
        call and falls back to the corresponding environment variable.

    Args:
        api_key: The Openlayer API key. If not provided, falls back to OPENLAYER_API_KEY environment variable.
        inference_pipeline_id: The default inference pipeline ID to use for tracing.
            If not provided, falls back to OPENLAYER_INFERENCE_PIPELINE_ID environment variable.
        base_url: The base URL for the Openlayer API. If not provided, falls back to
            OPENLAYER_BASE_URL environment variable or the default.

    Examples:
        >>> import openlayer.lib.tracing.tracer as tracer
        >>> # Configure with API key and pipeline ID
        >>> tracer.configure(api_key="your_api_key_here", inference_pipeline_id="your_pipeline_id_here")
        >>> # Now use the decorators normally
        >>> @tracer.trace()
        >>> def my_function():
        ...     return "result"
    """
    global _configured_api_key, _configured_pipeline_id, _configured_base_url, _client

    # Store the new settings unconditionally (None means "use env fallback").
    _configured_api_key = api_key
    _configured_pipeline_id = inference_pipeline_id
    _configured_base_url = base_url

    # Reset the client so it gets recreated with new configuration
    _client = None


def _get_client() -> Optional[Openlayer]:
"""Get or create the Openlayer client with lazy initialization."""
Expand All @@ -37,13 +76,24 @@ def _get_client() -> Optional[Openlayer]:

if _client is None:
# Lazy initialization - create client when first needed
client_kwargs = {}

# Use configured API key if available, otherwise fall back to environment variable
if _configured_api_key is not None:
client_kwargs["api_key"] = _configured_api_key

# Use configured base URL if available, otherwise fall back to environment variable
if _configured_base_url is not None:
client_kwargs["base_url"] = _configured_base_url

if _verify_ssl:
_client = Openlayer()
_client = Openlayer(**client_kwargs)
else:
_client = Openlayer(
http_client=DefaultHttpxClient(
verify=False,
),
**client_kwargs,
)
return _client

Expand Down Expand Up @@ -163,9 +213,7 @@ def wrapper(*func_args, **func_kwargs):
if step_kwargs.get("name") is None:
step_kwargs["name"] = func.__name__

with create_step(
*step_args, inference_pipeline_id=inference_pipeline_id, **step_kwargs
) as step:
with create_step(*step_args, inference_pipeline_id=inference_pipeline_id, **step_kwargs) as step:
output = exception = None
try:
output = func(*func_args, **func_kwargs)
Expand Down Expand Up @@ -252,14 +300,12 @@ async def __anext__(self):
# Initialize tracing on first iteration only
if not self._trace_initialized:
self._original_gen = func(*func_args, **func_kwargs)
self._step, self._is_root_step, self._token = (
_create_and_initialize_step(
step_name=step_name,
step_type=enums.StepType.USER_CALL,
inputs=None,
output=None,
metadata=None,
)
self._step, self._is_root_step, self._token = _create_and_initialize_step(
step_name=step_name,
step_type=enums.StepType.USER_CALL,
inputs=None,
output=None,
metadata=None,
)
self._inputs = _extract_function_inputs(
func_signature=func_signature,
Expand Down Expand Up @@ -453,9 +499,7 @@ def _create_and_initialize_step(
return new_step, is_root_step, token


def _handle_trace_completion(
is_root_step: bool, step_name: str, inference_pipeline_id: Optional[str] = None
) -> None:
def _handle_trace_completion(is_root_step: bool, step_name: str, inference_pipeline_id: Optional[str] = None) -> None:
"""Handle trace completion and data streaming."""
if is_root_step:
logger.debug("Ending the trace...")
Expand Down Expand Up @@ -486,8 +530,12 @@ def _handle_trace_completion(
)
if _publish:
try:
inference_pipeline_id = inference_pipeline_id or utils.get_env_variable(
"OPENLAYER_INFERENCE_PIPELINE_ID"
# Use provided pipeline_id, or fall back to configured default,
# or finally to environment variable
inference_pipeline_id = (
inference_pipeline_id
or _configured_pipeline_id
or utils.get_env_variable("OPENLAYER_INFERENCE_PIPELINE_ID")
)
client = _get_client()
if client:
Expand All @@ -503,8 +551,7 @@ def _handle_trace_completion(
except Exception as err: # pylint: disable=broad-except
logger.error(traceback.format_exc())
logger.error(
"Could not stream data to Openlayer (pipeline_id: %s, base_url: %s)"
" Error: %s",
"Could not stream data to Openlayer (pipeline_id: %s, base_url: %s) Error: %s",
inference_pipeline_id,
client.base_url,
err,
Expand Down Expand Up @@ -536,9 +583,7 @@ def _process_wrapper_inputs_and_outputs(
func_kwargs=func_kwargs,
context_kwarg=context_kwarg,
)
_finalize_step_logging(
step=step, inputs=inputs, output=output, start_time=step.start_time
)
_finalize_step_logging(step=step, inputs=inputs, output=output, start_time=step.start_time)


def _extract_function_inputs(
Expand Down Expand Up @@ -606,9 +651,7 @@ def _finalize_async_generator_step(
) -> None:
"""Finalize async generator step - called when generator is consumed."""
_current_step.reset(token)
_finalize_step_logging(
step=step, inputs=inputs, output=output, start_time=step.start_time
)
_finalize_step_logging(step=step, inputs=inputs, output=output, start_time=step.start_time)
_handle_trace_completion(
is_root_step=is_root_step,
step_name=step_name,
Expand Down
Loading
pFad - Phonifier reborn

Pfad - The Proxy pFad of © 2024 Garber Painting. All rights reserved.

Note: This service is not intended for secure transactions such as banking, social media, email, or purchasing. Use at your own risk. We assume no liability whatsoever for broken pages.


Alternative Proxies:

Alternative Proxy

pFad Proxy

pFad v3 Proxy

pFad v4 Proxy