diff --git a/docs/v2/integrations/anthropic.mdx b/docs/v2/integrations/anthropic.mdx
index e80ed48e1..a0b9a3261 100644
--- a/docs/v2/integrations/anthropic.mdx
+++ b/docs/v2/integrations/anthropic.mdx
@@ -129,9 +129,14 @@ client = anthropic.Anthropic(api_key="")
 # Define tools
 tools = [
     {
+        "type": "custom",
         "name": "get_current_time",
         "description": "Get the current date and time",
-        "input_schema": {},
+        "input_schema": {
+            "type": "object",
+            "properties": {},
+            "required": []
+        }
     }
 ]
@@ -149,9 +154,9 @@ def get_current_time():
 )
 
 # Handle tool use
 if message.content[0].type == "tool_use":
-    tool_use = message.content[0].tool_use
+    tool_use = message.content[0]
     tool_name = tool_use.name
 
 if tool_name == "get_current_time":
     tool_response = get_current_time()
@@ -162,18 +167,17 @@ if message.content[0].type == "tool_use":
     max_tokens=1000,
     messages=[
         {"role": "user", "content": "What time is it now?"},
-        {"role": "assistant", "content": [
-            {"type": "tool_use", "tool_use": {
-                "name": "get_current_time",
-                "input": {}
-            }}
-        ]},
-        {"role": "user", "content": [
-            {"type": "tool_result", "tool_result": {
-                "tool_name": "get_current_time",
-                "content": json.dumps(tool_response)
-            }}
-        ]}
+        {"role": "assistant", "content": message.content},
+        {
+            "role": "user",
+            "content": [
+                {
+                    "type": "tool_result",
+                    "tool_use_id": tool_use.id,
+                    "content": json.dumps(tool_response)
+                }
+            ]
+        }
     ]
 )
diff --git a/docs/v2/integrations/crewai.mdx b/docs/v2/integrations/crewai.mdx index 8b5198692..6ed25f911 100644 --- a/docs/v2/integrations/crewai.mdx +++ b/docs/v2/integrations/crewai.mdx @@ -21,55 +21,65 @@ import EnvTooltip from '/snippets/add-env-tooltip.mdx' - - ```python python - import agentops - from crewai import Agent, Task, Crew, LLM - import os - from dotenv import load_dotenv - - # Load environment variables - 
load_dotenv() - OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") or "YOUR_OPENAI_API_KEY" - AGENTOPS_API_KEY = os.getenv("AGENTOPS_API_KEY") or "YOUR_AGENTOPS_API_KEY" - - # Initialize AgentOps at the start of your application - agentops.init(AGENTOPS_API_KEY) - - # Set up your CrewAI elements as you normally would - # AgentOps will automatically track all interactions - llm = LLM(model="openai/gpt-4o", api_key=OPENAI_API_KEY) - - researcher = Agent( - role="Researcher", - goal="Find accurate information about a topic", - backstory="You're an expert researcher with keen attention to detail", - llm=llm - ) - - research_task = Task( - description="Research the latest developments in quantum computing", - expected_output="A comprehensive report on the latest breakthroughs in quantum computing, including advancements in quantum algorithms, hardware, and potential applications.", - agent=researcher - ) - - crew = Crew( - agents=[researcher], - tasks=[research_task], - verbose=True - ) - - # Run the crew - AgentOps will track everything - result = crew.kickoff() - ``` - + + ```python python + import os + from dotenv import load_dotenv + import agentops + from crewai import Agent, Task, Crew + from langchain_openai import ChatOpenAI + + # Load environment variables + load_dotenv() + + # Initialize AgentOps + agentops.init(os.getenv("AGENTOPS_API_KEY")) + + # Create LLM + llm = ChatOpenAI( + model="gpt-4", + temperature=0.7, + api_key=os.getenv("OPENAI_API_KEY") + ) + + # Create an agent + researcher = Agent( + role='Researcher', + goal='Research and provide accurate information about cities and their history', + backstory='You are an expert researcher with vast knowledge of world geography and history.', + llm=llm, + verbose=True + ) + + # Create a task + research_task = Task( + description='What is the capital of France? 
Provide a detailed answer about its history, culture, and significance.', + expected_output='A comprehensive response about Paris, including its status as the capital of France, historical significance, cultural importance, and key landmarks.', + agent=researcher + ) + + # Create a crew with the researcher + crew = Crew( + agents=[researcher], + tasks=[research_task], + verbose=True + ) + + # Execute the task - AgentOps will automatically track all LLM calls + result = crew.kickoff() + + print("\nCrew Research Results:") + print(result) + ``` + - - ```python .env - AGENTOPS_API_KEY= - ``` - - Read more about environment variables in [Advanced Configuration](/v2/usage/advanced-configuration) + + ```python .env + AGENTOPS_API_KEY= + OPENAI_API_KEY= + ``` + + Read more about environment variables in [Advanced Configuration](/v2/usage/advanced-configuration) Execute your program and visit [app.agentops.ai/traces](https://app.agentops.ai/traces) to observe your Crew! 🕵️ @@ -83,6 +93,12 @@ import EnvTooltip from '/snippets/add-env-tooltip.mdx' +## Important Notes + +1. The `Task` class requires an `expected_output` field that describes what the task should produce +2. Setting `verbose=True` on both the agent and crew provides better visibility into the execution process +3. 
Tasks should have clear, detailed descriptions to guide the agent effectively + ## Crew + AgentOps Examples @@ -95,4 +111,3 @@ import EnvTooltip from '/snippets/add-env-tooltip.mdx' - diff --git a/docs/v2/integrations/gemini.mdx b/docs/v2/integrations/gemini.mdx index ace6e6d3c..504a8922d 100644 --- a/docs/v2/integrations/gemini.mdx +++ b/docs/v2/integrations/gemini.mdx @@ -48,19 +48,23 @@ print(response.text) ## Alternative Client Setup -You can also set up the client using environment variables or with Vertex AI: +You can also set up the client using Vertex AI or with environment variables: -```python Using Environment Variables + +```python Vertex AI import agentops from google import genai # Initialize AgentOps agentops.init() -# Set GOOGLE_API_KEY environment variable before running -# export GOOGLE_API_KEY='your-api-key' -client = genai.Client() +# Using Vertex AI +client = genai.Client( + vertexai=True, + project='your-project-id', + location='us-central1' +) # Generate content response = client.models.generate_content( @@ -71,19 +75,16 @@ response = client.models.generate_content( print(response.text) ``` -```python Vertex AI +```python Using Environment Variables import agentops from google import genai # Initialize AgentOps agentops.init() -# Using Vertex AI -client = genai.Client( - vertexai=True, - project='your-project-id', - location='us-central1' -) +# Set GOOGLE_API_KEY environment variable before running +# export GOOGLE_API_KEY='your-api-key' +client = genai.Client() # Generate content response = client.models.generate_content( @@ -142,10 +143,9 @@ agentops.init() client = genai.Client(api_key="YOUR_GEMINI_API_KEY") # Generate streaming content -for chunk in client.models.generate_content( +for chunk in client.models.generate_content_stream( model='gemini-2.0-flash-001', contents='Explain quantum computing in simple terms.', - stream=True ): print(chunk.text, end="", flush=True) diff --git a/docs/v2/integrations/litellm.mdx 
b/docs/v2/integrations/litellm.mdx index e53203120..6cd83e40a 100644 --- a/docs/v2/integrations/litellm.mdx +++ b/docs/v2/integrations/litellm.mdx @@ -25,32 +25,18 @@ The simplest way to integrate AgentOps with LiteLLM is to set up the success_cal ```python Simple Integration +import os +from dotenv import load_dotenv import litellm - -# Configure LiteLLM to use AgentOps -litellm.success_callback = ["agentops"] - -# Make completion requests with LiteLLM -response = litellm.completion( - model="gpt-3.5-turbo", - messages=[{"role": "user", "content": "Hello, how are you?"}] -) - -print(response.choices[0].message.content) - -# All LiteLLM API calls are automatically tracked by AgentOps -``` - - -You can also initialize AgentOps separately for additional configuration: - - -```python With AgentOps Init -import agentops from litellm import completion -# Initialize AgentOps -agentops.init() +# Load environment variables +load_dotenv() + +# Set API keys for different providers +os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY") +os.environ["ANTHROPIC_API_KEY"] = os.getenv("ANTHROPIC_API_KEY") +os.environ["AGENTOPS_API_KEY"] = os.getenv("AGENTOPS_API_KEY") # Configure LiteLLM to use AgentOps litellm.success_callback = ["agentops"] @@ -67,40 +53,22 @@ print(response.choices[0].message.content) ``` -## Using the LiteLLM Client - - -```python Python -import litellm -from litellm import LiteLLM - -# Configure LiteLLM to use AgentOps -litellm.success_callback = ["agentops"] - -# Create a LiteLLM client -client = LiteLLM() - -# Make a completion request -response = client.completion( - model="anthropic/claude-3-opus-20240229", - messages=[{"role": "user", "content": "What are the benefits of using LiteLLM?"}] -) - -print(response.choices[0].message.content) - -# All client requests are automatically tracked by AgentOps -``` - - ## Streaming Example AgentOps also tracks streaming requests with LiteLLM: -```python Python +```python Streaming +import os +from dotenv import 
load_dotenv import litellm from litellm import completion +# Load environment variables and set API keys +load_dotenv() +os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY") +os.environ["AGENTOPS_API_KEY"] = os.getenv("AGENTOPS_API_KEY") + # Configure LiteLLM to use AgentOps litellm.success_callback = ["agentops"] @@ -115,8 +83,7 @@ response = completion( for chunk in response: if chunk.choices[0].delta.content: print(chunk.choices[0].delta.content, end="", flush=True) - -# All streaming requests are automatically tracked by AgentOps +print() # Add a newline at the end ``` @@ -125,10 +92,18 @@ for chunk in response: One of LiteLLM's key features is the ability to switch between providers easily: -```python Python +```python Multi-Provider +import os +from dotenv import load_dotenv import litellm from litellm import completion +# Load environment variables and set API keys +load_dotenv() +os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY") +os.environ["ANTHROPIC_API_KEY"] = os.getenv("ANTHROPIC_API_KEY") +os.environ["AGENTOPS_API_KEY"] = os.getenv("AGENTOPS_API_KEY") + # Configure LiteLLM to use AgentOps litellm.success_callback = ["agentops"] diff --git a/docs/v2/integrations/openai.mdx b/docs/v2/integrations/openai.mdx index f48793b1d..b5962a7d1 100644 --- a/docs/v2/integrations/openai.mdx +++ b/docs/v2/integrations/openai.mdx @@ -91,15 +91,15 @@ AgentOps tracks function-calling conversations with OpenAI models: ```python Function Calling import agentops import json - from openai import OpenAI +from openai import OpenAI # Initialize AgentOps - agentops.init() +agentops.init() # Create OpenAI client - client = OpenAI() +client = OpenAI() -# Define functions +# Define tools tools = [ { "type": "function", @@ -125,40 +125,36 @@ def get_weather(location): return json.dumps({"location": location, "temperature": "72", "unit": "fahrenheit", "forecast": ["sunny", "windy"]}) # Make a function call API request +messages = [ + {"role": "system", "content": "You 
are a helpful weather assistant."}, + {"role": "user", "content": "What's the weather like in Boston?"} +] + response = client.chat.completions.create( model="gpt-4", - messages=[ - {"role": "system", "content": "You are a helpful weather assistant."}, - {"role": "user", "content": "What's the weather like in Boston?"} - ], + messages=messages, tools=tools, - tool_choice="auto" + tool_choice="auto", ) # Process response response_message = response.choices[0].message -tool_calls = response_message.tool_calls - -if tool_calls: - # Handle tool calls - messages = [ - {"role": "system", "content": "You are a helpful weather assistant."}, - {"role": "user", "content": "What's the weather like in Boston?"}, - response_message - ] - +messages.append({"role": "assistant", "content": response_message.content, "tool_calls": response_message.tool_calls}) + +if response_message.tool_calls: # Process each tool call - for tool_call in tool_calls: + for tool_call in response_message.tool_calls: function_name = tool_call.function.name function_args = json.loads(tool_call.function.arguments) if function_name == "get_weather": function_response = get_weather(function_args.get("location")) + # Add tool response to messages messages.append( { - "tool_call_id": tool_call.id, "role": "tool", + "tool_call_id": tool_call.id, "name": function_name, "content": function_response, }