feat: Add Gemini API integration #650

Merged Jan 18, 2025 · 44 commits (changes shown from 29 commits)

Commits
1939b6d
feat: Add Gemini API integration
devin-ai-integration[bot] Jan 17, 2025
9e4f471
fix: Pass session correctly to track LLM events in Gemini provider
devin-ai-integration[bot] Jan 17, 2025
b95fe6e
feat: Add Gemini integration with example notebook
devin-ai-integration[bot] Jan 17, 2025
72e985a
fix: Add null checks and improve test coverage for Gemini provider
devin-ai-integration[bot] Jan 17, 2025
6df9b7e
style: Add blank lines between test functions
devin-ai-integration[bot] Jan 17, 2025
200dcf1
test: Improve test coverage for Gemini provider
devin-ai-integration[bot] Jan 17, 2025
cd31098
style: Fix formatting in test_gemini.py
devin-ai-integration[bot] Jan 17, 2025
fef63a9
test: Add comprehensive test coverage for edge cases and error handling
devin-ai-integration[bot] Jan 17, 2025
10900f5
test: Add graceful API key handling and skip tests when key is missing
devin-ai-integration[bot] Jan 17, 2025
4b96b0f
style: Fix formatting issues in test files
devin-ai-integration[bot] Jan 17, 2025
062f82d
style: Remove trailing whitespace in test_gemini.py
devin-ai-integration[bot] Jan 17, 2025
d418202
test: Add coverage for error handling, edge cases, and argument handl…
devin-ai-integration[bot] Jan 17, 2025
a9cea74
test: Add streaming exception handling test coverage
devin-ai-integration[bot] Jan 17, 2025
11c7343
style: Apply ruff auto-formatting to test_gemini.py
devin-ai-integration[bot] Jan 17, 2025
4f0b0fe
test: Fix type errors and improve test coverage for Gemini provider
devin-ai-integration[bot] Jan 17, 2025
1a6e1ca
test: Add comprehensive error handling test coverage for Gemini provider
devin-ai-integration[bot] Jan 17, 2025
9efc0f1
style: Apply ruff-format fixes to test_gemini.py
devin-ai-integration[bot] Jan 17, 2025
071a610
fix: Configure Gemini API key before model initialization
devin-ai-integration[bot] Jan 17, 2025
970c318
fix: Update GeminiProvider to properly handle instance methods
devin-ai-integration[bot] Jan 17, 2025
18143b5
fix: Use provider instance in closure for proper method binding
devin-ai-integration[bot] Jan 17, 2025
a27b2e4
fix: Use class-level storage for original method
devin-ai-integration[bot] Jan 17, 2025
aed3a1b
fix: Use module-level storage for original method
devin-ai-integration[bot] Jan 17, 2025
8297371
style: Apply ruff-format fixes to Gemini integration
devin-ai-integration[bot] Jan 17, 2025
9c9af3a
fix: Move Gemini tests to unit test directory for proper coverage rep…
devin-ai-integration[bot] Jan 17, 2025
bff477c
fix: Update Gemini provider to properly handle prompt extraction and …
devin-ai-integration[bot] Jan 17, 2025
f8fd56d
test: Add comprehensive test coverage for Gemini provider session han…
devin-ai-integration[bot] Jan 17, 2025
59db821
style: Apply ruff-format fixes to test files
devin-ai-integration[bot] Jan 17, 2025
f163e23
fix: Pass LlmTracker client to GeminiProvider constructor
devin-ai-integration[bot] Jan 17, 2025
6d7ee0f
remove extra files
areibman Jan 17, 2025
6e4d965
fix: Improve code efficiency and error handling in Gemini provider
devin-ai-integration[bot] Jan 17, 2025
54a9d36
chore: Clean up test files and merge remote changes
devin-ai-integration[bot] Jan 17, 2025
c845a34
test: Add comprehensive test coverage for Gemini provider
devin-ai-integration[bot] Jan 17, 2025
973e59f
fix: Set None as default values and improve test coverage
devin-ai-integration[bot] Jan 17, 2025
481a8d7
build: Add google-generativeai as test dependency
devin-ai-integration[bot] Jan 17, 2025
0871398
docs: Update examples and README for Gemini integration
devin-ai-integration[bot] Jan 17, 2025
cddab5b
add gemini logo image
the-praxs Jan 18, 2025
681cd18
add gemini to examples
the-praxs Jan 18, 2025
9e8e85e
add gemini to docs
the-praxs Jan 18, 2025
e75fa84
refactor handle_response method
the-praxs Jan 18, 2025
86dec80
cleanup gemini tracking code
the-praxs Jan 18, 2025
3384b2d
delete unit test for gemini
the-praxs Jan 18, 2025
392677a
rename and clean gemini example notebook
the-praxs Jan 18, 2025
38e2621
ruff
the-praxs Jan 18, 2025
9e3393d
update docs
the-praxs Jan 18, 2025
186 changes: 186 additions & 0 deletions agentops/llms/providers/gemini.py
@@ -0,0 +1,186 @@
from typing import Optional, Generator, Any, Dict, Union

from agentops.llms.providers.base import BaseProvider
from agentops.event import LLMEvent, ErrorEvent
from agentops.session import Session
from agentops.helpers import get_ISO_time, check_call_stack_for_agent_id
from agentops.log_config import logger
from agentops.singleton import singleton

# Store original methods at module level
_ORIGINAL_METHODS = {}


@singleton
class GeminiProvider(BaseProvider):
    """Provider for Google's Gemini API.

    This provider is automatically detected and initialized when agentops.init()
    is called and the google.generativeai package is imported. No manual
    initialization is required."""

    def __init__(self, client=None):
        """Initialize the Gemini provider.

        Args:
            client: Optional client instance. If not provided, will be set during override.
        """
        super().__init__(client)
        self._provider_name = "Gemini"

    def handle_response(
        self, response, kwargs, init_timestamp, session: Optional[Session] = None
    ) -> Union[Any, Generator[Any, None, None]]:
        """Handle responses from Gemini API for both sync and streaming modes.

        Args:
            response: The response from the Gemini API
            kwargs: The keyword arguments passed to generate_content
            init_timestamp: The timestamp when the request was initiated
            session: Optional AgentOps session for recording events

        Returns:
            For sync responses: The original response object
            For streaming responses: A generator yielding response chunks

        Note:
            Token counts are extracted from usage_metadata if available.
        """
        llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
        if session is not None:
            llm_event.session_id = session.session_id

        # For streaming responses
        if kwargs.get("stream", False):
            accumulated_text = []  # Use list to accumulate text chunks

            def handle_stream_chunk(chunk):
                nonlocal llm_event
                try:
                    if llm_event.returns is None:
                        llm_event.returns = chunk
                        llm_event.agent_id = check_call_stack_for_agent_id()
                        llm_event.model = getattr(chunk, "model", "gemini-1.5-flash")
                        llm_event.prompt = kwargs.get("prompt", kwargs.get("contents", []))

                    if hasattr(chunk, "text") and chunk.text:
                        accumulated_text.append(chunk.text)

                    # Extract token counts if available
                    if hasattr(chunk, "usage_metadata"):
                        usage = chunk.usage_metadata
                        llm_event.prompt_tokens = getattr(usage, "prompt_token_count", None)
                        llm_event.completion_tokens = getattr(usage, "candidates_token_count", None)

                    # If this is the last chunk
                    if hasattr(chunk, "finish_reason") and chunk.finish_reason:
                        llm_event.completion = "".join(accumulated_text)
                        llm_event.end_timestamp = get_ISO_time()
                        self._safe_record(session, llm_event)

                except Exception as e:
                    if session is not None:
                        self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e))
                    logger.warning(
                        f"Unable to parse chunk for Gemini LLM call. Error: {str(e)}\n"
                        f"Chunk: {chunk}\n"
                        f"kwargs: {kwargs}\n"
                    )

            def stream_handler(stream):
                try:
                    for chunk in stream:
                        handle_stream_chunk(chunk)
                        yield chunk
                except Exception as e:
                    if session is not None:
                        self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e))
                    raise  # Re-raise after recording error

            return stream_handler(response)

        # For synchronous responses
        try:
            llm_event.returns = response
            llm_event.agent_id = check_call_stack_for_agent_id()
            llm_event.prompt = kwargs.get("prompt", kwargs.get("contents", []))
            llm_event.completion = response.text
            llm_event.model = getattr(response, "model", "gemini-1.5-flash")

            # Extract token counts from usage metadata if available
            if hasattr(response, "usage_metadata"):
                usage = response.usage_metadata
                llm_event.prompt_tokens = getattr(usage, "prompt_token_count", None)
                llm_event.completion_tokens = getattr(usage, "candidates_token_count", None)

            llm_event.end_timestamp = get_ISO_time()
            self._safe_record(session, llm_event)
        except Exception as e:
            if session is not None:
                self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e))
            logger.warning(
                f"Unable to parse response for Gemini LLM call. Error: {str(e)}\n"
                f"Response: {response}\n"
                f"kwargs: {kwargs}\n"
            )

        return response

    def override(self):
        """Override Gemini's generate_content method to track LLM events.

        Note:
            This method is called automatically by AgentOps during initialization.
            Users should not call this method directly."""
        import google.generativeai as genai

        # Store original method if not already stored
        if "generate_content" not in _ORIGINAL_METHODS:
            _ORIGINAL_METHODS["generate_content"] = genai.GenerativeModel.generate_content

        # Store provider instance for the closure
        provider = self

        def patched_function(self, *args, **kwargs):
            init_timestamp = get_ISO_time()

            # Extract and remove session from kwargs if present
            session = kwargs.pop("session", None)

            # Handle positional prompt argument
            event_kwargs = kwargs.copy()  # Create a copy for event tracking
            if args and len(args) > 0:
                # First argument is the prompt
                prompt = args[0]
                if "contents" not in kwargs:
                    kwargs["contents"] = prompt
                event_kwargs["prompt"] = prompt  # Store original prompt for event tracking
                args = args[1:]  # Remove prompt from args since we moved it to kwargs

            # Call original method and track event
            try:
                if "generate_content" in _ORIGINAL_METHODS:
                    result = _ORIGINAL_METHODS["generate_content"](self, *args, **kwargs)
                    return provider.handle_response(result, event_kwargs, init_timestamp, session=session)
                else:
                    logger.error("Original generate_content method not found. Cannot proceed with override.")
                    return None
            except Exception as e:
                logger.error(f"Error in Gemini generate_content: {str(e)}")
                if session is not None:
                    provider._safe_record(session, ErrorEvent(exception=e))
                raise  # Re-raise the exception after recording

        # Override the method at class level
        genai.GenerativeModel.generate_content = patched_function

    def undo_override(self):
        """Restore original Gemini methods.

        Note:
            This method is called automatically by AgentOps during cleanup.
            Users should not call this method directly."""
        if "generate_content" in _ORIGINAL_METHODS:
            import google.generativeai as genai

            genai.GenerativeModel.generate_content = _ORIGINAL_METHODS["generate_content"]

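The patched generate_content above accepts a session keyword that the stock google.generativeai SDK does not define; the wrapper pops it before delegating, so untracked call sites keep working. A minimal usage sketch, assuming agentops.init() returns the active session (as the example notebook below uses it) and that valid AgentOps and Gemini API keys are configured:

import agentops
import google.generativeai as genai

# Assumes AGENTOPS_API_KEY and GEMINI_API_KEY are set in the environment;
# init() detects google.generativeai and applies GeminiProvider.override().
ao_session = agentops.init()
model = genai.GenerativeModel("gemini-1.5-flash")

# `session` is popped by patched_function and used to record the LLMEvent;
# the remaining arguments are forwarded to the original generate_content.
response = model.generate_content("Say hello.", session=ao_session)
print(response.text)

# Streaming goes through stream_handler(), which yields chunks unchanged;
# the completion is recorded once a chunk reports a finish_reason.
for chunk in model.generate_content("Count to three.", stream=True, session=ao_session):
    print(chunk.text, end="")
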
25 changes: 25 additions & 0 deletions agentops/llms/tracker.py
@@ -16,6 +16,7 @@
from .providers.ai21 import AI21Provider
from .providers.llama_stack_client import LlamaStackClientProvider
from .providers.taskweaver import TaskWeaverProvider
from .providers.gemini import GeminiProvider

original_func = {}
original_create = None
@@ -24,6 +25,9 @@

class LlmTracker:
    SUPPORTED_APIS = {
        "google.generativeai": {
            "0.1.0": ("GenerativeModel.generate_content", "GenerativeModel.generate_content_stream"),
        },
        "litellm": {"1.3.1": ("openai_chat_completions.completion",)},
        "openai": {
            "1.0.0": (
@@ -210,6 +214,26 @@
        else:
            logger.warning(f"Only TaskWeaver>=0.0.1 supported. v{module_version} found.")

        if api == "google.generativeai":
            module_version = version(api)

            if Version(module_version) >= parse("0.1.0"):
                import google.generativeai as genai
                import os

                api_key = os.getenv("GEMINI_API_KEY")
                if api_key:
                    try:
                        genai.configure(api_key=api_key)
                        provider = GeminiProvider(self.client)
                        provider.override()
                    except Exception as e:
                        logger.warning(f"Failed to initialize Gemini provider: {str(e)}")
                else:
                    logger.warning("GEMINI_API_KEY environment variable is required for Gemini integration")
            else:
                logger.warning(f"Only google.generativeai>=0.1.0 supported. v{module_version} found.")

    def stop_instrumenting(self):
        OpenAiProvider(self.client).undo_override()
        GroqProvider(self.client).undo_override()
@@ -221,3 +245,4 @@
        AI21Provider(self.client).undo_override()
        LlamaStackClientProvider(self.client).undo_override()
        TaskWeaverProvider(self.client).undo_override()
        GeminiProvider(self.client).undo_override()

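Note that the tracker only activates the Gemini provider when google.generativeai>=0.1.0 is installed and GEMINI_API_KEY is present; otherwise it logs a warning and skips instrumentation. A minimal sketch of the setup order this implies (the key value is a placeholder):

import os

# The tracker reads GEMINI_API_KEY via os.getenv() when agentops.init() runs,
# so it must be set first; "YOUR_API_KEY_HERE" is a placeholder.
os.environ["GEMINI_API_KEY"] = "YOUR_API_KEY_HERE"

import google.generativeai as genai  # imported so the tracker detects the package
import agentops

agentops.init()  # configures genai and overrides GenerativeModel.generate_content
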
135 changes: 135 additions & 0 deletions examples/gemini_examples/gemini_example_sync.ipynb
@@ -0,0 +1,135 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "580c85ac",
   "metadata": {},
   "source": [
    "# Gemini API Example with AgentOps\n",
    "\n",
    "This notebook demonstrates how to use AgentOps with Google's Gemini API for both synchronous and streaming text generation."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d731924a",
   "metadata": {},
   "outputs": [],
   "source": [
    "import google.generativeai as genai\n",
    "import agentops\n",
    "from agentops.llms.providers.gemini import GeminiProvider"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a94545c9",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Configure the Gemini API\n",
    "import os\n",
    "\n",
    "# Replace with your API key\n",
    "# You can get one at: https://ai.google.dev/tutorials/setup\n",
    "GEMINI_API_KEY = \"YOUR_API_KEY_HERE\"  # Replace with your API key\n",
    "genai.configure(api_key=GEMINI_API_KEY)\n",
    "\n",
    "# Note: In production, use environment variables:\n",
    "# GEMINI_API_KEY = os.getenv(\"GEMINI_API_KEY\")\n",
    "# genai.configure(api_key=GEMINI_API_KEY)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d632fe48",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Initialize AgentOps and Gemini model\n",
    "ao_client = agentops.init()\n",
    "model = genai.GenerativeModel(\"gemini-1.5-flash\")\n",
    "\n",
    "# Initialize and override Gemini provider\n",
    "provider = GeminiProvider(model)\n",
    "provider.override()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3923b6b8",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Test synchronous generation\n",
    "print(\"Testing synchronous generation:\")\n",
    "response = model.generate_content(\n",
    "    \"What are the three laws of robotics?\",\n",
    "    session=ao_client\n",
    ")\n",
    "print(response.text)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "da54e521",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Test streaming generation\n",
    "print(\"\\nTesting streaming generation:\")\n",
    "response = model.generate_content(\n",
    "    \"Explain the concept of machine learning in simple terms.\",\n",
    "    stream=True,\n",
    "    session=ao_client\n",
    ")\n",
    "\n",
    "for chunk in response:\n",
    "    print(chunk.text, end=\"\")\n",
    "print()  # Add newline after streaming output\n",
    "\n",
    "# Test another synchronous generation\n",
    "print(\"\\nTesting another synchronous generation:\")\n",
    "response = model.generate_content(\n",
    "    \"What is the difference between supervised and unsupervised learning?\",\n",
    "    session=ao_client\n",
    ")\n",
    "print(response.text)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c6a674c0",
   "metadata": {},
   "outputs": [],
   "source": [
    "# End session and check stats\n",
    "agentops.end_session(\n",
    "    end_state=\"Success\",\n",
    "    end_state_reason=\"Gemini integration example completed successfully\"\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b6d35f28",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Clean up\n",
    "provider.undo_override()"
   ]
  }
 ],
 "metadata": {},
 "nbformat": 4,
 "nbformat_minor": 5
}