Skip to content

Commit 52cbd48

Browse files
committed
Add GoogleModel support
This PR adds support for Google models via: - Gemini API - Vertex AI. The actual backend used depends on the parameters passed to GoogleModel
1 parent 8741728 commit 52cbd48

File tree

6 files changed

+334
-7
lines changed

6 files changed

+334
-7
lines changed

pyproject.toml

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,7 @@ compat = ["six>=1.17.0"]
3636
ai = ["httpx==0.28.1", "langchain>=1.2.15", "mcp>=1.27.0", "pydantic>=2.13.1"]
3737
anthropic = ["splunk-sdk[ai]>=2.1.1", "langchain-anthropic>=1.4.0"]
3838
openai = ["splunk-sdk[ai]>=2.1.1", "langchain-openai>=1.1.13"]
39+
google = ["splunk-sdk[ai]>=2.1.1", "langchain-google-genai>=4.2.2", "google-auth>=2.0.0"]
3940

4041
# Treat the same as NPM's `devDependencies`
4142
[dependency-groups]
@@ -50,7 +51,7 @@ release = ["build>=1.4.3", "jinja2>=3.1.6", "sphinx>=9.1.0", "twine>=6.2.0"]
5051
lint = ["basedpyright>=1.39.0", "ruff>=0.15.10"]
5152
dev = [
5253
"rich>=14.3.3",
53-
"splunk-sdk[openai, anthropic]",
54+
"splunk-sdk[openai, anthropic, google]",
5455
{ include-group = "test" },
5556
{ include-group = "lint" },
5657
{ include-group = "release" },

splunklib/ai/README.md

Lines changed: 83 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,7 @@ We support following predefined models:
4747

4848
- `OpenAIModel` - works with OpenAI and any [OpenAI-compatible API](https://platform.openai.com/docs/api-reference).
4949
- `AnthropicModel` - works with Anthropic and any [Anthropic-compatible API](https://docs.anthropic.com/en/api).
50+
- `GoogleModel` - works with Google's Gemini models via the [Gemini API](https://ai.google.dev/gemini-api/docs) or [Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/overview).
5051

5152
### OpenAI
5253

@@ -76,6 +77,88 @@ model = AnthropicModel(
7677
async with Agent(model=model) as agent: ....
7778
```
7879

80+
### Google
81+
82+
`GoogleModel` supports two backends: the [Gemini API](https://ai.google.dev/gemini-api/docs) and [Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/overview).
83+
The backend is selected automatically based on the parameters you provide, or you can
84+
force it with the `vertexai` flag.
85+
86+
Requires the `google` optional extra:
87+
88+
```sh
89+
pip install "splunk-sdk[google]"
90+
# or with uv:
91+
uv add splunk-sdk[google]
92+
```
93+
94+
#### Gemini API
95+
96+
Use this when you have a Google AI Studio API key and do not need Vertex AI infrastructure.
97+
Only `model` and `api_key` are required.
98+
99+
```py
100+
from splunklib.ai import Agent, GoogleModel
101+
102+
model = GoogleModel(
103+
model="gemini-2.0-flash",
104+
api_key="YOUR_GOOGLE_API_KEY",
105+
)
106+
107+
async with Agent(model=model) as agent: ...
108+
```
109+
110+
#### Vertex AI - API key
111+
112+
Use this to route requests through Vertex AI with an API key. Providing `project` is enough
113+
for the SDK to switch to the Vertex AI backend automatically.
114+
115+
```py
116+
from splunklib.ai import Agent, GoogleModel
117+
118+
model = GoogleModel(
119+
model="gemini-2.0-flash",
120+
api_key="YOUR_VERTEX_API_KEY",
121+
project="your-gcp-project-id",
122+
# location="us-central1", # optional, defaults to us-central1
123+
)
124+
125+
async with Agent(model=model) as agent: ...
126+
```
127+
128+
#### Vertex AI - service account credentials
129+
130+
Use this when authenticating with a service account key file (or any
131+
`google.auth.credentials.Credentials`-compatible object). No `api_key` is needed.
132+
133+
```py
134+
from google.oauth2 import service_account
135+
from splunklib.ai import Agent, GoogleModel
136+
137+
credentials = service_account.Credentials.from_service_account_file(
138+
"path/to/service-account.json",
139+
scopes=["https://www.googleapis.com/auth/cloud-platform"],
140+
)
141+
142+
model = GoogleModel(
143+
model="gemini-2.0-flash",
144+
project="your-gcp-project-id",
145+
credentials=credentials,
146+
# location="us-central1", # optional, defaults to us-central1
147+
)
148+
149+
async with Agent(model=model) as agent: ...
150+
```
151+
152+
#### Backend selection rules
153+
154+
| `project` | `credentials` | `vertexai` | Backend used |
155+
|---|---|---|---|
156+
| not set | not set | `None` (default) | Gemini API |
157+
| set | - | `None` (default) | Vertex AI |
158+
| - | set | `None` (default) | Vertex AI |
159+
| any | any | `True` | Vertex AI (forced) |
160+
| any | any | `False` | Gemini API (forced) |
161+
79162
### Self-hosted models via Ollama
80163

81164
[Ollama](https://ollama.com/) can serve local models with both OpenAI and Anthropic-compatible endpoints, so either model class works.

splunklib/ai/engines/langchain.py

Lines changed: 28 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -109,7 +109,7 @@
109109
subagent_middleware,
110110
tool_middleware,
111111
)
112-
from splunklib.ai.model import AnthropicModel, OpenAIModel, PredefinedModel
112+
from splunklib.ai.model import AnthropicModel, GoogleModel, OpenAIModel, PredefinedModel
113113
from splunklib.ai.security import create_structured_prompt
114114
from splunklib.ai.structured_output import (
115115
StructuredOutputGenerationException,
@@ -1694,6 +1694,33 @@ def _create_langchain_model(model: PredefinedModel) -> BaseChatModel:
16941694
+ "# or if using uv:\n"
16951695
+ "uv add splunk-sdk[anthropic]"
16961696
)
1697+
case GoogleModel():
1698+
try:
1699+
from langchain_google_genai import ChatGoogleGenerativeAI
1700+
1701+
google_kwargs: dict[str, Any] = {"model": model.model}
1702+
if model.api_key is not None:
1703+
google_kwargs["google_api_key"] = model.api_key
1704+
if model.project is not None:
1705+
google_kwargs["project"] = model.project
1706+
if model.location is not None:
1707+
google_kwargs["location"] = model.location
1708+
if model.credentials is not None:
1709+
google_kwargs["credentials"] = model.credentials
1710+
if model.vertexai is not None:
1711+
google_kwargs["vertexai"] = model.vertexai
1712+
if model.temperature is not None:
1713+
google_kwargs["temperature"] = model.temperature
1714+
1715+
return ChatGoogleGenerativeAI(**google_kwargs)
1716+
except ImportError:
1717+
raise ImportError(
1718+
"Google GenAI support is not installed.\n"
1719+
+ "To enable Google / Gemini models, install the optional extra:\n"
1720+
+ 'pip install "splunk-sdk[google]"\n'
1721+
+ "# or if using uv:\n"
1722+
+ "uv add splunk-sdk[google]"
1723+
)
16971724
case _:
16981725
raise InvalidModelError(
16991726
"Cannot create langchain model - invalid SDK model provided"

splunklib/ai/model.py

Lines changed: 36 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,10 +13,13 @@
1313
# under the License.
1414

1515
from dataclasses import dataclass
16-
from typing import Any, Mapping
16+
from typing import TYPE_CHECKING, Any, Mapping
1717

1818
import httpx
1919

20+
if TYPE_CHECKING:
21+
from google.oauth2 import service_account
22+
2023

2124
@dataclass(frozen=True)
2225
class PredefinedModel:
@@ -63,8 +66,40 @@ class AnthropicModel(PredefinedModel):
6366
temperature: float | None = None
6467

6568

69+
@dataclass(frozen=True)
class GoogleModel(PredefinedModel):
    """Predefined Google model.

    Works against two backends: the Gemini API and Vertex AI. The backend is
    chosen automatically — Vertex AI when ``project`` or ``credentials`` is
    set, otherwise the Gemini API. Override the auto-detection by passing
    ``vertexai=True`` or ``vertexai=False`` explicitly.

    See the README for full usage examples and authentication options.
    """

    model: str
    api_key: str | None = None
    """API key for the Gemini API or Vertex AI."""

    project: str | None = None
    """Google Cloud project ID (Vertex AI only)."""

    location: str | None = None
    """Vertex AI region, e.g. ``"us-central1"`` or ``"europe-west4"``."""

    credentials: "service_account.Credentials | None" = None
    """Service account credentials for Vertex AI. When set, ``api_key`` is not required."""

    vertexai: bool | None = None
    """Force backend selection: ``True`` for Vertex AI, ``False`` for Gemini API, ``None`` to auto-detect."""

    temperature: float | None = None
    """Sampling temperature in the range ``[0.0, 2.0]``."""
98+
99+
66100
__all__ = [
67101
"AnthropicModel",
102+
"GoogleModel",
68103
"OpenAIModel",
69104
"PredefinedModel",
70105
]

tests/unit/ai/engine/test_langchain_backend.py

Lines changed: 51 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@
3939
ToolMessage,
4040
ToolResult,
4141
)
42-
from splunklib.ai.model import AnthropicModel, OpenAIModel, PredefinedModel
42+
from splunklib.ai.model import AnthropicModel, GoogleModel, OpenAIModel, PredefinedModel
4343
from splunklib.ai.tools import ToolType
4444

4545

@@ -387,6 +387,56 @@ def test_create_langchain_model_anthropic_with_base_url(self) -> None:
387387
# ChatAnthropic stores base_url in anthropic_api_url
388388
assert result.anthropic_api_url == model.base_url
389389

390+
def test_create_langchain_model_google_gemini_api(self) -> None:
    """A plain API key with no project/credentials selects the Gemini API backend."""
    # importorskip returns the module, so a second `import` statement is unnecessary.
    genai = pytest.importorskip("langchain_google_genai")

    sdk_model = GoogleModel(model="gemini-2.0-flash", api_key="test-key")
    chat_model = lc._create_langchain_model(sdk_model)

    assert isinstance(chat_model, genai.ChatGoogleGenerativeAI)
    assert chat_model.model == sdk_model.model
    assert chat_model._use_vertexai is False  # pyright: ignore[reportAttributeAccessIssue]
400+
401+
def test_create_langchain_model_google_vertex_ai_via_project(self) -> None:
    """Setting ``project`` alone is enough to auto-select the Vertex AI backend."""
    # importorskip returns the module, so a second `import` statement is unnecessary.
    genai = pytest.importorskip("langchain_google_genai")

    sdk_model = GoogleModel(
        model="gemini-2.0-flash",
        api_key="test-key",
        project="my-project",
    )
    chat_model = lc._create_langchain_model(sdk_model)

    assert isinstance(chat_model, genai.ChatGoogleGenerativeAI)
    assert chat_model.project == sdk_model.project
    assert chat_model._use_vertexai is True  # pyright: ignore[reportAttributeAccessIssue]
415+
416+
def test_create_langchain_model_google_vertex_ai_explicit_flag(self) -> None:
    """``vertexai=True`` forces the Vertex AI backend even without a project."""
    # importorskip returns the module, so a second `import` statement is unnecessary.
    genai = pytest.importorskip("langchain_google_genai")

    sdk_model = GoogleModel(
        model="gemini-2.0-flash",
        api_key="test-key",
        vertexai=True,
    )
    chat_model = lc._create_langchain_model(sdk_model)

    assert isinstance(chat_model, genai.ChatGoogleGenerativeAI)
    assert chat_model._use_vertexai is True  # pyright: ignore[reportAttributeAccessIssue]
429+
430+
def test_create_langchain_model_google_temperature(self) -> None:
    """The ``temperature`` field is forwarded to the underlying chat model."""
    # importorskip returns the module, so a second `import` statement is unnecessary.
    genai = pytest.importorskip("langchain_google_genai")

    sdk_model = GoogleModel(model="gemini-2.0-flash", api_key="test-key", temperature=0.5)
    chat_model = lc._create_langchain_model(sdk_model)

    assert isinstance(chat_model, genai.ChatGoogleGenerativeAI)
    assert chat_model.temperature == sdk_model.temperature
439+
390440

391441
@pytest.mark.parametrize(
392442
("name", "tool_type", "expected_name"),

0 commit comments

Comments
 (0)