
Commit 19f8238

Lanzelot1 and claude committed
Merge main into feature/skillbook-rename
- Integrate new ACELiteLLM parameters (api_key, base_url, extra_headers, ssl_verify)
- Resolve conflicts keeping both: new API parameters + skillbook terminology
- All 271 tests pass

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <[email protected]>
2 parents 87f2533 + 0ff3489 commit 19f8238
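
At a glance, the merged parameter surface looks roughly like this (a hedged sketch distilled from the diffs below; all values are illustrative, and every new parameter is optional):

    from ace.integrations import ACELiteLLM

    # Omitting any of the four new parameters preserves the old behavior.
    agent = ACELiteLLM(
        model="gpt-4o-mini",
        api_key="sk-...",                      # explicit key instead of env var
        base_url="http://localhost:1234/v1",   # custom endpoint (forwarded as api_base)
        extra_headers={"X-Tenant-ID": "abc"},  # custom HTTP headers
        ssl_verify="/path/to/internal-ca.pem", # False to disable, or a CA bundle path
    )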

File tree

4 files changed: +221 -8 lines changed

- ace/integrations/litellm.py
- ace/llm_providers/litellm_client.py
- examples/local-models/lm_studio_example.py
- tests/test_litellm_client.py

ace/integrations/litellm.py

Lines changed: 41 additions & 3 deletions

@@ -39,7 +39,7 @@
     agent = ACELiteLLM(model="gpt-4o-mini", skillbook_path="my_agent.json")
 """
 
-from typing import TYPE_CHECKING, List, Optional, Dict, Any, Tuple
+from typing import TYPE_CHECKING, List, Optional, Dict, Any, Tuple, Union
 
 from ..skillbook import Skillbook
 from ..roles import Agent, Reflector, SkillManager, AgentOutput
@@ -103,9 +103,18 @@ def __init__(
         model: str = "gpt-4o-mini",
         max_tokens: int = 2048,
         temperature: float = 0.0,
+        # Authentication & endpoint
+        api_key: Optional[str] = None,
+        base_url: Optional[str] = None,
+        # HTTP/SSL settings
+        extra_headers: Optional[Dict[str, str]] = None,
+        ssl_verify: Optional[Union[bool, str]] = None,
+        # ACE-specific settings
         skillbook_path: Optional[str] = None,
         is_learning: bool = True,
         dedup_config: Optional["DeduplicationConfig"] = None,
+        # Pass-through for advanced LiteLLM options
+        **llm_kwargs: Any,
     ):
         """
         Initialize ACELiteLLM agent.
@@ -115,9 +124,14 @@ def __init__(
                 Supports 100+ providers: OpenAI, Anthropic, Google, etc.
             max_tokens: Max tokens for responses (default: 2048)
             temperature: Sampling temperature (default: 0.0)
+            api_key: API key for the LLM provider. Falls back to env vars if not set.
+            base_url: Custom API endpoint URL (e.g., http://localhost:1234/v1)
+            extra_headers: Custom HTTP headers dict (e.g., {"X-Tenant-ID": "abc"})
+            ssl_verify: SSL verification. False to disable, or path to CA bundle.
             skillbook_path: Path to existing skillbook (optional)
             is_learning: Enable/disable learning (default: True)
             dedup_config: Optional DeduplicationConfig for skill deduplication
+            **llm_kwargs: Additional LiteLLM parameters (timeout, max_retries, etc.)
 
         Raises:
             ImportError: If LiteLLM is not installed
@@ -132,6 +146,23 @@ def __init__(
             # Google
             agent = ACELiteLLM(model="gemini/gemini-pro")
 
+            # With explicit API key
+            agent = ACELiteLLM(model="gpt-4", api_key="sk-...")
+
+            # Custom endpoint (LM Studio, Ollama)
+            agent = ACELiteLLM(
+                model="openai/local-model",
+                base_url="http://localhost:1234/v1"
+            )
+
+            # Enterprise with custom headers and SSL
+            agent = ACELiteLLM(
+                model="gpt-4",
+                base_url="https://proxy.company.com/v1",
+                extra_headers={"X-Tenant-ID": "team-alpha"},
+                ssl_verify="/path/to/internal-ca.pem"
+            )
+
             # With existing skillbook
             agent = ACELiteLLM(
                 model="gpt-4o-mini",
@@ -165,9 +196,16 @@ def __init__(
         else:
            self.skillbook = Skillbook()
 
-        # Create LLM client
+        # Create LLM client with configuration
        self.llm = LiteLLMClient(
-            model=model, max_tokens=max_tokens, temperature=temperature
+            model=model,
+            max_tokens=max_tokens,
+            temperature=temperature,
+            api_key=api_key,
+            api_base=base_url,  # Map user-friendly name to LiteLLM's api_base
+            extra_headers=extra_headers,
+            ssl_verify=ssl_verify,
+            **llm_kwargs,
        )
 
        # Create ACE components with v2.1 prompts
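
Reading the constructor hunks together: base_url is renamed to LiteLLM's api_base at the client boundary, and anything in **llm_kwargs rides along untouched. A usage sketch (hedged; the config attribute names follow the tests added later in this commit, and the URL and option values are illustrative):

    from ace.integrations import ACELiteLLM

    agent = ACELiteLLM(
        model="gpt-4",
        base_url="https://proxy.company.com/v1",  # user-facing name...
        timeout=120,                              # pass-through LiteLLM option
        max_retries=5,                            # pass-through LiteLLM option
    )

    # ...stored internally under LiteLLM's name, per the mapping in the diff:
    assert agent.llm.config.api_base == "https://proxy.company.com/v1"
    assert agent.llm.config.timeout == 120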

ace/llm_providers/litellm_client.py

Lines changed: 16 additions & 0 deletions

@@ -64,6 +64,10 @@ class LiteLLMConfig:
     # Anthropic API limitation: temperature and top_p cannot both be specified
     sampling_priority: str = "temperature"  # "temperature" | "top_p" | "top_k"
 
+    # HTTP/SSL settings
+    extra_headers: Optional[Dict[str, str]] = None  # Custom HTTP headers
+    ssl_verify: Optional[Union[bool, str]] = None  # True/False or path to CA bundle
+
 
 class LiteLLMClient(LLMClient):
     """
@@ -436,6 +440,12 @@ def complete(
         if self.config.api_base:
             call_params["api_base"] = self.config.api_base
 
+        # Add HTTP/SSL settings
+        if self.config.extra_headers:
+            call_params["extra_headers"] = self.config.extra_headers
+        if self.config.ssl_verify is not None:
+            call_params["ssl_verify"] = self.config.ssl_verify
+
         # Add Opik span association for role-level token aggregation
         if OPIK_SPAN_AVAILABLE and get_current_span_data:
             try:
@@ -560,6 +570,12 @@ async def acomplete(
         if self.config.api_base:
             call_params["api_base"] = self.config.api_base
 
+        # Add HTTP/SSL settings
+        if self.config.extra_headers:
+            call_params["extra_headers"] = self.config.extra_headers
+        if self.config.ssl_verify is not None:
+            call_params["ssl_verify"] = self.config.ssl_verify
+
         # Add Opik span association for role-level token aggregation
         if OPIK_SPAN_AVAILABLE and get_current_span_data:
             try:
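
Note the asymmetry in the two guards added above: ssl_verify is compared against None so that an explicit False still reaches LiteLLM, while extra_headers uses a truthiness check and so drops an empty dict along with None. A minimal standalone sketch of that rule (function and names local to this sketch, not part of the library):

    def build_call_params(extra_headers, ssl_verify):
        # Mirrors the guards in complete()/acomplete() above.
        call_params = {}
        if extra_headers:                # {} or None -> omitted
            call_params["extra_headers"] = extra_headers
        if ssl_verify is not None:       # False is meaningful and must be forwarded
            call_params["ssl_verify"] = ssl_verify
        return call_params

    assert build_call_params(None, False) == {"ssl_verify": False}
    assert build_call_params({}, None) == {}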

examples/local-models/lm_studio_example.py

Lines changed: 2 additions & 5 deletions

@@ -9,7 +9,6 @@
 from ace.integrations import ACELiteLLM
 from ace import Sample, SimpleEnvironment
 from pathlib import Path
-import os
 
 
 def main():
@@ -19,17 +18,15 @@ def main():
     lm_studio_url = "http://localhost:1234/v1"
 
     # 1. Create ACELiteLLM agent pointing to LM Studio
-    # Note: Use "openai/" prefix with api_base for LM Studio
+    # Note: Use "openai/" prefix with base_url for LM Studio
     print(f"\n📡 Connecting to LM Studio at {lm_studio_url}...")
 
-    # Set environment variable for LiteLLM to use custom endpoint
-    os.environ["OPENAI_API_BASE"] = lm_studio_url
-
     skillbook_path = Path("lm_studio_learned_strategies.json")
 
     try:
         agent = ACELiteLLM(
             model="openai/local-model",  # LM Studio serves any model as 'local-model'
+            base_url=lm_studio_url,
             max_tokens=512,
             temperature=0.2,
             is_learning=True,
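
The change here is more than cosmetic: the removed os.environ assignment configured the endpoint process-wide, affecting every OpenAI-style LiteLLM call in the process, while base_url scopes it to this one agent. Roughly, the before/after (a sketch; the old form is reconstructed from the removed lines):

    from ace.integrations import ACELiteLLM

    # Before (removed): process-global side effect.
    #   import os
    #   os.environ["OPENAI_API_BASE"] = "http://localhost:1234/v1"
    #   agent = ACELiteLLM(model="openai/local-model")

    # After (per the diff): the endpoint travels with this agent only.
    agent = ACELiteLLM(
        model="openai/local-model",
        base_url="http://localhost:1234/v1",
    )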

tests/test_litellm_client.py

Lines changed: 162 additions & 0 deletions

@@ -247,5 +247,167 @@ def test_config_with_explicit_top_p(self):
         self.assertEqual(config.top_p, 0.9)
 
 
+@pytest.mark.unit
+class TestACELiteLLMConfiguration(unittest.TestCase):
+    """Test ACELiteLLM configuration parameter passing."""
+
+    def _mock_response(self):
+        """Create mock LiteLLM response."""
+        mock = MagicMock()
+        mock.choices = [
+            MagicMock(
+                message=MagicMock(
+                    content='{"reasoning":"test","bullet_ids":[],"final_answer":"ok"}'
+                )
+            )
+        ]
+        mock.usage = None
+        mock.model = "gpt-4"
+        return mock
+
+    @patch("ace.llm_providers.litellm_client.completion")
+    def test_api_key_passed_to_client(self, mock_completion):
+        """Test api_key parameter is passed to LiteLLMClient."""
+        mock_completion.return_value = self._mock_response()
+
+        from ace.integrations import ACELiteLLM
+
+        agent = ACELiteLLM(model="gpt-4", api_key="test-key-123")
+        self.assertEqual(agent.llm.config.api_key, "test-key-123")
+
+    @patch("ace.llm_providers.litellm_client.completion")
+    def test_base_url_maps_to_api_base(self, mock_completion):
+        """Test base_url maps to api_base in config."""
+        mock_completion.return_value = self._mock_response()
+
+        from ace.integrations import ACELiteLLM
+
+        agent = ACELiteLLM(model="openai/local", base_url="http://localhost:1234/v1")
+        self.assertEqual(agent.llm.config.api_base, "http://localhost:1234/v1")
+
+    @patch("ace.llm_providers.litellm_client.completion")
+    def test_extra_headers_passed(self, mock_completion):
+        """Test extra_headers parameter is passed through."""
+        mock_completion.return_value = self._mock_response()
+
+        from ace.integrations import ACELiteLLM
+
+        headers = {"X-Custom": "value", "X-Tenant-ID": "team-alpha"}
+        agent = ACELiteLLM(model="gpt-4", extra_headers=headers)
+        self.assertEqual(agent.llm.config.extra_headers, headers)
+
+    @patch("ace.llm_providers.litellm_client.completion")
+    def test_ssl_verify_false(self, mock_completion):
+        """Test ssl_verify=False is passed through."""
+        mock_completion.return_value = self._mock_response()
+
+        from ace.integrations import ACELiteLLM
+
+        agent = ACELiteLLM(model="gpt-4", ssl_verify=False)
+        self.assertEqual(agent.llm.config.ssl_verify, False)
+
+    @patch("ace.llm_providers.litellm_client.completion")
+    def test_ssl_verify_path(self, mock_completion):
+        """Test ssl_verify with CA bundle path."""
+        mock_completion.return_value = self._mock_response()
+
+        from ace.integrations import ACELiteLLM
+
+        agent = ACELiteLLM(model="gpt-4", ssl_verify="/path/to/ca.pem")
+        self.assertEqual(agent.llm.config.ssl_verify, "/path/to/ca.pem")
+
+    @patch("ace.llm_providers.litellm_client.completion")
+    def test_kwargs_passed_through(self, mock_completion):
+        """Test **llm_kwargs are passed to LiteLLMClient."""
+        mock_completion.return_value = self._mock_response()
+
+        from ace.integrations import ACELiteLLM
+
+        agent = ACELiteLLM(model="gpt-4", timeout=120, max_retries=5)
+        self.assertEqual(agent.llm.config.timeout, 120)
+        self.assertEqual(agent.llm.config.max_retries, 5)
+
+    @patch("ace.llm_providers.litellm_client.completion")
+    def test_backward_compatibility(self, mock_completion):
+        """Test that existing code without new params still works."""
+        mock_completion.return_value = self._mock_response()
+
+        from ace.integrations import ACELiteLLM
+
+        agent = ACELiteLLM(model="gpt-4o-mini", max_tokens=1024, temperature=0.5)
+        self.assertEqual(agent.model, "gpt-4o-mini")
+        self.assertEqual(agent.llm.config.max_tokens, 1024)
+        self.assertEqual(agent.llm.config.temperature, 0.5)
+        # New params should be None by default (api_key may be picked up from env vars)
+        self.assertIsNone(agent.llm.config.extra_headers)
+        self.assertIsNone(agent.llm.config.ssl_verify)
+
+
+@pytest.mark.unit
+class TestLiteLLMClientDirectConfig(unittest.TestCase):
+    """Test LiteLLMClient direct configuration."""
+
+    def _mock_response(self):
+        """Create mock LiteLLM response."""
+        mock = MagicMock()
+        mock.choices = [MagicMock(message=MagicMock(content="Test response"))]
+        mock.usage = None
+        mock.model = "gpt-4"
+        return mock
+
+    @patch("ace.llm_providers.litellm_client.completion")
+    def test_extra_headers_in_call_params(self, mock_completion):
+        """Test extra_headers is included in LiteLLM call_params."""
+        mock_completion.return_value = self._mock_response()
+
+        from ace.llm_providers import LiteLLMClient
+
+        headers = {"X-Custom": "value"}
+        client = LiteLLMClient(model="gpt-4", extra_headers=headers)
+        client.complete("Test prompt")
+
+        call_kwargs = mock_completion.call_args[1]
+        self.assertEqual(call_kwargs["extra_headers"], headers)
+
+    @patch("ace.llm_providers.litellm_client.completion")
+    def test_ssl_verify_false_in_call_params(self, mock_completion):
+        """Test ssl_verify=False is included in LiteLLM call_params."""
+        mock_completion.return_value = self._mock_response()
+
+        from ace.llm_providers import LiteLLMClient
+
+        client = LiteLLMClient(model="gpt-4", ssl_verify=False)
+        client.complete("Test prompt")
+
+        call_kwargs = mock_completion.call_args[1]
+        self.assertEqual(call_kwargs["ssl_verify"], False)
+
+    @patch("ace.llm_providers.litellm_client.completion")
+    def test_ssl_verify_path_in_call_params(self, mock_completion):
+        """Test ssl_verify path is included in LiteLLM call_params."""
+        mock_completion.return_value = self._mock_response()
+
+        from ace.llm_providers import LiteLLMClient
+
+        client = LiteLLMClient(model="gpt-4", ssl_verify="/path/to/ca.pem")
+        client.complete("Test prompt")
+
+        call_kwargs = mock_completion.call_args[1]
+        self.assertEqual(call_kwargs["ssl_verify"], "/path/to/ca.pem")
+
+    @patch("ace.llm_providers.litellm_client.completion")
+    def test_ssl_verify_none_not_in_call_params(self, mock_completion):
+        """Test ssl_verify=None is NOT included in call_params."""
+        mock_completion.return_value = self._mock_response()
+
+        from ace.llm_providers import LiteLLMClient
+
+        client = LiteLLMClient(model="gpt-4")  # ssl_verify defaults to None
+        client.complete("Test prompt")
+
+        call_kwargs = mock_completion.call_args[1]
+        self.assertNotIn("ssl_verify", call_kwargs)
+
+
 if __name__ == "__main__":
     unittest.main()
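
The forwarding tests above all share one pattern: patch litellm's completion function, call through the client, then inspect the keyword arguments it actually sent. A condensed, standalone illustration of that mechanism using only the standard library (no ACE imports):

    from unittest.mock import MagicMock

    # A MagicMock records every call; call_args[1] is the kwargs dict
    # of the most recent call, which is what the tests assert against.
    completion = MagicMock()
    completion(model="gpt-4", ssl_verify=False, extra_headers={"X-Custom": "value"})

    call_kwargs = completion.call_args[1]
    assert call_kwargs["ssl_verify"] is False
    assert "api_key" not in call_kwargs  # omitted kwargs simply don't appear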
