-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathconfig_manager.py
More file actions
299 lines (245 loc) · 12.2 KB
/
config_manager.py
File metadata and controls
299 lines (245 loc) · 12.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
#!/usr/bin/env python3
"""
Simplified ConfigManager for LaunchDarkly AI Agent integration
"""
import os
import time
import json
from pathlib import Path
import ldclient
from ldclient import Context
from ldai.client import LDAIClient, LDAIAgentConfig, LDAIAgentDefaults, ModelConfig, ProviderConfig
from ldai.tracker import FeedbackKind
from dotenv import load_dotenv
from utils.logger import log_student, log_debug
import boto3
load_dotenv()
class FixedConfigManager:
    def __init__(self):
        """Initialize LaunchDarkly client and AI client.

        Reads LD_SDK_KEY from the environment, loads fallback AI config
        defaults from .ai_config_defaults.json, starts the LaunchDarkly SDK
        and AI clients, and (when AUTH_METHOD=sso) creates an AWS Bedrock
        session.

        Raises:
            ValueError: if the LD_SDK_KEY environment variable is not set.
        """
        self.sdk_key = os.getenv('LD_SDK_KEY')
        if not self.sdk_key:
            raise ValueError("LD_SDK_KEY environment variable is required")
        # Load defaults from .ai_config_defaults.json (fails fast with a
        # helpful error when the file is missing or corrupted).
        self._load_config_defaults()
        self._initialize_launchdarkly_client()
        self._initialize_ai_client()
        # Initialize AWS Bedrock session for SSO authentication
        # (sets self.boto3_session to None unless AUTH_METHOD=sso).
        self._initialize_bedrock_session()
def _load_config_defaults(self):
"""Load AI config defaults from .ai_config_defaults.json
This file is generated by the CI/CD test suite (ld-aic test) and contains
the latest validated configs from LaunchDarkly production.
"""
defaults_path = Path(".ai_config_defaults.json")
if not defaults_path.exists():
raise FileNotFoundError(
"❌ .ai_config_defaults.json not found!\n\n"
"This file contains fallback AI config defaults and should be generated by:\n"
" 1. Running CI/CD tests: The 'ld-aic test' command automatically creates it\n"
" 2. Manual generation: Run 'ld-aic validate' locally\n\n"
"To generate it now:\n"
" ld-aic validate --config-keys 'supervisor-agent,support-agent,security-agent'\n\n"
"Or if you have the CI/CD workflow running, it will be created automatically."
)
try:
with open(defaults_path, 'r') as f:
data = json.load(f)
self.config_defaults = data.get("configs", {})
metadata = data.get("_metadata", {})
log_debug(f"DEFAULTS: Loaded {len(self.config_defaults)} config defaults from {metadata.get('environment', 'unknown')}")
log_debug(f"DEFAULTS: Generated at {metadata.get('generated_at', 'unknown')}")
except json.JSONDecodeError as e:
raise ValueError(
f"❌ Failed to parse .ai_config_defaults.json: {e}\n\n"
"The file may be corrupted. Regenerate it by running:\n"
" ld-aic validate --config-keys 'supervisor-agent,support-agent,security-agent'"
)
def _get_default_config(self, config_key: str) -> LDAIAgentDefaults:
"""Get fallback config from .ai_config_defaults.json
Args:
config_key: The AI config key (e.g., 'support-agent')
Returns:
LDAIAgentDefaults object with config from the defaults file
Raises:
ValueError: If config key not found in defaults
"""
if config_key not in self.config_defaults:
available_keys = list(self.config_defaults.keys())
raise ValueError(
f"❌ Config '{config_key}' not found in .ai_config_defaults.json!\n\n"
f"Available configs: {', '.join(available_keys)}\n\n"
"To add this config:\n"
" 1. Create it in LaunchDarkly dashboard\n"
" 2. Run: ld-aic validate --config-keys '{config_key},...'\n"
" 3. The config will be added to .ai_config_defaults.json"
)
config_data = self.config_defaults[config_key]
# Convert JSON config to LDAIAgentDefaults
# Note: Tools are managed by LaunchDarkly and not part of defaults
return LDAIAgentDefaults(
enabled=config_data.get("enabled", True),
model=ModelConfig(
name=config_data["model"]["name"],
parameters=config_data["model"].get("parameters", {})
),
provider=ProviderConfig(
name=config_data["provider"]["name"]
),
instructions=config_data.get("instructions", "You are a helpful assistant.")
)
def _initialize_launchdarkly_client(self):
"""Initialize LaunchDarkly client"""
config = ldclient.Config(self.sdk_key)
ldclient.set_config(config)
self.ld_client = ldclient.get()
max_wait = 10
wait_time = 0
while not self.ld_client.is_initialized() and wait_time < max_wait:
time.sleep(0.5)
wait_time += 0.5
if not self.ld_client.is_initialized():
raise RuntimeError("LaunchDarkly client initialization failed")
    def _initialize_ai_client(self):
        """Wrap the LaunchDarkly client in an LDAIClient for AI Config evaluation."""
        self.ai_client = LDAIClient(self.ld_client)
def _initialize_bedrock_session(self):
"""Initialize AWS Bedrock session if AUTH_METHOD=sso"""
auth_method = os.getenv('AUTH_METHOD', 'api-key').lower()
if auth_method != 'sso':
self.boto3_session = None
log_debug("AUTH_METHOD not set to 'sso', Bedrock unavailable")
return
try:
# Initialize AWS session with optional profile support
aws_region = os.getenv('AWS_REGION', 'us-east-1')
aws_profile = os.getenv('AWS_PROFILE')
# Create session with profile if specified, otherwise use default credentials
if aws_profile:
self.boto3_session = boto3.Session(
region_name=aws_region,
profile_name=aws_profile
)
log_debug(f"AWS: Using profile '{aws_profile}' in region {aws_region}")
else:
self.boto3_session = boto3.Session(region_name=aws_region)
log_debug(f"AWS: Using default credentials in region {aws_region}")
self.aws_region = aws_region
self.aws_profile = aws_profile
# Test current SSO session
sts = self.boto3_session.client('sts')
identity = sts.get_caller_identity()
user_name = identity['Arn'].split('/')[-1]
account = identity['Account']
log_student(f"AWS: Connected via SSO as {user_name} (Account: {account})")
except Exception as e:
log_student(f"AWS SSO session not available: {e}")
# Provide helpful login command with profile if specified
profile_hint = f" --profile {os.getenv('AWS_PROFILE')}" if os.getenv('AWS_PROFILE') else ""
log_student(f"Run: aws sso login{profile_hint}")
raise
def build_context(self, user_id: str, user_context: dict = None) -> Context:
"""Build a LaunchDarkly context with consistent attributes.
This ensures the same context is used for both AI Config evaluation
and custom metric tracking, which is required for experiment association.
"""
context_builder = Context.builder(user_id).kind('user')
if user_context:
# Set all attributes from user_context for consistency
for key, value in user_context.items():
context_builder.set(key, value)
log_debug(f"CONFIG MANAGER: Set {key}={value}")
return context_builder.build()
async def get_config(self, user_id: str, config_key: str = None, user_context: dict = None):
"""Get LaunchDarkly AI Config with fallback to .ai_config_defaults.json
Fallback chain:
1. Try LaunchDarkly (live config with targeting)
2. If that fails, use .ai_config_defaults.json (validated production defaults)
3. If config not in defaults file, raise helpful error
"""
log_debug(f"CONFIG MANAGER: Getting config for user_id={user_id}, config_key={config_key}")
log_debug(f"CONFIG MANAGER: User context: {user_context}")
# Build context using centralized method
ld_user_context = self.build_context(user_id, user_context)
log_debug(f"CONFIG MANAGER: Built LaunchDarkly context: {ld_user_context}")
ai_config_key = config_key or os.getenv('LAUNCHDARKLY_AI_CONFIG_KEY', 'support-agent')
log_debug(f"CONFIG MANAGER: Using AI config key: {ai_config_key}")
# Load default from .ai_config_defaults.json (fails with helpful error if not found)
default_config = self._get_default_config(ai_config_key)
log_debug(f"CONFIG MANAGER: Loaded fallback default - model: {default_config.model.name}")
agent_config = LDAIAgentConfig(
key=ai_config_key,
default_value=default_config # Use validated production defaults from file
)
# Call LaunchDarkly - SDK automatically falls back to default_value if LD unavailable
result = self.ai_client.agent(agent_config, ld_user_context)
log_debug("CONFIG MANAGER: ✅ Got config (from LaunchDarkly or fallback)")
# Debug the actual configuration received (basic info only)
try:
config_dict = result.to_dict()
log_debug(f"CONFIG MANAGER: Model: {config_dict.get('model', {}).get('name', 'unknown')}")
if hasattr(result, 'tracker') and hasattr(result.tracker, '_variation_key'):
log_debug(f"CONFIG MANAGER: Variation: {result.tracker._variation_key}")
except Exception as debug_e:
log_debug(f"CONFIG MANAGER: Could not debug result: {debug_e}")
return result
    def clear_cache(self):
        """Flush pending LaunchDarkly analytics events.

        NOTE(review): despite the name, ld_client.flush() sends queued events;
        it does not clear the SDK's flag cache — confirm intended behavior.
        """
        self.ld_client.flush()
    def flush_metrics(self):
        """Flush metrics to LaunchDarkly by sending any queued analytics events."""
        self.ld_client.flush()
def track_cost_metric(self, agent_config, context, cost, config_key):
"""Track cost metric with AI Config metadata for experiment attribution.
This ensures cost events include trackJsonData so they're properly
associated with AI Config variations in experiments, matching the
pattern used by token and feedback tracking.
Args:
agent_config: The AI config object with tracker
context: LaunchDarkly context
cost: Cost value in dollars
config_key: The AI config key (e.g., 'support-agent', 'security-agent')
"""
try:
# Extract metadata from agent_config for experiment attribution
metadata = {
"version": 1,
"configKey": config_key,
"variationKey": agent_config.tracker._variation_key if hasattr(agent_config.tracker, '_variation_key') else 'unknown',
"modelName": agent_config.model.name if hasattr(agent_config, 'model') else 'unknown',
"providerName": agent_config.provider.name if hasattr(agent_config, 'provider') else 'unknown'
}
# Track with metadata - this creates trackJsonData in the event
self.ld_client.track("ai_cost_per_request", context, metadata, cost)
self.ld_client.flush()
except Exception as e:
log_debug(f"COST TRACKING ERROR: {e}")
# Fallback to basic tracking if metadata extraction fails
self.ld_client.track("ai_cost_per_request", context, None, cost)
self.ld_client.flush()
def track_feedback(self, tracker, thumbs_up: bool):
"""Track user feedback with LaunchDarkly"""
if not tracker:
return False
try:
# Use LaunchDarkly's feedback tracking
feedback_dict = {
"kind": FeedbackKind.Positive if thumbs_up else FeedbackKind.Negative
}
tracker.track_feedback(feedback_dict)
log_student(f"FEEDBACK TRACKED: {'👍 Positive' if thumbs_up else '👎 Negative'}")
self.ld_client.flush()
return True
except Exception as e:
log_debug(f"FEEDBACK TRACKING ERROR: {e}")
return False
def close(self):
"""Close LaunchDarkly client"""
try:
self.ld_client.flush()
except Exception:
pass
try:
self.ld_client.close()
except Exception:
pass