"""
LLM Provider abstraction layer for Blog2Code.
Supports multiple LLM providers: OpenAI, Google Gemini, and Gemma (via the NVIDIA API)
"""
import os
from typing import Dict, List, Any, Optional
from abc import ABC, abstractmethod


class LLMProvider(ABC):
    """Base class for LLM providers"""
    
    @abstractmethod
    def create_completion(self, messages: List[Dict], model: str, **kwargs) -> Any:
        """Create a chat completion"""
        pass
    
    @abstractmethod
    def get_response_text(self, completion: Any) -> str:
        """Extract text from completion response"""
        pass
    
    @abstractmethod
    def get_usage_info(self, completion: Any) -> Dict:
        """Extract token usage information"""
        pass
    
    @abstractmethod
    def calculate_cost(self, usage: Dict, model: str) -> float:
        """Calculate cost based on usage"""
        pass

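# A concrete backend plugs in by subclassing LLMProvider and implementing the
# four methods above. A minimal hypothetical sketch (not registered in
# get_provider below):
#
#   class EchoProvider(LLMProvider):
#       def create_completion(self, messages, model, **kwargs):
#           return {"text": messages[-1]["content"]}
#       def get_response_text(self, completion):
#           return completion["text"]
#       def get_usage_info(self, completion):
#           return {"prompt_tokens": 0, "completion_tokens": 0,
#                   "total_tokens": 0, "cached_tokens": 0}
#       def calculate_cost(self, usage, model):
#           return 0.0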

class OpenAIProvider(LLMProvider):
    """OpenAI API implementation"""
    
    def __init__(self, api_key: Optional[str] = None):
        from openai import OpenAI
        self.client = OpenAI(api_key=api_key or os.environ.get("OPENAI_API_KEY"))
    
    def create_completion(self, messages: List[Dict], model: str, **kwargs) -> Any:
        """Create OpenAI chat completion"""
        return self.client.chat.completions.create(
            model=model,
            messages=messages,
            **kwargs
        )
    
    def get_response_text(self, completion: Any) -> str:
        """Extract text from OpenAI response"""
        return completion.choices[0].message.content
    
    def get_usage_info(self, completion: Any) -> Dict:
        """Extract usage from OpenAI response"""
        return {
            'prompt_tokens': completion.usage.prompt_tokens,
            'completion_tokens': completion.usage.completion_tokens,
            'total_tokens': completion.usage.total_tokens,
            # prompt_tokens_details may be absent or None depending on model/SDK version
            'cached_tokens': getattr(
                getattr(completion.usage, 'prompt_tokens_details', None),
                'cached_tokens', 0
            ) or 0
        }
    
    def calculate_cost(self, usage: Dict, model: str) -> float:
        """Calculate OpenAI cost"""
        # Pricing per 1M tokens
        model_costs = {
            "gpt-4o-mini": {"input": 0.150, "cached": 0.075, "output": 0.600},
            "gpt-4o": {"input": 2.50, "cached": 1.25, "output": 10.00},
            "gpt-3.5-turbo": {"input": 0.50, "cached": 0.25, "output": 1.50},
            "o3-mini": {"input": 1.10, "cached": 0.55, "output": 4.40},
        }
        
        costs = model_costs.get(model, model_costs["gpt-4o-mini"])
        
        prompt_tokens = usage['prompt_tokens']
        cached_tokens = usage.get('cached_tokens', 0)
        completion_tokens = usage['completion_tokens']
        
        actual_input_tokens = prompt_tokens - cached_tokens
        
        input_cost = (actual_input_tokens / 1_000_000) * costs["input"]
        cached_cost = (cached_tokens / 1_000_000) * costs["cached"]
        output_cost = (completion_tokens / 1_000_000) * costs["output"]
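        # Worked example (hypothetical numbers, gpt-4o-mini pricing above):
        # 10,000 prompt tokens (2,000 cached) + 1,000 completion tokens
        #   = (8,000/1e6)*0.150 + (2,000/1e6)*0.075 + (1,000/1e6)*0.600
        #   = 0.0012 + 0.00015 + 0.0006 = $0.00195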
        
        return input_cost + cached_cost + output_cost


class GeminiProvider(LLMProvider):
    """Google Gemini API implementation"""
    
    def __init__(self, api_key: Optional[str] = None):
        try:
            import google.generativeai as genai
            self.genai = genai
            genai.configure(api_key=api_key or os.environ.get("GEMINI_API_KEY"))
        except ImportError:
            raise ImportError(
                "google-generativeai not installed. "
                "Install with: pip install google-generativeai"
            )
    
    def create_completion(self, messages: List[Dict], model: str, **kwargs) -> Any:
        """Create Gemini chat completion"""
        # Convert OpenAI message format to Gemini format
        gemini_messages = self._convert_messages(messages)
        
        # Fix model name - Gemini expects models/model-name format
        if not model.startswith('models/'):
            model = f'models/{model}'
        
        # Create model
        gemini_model = self.genai.GenerativeModel(model)
        
        # Generate response
        response = gemini_model.generate_content(
            gemini_messages,
            generation_config=self._get_generation_config(**kwargs)
        )
        
        return response
    
    def _convert_messages(self, messages: List[Dict]) -> str:
        """Convert OpenAI messages to Gemini prompt format"""
        # Gemini uses a simpler format - concatenate all messages
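        # e.g. [{'role': 'user', 'content': 'Hi'}] becomes "User:\nHi\n"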
        prompt_parts = []
        
        for msg in messages:
            role = msg['role']
            content = msg['content']
            
            if role == 'system':
                prompt_parts.append(f"System Instructions:\n{content}\n")
            elif role == 'user':
                prompt_parts.append(f"User:\n{content}\n")
            elif role == 'assistant':
                prompt_parts.append(f"Assistant:\n{content}\n")
        
        return "\n".join(prompt_parts)
    
    def _get_generation_config(self, **kwargs):
        """Convert OpenAI kwargs to Gemini generation config"""
        config = {}
        
        # Map common parameters
        if 'temperature' in kwargs:
            config['temperature'] = kwargs['temperature']
        if 'max_tokens' in kwargs:
            config['max_output_tokens'] = kwargs['max_tokens']
        if 'top_p' in kwargs:
            config['top_p'] = kwargs['top_p']
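        # e.g. {'temperature': 0.2, 'max_tokens': 512} maps to
        # {'temperature': 0.2, 'max_output_tokens': 512}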
        
        return config
    
    def get_response_text(self, completion: Any) -> str:
        """Extract text from Gemini response"""
        return completion.text
    
    def get_usage_info(self, completion: Any) -> Dict:
        """Extract usage from Gemini response"""
        # Gemini provides token counts in metadata
        try:
            metadata = completion.usage_metadata
            return {
                'prompt_tokens': metadata.prompt_token_count,
                'completion_tokens': metadata.candidates_token_count,
                'total_tokens': metadata.total_token_count,
                'cached_tokens': getattr(metadata, 'cached_content_token_count', 0)
            }
        except AttributeError:
            # Fallback if metadata not available
            return {
                'prompt_tokens': 0,
                'completion_tokens': 0,
                'total_tokens': 0,
                'cached_tokens': 0
            }
    
    def calculate_cost(self, usage: Dict, model: str) -> float:
        """Calculate Gemini cost"""
        # Gemini pricing per 1M tokens (as of Jan 2025)
        model_costs = {
            "gemini-1.5-flash": {"input": 0.075, "cached": 0.01875, "output": 0.30},
            "gemini-1.5-pro": {"input": 1.25, "cached": 0.3125, "output": 5.00},
            "gemini-2.0-flash-exp": {"input": 0.0, "cached": 0.0, "output": 0.0},  # Free during preview
        }
        
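        # Models not listed above (e.g. the gemini-2.0-flash-lite default)
        # fall back to gemini-1.5-flash pricing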
        costs = model_costs.get(model, model_costs["gemini-1.5-flash"])
        
        prompt_tokens = usage['prompt_tokens']
        cached_tokens = usage.get('cached_tokens', 0)
        completion_tokens = usage['completion_tokens']
        
        actual_input_tokens = prompt_tokens - cached_tokens
        
        input_cost = (actual_input_tokens / 1_000_000) * costs["input"]
        cached_cost = (cached_tokens / 1_000_000) * costs["cached"]
        output_cost = (completion_tokens / 1_000_000) * costs["output"]
        
        return input_cost + cached_cost + output_cost


class GemmaProvider(LLMProvider):
    """NVIDIA Gemma API implementation"""
    
    def __init__(self, api_key: Optional[str] = None):
        import requests
        self.requests = requests
        self.api_key = api_key or os.environ.get("NVIDIA_API_KEY")
        if not self.api_key:
            raise ValueError(
                "NVIDIA_API_KEY not found. "
                "Set it as an environment variable or pass it to the constructor."
            )
        self.invoke_url = "https://integrate.api.nvidia.com/v1/chat/completions"
    
    def create_completion(self, messages: List[Dict], model: str, **kwargs) -> Any:
        """Create Gemma chat completion"""
        # Prepare headers
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Accept": "application/json"  # Non-streaming for simplicity
        }
        
        # Prepare payload
        payload = {
            "model": model,
            "messages": messages,
            "max_tokens": kwargs.get('max_tokens', 512),
            "temperature": kwargs.get('temperature', 0.20),
            "top_p": kwargs.get('top_p', 0.70),
            "stream": False  # Disable streaming for now
        }
        
        # Make request
        response = self.requests.post(self.invoke_url, headers=headers, json=payload)
        response.raise_for_status()
        
        return response.json()
    
    def get_response_text(self, completion: Any) -> str:
        """Extract text from Gemma response"""
        # NVIDIA API returns OpenAI-compatible format
        if isinstance(completion, dict):
            return completion['choices'][0]['message']['content']
        return str(completion)
    
    def get_usage_info(self, completion: Any) -> Dict:
        """Extract usage from Gemma response"""
        try:
            usage = completion.get('usage', {})
            return {
                'prompt_tokens': usage.get('prompt_tokens', 0),
                'completion_tokens': usage.get('completion_tokens', 0),
                'total_tokens': usage.get('total_tokens', 0),
                'cached_tokens': 0  # NVIDIA API doesn't provide cached token info
            }
        except (AttributeError, TypeError):
            return {
                'prompt_tokens': 0,
                'completion_tokens': 0,
                'total_tokens': 0,
                'cached_tokens': 0
            }
    
    def calculate_cost(self, usage: Dict, model: str) -> float:
        """Calculate Gemma cost"""
        # NVIDIA API pricing (check current pricing at build.nvidia.com)
        # For now, using placeholder values - update with actual pricing
        model_costs = {
            "google/gemma-3-27b-it": {"input": 0.0, "output": 0.0},  # Free tier or update with actual costs
        }
        
        costs = model_costs.get(model, {"input": 0.0, "output": 0.0})
        
        prompt_tokens = usage['prompt_tokens']
        completion_tokens = usage['completion_tokens']
        
        input_cost = (prompt_tokens / 1_000_000) * costs["input"]
        output_cost = (completion_tokens / 1_000_000) * costs["output"]
        
        return input_cost + output_cost


def get_provider(provider_name: str, api_key: Optional[str] = None) -> LLMProvider:
    """
    Factory function to get LLM provider.
    
    Args:
        provider_name: Name of provider ('openai', 'gemini', or 'gemma')
        api_key: Optional API key (uses env var if not provided)
    
    Returns:
        LLMProvider instance
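    
    Example (a minimal sketch; assumes the matching API key is set):
        provider = get_provider('openai')
        completion = provider.create_completion(
            [{'role': 'user', 'content': 'Hello'}],
            model=get_default_model('openai'),
        )
        print(provider.get_response_text(completion))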
    """
    providers = {
        'openai': OpenAIProvider,
        'gemini': GeminiProvider,
        'gemma': GemmaProvider,
    }
    
    if provider_name not in providers:
        raise ValueError(
            f"Unknown provider: {provider_name}. "
            f"Available providers: {list(providers.keys())}"
        )
    
    return providers[provider_name](api_key=api_key)


def get_default_model(provider_name: str) -> str:
    """Get default model for a provider"""
    defaults = {
        'openai': 'gpt-4o-mini',
        'gemini': 'gemini-2.0-flash-lite',
        'gemma': 'google/gemma-3-27b-it',
    }
    return defaults.get(provider_name, 'gpt-4o-mini')


if __name__ == "__main__":
    # Test script
    print("Testing LLM Provider abstraction...")
    
    # Test OpenAI
    try:
        provider = get_provider('openai')
        print("βœ… OpenAI provider initialized")
    except Exception as e:
        print(f"❌ OpenAI provider failed: {e}")
    
    # Test Gemini
    try:
        provider = get_provider('gemini')
        print("βœ… Gemini provider initialized")
    except Exception as e:
        print(f"❌ Gemini provider failed: {e}")
    
    # Test Gemma
    try:
        provider = get_provider('gemma')
        print("βœ… Gemma provider initialized")
    except Exception as e:
        print(f"❌ Gemma provider failed: {e}")