import hashlib
import json
import sys

import backoff
import openai
import requests
|
|
| |
| from openai.error import ( |
| APIConnectionError, |
| APIError, |
| RateLimitError, |
| ServiceUnavailableError, |
| Timeout, |
| ) |
|
|
# On-disk location for a persistent send cache (unexpanded "~"); presumably
# consumed by setup code outside this chunk — not referenced here. TODO confirm.
CACHE_PATH = "~/.aider.send.cache.v1"
# In-memory request cache: maps canonical request-key bytes -> API response.
# None disables caching (send_with_retries checks `CACHE is not None`).
CACHE = None
| |
|
|
|
|
@backoff.on_exception(
    backoff.expo,
    (
        Timeout,
        APIError,
        ServiceUnavailableError,
        RateLimitError,
        APIConnectionError,
        requests.exceptions.ConnectionError,
    ),
    max_tries=10,
    # BUG FIX: the backoff `details` dict has no "exception" key (it carries
    # target/args/kwargs/tries/elapsed/wait), so details.get("exception", ...)
    # always printed the literal fallback "Exception".  Per the backoff docs,
    # the active exception is reachable via sys.exc_info() inside handlers.
    on_backoff=lambda details: print(
        f"{sys.exc_info()[1]}\nRetry in {details['wait']:.1f} seconds."
    ),
)
def send_with_retries(model, messages, functions, stream):
    """Call openai.ChatCompletion.create with exponential-backoff retries.

    Transient failures (timeouts, rate limits, connection errors, API/service
    errors) are retried up to 10 times with exponentially growing waits; each
    retry prints the exception and the upcoming wait.

    Args:
        model: Model name, passed through to the OpenAI API.
        messages: Chat messages list for the completion request.
        functions: Optional function-calling schema; omitted from the request
            when None.
        stream: Whether to request a streaming response.  Streaming responses
            bypass the cache entirely.

    Returns:
        A ``(hash_object, response)`` tuple: ``hash_object`` is a sha1 over
        the canonicalized request kwargs (usable as a fingerprint/cache key),
        ``response`` is the API result or the cached value.
    """
    kwargs = dict(
        model=model,
        messages=messages,
        temperature=0,
        stream=stream,
    )
    if functions is not None:
        kwargs["functions"] = functions

    # Azure-style configuration sets these attributes on the openai module;
    # forward them to the request when present.
    if hasattr(openai, "api_deployment_id"):
        kwargs["deployment_id"] = openai.api_deployment_id
    if hasattr(openai, "api_engine"):
        kwargs["engine"] = openai.api_engine

    # Canonical JSON (sorted keys) so logically-equal requests hash the same.
    key = json.dumps(kwargs, sort_keys=True).encode()

    hash_object = hashlib.sha1(key)

    # Cache lookup only for non-streaming requests; CACHE is None when the
    # cache is disabled (as it is at module load in this file).
    if not stream and CACHE is not None and key in CACHE:
        return hash_object, CACHE[key]

    res = openai.ChatCompletion.create(**kwargs)

    if not stream and CACHE is not None:
        CACHE[key] = res

    return hash_object, res
|
|
|
|
def simple_send_with_retries(model, messages):
    """Fetch one non-streaming completion and return its text content.

    Thin convenience wrapper over send_with_retries: no function-calling
    schema, no streaming.  Returns None when the request is rejected as
    invalid or the response lacks the expected message structure.
    """
    try:
        response = send_with_retries(
            model=model,
            messages=messages,
            functions=None,
            stream=False,
        )[1]
        return response.choices[0].message.content
    except (openai.error.InvalidRequestError, AttributeError):
        return None
|
|