tanlie 1 rok pred
rodič
commit
4e98cc614d

+ 0 - 1
bot/ali/ali_qwen_bot.py

@@ -5,7 +5,6 @@ import time
 from typing import List, Tuple
 
 import openai
-import openai.error
 import broadscope_bailian
 from broadscope_bailian import ChatQaMessage
 

+ 9 - 10
bot/chatgpt/chat_gpt_bot.py

@@ -3,12 +3,8 @@
 import time
 
 import openai
-from openai import AzureOpenAI
-
-client = AzureOpenAI(api_key=conf().get("open_ai_api_key"),
-api_version=conf().get("azure_api_version", "2023-06-01-preview"))
-import openai.error
 import requests
+from openai import OpenAI
 
 from bot.bot import Bot
 from bot.chatgpt.chat_gpt_session import ChatGPTSession
@@ -20,6 +16,8 @@ from common.log import logger
 from common.token_bucket import TokenBucket
 from config import conf, load_config
 
+client = OpenAI(api_key=conf().get("open_ai_api_key"),
+                base_url=conf().get("open_ai_api_base", "https://api.openai.com/v1"))
 
 # OpenAI对话模型API (可用)
 class ChatGPTBot(Bot, OpenAIImage):
@@ -28,11 +26,11 @@ class ChatGPTBot(Bot, OpenAIImage):
         # set the default api_key
         if conf().get("open_ai_api_base"):
             # TODO: The 'openai.api_base' option isn't read in the client API. You will need to pass it when you instantiate the client, e.g. 'OpenAI(base_url=conf().get("open_ai_api_base"))'
-            # openai.api_base = conf().get("open_ai_api_base")
+            openai.base_url = conf().get("open_ai_api_base")
         proxy = conf().get("proxy")
         if proxy:
             # TODO: The 'openai.proxy' option isn't read in the client API. You will need to pass it when you instantiate the client, e.g. 'OpenAI(proxy=proxy)'
-            # openai.proxy = proxy
+            openai.proxy = proxy
         if conf().get("rate_limit_chatgpt"):
             self.tb4chatgpt = TokenBucket(conf().get("rate_limit_chatgpt", 20))
 
@@ -125,7 +123,7 @@ class ChatGPTBot(Bot, OpenAIImage):
             # NOTE(review): stale comment — after the v1 migration the call below no longer forwards api_key; the client's configured key is always used. Confirm and drop the unused parameter.
             if args is None:
                 args = self.args
-            response = client.chat.completions.create(api_key=api_key, messages=session.messages, **args)
+            response = client.chat.completions.create(messages=session.messages, **args)
             # logger.debug("[CHATGPT] response={}".format(response))
             # logger.info("[ChatGPT] reply={}, total_tokens={}".format(response.choices[0]['message']['content'], response["usage"]["total_tokens"]))
             return {
@@ -173,9 +171,10 @@ class AzureChatGPTBot(ChatGPTBot):
         super().__init__()
         self.args["deployment_id"] = conf().get("azure_deployment_id")
 
-    def create_img(self, query, retry_count=0, api_key=None):
+    def create_img(self, query, retry_count=0, api_key=None, **kwargs):
+        # TODO 升级一下这个version
         api_version = "2022-08-03-preview"
-        url = "{}dalle/text-to-image?api-version={}".format(openai.api_base, api_version)
+        url = "{}dalle/text-to-image?api-version={}".format(openai.base_url, api_version)
         api_key = api_key or openai.api_key
         headers = {"api-key": api_key, "Content-Type": "application/json"}
         try:

+ 5 - 5
bot/claudeapi/claude_api_bot.py

@@ -3,7 +3,6 @@
 import time
 
 import openai
-import openai.error
 import anthropic
 
 from bot.bot import Bot
@@ -28,11 +27,11 @@ class ClaudeAPIBot(Bot, OpenAIImage):
         )
         if conf().get("open_ai_api_base"):
             # TODO: The 'openai.api_base' option isn't read in the client API. You will need to pass it when you instantiate the client, e.g. 'OpenAI(base_url=conf().get("open_ai_api_base"))'
-            # openai.api_base = conf().get("open_ai_api_base")
+            openai.base_url = conf().get("open_ai_api_base")
         proxy = conf().get("proxy")
         if proxy:
             # TODO: The 'openai.proxy' option isn't read in the client API. You will need to pass it when you instantiate the client, e.g. 'OpenAI(proxy=proxy)'
-            # openai.proxy = proxy
+            openai.proxy = proxy
 
         self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "text-davinci-003")
 
@@ -59,7 +58,8 @@ class ClaudeAPIBot(Bot, OpenAIImage):
                         result["content"],
                     )
                     logger.debug(
-                        "[CLAUDE_API] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(str(session), session_id, reply_content, completion_tokens)
+                        "[CLAUDE_API] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(
+                            str(session), session_id, reply_content, completion_tokens)
                     )
 
                     if total_tokens == 0:
@@ -88,7 +88,7 @@ class ClaudeAPIBot(Bot, OpenAIImage):
             )
             # response = openai.Completion.create(prompt=str(session), **self.args)
             res_content = response.content[0].text.strip().replace("<|endoftext|>", "")
-            total_tokens = response.usage.input_tokens+response.usage.output_tokens
+            total_tokens = response.usage.input_tokens + response.usage.output_tokens
             completion_tokens = response.usage.output_tokens
             logger.info("[CLAUDE_API] reply={}".format(res_content))
             return {

+ 2 - 3
bot/moonshot/moonshot_bot.py

@@ -2,8 +2,8 @@
 
 import time
 
-import openai
-import openai.error
+import requests
+
 from bot.bot import Bot
 from bot.session_manager import SessionManager
 from bridge.context import ContextType
@@ -11,7 +11,6 @@ from bridge.reply import Reply, ReplyType
 from common.log import logger
 from config import conf, load_config
 from .moonshot_session import MoonshotSession
-import requests
 
 
 # Moonshot对话模型API

+ 7 - 6
bot/openai/open_ai_bot.py

@@ -5,9 +5,6 @@ import time
 import openai
 from openai import OpenAI
 
-client = OpenAI(api_key=conf().get("open_ai_api_key"))
-import openai.error
-
 from bot.bot import Bot
 from bot.openai.open_ai_image import OpenAIImage
 from bot.openai.open_ai_session import OpenAISession
@@ -17,6 +14,9 @@ from bridge.reply import Reply, ReplyType
 from common.log import logger
 from config import conf
 
+client = OpenAI(api_key=conf().get("open_ai_api_key"),
+                base_url=conf().get("open_ai_api_base", "https://api.openai.com/v1"))
+
 user_session = dict()
 
 
@@ -26,11 +26,11 @@ class OpenAIBot(Bot, OpenAIImage):
         super().__init__()
         if conf().get("open_ai_api_base"):
             # TODO: The 'openai.api_base' option isn't read in the client API. You will need to pass it when you instantiate the client, e.g. 'OpenAI(base_url=conf().get("open_ai_api_base"))'
-            # openai.api_base = conf().get("open_ai_api_base")
+            openai.base_url = conf().get("open_ai_api_base")
         proxy = conf().get("proxy")
         if proxy:
             # TODO: The 'openai.proxy' option isn't read in the client API. You will need to pass it when you instantiate the client, e.g. 'OpenAI(proxy=proxy)'
-            # openai.proxy = proxy
+            openai.proxy = proxy
 
         self.sessions = SessionManager(OpenAISession, model=conf().get("model") or "text-davinci-003")
         self.args = {
@@ -67,7 +67,8 @@ class OpenAIBot(Bot, OpenAIImage):
                         result["content"],
                     )
                     logger.debug(
-                        "[OPEN_AI] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(str(session), session_id, reply_content, completion_tokens)
+                        "[OPEN_AI] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(
+                            str(session), session_id, reply_content, completion_tokens)
                     )
 
                     if total_tokens == 0:

+ 6 - 7
bot/openai/open_ai_image.py

@@ -3,13 +3,13 @@ import time
 import openai
 from openai import OpenAI
 
-client = OpenAI(api_key=conf().get("open_ai_api_key"))
-import openai.error
-
 from common.log import logger
 from common.token_bucket import TokenBucket
 from config import conf
 
+client = OpenAI(api_key=conf().get("open_ai_api_key"),
+                base_url=conf().get("open_ai_api_base", "https://api.openai.com/v1"))
+
 
 # OPENAI提供的画图接口
 class OpenAIImage(object):
@@ -22,10 +22,9 @@ class OpenAIImage(object):
             if conf().get("rate_limit_dalle") and not self.tb4dalle.get_token():
                 return False, "请求太快了,请休息一下再问我吧"
             logger.info("[OPEN_AI] image_query={}".format(query))
-            response = client.images.generate(api_key=api_key,
-            prompt=query,  # 图片描述
-            n=1,  # 每次生成图片的数量
-            model=conf().get("text_to_image") or "dall-e-2")
+            response = client.images.generate(prompt=query,  # 图片描述
+                                              n=1,  # 每次生成图片的数量
+                                              model=conf().get("text_to_image") or "dall-e-2")
             image_url = response.data[0].url
             logger.info("[OPEN_AI] image_url={}".format(image_url))
             return True, image_url

+ 0 - 1
bot/zhipuai/zhipuai_bot.py

@@ -3,7 +3,6 @@
 import time
 
 import openai
-import openai.error
 from bot.bot import Bot
 from bot.zhipuai.zhipu_ai_session import ZhipuAISession
 from bot.zhipuai.zhipu_ai_image import ZhipuAIImage

+ 2 - 1
requirements.txt

@@ -1,4 +1,4 @@
-openai==0.27.8
+openai==1.30.1
 HTMLParser>=0.0.2
 PyQRCode>=1.2.1
 qrcode>=7.4.2
@@ -8,3 +8,4 @@ Pillow
 pre-commit
 web.py
 linkai>=0.0.6.0
+tiktoken>=0.3.2

+ 8 - 9
voice/openai/openai_voice.py

@@ -1,21 +1,20 @@
 """
openai voice service
 """
-import json
 
-import openai
+import datetime
+import random
+
+import requests
 
 from bridge.reply import Reply, ReplyType
+from common import const
 from common.log import logger
 from config import conf
 from voice.voice import Voice
-import requests
-from common import const
-import datetime, random
 
-class OpenaiVoice(Voice):
-    def __init__(self):
 
+class OpenaiVoice(Voice):
     def voiceToText(self, voice_file):
         logger.debug("[Openai] voice file name={}".format(voice_file))
         try:
@@ -42,7 +41,6 @@ class OpenaiVoice(Voice):
         finally:
             return reply
 
-
     def textToVoice(self, text):
         try:
             api_base = conf().get("open_ai_api_base") or "https://api.openai.com/v1"
@@ -57,7 +55,8 @@ class OpenaiVoice(Voice):
                 'voice': conf().get("tts_voice_id") or "alloy"
             }
             response = requests.post(url, headers=headers, json=data)
-            file_name = "tmp/" + datetime.datetime.now().strftime('%Y%m%d%H%M%S') + str(random.randint(0, 1000)) + ".mp3"
+            file_name = "tmp/" + datetime.datetime.now().strftime('%Y%m%d%H%M%S') + str(
+                random.randint(0, 1000)) + ".mp3"
             logger.debug(f"[OPENAI] text_to_Voice file_name={file_name}, input={text}")
             with open(file_name, 'wb') as f:
                 f.write(response.content)