diff --git a/lambda_function.py b/lambda_function.py
index 3132692..12d6b03 100644
--- a/lambda_function.py
+++ b/lambda_function.py
@@ -51,7 +51,16 @@ def process_event(event):
         text = text.split(" ", 1)[1]
         messages = [{"role": "user", "content": text}]
         bot.send_chat_action(chat_id, "typing")
-        response = escape_special_characters(create_conversation(messages))
+        conversation = create_conversation(messages)
+        content = escape_special_characters(conversation[0])
+        model = conversation[1]
+        tokens = conversation[2]
+        response = (
+            f"{content}\n\n"
+            f"\-\-\-\-\-\-\-\-\-\- *Information* \-\-\-\-\-\-\-\-\-\-\n"
+            f"Model: `{model}`\n"
+            f"Tokens: `{tokens[0]}({tokens[1]}\+{tokens[2]})`"
+        )
         bot.reply_message(chat_id, message_id, response)
     except IndexError:
         bot.reply_message(chat_id, message_id, "请输入对话内容")
diff --git a/utils/chat.py b/utils/chat.py
index fc7c541..409c655 100644
--- a/utils/chat.py
+++ b/utils/chat.py
@@ -5,10 +5,17 @@ import openai
 openai.api_key = os.environ["OPENAI_API_KEY"]
 
 
-def create_conversation(messages: list) -> str:
+def create_conversation(messages: list):
     response = openai.ChatCompletion.create(
         model="gpt-3.5-turbo",
         messages=messages,
         temperature=1,
     )
-    return response["choices"][0]["message"]["content"]
+    content = response["choices"][0]["message"]["content"]
+    model = response["model"]
+    tokens = [
+        response["usage"]["total_tokens"],
+        response["usage"]["prompt_tokens"],
+        response["usage"]["completion_tokens"],
+    ]
+    return content, model, tokens
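
A note on the `\-` and `\+` sequences in the new reply template: the bot evidently sends replies with Telegram's MarkdownV2 parse mode, in which `-`, `+`, `.`, and most other punctuation must be backslash-escaped or the API rejects the message with a parse error. The `escape_special_characters` helper applied to the model output is not shown in this diff; below is a minimal sketch of what such a helper could look like, assuming it escapes the MarkdownV2 reserved set (an illustrative implementation, not the repository's actual code):

```python
import re

# Hypothetical sketch: prefix every character that Telegram's MarkdownV2
# parse mode treats as reserved with a backslash, so raw model output can
# be embedded in the reply verbatim. The real helper lives elsewhere in
# the repository and may differ.
MARKDOWN_V2_SPECIAL = r"_*[]()~`>#+-=|{}.!"

def escape_special_characters(text: str) -> str:
    # Capture each reserved character and re-emit it with a leading "\".
    return re.sub(f"([{re.escape(MARKDOWN_V2_SPECIAL)}])", r"\\\1", text)
```

One caveat with the template itself: `\-` and `\+` inside a regular (non-raw) f-string are invalid escape sequences that Python keeps as-is but flags with a DeprecationWarning (a SyntaxWarning from Python 3.12), so `\\-` and `\\+` would be the more future-proof spelling.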