diff --git a/camel/model_backend.py b/camel/model_backend.py
index 5d895b44..d93f2c5e 100644
--- a/camel/model_backend.py
+++ b/camel/model_backend.py
@@ -18,6 +18,7 @@
 import openai
 import tiktoken
 
 from camel.typing import ModelType
+from chatdev.statistics import prompt_cost
 from chatdev.utils import log_and_print_online
 
@@ -69,11 +70,16 @@ class OpenAIModel(ModelBackend):
         response = openai.ChatCompletion.create(*args, **kwargs,
                                                 model=self.model_type.value,
                                                 **self.model_config_dict)
+        cost = prompt_cost(
+            self.model_type.value,
+            num_prompt_tokens=response["usage"]["prompt_tokens"],
+            num_completion_tokens=response["usage"]["completion_tokens"]
+        )
         log_and_print_online(
-            "**[OpenAI_Usage_Info Receive]**\nprompt_tokens: {}\ncompletion_tokens: {}\ntotal_tokens: {}\n".format(
+            "**[OpenAI_Usage_Info Receive]**\nprompt_tokens: {}\ncompletion_tokens: {}\ntotal_tokens: {}\ncost: ${:.6f}\n".format(
                 response["usage"]["prompt_tokens"],
                 response["usage"]["completion_tokens"],
-                response["usage"]["total_tokens"]))
+                response["usage"]["total_tokens"], cost))
         if not isinstance(response, Dict):
             raise RuntimeError("Unexpected return from OpenAI API")
         return response
diff --git a/chatdev/statistics.py b/chatdev/statistics.py
index e9687dff..f603a3ea 100644
--- a/chatdev/statistics.py
+++ b/chatdev/statistics.py
@@ -3,9 +3,37 @@ import os
 
 import numpy as np
 
 
+def prompt_cost(model_type: str, num_prompt_tokens: float, num_completion_tokens: float):
+    input_cost_map = {
+        "gpt-3.5-turbo": 0.0015,
+        "gpt-3.5-turbo-16k": 0.003,
+        "gpt-3.5-turbo-0613": 0.0015,
+        "gpt-3.5-turbo-16k-0613": 0.003,
+        "gpt-4": 0.03,
+        "gpt-4-0613": 0.03,
+        "gpt-4-32k": 0.06,
+    }
+
+    output_cost_map = {
+        "gpt-3.5-turbo": 0.002,
+        "gpt-3.5-turbo-16k": 0.004,
+        "gpt-3.5-turbo-0613": 0.002,
+        "gpt-3.5-turbo-16k-0613": 0.004,
+        "gpt-4": 0.06,
+        "gpt-4-0613": 0.06,
+        "gpt-4-32k": 0.12,
+    }
+
+    if model_type not in input_cost_map or model_type not in output_cost_map:
+        return -1
+
+    return num_prompt_tokens * input_cost_map[model_type] / 1000.0 + num_completion_tokens * output_cost_map[model_type] / 1000.0
+
+
 def get_info(dir, log_filepath):
     print("dir:", dir)
 
+    model_type = ""
     version_updates = -1
     num_code_files = -1
     num_png_files = -1
@@ -68,6 +96,18 @@ def get_info(dir, log_filepath):
             code_lines += len([line for line in lines if len(line.strip()) > 0])
     # print("code_lines:", code_lines)
 
+    lines = open(log_filepath, "r", encoding="utf8").read().split("\n")
+    sublines = [line for line in lines if "| **model_type** |" in line]
+    if len(sublines) > 0:
+        model_type = sublines[0].split("| **model_type** | ModelType.")[-1].split(" | ")[0]
+        if model_type == "GPT_3_5_TURBO":
+            model_type = "gpt-3.5-turbo"
+        elif model_type == "GPT_4":
+            model_type = "gpt-4"
+        elif model_type == "GPT_4_32k":
+            model_type = "gpt-4-32k"
+    # print("model_type:", model_type)
+
     lines = open(log_filepath, "r", encoding="utf8").read().split("\n")
     start_lines = [line for line in lines if "**[Start Chat]**" in line]
     chat_lines = [line for line in lines if "<->" in line]
@@ -107,10 +147,8 @@ def get_info(dir, log_filepath):
 
     cost = 0.0
     if num_png_files != -1:
         cost += num_png_files * 0.016
-    if num_prompt_tokens != -1:
-        cost += num_prompt_tokens * 0.003 / 1000.0
-    if num_completion_tokens != -1:
-        cost += num_completion_tokens * 0.004 / 1000.0
+    if prompt_cost(model_type, num_prompt_tokens, num_completion_tokens) != -1:
+        cost += prompt_cost(model_type, num_prompt_tokens, num_completion_tokens)
 
     # info = f"🕑duration={duration}s 💰cost=${cost} 🔨version_updates={version_updates} 📃num_code_files={num_code_files} 🏞num_png_files={num_png_files} 📚num_doc_files={num_doc_files} 📃code_lines={code_lines} 📋env_lines={env_lines} 📒manual_lines={manual_lines} 🗣num_utterances={num_utterance} 🤔num_self_reflections={num_reflection} ❓num_prompt_tokens={num_prompt_tokens} ❗num_completion_tokens={num_completion_tokens} ⁉️num_total_tokens={num_total_tokens}"