diff --git a/api/core/model_runtime/model_providers/openai/llm/_position.yaml b/api/core/model_runtime/model_providers/openai/llm/_position.yaml
index 3808d670c3..566055e3f7 100644
--- a/api/core/model_runtime/model_providers/openai/llm/_position.yaml
+++ b/api/core/model_runtime/model_providers/openai/llm/_position.yaml
@@ -1,4 +1,6 @@
 - gpt-4
+- gpt-4o
+- gpt-4o-2024-05-13
 - gpt-4-turbo
 - gpt-4-turbo-2024-04-09
 - gpt-4-turbo-preview
diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-05-13.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-05-13.yaml
new file mode 100644
index 0000000000..f0d835cba2
--- /dev/null
+++ b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-05-13.yaml
@@ -0,0 +1,44 @@
+model: gpt-4o-2024-05-13
+label:
+  zh_Hans: gpt-4o-2024-05-13
+  en_US: gpt-4o-2024-05-13
+model_type: llm
+features:
+  - multi-tool-call
+  - agent-thought
+  - stream-tool-call
+  - vision
+model_properties:
+  mode: chat
+  context_size: 128000
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: presence_penalty
+    use_template: presence_penalty
+  - name: frequency_penalty
+    use_template: frequency_penalty
+  - name: max_tokens
+    use_template: max_tokens
+    default: 512
+    min: 1
+    max: 4096
+  - name: response_format
+    label:
+      zh_Hans: 回复格式
+      en_US: response_format
+    type: string
+    help:
+      zh_Hans: 指定模型必须输出的格式
+      en_US: specifying the format that the model must output
+    required: false
+    options:
+      - text
+      - json_object
+pricing:
+  input: '5.00'
+  output: '15.00'
+  unit: '0.000001'
+  currency: USD
diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4o.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4o.yaml
new file mode 100644
index 0000000000..4f141f772f
--- /dev/null
+++ b/api/core/model_runtime/model_providers/openai/llm/gpt-4o.yaml
@@ -0,0 +1,44 @@
+model: gpt-4o
+label:
+  zh_Hans: gpt-4o
+  en_US: gpt-4o
+model_type: llm
+features:
+  - multi-tool-call
+  - agent-thought
+  - stream-tool-call
+  - vision
+model_properties:
+  mode: chat
+  context_size: 128000
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: presence_penalty
+    use_template: presence_penalty
+  - name: frequency_penalty
+    use_template: frequency_penalty
+  - name: max_tokens
+    use_template: max_tokens
+    default: 512
+    min: 1
+    max: 4096
+  - name: response_format
+    label:
+      zh_Hans: 回复格式
+      en_US: response_format
+    type: string
+    help:
+      zh_Hans: 指定模型必须输出的格式
+      en_US: specifying the format that the model must output
+    required: false
+    options:
+      - text
+      - json_object
+pricing:
+  input: '5.00'
+  output: '15.00'
+  unit: '0.000001'
+  currency: USD
diff --git a/api/requirements.txt b/api/requirements.txt
index 6d08202527..39cbfaad99 100644
--- a/api/requirements.txt
+++ b/api/requirements.txt
@@ -9,8 +9,8 @@ flask-restful~=0.3.10
 flask-cors~=4.0.0
 gunicorn~=22.0.0
 gevent~=23.9.1
-openai~=1.26.0
-tiktoken~=0.6.0
+openai~=1.29.0
+tiktoken~=0.7.0
 psycopg2-binary~=2.9.6
 pycryptodome==3.19.1
 python-dotenv==1.0.0