From d726473c6d81924b49442155498e3742a42a2d06 Mon Sep 17 00:00:00 2001
From: Joel
Date: Tue, 23 Jul 2024 13:31:32 +0800
Subject: [PATCH] Revert "chore: use node specify llm to auto generate prompt" (#6555)

---
 .../config-prompt/simple-prompt-input.tsx     |  10 --
 .../config/automatic/get-automatic-res.tsx    |  12 +-
 .../nodes/_base/components/prompt/editor.tsx  |   5 +-
 .../llm/components/config-prompt-item.tsx     |   5 +-
 .../nodes/llm/components/config-prompt.tsx    |   6 +-
 .../llm/components/prompt-generator-btn.tsx   |   6 -
 .../components/workflow/nodes/llm/panel.tsx   |   1 -
 web/service/debug.ts                          |   1 -
 web/types/app.ts                              | 109 +++++++++---------
 9 files changed, 57 insertions(+), 98 deletions(-)

diff --git a/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx b/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx
index da46d3a609..b0a140fc97 100644
--- a/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx
+++ b/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx
@@ -14,7 +14,6 @@ import PromptEditorHeightResizeWrap from './prompt-editor-height-resize-wrap'
 import cn from '@/utils/classnames'
 import { type PromptVariable } from '@/models/debug'
 import Tooltip from '@/app/components/base/tooltip'
-import type { CompletionParams } from '@/types/app'
 import { AppType } from '@/types/app'
 import { getNewVar, getVars } from '@/utils/var'
 import AutomaticBtn from '@/app/components/app/configuration/config/automatic/automatic-btn'
@@ -59,7 +58,6 @@ const Prompt: FC = ({
   const { eventEmitter } = useEventEmitterContextContext()
   const {
     modelConfig,
-    completionParams,
     dataSets,
     setModelConfig,
     setPrevPromptConfig,
@@ -249,14 +247,6 @@ const Prompt: FC = ({
       {showAutomatic && (
diff --git a/web/app/components/app/configuration/config/automatic/get-automatic-res.tsx b/web/app/components/app/configuration/config/automatic/get-automatic-res.tsx
 void
   onFinished: (res: AutomaticRes) => void
@@ -59,7 +57,6 @@ const TryLabel: FC<{

 const GetAutomaticRes: FC = ({
   mode,
-  model,
   isShow,
   onClose,
   isInLLMNode,
@@ -152,17 +149,10 @@ const GetAutomaticRes: FC = ({
       return
     setLoadingTrue()
     try {
-      const { error, ...res } = await generateRule({
+      const res = await generateRule({
         instruction,
-        model_config: model,
       })
       setRes(res)
-      if (error) {
-        Toast.notify({
-          type: 'error',
-          message: error,
-        })
-      }
     }
     finally {
       setLoadingFalse()
diff --git a/web/app/components/workflow/nodes/_base/components/prompt/editor.tsx b/web/app/components/workflow/nodes/_base/components/prompt/editor.tsx
index 873dc0f17e..b350ff0f67 100644
--- a/web/app/components/workflow/nodes/_base/components/prompt/editor.tsx
+++ b/web/app/components/workflow/nodes/_base/components/prompt/editor.tsx
@@ -9,7 +9,6 @@ import { useTranslation } from 'react-i18next'
 import { useBoolean } from 'ahooks'
 import { BlockEnum, EditionType } from '../../../../types'
 import type {
-  ModelConfig,
   Node,
   NodeOutPutVar,
   Variable,
@@ -59,7 +58,6 @@ type Props = {
   availableNodes?: Node[]
   isSupportPromptGenerator?: boolean
   onGenerated?: (prompt: string) => void
-  modelConfig?: ModelConfig
   // for jinja
   isSupportJinja?: boolean
   editionType?: EditionType
@@ -92,7 +90,6 @@ const Editor: FC = ({
   varList = [],
   handleAddVariable,
   onGenerated,
-  modelConfig,
 }) => {
   const { t } = useTranslation()
   const { eventEmitter } = useEventEmitterContextContext()
@@ -133,7 +130,7 @@ const Editor: FC = ({
                 {value?.length || 0}
                 {isSupportPromptGenerator && (
-
+
                 )}
diff --git a/web/app/components/workflow/nodes/llm/components/config-prompt-item.tsx b/web/app/components/workflow/nodes/llm/components/config-prompt-item.tsx
index 39715a7c71..237e90ca9a 100644
--- a/web/app/components/workflow/nodes/llm/components/config-prompt-item.tsx
+++ b/web/app/components/workflow/nodes/llm/components/config-prompt-item.tsx
@@ -4,7 +4,7 @@ import React, { useCallback, useEffect, useState } from 'react'
 import { uniqueId } from 'lodash-es'
 import { useTranslation } from 'react-i18next'
 import { RiQuestionLine } from '@remixicon/react'
-import type { ModelConfig, PromptItem, Variable } from '../../../types'
+import type { PromptItem, Variable } from '../../../types'
 import { EditionType } from '../../../types'
 import { useWorkflowStore } from '../../../store'
 import Editor from '@/app/components/workflow/nodes/_base/components/prompt/editor'
@@ -38,7 +38,6 @@ type Props = {
   availableNodes: any
   varList: Variable[]
   handleAddVariable: (payload: any) => void
-  modelConfig?: ModelConfig
 }

 const roleOptions = [
@@ -78,7 +77,6 @@ const ConfigPromptItem: FC = ({
   availableNodes,
   varList,
   handleAddVariable,
-  modelConfig,
 }) => {
   const { t } = useTranslation()
   const workflowStore = useWorkflowStore()
@@ -140,7 +138,6 @@ const ConfigPromptItem: FC = ({
         availableNodes={availableNodes}
         isSupportPromptGenerator={payload.role === PromptRole.system}
         onGenerated={handleGenerated}
-        modelConfig={modelConfig}
         isSupportJinja
         editionType={payload.edition_type}
         onEditionTypeChange={onEditionTypeChange}
diff --git a/web/app/components/workflow/nodes/llm/components/config-prompt.tsx b/web/app/components/workflow/nodes/llm/components/config-prompt.tsx
index 2c6d725c42..8db8425b61 100644
--- a/web/app/components/workflow/nodes/llm/components/config-prompt.tsx
+++ b/web/app/components/workflow/nodes/llm/components/config-prompt.tsx
@@ -5,7 +5,7 @@ import { useTranslation } from 'react-i18next'
 import produce from 'immer'
 import { ReactSortable } from 'react-sortablejs'
 import { v4 as uuid4 } from 'uuid'
-import type { ModelConfig, PromptItem, ValueSelector, Var, Variable } from '../../../types'
+import type { PromptItem, ValueSelector, Var, Variable } from '../../../types'
 import { EditionType, PromptRole } from '../../../types'
 import useAvailableVarList from '../../_base/hooks/use-available-var-list'
 import { useWorkflowStore } from '../../../store'
@@ -33,7 +33,6 @@ type Props = {
   }
   varList?: Variable[]
   handleAddVariable: (payload: any) => void
-  modelConfig: ModelConfig
 }

 const ConfigPrompt: FC = ({
@@ -48,7 +47,6 @@ const ConfigPrompt: FC = ({
   hasSetBlockStatus,
   varList = [],
   handleAddVariable,
-  modelConfig,
 }) => {
   const { t } = useTranslation()
   const workflowStore = useWorkflowStore()
@@ -201,7 +199,6 @@ const ConfigPrompt: FC = ({
                   availableNodes={availableNodesWithParent}
                   varList={varList}
                   handleAddVariable={handleAddVariable}
-                  modelConfig={modelConfig}
                 />
             )
@@ -237,7 +234,6 @@ const ConfigPrompt: FC = ({
               onEditionTypeChange={handleCompletionEditionTypeChange}
               handleAddVariable={handleAddVariable}
               onGenerated={handleGenerated}
-              modelConfig={modelConfig}
             />
           )}
diff --git a/web/app/components/workflow/nodes/llm/components/prompt-generator-btn.tsx b/web/app/components/workflow/nodes/llm/components/prompt-generator-btn.tsx
index ed8e7df770..63d123402e 100644
--- a/web/app/components/workflow/nodes/llm/components/prompt-generator-btn.tsx
+++ b/web/app/components/workflow/nodes/llm/components/prompt-generator-btn.tsx
@@ -7,19 +7,14 @@ import { Generator } from '@/app/components/base/icons/src/vender/other'
 import GetAutomaticResModal from '@/app/components/app/configuration/config/automatic/get-automatic-res'
 import { AppType } from '@/types/app'
 import type { AutomaticRes } from '@/service/debug'
-import type { ModelConfig } from '@/app/components/workflow/types'
-import type { Model } from '@/types/app'
-
 type Props = {
   className?: string
   onGenerated?: (prompt: string) => void
-  modelConfig?: ModelConfig
 }

 const PromptGeneratorBtn: FC = ({
   className,
   onGenerated,
-  modelConfig,
 }) => {
   const [showAutomatic, { setTrue: showAutomaticTrue, setFalse: showAutomaticFalse }] = useBoolean(false)
   const handleAutomaticRes = useCallback((res: AutomaticRes) => {
@@ -37,7 +32,6 @@ const PromptGeneratorBtn: FC = ({
         isShow={showAutomatic}
         onClose={showAutomaticFalse}
         onFinished={handleAutomaticRes}
-        model={modelConfig as Model}
         isInLLMNode
       />
     )}
diff --git a/web/app/components/workflow/nodes/llm/panel.tsx b/web/app/components/workflow/nodes/llm/panel.tsx
index 1c2ec3c985..791dc6133d 100644
--- a/web/app/components/workflow/nodes/llm/panel.tsx
+++ b/web/app/components/workflow/nodes/llm/panel.tsx
@@ -178,7 +178,6 @@ const Panel: FC> = ({
             hasSetBlockStatus={hasSetBlockStatus}
             varList={inputs.prompt_config?.jinja2_variables || []}
             handleAddVariable={handleAddVariable}
-            modelConfig={model}
           />
         )}
diff --git a/web/service/debug.ts b/web/service/debug.ts
index 8e90fe565f..a373a0dd6a 100644
--- a/web/service/debug.ts
+++ b/web/service/debug.ts
@@ -7,7 +7,6 @@ export type AutomaticRes = {
   prompt: string
   variables: string[]
   opening_statement: string
-  error?: string
 }

 export const sendChatMessage = async (appId: string, body: Record, { onData, onCompleted, onThought, onFile, onError, getAbortController, onMessageEnd, onMessageReplace }: {
diff --git a/web/types/app.ts b/web/types/app.ts
index 9432e6d05a..ed73e2f5f7 100644
--- a/web/types/app.ts
+++ b/web/types/app.ts
@@ -135,64 +135,9 @@ export enum AgentStrategy {
   react = 'react',
 }

-export type CompletionParams = {
-  /** Maximum number of tokens in the answer message returned by Completion */
-  max_tokens: number
-  /**
-   * A number between 0 and 2.
-   * The larger the number, the more random the result;
-   * otherwise, the more deterministic.
-   * When in use, choose either `temperature` or `top_p`.
-   * Default is 1.
-   */
-  temperature: number
-  /**
-   * Represents the proportion of probability mass samples to take,
-   * e.g., 0.1 means taking the top 10% probability mass samples.
-   * The determinism between the samples is basically consistent.
-   * Among these results, the `top_p` probability mass results are taken.
-   * When in use, choose either `temperature` or `top_p`.
-   * Default is 1.
-   */
-  top_p: number
-  /** When enabled, the Completion Text will concatenate the Prompt content together and return it. */
-  echo: boolean
-  /**
-   * Specify up to 4 to automatically stop generating before the text specified in `stop`.
-   * Suitable for use in chat mode.
-   * For example, specify "Q" and "A",
-   * and provide some Q&A examples as context,
-   * and the model will give out in Q&A format and stop generating before Q&A.
-   */
-  stop: string[]
-  /**
-   * A number between -2.0 and 2.0.
-   * The larger the value, the less the model will repeat topics and the more it will provide new topics.
-   */
-  presence_penalty: number
-  /**
-   * A number between -2.0 and 2.0.
-   * A lower setting will make the model appear less cultured,
-   * always repeating expressions.
-   * The difference between `frequency_penalty` and `presence_penalty`
-   * is that `frequency_penalty` penalizes a word based on its frequency in the training data,
-   * while `presence_penalty` penalizes a word based on its occurrence in the input text.
-   */
-  frequency_penalty: number
-}
 /**
  * Model configuration. The backend type.
  */
-export type Model = {
-  /** LLM provider, e.g., OPENAI */
-  provider: string
-  /** Model name, e.g, gpt-3.5.turbo */
-  name: string
-  mode: ModelModeType
-  /** Default Completion call parameters */
-  completion_params: CompletionParams
-}
-
 export type ModelConfig = {
   opening_statement: string
   suggested_questions?: string[]
@@ -229,7 +174,59 @@ export type ModelConfig = {
     strategy?: AgentStrategy
     tools: ToolItem[]
   }
-  model: Model
+  model: {
+    /** LLM provider, e.g., OPENAI */
+    provider: string
+    /** Model name, e.g, gpt-3.5.turbo */
+    name: string
+    mode: ModelModeType
+    /** Default Completion call parameters */
+    completion_params: {
+      /** Maximum number of tokens in the answer message returned by Completion */
+      max_tokens: number
+      /**
+       * A number between 0 and 2.
+       * The larger the number, the more random the result;
+       * otherwise, the more deterministic.
+       * When in use, choose either `temperature` or `top_p`.
+       * Default is 1.
+       */
+      temperature: number
+      /**
+       * Represents the proportion of probability mass samples to take,
+       * e.g., 0.1 means taking the top 10% probability mass samples.
+       * The determinism between the samples is basically consistent.
+       * Among these results, the `top_p` probability mass results are taken.
+       * When in use, choose either `temperature` or `top_p`.
+       * Default is 1.
+       */
+      top_p: number
+      /** When enabled, the Completion Text will concatenate the Prompt content together and return it. */
+      echo: boolean
+      /**
+       * Specify up to 4 to automatically stop generating before the text specified in `stop`.
+       * Suitable for use in chat mode.
+       * For example, specify "Q" and "A",
+       * and provide some Q&A examples as context,
+       * and the model will give out in Q&A format and stop generating before Q&A.
+       */
+      stop: string[]
+      /**
+       * A number between -2.0 and 2.0.
+       * The larger the value, the less the model will repeat topics and the more it will provide new topics.
+       */
+      presence_penalty: number
+      /**
+       * A number between -2.0 and 2.0.
+       * A lower setting will make the model appear less cultured,
+       * always repeating expressions.
+       * The difference between `frequency_penalty` and `presence_penalty`
+       * is that `frequency_penalty` penalizes a word based on its frequency in the training data,
+       * while `presence_penalty` penalizes a word based on its occurrence in the input text.
+       */
+      frequency_penalty: number
+    }
+  }
   dataset_configs: DatasetConfigs
   file_upload?: {
     image: VisionSettings