fix: remove the stream option of zhipu and gemini (#9319)

非法操作 committed 6 months ago
commit da25b91980
26 changed files with 0 additions and 234 deletions
  1. +0 -9  api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-001.yaml
  2. +0 -9  api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-002.yaml
  3. +0 -9  api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-8b-exp-0827.yaml
  4. +0 -9  api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-8b-exp-0924.yaml
  5. +0 -9  api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-exp-0827.yaml
  6. +0 -9  api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-latest.yaml
  7. +0 -9  api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash.yaml
  8. +0 -9  api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-001.yaml
  9. +0 -9  api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-002.yaml
  10. +0 -9  api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-exp-0801.yaml
  11. +0 -9  api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-exp-0827.yaml
  12. +0 -9  api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-latest.yaml
  13. +0 -9  api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro.yaml
  14. +0 -9  api/core/model_runtime/model_providers/google/llm/gemini-pro-vision.yaml
  15. +0 -9  api/core/model_runtime/model_providers/google/llm/gemini-pro.yaml
  16. +0 -9  api/core/model_runtime/model_providers/zhipuai/llm/chatglm_turbo.yaml
  17. +0 -9  api/core/model_runtime/model_providers/zhipuai/llm/glm-4-0520.yaml
  18. +0 -9  api/core/model_runtime/model_providers/zhipuai/llm/glm-4-air.yaml
  19. +0 -9  api/core/model_runtime/model_providers/zhipuai/llm/glm-4-airx.yaml
  20. +0 -9  api/core/model_runtime/model_providers/zhipuai/llm/glm-4-flash.yaml
  21. +0 -9  api/core/model_runtime/model_providers/zhipuai/llm/glm_3_turbo.yaml
  22. +0 -9  api/core/model_runtime/model_providers/zhipuai/llm/glm_4.yaml
  23. +0 -9  api/core/model_runtime/model_providers/zhipuai/llm/glm_4_long.yaml
  24. +0 -9  api/core/model_runtime/model_providers/zhipuai/llm/glm_4_plus.yaml
  25. +0 -9  api/core/model_runtime/model_providers/zhipuai/llm/glm_4v.yaml
  26. +0 -9  api/core/model_runtime/model_providers/zhipuai/llm/glm_4v_plus.yaml
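
All fifteen Google Gemini files below drop the same nine-line parameter rule, presumably because streaming is selected per request at invocation time rather than as a per-model setting, which made the user-facing toggle redundant. For reference, the block removed from each Gemini file is:

  - name: stream
    label:
      zh_Hans: 流式输出
      en_US: Stream
    type: boolean
    help:
      zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
      en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
    default: false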

+0 -9  api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-001.yaml

@@ -32,15 +32,6 @@ parameter_rules:
     max: 8192
   - name: response_format
     use_template: response_format
-  - name: stream
-    label:
-      zh_Hans: 流式输出
-      en_US: Stream
-    type: boolean
-    help:
-      zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
-      en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
-    default: false
 pricing:
   input: '0.00'
   output: '0.00'

+0 -9  api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-002.yaml

@@ -32,15 +32,6 @@ parameter_rules:
     max: 8192
   - name: response_format
     use_template: response_format
-  - name: stream
-    label:
-      zh_Hans: 流式输出
-      en_US: Stream
-    type: boolean
-    help:
-      zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
-      en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
-    default: false
 pricing:
   input: '0.00'
   output: '0.00'

+0 -9  api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-8b-exp-0827.yaml

@@ -32,15 +32,6 @@ parameter_rules:
     max: 8192
   - name: response_format
     use_template: response_format
-  - name: stream
-    label:
-      zh_Hans: 流式输出
-      en_US: Stream
-    type: boolean
-    help:
-      zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
-      en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
-    default: false
 pricing:
   input: '0.00'
   output: '0.00'

+0 -9  api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-8b-exp-0924.yaml

@@ -32,15 +32,6 @@ parameter_rules:
     max: 8192
   - name: response_format
     use_template: response_format
-  - name: stream
-    label:
-      zh_Hans: 流式输出
-      en_US: Stream
-    type: boolean
-    help:
-      zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
-      en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
-    default: false
 pricing:
   input: '0.00'
   output: '0.00'

+0 -9  api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-exp-0827.yaml

@@ -32,15 +32,6 @@ parameter_rules:
     max: 8192
   - name: response_format
     use_template: response_format
-  - name: stream
-    label:
-      zh_Hans: 流式输出
-      en_US: Stream
-    type: boolean
-    help:
-      zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
-      en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
-    default: false
 pricing:
   input: '0.00'
   output: '0.00'

+0 -9  api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-latest.yaml

@@ -32,15 +32,6 @@ parameter_rules:
     max: 8192
   - name: response_format
     use_template: response_format
-  - name: stream
-    label:
-      zh_Hans: 流式输出
-      en_US: Stream
-    type: boolean
-    help:
-      zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
-      en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
-    default: false
 pricing:
   input: '0.00'
   output: '0.00'

+0 -9  api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash.yaml

@@ -32,15 +32,6 @@ parameter_rules:
     max: 8192
   - name: response_format
     use_template: response_format
-  - name: stream
-    label:
-      zh_Hans: 流式输出
-      en_US: Stream
-    type: boolean
-    help:
-      zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
-      en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
-    default: false
 pricing:
   input: '0.00'
   output: '0.00'

+0 -9  api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-001.yaml

@@ -32,15 +32,6 @@ parameter_rules:
     max: 8192
   - name: response_format
     use_template: response_format
-  - name: stream
-    label:
-      zh_Hans: 流式输出
-      en_US: Stream
-    type: boolean
-    help:
-      zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
-      en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
-    default: false
 pricing:
   input: '0.00'
   output: '0.00'

+0 -9  api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-002.yaml

@@ -32,15 +32,6 @@ parameter_rules:
     max: 8192
   - name: response_format
     use_template: response_format
-  - name: stream
-    label:
-      zh_Hans: 流式输出
-      en_US: Stream
-    type: boolean
-    help:
-      zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
-      en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
-    default: false
 pricing:
   input: '0.00'
   output: '0.00'

+0 -9  api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-exp-0801.yaml

@@ -32,15 +32,6 @@ parameter_rules:
     max: 8192
   - name: response_format
     use_template: response_format
-  - name: stream
-    label:
-      zh_Hans: 流式输出
-      en_US: Stream
-    type: boolean
-    help:
-      zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
-      en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
-    default: false
 pricing:
   input: '0.00'
   output: '0.00'

+0 -9  api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-exp-0827.yaml

@@ -32,15 +32,6 @@ parameter_rules:
     max: 8192
   - name: response_format
     use_template: response_format
-  - name: stream
-    label:
-      zh_Hans: 流式输出
-      en_US: Stream
-    type: boolean
-    help:
-      zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
-      en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
-    default: false
 pricing:
   input: '0.00'
   output: '0.00'

+0 -9  api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-latest.yaml

@@ -32,15 +32,6 @@ parameter_rules:
     max: 8192
   - name: response_format
     use_template: response_format
-  - name: stream
-    label:
-      zh_Hans: 流式输出
-      en_US: Stream
-    type: boolean
-    help:
-      zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
-      en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
-    default: false
 pricing:
   input: '0.00'
   output: '0.00'

+0 -9  api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro.yaml

@@ -32,15 +32,6 @@ parameter_rules:
     max: 8192
   - name: response_format
     use_template: response_format
-  - name: stream
-    label:
-      zh_Hans: 流式输出
-      en_US: Stream
-    type: boolean
-    help:
-      zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
-      en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
-    default: false
 pricing:
   input: '0.00'
   output: '0.00'

+0 -9  api/core/model_runtime/model_providers/google/llm/gemini-pro-vision.yaml

@@ -27,15 +27,6 @@ parameter_rules:
     default: 4096
     min: 1
     max: 4096
-  - name: stream
-    label:
-      zh_Hans: 流式输出
-      en_US: Stream
-    type: boolean
-    help:
-      zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
-      en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
-    default: false
 pricing:
   input: '0.00'
   output: '0.00'

+0 -9  api/core/model_runtime/model_providers/google/llm/gemini-pro.yaml

@@ -31,15 +31,6 @@ parameter_rules:
     max: 2048
   - name: response_format
     use_template: response_format
-  - name: stream
-    label:
-      zh_Hans: 流式输出
-      en_US: Stream
-    type: boolean
-    help:
-      zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
-      en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
-    default: false
 pricing:
   input: '0.00'
   output: '0.00'
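
The eleven ZhipuAI files that follow remove an equivalent rule; only the label and the much longer bilingual help text differ from the Gemini version. Its skeleton, with the help text elided (each hunk below quotes it in full, including the original "fasle" typo):

  - name: stream
    label:
      zh_Hans: 流处理
      en_US: Event Stream
    type: boolean
    help:
      # long bilingual help text elided here; see the full quote in each hunk below
      zh_Hans: 使用同步调用时,此参数应当设置为 fasle 或者省略。[...]
      en_US: When using synchronous invocation, this parameter should be set to false or omitted. [...]
    default: false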

+0 -9  api/core/model_runtime/model_providers/zhipuai/llm/chatglm_turbo.yaml

@@ -28,15 +28,6 @@ parameter_rules:
      zh_Hans: do_sample 为 true 时启用采样策略,do_sample 为 false 时采样策略 temperature、top_p 将不生效。默认值为 true
       en_US: When `do_sample` is set to true, the sampling strategy is enabled. When `do_sample` is set to false, the sampling strategies such as `temperature` and `top_p` will not take effect. The default value is true.
     default: true
-  - name: stream
-    label:
-      zh_Hans: 流处理
-      en_US: Event Stream
-    type: boolean
-    help:
-      zh_Hans: 使用同步调用时,此参数应当设置为 fasle 或者省略。表示模型生成完所有内容后一次性返回所有内容。默认值为 false。如果设置为 true,模型将通过标准 Event Stream ,逐块返回模型生成内容。Event Stream 结束时会返回一条data:[DONE]消息。注意:在模型流式输出生成内容的过程中,我们会分批对模型生成内容进行检测,当检测到违法及不良信息时,API会返回错误码(1301)。开发者识别到错误码(1301),应及时采取(清屏、重启对话)等措施删除生成内容,并确保不将含有违法及不良信息的内容传递给模型继续生成,避免其造成负面影响。
-      en_US: When using synchronous invocation, this parameter should be set to false or omitted. It indicates that the model will return all the generated content at once after the generation is complete. The default value is false. If set to true, the model will return the generated content in chunks via the standard Event Stream. A data:[DONE] message will be sent at the end of the Event Stream.Note:During the model's streaming output process, we will batch check the generated content. If illegal or harmful information is detected, the API will return an error code (1301). Developers who identify error code (1301) should promptly take actions such as clearing the screen or restarting the conversation to delete the generated content. They should also ensure that no illegal or harmful content is passed back to the model for continued generation to avoid negative impacts.
-    default: false
   - name: return_type
     label:
       zh_Hans: 回复类型

+0 -9  api/core/model_runtime/model_providers/zhipuai/llm/glm-4-0520.yaml

@@ -32,15 +32,6 @@ parameter_rules:
      zh_Hans: do_sample 为 true 时启用采样策略,do_sample 为 false 时采样策略 temperature、top_p 将不生效。默认值为 true
       en_US: When `do_sample` is set to true, the sampling strategy is enabled. When `do_sample` is set to false, the sampling strategies such as `temperature` and `top_p` will not take effect. The default value is true.
     default: true
-  - name: stream
-    label:
-      zh_Hans: 流处理
-      en_US: Event Stream
-    type: boolean
-    help:
-      zh_Hans: 使用同步调用时,此参数应当设置为 fasle 或者省略。表示模型生成完所有内容后一次性返回所有内容。默认值为 false。如果设置为 true,模型将通过标准 Event Stream ,逐块返回模型生成内容。Event Stream 结束时会返回一条data:[DONE]消息。注意:在模型流式输出生成内容的过程中,我们会分批对模型生成内容进行检测,当检测到违法及不良信息时,API会返回错误码(1301)。开发者识别到错误码(1301),应及时采取(清屏、重启对话)等措施删除生成内容,并确保不将含有违法及不良信息的内容传递给模型继续生成,避免其造成负面影响。
-      en_US: When using synchronous invocation, this parameter should be set to false or omitted. It indicates that the model will return all the generated content at once after the generation is complete. The default value is false. If set to true, the model will return the generated content in chunks via the standard Event Stream. A data:[DONE] message will be sent at the end of the Event Stream.Note:During the model's streaming output process, we will batch check the generated content. If illegal or harmful information is detected, the API will return an error code (1301). Developers who identify error code (1301) should promptly take actions such as clearing the screen or restarting the conversation to delete the generated content. They should also ensure that no illegal or harmful content is passed back to the model for continued generation to avoid negative impacts.
-    default: false
   - name: max_tokens
     use_template: max_tokens
     default: 1024

+0 -9  api/core/model_runtime/model_providers/zhipuai/llm/glm-4-air.yaml

@@ -32,15 +32,6 @@ parameter_rules:
      zh_Hans: do_sample 为 true 时启用采样策略,do_sample 为 false 时采样策略 temperature、top_p 将不生效。默认值为 true
       en_US: When `do_sample` is set to true, the sampling strategy is enabled. When `do_sample` is set to false, the sampling strategies such as `temperature` and `top_p` will not take effect. The default value is true.
     default: true
-  - name: stream
-    label:
-      zh_Hans: 流处理
-      en_US: Event Stream
-    type: boolean
-    help:
-      zh_Hans: 使用同步调用时,此参数应当设置为 fasle 或者省略。表示模型生成完所有内容后一次性返回所有内容。默认值为 false。如果设置为 true,模型将通过标准 Event Stream ,逐块返回模型生成内容。Event Stream 结束时会返回一条data:[DONE]消息。注意:在模型流式输出生成内容的过程中,我们会分批对模型生成内容进行检测,当检测到违法及不良信息时,API会返回错误码(1301)。开发者识别到错误码(1301),应及时采取(清屏、重启对话)等措施删除生成内容,并确保不将含有违法及不良信息的内容传递给模型继续生成,避免其造成负面影响。
-      en_US: When using synchronous invocation, this parameter should be set to false or omitted. It indicates that the model will return all the generated content at once after the generation is complete. The default value is false. If set to true, the model will return the generated content in chunks via the standard Event Stream. A data:[DONE] message will be sent at the end of the Event Stream.Note:During the model's streaming output process, we will batch check the generated content. If illegal or harmful information is detected, the API will return an error code (1301). Developers who identify error code (1301) should promptly take actions such as clearing the screen or restarting the conversation to delete the generated content. They should also ensure that no illegal or harmful content is passed back to the model for continued generation to avoid negative impacts.
-    default: false
   - name: max_tokens
     use_template: max_tokens
     default: 1024

+0 -9  api/core/model_runtime/model_providers/zhipuai/llm/glm-4-airx.yaml

@@ -32,15 +32,6 @@ parameter_rules:
      zh_Hans: do_sample 为 true 时启用采样策略,do_sample 为 false 时采样策略 temperature、top_p 将不生效。默认值为 true
       en_US: When `do_sample` is set to true, the sampling strategy is enabled. When `do_sample` is set to false, the sampling strategies such as `temperature` and `top_p` will not take effect. The default value is true.
     default: true
-  - name: stream
-    label:
-      zh_Hans: 流处理
-      en_US: Event Stream
-    type: boolean
-    help:
-      zh_Hans: 使用同步调用时,此参数应当设置为 fasle 或者省略。表示模型生成完所有内容后一次性返回所有内容。默认值为 false。如果设置为 true,模型将通过标准 Event Stream ,逐块返回模型生成内容。Event Stream 结束时会返回一条data:[DONE]消息。注意:在模型流式输出生成内容的过程中,我们会分批对模型生成内容进行检测,当检测到违法及不良信息时,API会返回错误码(1301)。开发者识别到错误码(1301),应及时采取(清屏、重启对话)等措施删除生成内容,并确保不将含有违法及不良信息的内容传递给模型继续生成,避免其造成负面影响。
-      en_US: When using synchronous invocation, this parameter should be set to false or omitted. It indicates that the model will return all the generated content at once after the generation is complete. The default value is false. If set to true, the model will return the generated content in chunks via the standard Event Stream. A data:[DONE] message will be sent at the end of the Event Stream.Note:During the model's streaming output process, we will batch check the generated content. If illegal or harmful information is detected, the API will return an error code (1301). Developers who identify error code (1301) should promptly take actions such as clearing the screen or restarting the conversation to delete the generated content. They should also ensure that no illegal or harmful content is passed back to the model for continued generation to avoid negative impacts.
-    default: false
   - name: max_tokens
     use_template: max_tokens
     default: 1024

+0 -9  api/core/model_runtime/model_providers/zhipuai/llm/glm-4-flash.yaml

@@ -32,15 +32,6 @@ parameter_rules:
      zh_Hans: do_sample 为 true 时启用采样策略,do_sample 为 false 时采样策略 temperature、top_p 将不生效。默认值为 true
       en_US: When `do_sample` is set to true, the sampling strategy is enabled. When `do_sample` is set to false, the sampling strategies such as `temperature` and `top_p` will not take effect. The default value is true.
     default: true
-  - name: stream
-    label:
-      zh_Hans: 流处理
-      en_US: Event Stream
-    type: boolean
-    help:
-      zh_Hans: 使用同步调用时,此参数应当设置为 fasle 或者省略。表示模型生成完所有内容后一次性返回所有内容。默认值为 false。如果设置为 true,模型将通过标准 Event Stream ,逐块返回模型生成内容。Event Stream 结束时会返回一条data:[DONE]消息。注意:在模型流式输出生成内容的过程中,我们会分批对模型生成内容进行检测,当检测到违法及不良信息时,API会返回错误码(1301)。开发者识别到错误码(1301),应及时采取(清屏、重启对话)等措施删除生成内容,并确保不将含有违法及不良信息的内容传递给模型继续生成,避免其造成负面影响。
-      en_US: When using synchronous invocation, this parameter should be set to false or omitted. It indicates that the model will return all the generated content at once after the generation is complete. The default value is false. If set to true, the model will return the generated content in chunks via the standard Event Stream. A data:[DONE] message will be sent at the end of the Event Stream.Note:During the model's streaming output process, we will batch check the generated content. If illegal or harmful information is detected, the API will return an error code (1301). Developers who identify error code (1301) should promptly take actions such as clearing the screen or restarting the conversation to delete the generated content. They should also ensure that no illegal or harmful content is passed back to the model for continued generation to avoid negative impacts.
-    default: false
   - name: max_tokens
     use_template: max_tokens
     default: 1024

+0 -9  api/core/model_runtime/model_providers/zhipuai/llm/glm_3_turbo.yaml

@@ -32,15 +32,6 @@ parameter_rules:
      zh_Hans: do_sample 为 true 时启用采样策略,do_sample 为 false 时采样策略 temperature、top_p 将不生效。默认值为 true
       en_US: When `do_sample` is set to true, the sampling strategy is enabled. When `do_sample` is set to false, the sampling strategies such as `temperature` and `top_p` will not take effect. The default value is true.
     default: true
-  - name: stream
-    label:
-      zh_Hans: 流处理
-      en_US: Event Stream
-    type: boolean
-    help:
-      zh_Hans: 使用同步调用时,此参数应当设置为 fasle 或者省略。表示模型生成完所有内容后一次性返回所有内容。默认值为 false。如果设置为 true,模型将通过标准 Event Stream ,逐块返回模型生成内容。Event Stream 结束时会返回一条data:[DONE]消息。注意:在模型流式输出生成内容的过程中,我们会分批对模型生成内容进行检测,当检测到违法及不良信息时,API会返回错误码(1301)。开发者识别到错误码(1301),应及时采取(清屏、重启对话)等措施删除生成内容,并确保不将含有违法及不良信息的内容传递给模型继续生成,避免其造成负面影响。
-      en_US: When using synchronous invocation, this parameter should be set to false or omitted. It indicates that the model will return all the generated content at once after the generation is complete. The default value is false. If set to true, the model will return the generated content in chunks via the standard Event Stream. A data:[DONE] message will be sent at the end of the Event Stream.Note:During the model's streaming output process, we will batch check the generated content. If illegal or harmful information is detected, the API will return an error code (1301). Developers who identify error code (1301) should promptly take actions such as clearing the screen or restarting the conversation to delete the generated content. They should also ensure that no illegal or harmful content is passed back to the model for continued generation to avoid negative impacts.
-    default: false
   - name: max_tokens
     use_template: max_tokens
     default: 1024

+0 -9  api/core/model_runtime/model_providers/zhipuai/llm/glm_4.yaml

@@ -32,15 +32,6 @@ parameter_rules:
      zh_Hans: do_sample 为 true 时启用采样策略,do_sample 为 false 时采样策略 temperature、top_p 将不生效。默认值为 true
       en_US: When `do_sample` is set to true, the sampling strategy is enabled. When `do_sample` is set to false, the sampling strategies such as `temperature` and `top_p` will not take effect. The default value is true.
     default: true
-  - name: stream
-    label:
-      zh_Hans: 流处理
-      en_US: Event Stream
-    type: boolean
-    help:
-      zh_Hans: 使用同步调用时,此参数应当设置为 fasle 或者省略。表示模型生成完所有内容后一次性返回所有内容。默认值为 false。如果设置为 true,模型将通过标准 Event Stream ,逐块返回模型生成内容。Event Stream 结束时会返回一条data:[DONE]消息。注意:在模型流式输出生成内容的过程中,我们会分批对模型生成内容进行检测,当检测到违法及不良信息时,API会返回错误码(1301)。开发者识别到错误码(1301),应及时采取(清屏、重启对话)等措施删除生成内容,并确保不将含有违法及不良信息的内容传递给模型继续生成,避免其造成负面影响。
-      en_US: When using synchronous invocation, this parameter should be set to false or omitted. It indicates that the model will return all the generated content at once after the generation is complete. The default value is false. If set to true, the model will return the generated content in chunks via the standard Event Stream. A data:[DONE] message will be sent at the end of the Event Stream.Note:During the model's streaming output process, we will batch check the generated content. If illegal or harmful information is detected, the API will return an error code (1301). Developers who identify error code (1301) should promptly take actions such as clearing the screen or restarting the conversation to delete the generated content. They should also ensure that no illegal or harmful content is passed back to the model for continued generation to avoid negative impacts.
-    default: false
   - name: max_tokens
     use_template: max_tokens
     default: 1024

+0 -9  api/core/model_runtime/model_providers/zhipuai/llm/glm_4_long.yaml

@@ -35,15 +35,6 @@ parameter_rules:
      zh_Hans: do_sample 为 true 时启用采样策略,do_sample 为 false 时采样策略 temperature、top_p 将不生效。默认值为 true
       en_US: When `do_sample` is set to true, the sampling strategy is enabled. When `do_sample` is set to false, the sampling strategies such as `temperature` and `top_p` will not take effect. The default value is true.
     default: true
-  - name: stream
-    label:
-      zh_Hans: 流处理
-      en_US: Event Stream
-    type: boolean
-    help:
-      zh_Hans: 使用同步调用时,此参数应当设置为 fasle 或者省略。表示模型生成完所有内容后一次性返回所有内容。默认值为 false。如果设置为 true,模型将通过标准 Event Stream ,逐块返回模型生成内容。Event Stream 结束时会返回一条data:[DONE]消息。注意:在模型流式输出生成内容的过程中,我们会分批对模型生成内容进行检测,当检测到违法及不良信息时,API会返回错误码(1301)。开发者识别到错误码(1301),应及时采取(清屏、重启对话)等措施删除生成内容,并确保不将含有违法及不良信息的内容传递给模型继续生成,避免其造成负面影响。
-      en_US: When using synchronous invocation, this parameter should be set to false or omitted. It indicates that the model will return all the generated content at once after the generation is complete. The default value is false. If set to true, the model will return the generated content in chunks via the standard Event Stream. A data:[DONE] message will be sent at the end of the Event Stream.Note:During the model's streaming output process, we will batch check the generated content. If illegal or harmful information is detected, the API will return an error code (1301). Developers who identify error code (1301) should promptly take actions such as clearing the screen or restarting the conversation to delete the generated content. They should also ensure that no illegal or harmful content is passed back to the model for continued generation to avoid negative impacts.
-    default: false
   - name: max_tokens
     use_template: max_tokens
     default: 1024

+0 -9  api/core/model_runtime/model_providers/zhipuai/llm/glm_4_plus.yaml

@@ -32,15 +32,6 @@ parameter_rules:
      zh_Hans: do_sample 为 true 时启用采样策略,do_sample 为 false 时采样策略 temperature、top_p 将不生效。默认值为 true
       en_US: When `do_sample` is set to true, the sampling strategy is enabled. When `do_sample` is set to false, the sampling strategies such as `temperature` and `top_p` will not take effect. The default value is true.
     default: true
-  - name: stream
-    label:
-      zh_Hans: 流处理
-      en_US: Event Stream
-    type: boolean
-    help:
-      zh_Hans: 使用同步调用时,此参数应当设置为 fasle 或者省略。表示模型生成完所有内容后一次性返回所有内容。默认值为 false。如果设置为 true,模型将通过标准 Event Stream ,逐块返回模型生成内容。Event Stream 结束时会返回一条data:[DONE]消息。注意:在模型流式输出生成内容的过程中,我们会分批对模型生成内容进行检测,当检测到违法及不良信息时,API会返回错误码(1301)。开发者识别到错误码(1301),应及时采取(清屏、重启对话)等措施删除生成内容,并确保不将含有违法及不良信息的内容传递给模型继续生成,避免其造成负面影响。
-      en_US: When using synchronous invocation, this parameter should be set to false or omitted. It indicates that the model will return all the generated content at once after the generation is complete. The default value is false. If set to true, the model will return the generated content in chunks via the standard Event Stream. A data:[DONE] message will be sent at the end of the Event Stream.Note:During the model's streaming output process, we will batch check the generated content. If illegal or harmful information is detected, the API will return an error code (1301). Developers who identify error code (1301) should promptly take actions such as clearing the screen or restarting the conversation to delete the generated content. They should also ensure that no illegal or harmful content is passed back to the model for continued generation to avoid negative impacts.
-    default: false
   - name: max_tokens
     use_template: max_tokens
     default: 1024

+0 -9  api/core/model_runtime/model_providers/zhipuai/llm/glm_4v.yaml

@@ -30,15 +30,6 @@ parameter_rules:
      zh_Hans: do_sample 为 true 时启用采样策略,do_sample 为 false 时采样策略 temperature、top_p 将不生效。默认值为 true
       en_US: When `do_sample` is set to true, the sampling strategy is enabled. When `do_sample` is set to false, the sampling strategies such as `temperature` and `top_p` will not take effect. The default value is true.
     default: true
-  - name: stream
-    label:
-      zh_Hans: 流处理
-      en_US: Event Stream
-    type: boolean
-    help:
-      zh_Hans: 使用同步调用时,此参数应当设置为 fasle 或者省略。表示模型生成完所有内容后一次性返回所有内容。默认值为 false。如果设置为 true,模型将通过标准 Event Stream ,逐块返回模型生成内容。Event Stream 结束时会返回一条data:[DONE]消息。注意:在模型流式输出生成内容的过程中,我们会分批对模型生成内容进行检测,当检测到违法及不良信息时,API会返回错误码(1301)。开发者识别到错误码(1301),应及时采取(清屏、重启对话)等措施删除生成内容,并确保不将含有违法及不良信息的内容传递给模型继续生成,避免其造成负面影响。
-      en_US: When using synchronous invocation, this parameter should be set to false or omitted. It indicates that the model will return all the generated content at once after the generation is complete. The default value is false. If set to true, the model will return the generated content in chunks via the standard Event Stream. A data:[DONE] message will be sent at the end of the Event Stream.Note:During the model's streaming output process, we will batch check the generated content. If illegal or harmful information is detected, the API will return an error code (1301). Developers who identify error code (1301) should promptly take actions such as clearing the screen or restarting the conversation to delete the generated content. They should also ensure that no illegal or harmful content is passed back to the model for continued generation to avoid negative impacts.
-    default: false
   - name: max_tokens
     use_template: max_tokens
     default: 1024

+0 -9  api/core/model_runtime/model_providers/zhipuai/llm/glm_4v_plus.yaml

@@ -30,15 +30,6 @@ parameter_rules:
      zh_Hans: do_sample 为 true 时启用采样策略,do_sample 为 false 时采样策略 temperature、top_p 将不生效。默认值为 true
       en_US: When `do_sample` is set to true, the sampling strategy is enabled. When `do_sample` is set to false, the sampling strategies such as `temperature` and `top_p` will not take effect. The default value is true.
     default: true
-  - name: stream
-    label:
-      zh_Hans: 流处理
-      en_US: Event Stream
-    type: boolean
-    help:
-      zh_Hans: 使用同步调用时,此参数应当设置为 fasle 或者省略。表示模型生成完所有内容后一次性返回所有内容。默认值为 false。如果设置为 true,模型将通过标准 Event Stream ,逐块返回模型生成内容。Event Stream 结束时会返回一条data:[DONE]消息。注意:在模型流式输出生成内容的过程中,我们会分批对模型生成内容进行检测,当检测到违法及不良信息时,API会返回错误码(1301)。开发者识别到错误码(1301),应及时采取(清屏、重启对话)等措施删除生成内容,并确保不将含有违法及不良信息的内容传递给模型继续生成,避免其造成负面影响。
-      en_US: When using synchronous invocation, this parameter should be set to false or omitted. It indicates that the model will return all the generated content at once after the generation is complete. The default value is false. If set to true, the model will return the generated content in chunks via the standard Event Stream. A data:[DONE] message will be sent at the end of the Event Stream.Note:During the model's streaming output process, we will batch check the generated content. If illegal or harmful information is detected, the API will return an error code (1301). Developers who identify error code (1301) should promptly take actions such as clearing the screen or restarting the conversation to delete the generated content. They should also ensure that no illegal or harmful content is passed back to the model for continued generation to avoid negative impacts.
-    default: false
   - name: max_tokens
     use_template: max_tokens
     default: 1024