@@ -1,8 +1,9 @@
 from typing import Optional
 
-from flask import Config, Flask
+from flask import Flask
 from pydantic import BaseModel
 
+from configs import dify_config
 from core.entities.provider_entities import QuotaUnit, RestrictModel
 from core.model_runtime.entities.model_entities import ModelType
 from models.provider import ProviderQuotaType
@@ -44,32 +45,30 @@ class HostingConfiguration:
     moderation_config: HostedModerationConfig = None
 
     def init_app(self, app: Flask) -> None:
-        config = app.config
-
-        if config.get("EDITION") != "CLOUD":
+        if dify_config.EDITION != "CLOUD":
             return
 
-        self.provider_map["azure_openai"] = self.init_azure_openai(config)
-        self.provider_map["openai"] = self.init_openai(config)
-        self.provider_map["anthropic"] = self.init_anthropic(config)
-        self.provider_map["minimax"] = self.init_minimax(config)
-        self.provider_map["spark"] = self.init_spark(config)
-        self.provider_map["zhipuai"] = self.init_zhipuai(config)
+        self.provider_map["azure_openai"] = self.init_azure_openai()
+        self.provider_map["openai"] = self.init_openai()
+        self.provider_map["anthropic"] = self.init_anthropic()
+        self.provider_map["minimax"] = self.init_minimax()
+        self.provider_map["spark"] = self.init_spark()
+        self.provider_map["zhipuai"] = self.init_zhipuai()
 
-        self.moderation_config = self.init_moderation_config(config)
+        self.moderation_config = self.init_moderation_config()
 
     @staticmethod
-    def init_azure_openai(app_config: Config) -> HostingProvider:
+    def init_azure_openai() -> HostingProvider:
         quota_unit = QuotaUnit.TIMES
-        if app_config.get("HOSTED_AZURE_OPENAI_ENABLED"):
+        if dify_config.HOSTED_AZURE_OPENAI_ENABLED:
             credentials = {
-                "openai_api_key": app_config.get("HOSTED_AZURE_OPENAI_API_KEY"),
-                "openai_api_base": app_config.get("HOSTED_AZURE_OPENAI_API_BASE"),
+                "openai_api_key": dify_config.HOSTED_AZURE_OPENAI_API_KEY,
+                "openai_api_base": dify_config.HOSTED_AZURE_OPENAI_API_BASE,
                 "base_model_name": "gpt-35-turbo",
             }
 
             quotas = []
-            hosted_quota_limit = int(app_config.get("HOSTED_AZURE_OPENAI_QUOTA_LIMIT", "1000"))
+            hosted_quota_limit = dify_config.HOSTED_AZURE_OPENAI_QUOTA_LIMIT
             trial_quota = TrialHostingQuota(
                 quota_limit=hosted_quota_limit,
                 restrict_models=[
@@ -122,31 +121,31 @@ class HostingConfiguration:
             quota_unit=quota_unit,
         )
 
-    def init_openai(self, app_config: Config) -> HostingProvider:
+    def init_openai(self) -> HostingProvider:
         quota_unit = QuotaUnit.CREDITS
         quotas = []
 
-        if app_config.get("HOSTED_OPENAI_TRIAL_ENABLED"):
-            hosted_quota_limit = int(app_config.get("HOSTED_OPENAI_QUOTA_LIMIT", "200"))
-            trial_models = self.parse_restrict_models_from_env(app_config, "HOSTED_OPENAI_TRIAL_MODELS")
+        if dify_config.HOSTED_OPENAI_TRIAL_ENABLED:
+            hosted_quota_limit = dify_config.HOSTED_OPENAI_QUOTA_LIMIT
+            trial_models = self.parse_restrict_models_from_env("HOSTED_OPENAI_TRIAL_MODELS")
             trial_quota = TrialHostingQuota(quota_limit=hosted_quota_limit, restrict_models=trial_models)
             quotas.append(trial_quota)
 
-        if app_config.get("HOSTED_OPENAI_PAID_ENABLED"):
-            paid_models = self.parse_restrict_models_from_env(app_config, "HOSTED_OPENAI_PAID_MODELS")
+        if dify_config.HOSTED_OPENAI_PAID_ENABLED:
+            paid_models = self.parse_restrict_models_from_env("HOSTED_OPENAI_PAID_MODELS")
             paid_quota = PaidHostingQuota(restrict_models=paid_models)
             quotas.append(paid_quota)
 
         if len(quotas) > 0:
             credentials = {
-                "openai_api_key": app_config.get("HOSTED_OPENAI_API_KEY"),
+                "openai_api_key": dify_config.HOSTED_OPENAI_API_KEY,
             }
 
-            if app_config.get("HOSTED_OPENAI_API_BASE"):
-                credentials["openai_api_base"] = app_config.get("HOSTED_OPENAI_API_BASE")
+            if dify_config.HOSTED_OPENAI_API_BASE:
+                credentials["openai_api_base"] = dify_config.HOSTED_OPENAI_API_BASE
 
-            if app_config.get("HOSTED_OPENAI_API_ORGANIZATION"):
-                credentials["openai_organization"] = app_config.get("HOSTED_OPENAI_API_ORGANIZATION")
+            if dify_config.HOSTED_OPENAI_API_ORGANIZATION:
+                credentials["openai_organization"] = dify_config.HOSTED_OPENAI_API_ORGANIZATION
 
             return HostingProvider(enabled=True, credentials=credentials, quota_unit=quota_unit, quotas=quotas)
@@ -156,26 +155,26 @@ class HostingConfiguration:
         )
 
     @staticmethod
-    def init_anthropic(app_config: Config) -> HostingProvider:
+    def init_anthropic() -> HostingProvider:
         quota_unit = QuotaUnit.TOKENS
         quotas = []
 
-        if app_config.get("HOSTED_ANTHROPIC_TRIAL_ENABLED"):
-            hosted_quota_limit = int(app_config.get("HOSTED_ANTHROPIC_QUOTA_LIMIT", "0"))
+        if dify_config.HOSTED_ANTHROPIC_TRIAL_ENABLED:
+            hosted_quota_limit = dify_config.HOSTED_ANTHROPIC_QUOTA_LIMIT
             trial_quota = TrialHostingQuota(quota_limit=hosted_quota_limit)
             quotas.append(trial_quota)
 
-        if app_config.get("HOSTED_ANTHROPIC_PAID_ENABLED"):
+        if dify_config.HOSTED_ANTHROPIC_PAID_ENABLED:
             paid_quota = PaidHostingQuota()
             quotas.append(paid_quota)
 
         if len(quotas) > 0:
             credentials = {
-                "anthropic_api_key": app_config.get("HOSTED_ANTHROPIC_API_KEY"),
+                "anthropic_api_key": dify_config.HOSTED_ANTHROPIC_API_KEY,
             }
 
-            if app_config.get("HOSTED_ANTHROPIC_API_BASE"):
-                credentials["anthropic_api_url"] = app_config.get("HOSTED_ANTHROPIC_API_BASE")
+            if dify_config.HOSTED_ANTHROPIC_API_BASE:
+                credentials["anthropic_api_url"] = dify_config.HOSTED_ANTHROPIC_API_BASE
 
             return HostingProvider(enabled=True, credentials=credentials, quota_unit=quota_unit, quotas=quotas)
@@ -185,9 +184,9 @@ class HostingConfiguration:
         )
 
     @staticmethod
-    def init_minimax(app_config: Config) -> HostingProvider:
+    def init_minimax() -> HostingProvider:
         quota_unit = QuotaUnit.TOKENS
-        if app_config.get("HOSTED_MINIMAX_ENABLED"):
+        if dify_config.HOSTED_MINIMAX_ENABLED:
             quotas = [FreeHostingQuota()]
 
             return HostingProvider(
@@ -203,9 +202,9 @@ class HostingConfiguration:
         )
 
     @staticmethod
-    def init_spark(app_config: Config) -> HostingProvider:
+    def init_spark() -> HostingProvider:
         quota_unit = QuotaUnit.TOKENS
-        if app_config.get("HOSTED_SPARK_ENABLED"):
+        if dify_config.HOSTED_SPARK_ENABLED:
             quotas = [FreeHostingQuota()]
 
             return HostingProvider(
@@ -221,9 +220,9 @@ class HostingConfiguration:
         )
 
     @staticmethod
-    def init_zhipuai(app_config: Config) -> HostingProvider:
+    def init_zhipuai() -> HostingProvider:
         quota_unit = QuotaUnit.TOKENS
-        if app_config.get("HOSTED_ZHIPUAI_ENABLED"):
+        if dify_config.HOSTED_ZHIPUAI_ENABLED:
             quotas = [FreeHostingQuota()]
 
             return HostingProvider(
@@ -239,17 +238,15 @@ class HostingConfiguration:
         )
 
     @staticmethod
-    def init_moderation_config(app_config: Config) -> HostedModerationConfig:
-        if app_config.get("HOSTED_MODERATION_ENABLED") and app_config.get("HOSTED_MODERATION_PROVIDERS"):
-            return HostedModerationConfig(
-                enabled=True, providers=app_config.get("HOSTED_MODERATION_PROVIDERS").split(",")
-            )
+    def init_moderation_config() -> HostedModerationConfig:
+        if dify_config.HOSTED_MODERATION_ENABLED and dify_config.HOSTED_MODERATION_PROVIDERS:
+            return HostedModerationConfig(enabled=True, providers=dify_config.HOSTED_MODERATION_PROVIDERS.split(","))
 
         return HostedModerationConfig(enabled=False)
 
     @staticmethod
-    def parse_restrict_models_from_env(app_config: Config, env_var: str) -> list[RestrictModel]:
-        models_str = app_config.get(env_var)
+    def parse_restrict_models_from_env(env_var: str) -> list[RestrictModel]:
+        models_str = dify_config.model_dump().get(env_var)
         models_list = models_str.split(",") if models_str else []
         return [
             RestrictModel(model=model_name.strip(), model_type=ModelType.LLM)
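
Note on the pattern this diff migrates to: the model_dump() call in parse_restrict_models_from_env indicates dify_config is a Pydantic v2 model, so values arrive already typed via attribute access (no .get() lookups or int(...) casts), while model_dump() turns the settings into a plain dict for name-based lookups. The sketch below is a minimal illustration only, assuming a pydantic-settings BaseSettings; DemoConfig and its fields are hypothetical stand-ins, not Dify's actual configs module.

    # Hypothetical stand-in for `configs.dify_config`; illustration only.
    from pydantic_settings import BaseSettings

    class DemoConfig(BaseSettings):
        # Typed fields: environment values are validated and coerced by pydantic,
        # which is why the diff drops int(...) casts and string defaults.
        HOSTED_OPENAI_TRIAL_ENABLED: bool = False
        HOSTED_OPENAI_QUOTA_LIMIT: int = 200
        HOSTED_OPENAI_TRIAL_MODELS: str = ""

    demo_config = DemoConfig()

    # Attribute access replaces app.config.get("KEY"):
    if demo_config.HOSTED_OPENAI_TRIAL_ENABLED:
        limit = demo_config.HOSTED_OPENAI_QUOTA_LIMIT  # already an int

    # Dict-style lookup by name, as in the new parse_restrict_models_from_env:
    models_str = demo_config.model_dump().get("HOSTED_OPENAI_TRIAL_MODELS")
    models_list = models_str.split(",") if models_str else []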