# moderation.py
import logging

import openai
from flask import current_app

from core.model_providers.error import LLMBadRequestError
from core.model_providers.providers.base import BaseModelProvider
from models.provider import ProviderType


def check_moderation(model_provider: BaseModelProvider, text: str) -> bool:
    if current_app.config['HOSTED_MODERATION_ENABLED'] and current_app.config['HOSTED_MODERATION_PROVIDERS']:
        moderation_providers = current_app.config['HOSTED_MODERATION_PROVIDERS'].split(',')

        # Only moderate requests served by a hosted (system) provider that is
        # listed in HOSTED_MODERATION_PROVIDERS.
        if model_provider.provider.provider_type == ProviderType.SYSTEM.value \
                and model_provider.provider_name in moderation_providers:
            # Split the text into chunks of 2,000 characters each.
            length = 2000
            chunks = [text[i:i + length] for i in range(0, len(text), length)]

            try:
                moderation_result = openai.Moderation.create(
                    input=chunks,
                    api_key=current_app.config['HOSTED_OPENAI_API_KEY']
                )
            except Exception as ex:
                # Any failure of the moderation call is surfaced to the caller
                # as a rate-limit error.
                logging.exception(ex)
                raise LLMBadRequestError('Rate limit exceeded, please try again later.')

            # Fail the check if any chunk is flagged.
            for result in moderation_result.results:
                if result['flagged'] is True:
                    return False

    return True
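

# Usage sketch (not part of the original file): how a caller running inside a
# Flask application context might gate a request on the moderation check.
# `provider` and `user_query` are hypothetical stand-ins for whatever the
# calling code already has in scope; the error message is illustrative.
#
#     if not check_moderation(provider, user_query):
#         raise LLMBadRequestError('Content has been flagged by moderation.')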