moderation.py

import logging

import openai

from core.model_providers.error import LLMBadRequestError
from core.model_providers.providers.base import BaseModelProvider
from core.model_providers.providers.hosted import hosted_config, hosted_model_providers
from models.provider import ProviderType


def check_moderation(model_provider: BaseModelProvider, text: str) -> bool:
    """Return False if any part of `text` is flagged by the OpenAI moderation endpoint, True otherwise."""
    if hosted_config.moderation.enabled is True and hosted_model_providers.openai:
        # Only moderate requests that go through system-hosted providers configured for moderation
        if model_provider.provider.provider_type == ProviderType.SYSTEM.value \
                and model_provider.provider_name in hosted_config.moderation.providers:
            # Split the text into chunks of 2000 characters each
            length = 2000
            text_chunks = [text[i:i + length] for i in range(0, len(text), length)]

            # The moderation endpoint accepts a batch of inputs; send at most 32 chunks per request
            max_text_chunks = 32
            chunks = [text_chunks[i:i + max_text_chunks] for i in range(0, len(text_chunks), max_text_chunks)]

            for text_chunk in chunks:
                try:
                    moderation_result = openai.Moderation.create(input=text_chunk,
                                                                  api_key=hosted_model_providers.openai.api_key)
                except Exception as ex:
                    logging.exception(ex)
                    raise LLMBadRequestError('Rate limit exceeded, please try again later.')

                # Fail the check as soon as any chunk in the batch is flagged
                for result in moderation_result.results:
                    if result['flagged'] is True:
                        return False

    return True
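
A minimal usage sketch of how a caller might gate user input with check_moderation before forwarding it to a completion endpoint. The guard_user_input wrapper and its wiring are illustrative assumptions, not part of this module; only check_moderation, BaseModelProvider, and LLMBadRequestError come from the code above.

    # Illustrative caller-side guard (assumed, not part of moderation.py)
    def guard_user_input(model_provider: BaseModelProvider, user_text: str) -> str:
        # Reject the request before it reaches the model if moderation flags any chunk of the text.
        if not check_moderation(model_provider, user_text):
            raise LLMBadRequestError('The input text was flagged by content moderation.')
        return user_text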