# cached_embedding.py

import base64
import logging
from typing import Optional, cast

import numpy as np
from sqlalchemy.exc import IntegrityError

from configs import dify_config
from core.entities.embedding_type import EmbeddingInputType
from core.model_manager import ModelInstance
from core.model_runtime.entities.model_entities import ModelPropertyKey
from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel
from core.rag.embedding.embedding_base import Embeddings
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from libs import helper
from models.dataset import Embedding

logger = logging.getLogger(__name__)


class CacheEmbedding(Embeddings):
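    """Embeddings wrapper that caches vectors: document embeddings in the database, query embeddings in Redis."""
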
    def __init__(self, model_instance: ModelInstance, user: Optional[str] = None) -> None:
        self._model_instance = model_instance
        self._user = user

    def embed_documents(self, texts: list[str]) -> list[list[float]]:
        """Embed search documents, using the database cache and batching uncached texts by the model's MAX_CHUNKS."""
        # use doc embedding cache or store if not exists
        text_embeddings = [None for _ in range(len(texts))]
        embedding_queue_indices = []
        for i, text in enumerate(texts):
            hash = helper.generate_text_hash(text)
            embedding = (
                db.session.query(Embedding)
                .filter_by(
                    model_name=self._model_instance.model, hash=hash, provider_name=self._model_instance.provider
                )
                .first()
            )
            if embedding:
                text_embeddings[i] = embedding.get_embedding()
            else:
                embedding_queue_indices.append(i)
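
        # Only texts missing from the database cache are sent to the embedding model below.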
        if embedding_queue_indices:
            embedding_queue_texts = [texts[i] for i in embedding_queue_indices]
            embedding_queue_embeddings = []
            try:
                model_type_instance = cast(TextEmbeddingModel, self._model_instance.model_type_instance)
                model_schema = model_type_instance.get_model_schema(
                    self._model_instance.model, self._model_instance.credentials
                )
                max_chunks = (
                    model_schema.model_properties[ModelPropertyKey.MAX_CHUNKS]
                    if model_schema and ModelPropertyKey.MAX_CHUNKS in model_schema.model_properties
                    else 1
                )
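                # Embed the uncached texts in provider-sized batches (MAX_CHUNKS texts per call).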
                for i in range(0, len(embedding_queue_texts), max_chunks):
                    batch_texts = embedding_queue_texts[i : i + max_chunks]
                    embedding_result = self._model_instance.invoke_text_embedding(
                        texts=batch_texts, user=self._user, input_type=EmbeddingInputType.DOCUMENT
                    )
                    for vector in embedding_result.embeddings:
                        try:
                            normalized_embedding = (vector / np.linalg.norm(vector)).tolist()
                            # stackoverflow best way: https://stackoverflow.com/questions/20319813/how-to-check-list-containing-nan
                            if np.isnan(normalized_embedding).any():
                                # for issue #11827 float values are not json compliant
                                logger.warning(f"Normalized embedding is nan: {normalized_embedding}")
                                continue
                            embedding_queue_embeddings.append(normalized_embedding)
                        except IntegrityError:
                            db.session.rollback()
                        except Exception:
                            logger.exception("Failed to transform embedding")
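                # Persist the freshly computed vectors to the database cache, deduplicating by text hash.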
                cache_embeddings = []
                try:
                    for i, embedding in zip(embedding_queue_indices, embedding_queue_embeddings):
                        text_embeddings[i] = embedding
                        hash = helper.generate_text_hash(texts[i])
                        if hash not in cache_embeddings:
                            embedding_cache = Embedding(
                                model_name=self._model_instance.model,
                                hash=hash,
                                provider_name=self._model_instance.provider,
                            )
                            embedding_cache.set_embedding(embedding)
                            db.session.add(embedding_cache)
                            cache_embeddings.append(hash)
                    db.session.commit()
                except IntegrityError:
                    db.session.rollback()
            except Exception as ex:
                db.session.rollback()
                logger.exception("Failed to embed documents")
                raise ex

        return text_embeddings

    def embed_query(self, text: str) -> list[float]:
        """Embed query text."""
        # use query embedding cache in Redis or store if not exists
        hash = helper.generate_text_hash(text)
        embedding_cache_key = f"{self._model_instance.provider}_{self._model_instance.model}_{hash}"
        embedding = redis_client.get(embedding_cache_key)
        if embedding:
            redis_client.expire(embedding_cache_key, 600)
            decoded_embedding = np.frombuffer(base64.b64decode(embedding), dtype="float")
            return [float(x) for x in decoded_embedding]
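
        # Cache miss: embed the query, L2-normalize it, and cache the result in Redis for 10 minutes.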
        try:
            embedding_result = self._model_instance.invoke_text_embedding(
                texts=[text], user=self._user, input_type=EmbeddingInputType.QUERY
            )
            embedding_results = embedding_result.embeddings[0]
            embedding_results = (embedding_results / np.linalg.norm(embedding_results)).tolist()
        except Exception as ex:
            if dify_config.DEBUG:
                logger.exception(f"Failed to embed query text '{text[:10]}...({len(text)} chars)'")
            raise ex

        try:
            # encode embedding to base64
            embedding_vector = np.array(embedding_results)
            vector_bytes = embedding_vector.tobytes()
            # Transform to Base64
            encoded_vector = base64.b64encode(vector_bytes)
            # Transform to string
            encoded_str = encoded_vector.decode("utf-8")
            redis_client.setex(embedding_cache_key, 600, encoded_str)
        except Exception as ex:
            if dify_config.DEBUG:
                logger.exception(f"Failed to add embedding to redis for the text '{text[:10]}...({len(text)} chars)'")
            raise ex

        return embedding_results
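
# Usage sketch (illustrative, not part of the module above). It assumes a
# ModelInstance for a text-embedding model has already been resolved elsewhere,
# e.g. via the model manager; `model_instance` and `current_user_id` are
# placeholder names, and only the CacheEmbedding API defined in this file is
# relied upon.
#
#   embeddings = CacheEmbedding(model_instance, user=current_user_id)
#   doc_vectors = embeddings.embed_documents(["first chunk", "second chunk"])
#   query_vector = embeddings.embed_query("what does the first chunk say?")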