Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: Enhance error handling in Azure document embedder #8941

Open
wants to merge 4 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
47 changes: 28 additions & 19 deletions haystack/components/embedders/azure_document_embedder.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,9 +8,11 @@
from openai.lib.azure import AzureOpenAI
from tqdm import tqdm

from haystack import Document, component, default_from_dict, default_to_dict
from haystack import Document, component, default_from_dict, default_to_dict, logging
from haystack.utils import Secret, deserialize_secrets_inplace

logger = logging.getLogger(__name__)


@component
class AzureOpenAIDocumentEmbedder:
Expand Down Expand Up @@ -208,24 +210,31 @@ def _embed_batch(self, texts_to_embed: List[str], batch_size: int) -> Tuple[List
meta: Dict[str, Any] = {"model": "", "usage": {"prompt_tokens": 0, "total_tokens": 0}}
for i in tqdm(range(0, len(texts_to_embed), batch_size), desc="Embedding Texts"):
batch = texts_to_embed[i : i + batch_size]
if self.dimensions is not None:
response = self._client.embeddings.create(
model=self.azure_deployment, dimensions=self.dimensions, input=batch
)
else:
response = self._client.embeddings.create(model=self.azure_deployment, input=batch)

# Append embeddings to the list
all_embeddings.extend(el.embedding for el in response.data)

# Update the meta information only once if it's empty
if not meta["model"]:
meta["model"] = response.model
meta["usage"] = dict(response.usage)
else:
# Update the usage tokens
meta["usage"]["prompt_tokens"] += response.usage.prompt_tokens
meta["usage"]["total_tokens"] += response.usage.total_tokens
try:
if self.dimensions is not None:
response = self._client.embeddings.create(
model=self.azure_deployment, dimensions=self.dimensions, input=batch
)
else:
response = self._client.embeddings.create(model=self.azure_deployment, input=batch)

# Append embeddings to the list
all_embeddings.extend(el.embedding for el in response.data)

# Update the meta information only once if it's empty
if not meta["model"]:
meta["model"] = response.model
meta["usage"] = dict(response.usage)
else:
# Update the usage tokens
meta["usage"]["prompt_tokens"] += response.usage.prompt_tokens
meta["usage"]["total_tokens"] += response.usage.total_tokens

except Exception as e:
# Log the error but continue processing
batch_range = f"{i} - {i + batch_size}"
logger.exception(f"Failed embedding of documents in range: {batch_range} caused by {e}")
continue
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Could you please align this implementation with that of the OpenAIDocumentEmbedder?

I think that it is better for a few reasons:

  • groups args for the embedding creation API call
  • uses the more specific APIError instead of Exception
  • logs the IDs of the Documents for which the embedding generation failed

Copy link
Author

@mdrazak2001 mdrazak2001 Mar 3, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks for the review @anakin87.
For points 1 and 2, I can update the implementation to:

  • Group args for the embedding creation API call
  • Use the more specific APIError instead of Exception

For point 3 (logging document IDs), I notice this would require changing the signature of _prepare_texts_to_embed from:

def _prepare_texts_to_embed(self, documents: List[Document]) -> List[str]

to:

def _prepare_texts_to_embed(self, documents: List[Document]) -> Dict[str, str]

Would you be okay with this signature change to align it with OpenAIDocumentEmbedder's implementation? This would help improve error logging by identifying which specific documents failed during embedding.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm totally OK with changing the signature of _prepare_texts_to_embed.
It's an internal method (_something), so changing its signature and behavior is not considered a breaking change.


return all_embeddings, meta

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
---
enhancements:
- |
Handle Azure OpenAI embedder exceptions gracefully
- Add error handling in _embed_batch inside `AzureOpenAIDocumentEmbedder` to continue processing remaining documents
- Add unit tests for graceful error handling
24 changes: 24 additions & 0 deletions test/components/embedders/test_azure_document_embedder.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,14 @@
# SPDX-License-Identifier: Apache-2.0
import os

from openai import APIError

from haystack.utils.auth import Secret
import pytest

from haystack import Document
from haystack.components.embedders import AzureOpenAIDocumentEmbedder
from unittest.mock import Mock, patch


class TestAzureOpenAIDocumentEmbedder:
Expand Down Expand Up @@ -83,6 +87,26 @@ def test_from_dict(self, monkeypatch):
assert component.suffix == ""
assert component.default_headers == {}

def test_embed_batch_handles_exceptions_gracefully(self, caplog):
    """Verify that an APIError raised during batch embedding is logged and swallowed.

    `_embed_batch` should not propagate the exception; instead it logs one record
    identifying the failed batch range and continues.
    """
    component = AzureOpenAIDocumentEmbedder(
        azure_endpoint="https://test.openai.azure.com",
        api_key=Secret.from_token("fake-api-key"),
        azure_deployment="text-embedding-ada-002",
        embedding_separator=" | ",
    )

    texts = ["Cuisine | I love cheese", "ML | A transformer is a deep learning architecture"]

    # Force every call to the embeddings API to raise, simulating a service failure.
    mocked_error = APIError(message="Mocked error", request=Mock(), body=None)
    with patch.object(component._client.embeddings, "create", side_effect=mocked_error):
        component._embed_batch(texts_to_embed=texts, batch_size=32)

    # Both texts fit in a single batch of 32, so exactly one failure is logged.
    assert len(caplog.records) == 1
    assert "Failed embedding of documents in range: 0 - 32 caused by Mocked error" in caplog.text

@pytest.mark.integration
@pytest.mark.skipif(
not os.environ.get("AZURE_OPENAI_API_KEY", None) and not os.environ.get("AZURE_OPENAI_ENDPOINT", None),
Expand Down