fix(langchain): avoid patching libraries if not available [backport #8339 to 2.8] (#8986)

Backport #8339 to 2.8.

This PR changes the langchain integration such that it will check for
partner libraries before attempting to patch them.

The langchain integration patches `langchain_openai.OpenAIEmbeddings.*`
and `langchain_pinecone.PineconeVectorStore.*`, which belong to partner
libraries that are not required to be installed. Currently, if those
libraries are not installed, patching raises `ModuleNotFoundError`. This PR
fixes that by skipping patching of those methods when the corresponding
partner library is not available.
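
For illustration, a minimal sketch of the guard pattern (not the exact integration code): each optional partner library is imported defensively, and the corresponding wrapping is skipped when the import fails. The `wrap`, `traced_embedding`, and `traced_similarity_search` parameters are placeholders standing in for the integration's own helpers.

```python
# Sketch only: optional partner libraries are imported defensively so a
# missing package results in `None` instead of ModuleNotFoundError.
try:
    import langchain_openai  # optional partner library
except ImportError:
    langchain_openai = None

try:
    import langchain_pinecone  # optional partner library
except ImportError:
    langchain_pinecone = None


def patch_partner_libraries(wrap, traced_embedding, traced_similarity_search, langchain):
    # Only wrap methods whose partner library actually imported; otherwise
    # skip silently rather than raising at patch time.
    if langchain_openai is not None:
        wrap("langchain_openai", "OpenAIEmbeddings.embed_documents", traced_embedding(langchain))
    if langchain_pinecone is not None:
        wrap("langchain_pinecone", "PineconeVectorStore.similarity_search", traced_similarity_search(langchain))
```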

Additionally, this PR imports `langchain_community.llms` and
`langchain_community.chat_models`, since those submodules are not
automatically imported when `langchain_community` itself is imported. This
is important because our patch code references those submodules later on.
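
As a small, hedged illustration of the import behavior involved (assuming `langchain_community` is installed): importing a package does not necessarily bind its submodules as attributes, so they must be imported explicitly before the patch code accesses them.

```python
import importlib

# Importing the top-level package does not guarantee its submodules are
# loaded; whether `community.llms` resolves depends on what the package's
# __init__.py happens to import.
community = importlib.import_module("langchain_community")

# Explicitly importing the submodules binds them on the parent package, so
# later code can safely reference langchain_community.llms and
# langchain_community.chat_models.
importlib.import_module("langchain_community.llms")
importlib.import_module("langchain_community.chat_models")

assert hasattr(community, "llms")
assert hasattr(community, "chat_models")
```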


## Checklist

- [x] Change(s) are motivated and described in the PR description
- [x] Testing strategy is described if automated tests are not included
in the PR
- [x] Risks are described (performance impact, potential for breakage,
maintainability)
- [x] Change is maintainable (easy to change, telemetry, documentation)
- [x] [Library release note
guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html)
are followed or label `changelog/no-changelog` is set
- [x] Documentation is included (in-code, generated user docs, [public
corp docs](https://github.com/DataDog/documentation/))
- [x] Backport labels are set (if
[applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting))
- [x] If this PR changes the public interface, I've notified
`@DataDog/apm-tees`.

## Reviewer Checklist

- [x] Title is accurate
- [x] All changes are related to the pull request's stated goal
- [x] Description motivates each change
- [x] Avoids breaking
[API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces)
changes
- [x] Testing strategy adequately addresses listed risks
- [x] Change is maintainable (easy to change, telemetry, documentation)
- [x] Release note makes sense to a user of the library
- [x] Author has acknowledged and discussed the performance implications
of this PR as reported in the benchmarks PR comment
- [x] Backport labels are set in a manner that is consistent with the
[release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)
Yun-Kim committed Apr 16, 2024
1 parent 7e49c10 commit b4a5e64
Showing 3 changed files with 17 additions and 6 deletions.
14 changes: 10 additions & 4 deletions ddtrace/contrib/langchain/patch.py
@@ -779,7 +779,9 @@ def patch():
    # ref: https://github.com/DataDog/dd-trace-py/issues/7123
    if SHOULD_PATCH_LANGCHAIN_COMMUNITY:
        from langchain.chains.base import Chain  # noqa:F401
        from langchain_community import chat_models  # noqa:F401
        from langchain_community import embeddings  # noqa:F401
        from langchain_community import llms  # noqa:F401
        from langchain_community import vectorstores  # noqa:F401

        wrap("langchain_core", "language_models.llms.BaseLLM.generate", traced_llm_generate(langchain))
@@ -796,8 +798,10 @@ def patch():
        )
        wrap("langchain", "chains.base.Chain.invoke", traced_chain_call(langchain))
        wrap("langchain", "chains.base.Chain.ainvoke", traced_chain_acall(langchain))
        wrap("langchain_openai", "OpenAIEmbeddings.embed_documents", traced_embedding(langchain))
        wrap("langchain_pinecone", "PineconeVectorStore.similarity_search", traced_similarity_search(langchain))
        if langchain_openai:
            wrap("langchain_openai", "OpenAIEmbeddings.embed_documents", traced_embedding(langchain))
        if langchain_pinecone:
            wrap("langchain_pinecone", "PineconeVectorStore.similarity_search", traced_similarity_search(langchain))
    else:
        from langchain import embeddings  # noqa:F401
        from langchain import vectorstores  # noqa:F401
@@ -884,8 +888,10 @@ def unpatch():
        unwrap(langchain_core.language_models.chat_models.BaseChatModel, "agenerate")
        unwrap(langchain.chains.base.Chain, "invoke")
        unwrap(langchain.chains.base.Chain, "ainvoke")
        unwrap(langchain_openai.OpenAIEmbeddings, "embed_documents")
        unwrap(langchain_pinecone.PineconeVectorStore, "similarity_search")
        if langchain_openai:
            unwrap(langchain_openai.OpenAIEmbeddings, "embed_documents")
        if langchain_pinecone:
            unwrap(langchain_pinecone.PineconeVectorStore, "similarity_search")

    else:
        unwrap(langchain.llms.base.BaseLLM, "generate")
@@ -0,0 +1,5 @@
---
fixes:
- |
langchain: This fix resolves an issue where the LangChain integration always attempted to patch LangChain partner
libraries, even if they were not available.
4 changes: 2 additions & 2 deletions tests/contrib/langchain/test_langchain_community.py
@@ -487,7 +487,7 @@ def test_openai_math_chain_sync(langchain, langchain_openai, request_vcr):
@pytest.mark.snapshot(token="tests.contrib.langchain.test_langchain_community.test_chain_invoke")
def test_chain_invoke_dict_input(langchain, langchain_openai, request_vcr):
    prompt_template = "what is {base} raised to the fifty-fourth power?"
    prompt = langchain.prompts.PromptTemplate(input_variables=["adjective"], template=prompt_template)
    prompt = langchain.prompts.PromptTemplate(input_variables=["base"], template=prompt_template)
    chain = langchain.chains.LLMChain(llm=langchain_openai.OpenAI(temperature=0), prompt=prompt)
    with request_vcr.use_cassette("openai_math_chain_sync.yaml"):
        chain.invoke(input={"base": "two"})
@@ -496,7 +496,7 @@ def test_chain_invoke_dict_input(langchain, langchain_openai, request_vcr):
@pytest.mark.snapshot(token="tests.contrib.langchain.test_langchain_community.test_chain_invoke")
def test_chain_invoke_str_input(langchain, langchain_openai, request_vcr):
    prompt_template = "what is {base} raised to the fifty-fourth power?"
    prompt = langchain.prompts.PromptTemplate(input_variables=["adjective"], template=prompt_template)
    prompt = langchain.prompts.PromptTemplate(input_variables=["base"], template=prompt_template)
    chain = langchain.chains.LLMChain(llm=langchain_openai.OpenAI(temperature=0), prompt=prompt)
    with request_vcr.use_cassette("openai_math_chain_sync.yaml"):
        chain.invoke("two")
