Summary
- How to use each Method by situation
- How to use Runnables
LCEL (LangChain Expression Language) is an interface that ties prompt construction, model instantiation, and output parsing together into a ==Chain==, so that complex workflows can be built easily and intuitively.
You can compose your own Chain with the pipe character (|).
1️⃣ Methods
| Sync/Async | Description |
|---|---|
| invoke/ainvoke | Returns the output for a single input. |
| batch/abatch | Takes a list of inputs and processes them in one call. |
| stream/astream | Emits the output chunk by chunk. |
| astream_log | Streams the intermediate steps as well. |
Synchronous vs. Asynchronous
| | Synchronous | Asynchronous |
|---|---|---|
| Description | Tasks run one after another | Multiple tasks run at the same time |
| Pros | Code is simple and easy to follow; debugging is easy | Efficient; more responsive |
| Cons | Resources are used inefficiently; one failed task can halt the whole program | Debugging is more complex |
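The same contrast can be seen with nothing but the standard library. A minimal sketch, using a 1-second `asyncio.sleep` as a stand-in for an API call (the `task`/`main` names and the delay are illustrative assumptions, not from the original):

```python
import asyncio
import time

async def task(n: int) -> int:
    await asyncio.sleep(1)  # stands in for I/O such as an LLM API call
    return n

async def main():
    start = time.time()
    await asyncio.gather(task(1), task(2), task(3))  # the three waits overlap
    print(f"async: {time.time() - start:.2f}s")  # ~1s; a sequential loop would take ~3s

asyncio.run(main())
```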
```python
# Basic code
from langchain_openai import ChatOpenAI
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser

# "Explain {input} in Korean in one line"
prompt = PromptTemplate.from_template("{input}에 대해 한국어로 한 줄로 설명해줘")
model = ChatOpenAI(model_name="gpt-3.5-turbo")
output_parser = StrOutputParser()

chain = prompt | model | output_parser
```

📌 invoke / ainvoke
```python
import time
import asyncio

# Example inputs: "Python", "LangChain", "Java" (the original does not show input_list)
input_list = [{"input": "파이썬"}, {"input": "랭체인"}, {"input": "자바"}]

def run_sync(input_list):
    """Run the chain with invoke, one input at a time."""
    start_time = time.time()
    for input in input_list:
        result = chain.invoke(input)
        print(result)
    end_time = time.time()
    print("=" * 100)
    print(f"Sync execution time: {end_time - start_time:.2f} seconds")

async def run_async(input_list):
    """Run the chain with ainvoke, gathering all inputs concurrently."""
    start_time = time.time()
    tasks = [chain.ainvoke(input) for input in input_list]
    results = await asyncio.gather(*tasks)
    end_time = time.time()
    print(f"Async execution time: {end_time - start_time:.2f} seconds")
    print("=" * 100)
    for result in results:
        print(result)

run_sync(input_list)
await run_async(input_list)
# Sync execution time: 7.13 seconds
# Async execution time: 1.58 seconds
```
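One caveat: the bare top-level `await` above only works where an event loop is already running, such as a Jupyter notebook. In a plain script you would wrap the call, for example:

```python
import asyncio

# Script equivalent of `await run_async(input_list)`
asyncio.run(run_async(input_list))
```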
📌 batch / abatch
The speed difference between batch and abatch does not look significant here, but it should become visible in more complex code.
```python
import time
import asyncio

def run_sync(input_list):
    """Run the chain with batch."""
    start_time = time.time()
    result = chain.batch(input_list)
    end_time = time.time()
    print(f"Sync execution time: {end_time - start_time:.2f} seconds")
    print("=" * 100)
    print("\n".join(result))

async def run_async(input_list):
    """Run the chain with abatch."""
    start_time = time.time()
    result = await chain.abatch(input_list)
    end_time = time.time()
    print(f"Async execution time: {end_time - start_time:.2f} seconds")
    print("=" * 100)
    print("\n".join(result))

run_sync(input_list)
await run_async(input_list)
# Sync execution time: 1.78 seconds
# Async execution time: 1.65 seconds
```
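batch and abatch also accept a RunnableConfig, which lets you cap how many inputs run at once. A brief sketch (the limit of 5 is an arbitrary example):

```python
# Process at most 5 inputs at a time; remaining inputs are queued
result = chain.batch(input_list, config={"max_concurrency": 5})
```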
📌 stream / astream
The return value is a generator; printing it in a for loop streams the output chunk by chunk.
```python
# Confirm that the return value is a generator
chain.stream({"input": "파이썬"})
# Output
# <generator object RunnableSequence.stream at 0x0000014E37FB6650>

# stream
for chunk in chain.stream({"input": "파이썬"}):
    print(chunk, end="", flush=True)

# astream
async for chunk in chain.astream({"input": "파이썬"}):
    print(chunk, end="", flush=True)
```

📌 astream_log
This logs the chain's execution steps, which makes debugging easier.
```python
stream = chain.astream_log({"input": "파이썬"})
async for chunk in stream:
    print(chunk)
    print("=" * 100)
```

Output:
```
RunLogPatch({'op': 'replace', 'path': '', 'value': {'final_output': None, 'id': '7e312a61-7190-46b7-8f04-c88ab333f58d', 'logs': {}, 'name': 'RunnableSequence', 'streamed_output': [], 'type': 'chain'}})
==================================================
RunLogPatch({'op': 'add', 'path': '/logs/PromptTemplate', 'value': {'end_time': None, 'final_output': None, 'id': 'aec36f3d-a8ea-4b51-8c74-e0ab0d56b3b6', 'metadata': {}, 'name': 'PromptTemplate', 'start_time': '2024-06-27T01:56:14.017+00:00', 'streamed_output': [], 'streamed_output_str': [], 'tags': ['seq:step:1'], 'type': 'prompt'}})
==================================================
RunLogPatch({'op': 'add', 'path': '/logs/PromptTemplate/final_output', 'value': StringPromptValue(text='파이썬에 대해 한국어로 한 줄로 설명해줘')}, {'op': 'add', 'path': '/logs/PromptTemplate/end_time', 'value': '2024-06-27T01:56:14.023+00:00'})
==================================================
RunLogPatch({'op': 'add', 'path': '/logs/ChatOpenAI', 'value': {'end_time': None, 'final_output': None, 'id': 'b0736693-8368-4871-8021-62c6c0f3255f', 'metadata': {}, 'name': 'ChatOpenAI', 'start_time': '2024-06-27T01:56:14.030+00:00', 'streamed_output': [], 'streamed_output_str': [], 'tags': ['seq:step:2'], 'type': 'llm'}})
==================================================
RunLogPatch({'op': 'add', 'path': '/logs/ChatOpenAI/streamed_output_str/-', 'value': ''}, {'op': 'add', 'path': '/logs/ChatOpenAI/streamed_output/-', 'value': AIMessageChunk(content='', id='run-b0736693-8368-4871-8021-62c6c0f3255f')})
==================================================
RunLogPatch({'op': 'add', 'path': '/logs/StrOutputParser', 'value': {'end_time': None, 'final_output': None, 'id': '202ac825-a78c-47c7-bf02-bbe3e8e3b798', 'metadata': {}, 'name': 'StrOutputParser', 'start_time': '2024-06-27T01:56:14.866+00:00', 'streamed_output': [], 'streamed_output_str': [], 'tags': ['seq:step:3'], 'type': 'parser'}})
==================================================
RunLogPatch({'op': 'add', 'path': '/logs/StrOutputParser/streamed_output/-', 'value': ''})
==================================================
RunLogPatch({'op': 'add', 'path': '/streamed_output/-', 'value': ''}, {'op': 'replace', 'path': '/final_output', 'value': ''})
==================================================
RunLogPatch({'op': 'add', 'path': '/logs/ChatOpenAI/streamed_output_str/-', 'value': '파'}, {'op': 'add', 'path': '/logs/ChatOpenAI/streamed_output/-', 'value': AIMessageChunk(content='파', id='run-b0736693-8368-4871-8021-62c6c0f3255f')})
==================================================
RunLogPatch({'op': 'add', 'path': '/logs/StrOutputParser/streamed_output/-', 'value': '파'})
==================================================
RunLogPatch({'op': 'add', 'path': '/streamed_output/-', 'value': '파'}, {'op': 'replace', 'path': '/final_output', 'value': '파'})
==================================================
RunLogPatch({'op': 'add', 'path': '/logs/ChatOpenAI/streamed_output_str/-', 'value': '이'}, {'op': 'add', 'path': '/logs/ChatOpenAI/streamed_output/-', 'value': AIMessageChunk(content='이', id='run-b0736693-8368-4871-8021-62c6c0f3255f')})
==================================================
RunLogPatch({'op': 'add', 'path': '/logs/StrOutputParser/streamed_output/-', 'value': '이'})
==================================================
RunLogPatch({'op': 'add', 'path': '/streamed_output/-', 'value': '이'}, {'op': 'replace', 'path': '/final_output', 'value': '파이'})
==================================================
```
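astream_log can also restrict which runs appear in the log. A short sketch using its include_names filter, assuming you only want the model's patches:

```python
# Only emit log patches for the ChatOpenAI step
async for chunk in chain.astream_log({"input": "파이썬"}, include_names=["ChatOpenAI"]):
    print(chunk)
```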
📌 callbacks
Streaming can also be done with the callbacks option of ChatOpenAI.
There is a BaseCallbackHandler class, and by customizing it you can cover more varied situations than .stream() alone (see the sketch after Method 3 below).
Refer to the official LangChain documentation for the details of this class.
```python
# Basic code
from typing import Any
from langchain_openai import ChatOpenAI
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.callbacks import BaseCallbackHandler

class CustomHandler(BaseCallbackHandler):
    def on_llm_new_token(self, token: str, **kwargs: Any) -> Any:
        """Run on new LLM token. Only available when streaming is enabled."""
        print(token, end="", flush=True)

# "Explain {input} in Korean in one line"
prompt = PromptTemplate.from_template("{input}에 대해 한국어로 한 줄로 설명해줘")
output_parser = StrOutputParser()
```

Method 1
```python
model = ChatOpenAI(
    model_name="gpt-3.5-turbo",
    streaming=True,
    callbacks=[CustomHandler()]
)
chain = prompt | model | output_parser
response = chain.invoke({"input": "파이썬"})
```

Method 2
```python
model = ChatOpenAI(
    model_name="gpt-3.5-turbo",
    streaming=True
)
chain = prompt | model | output_parser
model.callbacks = [CustomHandler()]
response = chain.invoke({"input": "파이썬"})
```

Method 3
```python
model = ChatOpenAI(
    model_name="gpt-3.5-turbo",
    streaming=True
)
chain = prompt | model | output_parser
response = chain.invoke(
    {"input": "파이썬"},
    {"callbacks": [CustomHandler()]}
)
```
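A handler can implement hooks other than on_llm_new_token, which is what lets callbacks reach situations .stream() cannot. A minimal sketch of a hypothetical handler that times the LLM call via the start/end hooks (the timing logic is an illustration, not from the original):

```python
import time
from typing import Any

from langchain_core.callbacks import BaseCallbackHandler

class TimingHandler(BaseCallbackHandler):
    """Hypothetical handler: measures LLM latency with on_llm_start/on_llm_end."""

    def on_llm_start(self, serialized: dict, prompts: list, **kwargs: Any) -> Any:
        self.start_time = time.time()

    def on_llm_end(self, response: Any, **kwargs: Any) -> Any:
        print(f"\nLLM call took {time.time() - self.start_time:.2f}s")
```

It attaches the same way as CustomHandler in any of the three methods above. Note the scope difference: constructor callbacks (Methods 1 and 2) fire only for that model instance, while callbacks passed in the invoke config (Method 3) apply to every runnable in the chain for that run.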
2️⃣ Runnable

These are tools for flexibly customizing a chain when its inputs need to be transformed.
| Function | Description |
|---|---|
| RunnablePassthrough() | Passes the input value through unchanged. |
| RunnablePassthrough.assign() | Transforms the input value or adds new keys to it. |
| RunnableLambda() | Builds new values from the input with a function. |
| RunnableParallel() | Runs chains that share the same input in parallel. |
```python
# Basic code
from langchain_openai import ChatOpenAI
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough, RunnableParallel  # used in the examples below

# "Please translate '{input}' into English"
prompt = PromptTemplate.from_template("'{input}'을 영어로 번역해주세요")
model = ChatOpenAI(model_name="gpt-3.5-turbo")
output_parser = StrOutputParser()
```

📌 RunnablePassthrough()
Passes the input value through unchanged. The chain normally has to be invoked with a dictionary, but this way it can accept a plain string, build the dictionary inside the runnable, and then pass it to the prompt.
runnable = {"input": RunnablePassthrough()}
chain = runnable | prompt | model | output_parser
chain.invoke("λ€λμ₯")
# Output
# 'Squirrel'π RunnablePassthrough.assign()
It can transform the input value or create new input keys.
```python
# Append "를 보았습니다" ("I saw a ~") to the input value
add_runnable = RunnablePassthrough.assign(input=lambda x: x["input"] + "를 보았습니다")
add_runnable.invoke({"input": "다람쥐"})
# Output
# {'input': '다람쥐를 보았습니다'}
```
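Because assign returns a runnable, it can be dropped straight into the translation chain from the basic code above; a sketch under that assumption (the model's answer will vary):

```python
chain = {"input": RunnablePassthrough()} | add_runnable | prompt | model | output_parser
chain.invoke("다람쥐")  # the prompt receives '다람쥐를 보았습니다' ("I saw a squirrel")
# e.g. 'I saw a squirrel.'
```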
📌 RunnableLambda()

You can write a function to transform the input value.
```python
from langchain_core.runnables import RunnableLambda

def add_text(input):
    # Prepend "세상에서 가장 작은" ("the smallest ... in the world")
    return "세상에서 가장 작은 " + input

runnable = {"input": RunnableLambda(add_text)}
chain = runnable | prompt | model | output_parser
chain.invoke("다람쥐")
# Output
# 'The smallest squirrel in the world'
```
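As an aside, LCEL coerces plain Python objects during composition: a dict becomes a RunnableParallel and a function is wrapped in a RunnableLambda automatically, so the explicit wrapper above can usually be dropped. A sketch that should behave the same:

```python
# add_text is coerced to RunnableLambda(add_text) when composed with |
chain = {"input": add_text} | prompt | model | output_parser
```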
📌 RunnableParallel()

Several chains that take the same input can be managed in parallel.
```python
# "Tell me the main language of {country}" / "Tell me three representative landmarks of {country}"
prompt1 = PromptTemplate.from_template("{country}의 주요 언어를 알려줘")
prompt2 = PromptTemplate.from_template("{country}의 대표적인 랜드마크 3개를 알려줘")

chain1 = prompt1 | model | output_parser
chain2 = prompt2 | model | output_parser

combined = RunnableParallel(
    language=chain1,
    landmarks=chain2
)
combined.invoke({"country": "한국"})  # "South Korea"
# Output (in Korean: 'language' says Korean is the main language;
# 'landmarks' lists Namsan Tower, Gyeongbokgung Palace, and Busan Tower)
# {'language': '한국의 주요 언어는 한국어입니다. 한국어는 대부분의 한국 사람들이 사용하는 언어로, 국내에서는 공식 언어로 사용되고 있습니다. 또한, 영어도 많은 사람들이 학습하고 사용하고 있으며, 중국어와 일본어도 일부 지역에서 사용되고 있습니다.',
#  'landmarks': '1. 남산타워 - 서울의 대표적인 랜드마크로서, 서울 시내와 한강을 한눈에 볼 수 있는 전망대가 유명하다.\n2. 경복궁 - 서울에 위치한 조선 시대의 궁궐로서, 아름다운 전통 한옥 건물과 근정전, 경회루 등을 볼 수 있다.\n3. 부산 타워 - 부산의 랜드마크로서, 부산 시내와 해안도로를 한눈에 볼 수 있는 전망대와 야경이 유명하다.'}
```

📌 Using itemgetter
itemgetter extracts the value of a specific key from a dictionary.
```python
from operator import itemgetter

# "Please translate '{input}' into {language}"
prompt = PromptTemplate.from_template("'{input}'을 {language}로 번역해주세요")

runnable = {
    # Extract "input", then append "는 맛있어" ("is delicious")
    "input": itemgetter("input") | RunnableLambda(lambda x: x + "는 맛있어"),
    "language": itemgetter("language")
}
chain = runnable | prompt | model | output_parser
chain.invoke({"input": "오렌지", "language": "영어"})  # "orange", "English"
# Output
# '"Orange is delicious."'
```