import asyncio
from typing import Any, Dict, List

from dotenv import load_dotenv
from langchain.callbacks import AsyncIteratorCallbackHandler
from langchain.schema import HumanMessage, LLMResult
from langchain_core.callbacks import BaseCallbackHandler
from langchain_deepseek import ChatDeepSeek
load_dotenv()
class CustomSyncHandler(BaseCallbackHandler): def on_llm_new_token(self,token: str,**kwargs) -> None: print(f"同步处理器: token: {token}") def on_llm_start(self,serialized: dict[str, any],prompts: List[str],**kwargs: any) -> None: print("同步处理:LLM正在开始") def on_llm_end(self,response: LLMResult,**kwargs: any) -> None: print("同步处理:LLM正在结束")
class CustomAsyncHandler(BaseCallbackHandler): def on_llm_new_token(self,token: str,**kwargs) -> None: print(f"异步处理器: token: {token}") async def on_llm_start(self,serialized: dict[str, any],prompts: List[str],**kwargs: any) -> None: print("异步处理:LLM正在开始") async def on_llm_end(self,response: LLMResult,**kwargs: any) -> None: print("同步处理:LLM正在结束")
llm_sync = ChatDeepSeek(model="deepseek-chat",callbacks=[CustomSyncHandler()]) llm_async = ChatDeepSeek(model="deepseek-chat",streaming=True,callbacks=[CustomAsyncHandler()])
def main(): print("=== 同步调用示例 ===") result_sync = llm_sync.invoke(input=[HumanMessage(content="给我讲一个笑话")]) print(f"同步结果: {result_sync.content}")
print("\n=== 异步调用示例 ===") async def async_call(): result_async = await llm_async.ainvoke( input=[HumanMessage(content="给我讲一个笑话")] ) print(f"异步结果: {result_async.content}")
asyncio.run(async_call())
if __name__ == "__main__": main()