{"cells":[{"cell_type":"markdown","id":"f1571abe-8e84-44d1-b222-e4121fdbb4be","metadata":{},"source":["# 高级 RAG 评估\n","\n","本手册介绍了在高级 RAG 上运行 eval(s) 的过程。\n","\n","这对于确定适合您应用程序的最佳 RAG 方法非常有用。"]},{"cell_type":"code","execution_count":null,"id":"0d8415ee-709c-407f-9ac2-f03a9d697aaf","metadata":{},"outputs":[],"source":["# 安装最新版本的 langchain, openai, chromadb, langchain-experimental\n","! pip install -U langchain openai chromadb langchain-experimental"]},{"cell_type":"code","execution_count":null,"id":"191f8465-fd6b-4017-8f0e-d284971b45ae","metadata":{},"outputs":[],"source":["# 由于较新版本中存在持久性错误,将版本锁定为0.10.19\n","! pip install \"unstructured[all-docs]==0.10.19\" pillow pydantic lxml pillow matplotlib tiktoken open_clip_torch torch\n","\n","# 安装所需的软件包\n","# - \"unstructured[all-docs]==0.10.19\":安装名为unstructured的软件包,版本为0.10.19,并包括所有文档\n","# - pillow:Python图像处理库\n","# - pydantic:用于数据验证和设置的Python库\n","# - lxml:用于处理XML和HTML的Python库\n","# - matplotlib:用于绘制图表和可视化数据的Python库\n","# - tiktoken:用于分词和标记化的Python库\n","# - open_clip_torch:用于训练和使用OpenAI的CLIP模型的Python库\n","# - torch:用于深度学习的Python库"]},{"cell_type":"markdown","id":"45949db5-d9b6-44a9-85f8-96d83a288616","metadata":{},"source":["## 数据加载\n","\n","让我们来看一个[示例白皮书](https://sgp.fas.org/crs/misc/IF10244.pdf),其中包含有关美国森林火灾的表格、文本和图片的混合内容。"]},{"cell_type":"markdown","id":"961a42b9-c16b-472e-b994-3c3f73afbbcb","metadata":{},"source":["### 选项 1:加载文本"]},{"cell_type":"code","execution_count":1,"id":"12f24fc0-c176-4201-982b-8a84b278ff1b","metadata":{},"outputs":[],"source":["# 路径\n","path = \"/Users/rlm/Desktop/cpi/\"\n","\n","# 加载\n","from langchain_community.document_loaders import PyPDFLoader\n","\n","loader = PyPDFLoader(path + \"cpi.pdf\")\n","pdf_pages = loader.load()\n","\n","# 分割\n","from langchain_text_splitters import RecursiveCharacterTextSplitter\n","\n","text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)\n","all_splits_pypdf = text_splitter.split_documents(pdf_pages)\n","all_splits_pypdf_texts = [d.page_content for d in all_splits_pypdf]"]},{"cell_type":"markdown","id":"92fc1870-1836-4bc3-945a-78e2c16ad823","metadata":{},"source":["### 选项2:加载文本、表格和图片\n","\n"]},{"cell_type":"code","execution_count":2,"id":"7d863632-f894-4471-b4cc-a1d9aa834d29","metadata":{},"outputs":[],"source":["\n","from unstructured.partition.pdf import partition_pdf # 导入 partition_pdf 函数\n","\n","# 提取图片、表格和分块文本\n","raw_pdf_elements = partition_pdf( # 调用 partition_pdf 函数\n"," filename=path + \"cpi.pdf\", # 传入文件名参数\n"," extract_images_in_pdf=True, # 提取 PDF 中的图片\n"," infer_table_structure=True, # 推断表格结构\n"," chunking_strategy=\"by_title\", # 按标题分块策略\n"," max_characters=4000, # 最大字符数\n"," new_after_n_chars=3800, # 在第 n 个字符后换行\n"," combine_text_under_n_chars=2000, # 在少于 n 个字符的文本下合并\n"," image_output_dir_path=path, # 图片输出目录路径\n",")\n","\n","# 按类型分类\n","tables = [] # 表格列表\n","texts = [] # 文本列表\n","for element in raw_pdf_elements: # 遍历 raw_pdf_elements\n"," if \"unstructured.documents.elements.Table\" in str(type(element)): # 如果是表格类型\n"," tables.append(str(element)) # 将元素转换为字符串并添加到表格列表\n"," elif \"unstructured.documents.elements.CompositeElement\" in str(type(element)): # 如果是复合元素类型\n"," texts.append(str(element)) # 将元素转换为字符串并添加到文本列表\n"]},{"cell_type":"markdown","id":"65f399c5-bd91-4ed4-89c6-c89d2e17466e","metadata":{},"source":["## 存储\n","\n","### 选项 1:嵌入,存储文本块"]},{"cell_type":"code","execution_count":3,"id":"7d7ecdb2-0bb5-46b8-bcff-af8fc272e88e","metadata":{},"outputs":[],"source":["from langchain_community.vectorstores import Chroma\n","from langchain_openai import OpenAIEmbeddings\n","\n","baseline = Chroma.from_texts(\n"," texts=all_splits_pypdf_texts,\n"," collection_name=\"baseline\",\n"," embedding=OpenAIEmbeddings(),\n",")\n","retriever_baseline = baseline.as_retriever()"]},{"cell_type":"markdown","id":"6a0eaefe-5e4b-4853-94c7-5abd6f7fbeac","metadata":{},"source":["### 选项2:多向量检索器\n","\n","#### 文本摘要"]},{"cell_type":"code","execution_count":4,"id":"3d4b4b43-e96e-48ab-899d-c39d0430562e","metadata":{},"outputs":[],"source":["from langchain_core.output_parsers import StrOutputParser\n","from langchain_core.prompts import ChatPromptTemplate\n","from langchain_openai import ChatOpenAI\n","\n","# 提示\n","prompt_text = \"\"\"You are an assistant tasked with summarizing tables and text for retrieval. \\\n","These summaries will be embedded and used to retrieve the raw text or table elements. \\\n","Give a concise summary of the table or text that is well optimized for retrieval. Table or text: {element} \"\"\"\n","prompt = ChatPromptTemplate.from_template(prompt_text)\n","\n","# 文本摘要链\n","model = ChatOpenAI(temperature=0, model=\"gpt-4\")\n","summarize_chain = {\"element\": lambda x: x} | prompt | model | StrOutputParser()\n","\n","# 应用于文本\n","text_summaries = summarize_chain.batch(texts, {\"max_concurrency\": 5})\n","\n","# 应用于表格\n","table_summaries = summarize_chain.batch(tables, {\"max_concurrency\": 5})"]},{"cell_type":"markdown","id":"bdb5c903-5b4c-4ddb-8f9a-e20f5155dfb9","metadata":{},"source":["#### 图像摘要"]},{"cell_type":"code","execution_count":9,"id":"4570578c-531b-422c-bedd-cc519d9b7887","metadata":{},"outputs":[],"source":["# 图像摘要链\n","import base64\n","import io\n","import os\n","from io import BytesIO\n","\n","from langchain_core.messages import HumanMessage\n","from PIL import Image\n","\n","# 定义函数,用于将图像编码为base64字符串\n","def encode_image(image_path):\n"," \"\"\"获取base64字符串\"\"\"\n"," with open(image_path, \"rb\") as image_file:\n"," return base64.b64encode(image_file.read()).decode(\"utf-8\")\n","\n","# 定义函数,用于图像摘要\n","def image_summarize(img_base64, prompt):\n"," \"\"\"图像摘要\"\"\"\n"," chat = ChatOpenAI(model=\"gpt-4-vision-preview\", max_tokens=1024)\n","\n"," msg = chat.invoke(\n"," [\n"," HumanMessage(\n"," content=[\n"," {\"type\": \"text\", \"text\": prompt},\n"," {\n"," \"type\": \"image_url\",\n"," \"image_url\": {\"url\": f\"data:image/jpeg;base64,{img_base64}\"},\n"," },\n"," ]\n"," )\n"," ]\n"," )\n"," return msg.content\n","\n","# 存储base64编码的图像\n","img_base64_list = []\n","\n","# 存储图像摘要\n","image_summaries = []\n","\n","# 提示信息\n","prompt = \"\"\"You are an assistant tasked with summarizing images for retrieval. \\\n","These summaries will be embedded and used to retrieve the raw image. \\\n","Give a concise summary of the image that is well optimized for retrieval.\"\"\"\n","\n","# 对图像应用操作\n","for img_file in sorted(os.listdir(path)):\n"," if img_file.endswith(\".jpg\"):\n"," img_path = os.path.join(path, img_file)\n"," base64_image = encode_image(img_path)\n"," img_base64_list.append(base64_image)\n"," image_summaries.append(image_summarize(base64_image, prompt))"]},{"cell_type":"markdown","id":"87e03f07-4c82-4743-a3c6-d0597fb55107","metadata":{},"source":["### 选项2a:多向量检索器,带原始图像\n","\n","* 将图像返回给LLM进行答案合成"]},{"cell_type":"code","execution_count":11,"id":"6bf8a07d-203f-4397-8b0b-a84ec4d0adab","metadata":{},"outputs":[],"source":["\n","import uuid # 导入uuid模块,用于生成唯一标识符\n","from base64 import b64decode # 从base64模块中导入b64decode函数\n","\n","from langchain.retrievers.multi_vector import MultiVectorRetriever # 从langchain.retrievers.multi_vector模块中导入MultiVectorRetriever类\n","from langchain.storage import InMemoryStore # 从langchain.storage模块中导入InMemoryStore类\n","from langchain_core.documents import Document # 从langchain_core.documents模块中导入Document类\n","\n","\n","def create_multi_vector_retriever(\n"," vectorstore, text_summaries, texts, table_summaries, tables, image_summaries, images\n","):\n"," # 初始化存储层\n"," store = InMemoryStore()\n"," id_key = \"doc_id\" # 设置文档ID的键名为\"doc_id\"\n","\n"," # 创建多向量检索器\n"," retriever = MultiVectorRetriever(\n"," vectorstore=vectorstore, # 向量存储\n"," docstore=store, # 文档存储\n"," id_key=id_key, # ID键名\n"," )\n","\n"," # 辅助函数,用于向向量存储和文档存储中添加文档\n"," def add_documents(retriever, doc_summaries, doc_contents):\n"," doc_ids = [str(uuid.uuid4()) for _ in doc_contents] # 生成与文档内容数量相等的唯一标识符列表\n"," summary_docs = [\n"," Document(page_content=s, metadata={id_key: doc_ids[i]}) # 创建包含摘要内容和元数据的文档对象\n"," for i, s in enumerate(doc_summaries)\n"," ]\n"," retriever.vectorstore.add_documents(summary_docs) # 向向量存储中添加摘要文档\n"," retriever.docstore.mset(list(zip(doc_ids, doc_contents))) # 向文档存储中添加文档内容\n","\n"," # 添加文本、表格和图片\n"," # 在添加之前检查文本摘要是否为空\n"," if text_summaries:\n"," add_documents(retriever, text_summaries, texts)\n"," # 在添加之前检查表格摘要是否为空\n"," if table_summaries:\n"," add_documents(retriever, table_summaries, tables)\n"," # 在添加之前检查图片摘要是否为空\n"," if image_summaries:\n"," add_documents(retriever, image_summaries, images)\n","\n"," return retriever # 返回创建的多向量检索器\n","\n","\n","# 用于索引摘要的向量存储\n","multi_vector_img = Chroma(\n"," collection_name=\"multi_vector_img\", embedding_function=OpenAIEmbeddings()\n",")\n","\n","# 创建检索器\n","retriever_multi_vector_img = create_multi_vector_retriever(\n"," multi_vector_img, # 向量存储\n"," text_summaries, # 文本摘要\n"," texts, # 文本内容\n"," table_summaries, # 表格摘要\n"," tables, # 表格内容\n"," image_summaries, # 图片摘要\n"," img_base64_list, # 图片的base64编码列表\n",")\n"]},{"cell_type":"code","execution_count":32,"id":"84d5b4ea-51b8-49cf-8ad1-db8f7a50e3cf","metadata":{},"outputs":[],"source":["# 在检索上进行测试\n","\n","# 设置查询语句\n","query = \"CPI中有多少百分比用于住房,与医疗保健、服装和其他商品和服务的百分比相比如何?\"\n","# 设置图片后缀\n","suffix_for_images = \" 包括任何饼图、图表或表格。\"\n","# 调用检索器的多向量图像检索方法,并传入查询语句和图片后缀\n","docs = retriever_multi_vector_img.invoke(query + suffix_for_images)"]},{"cell_type":"code","execution_count":19,"id":"8db51ac6-ec0c-4c5d-a9a7-0316035e139d","metadata":{},"outputs":[{"data":{"text/html":[""],"text/plain":[""]},"metadata":{},"output_type":"display_data"}],"source":["from IPython.display import HTML, display\n","\n","def plt_img_base64(img_base64):\n"," # 创建一个包含base64字符串作为源的HTML img标签\n"," image_html = f''\n","\n"," # 通过渲染HTML来显示图像\n"," display(HTML(image_html))\n","\n","plt_img_base64(docs[1])"]},{"cell_type":"markdown","id":"48b268ec-db04-4107-9833-ea1615f6dbd1","metadata":{},"source":["### 选项2b:带有图像摘要的多向量检索器\n","\n","* 将图像的文本摘要返回给LLM以进行答案合成"]},{"cell_type":"code","execution_count":20,"id":"ae57c804-0dd1-4806-b761-a913efc4f173","metadata":{},"outputs":[],"source":["# 使用的向量存储库来索引摘要\n","multi_vector_text = Chroma(\n"," collection_name=\"multi_vector_text\", embedding_function=OpenAIEmbeddings()\n",")\n","\n","# 创建检索器\n","retriever_multi_vector_img_summary = create_multi_vector_retriever(\n"," multi_vector_text,\n"," text_summaries, # 文本摘要\n"," texts, # 文本\n"," table_summaries, # 表格摘要\n"," tables, # 表格\n"," image_summaries, # 图像摘要\n"," image_summaries, # 图像\n",")"]},{"cell_type":"markdown","id":"580a3d55-5025-472d-9c14-cec7a384379f","metadata":{},"source":["### 选项3:多模态嵌入\n","\n"]},{"cell_type":"code","execution_count":22,"id":"8dbed5dc-f7a3-4324-9436-1c3ebc24f9fd","metadata":{},"outputs":[],"source":["from langchain_experimental.open_clip import OpenCLIPEmbeddings # 导入OpenCLIPEmbeddings类\n","\n","# 创建具有多模态嵌入的Chroma对象\n","multimodal_embd = Chroma(\n"," collection_name=\"multimodal_embd\", embedding_function=OpenCLIPEmbeddings() # 使用OpenCLIPEmbeddings函数创建多模态嵌入\n",")\n","\n","# 获取图像URI\n","image_uris = sorted(\n"," [\n"," os.path.join(path, image_name) # 将图像名称与路径连接起来\n"," for image_name in os.listdir(path) # 遍历路径下的所有文件名\n"," if image_name.endswith(\".jpg\") # 选择以.jpg结尾的文件\n"," ]\n",")\n","\n","# 添加图像和文档\n","if image_uris: # 如果存在图像URI\n"," multimodal_embd.add_images(uris=image_uris) # 添加图像\n","if texts: # 如果存在文本\n"," multimodal_embd.add_texts(texts=texts) # 添加文本\n","if tables: # 如果存在表格\n"," multimodal_embd.add_texts(texts=tables) # 添加表格\n","\n","# 创建检索器\n","retriever_multimodal_embd = multimodal_embd.as_retriever() # 将多模态嵌入转换为检索器\n"]},{"cell_type":"markdown","id":"647abb6c-adf3-4d29-acd2-885c4925fa12","metadata":{},"source":["## RAG\n","\n","### 文本流水线"]},{"cell_type":"code","execution_count":23,"id":"73440ca0-4330-4c16-9d9d-6f27c249ae58","metadata":{},"outputs":[],"source":["from operator import itemgetter # 导入itemgetter函数\n","\n","from langchain_core.runnables import RunnablePassthrough # 从langchain_core.runnables模块导入RunnablePassthrough类\n","\n","# Prompt\n","template = \"\"\"Answer the question based only on the following context, which can include text and tables:\n","{context}\n","Question: {question}\n","\"\"\"\n","rag_prompt_text = ChatPromptTemplate.from_template(template) # 从模板创建rag_prompt_text对象\n","\n","# Build\n","def text_rag_chain(retriever):\n"," \"\"\"RAG chain\"\"\"\n"," \n"," # LLM\n"," model = ChatOpenAI(temperature=0, model=\"gpt-4\") # 使用ChatOpenAI类创建model对象,设置temperature为0,模型为\"gpt-4\"\n","\n"," # RAG pipeline\n"," chain = (\n"," {\"context\": retriever, \"question\": RunnablePassthrough()} # 创建包含retriever和RunnablePassthrough()的字典\n"," | rag_prompt_text # 应用rag_prompt_text\n"," | model # 应用model\n"," | StrOutputParser() # 使用StrOutputParser类解析输出\n"," )\n","\n"," return chain # 返回chain"]},{"cell_type":"markdown","id":"14b358ad-42fd-4c6d-b2c0-215dba135707","metadata":{},"source":["### 多模态管道"]},{"cell_type":"code","execution_count":24,"id":"ae89ce84-283e-4634-8169-9ff16f152807","metadata":{},"outputs":[],"source":["\n","import re\n","\n","from langchain_core.documents import Document\n","from langchain_core.runnables import RunnableLambda\n","\n","# 检查字符串是否类似于base64编码\n","def looks_like_base64(sb):\n"," \"\"\"Check if the string looks like base64.\"\"\"\n"," return re.match(\"^[A-Za-z0-9+/]+[=]{0,2}$\", sb) is not None\n","\n","# 检查base64数据是否为图像数据\n","def is_image_data(b64data):\n"," \"\"\"Check if the base64 data is an image by looking at the start of the data.\"\"\"\n"," image_signatures = {\n"," b\"\\xff\\xd8\\xff\": \"jpg\",\n"," b\"\\x89\\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\": \"png\",\n"," b\"\\x47\\x49\\x46\\x38\": \"gif\",\n"," b\"\\x52\\x49\\x46\\x46\": \"webp\",\n"," }\n"," try:\n"," header = base64.b64decode(b64data)[:8] # 解码并获取前8个字节\n"," for sig, format in image_signatures.items():\n"," if header.startswith(sig):\n"," return True\n"," return False\n"," except Exception:\n"," return False\n","\n","# 分割base64编码的图像和文本\n","def split_image_text_types(docs):\n"," \"\"\"Split base64-encoded images and texts.\"\"\"\n"," b64_images = []\n"," texts = []\n"," for doc in docs:\n"," # 如果文档是Document类型,则提取page_content\n"," if isinstance(doc, Document):\n"," doc = doc.page_content\n"," if looks_like_base64(doc) and is_image_data(doc):\n"," b64_images.append(doc)\n"," else:\n"," texts.append(doc)\n"," return {\"images\": b64_images, \"texts\": texts}\n","\n","# 图像提示函数\n","def img_prompt_func(data_dict):\n"," # 将上下文文本连接成单个字符串\n"," formatted_texts = \"\\n\".join(data_dict[\"context\"][\"texts\"])\n"," messages = []\n","\n"," # 如果存在图像,则将图像添加到消息中\n"," if data_dict[\"context\"][\"images\"]:\n"," image_message = {\n"," \"type\": \"image_url\",\n"," \"image_url\": {\n"," \"url\": f\"data:image/jpeg;base64,{data_dict['context']['images'][0]}\"\n"," },\n"," }\n"," messages.append(image_message)\n","\n"," # 添加用于分析的文本消息\n"," text_message = {\n"," \"type\": \"text\",\n"," \"text\": (\n"," \"Answer the question based only on the provided context, which can include text, tables, and image(s). \"\n"," \"If an image is provided, analyze it carefully to help answer the question.\\n\"\n"," f\"User-provided question / keywords: {data_dict['question']}\\n\\n\"\n"," \"Text and / or tables:\\n\"\n"," f\"{formatted_texts}\"\n"," ),\n"," }\n"," messages.append(text_message)\n"," return [HumanMessage(content=messages)]\n","\n","# 多模态RAG链\n","def multi_modal_rag_chain(retriever):\n"," \"\"\"Multi-modal RAG chain\"\"\"\n","\n"," # 多模态LLM\n"," model = ChatOpenAI(temperature=0, model=\"gpt-4-vision-preview\", max_tokens=1024)\n","\n"," # RAG管道\n"," chain = (\n"," {\n"," \"context\": retriever | RunnableLambda(split_image_text_types),\n"," \"question\": RunnablePassthrough(),\n"," }\n"," | RunnableLambda(img_prompt_func)\n"," | model\n"," | StrOutputParser()\n"," )\n","\n"," return chain\n"]},{"cell_type":"markdown","id":"5e8b0e26-bb7e-420a-a7bd-8512b7eef92f","metadata":{},"source":["### 构建 RAG 管道\n","\n"]},{"cell_type":"code","execution_count":25,"id":"4f1ec8a9-f0fe-4f08-928f-23504803897c","metadata":{},"outputs":[],"source":["# RAG链\n","# 使用retriever_baseline创建文本RAG链\n","chain_baseline = text_rag_chain(retriever_baseline)\n","# 使用retriever_multi_vector_img_summary创建文本RAG链\n","chain_mv_text = text_rag_chain(retriever_multi_vector_img_summary)\n","\n","# 多模态RAG链\n","# 使用retriever_multi_vector_img创建多模态RAG链\n","chain_multimodal_mv_img = multi_modal_rag_chain(retriever_multi_vector_img)\n","# 使用retriever_multimodal_embd创建多模态RAG链\n","chain_multimodal_embd = multi_modal_rag_chain(retriever_multimodal_embd)"]},{"cell_type":"markdown","id":"448d943c-a1b1-4300-9197-891a03232ee4","metadata":{},"source":["## 评估集"]},{"cell_type":"code","execution_count":34,"id":"9aabf72f-26be-437f-9372-b06dc2509235","metadata":{},"outputs":[{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
QuestionAnswerSource
0What percentage of CPI is dedicated to Housing?Housing occupies 42% of CPI.Figure 1
1Medical Care and Transportation account for wh...Transportation accounts for 18% of CPI. Medica...Figure 1
2Based on the CPI Owners' Equivalent Rent and t...The FHFA Purchase Only Price Index appears to ...Figure 2
\n","
"],"text/plain":[" Question \\\n","0 What percentage of CPI is dedicated to Housing? \n","1 Medical Care and Transportation account for wh... \n","2 Based on the CPI Owners' Equivalent Rent and t... \n","\n"," Answer Source \n","0 Housing occupies 42% of CPI. Figure 1 \n","1 Transportation accounts for 18% of CPI. Medica... Figure 1 \n","2 The FHFA Purchase Only Price Index appears to ... Figure 2 "]},"execution_count":34,"metadata":{},"output_type":"execute_result"}],"source":["# 导入 pandas 库\n","import pandas as pd\n","\n","# 读取 cpi_eval.csv 文件并存储在 eval_set 变量中\n","eval_set = pd.read_csv(path + \"cpi_eval.csv\")\n","\n","# 显示 eval_set 的前三行数据\n","eval_set.head(3)"]},{"cell_type":"code","execution_count":35,"id":"7fdeb77a-e185-47d2-a93f-822f1fc810a2","metadata":{},"outputs":[],"source":["from langsmith import Client\n","\n","# 数据集\n","client = Client() # 创建一个Client对象\n","dataset_name = f\"CPI Eval {str(uuid.uuid4())}\" # 创建一个唯一的数据集名称\n","dataset = client.create_dataset(dataset_name=dataset_name) # 创建一个数据集\n","\n","# 填充数据集\n","for _, row in eval_set.iterrows(): # 遍历eval_set的每一行\n"," # 获取问题和答案\n"," q = row[\"Question\"] # 获取问题\n"," a = row[\"Answer\"] # 获取答案\n"," # 使用这些值创建一个example\n"," client.create_example(\n"," inputs={\"question\": q}, outputs={\"answer\": a}, dataset_id=dataset.id\n"," ) # 在数据集中创建一个example,包括输入问题和输出答案"]},{"cell_type":"code","execution_count":36,"id":"3c4faf4b-f29f-4a42-9cf2-bfbb5158ab59","metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["View the evaluation results for project 'CPI Eval 9648e7fe-5ae2-469f-8701-33c63212d126-baseline' at:\n","https://smith.langchain.com/o/1fa8b1f4-fcb9-4072-9aa9-983e35ad61b8/projects/p/533846be-d907-4d9c-82db-ce2f1a18fdbf?eval=true\n","\n","View all tests for Dataset CPI Eval 9648e7fe-5ae2-469f-8701-33c63212d126 at:\n","https://smith.langchain.com/datasets/d1762232-5e01-40e7-9978-63002a4c95a3\n","[------------------------------------------------->] 4/4View the evaluation results for project 'CPI Eval 9648e7fe-5ae2-469f-8701-33c63212d126-mv_text' at:\n","https://smith.langchain.com/o/1fa8b1f4-fcb9-4072-9aa9-983e35ad61b8/projects/p/f5caeede-6f8e-46f7-b4f2-9f23daa31eda?eval=true\n","\n","View all tests for Dataset CPI Eval 9648e7fe-5ae2-469f-8701-33c63212d126 at:\n","https://smith.langchain.com/datasets/d1762232-5e01-40e7-9978-63002a4c95a3\n","[------------------------------------------------->] 4/4View the evaluation results for project 'CPI Eval 9648e7fe-5ae2-469f-8701-33c63212d126-mv_img' at:\n","https://smith.langchain.com/o/1fa8b1f4-fcb9-4072-9aa9-983e35ad61b8/projects/p/48cf1002-7ae2-451d-a9b1-5bd8088f6a69?eval=true\n","\n","View all tests for Dataset CPI Eval 9648e7fe-5ae2-469f-8701-33c63212d126 at:\n","https://smith.langchain.com/datasets/d1762232-5e01-40e7-9978-63002a4c95a3\n","[------------------------------------------------->] 4/4View the evaluation results for project 'CPI Eval 9648e7fe-5ae2-469f-8701-33c63212d126-mm_embd' at:\n","https://smith.langchain.com/o/1fa8b1f4-fcb9-4072-9aa9-983e35ad61b8/projects/p/aaa1c2e3-79b0-43e0-b5d5-8e3d00a51d50?eval=true\n","\n","View all tests for Dataset CPI Eval 9648e7fe-5ae2-469f-8701-33c63212d126 at:\n","https://smith.langchain.com/datasets/d1762232-5e01-40e7-9978-63002a4c95a3\n","[------------------------------------------------->] 4/4"]}],"source":["\n","from langchain.smith import RunEvalConfig # 导入RunEvalConfig类\n","\n","eval_config = RunEvalConfig( # 创建RunEvalConfig对象\n"," evaluators=[\"qa\"], # 设置评估器为\"qa\"\n",")\n","\n","\n","def run_eval(chain, run_name, dataset_name): # 定义名为run_eval的函数,接受chain, run_name, dataset_name三个参数\n"," _ = client.run_on_dataset( # 调用client的run_on_dataset方法\n"," dataset_name=dataset_name, # 设置dataset_name参数为dataset_name\n"," llm_or_chain_factory=lambda: (lambda x: x[\"question\"] + suffix_for_images) # 使用lambda函数创建llm_or_chain_factory\n"," | chain, # 将chain作为参数传递给llm_or_chain_factory\n"," evaluation=eval_config, # 设置evaluation参数为eval_config\n"," project_name=run_name, # 设置project_name参数为run_name\n"," )\n","\n","\n","for chain, run in zip( # 使用zip函数同时迭代chain和[\"baseline\", \"mv_text\", \"mv_img\", \"mm_embd\"]列表\n"," [chain_baseline, chain_mv_text, chain_multimodal_mv_img, chain_multimodal_embd], # 定义包含四个chain对象的列表\n"," [\"baseline\", \"mv_text\", \"mv_img\", \"mm_embd\"], # 定义包含四个字符串的列表\n","):\n"," run_eval(chain, dataset_name + \"-\" + run, dataset_name) # 调用run_eval函数,传递chain, dataset_name + \"-\" + run, dataset_name三个参数\n"]}],"metadata":{"kernelspec":{"display_name":"Python 3 (ipykernel)","language":"python","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.9.16"}},"nbformat":4,"nbformat_minor":5}