[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-steamship-core--steamship-langchain":3,"tool-steamship-core--steamship-langchain":61},[4,18,26,36,44,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":10,"last_commit_at":24,"category_tags":25,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":32,"last_commit_at":33,"category_tags":34,"status":17},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",149489,2,"2026-04-10T11:32:46",[14,13,35],"语言模型",{"id":37,"name":38,"github_repo":39,"description_zh":40,"stars":41,"difficulty_score":32,"last_commit_at":42,"category_tags":43,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",108322,"2026-04-10T11:39:34",[14,15,13],{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":32,"last_commit_at":50,"category_tags":51,"status":17},6121,"gemini-cli","google-gemini\u002Fgemini-cli","gemini-cli 是一款由谷歌推出的开源 AI 命令行工具，它将强大的 Gemini 大模型能力直接集成到用户的终端环境中。对于习惯在命令行工作的开发者而言，它提供了一条从输入提示词到获取模型响应的最短路径，无需切换窗口即可享受智能辅助。\n\n这款工具主要解决了开发过程中频繁上下文切换的痛点，让用户能在熟悉的终端界面内直接完成代码理解、生成、调试以及自动化运维任务。无论是查询大型代码库、根据草图生成应用，还是执行复杂的 Git 操作，gemini-cli 
都能通过自然语言指令高效处理。\n\n它特别适合广大软件工程师、DevOps 人员及技术研究人员使用。其核心亮点包括支持高达 100 万 token 的超长上下文窗口，具备出色的逻辑推理能力；内置 Google 搜索、文件操作及 Shell 命令执行等实用工具；更独特的是，它支持 MCP（模型上下文协议），允许用户灵活扩展自定义集成，连接如图像生成等外部能力。此外，个人谷歌账号即可享受免费的额度支持，且项目基于 Apache 2.0 协议完全开源，是提升终端工作效率的理想助手。",100752,"2026-04-10T01:20:03",[52,13,15,14],"插件",{"id":54,"name":55,"github_repo":56,"description_zh":57,"stars":58,"difficulty_score":32,"last_commit_at":59,"category_tags":60,"status":17},4721,"markitdown","microsoft\u002Fmarkitdown","MarkItDown 是一款由微软 AutoGen 团队打造的轻量级 Python 工具，专为将各类文件高效转换为 Markdown 格式而设计。它支持 PDF、Word、Excel、PPT、图片（含 OCR）、音频（含语音转录）、HTML 乃至 YouTube 链接等多种格式的解析，能够精准提取文档中的标题、列表、表格和链接等关键结构信息。\n\n在人工智能应用日益普及的今天，大语言模型（LLM）虽擅长处理文本，却难以直接读取复杂的二进制办公文档。MarkItDown 恰好解决了这一痛点，它将非结构化或半结构化的文件转化为模型“原生理解”且 Token 效率极高的 Markdown 格式，成为连接本地文件与 AI 分析 pipeline 的理想桥梁。此外，它还提供了 MCP（模型上下文协议）服务器，可无缝集成到 Claude Desktop 等 LLM 应用中。\n\n这款工具特别适合开发者、数据科学家及 AI 研究人员使用，尤其是那些需要构建文档检索增强生成（RAG）系统、进行批量文本分析或希望让 AI 助手直接“阅读”本地文件的用户。虽然生成的内容也具备一定可读性，但其核心优势在于为机器",93400,"2026-04-06T19:52:38",[52,14],{"id":62,"github_repo":63,"name":64,"description_en":64,"description_zh":65,"ai_summary_zh":65,"readme_en":66,"readme_zh":67,"quickstart_zh":68,"use_case_zh":69,"hero_image_url":70,"owner_login":71,"owner_name":72,"owner_avatar_url":73,"owner_bio":74,"owner_company":75,"owner_location":75,"owner_email":76,"owner_twitter":77,"owner_website":78,"owner_url":79,"languages":80,"stars":85,"forks":86,"last_commit_at":87,"license":88,"difficulty_score":32,"env_os":89,"env_gpu":89,"env_ram":89,"env_deps":90,"category_tags":95,"github_topics":75,"view_count":32,"oss_zip_url":75,"oss_zip_packed_at":75,"status":17,"created_at":96,"updated_at":97,"faqs":98,"releases":129},6213,"steamship-core\u002Fsteamship-langchain","steamship-langchain","steamship-langchain 是连接 LangChain 框架与 Steamship 部署平台的桥梁，旨在帮助开发者轻松将语言 AI 应用从原型转化为生产级服务。它解决了开发者在模型落地时面临的诸多难题，如 API 接口搭建、系统横向扩展、状态持久化存储、身份认证授权以及多租户支持等，让用户无需重复造轮子即可拥有企业级的基础设施能力。\n\n这款工具主要面向使用 LangChain 进行开发的 AI 工程师和软件开发者。其核心技术亮点在于提供了一套丰富的适配器，无缝集成了大模型调用缓存、日志回调处理、向量数据库存储以及多种数据加载器（支持 GitHub 仓库、YouTube 视频、Sphinx 文档等来源）。特别值得一提的是，它还内置了基于抽象语法树（AST）的 Python 代码分割器，能更智能地处理代码上下文。通过简单的 pip 安装，开发者即可利用 Steamship 的生态能力，快速构建具备自动扩缩容、完整监控日志及与其他技能（如音频转录）协同工作的全生命周期 AI 应用，极大提升了从实验到上线的效率。","# Steamship Python Client Library For LangChain (🦜️🔗)\n\n[![Steamship](https:\u002F\u002Fraw.githubusercontent.com\u002Fsteamship-core\u002Fpython-client\u002Fmain\u002Fbadge.svg)](https:\u002F\u002Fwww.steamship.com\u002Fbuild\u002Flangchain-apps?utm_source=github&utm_medium=badge&utm_campaign=github_repo&utm_id=github_langchain_repo) [![Twitter](https:\u002F\u002Fimg.shields.io\u002Ftwitter\u002Furl\u002Fhttps\u002Ftwitter.com\u002Fgetsteamship.svg?style=social&label=Follow%20%40GetSteamship)](https:\u002F\u002Ftwitter.com\u002FGetSteamship) [![](https:\u002F\u002Fdcbadge.vercel.app\u002Fapi\u002Fserver\u002F5Vry5ANVwT?compact=true&style=flat)](https:\u002F\u002Fdiscord.gg\u002F5Vry5ANVwT)\n\n[![License: MIT](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLicense-MIT-yellow.svg)](https:\u002F\u002Fopensource.org\u002Flicenses\u002FMIT) [![Run Tests](https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Factions\u002Fworkflows\u002Ftest-main.yml\u002Fbadge.svg?branch=main)](https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Factions\u002Fworkflows\u002Ftest-main.yml)\n\n\n[Steamship](https:\u002F\u002Fsteamship.com\u002F) is the fastest way to build, ship, and use full-lifecycle language AI.\n\nThis repository contains 
[LangChain](https:\u002F\u002Flangchain.readthedocs.io\u002Fen\u002Flatest\u002F) adapters for Steamship, enabling \nLangChain developers to rapidly deploy their apps on Steamship to automatically get:\n\n- Production-ready API endpoint(s)\n- Horizontal scaling across dependencies \u002F backends\n- Persistent storage of app state (including caches)\n- Built-in support for Authn\u002Fz \n- Multi-tenancy support\n- Seamless integration with other Steamship skills (ex: audio transcription) \n- Usage Metrics and Logging\n- And more...\n\nRead more about Steamship and LangChain on our [website](https:\u002F\u002Fwww.steamship.com\u002Fbuild\u002Flangchain-apps?utm_source=github&utm_medium=explainer&utm_campaign=github_repo&utm_id=github_langchain_repo). \n\n## Installing\n\nInstall via pip:\n\n```commandline\npip install steamship-langchain\n```\n\n## Adapters\n\nInitial support is offered for the following (with more to follow soon):\n- LLMs\n  - An adapter is provided for Steamship's OpenAI integration (`steamship_langchain.llms.OpenAI`)\n  - An adapter is provided for *caching* LLM calls, via Steamship's Key-Value store (`SteamshipCache`) \n- Callbacks\n  - A callback that uses Python's `logging` module to record events is provided (`steamship_langchain.callbacks.LoggingCallbackHandler`). This can be used with `ship logs` to access verbose logs when deployed.\n- Document Loaders\n  - An adapter for exporting Steamship Files as LangChain Documents is provided (`steamship_langchain.document_loaders.SteamshipLoader`)\n- Tools\n  - Search:\n    - An adapter is provided for Steamship's SERPAPI integration (`SteamshipSERP`)\n- Memory\n  - Chat History (`steamship_langchain.memory.ChatMessageHistory`)\n- VectorStores\n  - An adapter is provided for a persistent VectorStore (`steamship_langchain.vectorstores.SteamshipVectorStore`)\n- Text Splitters\n  - A splitter for Python code, based on the AST, is provided (`steamship_langchain.python_splitter.PythonCodeSplitter`). 
This provides additional context for code snippets (parent classes) while breaking the code into segments around function definitions.\n- Miscellaneous Utilities\n  - Importing data into Steamship\n    - In order to take advantage of Steamship's persistent storage, an initial set of loader utilities is provided for a variety of sources, including:\n      - Text files: `steamship_langchain.file_loaders.TextFileLoader`\n      - Directories: `steamship_langchain.file_loaders.DirectoryLoader`\n      - GitHub repositories: `steamship_langchain.file_loaders.GitHubRepositoryLoader`\n      - Sphinx documentation sites: `steamship_langchain.file_loaders.SphinxSiteLoader` (and others)\n      - YouTube videos: `steamship_langchain.file_loaders.YouTubeFileLoader`\n      - Various text and image formats: `steamship_langchain.file_loaders.UnstructuredFileLoader`\n\n## 📖 Documentation\nPlease see [here](https:\u002F\u002Fsteamship-langchain.readthedocs.org\u002F) for full documentation on:\n\n- Getting started (installation, setting up the environment, simple examples)\n- How-To examples (demos, integrations, helper functions)\n\n## Example Use Cases\n\nHere are a few examples of using LangChain on Steamship:\n- [Basic Prompting](#basic-prompting)\n- [Self Ask With Search](#self-ask-with-search)\n- [ChatBot](#chatbot)\n- [Summarize Audio](#summarize-audio--async-chaining-)\n- [Question Answering With Sources](#question-answering-with-sources--embeddings-)\n\nThe examples use temporary workspaces to provide full cleanup during experimentation.\n[Workspaces](https:\u002F\u002Fdocs.steamship.com\u002Fworkspaces\u002Findex.html) provide a unit of tenant isolation within Steamship.\nFor production uses, persistent workspaces can be created and retrieved via `Steamship(workspace_handle=\"my_workspace\")`.\n\n> **NOTE**\n> These examples omit `import` blocks. Please consult the `examples\u002F` directory for complete source code. \n\n> **NOTE** \n> Client examples assume that the user has a Steamship API key and that it is exposed to the environment (see: [API Keys](#api-keys))\n\n### Basic Prompting\n\nExample of a basic prompt using a Steamship LLM integration (full source: [examples\u002Fgreeting](.\u002Fexamples\u002Fgreeting))\n\n[![Run on Repl.it](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsteamship-core_steamship-langchain_readme_26e23a0dcf49.png)](https:\u002F\u002Freplit.com\u002F@SteamshipDoug\u002FSimple-LangChain-Prompting-on-Steamship)\n\n#### Server Snippet\n\n```python\nfrom steamship_langchain.llms import OpenAI\n\n@post(\"greet\")\ndef greet(self, user: str) -> str:\n    prompt = PromptTemplate(\n      input_variables=[\"user\"],\n      template=\n      \"Create a welcome message for user {user}. Thank them for running their LangChain app on Steamship. 
\"\n      \"Encourage them to deploy their app via `ship it` when ready.\",\n    )\n    llm = OpenAI(client=self.client, temperature=0.8)\n    return llm(prompt.format(user=user))\n```\n\n#### Client Snippet\n\n```python\nwith Steamship.temporary_workspace() as client:\n    api = client.use(\"my-langchain-app\")\n    while True:\n        name = input(\"Name: \")\n        print(f'{api.invoke(\"\u002Fgreet\", user=name).strip()}\\n')\n```\n\n### Self Ask With Search\n\nExecutes the LangChain `self-ask-with-search` agent using the Steamship GPT and SERP Tool plugins (full source: [examples\u002Fself-ask-with-search](.\u002Fexamples\u002Fself-ask-with-search))\n\n[![Run on Repl.it](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsteamship-core_steamship-langchain_readme_26e23a0dcf49.png)](https:\u002F\u002Freplit.com\u002F@SteamshipDoug\u002FSelf-Ask-With-Search-with-LangChain-and-Steamship)\n\n#### Server Snippet\n\n```python\nfrom steamship_langchain.llms import OpenAI\n\n@post(\"\u002Fself_ask_with_search\")\ndef self_ask_with_search(self, query: str) -> str:\n    llm = OpenAI(client=self.client, temperature=0.0, cache=True)\n    serp_tool = SteamshipSERP(client=self.client, cache=True)\n    tools = [Tool(name=\"Intermediate Answer\", func=serp_tool.search)]\n    self_ask_with_search = initialize_agent(tools, llm, agent=\"self-ask-with-search\", verbose=False)\n    return self_ask_with_search.run(query)\n```\n\n#### Client Snippet\n\n```python\nwith Steamship.temporary_workspace() as client:\n    api = client.use(\"my-langchain-app\")\n    query = \"Who was president the last time the Twins won the World Series?\"\n    print(f\"Query: {query}\")\n    print(f\"Answer: {api.invoke('\u002Fself_ask_with_search', query=query)}\")\n```\n\n### ChatBot\n\nImplements a basic Chatbot (similar to ChatGPT) in Steamship with LangChain (full source: [examples\u002Fchatbot](.\u002Fexamples\u002Fchatbot)).\n\n[![Run on Repl.it](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsteamship-core_steamship-langchain_readme_26e23a0dcf49.png)](https:\u002F\u002Freplit.com\u002F@SteamshipDoug\u002FPersistent-ChatBot-Using-Steamship-and-GPT4)\n\n> **NOTE**\n> The full ChatBot transcript will persist for the lifetime of the Steamship Workspace. 
\n\n#### Server Snippet\n\n```python\nfrom langchain.memory import ConversationBufferWindowMemory\n\nfrom steamship_langchain.llms import OpenAIChat\nfrom steamship_langchain.memory import ChatMessageHistory\n\n@post(\"\u002Fsend_message\")\ndef send_message(self, message: str, chat_history_handle: str) -> str:\n  chat_memory = ChatMessageHistory(client=self.client, key=chat_history_handle)\n  mem = ConversationBufferWindowMemory(chat_memory=chat_memory, k=2)\n  chatgpt = LLMChain(\n    llm=OpenAIChat(client=self.client, temperature=0),\n    prompt=CHATBOT_PROMPT,\n    memory=mem,\n  )\n\n  return chatgpt.predict(human_input=message)\n```\n\n#### Client Snippet\n\n```python\nwith Steamship.temporary_workspace() as client:\n    api = client.use(\"my-langchain-app\")\n    session_handle = \"foo-user-session-1234\"\n    while True:\n        msg = input(\"You: \")\n        print(f\"AI: {api.invoke('\u002Fsend_message', message=msg, chat_history_handle=session_handle)}\")\n```\n\n### Summarize Audio (Async Chaining)\n\n> [![stability-experimental](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fstability-experimental-orange.svg)](https:\u002F\u002Fgithub.com\u002Fmkenney\u002Fsoftware-guides\u002Fblob\u002Fmaster\u002FSTABILITY-BADGES.md#experimental)\n>\n> Audio transcription support is not yet considered fully production-ready on Steamship. We are working hard on\n> productionizing support for audio transcription at scale, but there may be some existing issues that you encounter\n> as you try this out.\n\n\nThis provides an example of using LangChain to process audio transcriptions\nobtained via Steamship's speech-to-text plugins (full source: [examples\u002Fsummarize-audio](.\u002Fexamples\u002Fsummarize-audio))\n\nA brief introduction to the Task system (and Task dependencies, for chaining) is\nprovided in this example. 
Here, we use `task.wait()` style polling, but time-based\n`task.refresh()` style polling, etc., is also available.\n\n[![Run on Repl.it](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsteamship-core_steamship-langchain_readme_26e23a0dcf49.png)](https:\u002F\u002Freplit.com\u002F@SteamshipDoug\u002FSummarize-Audio-with-LangChain-and-Steamship)\n\n#### Server Snippet\n```python\nfrom steamship_langchain.llms import OpenAI\n\n@post(\"summarize_file\")\ndef summarize_file(self, file_handle: str) -> str:\n    file = File.get(self.client, handle=file_handle)\n    text_splitter = CharacterTextSplitter()\n    texts = []\n    for block in file.blocks:\n        texts.extend(text_splitter.split_text(block.text))\n    docs = [Document(page_content=t) for t in texts]\n    llm = OpenAI(client=self.client, cache=True)\n    chain = load_summarize_chain(llm, chain_type=\"map_reduce\")\n    return chain.run(docs)\n\n@post(\"summarize_audio_file\")\ndef summarize_audio_file(self, audio_file_handle: str) -> Task[str]:\n    transcriber = self.client.use_plugin(\"whisper-s2t-blockifier\")\n    audio_file = File.get(self.client, handle=audio_file_handle)\n    transcribe_task = audio_file.blockify(plugin_instance=transcriber.handle)\n    return self.invoke_later(\"summarize_file\", wait_on_tasks=[transcribe_task], arguments={\"file_handle\": audio_file.handle})\n```\n\n#### Client Snippet\n```python\n\nchurchill_yt_url = \"https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=MkTw3_PmKtc\"\n\nwith Steamship.temporary_workspace() as client:\n    api = client.use(\"my-langchain-app\")\n    yt_importer = client.use_plugin(\"youtube-file-importer\")\n    import_task = File.create_with_plugin(client=client,\n                                         plugin_instance=yt_importer.handle, \n                                         url=churchill_yt_url)\n    import_task.wait()\n    audio_file = import_task.output\n    \n    summarize_task_response = api.invoke(\"\u002Fsummarize_audio_file\", audio_file_handle=audio_file.handle)\n    summarize_task = Task(client=client, **summarize_task_response)\n    summarize_task.wait()\n    \n    if summarize_task.state == TaskState.succeeded:\n      summary = base64.b64decode(summarize_task.output).decode(\"utf-8\")\n      print(f\"Summary: {summary.strip()}\")\n```\n\n### Question Answering with Sources (Embeddings)\n\nProvides a basic example of using Steamship to manage embeddings and power a LangChain agent\nfor question answering with sources (full source: [examples\u002Fqa_with_sources](.\u002Fexamples\u002Fqa_with_sources))\n\n> **NOTE** \n> The embeddings will persist for the lifetime of the Workspace.\n\n[![Run on Repl.it](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsteamship-core_steamship-langchain_readme_26e23a0dcf49.png)](https:\u002F\u002Freplit.com\u002F@SteamshipDoug\u002FQuestion-Answering-with-Sources-using-LangChain-on-Steamship)\n\n#### Server Snippet\n\n```python\nfrom steamship_langchain.llms import OpenAI\n\ndef __init__(self, **kwargs):\n    super().__init__(**kwargs)\n    langchain.llm_cache = SteamshipCache(self.client)\n    self.llm = OpenAI(client=self.client, temperature=0, cache=True, max_words=250)\n    # create a persistent embedding store  \n    self.index = SteamshipVectorStore(\n        client=self.client, index_name=\"qa-demo\", embedding=\"text-embedding-ada-002\"\n    )\n\n@post(\"index_file\")\ndef index_file(self, file_handle: str) -> bool:\n    text_splitter = CharacterTextSplitter(chunk_size=250, chunk_overlap=0)\n    file = 
File.get(self.client, handle=file_handle)\n    texts = [text for block in file.blocks for text in text_splitter.split_text(block.text)]\n    metadatas = [{\"source\": f\"{file.handle}-offset-{i * 250}\"} for i, text in enumerate(texts)]\n\n    self.index.add_texts(texts=texts, metadatas=metadatas)\n    return True\n\n@post(\"search_embeddings\")\ndef search_embeddings(self, query: str, k: int) -> List[SearchResult]:\n    \"\"\"Return the `k` closest items in the embedding index.\"\"\"\n    search_results = self.index.search(query, k=k)\n    search_results.wait()\n    items = search_results.output.items\n    return items\n\n\n@post(\"\u002Fqa_with_sources\")\ndef qa_with_sources(self, query: str) -> Dict[str, Any]:\n    chain = VectorDBQAWithSourcesChain.from_chain_type(\n        OpenAI(client=self.client, temperature=0),\n        chain_type=\"map_reduce\",\n        vectorstore=self.index,\n    )\n\n    return chain({\"question\": query}, return_only_outputs=False)\n```\n\n#### Client Snippet\n\n```python\nwith Steamship.temporary_workspace() as client:\n    api = client.use(\"my-langchain-app\")\n    \n    # Upload the State of the Union address\n    with open(\"state_of_the_union.txt\") as f:\n        sotu_file = File.create(client, blocks=[Block(text=f.read())])\n\n    # Embed\n    api.invoke(\"\u002Findex_file\", file_handle=sotu_file.handle)\n\n    # Issue Query\n    query = \"What did the president say about Justice Breyer?\"\n    response = api.invoke(\"\u002Fqa_with_sources\", query=query)\n    print(f\"Answer: {response['result'].strip()}\")\n```\n\n## API Keys\n\nSteamship API Keys provide access to our SDK for AI models, including OpenAI, GPT, Cohere, Whisper, and more.\n\nGet your free API key here: https:\u002F\u002Fsteamship.com\u002Faccount\u002Fapi.\n\nOnce you have an API Key, you can:\n* Set the env var `STEAMSHIP_API_KEY` for your client\n* Pass it directly via `Steamship(api_key=)` or `Steamship.temporary_workspace(api_key=)`.\n\nAlternatively, you can run `ship login`, which will guide you through setting up your environment.\n\n## Deploying on Steamship\n\nDeploying LangChain apps on Steamship is simple: `ship it`.\n\nFrom your package directory (where your `api.py` lives), you can issue the `ship it` command to generate a manifest file and push your package to Steamship. You may then use the Steamship SDK to create instances of your package in Workspaces as best fits your needs.\n\nMore on deployment and Workspaces can be found in [our docs](https:\u002F\u002Fdocs.steamship.com\u002F).\n\n## Feedback and Support\n\nHave any feedback on this package? Or on [Steamship](https:\u002F\u002Fsteamship.com) in general?\n\nWe'd love to hear from you. 
Please reach out to: hello@steamship.com.\n","# LangChain 的 Steamship Python 客户端库 (🦜️🔗)\n\n[![Steamship](https:\u002F\u002Fraw.githubusercontent.com\u002Fsteamship-core\u002Fpython-client\u002Fmain\u002Fbadge.svg)](https:\u002F\u002Fwww.steamship.com\u002Fbuild\u002Flangchain-apps?utm_source=github&utm_medium=badge&utm_campaign=github_repo&utm_id=github_langchain_repo) [![Twitter](https:\u002F\u002Fimg.shields.io\u002Ftwitter\u002Furl\u002Fhttps\u002Ftwitter.com\u002Fgetsteamship.svg?style=social&label=Follow%20%40GetSteamship)](https:\u002F\u002Ftwitter.com\u002FGetSteamship) [![](https:\u002F\u002Fdcbadge.vercel.app\u002Fapi\u002Fserver\u002F5Vry5ANVwT?compact=true&style=flat)](https:\u002F\u002Fdiscord.gg\u002F5Vry5ANVwT)\n\n[![License: MIT](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLicense-MIT-yellow.svg)](https:\u002F\u002Fopensource.org\u002Flicenses\u002FMIT) [![运行测试](https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Factions\u002Fworkflows\u002Ftest-main.yml\u002Fbadge.svg?branch=main)](https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Factions\u002Fworkflows\u002Ftest-main.yml)\n\n\n[Steamship](https:\u002F\u002Fsteamship.com\u002F) 是构建、部署和使用全生命周期语言 AI 的最快方式。\n\n本仓库包含用于 Steamship 的 [LangChain](https:\u002F\u002Flangchain.readthedocs.io\u002Fen\u002Flatest\u002F) 适配器，使 LangChain 开发者能够快速在 Steamship 上部署其应用，从而自动获得：\n\n- 生产就绪的 API 端点\n- 跨依赖项\u002F后端的水平扩展\n- 应用状态的持久化存储（包括缓存）\n- 内置的身份验证与授权支持\n- 多租户支持\n- 与其他 Steamship 技能的无缝集成（例如音频转录）\n- 使用情况指标与日志记录\n- 以及更多……\n\n有关 Steamship 和 LangChain 的更多信息，请访问我们的 [网站](https:\u002F\u002Fwww.steamship.com\u002Fbuild\u002Flangchain-apps?utm_source=github&utm_medium=explainer&utm_campaign=github_repo&utm_id=github_langchain_repo)。\n\n## 安装\n\n通过 pip 安装：\n\n```commandline\npip install steamship-langchain\n```\n\n## 适配器\n\n目前提供以下初步支持（更多功能即将推出）：\n- LLM\n  - 提供了针对 Steamship OpenAI 集成的适配器 (`steamship_langchain.llms.OpenAI`)\n  - 提供了通过 Steamship 键值存储实现 LLM 调用缓存的适配器 (`SteamshipCache`)\n- 回调\n  - 提供了一个使用 Python `logging` 模块记录事件的回调 (`steamship_langchain.callbacks.LoggingCallbackHandler`)。此回调可与 `ship logs` 配合使用，在部署时访问详细日志。\n- 文档加载器\n  - 提供了一个将 Steamship 文件导出为 LangChain 文档的适配器 (`steamship_langchain.document_loaders.SteamshipLoader`)\n- 工具\n  - 搜索：\n    - 提供了针对 Steamship SERPAPI 集成的适配器 (`SteamshipSERP`)\n- 记忆\n  - 对话历史 (`steamship_langchain.memory.ChatMessageHistory`)\n- 向量存储\n  - 提供了一个持久化向量存储的适配器 (`steamship_langchain.vectorstores.SteamshipVectorStore`)\n- 文本分割器\n  - 提供了一个基于 AST 的 Python 代码分割器 (`steamship_langchain.python_splitter.PythonCodeSplitter`)。该分割器在以函数定义为边界拆分代码的同时，还能为代码片段提供额外上下文信息（父类）。\n- 其他实用工具\n  - 将数据导入 Steamship\n    - 为了充分利用 Steamship 的持久化存储能力，我们提供了一系列初始加载工具，适用于多种数据源，包括：\n      - 文本文件：`steamship_langchain.file_loaders.TextFileLoader`\n      - 目录：`steamship_langchain.file_loaders.DirectoryLoader`\n      - GitHub 仓库：`steamship_langchain.file_loaders.GitHubRepositoryLoader`\n      - Sphinx 文档站点：`steamship_langchain.file_loaders.SphinxSiteLoader`（以及其他）\n      - YouTube 视频：`steamship_langchain.file_loaders.YouTubeFileLoader`\n      - 各种文本和图像格式：`steamship_langchain.file_loaders.UnstructuredFileLoader`\n\n## 📖 文档\n请参阅我们的 [文档](https:\u002F\u002Fsteamship-langchain.readthedocs.org\u002F)，了解关于以下内容的完整说明：\n\n- 入门指南（安装、环境设置、简单示例）\n- 操作示例（演示、集成、辅助函数）\n\n## 示例用法\n\n以下是几个在 Steamship 上使用 LangChain 的示例：\n- [基础提示](#basic-prompting)\n- [自问自答与搜索](#self-ask-with-search)\n- [聊天机器人](#chatbot)\n- [音频摘要](#summarize-audio--async-chaining-)\n- 
[带来源的问题回答](#question-answering-with-sources--embeddings-)\n\n这些示例使用临时工作空间，以便在实验过程中进行完全清理。\n[工作空间](https:\u002F\u002Fdocs.steamship.com\u002Fworkspaces\u002Findex.html) 在 Steamship 中提供了租户隔离单元。\n对于生产用途，可以通过 `Steamship(workspace_handle=\"my_workspace\")` 创建并获取持久化工作空间。\n\n> **注意**\n> 这些示例省略了 `import` 语句。完整的源代码请参考 `examples\u002F` 目录。\n\n> **注意**\n> 客户端示例假定用户已拥有 Steamship API 密钥，并且该密钥已暴露于环境变量中（详见：[API 密钥](#api-keys)）。\n\n### 基础提示\n\n使用 Steamship LLM 集成的基本提示示例（完整源码：[examples\u002Fgreeting](.\u002Fexamples\u002Fgreeting)）\n\n[![在 Repl.it 上运行](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsteamship-core_steamship-langchain_readme_26e23a0dcf49.png)](https:\u002F\u002Freplit.com\u002F@SteamshipDoug\u002FSimple-LangChain-Prompting-on-Steamship)\n\n#### 服务器片段\n\n```python\nfrom steamship_langchain.llms import OpenAI\n\n@post(\"greet\")\ndef greet(self, user: str) -> str:\n    prompt = PromptTemplate(\n      input_variables=[\"user\"],\n      template=\n      \"为用户 {user} 创作一条欢迎消息。感谢他们使用 LangChain 在 Steamship 上运行应用。鼓励他们在准备就绪时通过 `ship it` 部署应用。\",\n    )\n    llm = OpenAI(client=self.client, temperature=0.8)\n    return llm(prompt.format(user=user))\n```\n\n#### 客户端片段\n\n```python\nwith Steamship.temporary_workspace() as client:\n    api = client.use(\"my-langchain-app\")\n    while True:\n        name = input(\"姓名：\")\n        print(f'{api.invoke(\"\u002Fgreet\", user=name).strip()}\\n')\n```\n\n### 自我提问与搜索\n\n使用 Steamship GPT 和 SERP Tool 插件执行 LangChain 的 `self-ask-with-search` 代理（完整源码：[examples\u002Fself-ask-with-search](.\u002Fexamples\u002Fself-ask-with-search)）\n\n[![在 Repl.it 上运行](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsteamship-core_steamship-langchain_readme_26e23a0dcf49.png)](https:\u002F\u002Freplit.com\u002F@SteamshipDoug\u002FSelf-Ask-With-Search-with-LangChain-and-Steamship)\n\n#### 服务器片段\n\n```python\nfrom steamship_langchain.llms import OpenAI\n\n@post(\"\u002Fself_ask_with_search\")\ndef self_ask_with_search(self, query: str) -> str:\n    llm = OpenAI(client=self.client, temperature=0.0, cache=True)\n    serp_tool = SteamshipSERP(client=self.client, cache=True)\n    tools = [Tool(name=\"Intermediate Answer\", func=serp_tool.search)]\n    self_ask_with_search = initialize_agent(tools, llm, agent=\"self-ask-with-search\", verbose=False)\n    return self_ask_with_search.run(query)\n```\n\n#### 客户端片段\n\n```python\nwith Steamship.temporary_workspace() as client:\n    api = client.use(\"my-langchain-app\")\n    query = \"双城队上一次赢得世界大赛时，谁是总统？\"\n    print(f\"查询：{query}\")\n    print(f\"答案：{api.invoke('\u002Fself_ask_with_search', query=query)}\")\n```\n\n### 聊天机器人\n\n在 Steamship 中使用 LangChain 实现一个基础的聊天机器人（类似于 ChatGPT）（完整源码：[examples\u002Fchatbot](.\u002Fexamples\u002Fchatbot)）。\n\n[![在 Repl.it 上运行](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsteamship-core_steamship-langchain_readme_26e23a0dcf49.png)](https:\u002F\u002Freplit.com\u002F@SteamshipDoug\u002FPersistent-ChatBot-Using-Steamship-and-GPT4)\n\n> **注意**\n> 整个聊天机器人的对话记录将在 Steamship 工作空间的生命周期内持续保存。\n\n#### 服务器片段\n\n```python\nfrom langchain.memory import ConversationBufferWindowMemory\n\nfrom steamship_langchain.llms import OpenAIChat\nfrom steamship_langchain.memory import ChatMessageHistory\n\n@post(\"\u002Fsend_message\")\ndef send_message(self, message: str, chat_history_handle: str) -> str:\n  chat_memory = ChatMessageHistory(client=self.client, key=chat_history_handle)\n  mem = ConversationBufferWindowMemory(chat_memory=chat_memory, k=2)\n  chatgpt = LLMChain(\n    llm=OpenAIChat(client=self.client, 
temperature=0),\n    prompt=CHATBOT_PROMPT,\n    memory=mem,\n  )\n\n  return chatgpt.predict(human_input=message)\n```\n\n#### 客户端片段\n\n```python\nwith Steamship.temporary_workspace() as client:\n    api = client.use(\"my-langchain-app\")\n    session_handle = \"foo-user-session-1234\"\n    while True:\n        msg = input(\"你：\")\n        print(f\"AI：{api.invoke('\u002Fsend_message', message=msg, chat_history_handle=session_handle)}\")\n```\n\n### 音频摘要（异步链式调用）\n\n> [![stability-experimental](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fstability-experimental-orange.svg)](https:\u002F\u002Fgithub.com\u002Fmkenney\u002Fsoftware-guides\u002Fblob\u002Fmaster\u002FSTABILITY-BADGES.md#experimental)\n>\n> 音频转录支持在 Steamship 上尚未被视为完全生产就绪。我们正在努力实现大规模音频转录的生产级支持，但在尝试此功能时，您可能会遇到一些现有问题。\n\n\n本示例展示了如何使用 LangChain 处理通过 Steamship 的语音转文本插件获取的音频转录内容（完整源码：[examples\u002Fsummarize-audio](.\u002Fexamples\u002Fsummarize-audio)）。\n\n该示例简要介绍了任务系统（以及用于链式调用的任务依赖关系）。这里我们使用了 `task.wait()` 风格的轮询方式，但也可以使用基于时间的 `task.refresh()` 等其他轮询方式。\n\n[![在 Repl.it 上运行](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsteamship-core_steamship-langchain_readme_26e23a0dcf49.png)](https:\u002F\u002Freplit.com\u002F@SteamshipDoug\u002FSummarize-Audio-with-LangChain-and-Steamship)\n\n#### 服务器片段\n```python\nfrom steamship_langchain.llms import OpenAI\n\n@post(\"summarize_file\")\ndef summarize_file(self, file_handle: str) -> str:\n    file = File.get(self.client, handle=file_handle)\n    text_splitter = CharacterTextSplitter()\n    texts = []\n    for block in file.blocks:\n        texts.extend(text_splitter.split_text(block.text))\n    docs = [Document(page_content=t) for t in texts]\n    llm = OpenAI(client=self.client, cache=True)\n    chain = load_summarize_chain(llm, chain_type=\"map_reduce\")\n    return chain.run(docs)\n\n@post(\"summarize_audio_file\")\ndef summarize_audio_file(self, audio_file_handle: str) -> Task[str]:\n    transcriber = self.client.use_plugin(\"whisper-s2t-blockifier\")\n    audio_file = File.get(self.client, handle=audio_file_handle)\n    transcribe_task = audio_file.blockify(plugin_instance=transcriber.handle)\n    return self.invoke_later(\"summarize_file\", wait_on_tasks=[transcribe_task], arguments={\"file_handle\": audio_file.handle})\n```\n\n#### 客户端片段\n```python\n\nchurchill_yt_url = \"https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=MkTw3_PmKtc\"\n\nwith Steamship.temporary_workspace() as client:\n    api = client.use(\"my-langchain-app\")\n    yt_importer = client.use_plugin(\"youtube-file-importer\")\n    import_task = File.create_with_plugin(client=client,\n                                         plugin_instance=yt_importer.handle, \n                                         url=churchill_yt_url)\n    import_task.wait()\n    audio_file = import_task.output\n    \n    summarize_task_response = api.invoke(\"\u002Fsummarize_audio_file\", audio_file_handle=audio_file.handle)\n    summarize_task = Task(client=client, **summarize_task_response)\n    summarize_task.wait()\n    \n    if summarize_task.state == TaskState.succeeded:\n      summary = base64.b64decode(summarize_task.output).decode(\"utf-8\")\n      print(f\"摘要：{summary.strip()}\")\n```\n\n### 带来源的问答（嵌入）\n\n提供了一个使用 Steamship 管理嵌入并为 LangChain 代理提供支持的基本示例，用于实现带来源的问答功能（完整源码：[examples\u002Fqa_with_sources](.\u002Fexamples\u002Fqa_with_sources)）\n\n> **注意** \n> 嵌入将在工作区的生命周期内持续保存。\n\n[![在 Repl.it 
上运行](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsteamship-core_steamship-langchain_readme_26e23a0dcf49.png)](https:\u002F\u002Freplit.com\u002F@SteamshipDoug\u002FQuestion-Answering-with-Sources-using-LangChain-on-Steamship)\n\n#### 服务器代码片段\n\n```python\nfrom steamship_langchain.llms import OpenAI\n\ndef __init__(self, **kwargs):\n    super().__init__(**kwargs)\n    langchain.llm_cache = SteamshipCache(self.client)\n    self.llm = OpenAI(client=self.client, temperature=0, cache=True, max_words=250)\n    # 创建一个持久化的嵌入存储\n    self.index = SteamshipVectorStore(\n        client=self.client, index_name=\"qa-demo\", embedding=\"text-embedding-ada-002\"\n    )\n\n@post(\"index_file\")\ndef index_file(self, file_handle: str) -> bool:\n    text_splitter = CharacterTextSplitter(chunk_size=250, chunk_overlap=0)\n    file = File.get(self.client, handle=file_handle)\n    texts = [text for block in file.blocks for text in text_splitter.split_text(block.text)]\n    metadatas = [{\"source\": f\"{file.handle}-offset-{i * 250}\"} for i, text in enumerate(texts)]\n\n    self.index.add_texts(texts=texts, metadatas=metadatas)\n    return True\n\n@post(\"search_embeddings\")\ndef search_embeddings(self, query: str, k: int) -> List[SearchResult]:\n    \"\"\"返回嵌入索引中最接近的 `k` 个条目。\"\"\"\n    search_results = self.index.search(query, k=k)\n    search_results.wait()\n    items = search_results.output.items\n    return items\n\n\n@post(\"\u002Fqa_with_sources\")\ndef qa_with_sources(self, query: str) -> Dict[str, Any]:\n    chain = VectorDBQAWithSourcesChain.from_chain_type(\n        OpenAI(client=self.client, temperature=0),\n        chain_type=\"map_reduce\",\n        vectorstore=self.index,\n    )\n\n    return chain({\"question\": query}, return_only_outputs=False)\n```\n\n#### 客户端代码片段\n\n```python\nwith Steamship.temporary_workspace() as client:\n    api = client.use(\"my-langchain-app\")\n    \n    # 上传国情咨文演讲稿\n    with open(\"state_of_the_union.txt\") as f:\n        sotu_file = File.create(client, blocks=[Block(text=f.read())])\n\n    # 进行嵌入\n    api.invoke(\"\u002Findex_file\", file_handle=sotu_file.handle)\n\n    # 发出查询\n    query = \"总统对布雷耶大法官说了什么？\"\n    response = api.invoke(\"\u002Fqa_with_sources\", query=query)\n    print(f\"答案：{response['result'].strip()}\")\n```\n\n## API 密钥\n\nSteamship API 密钥可让您通过我们的 SDK 访问各类 AI 模型，包括 OpenAI、GPT、Cohere、Whisper 等。\n\n在此处获取您的免费 API 密钥：https:\u002F\u002Fsteamship.com\u002Faccount\u002Fapi。\n\n获得 API 密钥后，您可以：\n* 设置客户端的环境变量 `STEAMSHIP_API_KEY`\n* 直接通过 `Steamship(api_key=)` 或 `Steamship.temporary_workspace(api_key=)` 传递密钥。\n\n或者，您也可以运行 `ship login`，它将指导您完成环境设置。\n\n## 在 Steamship 上部署\n\n在 Steamship 上部署 LangChain 应用非常简单：只需执行 `ship it` 命令即可。\n\n从您的包目录（即 `api.py` 所在的目录）中，您可以运行 `ship it` 命令来生成清单文件，并将您的包推送到 Steamship。随后，您可以使用 Steamship SDK 根据需要在工作区中创建您的包实例。\n\n有关部署和工作区的更多信息，请参阅[我们的文档](https:\u002F\u002Fdocs.steamship.com\u002F)。\n\n## 反馈与支持\n\n您对本包或对 [Steamship](https:\u002F\u002Fsteamship.com) 整体有任何反馈吗？\n\n我们非常期待您的意见。请联系我们：hello@steamship.com。","# Steamship-LangChain 快速上手指南\n\n本指南帮助中国开发者快速将 LangChain 应用部署到 Steamship 平台，以获得生产级 API、自动扩缩容、持久化存储及多租户支持。\n\n## 环境准备\n\n在开始之前，请确保满足以下前置条件：\n\n1.  **Python 环境**：建议使用 Python 3.8 或更高版本。\n2.  **Steamship 账号与 API Key**：\n    *   访问 [Steamship 官网](https:\u002F\u002Fsteamship.com\u002F) 注册账号。\n    *   获取 API Key 并配置到环境变量中：\n        ```bash\n        export STEAMSHIP_API_KEY=your_api_key_here\n        ```\n3.  
**网络环境**：由于 Steamship 服务位于海外，建议确保网络通畅以便连接其云端服务。\n\n## 安装步骤\n\n通过 `pip` 安装官方客户端库：\n\n```commandline\npip install steamship-langchain\n```\n\n> **提示**：如果下载速度较慢，可尝试使用国内镜像源加速安装：\n> ```commandline\n> pip install steamship-langchain -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple\n> ```\n\n## 基本使用\n\n以下示例展示如何创建一个最简单的 LangChain 应用，该应用接收用户名称并生成欢迎语。此示例利用了 Steamship 的临时工作区（Temporary Workspace）进行快速测试。\n\n### 1. 服务端代码 (Server Snippet)\n\n创建一个继承自 `PackageService` 的类，定义一个 POST 接口 `\u002Fgreet`，调用 Steamship 集成的 OpenAI 模型。\n\n```python\nfrom steamship import PackageService, post\nfrom langchain.prompts import PromptTemplate\nfrom steamship_langchain.llms import OpenAI\n\nclass MyLangChainApp(PackageService):\n    @post(\"greet\")\n    def greet(self, user: str) -> str:\n        # 定义提示模板\n        prompt = PromptTemplate(\n          input_variables=[\"user\"],\n          template=\n          \"Create a welcome message for user {user}. Thank them for running their LangChain app on Steamship. \"\n          \"Encourage them to deploy their app via `ship it` when ready.\",\n        )\n        \n        # 初始化 LLM (使用 Steamship 客户端)\n        llm = OpenAI(client=self.client, temperature=0.8)\n        \n        # 执行生成\n        return llm(prompt.format(user=user))\n```\n\n### 2. 客户端调用 (Client Snippet)\n\n在本地脚本中启动临时工作区，部署上述应用并进行交互调用。\n\n```python\nfrom steamship import Steamship\n\n# 启动临时工作区（实验结束后自动清理）\nwith Steamship.temporary_workspace() as client:\n    # 部署并使用应用 (假设上述代码已打包为 my-langchain-app)\n    # 在实际开发中，通常先运行 'ship deploy' 命令\n    api = client.use(\"my-langchain-app\")\n    \n    while True:\n        name = input(\"Name: \")\n        if not name:\n            break\n        # 调用远程接口\n        response = api.invoke(\"\u002Fgreet\", user=name)\n        print(f'{response.strip()}\\n')\n```\n\n### 核心特性说明\n*   **持久化与扩展**：上述代码中的 `OpenAI` 适配器自动支持缓存和后端扩展。\n*   **生产部署**：实验完成后，可使用 `ship deploy` 命令将应用部署为永久运行的生产级 API，无需修改代码即可获得身份验证、日志记录和监控功能。\n*   **更多组件**：除了 LLM，该库还支持 `SteamshipVectorStore` (向量存储)、`SteamshipSERP` (搜索工具) 以及多种文件加载器（如 GitHub 仓库、YouTube 视频等）。","一家初创团队正在开发基于 LangChain 的智能代码助手，需要快速将本地原型转化为支持多用户、可持久化存储的生产级服务。\n\n### 没有 steamship-langchain 时\n- 开发者需手动搭建 API 网关和处理并发请求，部署流程繁琐且容易出错。\n- 对话历史和向量索引存储在本地内存中，服务重启后数据丢失，无法实现多租户隔离。\n- 缺乏统一的日志和监控体系，排查线上问题时难以追踪具体的 LLM 调用链和错误信息。\n- 集成音频转录等额外功能时，需要单独寻找并对接第三方服务，增加了架构复杂度。\n- 代码分割仅按字符数机械切分，导致函数上下文断裂，降低了代码问答的准确性。\n\n### 使用 steamship-langchain 后\n- 直接利用内置适配器一键部署生产级 API 端点，自动获得横向扩展能力，无需操心基础设施。\n- 通过 `SteamshipVectorStore` 和 `ChatMessageHistory` 实现状态持久化与多租户支持，确保数据安全隔离。\n- 集成 `LoggingCallbackHandler` 后可通过 `ship logs` 实时查看详细的运行日志和用量指标，运维透明高效。\n- 无缝调用 Steamship 生态中的音频转录等技能，轻松构建多模态交互流程。\n- 采用基于 AST 的 `PythonCodeSplitter` 智能切割代码，保留父类上下文，显著提升了代码理解效果。\n\nsteamship-langchain 让开发者从繁琐的基础设施建设中解放出来，专注于核心业务逻辑，极速完成从原型到生产环境的跨越。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsteamship-core_steamship-langchain_7e63b48a.png","steamship-core","Steamship","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002Fsteamship-core_c845f939.png","The fastest way to add language AI to your product.",null,"hello@steamship.com","GetSteamship","https:\u002F\u002Fwww.steamship.com\u002F","https:\u002F\u002Fgithub.com\u002Fsteamship-core",[81],{"name":82,"color":83,"percentage":84},"Python","#3572A5",100,510,91,"2026-04-09T20:36:34","MIT","未说明",{"notes":91,"python":89,"dependencies":92},"该工具是 Steamship 的 LangChain 适配器库，主要通过 pip 安装。运行此客户端库本身对硬件无特殊要求，但实际使用取决于所调用的后端服务（如 OpenAI、SERPAPI 等）以及是否在本地运行额外的模型。示例代码假设用户已拥有 Steamship API Key 并配置在环境变量中。部分功能（如音频转录）在 README 
中标记为实验性阶段。",[93,94],"steamship","langchain",[35,14,13],"2026-03-27T02:49:30.150509","2026-04-10T20:46:33.769664",[99,104,109,114,119,124],{"id":100,"question_zh":101,"answer_zh":102,"source_url":103},28123,"如何在 steamship-langchain 中使用 GPT-4 模型？","如果在指定 model_name='gpt-4' 时遇到缺少 openai_api_key 的验证错误，可以尝试以下解决方案：\n1. 短期变通方法：直接使用 steamship_langchain 的 `OpenAIChat` 类。\n2. 长期修复：该问题已在 0.0.19 版本中修复，请升级库版本。\n3. 确保已正确设置环境变量 `OPENAI_API_KEY` 或在代码中作为命名参数传递。","https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fissues\u002F41",{"id":105,"question_zh":106,"answer_zh":107,"source_url":108},28124,"如何将 steamship-langchain 集成到 SwiftUI 应用中？","可以通过 HTTP 直接调用部署的 Package 来集成。基本步骤如下：\n1. 构建 URL：`https:\u002F\u002F{userHandle}.steamship.run\u002F{workspaceHandle}\u002F{instanceHandle}\u002F{apiVerb}`\n2. 创建 URLRequest 并添加认证头：`request.addValue(\"Bearer {apiKey}\", forHTTPHeaderField: \"Authorization\")`\n3. 使用 URLSession 发起请求。\n具体 Swift 代码示例可参考官方文档中关于通过 HTTP 访问 Package 的部分。","https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fissues\u002F13",{"id":110,"question_zh":111,"answer_zh":112,"source_url":113},28125,"运行示例代码时出现 'NameError: name 'Steamship' is not defined' 或 OpenAI 客户端验证错误怎么办？","这通常是因为导入语句不完整或未正确传递 client 参数。请确保：\n1. 导入必要的模块：`from steamship import Steamship`, `from steamship_langchain.llms import OpenAI`。\n2. 在实例化 OpenAI 类时，必须传入 `client=self.client` 参数。\n完整示例代码结构应包含 PackageService 类定义，并在 generate 方法中正确初始化 LLM 对象。","https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fissues\u002F47",{"id":115,"question_zh":116,"answer_zh":117,"source_url":118},28126,"如何处理 CSV、JSON 或 PDF 等非文本文件？","可以使用 Steamship 的 `File` 类来处理非文本文件。建议查阅官方文档中关于数据文件（Data Files）的部分（https:\u002F\u002Fdocs.steamship.com\u002Fdata\u002Ffiles.html），其中包含了处理不同文件格式的具体指南和示例。如果针对特定格式（如 CSV）有进一步需求，可参考文档中的相关说明。","https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fissues\u002F46",{"id":120,"question_zh":121,"answer_zh":122,"source_url":123},28127,"遇到 'AttributeError: 'NoneType' object has no attribute 'file'' 错误如何解决？","该错误通常发生在任务（task）执行失败导致输出为 None 时。建议尝试重新运行应用。如果问题持续存在，可能是由于底层任务执行异常，维护者已提交 PR 以帮助排查此类问题。请检查日志确认任务是否成功完成，并确保 API Key 等配置正确无误。","https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fissues\u002F45",{"id":125,"question_zh":126,"answer_zh":127,"source_url":128},28128,"在 macOS 上 pip 安装 steamship-langchain 失败，提示 libxml2 相关错误怎么办？","在 macOS 上安装时如果遇到 'Could not find function xmlCheckVersion in library libxml2' 错误，通常是因为缺少开发工具或库。请尝试运行命令 `xcode-select --install` 来安装 Xcode 命令行工具，这将包含所需的 libxml2 库。安装完成后重试 pip install 命令。","https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fissues\u002F21",[130,135,140,145,150,155,160,165,170,175,180,185,190,195,199,204,209],{"id":131,"version":132,"summary_zh":133,"released_at":134},189045,"0.0.26","## 变更内容\n* 由 @douglas-reid 在 https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F60 中修复了 OpenAIChat 中模型名称的传递问题\n\n\n**完整变更日志**: https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fcompare\u002F0.0.24...0.0.26","2023-09-12T20:16:58",{"id":136,"version":137,"summary_zh":138,"released_at":139},189046,"0.0.23","## 变更内容\n* 清理（文档）：展示 OpenAIChat 的用法，并由 @douglas-reid 在 https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F49 中澄清快速入门指南。\n* 更好地处理生成任务失败的情况，由 @douglas-reid 在 https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F50 
中实现。\n\n\n**完整变更日志**：https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fcompare\u002F0.0.22...0.0.23","2023-06-05T16:04:07",{"id":141,"version":142,"summary_zh":143,"released_at":144},189047,"0.0.22","## 变更内容\n* 由 @EniasCailliau 在 https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F48 中升级 LC 和 Steamship 的版本\n\n\n**完整变更日志**: https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fcompare\u002F0.0.21...0.0.22","2023-05-31T15:12:30",{"id":146,"version":147,"summary_zh":148,"released_at":149},189048,"0.0.21","## 变更内容\n* 依赖：由 @douglas-reid 在 https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F44 中更新至最新的 LangChain\n\n\n**完整变更日志**：https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fcompare\u002F0.0.20...0.0.21","2023-05-12T19:04:59",{"id":151,"version":152,"summary_zh":153,"released_at":154},189049,"0.0.20","## 变更内容\n* 更宽松的 requirements.txt，由 @EniasCailliau 在 https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F43 中提出\n\n\n**完整变更日志**: https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fcompare\u002F0.0.19...0.0.20","2023-05-08T15:25:05",{"id":156,"version":157,"summary_zh":158,"released_at":159},189050,"0.0.19","## 变更内容\n* 修复：@douglas-reid 在 https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F38 中修复了聊天消息历史对 AI 消息的处理问题。\n* 依赖更新：@douglas-reid 在 https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F39 中将 Steamship 更新至最新版本。\n* 修复：@douglas-reid 在 https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F40 中确保消息历史始终使用有效的文件句柄。\n* 修复：@douglas-reid 在 https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F42 中使用聊天模型初始化 OpenAI。\n\n\n**完整变更日志**：https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fcompare\u002F0.0.18...0.0.19","2023-04-28T21:02:02",{"id":161,"version":162,"summary_zh":163,"released_at":164},189051,"0.0.18","## 变更内容\n* 添加了带分数的相似度搜索，由 @EniasCailliau 在 https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F37 中实现。\n\n\n**完整变更日志**: https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fcompare\u002F0.0.17...0.0.18","2023-04-17T18:15:50",{"id":166,"version":167,"summary_zh":168,"released_at":169},189052,"0.0.17","## 变更内容\n* @EniasCailliau 在 https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F36 中修复了 chat_memory 的热修复问题\n\n\n**完整变更日志**: https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fcompare\u002F0.0.16...0.0.17","2023-04-14T14:59:02",{"id":171,"version":172,"summary_zh":173,"released_at":174},189053,"0.0.16","新增对 `ChatModel` 的支持，并更新至最新的 LangChain 版本。\n\n## 变更内容\n* 更新：@douglas-reid 在 https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F34 中将依赖固定为最新版本的 LangChain。\n* 新增 chat_model.openai：@EniasCailliau 在 https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F35 中完成。\n\n**完整变更日志**：https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fcompare\u002F0.0.15...0.0.16","2023-04-12T20:26:53",{"id":176,"version":177,"summary_zh":178,"released_at":179},189054,"0.0.15","## 变更内容\n* 修复 @eltociear 在 https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F29 中对 getting_started.ipynb 的拼写错误\n* 
修复：上游重构导致示例代码失效的问题，由 @douglas-reid 在 https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F31 中完成\n* 新功能：添加支持 GPT-4 的 OpenAIChat 实现，由 @douglas-reid 在 https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F30 中完成\n\n## 新贡献者\n* @eltociear 在 https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F29 中完成了首次贡献\n\n**完整变更日志**：https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fcompare\u002F0.0.14...0.0.15","2023-03-20T17:52:44",{"id":181,"version":182,"summary_zh":183,"released_at":184},189055,"0.0.14","# ⚠️ Breaking Changes ⚠️ \r\n* Removed `memory.ConversationBufferWindowMemory` and `memory.ConversationBufferMemory`. They have been replaced with `memory.ChatMessageHistory`.\r\n\r\nIn order to keep up with the changes in upstream LangChain, we converted our Memory classes to a single `ChatMessageHistory` that may be used with the upstream conversational memory classes. This is a bit unfortunate, but should allow us to continue to stay in-step with the latest LangChain developments.\r\n\r\nWe apologize for the impact.\r\n\r\n## What's Changed\r\n* loaders: support allowed failures in bulk operations by @douglas-reid in https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F23\r\n* feat: add support for direct File import to VectorStore by @douglas-reid in https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F25\r\n* feat: add file loader for sphinx-based sites by @douglas-reid in https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F26\r\n* deps: update to match latest LC memory refactor by @douglas-reid in https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F28\r\n* feat: allow sphinx loader to sanitize and ignore by @douglas-reid in https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F27\r\n\r\n\r\n**Full Changelog**: https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fcompare\u002F0.0.13...0.0.14","2023-03-09T01:22:51",{"id":186,"version":187,"summary_zh":188,"released_at":189},189056,"0.0.13","## What's Changed\r\n* Hotfix steamship vector store by @EniasCailliau in https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F22\r\n\r\n\r\n**Full Changelog**: https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fcompare\u002F0.0.12...0.0.13","2023-02-27T18:27:37",{"id":191,"version":192,"summary_zh":193,"released_at":194},189057,"0.0.12","## What's Changed\r\n* feat: add logging callback by @douglas-reid in https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F18\r\n* docs: add information on logging callback by @steamship-developers in https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F20\r\n\r\n**Full Changelog**: https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fcompare\u002F0.0.11...0.0.12","2023-02-24T23:40:48",{"id":196,"version":197,"summary_zh":75,"released_at":198},189058,"0.0.12-rc.0","2023-02-24T17:08:45",{"id":200,"version":201,"summary_zh":202,"released_at":203},189059,"0.0.11","## What's Changed\r\n* Update embedding model to text-embedding-ada-002 by @EniasCailliau in https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F8\r\n* Enable in place replacement of 
`langchain.OpenAI` with `steamship_langchain.OpenAI` by @EniasCailliau in https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F4\r\n* First version of a SteamshipVectorStore by @EniasCailliau in https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F5\r\n* feat: add initial set of file_loaders by @douglas-reid in https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F10\r\n* feat: readthedocs support by @douglas-reid in https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F14\r\n\r\n## New Contributors\r\n* @EniasCailliau made their first contribution in https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fpull\u002F8\r\n\r\n**Full Changelog**: https:\u002F\u002Fgithub.com\u002Fsteamship-core\u002Fsteamship-langchain\u002Fcompare\u002F0.0.10...0.0.11","2023-02-23T17:16:16",{"id":205,"version":206,"summary_zh":207,"released_at":208},189060,"0.0.10","This release updates the LLM support to provide a full drop-in replacement for the OpenAI LLM in LangChain. This allows users to use the Steamship backend for LLM calls with only a package name change. Thanks @EniasCailliau for the contribution.","2023-02-14T19:23:13",{"id":210,"version":211,"summary_zh":212,"released_at":213},189061,"0.0.9","Includes more forgiving `wait()` time in Task handling around generation. This will work better with the updated retry \u002F backoff behavior in the LLM plugin.","2023-02-08T22:18:02"]