[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-RageAgainstThePixel--com.openai.unity":3,"tool-RageAgainstThePixel--com.openai.unity":64},[4,17,27,35,43,56],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":16},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,3,"2026-04-05T11:01:52",[13,14,15],"开发框架","图像","Agent","ready",{"id":18,"name":19,"github_repo":20,"description_zh":21,"stars":22,"difficulty_score":23,"last_commit_at":24,"category_tags":25,"status":16},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",138956,2,"2026-04-05T11:33:21",[13,15,26],"语言模型",{"id":28,"name":29,"github_repo":30,"description_zh":31,"stars":32,"difficulty_score":23,"last_commit_at":33,"category_tags":34,"status":16},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 
绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",107662,"2026-04-03T11:11:01",[13,14,15],{"id":36,"name":37,"github_repo":38,"description_zh":39,"stars":40,"difficulty_score":23,"last_commit_at":41,"category_tags":42,"status":16},3704,"NextChat","ChatGPTNextWeb\u002FNextChat","NextChat 是一款轻量且极速的 AI 助手，旨在为用户提供流畅、跨平台的大模型交互体验。它完美解决了用户在多设备间切换时难以保持对话连续性，以及面对众多 AI 模型不知如何统一管理的痛点。无论是日常办公、学习辅助还是创意激发，NextChat 都能让用户随时随地通过网页、iOS、Android、Windows、MacOS 或 Linux 端无缝接入智能服务。\n\n这款工具非常适合普通用户、学生、职场人士以及需要私有化部署的企业团队使用。对于开发者而言，它也提供了便捷的自托管方案，支持一键部署到 Vercel 或 Zeabur 等平台。\n\nNextChat 的核心亮点在于其广泛的模型兼容性，原生支持 Claude、DeepSeek、GPT-4 及 Gemini Pro 等主流大模型，让用户在一个界面即可自由切换不同 AI 能力。此外，它还率先支持 MCP（Model Context Protocol）协议，增强了上下文处理能力。针对企业用户，NextChat 提供专业版解决方案，具备品牌定制、细粒度权限控制、内部知识库整合及安全审计等功能，满足公司对数据隐私和个性化管理的高标准要求。",87618,"2026-04-05T07:20:52",[13,26],{"id":44,"name":45,"github_repo":46,"description_zh":47,"stars":48,"difficulty_score":23,"last_commit_at":49,"category_tags":50,"status":16},2268,"ML-For-Beginners","microsoft\u002FML-For-Beginners","ML-For-Beginners 是由微软推出的一套系统化机器学习入门课程，旨在帮助零基础用户轻松掌握经典机器学习知识。这套课程将学习路径规划为 12 周，包含 26 节精炼课程和 52 道配套测验，内容涵盖从基础概念到实际应用的完整流程，有效解决了初学者面对庞大知识体系时无从下手、缺乏结构化指导的痛点。\n\n无论是希望转型的开发者、需要补充算法背景的研究人员，还是对人工智能充满好奇的普通爱好者，都能从中受益。课程不仅提供了清晰的理论讲解，还强调动手实践，让用户在循序渐进中建立扎实的技能基础。其独特的亮点在于强大的多语言支持，通过自动化机制提供了包括简体中文在内的 50 多种语言版本，极大地降低了全球不同背景用户的学习门槛。此外，项目采用开源协作模式，社区活跃且内容持续更新，确保学习者能获取前沿且准确的技术资讯。如果你正寻找一条清晰、友好且专业的机器学习入门之路，ML-For-Beginners 将是理想的起点。",84991,"2026-04-05T10:45:23",[14,51,52,53,15,54,26,13,55],"数据工具","视频","插件","其他","音频",{"id":57,"name":58,"github_repo":59,"description_zh":60,"stars":61,"difficulty_score":10,"last_commit_at":62,"category_tags":63,"status":16},3128,"ragflow","infiniflow\u002Fragflow","RAGFlow 
是一款领先的开源检索增强生成（RAG）引擎，旨在为大语言模型构建更精准、可靠的上下文层。它巧妙地将前沿的 RAG 技术与智能体（Agent）能力相结合，不仅支持从各类文档中高效提取知识，还能让模型基于这些知识进行逻辑推理和任务执行。\n\n在大模型应用中，幻觉问题和知识滞后是常见痛点。RAGFlow 通过深度解析复杂文档结构（如表格、图表及混合排版），显著提升了信息检索的准确度，从而有效减少模型“胡编乱造”的现象，确保回答既有据可依又具备时效性。其内置的智能体机制更进一步，使系统不仅能回答问题，还能自主规划步骤解决复杂问题。\n\n这款工具特别适合开发者、企业技术团队以及 AI 研究人员使用。无论是希望快速搭建私有知识库问答系统，还是致力于探索大模型在垂直领域落地的创新者，都能从中受益。RAGFlow 提供了可视化的工作流编排界面和灵活的 API 接口，既降低了非算法背景用户的上手门槛，也满足了专业开发者对系统深度定制的需求。作为基于 Apache 2.0 协议开源的项目，它正成为连接通用大模型与行业专有知识之间的重要桥梁。",77062,"2026-04-04T04:44:48",[15,14,13,26,54],{"id":65,"github_repo":66,"name":67,"description_en":68,"description_zh":69,"ai_summary_zh":70,"readme_en":71,"readme_zh":72,"quickstart_zh":73,"use_case_zh":74,"hero_image_url":75,"owner_login":76,"owner_name":76,"owner_avatar_url":77,"owner_bio":78,"owner_company":79,"owner_location":79,"owner_email":79,"owner_twitter":80,"owner_website":79,"owner_url":81,"languages":82,"stars":87,"forks":88,"last_commit_at":89,"license":90,"difficulty_score":23,"env_os":91,"env_gpu":92,"env_ram":92,"env_deps":93,"category_tags":104,"github_topics":105,"view_count":23,"oss_zip_url":79,"oss_zip_packed_at":79,"status":16,"created_at":126,"updated_at":127,"faqs":128,"releases":159},2916,"RageAgainstThePixel\u002Fcom.openai.unity","com.openai.unity","A Non-Official OpenAI Rest Client for Unity (UPM)","com.openai.unity 是一款专为 Unity 游戏引擎设计的非官方 OpenAI REST API 客户端插件。它旨在帮助开发者在 Unity 项目中轻松集成 OpenAI 的强大能力，如智能对话、文本生成、语音交互及函数调用等功能，无需从零开始编写复杂的网络请求代码。\n\n该工具主要解决了 Unity 开发者在对接 OpenAI 服务时面临的接口适配难、异步处理复杂以及依赖管理繁琐等痛点。通过封装标准的 RESTful 接口，它让游戏或应用能够流畅地调用大模型能力，实现更智能化的 NPC 互动、动态内容生成或实时语音助手。\n\ncom.openai.unity 非常适合 Unity 游戏开发者、技术美术以及希望在交互式应用中融入 AI 功能的软件工程师使用。其独特的技术亮点在于基于成熟的 OpenAI-DotNet 库构建，原生支持 Unity 的异步编程模式，并提供了对 Azure OpenAI、流式响应（Streaming）以及实时会话（Realtime）的深度支持。安装方面，它完美兼容 Unity 包管理器（UPM）和 OpenUPM 注册表，一键即可配置好所有必要的依","com.openai.unity 是一款专为 Unity 游戏引擎设计的非官方 OpenAI REST API 客户端插件。它旨在帮助开发者在 Unity 项目中轻松集成 OpenAI 的强大能力，如智能对话、文本生成、语音交互及函数调用等功能，无需从零开始编写复杂的网络请求代码。\n\n该工具主要解决了 Unity 开发者在对接 OpenAI 
服务时面临的接口适配难、异步处理复杂以及依赖管理繁琐等痛点。通过封装标准的 RESTful 接口，它让游戏或应用能够流畅地调用大模型能力，实现更智能化的 NPC 互动、动态内容生成或实时语音助手。\n\ncom.openai.unity 非常适合 Unity 游戏开发者、技术美术以及希望在交互式应用中融入 AI 功能的软件工程师使用。其独特的技术亮点在于基于成熟的 OpenAI-DotNet 库构建，原生支持 Unity 的异步编程模式，并提供了对 Azure OpenAI、流式响应（Streaming）以及实时会话（Realtime）的深度支持。安装方面，它完美兼容 Unity 包管理器（UPM）和 OpenUPM 注册表，一键即可配置好所有必要的依赖项，大幅降低了接入门槛，让创作者能更专注于创意实现而非底层技术细节。","# OpenAI\n\n[![Discord](https:\u002F\u002Fimg.shields.io\u002Fdiscord\u002F855294214065487932.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https:\u002F\u002Fdiscord.gg\u002FxQgMW9ufN4) [![openupm](https:\u002F\u002Fimg.shields.io\u002Fnpm\u002Fv\u002Fcom.openai.unity?label=openupm&registry_uri=https:\u002F\u002Fpackage.openupm.com)](https:\u002F\u002Fopenupm.com\u002Fpackages\u002Fcom.openai.unity\u002F) [![openupm](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fdynamic\u002Fjson?color=brightgreen&label=downloads&query=%24.downloads&suffix=%2Fmonth&url=https%3A%2F%2Fpackage.openupm.com%2Fdownloads%2Fpoint%2Flast-month%2Fcom.openai.unity)](https:\u002F\u002Fopenupm.com\u002Fpackages\u002Fcom.openai.unity\u002F)\n\nBased on [OpenAI-DotNet](https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002FOpenAI-DotNet)\n\nA [OpenAI](https:\u002F\u002Fopenai.com\u002F) package for the [Unity](https:\u002F\u002Funity.com\u002F) to use though their RESTful API.\nIndependently developed, this is not an official library and I am not affiliated with OpenAI.\nAn OpenAI API account is required.\n\n***All copyrights, trademarks, logos, and assets are the property of their respective owners.***\n\n## Installing\n\nRequires Unity 2021.3 LTS or higher.\n\nThe recommended installation method is though the unity package manager and [OpenUPM](https:\u002F\u002Fopenupm.com\u002Fpackages\u002Fcom.openai.unity).\n\n### Via Unity Package Manager and OpenUPM\n\n#### Terminal\n\n```bash\nopenupm add com.openai.unity\n```\n\n#### Manual\n\n- Open your Unity project settings\n- Add the OpenUPM package 
registry:\n  - Name: `OpenUPM`\n  - URL: `https:\u002F\u002Fpackage.openupm.com`\n  - Scope(s):\n    - `com.openai`\n    - `com.utilities`\n\n![scoped-registries](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FRageAgainstThePixel_com.openai.unity_readme_3c8844747a35.png)\n\n- Open the Unity Package Manager window\n- Change the Registry from Unity to `My Registries`\n- Add the `OpenAI` package\n\n### Via Unity Package Manager and Git url\n\n> [!WARNING]\n> This repo has dependencies on other repositories! You are responsible for adding these on your own.\n\n- Open your Unity Package Manager\n- Add package from git url: `https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity.git#upm`\n  - [com.utilities.async](https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.utilities.async)\n  - [com.utilities.websockets](https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.utilities.websockets)\n  - [com.utilities.extensions](https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.utilities.extensions)\n  - [com.utilities.rest](https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.utilities.rest)\n  - [com.utilities.audio](https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.utilities.audio)\n  - [com.utilities.encoder.wav](https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.utilities.encoder.wav)\n\n---\n\n## [Documentation](https:\u002F\u002Frageagainstthepixel.github.io\u002FOpenAI-DotNet)\n\n> Check out our new api docs!\n\n\u003Chttps:\u002F\u002Frageagainstthepixel.github.io\u002FOpenAI-DotNet>\n\n### Table of Contents\n\n- [Authentication](#authentication)\n- [Azure OpenAI](#azure-openai)\n  - [Azure Active Directory Authentication](#azure-active-directory-authentication)\n- [OpenAI API Proxy](#openai-api-proxy)\n- [Models](#models)\n  - [List Models](#list-models)\n  - [Retrieve Models](#retrieve-model)\n  - [Delete Fine Tuned Model](#delete-fine-tuned-model)\n- 
[Responses](#responses)\n  - [Create Response](#create-response)\n    - [Simple Text Response](#simple-response-with-text)\n    - [Streaming Response with Function Calling](#streaming-response-with-function-calling)\n  - [Get Response](#get-response)\n  - [List Input Items](#list-input-items)\n  - [Cancel Response](#cancel-response)\n  - [Delete Response](#delete-response)\n- [Conversations](#conversations)\n  - [Create Conversation](#create-conversation)\n  - [Retrieve Conversation](#retrieve-conversation)\n  - [Update Conversation](#update-conversation)\n  - [Delete Conversation](#delete-conversation)\n  - [List Conversation Items](#list-conversation-items)\n  - [Create Conversation Item](#create-conversation-item)\n  - [Retrieve Conversation Item](#retrieve-conversation-item)\n  - [Delete Conversation Item](#delete-conversation-item)\n- [Realtime](#realtime)\n  - [Create Realtime Session](#create-realtime-session)\n  - [Client Events](#client-events)\n    - [Sending Client Events](#sending-client-events)\n  - [Server Events](#server-events)\n    - [Receiving Server Events](#receiving-server-events)\n- [Assistants](#assistants)\n  - [List Assistants](#list-assistants)\n  - [Create Assistant](#create-assistant)\n  - [Retrieve Assistant](#retrieve-assistant)\n  - [Modify Assistant](#modify-assistant)\n  - [Delete Assistant](#delete-assistant)\n  - [Assistant Streaming](#assistant-streaming)\n  - [Threads](#threads)\n    - [Create Thread](#create-thread)\n    - [Create Thread and Run](#create-thread-and-run)\n      - [Streaming](#create-thread-and-run-streaming)\n    - [Retrieve Thread](#retrieve-thread)\n    - [Modify Thread](#modify-thread)\n    - [Delete Thread](#delete-thread)\n    - [Thread Messages](#thread-messages)\n      - [List Messages](#list-thread-messages)\n      - [Create Message](#create-thread-message)\n      - [Retrieve Message](#retrieve-thread-message)\n      - [Modify Message](#modify-thread-message)\n    - [Thread Runs](#thread-runs)\n      - 
[List Runs](#list-thread-runs)\n      - [Create Run](#create-thread-run)\n        - [Streaming](#create-thread-run-streaming)\n      - [Retrieve Run](#retrieve-thread-run)\n      - [Modify Run](#modify-thread-run)\n      - [Submit Tool Outputs to Run](#thread-submit-tool-outputs-to-run)\n      - [Structured Outputs](#thread-structured-outputs)\n      - [List Run Steps](#list-thread-run-steps)\n      - [Retrieve Run Step](#retrieve-thread-run-step)\n      - [Cancel Run](#cancel-thread-run)\n  - [Vector Stores](#vector-stores)\n    - [List Vector Stores](#list-vector-stores)\n    - [Create Vector Store](#create-vector-store)\n    - [Retrieve Vector Store](#retrieve-vector-store)\n    - [Modify Vector Store](#modify-vector-store)\n    - [Delete Vector Store](#delete-vector-store)\n    - [Vector Store Files](#vector-store-files)\n      - [List Vector Store Files](#list-vector-store-files)\n      - [Create Vector Store File](#create-vector-store-file)\n      - [Retrieve Vector Store File](#retrieve-vector-store-file)\n      - [Delete Vector Store File](#delete-vector-store-file)\n    - [Vector Store File Batches](#vector-store-file-batches)\n      - [Create Vector Store File Batch](#create-vector-store-file-batch)\n      - [Retrieve Vector Store File Batch](#retrieve-vector-store-file-batch)\n      - [List Files In Vector Store Batch](#list-files-in-vector-store-batch)\n      - [Cancel Vector Store File Batch](#cancel-vector-store-file-batch)\n- [Chat](#chat)\n  - [Chat Completions](#chat-completions)\n  - [Streaming](#chat-streaming)\n  - [Tools](#chat-tools)\n  - [Vision](#chat-vision)\n  - [Audio](#chat-audio)\n  - [Structured Outputs](#chat-structured-outputs)\n  - [Json Mode](#chat-json-mode)\n- [Audio](#audio)\n  - [Create Speech](#create-speech)\n    - [Stream Speech](#stream-speech)\n  - [Create Transcription](#create-transcription)\n  - [Create Translation](#create-translation)\n- [Images](#images)\n  - [Create Image](#create-image)\n  - [Edit 
Image](#edit-image)\n  - [Create Image Variation](#create-image-variation)\n- [Files](#files)\n  - [List Files](#list-files)\n  - [Upload File](#upload-file)\n  - [Delete File](#delete-file)\n  - [Retrieve File](#retrieve-file-info)\n  - [Download File Content](#download-file-content)\n- [Fine Tuning](#fine-tuning)\n  - [Create Fine Tune Job](#create-fine-tune-job)\n  - [List Fine Tune Jobs](#list-fine-tune-jobs)\n  - [Retrieve Fine Tune Job Info](#retrieve-fine-tune-job-info)\n  - [Cancel Fine Tune Job](#cancel-fine-tune-job)\n  - [List Fine Tune Job Events](#list-fine-tune-job-events)\n- [Batches](#batches)\n  - [List Batches](#list-batches)\n  - [Create Batch](#create-batch)\n  - [Retrieve Batch](#retrieve-batch)\n  - [Cancel Batch](#cancel-batch)\n- [Embeddings](#embeddings)\n  - [Create Embedding](#create-embeddings)\n- [Moderations](#moderations)\n  - [Create Moderation](#create-moderation)\n\n### [Authentication](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fauthentication)\n\nThere are 4 ways to provide your API keys, in order of precedence:\n\n> [!WARNING]\n> We recommended using the environment variables to load the API key instead of having it hard coded in your source. It is not recommended use this method in production, but only for accepting user credentials, local testing and quick start scenarios.\n\n1. [Pass keys directly with constructor](#pass-keys-directly-with-constructor) :warning:\n2. [Unity Scriptable Object](#unity-scriptable-object) :warning:\n3. [Load key from configuration file](#load-key-from-configuration-file)\n4. [Use System Environment Variables](#use-system-environment-variables)\n\nYou use the `OpenAIAuthentication` when you initialize the API as shown:\n\n#### Pass keys directly with constructor\n\n> [!WARNING]\n> We recommended using the environment variables to load the API key instead of having it hard coded in your source. 
It is not recommended use this method in production, but only for accepting user credentials, local testing and quick start scenarios.\n\n```csharp\nvar api = new OpenAIClient(\"sk-apiKey\");\n```\n\nOr create a `OpenAIAuthentication` object manually\n\n```csharp\nvar api = new OpenAIClient(new OpenAIAuthentication(\"sk-apiKey\", \"org-yourOrganizationId\", \"proj_yourProjectId\"));\n```\n\n#### Unity Scriptable Object\n\nYou can save the key directly into a scriptable object that is located in the `Assets\u002FResources` folder.\n\nYou can create a new one by using the context menu of the project pane and creating a new `OpenAIConfiguration` scriptable object.\n\n> [!WARNING]\n> Beware checking this file into source control, as other people will be able to see your API key. It is recommended to use the [OpenAI-DotNet-Proxy](#openai-api-proxy) and authenticate users with your preferred OAuth provider.\n\n![Create new OpenAIConfiguration](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FRageAgainstThePixel_com.openai.unity_readme_227e8313d044.png)\n\n#### Load key from configuration file\n\nAttempts to load api keys from a configuration file, by default `.openai` in the current directory, optionally traversing up the directory tree or in the user's home directory.\n\nTo create a configuration file, create a new text file named `.openai` and containing the line:\n\n> [!NOTE]\n> Organization and project id entries are optional.\n\n##### Json format\n\n```json\n{\n  \"apiKey\": \"sk-aaaabbbbbccccddddd\",\n  \"organizationId\": \"org-yourOrganizationId\",\n  \"projectId\": \"proj_yourProjectId\"\n}\n```\n\n##### Deprecated format\n\n```shell\nOPENAI_API_KEY=sk-aaaabbbbbccccddddd\nOPENAI_ORGANIZATION_ID=org-yourOrganizationId\nOPENAI_PROJECT_ID=proj_yourProjectId\n```\n\nYou can also load the configuration file directly with known path by calling static methods in `OpenAIAuthentication`:\n\n- Loads the default `.openai` config in the specified 
directory:\n\n```csharp\nvar api = new OpenAIClient(new OpenAIAuthentication().LoadFromDirectory(\"path\u002Fto\u002Fyour\u002Fdirectory\"));\n```\n\n- Loads the configuration file from a specific path. File does not need to be named `.openai` as long as it conforms to the json format:\n\n```csharp\nvar api = new OpenAIClient(new OpenAIAuthentication().LoadFromPath(\"path\u002Fto\u002Fyour\u002Ffile.json\"));\n```\n\n#### Use System Environment Variables\n\nUse your system's environment variables specify an api key and organization to use.\n\n- Use `OPENAI_API_KEY` for your api key.\n- Use `OPENAI_ORGANIZATION_ID` to specify an organization.\n- Use `OPENAI_PROJECT_ID` to specify a project.\n\n```csharp\nvar api = new OpenAIClient(new OpenAIAuthentication().LoadFromEnvironment());\n```\n\n---\n\n### [Azure OpenAI](https:\u002F\u002Flearn.microsoft.com\u002Fen-us\u002Fazure\u002Fcognitive-services\u002Fopenai)\n\nYou can also choose to use Microsoft's Azure OpenAI deployments as well.\n\nYou can find the required information in the Azure Playground by clicking the `View Code` button and view a URL like this:\n\n```markdown\nhttps:\u002F\u002F{your-resource-name}.openai.azure.com\u002Fopenai\u002Fdeployments\u002F{deployment-id}\u002Fchat\u002Fcompletions?api-version={api-version}\n```\n\n- `your-resource-name` The name of your Azure OpenAI Resource.\n- `deployment-id` The deployment name you chose when you deployed the model.\n- `api-version` The API version to use for this operation. 
This follows the YYYY-MM-DD format.\n\nTo setup the client to use your deployment, you'll need to pass in `OpenAISettings` into the client constructor.\n\n```csharp\nvar auth = new OpenAIAuthentication(\"sk-apiKey\");\nvar settings = new OpenAISettings(resourceName: \"your-resource-name\", deploymentId: \"deployment-id\", apiVersion: \"api-version\");\nvar api = new OpenAIClient(auth, settings);\n```\n\n#### [Azure Active Directory Authentication](https:\u002F\u002Flearn.microsoft.com\u002Fen-us\u002Fazure\u002Fcognitive-services\u002Fopenai\u002Freference#authentication)\n\n[Authenticate with MSAL](https:\u002F\u002Fgithub.com\u002FAzureAD\u002Fmicrosoft-authentication-library-for-dotnet) as usual and get access token, then use the access token when creating your `OpenAIAuthentication`. Then be sure to set useAzureActiveDirectory to true when creating your `OpenAISettings`.\n\n[Tutorial: Desktop app that calls web APIs: Acquire a token](https:\u002F\u002Flearn.microsoft.com\u002Fen-us\u002Fazure\u002Factive-directory\u002Fdevelop\u002Fscenario-desktop-acquire-token?tabs=dotnet)\n\n```csharp\n\u002F\u002F get your access token using any of the MSAL methods\nvar accessToken = result.AccessToken;\nvar auth = new OpenAIAuthentication(accessToken);\nvar settings = new OpenAISettings(resourceName: \"your-resource\", deploymentId: \"deployment-id\", apiVersion: \"api-version\", useActiveDirectoryAuthentication: true);\nvar api = new OpenAIClient(auth, settings);\n```\n\n---\n\n### [OpenAI API Proxy](https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002FOpenAI-DotNet\u002Fblob\u002Fmain\u002FOpenAI-DotNet-Proxy\u002FReadme.md)\n\n[![NuGet version (OpenAI-DotNet-Proxy)](https:\u002F\u002Fimg.shields.io\u002Fnuget\u002Fv\u002FOpenAI-DotNet-Proxy.svg?label=OpenAI-DotNet-Proxy&logo=nuget)](https:\u002F\u002Fwww.nuget.org\u002Fpackages\u002FOpenAI-DotNet-Proxy\u002F)\n\nUsing either the 
[OpenAI-DotNet](https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002FOpenAI-DotNet) or [com.openai.unity](https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity) packages directly in your front-end app may expose your API keys and other sensitive information. To mitigate this risk, it is recommended to set up an intermediate API that makes requests to OpenAI on behalf of your front-end app. This library can be utilized for both front-end and intermediary host configurations, ensuring secure communication with the OpenAI API.\n\n#### Front End Example\n\nIn the front end example, you will need to securely authenticate your users using your preferred OAuth provider. Once the user is authenticated, exchange your custom auth token with your API key on the backend.\n\nFollow these steps:\n\n1. Setup a new project using either the [OpenAI-DotNet](https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002FOpenAI-DotNet) or [com.openai.unity](https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity) packages.\n2. Authenticate users with your OAuth provider.\n3. After successful authentication, create a new `OpenAIAuthentication` object and pass in the custom token with the prefix `sess-`.\n4. Create a new `OpenAISettings` object and specify the domain where your intermediate API is located.\n5. Pass your new `auth` and `settings` objects to the `OpenAIClient` constructor when you create the client instance.\n\nHere's an example of how to set up the front end:\n\n```csharp\nvar authToken = await LoginAsync();\nvar auth = new OpenAIAuthentication($\"sess-{authToken}\");\nvar settings = new OpenAISettings(domain: \"api.your-custom-domain.com\");\nvar api = new OpenAIClient(auth, settings);\n```\n\nThis setup allows your front end application to securely communicate with your backend that will be using the OpenAI-DotNet-Proxy, which then forwards requests to the OpenAI API. 
This ensures that your OpenAI API keys and other sensitive information remain secure throughout the process.\n\n#### Back End Example\n\nIn this example, we demonstrate how to set up and use `OpenAIProxy` in a new ASP.NET Core web app. The proxy server will handle authentication and forward requests to the OpenAI API, ensuring that your API keys and other sensitive information remain secure.\n\n1. Create a new [ASP.NET Core minimal web API](https:\u002F\u002Flearn.microsoft.com\u002Fen-us\u002Faspnet\u002Fcore\u002Ftutorials\u002Fmin-web-api?view=aspnetcore-6.0) project.\n2. Add the OpenAI-DotNet nuget package to your project.\n    - Powershell install: `Install-Package OpenAI-DotNet-Proxy`\n    - Dotnet install: `dotnet add package OpenAI-DotNet-Proxy`\n    - Manually editing .csproj: `\u003CPackageReference Include=\"OpenAI-DotNet-Proxy\" \u002F>`\n3. Create a new class that inherits from `AbstractAuthenticationFilter` and override the `ValidateAuthentication` method. This will implement the `IAuthenticationFilter` that you will use to check user session token against your internal server.\n4. In `Program.cs`, create a new proxy web application by calling `OpenAIProxy.CreateWebApplication` method, passing your custom `AuthenticationFilter` as a type argument.\n5. 
Create `OpenAIAuthentication` and `OpenAIClientSettings` as you would normally with your API keys, org id, or Azure settings.\n\n```csharp\npublic partial class Program\n{\n    private class AuthenticationFilter : AbstractAuthenticationFilter\n    {\n        public override async Task ValidateAuthenticationAsync(IHeaderDictionary request)\n        {\n            await Task.CompletedTask; \u002F\u002F remote resource call to verify token\n\n            \u002F\u002F You will need to implement your own class to properly test\n            \u002F\u002F custom issued tokens you've setup for your end users.\n            if (!request.Authorization.ToString().Contains(TestUserToken))\n            {\n                throw new AuthenticationException(\"User is not authorized\");\n            }\n        }\n    }\n\n    public static void Main(string[] args)\n    {\n        var auth = OpenAIAuthentication.LoadFromEnv();\n        var settings = new OpenAIClientSettings(\u002F* your custom settings if using Azure OpenAI *\u002F);\n        using var openAIClient = new OpenAIClient(auth, settings);\n        OpenAIProxy.CreateWebApplication\u003CAuthenticationFilter>(args, openAIClient).Run();\n    }\n}\n```\n\nOnce you have set up your proxy server, your end users can now make authenticated requests to your proxy api instead of directly to the OpenAI API. The proxy server will handle authentication and forward requests to the OpenAI API, ensuring that your API keys and other sensitive information remain secure.\n\n---\n\n### [Models](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fmodels)\n\nList and describe the various models available in the API. 
You can refer to the [Models documentation](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fmodels) to understand what models are available and the differences between them.\n\nAlso checkout [model endpoint compatibility](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fmodels\u002Fmodel-endpoint-compatibility) to understand which models work with which endpoints.\n\nTo specify a custom model not pre-defined in this library:\n\n```csharp\nvar model = new Model(\"model-id\");\n```\n\nThe Models API is accessed via `OpenAIClient.ModelsEndpoint`\n\n#### [List models](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fmodels\u002Flist)\n\nLists the currently available models, and provides basic information about each one such as the owner and availability.\n\n```csharp\nvar api = new OpenAIClient();\nvar models = await api.ModelsEndpoint.GetModelsAsync();\n\nforeach (var model in models)\n{\n    Debug.Log(model.ToString());\n}\n```\n\n#### [Retrieve model](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fmodels\u002Fretrieve)\n\nRetrieves a model instance, providing basic information about the model such as the owner and permissions.\n\n```csharp\nvar api = new OpenAIClient();\nvar model = await api.ModelsEndpoint.GetModelDetailsAsync(\"gpt-4o\");\nDebug.Log(model.ToString());\n```\n\n#### [Delete Fine Tuned Model](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Ffine-tunes\u002Fdelete-model)\n\nDelete a fine-tuned model. You must have the Owner role in your organization.\n\n```csharp\nvar api = new OpenAIClient();\nvar isDeleted = await api.ModelsEndpoint.DeleteFineTuneModelAsync(\"your-fine-tuned-model\");\nAssert.IsTrue(isDeleted);\n```\n\n---\n\n### [Responses](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fresponses)\n\nOpenAI's most advanced interface for generating model responses. Supports text and image inputs, and text outputs. 
Create stateful interactions with the model, using the output of previous responses as input. Extend the model's capabilities with built-in tools for file search, web search, computer use, and more. Allow the model access to external systems and data using function calling.\n\n- Related Guides:\n  - [QuickStart](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fquickstart?api-mode=responses)\n  - [Text Inputs and Outputs](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Ftext?api-mode=responses)\n  - [Image Inputs](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Fimages?api-mode=responses)\n  - [Structured Outputs](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Fstructured-outputs?api-mode=responses)\n  - [Conversation State](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Fconversation-state?api-mode=responses)\n  - [Extend the model with tools](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Ftools?api-mode=responses)\n\nThe Response API is accessed via `OpenAIClient.ResponsesEndpoint`\n\n#### [Create Response](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fresponses\u002Fcreate)\n\nCreates a model response. Provide text or image inputs to generate text or JSON outputs. 
Have the model call your own custom code or use built-in tools like web search or file search to use your own data as input for the model's response.\n\n##### Simple Response With Text\n\n```csharp\nvar api = new OpenAIClient();\nvar response = await api.ResponsesEndpoint.CreateModelResponseAsync(\"Tell me a three sentence bedtime story about a unicorn.\");\nvar responseItem = response.Output.LastOrDefault();\nDebug.Log($\"{responseItem.Role}:{responseItem}\");\nresponse.PrintUsage();\n```\n\n##### Streaming Response with Function Calling\n\n```csharp\nvar api = new OpenAIClient();\nvar conversation = new List\u003CIResponseItem>\n{\n    new Message(Role.System, \"You are a helpful assistant.\"),\n    new Message(Role.User, \"What time is it?\"),\n};\nvar tools = new List\u003CTool>\n{\n    Tool.GetOrCreateTool(typeof(DateTimeUtility), nameof(DateTimeUtility.GetDateTime))\n};\nvar request = new CreateResponseRequest(conversation, Model.GPT5_Nano, tools: tools);\n\nasync Task StreamCallback(string @event, IServerSentEvent sseEvent)\n{\n    switch (sseEvent)\n    {\n        case Message messageItem:\n            conversation.Add(messageItem);\n            break;\n        case FunctionToolCall functionToolCall:\n            conversation.Add(functionToolCall);\n            var output = await functionToolCall.InvokeFunctionAsync();\n            conversation.Add(output);\n            await api.ResponsesEndpoint.CreateModelResponseAsync(new(conversation, Model.GPT5_Nano, tools: tools, toolChoice: \"none\"), StreamCallback);\n            break;\n    }\n}\n\nvar response = await api.ResponsesEndpoint.CreateModelResponseAsync(request, StreamCallback);\nvar responseItem = response.Output.LastOrDefault();\nDebug.Log($\"{responseItem.Role}: {responseItem}\");\nresponse.PrintUsage();\n```\n\n#### [Get Response](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fresponses\u002Fget)\n\nRetrieves a model response with the given ID.\n\n```csharp\nvar api = new 
OpenAIClient();\nvar response = await api.ResponsesEndpoint.GetModelResponseAsync(\"response-id\");\nDebug.Log(response.ToString());\n```\n\n#### [List Input Items](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fresponses\u002Finput-items)\n\nReturns a list of input items for a given response.\n\n```csharp\nvar api = new OpenAIClient();\nvar responseInputItems = await api.ResponsesEndpoint.ListInputItemsAsync(\"response-id\");\nforeach (var item in responseInputItems)\n{\n    Debug.Log(item.ToJsonString());\n}\n```\n\n#### [Cancel Response](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fresponses\u002Fcancel)\n\nCancels a model response with the given ID.\n\n> [!NOTE]\n> Only responses created with the background parameter set to true can be cancelled.\n\n```csharp\nvar api = new OpenAIClient();\nvar isCancelled = await api.ResponsesEndpoint.CancelModelResponseAsync(\"response-id\");\nAssert.IsTrue(isCancelled);\n```\n\n#### [Delete Response](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fresponses\u002Fdelete)\n\nDeletes a model response with the given ID.\n\n```csharp\nvar api = new OpenAIClient();\nvar isDeleted = await api.ResponsesEndpoint.DeleteModelResponseAsync(\"response-id\");\nAssert.IsTrue(isDeleted);\n```\n\n---\n\n### [Conversations](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fconversations)\n\nCreate and manage conversations to store and retrieve conversation state across Response API calls.\n\nThe Conversations API is accessed via `OpenAIClient.ConversationsEndpoint`\n\n#### [Create Conversation](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fconversations\u002Fcreate)\n\nCreate a conversation.\n\n```csharp\nvar api = new OpenAIClient();\nconversation = await api.ConversationsEndpoint.CreateConversationAsync(\n    new CreateConversationRequest(new Message(Role.Developer, 
systemPrompt)));\nDebug.Log(conversation.ToString());\n\u002F\u002F use the conversation object when creating responses.\nvar request = await api.ResponsesEndpoint.CreateResponseAsync(\n    new CreateResponseRequest(textInput: \"Hello!\", conversationId: conversation, model: Model.GPT5_Nano));\nvar response = await api.ResponsesEndpoint.CreateModelResponseAsync(request);\nvar responseItem = response.Output.LastOrDefault();\nDebug.Log($\"{responseItem.Role}:{responseItem}\");\nresponse.PrintUsage();\n```\n\n#### [Retrieve Conversation](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fconversations\u002Fretrieve)\n\nGet a conversation by id.\n\n```csharp\nvar api = new OpenAIClient();\nvar conversation = await api.ConversationsEndpoint.GetConversationAsync(\"conversation-id\");\nDebug.Log(conversation.ToString());\n```\n\n#### [Update Conversation](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fconversations\u002Fupdate)\n\nUpdate a conversation with custom metadata.\n\n```csharp\nvar api = new OpenAIClient();\nvar metadata = new Dictionary\u003Cstring, object>\n{\n    { \"favorite_color\", \"blue\" },\n    { \"favorite_food\", \"pizza\" }\n};\nvar updatedConversation = await api.ConversationsEndpoint.UpdateConversationAsync(\"conversation-id\", metadata);\n```\n\n#### [Delete Conversation](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fconversations\u002Fdelete)\n\nDelete a conversation by id.\n\n```csharp\nvar api = new OpenAIClient();\nvar isDeleted = await api.ConversationsEndpoint.DeleteConversationAsync(\"conversation-id\");\nAssert.IsTrue(isDeleted);\n```\n\n#### [List Conversation Items](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fconversations\u002Flist-items)\n\nList all items for a conversation with the given ID.\n\n```csharp\nvar api = new OpenAIClient();\nvar query = new ListQuery(limit: 10);\nvar items = await 
api.ConversationsEndpoint.ListConversationItemsAsync(\"conversation-id\", query);\n\nforeach (var item in items)\n{\n    Debug.Log(item.ToJsonString());\n}\n```\n\n#### [Create Conversation Item](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fconversations\u002Fcreate-item)\n\nCreate a new conversation item for a conversation with the given ID.\n\n```csharp\nvar api = new OpenAIClient();\nvar items = new List\u003CIResponseItem>\n{\n    new Message(Role.User, \"Hello!\"),\n    new Message(Role.Assistant, \"Hi! How can I help you?\")\n}\nvar addedItems = await api.ConversationsEndpoint.CreateConversationItemsAsync(\"conversation-id\", items);\n\nforeach (var item in addedItems)\n{\n    Debug.Log(item.ToJsonString());\n}\n```\n\n#### [Retrieve Conversation Item](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fconversations\u002Fretrieve-item)\n\nGet a conversation item by id.\n\n```csharp\nvar api = new OpenAIClient();\nvar item = await api.ConversationsEndpoint.GetConversationItemAsync(\"conversation-id\", \"item-id\");\nDebug.Log(item.ToJsonString());\n```\n\n#### [Delete Conversation Item](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fconversations\u002Fdelete-item)\n\nDelete a conversation item by id.\n\n```csharp\nvar api = new OpenAIClient();\nvar isDeleted = await api.ConversationsEndpoint.DeleteConversationItemAsync(\"conversation-id\", \"item-id\");\nAssert.IsTrue(isDeleted);\n```\n\n---\n\n### [Realtime](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime)\n\n> [!WARNING]\n> Beta Feature. API subject to breaking changes.\n\n- [Realtime Guide](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Frealtime)\n\nThe Realtime API enables you to build low-latency, multi-modal conversational experiences. 
It currently supports text and audio as both input and output, as well as function calling.\n\nThe Realtime API is accessed via `OpenAIClient.RealtimeEndpoint`\n\n#### Create Realtime Session\n\nHere is a simple example of how to create a realtime session and to send and receive messages from the model.\n\n```csharp\nvar api = new OpenAIClient();\nvar cancellationTokenSource = new CancellationTokenSource();\nvar tools = new List\u003CTool>\n{\n    Tool.FromFunc(\"goodbye\", () =>\n    {\n        cancellationTokenSource.Cancel();\n        return \"Goodbye!\";\n    })\n};\nvar configuration = new SessionConfiguration(Model.GPT4oRealtime, tools: tools);\nusing var session = await api.RealtimeEndpoint.CreateSessionAsync(configuration);\nvar responseTask = session.ReceiveUpdatesAsync\u003CIServerEvent>(ServerEvents, cancellationTokenSource.Token);\nawait session.SendAsync(new ConversationItemCreateRequest(\"Hello!\"));\nawait session.SendAsync(new CreateResponseRequest());\nawait session.SendAsync(new InputAudioBufferAppendRequest(new ReadOnlyMemory\u003Cbyte>(new byte[1024 * 4])), cancellationTokenSource.Token);\nawait session.SendAsync(new ConversationItemCreateRequest(\"GoodBye!\"));\nawait session.SendAsync(new CreateResponseRequest());\nawait responseTask;\n\nvoid ServerEvents(IServerEvent @event)\n{\n    switch (@event)\n    {\n        case ResponseAudioTranscriptResponse transcriptResponse:\n            Debug.Log(transcriptResponse.ToString());\n            break;\n        case ResponseFunctionCallArgumentsResponse functionCallResponse:\n            if (functionCallResponse.IsDone)\n            {\n                ToolCall toolCall = functionCallResponse;\n                toolCall.InvokeFunction();\n            }\n\n            break;\n    }\n}\n```\n\n#### Client Events\n\nThe library implements `IClientEvent` interface for outgoing client sent events.\n\n- 
[`UpdateSessionRequest`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-client-events\u002Fsession\u002Fupdate): Update the session with new session options.\n- [`InputAudioBufferAppendRequest`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-client-events\u002Finput-audio-buffer\u002Fappend): Append audio to the input audio buffer. (Unlike most other client events, the server will not send a confirmation response to this event).\n- [`InputAudioBufferCommitRequest`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-client-events\u002Finput-audio-buffer\u002Fcommit): Commit the input audio buffer. (When in Server VAD mode, the client does not need to send this event).\n- [`InputAudioBufferClearRequest`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-client-events\u002Finput-audio-buffer\u002Fclear): Clear the input audio buffer.\n- [`ConversationItemCreateRequest`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-client-events\u002Fconversation\u002Fitem\u002Fcreate): Create a new conversation item. This is the main way to send user content to the model.\n- [`ConversationItemTruncateRequest`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-client-events\u002Fconversation\u002Fitem\u002Ftruncate): Send this event to truncate a previous assistant message’s audio.\n- [`ConversationItemDeleteRequest`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-client-events\u002Fconversation\u002Fitem\u002Fdelete): Delete a conversation item. This is useful when you want to remove a message from the conversation history.\n- [`CreateResponseRequest`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-client-events\u002Fresponse\u002Fcreate): Create a response from the model. 
Send this event after creating new conversation items or invoking tool calls. This will trigger the model to generate a response.\n- [`ResponseCancelRequest`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-client-events\u002Fresponse\u002Fcancel): Send this event to cancel an in-progress response.\n\n##### Sending Client Events\n\nYou can send client events at any time to the server by calling the `RealtimeSession.SendAsync` method on the session object. The send call will return an `IServerEvent` handle that best represents the appropriate response from the server for that event. This is useful if you want to handle server responses in a more granular way.\n\nIdeally though, you may want to handle all server responses with [`RealtimeSession.ReceiveUpdatesAsync`](#receiving-server-events).\n\n> [!NOTE]\n> The server will not send a confirmation response to the `InputAudioBufferAppendRequest` event.\n\n> [!IMPORTANT]\n> You will also need to send `CreateResponseRequest` to trigger the model to generate a response.\n\n```csharp\nvar serverEvent = await session.SendAsync(new ConversationItemCreateRequest(\"Hello!\"));\nDebug.Log(serverEvent.ToJsonString());\nserverEvent = await session.SendAsync(new CreateResponseRequest());\nDebug.Log(serverEvent.ToJsonString());\n```\n\n#### Server Events\n\nThe library implements `IServerEvent` interface for incoming server sent events.\n\n- [`RealtimeEventError`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Ferror): Returned when an error occurs, which could be a client problem or a server problem.\n- [`SessionResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Fsession): Returned for both a `session.created` and `session.updated` event.\n- 
[`RealtimeConversationResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Fconversation\u002Fcreated): Returned when a new conversation item is created.\n- [`ConversationItemCreatedResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Fconversation\u002Fitem\u002Fcreated): Returned when a new conversation item is created.\n- [`ConversationItemInputAudioTranscriptionResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Fconversation): Returned when the input audio transcription is completed or failed.\n- [`ConversationItemTruncatedResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Fconversation\u002Fitem\u002Ftruncated): Returned when a conversation item is truncated.\n- [`ConversationItemDeletedResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Fconversation\u002Fitem\u002Fdeleted): Returned when a conversation item is deleted.\n- [`InputAudioBufferCommittedResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Finput_audio_buffer\u002Fcommitted): Returned when an input audio buffer is committed, either by the client or automatically in server VAD mode.\n- [`InputAudioBufferClearedResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Finput_audio_buffer\u002Fcleared): Returned when an input audio buffer is cleared.\n- [`InputAudioBufferStartedResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Finput_audio_buffer\u002Fspeech_started): Sent by the server when in server_vad mode to indicate that speech has been detected in the audio buffer. 
This can happen any time audio is added to the buffer (unless speech is already detected). The client may want to use this event to interrupt audio playback or provide visual feedback to the user.\n- [`InputAudioBufferStoppedResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Finput_audio_buffer\u002Fspeech_stopped): Returned in server_vad mode when the server detects the end of speech in the audio buffer.\n- [`RealtimeResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Fresponse): Returned when a response is created or done.\n- [`ResponseOutputItemResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Fresponse\u002Foutput_item): Returned when a response output item is added or done.\n- [`ResponseContentPartResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Fresponse\u002Fcontent_part): Returned when a response content part is added or done.\n- [`ResponseTextResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Fresponse\u002Ftext): Returned when a response text is updated or done.\n- [`ResponseAudioTranscriptResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Fresponse\u002Faudio_transcript): Returned when a response audio transcript is updated or done.\n- [`ResponseAudioResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Fresponse\u002Faudio): Returned when a response audio is updated or done.\n- [`ResponseFunctionCallArgumentsResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Fresponse\u002Ffunction_call_arguments): Returned when a response function call arguments are updated or done.\n- 
[`RateLimitsResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Frate_limits): Returned when rate limits are updated.\n\n##### Receiving Server Events\n\nTo receive server events, you will need to call the `RealtimeSession.ReceiveUpdatesAsync` method on the session object. This method will return a `Task` that will complete when the session is closed or when the cancellation token is triggered. Ideally this method should be called once and runs for the duration of the session.\n\n> [!NOTE]\n> You can also get sent `IClientEvent` callbacks as well by using the `IRealtimeEvent` interface instead of `IServerEvent`.\n\n```csharp\nawait session.ReceiveUpdatesAsync\u003CIServerEvent>(ServerEvents, cancellationTokenSource.Token);\n\nvoid ServerEvents(IServerEvent @event)\n{\n    switch (@event)\n    {\n        case RealtimeEventError error:\n            \u002F\u002F raised anytime an error occurs\n            break;\n        case SessionResponse sessionResponse:\n            \u002F\u002F raised when a session is created or updated\n            break;\n        case RealtimeConversationResponse conversationResponse:\n            \u002F\u002F raised when a new conversation is created\n            break;\n        case ConversationItemCreatedResponse conversationItemCreated:\n            \u002F\u002F raised when a new conversation item is created\n            break;\n        case ConversationItemInputAudioTranscriptionResponse conversationItemTranscription:\n            \u002F\u002F raised when the input audio transcription is completed or failed\n            break;\n        case ConversationItemTruncatedResponse conversationItemTruncated:\n            \u002F\u002F raised when a conversation item is truncated\n            break;\n        case ConversationItemDeletedResponse conversationItemDeleted:\n            \u002F\u002F raised when a conversation item is deleted\n            break;\n        case 
InputAudioBufferCommittedResponse committedResponse:\n            \u002F\u002F raised when an input audio buffer is committed\n            break;\n        case InputAudioBufferClearedResponse clearedResponse:\n            \u002F\u002F raised when an input audio buffer is cleared\n            break;\n        case InputAudioBufferStartedResponse startedResponse:\n            \u002F\u002F raised when speech is detected in the audio buffer\n            break;\n        case InputAudioBufferStoppedResponse stoppedResponse:\n            \u002F\u002F raised when speech stops in the audio buffer\n            break;\n        case RealtimeResponse realtimeResponse:\n            \u002F\u002F raised when a response is created or done\n            break;\n        case ResponseOutputItemResponse outputItemResponse:\n            \u002F\u002F raised when a response output item is added or done\n            break;\n        case ResponseContentPartResponse contentPartResponse:\n            \u002F\u002F raised when a response content part is added or done\n            break;\n        case ResponseTextResponse textResponse:\n            \u002F\u002F raised when a response text is updated or done\n            break;\n        case ResponseAudioTranscriptResponse transcriptResponse:\n            \u002F\u002F raised when a response audio transcript is updated or done\n            break;\n        case ResponseFunctionCallArgumentsResponse functionCallResponse:\n            \u002F\u002F raised when a response function call arguments are updated or done\n            break;\n        case RateLimitsResponse rateLimitsResponse:\n            \u002F\u002F raised when rate limits are updated\n            break;\n    }\n}\n```\n\n---\n\n### [Assistants](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fassistants)\n\n> [!WARNING]\n> Beta Feature. 
API subject to breaking changes.\n\nBuild assistants that can call models and use tools to perform tasks.\n\n- [Assistants Guide](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fassistants)\n- [OpenAI Assistants Cookbook](https:\u002F\u002Fgithub.com\u002Fopenai\u002Fopenai-cookbook\u002Fblob\u002Fmain\u002Fexamples\u002FAssistants_API_overview_python.ipynb)\n\nThe Assistants API is accessed via `OpenAIClient.AssistantsEndpoint`\n\n#### [List Assistants](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fassistants\u002FlistAssistants)\n\nReturns a list of assistants.\n\n```csharp\nvar api = new OpenAIClient();\nvar assistantsList = await api.AssistantsEndpoint.ListAssistantsAsync();\n\nforeach (var assistant in assistantsList.Items)\n{\n    Debug.Log($\"{assistant} -> {assistant.CreatedAt}\");\n}\n```\n\n#### [Create Assistant](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fassistants\u002FcreateAssistant)\n\nCreate an assistant with a model and instructions.\n\n```csharp\nvar api = new OpenAIClient();\nvar request = new CreateAssistantRequest(Model.GPT4o);\nvar assistant = await api.AssistantsEndpoint.CreateAssistantAsync(request);\n```\n\n#### [Retrieve Assistant](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fassistants\u002FgetAssistant)\n\nRetrieves an assistant.\n\n```csharp\nvar api = new OpenAIClient();\nvar assistant = await api.AssistantsEndpoint.RetrieveAssistantAsync(\"assistant-id\");\nDebug.Log($\"{assistant} -> {assistant.CreatedAt}\");\n```\n\n#### [Modify Assistant](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fassistants\u002FmodifyAssistant)\n\nModifies an assistant.\n\n```csharp\nvar api = new OpenAIClient();\nvar createRequest = new CreateAssistantRequest(Model.GPT4_Turbo);\nvar assistant = await api.AssistantsEndpoint.CreateAssistantAsync(createRequest);\nvar modifyRequest = new CreateAssistantRequest(Model.GPT4o);\nvar modifiedAssistant = 
await api.AssistantsEndpoint.ModifyAssistantAsync(assistant.Id, modifyRequest);\n\u002F\u002F OR AssistantExtension for easier use!\nvar modifiedAssistantEx = await assistant.ModifyAsync(modifyRequest);\n```\n\n#### [Delete Assistant](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fassistants\u002FdeleteAssistant)\n\nDelete an assistant.\n\n```csharp\nvar api = new OpenAIClient();\nvar isDeleted = await api.AssistantsEndpoint.DeleteAssistantAsync(\"assistant-id\");\n\u002F\u002F OR AssistantExtension for easier use!\nvar isDeleted = await assistant.DeleteAsync();\nAssert.IsTrue(isDeleted);\n```\n\n#### [Assistant Streaming](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fassistants-streaming)\n\n> [!NOTE]\n> Assistant stream events can be easily added to existing thread calls by passing `Func\u003CIServerSentEvent, Task> streamEventHandler` callback to any existing method that supports streaming.\n\n#### [Threads](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fthreads)\n\nCreate Threads that [Assistants](#assistants) can interact with.\n\nThe Threads API is accessed via `OpenAIClient.ThreadsEndpoint`\n\n##### [Create Thread](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fthreads\u002FcreateThread)\n\nCreate a thread.\n\n```csharp\nvar api = new OpenAIClient();\nvar thread = await api.ThreadsEndpoint.CreateThreadAsync();\nDebug.Log($\"Create thread {thread.Id} -> {thread.CreatedAt}\");\n```\n\n##### [Create Thread and Run](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fruns\u002FcreateThreadAndRun)\n\nCreate a thread and run it in one request.\n\n> See also: [Thread Runs](#thread-runs)\n\n```csharp\nvar api = new OpenAIClient();\nvar assistant = await api.AssistantsEndpoint.CreateAssistantAsync(\n    new CreateAssistantRequest(\n        name: \"Math Tutor\",\n        instructions: \"You are a personal math tutor. 
Answer questions briefly, in a sentence or less.\",\n        model: Model.GPT4o));\nvar messages = new List\u003CMessage> { \"I need to solve the equation `3x + 11 = 14`. Can you help me?\" };\nvar threadRequest = new CreateThreadRequest(messages);\nvar run = await assistant.CreateThreadAndRunAsync(threadRequest);\nDebug.Log($\"Created thread and run: {run.ThreadId} -> {run.Id} -> {run.CreatedAt}\");\n```\n\n###### Create Thread and Run Streaming\n\nCreate a thread and run it in one request while streaming events.\n\n```csharp\nvar api = new OpenAIClient();\nvar tools = new List\u003CTool>\n{\n    Tool.GetOrCreateTool(typeof(WeatherService), nameof(WeatherService.GetCurrentWeatherAsync))\n};\nvar assistantRequest = new CreateAssistantRequest(tools: tools, instructions: \"You are a helpful weather assistant. Use the appropriate unit based on geographical location.\");\nvar assistant = await api.AssistantsEndpoint.CreateAssistantAsync(assistantRequest);\nThreadResponse thread = null;\nasync Task StreamEventHandler(IServerSentEvent streamEvent)\n{\n    switch (streamEvent)\n    {\n        case ThreadResponse threadResponse:\n            thread = threadResponse;\n            break;\n        case RunResponse runResponse:\n            if (runResponse.Status == RunStatus.RequiresAction)\n            {\n                var toolOutputs = await assistant.GetToolOutputsAsync(runResponse);\n\n                foreach (var toolOutput in toolOutputs)\n                {\n                    Debug.Log($\"Tool Output: {toolOutput}\");\n                }\n\n                await runResponse.SubmitToolOutputsAsync(toolOutputs, StreamEventHandler);\n            }\n            break;\n        default:\n            Debug.Log(streamEvent.ToJsonString());\n            break;\n    }\n}\n\nvar run = await assistant.CreateThreadAndRunAsync(\"I'm in Kuala-Lumpur, please tell me what's the temperature now?\", StreamEventHandler);\nrun = await run.WaitForStatusChangeAsync();\nvar messages = 
await thread.ListMessagesAsync();\nforeach (var response in messages.Items.Reverse())\n{\n    Debug.Log($\"{response.Role}: {response.PrintContent()}\");\n}\n```\n\n##### [Retrieve Thread](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fthreads\u002FgetThread)\n\nRetrieves a thread.\n\n```csharp\nvar api = new OpenAIClient();\nvar thread = await api.ThreadsEndpoint.RetrieveThreadAsync(\"thread-id\");\n\u002F\u002F OR if you simply wish to get the latest state of a thread\nthread = await thread.UpdateAsync();\nDebug.Log($\"Retrieve thread {thread.Id} -> {thread.CreatedAt}\");\n```\n\n##### [Modify Thread](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fthreads\u002FmodifyThread)\n\nModifies a thread.\n\n> [!NOTE]\n> Only the metadata can be modified.\n\n```csharp\nvar api = new OpenAIClient();\nvar thread = await api.ThreadsEndpoint.CreateThreadAsync();\nvar metadata = new Dictionary\u003Cstring, string>\n{\n    { \"key\", \"custom thread metadata\" }\n}\nthread = await api.ThreadsEndpoint.ModifyThreadAsync(thread.Id, metadata);\n\u002F\u002F OR use extension method for convenience!\nthread = await thread.ModifyAsync(metadata);\nDebug.Log($\"Modify thread {thread.Id} -> {thread.Metadata[\"key\"]}\");\n```\n\n##### [Delete Thread](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fthreads\u002FdeleteThread)\n\nDelete a thread.\n\n```csharp\nvar api = new OpenAIClient();\nvar isDeleted = await api.ThreadsEndpoint.DeleteThreadAsync(\"thread-id\");\n\u002F\u002F OR use extension method for convenience!\nvar isDeleted = await thread.DeleteAsync();\nAssert.IsTrue(isDeleted);\n```\n\n##### [Thread Messages](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fmessages)\n\nCreate messages within threads.\n\n###### [List Thread Messages](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fmessages\u002FlistMessages)\n\nReturns a list of messages for a given 
thread.\n\n```csharp\nvar api = new OpenAIClient();\nvar messageList = await api.ThreadsEndpoint.ListMessagesAsync(\"thread-id\");\n\u002F\u002F OR use extension method for convenience!\nvar messageList = await thread.ListMessagesAsync();\n\nforeach (var message in messageList.Items)\n{\n    Debug.Log($\"{message.Id}: {message.Role}: {message.PrintContent()}\");\n}\n```\n\n###### [Create Thread Message](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fmessages\u002FcreateMessage)\n\nCreate a message.\n\n```csharp\nvar api = new OpenAIClient();\nvar thread = await api.ThreadsEndpoint.CreateThreadAsync();\nvar request = new CreateMessageRequest(\"Hello world!\");\nvar message = await api.ThreadsEndpoint.CreateMessageAsync(thread.Id, request);\n\u002F\u002F OR use extension method for convenience!\nvar message = await thread.CreateMessageAsync(\"Hello World!\");\nDebug.Log($\"{message.Id}: {message.Role}: {message.PrintContent()}\");\n```\n\n###### [Retrieve Thread Message](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fmessages\u002FgetMessage)\n\nRetrieve a message.\n\n```csharp\nvar api = new OpenAIClient();\nvar message = await api.ThreadsEndpoint.RetrieveMessageAsync(\"thread-id\", \"message-id\");\n\u002F\u002F OR use extension methods for convenience!\nvar message = await thread.RetrieveMessageAsync(\"message-id\");\nvar message = await message.UpdateAsync();\nDebug.Log($\"{message.Id}: {message.Role}: {message.PrintContent()}\");\n```\n\n###### [Modify Thread Message](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fmessages\u002FmodifyMessage)\n\nModify a message.\n\n> [!NOTE]\n> Only the message metadata can be modified.\n\n```csharp\nvar api = new OpenAIClient();\nvar metadata = new Dictionary\u003Cstring, string>\n{\n    { \"key\", \"custom message metadata\" }\n};\nvar message = await api.ThreadsEndpoint.ModifyMessageAsync(\"thread-id\", \"message-id\", metadata);\n\u002F\u002F OR use 
extension method for convenience!\nvar message = await message.ModifyAsync(metadata);\nDebug.Log($\"Modify message metadata: {message.Id} -> {message.Metadata[\"key\"]}\");\n```\n\n##### [Thread Runs](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fruns)\n\nRepresents an execution run on a thread.\n\n###### [List Thread Runs](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fruns\u002FlistRuns)\n\nReturns a list of runs belonging to a thread.\n\n```csharp\nvar api = new OpenAIClient();\nvar runList = await api.ThreadsEndpoint.ListRunsAsync(\"thread-id\");\n\u002F\u002F OR use extension method for convenience!\nvar runList = await thread.ListRunsAsync();\n\nforeach (var run in runList.Items)\n{\n    Debug.Log($\"[{run.Id}] {run.Status} | {run.CreatedAt}\");\n}\n```\n\n###### [Create Thread Run](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fruns\u002FcreateRun)\n\nCreate a run.\n\n```csharp\nvar api = new OpenAIClient();\nvar assistant = await api.AssistantsEndpoint.CreateAssistantAsync(\n    new CreateAssistantRequest(\n        name: \"Math Tutor\",\n        instructions: \"You are a personal math tutor. Answer questions briefly, in a sentence or less.\",\n        model: Model.GPT4o));\nvar thread = await api.ThreadsEndpoint.CreateThreadAsync();\nvar message = await thread.CreateMessageAsync(\"I need to solve the equation `3x + 11 = 14`. Can you help me?\");\nvar run = await thread.CreateRunAsync(assistant);\nDebug.Log($\"[{run.Id}] {run.Status} | {run.CreatedAt}\");\n```\n\n###### Create Thread Run Streaming\n\nCreate a run and stream the events.\n\n```csharp\nvar api = new OpenAIClient();\nvar assistant = await api.AssistantsEndpoint.CreateAssistantAsync(\n    new CreateAssistantRequest(\n        name: \"Math Tutor\",\n        instructions: \"You are a personal math tutor. Answer questions briefly, in a sentence or less. 
Your responses should be formatted in JSON.\",\n        model: Model.GPT4o,\n        responseFormat: ChatResponseFormat.Json));\nvar thread = await api.ThreadsEndpoint.CreateThreadAsync();\nvar message = await thread.CreateMessageAsync(\"I need to solve the equation `3x + 11 = 14`. Can you help me?\");\nvar run = await thread.CreateRunAsync(assistant, async streamEvent =>\n{\n    Debug.Log(streamEvent.ToJsonString());\n    await Task.CompletedTask;\n});\nvar messages = await thread.ListMessagesAsync();\n\nforeach (var response in messages.Items.Reverse())\n{\n    Debug.Log($\"{response.Role}: {response.PrintContent()}\");\n}\n```\n\n###### [Retrieve Thread Run](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fruns\u002FgetRun)\n\nRetrieves a run.\n\n```csharp\nvar api = new OpenAIClient();\nvar run = await api.ThreadsEndpoint.RetrieveRunAsync(\"thread-id\", \"run-id\");\n\u002F\u002F OR use extension method for convenience!\nvar run = await thread.RetrieveRunAsync(\"run-id\");\nvar run = await run.UpdateAsync();\nDebug.Log($\"[{run.Id}] {run.Status} | {run.CreatedAt}\");\n```\n\n###### [Modify Thread Run](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fruns\u002FmodifyRun)\n\nModifies a run.\n\n> [!NOTE]\n> Only the metadata can be modified.\n\n```csharp\nvar api = new OpenAIClient();\nvar metadata = new Dictionary\u003Cstring, string>\n{\n    { \"key\", \"custom run metadata\" }\n};\nvar run = await api.ThreadsEndpoint.ModifyRunAsync(\"thread-id\", \"run-id\", metadata);\n\u002F\u002F OR use extension method for convenience!\nvar run = await run.ModifyAsync(metadata);\nDebug.Log($\"Modify run {run.Id} -> {run.Metadata[\"key\"]}\");\n```\n\n###### [Thread Submit Tool Outputs to Run](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fruns\u002FsubmitToolOutputs)\n\nWhen a run has the status: `requires_action` and `required_action.type` is `submit_tool_outputs`, this endpoint can be used to submit 
the outputs from the tool calls once they're all completed.\nAll outputs must be submitted in a single request.\n\n> [!NOTE]\n> See [Create Thread and Run Streaming](#create-thread-and-run-streaming) example on how to stream tool output events.\n\n```csharp\nvar api = new OpenAIClient();\nvar tools = new List\u003CTool>\n{\n    \u002F\u002F Use a predefined tool\n    Tool.Retrieval, Tool.CodeInterpreter,\n    \u002F\u002F Or create a tool from a type and the name of the method you want to use for function calling\n    Tool.GetOrCreateTool(typeof(WeatherService), nameof(WeatherService.GetCurrentWeatherAsync)),\n    \u002F\u002F Pass in an instance of an object to call a method on it\n    Tool.GetOrCreateTool(api.ImagesEndPoint, nameof(ImagesEndpoint.GenerateImageAsync)),\n    \u002F\u002F Define func\u003C,> callbacks\n    Tool.FromFunc(\"name_of_func\", () => { \u002F* callback function *\u002F }),\n    Tool.FromFunc\u003CT1,T2,TResult>(\"func_with_multiple_params\", (t1, t2) => { \u002F* logic that calculates return value *\u002F return tResult; })\n};\nvar assistantRequest = new CreateAssistantRequest(tools: tools, instructions: \"You are a helpful weather assistant. 
Use the appropriate unit based on geographical location.\");\nvar testAssistant = await api.AssistantsEndpoint.CreateAssistantAsync(assistantRequest);\nvar run = await testAssistant.CreateThreadAndRunAsync(\"I'm in Kuala-Lumpur, please tell me what's the temperature now?\");\n\u002F\u002F waiting while run is Queued and InProgress\nrun = await run.WaitForStatusChangeAsync();\n\n\u002F\u002F Invoke all of the tool call functions and return the tool outputs.\nvar toolOutputs = await testAssistant.GetToolOutputsAsync(run.RequiredAction.SubmitToolOutputs.ToolCalls);\n\nforeach (var toolOutput in toolOutputs)\n{\n    Debug.Log($\"tool call output: {toolOutput.Output}\");\n}\n\u002F\u002F submit the tool outputs\nrun = await run.SubmitToolOutputsAsync(toolOutputs);\n\u002F\u002F waiting while run in Queued and InProgress\nrun = await run.WaitForStatusChangeAsync();\nvar messages = await run.ListMessagesAsync();\n\nforeach (var message in messages.Items.OrderBy(response => response.CreatedAt))\n{\n    Debug.Log($\"{message.Role}: {message.PrintContent()}\");\n}\n```\n\n##### [Thread Structured Outputs](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Fstructured-outputs)\n\nStructured Outputs is the evolution of JSON mode. While both ensure valid JSON is produced, only Structured Outputs ensure schema adherence.\n\n> [!IMPORTANT]\n>\n> - When using JSON mode, always instruct the model to produce JSON via some message in the conversation, for example via your system message. If you don't include an explicit instruction to generate JSON, the model may generate an unending stream of whitespace and the request may run continually until it reaches the token limit. To help ensure you don't forget, the API will throw an error if the string \"JSON\" does not appear somewhere in the context.\n> - The JSON in the message the model returns may be partial (i.e. 
cut off) if `finish_reason` is length, which indicates the generation exceeded max_tokens or the conversation exceeded the token limit. To guard against this, check `finish_reason` before parsing the response.\n\nFirst define the structure of your responses. These will be used as your schema.\nThese are the objects you'll deserialize to, so be sure to use standard Json object models.\n\n```csharp\npublic class MathResponse\n{\n    [JsonProperty(\"steps\")]\n    public IReadOnlyList\u003CMathStep> Steps { get; private set; }\n\n    [JsonProperty(\"final_answer\")]\n    public string FinalAnswer { get; private set; }\n}\n\npublic class MathStep\n{\n    [JsonProperty(\"explanation\")]\n    public string Explanation { get; private set; }\n\n    [JsonProperty(\"output\")]\n    public string Output { get; private set; }\n}\n```\n\nTo use, simply specify the `MathResponse` type as a generic constraint in either `CreateAssistantAsync`, `CreateRunAsync`, or `CreateThreadAndRunAsync`.\n\n```csharp\nvar api = new OpenAIClient();\nvar assistant = await api.AssistantsEndpoint.CreateAssistantAsync\u003CMathResponse>(\n    new CreateAssistantRequest(\n        name: \"Math Tutor\",\n        instructions: \"You are a helpful math tutor. 
Guide the user through the solution step by step.\",\n        model: \"gpt-4o-2024-08-06\"));\nThreadResponse thread = null;\n\ntry\n{\n    async Task StreamEventHandler(IServerSentEvent @event)\n    {\n        try\n        {\n            switch (@event)\n            {\n                case MessageResponse message:\n                    if (message.Status != MessageStatus.Completed)\n                    {\n                        Debug.Log(@event.ToJsonString());\n                        break;\n                    }\n\n                    var mathResponse = message.FromSchema\u003CMathResponse>();\n\n                    for (var i = 0; i \u003C mathResponse.Steps.Count; i++)\n                    {\n                        var step = mathResponse.Steps[i];\n                        Debug.Log($\"Step {i}: {step.Explanation}\");\n                        Debug.Log($\"Result: {step.Output}\");\n                    }\n\n                    Debug.Log($\"Final Answer: {mathResponse.FinalAnswer}\");\n                    break;\n                default:\n                    Debug.Log(@event.ToJsonString());\n                    break;\n            }\n        }\n        catch (Exception e)\n        {\n            Debug.Log(e);\n            throw;\n        }\n\n        await Task.CompletedTask;\n    }\n\n    var run = await assistant.CreateThreadAndRunAsync(\"how can I solve 8x + 7 = -23\", StreamEventHandler);\n    thread = await run.GetThreadAsync();\n    run = await run.WaitForStatusChangeAsync();\n    Debug.Log($\"Created thread and run: {run.ThreadId} -> {run.Id} -> {run.CreatedAt}\");\n    var messages = await thread.ListMessagesAsync();\n\n    foreach (var response in messages.Items.OrderBy(response => response.CreatedAt))\n    {\n        Debug.Log($\"{response.Role}: {response.PrintContent()}\");\n    }\n}\nfinally\n{\n    await assistant.DeleteAsync(deleteToolResources: thread == null);\n\n    if (thread != null)\n    {\n        var isDeleted = await 
thread.DeleteAsync(deleteToolResources: true);\n    }\n}\n```\n\nYou can also manually create json schema json string as well, but you will be responsible for deserializing your response data:\n\n```csharp\nvar api = new OpenAIClient();\nvar mathSchema = new JsonSchema(\"math_response\", @\"\n{\n  \"\"type\"\": \"\"object\"\",\n  \"\"properties\"\": {\n    \"\"steps\"\": {\n      \"\"type\"\": \"\"array\"\",\n      \"\"items\"\": {\n        \"\"type\"\": \"\"object\"\",\n        \"\"properties\"\": {\n          \"\"explanation\"\": {\n            \"\"type\"\": \"\"string\"\"\n          },\n          \"\"output\"\": {\n            \"\"type\"\": \"\"string\"\"\n          }\n        },\n        \"\"required\"\": [\n          \"\"explanation\"\",\n          \"\"output\"\"\n        ],\n        \"\"additionalProperties\"\": false\n      }\n    },\n    \"\"final_answer\"\": {\n      \"\"type\"\": \"\"string\"\"\n    }\n  },\n  \"\"required\"\": [\n    \"\"steps\"\",\n    \"\"final_answer\"\"\n  ],\n  \"\"additionalProperties\"\": false\n}\");\nvar assistant = await api.AssistantsEndpoint.CreateAssistantAsync(\n    new CreateAssistantRequest(\n        name: \"Math Tutor\",\n        instructions: \"You are a helpful math tutor. 
Guide the user through the solution step by step.\",\n        model: \"gpt-4o-2024-08-06\",\n        jsonSchema: mathSchema));\nThreadResponse thread = null;\n\ntry\n{\n    var run = await assistant.CreateThreadAndRunAsync(\"how can I solve 8x + 7 = -23\",\n        async @event =>\n        {\n            Debug.Log(@event.ToJsonString());\n            await Task.CompletedTask;\n        });\n    thread = await run.GetThreadAsync();\n    run = await run.WaitForStatusChangeAsync();\n    Debug.Log($\"Created thread and run: {run.ThreadId} -> {run.Id} -> {run.CreatedAt}\");\n    var messages = await thread.ListMessagesAsync();\n\n    foreach (var response in messages.Items)\n    {\n        Debug.Log($\"{response.Role}: {response.PrintContent()}\");\n    }\n}\nfinally\n{\n    await assistant.DeleteAsync(deleteToolResources: thread == null);\n\n    if (thread != null)\n    {\n        var isDeleted = await thread.DeleteAsync(deleteToolResources: true);\n        Assert.IsTrue(isDeleted);\n    }\n}\n```\n\n###### [List Thread Run Steps](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fruns\u002FlistRunSteps)\n\nReturns a list of run steps belonging to a run.\n\n```csharp\nvar api = new OpenAIClient();\nvar runStepList = await api.ThreadsEndpoint.ListRunStepsAsync(\"thread-id\", \"run-id\");\n\u002F\u002F OR use extension method for convenience!\nvar runStepList = await run.ListRunStepsAsync();\n\nforeach (var runStep in runStepList.Items)\n{\n    Debug.Log($\"[{runStep.Id}] {runStep.Status} {runStep.CreatedAt} -> {runStep.ExpiresAt}\");\n}\n```\n\n###### [Retrieve Thread Run Step](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fruns\u002FgetRunStep)\n\nRetrieves a run step.\n\n```csharp\nvar api = new OpenAIClient();\nvar runStep = await api.ThreadsEndpoint.RetrieveRunStepAsync(\"thread-id\", \"run-id\", \"step-id\");\n\u002F\u002F OR use extension method for convenience!\nvar runStep = await 
run.RetrieveRunStepAsync(\"step-id\");\nvar runStep = await runStep.UpdateAsync();\nDebug.Log($\"[{runStep.Id}] {runStep.Status} {runStep.CreatedAt} -> {runStep.ExpiresAt}\");\n```\n\n###### [Cancel Thread Run](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fruns\u002FcancelRun)\n\nCancels a run that is `in_progress`.\n\n```csharp\nvar api = new OpenAIClient();\nvar isCancelled = await api.ThreadsEndpoint.CancelRunAsync(\"thread-id\", \"run-id\");\n\u002F\u002F OR use extension method for convenience!\nvar isCancelled = await run.CancelAsync();\nAssert.IsTrue(isCancelled);\n```\n\n#### [Vector Stores](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores)\n\nVector stores are used to store files for use by the `file_search` tool.\n\n- [File Search Guide](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fassistants\u002Ftools\u002Ffile-search)\n\nThe Vector Stores API is accessed via `OpenAIClient.VectorStoresEndpoint`\n\n##### [List Vector Stores](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores\u002Flist)\n\nReturns a list of vector stores.\n\n```csharp\nvar api = new OpenAIClient();\nvar vectorStores = await api.VectorStoresEndpoint.ListVectorStoresAsync();\n\nforeach (var vectorStore in vectorStores.Items)\n{\n    Debug.Log(vectorStore);\n}\n```\n\n##### [Create Vector Store](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores\u002Fcreate)\n\nCreate a vector store.\n\n```csharp\nvar api = new OpenAIClient();\nvar createVectorStoreRequest = new CreateVectorStoreRequest(\"test-vector-store\");\nvar vectorStore = await api.VectorStoresEndpoint.CreateVectorStoreAsync(createVectorStoreRequest);\nDebug.Log(vectorStore);\n```\n\n##### [Retrieve Vector Store](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores\u002Fretrieve)\n\nRetrieves a vector store.\n\n```csharp\nvar api = new OpenAIClient();\nvar 
vectorStore = await api.VectorStoresEndpoint.GetVectorStoreAsync(\"vector-store-id\");\nDebug.Log(vectorStore);\n```\n\n##### [Modify Vector Store](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores\u002Fmodify)\n\nModifies a vector store.\n\n```csharp\nvar api = new OpenAIClient();\nvar metadata = new Dictionary\u003Cstring, object> { { \"Test\", DateTime.UtcNow } };\nvar vectorStore = await api.VectorStoresEndpoint.ModifyVectorStoreAsync(\"vector-store-id\", metadata: metadata);\nDebug.Log(vectorStore);\n```\n\n##### [Delete Vector Store](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores\u002Fdelete)\n\nDelete a vector store.\n\n```csharp\nvar api = new OpenAIClient();\nvar isDeleted = await api.VectorStoresEndpoint.DeleteVectorStoreAsync(\"vector-store-id\");\nAssert.IsTrue(isDeleted);\n```\n\n##### [Vector Store Files](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores-files)\n\nVector store files represent files inside a vector store.\n\n- [File Search Guide](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fassistants\u002Ftools\u002Ffile-search)\n\n###### [List Vector Store Files](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores-files\u002FlistFiles)\n\nReturns a list of vector store files.\n\n```csharp\nvar api = new OpenAIClient();\nvar files = await api.VectorStoresEndpoint.ListVectorStoreFilesAsync(\"vector-store-id\");\n\nforeach (var file in files.Items)\n{\n    Debug.Log(file);\n}\n```\n\n###### [Create Vector Store File](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores-files\u002FcreateFile)\n\nCreate a vector store file by attaching a file to a vector store.\n\n```csharp\nvar api = new OpenAIClient();\nvar file = await api.VectorStoresEndpoint.CreateVectorStoreFileAsync(\"vector-store-id\", \"file-id\", new 
ChunkingStrategy(ChunkingStrategyType.Static));\nDebug.Log(file);\n```\n\n###### [Retrieve Vector Store File](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores-files\u002FgetFile)\n\nRetrieves a vector store file.\n\n```csharp\nvar api = new OpenAIClient();\nvar file = await api.VectorStoresEndpoint.GetVectorStoreFileAsync(\"vector-store-id\", \"vector-store-file-id\");\nDebug.Log(file);\n```\n\n###### [Delete Vector Store File](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores-files\u002FdeleteFile)\n\nDelete a vector store file. This will remove the file from the vector store but the file itself will not be deleted. To delete the file, use the delete file endpoint.\n\n```csharp\nvar api = new OpenAIClient();\nvar isDeleted = await api.VectorStoresEndpoint.DeleteVectorStoreFileAsync(\"vector-store-id\", vectorStoreFile);\nAssert.IsTrue(isDeleted);\n```\n\n##### [Vector Store File Batches](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores-file-batches)\n\nVector store files represent files inside a vector store.\n\n- [File Search Guide](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fassistants\u002Ftools\u002Ffile-search)\n\n###### [Create Vector Store File Batch](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores-file-batches\u002FcreateBatch)\n\nCreate a vector store file batch.\n\n```csharp\nvar api = new OpenAIClient();\nvar files = new List\u003Cstring> { \"file_id_1\",\"file_id_2\" };\nvar vectorStoreFileBatch = await api.VectorStoresEndpoint.CreateVectorStoreFileBatchAsync(\"vector-store-id\", files);\nDebug.Log(vectorStoreFileBatch);\n```\n\n###### [Retrieve Vector Store File Batch](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores-file-batches\u002FgetBatch)\n\nRetrieves a vector store file batch.\n\n```csharp\nvar api = new OpenAIClient();\nvar vectorStoreFileBatch = await 
api.VectorStoresEndpoint.GetVectorStoreFileBatchAsync(\"vector-store-id\", \"vector-store-file-batch-id\");\n\u002F\u002F you can also use convenience methods!\nvectorStoreFileBatch = await vectorStoreFileBatch.UpdateAsync();\nvectorStoreFileBatch = await vectorStoreFileBatch.WaitForStatusChangeAsync();\n```\n\n###### [List Files In Vector Store Batch](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores-file-batches\u002FlistBatchFiles)\n\nReturns a list of vector store files in a batch.\n\n```csharp\nvar api = new OpenAIClient();\nvar files = await api.VectorStoresEndpoint.ListVectorStoreBatchFilesAsync(\"vector-store-id\", \"vector-store-file-batch-id\");\n\nforeach (var file in files.Items)\n{\n    Debug.Log(file);\n}\n```\n\n###### [Cancel Vector Store File Batch](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores-file-batches\u002FcancelBatch)\n\nCancel a vector store file batch. This attempts to cancel the processing of files in this batch as soon as possible.\n\n```csharp\nvar api = new OpenAIClient();\nvar isCancelled = await api.VectorStoresEndpoint.CancelVectorStoreFileBatchAsync(\"vector-store-id\", \"vector-store-file-batch-id\");\n```\n\n---\n\n### [Chat](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fchat)\n\nGiven a chat conversation, the model will return a chat completion response.\n\nThe Chat API is accessed via `OpenAIClient.ChatEndpoint`\n\n#### [Chat Completions](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fchat\u002Fcreate)\n\nCreates a completion for the chat message\n\n```csharp\nvar api = new OpenAIClient();\nvar messages = new List\u003CMessage>\n{\n    new Message(Role.System, \"You are a helpful assistant.\"),\n    new Message(Role.User, \"Who won the world series in 2020?\"),\n    new Message(Role.Assistant, \"The Los Angeles Dodgers won the World Series in 2020.\"),\n    new Message(Role.User, \"Where was it 
played?\"),\n};\nvar chatRequest = new ChatRequest(messages, Model.GPT4o);\nvar response = await api.ChatEndpoint.GetCompletionAsync(chatRequest);\nvar choice = response.FirstChoice;\nDebug.Log($\"[{choice.Index}] {choice.Message.Role}: {choice.Message} | Finish Reason: {choice.FinishReason}\");\n```\n\n#### [Chat Streaming](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fchat\u002Fcreate#chat\u002Fcreate-stream)\n\n```csharp\nvar api = new OpenAIClient();\nvar messages = new List\u003CMessage>\n{\n    new Message(Role.System, \"You are a helpful assistant.\"),\n    new Message(Role.User, \"Who won the world series in 2020?\"),\n    new Message(Role.Assistant, \"The Los Angeles Dodgers won the World Series in 2020.\"),\n    new Message(Role.User, \"Where was it played?\"),\n};\nvar chatRequest = new ChatRequest(messages);\nvar response = await api.ChatEndpoint.StreamCompletionAsync(chatRequest, async partialResponse =>\n{\n    Debug.Log(partialResponse.FirstChoice.Delta.ToString());\n    await Task.CompletedTask;\n});\nvar choice = response.FirstChoice;\nDebug.Log($\"[{choice.Index}] {choice.Message.Role}: {choice.Message} | Finish Reason: {choice.FinishReason}\");\n```\n\n#### [Chat Tools](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Ffunction-calling)\n\n```csharp\nvar api = new OpenAIClient();\nvar messages = new List\u003CMessage>\n{\n    new(Role.System, \"You are a helpful weather assistant. Always prompt the user for their location.\"),\n    new Message(Role.User, \"What's the weather like today?\"),\n};\n\nforeach (var message in messages)\n{\n    Debug.Log($\"{message.Role}: {message}\");\n}\n\n\u002F\u002F Define the tools that the assistant is able to use:\n\u002F\u002F 1. Get a list of all the static methods decorated with FunctionAttribute\nvar tools = Tool.GetAllAvailableTools(includeDefaults: false, forceUpdate: true, clearCache: true);\n\u002F\u002F 2. 
Define a custom list of tools:\nvar tools = new List\u003CTool>\n{\n    Tool.GetOrCreateTool(objectInstance, \"TheNameOfTheMethodToCall\"),\n    Tool.FromFunc(\"a_custom_name_for_your_function\", ()=> { \u002F* Some logic to run *\u002F })\n};\nvar chatRequest = new ChatRequest(messages, tools: tools, toolChoice: \"auto\");\nvar response = await api.ChatEndpoint.GetCompletionAsync(chatRequest);\nmessages.Add(response.FirstChoice.Message);\n\nDebug.Log($\"{response.FirstChoice.Message.Role}: {response.FirstChoice} | Finish Reason: {response.FirstChoice.FinishReason}\");\n\nvar locationMessage = new Message(Role.User, \"I'm in Glasgow, Scotland\");\nmessages.Add(locationMessage);\nDebug.Log($\"{locationMessage.Role}: {locationMessage.Content}\");\nchatRequest = new ChatRequest(messages, tools: tools, toolChoice: \"auto\");\nresponse = await api.ChatEndpoint.GetCompletionAsync(chatRequest);\n\nmessages.Add(response.FirstChoice.Message);\n\nif (response.FirstChoice.FinishReason == \"stop\")\n{\n    Debug.Log($\"{response.FirstChoice.Message.Role}: {response.FirstChoice} | Finish Reason: {response.FirstChoice.FinishReason}\");\n\n    var unitMessage = new Message(Role.User, \"Fahrenheit\");\n    messages.Add(unitMessage);\n    Debug.Log($\"{unitMessage.Role}: {unitMessage.Content}\");\n    chatRequest = new ChatRequest(messages, tools: tools, toolChoice: \"auto\");\n    response = await api.ChatEndpoint.GetCompletionAsync(chatRequest);\n}\n\n\u002F\u002F iterate over all tool calls and invoke them\nforeach (var toolCall in response.FirstChoice.Message.ToolCalls)\n{\n    Debug.Log($\"{response.FirstChoice.Message.Role}: {toolCall.Function.Name} | Finish Reason: {response.FirstChoice.FinishReason}\");\n    Debug.Log($\"{toolCall.Function.Arguments}\");\n    \u002F\u002F Invokes function to get a generic json result to return for tool call.\n    var functionResult = await toolCall.InvokeFunctionAsync();\n    \u002F\u002F If you know the return type and do additional 
processing you can use generic overload\n    var functionResult = await toolCall.InvokeFunctionAsync\u003Cstring>();\n    messages.Add(new Message(toolCall, functionResult));\n    Debug.Log($\"{Role.Tool}: {functionResult}\");\n}\n\u002F\u002F System: You are a helpful weather assistant.\n\u002F\u002F User: What's the weather like today?\n\u002F\u002F Assistant: Sure, may I know your current location? | Finish Reason: stop\n\u002F\u002F User: I'm in Glasgow, Scotland\n\u002F\u002F Assistant: GetCurrentWeather | Finish Reason: tool_calls\n\u002F\u002F {\n\u002F\u002F   \"location\": \"Glasgow, Scotland\",\n\u002F\u002F   \"unit\": \"celsius\"\n\u002F\u002F }\n\u002F\u002F Tool: The current weather in Glasgow, Scotland is 39°C.\n```\n\n#### [Chat Vision](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Fvision)\n\n> [!WARNING]\n> Beta Feature. API subject to breaking changes.\n\n```csharp\nvar api = new OpenAIClient();\nvar messages = new List\u003CMessage>\n{\n    new Message(Role.System, \"You are a helpful assistant.\"),\n    new Message(Role.User, new List\u003CContent>\n    {\n        \"What's in this image?\",\n        new ImageUrl(\"https:\u002F\u002Fupload.wikimedia.org\u002Fwikipedia\u002Fcommons\u002Fthumb\u002Fd\u002Fdd\u002FGfp-wisconsin-madison-the-nature-boardwalk.jpg\u002F2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\", ImageDetail.Low)\n    })\n};\nvar chatRequest = new ChatRequest(messages, model: Model.GPT4o);\nvar response = await api.ChatEndpoint.GetCompletionAsync(chatRequest);\nDebug.Log($\"{response.FirstChoice.Message.Role}: {response.FirstChoice.Message.Content} | Finish Reason: {response.FirstChoice.FinishDetails}\");\n```\n\nYou can even pass in a `Texture2D`!\n\n```csharp\nvar api = new OpenAIClient();\nvar messages = new List\u003CMessage>\n{\n    new Message(Role.System, \"You are a helpful assistant.\"),\n    new Message(Role.User, new List\u003CContent>\n    {\n        \"What's in this image?\",\n        
texture\n    })\n};\nvar chatRequest = new ChatRequest(messages, model: Model.GPT4o);\nvar result = await api.ChatEndpoint.GetCompletionAsync(chatRequest);\nDebug.Log($\"{result.FirstChoice.Message.Role}: {result.FirstChoice} | Finish Reason: {result.FirstChoice.FinishDetails}\");\n```\n\n#### [Chat Audio](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Faudio)\n\n```csharp\nvar api = new OpenAIClient();\nvar messages = new List\u003CMessage>\n{\n    new Message(Role.System, \"You are a helpful assistant.\"),\n    new Message(Role.User, \"Is a golden retriever a good family dog?\")\n};\nvar chatRequest = new ChatRequest(messages, Model.GPT4oAudio, audioConfig: Voice.Alloy);\nvar response = await api.ChatEndpoint.GetCompletionAsync(chatRequest);\nDebug.Log($\"{response.FirstChoice.Message.Role}: {response.FirstChoice} | Finish Reason: {response.FirstChoice.FinishDetails}\");\naudioSource.PlayOneShot(response.FirstChoice.Message.AudioOutput.AudioClip);\n```\n\n#### [Chat Structured Outputs](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Fstructured-outputs)\n\nThe evolution of  [Json Mode](#chat-json-mode). While both ensure valid JSON is produced, only Structured Outputs ensure schema adherence.\n\n> [!IMPORTANT]\n>\n> - When using JSON mode, always instruct the model to produce JSON via some message in the conversation, for example via your system message. If you don't include an explicit instruction to generate JSON, the model may generate an unending stream of whitespace and the request may run continually until it reaches the token limit. To help ensure you don't forget, the API will throw an error if the string \"JSON\" does not appear somewhere in the context.\n> - The JSON in the message the model returns may be partial (i.e. cut off) if `finish_reason` is length, which indicates the generation exceeded max_tokens or the conversation exceeded the token limit. 
To guard against this, check `finish_reason` before parsing the response.\n\nFirst define the structure of your responses. These will be used as your schema.\nThese are the objects you'll deserialize to, so be sure to use standard Json object models.\n\n```csharp\npublic class MathResponse\n{\n    [JsonProperty(\"steps\")]\n    public IReadOnlyList\u003CMathStep> Steps { get; private set; }\n\n    [JsonProperty(\"final_answer\")]\n    public string FinalAnswer { get; private set; }\n}\n\npublic class MathStep\n{\n    [JsonProperty(\"explanation\")]\n    public string Explanation { get; private set; }\n\n    [JsonProperty(\"output\")]\n    public string Output { get; private set; }\n}\n```\n\nTo use, simply specify the `MathResponse` type as a generic constraint when requesting a completion.\n\n```csharp\nvar api = new OpenAIClient();\nvar messages = new List\u003CMessage>\n{\n    new(Role.System, \"You are a helpful math tutor. Guide the user through the solution step by step.\"),\n    new(Role.User, \"how can I solve 8x + 7 = -23\")\n};\n\nvar chatRequest = new ChatRequest(messages, model: \"gpt-4o-2024-08-06\");\nvar (mathResponse, chatResponse) = await api.ChatEndpoint.GetCompletionAsync\u003CMathResponse>(chatRequest);\n\nfor (var i = 0; i \u003C mathResponse.Steps.Count; i++)\n{\n    var step = mathResponse.Steps[i];\n    Debug.Log($\"Step {i}: {step.Explanation}\");\n    Debug.Log($\"Result: {step.Output}\");\n}\n\nDebug.Log($\"Final Answer: {mathResponse.FinalAnswer}\");\nchatResponse.GetUsage();\n```\n\n#### [Chat Json Mode](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Ftext-generation\u002Fjson-mode)\n\n> [!IMPORTANT]\n>\n> - When using JSON mode, always instruct the model to produce JSON via some message in the conversation, for example via your system message. 
If you don't include an explicit instruction to generate JSON, the model may generate an unending stream of whitespace and the request may run continually until it reaches the token limit. To help ensure you don't forget, the API will throw an error if the string \"JSON\" does not appear somewhere in the context.\n> - The JSON in the message the model returns may be partial (i.e. cut off) if `finish_reason` is length, which indicates the generation exceeded max_tokens or the conversation exceeded the token limit. To guard against this, check `finish_reason` before parsing the response.\n> - JSON mode will not guarantee the output matches any specific schema, only that it is valid and parses without errors.\n\n```csharp\nvar messages = new List\u003CMessage>\n{\n    new Message(Role.System, \"You are a helpful assistant designed to output JSON.\"),\n    new Message(Role.User, \"Who won the world series in 2020?\"),\n};\nvar chatRequest = new ChatRequest(messages, Model.GPT4o, responseFormat: ChatResponseFormat.Json);\nvar response = await api.ChatEndpoint.GetCompletionAsync(chatRequest);\n\nforeach (var choice in response.Choices)\n{\n    Debug.Log($\"[{choice.Index}] {choice.Message.Role}: {choice} | Finish Reason: {choice.FinishReason}\");\n}\n\nresponse.GetUsage();\n```\n\n---\n\n### [Audio](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Faudio)\n\nConverts audio into text.\n\nThe Audio API is accessed via `OpenAIClient.AudioEndpoint`\n\n#### [Create Speech](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Faudio\u002FcreateSpeech)\n\nGenerates audio from the input text.\n\n```csharp\nvar api = new OpenAIClient();\nvar request = new SpeechRequest(\"Hello world!\");\nvar speechClip = await api.AudioEndpoint.GetSpeechAsync(request);\naudioSource.PlayOneShot(speechClip);\nDebug.Log(speechClip);\n```\n\n##### [Stream Speech]\n\nGenerate streamed audio from the input text.\n\n```csharp\nvar api = new OpenAIClient();\nvar 
request = new SpeechRequest(\"Hello world!\", responseFormat: SpeechResponseFormat.PCM);\nvar speechClip = await api.AudioEndpoint.GetSpeechAsync(request, partialClip =>\n{\n    audioSource.PlayOneShot(partialClip);\n});\nDebug.Log(speechClip);\n```\n\n> [!NOTE]\n> Checkout any of the demo scenes for best practices on how to handle playback with `OnAudioFilterRead`.\n\n#### [Create Transcription](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Faudio\u002FcreateTranscription)\n\nTranscribes audio into the input language.\n\n```csharp\nvar api = new OpenAIClient();\nvar request = new AudioTranscriptionRequest(audioClip, language: \"en\");\nvar result = await api.AudioEndpoint.CreateTranscriptionAsync(request);\nDebug.Log(result);\n```\n\nYou can also get detailed information using `verbose_json` to get timestamp granularities:\n\n```csharp\nvar api = new OpenAIClient();\nusing var request = new AudioTranscriptionRequest(transcriptionAudio, responseFormat: AudioResponseFormat.Verbose_Json, timestampGranularity: TimestampGranularity.Word, temperature: 0.1f, language: \"en\");\nvar response = await api.AudioEndpoint.CreateTranscriptionTextAsync(request);\n\nforeach (var word in response.Words)\n{\n    Debug.Log($\"[{word.Start}-{word.End}] \\\"{word.Word}\\\"\");\n}\n```\n\n#### [Create Translation](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Faudio\u002FcreateTranslation)\n\nTranslates audio into English.\n\n```csharp\nvar api = new OpenAIClient();\nvar request = new AudioTranslationRequest(audioClip);\nvar result = await api.AudioEndpoint.CreateTranslationAsync(request);\nDebug.Log(result);\n```\n\n---\n\n### [Images](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fimages)\n\nGiven a prompt and\u002For an input image, the model will generate a new image.\n\nThe Images API is accessed via `OpenAIClient.ImagesEndpoint`\n\n#### [Create 
Image](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fimages\u002Fcreate)\n\nCreates an image given a prompt.\n\n```csharp\nvar api = new OpenAIClient();\nvar request = new ImageGenerationRequest(\"A house riding a velociraptor\", Models.Model.DallE_3);\nvar imageResults = await api.ImagesEndPoint.GenerateImageAsync(request);\n\nforeach (var result in imageResults)\n{\n    Debug.Log(result.ToString());\n    Assert.IsNotNull(result.Texture);\n}\n```\n\n#### [Edit Image](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fimages\u002Fcreate-edit)\n\nCreates an edited or extended image given an original image and a prompt.\n\n```csharp\nvar api = new OpenAIClient();\nvar request = new ImageEditRequest(Path.GetFullPath(imageAssetPath), Path.GetFullPath(maskAssetPath), \"A sunlit indoor lounge area with a pool containing a flamingo\", size: ImageSize.Small);\nvar imageResults = await api.ImagesEndPoint.CreateImageEditAsync(request);\n\nforeach (var result in imageResults)\n{\n    Debug.Log(result.ToString());\n    Assert.IsNotNull(result.Texture);\n}\n```\n\n#### [Create Image Variation](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fimages\u002Fcreate-variation)\n\nCreates a variation of a given image.\n\n```csharp\nvar api = new OpenAIClient();\nvar request = new ImageVariationRequest(imageTexture, size: ImageSize.Small);\nvar imageResults = await api.ImagesEndPoint.CreateImageVariationAsync(request);\n\nforeach (var result in imageResults)\n{\n    Debug.Log(result.ToString());\n    Assert.IsNotNull(result.Texture);\n}\n```\n\nAlternatively, the endpoint can directly take a Texture2D with Read\u002FWrite enabled and Compression set to None.\n\n```csharp\nvar api = new OpenAIClient();\nvar request = new ImageVariationRequest(imageTexture, size: ImageSize.Small);\nvar imageResults = await api.ImagesEndPoint.CreateImageVariationAsync(request);\n\nforeach (var result in imageResults)\n{\n    
Debug.Log(result.ToString());\n    Assert.IsNotNull(result.Texture);\n}\n```\n\n---\n\n### [Files](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Ffiles)\n\nFiles are used to upload documents that can be used with features like [Fine-tuning](#fine-tuning).\n\nThe Files API is accessed via `OpenAIClient.FilesEndpoint`\n\n#### [List Files](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Ffiles\u002Flist)\n\nReturns a list of files that belong to the user's organization.\n\n```csharp\nvar api = new OpenAIClient();\nvar fileList = await api.FilesEndpoint.ListFilesAsync();\n\nforeach (var file in fileList)\n{\n    Debug.Log($\"{file.Id} -> {file.Object}: {file.FileName} | {file.Size} bytes\");\n}\n```\n\n#### [Upload File](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Ffiles\u002Fcreate)\n\nUpload a file that can be used across various endpoints. The size of all the files uploaded by one organization can be up to 100 GB.\n\nThe size of individual files can be a maximum of 512 MB. See the Assistants Tools guide to learn more about the types of files supported. 
The Fine-tuning API only supports .jsonl files.\n\n```csharp\nvar api = new OpenAIClient();\nvar file = await api.FilesEndpoint.UploadFileAsync(\"path\u002Fto\u002Fyour\u002Ffile.jsonl\", FilePurpose.FineTune);\nDebug.Log(file.Id);\n```\n\n#### [Delete File](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Ffiles\u002Fdelete)\n\nDelete a file.\n\n```csharp\nvar api = new OpenAIClient();\nvar isDeleted = await api.FilesEndpoint.DeleteFileAsync(fileId);\nAssert.IsTrue(isDeleted);\n```\n\n#### [Retrieve File Info](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Ffiles\u002Fretrieve)\n\nReturns information about a specific file.\n\n```csharp\nvar api = new OpenAIClient();\nvar file = await api.FilesEndpoint.GetFileInfoAsync(fileId);\nDebug.Log($\"{file.Id} -> {file.Object}: {file.FileName} | {file.Size} bytes\");\n```\n\n#### [Download File Content](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Ffiles\u002Fretrieve-content)\n\nDownloads the file content to the specified directory.\n\n```csharp\nvar api = new OpenAIClient();\nvar downloadedFilePath = await api.FilesEndpoint.DownloadFileAsync(fileId);\nDebug.Log(downloadedFilePath);\nAssert.IsTrue(File.Exists(downloadedFilePath));\n```\n\n---\n\n### [Fine Tuning](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Ffine-tuning)\n\nManage fine-tuning jobs to tailor a model to your specific training data.\n\nRelated guide: [Fine-tune models](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Ffine-tuning)\n\nThe Files API is accessed via `OpenAIClient.FineTuningEndpoint`\n\n#### [Create Fine Tune Job](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Ffine-tuning\u002Fcreate)\n\nCreates a job that fine-tunes a specified model from a given dataset.\n\nResponse includes details of the enqueued job including job status and the name of the fine-tuned models once complete.\n\n```csharp\nvar api = new 
OpenAIClient();\nvar fileId = \"file-abc123\";\nvar request = new CreateFineTuneRequest(fileId);\nvar job = await api.FineTuningEndpoint.CreateJobAsync(Model.GPT3_5_Turbo, request);\nDebug.Log($\"Started {job.Id} | Status: {job.Status}\");\n```\n\n#### [List Fine Tune Jobs](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Ffine-tuning\u002Flist)\n\nList your organization's fine-tuning jobs.\n\n```csharp\nvar api = new OpenAIClient();\nvar jobList = await api.FineTuningEndpoint.ListJobsAsync();\n\nforeach (var job in jobList.Items.OrderByDescending(job => job.CreatedAt)))\n{\n    Debug.Log($\"{job.Id} -> {job.CreatedAt} | {job.Status}\");\n}\n```\n\n#### [Retrieve Fine Tune Job Info](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Ffine-tuning\u002Fretrieve)\n\nGets info about the fine-tune job.\n\n```csharp\nvar api = new OpenAIClient();\nvar job = await api.FineTuningEndpoint.GetJobInfoAsync(fineTuneJob);\nDebug.Log($\"{job.Id} -> {job.CreatedAt} | {job.Status}\");\n```\n\n#### [Cancel Fine Tune Job](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Ffine-tuning\u002Fcancel)\n\nImmediately cancel a fine-tune job.\n\n```csharp\nvar api = new OpenAIClient();\nvar isCancelled = await api.FineTuningEndpoint.CancelFineTuneJobAsync(fineTuneJob);\nAssert.IsTrue(isCancelled);\n```\n\n#### [List Fine Tune Job Events](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Ffine-tuning\u002Flist-events)\n\nGet status updates for a fine-tuning job.\n\n```csharp\nvar api = new OpenAIClient();\nvar eventList = await api.FineTuningEndpoint.ListJobEventsAsync(fineTuneJob);\nDebug.Log($\"{fineTuneJob.Id} -> status: {fineTuneJob.Status} | event count: {eventList.Events.Count}\");\n\nforeach (var @event in eventList.Items.OrderByDescending(@event => @event.CreatedAt))\n{\n    Debug.Log($\"  {@event.CreatedAt} [{@event.Level}] {@event.Message}\");\n}\n```\n\n---\n\n### 
[Batches](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fbatch)\n\nCreate large batches of API requests for asynchronous processing. The Batch API returns completions within 24 hours for a 50% discount.\n\n- [Batch Guide](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Fbatch)\n\nThe Batches API is accessed via `OpenAIClient.BatchesEndpoint`\n\n#### [List Batches](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fbatch\u002Flist)\n\nList your organization's batches.\n\n```csharp\nvar api = new OpenAIClient();\nvar batches = await api.BatchEndpoint.ListBatchesAsync();\n\nforeach (var batch in listResponse.Items)\n{\n    Debug.Log(batch);\n}\n```\n\n#### [Create Batch](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fbatch\u002Fcreate)\n\nCreates and executes a batch from an uploaded file of requests\n\n```csharp\nvar api = new OpenAIClient();\nvar batchRequest = new CreateBatchRequest(\"file-id\", Endpoint.ChatCompletions);\nvar batch = await api.BatchEndpoint.CreateBatchAsync(batchRequest);\n```\n\n#### [Retrieve Batch](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fbatch\u002Fretrieve)\n\nRetrieves a batch.\n\n```csharp\nvar api = new OpenAIClient();\nvar batch = await api.BatchEndpoint.RetrieveBatchAsync(\"batch-id\");\n\u002F\u002F you can also use convenience methods!\nbatch = await batch.UpdateAsync();\nbatch = await batch.WaitForStatusChangeAsync();\n```\n\n#### [Cancel Batch](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fbatch\u002Fcancel)\n\nCancels an in-progress batch. 
The batch will be in status cancelling for up to 10 minutes, before changing to cancelled, where it will have partial results (if any) available in the output file.\n\n```csharp\nvar api = new OpenAIClient();\nvar isCancelled = await api.BatchEndpoint.CancelBatchAsync(batch);\nAssert.IsTrue(isCancelled);\n```\n\n---\n\n### [Embeddings](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fembeddings)\n\nGet a vector representation of a given input that can be easily consumed by machine learning models and algorithms.\n\nRelated guide: [Embeddings](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Fembeddings)\n\nThe Edits API is accessed via `OpenAIClient.EmbeddingsEndpoint`\n\n#### [Create Embeddings](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fembeddings\u002Fcreate)\n\nCreates an embedding vector representing the input text.\n\n```csharp\nvar api = new OpenAIClient();\nvar response = await api.EmbeddingsEndpoint.CreateEmbeddingAsync(\"The food was delicious and the waiter...\", Models.Embedding_Ada_002);\nDebug.Log(response);\n```\n\n---\n\n### [Moderations](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fmoderations)\n\nGiven a input text, outputs if the model classifies it as violating OpenAI's content policy.\n\nRelated guide: [Moderations](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Fmoderation)\n\nThe Moderations API can be accessed via `OpenAIClient.ModerationsEndpoint`\n\n#### [Create Moderation](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fmoderations\u002Fcreate)\n\nClassifies if text violates OpenAI's Content Policy.\n\n```csharp\nvar api = new OpenAIClient();\nvar isViolation = await api.ModerationsEndpoint.GetModerationAsync(\"I want to kill them.\");\nAssert.IsTrue(isViolation);\n```\n\nAdditionally you can also get the scores of a given input.\n\n```csharp\nvar api = new OpenAIClient();\nvar response = await 
api.ModerationsEndpoint.CreateModerationAsync(new ModerationsRequest(\"I love you\"));\nAssert.IsNotNull(response);\nDebug.Log(response.Results?[0]?.Scores?.ToString());\n```\n\n---\n","# OpenAI\n\n[![Discord](https:\u002F\u002Fimg.shields.io\u002Fdiscord\u002F855294214065487932.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https:\u002F\u002Fdiscord.gg\u002FxQgMW9ufN4) [![openupm](https:\u002F\u002Fimg.shields.io\u002Fnpm\u002Fv\u002Fcom.openai.unity?label=openupm&registry_uri=https:\u002F\u002Fpackage.openupm.com)](https:\u002F\u002Fopenupm.com\u002Fpackages\u002Fcom.openai.unity\u002F) [![openupm](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fdynamic\u002Fjson?color=brightgreen&label=downloads&query=%24.downloads&suffix=%2Fmonth&url=https%3A%2F%2Fpackage.openupm.com%2Fdownloads%2Fpoint%2Flast-month%2Fcom.openai.unity)](https:\u002F\u002Fopenupm.com\u002Fpackages\u002Fcom.openai.unity\u002F)\n\n基于 [OpenAI-DotNet](https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002FOpenAI-DotNet)\n\n一个用于 [Unity](https:\u002F\u002Funity.com\u002F) 的 [OpenAI](https:\u002F\u002Fopenai.com\u002F) 包，可通过其 RESTful API 使用。\n本项目为独立开发，并非官方库，本人与 OpenAI 无任何关联。使用前需拥有 OpenAI API 账号。\n\n***所有版权、商标、标识及资产均属于其各自所有者。***\n\n## 安装\n\n需要 Unity 2021.3 LTS 或更高版本。\n\n推荐通过 Unity 包管理器和 [OpenUPM](https:\u002F\u002Fopenupm.com\u002Fpackages\u002Fcom.openai.unity) 进行安装。\n\n### 通过 Unity 包管理器和 OpenUPM\n\n#### 终端\n\n```bash\nopenupm add com.openai.unity\n```\n\n#### 手动操作\n\n- 打开您的 Unity 项目设置。\n- 添加 OpenUPM 包注册表：\n  - 名称：`OpenUPM`\n  - URL：`https:\u002F\u002Fpackage.openupm.com`\n  - 作用域：\n    - `com.openai`\n    - `com.utilities`\n\n![scoped-registries](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FRageAgainstThePixel_com.openai.unity_readme_3c8844747a35.png)\n\n- 打开 Unity 包管理器窗口。\n- 将注册表从 Unity 切换到 `My Registries`。\n- 添加 `OpenAI` 包。\n\n### 通过 Unity 包管理器和 Git URL\n\n> [!WARNING]\n> 本仓库依赖其他仓库！您需要自行添加这些依赖。\n\n- 打开您的 Unity 包管理器。\n- 从 Git URL 
添加包：`https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity.git#upm`\n  - [com.utilities.async](https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.utilities.async)\n  - [com.utilities.websockets](https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.utilities.websockets)\n  - [com.utilities.extensions](https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.utilities.extensions)\n  - [com.utilities.rest](https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.utilities.rest)\n  - [com.utilities.audio](https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.utilities.audio)\n  - [com.utilities.encoder.wav](https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.utilities.encoder.wav)\n\n---\n\n## [文档](https:\u002F\u002Frageagainstthepixel.github.io\u002FOpenAI-DotNet)\n\n> 欢迎查看我们的全新 API 文档！\n\n\u003Chttps:\u002F\u002Frageagainstthepixel.github.io\u002FOpenAI-DotNet>\n\n### 目录\n\n- [身份验证](#authentication)\n- [Azure OpenAI](#azure-openai)\n  - [Azure Active Directory 身份验证](#azure-active-directory-authentication)\n- [OpenAI API 代理](#openai-api-proxy)\n- [模型](#models)\n  - [列出模型](#list-models)\n  - [获取模型](#retrieve-model)\n  - [删除微调模型](#delete-fine-tuned-model)\n- [响应](#responses)\n  - [创建响应](#create-response)\n    - [简单的文本响应](#simple-response-with-text)\n    - [带有函数调用的流式响应](#streaming-response-with-function-calling)\n  - [获取响应](#get-response)\n  - [列出输入项](#list-input-items)\n  - [取消响应](#cancel-response)\n  - [删除响应](#delete-response)\n- [对话](#conversations)\n  - [创建对话](#create-conversation)\n  - [获取对话](#retrieve-conversation)\n  - [更新对话](#update-conversation)\n  - [删除对话](#delete-conversation)\n  - [列出对话项](#list-conversation-items)\n  - [创建对话项](#create-conversation-item)\n  - [获取对话项](#retrieve-conversation-item)\n  - [删除对话项](#delete-conversation-item)\n- [实时](#realtime)\n  - [创建实时会话](#create-realtime-session)\n  - [客户端事件](#client-events)\n    - [发送客户端事件](#sending-client-events)\n  - 
[服务器事件](#server-events)\n    - [接收服务器事件](#receiving-server-events)\n- [助手](#assistants)\n  - [列出助手](#list-assistants)\n  - [创建助手](#create-assistant)\n  - [获取助手](#retrieve-assistant)\n  - [修改助手](#modify-assistant)\n  - [删除助手](#delete-assistant)\n  - [助手流式处理](#assistant-streaming)\n  - [线程](#threads)\n    - [创建线程](#create-thread)\n    - [创建线程并运行](#create-thread-and-run)\n      - [流式处理](#create-thread-and-run-streaming)\n    - [获取线程](#retrieve-thread)\n    - [修改线程](#modify-thread)\n    - [删除线程](#delete-thread)\n    - [线程消息](#thread-messages)\n      - [列出消息](#list-thread-messages)\n      - [创建消息](#create-thread-message)\n      - [获取消息](#retrieve-thread-message)\n      - [修改消息](#modify-thread-message)\n    - [线程运行](#thread-runs)\n      - [列出运行](#list-thread-runs)\n      - [创建运行](#create-thread-run)\n        - [流式处理](#create-thread-run-streaming)\n      - [获取运行](#retrieve-thread-run)\n      - [修改运行](#modify-thread-run)\n      - [向运行提交工具输出](#thread-submit-tool-outputs-to-run)\n      - [结构化输出](#thread-structured-outputs)\n      - [列出运行步骤](#list-thread-run-steps)\n      - [获取运行步骤](#retrieve-thread-run-step)\n      - [取消运行](#cancel-thread-run)\n  - [向量存储](#vector-stores)\n    - [列出向量存储](#list-vector-stores)\n    - [创建向量存储](#create-vector-store)\n    - [获取向量存储](#retrieve-vector-store)\n    - [修改向量存储](#modify-vector-store)\n    - [删除向量存储](#delete-vector-store)\n    - [向量存储文件](#vector-store-files)\n      - [列出向量存储文件](#list-vector-store-files)\n      - [创建向量存储文件](#create-vector-store-file)\n      - [获取向量存储文件](#retrieve-vector-store-file)\n      - [删除向量存储文件](#delete-vector-store-file)\n    - [向量存储文件批次](#vector-store-file-batches)\n      - [创建向量存储文件批次](#create-vector-store-file-batch)\n      - [获取向量存储文件批次](#retrieve-vector-store-file-batch)\n      - [列出向量存储批次中的文件](#list-files-in-vector-store-batch)\n      - [取消向量存储文件批次](#cancel-vector-store-file-batch)\n- [聊天](#chat)\n  - [聊天完成](#chat-completions)\n  - [流式处理](#chat-streaming)\n  - [工具](#chat-tools)\n  - [视觉](#chat-vision)\n  - 
[音频](#chat-audio)\n  - [结构化输出](#chat-structured-outputs)\n  - [JSON 模式](#chat-json-mode)\n- [音频](#audio)\n  - [创建语音](#create-speech)\n    - [流式语音](#stream-speech)\n  - [创建转录](#create-transcription)\n  - [创建翻译](#create-translation)\n- [图片](#images)\n  - [创建图片](#create-image)\n  - [编辑图片](#edit-image)\n  - [创建图片变体](#create-image-variation)\n- [文件](#files)\n  - [列出文件](#list-files)\n  - [上传文件](#upload-file)\n  - [删除文件](#delete-file)\n  - [获取文件信息](#retrieve-file-info)\n  - [下载文件内容](#download-file-content)\n- [微调](#fine-tuning)\n  - [创建微调任务](#create-fine-tune-job)\n  - [列出微调任务](#list-fine-tune-jobs)\n  - [获取微调任务信息](#retrieve-fine-tune-job-info)\n  - [取消微调任务](#cancel-fine-tune-job)\n  - [列出微调任务事件](#list-fine-tune-job-events)\n- [批处理](#batches)\n  - [列出批处理](#list-batches)\n  - [创建批处理](#create-batch)\n  - [获取批处理](#retrieve-batch)\n  - [取消批处理](#cancel-batch)\n- [嵌入](#embeddings)\n  - [创建嵌入](#create-embeddings)\n- [内容审核](#moderations)\n  - [创建内容审核](#create-moderation)\n\n### [身份验证](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fauthentication)\n\n提供 API 密钥的方式有四种，按优先级顺序排列如下：\n\n> [!WARNING]\n> 建议使用环境变量来加载 API 密钥，而不是将其硬编码在源代码中。不建议在生产环境中使用此方法，而仅适用于接收用户凭据、本地测试和快速入门场景。\n\n1. [通过构造函数直接传递密钥](#pass-keys-directly-with-constructor) :warning:\n2. [Unity 可脚本化对象](#unity-scriptable-object) :warning:\n3. [从配置文件加载密钥](#load-key-from-configuration-file)\n4. 
[使用系统环境变量](#use-system-environment-variables)\n\n您可以在初始化 API 时使用 `OpenAIAuthentication`，如下所示：\n\n#### 通过构造函数直接传递密钥\n\n> [!WARNING]\n> 建议使用环境变量来加载 API 密钥，而不是将其硬编码在源代码中。不建议在生产环境中使用此方法，而仅适用于接收用户凭据、本地测试和快速入门场景。\n\n```csharp\nvar api = new OpenAIClient(\"sk-apiKey\");\n```\n\n或者手动创建一个 `OpenAIAuthentication` 对象：\n\n```csharp\nvar api = new OpenAIClient(new OpenAIAuthentication(\"sk-apiKey\", \"org-yourOrganizationId\", \"proj_yourProjectId\"));\n```\n\n#### Unity 可脚本化对象\n\n您可以将密钥直接保存到位于 `Assets\u002FResources` 文件夹中的可脚本化对象中。\n\n可以通过项目面板的上下文菜单创建一个新的 `OpenAIConfiguration` 可脚本化对象。\n\n> [!WARNING]\n> 请注意不要将此文件提交到版本控制系统中，因为其他人将能够看到您的 API 密钥。建议使用 [OpenAI-DotNet-Proxy](#openai-api-proxy)，并通过您首选的 OAuth 提供商对用户进行身份验证。\n\n![创建新的 OpenAIConfiguration](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FRageAgainstThePixel_com.openai.unity_readme_227e8313d044.png)\n\n#### 从配置文件加载密钥\n\n尝试从配置文件中加载 API 密钥，默认情况下为当前目录下的 `.openai` 文件，也可以选择向上遍历目录树或在用户的主目录中查找。\n\n要创建配置文件，可以新建一个名为 `.openai` 的文本文件，并在其中添加以下内容：\n\n> [!NOTE]\n> 组织 ID 和项目 ID 是可选的。\n\n##### JSON 格式\n\n```json\n{\n  \"apiKey\": \"sk-aaaabbbbbccccddddd\",\n  \"organizationId\": \"org-yourOrganizationId\",\n  \"projectId\": \"proj_yourProjectId\"\n}\n```\n\n##### 已弃用的格式\n\n```shell\nOPENAI_API_KEY=sk-aaaabbbbbccccddddd\nOPENAI_ORGANIZATION_ID=org-yourOrganizationId\nOPENAI_PROJECT_ID=proj_yourProjectId\n```\n\n您还可以通过调用 `OpenAIAuthentication` 中的静态方法，直接从已知路径加载配置文件：\n\n- 加载指定目录中的默认 `.openai` 配置文件：\n\n```csharp\nvar api = new OpenAIClient(new OpenAIAuthentication().LoadFromDirectory(\"path\u002Fto\u002Fyour\u002Fdirectory\"));\n```\n\n- 从特定路径加载配置文件。文件不必命名为 `.openai`，只要符合 JSON 格式即可：\n\n```csharp\nvar api = new OpenAIClient(new OpenAIAuthentication().LoadFromPath(\"path\u002Fto\u002Fyour\u002Ffile.json\"));\n```\n\n#### 使用系统环境变量\n\n使用系统的环境变量来指定要使用的 API 密钥和组织。\n\n- 使用 `OPENAI_API_KEY` 指定您的 API 密钥。\n- 使用 `OPENAI_ORGANIZATION_ID` 指定组织。\n- 使用 `OPENAI_PROJECT_ID` 指定项目。\n\n```csharp\nvar api = new OpenAIClient(new 
OpenAIAuthentication().LoadFromEnvironment());\n```\n\n---\n\n### [Azure OpenAI](https:\u002F\u002Flearn.microsoft.com\u002Fen-us\u002Fazure\u002Fcognitive-services\u002Fopenai)\n\n您也可以选择使用 Microsoft 的 Azure OpenAI 部署。\n\n您可以在 Azure Playground 中找到所需的信息，点击 `View Code` 按钮，查看类似如下的 URL：\n\n```markdown\nhttps:\u002F\u002F{your-resource-name}.openai.azure.com\u002Fopenai\u002Fdeployments\u002F{deployment-id}\u002Fchat\u002Fcompletions?api-version={api-version}\n```\n\n- `your-resource-name` 您的 Azure OpenAI 资源名称。\n- `deployment-id` 您部署模型时选择的部署名称。\n- `api-version` 用于此操作的 API 版本，格式为 YYYY-MM-DD。\n\n要将客户端设置为使用您的部署，您需要在客户端构造函数中传入 `OpenAISettings`。\n\n```csharp\nvar auth = new OpenAIAuthentication(\"sk-apiKey\");\nvar settings = new OpenAISettings(resourceName: \"your-resource-name\", deploymentId: \"deployment-id\", apiVersion: \"api-version\");\nvar api = new OpenAIClient(auth, settings);\n```\n\n#### [Azure Active Directory 身份验证](https:\u002F\u002Flearn.microsoft.com\u002Fen-us\u002Fazure\u002Fcognitive-services\u002Fopenai\u002Freference#authentication)\n\n按照常规方式使用 MSAL（[Microsoft 身份验证库 for .NET](https:\u002F\u002Fgithub.com\u002FAzureAD\u002Fmicrosoft-authentication-library-for-dotnet)）获取访问令牌，然后在创建 `OpenAIAuthentication` 时使用该访问令牌。此外，在创建 `OpenAISettings` 时，请确保将 `useAzureActiveDirectory` 设置为 `true`。\n\n[教程：调用 Web API 的桌面应用程序：获取令牌](https:\u002F\u002Flearn.microsoft.com\u002Fen-us\u002Fazure\u002Factive-directory\u002Fdevelop\u002Fscenario-desktop-acquire-token?tabs=dotnet)\n\n```csharp\n\u002F\u002F 使用任何 MSAL 方法获取访问令牌\nvar accessToken = result.AccessToken;\nvar auth = new OpenAIAuthentication(accessToken);\nvar settings = new OpenAISettings(resourceName: \"your-resource\", deploymentId: \"deployment-id\", apiVersion: \"api-version\", useActiveDirectoryAuthentication: true);\nvar api = new OpenAIClient(auth, settings);\n```\n\n---\n\n### [OpenAI API 
代理](https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002FOpenAI-DotNet\u002Fblob\u002Fmain\u002FOpenAI-DotNet-Proxy\u002FReadme.md)\n\n[![NuGet 版本 (OpenAI-DotNet-Proxy)](https:\u002F\u002Fimg.shields.io\u002Fnuget\u002Fv\u002FOpenAI-DotNet-Proxy.svg?label=OpenAI-DotNet-Proxy&logo=nuget)](https:\u002F\u002Fwww.nuget.org\u002Fpackages\u002FOpenAI-DotNet-Proxy\u002F)\n\n在前端应用中直接使用 [OpenAI-DotNet](https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002FOpenAI-DotNet) 或 [com.openai.unity](https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity) 包可能会暴露您的 API 密钥及其他敏感信息。为降低此风险，建议搭建一个中间 API，由该 API 代表您的前端应用向 OpenAI 发送请求。本库既可用于前端配置，也可用于中间层主机配置，从而确保与 OpenAI API 的安全通信。\n\n#### 前端示例\n\n在前端示例中，您需要使用首选的 OAuth 提供商安全地验证用户身份。用户通过身份验证后，再将自定义的身份令牌与后端的 API 密钥进行交换。\n\n请按照以下步骤操作：\n\n1. 使用 [OpenAI-DotNet](https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002FOpenAI-DotNet) 或 [com.openai.unity](https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity) 包创建新项目。\n2. 使用您的 OAuth 提供商对用户进行身份验证。\n3. 身份验证成功后，创建一个新的 `OpenAIAuthentication` 对象，并传入以 `sess-` 为前缀的自定义令牌。\n4. 创建一个新的 `OpenAISettings` 对象，指定中间 API 所在的域名。\n5. 在创建客户端实例时，将新的 `auth` 和 `settings` 对象传递给 `OpenAIClient` 构造函数。\n\n以下是前端设置的示例：\n\n```csharp\nvar authToken = await LoginAsync();\nvar auth = new OpenAIAuthentication($\"sess-{authToken}\");\nvar settings = new OpenAISettings(domain: \"api.your-custom-domain.com\");\nvar api = new OpenAIClient(auth, settings);\n```\n\n通过这种设置，您的前端应用可以安全地与后端通信，而后端将使用 OpenAI-DotNet-Proxy 将请求转发至 OpenAI API。这样可以确保您的 OpenAI API 密钥及其他敏感信息在整个过程中始终保持安全。\n\n#### 后端示例\n\n在此示例中，我们将演示如何在新的 ASP.NET Core Web 应用中设置并使用 `OpenAIProxy`。代理服务器将负责身份验证，并将请求转发至 OpenAI API，从而确保您的 API 密钥及其他敏感信息的安全。\n\n1. 创建一个新的 [ASP.NET Core 极简 Web API](https:\u002F\u002Flearn.microsoft.com\u002Fen-us\u002Faspnet\u002Fcore\u002Ftutorials\u002Fmin-web-api?view=aspnetcore-6.0) 项目。\n2. 
将 OpenAI-DotNet NuGet 包添加到您的项目中。\n   - PowerShell 安装：`Install-Package OpenAI-DotNet-Proxy`\n   - .NET CLI 安装：`dotnet add package OpenAI-DotNet-Proxy`\n   - 手动编辑 .csproj 文件：`\u003CPackageReference Include=\"OpenAI-DotNet-Proxy\" \u002F>`\n3. 创建一个继承自 `AbstractAuthenticationFilter` 的新类，并重写 `ValidateAuthentication` 方法。这将实现 `IAuthenticationFilter` 接口，用于根据内部服务器验证用户会话令牌。\n4. 在 `Program.cs` 中，调用 `OpenAIProxy.CreateWebApplication` 方法创建新的代理 Web 应用程序，并将自定义的 `AuthenticationFilter` 作为类型参数传递。\n5. 按照常规方式创建 `OpenAIAuthentication` 和 `OpenAIClientSettings`，使用您的 API 密钥、组织 ID 或 Azure 设置。\n\n```csharp\npublic partial class Program\n{\n    private class AuthenticationFilter : AbstractAuthenticationFilter\n    {\n        public override async Task ValidateAuthenticationAsync(IHeaderDictionary request)\n        {\n            await Task.CompletedTask; \u002F\u002F 调用远程资源验证令牌\n\n            \u002F\u002F 您需要实现自己的类来正确测试为您终端用户颁发的自定义令牌。\n            if (!request.Authorization.ToString().Contains(TestUserToken))\n            {\n                throw new AuthenticationException(\"用户未授权\");\n            }\n        }\n    }\n\n    public static void Main(string[] args)\n    {\n        var auth = OpenAIAuthentication.LoadFromEnv();\n        var settings = new OpenAIClientSettings(\u002F* 如果使用 Azure OpenAI，请填写自定义设置 *\u002F);\n        using var openAIClient = new OpenAIClient(auth, settings);\n        OpenAIProxy.CreateWebApplication\u003CAuthenticationFilter>(args, openAIClient).Run();\n    }\n}\n```\n\n一旦您设置了代理服务器，终端用户就可以向您的代理 API 发送经过身份验证的请求，而无需直接访问 OpenAI API。代理服务器将处理身份验证并将请求转发至 OpenAI API，从而确保您的 API 密钥及其他敏感信息的安全。\n\n---\n\n### [模型](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fmodels)\n\n列出并描述 API 中可用的各种模型。您可以参考 [模型文档](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fmodels)，了解有哪些模型以及它们之间的区别。\n\n此外，请查看 [模型端点兼容性](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fmodels\u002Fmodel-endpoint-compatibility)，以了解哪些模型适用于哪些端点。\n\n要指定本库中未预定义的自定义模型：\n\n```csharp\nvar model 
= new Model(\"model-id\");\n```\n\n可通过 `OpenAIClient.ModelsEndpoint` 访问模型 API。\n\n#### [列出模型](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fmodels\u002Flist)\n\n列出当前可用的模型，并提供每个模型的基本信息，例如所有者和可用性。\n\n```csharp\nvar api = new OpenAIClient();\nvar models = await api.ModelsEndpoint.GetModelsAsync();\n\nforeach (var model in models)\n{\n    Debug.Log(model.ToString());\n}\n```\n\n#### [获取模型](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fmodels\u002Fretrieve)\n\n获取某个模型的详细信息，包括所有者和权限等基本信息。\n\n```csharp\nvar api = new OpenAIClient();\nvar model = await api.ModelsEndpoint.GetModelDetailsAsync(\"gpt-4o\");\nDebug.Log(model.ToString());\n```\n\n#### [删除微调模型](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Ffine-tunes\u002Fdelete-model)\n\n删除一个微调过的模型。您必须在组织中拥有所有者角色。\n\n```csharp\nvar api = new OpenAIClient();\nvar isDeleted = await api.ModelsEndpoint.DeleteFineTuneModelAsync(\"your-fine-tuned-model\");\nAssert.IsTrue(isDeleted);\n```\n\n---\n\n### [响应](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fresponses)\n\nOpenAI 最先进的模型响应生成接口。支持文本和图像输入，以及文本输出。利用先前响应的输出作为输入，与模型建立有状态的交互。通过内置的文件搜索、网页搜索、计算机操作等工具扩展模型的能力。使用函数调用功能让模型访问外部系统和数据。\n\n- 相关指南：\n  - [快速入门](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fquickstart?api-mode=responses)\n  - [文本输入与输出](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Ftext?api-mode=responses)\n  - [图像输入](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Fimages?api-mode=responses)\n  - [结构化输出](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Fstructured-outputs?api-mode=responses)\n  - [对话状态](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Fconversation-state?api-mode=responses)\n  - [使用工具扩展模型](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Ftools?api-mode=responses)\n\n响应 API 可通过 `OpenAIClient.ResponsesEndpoint` 访问。\n\n#### 
[创建响应](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fresponses\u002Fcreate)\n\n创建模型响应。提供文本或图像输入以生成文本或 JSON 输出。让模型调用您自定义的代码，或使用内置工具（如网页搜索、文件搜索）来将您自己的数据作为模型响应的输入。\n\n##### 简单文本响应\n\n```csharp\nvar api = new OpenAIClient();\nvar response = await api.ResponsesEndpoint.CreateModelResponseAsync(\"给我讲一个关于独角兽的三句话睡前故事。\");\nvar responseItem = response.Output.LastOrDefault();\nDebug.Log($\"{responseItem.Role}: {responseItem}\");\nresponse.PrintUsage();\n```\n\n##### 带函数调用的流式响应\n\n```csharp\nvar api = new OpenAIClient();\nvar conversation = new List\u003CIResponseItem>\n{\n    new Message(Role.System, \"你是一个有用的助手。\"),\n    new Message(Role.User, \"现在几点了？\"),\n};\nvar tools = new List\u003CTool>\n{\n    Tool.GetOrCreateTool(typeof(DateTimeUtility), nameof(DateTimeUtility.GetDateTime))\n};\nvar request = new CreateResponseRequest(conversation, Model.GPT5_Nano, tools: tools);\n\nasync Task StreamCallback(string @event, IServerSentEvent sseEvent)\n{\n    switch (sseEvent)\n    {\n        case Message messageItem:\n            conversation.Add(messageItem);\n            break;\n        case FunctionToolCall functionToolCall:\n            conversation.Add(functionToolCall);\n            var output = await functionToolCall.InvokeFunctionAsync();\n            conversation.Add(output);\n            await api.ResponsesEndpoint.CreateModelResponseAsync(new(conversation, Model.GPT5_Nano, tools: tools, toolChoice: \"none\"), StreamCallback);\n            break;\n    }\n}\n\nvar response = await api.ResponsesEndpoint.CreateModelResponseAsync(request, StreamCallback);\nvar responseItem = response.Output.LastOrDefault();\nDebug.Log($\"{responseItem.Role}: {responseItem}\");\nresponse.PrintUsage();\n```\n\n#### [获取响应](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fresponses\u002Fget)\n\n根据给定的 ID 获取模型响应。\n\n```csharp\nvar api = new OpenAIClient();\nvar response = await 
api.ResponsesEndpoint.GetModelResponseAsync(\"response-id\");\nDebug.Log(response.ToString());\n```\n\n#### [列出输入项](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fresponses\u002Finput-items)\n\n返回指定响应的所有输入项列表。\n\n```csharp\nvar api = new OpenAIClient();\nvar responseInputItems = await api.ResponsesEndpoint.ListInputItemsAsync(\"response-id\");\nforeach (var item in responseInputItems)\n{\n    Debug.Log(item.ToJsonString());\n}\n```\n\n#### [取消响应](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fresponses\u002Fcancel)\n\n根据给定的 ID 取消模型响应。\n\n> [!注意]\n> 只有在创建时将 background 参数设置为 true 的响应才能被取消。\n\n```csharp\nvar api = new OpenAIClient();\nvar isCancelled = await api.ResponsesEndpoint.CancelModelResponseAsync(\"response-id\");\nAssert.IsTrue(isCancelled);\n```\n\n#### [删除响应](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fresponses\u002Fdelete)\n\n根据给定的 ID 删除模型响应。\n\n```csharp\nvar api = new OpenAIClient();\nvar isDeleted = await api.ResponsesEndpoint.DeleteModelResponseAsync(\"response-id\");\nAssert.IsTrue(isDeleted);\n```\n\n---\n\n### [对话](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fconversations)\n\n创建和管理对话，以便在多次响应 API 调用之间存储和检索对话状态。\n\n对话 API 通过 `OpenAIClient.ConversationsEndpoint` 访问。\n\n#### [创建对话](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fconversations\u002Fcreate)\n\n创建一个对话。\n\n```csharp\nvar api = new OpenAIClient();\nconversation = await api.ConversationsEndpoint.CreateConversationAsync(\n    new CreateConversationRequest(new Message(Role.Developer, systemPrompt)));\nDebug.Log(conversation.ToString());\n\u002F\u002F 在创建响应时使用对话对象。\nvar request = await api.ResponsesEndpoint.CreateResponseAsync(\n    new CreateResponseRequest(textInput: \"Hello!\", conversationId: conversation, model: Model.GPT5_Nano));\nvar response = await openAI.ResponsesEndpoint.CreateModelResponseAsync(request);\nvar responseItem = 
response.Output.LastOrDefault();\nDebug.Log($\"{responseItem.Role}:{responseItem}\");\nresponse.PrintUsage();\n```\n\n#### [获取对话](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fconversations\u002Fretrieve)\n\n根据 ID 获取对话。\n\n```csharp\nvar api = new OpenAIClient();\nvar conversation = await api.ConversationsEndpoint.GetConversationAsync(\"conversation-id\");\nDebug.Log(conversation.ToString());\n```\n\n#### [更新对话](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fconversations\u002Fupdate)\n\n使用自定义元数据更新对话。\n\n```csharp\nvar api = new OpenAIClient();\nvar metadata = new Dictionary\u003Cstring, object>\n{\n    { \"favorite_color\", \"blue\" },\n    { \"favorite_food\", \"pizza\" }\n};\nvar updatedConversation = await api.ConversationsEndpoint.UpdateConversationAsync(\"conversation-id\", metadata);\n```\n\n#### [删除对话](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fconversations\u002Fdelete)\n\n根据 ID 删除对话。\n\n```csharp\nvar api = new OpenAIClient();\nvar isDeleted = await api.ConversationsEndpoint.DeleteConversationAsync(\"conversation-id\");\nAssert.IsTrue(isDeleted);\n```\n\n#### [列出对话项](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fconversations\u002Flist-items)\n\n列出具有给定 ID 的对话的所有项。\n\n```csharp\nvar api = new OpenAIClient();\nvar query = new ListQuery(limit: 10);\nvar items = await api.ConversationsEndpoint.ListConversationItemsAsync(\"conversation-id\", query);\n\nforeach (var item in items)\n{\n    Debug.Log(item.ToJsonString());\n}\n```\n\n#### [创建对话项](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fconversations\u002Fcreate-item)\n\n为具有给定 ID 的对话创建一个新的对话项。\n\n```csharp\nvar api = new OpenAIClient();\nvar items = new List\u003CIResponseItem>\n{\n    new Message(Role.User, \"Hello!\"),\n    new Message(Role.Assistant, \"Hi! 
How can I help you?\")\n}\nvar addedItems = await api.ConversationsEndpoint.CreateConversationItemsAsync(\"conversation-id\", items);\n\nforeach (var item in addedItems)\n{\n    Debug.Log(item.ToJsonString());\n}\n```\n\n#### [获取对话项](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fconversations\u002Fretrieve-item)\n\n根据 ID 获取对话项。\n\n```csharp\nvar api = new OpenAIClient();\nvar item = await api.ConversationsEndpoint.GetConversationItemAsync(\"conversation-id\", \"item-id\");\nDebug.Log(item.ToJsonString());\n```\n\n#### [删除对话项](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fconversations\u002Fdelete-item)\n\n根据 ID 删除对话项。\n\n```csharp\nvar api = new OpenAIClient();\nvar isDeleted = await api.ConversationsEndpoint.DeleteConversationItemAsync(\"conversation-id\", \"item-id\");\nAssert.IsTrue(isDeleted);\n```\n\n---\n\n### [实时](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime)\n\n> [!WARNING]\n> 测试功能。API 可能会发生重大变更。\n\n- [实时指南](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Frealtime)\n\n实时 API 使您能够构建低延迟、多模态的对话体验。目前它支持文本和音频作为输入和输出，以及函数调用。\n\n助手 API 通过 `OpenAIClient.RealtimeEndpoint` 访问。\n\n#### 创建实时会话\n\n以下是一个简单的示例，说明如何创建实时会话，并向模型发送和接收消息。\n\n```csharp\nvar api = new OpenAIClient();\nvar cancellationTokenSource = new CancellationTokenSource();\nvar tools = new List\u003CTool>\n{\n    Tool.FromFunc(\"goodbye\", () =>\n    {\n        cancellationTokenSource.Cancel();\n        return \"Goodbye!\";\n    })\n};\nvar configuration = new SessionConfiguration(Model.GPT4oRealtime, tools: tools);\nusing var session = await api.RealtimeEndpoint.CreateSessionAsync(configuration);\nvar responseTask = session.ReceiveUpdatesAsync\u003CIServerEvent>(ServerEvents, cancellationTokenSource.Token);\nawait session.SendAsync(new ConversationItemCreateRequest(\"Hello!\"));\nawait session.SendAsync(new CreateResponseRequest());\nawait session.SendAsync(new InputAudioBufferAppendRequest(new 
ReadOnlyMemory\u003Cbyte>(new byte[1024 * 4])), cancellationTokenSource.Token);\nawait session.SendAsync(new ConversationItemCreateRequest(\"GoodBye!\"));\nawait session.SendAsync(new CreateResponseRequest());\nawait responseTask;\n\nvoid ServerEvents(IServerEvent @event)\n{\n    switch (@event)\n    {\n        case ResponseAudioTranscriptResponse transcriptResponse:\n            Debug.Log(transcriptResponse.ToString());\n            break;\n        case ResponseFunctionCallArgumentsResponse functionCallResponse:\n            if (functionCallResponse.IsDone)\n            {\n                ToolCall toolCall = functionCallResponse;\n                toolCall.InvokeFunction();\n            }\n\n            break;\n    }\n}\n```\n\n#### 客户端事件\n\n该库实现了用于发送客户端事件的 `IClientEvent` 接口。\n\n- [`UpdateSessionRequest`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-client-events\u002Fsession\u002Fupdate)：使用新的会话选项更新会话。\n- [`InputAudioBufferAppendRequest`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-client-events\u002Finput-audio-buffer\u002Fappend)：将音频追加到输入音频缓冲区。（与其他客户端事件不同，服务器不会对此事件发送确认响应）。\n- [`InputAudioBufferCommitRequest`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-client-events\u002Finput-audio-buffer\u002Fcommit)：提交输入音频缓冲区。（在服务器 VAD 模式下，客户端无需发送此事件）。\n- [`InputAudioBufferClearRequest`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-client-events\u002Finput-audio-buffer\u002Fclear)：清空输入音频缓冲区。\n- [`ConversationItemCreateRequest`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-client-events\u002Fconversation\u002Fitem\u002Fcreate)：创建一个新的对话项。这是向模型发送用户内容的主要方式。\n- [`ConversationItemTruncateRequest`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-client-events\u002Fconversation\u002Fitem\u002Ftruncate)：发送此事件以截断先前助手消息的音频。\n- 
[`ConversationItemDeleteRequest`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-client-events\u002Fconversation\u002Fitem\u002Fdelete)：删除一个对话项。当您希望从对话历史中移除某条消息时，这非常有用。\n- [`CreateResponseRequest`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-client-events\u002Fresponse\u002Fcreate)：从模型生成回复。在创建新的对话项或调用工具函数后发送此事件。这将触发模型生成回复。\n- [`ResponseCancelRequest`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-client-events\u002Fresponse\u002Fcancel)：发送此事件以取消正在进行的回复。\n\n##### 发送客户端事件\n\n您可以随时通过调用会话对象上的 `RealtimeSession.SendAsync` 方法向服务器发送客户端事件。发送调用将返回一个 `IServerEvent` 句柄，该句柄最能代表服务器对该事件的适当响应。如果您希望以更细粒度的方式处理服务器响应，这将非常有用。\n\n不过，理想情况下，您可能希望使用 [`RealtimeSession.ReceiveUpdatesAsync`](#receiving-server-events) 来处理所有服务器响应。\n\n> [!注意]\n> 服务器不会对 `InputAudioBufferAppendRequest` 事件发送确认响应。\n\n> [!重要提示]\n> 您还需要发送 `CreateResponseRequest` 以触发模型生成回复。\n\n```csharp\nvar serverEvent = await session.SendAsync(new ConversationItemCreateRequest(\"你好！\"));\nDebug.Log(serverEvent.ToJsonString());\nserverEvent = await session.SendAsync(new CreateResponseRequest());\nDebug.Log(serverEvent.ToJsonString());\n```\n\n#### 服务器事件\n\n该库为传入的服务器发送事件实现了 `IServerEvent` 接口。\n\n- [`RealtimeEventError`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Ferror)：在发生错误时返回，可能是客户端问题或服务器问题。\n- [`SessionResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Fsession)：用于 `session.created` 和 `session.updated` 事件。\n- [`RealtimeConversationResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Fconversation\u002Fcreated)：在创建新对话项时返回。\n- [`ConversationItemCreatedResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Fconversation\u002Fitem\u002Fcreated)：在创建新对话项时返回。\n- 
[`ConversationItemInputAudioTranscriptionResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Fconversation)：在输入音频转录完成或失败时返回。\n- [`ConversationItemTruncatedResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Fconversation\u002Fitem\u002Ftruncated)：在对话项被截断时返回。\n- [`ConversationItemDeletedResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Fconversation\u002Fitem\u002Fdeleted)：在对话项被删除时返回。\n- [`InputAudioBufferCommittedResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Finput_audio_buffer\u002Fcommitted)：在输入音频缓冲区被提交时返回，无论是由客户端提交还是在服务器 VAD 模式下自动提交。\n- [`InputAudioBufferClearedResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Finput_audio_buffer\u002Fcleared)：在输入音频缓冲区被清空时返回。\n- [`InputAudioBufferStartedResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Finput_audio_buffer\u002Fspeech_started)：在服务器 VAD 模式下，当检测到音频缓冲区中有语音时，由服务器发送此事件。每当有音频添加到缓冲区时都可能发生（除非已检测到语音）。客户端可能会利用此事件来中断音频播放或向用户提供视觉反馈。\n- [`InputAudioBufferStoppedResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Finput_audio_buffer\u002Fspeech_stopped)：在服务器 VAD 模式下，当服务器检测到音频缓冲区中的语音结束时返回。\n- [`RealtimeResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Fresponse)：在创建或完成回复时返回。\n- [`ResponseOutputItemResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Fresponse\u002Foutput_item)：在添加或完成回复输出项时返回。\n- [`ResponseContentPartResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Fresponse\u002Fcontent_part)：在添加或完成回复内容部分时返回。\n- 
[`ResponseTextResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Fresponse\u002Ftext)：在更新或完成回复文本时返回。\n- [`ResponseAudioTranscriptResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Fresponse\u002Faudio_transcript)：在更新或完成回复音频转录时返回。\n- [`ResponseAudioResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Fresponse\u002Faudio)：在更新或完成回复音频时返回。\n- [`ResponseFunctionCallArgumentsResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Fresponse\u002Ffunction_call_arguments)：在更新或完成回复函数调用参数时返回。\n- [`RateLimitsResponse`](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Frealtime-server-events\u002Frate_limits)：在速率限制更新时返回。\n\n##### 接收服务器事件\n\n要接收服务器事件，您需要在会话对象上调用 `RealtimeSession.ReceiveUpdatesAsync` 方法。该方法将返回一个 `Task`，当会话关闭或取消令牌触发时，此任务将完成。理想情况下，此方法只需调用一次，并在整个会话期间持续运行。\n\n> [!NOTE]\n> 您也可以通过使用 `IRealtimeEvent` 接口而不是 `IServerEvent` 来获取 `IClientEvent` 回调。\n\n```csharp\nawait session.ReceiveUpdatesAsync\u003CIServerEvent>(ServerEvents, cancellationTokenSource.Token);\n\nvoid ServerEvents(IServerEvent @event)\n{\n    switch (@event)\n    {\n        case RealtimeEventError error:\n            \u002F\u002F 任何错误发生时都会触发\n            break;\n        case SessionResponse sessionResponse:\n            \u002F\u002F 当会话创建或更新时触发\n            break;\n        case RealtimeConversationResponse conversationResponse:\n            \u002F\u002F 当新对话创建时触发\n            break;\n        case ConversationItemCreatedResponse conversationItemCreated:\n            \u002F\u002F 当新对话项创建时触发\n            break;\n        case ConversationItemInputAudioTranscriptionResponse conversationItemTranscription:\n            \u002F\u002F 当输入音频转录完成或失败时触发\n            break;\n        case ConversationItemTruncatedResponse conversationItemTruncated:\n            \u002F\u002F 
当对话项被截断时触发\n            break;\n        case ConversationItemDeletedResponse conversationItemDeleted:\n            \u002F\u002F 当对话项删除时触发\n            break;\n        case InputAudioBufferCommittedResponse committedResponse:\n            \u002F\u002F 当输入音频缓冲区提交时触发\n            break;\n        case InputAudioBufferClearedResponse clearedResponse:\n            \u002F\u002F 当输入音频缓冲区清空时触发\n            break;\n        case InputAudioBufferStartedResponse startedResponse:\n            \u002F\u002F 当音频缓冲区中检测到语音时触发\n            break;\n        case InputAudioBufferStoppedResponse stoppedResponse:\n            \u002F\u002F 当音频缓冲区中的语音停止时触发\n            break;\n        case RealtimeResponse realtimeResponse:\n            \u002F\u002F 当响应创建或完成时触发\n            break;\n        case ResponseOutputItemResponse outputItemResponse:\n            \u002F\u002F 当响应输出项添加或完成时触发\n            break;\n        case ResponseContentPartResponse contentPartResponse:\n            \u002F\u002F 当响应内容部分添加或完成时触发\n            break;\n        case ResponseTextResponse textResponse:\n            \u002F\u002F 当响应文本更新或完成时触发\n            break;\n        case ResponseAudioTranscriptResponse transcriptResponse:\n            \u002F\u002F 当响应音频转录更新或完成时触发\n            break;\n        case ResponseFunctionCallArgumentsResponse functionCallResponse:\n            \u002F\u002F 当响应函数调用参数更新或完成时触发\n            break;\n        case RateLimitsResponse rateLimitsResponse:\n            \u002F\u002F 当速率限制更新时触发\n            break;\n    }\n}\n```\n\n---\n\n\n\n### [助手](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fassistants)\n\n> [!WARNING]\n> 测试功能。API 可能会发生重大变更。\n\n构建可以调用模型并使用工具来执行任务的助手。\n\n- [助手指南](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fassistants)\n- [OpenAI 助手示例库](https:\u002F\u002Fgithub.com\u002Fopenai\u002Fopenai-cookbook\u002Fblob\u002Fmain\u002Fexamples\u002FAssistants_API_overview_python.ipynb)\n\n助手 API 通过 `OpenAIClient.AssistantsEndpoint` 访问。\n\n#### 
[列出助手](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fassistants\u002FlistAssistants)\n\n返回助手列表。\n\n```csharp\nvar api = new OpenAIClient();\nvar assistantsList = await api.AssistantsEndpoint.ListAssistantsAsync();\n\nforeach (var assistant in assistantsList.Items)\n{\n    Debug.Log($\"{assistant} -> {assistant.CreatedAt}\");\n}\n```\n\n#### [创建助手](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fassistants\u002FcreateAssistant)\n\n使用模型和指令创建助手。\n\n```csharp\nvar api = new OpenAIClient();\nvar request = new CreateAssistantRequest(Model.GPT4o);\nvar assistant = await api.AssistantsEndpoint.CreateAssistantAsync(request);\n```\n\n#### [获取助手](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fassistants\u002FgetAssistant)\n\n获取助手信息。\n\n```csharp\nvar api = new OpenAIClient();\nvar assistant = await api.AssistantsEndpoint.RetrieveAssistantAsync(\"assistant-id\");\nDebug.Log($\"{assistant} -> {assistant.CreatedAt}\");\n```\n\n#### [修改助手](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fassistants\u002FmodifyAssistant)\n\n修改助手信息。\n\n```csharp\nvar api = new OpenAIClient();\nvar createRequest = new CreateAssistantRequest(Model.GPT4_Turbo);\nvar assistant = await api.AssistantsEndpoint.CreateAssistantAsync(createRequest);\nvar modifyRequest = new CreateAssistantRequest(Model.GPT4o);\nvar modifiedAssistant = await api.AssistantsEndpoint.ModifyAssistantAsync(assistant.Id, modifyRequest);\n\u002F\u002F 或者使用 AssistantExtension 更方便！\nvar modifiedAssistantEx = await assistant.ModifyAsync(modifyRequest);\n```\n\n#### [删除助手](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fassistants\u002FdeleteAssistant)\n\n删除助手。\n\n```csharp\nvar api = new OpenAIClient();\nvar isDeleted = await api.AssistantsEndpoint.DeleteAssistantAsync(\"assistant-id\");\n\u002F\u002F 或者使用 AssistantExtension 更方便！\nvar isDeleted = await assistant.DeleteAsync();\nAssert.IsTrue(isDeleted);\n```\n\n#### 
[助手流式传输](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fassistants-streaming)\n\n> [!NOTE]\n> 通过将 `Func\u003CIServerSentEvent, Task> streamEventHandler` 回调传递给任何支持流式传输的方法，可以轻松地将助手流事件添加到现有的线程调用中。\n\n#### [线程](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fthreads)\n\n创建助手可以与之交互的线程。\n\n线程 API 通过 `OpenAIClient.ThreadsEndpoint` 访问。\n\n##### [创建线程](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fthreads\u002FcreateThread)\n\n创建线程。\n\n```csharp\nvar api = new OpenAIClient();\nvar thread = await api.ThreadsEndpoint.CreateThreadAsync();\nDebug.Log($\"创建线程 {thread.Id} -> {thread.CreatedAt}\");\n```\n\n##### [创建线程并运行](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fruns\u002FcreateThreadAndRun)\n\n在一个请求中创建线程并运行。\n\n> 另请参阅：[线程运行](#thread-runs)\n\n```csharp\nvar api = new OpenAIClient();\nvar assistant = await api.AssistantsEndpoint.CreateAssistantAsync(\n    new CreateAssistantRequest(\n        name: \"数学辅导老师\",\n        instructions: \"你是一位私人数学辅导老师。请用一句话或更短的回答来解答问题。\",\n        model: Model.GPT4o));\nvar messages = new List\u003CMessage> { \"我需要解方程 `3x + 11 = 14`。你能帮我吗？\" };\nvar threadRequest = new CreateThreadRequest(messages);\nvar run = await assistant.CreateThreadAndRunAsync(threadRequest);\nDebug.Log($\"创建的线程和运行：{run.ThreadId} -> {run.Id} -> {run.CreatedAt}\");\n```\n\n###### 创建线程并流式执行\n\n在一个请求中创建线程并执行，同时流式接收事件。\n\n```csharp\nvar api = new OpenAIClient();\nvar tools = new List\u003CTool>\n{\n    Tool.GetOrCreateTool(typeof(WeatherService), nameof(WeatherService.GetCurrentWeatherAsync))\n};\nvar assistantRequest = new CreateAssistantRequest(tools: tools, instructions: \"你是一位有用的天气助手。请根据地理位置使用合适的单位。\");\nvar assistant = await api.AssistantsEndpoint.CreateAssistantAsync(assistantRequest);\nThreadResponse thread = null;\nasync Task StreamEventHandler(IServerSentEvent streamEvent)\n{\n    switch (streamEvent)\n    {\n        case ThreadResponse threadResponse:\n            thread 
= threadResponse;\n            break;\n        case RunResponse runResponse:\n            if (runResponse.Status == RunStatus.RequiresAction)\n            {\n                var toolOutputs = await assistant.GetToolOutputsAsync(runResponse);\n\n                foreach (var toolOutput in toolOutputs)\n                {\n                    Debug.Log($\"工具输出：{toolOutput}\");\n                }\n\n                await runResponse.SubmitToolOutputsAsync(toolOutputs, StreamEventHandler);\n            }\n            break;\n        default:\n            Debug.Log(streamEvent.ToJsonString());\n            break;\n    }\n}\n\nvar run = await assistant.CreateThreadAndRunAsync(\"我在吉隆坡，请告诉我现在温度是多少？\", StreamEventHandler);\nrun = await run.WaitForStatusChangeAsync();\nvar messages = await thread.ListMessagesAsync();\nforeach (var response in messages.Items.Reverse())\n{\n    Debug.Log($\"{response.Role}: {response.PrintContent()}\");\n}\n```\n\n##### [检索线程](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fthreads\u002FgetThread)\n\n检索一个线程。\n\n```csharp\nvar api = new OpenAIClient();\nvar thread = await api.ThreadsEndpoint.RetrieveThreadAsync(\"thread-id\");\n\u002F\u002F 或者，如果你只想获取线程的最新状态\nthread = await thread.UpdateAsync();\nDebug.Log($\"检索线程 {thread.Id} -> {thread.CreatedAt}\");\n```\n\n##### [修改线程](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fthreads\u002FmodifyThread)\n\n修改一个线程。\n\n> [!注意]\n> 只能修改元数据。\n\n```csharp\nvar api = new OpenAIClient();\nvar thread = await api.ThreadsEndpoint.CreateThreadAsync();\nvar metadata = new Dictionary\u003Cstring, string>\n{\n    { \"key\", \"自定义线程元数据\" }\n};\nthread = await api.ThreadsEndpoint.ModifyThreadAsync(thread.Id, metadata);\n\u002F\u002F 或者使用扩展方法以方便操作！\nthread = await thread.ModifyAsync(metadata);\nDebug.Log($\"修改线程 {thread.Id} -> {thread.Metadata[\"key\"]}\");\n```\n\n##### 
[删除线程](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fthreads\u002FdeleteThread)\n\n删除一个线程。\n\n```csharp\nvar api = new OpenAIClient();\nvar isDeleted = await api.ThreadsEndpoint.DeleteThreadAsync(\"thread-id\");\n\u002F\u002F 或者使用扩展方法以方便操作！\nvar isDeleted = await thread.DeleteAsync();\nAssert.IsTrue(isDeleted);\n```\n\n##### [线程消息](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fmessages)\n\n在线程中创建消息。\n\n###### [列出线程消息](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fmessages\u002FlistMessages)\n\n返回给定线程的消息列表。\n\n```csharp\nvar api = new OpenAIClient();\nvar messageList = await api.ThreadsEndpoint.ListMessagesAsync(\"thread-id\");\n\u002F\u002F 或者使用扩展方法以方便操作！\nvar messageList = await thread.ListMessagesAsync();\n\nforeach (var message in messageList.Items)\n{\n    Debug.Log($\"{message.Id}: {message.Role}: {message.PrintContent()}\");\n}\n```\n\n###### [创建线程消息](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fmessages\u002FcreateMessage)\n\n创建一条消息。\n\n```csharp\nvar api = new OpenAIClient();\nvar thread = await api.ThreadsEndpoint.CreateThreadAsync();\nvar request = new CreateMessageRequest(\"你好，世界！\");\nvar message = await api.ThreadsEndpoint.CreateMessageAsync(thread.Id, request);\n\u002F\u002F 或者使用扩展方法以方便操作！\nvar message = await thread.CreateMessageAsync(\"你好，世界！\");\nDebug.Log($\"{message.Id}: {message.Role}: {message.PrintContent()}\");\n```\n\n###### [检索线程消息](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fmessages\u002FgetMessage)\n\n检索一条消息。\n\n```csharp\nvar api = new OpenAIClient();\nvar message = await api.ThreadsEndpoint.RetrieveMessageAsync(\"thread-id\", \"message-id\");\n\u002F\u002F 或者使用扩展方法以方便操作！\nvar message = await thread.RetrieveMessageAsync(\"message-id\");\nvar message = await message.UpdateAsync();\nDebug.Log($\"{message.Id}: {message.Role}: {message.PrintContent()}\");\n```\n\n###### 
[修改线程消息](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fmessages\u002FmodifyMessage)\n\n修改一条消息。\n\n> [!注意]\n> 只能修改消息的元数据。\n\n```csharp\nvar api = new OpenAIClient();\nvar metadata = new Dictionary\u003Cstring, string>\n{\n    { \"key\", \"自定义消息元数据\" }\n};\nvar message = await api.ThreadsEndpoint.ModifyMessageAsync(\"thread-id\", \"message-id\", metadata);\n\u002F\u002F 或者使用扩展方法以方便操作！\nvar message = await message.ModifyAsync(metadata);\nDebug.Log($\"修改消息元数据：{message.Id} -> {message.Metadata[\"key\"]}\");\n```\n\n##### [线程运行](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fruns)\n\n表示在线程上执行的一次运行。\n\n###### [列出线程运行](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fruns\u002FlistRuns)\n\n返回属于某个线程的所有运行列表。\n\n```csharp\nvar api = new OpenAIClient();\nvar runList = await api.ThreadsEndpoint.ListRunsAsync(\"thread-id\");\n\u002F\u002F 或者使用扩展方法以方便操作！\nvar runList = await thread.ListRunsAsync();\n\nforeach (var run in runList.Items)\n{\n    Debug.Log($\"[{run.Id}] {run.Status} | {run.CreatedAt}\");\n}\n```\n\n###### [创建线程运行](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fruns\u002FcreateRun)\n\n创建一次运行。\n\n```csharp\nvar api = new OpenAIClient();\nvar assistant = await api.AssistantsEndpoint.CreateAssistantAsync(\n    new CreateAssistantRequest(\n        name: \"数学家教\",\n        instructions: \"你是一位私人数学家教。请用一句话或更短的篇幅简要回答问题。\",\n        model: Model.GPT4o));\nvar thread = await api.ThreadsEndpoint.CreateThreadAsync();\nvar message = await thread.CreateMessageAsync(\"我需要解方程 `3x + 11 = 14`。你能帮我吗？\");\nvar run = await thread.CreateRunAsync(assistant);\nDebug.Log($\"[{run.Id}] {run.Status} | {run.CreatedAt}\");\n```\n\n###### 创建线程并流式处理运行\n\n创建一个运行，并流式传输事件。\n\n```csharp\nvar api = new OpenAIClient();\nvar assistant = await api.AssistantsEndpoint.CreateAssistantAsync(\n    new CreateAssistantRequest(\n        name: \"数学家教\",\n        instructions: \"你是一位私人数学家教。请用一句话或更短的篇幅简要回答问题。你的回答应以 
JSON 格式呈现。\",\n        model: Model.GPT4o,\n        responseFormat: ChatResponseFormat.Json));\nvar thread = await api.ThreadsEndpoint.CreateThreadAsync();\nvar message = await thread.CreateMessageAsync(\"我需要解方程 `3x + 11 = 14`。你能帮我吗？\");\nvar run = await thread.CreateRunAsync(assistant, async streamEvent =>\n{\n    Debug.Log(streamEvent.ToJsonString());\n    await Task.CompletedTask;\n});\nvar messages = await thread.ListMessagesAsync();\n\nforeach (var response in messages.Items.Reverse())\n{\n    Debug.Log($\"{response.Role}: {response.PrintContent()}\");\n}\n```\n\n###### [检索线程运行](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fruns\u002FgetRun)\n\n检索一个运行。\n\n```csharp\nvar api = new OpenAIClient();\nvar run = await api.ThreadsEndpoint.RetrieveRunAsync(\"thread-id\", \"run-id\");\n\u002F\u002F 或者使用扩展方法以方便操作！\nvar run = await thread.RetrieveRunAsync(\"run-id\");\nvar run = await run.UpdateAsync();\nDebug.Log($\"[{run.Id}] {run.Status} | {run.CreatedAt}\");\n```\n\n###### [修改线程运行](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fruns\u002FmodifyRun)\n\n修改一个运行。\n\n> [!注意]\n> 只能修改元数据。\n\n```csharp\nvar api = new OpenAIClient();\nvar metadata = new Dictionary\u003Cstring, string>\n{\n    { \"key\", \"自定义运行元数据\" }\n};\nvar run = await api.ThreadsEndpoint.ModifyRunAsync(\"thread-id\", \"run-id\", metadata);\n\u002F\u002F 或者使用扩展方法以方便操作！\nvar run = await run.ModifyAsync(metadata);\nDebug.Log($\"修改运行 {run.Id} -> {run.Metadata[\"key\"]}\");\n```\n\n###### [向运行提交工具输出](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fruns\u002FsubmitToolOutputs)\n\n当运行状态为 `requires_action`，且 `required_action.type` 为 `submit_tool_outputs` 时，可以使用此端点在所有工具调用完成后提交工具输出。所有输出必须在一次请求中提交。\n\n> [!注意]\n> 请参阅“创建线程并流式处理运行”示例，了解如何流式传输工具输出事件。\n\n```csharp\nvar api = new OpenAIClient();\nvar tools = new List\u003CTool>\n{\n    \u002F\u002F 使用预定义工具\n    Tool.Retrieval, Tool.CodeInterpreter,\n    \u002F\u002F 或者根据类型和您希望用于函数调用的方法名称创建工具\n    
Tool.GetOrCreateTool(typeof(WeatherService), nameof(WeatherService.GetCurrentWeatherAsync)),\n    \u002F\u002F 传入对象实例以在其上调用方法\n    Tool.GetOrCreateTool(api.ImagesEndPoint, nameof(ImagesEndpoint.GenerateImageAsync)),\n    \u002F\u002F 定义 func\u003C,> 回调函数\n    Tool.FromFunc(\"name_of_func\", () => { \u002F* 回调函数 *\u002F }),\n    Tool.FromFunc\u003CT1,T2,TResult>(\"func_with_multiple_params\", (t1, t2) => { \u002F* 计算返回值的逻辑 *\u002F return tResult; })\n};\nvar assistantRequest = new CreateAssistantRequest(tools: tools, instructions: \"你是一位有用的天气助手。请根据地理位置使用适当的单位。\");\nvar testAssistant = await api.AssistantsEndpoint.CreateAssistantAsync(assistantRequest);\nvar run = await testAssistant.CreateThreadAndRunAsync(\"我在吉隆坡，请告诉我现在的温度是多少？\");\n\u002F\u002F 等待运行处于排队和进行中状态\nrun = await run.WaitForStatusChangeAsync();\n\n\u002F\u002F 调用所有工具调用函数并获取工具输出。\nvar toolOutputs = await testAssistant.GetToolOutputsAsync(run.RequiredAction.SubmitToolOutputs.ToolCalls);\n\nforeach (var toolOutput in toolOutputs)\n{\n    Debug.Log($\"工具调用输出：{toolOutput.Output}\");\n}\n\u002F\u002F 提交工具输出\nrun = await run.SubmitToolOutputsAsync(toolOutputs);\n\u002F\u002F 等待运行再次进入排队和进行中状态\nrun = await run.WaitForStatusChangeAsync();\nvar messages = await run.ListMessagesAsync。\n\nforeach (var message in messages.Items.OrderBy(response => response.CreatedAt))\n{\n    Debug.Log($\"{message.Role}: {message.PrintContent()}\");\n}\n```\n\n##### [线程结构化输出](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Fstructured-outputs)\n\n结构化输出是 JSON 模式的演进版本。虽然两者都能确保生成有效的 JSON，但只有结构化输出才能保证符合模式要求。\n\n> [!重要提示]\n>\n> - 使用 JSON 模式时，务必通过对话中的某条消息（例如系统消息）指示模型生成 JSON。如果不明确指示生成 JSON，模型可能会生成无休止的空白内容，请求也可能持续进行，直到达到令牌限制。为了防止这种情况发生，API 会在上下文中未出现“JSON”字符串时抛出错误。\n> - 如果 `finish_reason` 是 length，表示生成内容超过了 max_tokens 或对话的令牌限制，则模型返回的消息中的 JSON 可能是不完整的（即被截断）。为避免这种情况，在解析响应前请检查 `finish_reason`。\n\n首先定义您的响应结构。这些将用作您的模式。\n这些是您将反序列化的目标对象，因此请务必使用标准的 Json 对象模型。\n\n```csharp\npublic class MathResponse\n{\n    [JsonProperty(\"steps\")]\n    
public IReadOnlyList\u003CMathStep> Steps { get; private set; }\n\n    [JsonProperty(\"final_answer\")]\n    public string FinalAnswer { get; private set; }\n}\n\npublic class MathStep\n{\n    [JsonProperty(\"explanation\")]\n    public string Explanation { get; private set; }\n\n    [JsonProperty(\"output\")]\n    public string Output { get; private set; }\n}\n```\n\n使用时，只需在 `CreateAssistantAsync`、`CreateRunAsync` 或 `CreateThreadAndRunAsync` 中将 `MathResponse` 类型指定为泛型约束即可。\n\n```csharp\nvar api = new OpenAIClient();\nvar assistant = await api.AssistantsEndpoint.CreateAssistantAsync\u003CMathResponse>(\n    new CreateAssistantRequest(\n        name: \"数学家教\",\n        instructions: \"你是一位有帮助的数学家教。请逐步引导用户完成解题过程。\",\n        model: \"gpt-4o-2024-08-06\"));\nThreadResponse thread = null;\n\ntry\n{\n    async Task StreamEventHandler(IServerSentEvent @event)\n    {\n        try\n        {\n            switch (@event)\n            {\n                case MessageResponse message:\n                    if (message.Status != MessageStatus.Completed)\n                    {\n                        Debug.Log(@event.ToJsonString());\n                        break;\n                    }\n\n                    var mathResponse = message.FromSchema\u003CMathResponse>();\n\n                    for (var i = 0; i \u003C mathResponse.Steps.Count; i++)\n                    {\n                        var step = mathResponse.Steps[i];\n                        Debug.Log($\"步骤 {i}: {step.Explanation}\");\n                        Debug.Log($\"结果: {step.Output}\");\n                    }\n\n                    Debug.Log($\"最终答案: {mathResponse.FinalAnswer}\");\n                    break;\n                default:\n                    Debug.Log(@event.ToJsonString());\n                    break;\n            }\n        }\n        catch (Exception e)\n        {\n            Debug.Log(e);\n            throw;\n        }\n\n        await Task.CompletedTask;\n    }\n\n    var run = await assistant.CreateThreadAndRunAsync(\"如何解方程 8x + 
7 = -23\", StreamEventHandler);\n    thread = await run.GetThreadAsync();\n    run = await run.WaitForStatusChangeAsync();\n    Debug.Log($\"创建了线程和运行：{run.ThreadId} -> {run.Id} -> {run.CreatedAt}\");\n    var messages = await thread.ListMessagesAsync();\n\n    foreach (var response in messages.Items.OrderBy(response => response.CreatedAt))\n    {\n        Debug.Log($\"{response.Role}: {response.PrintContent()}\");\n    }\n}\nfinally\n{\n    await assistant.DeleteAsync(deleteToolResources: thread == null);\n\n    if (thread != null)\n    {\n        var isDeleted = await thread.DeleteAsync(deleteToolResources: true);\n    }\n}\n```\n\n你也可以手动创建 JSON 模式 JSON 字符串，但你需要负责反序列化你的响应数据：\n\n```csharp\nvar api = new OpenAIClient();\nvar mathSchema = new JsonSchema(\"math_response\", @\"\n{\n  \"\"type\"\": \"\"object\"\",\n  \"\"properties\"\": {\n    \"\"steps\"\": {\n      \"\"type\"\": \"\"array\"\",\n      \"\"items\"\": {\n        \"\"type\"\": \"\"object\"\",\n        \"\"properties\"\": {\n          \"\"explanation\"\": {\n            \"\"type\"\": \"\"string\"\"\n          },\n          \"\"output\"\": {\n            \"\"type\"\": \"\"string\"\"\n          }\n        },\n        \"\"required\"\": [\n          \"\"explanation\"\",\n          \"\"output\"\"\n        ],\n        \"\"additionalProperties\"\": false\n      }\n    },\n    \"\"final_answer\"\": {\n      \"\"type\"\": \"\"string\"\"\n    }\n  },\n  \"\"required\"\": [\n    \"\"steps\"\",\n    \"\"final_answer\"\"\n  ],\n  \"\"additionalProperties\"\": false\n}\");\nvar assistant = await api.AssistantsEndpoint.CreateAssistantAsync(\n    new CreateAssistantRequest(\n        name: \"数学辅导老师\",\n        instructions: \"你是一位乐于助人的数学辅导老师。请逐步引导用户解决问题。\",\n        model: \"gpt-4o-2024-08-06\",\n        jsonSchema: mathSchema));\nThreadResponse thread = null;\n\ntry\n{\n    var run = await assistant.CreateThreadAndRunAsync(\"如何解方程 8x + 7 = -23\",\n        async @event =>\n        {\n            Debug.Log(@event.ToJsonString());\n            await Task.CompletedTask;\n        });\n  
  thread = await run.GetThreadAsync();\n    run = await run.WaitForStatusChangeAsync();\n    Debug.Log($\"创建了线程和运行：{run.ThreadId} -> {run.Id} -> {run.CreatedAt}\");\n    var messages = await thread.ListMessagesAsync();\n\n    foreach (var response in messages.Items)\n    {\n        Debug.Log($\"{response.Role}: {response.PrintContent()}\");\n    }\n}\nfinally\n{\n    await assistant.DeleteAsync(deleteToolResources: thread == null);\n\n    if (thread != null)\n    {\n        var isDeleted = await thread.DeleteAsync(deleteToolResources: true);\n        Assert.IsTrue(isDeleted);\n    }\n}\n```\n\n###### [列出线程运行步骤](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fruns\u002FlistRunSteps)\n\n返回属于某个运行的运行步骤列表。\n\n```csharp\nvar api = new OpenAIClient();\nvar runStepList = await api.ThreadsEndpoint.ListRunStepsAsync(\"thread-id\", \"run-id\");\n\u002F\u002F 或者使用扩展方法以方便操作！\nvar runStepList = await run.ListRunStepsAsync();\n\nforeach (var runStep in runStepList.Items)\n{\n    Debug.Log($\"[{runStep.Id}] {runStep.Status} {runStep.CreatedAt} -> {runStep.ExpiresAt}\");\n}\n```\n\n###### [获取线程运行步骤](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fruns\u002FgetRunStep)\n\n获取一个运行步骤。\n\n```csharp\nvar api = new OpenAIClient();\nvar runStep = await api.ThreadsEndpoint.RetrieveRunStepAsync(\"thread-id\", \"run-id\", \"step-id\");\n\u002F\u002F 或者使用扩展方法以方便操作！\nvar runStep = await run.RetrieveRunStepAsync(\"step-id\");\nvar runStep = await runStep.UpdateAsync();\nDebug.Log($\"[{runStep.Id}] {runStep.Status} {runStep.CreatedAt} -> {runStep.ExpiresAt}\");\n```\n\n###### [取消线程运行](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fruns\u002FcancelRun)\n\n取消一个处于 `in_progress` 状态的运行。\n\n```csharp\nvar api = new OpenAIClient();\nvar isCancelled = await api.ThreadsEndpoint.CancelRunAsync(\"thread-id\", \"run-id\");\n\u002F\u002F 或者使用扩展方法以方便操作！\nvar isCancelled = await run.CancelAsync();\nAssert.IsTrue(isCancelled);\n```\n\n#### 
[向量存储](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores)\n\n向量存储用于存储文件，供 `file_search` 工具使用。\n\n- [文件搜索指南](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fassistants\u002Ftools\u002Ffile-search)\n\n通过 `OpenAIClient.VectorStoresEndpoint` 访问向量存储 API。\n\n##### [列出向量存储](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores\u002Flist)\n\n返回向量存储列表。\n\n```csharp\nvar api = new OpenAIClient();\nvar vectorStores = await api.VectorStoresEndpoint.ListVectorStoresAsync();\n\nforeach (var vectorStore in vectorStores.Items)\n{\n    Debug.Log(vectorStore);\n}\n```\n\n##### [创建向量存储](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores\u002Fcreate)\n\n创建一个向量存储。\n\n```csharp\nvar api = new OpenAIClient();\nvar createVectorStoreRequest = new CreateVectorStoreRequest(\"测试向量存储\");\nvar vectorStore = await api.VectorStoresEndpoint.CreateVectorStoreAsync(createVectorStoreRequest);\nDebug.Log(vectorStore);\n```\n\n##### [获取向量存储](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores\u002Fretrieve)\n\n获取一个向量存储。\n\n```csharp\nvar api = new OpenAIClient();\nvar vectorStore = await api.VectorStoresEndpoint.GetVectorStoreAsync(\"vector-store-id\");\nDebug.Log(vectorStore);\n```\n\n##### [修改向量存储](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores\u002Fmodify)\n\n修改一个向量存储。\n\n```csharp\nvar api = new OpenAIClient();\nvar metadata = new Dictionary\u003Cstring, object> { { \"测试\", DateTime.UtcNow } };\nvar vectorStore = await api.VectorStoresEndpoint.ModifyVectorStoreAsync(\"vector-store-id\", metadata: metadata);\nDebug.Log(vectorStore);\n```\n\n##### [删除向量存储](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores\u002Fdelete)\n\n删除向量存储。\n\n```csharp\nvar api = new OpenAIClient();\nvar isDeleted = await api.VectorStoresEndpoint.DeleteVectorStoreAsync(\"vector-store-id\");\nAssert.IsTrue(isDeleted);\n```\n\n##### 
[向量存储文件](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores-files)\n\n向量存储文件表示向量存储中的文件。\n\n- [文件搜索指南](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fassistants\u002Ftools\u002Ffile-search)\n\n###### [列出向量存储文件](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores-files\u002FlistFiles)\n\n返回向量存储文件的列表。\n\n```csharp\nvar api = new OpenAIClient();\nvar files = await api.VectorStoresEndpoint.ListVectorStoreFilesAsync(\"vector-store-id\");\n\nforeach (var file in files.Items)\n{\n    Debug.Log(file);\n}\n```\n\n###### [创建向量存储文件](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores-files\u002FcreateFile)\n\n通过将文件附加到向量存储来创建向量存储文件。\n\n```csharp\nvar api = new OpenAIClient();\nvar file = await api.VectorStoresEndpoint.CreateVectorStoreFileAsync(\"vector-store-id\", \"file-id\", new ChunkingStrategy(ChunkingStrategyType.Static));\nDebug.Log(file);\n```\n\n###### [获取向量存储文件](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores-files\u002FgetFile)\n\n获取向量存储文件。\n\n```csharp\nvar api = new OpenAIClient();\nvar file = await api.VectorStoresEndpoint.GetVectorStoreFileAsync(\"vector-store-id\", \"vector-store-file-id\");\nDebug.Log(file);\n```\n\n###### [删除向量存储文件](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores-files\u002FdeleteFile)\n\n删除向量存储文件。这会从向量存储中移除文件，但文件本身不会被删除。要删除文件，请使用删除文件端点。\n\n```csharp\nvar api = new OpenAIClient();\nvar isDeleted = await api.VectorStoresEndpoint.DeleteVectorStoreFileAsync(\"vector-store-id\", vectorStoreFile);\nAssert.IsTrue(isDeleted);\n```\n\n##### [向量存储文件批次](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores-file-batches)\n\n向量存储文件批次表示将多个文件添加到向量存储的操作。\n\n- [文件搜索指南](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fassistants\u002Ftools\u002Ffile-search)\n\n###### 
[创建向量存储文件批次](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores-file-batches\u002FcreateBatch)\n\n创建向量存储文件批次。\n\n```csharp\nvar api = new OpenAIClient();\nvar files = new List\u003Cstring> { \"file_id_1\",\"file_id_2\" };\nvar vectorStoreFileBatch = await api.VectorStoresEndpoint.CreateVectorStoreFileBatchAsync(\"vector-store-id\", files);\nDebug.Log(vectorStoreFileBatch);\n```\n\n###### [获取向量存储文件批次](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores-file-batches\u002FgetBatch)\n\n获取向量存储文件批次。\n\n```csharp\nvar api = new OpenAIClient();\nvar vectorStoreFileBatch = await api.VectorStoresEndpoint.GetVectorStoreFileBatchAsync(\"vector-store-id\", \"vector-store-file-batch-id\");\n\u002F\u002F 你也可以使用便捷方法！\nvectorStoreFileBatch = await vectorStoreFileBatch.UpdateAsync();\nvectorStoreFileBatch = await vectorStoreFileBatch.WaitForStatusChangeAsync();\n```\n\n###### [列出向量存储批次中的文件](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores-file-batches\u002FlistBatchFiles)\n\n返回批次中向量存储文件的列表。\n\n```csharp\nvar api = new OpenAIClient();\nvar files = await api.VectorStoresEndpoint.ListVectorStoreBatchFilesAsync(\"vector-store-id\", \"vector-store-file-batch-id\");\n\nforeach (var file in files.Items)\n{\n    Debug.Log(file);\n}\n```\n\n###### [取消向量存储文件批次](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fvector-stores-file-batches\u002FcancelBatch)\n\n取消向量存储文件批次。这会尽快尝试取消该批次中文件的处理。\n\n```csharp\nvar api = new OpenAIClient();\nvar isCancelled = await api.VectorStoresEndpoint.CancelVectorStoreFileBatchAsync(\"vector-store-id\", \"vector-store-file-batch-id\");\n```\n\n---\n\n\n\n### [聊天](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fchat)\n\n给定一次聊天对话，模型将返回一个聊天完成响应。\n\n聊天 API 通过 `OpenAIClient.ChatEndpoint` 访问。\n\n#### 
[聊天完成](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fchat\u002Fcreate)\n\n为聊天消息创建完成。\n\n```csharp\nvar api = new OpenAIClient();\nvar messages = new List\u003CMessage>\n{\n    new Message(Role.System, \"你是一个有用的助手。\"),\n    new Message(Role.User, \"2020年的世界大赛是谁赢的？\"),\n    new Message(Role.Assistant, \"洛杉矶道奇队在2020年赢得了世界大赛。\"),\n    new Message(Role.User, \"比赛是在哪里举行的？\"),\n};\nvar chatRequest = new ChatRequest(messages, Model.GPT4o);\nvar response = await api.ChatEndpoint.GetCompletionAsync(chatRequest);\nvar choice = response.FirstChoice;\nDebug.Log($\"[{choice.Index}] {choice.Message.Role}: {choice.Message} | 结束原因: {choice.FinishReason}\");\n```\n\n#### [聊天流式传输](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fchat\u002Fcreate#chat\u002Fcreate-stream)\n\n```csharp\nvar api = new OpenAIClient();\nvar messages = new List\u003CMessage>\n{\n    new Message(Role.System, \"你是一个有用的助手。\"),\n    new Message(Role.User, \"2020年的世界大赛是谁赢的？\"),\n    new Message(Role.Assistant, \"洛杉矶道奇队在2020年赢得了世界大赛。\"),\n    new Message(Role.User, \"比赛是在哪里举行的？\"),\n};\nvar chatRequest = new ChatRequest(messages);\nvar response = await api.ChatEndpoint.StreamCompletionAsync(chatRequest, async partialResponse =>\n{\n    Debug.Log(partialResponse.FirstChoice.Delta.ToString());\n    await Task.CompletedTask;\n});\nvar choice = response.FirstChoice;\nDebug.Log($\"[{choice.Index}] {choice.Message.Role}: {choice.Message} | 结束原因: {choice.FinishReason}\");\n```\n\n#### [聊天工具](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Ffunction-calling)\n\n```csharp\nvar api = new OpenAIClient();\nvar messages = new List\u003CMessage>\n{\n    new(Message.Role.System, \"你是一个有用的天气助手。始终提示用户他们的位置。\"),\n    new Message(Role.User, \"今天天气怎么样？\"),\n};\n\nforeach (var message in messages)\n{\n    Debug.Log($\"{message.Role}: {message}\");\n}\n\n\u002F\u002F 定义助手可以使用的工具：\n\u002F\u002F 1. 
获取所有用 FunctionAttribute 装饰的静态方法列表\nvar tools = Tool.GetAllAvailableTools(includeDefaults: false, forceUpdate: true, clearCache: true);\n\u002F\u002F 2. 定义自定义工具列表：\nvar tools = new List\u003CTool>\n{\n    Tool.GetOrCreateTool(objectInstance, \"要调用的方法名称\"),\n    Tool.FromFunc(\"你函数的自定义名称\", ()=> { \u002F* 某些逻辑要执行 *\u002F })\n};\nvar chatRequest = new ChatRequest(messages, tools: tools, toolChoice: \"auto\");\nvar response = await api.ChatEndpoint.GetCompletionAsync(chatRequest);\nmessages.Add(response.FirstChoice.Message);\n\nDebug.Log($\"{response.FirstChoice.Message.Role}: {response.FirstChoice} | 结束原因: {response.FirstChoice.FinishReason}\");\n\nvar locationMessage = new Message(Role.User, \"我在苏格兰的格拉斯哥\");\nmessages.Add(locationMessage);\nDebug.Log($\"{locationMessage.Role}: {locationMessage.Content}\");\nchatRequest = new ChatRequest(messages, tools: tools, toolChoice: \"auto\");\nresponse = await api.ChatEndpoint.GetCompletionAsync(chatRequest);\n\nmessages.Add(response.FirstChoice.Message);\n\nif (response.FirstChoice.FinishReason == \"stop\")\n{\n    Debug.Log($\"{response.FirstChoice.Message.Role}: {response.FirstChoice} | 结束原因: {response.FirstChoice.FinishReason}\");\n\n    var unitMessage = new Message(Role.User, \"华氏度\");\n    messages.Add(unitMessage);\n    Debug.Log($\"{unitMessage.Role}: {unitMessage.Content}\");\n    chatRequest = new ChatRequest(messages, tools: tools, toolChoice: \"auto\");\n    response = await api.ChatEndpoint.GetCompletionAsync(chatRequest);\n}\n\n\u002F\u002F 遍历所有工具调用并执行它们\nforeach (var toolCall in response.FirstChoice.Message.ToolCalls)\n{\n    Debug.Log($\"{response.FirstChoice.Message.Role}: {toolCall.Function.Name} | 结束原因: {response.FirstChoice.FinishReason}\");\n    Debug.Log($\"{toolCall.Function.Arguments}\");\n    \u002F\u002F 调用函数以获取通用的 JSON 结果作为工具调用的返回值。\n    var functionResult = await toolCall.InvokeFunctionAsync();\n    \u002F\u002F 如果你知道返回类型并需要进行额外处理，可以使用泛型重载版本。\n    var functionResult = await 
toolCall.InvokeFunctionAsync\u003Cstring>();\n    messages.Add(new Message(toolCall, functionResult));\n    Debug.Log($\"{Role.Tool}: {functionResult}\");\n}\n\u002F\u002F 系统：你是一个有用的天气助手。\n\u002F\u002F 用户：今天天气怎么样？\n\u002F\u002F 助手：当然，请问您目前的位置是哪里？| 结束原因：stop\n\u002F\u002F 用户：我在苏格兰的格拉斯哥\n\u002F\u002F 助手：GetCurrentWeather | 结束原因：tool_calls\n\u002F\u002F {\n\u002F\u002F   \"location\": \"Glasgow, Scotland\",\n\u002F\u002F   \"unit\": \"celsius\"\n\u002F\u002F }\n\u002F\u002F 工具：苏格兰格拉斯哥当前的气温为39°C。\n```\n\n#### [视觉聊天](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Fvision)\n\n> [!WARNING]\n> 测试功能。API 可能会随时发生变化。\n\n```csharp\nvar api = new OpenAIClient();\nvar messages = new List\u003CMessage>\n{\n    new Message(Role.System, \"你是一个有用的助手。\"),\n    new Message(Role.User, new List\u003CContent>\n    {\n        \"这张图片里有什么？\",\n        new ImageUrl(\"https:\u002F\u002Fupload.wikimedia.org\u002Fwikipedia\u002Fcommons\u002Fthumb\u002Fd\u002Fdd\u002FGfp-wisconsin-madison-the-nature-boardwalk.jpg\u002F2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\", ImageDetail.Low)\n    })\n};\nvar chatRequest = new ChatRequest(messages, model: Model.GPT4o);\nvar response = await api.ChatEndpoint.GetCompletionAsync(chatRequest);\nDebug.Log($\"{response.FirstChoice.Message.Role}: {response.FirstChoice.Message.Content} | 结束原因: {response.FirstChoice.FinishDetails}\");\n```\n\n你甚至可以直接传入一个 `Texture2D`！\n\n```csharp\nvar api = new OpenAIClient();\nvar messages = new List\u003CMessage>\n{\n    new Message(Role.System, \"你是一个有用的助手。\"),\n    new Message(Role.User, new List\u003CContent>\n    {\n        \"这张图片里有什么？\",\n        texture\n    })\n};\nvar chatRequest = new ChatRequest(messages, model: Model.GPT4o);\nvar result = await api.ChatEndpoint.GetCompletionAsync(chatRequest);\nDebug.Log($\"{result.FirstChoice.Message.Role}: {result.FirstChoice} | 结束原因: {result.FirstChoice.FinishDetails}\");\n```\n\n#### 
[音频聊天](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Faudio)\n\n```csharp\nvar api = new OpenAIClient();\nvar messages = new List\u003CMessage>\n{\n    new Message(Role.System, \"你是一个有用的助手。\"),\n    new Message(Role.User, \"金毛寻回犬适合作为家庭宠物吗？\")\n};\nvar chatRequest = new ChatRequest(messages, Model.GPT4oAudio, audioConfig: Voice.Alloy);\nvar response = await api.ChatEndpoint.GetCompletionAsync(chatRequest);\nDebug.Log($\"{response.FirstChoice.Message.Role}: {response.FirstChoice} | 结束原因: {response.FirstChoice.FinishDetails}\");\naudioSource.PlayOneShot(response.FirstChoice.Message.AudioOutput.AudioClip);\n```\n\n#### [结构化输出聊天](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Fstructured-outputs)\n\n这是 [JSON 模式](#chat-json-mode) 的进一步发展。虽然两者都能确保生成有效的 JSON，但只有结构化输出才能保证符合预定义的模式。\n\n> [!IMPORTANT]\n>\n> - 使用 JSON 模式时，务必在对话中通过某条消息明确指示模型生成 JSON，例如通过系统提示语。如果不包含明确的 JSON 生成指令，模型可能会持续输出空白字符，请求将一直运行直到达到令牌上限。为了防止这种情况发生，API 会在上下文中未出现“JSON”字样时抛出错误。\n> - 如果 `finish_reason` 是 length，则模型返回的消息中的 JSON 可能是不完整的（即被截断），这表明生成内容超出了最大令牌数或对话已超过令牌限制。为避免此问题，在解析响应之前请检查 `finish_reason`。\n\n首先定义你的响应结构。这些将作为你的模式使用。\n这些是你将反序列化的目标对象，因此请确保使用标准的 JSON 对象模型。\n\n```csharp\npublic class MathResponse\n{\n    [JsonProperty(\"steps\")]\n    public IReadOnlyList\u003CMathStep> Steps { get; private set; }\n\n    [JsonProperty(\"final_answer\")]\n    public string FinalAnswer { get; private set; }\n}\n\npublic class MathStep\n{\n    [JsonProperty(\"explanation\")]\n    public string Explanation { get; private set; }\n\n    [JsonProperty(\"output\")]\n    public string Output { get; private set; }\n}\n```\n\n使用时，只需在请求完成时指定 `MathResponse` 类型作为泛型约束即可。\n\n```csharp\nvar api = new OpenAIClient();\nvar messages = new List\u003CMessage>\n{\n    new(Message.Role.System, \"你是一位有用的教学助理。请逐步引导用户解决问题。\"),\n    new(Message.Role.User, \"如何解方程 8x + 7 = -23\")\n};\n\nvar chatRequest = new ChatRequest(messages, model: \"gpt-4o-2024-08-06\");\nvar (mathResponse, chatResponse) = await 
api.ChatEndpoint.GetCompletionAsync\u003CMathResponse>(chatRequest);\n\nfor (int i = 0; i \u003C mathResponse.Steps.Count; i++)\n{\n    var step = mathResponse.Steps[i];\n    Debug.Log($\"第 {i} 步：{step.Explanation}\");\n    Debug.Log($\"结果：{step.Output}\");\n}\n\nDebug.Log($\"最终答案：{mathResponse.FinalAnswer}\");\nchatResponse.GetUsage();\n```\n\n#### [JSON 模式聊天](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Ftext-generation\u002Fjson-mode)\n\n> [!IMPORTANT]\n>\n> - 使用 JSON 模式时，务必通过对话中的某条消息（例如系统消息）指示模型生成 JSON。如果不包含明确的 JSON 生成指令，模型可能会生成无休止的空白字符流，请求将持续运行直到达到令牌限制。为避免这种情况，如果上下文中未出现字符串“JSON”，API 将抛出错误。\n> - 如果 `finish_reason` 为 `length`，则模型返回的消息中的 JSON 可能是不完整的（即被截断），这表示生成内容超出了 `max_tokens` 或对话的令牌限制。为防止这种情况，在解析响应之前，请检查 `finish_reason`。\n> - JSON 模式不会保证输出符合任何特定模式，仅保证其有效且可无错误地解析。\n\n```csharp\nvar messages = new List\u003CMessage>\n{\n    new Message(Role.System, \"你是一个旨在输出 JSON 的助手。\"),\n    new Message(Role.User, \"2020 年世界大赛是谁赢了？\"),\n};\nvar chatRequest = new ChatRequest(messages, Model.GPT4o, responseFormat: ChatResponseFormat.Json);\nvar response = await api.ChatEndpoint.GetCompletionAsync(chatRequest);\n\nforeach (var choice in response.Choices)\n{\n    Debug.Log($\"[{choice.Index}] {choice.Message.Role}: {choice} | 结束原因: {choice.FinishReason}\");\n}\n\nresponse.GetUsage();\n```\n\n---\n\n\n\n### [音频](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Faudio)\n\n将音频转换为文本。\n\n音频 API 通过 `OpenAIClient.AudioEndpoint` 访问。\n\n#### [创建语音](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Faudio\u002FcreateSpeech)\n\n根据输入文本生成音频。\n\n```csharp\nvar api = new OpenAIClient();\nvar request = new SpeechRequest(\"你好，世界！\");\nvar speechClip = await api.AudioEndpoint.GetSpeechAsync(request);\naudioSource.PlayOneShot(speechClip);\nDebug.Log(speechClip);\n```\n\n##### [流式语音]\n\n根据输入文本生成流式音频。\n\n```csharp\nvar api = new OpenAIClient();\nvar request = new SpeechRequest(\"你好，世界！\", responseFormat: SpeechResponseFormat.PCM);\nvar 
speechClip = await api.AudioEndpoint.GetSpeechAsync(request, partialClip =>\n{\n    audioSource.PlayOneShot(partialClip);\n});\nDebug.Log(speechClip);\n```\n\n> [!NOTE]\n> 请查看任何演示场景，以了解如何使用 `OnAudioFilterRead` 处理播放的最佳实践。\n\n#### [创建转录](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Faudio\u002FcreateTranscription)\n\n将音频转录为输入语言。\n\n```csharp\nvar api = new OpenAIClient();\nvar request = new AudioTranscriptionRequest(audioClip, language: \"en\");\nvar result = await api.AudioEndpoint.CreateTranscriptionAsync(request);\nDebug.Log(result);\n```\n\n您还可以使用 `verbose_json` 获取更详细的带时间戳信息：\n\n```csharp\nvar api = new OpenAIClient();\nusing var request = new AudioTranscriptionRequest(transcriptionAudio, responseFormat: AudioResponseFormat.Verbose_Json, timestampGranularity: TimestampGranularity.Word, temperature: 0.1f, language: \"en\");\nvar response = await api.AudioEndpoint.CreateTranscriptionTextAsync(request);\n\nforeach (var word in response.Words)\n{\n    Debug.Log($\"[{word.Start}-{word.End}] \\\"{word.Word}\\\"\");\n}\n```\n\n#### [创建翻译](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Faudio\u002FcreateTranslation)\n\n将音频翻译成英语。\n\n```csharp\nvar api = new OpenAIClient();\nvar request = new AudioTranslationRequest(audioClip);\nvar result = await api.AudioEndpoint.CreateTranslationAsync(request);\nDebug.Log(result);\n```\n\n---\n\n### [图片](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fimages)\n\n根据提示和\u002F或输入图像，模型会生成一张新图像。\n\n图片 API 通过 `OpenAIClient.ImagesEndpoint` 访问。\n\n#### [创建图像](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fimages\u002Fcreate)\n\n根据提示创建图像。\n\n```csharp\nvar api = new OpenAIClient();\nvar request = new ImageGenerationRequest(\"一只骑着迅猛龙的房子\", Models.Model.DallE_3);\nvar imageResults = await api.ImagesEndPoint.GenerateImageAsync(request);\n\nforeach (var result in imageResults)\n{\n    Debug.Log(result.ToString());\n    
Assert.IsNotNull(result.Texture);\n}\n```\n\n#### [编辑图像](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fimages\u002Fcreate-edit)\n\n根据原始图像和提示创建编辑或扩展后的图像。\n\n```csharp\nvar api = new OpenAIClient();\nvar request = new ImageEditRequest(Path.GetFullPath(imageAssetPath), Path.GetFullPath(maskAssetPath), \"一个阳光明媚的室内休息区，池塘里有一只火烈鸟\", size: ImageSize.Small);\nvar imageResults = await api.ImagesEndPoint.CreateImageEditAsync(request);\n\nforeach (var result in imageResults)\n{\n    Debug.Log(result.ToString());\n    Assert.IsNotNull(result.Texture);\n}\n```\n\n#### [创建图像变体](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fimages\u002Fcreate-variation)\n\n根据给定图像创建变体。\n\n```csharp\nvar api = new OpenAIClient();\nvar request = new ImageVariationRequest(imageTexture, size: ImageSize.Small);\nvar imageResults = await api.ImagesEndPoint.CreateImageVariationAsync(request);\n\nforeach (var result in imageResults)\n{\n    Debug.Log(result.ToString());\n    Assert.IsNotNull(result.Texture);\n}\n```\n\n或者，该端点可以直接接受启用了读写权限且压缩设置为“无”的 Texture2D。\n\n```csharp\nvar api = new OpenAIClient();\nvar request = new ImageVariationRequest(imageTexture, size: ImageSize.Small);\nvar imageResults = await api.ImagesEndPoint.CreateImageVariationAsync(request);\n\nforeach (var result in imageResults)\n{\n    Debug.Log(result.ToString());\n    Assert.IsNotNull(result.Texture);\n}\n```\n\n---\n\n### [文件](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Ffiles)\n\n文件用于上传文档，这些文档可以与诸如[微调](#fine-tuning)等功能一起使用。\n\n文件 API 通过 `OpenAIClient.FilesEndpoint` 访问。\n\n#### [列出文件](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Ffiles\u002Flist)\n\n返回属于用户组织的文件列表。\n\n```csharp\nvar api = new OpenAIClient();\nvar fileList = await api.FilesEndpoint.ListFilesAsync();\n\nforeach (var file in fileList)\n{\n    Debug.Log($\"{file.Id} -> {file.Object}: {file.FileName} | {file.Size} bytes\");\n}\n```\n\n#### 
[上传文件](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Ffiles\u002Fcreate)\n\n上传可在多个端点使用的文件。单个组织上传的所有文件大小上限为 100 GB。\n\n单个文件的最大大小为 512 MB。有关支持的文件类型，请参阅助手工具指南。微调 API 仅支持 .jsonl 文件。\n\n```csharp\nvar api = new OpenAIClient();\nvar file = await api.FilesEndpoint.UploadFileAsync(\"path\u002Fto\u002Fyour\u002Ffile.jsonl\", FilePurpose.FineTune);\nDebug.Log(file.Id);\n```\n\n#### [删除文件](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Ffiles\u002Fdelete)\n\n删除文件。\n\n```csharp\nvar api = new OpenAIClient();\nvar isDeleted = await api.FilesEndpoint.DeleteFileAsync(fileId);\nAssert.IsTrue(isDeleted);\n```\n\n#### [获取文件信息](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Ffiles\u002Fretrieve)\n\n返回特定文件的信息。\n\n```csharp\nvar api = new OpenAIClient();\nvar file = await api.FilesEndpoint.GetFileInfoAsync(fileId);\nDebug.Log($\"{file.Id} -> {file.Object}: {file.FileName} | {file.Size} bytes\");\n```\n\n#### [下载文件内容](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Ffiles\u002Fretrieve-content)\n\n将文件内容下载到指定目录。\n\n```csharp\nvar api = new OpenAIClient();\nvar downloadedFilePath = await api.FilesEndpoint.DownloadFileAsync(fileId);\nDebug.Log(downloadedFilePath);\nAssert.IsTrue(File.Exists(downloadedFilePath));\n```\n\n---\n\n### [微调](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Ffine-tuning)\n\n管理微调任务，以根据您的特定训练数据定制模型。\n\n相关指南：[微调模型](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Ffine-tuning)\n\n文件 API 通过 `OpenAIClient.FineTuningEndpoint` 访问。\n\n#### [创建微调任务](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Ffine-tuning\u002Fcreate)\n\n从给定数据集创建一个微调指定模型的任务。\n\n响应包括已加入队列的任务详细信息，包括任务状态以及完成后的微调模型名称。\n\n```csharp\nvar api = new OpenAIClient();\nvar fileId = \"file-abc123\";\nvar request = new CreateFineTuneRequest(fileId);\nvar job = await api.FineTuningEndpoint.CreateJobAsync(Model.GPT3_5_Turbo, request);\nDebug.Log($\"已启动 {job.Id} | 
状态: {job.Status}\");\n```\n\n#### [列出微调任务](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Ffine-tuning\u002Flist)\n\n列出您组织的微调任务。\n\n```csharp\nvar api = new OpenAIClient();\nvar jobList = await api.FineTuningEndpoint.ListJobsAsync();\n\nforeach (var job in jobList.Items.OrderByDescending(job => job.CreatedAt))\n{\n    Debug.Log($\"{job.Id} -> {job.CreatedAt} | {job.Status}\");\n}\n```\n\n#### [获取微调任务信息](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Ffine-tuning\u002Fretrieve)\n\n获取微调任务的信息。\n\n```csharp\nvar api = new OpenAIClient();\nvar job = await api.FineTuningEndpoint.GetJobInfoAsync(fineTuneJob);\nDebug.Log($\"{job.Id} -> {job.CreatedAt} | {job.Status}\");\n```\n\n#### [取消微调任务](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Ffine-tuning\u002Fcancel)\n\n立即取消微调任务。\n\n```csharp\nvar api = new OpenAIClient();\nvar isCancelled = await api.FineTuningEndpoint.CancelFineTuneJobAsync(fineTuneJob);\nAssert.IsTrue(isCancelled);\n```\n\n#### [列出微调任务事件](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Ffine-tuning\u002Flist-events)\n\n获取微调任务的状态更新。\n\n```csharp\nvar api = new OpenAIClient();\nvar eventList = await api.FineTuningEndpoint.ListJobEventsAsync(fineTuneJob);\nDebug.Log($\"{fineTuneJob.Id} -> 状态: {fineTuneJob.Status} | 事件数量: {eventList.Events.Count}\");\n\nforeach (var @event in eventList.Items.OrderByDescending(@event => @event.CreatedAt))\n{\n    Debug.Log($\"  {@event.CreatedAt} [{@event.Level}] {@event.Message}\");\n}\n```\n\n---\n\n### [批处理](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fbatch)\n\n创建大量异步处理的 API 请求批次。批处理 API 可在 24 小时内返回结果，并享受 50% 的折扣。\n\n- [批处理指南](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Fbatch)\n\n批处理 API 通过 `OpenAIClient.BatchesEndpoint` 访问。\n\n#### [列出批处理](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fbatch\u002Flist)\n\n列出您组织的批处理。\n\n```csharp\nvar api = new 
OpenAIClient();\nvar batches = await api.BatchEndpoint.ListBatchesAsync();\n\nforeach (var batch in batches.Items)\n{\n    Debug.Log(batch);\n}\n```\n\n#### [创建批处理](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fbatch\u002Fcreate)\n\n从上传的请求文件创建并执行批处理。\n\n```csharp\nvar api = new OpenAIClient();\nvar batchRequest = new CreateBatchRequest(\"file-id\", Endpoint.ChatCompletions);\nvar batch = await api.BatchEndpoint.CreateBatchAsync(batchRequest);\n```\n\n#### [获取批处理](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fbatch\u002Fretrieve)\n\n获取批处理。\n\n```csharp\nvar api = new OpenAIClient();\nvar batch = await api.BatchEndpoint.RetrieveBatchAsync(\"batch-id\");\n\u002F\u002F 您也可以使用便捷方法！\nbatch = await batch.UpdateAsync();\nbatch = await batch.WaitForStatusChangeAsync();\n```\n\n#### [取消批处理](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fbatch\u002Fcancel)\n\n取消正在进行的批处理。批处理将在“取消中”状态持续最多 10 分钟，随后变为“已取消”，此时输出文件中将包含部分结果（如有）。\n\n```csharp\nvar api = new OpenAIClient();\nvar isCancelled = await api.BatchEndpoint.CancelBatchAsync(batch);\nAssert.IsTrue(isCancelled);\n```\n\n---\n\n### [嵌入](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fembeddings)\n\n获取给定输入的向量表示，该表示可被机器学习模型和算法轻松使用。\n\n相关指南：[嵌入](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Fembeddings)\n\n嵌入 API 通过 `OpenAIClient.EmbeddingsEndpoint` 访问。\n\n#### [创建嵌入](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fembeddings\u002Fcreate)\n\n创建表示输入文本的嵌入向量。\n\n```csharp\nvar api = new OpenAIClient();\nvar response = await api.EmbeddingsEndpoint.CreateEmbeddingAsync(\"食物非常美味，服务员……\", Models.Embedding_Ada_002);\nDebug.Log(response);\n```\n\n---\n\n### [内容审核](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fmoderations)\n\n给定一段输入文本，该模型会输出该文本是否违反 OpenAI 的内容政策。\n\n相关指南：[内容审核](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fguides\u002Fmoderation)\n\n可以通过 
`OpenAIClient.ModerationsEndpoint` 访问内容审核 API。\n\n#### [创建内容审核](https:\u002F\u002Fplatform.openai.com\u002Fdocs\u002Fapi-reference\u002Fmoderations\u002Fcreate)\n\n判断文本是否违反 OpenAI 的内容政策。\n\n```csharp\nvar api = new OpenAIClient();\nvar isViolation = await api.ModerationsEndpoint.GetModerationAsync(\"我想杀了他们。\");\nAssert.IsTrue(isViolation);\n```\n\n此外，你还可以获取给定输入的各项评分。\n\n```csharp\nvar api = new OpenAIClient();\nvar response = await api.ModerationsEndpoint.CreateModerationAsync(new ModerationsRequest(\"我爱你\"));\nAssert.IsNotNull(response);\nDebug.Log(response.Results?[0]?.Scores?.ToString());\n```\n\n---","# com.openai.unity 快速上手指南\n\n## 环境准备\n\n- **Unity 版本**：要求 Unity 2021.3 LTS 或更高版本。\n- **前置依赖**：需要拥有 OpenAI API 账户及有效的 API Key。\n- **网络环境**：由于 OpenAI 服务在中国大陆地区可能无法直接访问，建议配置代理或使用国内中转服务（如有）。\n\n## 安装步骤\n\n推荐使用 **OpenUPM** 进行安装，这是最简便且能自动处理依赖的方式。\n\n### 方法一：通过终端命令安装（推荐）\n\n在 Unity 项目根目录下打开终端，执行以下命令：\n\n```bash\nopenupm add com.openai.unity\n```\n\n### 方法二：通过 Unity Package Manager 手动添加\n\n如果未安装 openupm-cli，可在 Unity 编辑器中手动配置：\n\n1. 打开 **Edit > Project Settings > Package Manager**。\n2. 添加新的 Scoped Registry：\n   - **Name**: `OpenUPM`\n   - **URL**: `https:\u002F\u002Fpackage.openupm.com`\n   - **Scope(s)**:\n     - `com.openai`\n     - `com.utilities`\n3. 打开 **Window > Package Manager**。\n4. 将左上角下拉菜单从 \"Unity Registry\" 切换为 **\"My Registries\"**。\n5. 在列表中找到 **OpenAI** 包并点击 **Install**。\n\n> **注意**：若选择通过 Git URL 安装，需手动额外安装以下依赖包：`com.utilities.async`, `com.utilities.websockets`, `com.utilities.extensions`, `com.utilities.rest`, `com.utilities.audio`, `com.utilities.encoder.wav`。\n\n## 基本使用\n\n### 1. 
认证配置\n\n为了安全起见，**不建议**将 API Key 硬编码在代码中。推荐以下两种方式之一：\n\n#### 方式 A：使用环境变量（推荐用于本地测试）\n\n在系统环境中设置以下变量：\n- `OPENAI_API_KEY`: 你的 API Key\n- `OPENAI_ORGANIZATION_ID`: (可选) 组织 ID\n- `OPENAI_PROJECT_ID`: (可选) 项目 ID\n\n代码初始化：\n```csharp\nvar api = new OpenAIClient(new OpenAIAuthentication().LoadFromEnvironment());\n```\n\n#### 方式 B：使用配置文件\n\n在项目根目录或用户主目录下创建名为 `.openai` 的文件，内容如下（JSON 格式）：\n\n```json\n{\n  \"apiKey\": \"sk-aaaabbbbbccccddddd\",\n  \"organizationId\": \"org-yourOrganizationId\",\n  \"projectId\": \"proj_yourProjectId\"\n}\n```\n\n代码初始化：\n```csharp\nvar api = new OpenAIClient(); \u002F\u002F 会自动加载默认配置文件\n```\n\n### 2. 调用聊天接口示例\n\n以下是一个最简单的发送消息并获取回复的示例：\n\n```csharp\nusing System;\nusing OpenAI;\nusing OpenAI.Chat;\n\npublic class ChatExample : MonoBehaviour\n{\n    async void Start()\n    {\n        \u002F\u002F 初始化客户端\n        var api = new OpenAIClient(new OpenAIAuthentication().LoadFromEnvironment());\n        var chatEndpoint = api.ChatEndpoint;\n\n        \u002F\u002F 构建对话请求\n        var conversation = new ChatRequest(\n            messages: new[] {\n                new Message(Role.System, \"You are a helpful assistant.\"),\n                new Message(Role.User, \"Hello, who are you?\")\n            },\n            model: \"gpt-3.5-turbo\"\n        );\n\n        try\n        {\n            \u002F\u002F 发送请求并获取结果\n            var response = await chatEndpoint.GetCompletionAsync(conversation);\n            \n            if (response.Choices != null && response.Choices.Length > 0)\n            {\n                Debug.Log($\"AI 回复：{response.FirstChoice.Message.Content}\");\n            }\n        }\n        catch (Exception e)\n        {\n            Debug.LogError($\"请求失败：{e.Message}\");\n        }\n    }\n}\n```\n\n### 3. 
流式输出示例（Streaming）\n\n如果需要实现打字机效果，可以使用流式接口：\n\n```csharp\nvar response = await chatEndpoint.StreamCompletionAsync(conversation);\n\nawait foreach (var update in response)\n{\n    if (update.Choices != null && update.Choices.Length > 0)\n    {\n        string delta = update.FirstChoice.Delta.Content;\n        if (!string.IsNullOrEmpty(delta))\n        {\n            Debug.Log(delta); \u002F\u002F 逐字输出\n        }\n    }\n}\n```","一家独立游戏工作室正在开发一款支持语音互动的 NPC 系统，希望让玩家能通过麦克风直接与角色进行实时对话。\n\n### 没有 com.openai.unity 时\n- 开发者需手动封装复杂的 HTTP 请求代码来处理 OpenAI REST API，导致大量样板代码堆积，维护困难。\n- 实现流式响应（Streaming）和功能调用（Function Calling）时，需自行处理异步数据解析，极易出现卡顿或数据丢失。\n- 缺乏对 Unity 生命周期和异步任务的原生支持，多线程处理不当常引发主线程阻塞，造成游戏画面冻结。\n- 集成音频输入输出功能时，需额外寻找第三方库进行 WAV 编码转换，增加了项目依赖管理的复杂度。\n- 调试过程繁琐，任何 API 参数变更都需要修改底层网络逻辑，严重拖慢原型验证速度。\n\n### 使用 com.openai.unity 后\n- 通过 OpenUPM 一键安装即可直接调用封装好的 API 接口，无需编写底层网络请求，代码量减少 70%。\n- 原生支持流式文本生成与函数调用回调，开发者只需订阅事件即可平滑展示 NPC 回复，交互体验流畅自然。\n- 内置专为 Unity 优化的异步处理机制，自动适配主线程更新，彻底消除因网络请求导致的帧率波动。\n- 集成了音频编码与 WebSocket 实时会话模块，可直接对接麦克风输入并处理实时语音流转，简化了音频管线。\n- 提供清晰的文档与标准化错误处理，快速切换不同模型或调整参数，让团队能在数小时内完成新功能迭代。\n\ncom.openai.unity 将复杂的 AI 接口调用转化为 Unity 原生的开发体验，让游戏开发者能专注于创意逻辑而非底层通信细节。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FRageAgainstThePixel_com.openai.unity_710572ab.png","RageAgainstThePixel","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002FRageAgainstThePixel_f704b0b6.jpg","",null,"RagePixels_XR","https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel",[83],{"name":84,"color":85,"percentage":86},"C#","#178600",100,594,86,"2026-04-02T07:53:04","MIT","Windows, macOS, Linux","未说明",{"notes":94,"python":95,"dependencies":96},"这是一个用于 Unity 游戏引擎的 C# 包，而非 Python 库。安装推荐使用 OpenUPM 包管理器。运行需要有效的 OpenAI API 密钥（可通过环境变量、配置文件或代码传入）。若通过 Git URL 安装，需手动添加多个依赖仓库。支持 Azure OpenAI 部署。","不适用 (基于 Unity\u002FC#)",[97,98,99,100,101,102,103],"Unity 2021.3 LTS 
或更高版本","com.utilities.async","com.utilities.websockets","com.utilities.extensions","com.utilities.rest","com.utilities.audio","com.utilities.encoder.wav",[26,53,15,14,54,13],[106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125],"upm","upm-package","unity","unity3d","openai","open-ai","openupm","ai","unity-package","unity-ml","unity-scripts","unity-plugin","dall-e","gpt","chat-gpt","gpt-4","dall-e-3","gpt-4o","gpt-realtime","gpt-5","2026-03-27T02:49:30.150509","2026-04-06T05:27:06.803419",[129,134,139,144,149,154],{"id":130,"question_zh":131,"answer_zh":132,"source_url":133},13491,"在 iOS 平台上遇到 JsonSerializationException，提示无法反序列化只读字典（IReadOnlyDictionary），如何解决？","这是由于 iOS 使用 IL2CPP 后端导致的序列化问题。解决方案是保留属性的只读访问器，但在构造函数中接收具体的 Dictionary 类型。请参考以下代码模式：\n\nclass ReadOnlyTest\n{\n    [Preserve]\n    [JsonConstructor]\n    public ReadOnlyTest(\n        [JsonProperty(\"metadata\")] Dictionary\u003Cstring, string> metadata)\n    {\n        Metadata = metadata;\n    }\n\n    [Preserve]\n    [JsonProperty(\"metadata\")]\n    public IReadOnlyDictionary\u003Cstring, string> Metadata { get; }\n}\n\n确保添加了 [Preserve] 和 [JsonConstructor] 特性。","https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity\u002Fissues\u002F147",{"id":135,"question_zh":136,"answer_zh":137,"source_url":138},13492,"调用 ListAssistantsAsync 时抛出 ArgumentException，提示无法将 System.String 转换为 ResponseFormatObject，原因是什么？","这个问题通常是因为 API 返回的 `output_format` 字段值导致的反序列化失败。特别是在 Playground 中创建的 Assistant 可能包含库早期版本未正确处理的格式。维护者已确认需要添加自定义序列化器来处理该字段。如果遇到此问题，请确保使用的是最新版本的插件，因为后续版本已修复了对 `output_format` 字段的兼容性问题。","https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity\u002Fissues\u002F293",{"id":140,"question_zh":141,"answer_zh":142,"source_url":143},13493,"获取 ChatRequest 响应时，返回的字符串包含了完整的 JSON 结构（如 id, model, usage 等），而不仅仅是回复内容，如何只获取文本内容？","如果收到的响应包含完整的 JSON 原始数据而不是解析后的对象，通常是因为本地缓存或序列化状态异常。尝试删除 Unity 项目中的 `\u002FLibrary\u002F` 
文件夹并重新编译项目，这通常能解决因缓存导致的响应解析错误。如果问题依旧，请检查是否直接打印了原始响应对象而非 `.Content` 或 `.Message` 属性。","https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity\u002Fissues\u002F53",{"id":145,"question_zh":146,"answer_zh":147,"source_url":148},13494,"在 WebGL 构建中文件存储或音频缓存不起作用，报错 DirectoryNotFoundException，如何解决？","WebGL 平台不支持标准的文件系统路径（如 \u002Ftmp\u002F），且 Unity 的 DownloadHandlerAudioClip 在 WebGL 上存在已知 Bug，导致流式音频处理失效。目前的缓存机制在 WebGL 上实际上是空操作（no-op）。对于未知长度的 MP3 文件或图像生成，WebGL 构建中可能无法正常工作。建议针对 WebGL 平台禁用本地文件缓存功能，或等待底层 REST 包修复相关的缓存实现。","https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity\u002Fissues\u002F130",{"id":150,"question_zh":151,"answer_zh":152,"source_url":153},13495,"在 Unity 6 或 6.2 的 Android 构建中，使用 StreamAudioSource.SampleCallbackAsync() 导致应用崩溃，如何解决？","这是一个在 Unity 6\u002F6.2 Android 构建中特定的崩溃问题，通常发生在从 `BufferCallback` 切换到 `SampleCallbackAsync` 后。维护者已在版本 8.8.8 中修复了此问题。如果您遇到此崩溃，请将 OpenAI Unity 插件升级到 8.8.8 或更高版本。升级后在 Samsung 手机和平板等设备上测试表明崩溃已解决。","https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity\u002Fissues\u002F438",{"id":155,"question_zh":156,"answer_zh":157,"source_url":158},13496,"通过 UPM 安装插件后，无法导入 OpenAI.Chat 或 OpenAI.Models 命名空间，报错找不到类型，怎么办？","如果安装后无法识别命名空间，首先尝试导入插件自带的示例场景（Sample Scene）。示例场景中通常包含了正确的引用配置和脚本设置，可以帮助验证环境是否配置正确。如果导入示例场景后仍无法解决，请检查 Package Manager 中插件是否显示为完全安装状态，并尝试重新导入或重启 Unity 编辑器以刷新程序集定义。","https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity\u002Fissues\u002F99",[160,165,170,175,180,185,190,195,200,205,210,215,220,225,230,235,240,245,250,255],{"id":161,"version":162,"summary_zh":163,"released_at":164},72262,"8.8.9","## 变更内容\n- com.openai.unity 8.8.9，由 @StephenHodgson 在 #441 中提交\n  - com.utilities.audio 更新至 3.0.3\n\n**完整变更日志**：https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity\u002Fcompare\u002F8.8.8...8.8.9","2026-01-26T00:11:01",{"id":166,"version":167,"summary_zh":168,"released_at":169},72263,"8.8.8","## 变更内容\n- com.openai.unity 8.8.8，由 
@StephenHodgson 在 #436 中提交\n  - 优化图像纹理加载\n  - 允许将 `Responses.TextContent.Type` 设置为 `OutputText`，用于 `Role.Assistant` 消息，由 @TypeDefinition 提交\n  - 修复音频重构后的使用后释放崩溃问题\n  - 修复了封装的服务器发送事件错误对象\n  - 修复了为 mcp 工具批准创建 MCPApprovalResponse 的功能\n  - 修复了 MCPToolCall.Error 的反序列化问题\n  - 更新了默认模型\n  - com.utilities.rest 升级至 5.1.1\n  - com.utilities.audio 升级至 3.0.2\n\n**完整变更日志**: https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity\u002Fcompare\u002F8.8.7...8.8.8","2025-12-13T22:36:53",{"id":171,"version":172,"summary_zh":173,"released_at":174},72264,"8.8.7","## 变更内容\n- com.openai.unity 8.8.7 由 @StephenHodgson 在 #431 中提交\n  - 修复 VAD 序列化未正确设置禁用值的问题\n\n**完整变更日志**: https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity\u002Fcompare\u002F8.8.6...8.8.7","2025-11-09T19:02:45",{"id":176,"version":177,"summary_zh":178,"released_at":179},72265,"8.8.6","## 变更内容\n- com.openai.unity 8.8.6，由 @StephenHodgson 在 #428 中提交\n  - 修复 Realtime.UpdateSessionRequests 因额外的客户端密钥数据而导致会话更新失败的问题\n\n**完整变更日志**: https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity\u002Fcompare\u002F8.8.5...8.8.6","2025-11-05T02:51:37",{"id":181,"version":182,"summary_zh":183,"released_at":184},72266,"8.8.5","## 变更内容\n- com.openai.unity 8.8.5，由 @StephenHodgson 在 #423 中提交\n  - 修复 RealtimeSessionConfiguration 和 CreateResponseRequest 模型参数覆盖提示设置的问题。\n  - 修复 Threads.MessageResponse.Status 未正确设置为 Completed 的问题。\n  - 更新了所有示例场景中的音频播放实现。\n  - com.utilities.rest 升级至 5.0.4。\n  - com.utilities.encoder.wav 升级至 3.0.2。\n  - com.utilities.websockets 升级至 2.0.0。\n\n**完整变更日志**: https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity\u002Fcompare\u002F8.8.4...8.8.5","2025-11-03T23:46:45",{"id":186,"version":187,"summary_zh":188,"released_at":189},72267,"8.8.4","## 变更内容\n\n- 修复了 WAV 编码器依赖问题\n\n**完整变更日志**: 
https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity\u002Fcompare\u002F8.8.3...8.8.4","2025-10-23T12:52:48",{"id":191,"version":192,"summary_zh":193,"released_at":194},72268,"8.8.3","## 变更内容\n\n- 修复 RealtimeBehaviour 示例转录\n- 修复示例场景中的麦克风精灵引用\n- 添加 DurationUsage\n- 更新 ConversationItemInputAudioTranscriptionResponse，使其包含 logprobs 和 usage\n- 由 @aechan 添加与 Azure Blob Batch API 兼容的 CreateBatchRequest 字段\n- 在 Realtime.SessionConfiguration 中更新 Prompt 和 Speed 参数\n- 将 Realtime.Prompt 移至通用命名空间\n- 提升包依赖版本\n\n\n**完整变更日志**: https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity\u002Fcompare\u002F8.8.2...8.8.3","2025-10-23T02:05:03",{"id":196,"version":197,"summary_zh":198,"released_at":199},72269,"8.8.2","## 变更内容\n\n- 由 @dudziakl 修复了 Android 平台上解析响应时的一个 bug\n  - 重构了 JSON 对象 `.ctrs`，以解决 AOT 序列化问题\n- 由 @eapark 为 `RealtimeConfiguration` 添加了 `NoiseReductionSettings`\n- 由 @eapark 修复了 `ConversationItemTruncateRequest.ContentIndex` 的默认序列化问题\n- 由 @gfreezy 为响应 API 添加了 `file_url`\n- 增加了对 GPT-5 和最新 API 变更的支持\n- 新增了 `ConversationsEndpoint`\n- 更新了默认模型列表\n  - 使 `Model` 类可序列化，以便在检查器中进行选择\n- 重构了 Assistant\u002FThread Endpoint 的 Beta 版本标头，使其仅应用于这些特定端点\n- 更新了依赖项\n\n**完整变更日志**: https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity\u002Fcompare\u002F8.8.1...8.8.2","2025-10-03T04:36:30",{"id":201,"version":202,"summary_zh":203,"released_at":204},72270,"8.8.1","## 变更内容\n\n- 更新了实时音频转录设置属性\n- 升级了依赖项\n\n**完整变更日志**: https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity\u002Fcompare\u002F8.8.0...8.8.1","2025-07-04T20:54:02",{"id":206,"version":207,"summary_zh":208,"released_at":209},72271,"8.8.0","## 变更内容\n\n- 改进了 RealtimeSession 对代理的 WebSocket 支持\n  - 代理不再直接处理 WebSocket 连接，而是使用临时 API 密钥直接向 OpenAI API 发起连接\n\n**完整变更日志**: 
https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity\u002Fcompare\u002F8.7.4...8.8.0","2025-07-02T17:15:44",{"id":211,"version":212,"summary_zh":213,"released_at":214},72272,"8.7.4","## What's Changed\r\n\r\n- bump deps\r\n- added sample menu to unity editor project\r\n\r\n**Full Changelog**: https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity\u002Fcompare\u002F8.7.3...8.7.4","2025-07-01T20:15:57",{"id":216,"version":217,"summary_zh":218,"released_at":219},72273,"8.7.3","## What's Changed\r\n\r\n- Fixed Response.Instructions deserialization when using CreateResponseRequest.Prompt\r\n\r\n**Full Changelog**: https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity\u002Fcompare\u002F8.7.3...8.7.3","2025-06-25T15:50:58",{"id":221,"version":222,"summary_zh":223,"released_at":224},72274,"8.7.2","## What's Changed\r\n\r\n- Add support for predefined prompts in Responses endpoint\r\n- Fixed WebSearchToolPreview streaming annotation deserialization\r\n  - Added IAnnotation\r\n  - Added UrlCitation\r\n  - Added ContainerFileCitation\r\n- Responses.TextContent.Annotations array type changed from Annotation to IAnnotation\r\n  - Added Responses.TextContent.LogProbs\r\n- Fixed Responses Tool serialization\r\n\r\n**Full Changelog**: https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity\u002Fcompare\u002F8.7.1...8.7.2","2025-06-25T02:06:21",{"id":226,"version":227,"summary_zh":228,"released_at":229},72275,"8.7.1","## What's Changed\r\n\r\n- Fix Azure OpenAI endpoints that don't contain the deployment-id\r\n- Add JsonSchema support to Responses\r\n- Fix Responses Sample not registered in package manger samples\r\n\r\n**Full Changelog**: https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity\u002Fcompare\u002F8.7.0...8.7.1","2025-06-19T01:18:39",{"id":231,"version":232,"summary_zh":233,"released_at":234},72276,"8.7.0","## What's Changed:\r\n\r\n- Added support for Responses 
API\r\n- Added support for gpt-image-1\r\n- Updated all static model definitions\r\n- Updated speech requests with instructions input\r\n- Updated audio transcription requests with ChunkingStrategy and includes\r\n- Updated default Azure OpenAI API Version to [GA 2024-10-21](https:\u002F\u002Flearn.microsoft.com\u002Fen-us\u002Fazure\u002Fai-services\u002Fopenai\u002Freference#data-plane-inference)\r\n- Renamed ChatResponseFormat ->TextResponseFormat\r\n- Renamed ResponseFormatObject  -> TextResponseFormatConfiguration\r\n- Renamed Realtime.Usage -> OpenAI.TokenUsage\r\n- Moved Realtime.TokenDetails -> OpenAI.TokenDetails\r\n- Removed ImageSize enum\r\n\r\n### First Time Contributor\r\n\r\n- @EitanWong\r\n\r\n**Full Changelog**: https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity\u002Fcompare\u002F8.6.6...8.7.0","2025-06-15T00:44:39",{"id":236,"version":237,"summary_zh":238,"released_at":239},72277,"8.6.6","## What's Changed\r\n\r\n- fix mp3 and wav playback from SpeechRequests\r\n- added SemanticVAD options to realtime\r\n- added new audio models to static model list\r\n\r\n**Full Changelog**: https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity\u002Fcompare\u002F8.6.5...8.6.6","2025-04-27T18:46:04",{"id":241,"version":242,"summary_zh":243,"released_at":244},72278,"8.6.5","## What's Changed\r\n- updated com.utilities.rest -> 3.3.3\r\n- updated com.utilities.encoder.wav -> 2.2.2\r\n\r\n**Full Changelog**: https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity\u002Fcompare\u002F8.6.4...8.6.5","2025-04-13T21:48:37",{"id":246,"version":247,"summary_zh":248,"released_at":249},72279,"8.6.4","## What's Changed\r\n\r\n- Fixed Assistant Reasoning Model request serialization when using thread.CreateRunAsync\r\n\r\n**Full Changelog**: 
https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity\u002Fcompare\u002F8.6.3...8.6.4","2025-03-21T13:47:02",{"id":251,"version":252,"summary_zh":253,"released_at":254},72280,"8.6.3","## What's New\r\n\r\n- Fixed Threads.AssistantResponse serialization with reasoning_effort\r\n- Updated AssitantBehaviour sample\r\n- Updated ChatBehaviour sample\r\n- Added additional deserializing json logging when a failure occurs\r\n\r\n**Full Changelog**: https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity\u002Fcompare\u002F8.6.2...8.6.3","2025-03-09T20:34:21",{"id":256,"version":257,"summary_zh":258,"released_at":259},72281,"8.6.2","## What's New\r\n\r\n- Fixed Assistant Reasoning Model request serialization\r\n- Added a way to set max_tokens on chat request to support older azure api\r\n- Updated predefined models\r\n- Improve AudioClip handling and performance\r\n- Convert Task.Delay to Awaiters.DelayAsync to improve performance on WebGL Platform\r\n\r\n**Full Changelog**: https:\u002F\u002Fgithub.com\u002FRageAgainstThePixel\u002Fcom.openai.unity\u002Fcompare\u002F8.6.1...8.6.2","2025-03-08T03:46:54"]