[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-declare-lab--conv-emotion":3,"tool-declare-lab--conv-emotion":61},[4,18,26,36,44,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":10,"last_commit_at":24,"category_tags":25,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":32,"last_commit_at":33,"category_tags":34,"status":17},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",142651,2,"2026-04-06T23:34:12",[14,13,35],"语言模型",{"id":37,"name":38,"github_repo":39,"description_zh":40,"stars":41,"difficulty_score":32,"last_commit_at":42,"category_tags":43,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",107888,"2026-04-06T11:32:50",[14,15,13],{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":32,"last_commit_at":50,"category_tags":51,"status":17},4721,"markitdown","microsoft\u002Fmarkitdown","MarkItDown 是一款由微软 AutoGen 团队打造的轻量级 Python 工具，专为将各类文件高效转换为 Markdown 格式而设计。它支持 PDF、Word、Excel、PPT、图片（含 OCR）、音频（含语音转录）、HTML 乃至 YouTube 链接等多种格式的解析，能够精准提取文档中的标题、列表、表格和链接等关键结构信息。\n\n在人工智能应用日益普及的今天，大语言模型（LLM）虽擅长处理文本，却难以直接读取复杂的二进制办公文档。MarkItDown 恰好解决了这一痛点，它将非结构化或半结构化的文件转化为模型“原生理解”且 Token 
# conv-emotion

**declare-lab/conv-emotion**: This repo contains implementations of different architectures for emotion recognition in conversations.

conv-emotion is an open-source project focused on emotion recognition in conversations (ERC): teaching machines to follow how speakers' emotions evolve across multi-turn dialogue. In everyday conversation or customer-service settings, analyzing a single utterance in isolation is rarely enough to judge emotion accurately, because emotion shifts dynamically with context. conv-emotion addresses this by providing several state-of-the-art deep learning architectures that use conversational context to capture emotional states more precisely.

The project suits AI researchers, NLP developers, and engineers building intelligent dialogue systems. Its core strength is the set of top-performing models it integrates: COSMIC in particular incorporates a commonsense knowledge base, markedly improving recognition accuracy and setting new best results on several benchmark datasets at the time of release. The repository also contains implementations of classic algorithms such as DialogueGCN and DialogueRNN, and keeps adding resources for related datasets (e.g., M2H2) and derived tasks (e.g., emotion-cause extraction). Whether you want to reproduce published results or need reliable baselines to build on, conv-emotion is a rich, well-validated resource for conversational affective computing.

# Emotion Recognition in Conversations

## Note

``` For those enquiring about how to extract visual and audio features, please check this out: https://github.com/soujanyaporia/MUStARD```

## Updates 🔥 🔥 🔥

| Date | Announcements |
|-|-|
| 10/03/2024 | If you are interested in IQ testing LLMs, check out our new work: [AlgoPuzzleVQA](https://github.com/declare-lab/puzzle-reasoning) |
| 03/08/2021 | 🎆 🎆 We have released a new dataset M2H2: A Multimodal Multiparty Hindi Dataset For Humor Recognition in Conversations. Check it out: [M2H2](https://github.com/declare-lab/M2H2-dataset). The baselines for the M2H2 dataset are created based on DialogueRNN and bcLSTM. |
| 18/05/2021 | 🎆 🎆 We have released a new repo containing models to solve the problem of emotion cause recognition in conversations. Check it out: [emotion-cause-extraction](https://github.com/declare-lab/conv-emotion/tree/master/emotion-cause-extraction). Thanks to [Pengfei Hong](https://www.pengfei-hong.com/) for compiling this. |
| 24/12/2020 | 🎆 🎆 Interested in the topic of recognizing emotion causes in conversations? We have just released a dataset for this. Head over to [https://github.com/declare-lab/RECCON](https://github.com/declare-lab/RECCON). |
| 06/10/2020 | 🎆 🎆 New paper and SOTA in Emotion Recognition in Conversations. Refer to the directory [COSMIC](./COSMIC) for the code. Read the paper -- [COSMIC: COmmonSense knowledge for eMotion Identification in Conversations](https://arxiv.org/pdf/2010.02795.pdf). |
| 30/09/2020 | New paper and baselines in utterance-level dialogue understanding have been released. Read our paper [Utterance-level Dialogue Understanding: An Empirical Study](https://arxiv.org/pdf/2009.13902.pdf). Fork the [codes](https://github.com/declare-lab/dialogue-understanding). |
| 26/07/2020 | New DialogueGCN code has been released. Please visit https://github.com/declare-lab/conv-emotion/tree/master/DialogueGCN-mianzhang. All the credit goes to Mian Zhang (https://github.com/mianzhang/). |
| 11/07/2020 | Interested in reading the papers on ERC or related tasks such as sarcasm detection in conversations? We have compiled a comprehensive reading list. Please visit https://github.com/declare-lab/awesome-emotion-recognition-in-conversations |
| 07/06/2020 | New state-of-the-art results for the ERC task will be released soon. |
| 07/06/2020 | The conv-emotion repo will be maintained on https://github.com/declare-lab/ |
| 22/12/2019 | Code for DialogueGCN has been released. |
| 11/10/2019 | New paper: Conversational Transfer Learning for Emotion Recognition. |
| 09/08/2019 | New paper on Emotion Recognition in Conversation (ERC). |
| 06/03/2019 | Features and codes to train DialogueRNN on the MELD dataset have been released. |
| 20/11/2018 | End-to-end versions of ICON and DialogueRNN have been released. |
---------------------------------------------------------------------------

COSMIC is the best-performing model in this repo; please visit the links below to compare the models on different ERC datasets.

[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/cosmic-commonsense-knowledge-for-emotion/emotion-recognition-in-conversation-on-4)](https://paperswithcode.com/sota/emotion-recognition-in-conversation-on-4?p=cosmic-commonsense-knowledge-for-emotion)

[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/cosmic-commonsense-knowledge-for-emotion/emotion-recognition-in-conversation-on-meld)](https://paperswithcode.com/sota/emotion-recognition-in-conversation-on-meld?p=cosmic-commonsense-knowledge-for-emotion)

[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/cosmic-commonsense-knowledge-for-emotion/emotion-recognition-in-conversation-on-3)](https://paperswithcode.com/sota/emotion-recognition-in-conversation-on-3?p=cosmic-commonsense-knowledge-for-emotion)

[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/cosmic-commonsense-knowledge-for-emotion/emotion-recognition-in-conversation-on)](https://paperswithcode.com/sota/emotion-recognition-in-conversation-on?p=cosmic-commonsense-knowledge-for-emotion)

This repository contains implementations of several methods for emotion recognition in conversations, as well as algorithms for recognizing emotion cause in conversations:

- [Emotion Recognition in Conversations](#emotion-recognition-in-conversations)
  * [Data Format](#data-format)
  * [COSMIC (PyTorch)](#cosmic-commonsense-knowledge-for-emotion-identification-in-conversations)
  * [TL-ERC (PyTorch)](#tl-erc-emotion-recognition-in-conversations-with-transfer-learning-from-generative-conversation-modeling)
  * [DialogueGCN (PyTorch)](#dialoguegcn-a-graph-convolutional-neural-network-for-emotion-recognition-in-conversation)
  * [DialogueRNN (PyTorch)](#dialoguernn-an-attentive-rnn-for-emotion-detection-in-conversations)
  * [DialogueGCN-mianzhang (PyTorch)](#dialoguegcn-mianzhang-dialoguegcn-implementation-by-mian-zhang)
  * [ICON (TensorFlow)](#icon)
  * [CMN (TensorFlow)](#cmn)
  * [bc-LSTM-pytorch (PyTorch)](#bc-lstm-pytorch)
  * [bc-LSTM (Keras)](#bc-lstm)
- [Recognizing Emotion Cause in Conversations](#recognizing-emotion-cause-in-conversations)
  * [ECPE-2D on RECCON dataset](#ecpe-2d-on-reccon-dataset)
  * [Rank-Emotion-Cause on RECCON dataset](#rank-emotion-cause-on-reccon-dataset)
  * [ECPE-MLL on RECCON dataset](#ecpe-mll-on-reccon-dataset)
  * [RoBERTa and SpanBERT Baselines on RECCON dataset](#roberta-and-spanbert-baselines-on-reccon-dataset)

Unlike other emotion detection models, these techniques consider the party states and inter-party dependencies when modeling the conversational context relevant to emotion recognition.
The primary purpose of all these techniques is to pretrain an emotion detection model for empathetic dialogue generation.

<p align="center">
  <img src="https://oss.gittoolsai.com/images/declare-lab_conv-emotion_readme_a671d59c710c.png" alt="Controlling variables in conversation" width="600"/>
  <figcaption style="display:table-caption;"><em>Interaction among different controlling variables during a dyadic conversation between persons X and Y. Grey and white circles represent hidden and observed variables, respectively. P represents personality, U represents utterance, S represents interlocutor state, I represents interlocutor intent, B represents background knowledge, Q represents external and sensory inputs, E represents emotion, and Topic represents the topic of the conversation. This can easily be extended to multi-party conversations.</em></figcaption>
</p>

Emotion recognition can be very useful for empathetic and affective dialogue generation -

<p align="center">
  <img src="https://oss.gittoolsai.com/images/declare-lab_conv-emotion_readme_396d0caf2edf.jpg" alt="Affective dialogue generation" width="500"/>
</p>

## Data Format

These networks expect an emotion/sentiment label and speaker info for each utterance in a dialogue, like
```
Party 1: I hate my girlfriend (angry)
Party 2: you got a girlfriend?! (surprise)
Party 1: yes (angry)
```
However, the code can be adapted to perform tasks where only the preceding utterances are available as context, without their corresponding labels, and the goal is to label only the present/target utterance. For example, the *context* is
```
Party 1: I hate my girlfriend
Party 2: you got a girlfriend?!
```
the *target* is
```
Party 1: yes (angry)
```
where the target emotion is _angry_.
Moreover, this code can also be molded to train the network in an end-to-end manner. We will soon push these useful changes.
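For concreteness, a dialogue in this format can be pictured as a list of (speaker, utterance, label) triples, from which the context/target split above falls out naturally. This is an illustrative sketch only; the actual training scripts consume preprocessed feature files rather than raw text:

```python
# Illustrative only: how a labeled dialogue and a context/target split might
# be represented. The real scripts in this repo read preprocessed features.
from typing import List, Optional, Tuple

Turn = Tuple[str, str, Optional[str]]  # (speaker, utterance, emotion label)

dialogue: List[Turn] = [
    ("Party 1", "I hate my girlfriend", "angry"),
    ("Party 2", "you got a girlfriend?!", "surprise"),
    ("Party 1", "yes", "angry"),
]

def context_target_split(turns: List[Turn]) -> Tuple[List[Turn], Turn]:
    """Treat all but the last utterance as (possibly unlabeled) context."""
    context = [(spk, utt, None) for spk, utt, _ in turns[:-1]]
    return context, turns[-1]

context, target = context_target_split(dialogue)
print(target)  # ('Party 1', 'yes', 'angry')
```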
## Present SOTA Results

<table>
  <tr>
    <th rowspan="2">Methods</th>
    <th>IEMOCAP</th>
    <th colspan="2">DailyDialog</th>
    <th colspan="2">MELD</th>
    <th colspan="2">EmoryNLP</th>
  </tr>
  <tr>
    <td>W-Avg F1</td>
    <td>Macro F1</td>
    <td>Micro F1</td>
    <td>W-Avg F1 (3-cls)</td>
    <td>W-Avg F1 (7-cls)</td>
    <td>W-Avg F1 (3-cls)</td>
    <td>W-Avg F1 (7-cls)</td>
  </tr>
  <tr>
    <td>RoBERTa</td>
    <td>54.55</td><td>48.20</td><td>55.16</td><td>72.12</td><td>62.02</td><td>55.28</td><td>37.29</td>
  </tr>
  <tr>
    <td>RoBERTa DialogueRNN</td>
    <td>64.76</td><td>49.65</td><td>57.32</td><td>72.14</td><td>63.61</td><td>55.36</td><td>37.44</td>
  </tr>
  <tr>
    <td><b>RoBERTa COSMIC</b></td>
    <td><b>65.28</b></td><td><b>51.05</b></td><td><b>58.48</b></td><td><b>73.20</b></td><td><b>65.21</b></td><td><b>56.51</b></td><td><b>38.11</b></td>
  </tr>
</table>

## COSMIC: COmmonSense knowledge for eMotion Identification in Conversations

[_COSMIC_](https://github.com/declare-lab/conv-emotion) addresses the task of utterance-level emotion recognition in conversations using commonsense knowledge. It is a new framework that incorporates different elements of commonsense such as mental states, events, and causal relations, and builds upon them to learn interactions between interlocutors participating in a conversation. Current state-of-the-art methods often encounter difficulties in context propagation, emotion shift detection, and differentiating between related emotion classes. By learning distinct commonsense representations, COSMIC addresses these challenges and achieves new state-of-the-art results for emotion recognition on four different benchmark conversational datasets.

![Alt text](cosmic.jpg?raw=true "COSMIC framework")

### Execution

First download the RoBERTa and COMET features [here](https://drive.google.com/file/d/1TQYQYCoPtdXN2rQ1mR2jisjUztmOzfZr/view?usp=sharing) and keep them in appropriate directories in `COSMIC/erc-training`. Then training and evaluation on the four datasets are to be done as follows:

1. IEMOCAP: `python train_iemocap.py --active-listener`
2. DailyDialog: `python train_dailydialog.py --active-listener --class-weight --residual`
3. MELD Emotion: `python train_meld.py --active-listener --attention simple --dropout 0.5 --rec_dropout 0.3 --lr 0.0001 --mode1 2 --classify emotion --mu 0 --l2 0.00003 --epochs 60`
4. MELD Sentiment: `python train_meld.py --active-listener --class-weight --residual --classify sentiment`
5. EmoryNLP Emotion: `python train_emorynlp.py --active-listener --class-weight --residual`
6. EmoryNLP Sentiment: `python train_emorynlp.py --active-listener --class-weight --residual --classify sentiment`
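The W-Avg F1 figures reported by these training scripts (and in the table above) are standard weighted-average F1 scores. For reference, a minimal sketch of the metric using scikit-learn, with placeholder labels:

```python
# Computing the F1 variants used in the results table (dummy labels).
from sklearn.metrics import f1_score

y_true = [0, 1, 2, 1, 0, 2]  # gold emotion class ids (placeholder)
y_pred = [0, 1, 1, 1, 0, 2]  # predicted class ids (placeholder)

print("W-Avg F1:", f1_score(y_true, y_pred, average="weighted"))
print("Macro F1:", f1_score(y_true, y_pred, average="macro"))
print("Micro F1:", f1_score(y_true, y_pred, average="micro"))
```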
### Citation

Please cite the following [paper](https://arxiv.org/pdf/2010.02795.pdf) if you find this code useful in your work.

```bash
COSMIC: COmmonSense knowledge for eMotion Identification in Conversations. D. Ghosal, N. Majumder, A. Gelbukh, R. Mihalcea, & S. Poria. Findings of EMNLP 2020.
```

## TL-ERC: Emotion Recognition in Conversations with Transfer Learning from Generative Conversation Modeling

[_TL-ERC_](https://arxiv.org/pdf/1910.04980.pdf) is a transfer learning-based framework for ERC. It pre-trains a generative dialogue model and transfers context-level weights that include affective knowledge into the target discriminative model for ERC.

<p align="center">
  <img src="https://oss.gittoolsai.com/images/declare-lab_conv-emotion_readme_9aead472bc9b.jpg" alt="TL-ERC framework" width="600"/>
</p>

### Setting up

1. Set up an environment with Conda:

    ```bash
    conda env create -f environment.yml
    conda activate TL_ERC
    cd TL_ERC
    python setup.py
    ```
2. Download the dataset files [IEMOCAP](https://drive.google.com/file/d/1nufbrBJ-LtcROv1MviCHFI7tQE3JnqQR/view?usp=sharing), [DailyDialog](https://drive.google.com/file/d/13rHLtAMmDsiCP1hZwWqMNcrtkmZF_hK3/view?usp=sharing) and store them in `./datasets/`.

3. Download the pre-trained weights of HRED on the [Cornell](https://drive.google.com/file/d/1OXtnyJ5nDMmK75L9kEQvKPIyO0xzyeVC/view?usp=sharing) and [Ubuntu](https://drive.google.com/file/d/1T2HLfSvWr7CSrhBuE193XRRXwfLkO_aK/view?usp=sharing) datasets and store them in `./generative_weights/`.

4. [Optional]: To train new generative weights from dialogue models, refer to https://github.com/ctr4si/A-Hierarchical-Latent-Structure-for-Variational-Conversation-Modeling .

### Run the ERC classifier with pre-trained weights

1. `cd bert_model`
2. `python train.py --load_checkpoint=../generative_weights/cornell_weights.pkl --data=iemocap`
    - Change `cornell` to `ubuntu` and `iemocap` to `dailydialog` for other dataset combinations.
    - Drop `load_checkpoint` to avoid initializing contextual weights.
    - To modify hyperparameters, check `configs.py`.
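The transfer step amounts to warm-starting the ERC classifier's context-level encoder from a checkpoint of the generative dialogue model. The sketch below illustrates that idea in PyTorch; the module and checkpoint key names are hypothetical and do not correspond to the repo's actual code:

```python
# Sketch of TL-ERC-style weight transfer (illustrative; module and checkpoint
# key names are hypothetical, not the repo's actual identifiers).
import torch
import torch.nn as nn

class ERCClassifier(nn.Module):
    def __init__(self, dim=300, hidden=512, n_classes=6):
        super().__init__()
        # context-level encoder whose weights we want to warm-start
        self.context_encoder = nn.GRU(dim, hidden, batch_first=True)
        self.head = nn.Linear(hidden, n_classes)

    def forward(self, utt_feats):              # (batch, turns, dim)
        states, _ = self.context_encoder(utt_feats)
        return self.head(states)               # per-utterance logits

model = ERCClassifier()
logits = model(torch.randn(2, 4, 300))         # 2 dialogues, 4 turns each
print(logits.shape)                            # torch.Size([2, 4, 6])

# Hypothetical transfer step: load the generative model's checkpoint and copy
# only the parameters of the matching context-level encoder.
# checkpoint = torch.load("generative_weights/cornell_weights.pkl")
# model.context_encoder.load_state_dict(checkpoint["context_rnn"])
```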
### [Optional] Create ERC Dataset splits

1. Set the [GloVe](http://nlp.stanford.edu/data/glove.840B.300d.zip) path in the preprocessing files.
2. `python iemocap_preprocess.py`. Similarly for `dailydialog`.

### Citation

Please cite the following paper if you find this code useful in your work.

```bash
Conversational transfer learning for emotion recognition. Hazarika, D., Poria, S., Zimmermann, R., & Mihalcea, R. (2020). Information Fusion.
```

## DialogueGCN: A Graph Convolutional Neural Network for Emotion Recognition in Conversation

[_DialogueGCN_](https://arxiv.org/pdf/1908.11540.pdf) (Dialogue Graph Convolutional Network) is a graph neural network-based approach to ERC. We leverage the self- and inter-speaker dependencies of the interlocutors to model the conversational context for emotion recognition. Through the graph network, DialogueGCN addresses the context propagation issues present in current RNN-based methods. DialogueGCN is naturally suited for multi-party dialogues.

![Alt text](DialogueGCN.jpg?raw=true "DialogueGCN framework")

### Requirements

- Python 3
- PyTorch 1.0
- PyTorch Geometric 1.3
- Pandas 0.23
- Scikit-Learn 0.20
- TensorFlow (optional; required for tensorboard)
- tensorboardX (optional; required for tensorboard)

### Execution

__Note__: PyTorch Geometric makes heavy use of CUDA atomic operations and is a source of non-determinism. To reproduce the results reported in the paper, we recommend using the following execution command. Note that this script will execute on CPU. We obtained weighted average F1 scores of 64.67 on our machine and 64.44 on Google Colaboratory for the IEMOCAP dataset with the following command.

1. _IEMOCAP_ dataset: `python train_IEMOCAP.py --base-model 'LSTM' --graph-model --nodal-attention --dropout 0.4 --lr 0.0003 --batch-size 32 --class-weight --l2 0.0 --no-cuda`

### Citation

Please cite the following paper if you find this code useful in your work.

```bash
DialogueGCN: A Graph Convolutional Neural Network for Emotion Recognition in Conversation. D. Ghosal, N. Majumder, S. Poria, N. Chhaya, & A. Gelbukh. EMNLP-IJCNLP (2019), Hong Kong, China.
```

## DialogueGCN-mianzhang: DialogueGCN Implementation by Mian Zhang

PyTorch implementation of the paper "DialogueGCN: A Graph Convolutional Neural Network for Emotion Recognition in Conversation".

### Running

You can run the whole process very easily. Take the IEMOCAP corpus for example:

### Step 1: Preprocess.
```bash
./scripts/iemocap.sh preprocess
```

### Step 2: Train.
```bash
./scripts/iemocap.sh train
```

### Requirements

- Python 3
- PyTorch 1.0
- PyTorch Geometric 1.4.3
- Pandas 0.23
- Scikit-Learn 0.20

### Performance Comparison

| - | Dataset | Weighted F1 |
|:-:|:-:|:-:|
| Original | IEMOCAP | 64.18% |
| This Implementation | IEMOCAP | 64.10% |

### Credits

Mian Zhang (GitHub: mianzhang)

### Citation

Please cite the following paper if you find this code useful in your work.

```bash
DialogueGCN: A Graph Convolutional Neural Network for Emotion Recognition in Conversation. D. Ghosal, N. Majumder, S. Poria, N. Chhaya, & A. Gelbukh. EMNLP-IJCNLP (2019), Hong Kong, China.
```
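The heart of DialogueGCN is to treat utterances as graph nodes, connect them within a context window, and type the edges by speaker identity and relative position before applying a relational graph convolution. A rough sketch of such a construction with PyTorch Geometric, using a simplified four-relation scheme rather than the paper's exact one:

```python
# Sketch of a DialogueGCN-style graph over utterances (simplified relations).
import torch
from torch_geometric.nn import RGCNConv

utt = torch.randn(5, 100)                  # 5 utterance feature vectors
speakers = [0, 1, 0, 0, 1]                 # speaker id per utterance
window = 2                                 # connect utterances within +/- 2

edges, etypes = [], []
for i in range(len(speakers)):
    for j in range(max(0, i - window), min(len(speakers), i + window + 1)):
        if i == j:
            continue
        edges.append([j, i])
        # relation: (same vs. different speaker) x (past vs. future) -> 4 types
        etypes.append(2 * int(speakers[i] == speakers[j]) + int(j < i))

edge_index = torch.tensor(edges).t().contiguous()   # shape (2, num_edges)
edge_type = torch.tensor(etypes)

conv = RGCNConv(100, 64, num_relations=4)
out = conv(utt, edge_index, edge_type)     # context-enriched utterance states
print(out.shape)                           # torch.Size([5, 64])
```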
## DialogueRNN: An Attentive RNN for Emotion Detection in Conversations

[_DialogueRNN_](https://arxiv.org/pdf/1811.00405.pdf) is essentially a customized recurrent neural network (RNN) that profiles each speaker in a conversation/dialogue on the fly, while modeling the context of the conversation at the same time. The model can easily be extended to the multi-party scenario. It can also be used as a pretraining model for empathetic dialogue generation.

__Note__: the default settings (hyperparameters and command-line arguments) in the code are meant for BiDialogueRNN+Att. The user needs to optimize the settings for the other variants.
![Alt text](dialoguernn.jpg?raw=true "DialogueRNN framework")

### Requirements

- Python 3
- PyTorch 1.0
- Pandas 0.23
- Scikit-Learn 0.20
- TensorFlow (optional; required for tensorboard)
- tensorboardX (optional; required for tensorboard)

### Dataset Features

Please extract the contents of `DialogueRNN_features.zip`.

### Execution

1. _IEMOCAP_ dataset: `python train_IEMOCAP.py <command-line arguments>`
2. _AVEC_ dataset: `python train_AVEC.py <command-line arguments>`

### Command-Line Arguments

- `--no-cuda`: Does not use GPU
- `--lr`: Learning rate
- `--l2`: L2 regularization weight
- `--rec-dropout`: Recurrent dropout
- `--dropout`: Dropout
- `--batch-size`: Batch size
- `--epochs`: Number of epochs
- `--class-weight`: Class weight (not applicable for AVEC)
- `--active-listener`: Explicit listener mode
- `--attention`: Attention type
- `--tensorboard`: Enables tensorboard log
- `--attribute`: Attribute 1 to 4 (only for AVEC; 1 = valence, 2 = activation/arousal, 3 = anticipation/expectation, 4 = power)

### Citation

Please cite the following paper if you find this code useful in your work.

```bash
DialogueRNN: An Attentive RNN for Emotion Detection in Conversations. N. Majumder, S. Poria, D. Hazarika, R. Mihalcea, E. Cambria, and G. Alexander. AAAI (2019), Honolulu, Hawaii, USA
```
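The "profiles each speaker on the fly" idea can be sketched as keeping one recurrent state per party and updating only the active speaker's state at each turn, alongside a global context state. A heavily simplified illustration that omits DialogueRNN's attention mechanism and emotion GRU:

```python
# Minimal sketch of DialogueRNN's party-state idea (attention omitted).
import torch
import torch.nn as nn

class TinyDialogueRNN(nn.Module):
    """Heavily simplified: a global GRU plus one GRU state per speaker."""
    def __init__(self, dim=100, hidden=100, n_classes=6, n_parties=2):
        super().__init__()
        self.hidden = hidden
        self.n_parties = n_parties
        self.global_cell = nn.GRUCell(dim, hidden)
        self.party_cell = nn.GRUCell(dim + hidden, hidden)
        self.classify = nn.Linear(hidden, n_classes)

    def forward(self, utts, speakers):
        # utts: (turns, dim); speakers: one party id per turn
        g = torch.zeros(1, self.hidden)
        party = [torch.zeros(1, self.hidden) for _ in range(self.n_parties)]
        logits = []
        for t, spk in enumerate(speakers):
            x = utts[t:t + 1]                              # (1, dim)
            g = self.global_cell(x, g)                     # global context
            # update only the active speaker's state, conditioned on context
            party[spk] = self.party_cell(torch.cat([x, g], dim=-1), party[spk])
            logits.append(self.classify(party[spk]))
        return torch.cat(logits)                           # (turns, n_classes)

model = TinyDialogueRNN()
out = model(torch.randn(3, 100), speakers=[0, 1, 0])
print(out.shape)  # torch.Size([3, 6])
```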
## ICON

The Interactive COnversational memory Network (ICON) is a multimodal emotion detection framework that extracts multimodal features from conversational videos and hierarchically models the *self-* and *inter-speaker* emotional influences into global memories. Such memories generate contextual summaries which aid in predicting the emotional orientation of utterance-videos.
<p align="center">
  <img src="https://oss.gittoolsai.com/images/declare-lab_conv-emotion_readme_d89452294417.jpg" alt="ICON framework" width="400"/>
</p>

### Requirements

- python 3.6.5
- pandas==0.23.3
- tensorflow==1.9.0
- numpy==1.15.0
- scikit_learn==0.20.0

### Execution
1. `cd ICON`

2. Unzip the data as follows:
    - Download the features for IEMOCAP using this [link](https://drive.google.com/file/d/1zWCN2oMdibFkOkgwMG2m02uZmSmynw8c/view?usp=sharing).
    - Unzip the folder and place it in the location: `/ICON/IEMOCAP/data/`. Sample command to achieve this: `unzip {path_to_zip_file} -d ./IEMOCAP/`
3. Train the ICON model:
    - `python train_iemocap.py` for IEMOCAP

### Citation
```bash
ICON: Interactive Conversational Memory Network for Multimodal Emotion Detection. D. Hazarika, S. Poria, R. Mihalcea, E. Cambria, and R. Zimmermann. EMNLP (2018), Brussels, Belgium
```

## CMN
[_CMN_](http://aclweb.org/anthology/N18-1193) is a neural framework for emotion detection in dyadic conversations. It leverages multimodal signals from the text, audio and visual modalities. It specifically incorporates speaker-specific dependencies into its architecture for context modeling. Summaries are then generated from this context using multi-hop memory networks.
![Alt text](cmn.jpg?raw=true "CMN framework")

### Requirements

- python 3.6.5
- pandas==0.23.3
- tensorflow==1.9.0
- numpy==1.15.0
- scikit_learn==0.20.0

### Execution
1. `cd CMN`

2. Unzip the data as follows:
    - Download the features for IEMOCAP using this [link](https://drive.google.com/file/d/1zWCN2oMdibFkOkgwMG2m02uZmSmynw8c/view?usp=sharing).
    - Unzip the folder and place it in the location: `/CMN/IEMOCAP/data/`. Sample command to achieve this: `unzip {path_to_zip_file} -d ./IEMOCAP/`
3. Train the CMN model:
    - `python train_iemocap.py` for IEMOCAP

### Citation

Please cite the following paper if you find this code useful in your work.

```bash
Hazarika, D., Poria, S., Zadeh, A., Cambria, E., Morency, L.P. and Zimmermann, R., 2018. Conversational Memory Network for Emotion Recognition in Dyadic Dialogue Videos. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers) (Vol. 1, pp. 2122-2132).
```
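Both CMN and ICON build on multi-hop memory attention: a query derived from the current utterance repeatedly attends over memories built from the conversational history, and the resulting summaries drive the emotion prediction. Stripped of the multimodal machinery, one hop looks roughly like this (illustrative sketch only):

```python
# One attention hop over conversational memories (illustrative sketch).
import torch
import torch.nn.functional as F

def memory_hop(query, memories):
    """query: (dim,); memories: (n_mem, dim) built from the dialogue history."""
    scores = memories @ query                 # dot-product relevance
    attn = F.softmax(scores, dim=0)           # attention over history
    summary = attn @ memories                 # contextual summary
    return query + summary                    # updated query for the next hop

q = torch.randn(64)                           # current utterance representation
mem = torch.randn(5, 64)                      # 5 past utterances as memories
for _ in range(3):                            # 3 memory hops
    q = memory_hop(q, mem)
print(q.shape)  # torch.Size([64])
```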
## bc-LSTM-pytorch
[_bc-LSTM-pytorch_](http://www.aclweb.org/anthology/P17-1081) is a network that uses context to detect the emotion of an utterance in a dialogue. The model is simple but efficient, using only an LSTM to model the temporal relation among the utterances. In this repo we provide the data of SemEval 2019 Task 3, as released by the organizers of SemEval 2019 Task 3 - "Emotion Recognition in Context". In this task only 3 utterances are provided - utterance1 (user1), utterance2 (user2), utterance3 (user1) consecutively. The task is to predict the emotion label of utterance3. Emotion labels for the individual utterances are not provided. However, if your data contains an emotion label for each utterance then you can still use this code and adapt it accordingly. Hence, this code is also applicable to datasets like MOSI, MOSEI, IEMOCAP, AVEC, DailyDialog, etc. bc-LSTM does not make use of speaker information the way CMN, ICON and DialogueRNN do.
<p align="center">
  <img src="https://oss.gittoolsai.com/images/declare-lab_conv-emotion_readme_b2731ddf5370.jpg" alt="bc-LSTM framework" width="500"/>
</p>

### Requirements

- python 3.6.5
- pandas==0.23.3
- PyTorch 1.0
- numpy==1.15.0
- scikit_learn==0.20.0

### Execution
1. `cd bc-LSTM-pytorch`

2. Train the bc-LSTM model:
    - `python train_IEMOCAP.py` for IEMOCAP

### Citation

Please cite the following paper if you find this code useful in your work.

```bash
Poria, S., Cambria, E., Hazarika, D., Majumder, N., Zadeh, A. and Morency, L.P., 2017. Context-dependent sentiment analysis in user-generated videos. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers) (Vol. 1, pp. 873-883).
```

## bc-LSTM
Keras implementation of [_bc-LSTM_](http://www.aclweb.org/anthology/P17-1081).

### Requirements

- python 3.6.5
- pandas==0.23.3
- tensorflow==1.9.0
- numpy==1.15.0
- scikit_learn==0.20.0
- keras==2.1

### Execution
1. `cd bc-LSTM`

2. Train the bc-LSTM model:
    - `python baseline.py -config testBaseline.config` for IEMOCAP

### Citation

Please cite the following paper if you find this code useful in your work.

```bash
Poria, S., Cambria, E., Hazarika, D., Majumder, N., Zadeh, A. and Morency, L.P., 2017. Context-dependent sentiment analysis in user-generated videos. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers) (Vol. 1, pp. 873-883).
```
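The contextual modeling in bc-LSTM reduces to running a (bidirectional) LSTM over the sequence of utterance features and classifying each time step. A minimal PyTorch sketch of that idea; the original implementations above use PyTorch and Keras/TensorFlow respectively:

```python
# Minimal bc-LSTM-style contextual classifier (sketch).
import torch
import torch.nn as nn

class BcLSTM(nn.Module):
    def __init__(self, dim=100, hidden=64, n_classes=6):
        super().__init__()
        self.lstm = nn.LSTM(dim, hidden, batch_first=True, bidirectional=True)
        self.head = nn.Linear(2 * hidden, n_classes)

    def forward(self, utt_feats):              # (batch, turns, dim)
        ctx, _ = self.lstm(utt_feats)           # context-aware utterance states
        return self.head(ctx)                   # (batch, turns, n_classes)

model = BcLSTM()
logits = model(torch.randn(2, 3, 100))          # 2 dialogues, 3 turns each
print(logits.shape)  # torch.Size([2, 3, 6])
```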
# Recognizing Emotion Cause in Conversations

This repository also contains implementations of different architectures to detect emotion cause in conversations.

<p align="center">
  <img src="https://oss.gittoolsai.com/images/declare-lab_conv-emotion_readme_33c92a2f6b44.png" alt="Emotion cause types in conversation" width="1000"/>
  <figcaption style="display:table-caption;"><em>(a) No context. (b) Unmentioned latent cause. (c) Distinguishing emotion cause from emotional expressions.</em></figcaption>
</p>

<p align="center">
  <img src="https://oss.gittoolsai.com/images/declare-lab_conv-emotion_readme_e7ffd867c097.png" alt="Emotion cause types in conversation" width="1000"/>
  <figcaption style="display:table-caption;"><em>(a) Self-contagion. (b) The cause of the emotion is primarily due to a stable mood of the speaker that was induced in previous dialogue turns. (c) The hybrid type with both inter-personal emotional influence and self-contagion.</em></figcaption>
</p>

## Baseline Results on the [RECCON](https://github.com/declare-lab/RECCON) dataset (DailyDialog fold)

| Model | emo_f1 | pos_f1 | neg_f1 | macro_avg |
|-|-|-|-|-|
| ECPE-2D cross_road<br>(0 transform layers) | 52.76 | 52.39 | 95.86 | 73.62 |
| ECPE-2D window_constrained<br>(1 transform layer) | 70.48 | 48.80 | 93.85 | 71.32 |
| ECPE-2D cross_road<br>(2 transform layers) | 52.76 | 55.50 | 94.96 | 75.23 |
| ECPE-MLL | - | 48.48 | 94.68 | 71.58 |
| Rank Emotion Cause | - | 33.00 | 97.30 | 65.15 |
| RoBERTa-base | - | 64.28 | 88.74 | 76.51 |
| RoBERTa-large | - | 66.23 | 87.89 | 77.06 |

## ECPE-2D on the [RECCON](https://github.com/declare-lab/RECCON) dataset

<p align="center">
  <img src="https://oss.gittoolsai.com/images/declare-lab_conv-emotion_readme_f5517ac73bf7.png" alt="ECPE-2D" width="1000"/>
</p>

Citation:
Please cite the following papers if you use this code.
- Recognizing Emotion Cause in Conversations. Soujanya Poria, Navonil Majumder, Devamanyu Hazarika, Deepanway Ghosal, Rishabh Bhardwaj, Samson Yu Bai Jian, Romila Ghosh, Niyati Chhaya, Alexander Gelbukh, Rada Mihalcea. arXiv (2020). [[pdf](https://arxiv.org/pdf/2012.11820.pdf)]
- Zixiang Ding, Rui Xia, Jianfei Yu. ECPE-2D: Emotion-Cause Pair Extraction based on Joint Two-Dimensional Representation, Interaction and Prediction. ACL 2020. [[pdf](https://www.aclweb.org/anthology/2020.acl-main.288.pdf)]

## Rank-Emotion-Cause on the [RECCON](https://github.com/declare-lab/RECCON) dataset

<p align="center">
  <img src="https://oss.gittoolsai.com/images/declare-lab_conv-emotion_readme_6ad50ec20512.png" alt="Rank-Emotion-Cause" width="1000"/>
</p>

Citation:
Please cite the following papers if you use this code.
- Recognizing Emotion Cause in Conversations. Soujanya Poria, Navonil Majumder, Devamanyu Hazarika, Deepanway Ghosal, Rishabh Bhardwaj, Samson Yu Bai Jian, Romila Ghosh, Niyati Chhaya, Alexander Gelbukh, Rada Mihalcea. arXiv (2020). [[pdf](https://arxiv.org/pdf/2012.11820.pdf)]
- Effective Inter-Clause Modeling for End-to-End Emotion-Cause Pair Extraction. In Proc. of ACL 2020: The 58th Annual Meeting of the Association for Computational Linguistics, pages 3171-3181. [[pdf](https://www.aclweb.org/anthology/2020.acl-main.289/)]

## ECPE-MLL on the [RECCON](https://github.com/declare-lab/RECCON) dataset

<p align="center">
  <img src="https://oss.gittoolsai.com/images/declare-lab_conv-emotion_readme_4203bd238fa4.png" alt="ECPE-MLL" width="1000"/>
</p>

Citation:
Please cite the following papers if you use this code.
- Recognizing Emotion Cause in Conversations. Soujanya Poria, Navonil Majumder, Devamanyu Hazarika, Deepanway Ghosal, Rishabh Bhardwaj, Samson Yu Bai Jian, Romila Ghosh, Niyati Chhaya, Alexander Gelbukh, Rada Mihalcea. arXiv (2020). [[pdf](https://arxiv.org/pdf/2012.11820.pdf)]
- Zixiang Ding, Rui Xia, Jianfei Yu. End-to-End Emotion-Cause Pair Extraction based on Sliding Window Multi-Label Learning. EMNLP 2020. [[pdf](https://www.aclweb.org/anthology/2020.emnlp-main.290.pdf)]

## RoBERTa and SpanBERT Baselines on the [RECCON](https://github.com/declare-lab/RECCON) dataset

The RoBERTa and SpanBERT baselines as explained in the original RECCON paper. Refer to [this](https://arxiv.org/pdf/2012.11820.pdf).

Citation:
Please cite the following paper if you use this code.
- Recognizing Emotion Cause in Conversations. Soujanya Poria, Navonil Majumder, Devamanyu Hazarika, Deepanway Ghosal, Rishabh Bhardwaj, Samson Yu Bai Jian, Romila Ghosh, Niyati Chhaya, Alexander Gelbukh, Rada Mihalcea. arXiv (2020). [[pdf](https://arxiv.org/pdf/2012.11820.pdf)]
# conv-emotion Quick Start Guide

conv-emotion is an open-source library for **emotion recognition in conversations (ERC)** and **emotion-cause recognition in conversations**, bundling several state-of-the-art models (COSMIC, DialogueGCN, DialogueRNN, and others). This guide uses **COSMIC**, currently the best-performing model in the repo, to show how to set up an environment and run it.

## Environment

*   **Operating system**: Linux / macOS (Windows users should use WSL2)
*   **Python version**: Python 3.6 - 3.8 recommended
*   **Deep learning framework**: PyTorch (COSMIC is PyTorch-based)
*   **Hardware**: an NVIDIA GPU is recommended to speed up training and inference

**Prerequisites**:
Make sure `git`, `python`, `pip`, and a matching CUDA driver are installed.

## Installation

1.  **Clone the repository**
    ```bash
    git clone https://github.com/declare-lab/conv-emotion.git
    cd conv-emotion
    ```

2.  **Create a virtual environment and install dependencies**
    The repo root does not ship a unified `requirements.txt`, but the submodules (such as `COSMIC`) generally rely on the core PyTorch ecosystem. Create an environment and install the base packages first:
    ```bash
    conda create -n conv_emotion python=3.7
    conda activate conv_emotion
    pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118
    pip install numpy pandas scikit-learn tqdm
    ```
    *Note: if a submodule ships its own `environment.yml` (e.g., TL-ERC), enter that directory and run `conda env create -f environment.yml` instead.*

3.  **Download the pretrained feature files (COSMIC example)**
    COSMIC needs pre-extracted RoBERTa and COMET feature files.
    *   Download: [Google Drive link](https://drive.google.com/file/d/1TQYQYCoPtdXN2rQ1mR2jisjUztmOzfZr/view?usp=sharing)
    *   *Tip for users behind restricted networks*: the file is hosted on Google Drive and may download slowly; a download manager (e.g., IDM or Motrix) or a mirror can help.
    *   Unpack the download and place the files in the corresponding folders under `COSMIC/erc-training`.
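Before launching a long training run, it can be worth confirming that the environment actually sees a GPU (the DialogueRNN- and DialogueGCN-style scripts accept `--no-cuda` to force CPU). A quick generic check, not part of the repo:

```python
# Quick environment sanity check (illustrative; not part of the repo).
import torch

print("PyTorch:", torch.__version__)
if torch.cuda.is_available():
    print("CUDA device:", torch.cuda.get_device_name(0))
else:
    print("No CUDA device found; training will fall back to CPU.")
```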
## Basic Usage

The following shows how to train and evaluate COSMIC on the **IEMOCAP** dataset.

1.  **Enter the model directory**
    ```bash
    cd COSMIC
    ```

2.  **Run the training script**
    Pick the script and flags for the target dataset. For IEMOCAP:
    ```bash
    python train_iemocap.py --active-listener
    ```

    **Other datasets**:
    *   **DailyDialog**:
        ```bash
        python train_dailydialog.py --active-listener --class-weight --residual
        ```
    *   **MELD (emotion classification)**:
        ```bash
        python train_meld.py --active-listener --attention simple --dropout 0.5 --rec_dropout 0.3 --lr 0.0001 --mode1 2 --classify emotion --mu 0 --l2 0.00003 --epochs 60
        ```

3.  **Inspect the results**
    When the script finishes, the terminal prints evaluation metrics such as the weighted-average F1 score (W-Avg F1). Logs and model checkpoints are typically written to the current directory or a designated `output` folder.

## Data Format

The models expect sequences of speaker, utterance, and emotion label, for example:
```text
Party 1: I hate my girlfriend (angry)
Party 2: you got a girlfriend?! (surprise)
Party 1: yes (angry)
```
The code can also be adapted to predict the emotion of the current utterance from the preceding context alone. For dataset-specific preprocessing, see the data-processing scripts in each submodule.

## Example Use Case

The customer-support team at an e-commerce platform wants to upgrade its dialogue-analysis system so that anger and anxiety can be picked out of a large volume of support conversations and high-risk complaints get handled first.

### Without conv-emotion
- Keyword matching cannot tell sarcasm apart from praise: a user writing "this service is just unbelievably good" is scored as positive, and the complaint gets ignored.
- With no modeling of conversational context, the system scores one utterance at a time and misses emotions escalating across turns.
- Hand-labeling tens of thousands of dialogues is expensive and slow, so the emotion model iterates slowly and adapts poorly to new business scenarios.
- Off-the-shelf sentiment tools perform badly in multi-party conversations and cannot attribute an emotion to a specific speaker.

### With conv-emotion
- Built on architectures such as COSMIC, the system draws on commonsense knowledge to recognize sarcasm and implicit emotion, raising recall on high-risk complaints by 40%.
- Using dialogue graph networks (DialogueGCN), the model links utterances across turns and tracks a user's drift from "confused" to "angry".
- Reusing pretrained conversational emotion models cuts the need for domain-specific labeled data, shortening model rollout for a new business line from two weeks to two days.
- Multi-party modeling separates the agent's emotional state from each customer's, letting managers quantify de-escalation skill and stress distribution.

By putting state-of-the-art conversational context modeling and commonsense reasoning into practice, conv-emotion gives machines the ability to hear what is said between the lines.

## Repository Facts

- **Owner**: [declare-lab](https://github.com/declare-lab) (Deep Cognition and Language Research (DeCLaRe) Lab), https://declare-lab.github.io
- **Languages**: Python 99.5%, Shell 0.5%
- **License**: MIT
- **Stars / forks**: 1,507 / 342
- **GitHub topics**: dialogue-systems, conversational-agents, conversational-ai, emotion-recognition, emotion-analysis, sentiment-analysis, pretrained-models, natural-language-processing, natural-language-understanding, memory-network, lstm, pytorch, emotion-recognition-in-conversation
- **Environment notes**: the project contains several sub-models (COSMIC, TL-ERC, DialogueGCN, etc.) that depend on different frameworks (PyTorch or TensorFlow). COSMIC requires the pre-downloaded RoBERTa and COMET feature files; TL-ERC recommends a Conda environment built from its `environment.yml`. Some components depend on TensorFlow 1.x or Keras while others are PyTorch-based; exact CUDA and Python versions are given in the configuration files of each subdirectory. An NVIDIA GPU is an implicit requirement for model training and RoBERTa/COMET feature extraction.