[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-ScalaConsultants--Aspect-Based-Sentiment-Analysis":3,"tool-ScalaConsultants--Aspect-Based-Sentiment-Analysis":61},[4,18,26,36,44,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":10,"last_commit_at":24,"category_tags":25,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":32,"last_commit_at":33,"category_tags":34,"status":17},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",140436,2,"2026-04-05T23:32:43",[14,13,35],"语言模型",{"id":37,"name":38,"github_repo":39,"description_zh":40,"stars":41,"difficulty_score":32,"last_commit_at":42,"category_tags":43,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",107662,"2026-04-03T11:11:01",[14,15,13],{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":10,"last_commit_at":50,"category_tags":51,"status":17},4292,"Deep-Live-Cam","hacksider\u002FDeep-Live-Cam","Deep-Live-Cam 
是一款专注于实时换脸与视频生成的开源工具，用户仅需一张静态照片，即可通过“一键操作”实现摄像头画面的即时变脸或制作深度伪造视频。它有效解决了传统换脸技术流程繁琐、对硬件配置要求极高以及难以实时预览的痛点，让高质量的数字内容创作变得触手可及。\n\n这款工具不仅适合开发者和技术研究人员探索算法边界，更因其极简的操作逻辑（仅需三步：选脸、选摄像头、启动），广泛适用于普通用户、内容创作者、设计师及直播主播。无论是为了动画角色定制、服装展示模特替换，还是制作趣味短视频和直播互动，Deep-Live-Cam 都能提供流畅的支持。\n\n其核心技术亮点在于强大的实时处理能力，支持口型遮罩（Mouth Mask）以保留使用者原始的嘴部动作，确保表情自然精准；同时具备“人脸映射”功能，可同时对画面中的多个主体应用不同面孔。此外，项目内置了严格的内容安全过滤机制，自动拦截涉及裸露、暴力等不当素材，并倡导用户在获得授权及明确标注的前提下合规使用，体现了技术发展与伦理责任的平衡。",88924,"2026-04-06T03:28:53",[14,15,13,52],"视频",{"id":54,"name":55,"github_repo":56,"description_zh":57,"stars":58,"difficulty_score":32,"last_commit_at":59,"category_tags":60,"status":17},3704,"NextChat","ChatGPTNextWeb\u002FNextChat","NextChat 是一款轻量且极速的 AI 助手，旨在为用户提供流畅、跨平台的大模型交互体验。它完美解决了用户在多设备间切换时难以保持对话连续性，以及面对众多 AI 模型不知如何统一管理的痛点。无论是日常办公、学习辅助还是创意激发，NextChat 都能让用户随时随地通过网页、iOS、Android、Windows、MacOS 或 Linux 端无缝接入智能服务。\n\n这款工具非常适合普通用户、学生、职场人士以及需要私有化部署的企业团队使用。对于开发者而言，它也提供了便捷的自托管方案，支持一键部署到 Vercel 或 Zeabur 等平台。\n\nNextChat 的核心亮点在于其广泛的模型兼容性，原生支持 Claude、DeepSeek、GPT-4 及 Gemini Pro 等主流大模型，让用户在一个界面即可自由切换不同 AI 能力。此外，它还率先支持 MCP（Model Context Protocol）协议，增强了上下文处理能力。针对企业用户，NextChat 提供专业版解决方案，具备品牌定制、细粒度权限控制、内部知识库整合及安全审计等功能，满足公司对数据隐私和个性化管理的高标准要求。",87618,"2026-04-05T07:20:52",[14,35],{"id":62,"github_repo":63,"name":64,"description_en":65,"description_zh":66,"ai_summary_zh":66,"readme_en":67,"readme_zh":68,"quickstart_zh":69,"use_case_zh":70,"hero_image_url":71,"owner_login":72,"owner_name":73,"owner_avatar_url":74,"owner_bio":75,"owner_company":76,"owner_location":76,"owner_email":77,"owner_twitter":76,"owner_website":78,"owner_url":79,"languages":80,"stars":85,"forks":86,"last_commit_at":87,"license":88,"difficulty_score":89,"env_os":90,"env_gpu":90,"env_ram":90,"env_deps":91,"category_tags":99,"github_topics":100,"view_count":32,"oss_zip_url":76,"oss_zip_packed_at":76,"status":17,"created_at":112,"updated_at":113,"faqs":114,"releases":145},4318,"ScalaConsultants\u002FAspect-Based-Sentiment-Analysis","Aspect-Based-Sentiment-Analysis","💭 Aspect-Based-Sentiment-Analysis: Transformer & Explainable ML (TensorFlow)","Aspect-Based-Sentiment-Analysis 是一个基于 Transformer 架构的开源自然语言处理工具，专注于细粒度的情感分析。与传统仅判断整段文本情感倾向的方法不同，它能针对长文中指定的多个具体方面（如产品的“价格”、“服务”或“功能”）分别识别其情感色彩（正面、负面或中性）。\n\n该工具主要解决了现有开源项目难以商业化复用以及模型预测缺乏透明度的痛点。在许多实际场景中，用户不仅需要知道结果，更需要了解模型为何得出该结论。为此，Aspect-Based-Sentiment-Analysis 创新性地引入了名为\"Professor\"的解释组件。在模型做出预测后，该组件会审查内部状态并提供近似决策解释，帮助用户评估预测的可信度，甚至在发现异常时驳回不可靠的结果。\n\n这款工具非常适合需要构建高精度情感分析系统的开发者、希望深入研究可解释性机器学习的研究人员，以及需要将非结构化用户反馈转化为结构化数据的商业分析师。其设计简洁独立，支持通过简单的 Python 代码快速加载预训练模型并投入使用，同时也允许用户根据自身数据定制和微调模型，以获取更稳定、准确的分析效果。","\n### Aspect Based Sentiment Analysis\n\nThe task is to classify the sentiment of potentially long texts for several aspects.\nThe key idea is to build a modern NLP package which supports explanations of model predictions.\nThe approximated decision explanations help you to infer how reliable predictions are.\nThe package is standalone, scalable, and can be freely extended to your needs.\nWe sum up thoughts in the article:\n\u003Cbr>\n\n[**Do You Trust in Aspect-Based Sentiment Analysis? 
Testing and Explaining Model Behaviors**](https:\u002F\u002Frafalrolczynski.com\u002F2021\u002F03\u002F07\u002Faspect-based-sentiment-analysis\u002F)\n\n\u003Cbr>\n\nThere are over 100 repositories on GitHub around sentiment analysis\n\u003Csup>\n[1](https:\u002F\u002Fgithub.com\u002Fsongyouwei\u002FABSA-PyTorch)\n[2](https:\u002F\u002Fgithub.com\u002Fjimmyyfeng\u002FTD-LSTM)\n[3](https:\u002F\u002Fgithub.com\u002Fdavidsbatista\u002FAspect-Based-Sentiment-Analysis)\n[4](https:\u002F\u002Fgithub.com\u002Fpeace195\u002Faspect-based-sentiment-analysis)\n[5](https:\u002F\u002Fgithub.com\u002Fyardstick17\u002FAspectBasedSentimentAnalysis)\n[6](https:\u002F\u002Fgithub.com\u002Fthestrox\u002FAspect-Based-Sentiment-Analysis)\n[7](https:\u002F\u002Fgithub.com\u002FAlexYangLi\u002FABSA_Keras)\n[8](https:\u002F\u002Fgithub.com\u002Fpedrobalage\u002FSemevalAspectBasedSentimentAnalysis)\n[9](https:\u002F\u002Fgithub.com\u002Fganeshjawahar\u002Fmem_absa)\n\u003C\u002Fsup>.\nAlmost all of them are open-source research projects that are hard to commercialize and reuse.\nWe clean up and consolidate this excellent research.\nPlease give the project a star if you like it; this helps keep the project alive.\n\n\u003Cbr>\n\n### Quick Start\n\nThe aim is to classify the sentiment of a text concerning given aspects.\nWe have made several assumptions to make the service more helpful.\nNamely, the text being processed might be a full-length document,\nthe aspects could contain several words (so they may be defined more precisely),\nand, most importantly, the service should provide an approximate explanation of any decision made,\nso that a user is able to immediately infer how reliable a prediction is.\n\n```python\nimport aspect_based_sentiment_analysis as absa\n\nnlp = absa.load()\ntext = (\"We are great fans of Slack, but we wish the subscriptions \"\n        \"were more accessible to small startups.\")\n\nslack, price = nlp(text, aspects=['slack', 'price'])\nassert price.sentiment == absa.Sentiment.negative\nassert slack.sentiment == absa.Sentiment.positive\n```\n\nAbove is an example of how quickly you can start to benefit from our open-source package.\nAll you need to do is call the `load` function, which sets up the ready-to-use pipeline `nlp`.\nYou can explicitly pass the name of the model you wish to use (a list of available models is below), or a path to your own model.\nDespite the simplicity of using the fine-tuned models, we encourage you to build a custom model that reflects your data.\nThe predictions will be more accurate and stable. 
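\nFor example, to pin one of the ready-to-use models rather than rely on the default, pass its name to `load`. A minimal sketch: the model name `absa\u002Fclassifier-rest-0.2` comes from the table below, while the review text and aspects are purely illustrative.\n\n```python\nimport aspect_based_sentiment_analysis as absa\n\n# Explicitly select the restaurant-domain classifier instead of the default model.\nnlp = absa.load('absa\u002Fclassifier-rest-0.2')\n\ntext = (\"The food was delicious, but the service \"\n        \"was painfully slow.\")\n\n# Results come back in the same order as the requested aspects.\nfood, service = nlp(text, aspects=['food', 'service'])\nprint(food.sentiment, service.sentiment)\n```\n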
\n\n\u003Cbr>\n\n### Pipeline: Keeping the Process in Shape\n\nThe pipeline provides an easy-to-use interface for making predictions.\nEven a highly accurate model will be useless if it is unclear how to correctly prepare the inputs and how to interpret the outputs.\nTo make things clear, we have introduced a pipeline that is closely linked to a model.\nIt is worth knowing how the whole process works, especially if you plan to build a custom model.\n\n\n\u003Cp align=\"middle\">\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FScalaConsultants_Aspect-Based-Sentiment-Analysis_readme_22e380e7d5e6.png\" width=\"600\" alt=\"\"\u002F>\n\u003C\u002Fp>\n\n\nThe diagram above illustrates an overview of the pipeline stages.\nAs usual, at the very beginning, we pre-process the inputs.\nWe convert the text and the aspects into a `task` which keeps examples (pairs of a text and an aspect) that we can then further tokenize, encode and pass to the model.\nThe model makes a prediction, and here is where things change.\nInstead of directly post-processing the model outputs, we have added a review process wherein\nthe independent component called the `professor` supervises and explains a model prediction.\nThe professor might dismiss a model prediction if the model's internal states or outputs seem suspicious.\nIn the article [here](https:\u002F\u002Frafalrolczynski.com\u002F2021\u002F03\u002F07\u002Faspect-based-sentiment-analysis\u002F), we discuss in detail how the model and the professor work.\n\n````python\nimport aspect_based_sentiment_analysis as absa\n\nname = 'absa\u002Fclassifier-rest-0.2'\nmodel = absa.BertABSClassifier.from_pretrained(name)\ntokenizer = absa.BertTokenizer.from_pretrained(name)\nprofessor = absa.Professor(...)     # Explained in detail later on.\ntext_splitter = absa.sentencizer()  # The English CNN model from SpaCy.\nnlp = absa.Pipeline(model, tokenizer, professor, text_splitter)\n\n# Break down the pipeline `call` method.\ntask = nlp.preprocess(text=..., aspects=...)\ntokenized_examples = nlp.tokenize(task.examples)\ninput_batch = nlp.encode(tokenized_examples)\noutput_batch = nlp.predict(input_batch)\npredictions = nlp.review(tokenized_examples, output_batch)\ncompleted_task = nlp.postprocess(task, predictions)\n````\n\nAbove is an example of how to initialize the pipeline directly;\nit retraces the process discussed above in code, exposing what calling the pipeline does under the hood.\nWe have omitted a lot of insignificant details, but there is one thing we would like to highlight.\nThe sentiment of long texts tends to be fuzzy and neutral.\nTherefore, you might want to split a text into smaller independent chunks, sometimes called spans.\nThese could include just a single sentence or several sentences.\nIt depends on how the `text_splitter` works.\nIn this case, we are using the SpaCy CNN model, which splits a document into single sentences,\nand, as a result, each sentence can then be processed independently.\nNote that longer spans have richer context information, so a model will have more information to consider.\nPlease take a look at the pipeline details [here](aspect_based_sentiment_analysis\u002Fpipelines.py).\n
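\nIf sentence-level spans are too short for your domain, the `text_splitter` is the natural place to intervene. A minimal sketch, assuming a splitter is simply a callable that maps a document string to a list of span strings (the role `absa.sentencizer()` plays above); the paragraph-based strategy is illustrative, not part of the package, and `absa.Professor(...)` keeps the placeholder convention of the snippet above.\n\n```python\nfrom typing import List\n\nimport aspect_based_sentiment_analysis as absa\n\ndef paragraph_splitter(text: str) -> List[str]:\n    # Split on blank lines so that each paragraph becomes one span,\n    # trading finer granularity for richer context per span.\n    return [span.strip() for span in text.split('\\n\\n') if span.strip()]\n\nname = 'absa\u002Fclassifier-rest-0.2'\nmodel = absa.BertABSClassifier.from_pretrained(name)\ntokenizer = absa.BertTokenizer.from_pretrained(name)\nprofessor = absa.Professor(...)  # Placeholder arguments, as in the snippet above.\nnlp = absa.Pipeline(model, tokenizer, professor, paragraph_splitter)\n```\n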
\n\u003Cbr>\n\n### Supervising Model Predictions\n\nIt's time to explain model reasoning, something that is extremely hard.\nThe key concept is to frame the problem of explaining a model decision as an independent task wherein\nan auxiliary model, the `pattern recognizer`, predicts patterns (weighted compositions of tokens, presented below) given the model inputs, outputs, and internal states.\nDue to time constraints, we did not want to research and build a trainable pattern recognizer at first.\nInstead, we decided to start with a pattern recognizer that originates from our observations and prior knowledge.\nThe model, the aspect-based sentiment classifier, is based on the transformer architecture, wherein the self-attention layers hold the most parameters.\nTherefore, one might conclude that understanding self-attention layers is a good proxy for understanding the model as a whole.\nAccordingly, many articles show how to explain a model decision\nin simple terms, using attention values (the internal states of self-attention layers) directly.\nInspired by these articles, we have also analyzed attention values (while processing training examples) in search of meaningful insights.\nThis exploratory study has led us to create the `BasicPatternRecognizer` (details are [here](aspect_based_sentiment_analysis\u002Faux_models.py)).\n\n```python\nimport aspect_based_sentiment_analysis as absa\n\nrecognizer = absa.aux_models.BasicPatternRecognizer()\nnlp = absa.load(pattern_recognizer=recognizer)\ncompleted_task = nlp(text=..., aspects=['slack', 'price'])\nslack, price = completed_task.examples\n\nabsa.summary(slack)\nabsa.display(slack.review)\n```\n\n\u003Cp align=\"middle\">\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FScalaConsultants_Aspect-Based-Sentiment-Analysis_readme_ae55485b0100.png\" width=\"600\" alt=\"\"\u002F>\n\u003C\u002Fp>\n\n```python\nabsa.summary(price)\nabsa.display(price.review)\n```\n\n\u003Cp align=\"middle\">\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FScalaConsultants_Aspect-Based-Sentiment-Analysis_readme_0282dad9190e.png\" width=\"600\" alt=\"\"\u002F>\n\u003C\u002Fp>\n\nThe explanations are only useful if they are correct.\nTo form the basic pattern recognizer, we have made several assumptions (prior beliefs),\nso we should be careful not to interpret the explanations too literally.\nEven if the attention values have thought-provoking properties (for example,\nthey encode rich linguistic relationships), there is no proven chain of causation.\nMany articles illustrate why drawing conclusions about model reasoning\ndirectly from attention values might be misleading.\nIn the article [here](https:\u002F\u002Frafalrolczynski.com\u002F2021\u002F03\u002F07\u002Faspect-based-sentiment-analysis\u002F), we validate and analyse the explanations in detail.\n
\n\u003Cbr>\n\n### Ready-to-Use Models\n\nIn the table below, we present the State of the Art results on the SemEval 2014 evaluation dataset\n(dataset details are [here](http:\u002F\u002Falt.qcri.org\u002Fsemeval2014\u002Ftask4\u002F)).\nThere are two available models, for the restaurant and the laptop domains.\nThe model implementation details are [here](aspect_based_sentiment_analysis\u002Fmodels.py).\nThe hyper-parameter optimization (with an explanation of how to train a model) is [here](examples\u002Ftrain_classifier.py).\nYou can easily reproduce our evaluations; see the performance tests [here](tests\u002Ftest_performance.py).\n\n| Model Name | Acc Rest | Acc Lapt | Release |\n| :--- |  :---:  |  :---:  | :---: |\n||\n| LCF-ATEPC  [[code]](https:\u002F\u002Fgithub.com\u002Fyangheng95\u002FLCF-ATEPC)[[paper]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1912.07976) | 90.18 | 82.29 | Jan 2020 |\n| BERT-ADA   [[code]](https:\u002F\u002Fgithub.com\u002Fdeepopinion\u002Fdomain-adapted-atsc)[[paper]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1908.11860) | 87.89 | 80.23 | Nov 2019 |\n| BAT        [[code]](https:\u002F\u002Fgithub.com\u002Fakkarimi\u002FAdversarial-Training-for-ABSA)[[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2001.11316.pdf) | 86.03 | 79.35 | Feb 2020 |\n||\n| `classifier-rest-0.2` | 85.17 |  |  |\n| `classifier-lapt-0.2` |  | 79.78 |  |\n\n\n\u003Cbr>\n\n### Installation\n\nYou can install the package with pip:\n```bash\npip install aspect-based-sentiment-analysis\n```\nOtherwise, clone the code and create a new environment via\n[conda](https:\u002F\u002Fdocs.conda.io\u002Fprojects\u002Fconda\u002Fen\u002Flatest\u002Fuser-guide\u002Ftasks\u002Fmanage-environments.html#):\n```bash\ngit clone git@github.com:ScalaConsultants\u002FAspect-Based-Sentiment-Analysis.git\nconda env create -f environment.yml\nconda activate Aspect-Based-Sentiment-Analysis\n```\nThe package works with Python 3.7 (the same version as in Colab 2021).\n
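\nAfter installing, a quick smoke test confirms that the environment is wired up correctly. A minimal sketch: the first call downloads the default model, and the sample text is illustrative.\n\n```python\n# Run once after installation; an ImportError or a TensorFlow version conflict\n# surfaces here rather than deep inside a batch job (see the FAQ on this page).\nimport aspect_based_sentiment_analysis as absa\n\nnlp = absa.load()\ncoffee, = nlp(\"The coffee was cold.\", aspects=['coffee'])\nprint(coffee.sentiment)\n```\n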
\n\u003Cbr>\n\n### References\n\nHow to use language models in Aspect-Based Sentiment Analysis:\n- Utilizing BERT for Aspect-Based Sentiment Analysis via Constructing Auxiliary Sentence (NAACL 2019)\n[[code]](https:\u002F\u002Fgithub.com\u002FHSLCY\u002FABSA-BERT-pair)[[paper]](https:\u002F\u002Fwww.aclweb.org\u002Fanthology\u002FN19-1035\u002F)\n- BERT Post-Training for Review Reading Comprehension and Aspect-based Sentiment Analysis (NAACL 2019)\n[[code]](https:\u002F\u002Fgithub.com\u002Fhowardhsu\u002FBERT-for-RRC-ABSA)[[paper]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1908.11860)\n- Exploiting BERT for End-to-End Aspect-based Sentiment Analysis\n[[code]](https:\u002F\u002Fgithub.com\u002Flixin4ever\u002FBERT-E2E-ABSA)[[paper]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1910.00883)\n\nIntroduction to BERT interpretability:\n- Are Sixteen Heads Really Better than One?\n[[code]](https:\u002F\u002Fgithub.com\u002Fpmichel31415\u002Fare-16-heads-really-better-than-1)[[paper]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1905.10650)\n- A Primer in BERTology: What we know about how BERT works\n[[paper]](http:\u002F\u002Farxiv.org\u002Fabs\u002F2002.12327)\n- What Does BERT Look At? An Analysis of BERT's Attention\n[[code]](https:\u002F\u002Fgithub.com\u002Fclarkkev\u002Fattention-analysis)[[paper]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1906.04341)\n- Visualizing and Measuring the Geometry of BERT\n[[code]](https:\u002F\u002Fgithub.com\u002FPAIR-code\u002Finterpretability)[[paper]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1906.02715)\n- Is BERT Really Robust? A Strong Baseline for Natural Language Attack on Text Classification and Entailment\n[[paper]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1907.11932)\n- Adversarial Training for Aspect-Based Sentiment Analysis with BERT\n[[paper]](http:\u002F\u002Farxiv.org\u002Fabs\u002F2001.11316)\n- Adv-BERT: BERT is not robust on misspellings! Generating nature adversarial samples on BERT\n[[paper]](http:\u002F\u002Farxiv.org\u002Fabs\u002F2003.04985)\n- exBERT: A Visual Analysis Tool to Explore Learned Representations in Transformers Models\n[[code]](https:\u002F\u002Fgithub.com\u002Fbhoov\u002Fexbert)[[paper]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1910.05276)\n- Does BERT Make Any Sense? Interpretable Word Sense Disambiguation with Contextualized Embeddings\n[[code]](https:\u002F\u002Fgithub.com\u002Fuhh-lt\u002Fbert-sense)[[paper]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1909.10430)\n- Attention is not Explanation\n[[code]](https:\u002F\u002Fgithub.com\u002Fsuccessar\u002FAttentionExplanation)[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F1902.10186)\n- Attention is not not Explanation\n[[code]](https:\u002F\u002Fgithub.com\u002Fsarahwie\u002Fattention)[[paper]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1908.04626)[[blog post]](https:\u002F\u002Fmedium.com\u002F@yuvalpinter\u002Fattention-is-not-not-explanation-dbc25b534017)\n- Hierarchical interpretations for neural network predictions\n[[code]](https:\u002F\u002Fgithub.com\u002Fcsinva\u002Fhierarchical-dnn-interpretations)[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F1806.05337)\n- Analysis Methods in Neural NLP\n[[code]](https:\u002F\u002Fgithub.com\u002Fboknilev\u002Fnlp-analysis-methods)[[paper]](https:\u002F\u002Fwww.mitpressjournals.org\u002Fdoi\u002Fpdf\u002F10.1162\u002Ftacl_a_00254)\n- Visualization for Sequential Neural Networks with Attention\n[[code]](https:\u002F\u002Fgithub.com\u002FHendrikStrobelt\u002FSeq2Seq-Vis)\n- NeuroX: Toolkit for finding and analyzing important neurons in neural networks\n[[code]](https:\u002F\u002Fgithub.com\u002Ffdalvi\u002FNeuroX)[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F1812.09359)\n\nThe State of the Art results:\n- A Multi-task Learning Model for Chinese-oriented Aspect Polarity Classification and Aspect Term Extraction\n[[code]](https:\u002F\u002Fgithub.com\u002Fyangheng95\u002FLCF-ATEPC)[[paper]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1912.07976)\n- Adapt or Get Left Behind: Domain Adaptation through BERT Language Model Finetuning for Aspect-Target Sentiment Classification\n[[code]](https:\u002F\u002Fgithub.com\u002Fdeepopinion\u002Fdomain-adapted-atsc)[[paper]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1908.11860)\n- Adversarial Training for Aspect-Based Sentiment Analysis with BERT\n[[code]](https:\u002F\u002Fgithub.com\u002Fakkarimi\u002FAdversarial-Training-for-ABSA)[[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2001.11316.pdf)\n\nOther interesting resources:\n- Multi-Dimensional Explanation of Ratings from Reviews\n[[paper]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1909.11386)\n- Extracting Syntactic Trees from Transformer Encoder Self-Attentions\n[[paper]](http:\u002F\u002Faclweb.org\u002Fanthology\u002FW18-5444)\n- Master Thesis: Transfer and Multitask Learning for Aspect-Based Sentiment Analysis Using the Google Transformer Architecture\n[[code]](https:\u002F\u002Fgithub.com\u002FfelixSchober\u002FABSA-Transformer)\n- Create interactive textual heat maps for Jupyter notebooks\n[[code]](https:\u002F\u002Fgithub.com\u002FAndreasMadsen\u002Fpython-textualheatmap)\n- A PyTorch implementation of the DeepMoji model: a state-of-the-art deep learning model for analyzing sentiment, emotion, sarcasm, etc.\n[[code]](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002FtorchMoji)\n- You can find more [here](https:\u002F\u002Fgithub.com\u002Fjiangqn\u002FAspect-Based-Sentiment-Analysis).\n\nDeveloped by [Scalac](https:\u002F\u002Fscalac.io\u002F?utm_source=scalac_github&utm_campaign=scalac1&utm_medium=web)\n","### 
基于方面的情感分析\n\n任务是对包含多个方面的潜在长文本进行情感分类。核心思想是构建一个现代化的自然语言处理工具包，支持对模型预测结果的解释。近似的决策解释有助于推断预测结果的可靠性。该工具包是独立、可扩展的，并可根据需求自由扩展。我们在文章中总结了相关思考：\n\u003Cbr>\n\n[**你信任基于方面的情感分析吗？测试和解释模型行为**](https:\u002F\u002Frafalrolczynski.com\u002F2021\u002F03\u002F07\u002Faspect-based-sentiment-analysis\u002F)\n\n\u003Cbr>\n\nGitHub 上有超过 100 个与情感分析相关的仓库\n\u003Csup>\n[1](https:\u002F\u002Fgithub.com\u002Fsongyouwei\u002FABSA-PyTorch)\n[2](https:\u002F\u002Fgithub.com\u002Fjimmyyfeng\u002FTD-LSTM)\n[3](https:\u002F\u002Fgithub.com\u002Fdavidsbatista\u002FAspect-Based-Sentiment-Analysis)\n[4](https:\u002F\u002Fgithub.com\u002Fpeace195\u002Faspect-based-sentiment-analysis)\n[5](https:\u002F\u002Fgithub.com\u002Fyardstick17\u002FAspectBasedSentimentAnalysis)\n[6](https:\u002F\u002Fgithub.com\u002Fthestrox\u002FAspect-Based-Sentiment-Analysis)\n[7](https:\u002F\u002Fgithub.com\u002FAlexYangLi\u002FABSA_Keras)\n[8](https:\u002F\u002Fgithub.com\u002Fpedrobalage\u002FSemevalAspectBasedSentimentAnalysis)\n[9](https:\u002F\u002Fgithub.com\u002Fganeshjawahar\u002Fmem_absa)\n\u003C\u002Fsup>。\n这些项目大多难以商业化，且多为开源研究项目，复用性有限。我们致力于将这些优秀的研究成果加以整理和优化。如果你喜欢这个项目，请为它点亮星标，这对我们保持项目的持续发展至关重要。\n\n\u003Cbr>\n\n### 快速入门\n\n目标是针对给定的方面对文本的情感进行分类。为了使服务更具实用性，我们做出了一些假设：待处理的文本可以是整篇文档；方面可能由多个词组成，从而定义得更加精确；最重要的是，服务应提供对每项决策的近似解释，以便用户能够立即判断预测结果的可靠性。\n\n```python\nimport aspect_based_sentiment_analysis as absa\n\nnlp = absa.load()\ntext = (\"我们是 Slack 的忠实粉丝，但希望订阅方案能更易于小型初创企业接受。\")\n\nslack, price = nlp(text, aspects=['slack', 'price'])\nassert price.sentiment == absa.Sentiment.negative\nassert slack.sentiment == absa.Sentiment.positive\n```\n\n以上示例展示了如何快速开始使用我们的开源工具包。你只需调用 `load` 函数即可设置好即用的流水线 `nlp`。你可以显式指定要使用的模型名称（可用模型列表见下文），或提供自定义模型的路径。尽管微调模型使用起来很简单，但我们仍鼓励你根据自身数据构建定制化模型，这样预测结果会更加准确和稳定。\n\n\u003Cbr>\n\n### 流水线：确保流程顺畅\n\n流水线提供了一个易于使用的接口来生成预测结果。即使模型非常精准，如果输入准备和输出解读不明确，也毫无意义。为了解决这一问题，我们引入了与模型紧密关联的流水线。了解整个流程的工作方式尤为重要，尤其是当你计划构建自定义模型时。\n\n\n\u003Cp align=\"middle\">\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FScalaConsultants_Aspect-Based-Sentiment-Analysis_readme_22e380e7d5e6.png\" width=\"600\" alt=\"\"\u002F>\n\u003C\u002Fp>\n\n\n上图展示了流水线各阶段的概览。通常，在最开始阶段会对输入进行预处理。我们将文本和方面转换为一个 `task` 对象，其中包含文本与方面的配对样本，随后这些样本会被进一步分词、编码并传递给模型。模型完成预测后，流程会发生变化：我们没有直接对模型输出进行后处理，而是增加了一个审查环节，由一个名为 `professor` 的独立组件负责监督并解释模型的预测结果。如果模型内部状态或输出显得可疑，`professor` 可以否决该预测。在[此处](here)的文章中，我们详细讨论了模型和 `professor` 的工作原理。\n\n````python\nimport aspect_based_sentiment_analysis as absa\n\nname = 'absa\u002Fclassifier-rest-0.2'\nmodel = absa.BertABSClassifier.from_pretrained(name)\ntokenizer = absa.BertTokenizer.from_pretrained(name)\nprofessor = absa.Professor(...)     
# 后续会详细说明。\ntext_splitter = absa.sentencizer()  # 使用 SpaCy 的英文 CNN 模型。\nnlp = absa.Pipeline(model, tokenizer, professor, text_splitter)\n\n# 分解流水线的 `call` 方法。\ntask = nlp.preprocess(text=..., aspects=...)\ntokenized_examples = nlp.tokenize(task.examples)\ninput_batch = nlp.encode(tokenized_examples)\noutput_batch = nlp.predict(input_batch)\npredictions = nlp.review(tokenized_examples, output_batch)\ncompleted_task = nlp.postprocess(task, predictions)\n````\n\n以上示例展示了如何直接初始化流水线，并通过代码逐步解析流水线调用背后的具体流程。我们省略了许多不重要的细节，但有一点值得强调：长文本的情感往往较为模糊且偏向中性。因此，你可能需要将文本拆分为较小的独立片段，有时称为“跨度”。这些跨度可以是一个句子，也可以是几个句子，具体取决于 `text_splitter` 的工作方式。在此例中，我们使用 SpaCy 的 CNN 模型，它会将文档按句子分割，从而使每个句子都能被独立处理。需要注意的是，较长的跨度通常包含更丰富的上下文信息，因此模型在作出预测时会有更多参考依据。请参阅流水线的详细实现[这里](aspect_based_sentiment_analysis\u002Fpipelines.py)。\n\n\u003Cbr>\n\n### 监督模型预测\n\n现在是时候解释模型的推理过程了，而这是一件极其困难的事情。\n关键概念是将解释模型决策的问题视为一个独立的任务，在这个任务中，\n一个辅助模型——`模式识别器`——会根据模型的输入、输出和内部状态来预测模式（如下文所示的标记加权组合）。\n由于时间限制，我们最初并不打算研究和构建一个可训练的模式识别器。\n相反，我们决定从基于观察和先验知识的模式识别器入手。\n该模型，即基于方面的情感分类器，基于Transformer架构，其中自注意力层占据了最多的参数。\n因此，人们可能会得出结论：理解自注意力层可以很好地反映对整个模型的理解。\n相应地，有许多文章以简单易懂的方式，直接利用注意力值（自注意力层的内部状态）来解释模型的决策。\n受这些文章的启发，我们也分析了注意力值（处理训练示例时的注意力值），以寻找任何有意义的见解。\n这项探索性研究促使我们创建了`BasicPatternRecognizer`（详细信息请参见[这里](aspect_based_sentiment_analysis\u002Faux_models.py)）。\n\n```python\nimport aspect_based_sentiment_analysis as absa\n\nrecognizer = absa.aux_models.BasicPatternRecognizer()\nnlp = absa.load(pattern_recognizer=recognizer)\ncompleted_task = nlp(text=..., aspects=['slack', 'price'])\nslack, price = completed_task.examples\n\nabsa.summary(slack)\nabsa.display(slack.review)\n```\n\n\u003Cp align=\"middle\">\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FScalaConsultants_Aspect-Based-Sentiment-Analysis_readme_ae55485b0100.png\" width=\"600\" alt=\"\"\u002F>\n\u003C\u002Fp>\n\n```python\nabsa.summary(price)\nabsa.display(price.review)\n```\n\n\u003Cp align=\"middle\">\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FScalaConsultants_Aspect-Based-Sentiment-Analysis_readme_0282dad9190e.png\" width=\"600\" alt=\"\"\u002F>\n\u003C\u002Fp>\n\n这些解释只有在正确的情况下才有用。\n为了构建基本的模式识别器，我们做了一些假设（先验信念），\n因此我们在解读这些解释时应谨慎，不要过于字面化。\n即使注意力值具有一些引人深思的特性，例如它们能够编码丰富的语言关系，\n目前也尚未有确凿的因果链证明这一点。\n有许多文章指出了各种担忧，说明直接从注意力值推断模型的推理过程可能会产生误导。\n在[这篇文章](https:\u002F\u002Frafalrolczynski.com\u002F2021\u002F03\u002F07\u002Faspect-based-sentiment-analysis\u002F)中，我们对这些解释进行了详细的验证和分析。\n\n\u003Cbr>\n\n### 即用型模型\n\n下表展示了我们在SemEval 2014评测数据集上的最先进结果\n（数据集详情请见[这里](http:\u002F\u002Falt.qcri.org\u002Fsemeval2014\u002Ftask4\u002F)）。\n目前有两个可用的模型，分别适用于餐厅和笔记本电脑领域。\n模型的具体实现细节请参见[这里](aspect_based_sentiment_analysis\u002Fmodels.py)。\n超参数优化（以及如何训练模型的说明）请参见[这里](examples\u002Ftrain_classifier.py)。\n您可以轻松复现我们的评估结果，并查看性能测试[这里](tests\u002Ftest_performance.py)。\n\n| 模型名称 | 餐厅准确率 | 笔记本准确率 | 发布日期 |\n| :--- |  :---:  |  :---:  | :---: |\n||\n| LCF-ATEPC  [[代码]](https:\u002F\u002Fgithub.com\u002Fyangheng95\u002FLCF-ATEPC)[[论文]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1912.07976)                        | 90.18  |  82.29  | 2020年1月 |\n| BERT-ADA   [[代码]](https:\u002F\u002Fgithub.com\u002Fdeepopinion\u002Fdomain-adapted-atsc)[[论文]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1908.11860)             | 87.89  |  80.23  | 2019年11月 |\n| BAT        [[代码]](https:\u002F\u002Fgithub.com\u002Fakkarimi\u002FAdversarial-Training-for-ABSA)[[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2001.11316.pdf) | 86.03  |  79.35  | 2020年2月 |\n||\n| `classifier-rest-0.2` | 85.17 |\n| `classifier-lapt-0.2` | | 
79.78\n\n\n\u003Cbr>\n\n### 安装\n\n您可以使用pip进行安装：\n```bash\npip install aspect-based-sentiment-analysis\n```\n或者，您也可以克隆代码并使用[conda](https:\u002F\u002Fdocs.conda.io\u002Fprojects\u002Fconda\u002Fen\u002Flatest\u002Fuser-guide\u002Ftasks\u002Fmanage-environments.html#)创建一个新的环境：\n```bash\ngit clone git@github.com:ScalaConsultants\u002FAspect-Based-Sentiment-Analysis.git\nconda env create -f=environment.yml\nconda activate Aspect-Based-Sentiment-Analysis\n```\n该软件包支持Python 3.7版本（与Colab 2021版本相同）。\n\n\u003Cbr>\n\n### 参考文献\n\n如何在基于方面的情感分析中使用语言模型：\n- 通过构建辅助句子利用 BERT 进行基于方面的情感分析（NAACL 2019）\n[[代码]](https:\u002F\u002Fgithub.com\u002FHSLCY\u002FABSA-BERT-pair)[[论文]](https:\u002F\u002Fwww.aclweb.org\u002Fanthology\u002FN19-1035\u002F)\n- 针对评论阅读理解和基于方面的情感分析的 BERT 后训练（NAACL 2019）\n[[代码]](https:\u002F\u002Fgithub.com\u002Fhowardhsu\u002FBERT-for-RRC-ABSA)[[论文]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1908.11860)\n- 利用 BERT 实现端到端的基于方面的情感分析\n[[代码]](https:\u002F\u002Fgithub.com\u002Flixin4ever\u002FBERT-E2E-ABSA)[[论文]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1910.00883)\n\nBERT 可解释性简介：\n- 十六个注意力头真的比一个更好吗？\n[[代码]](https:\u002F\u002Fgithub.com\u002Fpmichel31415\u002Fare-16-heads-really-better-than-1)[[论文]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1905.10650)\n- BERT 学入门：我们所知道的 BERT 工作原理\n[[论文]](http:\u002F\u002Farxiv.org\u002Fabs\u002F2002.12327)\n- BERT 关注的是什么？对 BERT 注意力机制的分析\n[[代码]](https:\u002F\u002Fgithub.com\u002Fclarkkev\u002Fattention-analysis)[[论文]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1906.04341)\n- 可视化与测量 BERT 的几何结构\n[[代码]](https:\u002F\u002Fgithub.com\u002FPAIR-code\u002Finterpretability)[[论文]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1906.02715)\n- BERT 真的稳健吗？针对文本分类和蕴含任务的自然语言攻击的强大基线\n[[论文]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1907.11932)\n- 基于 BERT 的对抗训练用于基于方面的情感分析\n[[论文]](http:\u002F\u002Farxiv.org\u002Fabs\u002F2001.11316)\n- Adv-BERT：BERT 对拼写错误并不鲁棒！在 BERT 上生成自然对抗样本\n[[论文]](http:\u002F\u002Farxiv.org\u002Fabs\u002F2003.04985)\n- exBERT：一种用于探索 Transformer 模型中学习表示的可视化分析工具\n[[代码]](https:\u002F\u002Fgithub.com\u002Fbhoov\u002Fexbert)[[论文]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1910.05276)\n- BERT 是否有意义？利用上下文嵌入进行可解释的词义消歧\n[[代码]](https:\u002F\u002Fgithub.com\u002Fuhh-lt\u002Fbert-sense)[[论文]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1909.10430)\n- 注意力并非解释\n[[代码]](https:\u002F\u002Fgithub.com\u002Fsuccessar\u002FAttentionExplanation)[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F1902.10186)\n- 注意力并不是非解释\n[[代码]](https:\u002F\u002Fgithub.com\u002Fsarahwie\u002Fattention)[[论文]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1908.04626)[[博客文章]](https:\u002F\u002Fmedium.com\u002F@yuvalpinter\u002Fattention-is-not-not-explanation-dbc25b534017)\n- 针对神经网络预测的层次化解释\n[[代码]](https:\u002F\u002Fgithub.com\u002Fcsinva\u002Fhierarchical-dnn-interpretations)[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F1806.05337)\n- 神经 NLP 中的分析方法\n[[代码]](https:\u002F\u002Fgithub.com\u002Fboknilev\u002Fnlp-analysis-methods)[[论文]](https:\u002F\u002Fwww.mitpressjournals.org\u002Fdoi\u002Fpdf\u002F10.1162\u002Ftacl_a_00254)\n- 具有注意力机制的序列神经网络可视化\n[[代码]](https:\u002F\u002Fgithub.com\u002FHendrikStrobelt\u002FSeq2Seq-Vis)\n- NeuroX：用于发现和分析神经网络中重要神经元的工具包\n[[代码]](https:\u002F\u002Fgithub.com\u002Ffdalvi\u002FNeuroX)[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F1812.09359)\n\n当前最先进成果：\n- 面向中文的多任务学习模型，用于方面极性分类和方面术语抽取\n[[代码]](https:\u002F\u002Fgithub.com\u002Fyangheng95\u002FLCF-ATEPC)[[论文]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1912.07976)\n- 适应或落后：通过 BERT 
语言模型微调实现领域自适应，用于方面—目标情感分类\n[[代码]](https:\u002F\u002Fgithub.com\u002Fdeepopinion\u002Fdomain-adapted-atsc)[[论文]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1908.11860)\n- 基于 BERT 的对抗训练用于基于方面的情感分析\n[[代码]](https:\u002F\u002Fgithub.com\u002Fakkarimi\u002FAdversarial-Training-for-ABSA)[[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2001.11316.pdf)\n\n其他有趣内容：\n- 从评论中提取评分的多维解释\n[[论文]](http:\u002F\u002Farxiv.org\u002Fabs\u002F1909.11386)\n- 从 Transformer 编码器的自注意力中提取句法树\n[[论文]](http:\u002F\u002Faclweb.org\u002Fanthology\u002FW18-5444)\n- 硕士论文：利用 Google Transformer 架构进行迁移学习和多任务学习，用于基于方面的情感分析\n[[代码]](https:\u002F\u002Fgithub.com\u002FfelixSchober\u002FABSA-Transformer)\n- 为 Jupyter 笔记本创建交互式文本热图\n[[代码]](https:\u002F\u002Fgithub.com\u002FAndreasMadsen\u002Fpython-textualheatmap)\n- DeepMoji 模型的 PyTorch 实现：用于分析情感、情绪、讽刺等的最先进深度学习模型\n[[代码]](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002FtorchMoji)\n- 更多内容请参见 [这里](https:\u002F\u002Fgithub.com\u002Fjiangqn\u002FAspect-Based-Sentiment-Analysis)。\n\n由 [Scalac](https:\u002F\u002Fscalac.io\u002F?utm_source=scalac_github&utm_campaign=scalac1&utm_medium=web) 开发","# Aspect-Based-Sentiment-Analysis 快速上手指南\n\n本指南帮助开发者快速部署和使用基于方面的细粒度情感分析（ABSA）工具。该工具不仅能识别文本中特定方面的情感倾向，还能提供模型预测的可解释性说明。\n\n## 环境准备\n\n在开始之前，请确保您的开发环境满足以下要求：\n\n*   **操作系统**：Linux, macOS 或 Windows\n*   **Python 版本**：推荐 Python 3.7（与 Colab 2021 环境一致）\n*   **前置依赖**：\n    *   `pip` 包管理工具\n    *   （可选）`conda`：如果您希望使用隔离的虚拟环境进行安装\n\n> **国内加速建议**：\n> 如果直接连接 PyPI 源速度较慢，建议使用国内镜像源（如清华源或阿里源）进行安装。\n\n## 安装步骤\n\n您可以选择以下任一方式进行安装：\n\n### 方式一：使用 pip 安装（推荐）\n\n直接使用 pip 安装最新稳定版。若需加速，可指定国内镜像源：\n\n```bash\npip install aspect-based-sentiment-analysis -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple\n```\n\n### 方式二：源码安装（使用 Conda 环境）\n\n如果您需要自定义开发或复现论文实验，建议克隆源码并创建专用环境：\n\n```bash\n# 1. 克隆代码仓库\ngit clone git@github.com:ScalaConsultants\u002FAspect-Based-Sentiment-Analysis.git\n\n# 2. 进入目录并创建 conda 环境\ncd Aspect-Based-Sentiment-Analysis\nconda env create -f=environment.yml\n\n# 3. 激活环境\nconda activate Aspect-Based-Sentiment-Analysis\n```\n\n## 基本使用\n\n该工具的核心功能是输入一段文本和关注的“方面”（Aspects），输出对应的情感倾向及可靠性解释。\n\n### 最简单示例\n\n以下代码展示了如何加载预训练模型并对包含多个方面的句子进行情感分类：\n\n```python\nimport aspect_based_sentiment_analysis as absa\n\n# 加载默认流水线（自动下载预训练模型）\nnlp = absa.load()\n\n# 定义待分析文本\ntext = (\"We are great fans of Slack, but we wish the subscriptions \"\n        \"were more accessible to small startups.\")\n\n# 指定需要分析的方面：'slack' (产品) 和 'price' (价格\u002F订阅费用)\nslack, price = nlp(text, aspects=['slack', 'price'])\n\n# 验证结果\nassert price.sentiment == absa.Sentiment.negative  # 对价格感到不满\nassert slack.sentiment == absa.Sentiment.positive  # 对产品本身表示喜爱\n\n# 查看详细解释（可选）\n# absa.display(slack.review) \n# absa.display(price.review)\n```\n\n### 使用说明\n1.  **加载模型**：调用 `absa.load()` 即可初始化包含分词器、模型和解释器（Professor）的完整流水线。\n2.  **执行分析**：传入 `text`（字符串）和 `aspects`（方面关键词列表）。方面词可以是单个单词或多个单词组成的短语。\n3.  
**获取结果**：返回对象包含 `sentiment` 属性（Positive\u002FNegative\u002FNeutral）以及 `review` 属性（用于分析预测可靠性的解释数据）。\n\n> **提示**：对于长文档，内置的流水线会自动利用 SpaCy 将文本分割为句子独立处理，以提高准确性。如需自定义模型或调整分句策略，可参考官方文档中的 Pipeline 高级用法。","某电商数据团队需要每日处理数万条包含多产品评价的长文本评论，以提取用户对“电池续航”、“屏幕清晰度”及“客服态度”等具体维度的反馈。\n\n### 没有 Aspect-Based-Sentiment-Analysis 时\n- **粒度粗糙导致误判**：传统情感分析只能给出整条评论的单一得分，无法区分用户“喜欢屏幕但讨厌电池”的复杂态度，导致产品改进方向模糊。\n- **黑盒决策缺乏信任**：模型直接输出结果却无法解释依据，分析师难以判断是模型学到了规律还是产生了幻觉，不敢直接将数据用于高层汇报。\n- **长文本处理能力弱**：面对篇幅较长的详细评测，现有开源方案往往丢失关键上下文信息，或需要耗费大量精力进行人工规则清洗和分段处理。\n\n### 使用 Aspect-Based-Sentiment-Analysis 后\n- **精准定位多维情感**：利用其基于 Transformer 的架构，能一次性从长文中精准识别并分类多个特定方面（如同时判定“屏幕”为正、“电池”为负），直接生成结构化洞察。\n- **可解释性增强可信度**：内置的\"Professor\"组件会自动审查模型预测并提供近似决策解释，让团队能直观看到模型为何做出该判断，快速剔除不可靠的异常预测。\n- **开箱即用降低门槛**：通过简单的 `load` 函数即可部署就绪的流水线，无需重新造轮子处理复杂的输入编码与后处理，大幅缩短了从原始数据到商业洞察的周期。\n\nAspect-Based-Sentiment-Analysis 的核心价值在于将不可控的黑盒预测转化为透明、可解释且细粒度的商业情报，让企业真正敢于信赖并落地 AI 情感分析能力。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FScalaConsultants_Aspect-Based-Sentiment-Analysis_5b4b14d3.png","ScalaConsultants","Scalac","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002FScalaConsultants_80c0baab.png","Best Scala hAkkers!",null,"info@scalac.io","https:\u002F\u002Fscalac.io\u002F?utm_source=scalac_github&utm_campaign=scalac1&utm_medium=web","https:\u002F\u002Fgithub.com\u002FScalaConsultants",[81],{"name":82,"color":83,"percentage":84},"Python","#3572A5",100,580,94,"2026-04-05T19:49:21","Apache-2.0",1,"未说明",{"notes":92,"python":93,"dependencies":94},"README 未明确列出底层深度学习框架（如 PyTorch）的具体版本或 GPU 硬件要求，但指出该工具基于 BERT 架构。推荐使用 conda 通过提供的 environment.yml 文件创建环境。文本分割默认使用 SpaCy 的 CNN 模型。由于涉及 BERT 模型推理，实际运行可能隐式需要支持 CUDA 的环境以获得更好性能，但文档未强制要求。","3.7",[95,96,97,98],"aspect-based-sentiment-analysis","transformers (隐含，基于 BERT 模型)","spacy (用于文本分割)","conda (推荐用于环境管理)",[14,35],[95,101,102,103,104,105,106,107,108,109,110,111],"tensorflow","sentiment-analysis","machine-learning","interpretability","distill","transformers","transformer-models","bert-embeddings","deep-learning","explainable-ml","explainable-ai","2026-03-27T02:49:30.150509","2026-04-06T18:54:08.609392",[115,120,125,130,135,140],{"id":116,"question_zh":117,"answer_zh":118,"source_url":119},19652,"为什么无法通过 pip 安装该包（提示找不到匹配的版本）？","这通常是因为 Python 版本不兼容。请确保您使用的是 Python 3.7 版本。如果您在 Google Colab 上运行，请注意旧版 Colab 可能仅支持 Python 2.7 或 3.6，导致无法安装。建议创建一个新的 conda 环境并指定 Python 3.7，或者升级到支持 Python 3.7+ 的环境后再运行 `pip install aspect-based-sentiment-analysis`。","https:\u002F\u002Fgithub.com\u002FScalaConsultants\u002FAspect-Based-Sentiment-Analysis\u002Fissues\u002F14",{"id":121,"question_zh":122,"answer_zh":123,"source_url":124},19653,"如何在 Ubuntu 18.04 和 Python 3.7.5 环境下解决 TensorFlow 依赖安装失败的问题？","在 Ubuntu 上安装失败通常是因为 TensorFlow 版本冲突。建议先更新 Python，然后创建一个空的 conda 环境。在该环境中，首先单独安装特定版本的 TensorFlow，命令为：`pip3 install tensorflow==2.2.0`。安装完 TensorFlow 及其所有依赖后，再安装本库。如果问题依旧，请检查系统底层依赖是否完整。","https:\u002F\u002Fgithub.com\u002FScalaConsultants\u002FAspect-Based-Sentiment-Analysis\u002Fissues\u002F13",{"id":126,"question_zh":127,"answer_zh":128,"source_url":129},19654,"库的依赖项（TensorFlow 和 Transformers）过旧，与其他现代 NLP 库冲突怎么办？","维护者已收到反馈并更新了依赖项。请升级库到最新版本（2.0 及以上），新版本已兼容更新的 TensorFlow 和 Transformers 版本，解决了与 Sentence-Transformers 等库的冲突问题，同时也支持了 M1 Mac 等设备所需的 TensorFlow 2.5+ 版本。请使用 `pip install --upgrade aspect-based-sentiment-analysis` 进行更新。","https:\u002F\u002Fgithub.com\u002FScalaConsultants\u002FAspect-Based-Sentiment-Analysis\u002Fissues\u002F42",{"id":131,"question_zh":132,"answer_zh":133,"source_url":134},19655,"运行时出现 ValueError: Input 0 of layer classifier 
is incompatible... 错误如何解决？","此错误是由 `transformers` 库的版本不兼容引起的。解决方案有两种：1. 推荐将库升级到版本 `2.0.2` 或更高，维护者已在该版本中修复了此问题；2. 如果暂时无法升级主库，可以尝试将 `transformers` 库降级到 2.5 版本（`pip install transformers==2.5`）。","https:\u002F\u002Fgithub.com\u002FScalaConsultants\u002FAspect-Based-Sentiment-Analysis\u002Fissues\u002F34",{"id":136,"question_zh":137,"answer_zh":138,"source_url":139},19656,"为什么使用预训练的 Laptop 分类器时，准确率远低于 README 中描述的数值？","这是因为代码中加载了错误的模型名称。原代码使用了 `absa\u002Fbert-lapt-0.1`（这是语言模型而非分类器），导致准确率极低。请将模型名称更改为 `absa\u002Fclassifier-lapt-0.1`。修改后的加载代码应为：`nlp = absa.load('absa\u002Fclassifier-lapt-0.1')`。","https:\u002F\u002Fgithub.com\u002FScalaConsultants\u002FAspect-Based-Sentiment-Analysis\u002Fissues\u002F15",{"id":141,"question_zh":142,"answer_zh":143,"source_url":144},19657,"README 中提到的关于注意力机制分析的文章链接在哪里？","维护者提供了详细的官方博客文章链接，其中验证并分析了模型解释。您可以访问以下网址阅读：https:\u002F\u002Frafalrolczynski.com\u002F2021\u002F03\u002F07\u002Faspect-based-sentiment-analysis\u002F","https:\u002F\u002Fgithub.com\u002FScalaConsultants\u002FAspect-Based-Sentiment-Analysis\u002Fissues\u002F38",[]]