[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-shagunsodhani--papers-I-read":3,"tool-shagunsodhani--papers-I-read":61},[4,18,28,37,45,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":24,"last_commit_at":25,"category_tags":26,"status":17},9989,"n8n","n8n-io\u002Fn8n","n8n 是一款面向技术团队的公平代码（fair-code）工作流自动化平台，旨在让用户在享受低代码快速构建便利的同时，保留编写自定义代码的灵活性。它主要解决了传统自动化工具要么过于封闭难以扩展、要么完全依赖手写代码效率低下的痛点，帮助用户轻松连接 400 多种应用与服务，实现复杂业务流程的自动化。\n\nn8n 特别适合开发者、工程师以及具备一定技术背景的业务人员使用。其核心亮点在于“按需编码”：既可以通过直观的可视化界面拖拽节点搭建流程，也能随时插入 JavaScript 或 Python 代码、调用 npm 包来处理复杂逻辑。此外，n8n 原生集成了基于 LangChain 的 AI 能力，支持用户利用自有数据和模型构建智能体工作流。在部署方面，n8n 提供极高的自由度，支持完全自托管以保障数据隐私和控制权，也提供云端服务选项。凭借活跃的社区生态和数百个现成模板，n8n 让构建强大且可控的自动化系统变得简单高效。",184740,2,"2026-04-19T23:22:26",[16,14,13,15,27],"插件",{"id":29,"name":30,"github_repo":31,"description_zh":32,"stars":33,"difficulty_score":10,"last_commit_at":34,"category_tags":35,"status":17},10095,"AutoGPT","Significant-Gravitas\u002FAutoGPT","AutoGPT 是一个旨在让每个人都能轻松使用和构建 AI 的强大平台，核心功能是帮助用户创建、部署和管理能够自动执行复杂任务的连续型 AI 智能体。它解决了传统 AI 应用中需要频繁人工干预、难以自动化长流程工作的痛点，让用户只需设定目标，AI 即可自主规划步骤、调用工具并持续运行直至完成任务。\n\n无论是开发者、研究人员，还是希望提升工作效率的普通用户，都能从 AutoGPT 
中受益。开发者可利用其低代码界面快速定制专属智能体；研究人员能基于开源架构探索多智能体协作机制；而非技术背景用户也可直接选用预置的智能体模板，立即投入实际工作场景。\n\nAutoGPT 的技术亮点在于其模块化“积木式”工作流设计——用户通过连接功能块即可构建复杂逻辑，每个块负责单一动作，灵活且易于调试。同时，平台支持本地自托管与云端部署两种模式，兼顾数据隐私与使用便捷性。配合完善的文档和一键安装脚本，即使是初次接触的用户也能在几分钟内启动自己的第一个 AI 智能体。AutoGPT 正致力于降低 AI 应用门槛，让人人都能成为 AI 的创造者与受益者。",183572,"2026-04-20T04:47:55",[13,36,27,14,15],"语言模型",{"id":38,"name":39,"github_repo":40,"description_zh":41,"stars":42,"difficulty_score":10,"last_commit_at":43,"category_tags":44,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":46,"name":47,"github_repo":48,"description_zh":49,"stars":50,"difficulty_score":24,"last_commit_at":51,"category_tags":52,"status":17},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",161692,"2026-04-20T11:33:57",[14,13,36],{"id":54,"name":55,"github_repo":56,"description_zh":57,"stars":58,"difficulty_score":24,"last_commit_at":59,"category_tags":60,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 
是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",109154,"2026-04-18T11:18:24",[14,15,13],{"id":62,"github_repo":63,"name":64,"description_en":65,"description_zh":66,"ai_summary_zh":66,"readme_en":67,"readme_zh":68,"quickstart_zh":69,"use_case_zh":70,"hero_image_url":71,"owner_login":72,"owner_name":73,"owner_avatar_url":74,"owner_bio":75,"owner_company":76,"owner_location":77,"owner_email":78,"owner_twitter":72,"owner_website":79,"owner_url":80,"languages":81,"stars":101,"forks":102,"last_commit_at":103,"license":78,"difficulty_score":104,"env_os":105,"env_gpu":106,"env_ram":106,"env_deps":107,"category_tags":110,"github_topics":112,"view_count":24,"oss_zip_url":78,"oss_zip_packed_at":78,"status":17,"created_at":120,"updated_at":121,"faqs":122,"releases":123},10227,"shagunsodhani\u002Fpapers-I-read","papers-I-read","A-Paper-A-Week","papers-I-read 是一个由研究者发起的“每周一篇论文”开源项目，旨在系统性地整理、解读和分享人工智能及计算机系统领域的高质量学术文献。面对海量且更新迅速的科研论文，许多从业者在筛选重点和理解核心思想上耗费大量精力，papers-I-read 通过提供精选论文列表及其配套的详细摘要与笔记，有效降低了阅读门槛，帮助用户快速把握前沿技术脉络。\n\n该项目涵盖的内容十分广泛，既包括 Toolformer、超网络（HyperNetworks）、持续学习等前沿 AI 算法研究，也深入探讨了 YouTube 推荐系统、广告点击预测、分布式数据库设计（如 Cassandra、CAP 定理）等工业界实战经验。其独特亮点在于不仅关注模型理论，更强调从系统设计和工程落地的视角去解析论文，提供了难得的“战地视角”与实践教训。\n\npapers-I-read 非常适合 AI 研究人员、算法工程师、系统架构师以及希望深入了解技术底层逻辑的开发者使用。对于想要追踪学术动态、寻找灵感或补充系统知识盲区的专业人士而言，这是一个极具价值的知识库与学习指南，能帮助大家在繁忙的工作中高效获取经过提炼的技术精华。","# papers-I-read\n\nI am trying a new initiative - a-paper-a-week. 
This repository will hold all those papers and related summaries and notes.\n\n## List of papers\n\n- [Toolformer - Language Models Can Teach Themselves to Use Tools](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FToolformer-Language-Models-Can-Teach-Themselves-to-Use-Tools)\n- [Hints for Computer System Design](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FHints-for-Computer-System-Design)\n- [Synthesized Policies for Transfer and Adaptation across Tasks and Environments](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FSynthesized-Policies-for-Transfer-and-Adaptation-across-Tasks-and-Environments)\n- [Deep Neural Networks for YouTube Recommendations](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FDeep-Neural-Networks-for-YouTube-Recommendations)\n- [The Tail at Scale](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FThe-Tail-at-Scale)\n- [Practical Lessons from Predicting Clicks on Ads at Facebook](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FPractical-Lessons-from-Predicting-Clicks-on-Ads-at-Facebook)\n- [Ad Click Prediction - a View from the Trenches](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FAd-Click-Prediction-a-View-from-the-Trenches)\n- [Anatomy of Catastrophic Forgetting - Hidden Representations and Task Semantics](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FAnatomy-of-Catastrophic-Forgetting-Hidden-Representations-and-Task-Semantics)\n- [When Do Curricula Work?](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FWhen-Do-Curricula-Work)\n- [Continual learning with hypernetworks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FContinual-learning-with-hypernetworks)\n- [Zero-shot Learning by Generating Task-specific Adapters](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FZero-shot-Learning-by-Generating-Task-specific-Adapters)\n- 
[HyperNetworks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FHyperNetworks)\n- [Energy-based Models for Continual Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FEnergy-based-Models-for-Continual-Learning)\n- [GPipe - Easy Scaling with Micro-Batch Pipeline Parallelism](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FGPipe-Easy-Scaling-with-Micro-Batch-Pipeline-Parallelism)\n- [Compositional Explanations of Neurons](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FCompositional-Explanations-of-Neurons)\n- [Design patterns for container-based distributed systems](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FDesign-patterns-for-container-based-distributed-systems)\n- [Cassandra - a decentralized structured storage system](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FCassandra-a-decentralized-structured-storage-system)\n- [CAP twelve years later - How the rules have changed](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FCAP-twelve-years-later-How-the-rules-have-changed)\n- [Consistency Tradeoffs in Modern Distributed Database System Design](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FConsistency-Tradeoffs-in-Modern-Distributed-Database-System-Design)\n- [Exploring Simple Siamese Representation Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FExploring-Simple-Siamese-Representation-Learning)\n- [Data Management for Internet-Scale Single-Sign-On](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FData-Management-for-Internet-Scale-Single-Sign-On)\n- [Searching for Build Debt - Experiences Managing Technical Debt at Google](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FSearching-for-Build-Debt-Experiences-Managing-Technical-Debt-at-Google)\n- [One Solution is Not All You Need - Few-Shot Extrapolation via Structured MaxEnt 
RL](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FOne-Solution-is-Not-All-You-Need-Few-Shot-Extrapolation-via-Structured-MaxEnt-RL)\n- [Learning Explanations That Are Hard To Vary](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FLearning-Explanations-That-Are-Hard-To-Vary)\n- [Remembering for the Right Reasons - Explanations Reduce Catastrophic Forgetting](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FRemembering-for-the-Right-Reasons-Explanations-Reduce-Catastrophic-Forgetting)\n- [A Foliated View of Transfer Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FA-Foliated-View-of-Transfer-Learning)\n- [Harvest, Yield, and Scalable Tolerant Systems](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FHarvest,-Yield,-and-Scalable-Tolerant-Systems)\n- [MONet - Unsupervised Scene Decomposition and Representation](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FMONet-Unsupervised-Scene-Decomposition-and-Representation)\n- [Revisiting Fundamentals of Experience Replay](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FRevisiting-Fundamentals-of-Experience-Replay)\n- [Deep Reinforcement Learning and the Deadly Triad](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FDeep-Reinforcement-Learning-and-the-Deadly-Triad)\n- [Alpha Net: Adaptation with Composition in Classifier Space](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FAlpha-Net-Adaptation-with-Composition-in-Classifier-Space)\n- [Outrageously Large Neural Networks: The Sparsely-Gated Mixture-of-Experts Layer](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FOutrageously-Large-Neural-Networks-The-Sparsely-Gated-Mixture-of-Experts-Layer)\n- [Gradient Surgery for Multi-Task Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FGradient-Surgery-for-Multi-Task-Learning)\n- [GradNorm: Gradient Normalization for Adaptive Loss Balancing in Deep Multitask 
Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FGradNorm-Gradient-Normalization-for-Adaptive-Loss-Balancing-in-Deep-Multitask-Networks)\n- [TaskNorm: Rethinking Batch Normalization for Meta-Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FTASKNORM-Rethinking-Batch-Normalization-for-Meta-Learning)\n- [Averaging Weights leads to Wider Optima and Better Generalization](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FAveraging-Weights-leads-to-Wider-Optima-and-Better-Generalization)\n- [Decentralized Reinforcement Learning: Global Decision-Making via Local Economic Transactions](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FDecentralized-Reinforcement-Learning-Global-Decision-Making-via-Local-Economic-Transactions)\n- [When to use parametric models in reinforcement learning?](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FWhen-to-use-parametric-models-in-reinforcement-learning)\n- [Network Randomization - A Simple Technique for Generalization in Deep Reinforcement Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FNetwork-Randomization-A-Simple-Technique-for-Generalization-in-Deep-Reinforcement-Learning)\n- [On the Difficulty of Warm-Starting Neural Network Training](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FOn-the-Difficulty-of-Warm-Starting-Neural-Network-Training)\n- [Supervised Contrastive Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FSupervised-Contrastive-Learning)\n- [CURL - Contrastive Unsupervised Representations for Reinforcement Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FCURL-Contrastive-Unsupervised-Representations-for-Reinforcement-Learning)\n- [Competitive Training of Mixtures of Independent Deep Generative Models](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FCompetitive-Training-of-Mixtures-of-Independent-Deep-Generative-Models)\n- [What Does Classifying More Than 10,000 
Image Categories Tell Us?](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FWhat-Does-Classifying-More-Than-10,000-Image-Categories-Tell-Us)\n- [mixup - Beyond Empirical Risk Minimization](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002Fmixup-Beyond-Empirical-Risk-Minimization)\n- [ELECTRA - Pre-training Text Encoders as Discriminators Rather Than Generators](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FELECTRA-Pre-training-Text-Encoders-as-Discriminators-Rather-Than-Generators)\n- [Gradient based sample selection for online continual learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FGradient-based-sample-selection-for-online-continual-learning)\n- [Your Classifier is Secretly an Energy Based Model and You Should Treat it Like One](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FYour-Classifier-is-Secretly-an-Energy-Based-Model,-and-You-Should-Treat-it-Like-One)\n- [Massively Multilingual Neural Machine Translation in the Wild - Findings and Challenges](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FMassively-Multilingual-Neural-Machine-Translation-in-the-Wild-Findings-and-Challenges)\n- [Observational Overfitting in Reinforcement Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FObservational-Overfitting-in-Reinforcement-Learning)\n- [Rapid Learning or Feature Reuse? 
Towards Understanding the Effectiveness of MAML](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FRapid-Learning-or-Feature-Reuse-Towards-Understanding-the-Effectiveness-of-MAML)\n- [Accurate, Large Minibatch SGD - Training ImageNet in 1 Hour](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FAccurate-Large-Minibatch-SGD-Training-ImageNet-in-1-Hour)\n- [Superposition of many models into one](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FSuperposition-of-many-models-into-one)\n- [Towards a Unified Theory of State Abstraction for MDPs](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FTowards-a-Unified-Theory-of-State-Abstraction-for-MDPs)\n- [ALBERT - A Lite BERT for Self-supervised Learning of Language Representations](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FALBERT-A-Lite-BERT-for-Self-supervised-Learning-of-Language-Representations)\n- [Mastering Atari, Go, Chess and Shogi by Planning with a Learned Model](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FMastering-Atari,-Go,-Chess-and-Shogi-by-Planning-with-a-Learned-Model)\n- [Contrastive Learning of Structured World Models](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FContrastive-Learning-of-Structured-World-Models)\n- [Gossip based Actor-Learner Architectures for Deep RL](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FGossip-based-Actor-Learner-Architectures-for-Deep-RL)\n- [How to train your MAML](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FHow-to-train-your-MAML)\n- [PHYRE - A New Benchmark for Physical Reasoning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FPHYRE-A-New-Benchmark-for-Physical-Reasoning)\n- [Large Memory Layers with Product Keys](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FLarge-Memory-Layers-with-Product-Keys)\n- [Abductive Commonsense Reasoning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FAbductive-Commonsense-Reasoning)\n- 
[Deep Reinforcement Learning in a Handful of Trials using Probabilistic Dynamics Models](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FDeep-Reinforcement-Learning-in-a-Handful-of-Trials-using-Probabilistic-Dynamics-Models)\n- [Assessing Generalization in Deep Reinforcement Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FAssessing-Generalization-in-Deep-Reinforcement-Learning)\n- [Quantifying Generalization in Reinforcement Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FQuantifying-Generalization-in-Reinforcement-Learning)\n- [Set Transformer: A Framework for Attention-based Permutation-Invariant Neural Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FSet-Transformer-A-Framework-for-Attention-based-Permutation-Invariant-Neural-Networks)\n- [Measuring abstract reasoning in neural networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FMeasuring-Abstract-Reasoning-in-Neural-Networks)\n- [Hamiltonian Neural Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FHamiltonian-Neural-Networks)\n- [Extrapolating Beyond Suboptimal Demonstrations via Inverse Reinforcement Learning from Observations](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FExtrapolating-Beyond-Suboptimal-Demonstrations-via-Inverse-Reinforcement-Learning-from-Observations)\n- [Meta-Reinforcement Learning of Structured Exploration Strategies](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FMeta-Reinforcement-Learning-of-Structured-Exploration-Strategies)\n- [Relational Reinforcement Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FRelational-Reinforcement-Learning)\n- [Good-Enough Compositional Data Augmentation](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FGood-Enough-Compositional-Data-Augmentation)\n- [Multiple Model-Based Reinforcement 
Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FMultiple-Model-Based-Reinforcement-Learning)\n- [Towards a natural benchmark for continual learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FTowards-a-natural-benchmark-for-continual-learning)\n- [Meta-Learning Update Rules for Unsupervised Representation Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FMeta-Learning-Update-Rules-for-Unsupervised-Representation-Learning)\n- [GNN Explainer - A Tool for Post-hoc Explanation of Graph Neural Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FGNN-Explainer-A-Tool-for-Post-hoc-Explanation-of-Graph-Neural-Networks)\n- [To Tune or Not to Tune? Adapting Pretrained Representations to Diverse Tasks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FTo-Tune-or-Not-to-Tune-Adapting-Pretrained-Representations-to-Diverse-Tasks)\n- [Model Primitive Hierarchical Lifelong Reinforcement Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FModel-Primitive-Hierarchical-Lifelong-Reinforcement-Learning)\n- [TuckER - Tensor Factorization for Knowledge Graph Completion](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FTuckER-Tensor-Factorization-for-Knowledge-Graph-Completion)\n- [Linguistic Knowledge as Memory for Recurrent Neural Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FLinguistic-Knowledge-as-Memory-for-Recurrent-Neural-Networks)\n- [Diversity is All You Need - Learning Skills without a Reward Function](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FDiversity-is-All-You-Need-Learning-Skills-without-a-Reward-Function)\n- [Modular meta-learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FModular-meta-learning)\n- [Hierarchical RL Using an Ensemble of Proprioceptive Periodic Policies](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FHierarchical-RL-Using-an-Ensemble-of-Proprioceptive-Periodic-Policies)\n- 
[Efficient Lifelong Learningi with A-GEM](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FEfficient-Lifelong-Learning-with-A-GEM)\n- [Pre-training Graph Neural Networks with Kernels](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FPre-training-Graph-Neural-Networks-with-Kernels)\n- [Smooth Loss Functions for Deep Top-k Classification](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FSmooth-Loss-Functions-for-Deep-Top-k-Classification)\n- [Hindsight Experience Replay](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FHindsight-Experience-Replay)\n- [Representation Tradeoffs for Hyperbolic Embeddings](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FRepresentation-Tradeoffs-for-Hyperbolic-Embeddings)\n- [Learned Optimizers that Scale and Generalize](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FLearned-Optimizers-that-Scale-and-Generalize)\n- [One-shot Learning with Memory-Augmented Neural Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FOne-shot-Learning-with-Memory-Augmented-Neural-Networks)\n- [BabyAI - First Steps Towards Grounded Language Learning With a Human In the Loop](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FBabyAI-First-Steps-Towards-Grounded-Language-Learning-With-a-Human-In-the-Loop)\n- [Poincaré Embeddings for Learning Hierarchical Representations](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FPoincare-Embeddings-for-Learning-Hierarchical-Representations)\n- [When Recurrent Models Don’t Need To Be Recurrent](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FWhen-Recurrent-Models-Don-t-Need-To-Be-Recurrent)\n- [HoME - a Household Multimodal Environment](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FHoME-a-Household-Multimodal-Environment)\n- [Emergence of Grounded Compositional Language in Multi-Agent 
Populations](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FEmergence-of-Grounded-Compositional-Language-in-Multi-Agent-Populations)\n- [A Semantic Loss Function for Deep Learning with Symbolic Knowledge](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FA-Semantic-Loss-Function-for-Deep-Learning-with-Symbolic-Knowledge)\n- [Hierarchical Graph Representation Learning with Differentiable Pooling](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FHierarchical-Graph-Representation-Learning-with-Differentiable-Pooling)\n- [Imagination-Augmented Agents for Deep Reinforcement Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FImagination-Augmented-Agents-for-Deep-Reinforcement-Learning)\n- [Kronecker Recurrent Units](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FKronecker-Recurrent-Units)\n- [Learning Independent Causal Mechanisms](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FLearning-Independent-Causal-Mechanisms)\n- [Memory-based Parameter Adaptation](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FMemory-Based-Parameter-Adaption)\n- [Born Again Neural Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FBorn-Again-Neural-Networks)\n- [Net2Net-Accelerating Learning via Knowledge Transfer](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FNet2Net-Accelerating-Learning-via-Knowledge-Transfer)\n- [Learning to Count Objects in Natural Images for Visual Question Answering](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FLearning-to-Count-Objects-in-Natural-Images-for-Visual-Question-Answering)\n- [Neural Message Passing for Quantum Chemistry](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FNeural-Message-Passing-for-Quantum-Chemistry)\n- [Unsupervised Learning by Predicting Noise](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FUnsupervised-Learning-By-Predicting-Noise)\n- [The Lottery Ticket Hypothesis - Training Pruned 
Neural Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FThe-Lottery-Ticket-Hypothesis-Training-Pruned-Neural-Networks)\n- [Cyclical Learning Rates for Training Neural Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FCyclical-Learning-Rates-for-Training-Neural-Networks)\n- [Improving Information Extraction by Acquiring External Evidence with Reinforcement Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FImproving-Information-Extraction-by-Acquiring-External-Evidence-with-Reinforcement-Learning)\n- [An Empirical Investigation of Catastrophic Forgetting in Gradient-Based Neural Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FAn-Empirical-Investigation-of-Catastrophic-Forgetting-in-Gradient-Based-Neural-Networks)\n- [Learning an SAT Solver from Single-Bit Supervision](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FLearning-a-SAT-Solver-from-Single-Bit-Supervision)\n- [Neural Relational Inference for Interacting Systems](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FNeural-Relational-Inference-for-Interacting-Systems)\n- [Stylistic Transfer in Natural Language Generation Systems Using Recurrent Neural Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FStylistic-Transfer-in-Natural-Language-Generation-Systems-Using-Recurrent-Neural-Networks)\n- [Get To The Point: Summarization with Pointer-Generator Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FGet-To-The-Point-Summarization-with-Pointer-Generator-Networks)\n- [StarSpace - Embed All The Things!](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FStarSpace-Embed-All-The-Things)\n- [Emotional Chatting Machine - Emotional Conversation Generation with Internal and External Memory](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FEmotional-Chatting-Machine-Emotional-Conversation-Generation-with-Internal-and-External-Memory)\n- [Exploring Models and Data 
for Image Question Answering](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FExploring-Models-and-Data-for-Image-Question-Answering)\n- [How transferable are features in deep neural networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FHow-transferable-are-features-in-deep-neural-networks)\n- [Distilling the Knowledge in a Neural Network](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FDistilling-the-Knowledge-in-a-Neural-Network)\n- [Revisiting Semi-Supervised Learning with Graph Embeddings](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FRevisiting-Semi-Supervised-Learning-with-Graph-Embeddings)\n- [Two-Stage Synthesis Networks for Transfer Learning in Machine Comprehension](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FTwo-Stage-Synthesis-Networks-for-Transfer-Learning-in-Machine-Comprehension)\n- [Higher-order organization of complex networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FHigher-order-organization-of-complex-networks)\n- [Network Motifs - Simple Building Blocks of Complex Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FNetwork-Motifs-Simple-Building-Blocks-of-Complex-Networks)\n- [Word Representations via Gaussian Embedding](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FWord-Representations-via-Gaussian-Embedding)\n- [HARP - Hierarchical Representation Learning for Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FHARP-Hierarchical-Representation-Learning-for-Networks)\n- [Swish - a Self-Gated Activation Function](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FSwish-A-self-gated-activation-function)\n- [Reading Wikipedia to Answer Open-Domain Questions](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FReading-Wikipedia-to-Answer-Open-Domain-Questions)\n- [Task-Oriented Query Reformulation with Reinforcement 
Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FTask-Oriented-Query-Reformulation-with-Reinforcement-Learning)\n- [Refining Source Representations with Relation Networks for Neural Machine Translation](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FRefining-Source-Representations-with-Relation-Networks-for-Neural-Machine-Translation)\n- [Pointer Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FPointer-Networks)\n- [Learning to Compute Word Embeddings On the Fly](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FLearning-to-Compute-Word-Embeddings-On-the-Fly)\n- [R-NET - Machine Reading Comprehension with Self-matching Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FR-NET-Machine-Reading-Comprehension-with-Self-matching-Networks)\n- [ReasoNet - Learning to Stop Reading in Machine Comprehension](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FReasoNet-Learning-to-Stop-Reading-in-Machine-Comprehension)\n- [Principled Detection of Out-of-Distribution Examples in Neural Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FPrincipled-Detection-of-Out-of-Distribution-Examples-in-Neural-Networks)\n- [Ask Me Anything: Dynamic Memory Networks for Natural Language Processing](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FAsk-Me-Anything-Dynamic-Memory-Networks-for-Natural-Language-Processing)\n- [One Model To Learn Them All](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FOne-Model-To-Learn-Them-All)\n- [Two\u002FToo Simple Adaptations of Word2Vec for Syntax Problems](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FTwo-Too-Simple-Adaptations-of-Word2Vec-for-Syntax-Problems)\n- [A Decomposable Attention Model for Natural Language Inference](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FA-Decomposable-Attention-Model-for-Natural-Language-Inference)\n- [A Fast and Accurate Dependency Parser using Neural 
Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FA-Fast-and-Accurate-Dependency-Parser-using-Neural-Networks)\n- [Neural Module Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FNeural-Module-Networks)\n- [Making the V in VQA Matter: Elevating the Role of Image Understanding in Visual Question Answering](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FMaking-the-V-in-VQA-Matter-Elevating-the-Role-of-Image-Understanding-in-Visual-Question-Answering)\n- [Conditional Similarity Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FConditional-Similarity-Networks)\n- [Simple Baseline for Visual Question Answering](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FSimple-Baseline-for-Visual-Question-Answering)\n- [VQA: Visual Question Answering](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FVQA-Visual-Question-Answering)\n- [Learning to Generate Reviews and Discovering Sentiment](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F634dbe1aa678188399254bb3d0078e1d)\n- [Seeing the Arrow of Time](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F828d8de0034a350d97738bbedadc9373)\n- [End-to-end optimization of goal-driven and visually grounded dialogue systems](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fbbbc739e6815ab6217e0cf0a8f706786)\n- [GuessWhat?! 
Visual object discovery through multi-modal dialogue](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F2418238e6aefd7b1e8c922cda9e10488)\n- [Semantic Parsing via Paraphrasing](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F93c96d7dd0488d0d00bd7078889dd6f6)\n- [Traversing Knowledge Graphs in Vector Space](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fe8e6213906ec2642f27b1aca3a6201c6)\n- [PPDB: The Paraphrase Database](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Ffa1f387f084355dfafdf7550b1899af6)\n- [NewsQA: A Machine Comprehension Dataset](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fc47f0d5c1dfe60ce5da0dd8241e506ea)\n- [A Persona-Based Neural Conversation Model](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F8ad464e7d0ea4c7c6ed5189ac4e44095)\n- [“Why Should I Trust You?” Explaining the Predictions of Any Classifier](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fbd744ab6c17a2289ca139ea586d1d65e)\n- [Conditional Generative Adversarial Nets](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F5d726334de3014defeeb701099a3b4b3)\n- [Addressing the Rare Word Problem in Neural Machine Translation](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fa18fe14b74c7292129c6c5ecb37f33b5)\n- [Achieving Open Vocabulary Neural Machine Translation with Hybrid Word-Character Models](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fd32e665b27696ce0436c79174a136410)\n- [Recursive Deep Models for Semantic Compositionality Over a Sentiment Treebank](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F6ca136088f58d24f7b08056ec8b97595)\n- [Improving Word Representations via Global Context and Multiple Word Prototypes](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F1be86a9bcbd7f120ce55994dcd932bbf)\n- [Learning Phrase Representations using RNN Encoder–Decoder for Statistical Machine 
Translation](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F9dccec626e68e495fd4577ecdca36b7b)\n- [Skip-Thought Vectors](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F4a4eb32de8cabf21bda9a4ada15c46e8)\n- [Deep Convolutional Generative Adversarial Nets](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Faa79796c70565e3761e86d0f932a3de5)\n- [Generative Adversarial Nets](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F1f9dc0444142be8bd8a7404a226880eb)\n- [A Roadmap towards Machine Intelligence](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F9928673525b1713c2d41fd0fac38f81f)\n- [Smart Reply: Automated Response Suggestion for Email](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fda411f15b71ed6a664f9d5ac46409b42)\n- [Convolutional Neural Network For Sentence Classification](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F9ae6d2364c278c97b1b2f4ec53255c56)\n- [Conditional Image Generation with PixelCNN Decoders](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F3cc7066ce7de051d769908b8fab11990)\n- [Pixel Recurrent Neural Networks](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fe741ebd5ba0e0fc0f49d7836e30891a7)\n- [Deep Inside Convolutional Networks: Visualising Image Classification Models and Saliency Maps](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Ff48da7f77418aa22751ffed115779126)\n- [Bag of Tricks for Efficient Text Classification](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F432746f15889f7f4a798bf7f9ec4b7d8)\n- [GloVe: Global Vectors for Word Representation](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fefea5a42d17e0fcf18374df8e3e4b3e8)\n- [SimRank: A Measure of Structural-Context Similarity](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F6329486212643fd61f58a5a3eb5abb3c)\n- [How NOT To Evaluate Your Dialogue System: An Empirical Study of Unsupervised Evaluation Metrics for Dialogue Response 
Generation](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Ff05748b6339ceff26420ceecfc79d58d)\n- [Neural Generation of Regular Expressions from Natural Language with Minimal Domain Knowledge](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F004d803bc021f579d4aa3b24cec5b994)\n- [WikiReading : A Novel Large-scale Language Understanding Task over Wikipedia](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F2788ac9dbcac5523cb8b2d0a3d70f2d2)\n- [WikiQA: A challenge dataset for open-domain question answering](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F7cf3677ff2b0028a33e6702fbd260bc5)\n- [Teaching Machines to Read and Comprehend](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fa863eb099bb7a1ab4831cd37bffffb04)\n- [Evaluating Prerequisite Qualities for Learning End-to-end Dialog Systems](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F5e7c40f61c18502eec2809e5cf1ead6b)\n- [Recurrent Neural Network Regularization](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fd66245692b276cd0b6dcbaf43e4211db)\n- [Deep Math: Deep Sequence Models for Premise Selection](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fd8387256f2bb08f39509600f9d7db498)\n- [A Neural Conversational Model](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fec6835964df0e49fdef0459c8b334b94)\n- [Key-Value Memory Networks for Directly Reading Documents](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fa5e0baa075b4a917c0a69edc575772a8)\n- [Advances In Optimizing Recurrent Networks](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F75dc31e3c7999ad4a1edf4f289deaa88)\n- [Query Regression Networks for Machine Comprehension](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F93caa283af3c151372f4be86ed4c4b99)\n- [Sequence to Sequence Learning with Neural Networks](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fa2915921d7d0ac5cfd0e379025acfb9f)\n- [The Difficulty of Training Deep Architectures and the 
Effect of Unsupervised Pre-Training](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fe3608ccf262d6e5a6b537128c917c92c)\n- [Question Answering with Subgraph Embeddings](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fb65e299ff5f79a4f9da4a2e9281a0676)\n- [Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F12691b76addf149a224c24ab64b5bdcc)\n- [Visualizing Large-scale and High-dimensional Data](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F6c267cf6122399e9be36491a2f510641)\n- [Visualizing Data using t-SNE](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F2153e01d026712ac94a2b4928a2dbf3e)\n- [Curriculum Learning](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F7e4e1c9817c46e3cb1932f62aac8806b)\n- [End-To-End Memory Networks](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F17881da05d9ee1f6539b2baa8067a6ef)\n- [Memory Networks](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fc7a03a47b3d709e7c592fa7011b0f33e)\n- [Learning To Execute](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fb44b29b86cdfe1b6bae4286253f76350)\n- [Distributed GraphLab: A Framework for Machine Learning and Data Mining in the Cloud](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F1bb05a7134c27cffa1e2f57dc6b1c136)\n- [Large Scale Distributed Deep Networks](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F5733fffe6b1a268998bd93f29ec9fbeb)\n- [Efficient Estimation of Word Representations in Vector Space](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F176a283e2c158a75a0a6)\n- [Regularization and variable selection via the elastic net](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F1cd5d136c8ca30432de5)\n- [Fractional Max-Pooling](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fccfe3134f46fd3738aa0)\n- [TAO: 
Facebook’s Distributed Data Store for the Social Graph](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F1c91987c2a4a098fa9f1)\n- [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F4441216a298df0fe6ab0)\n- [The Unified Logging Infrastructure for Data Analytics at Twitter](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F0083f8a2d276e026b15c)\n- [A Few Useful Things to Know about Machine Learning](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F5c2cdfc269bf8aa50b72)\n- [Hive – A Petabyte Scale Data Warehouse Using Hadoop](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fb0651ade0dc39aeb7cfd)\n- [Kafka: a Distributed Messaging System for Log Processing](https:\u002F\u002Fmedium.com\u002F@shagun\u002Fnotes-about-kafka-cc6c1b5c5025)\n- [Power-law distributions in Empirical data](https:\u002F\u002Fgithub.com\u002Fshagunsodhani\u002Fpowerlaw\u002Fblob\u002Fmaster\u002Fpaper\u002FREADME.md)\n- [Pregel: A System for Large-Scale Graph Processing](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Faf9677bdc79bb34be698)\n- [GraphX: Unifying Data-Parallel and Graph-Parallel Analytics](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fc72bc1928aeef40280c9)\n- [Pig Latin: A Not-So-Foreign Language for Data Processing](https:\u002F\u002Fmedium.com\u002F@shagun\u002Fpig-latin-e840ac23db93)\n- [Resilient Distributed Datasets: A Fault-Tolerant Abstraction for In-Memory Cluster Computing](https:\u002F\u002Fmedium.com\u002F@shagun\u002Fresilient-distributed-datasets-97c28c3a9411)\n- [MapReduce: Simplified Data Processing on Large Clusters](https:\u002F\u002Fmedium.com\u002F@shagun\u002Fmapreduce-1c88f8a7c3d2)\n- [BigTable: A Distributed Storage System for Structured Data](https:\u002F\u002Fmedium.com\u002F@shagun\u002Fbigtable-bf580262f030)\n- [Spark SQL: Relational Data Processing in 
Spark](https:\u002F\u002Fmedium.com\u002F@shagun\u002Fspark-sql-68a6fac271fe)\n- [Spark: Cluster Computing with Working Sets](https:\u002F\u002Fmedium.com\u002F@shagun\u002Fspark-8ca626d55d21)\n- [Fast Data in the Era of Big Data: Twitter’s Real-Time Related Query Suggestion Architecture](https:\u002F\u002Fmedium.com\u002F@shagun\u002Ffast-data-in-the-era-of-big-data-e6208e6d3575)\n- [Scaling Memcache at Facebook](https:\u002F\u002Fmedium.com\u002F@shagun\u002Fscaling-memcache-at-facebook-1ba77d71c082)\n- [Dynamo: Amazon’s Highly Available Key-value Store](https:\u002F\u002Fmedium.com\u002F@shagun\u002Fdynamo-9665c22a1ddb)\n- [f4 : Facebook's Warm BLOB Storage System](https:\u002F\u002Fmedium.com\u002F@shagun\u002Ff4-cba2f141cb0c)\n- [A Theoretician’s Guide to the Experimental Analysis of Algorithms](https:\u002F\u002Fmedium.com\u002F@shagun\u002Fdos-and-dont-s-of-research-fe33322c7aff)\n- [Cuckoo Hashing](https:\u002F\u002Fmedium.com\u002F@shagun\u002Fcuckoo-hashing-eb160dfab804)\n- [Never Ending Learning](https:\u002F\u002Fmedium.com\u002F@shagun\u002Fnever-ending-learning-e7b78006e713)\n","# 我读过的论文\n\n我正在尝试一项新计划——每周读一篇论文。这个仓库将存放所有这些论文以及相关的摘要和笔记。\n\n## 论文列表\n\n- [Toolformer - Language Models Can Teach Themselves to Use Tools](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FToolformer-Language-Models-Can-Teach-Themselves-to-Use-Tools)\n- [Hints for Computer System Design](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FHints-for-Computer-System-Design)\n- [Synthesized Policies for Transfer and Adaptation across Tasks and Environments](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FSynthesized-Policies-for-Transfer-and-Adaptation-across-Tasks-and-Environments)\n- [Deep Neural Networks for YouTube Recommendations](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FDeep-Neural-Networks-for-YouTube-Recommendations)\n- [The Tail at Scale](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FThe-Tail-at-Scale)\n- 
[Practical Lessons from Predicting Clicks on Ads at Facebook](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FPractical-Lessons-from-Predicting-Clicks-on-Ads-at-Facebook)\n- [Ad Click Prediction - a View from the Trenches](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FAd-Click-Prediction-a-View-from-the-Trenches)\n- [Anatomy of Catastrophic Forgetting - Hidden Representations and Task Semantics](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FAnatomy-of-Catastrophic-Forgetting-Hidden-Representations-and-Task-Semantics)\n- [When Do Curricula Work?](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FWhen-Do-Curricula-Work)\n- [Continual learning with hypernetworks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FContinual-learning-with-hypernetworks)\n- [Zero-shot Learning by Generating Task-specific Adapters](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FZero-shot-Learning-by-Generating-Task-specific-Adapters)\n- [HyperNetworks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FHyperNetworks)\n- [Energy-based Models for Continual Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FEnergy-based-Models-for-Continual-Learning)\n- [GPipe - Easy Scaling with Micro-Batch Pipeline Parallelism](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FGPipe-Easy-Scaling-with-Micro-Batch-Pipeline-Parallelism)\n- [Compositional Explanations of Neurons](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FCompositional-Explanations-of-Neurons)\n- [Design patterns for container-based distributed systems](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FDesign-patterns-for-container-based-distributed-systems)\n- [Cassandra - a decentralized structured storage system](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FCassandra-a-decentralized-structured-storage-system)\n- [CAP twelve years later - How the rules have 
changed](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FCAP-twelve-years-later-How-the-rules-have-changed)\n- [Consistency Tradeoffs in Modern Distributed Database System Design](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FConsistency-Tradeoffs-in-Modern-Distributed-Database-System-Design)\n- [Exploring Simple Siamese Representation Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FExploring-Simple-Siamese-Representation-Learning)\n- [Data Management for Internet-Scale Single-Sign-On](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FData-Management-for-Internet-Scale-Single-Sign-On)\n- [Searching for Build Debt - Experiences Managing Technical Debt at Google](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FSearching-for-Build-Debt-Experiences-Managing-Technical-Debt-at-Google)\n- [One Solution is Not All You Need - Few-Shot Extrapolation via Structured MaxEnt RL](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FOne-Solution-is-Not-All-You-Need-Few-Shot-Extrapolation-via-Structured-MaxEnt-RL)\n- [Learning Explanations That Are Hard To Vary](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FLearning-Explanations-That-Are-Hard-To-Vary)\n- [Remembering for the Right Reasons - Explanations Reduce Catastrophic Forgetting](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FRemembering-for-the-Right-Reasons-Explanations-Reduce-Catastrophic-Forgetting)\n- [A Foliated View of Transfer Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FA-Foliated-View-of-Transfer-Learning)\n- [Harvest, Yield, and Scalable Tolerant Systems](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FHarvest,-Yield,-and-Scalable-Tolerant-Systems)\n- [MONet - Unsupervised Scene Decomposition and Representation](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FMONet-Unsupervised-Scene-Decomposition-and-Representation)\n- [Revisiting Fundamentals of Experience 
Replay](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FRevisiting-Fundamentals-of-Experience-Replay)\n- [Deep Reinforcement Learning and the Deadly Triad](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FDeep-Reinforcement-Learning-and-the-Deadly-Triad)\n- [Alpha Net: Adaptation with Composition in Classifier Space](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FAlpha-Net-Adaptation-with-Composition-in-Classifier-Space)\n- [Outrageously Large Neural Networks: The Sparsely-Gated Mixture-of-Experts Layer](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FOutrageously-Large-Neural-Networks-The-Sparsely-Gated-Mixture-of-Experts-Layer)\n- [Gradient Surgery for Multi-Task Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FGradient-Surgery-for-Multi-Task-Learning)\n- [GradNorm: Gradient Normalization for Adaptive Loss Balancing in Deep Multitask Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FGradNorm-Gradient-Normalization-for-Adaptive-Loss-Balancing-in-Deep-Multitask-Networks)\n- [TaskNorm: Rethinking Batch Normalization for Meta-Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FTASKNORM-Rethinking-Batch-Normalization-for-Meta-Learning)\n- [Averaging Weights leads to Wider Optima and Better Generalization](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FAveraging-Weights-leads-to-Wider-Optima-and-Better-Generalization)\n- [Decentralized Reinforcement Learning: Global Decision-Making via Local Economic Transactions](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FDecentralized-Reinforcement-Learning-Global-Decision-Making-via-Local-Economic-Transactions)\n- [When to use parametric models in reinforcement learning?](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FWhen-to-use-parametric-models-in-reinforcement-learning)\n- [Network Randomization - A Simple Technique for Generalization in Deep Reinforcement 
Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FNetwork-Randomization-A-Simple-Technique-for-Generalization-in-Deep-Reinforcement-Learning)\n- [On the Difficulty of Warm-Starting Neural Network Training](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FOn-the-Difficulty-of-Warm-Starting-Neural-Network-Training)\n- [Supervised Contrastive Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FSupervised-Contrastive-Learning)\n- [CURL - Contrastive Unsupervised Representations for Reinforcement Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FCURL-Contrastive-Unsupervised-Representations-for-Reinforcement-Learning)\n- [Competitive Training of Mixtures of Independent Deep Generative Models](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FCompetitive-Training-of-Mixtures-of-Independent-Deep-Generative-Models)\n- [What Does Classifying More Than 10,000 Image Categories Tell Us?](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FWhat-Does-Classifying-More-Than-10,000-Image-Categories-Tell-Us)\n- [mixup - Beyond Empirical Risk Minimization](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002Fmixup-Beyond-Empirical-Risk-Minimization)\n- [ELECTRA - Pre-training Text Encoders as Discriminators Rather Than Generators](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FELECTRA-Pre-training-Text-Encoders-as-Discriminators-Rather-Than-Generators)\n- [Gradient based sample selection for online continual learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FGradient-based-sample-selection-for-online-continual-learning)\n- [Your Classifier is Secretly an Energy Based Model and You Should Treat it Like One](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FYour-Classifier-is-Secretly-an-Energy-Based-Model,-and-You-Should-Treat-it-Like-One)\n- [Massively Multilingual Neural Machine Translation in the Wild - Findings and 
Challenges](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FMassively-Multilingual-Neural-Machine-Translation-in-the-Wild-Findings-and-Challenges)\n- [Observational Overfitting in Reinforcement Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FObservational-Overfitting-in-Reinforcement-Learning)\n- [Rapid Learning or Feature Reuse? Towards Understanding the Effectiveness of MAML](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FRapid-Learning-or-Feature-Reuse-Towards-Understanding-the-Effectiveness-of-MAML)\n- [Accurate, Large Minibatch SGD - Training ImageNet in 1 Hour](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FAccurate-Large-Minibatch-SGD-Training-ImageNet-in-1-Hour)\n- [Superposition of many models into one](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FSuperposition-of-many-models-into-one)\n- [Towards a Unified Theory of State Abstraction for MDPs](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FTowards-a-Unified-Theory-of-State-Abstraction-for-MDPs)\n- [ALBERT - A Lite BERT for Self-supervised Learning of Language Representations](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FALBERT-A-Lite-BERT-for-Self-supervised-Learning-of-Language-Representations)\n- [Mastering Atari, Go, Chess and Shogi by Planning with a Learned Model](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FMastering-Atari,-Go,-Chess-and-Shogi-by-Planning-with-a-Learned-Model)\n- [Contrastive Learning of Structured World Models](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FContrastive-Learning-of-Structured-World-Models)\n- [Gossip based Actor-Learner Architectures for Deep RL](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FGossip-based-Actor-Learner-Architectures-for-Deep-RL)\n- [How to train your MAML](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FHow-to-train-your-MAML)\n- [PHYRE - A New Benchmark for Physical 
Reasoning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FPHYRE-A-New-Benchmark-for-Physical-Reasoning)\n- [Large Memory Layers with Product Keys](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FLarge-Memory-Layers-with-Product-Keys)\n- [Abductive Commonsense Reasoning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FAbductive-Commonsense-Reasoning)\n- [Deep Reinforcement Learning in a Handful of Trials using Probabilistic Dynamics Models](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FDeep-Reinforcement-Learning-in-a-Handful-of-Trials-using-Probabilistic-Dynamics-Models)\n- [Assessing Generalization in Deep Reinforcement Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FAssessing-Generalization-in-Deep-Reinforcement-Learning)\n- [Quantifying Generalization in Reinforcement Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FQuantifying-Generalization-in-Reinforcement-Learning)\n- [Set Transformer: A Framework for Attention-based Permutation-Invariant Neural Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FSet-Transformer-A-Framework-for-Attention-based-Permutation-Invariant-Neural-Networks)\n- [Measuring abstract reasoning in neural networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FMeasuring-Abstract-Reasoning-in-Neural-Networks)\n- [Hamiltonian Neural Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FHamiltonian-Neural-Networks)\n- [Extrapolating Beyond Suboptimal Demonstrations via Inverse Reinforcement Learning from Observations](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FExtrapolating-Beyond-Suboptimal-Demonstrations-via-Inverse-Reinforcement-Learning-from-Observations)\n- [Meta-Reinforcement Learning of Structured Exploration Strategies](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FMeta-Reinforcement-Learning-of-Structured-Exploration-Strategies)\n- [Relational Reinforcement 
Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FRelational-Reinforcement-Learning)\n- [Good-Enough Compositional Data Augmentation](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FGood-Enough-Compositional-Data-Augmentation)\n- [Multiple Model-Based Reinforcement Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FMultiple-Model-Based-Reinforcement-Learning)\n- [Towards a natural benchmark for continual learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FTowards-a-natural-benchmark-for-continual-learning)\n- [Meta-Learning Update Rules for Unsupervised Representation Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FMeta-Learning-Update-Rules-for-Unsupervised-Representation-Learning)\n- [GNN Explainer - A Tool for Post-hoc Explanation of Graph Neural Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FGNN-Explainer-A-Tool-for-Post-hoc-Explanation-of-Graph-Neural-Networks)\n- [To Tune or Not to Tune? 
Adapting Pretrained Representations to Diverse Tasks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FTo-Tune-or-Not-to-Tune-Adapting-Pretrained-Representations-to-Diverse-Tasks)\n- [Model Primitive Hierarchical Lifelong Reinforcement Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FModel-Primitive-Hierarchical-Lifelong-Reinforcement-Learning)\n- [TuckER - Tensor Factorization for Knowledge Graph Completion](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FTuckER-Tensor-Factorization-for-Knowledge-Graph-Completion)\n- [Linguistic Knowledge as Memory for Recurrent Neural Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FLinguistic-Knowledge-as-Memory-for-Recurrent-Neural-Networks)\n- [Diversity is All You Need - Learning Skills without a Reward Function](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FDiversity-is-All-You-Need-Learning-Skills-without-a-Reward-Function)\n- [Modular meta-learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FModular-meta-learning)\n- [Hierarchical RL Using an Ensemble of Proprioceptive Periodic Policies](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FHierarchical-RL-Using-an-Ensemble-of-Proprioceptive-Periodic-Policies)\n- [Efficient Lifelong Learning with A-GEM](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FEfficient-Lifelong-Learning-with-A-GEM)\n- [Pre-training Graph Neural Networks with Kernels](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FPre-training-Graph-Neural-Networks-with-Kernels)\n- [Smooth Loss Functions for Deep Top-k Classification](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FSmooth-Loss-Functions-for-Deep-Top-k-Classification)\n- [Hindsight Experience Replay](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FHindsight-Experience-Replay)\n- [Representation Tradeoffs for Hyperbolic 
Embeddings](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FRepresentation-Tradeoffs-for-Hyperbolic-Embeddings)\n- [Learned Optimizers that Scale and Generalize](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FLearned-Optimizers-that-Scale-and-Generalize)\n- [One-shot Learning with Memory-Augmented Neural Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FOne-shot-Learning-with-Memory-Augmented-Neural-Networks)\n- [BabyAI - First Steps Towards Grounded Language Learning With a Human In the Loop](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FBabyAI-First-Steps-Towards-Grounded-Language-Learning-With-a-Human-In-the-Loop)\n- [Poincaré Embeddings for Learning Hierarchical Representations](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FPoincare-Embeddings-for-Learning-Hierarchical-Representations)\n- [When Recurrent Models Don’t Need To Be Recurrent](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FWhen-Recurrent-Models-Don-t-Need-To-Be-Recurrent)\n- [HoME - a Household Multimodal Environment](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FHoME-a-Household-Multimodal-Environment)\n- [Emergence of Grounded Compositional Language in Multi-Agent Populations](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FEmergence-of-Grounded-Compositional-Language-in-Multi-Agent-Populations)\n- [A Semantic Loss Function for Deep Learning with Symbolic Knowledge](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FA-Semantic-Loss-Function-for-Deep-Learning-with-Symbolic-Knowledge)\n- [Hierarchical Graph Representation Learning with Differentiable Pooling](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FHierarchical-Graph-Representation-Learning-with-Differentiable-Pooling)\n- [Imagination-Augmented Agents for Deep Reinforcement Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FImagination-Augmented-Agents-for-Deep-Reinforcement-Learning)\n- 
[Kronecker Recurrent Units](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FKronecker-Recurrent-Units)\n- [Learning Independent Causal Mechanisms](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FLearning-Independent-Causal-Mechanisms)\n- [Memory-based Parameter Adaptation](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FMemory-Based-Parameter-Adaption)\n- [Born Again Neural Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FBorn-Again-Neural-Networks)\n- [Net2Net-Accelerating Learning via Knowledge Transfer](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FNet2Net-Accelerating-Learning-via-Knowledge-Transfer)\n- [Learning to Count Objects in Natural Images for Visual Question Answering](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FLearning-to-Count-Objects-in-Natural-Images-for-Visual-Question-Answering)\n- [Neural Message Passing for Quantum Chemistry](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FNeural-Message-Passing-for-Quantum-Chemistry)\n- [Unsupervised Learning by Predicting Noise](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FUnsupervised-Learning-By-Predicting-Noise)\n- [The Lottery Ticket Hypothesis - Training Pruned Neural Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FThe-Lottery-Ticket-Hypothesis-Training-Pruned-Neural-Networks)\n- [Cyclical Learning Rates for Training Neural Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FCyclical-Learning-Rates-for-Training-Neural-Networks)\n- [Improving Information Extraction by Acquiring External Evidence with Reinforcement Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FImproving-Information-Extraction-by-Acquiring-External-Evidence-with-Reinforcement-Learning)\n- [An Empirical Investigation of Catastrophic Forgetting in Gradient-Based Neural 
Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FAn-Empirical-Investigation-of-Catastrophic-Forgetting-in-Gradient-Based-Neural-Networks)\n- [Learning an SAT Solver from Single-Bit Supervision](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FLearning-a-SAT-Solver-from-Single-Bit-Supervision)\n- [Neural Relational Inference for Interacting Systems](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FNeural-Relational-Inference-for-Interacting-Systems)\n- [Stylistic Transfer in Natural Language Generation Systems Using Recurrent Neural Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FStylistic-Transfer-in-Natural-Language-Generation-Systems-Using-Recurrent-Neural-Networks)\n- [Get To The Point: Summarization with Pointer-Generator Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FGet-To-The-Point-Summarization-with-Pointer-Generator-Networks)\n- [StarSpace - Embed All The Things!](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FStarSpace-Embed-All-The-Things)\n- [Emotional Chatting Machine - Emotional Conversation Generation with Internal and External Memory](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FEmotional-Chatting-Machine-Emotional-Conversation-Generation-with-Internal-and-External-Memory)\n- [Exploring Models and Data for Image Question Answering](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FExploring-Models-and-Data-for-Image-Question-Answering)\n- [How transferable are features in deep neural networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FHow-transferable-are-features-in-deep-neural-networks)\n- [Distilling the Knowledge in a Neural Network](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FDistilling-the-Knowledge-in-a-Neural-Network)\n- [Revisiting Semi-Supervised Learning with Graph 
Embeddings](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FRevisiting-Semi-Supervised-Learning-with-Graph-Embeddings)\n- [Two-Stage Synthesis Networks for Transfer Learning in Machine Comprehension](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FTwo-Stage-Synthesis-Networks-for-Transfer-Learning-in-Machine-Comprehension)\n- [Higher-order organization of complex networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FHigher-order-organization-of-complex-networks)\n- [Network Motifs - Simple Building Blocks of Complex Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FNetwork-Motifs-Simple-Building-Blocks-of-Complex-Networks)\n- [Word Representations via Gaussian Embedding](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FWord-Representations-via-Gaussian-Embedding)\n- [HARP - Hierarchical Representation Learning for Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FHARP-Hierarchical-Representation-Learning-for-Networks)\n- [Swish - a Self-Gated Activation Function](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FSwish-A-self-gated-activation-function)\n- [Reading Wikipedia to Answer Open-Domain Questions](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FReading-Wikipedia-to-Answer-Open-Domain-Questions)\n- [Task-Oriented Query Reformulation with Reinforcement Learning](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FTask-Oriented-Query-Reformulation-with-Reinforcement-Learning)\n- [Refining Source Representations with Relation Networks for Neural Machine Translation](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FRefining-Source-Representations-with-Relation-Networks-for-Neural-Machine-Translation)\n- [Pointer Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FPointer-Networks)\n- [Learning to Compute Word Embeddings On the 
Fly](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FLearning-to-Compute-Word-Embeddings-On-the-Fly)\n- [R-NET - Machine Reading Comprehension with Self-matching Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FR-NET-Machine-Reading-Comprehension-with-Self-matching-Networks)\n- [ReasoNet - Learning to Stop Reading in Machine Comprehension](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FReasoNet-Learning-to-Stop-Reading-in-Machine-Comprehension)\n- [Principled Detection of Out-of-Distribution Examples in Neural Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FPrincipled-Detection-of-Out-of-Distribution-Examples-in-Neural-Networks)\n- [Ask Me Anything: Dynamic Memory Networks for Natural Language Processing](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FAsk-Me-Anything-Dynamic-Memory-Networks-for-Natural-Language-Processing)\n- [One Model To Learn Them All](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FOne-Model-To-Learn-Them-All)\n- [Two\u002FToo Simple Adaptations of Word2Vec for Syntax Problems](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FTwo-Too-Simple-Adaptations-of-Word2Vec-for-Syntax-Problems)\n- [A Decomposable Attention Model for Natural Language Inference](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FA-Decomposable-Attention-Model-for-Natural-Language-Inference)\n- [A Fast and Accurate Dependency Parser using Neural Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FA-Fast-and-Accurate-Dependency-Parser-using-Neural-Networks)\n- [Neural Module Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FNeural-Module-Networks)\n- [Making the V in VQA Matter: Elevating the Role of Image Understanding in Visual Question Answering](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FMaking-the-V-in-VQA-Matter-Elevating-the-Role-of-Image-Understanding-in-Visual-Question-Answering)\n- [Conditional 
Similarity Networks](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FConditional-Similarity-Networks)\n- [Simple Baseline for Visual Question Answering](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FSimple-Baseline-for-Visual-Question-Answering)\n- [VQA: Visual Question Answering](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002FVQA-Visual-Question-Answering)\n- [Learning to Generate Reviews and Discovering Sentiment](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F634dbe1aa678188399254bb3d0078e1d)\n- [Seeing the Arrow of Time](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F828d8de0034a350d97738bbedadc9373)\n- [End-to-end optimization of goal-driven and visually grounded dialogue systems](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fbbbc739e6815ab6217e0cf0a8f706786)\n- [GuessWhat?! Visual object discovery through multi-modal dialogue](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F2418238e6aefd7b1e8c922cda9e10488)\n- [Semantic Parsing via Paraphrasing](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F93c96d7dd0488d0d00bd7078889dd6f6)\n- [Traversing Knowledge Graphs in Vector Space](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fe8e6213906ec2642f27b1aca3a6201c6)\n- [PPDB: The Paraphrase Database](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Ffa1f387f084355dfafdf7550b1899af6)\n- [NewsQA: A Machine Comprehension Dataset](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fc47f0d5c1dfe60ce5da0dd8241e506ea)\n- [A Persona-Based Neural Conversation Model](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F8ad464e7d0ea4c7c6ed5189ac4e44095)\n- [“Why Should I Trust You?” Explaining the Predictions of Any Classifier](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fbd744ab6c17a2289ca139ea586d1d65e)\n- [Conditional Generative Adversarial Nets](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F5d726334de3014defeeb701099a3b4b3)\n- 
[Addressing the Rare Word Problem in Neural Machine Translation](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fa18fe14b74c7292129c6c5ecb37f33b5)\n- [Achieving Open Vocabulary Neural Machine Translation with Hybrid Word-Character Models](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fd32e665b27696ce0436c79174a136410)\n- [Recursive Deep Models for Semantic Compositionality Over a Sentiment Treebank](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F6ca136088f58d24f7b08056ec8b97595)\n- [Improving Word Representations via Global Context and Multiple Word Prototypes](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F1be86a9bcbd7f120ce55994dcd932bbf)\n- [Learning Phrase Representations using RNN Encoder–Decoder for Statistical Machine Translation](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F9dccec626e68e495fd4577ecdca36b7b)\n- [Skip-Thought Vectors](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F4a4eb32de8cabf21bda9a4ada15c46e8)\n- [Deep Convolutional Generative Adversarial Nets](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Faa79796c70565e3761e86d0f932a3de5)\n- [Generative Adversarial Nets](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F1f9dc0444142be8bd8a7404a226880eb)\n- [A Roadmap towards Machine Intelligence](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F9928673525b1713c2d41fd0fac38f81f)\n- [Smart Reply: Automated Response Suggestion for Email](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fda411f15b71ed6a664f9d5ac46409b42)\n- [Convolutional Neural Network For Sentence Classification](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F9ae6d2364c278c97b1b2f4ec53255c56)\n- [Conditional Image Generation with PixelCNN Decoders](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F3cc7066ce7de051d769908b8fab11990)\n- [Pixel Recurrent Neural Networks](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fe741ebd5ba0e0fc0f49d7836e30891a7)\n- [Deep 
Inside Convolutional Networks: Visualising Image Classification Models and Saliency Maps](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Ff48da7f77418aa22751ffed115779126)\n- [Bag of Tricks for Efficient Text Classification](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F432746f15889f7f4a798bf7f9ec4b7d8)\n- [GloVe: Global Vectors for Word Representation](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fefea5a42d17e0fcf18374df8e3e4b3e8)\n- [SimRank: A Measure of Structural-Context Similarity](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F6329486212643fd61f58a5a3eb5abb3c)\n- [How NOT To Evaluate Your Dialogue System: An Empirical Study of Unsupervised Evaluation Metrics for Dialogue Response Generation](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Ff05748b6339ceff26420ceecfc79d58d)\n- [Neural Generation of Regular Expressions from Natural Language with Minimal Domain Knowledge](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F004d803bc021f579d4aa3b24cec5b994)\n- [WikiReading : A Novel Large-scale Language Understanding Task over Wikipedia](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F2788ac9dbcac5523cb8b2d0a3d70f2d2)\n- [WikiQA: A challenge dataset for open-domain question answering](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F7cf3677ff2b0028a33e6702fbd260bc5)\n- [Teaching Machines to Read and Comprehend](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fa863eb099bb7a1ab4831cd37bffffb04)\n- [Evaluating Prerequisite Qualities for Learning End-to-end Dialog Systems](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F5e7c40f61c18502eec2809e5cf1ead6b)\n- [Recurrent Neural Network Regularization](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fd66245692b276cd0b6dcbaf43e4211db)\n- [Deep Math: Deep Sequence Models for Premise Selection](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fd8387256f2bb08f39509600f9d7db498)\n- [A Neural Conversational 
Model](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fec6835964df0e49fdef0459c8b334b94)\n- [Key-Value Memory Networks for Directly Reading Documents](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fa5e0baa075b4a917c0a69edc575772a8)\n- [Advances In Optimizing Recurrent Networks](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F75dc31e3c7999ad4a1edf4f289deaa88)\n- [Query Regression Networks for Machine Comprehension](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F93caa283af3c151372f4be86ed4c4b99)\n- [Sequence to Sequence Learning with Neural Networks](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fa2915921d7d0ac5cfd0e379025acfb9f)\n- [The Difficulty of Training Deep Architectures and the Effect of Unsupervised Pre-Training](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fe3608ccf262d6e5a6b537128c917c92https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fbbbc739e6815ab6217e0cf0a8f706786c)\n- [Question Answering with Subgraph Embeddings](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fb65e299ff5f79a4f9da4a2e9281a0676)\n- [Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F12691b76addf149a224c24ab64b5bdcc)\n- [Visualizing Large-scale and High-dimensional Data](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F6c267cf6122399e9be36491a2f510641)\n- [Visualizing Data using t-SNE](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F2153e01d026712ac94a2b4928a2dbf3e)\n- [Curriculum Learning](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F7e4e1c9817c46e3cb1932f62aac8806b)\n- [End-To-End Memory Networks](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F17881da05d9ee1f6539b2baa8067a6ef)\n- [Memory Networks](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fc7a03a47b3d709e7c592fa7011b0f33e)\n- [Learning To 
Execute](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fb44b29b86cdfe1b6bae4286253f76350)\n- [Distributed GraphLab: A Framework for Machine Learning and Data Mining in the Cloud](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F1bb05a7134c27cffa1e2f57dc6b1c136)\n- [Large Scale Distributed Deep Networks](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F5733fffe6b1a268998bd93f29ec9fbeb)\n- [Efficient Estimation of Word Representations in Vector Space](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F176a283e2c158a75a0a6)\n- [Regularization and variable selection via the elastic net](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F1cd5d136c8ca30432de5)\n- [Fractional Max-Pooling](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fccfe3134f46fd3738aa0)\n- [TAO: Facebook’s Distributed Data Store for the Social Graph](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F1c91987c2a4a098fa9f1)\n- [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F4441216a298df0fe6ab0)\n- [The Unified Logging Infrastructure for Data Analytics at Twitter](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F0083f8a2d276e026b15c)\n- [A Few Useful Things to Know about Machine Learning](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002F5c2cdfc269bf8aa50b72)\n- [Hive – A Petabyte Scale Data Warehouse Using Hadoop](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fb0651ade0dc39aeb7cfd)\n- [Kafka: a Distributed Messaging System for Log Processing](https:\u002F\u002Fmedium.com\u002F@shagun\u002Fnotes-about-kafka-cc6c1b5c5025)\n- [Power-law distributions in Empirical data](https:\u002F\u002Fgithub.com\u002Fshagunsodhani\u002Fpowerlaw\u002Fblob\u002Fmaster\u002Fpaper\u002FREADME.md)\n- [Pregel: A System for Large-Scale Graph Processing](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Faf9677bdc79bb34be698)\n- 
[GraphX: Unifying Data-Parallel and Graph-Parallel Analytics](https:\u002F\u002Fgist.github.com\u002Fshagunsodhani\u002Fc72bc1928aeef40280c9)\n- [Pig Latin: A Not-So-Foreign Language for Data Processing](https:\u002F\u002Fmedium.com\u002F@shagun\u002Fpig-latin-e840ac23db93)\n- [Resilient Distributed Datasets: A Fault-Tolerant Abstraction for In-Memory Cluster Computing](https:\u002F\u002Fmedium.com\u002F@shagun\u002Fresilient-distributed-datasets-97c28c3a9411)\n- [MapReduce: Simplified Data Processing on Large Clusters](https:\u002F\u002Fmedium.com\u002F@shagun\u002Fmapreduce-1c88f8a7c3d2)\n- [BigTable: A Distributed Storage System for Structured Data](https:\u002F\u002Fmedium.com\u002F@shagun\u002Fbigtable-bf580262f030)\n- [Spark SQL: Relational Data Processing in Spark](https:\u002F\u002Fmedium.com\u002F@shagun\u002Fspark-sql-68a6fac271fe)\n- [Spark: Cluster Computing with Working Sets](https:\u002F\u002Fmedium.com\u002F@shagun\u002Fspark-8ca626d55d21)\n- [Fast Data in the Era of Big Data: Twitter’s Real-Time Related Query Suggestion Architecture](https:\u002F\u002Fmedium.com\u002F@shagun\u002Ffast-data-in-the-era-of-big-data-e6208e6d3575)\n- [Scaling Memcache at Facebook](https:\u002F\u002Fmedium.com\u002F@shagun\u002Fscaling-memcache-at-facebook-1ba77d71c082)\n- [Dynamo: Amazon’s Highly Available Key-value Store](https:\u002F\u002Fmedium.com\u002F@shagun\u002Fdynamo-9665c22a1ddb)\n- [f4 : Facebook's Warm BLOB Storage System](https:\u002F\u002Fmedium.com\u002F@shagun\u002Ff4-cba2f141cb0c)\n- [A Theoretician’s Guide to the Experimental Analysis of Algorithms](https:\u002F\u002Fmedium.com\u002F@shagun\u002Fdos-and-dont-s-of-research-fe33322c7aff)\n- [Cuckoo Hashing](https:\u002F\u002Fmedium.com\u002F@shagun\u002Fcuckoo-hashing-eb160dfab804)\n- [Never Ending Learning](https:\u002F\u002Fmedium.com\u002F@shagun\u002Fnever-ending-learning-e7b78006e713)","# papers-I-read 快速上手指南\n\n`papers-I-read` 并非一个需要安装运行的软件工具或代码库，而是一个由 Shagun Sodhani 
维护的**学术论文阅读清单与笔记仓库**。该项目记录了作者“每周一篇论文”计划中的研究成果、摘要及个人笔记，涵盖深度学习、强化学习、系统设计及持续学习等前沿领域。\n\n因此，本指南将指导您如何直接访问、浏览及利用该资源进行学习，无需执行传统的安装步骤。\n\n## 环境准备\n\n由于本项目本质为静态文档集合，您仅需具备以下基础环境即可开始阅读：\n\n*   **操作系统**：任意支持现代浏览器的系统（Windows, macOS, Linux）。\n*   **前置依赖**：\n    *   现代网页浏览器（推荐 Chrome, Edge, Firefox）。\n    *   （可选）Git：如果您希望克隆仓库到本地进行离线阅读或贡献。\n*   **网络环境**：\n    *   项目托管于个人域名 (`shagunsodhani.com`) 及 GitHub。\n    *   **国内访问建议**：若直接访问论文链接速度较慢，建议通过 **GitHub 镜像站**（如 `ghproxy.com`）加速克隆仓库，或使用学术资源聚合平台搜索对应论文标题获取原文。\n\n## 安装\u002F获取步骤\n\n您有两种方式获取这些内容：在线直接阅读或克隆到本地。\n\n### 方式一：在线直接阅读（推荐）\n\n直接访问项目维护的个人网站，这是体验最佳的方式，页面已针对阅读优化。\n\n1.  打开浏览器。\n2.  访问项目主页：[https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002F](https:\u002F\u002Fshagunsodhani.com\u002Fpapers-I-read\u002F)\n3.  在列表中点击感兴趣的论文标题（例如 *Toolformer* 或 *GPipe*），即可跳转至对应的详细笔记页面。\n\n### 方式二：本地克隆（适合离线查阅）\n\n如果您希望将笔记保存到本地，可以使用 Git 克隆仓库。\n\n```bash\n# 使用标准 Git 命令克隆\ngit clone https:\u002F\u002Fgithub.com\u002Fshagunsodhani\u002Fpapers-I-read.git\n\n# 【国内加速方案】如果直接克隆速度慢，可使用 ghproxy 加速\ngit clone https:\u002F\u002Fghproxy.com\u002Fhttps:\u002F\u002Fgithub.com\u002Fshagunsodhani\u002Fpapers-I-read.git\n```\n\n克隆完成后，进入目录查看文件结构（通常为 Markdown 文件或重定向配置）：\n\n```bash\ncd papers-I-read\nls\n```\n\n## 基本使用\n\n本项目的核心用法是**按需检索与学习**。以下是具体的使用流程示例：\n\n### 1. 浏览论文列表\n在项目主页或本地 `README.md` 中，您会看到按主题分类的论文列表。例如，若您关注**大模型工具使用**，可寻找：\n*   *Toolformer - Language Models Can Teach Themselves to Use Tools*\n\n若您关注**分布式系统**，可寻找：\n*   *Cassandra - a decentralized structured storage system*\n*   *CAP twelve years later - How the rules have changed*\n\n### 2. 阅读深度笔记\n点击链接进入具体页面后，您将获得以下内容：\n*   **论文原文链接**：直接跳转至 arXiv 或会议官网。\n*   **核心摘要**：作者用通俗语言总结的论文核心思想。\n*   **关键洞察**：关于模型架构、训练技巧（如 *GradNorm*, *Mixup*）或数学推导的个人注解。\n*   **代码实现线索**：部分笔记会提及相关的开源实现或关键算法逻辑。\n\n### 3. 
搜索特定技术点\n如果您在本地克隆了仓库，可以使用命令行快速搜索特定技术关键词的笔记。例如，查找所有关于“持续学习 (Continual Learning)\"的笔记：\n\n```bash\n# 在本地仓库中搜索包含 \"Continual Learning\" 的文件\ngrep -r \"Continual Learning\" .\n```\n\n或者搜索特定的算法名称，如 \"MAML\"：\n\n```bash\ngrep -r \"MAML\" .\n```\n\n这将帮助您快速定位到 *How to train your MAML* 或 *Rapid Learning or Feature Reuse?* 等相关笔记进行深入研读。","某大厂推荐算法团队的资深工程师正在为新一代视频推荐系统寻找能够平衡大规模训练效率与解决“灾难性遗忘”问题的前沿方案。\n\n### 没有 papers-I-read 时\n- **信息检索低效**：需要在 arXiv、Google Scholar 等多个平台反复搜索\"Continual Learning\"或\"Pipeline Parallelism\"等关键词，耗费数天筛选高价值论文。\n- **核心观点难提炼**：面对《GPipe》或《Anatomy of Catastrophic Forgetting》等长篇幅技术文档，难以快速抓住其针对分布式训练或模型遗忘的具体优化策略。\n- **知识体系碎片化**：读过的论文笔记散落在个人博客、本地文档和书签中，无法将《Deep Neural Networks for YouTube Recommendations》与最新的迁移学习理论建立关联。\n- **落地参考缺失**：缺乏像《Practical Lessons from Predicting Clicks on Ads at Facebook》这样包含工业界实战教训的整理，导致方案设计容易重蹈覆辙。\n\n### 使用 papers-I-read 后\n- **精准直达主题**：直接通过目录定位到《GPipe - Easy Scaling with Micro-Batch Pipeline Parallelism》，立即获取微批次流水线并行的核心实现思路。\n- **摘要辅助决策**：利用仓库中提供的《Remembering for the Right Reasons》等论文的精选摘要，快速理解如何利用解释性机制减少灾难性遗忘，节省 80% 的阅读时间。\n- **系统化知识串联**：借助作者对《Toolformer》到《HyperNetworks》等一系列文章的连续整理，自然构建起从工具使用到持续学习的完整技术演进视图。\n- **避坑指南现成**：直接参考《Searching for Build Debt》和《CAP twelve years later》中的实战经验，在架构设计阶段就规避了分布式一致性和技术债务管理的常见陷阱。\n\npapers-I-read 将分散的学术孤岛转化为结构化的工业界实战地图，让工程师能从“大海捞针”转变为“站在巨人肩膀上”进行高效创新。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fshagunsodhani_papers-I-read_450eda90.png","shagunsodhani","Shagun Sodhani","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002Fshagunsodhani_8aea818f.jpg","@FacebookResearch. 
Previously \r\n@mila-iqia, @MicrosoftResearch, @AdobeResearch, @IITRoorkee, @gradientpub.","@facebook","Virtual",null,"https:\u002F\u002Fshagunsodhani.github.io","https:\u002F\u002Fgithub.com\u002Fshagunsodhani",[82,86,90,94,98],{"name":83,"color":84,"percentage":85},"HTML","#e34c26",77.1,{"name":87,"color":88,"percentage":89},"Less","#1d365d",10.8,{"name":91,"color":92,"percentage":93},"SCSS","#c6538c",5.5,{"name":95,"color":96,"percentage":97},"Shell","#89e051",3.3,{"name":99,"color":100,"percentage":97},"CSS","#663399",957,80,"2026-04-18T19:57:24",1,"","未说明",{"notes":108,"python":106,"dependencies":109},"该仓库并非可执行的 AI 工具或代码库，而是一个个人阅读论文列表及摘要的文档集合。README 中列出的均为学术论文标题和链接，不包含任何源代码、安装脚本、环境配置文件（如 requirements.txt）或运行指令，因此无需特定的操作系统、GPU、内存或 Python 环境即可浏览内容。",[],[14,111],"其他",[113,114,115,116,117,118,119],"artificial-intelligence","machine-learning","deep-learning","neural-network","analytics","research-paper","computer-science","2026-03-27T02:49:30.150509","2026-04-20T22:49:42.145722",[],[]]