[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-nfmcclure--tensorflow_cookbook":3,"tool-nfmcclure--tensorflow_cookbook":61},[4,18,26,36,44,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":10,"last_commit_at":24,"category_tags":25,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":32,"last_commit_at":33,"category_tags":34,"status":17},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",152630,2,"2026-04-12T23:33:54",[14,13,35],"语言模型",{"id":37,"name":38,"github_repo":39,"description_zh":40,"stars":41,"difficulty_score":32,"last_commit_at":42,"category_tags":43,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",108322,"2026-04-10T11:39:34",[14,15,13],{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":32,"last_commit_at":50,"category_tags":51,"status":17},6121,"gemini-cli","google-gemini\u002Fgemini-cli","gemini-cli 是一款由谷歌推出的开源 AI 命令行工具，它将强大的 Gemini 大模型能力直接集成到用户的终端环境中。对于习惯在命令行工作的开发者而言，它提供了一条从输入提示词到获取模型响应的最短路径，无需切换窗口即可享受智能辅助。\n\n这款工具主要解决了开发过程中频繁上下文切换的痛点，让用户能在熟悉的终端界面内直接完成代码理解、生成、调试以及自动化运维任务。无论是查询大型代码库、根据草图生成应用，还是执行复杂的 Git 操作，gemini-cli 
tensorflow_cookbook is the official companion code repository for the book *TensorFlow Machine Learning Cookbook* by Nick McClure. Through a series of progressive, hands-on recipes, it helps developers quickly master the core usage of the TensorFlow framework.

Machine-learning algorithms are theoretically dense and the framework has a steep learning curve; this project addresses both by turning abstract concepts into runnable code snippets. Coverage spans basic tensor operations and variable management; classical algorithms such as linear regression and support vector machines; deep-learning topics including neural networks, natural language processing (NLP), convolutional neural networks (CNNs), and recurrent neural networks (RNNs); and guidance on taking models to production. The "cookbook" structure lets you look up the implementation for a specific task the way you would consult a recipe, and see directly how the computational graphs appear in TensorBoard.

The material suits developers with some programming background, data scientists, and researchers who want hands-on TensorFlow practice. Whether you are a beginner building a systematic foundation or a seasoned engineer looking up a reference implementation of a particular algorithm, tensorflow_cookbook offers clear, practical code guidance for learning and applying TensorFlow.

---

<img src="https://oss.gittoolsai.com/images/nfmcclure_tensorflow_cookbook_readme_4ff856cf3ddf.jpg" width="400" height="250" />

# [TensorFlow Machine Learning Cookbook](https://www.packtpub.com/big-data-and-business-intelligence/tensorflow-machine-learning-cookbook)

## [A Packt Publishing Book](https://www.packtpub.com/big-data-and-business-intelligence/tensorflow-machine-learning-cookbook)

### By Nick McClure

=================

Build: [![Build Status](https://travis-ci.org/nfmcclure/tensorflow_cookbook.svg?branch=master)](https://travis-ci.org/nfmcclure/tensorflow_cookbook)
=================


Table of Contents
=================

  * [Ch 1: Getting Started with TensorFlow](#ch-1-getting-started-with-tensorflow)
  * [Ch 2: The TensorFlow Way](#ch-2-the-tensorflow-way)
  * [Ch 3: Linear Regression](#ch-3-linear-regression)
  * [Ch 4: Support Vector Machines](#ch-4-support-vector-machines)
  * [Ch 5: Nearest Neighbor Methods](#ch-5-nearest-neighbor-methods)
  * [Ch 6: Neural Networks](#ch-6-neural-networks)
  * [Ch 7: Natural Language Processing](#ch-7-natural-language-processing)
  * [Ch 8: Convolutional Neural Networks](#ch-8-convolutional-neural-networks)
  * [Ch 9: Recurrent Neural Networks](#ch-9-recurrent-neural-networks)
  * [Ch 10: Taking TensorFlow to Production](#ch-10-taking-tensorflow-to-production)
  * [Ch 11: More with TensorFlow](#ch-11-more-with-tensorflow)

---

## [Ch 1: Getting Started with TensorFlow](01_Introduction#ch-1-getting-started-with-tensorflow)

This chapter introduces the main objects and concepts in TensorFlow. We also introduce how to access the data for the rest of the book and provide additional resources for learning about TensorFlow. A minimal graph-and-session sketch follows the list below.

 1. [General Outline of TF Algorithms](01_Introduction/01_How_TensorFlow_Works#introduction-to-how-tensorflow-graphs-work)
  * Here we introduce TensorFlow and the general outline of how most TensorFlow algorithms work.
 2. [Creating and Using Tensors](01_Introduction/02_Creating_and_Using_Tensors#creating-and-using-tensors)
  * How to create and initialize tensors in TensorFlow. We also depict how these operations appear in Tensorboard.
 3. [Using Variables and Placeholders](01_Introduction/03_Using_Variables_and_Placeholders#variables-and-placeholders)
  * How to create and use variables and placeholders in TensorFlow. We also depict how these operations appear in Tensorboard.
 4. [Working with Matrices](01_Introduction/04_Working_with_Matrices#working-with-matrices)
  * Understanding how TensorFlow can work with matrices is crucial to understanding how the algorithms work.
 5. [Declaring Operations](01_Introduction/05_Declaring_Operations#declaring-operations)
  * How to use various mathematical operations in TensorFlow.
 6. [Implementing Activation Functions](01_Introduction/06_Implementing_Activation_Functions#activation-functions)
  * Activation functions are unique functions that TensorFlow has built in for your use in algorithms.
 7. [Working with Data Sources](01_Introduction/07_Working_with_Data_Sources#data-source-information)
  * Here we show how to access all the various required data sources in the book. There are also links describing the data sources and where they come from.
 8. [Additional Resources](01_Introduction/08_Additional_Resources#additional-resources)
  * Mostly official resources and papers. The papers are TensorFlow papers or Deep Learning resources.
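Below is a minimal sketch of that workflow, assuming the TensorFlow 1.x graph-and-session API the book targets (under TensorFlow 2.x the same calls live in `tf.compat.v1`); the shapes and values are illustrative, not taken from a specific recipe.

```python
# A minimal Chapter 1 sketch (TF 1.x API): a variable, a placeholder,
# and an activation function evaluated inside a session.
import tensorflow as tf

my_var = tf.Variable(tf.random_normal([3, 4]))         # trainable variable
x_data = tf.placeholder(tf.float32, shape=[None, 4])   # fed at run time

# Ops only describe the computational graph; a session executes it.
activation = tf.nn.relu(tf.matmul(x_data, tf.transpose(my_var)))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())        # initialize variables
    print(sess.run(activation, feed_dict={x_data: [[1., 2., 3., 4.]]}))
```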
src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_fc50b8095667.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"02_TensorFlow_Way\u002F08_Evaluating_Models#evaluating-models\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_ac0488f32e3f.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\nAfter we have established the basic objects and methods in TensorFlow, we now want to establish the components that make up TensorFlow algorithms.  We start by introducing computational graphs, and then move to loss functions and back propagation.  We end with creating a simple classifier and then show an example of evaluating regression and classification algorithms.\n\n 1. [One Operation as a Computational Graph](02_TensorFlow_Way\u002F01_Operations_as_a_Computational_Graph#operations-as-a-computational-graph)\n  * We show how to create an operation on a computational graph and how to visualize it using Tensorboard.\n 2. [Layering Nested Operations](02_TensorFlow_Way\u002F02_Layering_Nested_Operations#multiple-operations-on-a-computational-graph)\n  * We show how to create multiple operations on a computational graph and how to visualize them using Tensorboard.\n 3. [Working with Multiple Layers](02_TensorFlow_Way\u002F03_Working_with_Multiple_Layers#working-with-multiple-layers)\n  * Here we extend the usage of the computational graph to create multiple layers and show how they appear in Tensorboard.\n 4. [Implementing Loss Functions](02_TensorFlow_Way\u002F04_Implementing_Loss_Functions#implementing-loss-functions)\n  * In order to train a model, we must be able to evaluate how well it is doing. This is given by loss functions. We plot various loss functions and talk about the benefits and limitations of some.\n 5. [Implementing Back Propagation](02_TensorFlow_Way\u002F05_Implementing_Back_Propagation#implementing-back-propagation)\n  * Here we show how to use loss functions to iterate through data and back propagate errors for regression and classification.\n 6. [Working with Stochastic and Batch Training](02_TensorFlow_Way\u002F06_Working_with_Batch_and_Stochastic_Training#working-with-batch-and-stochastic-training)\n  * TensorFlow makes it easy to use both batch and stochastic training. We show how to implement both and talk about the benefits and limitations of each.\n 7. [Combining Everything Together](02_TensorFlow_Way\u002F07_Combining_Everything_Together#combining-everything-together)\n  * We now combine everything together that we have learned and create a simple classifier.\n 8. [Evaluating Models](02_TensorFlow_Way\u002F08_Evaluating_Models#evaluating-models)\n  * Any model is only as good as it's evaluation.  
Here we show two examples of (1) evaluating a regression algorithm and (2) a classification algorithm.
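As a rough sketch of how those components fit together (TF 1.x style; the toy data, batch size, and learning rate are illustrative assumptions), a single multiplication op, an L2 loss, and gradient-descent back propagation over random batches look like this:

```python
# A toy Chapter 2 pipeline: fit A so that A * x matches a target of 10.
import numpy as np
import tensorflow as tf

x_vals = np.random.normal(1.0, 0.1, 100).astype(np.float32)
y_vals = np.repeat(10.0, 100).astype(np.float32)

x_data = tf.placeholder(tf.float32, shape=[None])
y_target = tf.placeholder(tf.float32, shape=[None])
A = tf.Variable(tf.constant(4.0))

output = tf.multiply(x_data, A)                        # one graph operation
loss = tf.reduce_mean(tf.square(output - y_target))    # L2 loss
train_step = tf.train.GradientDescentOptimizer(0.02).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(100):                               # batch training
        idx = np.random.choice(100, size=20)
        sess.run(train_step, feed_dict={x_data: x_vals[idx],
                                        y_target: y_vals[idx]})
    print('A is approximately', sess.run(A))           # should approach 10
```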
## [Ch 3: Linear Regression](03_Linear_Regression#ch-3-linear-regression)

Here we show how to implement various linear regression techniques in TensorFlow. The first two sections show how to do standard matrix linear regression solving in TensorFlow. The remaining six sections depict how to implement various types of regression using computational graphs in TensorFlow. A sketch of the matrix-inverse recipe follows the list below.

 1. [Using the Matrix Inverse Method](03_Linear_Regression/01_Using_the_Matrix_Inverse_Method#using-the-matrix-inverse-method)
  * How to solve a 2D regression with a matrix inverse in TensorFlow.
 2. [Implementing a Decomposition Method](03_Linear_Regression/02_Implementing_a_Decomposition_Method#using-the-cholesky-decomposition-method)
  * Solving a 2D linear regression with Cholesky decomposition.
 3. [Learning the TensorFlow Way of Linear Regression](03_Linear_Regression/03_TensorFlow_Way_of_Linear_Regression#learning-the-tensorflow-way-of-regression)
  * Linear regression iterating through a computational graph with L2 Loss.
 4. [Understanding Loss Functions in Linear Regression](03_Linear_Regression/04_Loss_Functions_in_Linear_Regressions#loss-functions-in-linear-regression)
  * L2 vs L1 loss in linear regression. We talk about the benefits and limitations of both.
 5. [Implementing Deming Regression (Total Regression)](03_Linear_Regression/05_Implementing_Deming_Regression#implementing-deming-regression)
  * Deming (total) regression implemented in TensorFlow by changing the loss function.
 6. [Implementing Lasso and Ridge Regression](03_Linear_Regression/06_Implementing_Lasso_and_Ridge_Regression#implementing-lasso-and-ridge-regression)
  * Lasso and Ridge regression are ways of regularizing the coefficients. We implement both of these in TensorFlow via changing the loss functions.
 7. [Implementing Elastic Net Regression](03_Linear_Regression/07_Implementing_Elasticnet_Regression#implementing-elasticnet-regression)
  * Elastic net is a regularization technique that combines the L2 and L1 loss for coefficients. We show how to implement this in TensorFlow.
 8. [Implementing Logistic Regression](03_Linear_Regression/08_Implementing_Logistic_Regression#implementing-logistic-regression)
  * We implement logistic regression by the use of an activation function in our computational graph.
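For the first recipe, the closed-form solution w = (X^T X)^-1 X^T y can be sketched in a few lines (TF 1.x; the synthetic data here is an assumption for illustration):

```python
# Matrix-inverse linear regression sketch: w = (X^T X)^-1 X^T y.
import numpy as np
import tensorflow as tf

x_vals = np.linspace(0, 10, 100)
y_vals = x_vals + np.random.normal(0, 1, 100)          # slope ~1 plus noise
X = np.column_stack((x_vals, np.ones(100)))            # intercept column

X_t = tf.constant(X, dtype=tf.float32)
y_t = tf.constant(y_vals.reshape(-1, 1), dtype=tf.float32)
XtX_inv = tf.matrix_inverse(tf.matmul(tf.transpose(X_t), X_t))
w = tf.matmul(tf.matmul(XtX_inv, tf.transpose(X_t)), y_t)

with tf.Session() as sess:
    slope, intercept = sess.run(w).ravel()
    print(slope, intercept)                            # near 1.0 and 0.0
```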
\u003Ca href=\"04_Support_Vector_Machines\u002F06_Implementing_Multiclass_SVMs#implementing-multiclass-svms\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_a3c842385b79.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\nThis chapter shows how to implement various SVM methods with TensorFlow.  We first create a linear SVM and also show how it can be used for regression.  We then introduce kernels (RBF Gaussian kernel) and show how to use it to split up non-linear data. We finish with a multi-dimensional implementation of non-linear SVMs to work with multiple classes.\n\n\n 1. [Introduction](04_Support_Vector_Machines\u002F01_Introduction#support-vector-machine-introduction)\n  * We introduce the concept of SVMs and how we will go about implementing them in the TensorFlow framework.\n 2. [Working with Linear SVMs](04_Support_Vector_Machines\u002F02_Working_with_Linear_SVMs#working-with-linear-svms)\n  * We create a linear SVM to separate I. setosa based on sepal length and pedal width in the Iris data set.\n 3. [Reduction to Linear Regression](04_Support_Vector_Machines\u002F03_Reduction_to_Linear_Regression#svm-reduction-to-linear-regression)\n  * The heart of SVMs is separating classes with a line.  We change tweek the algorithm slightly to perform SVM regression.\n 4. [Working with Kernels in TensorFlow](04_Support_Vector_Machines\u002F04_Working_with_Kernels#working-with-kernels)\n  * In order to extend SVMs into non-linear data, we explain and show how to implement different kernels in TensorFlow.\n 5. [Implementing Non-Linear SVMs](04_Support_Vector_Machines\u002F05_Implementing_Nonlinear_SVMs#implementing-nonlinear-svms)\n  * We use the Gaussian kernel (RBF) to separate non-linear classes.\n 6. [Implementing Multi-class SVMs](04_Support_Vector_Machines\u002F06_Implementing_Multiclass_SVMs#implementing-multiclass-svms)\n  * SVMs are inherently binary predictors.  
## [Ch 5: Nearest Neighbor Methods](05_Nearest_Neighbor_Methods#ch-5-nearest-neighbor-methods)

Nearest Neighbor methods are a very popular ML algorithm. We show how to implement k-Nearest Neighbors, weighted k-Nearest Neighbors, and k-Nearest Neighbors with mixed distance functions. In this chapter we also show how to use the Levenshtein distance (edit distance) in TensorFlow, and use it to calculate the distance between strings. We end this chapter by showing how to use k-Nearest Neighbors for categorical prediction on the MNIST handwritten digit recognition task. A distance-plus-top-k sketch follows the list below.

 1. [Introduction](05_Nearest_Neighbor_Methods/01_Introduction#nearest-neighbor-methods-introduction)
  * We introduce the concepts and methods needed for performing k-Nearest Neighbors in TensorFlow.
 2. [Working with Nearest Neighbors](05_Nearest_Neighbor_Methods/02_Working_with_Nearest_Neighbors#working-with-nearest-neighbors)
  * We create a nearest neighbor algorithm that tries to predict housing worth (regression).
 3. [Working with Text Based Distances](05_Nearest_Neighbor_Methods/03_Working_with_Text_Distances#working-with-text-distances)
  * In order to use a distance function on text, we show how to use edit distances in TensorFlow.
 4. [Computing with Mixed Distance Functions](05_Nearest_Neighbor_Methods/04_Computing_with_Mixed_Distance_Functions#computing-with-mixed-distance-functions)
  * Here we implement scaling of the distance function by the standard deviation of the input feature for k-Nearest Neighbors.
 5. [Using Address Matching](05_Nearest_Neighbor_Methods/05_An_Address_Matching_Example#an-address-matching-example)
  * We use a mixed distance function to match addresses. We use numerical distance for zip codes, and string edit distance for street names. The street names are allowed to have typos.
 6. [Using Nearest Neighbors for Image Recognition](05_Nearest_Neighbor_Methods/06_Nearest_Neighbors_for_Image_Recognition#nearest-neighbors-for-image-recognition)
  * The MNIST digit image collection is a great data set for illustration of how to perform k-Nearest Neighbors for an image classification task.
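The common core of these recipes is a distance computation plus a top-k selection; here is a sketch for k-NN regression (TF 1.x; the feature count and k=4 are illustrative assumptions):

```python
# k-nearest-neighbor regression sketch: L1 distances, then average the
# targets of the k closest training points.
import tensorflow as tf

k = 4
x_train = tf.placeholder(tf.float32, shape=[None, 10])
y_train = tf.placeholder(tf.float32, shape=[None, 1])
x_test = tf.placeholder(tf.float32, shape=[10])         # one query point

distance = tf.reduce_sum(tf.abs(x_train - x_test), axis=1)   # L1 distance
_, top_idx = tf.nn.top_k(tf.negative(distance), k=k)         # k smallest
prediction = tf.reduce_mean(tf.gather(y_train, top_idx))     # mean target
```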
height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\nNeural Networks are very important in machine learning and growing in popularity due to the major breakthroughs in prior unsolved problems.  We must start with introducing 'shallow' neural networks, which are very powerful and can help us improve our prior ML algorithm results.  We start by introducing the very basic NN unit, the operational gate.  We gradually add more and more to the neural network and end with training a model to play tic-tac-toe.\n\n 1. [Introduction](06_Neural_Networks\u002F01_Introduction#neural-networks-introduction)\n  * We introduce the concept of neural networks and how TensorFlow is built to easily handle these algorithms.\n 2. [Implementing Operational Gates](06_Neural_Networks\u002F02_Implementing_an_Operational_Gate#implementing-an-operational-gate)\n  * We implement an operational gate with one operation. Then we show how to extend this to multiple nested operations.\n 3. [Working with Gates and Activation Functions](06_Neural_Networks\u002F03_Working_with_Activation_Functions#working-with-activation-functions)\n  * Now we have to introduce activation functions on the gates.  We show how different activation functions operate.\n 4. [Implementing a One Layer Neural Network](06_Neural_Networks\u002F04_Single_Hidden_Layer_Network#implementing-a-one-layer-neural-network)\n  * We have all the pieces to start implementing our first neural network.  We do so here with regression on the Iris data set.\n 5. [Implementing Different Layers](06_Neural_Networks\u002F05_Implementing_Different_Layers#implementing-different-layers)\n  * This section introduces the convolution layer and the max-pool layer.  We show how to chain these together in a 1D and 2D example with fully connected layers as well.\n 6. [Using Multi-layer Neural Networks](06_Neural_Networks\u002F06_Using_Multiple_Layers#using-multiple-layers)\n  * Here we show how to functionalize different layers and variables for a cleaner multi-layer neural network.\n 7. [Improving Predictions of Linear Models](06_Neural_Networks\u002F07_Improving_Linear_Regression#improving-linear-regression)\n  * We show how we can improve the convergence of our prior logistic regression with a set of hidden layers.\n 8. [Learning to Play Tic-Tac-Toe](06_Neural_Networks\u002F08_Learning_Tic_Tac_Toe#learning-to-play-tic-tac-toe)\n  * Given a set of tic-tac-toe boards and corresponding optimal moves, we train a neural network classification model to play.  
## [Ch 7: Natural Language Processing](07_Natural_Language_Processing#ch-7-natural-language-processing)

Natural Language Processing (NLP) is a way of processing textual information into numerical summaries, features, or models. In this chapter we will motivate and explain how to best deal with text in TensorFlow. We show how to implement the classic 'Bag-of-Words' and show that there may be better ways to embed text based on the problem at hand. There are neural network embeddings called Word2Vec (CBOW and Skip-Gram) and Doc2Vec. We show how to implement all of these in TensorFlow. An embedding-lookup sketch follows the list below.

 1. [Introduction](07_Natural_Language_Processing/01_Introduction#natural-language-processing-introduction)
  * We introduce methods for turning text into numerical vectors. We introduce the TensorFlow 'embedding' feature as well.
 2. [Working with Bag-of-Words](07_Natural_Language_Processing/02_Working_with_Bag_of_Words#working-with-bag-of-words)
  * Here we use TensorFlow to do a one-hot-encoding of words called bag-of-words. We use this method and logistic regression to predict if a text message is spam or ham.
 3. [Implementing TF-IDF](07_Natural_Language_Processing/03_Implementing_tf_idf#implementing-tf-idf)
  * We implement Term Frequency - Inverse Document Frequency (TF-IDF) with a combination of scikit-learn and TensorFlow. We perform logistic regression on TF-IDF vectors to improve on our spam/ham text-message predictions.
 4. [Working with Skip-Gram](07_Natural_Language_Processing/04_Working_With_Skip_Gram_Embeddings#working-with-skip-gram-embeddings)
  * Our first implementation of Word2Vec, called "skip-gram", on a movie review database.
 5. [Working with CBOW](07_Natural_Language_Processing/05_Working_With_CBOW_Embeddings#working-with-cbow-embeddings)
  * Next, we implement a form of Word2Vec called "CBOW" (Continuous Bag of Words) on a movie review database. We also introduce methods for saving and loading word embeddings.
 6. [Implementing Word2Vec Example](07_Natural_Language_Processing/06_Using_Word2Vec_Embeddings#using-word2vec-embeddings)
  * In this example, we use the prior saved CBOW word embeddings to improve on our TF-IDF logistic regression of movie review sentiment.
 7. [Performing Sentiment Analysis with Doc2Vec](07_Natural_Language_Processing/07_Sentiment_Analysis_With_Doc2Vec#sentiment-analysis-with-doc2vec)
  * Here, we introduce a Doc2Vec method (concatenation of doc and word embeddings) to improve our logistic model of movie review sentiment.
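The embedding feature the chapter leans on reduces to a trainable lookup table; here is a sketch (TF 1.x; the vocabulary size and dimension are illustrative assumptions):

```python
# Word-embedding lookup sketch: map integer word ids to dense vectors.
import tensorflow as tf

vocab_size, embed_dim = 10000, 100                      # illustrative sizes
embeddings = tf.Variable(
    tf.random_uniform([vocab_size, embed_dim], -1.0, 1.0))
word_ids = tf.placeholder(tf.int32, shape=[None])       # a sentence as ids
word_vectors = tf.nn.embedding_lookup(embeddings, word_ids)
sentence_vector = tf.reduce_mean(word_vectors, axis=0)  # simple averaging
```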
src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_9b9c1a057236.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\nConvolutional Neural Networks (CNNs) are ways of getting neural networks to deal with image data. CNN derive their name from the use of a convolutional layer that applies a fixed size filter across a larger image, recognizing a pattern in any part of the image. There are many other tools that they use (max-pooling, dropout, etc...) that we show how to implement with TensorFlow.  We also show how to retrain an existing architecture and take CNNs further with Stylenet and Deep Dream.\n\n 1. [Introduction](08_Convolutional_Neural_Networks\u002F01_Intro_to_CNN#introduction-to-convolutional-neural-networks)\n  * We introduce convolutional neural networks (CNN), and how we can use them in TensorFlow.\n 2. [Implementing a Simple CNN.](08_Convolutional_Neural_Networks\u002F02_Intro_to_CNN_MNIST#introduction-to-cnn-with-mnist)\n  * Here, we show how to create a CNN architecture that performs well on the MNIST digit recognition task.\n 3. [Implementing an Advanced CNN.](08_Convolutional_Neural_Networks\u002F03_CNN_CIFAR10#cifar-10-cnn)\n  * In this example, we show how to replicate an architecture for the CIFAR-10 image recognition task.\n 4. [Retraining an Existing Architecture.](08_Convolutional_Neural_Networks\u002F04_Retraining_Current_Architectures#retraining-fine-tuning-current-cnn-architectures)\n  * We show how to download and setup the CIFAR-10 data for the TensorFlow retraining\u002Ffine-tuning tutorial.\n 5. [Using Stylenet\u002FNeuralStyle.](08_Convolutional_Neural_Networks\u002F05_Stylenet_NeuralStyle#stylenet--neural-style)\n  * In this recipe, we show a basic implementation of using Stylenet or Neuralstyle.\n 6. [Implementing Deep Dream.](08_Convolutional_Neural_Networks\u002F06_Deepdream#deepdream-in-tensorflow)\n  * This script shows a line-by-line explanation of TensorFlow's deepdream tutorial. Taken from [Deepdream on TensorFlow](https:\u002F\u002Fgithub.com\u002Ftensorflow\u002Ftensorflow\u002Ftree\u002Fmaster\u002Ftensorflow\u002Fexamples\u002Ftutorials\u002Fdeepdream). 
## [Ch 9: Recurrent Neural Networks](09_Recurrent_Neural_Networks#ch-9-recurrent-neural-networks)

Recurrent Neural Networks (RNNs) are very similar to regular neural networks except that they allow 'recurrent' connections, or loops that depend on the prior states of the network. This allows RNNs to efficiently deal with sequential data, whereas other types of networks cannot. We motivate the usage of LSTM (Long Short Term Memory) networks as a way of addressing the shortcomings of plain RNNs, and then show how easy it is to implement these RNN types in TensorFlow. An LSTM wiring sketch follows the list below.

 1. [Introduction](09_Recurrent_Neural_Networks/01_Introduction#introduction-to-rnns-in-tensorflow)
  * We introduce Recurrent Neural Networks and how they are able to feed in a sequence and predict either a fixed target (categorical/numerical) or another sequence (sequence to sequence).
 2. [Implementing an RNN Model for Spam Prediction](09_Recurrent_Neural_Networks/02_Implementing_RNN_for_Spam_Prediction#implementing-an-rnn-for-spam-prediction)
  * In this example, we create an RNN model to improve on our spam/ham SMS text predictions.
 3. [Implementing an LSTM Model for Text Generation](09_Recurrent_Neural_Networks/03_Implementing_LSTM#implementing-an-lstm-model)
  * We show how to implement an LSTM (Long Short Term Memory) RNN for Shakespeare language generation. (Word level vocabulary)
 4. [Stacking Multiple LSTM Layers](09_Recurrent_Neural_Networks/04_Stacking_Multiple_LSTM_Layers#stacking-multiple-lstm-layers)
  * We stack multiple LSTM layers to improve on our Shakespeare language generation. (Character level vocabulary)
 5. [Creating a Sequence to Sequence Translation Model (Seq2Seq)](09_Recurrent_Neural_Networks/05_Creating_A_Sequence_To_Sequence_Model#creating-a-sequence-to-sequence-model-with-tensorflow-seq2seq)
  * Here, we use TensorFlow's sequence-to-sequence models to train an English-German translation model.
 6. [Training a Siamese Similarity Measure](09_Recurrent_Neural_Networks/06_Training_A_Siamese_Similarity_Measure#training-a-siamese-similarity-measure-rnns)
  * Here, we implement a Siamese RNN to predict the similarity of addresses and use it for record matching. Using RNNs for record matching is very versatile, as we do not have a fixed set of target categories and can use the trained model to predict similarities across new addresses.
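Wiring an LSTM over a batch of embedded token sequences follows one pattern throughout the chapter; here is a sketch (TF 1.x; the sequence length, layer sizes, and two-class output are illustrative assumptions):

```python
# LSTM-over-sequences sketch: embedded tokens -> recurrent cell -> logits.
import tensorflow as tf

# [batch, time steps, embedding size], e.g. already-embedded SMS texts
sequences = tf.placeholder(tf.float32, shape=[None, 25, 50])

cell = tf.nn.rnn_cell.LSTMCell(num_units=10)
outputs, state = tf.nn.dynamic_rnn(cell, sequences, dtype=tf.float32)

last_output = outputs[:, -1, :]              # hidden state at last time step
logits = tf.layers.dense(last_output, 2)     # e.g. spam vs. ham
```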
## [Ch 10: Taking TensorFlow to Production](10_Taking_TensorFlow_to_Production#ch-10-taking-tensorflow-to-production)

Of course there is more to TensorFlow than just creating and fitting machine learning models. Once we have a model that we want to use, we have to move it towards production usage. This chapter provides tips and examples of implementing unit tests, using multiple processors, and using multiple machines (TensorFlow distributed), and finishes with a full production example. A device-placement sketch follows the list below.

 1. [Implementing Unit Tests](10_Taking_TensorFlow_to_Production/01_Implementing_Unit_Tests#implementing-unit-tests)
  * We show how to implement different types of unit tests on tensors (placeholders and variables).
 2. [Using Multiple Executors (Devices)](10_Taking_TensorFlow_to_Production/02_Using_Multiple_Devices#using-multiple-devices)
  * How to use a machine with multiple devices. E.g., a machine with a CPU, and one or more GPUs.
 3. [Parallelizing TensorFlow](10_Taking_TensorFlow_to_Production/03_Parallelizing_TensorFlow#parallelizing-tensorflow)
  * How to set up and use TensorFlow distributed on multiple machines.
 4. [Tips for TensorFlow in Production](10_Taking_TensorFlow_to_Production/04_Production_Tips#production-tips-with-tensorflow)
  * Various tips for developing with TensorFlow.
 5. [An Example of Productionalizing TensorFlow](10_Taking_TensorFlow_to_Production/05_Production_Example#a-production-example)
  * We show how to take the RNN model for predicting ham/spam (from Chapter 9, recipe #2) and put it into two production-level files: training and evaluation.
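Recipe 2's device handling reduces to explicit placement plus a session config; here is a sketch (TF 1.x; with `allow_soft_placement` the GPU op falls back to CPU on machines without one):

```python
# Explicit device placement sketch with logged placements.
import tensorflow as tf

with tf.device('/cpu:0'):
    a = tf.constant([[1., 2.], [3., 4.]])
with tf.device('/gpu:0'):                    # soft placement falls back
    b = tf.matmul(a, a)

config = tf.ConfigProto(log_device_placement=True,
                        allow_soft_placement=True)
with tf.Session(config=config) as sess:
    print(sess.run(b))                       # placements printed to stderr
```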
## [Ch 11: More with TensorFlow](11_More_with_TensorFlow#ch-11-more-with-tensorflow)

To illustrate how versatile TensorFlow is, we will show additional examples in this chapter. We start by showing how to use the logging/visualizing tool Tensorboard. Then we illustrate how to do k-means clustering, use a genetic algorithm, and solve a system of ODEs. A TensorBoard logging sketch follows the list below.

 1. [Visualizing Computational Graphs (with Tensorboard)](11_More_with_TensorFlow/01_Visualizing_Computational_Graphs#visualizing-computational-graphs-wtensorboard)
  * An example of using histograms, scalar summaries, and creating images in Tensorboard.
 2. [Working with a Genetic Algorithm](11_More_with_TensorFlow/02_Working_with_a_Genetic_Algorithm#working-with-a-genetic-algorithm)
  * We create a genetic algorithm to optimize an individual (array of 50 numbers) toward the ground truth function.
 3. [Clustering Using K-means](11_More_with_TensorFlow/03_Clustering_Using_KMeans#clustering-using-k-means)
  * How to use TensorFlow to do k-means clustering. We use the Iris data set, set k=3, and use k-means to make predictions.
 4. [Solving a System of ODEs](11_More_with_TensorFlow/04_Solving_A_System_of_ODEs#solving-a-system-of-odes)
  * Here, we show how to use TensorFlow to solve a system of ODEs. The system of concern is the Lotka-Volterra predator-prey system.
 5. [Using a Random Forest](11_More_with_TensorFlow/05_Using_a_Random_Forest#using-a-random-forest)
  * We illustrate how to use TensorFlow's gradient boosted regression and classification trees.
 6. [Using TensorFlow with Keras](11_More_with_TensorFlow/06_Using_TensorFlow_with_Keras#using-tensorflow-with-keras)
  * Here we show how to use Keras sequential model building for a fully connected neural network and a CNN model with callbacks.
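The TensorBoard recipe's logging pattern can be sketched as follows (TF 1.x; the decaying variable stands in for a real training loss, and the log directory name is an assumption):

```python
# Scalar-summary logging sketch for `tensorboard --logdir=tensorboard_logs`.
import tensorflow as tf

loss = tf.Variable(1.0)
tf.summary.scalar('loss', loss)              # register a scalar summary
merged = tf.summary.merge_all()
decay_step = loss.assign(loss * 0.9)         # stand-in for a training step

with tf.Session() as sess:
    writer = tf.summary.FileWriter('tensorboard_logs', sess.graph)
    sess.run(tf.global_variables_initializer())
    for step in range(10):
        sess.run(decay_step)
        writer.add_summary(sess.run(merged), step)
    writer.close()
```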
src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_aec02189b19a.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\n本章旨在介绍 TensorFlow 中的主要对象和概念。我们还将介绍如何获取本书后续章节所需的数据，并提供进一步学习 TensorFlow 的资源。\n\n 1. [TF 算法的总体概述](01_Introduction\u002F01_How_TensorFlow_Works#introduction-to-how-tensorflow-graphs-work)\n  * 在这里我们介绍了 TensorFlow，以及大多数 TensorFlow 算法的基本工作原理。\n 2. [创建和使用张量](01_Introduction\u002F02_Creating_and_Using_Tensors#creating-and-using-tensors)\n  * 如何在 TensorFlow 中创建和初始化张量。我们还展示了这些操作在 TensorBoard 中的呈现方式。\n 3. [使用变量和占位符](01_Introduction\u002F03_Using_Variables_and_Placeholders#variables-and-placeholders)\n  * 如何在 TensorFlow 中创建并使用变量和占位符。我们同样展示了这些操作在 TensorBoard 上的显示效果。\n 4. [矩阵操作](01_Introduction\u002F04_Working_with_Matrices#working-with-matrices)\n  * 理解 TensorFlow 如何处理矩阵，对于掌握算法的工作原理至关重要。\n 5. [声明运算](01_Introduction\u002F05_Declaring_Operations#declaring-operations)\n  * 如何在 TensorFlow 中使用各种数学运算。\n 6. [实现激活函数](01_Introduction\u002F06_Implementing_Activation_Functions#activation-functions)\n  * 激活函数是 TensorFlow 内置的独特函数，供你在算法中使用。\n 7. [数据源的使用](01_Introduction\u002F07_Working_with_Data_Sources#data-source-information)\n  * 在这里我们展示了如何访问书中所需的各种数据源。同时提供了描述这些数据源及其来源的链接。\n 8. [附加资源](01_Introduction\u002F08_Additional_Resources#additional-resources)\n  * 主要为官方资源和相关论文。这些论文多为 TensorFlow 或深度学习领域的文献。\n\n## [第2章：TensorFlow 的工作方式](02_TensorFlow_Way#ch-2-the-tensorflow-way)\n\u003Ckbd>\n  \u003Ca href=\"02_TensorFlow_Way\u002F01_Operations_as_a_Computational_Graph#operations-as-a-computational-graph\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_b23b62a67c21.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"02_TensorFlow_Way\u002F02_Layering_Nested_Operations#multiple-operations-on-a-computational-graph\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_476027bb36b4.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"02_TensorFlow_Way\u002F03_Working_with_Multiple_Layers#working-with-multiple-layers\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_550e69c0159e.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"02_TensorFlow_Way\u002F04_Implementing_Loss_Functions#implementing-loss-functions\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_c39072b9df66.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"02_TensorFlow_Way\u002F05_Implementing_Back_Propagation#implementing-back-propagation\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_93c575efeb3f.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"02_TensorFlow_Way\u002F06_Working_with_Batch_and_Stochastic_Training#working-with-batch-and-stochastic-training\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_d8bdb267dc03.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca 
href=\"02_TensorFlow_Way\u002F07_Combining_Everything_Together#combining-everything-together\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_fc50b8095667.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"02_TensorFlow_Way\u002F08_Evaluating_Models#evaluating-models\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_ac0488f32e3f.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\n在建立了 TensorFlow 中的基本对象和方法之后，我们现在着手构建组成 TensorFlow 算法的各个组件。我们首先介绍计算图，然后过渡到损失函数和反向传播。最后，我们将创建一个简单的分类器，并展示如何评估回归和分类算法。\n\n 1. [单个操作作为计算图](02_TensorFlow_Way\u002F01_Operations_as_a_Computational_Graph#operations-as-a-computational-graph)\n  * 我们将展示如何在计算图上创建一个操作，以及如何使用 TensorBoard 对其进行可视化。\n 2. [嵌套操作的分层](02_TensorFlow_Way\u002F02_Layering_Nested_Operations#multiple-operations-on-a-computational-graph)\n  * 我们将展示如何在计算图上创建多个操作，并使用 TensorBoard 对它们进行可视化。\n 3. [多层操作](02_TensorFlow_Way\u002F03_Working_with_Multiple_Layers#working-with-multiple-layers)\n  * 在这里，我们将计算图的应用扩展到多层结构，并展示它们在 TensorBoard 中的呈现方式。\n 4. [实现损失函数](02_TensorFlow_Way\u002F04_Implementing_Loss_Functions#implementing-loss-functions)\n  * 为了训练模型，我们必须能够评估其性能。这正是由损失函数来完成的。我们将绘制各种损失函数，并讨论其中一些的优点和局限性（示意代码见本章列表之后）。\n 5. [实现反向传播](02_TensorFlow_Way\u002F05_Implementing_Back_Propagation#implementing-back-propagation)\n  * 在这一部分，我们将展示如何利用损失函数遍历数据，并对回归和分类问题进行误差的反向传播。\n 6. [随机梯度下降与批量训练](02_TensorFlow_Way\u002F06_Working_with_Batch_and_Stochastic_Training#working-with-batch-and-stochastic-training)\n  * TensorFlow 让批量训练和随机梯度下降都变得非常简单。我们将展示如何实现这两种方法，并讨论各自的优缺点。\n 7. [整合所有内容](02_TensorFlow_Way\u002F07_Combining_Everything_Together#combining-everything-together)\n  * 现在，我们将之前学到的所有内容整合起来，创建一个简单的分类器。\n 8. [模型评估](02_TensorFlow_Way\u002F08_Evaluating_Models#evaluating-models)\n  * 任何模型的好坏都取决于其评估结果。在这里，我们将展示两个例子：(1) 回归算法的评估，以及 (2) 分类算法的评估。
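\n\n下面给出一段与第 4 小节“实现损失函数”对应的最小示意代码（假设按本页快速上手指南默认的 TF 1.x 环境运行，数值仅作演示，并非书中原始脚本）：\n\n```python\n# 示意代码：L2 与 L1 损失的对比（假设 TF 1.x 环境）\nimport tensorflow as tf\n\nx_vals = tf.linspace(-1., 1., 500)  # 预测值网格\ntarget = tf.constant(0.)            # 目标值固定为 0\n\nl2_loss = tf.square(target - x_vals)  # L2：误差平方，对大误差惩罚更重\nl1_loss = tf.abs(target - x_vals)     # L1：绝对误差，对离群点更稳健\n\nwith tf.Session() as sess:\n    l2_out, l1_out = sess.run([l2_loss, l1_loss])\n    print('L2 前 3 个值:', l2_out[:3])\n    print('L1 前 3 个值:', l1_out[:3])\n```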
\n\n## [第3章：线性回归](03_Linear_Regression#ch-3-linear-regression)\n\n\u003Ckbd>\n  \u003Ca href=\"03_Linear_Regression\u002F01_Using_the_Matrix_Inverse_Method#using-the-matrix-inverse-method\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_909eca9e639d.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"03_Linear_Regression\u002F02_Implementing_a_Decomposition_Method#using-the-cholesky-decomposition-method\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_4e5173a23c07.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"03_Linear_Regression\u002F03_TensorFlow_Way_of_Linear_Regression#learning-the-tensorflow-way-of-regression\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_972935e5b857.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"03_Linear_Regression\u002F04_Loss_Functions_in_Linear_Regressions#loss-functions-in-linear-regression\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_0fcc0663e170.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"03_Linear_Regression\u002F05_Implementing_Deming_Regression#implementing-deming-regression\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_c94c3c1de194.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"03_Linear_Regression\u002F06_Implementing_Lasso_and_Ridge_Regression#implementing-lasso-and-ridge-regression\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_25d029b57793.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"03_Linear_Regression\u002F07_Implementing_Elasticnet_Regression#implementing-elasticnet-regression\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_25d029b57793.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"03_Linear_Regression\u002F08_Implementing_Logistic_Regression#implementing-logistic-regression\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_4bd6d245a823.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\n在这里，我们展示了如何在 TensorFlow 中实现各种线性回归技术。前两节介绍了如何在 TensorFlow 中使用标准矩阵方法求解线性回归问题。剩下的六节则演示了如何利用 TensorFlow 的计算图来实现不同类型的回归。\n\n1. [使用矩阵逆法](03_Linear_Regression\u002F01_Using_the_Matrix_Inverse_Method#using-the-matrix-inverse-method)\n   * 如何在 TensorFlow 中通过矩阵逆法求解二维线性回归问题（示意代码见本章列表之后）。\n2. [实现分解法](03_Linear_Regression\u002F02_Implementing_a_Decomposition_Method#using-the-cholesky-decomposition-method)\n   * 使用 Cholesky 分解法求解二维线性回归问题。\n3. [学习 TensorFlow 中的线性回归方法](03_Linear_Regression\u002F03_TensorFlow_Way_of_Linear_Regression#learning-the-tensorflow-way-of-regression)\n   * 通过 L2 损失函数，在计算图中迭代进行线性回归。\n4. [理解线性回归中的损失函数](03_Linear_Regression\u002F04_Loss_Functions_in_Linear_Regressions#loss-functions-in-linear-regression)\n   * 线性回归中的 L2 损失与 L1 损失对比。我们将讨论两者的优缺点。\n5. [实现 Deming 回归（总回归）](03_Linear_Regression\u002F05_Implementing_Deming_Regression#implementing-deming-regression)\n   * 通过修改损失函数，在 TensorFlow 中实现 Deming（总）回归。\n6. [实现 Lasso 和 Ridge 回归](03_Linear_Regression\u002F06_Implementing_Lasso_and_Ridge_Regression#implementing-lasso-and-ridge-regression)\n   * Lasso 和 Ridge 回归是用于正则化回归系数的方法。我们通过改变损失函数，在 TensorFlow 中实现了这两种回归。\n7. [实现 Elastic Net 回归](03_Linear_Regression\u002F07_Implementing_Elasticnet_Regression#implementing-elasticnet-regression)\n   * Elastic Net 是一种结合了 L2 和 L1 损失的正则化技术。我们展示了如何在 TensorFlow 中实现它。\n8. [实现逻辑回归](03_Linear_Regression\u002F08_Implementing_Logistic_Regression#implementing-logistic-regression)\n   * 我们通过在计算图中使用激活函数来实现逻辑回归。
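\n\n与第 1 小节“使用矩阵逆法”对应的最小示意代码如下（假设 TF 1.x 环境，数据为随机生成，仅作演示）：\n\n```python\n# 示意代码：矩阵逆法求解线性回归，beta = (A^T A)^(-1) A^T y\nimport numpy as np\nimport tensorflow as tf\n\nx_vals = np.linspace(0, 10, 100)\ny_vals = x_vals + np.random.normal(0, 1, 100)\n\n# 设计矩阵 A 的列为 [x, 1]，分别对应斜率与截距\nA = np.column_stack((x_vals, np.ones(100)))\nA_tsr = tf.constant(A, dtype=tf.float32)\ny_tsr = tf.constant(y_vals.reshape(-1, 1), dtype=tf.float32)\n\ntA_A = tf.matmul(tf.transpose(A_tsr), A_tsr)\nsolution = tf.matmul(tf.matmul(tf.matrix_inverse(tA_A), tf.transpose(A_tsr)), y_tsr)\n\nwith tf.Session() as sess:\n    coef = sess.run(solution)\n    print('斜率:', coef[0][0], '截距:', coef[1][0])\n```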
\n\n## [第4章：支持向量机](04_Support_Vector_Machines#ch-4-support-vector-machines)\n\n\u003Ckbd>\n  \u003Ca href=\"04_Support_Vector_Machines\u002F01_Introduction#support-vector-machine-introduction\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_2c64c0aff2cf.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"04_Support_Vector_Machines\u002F02_Working_with_Linear_SVMs#working-with-linear-svms\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_394f2205c4f9.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"04_Support_Vector_Machines\u002F03_Reduction_to_Linear_Regression#svm-reduction-to-linear-regression\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_ae9018a7d62b.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"04_Support_Vector_Machines\u002F04_Working_with_Kernels#working-with-kernels\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_216ceceb82c1.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"04_Support_Vector_Machines\u002F05_Implementing_Nonlinear_SVMs#implementing-nonlinear-svms\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_deed4e87b434.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"04_Support_Vector_Machines\u002F06_Implementing_Multiclass_SVMs#implementing-multiclass-svms\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_a3c842385b79.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\n本章展示了如何使用TensorFlow实现各种支持向量机方法。我们首先创建一个线性支持向量机，并说明其如何用于回归分析。随后引入核函数（RBF高斯核），并演示如何利用它来处理非线性数据的分类问题。最后，我们将介绍多类别的非线性支持向量机的实现。\n\n1. [简介](04_Support_Vector_Machines\u002F01_Introduction#support-vector-machine-introduction)\n   * 我们将介绍支持向量机的基本概念，以及如何在TensorFlow框架中实现它们。\n2. [线性支持向量机的应用](04_Support_Vector_Machines\u002F02_Working_with_Linear_SVMs#working-with-linear-svms)\n   * 我们基于鸢尾花数据集中的萼片长度和花瓣宽度，构建了一个线性支持向量机来区分I. setosa类别。\n3. [化归为线性回归](04_Support_Vector_Machines\u002F03_Reduction_to_Linear_Regression#svm-reduction-to-linear-regression)\n   * 支持向量机的核心思想是通过一条直线来划分不同类别。我们将对算法稍作调整，以实现支持向量机的回归功能。\n4. [TensorFlow中的核函数应用](04_Support_Vector_Machines\u002F04_Working_with_Kernels#working-with-kernels)\n   * 为了使支持向量机能够处理非线性数据，我们解释并演示如何在TensorFlow中实现不同的核函数（示意代码见本章列表之后）。\n5. [非线性支持向量机的实现](04_Support_Vector_Machines\u002F05_Implementing_Nonlinear_SVMs#implementing-nonlinear-svms)\n   * 我们使用高斯核（RBF）来分离非线性的类别。\n6. [多分类支持向量机的实现](04_Support_Vector_Machines\u002F06_Implementing_Multiclass_SVMs#implementing-multiclass-svms)\n   * 支持向量机本质上是一种二分类模型。我们展示如何在TensorFlow中采用“一对多”的策略将其扩展到多分类任务中。
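\n\n与第 4 小节“核函数应用”对应的最小示意代码如下（假设 TF 1.x 环境，gamma 取值仅作演示）：\n\n```python\n# 示意代码：计算高斯（RBF）核矩阵 K(xi, xj) = exp(gamma * ||xi - xj||^2)，gamma 为负数\nimport numpy as np\nimport tensorflow as tf\n\nx_data = tf.placeholder(tf.float32, shape=[None, 2])\ngamma = tf.constant(-25.0)\n\n# ||xi - xj||^2 = ||xi||^2 - 2*xi.xj + ||xj||^2，用矩阵运算一次性算出全部成对距离\nsq_norms = tf.reduce_sum(tf.square(x_data), 1, keepdims=True)\npairwise = sq_norms - 2.0 * tf.matmul(x_data, tf.transpose(x_data)) + tf.transpose(sq_norms)\nrbf_kernel = tf.exp(gamma * tf.abs(pairwise))\n\nwith tf.Session() as sess:\n    pts = np.random.normal(size=(5, 2)).astype(np.float32)\n    print(sess.run(rbf_kernel, feed_dict={x_data: pts}))\n```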
\n\n## [第5章：最近邻方法](05_Nearest_Neighbor_Methods#ch-5-nearest-neighbor-methods)\n\u003Ckbd>\n  \u003Ca href=\"05_Nearest_Neighbor_Methods\u002F01_Introduction#nearest-neighbor-methods-introduction\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_eee57850429d.jpg\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"05_Nearest_Neighbor_Methods\u002F02_Working_with_Nearest_Neighbors#working-with-nearest-neighbors\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_56e27479079f.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"05_Nearest_Neighbor_Methods\u002F03_Working_with_Text_Distances#working-with-text-distances\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_874e8b7ccf51.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"05_Nearest_Neighbor_Methods\u002F04_Computing_with_Mixed_Distance_Functions#computing-with-mixed-distance-functions\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_c796d4709593.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"05_Nearest_Neighbor_Methods\u002F05_An_Address_Matching_Example#an-address-matching-example\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_874e8b7ccf51.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"05_Nearest_Neighbor_Methods\u002F06_Nearest_Neighbors_for_Image_Recognition#nearest-neighbors-for-image-recognition\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_475ccc914ffd.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\n最近邻方法是一种非常流行的机器学习算法。我们展示了如何实现k近邻、加权k近邻以及混合距离函数的k近邻算法。在这一章中，我们还介绍了如何在TensorFlow中使用Levenshtein距离（编辑距离），并用它来计算字符串之间的相似度。最后，我们以MNIST手写数字识别为例，说明如何利用k近邻进行分类预测。\n\n1. [简介](05_Nearest_Neighbor_Methods\u002F01_Introduction#nearest-neighbor-methods-introduction)\n   * 我们将介绍在TensorFlow中实现k近邻算法所需的概念和方法。\n2. [最近邻算法的应用](05_Nearest_Neighbor_Methods\u002F02_Working_with_Nearest_Neighbors#working-with-nearest-neighbors)\n   * 我们构建了一个最近邻算法，尝试预测房屋价值（回归问题，示意代码见本章列表之后）。\n3. [基于文本的距离计算](05_Nearest_Neighbor_Methods\u002F03_Working_with_Text_Distances#working-with-text-distances)\n   * 为了能够在文本上应用距离函数，我们展示了如何在TensorFlow中使用编辑距离。\n4. [混合距离函数的计算](05_Nearest_Neighbor_Methods\u002F04_Computing_with_Mixed_Distance_Functions#computing-with-mixed-distance-functions)\n   * 在这里，我们实现了根据输入特征的标准差对距离函数进行缩放的k近邻算法。\n5. [地址匹配的应用](05_Nearest_Neighbor_Methods\u002F05_An_Address_Matching_Example#an-address-matching-example)\n   * 我们使用混合距离函数来匹配地址。对于邮政编码使用数值距离，而对于街道名称则使用字符串编辑距离。街道名称允许存在拼写错误。\n6. [图像识别中的最近邻方法](05_Nearest_Neighbor_Methods\u002F06_Nearest_Neighbors_for_Image_Recognition#nearest-neighbors-for-image-recognition)\n   * MNIST数字图像数据集非常适合用来演示如何在图像分类任务中使用k近邻算法。
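\n\n与第 2 小节对应的最小示意代码如下（假设 TF 1.x 环境，k 值与数据均为演示用）：\n\n```python\n# 示意代码：基于 L1 距离的 k 近邻回归预测\nimport numpy as np\nimport tensorflow as tf\n\nk = 4\nx_train = tf.placeholder(tf.float32, shape=[None, 3])\ny_train = tf.placeholder(tf.float32, shape=[None])\nx_test = tf.placeholder(tf.float32, shape=[3])\n\n# 到每个训练样本的 L1 距离；取最近 k 个邻居目标值的均值作为预测\ndists = tf.reduce_sum(tf.abs(x_train - x_test), axis=1)\n_, top_idx = tf.nn.top_k(tf.negative(dists), k=k)\nprediction = tf.reduce_mean(tf.gather(y_train, top_idx))\n\nwith tf.Session() as sess:\n    xs = np.random.normal(size=(20, 3)).astype(np.float32)\n    ys = np.random.normal(size=20).astype(np.float32)\n    pred = sess.run(prediction, feed_dict={x_train: xs, y_train: ys, x_test: xs[0]})\n    print('预测值:', pred)\n```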
\n\n## [第6章：神经网络](06_Neural_Networks#ch-6-neural-networks)\n\n\u003Ckbd>\n  \u003Ca href=\"06_Neural_Networks\u002F01_Introduction#neural-networks-introduction\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_874e8b7ccf51.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"06_Neural_Networks\u002F02_Implementing_an_Operational_Gate#implementing-an-operational-gate\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_9b72d7489720.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"06_Neural_Networks\u002F03_Working_with_Activation_Functions#working-with-activation-functions\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_aedbb738e3a8.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"06_Neural_Networks\u002F04_Single_Hidden_Layer_Network#implementing-a-one-layer-neural-network\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_01c8d4397df1.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"06_Neural_Networks\u002F05_Implementing_Different_Layers#implementing-different-layers\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_874e8b7ccf51.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"06_Neural_Networks\u002F06_Using_Multiple_Layers#using-multiple-layers\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_dc769b986940.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"06_Neural_Networks\u002F07_Improving_Linear_Regression#improving-linear-regression\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_121cf35abd3f.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"06_Neural_Networks\u002F08_Learning_Tic_Tac_Toe#learning-to-play-tic-tac-toe\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_8449e9784ce2.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\n神经网络在机器学习中占据着非常重要的地位，并且由于在以往难以解决的问题上取得了重大突破，其受欢迎程度正不断上升。我们首先从介绍“浅层”神经网络开始，这类网络功能强大，能够帮助我们提升之前机器学习算法的性能。我们将从最基本的神经网络单元——运算门入手，逐步构建更复杂的神经网络结构，最终训练一个可以玩井字棋的模型。\n\n1. [简介](06_Neural_Networks\u002F01_Introduction#neural-networks-introduction)\n  * 我们将介绍神经网络的概念，以及TensorFlow如何设计来轻松处理这些算法。\n2. [实现运算门](06_Neural_Networks\u002F02_Implementing_an_Operational_Gate#implementing-an-operational-gate)\n  * 我们先实现一个包含单一运算的运算门，随后展示如何将其扩展为多层嵌套的运算。\n3. [使用门与激活函数](06_Neural_Networks\u002F03_Working_with_Activation_Functions#working-with-activation-functions)\n  * 接下来需要在门上引入激活函数。我们将演示不同激活函数的工作原理。\n4. [实现单隐层神经网络](06_Neural_Networks\u002F04_Single_Hidden_Layer_Network#implementing-a-one-layer-neural-network)\n  * 现在我们已经具备了构建第一个神经网络的所有要素。在这里，我们将使用鸢尾花数据集进行回归任务（示意代码见本章列表之后）。\n5. [实现不同层](06_Neural_Networks\u002F05_Implementing_Different_Layers#implementing-different-layers)\n  * 本节将介绍卷积层和池化层，并通过一维和二维的例子展示如何将它们与全连接层串联起来。\n6. [使用多层神经网络](06_Neural_Networks\u002F06_Using_Multiple_Layers#using-multiple-layers)\n  * 在这一部分，我们将展示如何对不同层和变量进行模块化处理，从而构建更加清晰的多层神经网络。\n7. [改进线性模型的预测](06_Neural_Networks\u002F07_Improving_Linear_Regression#improving-linear-regression)\n  * 我们将说明如何通过添加隐藏层来加速之前逻辑回归模型的收敛速度。\n8. [学习玩井字棋](06_Neural_Networks\u002F08_Learning_Tic_Tac_Toe#learning-to-play-tic-tac-toe)\n  * 基于一组井字棋棋盘及其对应的最优走法，我们将训练一个神经网络分类模型来下棋。脚本结束时，你可以尝试与训练好的模型对弈。
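\n\n与第 4 小节“实现单隐层神经网络”对应的最小示意代码如下（假设 TF 1.x 环境，隐层规模与数据均为演示用）：\n\n```python\n# 示意代码：单隐层神经网络做回归（3 维输入 -> 5 个隐层节点 -> 1 维输出）\nimport numpy as np\nimport tensorflow as tf\n\nx_data = tf.placeholder(tf.float32, shape=[None, 3])\ny_target = tf.placeholder(tf.float32, shape=[None, 1])\n\nW1 = tf.Variable(tf.random_normal([3, 5]))\nb1 = tf.Variable(tf.random_normal([5]))\nhidden = tf.nn.relu(tf.add(tf.matmul(x_data, W1), b1))  # 隐层 + ReLU 激活\nW2 = tf.Variable(tf.random_normal([5, 1]))\nb2 = tf.Variable(tf.random_normal([1]))\noutput = tf.add(tf.matmul(hidden, W2), b2)\n\nloss = tf.reduce_mean(tf.square(y_target - output))\ntrain_step = tf.train.GradientDescentOptimizer(0.005).minimize(loss)\n\nwith tf.Session() as sess:\n    sess.run(tf.global_variables_initializer())\n    xs = np.random.normal(size=(50, 3)).astype(np.float32)\n    ys = np.sum(xs, axis=1, keepdims=True).astype(np.float32)  # 演示用目标：特征求和\n    for i in range(200):\n        sess.run(train_step, feed_dict={x_data: xs, y_target: ys})\n    print('最终损失:', sess.run(loss, feed_dict={x_data: xs, y_target: ys}))\n```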
\n\n## [第7章：自然语言处理](07_Natural_Language_Processing#ch-7-natural-language-processing)\n\n\u003Ckbd>\n  \u003Ca href=\"07_Natural_Language_Processing\u002F01_Introduction#natural-language-processing-introduction\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_874e8b7ccf51.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"07_Natural_Language_Processing\u002F02_Working_with_Bag_of_Words#working-with-bag-of-words\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_ab53c5827144.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"07_Natural_Language_Processing\u002F03_Implementing_tf_idf#implementing-tf-idf\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_e017b413c5cb.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"07_Natural_Language_Processing\u002F04_Working_With_Skip_Gram_Embeddings#working-with-skip-gram-embeddings\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_cf649676da14.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"07_Natural_Language_Processing\u002F05_Working_With_CBOW_Embeddings#working-with-cbow-embeddings\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_b567c1b5c2ce.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"07_Natural_Language_Processing\u002F06_Using_Word2Vec_Embeddings#using-word2vec-embeddings\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_15c68eb4bb34.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"07_Natural_Language_Processing\u002F07_Sentiment_Analysis_With_Doc2Vec#sentiment-analysis-with-doc2vec\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_3cca4b614e14.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\n自然语言处理（NLP）是一种将文本信息转化为数值型摘要、特征或模型的方法。在本章中，我们将介绍并解释如何在TensorFlow中最佳地处理文本数据。我们展示了如何实现经典的“词袋”模型，并说明根据具体问题，可能存在更好的文本嵌入方法。此外，还有基于神经网络的Word2Vec（CBOW和Skip-Gram）以及Doc2Vec等嵌入技术。我们将演示如何在TensorFlow中实现所有这些方法。\n\n1. [简介](07_Natural_Language_Processing\u002F01_Introduction#natural-language-processing-introduction)\n  * 我们介绍了将文本转换为数值向量的方法，并简要说明了TensorFlow中的“嵌入”功能。\n2. [使用词袋模型](07_Natural_Language_Processing\u002F02_Working_with_Bag_of_Words#working-with-bag-of-words)\n  * 在此部分，我们利用TensorFlow对单词进行独热编码，即“词袋”表示法。随后结合逻辑回归模型，用于预测短信是垃圾短信还是正常短信。\n3. [实现TF-IDF](07_Natural_Language_Processing\u002F03_Implementing_tf_idf#implementing-tf-idf)\n  * 我们结合 scikit-learn 与 TensorFlow 实现了词频-逆文档频率（TF-IDF）算法，并基于TF-IDF向量进行逻辑回归，以提升垃圾短信分类的准确性（示意代码见本章列表之后）。\n4. [使用Skip-Gram模型](07_Natural_Language_Processing\u002F04_Working_With_Skip_Gram_Embeddings#working-with-skip-gram-embeddings)\n  * 我们首次在电影评论数据集上实现了名为“Skip-Gram”的Word2Vec模型。\n5. [使用CBOW模型](07_Natural_Language_Processing\u002F05_Working_With_CBOW_Embeddings#working-with-cbow-embeddings)\n  * 接下来，我们在同一电影评论数据集上实现了另一种Word2Vec形式——“连续词袋”（CBOW）。同时，我们还介绍了保存和加载词嵌入的方法。\n6. [Word2Vec示例应用](07_Natural_Language_Processing\u002F06_Using_Word2Vec_Embeddings#using-word2vec-embeddings)\n  * 在本示例中，我们利用先前保存的CBOW词嵌入，进一步改进了基于TF-IDF的电影评论情感分类逻辑回归模型。\n7. [使用Doc2Vec进行情感分析](07_Natural_Language_Processing\u002F07_Sentiment_Analysis_With_Doc2Vec#sentiment-analysis-with-doc2vec)\n  * 最后，我们引入了Doc2Vec方法（将文档嵌入与词嵌入拼接），以提升电影评论情感分类的逻辑回归模型性能。
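\n\n与第 3 小节“实现TF-IDF”对应的最小示意代码如下（结合 scikit-learn 完成特征提取；语料与标签均为演示用假设）：\n\n```python\n# 示意代码：用 scikit-learn 计算 TF-IDF 特征，得到的矩阵可再喂给 TensorFlow 的逻辑回归模型\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\ntexts = ['free prize call now', 'are we meeting today', 'win cash prize now']\nlabels = np.array([1, 0, 1])  # 1 = 垃圾短信（演示用标签）\n\nvectorizer = TfidfVectorizer(max_features=100)\ntfidf = vectorizer.fit_transform(texts).toarray()\nprint('TF-IDF 矩阵形状:', tfidf.shape)\nprint('部分词表:', sorted(vectorizer.vocabulary_)[:5])\n# tfidf 即稠密特征矩阵，可作为逻辑回归的输入（以 feed_dict 方式喂入占位符）\n```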
\n\n## [第8章：卷积神经网络](08_Convolutional_Neural_Networks#ch-8-convolutional-neural-networks)\n\n\u003Ckbd>\n  \u003Ca href=\"08_Convolutional_Neural_Networks\u002F01_Intro_to_CNN#introduction-to-convolutional-neural-networks\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_6487260bb981.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"08_Convolutional_Neural_Networks\u002F02_Intro_to_CNN_MNIST#introduction-to-cnn-with-mnist\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_3d879621e33c.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"08_Convolutional_Neural_Networks\u002F03_CNN_CIFAR10#cifar-10-cnn\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_63fcf67b7e7e.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"08_Convolutional_Neural_Networks\u002F04_Retraining_Current_Architectures#retraining-fine-tuning-current-cnn-architectures\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_874e8b7ccf51.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"08_Convolutional_Neural_Networks\u002F05_Stylenet_NeuralStyle#stylenet--neural-style\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_2ec3a7e5eb3c.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"08_Convolutional_Neural_Networks\u002F06_Deepdream#deepdream-in-tensorflow\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_9b9c1a057236.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\n卷积神经网络（CNN）是一种使神经网络能够处理图像数据的方法。CNN之所以得名，是因为它使用卷积层，该层会在较大的图像上应用固定大小的滤波器，从而在图像的任何部分识别出特定模式。此外，CNN还使用了许多其他技术（如最大池化、丢弃等），我们将在TensorFlow中展示如何实现这些技术。我们还将演示如何对现有架构进行再训练，并通过Stylenet和Deep Dream进一步扩展CNN的应用。\n\n1. [简介](08_Convolutional_Neural_Networks\u002F01_Intro_to_CNN#introduction-to-convolutional-neural-networks)\n  * 我们介绍卷积神经网络（CNN），以及如何在TensorFlow中使用它们。\n2. [实现一个简单的CNN](08_Convolutional_Neural_Networks\u002F02_Intro_to_CNN_MNIST#introduction-to-cnn-with-mnist)\n  * 在这里，我们展示了如何创建一个在MNIST数字识别任务中表现良好的CNN架构（示意代码见本章列表之后）。\n3. [实现一个高级CNN](08_Convolutional_Neural_Networks\u002F03_CNN_CIFAR10#cifar-10-cnn)\n  * 在这个示例中，我们展示了如何复现用于CIFAR-10图像识别任务的架构。\n4. [对现有架构进行再训练](08_Convolutional_Neural_Networks\u002F04_Retraining_Current_Architectures#retraining-fine-tuning-current-cnn-architectures)\n  * 我们展示了如何下载并设置CIFAR-10数据，以便进行TensorFlow的再训练\u002F微调教程。\n5. [使用Stylenet\u002FNeuralStyle](08_Convolutional_Neural_Networks\u002F05_Stylenet_NeuralStyle#stylenet--neural-style)\n  * 在此配方中，我们展示了使用Stylenet或Neuralstyle的基本实现。\n6. [实现Deep Dream](08_Convolutional_Neural_Networks\u002F06_Deepdream#deepdream-in-tensorflow)\n  * 此脚本逐行解释了TensorFlow的Deep Dream教程。内容摘自[Deep Dream on TensorFlow](https:\u002F\u002Fgithub.com\u002Ftensorflow\u002Ftensorflow\u002Ftree\u002Fmaster\u002Ftensorflow\u002Fexamples\u002Ftutorials\u002Fdeepdream)。请注意，此处的代码已转换为Python 3。
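\n\n与第 2 小节对应的最小示意代码如下（假设 TF 1.x 环境，滤波器数量与尺寸均为演示用）：\n\n```python\n# 示意代码：一次卷积 + 最大池化（CNN 的两个核心操作）\nimport numpy as np\nimport tensorflow as tf\n\n# 输入：1 张 28x28 单通道图像；滤波器：4 个 3x3 卷积核\nx_input = tf.placeholder(tf.float32, shape=[1, 28, 28, 1])\nconv_filter = tf.Variable(tf.random_normal([3, 3, 1, 4]))\n\nconv = tf.nn.conv2d(x_input, conv_filter, strides=[1, 1, 1, 1], padding='SAME')\nrelu = tf.nn.relu(conv)\npool = tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\nwith tf.Session() as sess:\n    sess.run(tf.global_variables_initializer())\n    img = np.random.normal(size=(1, 28, 28, 1)).astype(np.float32)\n    print('池化后形状:', sess.run(pool, feed_dict={x_input: img}).shape)  # (1, 14, 14, 4)\n```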
\n\n## [第9章：循环神经网络](09_Recurrent_Neural_Networks#ch-9-recurrent-neural-networks)\n\n\u003Ckbd>\n  \u003Ca href=\"09_Recurrent_Neural_Networks\u002F01_Introduction#introduction-to-rnns-in-tensorflow\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_15650d160470.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"09_Recurrent_Neural_Networks\u002F02_Implementing_RNN_for_Spam_Prediction#implementing-an-rnn-for-spam-prediction\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_b5f475d3fd67.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"09_Recurrent_Neural_Networks\u002F03_Implementing_LSTM#implementing-an-lstm-model\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_ab41476bee1e.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"09_Recurrent_Neural_Networks\u002F04_Stacking_Multiple_LSTM_Layers#stacking-multiple-lstm-layers\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_a43cef75d0d9.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"09_Recurrent_Neural_Networks\u002F05_Creating_A_Sequence_To_Sequence_Model#creating-a-sequence-to-sequence-model-with-tensorflow-seq2seq\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_2a8cd982dae5.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"09_Recurrent_Neural_Networks\u002F06_Training_A_Siamese_Similarity_Measure#training-a-siamese-similarity-measure-rnns\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_3b329c40b6f9.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\n循环神经网络（RNN）与普通神经网络非常相似，不同之处在于它允许“循环”连接，即依赖于网络先前状态的环路。这使得RNN能够高效地处理序列数据，而其他类型的网络则无法做到。随后，我们介绍了LSTM（长短期记忆网络）作为解决常规RNN问题的一种方法，并展示了在TensorFlow中实现这些RNN类型是多么容易（示意代码见本章列表之后）。\n\n1. [简介](09_Recurrent_Neural_Networks\u002F01_Introduction#introduction-to-rnns-in-tensorflow)\n   * 我们介绍循环神经网络及其如何输入一个序列并预测固定的目标（分类或数值）或其他序列（序列到序列）。\n2. [实现用于垃圾短信预测的RNN模型](09_Recurrent_Neural_Networks\u002F02_Implementing_RNN_for_Spam_Prediction#implementing-an-rnn-for-spam-prediction)\n   * 在此示例中，我们创建了一个RNN模型，以改进对垃圾短信和正常短信的文本预测。\n3. [实现用于文本生成的LSTM模型](09_Recurrent_Neural_Networks\u002F03_Implementing_LSTM#implementing-an-lstm-model)\n   * 我们展示了如何实现一个LSTM（长短期记忆）RNN来生成莎士比亚风格的语言。（基于单词级别的词汇表）\n4. [堆叠多个LSTM层](09_Recurrent_Neural_Networks\u002F04_Stacking_Multiple_LSTM_Layers#stacking-multiple-lstm-layers)\n   * 我们堆叠了多个LSTM层，以进一步提升莎士比亚风格语言的生成效果。（基于字符级别的词汇表）\n5. [创建序列到序列翻译模型（Seq2Seq）](09_Recurrent_Neural_Networks\u002F05_Creating_A_Sequence_To_Sequence_Model#creating-a-sequence-to-sequence-model-with-tensorflow-seq2seq)\n   * 在这里，我们使用TensorFlow的序列到序列模型训练了一个英德翻译模型。\n6. [训练暹罗相似度测量模型](09_Recurrent_Neural_Networks\u002F06_Training_A_Siamese_Similarity_Measure#training-a-siamese-similarity-measure-rnns)\n   * 在此，我们实现了一个暹罗RNN来预测地址之间的相似性，并将其用于记录匹配。使用RNN进行记录匹配非常灵活，因为我们没有固定的类别目标，可以利用训练好的模型来预测新地址之间的相似性。
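\n\n与本章内容对应的最小示意代码如下（假设 TF 1.x 环境，各维度均为演示用）：\n\n```python\n# 示意代码：用基本 RNN 单元处理一批序列数据\nimport numpy as np\nimport tensorflow as tf\n\nbatch, steps, feat, hidden = 2, 5, 3, 8\nx_seq = tf.placeholder(tf.float32, shape=[batch, steps, feat])\n\ncell = tf.nn.rnn_cell.BasicRNNCell(num_units=hidden)\n# outputs：每个时间步的隐状态；state：最后一个时间步的隐状态\noutputs, state = tf.nn.dynamic_rnn(cell, x_seq, dtype=tf.float32)\n\nwith tf.Session() as sess:\n    sess.run(tf.global_variables_initializer())\n    data = np.random.normal(size=(batch, steps, feat)).astype(np.float32)\n    out, st = sess.run([outputs, state], feed_dict={x_seq: data})\n    print('outputs 形状:', out.shape)  # (2, 5, 8)\n    print('state 形状:', st.shape)    # (2, 8)\n```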
\n\n## [第10章：将 TensorFlow 投入生产](10_Taking_TensorFlow_to_Production#ch-10-taking-tensorflow-to-production)\n\u003Ckbd>\n  \u003Ca href=\"10_Taking_TensorFlow_to_Production\u002F01_Implementing_Unit_Tests#implementing-unit-tests\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_874e8b7ccf51.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"10_Taking_TensorFlow_to_Production\u002F02_Using_Multiple_Devices#using-multiple-devices\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_874e8b7ccf51.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"10_Taking_TensorFlow_to_Production\u002F03_Parallelizing_TensorFlow#parallelizing-tensorflow\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_874e8b7ccf51.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"10_Taking_TensorFlow_to_Production\u002F04_Production_Tips#production-tips-with-tensorflow\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_874e8b7ccf51.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"10_Taking_TensorFlow_to_Production\u002F05_Production_Example#a-production-example\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_874e8b7ccf51.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\n当然，TensorFlow的功能远不止于创建和训练机器学习模型。一旦我们有了想要使用的模型，就必须将其推向生产环境。本章将提供实施单元测试、使用多台设备、使用多台机器（TensorFlow分布式）等方面的技巧和示例，并以一个完整的生产示例作为结尾。\n\n1. [实施单元测试](10_Taking_TensorFlow_to_Production\u002F01_Implementing_Unit_Tests#implementing-unit-tests)\n   * 我们展示了如何对张量（占位符和变量）实施不同类型的单元测试。\n2. [使用多个执行器（设备）](10_Taking_TensorFlow_to_Production\u002F02_Using_Multiple_Devices#using-multiple-devices)\n   * 如何使用配备多个设备的机器。例如，一台机器配备CPU和一个或多个GPU（示意代码见本章列表之后）。\n3. [TensorFlow的并行化](10_Taking_TensorFlow_to_Production\u002F03_Parallelizing_TensorFlow#parallelizing-tensorflow)\n   * 如何在多台机器上设置和使用TensorFlow分布式计算。\n4. [TensorFlow生产环境中的技巧](10_Taking_TensorFlow_to_Production\u002F04_Production_Tips#production-tips-with-tensorflow)\n   * 各种关于使用TensorFlow开发的实用技巧。\n5. [TensorFlow生产化的示例](10_Taking_TensorFlow_to_Production\u002F05_Production_Example#a-production-example)\n   * 我们展示了如何将第9章食谱第2项中的垃圾短信预测RNN模型拆分为训练与评估两个生产级脚本。
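\n\n与第 2 小节“使用多个执行器（设备）”对应的最小示意代码如下（假设 TF 1.x 环境；设备名为常见约定，没有 GPU 时可全部放在 CPU 上）：\n\n```python\n# 示意代码：把操作固定到指定设备，并打印设备放置日志\nimport tensorflow as tf\n\n# allow_soft_placement=True：请求的设备不可用时自动回退到可用设备\nconfig = tf.ConfigProto(log_device_placement=True, allow_soft_placement=True)\n\nwith tf.device('\u002Fcpu:0'):  # 有 GPU 时可改为 '\u002Fgpu:0'\n    a = tf.constant([[1.0, 2.0], [3.0, 4.0]])\n    b = tf.constant([[2.0], [3.0]])\n    c = tf.matmul(a, b)\n\nwith tf.Session(config=config) as sess:\n    print(sess.run(c))  # 会话启动时会在控制台打印各操作所在的设备\n```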
\n\n## [第11章：更多关于 TensorFlow 的内容](11_More_with_TensorFlow#ch-11-more-with-tensorflow)\n\n\u003Ckbd>\n  \u003Ca href=\"11_More_with_TensorFlow\u002F01_Visualizing_Computational_Graphs#visualizing-computational-graphs-wtensorboard\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_ffca4a7e7390.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"11_More_with_TensorFlow\u002F02_Working_with_a_Genetic_Algorithm\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_9d112355a705.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"11_More_with_TensorFlow\u002F03_Clustering_Using_KMeans#clustering-using-k-means\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_e0fb76772b50.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\u003Ckbd>\n  \u003Ca href=\"11_More_with_TensorFlow\u002F04_Solving_A_System_of_ODEs#solving-a-system-of-odes\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_readme_5abae85728fc.png\" align=\"center\" height=\"45\" width=\"90\">\n  \u003C\u002Fa>\n\u003C\u002Fkbd>\n\n为了展示 TensorFlow 的强大灵活性，本章将提供更多示例。我们首先介绍如何使用日志记录和可视化工具 TensorBoard。随后，我们将演示如何运用遗传算法、进行 K-Means 聚类以及求解常微分方程组。\n\n 1. [可视化计算图（使用 TensorBoard）](11_More_with_TensorFlow\u002F01_Visualizing_Computational_Graphs#visualizing-computational-graphs-wtensorboard)\n  * 展示如何在 TensorBoard 中使用直方图、标量摘要以及生成图像。\n 2. [使用遗传算法](11_More_with_TensorFlow\u002F02_Working_with_a_Genetic_Algorithm#working-with-a-genetic-algorithm)\n  * 我们构建一个遗传算法，用于优化个体（由 50 个数字组成的数组），使其逼近真实函数。\n 3. [使用 K-Means 进行聚类](11_More_with_TensorFlow\u002F03_Clustering_Using_KMeans#clustering-using-k-means)\n  * 如何利用 TensorFlow 实现 K-Means 聚类。我们以 Iris 数据集为例，设置聚类数 k=3，并通过 K-Means 进行预测。\n 4. [求解常微分方程组](11_More_with_TensorFlow\u002F04_Solving_A_System_of_ODEs#solving-a-system-of-odes)\n  * 在此，我们展示如何使用 TensorFlow 求解常微分方程组。所讨论的系统是 Lotka-Volterra 捕食者-猎物模型。\n 5. [使用随机森林](11_More_with_TensorFlow\u002F05_Using_a_Random_Forest#using-a-random-forest)\n  * 我们演示如何使用 TensorFlow 的梯度提升回归树和分类树。\n 6. [TensorFlow 与 Keras 结合使用](11_More_with_TensorFlow\u002F06_Using_TensorFlow_with_Keras#using-tensorflow-with-keras)\n  * 在这里，我们展示了如何使用 Keras 的序列模型来构建全连接神经网络，以及带有回调函数的卷积神经网络模型。","# TensorFlow Machine Learning Cookbook 快速上手指南\n\n本指南基于 Nick McClure 编写的《TensorFlow Machine Learning Cookbook》开源代码库，旨在帮助开发者快速掌握使用 TensorFlow 构建机器学习模型的核心技巧。\n\n## 环境准备\n\n在开始之前，请确保您的开发环境满足以下要求：\n\n*   **操作系统**：Linux, macOS 或 Windows\n*   **Python 版本**：若按本指南默认的 TF 1.x 环境配置，请使用 Python 3.6 - 3.7（`tensorflow==1.15.0` 官方最高支持到 Python 3.7）；若使用现代 TensorFlow 2.x，则通常需要 Python 3.8+\n*   **前置依赖**：\n    *   `pip` (Python 包管理工具)\n    *   `git` (用于克隆代码库)\n    *   `virtualenv` 或 `conda` (推荐用于创建隔离环境)\n\n> **注意**：原书代码主要基于 TensorFlow 1.x 编写（大量使用 `tf.Session`, `tf.placeholder` 等）。如果您希望运行原始示例，建议安装 `tensorflow==1.15.0`。若需在 TensorFlow 2.x 环境下运行，部分代码可能需要调整为 eager execution 模式或使用 `tf.compat.v1`。本指南以还原书中经典示例为主，默认按 TF 1.x 环境配置（TF 2.x 的兼容写法见下方示意代码）。
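\n\n若确需在 TF 2.x 下运行书中代码，一种常见的最小改动方式如下（示意代码，具体脚本仍可能需要逐个调整）：\n\n```python\n# 示意代码：在 TF 2.x 环境下以兼容模式运行 TF 1.x 风格的代码\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()  # 关闭 eager execution，恢复计算图与会话语义\n\nx = tf.placeholder(tf.float32, shape=[None])\ndoubled = tf.multiply(x, 2.0)\n\nwith tf.Session() as sess:\n    print(sess.run(doubled, feed_dict={x: [1.0, 2.0, 3.0]}))\n```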
\n\n## 安装步骤\n\n### 1. 克隆项目代码\n首先从 GitHub 获取源代码：\n\n```bash\ngit clone https:\u002F\u002Fgithub.com\u002Fnfmcclure\u002Ftensorflow_cookbook.git\ncd tensorflow_cookbook\n```\n\n> **国内加速提示**：如果访问 GitHub 较慢，可使用国内镜像源克隆：\n> ```bash\n> git clone https:\u002F\u002Fgitee.com\u002Fmirrors\u002Ftensorflow_cookbook.git\n> ```\n> *(注：若 Gitee 镜像未同步最新版本，请尝试使用 GitHub 代理或标准克隆)*\n\n### 2. 创建虚拟环境\n推荐使用 `venv` 或 `conda` 创建独立环境，避免依赖冲突。\n\n**使用 venv:**\n```bash\npython3 -m venv tf_cookbook_env\nsource tf_cookbook_env\u002Fbin\u002Factivate  # Windows 用户请使用: tf_cookbook_env\\Scripts\\activate\n```\n\n**使用 conda:**\n```bash\nconda create -n tf_cookbook python=3.7\nconda activate tf_cookbook\n```\n\n### 3. 安装依赖\n进入项目目录后，安装所需的 Python 库。\n\n**方案 A：安装 TensorFlow 1.x (完全复现书中代码)**\n```bash\npip install tensorflow==1.15.0\npip install matplotlib numpy requests scipy scikit-learn\n```\n\n**方案 B：安装 TensorFlow 2.x (需修改代码以适应新 API)**\n```bash\npip install tensorflow\npip install matplotlib numpy requests scipy scikit-learn\n```\n\n**国内 pip 源加速**：\n```bash\npip install -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple tensorflow==1.15.0 matplotlib numpy requests scipy scikit-learn\n```\n\n## 基本使用\n\n本书的核心在于通过具体的“食谱”（代码片段）学习 TensorFlow 的各种算法。以下是运行第一个示例（第一章：TensorFlow 入门）的步骤。\n\n### 1. 验证安装\n在项目根目录下，尝试运行第一章的第一个脚本，检查环境是否正常：\n\n```bash\ncd 01_Introduction\u002F01_How_TensorFlow_Works\npython how_tensorflow_works.py\n```\n*(注：具体文件名可能因版本略有不同，请查看目录下的 `.py` 文件)*\n\n### 2. 最简单的使用示例\n以下代码展示了书中核心的“计算图”概念：定义占位符、变量、操作并启动会话。您可以直接创建一个 `test_basic.py` 文件运行此代码：\n\n```python\nimport tensorflow as tf\nimport numpy as np\n\n# 1. 本例面向 TF 1.x（使用默认计算图，无需显式初始化）\n# 如果在 TF 2.x 下运行，请改用: import tensorflow.compat.v1 as tf 并调用 tf.disable_v2_behavior()\n\n# 2. 创建占位符 (Placeholders) - 用于输入数据\nx_data = tf.placeholder(tf.float32, shape=[None])\ny_target = tf.placeholder(tf.float32, shape=[None])\n\n# 3. 创建变量 (Variables) - 模型参数\nA = tf.Variable(tf.random_normal(shape=[1]))\nb = tf.Variable(tf.random_normal(shape=[1]))\n\n# 4. 定义操作 (Operations) - 线性模型 y = Ax + b\nmodel_output = tf.add(tf.multiply(x_data, A), b)\n\n# 5. 定义损失函数 (Loss Function) - L2 损失\nloss = tf.reduce_mean(tf.square(y_target - model_output))\n\n# 6. 定义优化器 (Optimizer) - 梯度下降\nmy_opt = tf.train.GradientDescentOptimizer(0.05)\ntrain_step = my_opt.minimize(loss)\n\n# 7. 启动会话 (Session) 并初始化变量\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\n# 8. 生成模拟数据并训练\nnp.random.seed(1)\nx_vals = np.random.normal(0, 1, 100)\ny_vals = x_vals * 3 + 2 + np.random.normal(0, 0.1, 100) # 真实关系 y = 3x + 2\n\nprint(\"开始训练...\")\nfor i in range(100):\n    rand_index = np.random.choice(100)\n    sess.run(train_step, feed_dict={x_data: [x_vals[rand_index]], y_target: [y_vals[rand_index]]})\n    \n    if (i+1) % 20 == 0:\n        current_loss = sess.run(loss, feed_dict={x_data: x_vals, y_target: y_vals})\n        print(f\"Step #{i+1}, Loss: {current_loss:.4f}\")\n\n# 9. 输出最终参数\nfinal_A, final_b = sess.run([A, b])\nprint(f\"最终模型参数: A = {final_A[0]:.4f}, b = {final_b[0]:.4f}\")\nprint(\"预期参数: A ≈ 3.0, b ≈ 2.0\")\n\nsess.close()\n```\n\n### 3. 
探索更多章节\n项目按照书籍章节组织了文件夹，您可以依次深入：\n\n*   **线性回归**: `cd ..\u002F03_Linear_Regression`\n*   **支持向量机**: `cd ..\u002F04_Support_Vector_Machines`\n*   **神经网络**: `cd ..\u002F06_Neural_Networks`\n*   **卷积神经网络 (CNN)**: `cd ..\u002F08_Convolutional_Neural_Networks`\n\n每个文件夹内包含多个独立的 `.py` 脚本，对应书中的具体食谱。直接运行即可观察结果和生成的图表（如损失曲线、决策边界等）。","某电商初创公司的数据团队急需构建一个商品评论情感分析系统，以快速识别用户反馈中的负面情绪并优化服务。\n\n### 没有 tensorflow_cookbook 时\n- 团队成员在面对 TensorFlow 复杂的计算图、占位符和变量初始化时束手无策，花费数天时间仅用于配置基础环境。\n- 缺乏标准的代码参考，导致在实现循环神经网络（RNN）处理文本序列时频繁遭遇维度不匹配错误，调试效率极低。\n- 从理论公式到实际代码的转化困难重重，尤其是激活函数选择和矩阵运算部分，往往需要反复查阅零散的官方文档。\n- 项目进度严重滞后，原本计划两周完成的原型开发，因基础概念混淆和代码试错而拖延至一个月以上。\n\n### 使用 tensorflow_cookbook 后\n- 直接复用第 1 章和第 2 章中关于张量创建、变量管理及计算图构建的标准代码片段，半天内即可完成环境搭建与数据流水线。\n- 依据第 9 章“循环神经网络”提供的完整示例，快速构建了适用于文本分类的 RNN 模型，避免了常见的架构设计陷阱。\n- 参照第 7 章自然语言处理及第 6 章神经网络的实战代码，轻松实现了激活函数对比与矩阵操作，显著降低了算法落地门槛。\n- 开发周期大幅缩短，团队在一周内便输出了可运行的情感分析原型，并能迅速将模型推向生产环境（参考第 10 章）。\n\ntensorflow_cookbook 通过将抽象的机器学习理论转化为可直接运行的代码食谱，极大地降低了 TensorFlow 的学习曲线与工程落地成本。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fnfmcclure_tensorflow_cookbook_bc45c8ab.png","nfmcclure","Nick","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002Fnfmcclure_cdc62f75.jpg",null,"@nfmcclure","Seattle, WA","http:\u002F\u002Ffromdata.org","https:\u002F\u002Fgithub.com\u002Fnfmcclure",[82,86],{"name":83,"color":84,"percentage":85},"Jupyter Notebook","#DA5B0B",87.7,{"name":87,"color":88,"percentage":89},"Python","#3572A5",12.3,6242,2378,"2026-04-08T08:26:28","MIT","","未说明",{"notes":97,"python":95,"dependencies":98},"该项目是《TensorFlow Machine Learning Cookbook》一书的配套代码库，主要依赖 TensorFlow 框架。README 中未明确列出具体的操作系统、Python 版本、GPU 或内存需求。根据书名及内容（涵盖线性回归、神经网络、NLP 等），推测需安装与书中示例代码对应的 TensorFlow 版本（可能为 TF 1.x 或早期 2.x 版本，具体需参考各章节代码文件）。建议查看具体章节文件夹中的脚本以获取准确的依赖版本信息。",[99],"tensorflow",[52,14,35],[99,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117],"tensorflow-cookbook","linear-regression","neural-network","tensorflow-algorithms","rnn","cnn","svm","nlp","packtpub","machine-learning","tensorboard","classification","regression","kmeans-clustering","genetic-algorithm","ode","2026-03-27T02:49:30.150509","2026-04-13T18:54:30.245906",[121,126,131,136,141],{"id":122,"question_zh":123,"answer_zh":124,"source_url":125},31904,"运行代码时遇到 ValueError: Only call `sparse_softmax_cross_entropy_with_logits` with named arguments 错误，如何解决？","这是因为 TensorFlow 版本更新后，该函数强制要求使用命名参数。请将代码中的调用方式修改为显式指定参数名。例如，将：\nloss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(model_output, y_target))\n修改为：\nloss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_target, logits=model_output))\n确保 labels 和 logits 参数都通过名称传递。","https:\u002F\u002Fgithub.com\u002Fnfmcclure\u002Ftensorflow_cookbook\u002Fissues\u002F20",{"id":127,"question_zh":128,"answer_zh":129,"source_url":130},31905,"运行线性回归改进示例（07_Improving_Linear_Regression）时，Loss 值为负数且不断下降，结果异常怎么办？","这是由于原始出生体重数据源（UMASS）关闭了公开访问权限，导致代码获取到的数据版本不一致或列索引错误。维护者已确认目标变量应为第 0 列，特征应为第 1-8 列。该问题已在后续代码更新中修复，请拉取最新的代码仓库版本即可解决。","https:\u002F\u002Fgithub.com\u002Fnfmcclure\u002Ftensorflow_cookbook\u002Fissues\u002F101",{"id":132,"question_zh":133,"answer_zh":134,"source_url":135},31906,"访问低出生体重数据（Low Birthweight Data）链接时出现 403 Forbidden 错误，无法下载数据怎么办？","原数据托管网站已停止公共访问。维护者已将数据集副本移至 GitHub 
仓库本地目录中。请直接从以下地址获取数据并更新代码中的引用路径：\nhttps:\u002F\u002Fgithub.com\u002Fnfmcclure\u002Ftensorflow_cookbook\u002Ftree\u002Fmaster\u002F01_Introduction\u002F07_Working_with_Data_Sources\u002Fbirthweight_data\n相关章节的代码也已更新为引用此本地路径。","https:\u002F\u002Fgithub.com\u002Fnfmcclure\u002Ftensorflow_cookbook\u002Fissues\u002F74",{"id":137,"question_zh":138,"answer_zh":139,"source_url":140},31907,"Skip-gram 和 CBOW 模型训练时，相似度（余弦相似度）结果始终不变或随机，未随训练更新，原因是什么？","这是一个代码逻辑错误。问题出在文本预处理函数 text_to_numbers 中，代码错误地按字符遍历句子而不是按单词遍历。请将循环语句：\nfor word in sentence:\n修改为：\nfor word in sentence.split():\n这样可以将句子正确分割为单词列表，确保嵌入向量基于单词而非字符进行训练，从而使相似度计算随训练收敛。","https:\u002F\u002Fgithub.com\u002Fnfmcclure\u002Ftensorflow_cookbook\u002Fissues\u002F69",{"id":142,"question_zh":143,"answer_zh":144,"source_url":145},31908,"运行序列到序列（seq2seq）翻译模型示例时出现 TypeError 错误，该如何修复？","该错误通常由 TensorFlow 版本兼容性或变量复用作用域设置引起。维护者已确认并在代码库中修复了此 bug。如果遇到此问题，请首先确保拉取了仓库的最新代码。若问题依旧，请检查您的 Python 和 TensorFlow 版本，并参考 TensorFlow 官方关于 variable_scope 复用的文档，确保在创建测试模型时正确使用了 tf.variable_scope(..., reuse=True)。","https:\u002F\u002Fgithub.com\u002Fnfmcclure\u002Ftensorflow_cookbook\u002Fissues\u002F94",[]]