1. A simple TensorFlow example

#!/usr/bin/python
# -*- coding: utf-8 -*-
# @Time:       2019/11/30 16:52
# @Author:     weiz
# @File:       09_up_and_running_with_tensorflow.py
# @Description:

# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals

# Common imports
import numpy as np
import os
import tensorflow as tf

# to make this notebook's output stable across runs
def reset_graph(seed=42):
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)

# To plot pretty figures
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12

# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "images"def save_fig(fig_id, tight_layout=True):path = os.path.join(PROJECT_ROOT_DIR, CHAPTER_ID, "up_and_running_with_tensorflow", fig_id + ".png")print("Saving figure", fig_id)if tight_layout:plt.tight_layout()plt.savefig(path, format='png', dpi=300)#### 1.创Creating and running a graph
reset_graph()
x = tf.Variable(3, name="x")
y = tf.Variable(4, name="y")
f = x*x*y + y + 2
print(f)

# Method 1: run each variable's initializer explicitly
sess = tf.Session()
sess.run(x.initializer)
sess.run(y.initializer)
result = sess.run(f)
print(result)
sess.close()

# Method 2: use a with block so the session is closed automatically
with tf.Session() as sess:
    x.initializer.run()
    y.initializer.run()
    result = f.eval()
print(result)

# Method 3: initialize all variables at once
init = tf.global_variables_initializer()
with tf.Session() as sess:
    init.run()
    result = f.eval()
print(result)

#### 2. Managing graphs
# default
reset_graph()
x1 = tf.Variable(1)
print(x1.graph is tf.get_default_graph())  # True

# managing the default graph
graph = tf.Graph()
with graph.as_default():
    x2 = tf.Variable(2)

print(x2.graph is graph)                   # True
print(x2.graph is tf.get_default_graph())  # False

#### 3. Node value lifecycle
w = tf.constant(3)
x = w + 2
y = x + 5
z = x * 3

with tf.Session() as sess:  # y and z are evaluated in two separate runs, so w and x are computed twice
    print(y.eval())  # 10
    print(z.eval())  # 15

with tf.Session() as sess:  # y and z are evaluated in a single run, so w and x are computed only once
    y_val, z_val = sess.run([y, z])
    print(y_val)  # 10
    print(z_val)  # 15
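
Note that while ordinary node values are dropped between graph runs, a Variable keeps its state for as long as the session is open. Below is a minimal sketch, not from the original post; the variable v and the inc op are names introduced here purely for illustration.

# added sketch (not in the original): a Variable's value persists across runs within one session
v = tf.Variable(0, name="v")
inc = tf.assign(v, v + 1)  # each run adds 1 to v and returns the new value
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    print(sess.run(inc))  # 1
    print(sess.run(inc))  # 2 -- the variable kept its value between the two runs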

2. Linear regression with TensorFlow

#!/usr/bin/python
# -*- coding: utf-8 -*-
# @Time:       2019/11/30 16:52
# @Author:     weiz
# @File:       09_up_and_running_with_tensorflow.py
# @Description:

# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals

# Common imports
import numpy as np
import os
import tensorflow as tf
from sklearn.datasets import fetch_california_housing

# to make this notebook's output stable across runs
def reset_graph(seed=42):
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)

reset_graph()

housing = fetch_california_housing()
m, n = housing.data.shape
housing_data_plus_bias = np.c_[np.ones((m, 1)), housing.data]

X = tf.constant(housing_data_plus_bias, dtype=tf.float32, name="X")
y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y")
XT = tf.transpose(X)
theta = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(XT, X)), XT), y)

with tf.Session() as sess:
    theta_value = theta.eval()
print(theta_value)
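
This computes the closed-form (normal equation) solution theta = (X^T X)^(-1) X^T y. As a quick follow-up that is not part of the original script, the resulting theta_value can be used to make predictions and measure the training MSE in plain NumPy:

# added sanity check (not in the original): predict with the closed-form theta and compute the MSE
predictions = housing_data_plus_bias.dot(theta_value)
training_mse = np.mean((predictions - housing.target.reshape(-1, 1)) ** 2)
print("Training MSE:", training_mse)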

Linear regression with pure NumPy

import numpy as np
from sklearn.datasets import fetch_california_housing

housing = fetch_california_housing()
m, n = housing.data.shape
housing_data_plus_bias = np.c_[np.ones((m, 1)), housing.data]

X = housing_data_plus_bias
y = housing.target.reshape(-1, 1)
theta_numpy = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(y)
print(theta_numpy)

Using scikit-learn

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.datasets import fetch_california_housing
lin_reg = LinearRegression()
housing = fetch_california_housing()
lin_reg.fit(housing.data, housing.target.reshape(-1, 1))
print(np.r_[lin_reg.intercept_.reshape(-1, 1), lin_reg.coef_.T])
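
All three approaches (the TensorFlow normal equation, pure NumPy, and scikit-learn) solve the same least-squares problem, so their parameter vectors should agree closely. A small check added here, assuming the NumPy snippet above was run in the same interpreter so that theta_numpy is still in scope:

# added check (not in the original): compare the scikit-learn and NumPy normal-equation solutions
sklearn_theta = np.r_[lin_reg.intercept_.reshape(-1, 1), lin_reg.coef_.T]
print(np.max(np.abs(theta_numpy - sklearn_theta)))  # should be very small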

Computing the gradients by hand

#!/usr/bin/python
# -*- coding: utf-8 -*-
# @Time:       2019/11/30 16:52
# @Author:     weiz
# @File:       09_up_and_running_with_tensorflow.py
# @Description:

# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals

# Common imports
import numpy as np
import os
import tensorflow as tf

# to make this notebook's output stable across runs
def reset_graph(seed=42):
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)

from sklearn.datasets import fetch_california_housing
from sklearn.preprocessing import StandardScaler
housing = fetch_california_housing()
m, n = housing.data.shape
scaler = StandardScaler()
scaled_housing_data = scaler.fit_transform(housing.data)
scaled_housing_data_plus_bias = np.c_[np.ones((m, 1)), scaled_housing_data]

reset_graph()

n_epochs = 1000
learning_rate = 0.01

X = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name="X")
y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y")
theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name="theta")
y_pred = tf.matmul(X, theta, name="predictions")
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name="mse")
gradients = 2 / m * tf.matmul(tf.transpose(X), error)  # dMSE/dtheta = (2/m) * X^T * (X*theta - y)
training_op = tf.assign(theta, theta - learning_rate * gradients)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        if epoch % 100 == 0:
            print("Epoch", epoch, "MSE =", mse.eval())
        sess.run(training_op)
    best_theta = theta.eval()

Using autodiff

#!/usr/bin/python
# -*- coding: utf-8 -*-
# @Time:       2019/11/30 16:52
# @Author:     weiz
# @File:       09_up_and_running_with_tensorflow.py
# @Description:

# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals

# Common imports
import numpy as np
import tensorflow as tf

# to make this notebook's output stable across runs
def reset_graph(seed=42):
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)

from sklearn.datasets import fetch_california_housing
from sklearn.preprocessing import StandardScaler
housing = fetch_california_housing()
m, n = housing.data.shape
scaler = StandardScaler()
scaled_housing_data = scaler.fit_transform(housing.data)
scaled_housing_data_plus_bias = np.c_[np.ones((m, 1)), scaled_housing_data]

reset_graph()

n_epochs = 1000
learning_rate = 0.01

X = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name="X")
y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y")
theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name="theta")
y_pred = tf.matmul(X, theta, name="predictions")
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name="mse")

gradients = tf.gradients(mse, [theta])[0]
training_op = tf.assign(theta, theta - learning_rate * gradients)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        if epoch % 100 == 0:
            print("Epoch", epoch, "MSE =", mse.eval())
        sess.run(training_op)
    best_theta = theta.eval()

print("Best theta:")
print(best_theta)

Taking partial derivatives of an arbitrary function

The original function:

def my_func(a, b):
    z = 0
    for i in range(100):
        z = a * np.cos(z + i) + z * np.sin(b - i)
    return z

print(my_func(0.2, 0.3))  # -0.21253923284754914

Using TensorFlow:

reset_graph()

a = tf.Variable(0.2, name="a")
b = tf.Variable(0.3, name="b")
z = tf.constant(0.0, name="z0")
for i in range(100):
    z = a * tf.cos(z + i) + z * tf.sin(b - i)

grads = tf.gradients(z, [a, b])
init = tf.global_variables_initializer()

with tf.Session() as sess:
    init.run()
    print(z.eval())         # -0.21253741
    print(sess.run(grads))  # [-1.1388495, 0.19671397]
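
As an optional check that is not in the original post, the partial derivatives returned by tf.gradients can be approximated with central finite differences on the pure-NumPy my_func defined above (assuming it is still in scope):

# added numerical check of dz/da and dz/db via central finite differences
eps = 1e-5
dz_da = (my_func(0.2 + eps, 0.3) - my_func(0.2 - eps, 0.3)) / (2 * eps)
dz_db = (my_func(0.2, 0.3 + eps) - my_func(0.2, 0.3 - eps)) / (2 * eps)
print(dz_da, dz_db)  # should be close to -1.1388495 and 0.19671397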

Using an optimizer:

#!/usr/bin/python
# -*- coding: utf-8 -*-
# @Time:       2019/11/30 16:52
# @Author:     weiz
# @File:       09_up_and_running_with_tensorflow.py
# @Description:

# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals

# Common imports
import numpy as np
import tensorflow as tf

# to make this notebook's output stable across runs
def reset_graph(seed=42):
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)

from sklearn.datasets import fetch_california_housing
from sklearn.preprocessing import StandardScaler
housing = fetch_california_housing()
m, n = housing.data.shape
scaler = StandardScaler()
scaled_housing_data = scaler.fit_transform(housing.data)
scaled_housing_data_plus_bias = np.c_[np.ones((m, 1)), scaled_housing_data]

reset_graph()

n_epochs = 1000
learning_rate = 0.01

X = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name="X")
y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y")
theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name="theta")
y_pred = tf.matmul(X, theta, name="predictions")
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name="mse")

optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(mse)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        if epoch % 100 == 0:
            print("Epoch", epoch, "MSE =", mse.eval())
        sess.run(training_op)
    best_theta = theta.eval()

print("Best theta:")
print(best_theta)

Using the Momentum optimizer

#!/usr/bin/python
# -*- coding: utf-8 -*-
# @Time:       2019/11/30 16:52
# @Author:     weiz
# @File:       09_up_and_running_with_tensorflow.py
# @Description:

# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals

# Common imports
import numpy as np
import tensorflow as tf

# to make this notebook's output stable across runs
def reset_graph(seed=42):
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)

from sklearn.datasets import fetch_california_housing
from sklearn.preprocessing import StandardScaler
housing = fetch_california_housing()
m, n = housing.data.shape
scaler = StandardScaler()
scaled_housing_data = scaler.fit_transform(housing.data)
scaled_housing_data_plus_bias = np.c_[np.ones((m, 1)), scaled_housing_data]

reset_graph()

n_epochs = 1000
learning_rate = 0.01

X = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name="X")
y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y")
theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name="theta")
y_pred = tf.matmul(X, theta, name="predictions")
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name="mse")

optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)
training_op = optimizer.minimize(mse)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        if epoch % 100 == 0:
            print("Epoch", epoch, "MSE =", mse.eval())
        sess.run(training_op)
    best_theta = theta.eval()

print("Best theta:")
print(best_theta)

Using mini-batch gradient descent:

Placeholder node example:

reset_graph()

A = tf.placeholder(tf.float32, shape=(None, 3))
B = A + 5
with tf.Session() as sess:
    B_val_1 = B.eval(feed_dict={A: [[1, 2, 3]]})
    B_val_2 = B.eval(feed_dict={A: [[4, 5, 6], [7, 8, 9]]})

print(B_val_1)  # [[6. 7. 8.]]
print(B_val_2)  # [[ 9. 10. 11.]
                #  [12. 13. 14.]]

With placeholders in place, the full mini-batch gradient descent script:

#!/usr/bin/python
# -*- coding: utf-8 -*-
# @Time:       2019/11/30 16:52
# @Author:     weiz
# @File:       09_up_and_running_with_tensorflow.py
# @Description:

# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals

# Common imports
import numpy as np
import tensorflow as tf

# to make this notebook's output stable across runs
def reset_graph(seed=42):
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)

from sklearn.datasets import fetch_california_housing
from sklearn.preprocessing import StandardScaler
housing = fetch_california_housing()
m, n = housing.data.shape
scaler = StandardScaler()
scaled_housing_data = scaler.fit_transform(housing.data)
scaled_housing_data_plus_bias = np.c_[np.ones((m, 1)), scaled_housing_data]

reset_graph()

n_epochs = 1000
learning_rate = 0.01

X = tf.placeholder(tf.float32, shape=(None, n + 1), name="X")
y = tf.placeholder(tf.float32, shape=(None, 1), name="y")
theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name="theta")
y_pred = tf.matmul(X, theta, name="predictions")
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name="mse")
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(mse)

init = tf.global_variables_initializer()
n_epochs = 10
batch_size = 100
n_batches = int(np.ceil(m / batch_size))

def fetch_batch(epoch, batch_index, batch_size):
    np.random.seed(epoch * n_batches + batch_index)  # not shown in the book
    indices = np.random.randint(m, size=batch_size)  # not shown
    X_batch = scaled_housing_data_plus_bias[indices] # not shown
    y_batch = housing.target.reshape(-1, 1)[indices] # not shown
    return X_batch, y_batch

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        for batch_index in range(n_batches):
            X_batch, y_batch = fetch_batch(epoch, batch_index, batch_size)
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
    best_theta = theta.eval()

print(best_theta)
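
As a quick follow-up that is an addition and not part of the original post, the quality of best_theta can be checked by computing the full-dataset MSE in NumPy:

# added check (not in the original): evaluate the trained parameters on the whole scaled dataset
final_predictions = scaled_housing_data_plus_bias.dot(best_theta)
final_mse = np.mean((final_predictions - housing.target.reshape(-1, 1)) ** 2)
print("Final MSE:", final_mse)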
