Contents

  • 1. Implementing the Basic Block
  • 2. Implementing the Res Block
  • 3. Implementing ResNet
  • 4. Implementing ResNet-18
  • 5. ResNet-18 in Practice

1. Implementing the Basic Block

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, optimizers, Sequential  # layer-building utilities

class BasicBlock(layers.Layer):
    def __init__(self, filter_num, stride=1):
        super(BasicBlock, self).__init__()
        # Even at strides=1 a plain 3x3 conv would make the output slightly
        # smaller than the input, so padding='same' keeps the two sizes equal.
        # first convolution unit: conv + bn + relu
        self.conv1 = layers.Conv2D(filter_num, kernel_size=(3, 3), strides=stride, padding='same')
        self.bn1 = layers.BatchNormalization()
        self.relu = layers.Activation('relu')  # relu has no parameters, so one instance can be reused
        # second convolution unit: conv + bn
        # (strides=1 here, not stride: only the first conv of the block may down-sample)
        self.conv2 = layers.Conv2D(filter_num, kernel_size=(3, 3), strides=1, padding='same')
        self.bn2 = layers.BatchNormalization()
        # shortcut branch
        if stride != 1:
            # a 1x1 conv matches the shortcut to the main branch's shape
            self.downsample = Sequential()
            self.downsample.add(layers.Conv2D(filter_num, kernel_size=(1, 1), strides=stride))
        else:
            self.downsample = lambda x: x

    # forward pass
    def call(self, inputs, training=None):
        # inputs: [b, h, w, c]
        out = self.conv1(inputs)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        identity = self.downsample(inputs)
        output = layers.add([out, identity])
        output = self.relu(output)
        # output = tf.nn.relu(output)
        return output
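As a quick sanity check (a minimal sketch added here, not part of the original code; the input shape and filter count are arbitrary), the block can be run on a random tensor to confirm that the residual branch and the shortcut produce matching shapes:

x = tf.random.normal([4, 32, 32, 64])
block = BasicBlock(filter_num=128, stride=2)  # stride=2 halves h and w, so the 1x1 shortcut conv kicks in
out = block(x)
print(out.shape)  # expected: (4, 16, 16, 128)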

2. Implementing the Res Block


    def build_resblock(self, filter_num, blocks, stride=1):
        res_blocks = Sequential()
        # down-sampling: only the first BasicBlock in the stack is given the stride
        res_blocks.add(BasicBlock(filter_num, stride))
        for _ in range(1, blocks):
            res_blocks.add(BasicBlock(filter_num, stride=1))
        return res_blocks
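To make the stacking concrete, here is layer2 of ResNet-18 unrolled by hand (an illustrative sketch, equivalent to build_resblock(128, 2, stride=2)):

layer2 = Sequential()
layer2.add(BasicBlock(128, stride=2))  # first block down-samples and widens the channels
layer2.add(BasicBlock(128, stride=1))  # remaining blocks keep the resolution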

3. Implementing ResNet

class ResNet(keras.Model):
    # layer_dims, e.g. [2, 2, 2, 2]: four Res Blocks, each containing two BasicBlocks
    # num_classes=100: number of output classes
    def __init__(self, layer_dims, num_classes=100):
        super(ResNet, self).__init__()
        # stem (preprocessing) layers
        self.stem = Sequential([
            layers.Conv2D(64, kernel_size=(3, 3), strides=(1, 1)),
            layers.BatchNormalization(),
            layers.Activation('relu'),
            layers.MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding='same')  # pooling layer
        ])
        # by convention the channel count grows while the feature map shrinks
        self.layer1 = self.build_resblock(64, layer_dims[0])
        self.layer2 = self.build_resblock(128, layer_dims[1], stride=2)
        self.layer3 = self.build_resblock(256, layer_dims[2], stride=2)
        self.layer4 = self.build_resblock(512, layer_dims[3], stride=2)
        # output: [b, h, w, 512]; h and w are not known in advance, so use an
        # adaptive pooling layer: it averages each of the 512 feature maps down
        # to a single value and passes the 512 values to the next layer
        self.avgpool = layers.GlobalAveragePooling2D()
        # fully connected layer for classification
        self.fc = layers.Dense(num_classes)

    def call(self, inputs, training=None):
        x = self.stem(inputs)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # [b, c]
        x = self.avgpool(x)
        # [b, c] => [b, 100]
        x = self.fc(x)
        return x

    def build_resblock(self, filter_num, blocks, stride=1):
        res_blocks = Sequential()
        # down-sampling: only the first BasicBlock is given the stride
        res_blocks.add(BasicBlock(filter_num, stride))
        for _ in range(1, blocks):
            res_blocks.add(BasicBlock(filter_num, stride=1))
        return res_blocks
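The "18" counts the weight layers: 1 stem convolution + 2 convolutions in each of the 2+2+2+2 BasicBlocks + 1 fully connected layer = 18. A minimal forward-pass check (my own sketch, assuming the classes above are in scope, with a CIFAR-sized input):

model = ResNet([2, 2, 2, 2], num_classes=100)
x = tf.random.normal([1, 32, 32, 3])
print(model(x).shape)  # expected: (1, 100)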

4. Implementing ResNet-18

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Sequential


class BasicBlock(layers.Layer):
    def __init__(self, filter_num, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = layers.Conv2D(filter_num, (3, 3), strides=stride, padding='same')
        self.bn1 = layers.BatchNormalization()
        self.relu = layers.Activation('relu')
        self.conv2 = layers.Conv2D(filter_num, (3, 3), strides=1, padding='same')
        self.bn2 = layers.BatchNormalization()
        if stride != 1:
            self.downsample = Sequential()
            self.downsample.add(layers.Conv2D(filter_num, (1, 1), strides=stride))
        else:
            self.downsample = lambda x: x

    def call(self, inputs, training=None):
        # [b, h, w, c]
        out = self.conv1(inputs)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        identity = self.downsample(inputs)
        output = layers.add([out, identity])
        output = tf.nn.relu(output)
        return output


class ResNet(keras.Model):
    def __init__(self, layer_dims, num_classes=100):  # e.g. [2, 2, 2, 2]
        super(ResNet, self).__init__()
        self.stem = Sequential([
            layers.Conv2D(64, (3, 3), strides=(1, 1)),
            layers.BatchNormalization(),
            layers.Activation('relu'),
            layers.MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding='same')
        ])
        self.layer1 = self.build_resblock(64,  layer_dims[0])
        self.layer2 = self.build_resblock(128, layer_dims[1], stride=2)
        self.layer3 = self.build_resblock(256, layer_dims[2], stride=2)
        self.layer4 = self.build_resblock(512, layer_dims[3], stride=2)
        # output: [b, h, w, 512]
        self.avgpool = layers.GlobalAveragePooling2D()
        self.fc = layers.Dense(num_classes)

    def call(self, inputs, training=None):
        x = self.stem(inputs)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # [b, c]
        x = self.avgpool(x)
        # [b, 100]
        x = self.fc(x)
        return x

    def build_resblock(self, filter_num, blocks, stride=1):
        res_blocks = Sequential()
        # may down-sample
        res_blocks.add(BasicBlock(filter_num, stride))
        for _ in range(1, blocks):
            res_blocks.add(BasicBlock(filter_num, stride=1))
        return res_blocks


def resnet18():
    return ResNet([2, 2, 2, 2])


def resnet34():
    return ResNet([3, 4, 6, 3])
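The same counting gives ResNet-34 its name: 1 + 2 × (3 + 4 + 6 + 3) + 1 = 34 weight layers. Saved as resnet.py, the factory functions are used like this (a usage sketch mirroring the next section):

# assuming the file above is saved as resnet.py
from resnet import resnet18

model = resnet18()
model.build(input_shape=(None, 32, 32, 3))
model.summary()  # parameter counts as printed in section 5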

5. ResNet-18 in Practice

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, optimizers, datasets, Sequential
from resnet import resnet18  # the resnet.py module from section 4

tf.random.set_seed(2345)


def preprocess(x, y):
    # scale pixels from [0, 255] to [-0.5, 0.5]
    x = tf.cast(x, dtype=tf.float32) / 255. - 0.5
    y = tf.cast(y, dtype=tf.int32)
    return x, y


(x, y), (x_test, y_test) = datasets.cifar100.load_data()
y = tf.squeeze(y, axis=1)
y_test = tf.squeeze(y_test, axis=1)
print(x.shape, y.shape, x_test.shape, y_test.shape)

train_db = tf.data.Dataset.from_tensor_slices((x, y))
train_db = train_db.shuffle(1000).map(preprocess).batch(128)
test_db = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_db = test_db.map(preprocess).batch(128)

sample = next(iter(train_db))
print('sample:', sample[0].shape, sample[1].shape,
      tf.reduce_min(sample[0]), tf.reduce_max(sample[0]))


def main():
    model = resnet18()
    model.build(input_shape=(None, 32, 32, 3))
    # print the model's parameter counts
    model.summary()
    optimizer = optimizers.Adam(lr=1e-3)

    for epoch in range(500):
        for step, (x, y) in enumerate(train_db):
            with tf.GradientTape() as tape:
                # [b, 32, 32, 3] => [b, 100]
                logits = model(x)
                # [b] => [b, 100]
                y_onehot = tf.one_hot(y, depth=100)
                # compute loss
                loss = tf.losses.categorical_crossentropy(y_onehot, logits, from_logits=True)
                loss = tf.reduce_mean(loss)

            grads = tape.gradient(loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            if step % 50 == 0:
                print(epoch, step, 'loss:', float(loss))

        # evaluate on the test set after every epoch
        total_num = 0
        total_correct = 0
        for x, y in test_db:
            logits = model(x)
            prob = tf.nn.softmax(logits, axis=1)
            pred = tf.argmax(prob, axis=1)
            pred = tf.cast(pred, dtype=tf.int32)
            correct = tf.cast(tf.equal(pred, y), dtype=tf.int32)
            correct = tf.reduce_sum(correct)
            total_num += x.shape[0]
            total_correct += int(correct)

        acc = total_correct / total_num
        print(epoch, 'acc:', acc)


if __name__ == '__main__':
    main()
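One compatibility note: this script targets early TensorFlow 2.0. In newer releases the Keras optimizers take learning_rate rather than the deprecated lr alias, so the forward-compatible spelling would be:

optimizer = optimizers.Adam(learning_rate=1e-3)  # newer TF versions; lr= is deprecated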

A portion of the training output:

(50000, 32, 32, 3) (50000,) (10000, 32, 32, 3) (10000,)
sample: (256, 32, 32, 3) (256,) tf.Tensor(-0.5, shape=(), dtype=float32) tf.Tensor(0.5, shape=(), dtype=float32)
Model: "res_net"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
sequential (Sequential)      multiple                  2048
_________________________________________________________________
sequential_1 (Sequential)    multiple                  148736
_________________________________________________________________
sequential_2 (Sequential)    multiple                  526976
_________________________________________________________________
sequential_4 (Sequential)    multiple                  2102528
_________________________________________________________________
sequential_6 (Sequential)    multiple                  8399360
_________________________________________________________________
global_average_pooling2d (Gl multiple                  0
_________________________________________________________________
dense (Dense)                multiple                  51300
=================================================================
Total params: 11,230,948
Trainable params: 11,223,140
Non-trainable params: 7,808
_________________________________________________________________
0 0 loss: 4.604719638824463
0 50 loss: 4.561609745025635
0 100 loss: 4.337265491485596
0 150 loss: 4.3709611892700195
0 acc: 0.0803
1 0 loss: 4.024875164031982
1 50 loss: 3.8826417922973633
1 100 loss: 3.5792930126190186
1 150 loss: 3.672839641571045
1 acc: 0.1549
2 0 loss: 3.5927116870880127
2 50 loss: 3.357438564300537
2 100 loss: 3.4201531410217285
2 150 loss: 3.187776565551758
2 acc: 0.2268
3 0 loss: 3.1957569122314453
3 50 loss: 3.1121461391448975
3 100 loss: 2.817192316055298
3 150 loss: 2.813638210296631
3 acc: 0.2721
4 0 loss: 3.081834316253662
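As a check on the summary above: the stem's 2,048 parameters decompose as 3 × 3 × 3 × 64 + 64 = 1,792 for the convolution plus 4 × 64 = 256 for batch normalization (gamma, beta, and the two non-trainable moving statistics), and the final Dense layer holds 512 × 100 + 100 = 51,300 parameters. The 7,808 non-trainable parameters are exactly the moving mean and variance of every BatchNormalization layer (2 × 3,904 normalized channels).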
