darknet53网络tf.keras搭建

一、定义darknet块类

1 、darknet块网络结构

2、darknet块实现

# 定义darknet块类
# Darknet residual block: 1x1 conv -> 3x3 conv (each with BN + leaky ReLU),
# then an identity shortcut back to the block input.
class _ResidualBlock(tf.keras.Model):
    def __init__(self, filters, layer_idx, name=""):
        """
        :param filters: pair of filter counts, one per convolution
        :param layer_idx: pair of layer indices used to name the two convolutions
        :param name: name of this darknet block
        """
        super(_ResidualBlock, self).__init__(name=name)
        filters1, filters2 = filters
        layer1, layer2 = layer_idx
        layer_name1 = "layer_{}".format(str(layer1))
        layer_name2 = "layer_{}".format(str(layer2))
        # 1x1 bottleneck convolution + its batch norm
        self.conv2a = layers.Conv2D(filters1, (1, 1), padding='same',
                                    use_bias=False, name=layer_name1)
        self.bn2a = layers.BatchNormalization(epsilon=0.001, name=layer_name1)
        # 3x3 convolution + its batch norm
        self.conv2b = layers.Conv2D(filters2, (3, 3), padding='same',
                                    use_bias=False, name=layer_name2)
        self.bn2b = layers.BatchNormalization(epsilon=0.001, name=layer_name2)

    def call(self, input_tensor, training=False):
        """
        :param input_tensor: input tensor
        :param training: training-mode flag, forwarded to batch norm
        :return: block output (same shape as the input)
        """
        x = self.conv2a(input_tensor)
        x = self.bn2a(x, training=training)
        x = tf.nn.leaky_relu(x, alpha=0.1)
        x = self.conv2b(x)
        x = self.bn2b(x, training=training)
        x = tf.nn.leaky_relu(x, alpha=0.1)
        x += input_tensor  # shortcut connection
        return x

二、定义卷积池化块类——即步长为2的卷积

1、卷积池化块结构

2、卷积池化块类实现

# 定义卷积池化块类
# Downsampling block: asymmetric zero-padding followed by a stride-2 3x3
# convolution, batch norm and leaky ReLU (halves the spatial resolution).
class _ConvPoolBlock(tf.keras.Model):
    def __init__(self, filters, layer_idx, name=""):
        """
        :param filters: number of convolution filters
        :param layer_idx: layer index used to name this conv/BN pair
        :param name: block name
        """
        super(_ConvPoolBlock, self).__init__(name=name)
        layer_name = "layer_{}".format(str(layer_idx))
        # pad only top and left so the stride-2 'valid' conv halves H and W
        self.pad = layers.ZeroPadding2D(((1, 0), (1, 0)))
        self.conv = layers.Conv2D(filters, (3, 3), strides=(2, 2),
                                  padding='valid', use_bias=False,
                                  name=layer_name)
        self.bn = layers.BatchNormalization(epsilon=0.001, name=layer_name)

    def call(self, input_tensor, training=False):
        """
        :param input_tensor: input feature map
        :param training: training-mode flag, forwarded to batch norm
        :return: downsampled feature map
        """
        x = self.pad(input_tensor)
        x = self.conv(x)
        x = self.bn(x, training=training)
        return tf.nn.leaky_relu(x, alpha=0.1)

三、定义卷积块类

1、卷积块结构

四、由以上模块来组合成darknet53类

1、darknet53网络结构

2、darknet53类的实现

# 定义Darknet53类
# Darknet53 backbone: a stem conv plus five downsample/residual stages that
# emit three feature maps (stages 3, 4, 5) for the YOLOv3 detection heads.
class Darknet53(tf.keras.Model):
    def __init__(self):
        super(Darknet53, self).__init__(name='')
        # Build all sub-blocks; layer_idx numbers the conv layers 0..51 so
        # their weights can be located by name via get_variables().
        # (256, 256, 3)
        self.l0a = _ConvBlock(32, layer_idx=0, name="stage0")
        self.l0_pool = _ConvPoolBlock(64, layer_idx=1, name="stage0")
        # (128, 128, 64)
        self.l1a = _ResidualBlock([32, 64], layer_idx=[2, 3], name="stage1")
        self.l1_pool = _ConvPoolBlock(128, layer_idx=4, name="stage1")
        # (64, 64, 128)
        self.l2a = _ResidualBlock([64, 128], layer_idx=[5, 6], name="stage2")
        self.l2b = _ResidualBlock([64, 128], layer_idx=[7, 8], name="stage2")
        self.l2_pool = _ConvPoolBlock(256, layer_idx=9, name="stage2")
        # (32, 32, 256)
        self.l3a = _ResidualBlock([128, 256], layer_idx=[10, 11], name="stage3")
        self.l3b = _ResidualBlock([128, 256], layer_idx=[12, 13], name="stage3")
        self.l3c = _ResidualBlock([128, 256], layer_idx=[14, 15], name="stage3")
        self.l3d = _ResidualBlock([128, 256], layer_idx=[16, 17], name="stage3")
        self.l3e = _ResidualBlock([128, 256], layer_idx=[18, 19], name="stage3")
        self.l3f = _ResidualBlock([128, 256], layer_idx=[20, 21], name="stage3")
        self.l3g = _ResidualBlock([128, 256], layer_idx=[22, 23], name="stage3")
        self.l3h = _ResidualBlock([128, 256], layer_idx=[24, 25], name="stage3")
        self.l3_pool = _ConvPoolBlock(512, layer_idx=26, name="stage3")
        # (16, 16, 512)
        self.l4a = _ResidualBlock([256, 512], layer_idx=[27, 28], name="stage4")
        self.l4b = _ResidualBlock([256, 512], layer_idx=[29, 30], name="stage4")
        self.l4c = _ResidualBlock([256, 512], layer_idx=[31, 32], name="stage4")
        self.l4d = _ResidualBlock([256, 512], layer_idx=[33, 34], name="stage4")
        self.l4e = _ResidualBlock([256, 512], layer_idx=[35, 36], name="stage4")
        self.l4f = _ResidualBlock([256, 512], layer_idx=[37, 38], name="stage4")
        self.l4g = _ResidualBlock([256, 512], layer_idx=[39, 40], name="stage4")
        self.l4h = _ResidualBlock([256, 512], layer_idx=[41, 42], name="stage4")
        self.l4_pool = _ConvPoolBlock(1024, layer_idx=43, name="stage4")
        # (8, 8, 1024)
        self.l5a = _ResidualBlock([512, 1024], layer_idx=[44, 45], name="stage5")
        self.l5b = _ResidualBlock([512, 1024], layer_idx=[46, 47], name="stage5")
        self.l5c = _ResidualBlock([512, 1024], layer_idx=[48, 49], name="stage5")
        self.l5d = _ResidualBlock([512, 1024], layer_idx=[50, 51], name="stage5")
        self.num_layers = 52
        self._init_vars()  # dummy forward pass so all variables are created

    def call(self, input_tensor, training=False):
        """Run the backbone.

        :param input_tensor: batch of images, NHWC
        :param training: training-mode flag, forwarded to every batch norm
        :return: (stage3, stage4, stage5) feature maps
        """
        # NOTE: `training` is passed as a keyword everywhere — positional
        # forwarding (the original `self.l0a(x, training)`) relies on Keras
        # passing extra positional args through to call() and is fragile
        # across TF versions.
        x = self.l0a(input_tensor, training=training)
        x = self.l0_pool(x, training=training)
        x = self.l1a(x, training=training)
        x = self.l1_pool(x, training=training)
        x = self.l2a(x, training=training)
        x = self.l2b(x, training=training)
        x = self.l2_pool(x, training=training)
        for block in (self.l3a, self.l3b, self.l3c, self.l3d,
                      self.l3e, self.l3f, self.l3g, self.l3h):
            x = block(x, training=training)
        output_stage3 = x
        x = self.l3_pool(x, training=training)
        for block in (self.l4a, self.l4b, self.l4c, self.l4d,
                      self.l4e, self.l4f, self.l4g, self.l4h):
            x = block(x, training=training)
        output_stage4 = x
        x = self.l4_pool(x, training=training)
        for block in (self.l5a, self.l5b, self.l5c, self.l5d):
            x = block(x, training=training)
        output_stage5 = x
        return output_stage3, output_stage4, output_stage5

    def get_variables(self, layer_idx, suffix=None):
        """
        :param layer_idx: numeric layer index (0..51) whose variables to fetch
        :param suffix: optional variable-name suffix to narrow the match
        :return: list of variables whose name contains "layer_{idx}/[suffix]"
        """
        if suffix:
            find_name = "layer_{}/{}".format(layer_idx, suffix)
        else:
            find_name = "layer_{}/".format(layer_idx)
        return [v for v in self.variables if find_name in v.name]

    def _init_vars(self):
        """Run one forward pass on a random image to build all variables."""
        import numpy as np
        imgs = np.random.randn(1, 256, 256, 3).astype(np.float32)
        input_tensor = tf.constant(imgs)
        return self.call(input_tensor)

五、测试

if __name__ == '__main__':
    import numpy as np
    # enable eager execution (TF 1.x dynamic-graph mode)
    tf.enable_eager_execution()
    # fabricate a batch of fake images
    batch_size = 5
    height, width = 256, 256
    inputs = tf.random_uniform((batch_size, height, width, 3))
    # build the Darknet53 backbone
    darknet53 = Darknet53()
    # run the network's call() method
    # a,b,c = darknet53(inputs, training=False)
    a, b, c = darknet53._init_vars()
    # print the shapes of the three output feature maps
    print(a.shape, b.shape, c.shape)

结果:输出的是网络的三个尺度的特征图的形状

六、原码

import tensorflow as tflayers = tf.keras.layers# 定义Darknet53类
# Darknet53 backbone: a stem conv plus five downsample/residual stages that
# emit three feature maps (stages 3, 4, 5) for the YOLOv3 detection heads.
class Darknet53(tf.keras.Model):
    def __init__(self):
        super(Darknet53, self).__init__(name='')
        # Build all sub-blocks; layer_idx numbers the conv layers 0..51 so
        # their weights can be located by name via get_variables().
        # (256, 256, 3)
        self.l0a = _ConvBlock(32, layer_idx=0, name="stage0")
        self.l0_pool = _ConvPoolBlock(64, layer_idx=1, name="stage0")
        # (128, 128, 64)
        self.l1a = _ResidualBlock([32, 64], layer_idx=[2, 3], name="stage1")
        self.l1_pool = _ConvPoolBlock(128, layer_idx=4, name="stage1")
        # (64, 64, 128)
        self.l2a = _ResidualBlock([64, 128], layer_idx=[5, 6], name="stage2")
        self.l2b = _ResidualBlock([64, 128], layer_idx=[7, 8], name="stage2")
        self.l2_pool = _ConvPoolBlock(256, layer_idx=9, name="stage2")
        # (32, 32, 256)
        self.l3a = _ResidualBlock([128, 256], layer_idx=[10, 11], name="stage3")
        self.l3b = _ResidualBlock([128, 256], layer_idx=[12, 13], name="stage3")
        self.l3c = _ResidualBlock([128, 256], layer_idx=[14, 15], name="stage3")
        self.l3d = _ResidualBlock([128, 256], layer_idx=[16, 17], name="stage3")
        self.l3e = _ResidualBlock([128, 256], layer_idx=[18, 19], name="stage3")
        self.l3f = _ResidualBlock([128, 256], layer_idx=[20, 21], name="stage3")
        self.l3g = _ResidualBlock([128, 256], layer_idx=[22, 23], name="stage3")
        self.l3h = _ResidualBlock([128, 256], layer_idx=[24, 25], name="stage3")
        self.l3_pool = _ConvPoolBlock(512, layer_idx=26, name="stage3")
        # (16, 16, 512)
        self.l4a = _ResidualBlock([256, 512], layer_idx=[27, 28], name="stage4")
        self.l4b = _ResidualBlock([256, 512], layer_idx=[29, 30], name="stage4")
        self.l4c = _ResidualBlock([256, 512], layer_idx=[31, 32], name="stage4")
        self.l4d = _ResidualBlock([256, 512], layer_idx=[33, 34], name="stage4")
        self.l4e = _ResidualBlock([256, 512], layer_idx=[35, 36], name="stage4")
        self.l4f = _ResidualBlock([256, 512], layer_idx=[37, 38], name="stage4")
        self.l4g = _ResidualBlock([256, 512], layer_idx=[39, 40], name="stage4")
        self.l4h = _ResidualBlock([256, 512], layer_idx=[41, 42], name="stage4")
        self.l4_pool = _ConvPoolBlock(1024, layer_idx=43, name="stage4")
        # (8, 8, 1024)
        self.l5a = _ResidualBlock([512, 1024], layer_idx=[44, 45], name="stage5")
        self.l5b = _ResidualBlock([512, 1024], layer_idx=[46, 47], name="stage5")
        self.l5c = _ResidualBlock([512, 1024], layer_idx=[48, 49], name="stage5")
        self.l5d = _ResidualBlock([512, 1024], layer_idx=[50, 51], name="stage5")
        self.num_layers = 52
        self._init_vars()  # dummy forward pass so all variables are created

    def call(self, input_tensor, training=False):
        """Run the backbone.

        :param input_tensor: batch of images, NHWC
        :param training: training-mode flag, forwarded to every batch norm
        :return: (stage3, stage4, stage5) feature maps
        """
        # NOTE: `training` is passed as a keyword everywhere — positional
        # forwarding (the original `self.l0a(x, training)`) relies on Keras
        # passing extra positional args through to call() and is fragile
        # across TF versions.
        x = self.l0a(input_tensor, training=training)
        x = self.l0_pool(x, training=training)
        x = self.l1a(x, training=training)
        x = self.l1_pool(x, training=training)
        x = self.l2a(x, training=training)
        x = self.l2b(x, training=training)
        x = self.l2_pool(x, training=training)
        for block in (self.l3a, self.l3b, self.l3c, self.l3d,
                      self.l3e, self.l3f, self.l3g, self.l3h):
            x = block(x, training=training)
        output_stage3 = x
        x = self.l3_pool(x, training=training)
        for block in (self.l4a, self.l4b, self.l4c, self.l4d,
                      self.l4e, self.l4f, self.l4g, self.l4h):
            x = block(x, training=training)
        output_stage4 = x
        x = self.l4_pool(x, training=training)
        for block in (self.l5a, self.l5b, self.l5c, self.l5d):
            x = block(x, training=training)
        output_stage5 = x
        return output_stage3, output_stage4, output_stage5

    def get_variables(self, layer_idx, suffix=None):
        """
        :param layer_idx: numeric layer index (0..51) whose variables to fetch
        :param suffix: optional variable-name suffix to narrow the match
        :return: list of variables whose name contains "layer_{idx}/[suffix]"
        """
        if suffix:
            find_name = "layer_{}/{}".format(layer_idx, suffix)
        else:
            find_name = "layer_{}/".format(layer_idx)
        return [v for v in self.variables if find_name in v.name]

    def _init_vars(self):
        """Run one forward pass on a random image to build all variables."""
        import numpy as np
        imgs = np.random.randn(1, 256, 256, 3).astype(np.float32)
        input_tensor = tf.constant(imgs)
        return self.call(input_tensor)


# Convolution block class
# Basic convolution block: 3x3 stride-1 conv + batch norm + leaky ReLU.
class _ConvBlock(tf.keras.Model):
    def __init__(self, filters, layer_idx, name=""):
        """
        :param filters: number of convolution filters
        :param layer_idx: layer index used to name this conv/BN pair
        :param name: block name
        """
        super(_ConvBlock, self).__init__(name=name)
        layer_name = "layer_{}".format(str(layer_idx))
        self.conv = layers.Conv2D(filters, (3, 3), strides=(1, 1),
                                  padding='same', use_bias=False,
                                  name=layer_name)
        self.bn = layers.BatchNormalization(epsilon=0.001, name=layer_name)

    def call(self, input_tensor, training=False):
        """
        :param input_tensor: input feature map
        :param training: training-mode flag, forwarded to batch norm
        :return: convolved, normalized and activated feature map
        """
        out = self.conv(input_tensor)
        out = self.bn(out, training=training)
        out = tf.nn.leaky_relu(out, alpha=0.1)
        return out


# Convolution-pooling (downsampling) block class
# Downsampling block: pad top/left, then stride-2 3x3 conv + BN + leaky ReLU,
# halving the spatial resolution (replaces max pooling in Darknet53).
class _ConvPoolBlock(tf.keras.Model):
    def __init__(self, filters, layer_idx, name=""):
        """
        :param filters: number of convolution filters
        :param layer_idx: layer index used to name this conv/BN pair
        :param name: block name
        """
        super(_ConvPoolBlock, self).__init__(name=name)
        layer_name = "layer_{}".format(str(layer_idx))
        # asymmetric padding so the 'valid' stride-2 conv exactly halves H, W
        self.pad = layers.ZeroPadding2D(((1, 0), (1, 0)))
        self.conv = layers.Conv2D(filters, (3, 3), strides=(2, 2),
                                  padding='valid', use_bias=False,
                                  name=layer_name)
        self.bn = layers.BatchNormalization(epsilon=0.001, name=layer_name)

    def call(self, input_tensor, training=False):
        """
        :param input_tensor: input feature map
        :param training: training-mode flag, forwarded to batch norm
        :return: downsampled feature map
        """
        out = self.pad(input_tensor)
        out = self.conv(out)
        out = self.bn(out, training=training)
        out = tf.nn.leaky_relu(out, alpha=0.1)
        return out


# Darknet residual block class
# Residual block: 1x1 then 3x3 convolution (each with BN + leaky ReLU) and an
# identity shortcut, so the output has the same shape as the input.
class _ResidualBlock(tf.keras.Model):
    def __init__(self, filters, layer_idx, name=""):
        """
        :param filters: pair of filter counts for the two convolutions
        :param layer_idx: pair of layer indices naming the two convolutions
        :param name: name of this darknet block
        """
        super(_ResidualBlock, self).__init__(name=name)
        filters1, filters2 = filters
        layer1, layer2 = layer_idx
        layer_name1 = "layer_{}".format(str(layer1))
        layer_name2 = "layer_{}".format(str(layer2))
        self.conv2a = layers.Conv2D(filters1, (1, 1), padding='same',
                                    use_bias=False, name=layer_name1)
        self.bn2a = layers.BatchNormalization(epsilon=0.001, name=layer_name1)
        self.conv2b = layers.Conv2D(filters2, (3, 3), padding='same',
                                    use_bias=False, name=layer_name2)
        self.bn2b = layers.BatchNormalization(epsilon=0.001, name=layer_name2)

    def call(self, input_tensor, training=False):
        """
        :param input_tensor: input tensor
        :param training: training-mode flag, forwarded to batch norm
        :return: residual block output
        """
        out = self.conv2a(input_tensor)
        out = self.bn2a(out, training=training)
        out = tf.nn.leaky_relu(out, alpha=0.1)
        out = self.conv2b(out)
        out = self.bn2b(out, training=training)
        out = tf.nn.leaky_relu(out, alpha=0.1)
        out += input_tensor  # shortcut connection
        return out


# ============== Smoke test: verify the program above runs ==============
if __name__ == '__main__':
    import numpy as np

    # enable eager execution (TF 1.x dynamic-graph mode)
    tf.enable_eager_execution()

    # fabricate a batch of fake images
    batch_size = 5
    height, width = 256, 256
    inputs = tf.random_uniform((batch_size, height, width, 3))

    # build the Darknet53 backbone and run a forward pass
    darknet53 = Darknet53()
    # a,b,c = darknet53(inputs, training=False)
    a, b, c = darknet53._init_vars()

    # print the shapes of the three output feature maps
    print(a.shape, b.shape, c.shape)

yolov3从头实现(四)-- darknet53网络tf.keras搭建相关推荐

  1. 使用tf.keras搭建mnist手写数字识别网络

    使用tf.keras搭建mnist手写数字识别网络 目录 使用tf.keras搭建mnist手写数字识别网络 1.使用tf.keras.Sequential搭建序列模型 1.1 tf.keras.Se ...

  2. TensorFlow高阶 API: keras教程-使用tf.keras搭建mnist手写数字识别网络

    TensorFlow高阶 API:keras教程-使用tf.keras搭建mnist手写数字识别网络 目录 TensorFlow高阶 API:keras教程-使用tf.keras搭建mnist手写数字 ...

  3. 机器学习(七)——tf.keras搭建神经网络固定模式

    一.总纲(一般按照下面6步搭建) import--导入相关模块 train,test--指定训练集与测试集 model = tf.keras.models.Sequential--在Sequentia ...

  4. 掌声送给TensorFlow 2.0!用Keras搭建一个CNN | 入门教程

    作者 | Himanshu Rawlani 译者 | Monanfei,责编 | 琥珀 出品 | AI科技大本营(id:rgznai100) 2019 年 3 月 6 日,谷歌在 TensorFlow ...

  5. 网络教学系统搭建对入学教育有何影响?

    随着互联网的高速发展,目前的教学环境对高校教育工作机制和方法提出了更高的要求.在入学教育工作中,利用"互联网+"的优势,实行网络教学系统搭建项目,打破传统入学教育的时间与空间限制, ...

  6. yolov3从头实现(五)-- yolov3网络块

    yolov3网络块 一.上采样卷积块类 1.上采样卷积块结构 1*1的卷积作用是为了调整上采样输出的通道数 2.上采样卷积类的实现 # 上采样卷积类 class _Upsamling(tf.keras ...

  7. yolov3前向传播(一)-- darknet53网络解析与实现

    一.darknet53网络解析与实现 一.darknet53网络结构组成部分 darknet53网络是由 darknet块 下采样卷积块组成的 1.darknet块 1.1 darknet块网络结构 ...

  8. tf.keras CNN网络搭建笔记

    tf.keras CNN网络搭建笔记 这里写目录标题 tf.keras CNN网络搭建笔记 基本流程,以LeNet为例 创建Sequential模型 配置模型的学习流程 数据预处理 模型训练与验证 相 ...

  9. tf.keras 05: 使用Keras保存和加载不同格式的模型

    本文是 tf.keras 系列文章的第五篇.通过手写数字识别数据集介绍了 Keras 模型训练中的 检查点(checkpoint) 文件,.weights 文件,.pb 文件以及 .h5 文件等四种格 ...

最新文章

  1. numpy 数组 最大值、最小值、中位数、均值、标准差、方差、总和、排序、去重
  2. Excel删除区域名
  3. 小型数据中心规划和设计原则
  4. centos 升级mysql5.6_centos6.9升级mysql5.1到mysql5.6
  5. 如何实现微服务架构中的服务发现
  6. 2011年最新使用CSS3实现各种独特悬浮效果的教程
  7. C# 发邮件类可发送附件
  8. 上周热点回顾(5.16-5.22)
  9. 笔试算法题(28):删除乱序链表中的重复项 找出已经排好序的两个数组中的相同项...
  10. 回归素材(part3)--机器学习基础从入门到求职
  11. 图文详解cacti的安装和使用
  12. 从容 IT 人生路,开发工具伴我行——“葡萄城 30 周年”征文
  13. 获取脚本路径_如何快速有效的写ftp脚本
  14. 将数据插入SQL Server的方法
  15. I00020 计算卡特兰数函数
  16. 来了!最详细2019实习生退税完全攻略(适用于所有实习过的同学)
  17. 新概念英语2电子版_新概念英语读100遍,英语能超神
  18. 安卓应用,在吾手机上正常,在另外手机上崩溃,因为缺少so库
  19. 【实用】Word如何转换成PDF格式,文件转换三步免费搞定
  20. 我国云计算中心大盘点

热门文章

  1. 2016CCCC天梯--多项式A除以B
  2. hadoop相关问题
  3. markdown与latex:矩阵的书写
  4. java统计计数_java – 使用LongAdder计算统计计数器的最大值?
  5. ZooKeeper CentOS7上安装
  6. STM32CUBEF4 实现USB 虚拟串口
  7. Monkeyrunner脚本的录制与回放
  8. 优秀代码所具备的5大品质 你的代码呢?
  9. 网站建设软件—***系统(DianCMS)1.0 发布
  10. [转载] 【Python】向json文件中追加新的对象