[Andrew Ng Deep Learning] Residual Networks (PyTorch)
Link to the Keras version
Imports
import numpy as np  # needed below for the NHWC -> NCHW transpose
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from resnets_utils import *  # provides load_dataset() for the SIGNS data
The Dataset class
class MyDataset(Dataset):
    """Wraps paired feature/label arrays so a DataLoader can batch and shuffle them."""

    def __init__(self, x, y):
        super(MyDataset, self).__init__()
        assert x.shape[0] == y.shape[0]  # one label per example
        self.x = x
        self.y = y

    def __len__(self):
        return self.x.shape[0]

    def __getitem__(self, item):
        return self.x[item], self.y[item]
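A minimal sanity check, using placeholder arrays shaped like the SIGNS data loaded later:

ds = MyDataset(np.zeros((10, 3, 64, 64)), np.zeros((10, 1)))
print(len(ds))             # 10
x0, y0 = ds[0]
print(x0.shape, y0.shape)  # (3, 64, 64) (1,)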
The Flatten class
class Flatten(nn.Module):
    """Flattens dimensions start_dim..end_dim, mirroring nn.Flatten in newer PyTorch releases."""

    def __init__(self, start_dim=1, end_dim=-1):
        super(Flatten, self).__init__()
        self.start_dim = start_dim
        self.end_dim = end_dim

    def forward(self, input):
        return input.flatten(self.start_dim, self.end_dim)
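Newer PyTorch releases ship nn.Flatten with the same signature, so this custom class can be swapped out there; a quick check of the behaviour (the shape matches what the classifier head sees later):

t = torch.randn(4, 2048, 1, 1)
print(Flatten()(t).shape)       # torch.Size([4, 2048])
# print(nn.Flatten()(t).shape)  # same result on recent PyTorch versions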
The identity block
class IdentityBlock(nn.Module):
    """Residual block whose shortcut is the identity: input and output shapes match."""

    def __init__(self, channels, f):
        super(IdentityBlock, self).__init__()
        channel1, channel2, channel3, channel4 = channels
        # padding=0 matches Keras 'valid'; padding=(f - 1) // 2 matches 'same' for odd f at stride 1
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels=channel1, out_channels=channel2, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(num_features=channel2),
            nn.ReLU(),
            nn.Conv2d(in_channels=channel2, out_channels=channel3, kernel_size=f, stride=1, padding=(f - 1) // 2),
            nn.BatchNorm2d(num_features=channel3),
            nn.ReLU(),
            nn.Conv2d(in_channels=channel3, out_channels=channel4, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(num_features=channel4),
        )

    def forward(self, input):
        x_shortcut = input
        x = self.conv(input)
        x = x_shortcut + x  # add the skip connection before the final ReLU
        x = F.relu(x)
        return x
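Because the shortcut is added elementwise, channel1 must equal channel4 and the spatial size is preserved; a quick shape check (dimensions match the stage-2 blocks used later):

block = IdentityBlock(channels=[256, 64, 64, 256], f=3)
x = torch.randn(2, 256, 16, 16)
print(block(x).shape)  # torch.Size([2, 256, 16, 16]), identical to the input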
The convolutional block
class ConvolutionalBlock(nn.Module):
    """Residual block with a 1x1 convolution on the shortcut, used where dimensions change."""

    def __init__(self, channels, f, s):
        super(ConvolutionalBlock, self).__init__()
        channel1, channel2, channel3, channel4 = channels
        # main path: 1x1 (stride s) -> fxf ('same') -> 1x1
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=channel1, out_channels=channel2, kernel_size=1, stride=s, padding=0),
            nn.BatchNorm2d(num_features=channel2),
            nn.ReLU(),
            nn.Conv2d(in_channels=channel2, out_channels=channel3, kernel_size=f, stride=1, padding=(f - 1) // 2),
            nn.BatchNorm2d(num_features=channel3),
            nn.ReLU(),
            nn.Conv2d(in_channels=channel3, out_channels=channel4, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(num_features=channel4),
        )
        # shortcut path: 1x1 convolution so shapes match for the addition
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels=channel1, out_channels=channel4, kernel_size=1, stride=s, padding=0),
            nn.BatchNorm2d(num_features=channel4),
        )

    def forward(self, input):
        x = self.conv1(input)
        x_shortcut = self.conv2(input)
        x = x + x_shortcut
        x = F.relu(x)
        return x
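Here the 1x1 shortcut convolution lets both the channel count and the spatial size change; a quick check with the stage-3 configuration (stride 2 halves the spatial dimensions):

block = ConvolutionalBlock(channels=[256, 128, 128, 512], f=3, s=2)
x = torch.randn(2, 256, 16, 16)
print(block(x).shape)  # torch.Size([2, 512, 8, 8])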
ResNet50
class ResNet50(nn.Module):
    """ResNet-50 for 64x64x3 inputs, following the stage layout of the course assignment."""

    def __init__(self, classes=6):
        super(ResNet50, self).__init__()
        self.net = nn.Sequential(
            # stage 1
            nn.ZeroPad2d(padding=(3, 3, 3, 3)),
            nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2, padding=0),
            nn.BatchNorm2d(num_features=64),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2),
            # stage 2
            ConvolutionalBlock(channels=[64, 64, 64, 256], f=3, s=1),
            IdentityBlock(channels=[256, 64, 64, 256], f=3),
            IdentityBlock(channels=[256, 64, 64, 256], f=3),
            # stage 3
            ConvolutionalBlock(channels=[256, 128, 128, 512], f=3, s=2),
            IdentityBlock(channels=[512, 128, 128, 512], f=3),
            IdentityBlock(channels=[512, 128, 128, 512], f=3),
            IdentityBlock(channels=[512, 128, 128, 512], f=3),
            # stage 4
            ConvolutionalBlock(channels=[512, 256, 256, 1024], f=3, s=2),
            IdentityBlock(channels=[1024, 256, 256, 1024], f=3),
            IdentityBlock(channels=[1024, 256, 256, 1024], f=3),
            IdentityBlock(channels=[1024, 256, 256, 1024], f=3),
            IdentityBlock(channels=[1024, 256, 256, 1024], f=3),
            IdentityBlock(channels=[1024, 256, 256, 1024], f=3),
            # stage 5
            ConvolutionalBlock(channels=[1024, 512, 512, 2048], f=3, s=2),
            IdentityBlock(channels=[2048, 512, 512, 2048], f=3),
            IdentityBlock(channels=[2048, 512, 512, 2048], f=3),
            # classifier head
            nn.AvgPool2d(kernel_size=2),
            Flatten(),
            nn.Linear(2048, classes),
        )

    def forward(self, input):
        return self.net(input)
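A throwaway forward pass verifies the wiring; with 64x64 inputs (the size used below), the feature map reaching AvgPool2d is 2048 channels at 2x2, so the head reduces each image to a 2048-vector before the linear classifier:

net = ResNet50()
dummy = torch.randn(2, 3, 64, 64)  # batch of 2 so BatchNorm has statistics to work with
print(net(dummy).shape)            # torch.Size([2, 6]), one logit per class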
Loading and preprocessing the dataset
# Load the SIGNS dataset (load_dataset comes from resnets_utils)
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()

# Normalize image vectors
X_train = X_train_orig / 255.
X_test = X_test_orig / 255.

# Reorder from NHWC to NCHW, the layout PyTorch convolutions expect
X_train = np.transpose(X_train, [0, 3, 1, 2])
X_test = np.transpose(X_test, [0, 3, 1, 2])

Y_train = Y_train_orig.T
Y_test = Y_test_orig.T

print("number of training examples = " + str(X_train.shape[0]))
print("number of test examples = " + str(X_test.shape[0]))
print("X_train shape: " + str(X_train.shape))
print("Y_train shape: " + str(Y_train.shape))
print("X_test shape: " + str(X_test.shape))
print("Y_test shape: " + str(Y_test.shape))
Building the network, optimizer, and loss function
model = ResNet50()
optimizer = optim.Adam(model.parameters())
criterion = nn.CrossEntropyLoss()
epochs = 2
batch_size = 32
train_dataset = MyDataset(X_train, Y_train)
train_data = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
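A quick look at one batch confirms the pipeline; the default collate function turns the numpy arrays into tensors, but x is still float64 at this point, hence the .float() cast in the loop below (shapes assume batch_size=32):

x, y = next(iter(train_data))
print(x.shape, y.shape)  # torch.Size([32, 3, 64, 64]) torch.Size([32, 1])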
Training
model.train()
for epoch in range(epochs):
    for i, (x, y) in enumerate(train_data):
        x = x.float()           # inputs arrive as float64; convert to float32
        y = y.long().squeeze()  # CrossEntropyLoss expects class indices of shape (N,)
        optimizer.zero_grad()
        y_hat = model(x)
        loss = criterion(y_hat, y)
        loss.backward()
        optimizer.step()
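The loop above trains silently; for visibility, a variant that also reports the mean training loss per epoch (purely a convenience, not part of the original assignment):

for epoch in range(epochs):
    epoch_loss = 0.0
    for x, y in train_data:
        x, y = x.float(), y.long().squeeze()
        optimizer.zero_grad()
        loss = criterion(model(x), y)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item() * x.shape[0]  # un-average so the smaller final batch is weighted correctly
    print("epoch %d: mean loss = %.4f" % (epoch, epoch_loss / len(train_dataset)))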
Testing
model.eval()
with torch.no_grad():  # no gradients needed for evaluation
    x = torch.tensor(X_test).float()
    y = torch.tensor(Y_test).long().squeeze()
    y_hat = model(x)
    loss = criterion(y_hat, y)
    print("Loss = ", loss.item())
    y_hat = torch.argmax(y_hat, dim=-1)  # predicted class per image
    correct_prediction = y_hat == y
    test_accuracy = torch.sum(correct_prediction).float() / y.shape[0]
    print("Test Accuracy = ", test_accuracy.item())
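Pushing all of X_test through the network in one call is fine for the small SIGNS test set but will not scale to larger datasets; a batched variant reusing MyDataset (batch size chosen arbitrarily):

test_data = DataLoader(dataset=MyDataset(X_test, Y_test), batch_size=32)
correct = 0
with torch.no_grad():
    for x, y in test_data:
        pred = torch.argmax(model(x.float()), dim=-1)
        correct += (pred == y.long().squeeze()).sum().item()
print("Test Accuracy = ", correct / len(X_test))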