flyai.exe train -p=1 -b=64 -e=6000

Replace net_add_conv5_conv6_py with net_conv1_conv2_conv3_conv4_py.

score : 82.21

flyai.exe train -p=1 -b=64 -e=6000

score : 85.15

Change the model-saving strategy, replacing

    # if the test accuracy exceeds the current best, save the model
    train_accuracy = eval(model, x_test, y_test)
    if train_accuracy > best_accuracy:
        best_accuracy = train_accuracy
        model.save_model(cnn, MODEL_PATH, overwrite=True)
        print("step %d, best accuracy %g" % (i, best_accuracy))

with

    if i == args.EPOCHS - 1:
        model.save_model(cnn, MODEL_PATH, overwrite=True)
        print("step %d, the model is saved" % (i))
    if i == args.EPOCHS:  # note: never true, since i only reaches EPOCHS - 1
        model.save_model(cnn, MODEL_PATH, overwrite=True)
        print("step %d, the model is saved" % (i))
    print(str(i) + "/" + str(args.EPOCHS))
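The hard-coded step checks can be generalized to one condition. A sketch, where SAVE_EVERY is a hypothetical constant and not part of the original script:

    # Sketch: save every SAVE_EVERY steps and always at the last step.
    SAVE_EVERY = 1000  # hypothetical interval
    if (i + 1) % SAVE_EVERY == 0 or i == args.EPOCHS - 1:
        model.save_model(cnn, MODEL_PATH, overwrite=True)
        print("step %d, the model is saved" % i)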

main.py

Same as the main.py of net_conv5_conv6:

    cnn = Net().to(device)
    optimizer = Adam(cnn.parameters(), lr=0.0005, betas=(0.99999999, 0.999999999999))  # use the Adam optimizer
    """
    Implements the Adam algorithm, proposed in "Adam: A Method for Stochastic Optimization"
    (https://arxiv.org/pdf/1412.6980.pdf). Parameters:
        params (iterable) - iterable of parameters to optimize, or dicts defining parameter groups
        lr (float, optional) - learning rate (default: 1e-3)
        betas (Tuple[float, float], optional) - coefficients used for computing running averages of the gradient and its square (default: (0.9, 0.999))
        eps (float, optional) - term added to the denominator to improve numerical stability (default: 1e-8)
        weight_decay (float, optional) - weight decay (L2 penalty) (default: 0)
    """
    # optimizer = SGD(cnn.parameters(), lr=1e-4, momentum=0.99997)  # use the SGD optimizer (stochastic gradient descent)
    # Models trained with adaptive optimizers often end up worse than models trained with SGD,
    # even though the adaptive methods look better during training; use adaptive optimizers with care.
    """
    Momentum acts like inertia: the current gradient is weighted against the previous update.
    - If the directions agree, the contributions accumulate and the step grows.
    - If they disagree, they partially cancel and the update is damped.
    """
    loss_fn = nn.CrossEntropyLoss()  # define the loss function
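With betas this close to 1, Adam's running averages barely move: an exponential moving average with coefficient beta averages over roughly 1/(1-beta) steps. A quick standalone check of the horizons involved, not part of the training script:

    # Effective averaging horizon of an exponential moving average is ~1 / (1 - beta).
    for beta in (0.9, 0.999, 0.99999999):
        print("beta=%s -> ~%g steps" % (beta, 1.0 / (1.0 - beta)))
    # beta=0.9        -> ~10 steps      (PyTorch default for the first moment)
    # beta=0.999      -> ~1000 steps    (PyTorch default for the second moment)
    # beta=0.99999999 -> ~1e+08 steps   (the value used above; far longer than training runs)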

net.py

    # build CNN
    from torch import nn

    class Net(nn.Module):
        # def __init__(self, num_classes=10):
        def __init__(self):
            super(Net, self).__init__()
            self.conv1 = nn.Conv2d(3, 32, 5, stride=1, padding=2)
            self.relu1 = nn.ReLU(True)
            self.bn1 = nn.BatchNorm2d(32)
            self.pool1 = nn.MaxPool2d(2, 2)
            self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=1)
            self.relu2 = nn.ReLU(True)
            self.bn2 = nn.BatchNorm2d(64)
            self.pool2 = nn.MaxPool2d(2, 2)
            self.conv3 = nn.Conv2d(64, 128, 3, stride=1, padding=1)
            self.relu3 = nn.ReLU(True)
            self.bn3 = nn.BatchNorm2d(128)
            self.pool3 = nn.MaxPool2d(2, 2)
            self.conv4 = nn.Conv2d(128, 128, 3, stride=1, padding=1)
            self.relu4 = nn.ReLU(True)
            self.bn4 = nn.BatchNorm2d(128)
            self.pool4 = nn.MaxPool2d(2, 2)
            self.fc1 = nn.Linear(128 * 8 * 8, 1024)
            self.relu5 = nn.ReLU(True)
            self.fc2 = nn.Linear(1024, 6)

        def forward(self, input):
            output = self.conv1(input)
            output = self.relu1(output)
            output = self.bn1(output)
            output = self.pool1(output)
            output = self.conv2(output)
            output = self.relu2(output)
            output = self.bn2(output)
            output = self.pool2(output)
            output = self.conv3(output)
            output = self.relu3(output)
            output = self.bn3(output)
            output = self.pool3(output)
            output = self.conv4(output)
            output = self.relu4(output)
            output = self.bn4(output)
            output = self.pool4(output)
            output = output.view(-1, 128 * 8 * 8)
            output = self.fc1(output)
            output = self.relu5(output)
            output = self.fc2(output)
            return output
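A quick shape sanity check: four 2x2 max-poolings shrink the spatial size by 16x, so nn.Linear(128*8*8, 1024) implies 128x128 inputs. A minimal sketch to confirm, assuming the net.py above is importable:

    import torch
    from net import Net

    x = torch.randn(2, 3, 128, 128)  # dummy batch: 2 RGB images of 128x128
    y = Net()(x)                     # spatial size: 128 -> 64 -> 32 -> 16 -> 8
    print(y.shape)                   # expected: torch.Size([2, 6])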

flyai.exe train -p=1 -b=64 -e=8000

score : 85.38

main.py

    # -*- coding: utf-8 -*-
    import argparse

    import torch
    import torch.nn as nn
    from flyai.dataset import Dataset
    from torch.optim import Adam

    from model import Model
    from net import Net
    from path import MODEL_PATH

    # data-fetching helper class
    dataset = Dataset()
    # model-handling helper class
    model = Model(dataset)

    # hyperparameters
    parser = argparse.ArgumentParser()
    parser.add_argument("-e", "--EPOCHS", default=1000, type=int, help="train epochs")
    parser.add_argument("-b", "--BATCH", default=256, type=int, help="batch size")
    parser.add_argument("-lr", "--learning_rate", default=0.001, type=float, help="learning_rate")
    args = parser.parse_args()

    # check whether a GPU is available
    if torch.cuda.is_available():
        device = 'cuda'
    else:
        device = 'cpu'
    device = torch.device(device)


    def eval(model, x_test, y_test):
        # batch-size-weighted accuracy of `cnn` on the test data
        cnn.eval()
        batch_eval = model.batch_iter(x_test, y_test)
        total_acc = 0.0
        data_len = len(x_test)
        for x_batch, y_batch in batch_eval:
            batch_len = len(x_batch)
            outputs = cnn(x_batch)
            _, prediction = torch.max(outputs.data, 1)
            correct = (prediction == y_batch).sum().item()
            acc = correct / batch_len
            total_acc += acc * batch_len
        return total_acc / data_len


    # cnn = Net().to(device)
    # optimizer = Adam(cnn.parameters(), lr=0.001, betas=(0.9, 0.999))  # use the Adam optimizer
    # optimizer = Adam(cnn.parameters(), lr=0.00005, betas=(0.999999, 0.99999999999))  # use the Adam optimizer
    # loss_fn = nn.CrossEntropyLoss()  # define the loss function

    cnn = Net().to(device)
    optimizer = Adam(cnn.parameters(), lr=0.0005, betas=(0.99999999, 0.999999999999))  # use the Adam optimizer
    """
    Implements the Adam algorithm, proposed in "Adam: A Method for Stochastic Optimization"
    (https://arxiv.org/pdf/1412.6980.pdf). Parameters:
        params (iterable) - iterable of parameters to optimize, or dicts defining parameter groups
        lr (float, optional) - learning rate (default: 1e-3)
        betas (Tuple[float, float], optional) - coefficients used for computing running averages of the gradient and its square (default: (0.9, 0.999))
        eps (float, optional) - term added to the denominator to improve numerical stability (default: 1e-8)
        weight_decay (float, optional) - weight decay (L2 penalty) (default: 0)
    """
    # optimizer = SGD(cnn.parameters(), lr=1e-4, momentum=0.99997)  # use the SGD optimizer (stochastic gradient descent)
    # Models trained with adaptive optimizers often end up worse than models trained with SGD,
    # even though the adaptive methods look better during training; use adaptive optimizers with care.
    """
    Momentum acts like inertia: the current gradient is weighted against the previous update.
    - If the directions agree, the contributions accumulate and the step grows.
    - If they disagree, they partially cancel and the update is damped.
    """
    loss_fn = nn.CrossEntropyLoss()  # define the loss function

    # train and evaluate the model
    best_accuracy = 0
    for i in range(args.EPOCHS):
        cnn.train()
        x_train, y_train, x_test, y_test = dataset.next_batch(args.BATCH)  # read a batch of data
        x_train = torch.from_numpy(x_train)
        y_train = torch.from_numpy(y_train)
        x_train = x_train.float().to(device)
        y_train = y_train.long().to(device)
        x_test = torch.from_numpy(x_test)
        y_test = torch.from_numpy(y_test)
        x_test = x_test.float().to(device)
        y_test = y_test.long().to(device)

        outputs = cnn(x_train)
        _, prediction = torch.max(outputs.data, 1)
        optimizer.zero_grad()
        loss = loss_fn(outputs, y_train)
        loss.backward()
        optimizer.step()

        # if the test accuracy exceeds the current best, save the model
        train_accuracy = eval(model, x_test, y_test)
        # if train_accuracy > best_accuracy:
        #     best_accuracy = train_accuracy
        #     model.save_model(cnn, MODEL_PATH, overwrite=True)
        #     print("step %d, best accuracy %g" % (i, best_accuracy))
        if i == 5000:
            model.save_model(cnn, MODEL_PATH, overwrite=True)
            print("step %d, the model is saved" % (i))
        if i == 6000:
            model.save_model(cnn, MODEL_PATH, overwrite=True)
            print("step %d, the model is saved" % (i))
        if i == args.EPOCHS - 1:
            model.save_model(cnn, MODEL_PATH, overwrite=True)
            print("step %d, the model is saved" % (i))
        if i == args.EPOCHS:  # note: never true, since i only reaches EPOCHS - 1
            model.save_model(cnn, MODEL_PATH, overwrite=True)
            print("step %d, the model is saved" % (i))
        print(str(i) + "/" + str(args.EPOCHS))
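With the best-accuracy branch commented out, the per-step eval call above no longer affects training or saving; it only costs time. A sketch that evaluates periodically instead, where EVAL_EVERY is a hypothetical constant and not part of the original script:

    # Sketch: replace the unconditional per-step eval inside the loop with:
    EVAL_EVERY = 100  # hypothetical interval
    if (i + 1) % EVAL_EVERY == 0:
        train_accuracy = eval(model, x_test, y_test)
        print("step %d, eval accuracy %g" % (i, train_accuracy))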

net.py

    ## build CNN
    from torch import nn

    class Net(nn.Module):
        # def __init__(self, num_classes=10):
        def __init__(self):
            super(Net, self).__init__()
            self.conv1 = nn.Conv2d(3, 32, 5, stride=1, padding=2)
            self.relu1 = nn.ReLU(True)
            self.bn1 = nn.BatchNorm2d(32)
            self.pool1 = nn.MaxPool2d(2, 2)
            self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=1)
            self.relu2 = nn.ReLU(True)
            self.bn2 = nn.BatchNorm2d(64)
            self.pool2 = nn.MaxPool2d(2, 2)
            self.conv3 = nn.Conv2d(64, 128, 3, stride=1, padding=1)
            self.relu3 = nn.ReLU(True)
            self.bn3 = nn.BatchNorm2d(128)
            self.pool3 = nn.MaxPool2d(2, 2)
            self.conv4 = nn.Conv2d(128, 128, 3, stride=1, padding=1)
            self.relu4 = nn.ReLU(True)
            self.bn4 = nn.BatchNorm2d(128)
            self.pool4 = nn.MaxPool2d(2, 2)
            # leftover experiment with an extra conv4 block, kept commented out:
            # self.conv4 = nn.Conv2d(128, 128, 3, stride=1, padding=1)
            # self.relu4 = nn.ReLU(True)
            # self.bn4 = nn.BatchNorm2d(128)
            # self.pool4 = nn.MaxPool2d(2, 2)
            self.fc1 = nn.Linear(128 * 8 * 8, 1024)
            self.relu5 = nn.ReLU(True)
            self.fc2 = nn.Linear(1024, 6)

        def forward(self, input):
            output = self.conv1(input)
            output = self.relu1(output)
            output = self.bn1(output)
            output = self.pool1(output)
            output = self.conv2(output)
            output = self.relu2(output)
            output = self.bn2(output)
            output = self.pool2(output)
            output = self.conv3(output)
            output = self.relu3(output)
            output = self.bn3(output)
            output = self.pool3(output)
            output = self.conv4(output)
            output = self.relu4(output)
            output = self.bn4(output)
            output = self.pool4(output)
            output = output.view(-1, 128 * 8 * 8)
            output = self.fc1(output)
            output = self.relu5(output)
            output = self.fc2(output)
            return output
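When pooling stages are added or removed like this, the fc1 input size must change with them, since each MaxPool2d(2, 2) halves the spatial resolution. A hypothetical helper to compute it, assuming 128x128 inputs (which is what 128*8*8 implies):

    # Hypothetical helper, not part of the original net.py:
    def fc1_in_features(input_hw=128, n_pools=4, channels=128):
        side = input_hw // (2 ** n_pools)  # each MaxPool2d(2, 2) halves H and W
        return channels * side * side

    print(fc1_in_features(n_pools=4))  # 8192 == 128*8*8, matches nn.Linear above
    print(fc1_in_features(n_pools=5))  # 2048 == 128*4*4, needed if a fifth pool is added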

./flyai train -p=1 -b=64 -e=8000

score : 85.24
net.py

    ## build CNN
    from torch import nn

    class Net(nn.Module):
        # def __init__(self, num_classes=10):
        def __init__(self):
            super(Net, self).__init__()
            self.conv1 = nn.Conv2d(3, 32, 5, stride=1, padding=2)
            self.relu1 = nn.ReLU(True)
            self.bn1 = nn.BatchNorm2d(32)
            self.pool1 = nn.MaxPool2d(2, 2)
            self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=1)
            self.relu2 = nn.ReLU(True)
            self.bn2 = nn.BatchNorm2d(64)
            self.pool2 = nn.MaxPool2d(2, 2)
            self.conv3 = nn.Conv2d(64, 128, 3, stride=1, padding=1)
            self.relu3 = nn.ReLU(True)
            self.bn3 = nn.BatchNorm2d(128)
            self.pool3 = nn.MaxPool2d(2, 2)
            self.conv4 = nn.Conv2d(128, 128, 3, stride=1, padding=1)
            self.relu4 = nn.ReLU(True)
            self.bn4 = nn.BatchNorm2d(128)
            self.pool4 = nn.MaxPool2d(2, 2)
            self.fc1 = nn.Linear(128 * 8 * 8, 1024)
            self.relu5 = nn.ReLU(True)
            self.fc2 = nn.Linear(1024, 6)

        def forward(self, input):
            output = self.conv1(input)
            output = self.relu1(output)
            output = self.bn1(output)
            output = self.pool1(output)
            output = self.conv2(output)
            output = self.relu2(output)
            output = self.bn2(output)
            output = self.pool2(output)
            output = self.conv3(output)
            output = self.relu3(output)
            output = self.bn3(output)
            output = self.pool3(output)
            output = self.conv4(output)
            output = self.relu4(output)
            output = self.bn4(output)
            output = self.pool4(output)
            output = output.view(-1, 128 * 8 * 8)
            output = self.fc1(output)
            output = self.relu5(output)
            output = self.fc2(output)
            return output

./flyai train -p=1 -b=64 -e=8000

score : 83.24

Replace the Adam optimizer with SGD (stochastic gradient descent).

main.py

    # -*- coding: utf-8 -*-
    import argparse

    import torch
    import torch.nn as nn
    from flyai.dataset import Dataset
    from torch.optim import Adam
    from torch.optim import SGD

    from model import Model
    from net import Net
    from path import MODEL_PATH

    # data-fetching helper class
    dataset = Dataset()
    # model-handling helper class
    model = Model(dataset)

    # hyperparameters
    parser = argparse.ArgumentParser()
    parser.add_argument("-e", "--EPOCHS", default=1000, type=int, help="train epochs")
    parser.add_argument("-b", "--BATCH", default=256, type=int, help="batch size")
    parser.add_argument("-lr", "--learning_rate", default=0.001, type=float, help="learning_rate")
    parser.add_argument("-m", "--momentum", default=0.9, type=float, help="momentum")
    # parser.add_argument("-
    args = parser.parse_args()

    # check whether a GPU is available
    if torch.cuda.is_available():
        device = 'cuda'
    else:
        device = 'cpu'
    device = torch.device(device)


    def eval(model, x_test, y_test):
        # batch-size-weighted accuracy of `cnn` on the test data
        cnn.eval()
        batch_eval = model.batch_iter(x_test, y_test)
        total_acc = 0.0
        data_len = len(x_test)
        for x_batch, y_batch in batch_eval:
            batch_len = len(x_batch)
            outputs = cnn(x_batch)
            _, prediction = torch.max(outputs.data, 1)
            correct = (prediction == y_batch).sum().item()
            acc = correct / batch_len
            total_acc += acc * batch_len
        return total_acc / data_len


    # cnn = Net().to(device)
    # optimizer = Adam(cnn.parameters(), lr=0.001, betas=(0.9, 0.999))  # use the Adam optimizer
    # optimizer = Adam(cnn.parameters(), lr=0.00005, betas=(0.999999, 0.99999999999))  # use the Adam optimizer
    # loss_fn = nn.CrossEntropyLoss()  # define the loss function

    cnn = Net().to(device)
    # optimizer = Adam(cnn.parameters(), lr=0.0005, betas=(0.99999999, 0.999999999999))  # use the Adam optimizer
    """
    Implements the Adam algorithm, proposed in "Adam: A Method for Stochastic Optimization"
    (https://arxiv.org/pdf/1412.6980.pdf). Parameters:
        params (iterable) - iterable of parameters to optimize, or dicts defining parameter groups
        lr (float, optional) - learning rate (default: 1e-3)
        betas (Tuple[float, float], optional) - coefficients used for computing running averages of the gradient and its square (default: (0.9, 0.999))
        eps (float, optional) - term added to the denominator to improve numerical stability (default: 1e-8)
        weight_decay (float, optional) - weight decay (L2 penalty) (default: 0)
    """
    optimizer = SGD(cnn.parameters(), lr=1e-4, momentum=0.99997)  # use the SGD optimizer (stochastic gradient descent)
    # Models trained with adaptive optimizers often end up worse than models trained with SGD,
    # even though the adaptive methods look better during training; use adaptive optimizers with care.
    """
    Momentum acts like inertia: the current gradient is weighted against the previous update.
    - If the directions agree, the contributions accumulate and the step grows.
    - If they disagree, they partially cancel and the update is damped.
    """
    loss_fn = nn.CrossEntropyLoss()  # define the loss function

    # train and evaluate the model
    best_accuracy = 0
    for i in range(args.EPOCHS):
        cnn.train()
        x_train, y_train, x_test, y_test = dataset.next_batch(args.BATCH)  # read a batch of data
        x_train = torch.from_numpy(x_train)
        y_train = torch.from_numpy(y_train)
        x_train = x_train.float().to(device)
        y_train = y_train.long().to(device)
        x_test = torch.from_numpy(x_test)
        y_test = torch.from_numpy(y_test)
        x_test = x_test.float().to(device)
        y_test = y_test.long().to(device)

        outputs = cnn(x_train)
        _, prediction = torch.max(outputs.data, 1)
        optimizer.zero_grad()
        loss = loss_fn(outputs, y_train)
        loss.backward()
        optimizer.step()

        # if the test accuracy exceeds the current best, save the model
        train_accuracy = eval(model, x_test, y_test)
        # if train_accuracy > best_accuracy:
        #     best_accuracy = train_accuracy
        #     model.save_model(cnn, MODEL_PATH, overwrite=True)
        #     print("step %d, best accuracy %g" % (i, best_accuracy))
        if i == 5000:
            model.save_model(cnn, MODEL_PATH, overwrite=True)
            print("step %d, the model is saved" % (i))
        if i == 6000:
            model.save_model(cnn, MODEL_PATH, overwrite=True)
            print("step %d, the model is saved" % (i))
        if i == args.EPOCHS - 1:
            model.save_model(cnn, MODEL_PATH, overwrite=True)
            print("step %d, the model is saved" % (i))
        if i == args.EPOCHS:  # note: never true, since i only reaches EPOCHS - 1
            model.save_model(cnn, MODEL_PATH, overwrite=True)
            print("step %d, the model is saved" % (i))
        print(str(i) + "/" + str(args.EPOCHS))
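For intuition, the momentum rule described above can be sketched by hand. This is a minimal illustration of what torch.optim.SGD does conceptually, not its actual implementation:

    # v <- momentum * v + g ; p <- p - lr * v
    velocities = [torch.zeros_like(p) for p in cnn.parameters()]

    def sgd_momentum_step(lr=1e-4, momentum=0.99997):
        with torch.no_grad():
            for p, v in zip(cnn.parameters(), velocities):
                if p.grad is None:
                    continue
                v.mul_(momentum).add_(p.grad)  # blend the current gradient into the running update
                p.add_(v, alpha=-lr)           # step against the blended direction
    # With momentum=0.99997, the running update averages over roughly
    # 1 / (1 - 0.99997) ~ 33,000 steps, so it changes direction very slowly.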

net.py

    ## build CNN
    from torch import nn

    class Net(nn.Module):
        # def __init__(self, num_classes=10):
        def __init__(self):
            super(Net, self).__init__()
            self.conv1 = nn.Conv2d(3, 32, 5, stride=1, padding=2)
            self.relu1 = nn.ReLU(True)
            self.bn1 = nn.BatchNorm2d(32)
            self.pool1 = nn.MaxPool2d(2, 2)
            self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=1)
            self.relu2 = nn.ReLU(True)
            self.bn2 = nn.BatchNorm2d(64)
            self.pool2 = nn.MaxPool2d(2, 2)
            self.conv3 = nn.Conv2d(64, 128, 3, stride=1, padding=1)
            self.relu3 = nn.ReLU(True)
            self.bn3 = nn.BatchNorm2d(128)
            self.pool3 = nn.MaxPool2d(2, 2)
            self.conv4 = nn.Conv2d(128, 128, 3, stride=1, padding=1)
            self.relu4 = nn.ReLU(True)
            self.bn4 = nn.BatchNorm2d(128)
            self.pool4 = nn.MaxPool2d(2, 2)
            self.fc1 = nn.Linear(128 * 8 * 8, 1024)
            self.relu5 = nn.ReLU(True)
            self.fc2 = nn.Linear(1024, 6)

        def forward(self, input):
            output = self.conv1(input)
            output = self.relu1(output)
            output = self.bn1(output)
            output = self.pool1(output)
            output = self.conv2(output)
            output = self.relu2(output)
            output = self.bn2(output)
            output = self.pool2(output)
            output = self.conv3(output)
            output = self.relu3(output)
            output = self.bn3(output)
            output = self.pool3(output)
            output = self.conv4(output)
            output = self.relu4(output)
            output = self.bn4(output)
            output = self.pool4(output)
            output = output.view(-1, 128 * 8 * 8)
            output = self.fc1(output)
            output = self.relu5(output)
            output = self.fc2(output)
            return output

Reposted from: https://www.cnblogs.com/hugeng007/p/10628752.html
