Data Preprocessing (10-Class)
File name: data_10

| Fault location | Loads (hp) | Defect diameter (inches) | Class |
| -------------- | ---------- | ------------------------ | ----- |
| Normal         | 0/1/2/3    | 0                        | 0     |
| Inner race     | 0/1/2/3    | 0.007                    | 1     |
| Inner race     | 0/1/2/3    | 0.014                    | 2     |
| Inner race     | 0/1/2/3    | 0.021                    | 3     |
| Ball           | 0/1/2/3    | 0.007                    | 4     |
| Ball           | 0/1/2/3    | 0.014                    | 5     |
| Ball           | 0/1/2/3    | 0.021                    | 6     |
| Outer race     | 0/1/2/3    | 0.007                    | 7     |
| Outer race     | 0/1/2/3    | 0.014                    | 8     |
| Outer race     | 0/1/2/3    | 0.021                    | 9     |
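The class index follows a fixed pattern: the fault type picks an offset (inner race 1, ball 4, outer race 7) and the defect diameter picks a row within that group (0.007/0.014/0.021 map to 0/1/2). As a small hypothetical lookup mirroring the table (load_data below derives the same indices implicitly):

# Hypothetical helper mirroring the table above.
FAULT_OFFSET = {"inner race": 1, "ball": 4, "outer race": 7}
DIAMETER_ROW = {0.007: 0, 0.014: 1, 0.021: 2}

def class_of(fault, diameter):
    if fault == "normal":
        return 0
    return FAULT_OFFSET[fault] + DIAMETER_ROW[diameter]

assert class_of("ball", 0.014) == 5

The preprocessing module itself follows.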
import random
import numpy as np
import scipy.io as scio
from sklearn import preprocessing


def open_data(bath_path, key_num):
    # Example: open_data('/Users/apple/Desktop/cwru/12k Drive End Bearing Fault Data/', 105)
    path = bath_path + str(key_num) + ".mat"
    str1 = "X" + "%03d" % key_num + "_DE_time"  # drive-end accelerometer channel
    data = scio.loadmat(path)
    data = data[str1]
    return data


def deal_data(data, length, label):
    # Returns an array with num rows and length+1 columns: each row is one
    # min-max-scaled sample of `length` points with the class label appended.
    data = np.reshape(data, (-1))
    num = len(data) // length
    data = data[0:num * length]
    data = np.reshape(data, (num, length))
    min_max_scaler = preprocessing.MinMaxScaler()
    data = min_max_scaler.fit_transform(np.transpose(data, [1, 0]))
    data = np.transpose(data, [1, 0])
    label = np.ones((num, 1)) * label
    return np.column_stack((data, label))


def split_data(data, split_rate):
    # Split one class into train/valid/test: the first num1 rows (in shuffled
    # order) become train, the next num2 become valid, the rest become test.
    length = len(data)
    num1 = int(length * split_rate[0])
    num2 = int(length * split_rate[1])
    index1 = random.sample(range(num1), num1)
    train = data[index1]
    data = np.delete(data, index1, axis=0)
    index2 = random.sample(range(num2), num2)
    valid = data[index2]
    test = np.delete(data, index2, axis=0)
    return train, valid, test


def load_data(num, length, hp, fault_diameter, split_rate):
    # num: number of samples taken from each data file
    # length: points per sample
    # split_rate: train/valid/test proportions
    bath_path1 = 'path of Normal Baseline Data'
    bath_path2 = 'path of 12k Drive End Bearing Fault Data/'
    data_list = []
    # File numbers: each row is one defect diameter; columns are grouped as
    # (inner, ball, outer) for loads 0, 1, 2, 3 hp.
    file_list = np.array(
        [[105, 118, 130, 106, 119, 131, 107, 120, 132, 108, 121, 133],   # 0.007
         [169, 185, 197, 170, 186, 198, 171, 187, 199, 172, 188, 200],   # 0.014
         [209, 222, 234, 210, 223, 235, 211, 224, 236, 212, 225, 237]])  # 0.021
    label = 0

    # Normal data (files 97-100 correspond to loads 0-3 hp)
    for i in hp:
        normal_data = open_data(bath_path1, 97 + i)
        data = deal_data(normal_data, length, label=label)
        data_list.append(data)

    # Fault data
    for i in fault_diameter:
        row = int(round(i / 0.007)) - 1  # round before truncating: 0.021/0.007 may not be exactly 3.0 in floating point
        for j in hp:
            inner_num = file_list[row, 3 * j]
            ball_num = file_list[row, 3 * j + 1]
            outer_num = file_list[row, 3 * j + 2]
            inner_data = open_data(bath_path2, inner_num)
            inner_data = deal_data(inner_data, length, label + 1)
            data_list.append(inner_data)
            ball_data = open_data(bath_path2, ball_num)
            ball_data = deal_data(ball_data, length, label + 4)
            data_list.append(ball_data)
            outer_data = open_data(bath_path2, outer_num)
            outer_data = deal_data(outer_data, length, label + 7)
            data_list.append(outer_data)
        label = label + 1

    # Keep the same number of samples in every class
    num_list = [len(i) for i in data_list]
    min_num = min(num_list)
    if num > min_num:
        print("The requested number per class overflows; the maximum available is: %d" % min_num)
    min_num = min(num, min_num)

    # Divide into train, validation and test sets, then shuffle
    train = []
    valid = []
    test = []
    for data in data_list:
        data = data[0:min_num, :]
        a, b, c = split_data(data, split_rate)
        train.append(a)
        valid.append(b)
        test.append(c)

    onehot_encoder = preprocessing.OneHotEncoder(sparse=False)  # use sparse_output=False on scikit-learn >= 1.2

    train = np.reshape(train, (-1, length + 1))
    train = train[random.sample(range(len(train)), len(train))]
    train_data = train[:, 0:length]
    train_label = train[:, length].reshape(-1, 1)
    train_label = onehot_encoder.fit_transform(train_label)

    valid = np.reshape(valid, (-1, length + 1))
    valid = valid[random.sample(range(len(valid)), len(valid))]
    valid_data = valid[:, 0:length]
    valid_label = valid[:, length].reshape(-1, 1)
    valid_label = onehot_encoder.fit_transform(valid_label)

    test = np.reshape(test, (-1, length + 1))
    test = test[random.sample(range(len(test)), len(test))]
    test_data = test[:, 0:length]
    test_label = test[:, length].reshape(-1, 1)
    test_label = onehot_encoder.fit_transform(test_label)

    return train_data, train_label, valid_data, valid_label, test_data, test_label
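Assuming the two placeholder paths in load_data have been pointed at real local copies of the Normal Baseline and 12k Drive End data, a minimal sanity check of the returned shapes could look like the sketch below (the expected shapes in the comments follow from num=100, length=1024 and split_rate=[0.7, 0.1, 0.2]; verify against your copy of the data):

from data_10 import load_data

train_x, train_y, valid_x, valid_y, test_x, test_y = load_data(
    num=100, length=1024, hp=[0, 1, 2, 3],
    fault_diameter=[0.007, 0.014, 0.021], split_rate=[0.7, 0.1, 0.2])

# Samples are rows of 1024 points; labels are one-hot arrays of shape (N, 10).
print(train_x.shape, train_y.shape)
print(valid_x.shape, test_x.shape)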

WDCNN Training and Testing

from data_10 import load_data
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset


class Net(nn.Module):
    # WDCNN: one wide first-layer kernel (64 points, stride 16) followed by
    # small 3-point convolutions, each with BN + ReLU + 2x max-pooling.
    def __init__(self, in_channel=1, out_channel=10):
        super(Net, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv1d(in_channel, 16, kernel_size=64, stride=16, padding=24),
            nn.BatchNorm1d(16),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=2, stride=2))
        self.layer2 = nn.Sequential(
            nn.Conv1d(16, 32, kernel_size=3, padding=1),
            nn.BatchNorm1d(32),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=2, stride=2))
        self.layer3 = nn.Sequential(
            nn.Conv1d(32, 64, kernel_size=3, padding=1),
            nn.BatchNorm1d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=2, stride=2))
        self.layer4 = nn.Sequential(
            nn.Conv1d(64, 64, kernel_size=3, padding=1),
            nn.BatchNorm1d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=2, stride=2))
        self.layer5 = nn.Sequential(
            nn.Conv1d(64, 64, kernel_size=3),  # no padding: for length-1024 input the map shrinks from 4 to 2, then pools to 1
            nn.BatchNorm1d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=2, stride=2))
        self.fc = nn.Sequential(
            nn.Linear(64, 100),
            nn.ReLU(inplace=True),
            nn.Linear(100, out_channel))

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        x = x.view(x.size(0), -1)
        output = self.fc(x)
        return output


def train_Model(model, train_loader, optimizer, epoch):
    model.train()
    trained_samples = 0
    correct = 0
    loss_fn = nn.MSELoss(reduction='mean')
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        output = model(data)
        loss = loss_fn(output.float(), target.float())
        loss.backward()
        optimizer.step()
        trained_samples += len(data)
        print("\rTrain epoch %d: %d/%d, " % (epoch, trained_samples, len(train_loader.dataset)), end='')
        pred = output.argmax(dim=1, keepdim=True)
        real = target.argmax(dim=1, keepdim=True)
        correct += pred.eq(real.view_as(pred)).sum().item()
    train_acc = correct / len(train_loader.dataset)
    print("Train acc:", train_acc)


def test_Model(model, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    loss_fn = nn.MSELoss(reduction='sum')
    with torch.no_grad():
        for data, target in test_loader:
            output = model(data)  # logits
            # print(output)
            test_loss += loss_fn(output.float(), target.float()).item()  # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True)  # index of the max logit
            # print((pred==4).sum())
            target = target.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    print('\nTest: average loss: {:.4f}, accuracy: {}/{} ({:.2f}%)'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
    return test_loss, correct / len(test_loader.dataset)


def main():
    epochs = 300
    batch_size = 32
    torch.manual_seed(0)
    train_dataset, train_label, _, _, test_dataset, test_label = load_data(
        num=100, length=1024, hp=[0, 1, 2, 3],
        fault_diameter=[0.007, 0.014, 0.021], split_rate=[0.7, 0.1, 0.2])
    train_dataset = torch.tensor(train_dataset).unsqueeze(1).to(torch.float32)  # (N, 1, 1024)
    test_dataset = torch.tensor(test_dataset).unsqueeze(1).to(torch.float32)
    train_label = torch.tensor(train_label)
    test_label = torch.tensor(test_label)
    train_id = TensorDataset(train_dataset, train_label)
    test_id = TensorDataset(test_dataset, test_label)
    train_loader = DataLoader(dataset=train_id, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(dataset=test_id, batch_size=batch_size, shuffle=False)
    model = Net()
    optimizer = torch.optim.Adadelta(model.parameters())
    model_history = []
    for epoch in range(1, epochs + 1):
        train_Model(model, train_loader, optimizer, epoch)
        loss, acc = test_Model(model, test_loader)
        model_history.append((loss, acc))
    # torch.save(model.state_dict(), "model.pt")
    return model, model_history


model, model_history = main()
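The script trains this 10-class problem with an MSE loss on one-hot targets, which works but is an unconventional choice; nn.CrossEntropyLoss over integer class indices is the standard alternative for classification logits. Below is a minimal sketch of the swap (the train_step name is hypothetical, and recovering integer labels with argmax assumes the loaders still yield the one-hot targets produced by load_data):

import torch.nn as nn

loss_fn = nn.CrossEntropyLoss()

def train_step(model, data, target_onehot, optimizer):
    # CrossEntropyLoss expects raw logits and integer class indices,
    # so convert each one-hot row back to its class index first.
    target = target_onehot.argmax(dim=1)
    optimizer.zero_grad()
    output = model(data)            # (batch, 10) logits
    loss = loss_fn(output, target)  # log-softmax + NLL in one call
    loss.backward()
    optimizer.step()
    return loss.item()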
