Time Series: N-CNN-LSTM
Contents
- Data format
- Code
Data format
Data: an Excel workbook (总的数据集.xlsx). After dropping the first column, the code keeps 11 columns: the first six features each get their own lag window, the next four features are fed in as a block at the current time step only, and the last column is the prediction target.
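The original spreadsheet is not included with the post. As a rough, hypothetical sketch (the column names and values below are made up; only the layout follows what read_data() in the script expects), a stand-in file with the same shape can be generated like this:

import numpy as np
import pandas as pd

# Hypothetical stand-in for '../data/总的数据集.xlsx': one index column followed by
# 11 data columns, with the prediction target in the last column. The values are
# random and only serve to exercise the pipeline end to end.
rng = np.random.default_rng(0)
n_rows = 1000
frame = pd.DataFrame(rng.random((n_rows, 11)),
                     columns=[f'feature_{i}' for i in range(10)] + ['target'])
frame.insert(0, 'index', np.arange(n_rows))   # first column, dropped by read_data()
frame.to_excel('../data/总的数据集.xlsx', index=False)   # requires openpyxl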
Code
#!/usr/bin/env python
# -*- coding:utf-8 _*-
"""
@author: liujie
@software: PyCharm
@file: N_CNN_LSTM.py
@time: 2020/11/14 19:19
"""
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error,mean_absolute_error
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers,optimizers,metrics,losses
from tensorflow.keras.layers import LSTM   # the original post imported this from keras.layers.recurrent

# read the data
def read_data(filepath):
    data = pd.read_excel(filepath, header=0)
    data = data.iloc[:, 1:12]          # keep the 11 data columns
    label = data.iloc[:, -1]           # last column is the target
    print('The Data : \n', data)
    return data, label

# min-max normalisation
def min_max_scaler(data, label):
    label = np.array(label)
    data_scaler = MinMaxScaler()
    data = data_scaler.fit_transform(data)
    label_scaler = MinMaxScaler()
    label = label_scaler.fit_transform(label.reshape(-1, 1))
    return data, label

# convert the data into the inputs and outputs of the N-LSTM
def n_lstm_data(data, data_length, delay_factor):
    x0, x1, x2, x3, x4, x5, x6, y = [], [], [], [], [], [], [], []
    for index in range(max(delay_factor), data_length):
        # each feature column has its own lag (delay) factor
        x0.append(data[index - delay_factor[0]: index + 1, 0:1])
        x1.append(data[index - delay_factor[1]: index + 1, 1:2])
        x2.append(data[index - delay_factor[2]: index + 1, 2:3])
        x3.append(data[index - delay_factor[3]: index + 1, 3:4])
        x4.append(data[index - delay_factor[4]: index + 1, 4:5])
        x5.append(data[index - delay_factor[5]: index + 1, 5:6])
        x6.append(data[index - 0: index + 1, 6:-1])
        y.append(data[index, -1])
    x0 = np.array(x0)
    x1 = np.array(x1)
    x2 = np.array(x2)
    x3 = np.array(x3)
    x4 = np.array(x4)
    x5 = np.array(x5)
    x6 = np.array(x6)
    y = np.array(y)
    print('x0~x6.shape,y.shape\n', x0.shape, x1.shape, x2.shape, x3.shape,
          x4.shape, x5.shape, x6.shape, y.shape)
    return x0, x1, x2, x3, x4, x5, x6, y

# build the dataset and split it into training and test sets
def make_dataset(data, test_num):
    feature_num = data.shape[1] - 1
    data_length = data.shape[0]
    # convert into model inputs (delay_factor is taken from module scope, set in __main__)
    x0, x1, x2, x3, x4, x5, x6, y = n_lstm_data(data, data_length, delay_factor)
    # split into training and test sets
    x0_train, x0_test = x0[:-test_num, :], x0[-test_num:, :]
    x1_train, x1_test = x1[:-test_num, :], x1[-test_num:, :]
    x2_train, x2_test = x2[:-test_num, :], x2[-test_num:, :]
    x3_train, x3_test = x3[:-test_num, :], x3[-test_num:, :]
    x4_train, x4_test = x4[:-test_num, :], x4[-test_num:, :]
    x5_train, x5_test = x5[:-test_num, :], x5[-test_num:, :]
    x6_train, x6_test = x6[:-test_num, :], x6[-test_num:, :]
    y_train, y_test = y[:-test_num], y[-test_num:]
    print('Dataset length:', data_length)
    print('Test set length:', test_num)
    return (x0_train, x0_test, x1_train, x1_test, x2_train, x2_test,
            x3_train, x3_test, x4_train, x4_test, x5_train, x5_test,
            x6_train, x6_test, y_train, y_test)

# build the N-CNN-LSTM model
def n_cnn_lstm_model(delay_factor, filters, kernel_size, rnn_units, window, dropout):
    # input layers: one per lagged feature, plus one for the remaining four features
    inputs0 = tf.keras.Input(shape=(delay_factor[0] + 1, 1))
    inputs1 = tf.keras.Input(shape=(delay_factor[1] + 1, 1))
    inputs2 = tf.keras.Input(shape=(delay_factor[2] + 1, 1))
    inputs3 = tf.keras.Input(shape=(delay_factor[3] + 1, 1))
    inputs4 = tf.keras.Input(shape=(delay_factor[4] + 1, 1))
    inputs5 = tf.keras.Input(shape=(delay_factor[5] + 1, 1))
    inputs6 = tf.keras.Input(shape=(1, 4))
    # CNN: convolution + max pooling on each branch
    cnn0 = layers.Conv1D(filters=filters, kernel_size=kernel_size, activation='relu')(inputs0)
    cnn0 = layers.MaxPooling1D(pool_size=window)(cnn0)
    cnn1 = layers.Conv1D(filters=filters, kernel_size=kernel_size, activation='relu')(inputs1)
    cnn1 = layers.MaxPooling1D(pool_size=window)(cnn1)
    cnn2 = layers.Conv1D(filters=filters, kernel_size=kernel_size, activation='relu')(inputs2)
    cnn2 = layers.MaxPooling1D(pool_size=window)(cnn2)
    cnn3 = layers.Conv1D(filters=filters, kernel_size=kernel_size, activation='relu')(inputs3)
    cnn3 = layers.MaxPooling1D(pool_size=window)(cnn3)
    cnn4 = layers.Conv1D(filters=filters, kernel_size=kernel_size, activation='relu')(inputs4)
    cnn4 = layers.MaxPooling1D(pool_size=window)(cnn4)
    cnn5 = layers.Conv1D(filters=filters, kernel_size=kernel_size, activation='relu')(inputs5)
    cnn5 = layers.MaxPooling1D(pool_size=window)(cnn5)
    cnn6 = layers.Conv1D(filters=filters, kernel_size=kernel_size, activation='relu')(inputs6)
    cnn6 = layers.MaxPooling1D(pool_size=window)(cnn6)
    # LSTM layer on each branch
    rnn0 = LSTM(rnn_units, activation='relu', return_sequences=False)(cnn0)
    rnn1 = LSTM(rnn_units, activation='relu', return_sequences=False)(cnn1)
    rnn2 = LSTM(rnn_units, activation='relu', return_sequences=False)(cnn2)
    rnn3 = LSTM(rnn_units, activation='relu', return_sequences=False)(cnn3)
    rnn4 = LSTM(rnn_units, activation='relu', return_sequences=False)(cnn4)
    rnn5 = LSTM(rnn_units, activation='relu', return_sequences=False)(cnn5)
    rnn6 = LSTM(rnn_units, activation='relu', return_sequences=False)(cnn6)
    # concatenate the branch outputs
    rnn = layers.Concatenate(axis=1)([rnn0, rnn1, rnn2, rnn3, rnn4, rnn5, rnn6])
    # dropout layer
    dense = layers.Dropout(dropout)(rnn)
    # output layer
    outputs = layers.Dense(1, activation='relu')(dense)
    # assemble the model
    model = tf.keras.Model(inputs=[inputs0, inputs1, inputs2, inputs3, inputs4, inputs5, inputs6],
                           outputs=outputs)
    return model

# invert the normalisation
def inverse_data(label_, data):
    data = np.array(data).reshape(-1, 1)
    label_ = np.array(label_).reshape(-1, 1)
    data_scaler = MinMaxScaler()
    data_scaler.fit(label_)
    data = data_scaler.inverse_transform(data)
    data = data[:, 0]
    return data

# plot the results
def plotdata(y_test, y_test_pred):
    # RMSE
    rmse = np.sqrt(mean_squared_error(y_test, y_test_pred))
    print('rmse : %.4f' % rmse)
    fig = plt.figure(figsize=(10, 5))
    fig.add_subplot()
    plt.plot(y_test, 'r--', label='test')
    plt.plot(y_test_pred, 'b-', label='predict')
    plt.legend(loc='upper right')
    plt.title('RMSE : %.4f' % rmse)
    plt.show()

if __name__ == '__main__':
    warnings.filterwarnings(action='ignore')
    # plotting style
    plt.style.use('ggplot')
    # parameters
    test_num = 200
    dropout = 0.1
    epoch = 50                    # number of training epochs
    batch_size = 64               # batch size
    validation_split = 0.1        # fraction of the training data used for validation
    delay_factor = [0, 11, 4, 1, 0, 12]   # lag (delay) factor per feature column
    # read the data
    filepath = '../data/总的数据集.xlsx'
    data, label_ = read_data(filepath)
    # normalise
    data, label = min_max_scaler(data, label_)
    # build the dataset
    (x0_train, x0_test, x1_train, x1_test, x2_train, x2_test,
     x3_train, x3_test, x4_train, x4_test, x5_train, x5_test,
     x6_train, x6_test, y_train, y_test) = make_dataset(data, test_num)
    # model hyper-parameters
    filters = 16          # number of CNN filters
    kernel_size = 1       # CNN kernel size
    rnn_units = 8         # number of LSTM units
    window = 1            # max-pooling window size
    save_model = 'n_cnn_lstm_model.h5'
    # build the model
    model = n_cnn_lstm_model(delay_factor, filters, kernel_size, rnn_units, window, dropout)
    # compile the model
    model.compile(optimizer=optimizers.Adam(learning_rate=1e-3),
                  loss=tf.losses.MSE,
                  metrics=['accuracy'])
    # show the model structure
    model.summary()
    # train the model
    # patience: number of epochs without improvement in the monitored metric before training stops
    early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
    # save the best model (or its weights); with save_best_only=True the latest best model
    # according to the monitored metric is never overwritten
    cp = keras.callbacks.ModelCheckpoint(filepath=save_model, monitor='val_loss', save_best_only=True)
    history = model.fit(x=[x0_train, x1_train, x2_train, x3_train, x4_train, x5_train, x6_train],
                        y=y_train,
                        batch_size=batch_size,
                        epochs=epoch,
                        verbose=1,
                        callbacks=[early_stop, cp],
                        validation_split=validation_split)
    # training curves
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs_range = range(len(loss))
    plt.plot(epochs_range, loss, label='Train Loss')
    plt.plot(epochs_range, val_loss, label='Val Loss')
    plt.legend(loc='upper right')
    plt.title('Train and Val Loss')
    plt.show()
    # load the best weights
    model.load_weights(save_model)
    # predictions on the test set
    y_test_predict = model.predict([x0_test, x1_test, x2_test, x3_test, x4_test, x5_test, x6_test])
    # invert the normalisation
    y_test = inverse_data(label_, y_test)
    y_test_predict = inverse_data(label_, y_test_predict)
    # save the predictions
    pd.DataFrame(y_test_predict).to_csv('y_test_predict_CNN.csv')
    # plot
    plotdata(y_test, y_test_predict)
The main things to get right are the data format and the construction of the multi-input model.
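On the model-construction side, a quick sanity check (assuming the functions from the script above are already defined in the session) is to build the model with the same hyper-parameters and print its input shapes: each of the first six branches receives a window of delay_factor[i] + 1 time steps of one feature, while the seventh branch receives the remaining four features at the current step only.

# Sanity-check sketch: relies on n_cnn_lstm_model() from the script above.
delay_factor = [0, 11, 4, 1, 0, 12]
model = n_cnn_lstm_model(delay_factor, filters=16, kernel_size=1,
                         rnn_units=8, window=1, dropout=0.1)
for inp in model.inputs:
    print(inp.shape)
# Expected (batch dimension is None):
# (None, 1, 1), (None, 12, 1), (None, 5, 1), (None, 2, 1),
# (None, 1, 1), (None, 13, 1), (None, 1, 4)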
More to come, stay tuned!