1. Data extraction

2. Feature engineering

3. Algorithm comparison

4. XGBoost

import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 100)

from itertools import product
from sklearn.preprocessing import LabelEncoder

import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline

from xgboost import XGBRegressor
from xgboost import plot_importance

def plot_features(booster, figsize):
    fig, ax = plt.subplots(1, 1, figsize=figsize)
    return plot_importance(booster=booster, ax=ax)

import time
import sys
import gc
import pickle
sys.version_info

items = pd.read_csv('D:/anaconda_coding/xgboost/items.csv')
shops = pd.read_csv('D:/anaconda_coding/xgboost/shops.csv')
cats = pd.read_csv('D:/anaconda_coding/xgboost/item_categories.csv')
train = pd.read_csv('D:/anaconda_coding/xgboost/sales_train_v2.csv')
# set index to ID to avoid dropping it later
test  = pd.read_csv('D:/anaconda_coding/xgboost/test.csv').set_index('ID')
test.head()

plt.figure(figsize=(10,4))
plt.xlim(-100, 3000)
sns.boxplot(x=train.item_cnt_day)

plt.figure(figsize=(10,4))
plt.xlim(train.item_price.min(), train.item_price.max()*1.1)
sns.boxplot(x=train.item_price)

# Drop outliers: keep rows with item_price below 100,000 and item_cnt_day below 1001
train = train[train.item_price < 100000]
train = train[train.item_cnt_day < 1001]
train.head()

# Fill the negative price with the median price of the same item in the same shop and month
median = train[(train.shop_id==32)&(train.item_id==2973)&(train.date_block_num==4)&(train.item_price>0)].item_price.median()
train.loc[train.item_price < 0, 'item_price'] = median
train['item_price'].value_counts().min()  # 56
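As a quick sanity check (an addition, not in the original notebook), we can confirm that the filters and the median fix left no bad rows behind:

# prices should now be non-negative and daily counts within the filter bound
assert (train['item_price'] >= 0).all()
assert train['item_cnt_day'].max() < 1001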
# A few shops look like duplicates of each other (judging by shop_name);
# map each pair onto a single id in both train and test
train.loc[train.shop_id == 0, 'shop_id'] = 57
test.loc[test.shop_id == 0, 'shop_id'] = 57
train.loc[train.shop_id == 1, 'shop_id'] = 58
test.loc[test.shop_id == 1, 'shop_id'] = 58
train.loc[train.shop_id == 10, 'shop_id'] = 11
test.loc[test.shop_id == 10, 'shop_id'] = 11

# Each shop_name starts with the city name; extract it as a categorical feature
shops.loc[shops.shop_name == 'Сергиев Посад ТЦ "7Я"', 'shop_name'] = 'СергиевПосад ТЦ "7Я"'
shops['city'] = shops['shop_name'].str.split(' ').map(lambda x: x[0])
shops.loc[shops.city == '!Якутск', 'city'] = 'Якутск'
shops['city_code'] = LabelEncoder().fit_transform(shops['city'])
shops = shops[['shop_id','city_code']]

# Split each category name into a type and a subtype
cats['split'] = cats['item_category_name'].str.split('-')
cats['type'] = cats['split'].map(lambda x: x[0].strip())
cats['type_code'] = LabelEncoder().fit_transform(cats['type'])
# if subtype is nan then type
cats['subtype'] = cats['split'].map(lambda x: x[1].strip() if len(x) > 1 else x[0].strip())
cats['subtype_code'] = LabelEncoder().fit_transform(cats['subtype'])
cats = cats[['item_category_id','type_code', 'subtype_code']]

items.drop(['item_name'], axis=1, inplace=True)
shops.head()

# How many test items never appear in train, out of how many distinct test items and test rows
len(list(set(test.item_id) - set(test.item_id).intersection(set(train.item_id)))), len(list(set(test.item_id))), len(test)

ts = time.time()
matrix = []
cols = ['date_block_num','shop_id','item_id']
for i in range(34):
    sales = train[train.date_block_num==i]
    matrix.append(np.array(list(product([i], sales.shop_id.unique(), sales.item_id.unique())), dtype='int16'))

matrix = pd.DataFrame(np.vstack(matrix), columns=cols)
matrix['date_block_num'] = matrix['date_block_num'].astype(np.int8)
matrix['shop_id'] = matrix['shop_id'].astype(np.int8)
matrix['item_id'] = matrix['item_id'].astype(np.int16)
matrix.sort_values(cols,inplace=True)
time.time() - ts

train['revenue'] = train['item_price'] * train['item_cnt_day']

ts = time.time()
group = train.groupby(['date_block_num','shop_id','item_id']).agg({'item_cnt_day': ['sum']})
group.columns = ['item_cnt_month']
group.reset_index(inplace=True)

matrix = pd.merge(matrix, group, on=cols, how='left')
matrix['item_cnt_month'] = (matrix['item_cnt_month']
                            .fillna(0)
                            .clip(0, 20)  # NB clip target here
                            .astype(np.float16))
time.time() - ts

test['date_block_num'] = 34
test['date_block_num'] = test['date_block_num'].astype(np.int8)
test['shop_id'] = test['shop_id'].astype(np.int8)
test['item_id'] = test['item_id'].astype(np.int16)
test.head()

ts = time.time()
matrix = pd.concat([matrix, test], ignore_index=True, sort=False, keys=cols)
matrix.fillna(0, inplace=True) # 34 month
print(matrix.head())
time.time() - ts

ts = time.time()
matrix = pd.merge(matrix, shops, on=['shop_id'], how='left')
matrix = pd.merge(matrix, items, on=['item_id'], how='left')
matrix = pd.merge(matrix, cats, on=['item_category_id'], how='left')
matrix['city_code'] = matrix['city_code'].astype(np.int8)
matrix['item_category_id'] = matrix['item_category_id'].astype(np.int8)
matrix['type_code'] = matrix['type_code'].astype(np.int8)
matrix['subtype_code'] = matrix['subtype_code'].astype(np.int8)
time.time() - ts

def lag_feature(df, lags, col):
    tmp = df[['date_block_num','shop_id','item_id',col]]
    for i in lags:
        shifted = tmp.copy()
        shifted.columns = ['date_block_num','shop_id','item_id', col+'_lag_'+str(i)]
        shifted['date_block_num'] += i
        df = pd.merge(df, shifted, on=['date_block_num','shop_id','item_id'], how='left')
    return df

ts = time.time()
matrix = lag_feature(matrix, [1,2,3,6,12], 'item_cnt_month')
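To make the lag mechanics concrete, here is a toy illustration (a hypothetical mini-frame, not part of the original notebook) of what lag_feature produces:

# 'x' from month 0 shows up as 'x_lag_1' in month 1; month 0 gets NaN
demo = pd.DataFrame({'date_block_num': [0, 1], 'shop_id': [5, 5],
                     'item_id': [10, 10], 'x': [3.0, 7.0]})
print(lag_feature(demo, [1], 'x'))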
time.time() - ts

ts = time.time()
group = matrix.groupby(['date_block_num']).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_avg_item_cnt' ]
group.reset_index(inplace=True)

matrix = pd.merge(matrix, group, on=['date_block_num'], how='left')
matrix['date_avg_item_cnt'] = matrix['date_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1], 'date_avg_item_cnt')
matrix.drop(['date_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts

ts = time.time()
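The next dozen cells all repeat one pattern: mean-encode item_cnt_month over some key set, lag the result, then drop the un-lagged column. A generic helper capturing that pattern (a sketch, not in the original notebook) makes it explicit:

# Hypothetical helper equivalent to each group/merge/lag/drop cell below
def mean_encode_lag(matrix, keys, name, lags):
    group = matrix.groupby(keys).agg({'item_cnt_month': ['mean']})
    group.columns = [name]
    group.reset_index(inplace=True)
    matrix = pd.merge(matrix, group, on=keys, how='left')
    matrix[name] = matrix[name].astype(np.float16)
    matrix = lag_feature(matrix, lags, name)
    matrix.drop([name], axis=1, inplace=True)
    return matrix

# e.g. the next cell is equivalent to:
# matrix = mean_encode_lag(matrix, ['date_block_num','item_id'], 'date_item_avg_item_cnt', [1,2,3,6,12])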
group = matrix.groupby(['date_block_num', 'item_id']).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_item_avg_item_cnt' ]
group.reset_index(inplace=True)

matrix = pd.merge(matrix, group, on=['date_block_num','item_id'], how='left')
matrix['date_item_avg_item_cnt'] = matrix['date_item_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1,2,3,6,12], 'date_item_avg_item_cnt')
matrix.drop(['date_item_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts

ts = time.time()
group = matrix.groupby(['date_block_num', 'shop_id']).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_shop_avg_item_cnt' ]
group.reset_index(inplace=True)

matrix = pd.merge(matrix, group, on=['date_block_num','shop_id'], how='left')
matrix['date_shop_avg_item_cnt'] = matrix['date_shop_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1,2,3,6,12], 'date_shop_avg_item_cnt')
matrix.drop(['date_shop_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts

ts = time.time()
group = matrix.groupby(['date_block_num', 'item_category_id']).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_cat_avg_item_cnt' ]
group.reset_index(inplace=True)

matrix = pd.merge(matrix, group, on=['date_block_num','item_category_id'], how='left')
matrix['date_cat_avg_item_cnt'] = matrix['date_cat_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1], 'date_cat_avg_item_cnt')
matrix.drop(['date_cat_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts

ts = time.time()
group = matrix.groupby(['date_block_num', 'shop_id', 'item_category_id']).agg({'item_cnt_month': ['mean']})
group.columns = ['date_shop_cat_avg_item_cnt']
group.reset_index(inplace=True)

matrix = pd.merge(matrix, group, on=['date_block_num', 'shop_id', 'item_category_id'], how='left')
matrix['date_shop_cat_avg_item_cnt'] = matrix['date_shop_cat_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1], 'date_shop_cat_avg_item_cnt')
matrix.drop(['date_shop_cat_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts

ts = time.time()
group = matrix.groupby(['date_block_num', 'shop_id', 'type_code']).agg({'item_cnt_month': ['mean']})
group.columns = ['date_shop_type_avg_item_cnt']
group.reset_index(inplace=True)

matrix = pd.merge(matrix, group, on=['date_block_num', 'shop_id', 'type_code'], how='left')
matrix['date_shop_type_avg_item_cnt'] = matrix['date_shop_type_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1], 'date_shop_type_avg_item_cnt')
matrix.drop(['date_shop_type_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts

ts = time.time()
group = matrix.groupby(['date_block_num', 'shop_id', 'subtype_code']).agg({'item_cnt_month': ['mean']})
group.columns = ['date_shop_subtype_avg_item_cnt']
group.reset_index(inplace=True)

matrix = pd.merge(matrix, group, on=['date_block_num', 'shop_id', 'subtype_code'], how='left')
matrix['date_shop_subtype_avg_item_cnt'] = matrix['date_shop_subtype_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1], 'date_shop_subtype_avg_item_cnt')
matrix.drop(['date_shop_subtype_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts

ts = time.time()
group = matrix.groupby(['date_block_num', 'city_code']).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_city_avg_item_cnt' ]
group.reset_index(inplace=True)

matrix = pd.merge(matrix, group, on=['date_block_num', 'city_code'], how='left')
matrix['date_city_avg_item_cnt'] = matrix['date_city_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1], 'date_city_avg_item_cnt')
matrix.drop(['date_city_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts

ts = time.time()
group = matrix.groupby(['date_block_num', 'item_id', 'city_code']).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_item_city_avg_item_cnt' ]
group.reset_index(inplace=True)

matrix = pd.merge(matrix, group, on=['date_block_num', 'item_id', 'city_code'], how='left')
matrix['date_item_city_avg_item_cnt'] = matrix['date_item_city_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1], 'date_item_city_avg_item_cnt')
matrix.drop(['date_item_city_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts

ts = time.time()
group = matrix.groupby(['date_block_num', 'type_code']).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_type_avg_item_cnt' ]
group.reset_index(inplace=True)

matrix = pd.merge(matrix, group, on=['date_block_num', 'type_code'], how='left')
matrix['date_type_avg_item_cnt'] = matrix['date_type_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1], 'date_type_avg_item_cnt')
matrix.drop(['date_type_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts

ts = time.time()
group = matrix.groupby(['date_block_num', 'subtype_code']).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_subtype_avg_item_cnt' ]
group.reset_index(inplace=True)

matrix = pd.merge(matrix, group, on=['date_block_num', 'subtype_code'], how='left')
matrix['date_subtype_avg_item_cnt'] = matrix['date_subtype_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1], 'date_subtype_avg_item_cnt')
matrix.drop(['date_subtype_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts

ts = time.time()
group = train.groupby(['item_id']).agg({'item_price': ['mean']})
group.columns = ['item_avg_item_price']
group.reset_index(inplace=True)

matrix = pd.merge(matrix, group, on=['item_id'], how='left')
matrix['item_avg_item_price'] = matrix['item_avg_item_price'].astype(np.float16)

group = train.groupby(['date_block_num','item_id']).agg({'item_price': ['mean']})
group.columns = ['date_item_avg_item_price']
group.reset_index(inplace=True)

matrix = pd.merge(matrix, group, on=['date_block_num','item_id'], how='left')
matrix['date_item_avg_item_price'] = matrix['date_item_avg_item_price'].astype(np.float16)

lags = [1,2,3,4,5,6]
matrix = lag_feature(matrix, lags, 'date_item_avg_item_price')

for i in lags:
    matrix['delta_price_lag_'+str(i)] = \
        (matrix['date_item_avg_item_price_lag_'+str(i)] - matrix['item_avg_item_price']) / matrix['item_avg_item_price']

# Pick the most recent month's relative price change; note that NaN is truthy in
# Python, so a missing lag_1 is returned as-is and zeroed by the fillna below
def select_trend(row):
    for i in lags:
        if row['delta_price_lag_'+str(i)]:
            return row['delta_price_lag_'+str(i)]
    return 0

matrix['delta_price_lag'] = matrix.apply(select_trend, axis=1)
matrix['delta_price_lag'] = matrix['delta_price_lag'].astype(np.float16)
matrix['delta_price_lag'].fillna(0, inplace=True)

# matrix['price_trend'] = matrix[['delta_price_lag_1','delta_price_lag_2','delta_price_lag_3']].bfill(axis=1).iloc[:, 0]
# Invalid dtype for backfill_2d [float16]

features_to_drop = ['item_avg_item_price', 'date_item_avg_item_price']
for i in lags:
    features_to_drop += ['date_item_avg_item_price_lag_'+str(i)]
    features_to_drop += ['delta_price_lag_'+str(i)]

matrix.drop(features_to_drop, axis=1, inplace=True)
time.time() - ts

ts = time.time()
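The row-wise apply above is slow on a frame this size. A vectorized sketch of the same selection is left here commented out, like the original bfill attempt (it would have to run before the drop above; casting to float32 first sidesteps the backfill_2d float16 error, and the inf sentinel reproduces select_trend's NaN-is-truthy behaviour):

# dp = matrix[['delta_price_lag_'+str(i) for i in lags]].astype(np.float32)
# dp = dp.fillna(np.inf)   # NaN was truthy in select_trend; mark it with a sentinel
# dp = dp.where(dp != 0)   # zeros are falsy and get skipped
# first = dp.bfill(axis=1).iloc[:, 0]
# matrix['delta_price_lag'] = first.replace(np.inf, 0).fillna(0).astype(np.float16)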
group = train.groupby(['date_block_num','shop_id']).agg({'revenue': ['sum']})
group.columns = ['date_shop_revenue']
group.reset_index(inplace=True)

matrix = pd.merge(matrix, group, on=['date_block_num','shop_id'], how='left')
matrix['date_shop_revenue'] = matrix['date_shop_revenue'].astype(np.float32)

group = group.groupby(['shop_id']).agg({'date_shop_revenue': ['mean']})
group.columns = ['shop_avg_revenue']
group.reset_index(inplace=True)

matrix = pd.merge(matrix, group, on=['shop_id'], how='left')
matrix['shop_avg_revenue'] = matrix['shop_avg_revenue'].astype(np.float32)

matrix['delta_revenue'] = (matrix['date_shop_revenue'] - matrix['shop_avg_revenue']) / matrix['shop_avg_revenue']
matrix['delta_revenue'] = matrix['delta_revenue'].astype(np.float16)

matrix = lag_feature(matrix, [1], 'delta_revenue')
matrix.drop(['date_shop_revenue','shop_avg_revenue','delta_revenue'], axis=1, inplace=True)
time.time() - ts

matrix['month'] = matrix['date_block_num'] % 12
matrix.head()

# Number of days in each month (no leap-year handling)
days = pd.Series([31,28,31,30,31,30,31,31,30,31,30,31])
matrix['days'] = matrix['month'].map(days).astype(np.int8)

ts = time.time()
cache = {}
matrix['item_shop_last_sale'] = -1
matrix['item_shop_last_sale'] = matrix['item_shop_last_sale'].astype(np.int8)
# Months since the last sale of this item in this shop (slow: iterates every row)
for idx, row in matrix.iterrows():
    key = str(row.item_id)+' '+str(row.shop_id)
    if key not in cache:
        if row.item_cnt_month!=0:
            cache[key] = row.date_block_num
    else:
        last_date_block_num = cache[key]
        matrix.at[idx, 'item_shop_last_sale'] = row.date_block_num - last_date_block_num
        cache[key] = row.date_block_num
time.time() - ts

ts = time.time()
cache = {}
matrix['item_last_sale'] = -1
matrix['item_last_sale'] = matrix['item_last_sale'].astype(np.int8)
# Months since the last sale of this item in any shop
for idx, row in matrix.iterrows():
    key = row.item_id
    if key not in cache:
        if row.item_cnt_month!=0:
            cache[key] = row.date_block_num
    else:
        last_date_block_num = cache[key]
        if row.date_block_num>last_date_block_num:
            matrix.at[idx, 'item_last_sale'] = row.date_block_num - last_date_block_num
            cache[key] = row.date_block_num
time.time() - ts

ts = time.time()
matrix['item_shop_first_sale'] = matrix['date_block_num'] - matrix.groupby(['item_id','shop_id'])['date_block_num'].transform('min')
matrix['item_first_sale'] = matrix['date_block_num'] - matrix.groupby('item_id')['date_block_num'].transform('min')
time.time() - ts

ts = time.time()
# Drop the first year: months 0-11 cannot have complete 12-month lag features
matrix = matrix[matrix.date_block_num > 11]
time.time() - ts

ts = time.time()
# Lagged item_cnt values are NaN where no sales occurred; zero is the true value there
def fill_na(df):
    for col in df.columns:
        if ('_lag_' in col) & (df[col].isnull().any()):
            if ('item_cnt' in col):
                df[col].fillna(0, inplace=True)
    return df

matrix = fill_na(matrix)
time.time() - ts

matrix.to_pickle('data.pkl')
del matrix
del cache
del group
del items
del shops
del cats
del train
# leave test for submission
gc.collect();

5. Data size optimization and running the XGBoost model

data = pd.read_pickle('data.pkl')
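Even after feature selection the matrix is heavy, so it is worth making sure every column kept a compact dtype. A generic downcasting pass (a sketch, not in the original notebook; most columns are already int8/float16 from the steps above) would look like:

# Downcast any stray 64-bit columns before modeling
def downcast_dtypes(df):
    float_cols = [c for c in df if df[c].dtype == 'float64']
    int_cols = [c for c in df if df[c].dtype == 'int64']
    df[float_cols] = df[float_cols].astype(np.float32)
    df[int_cols] = df[int_cols].astype(np.int32)
    return df

data = downcast_dtypes(data)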
data = data[[
    'date_block_num',
    'shop_id',
    'item_id',
    'item_cnt_month',
    'city_code',
    'item_category_id',
    'type_code',
    'subtype_code',
    'item_cnt_month_lag_1',
    'item_cnt_month_lag_2',
    'item_cnt_month_lag_3',
    'item_cnt_month_lag_6',
    'item_cnt_month_lag_12',
    'date_avg_item_cnt_lag_1',
    'date_item_avg_item_cnt_lag_1',
    'date_item_avg_item_cnt_lag_2',
    'date_item_avg_item_cnt_lag_3',
    'date_item_avg_item_cnt_lag_6',
    'date_item_avg_item_cnt_lag_12',
    'date_shop_avg_item_cnt_lag_1',
    'date_shop_avg_item_cnt_lag_2',
    'date_shop_avg_item_cnt_lag_3',
    'date_shop_avg_item_cnt_lag_6',
    'date_shop_avg_item_cnt_lag_12',
    'date_cat_avg_item_cnt_lag_1',
    'date_shop_cat_avg_item_cnt_lag_1',
    #'date_shop_type_avg_item_cnt_lag_1',
    #'date_shop_subtype_avg_item_cnt_lag_1',
    'date_city_avg_item_cnt_lag_1',
    'date_item_city_avg_item_cnt_lag_1',
    #'date_type_avg_item_cnt_lag_1',
    #'date_subtype_avg_item_cnt_lag_1',
    'delta_price_lag',
    'month',
    'days',
    'item_shop_last_sale',
    'item_last_sale',
    'item_shop_first_sale',
    'item_first_sale',
]]

# Month 33 is the validation set, month 34 the test set
X_train = data[data.date_block_num < 33].drop(['item_cnt_month'], axis=1)
Y_train = data[data.date_block_num < 33]['item_cnt_month']
X_valid = data[data.date_block_num == 33].drop(['item_cnt_month'], axis=1)
Y_valid = data[data.date_block_num == 33]['item_cnt_month']
X_test = data[data.date_block_num == 34].drop(['item_cnt_month'], axis=1)

del data
gc.collect();

ts = time.time()

model = XGBRegressor(
    max_depth=8,
    n_estimators=1000,
    min_child_weight=300,
    colsample_bytree=0.8,
    subsample=0.8,
    eta=0.3,
    seed=42)

model.fit(
    X_train,
    Y_train,
    eval_metric="rmse",
    eval_set=[(X_train, Y_train), (X_valid, Y_valid)],
    verbose=True,
    early_stopping_rounds=10)

time.time() - ts
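With early_stopping_rounds set, the fitted sklearn wrapper records the best round, which can be read back afterwards (a quick check, not in the original notebook; attribute names per the xgboost sklearn API of that era):

# best validation RMSE and the boosting round that achieved it
print(model.best_score, model.best_iteration)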

Y_pred = model.predict(X_valid).clip(0, 20)
Y_test = model.predict(X_test).clip(0, 20)

submission = pd.DataFrame({
    "ID": test.index,
    "item_cnt_month": Y_test
})
submission.to_csv('xgb_submission.csv', index=False)

# save predictions for an ensemble
pickle.dump(Y_pred, open('xgb_train.pickle', 'wb'))
pickle.dump(Y_test, open('xgb_test.pickle', 'wb'))

plot_features(model, (10,14))
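To put a number on the validation predictions saved above (an added check, not in the original notebook), the RMSE of the clipped month-33 predictions can be computed directly:

from sklearn.metrics import mean_squared_error
# RMSE of the clipped validation predictions against the clipped target
rmse = np.sqrt(mean_squared_error(Y_valid, Y_pred))
print('validation RMSE: %.4f' % rmse)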
