First, take a look at the data.

#coding:utf-8
"""
Created on Wed Jan 9 2019
@author: fzh
"""
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
plt.rcParams.update({'figure.max_open_warning': 0})
import seaborn as sns
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from sklearn.model_selection import GridSearchCV, RepeatedKFold, cross_val_score,cross_val_predict,KFold
from sklearn.metrics import make_scorer,mean_squared_error
from sklearn.linear_model import LinearRegression, Lasso, Ridge, ElasticNet
from sklearn.svm import LinearSVR, SVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor,AdaBoostRegressor
from xgboost import XGBRegressor
from sklearn.preprocessing import PolynomialFeatures,MinMaxScaler,StandardScaler
from scipy import stats
import os

# load dataset
with open("data/zhengqi_train.txt") as fr:
    data_train = pd.read_table(fr, sep="\t")
print('data_train.shape=', data_train.shape)
with open("data/zhengqi_test.txt") as fr_test:
    data_test = pd.read_table(fr_test, sep="\t")
print('data_test.shape=', data_test.shape)
# merge train set and test set; add an 'oringin' column to mark the source
data_train["oringin"]="train"
data_test["oringin"]="test"
data_all=pd.concat([data_train,data_test],axis=0,ignore_index=True)
#View data
print('data_all.shape=',data_all.shape)
# Explore feature distribution
fig = plt.figure(figsize=(6, 6))
for column in data_all.columns[0:-2]:
    g = sns.kdeplot(data_all[column][(data_all["oringin"] == "train")], color="Red", shade=True)
    g = sns.kdeplot(data_all[column][(data_all["oringin"] == "test")], color="Blue", shade=True)
    g.set(xlabel=column, ylabel='Frequency')
    g = g.legend(["train", "test"])
    plt.show()

As the output shows, train has 2888 rows and test has 1925 rows. Next, compare the per-column distributions of train and test; the x-axis is the feature name, from V0 to V37. Taking V4 and V5 as examples, V4's train and test distributions are close, while V5's differ substantially. "V9", "V11", "V17", "V22" and "V28" show similarly large gaps, so these columns can be dropped.
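
The visual comparison can also be backed by a number. The following is a small sketch that is not part of the original workflow: it uses a two-sample Kolmogorov-Smirnov test to rank features by how much their train and test distributions diverge; the 0.3 cut-off on the KS statistic is only an illustrative assumption, not a value taken from this notebook.

# Hypothetical check (not in the original notebook): quantify the train/test gap per feature
# with a two-sample KS test instead of only eyeballing the KDE plots.
from scipy.stats import ks_2samp

shift_stats = {}
for column in data_all.columns[0:-2]:          # skip 'target' and 'oringin'
    train_vals = data_all.loc[data_all["oringin"] == "train", column]
    test_vals = data_all.loc[data_all["oringin"] == "test", column]
    stat, p_value = ks_2samp(train_vals, test_vals)
    shift_stats[column] = round(stat, 3)
# larger KS statistic = larger distribution shift; 0.3 is an arbitrary illustrative cut-off
print(sorted(shift_stats.items(), key=lambda kv: kv[1], reverse=True)[:10])
print('suspect columns:', [c for c, s in shift_stats.items() if s > 0.3])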

fig = plt.figure(figsize=(10, 10))
for i in range(len(data_all.columns)-2):
    # initialize the facet grid, one column per data source
    g = sns.FacetGrid(data_all, col='oringin')
    # use map to draw the histogram of each feature
    g = g.map(sns.distplot, data_all.columns[i])
    plt.savefig('distplot' + str(i) + '.jpg')

Here V5 is shown as an example.

Drop the columns "V5", "V9", "V11", "V17", "V22", "V28".

"""删除特征"V5","V9","V11","V17","V22","V28",训练集和测试集分布不一致"""
data_all.drop(["V5","V9","V11","V17","V22","V28"],axis=1,inplace=True)
print('drop after data_all.shape=',data_all.shape)

"""删除特征"V5","V9","V11","V17","V22","V28",训练集和测试集分布不一致"""
data_all.drop(["V5","V9","V11","V17","V22","V28"],axis=1,inplace=True)
print('drop after data_all.shape=',data_all.shape)# figure parameters
data_train= data_all[data_all["oringin"] == "train"].drop("oringin", axis=1)
print('drop after data_train.shape=',data_train.shape)"""找出相关程度"""
plt.figure(figsize=(20, 16))  # figure width and height
colnm = data_train.columns.tolist()  # column names
mcorr = data_train.corr(method="spearman")  # correlation matrix: pairwise correlation between any two variables
print('mcorr.shape=', mcorr.shape)
mask = np.zeros_like(mcorr, dtype=bool)  # boolean matrix with the same shape as mcorr
# mask the upper triangle of the correlation matrix
mask[np.triu_indices_from(mask)] = True  # upper triangle set to True
g = sns.heatmap(mcorr, mask=mask, cmap=plt.cm.jet, square=True, annot=True, fmt='0.2f')  # heatmap of pairwise correlations
plt.savefig('mcorr.jpg')

Drop features whose correlation coefficient with target is below 0.1.

# Threshold for removing correlated variables
threshold = 0.1
# Absolute value correlation matrix
corr_matrix = data_train.corr().abs()
drop_col=corr_matrix[corr_matrix["target"]<threshold].index
print('drop_col=',drop_col)
data_all.drop(drop_col,axis=1,inplace=True)
print('data_all.shape=',data_all.shape)

Apply min-max normalization to each feature column.

"""归一化"""
cols_numeric=list(data_all.columns)
cols_numeric.remove("oringin")
def scale_minmax(col):return (col-col.min())/(col.max()-col.min())
scale_cols = [col for col in cols_numeric if col!='target']
print('scale_cols=',scale_cols)
data_all[scale_cols] = data_all[scale_cols].apply(scale_minmax,axis=0)
print('data_all[scale_cols].shape=',data_all[scale_cols].shape)

Split into train, valid, and test sets.

"""function to get training samples"""
def get_training_data():df_train = data_all[data_all["oringin"]=="train"]print('df_train.shape=',df_train.shape)y = df_train.targetX = df_train.drop(["oringin","target"],axis=1)X_train,X_valid,y_train,y_valid=train_test_split(X,y,test_size=0.3,random_state=100)return X_train,X_valid,y_train,y_valid"""extract test data (without SalePrice)"""
def get_test_data():df_test = data_all[data_all["oringin"]=="test"].reset_index(drop=True)return df_test.drop(["oringin","target"],axis=1)"""metric for evaluation"""
def rmse(y_true, y_pred):diff = y_pred - y_truesum_sq = sum(diff ** 2)n = len(y_pred)return np.sqrt(sum_sq / n)def mse(y_ture, y_pred):return mean_squared_error(y_ture, y_pred)

Use Ridge regression to remove outliers: a sample is treated as an outlier when the z-score of its residual (residual minus the mean residual, divided by the residual standard deviation) exceeds 3.

"""function to detect outliers based on the predictions of a model"""
def find_outliers(model, X, y, sigma=3):# predict y values using modeltry:y_pred = pd.Series(model.predict(X), index=y.index)# if predicting fails, try fitting the model firstexcept:model.fit(X, y)y_pred = pd.Series(model.predict(X), index=y.index)# calculate residuals between the model prediction and true y valuesresid = y - y_predmean_resid = resid.mean()std_resid = resid.std()# calculate z statistic, define outliers to be where |z|>sigmaz = (resid - mean_resid) / std_resid#找出方差大于3的数据的索引,然后丢掉outliers = z[abs(z) > sigma].index# print and plot the resultsprint('score=', model.score(X, y))print('rmse=', rmse(y, y_pred))print("mse=", mean_squared_error(y, y_pred))print('---------------------------------------')print('mean of residuals:', mean_resid)print('std of residuals:', std_resid)print('---------------------------------------')print(len(outliers), 'outliers:')print(outliers.tolist())plt.figure(figsize=(15, 5))plt.subplot(1, 3, 1)plt.plot(y, y_pred, '.')plt.plot(y.loc[outliers], y_pred.loc[outliers], 'ro')plt.legend(['Accepted', 'Outlier'])plt.xlabel('y')plt.ylabel('y_pred')plt.subplot(1, 3, 2)plt.plot(y, y - y_pred, '.')plt.plot(y.loc[outliers], y.loc[outliers] - y_pred.loc[outliers], 'ro')plt.legend(['Accepted', 'Outlier'])plt.xlabel('y')plt.ylabel('y - y_pred')plt.subplot(1, 3, 3)plt.hist(z,bins=50)plt.hist(z.loc[outliers],color='r', bins=50)plt.legend(['Accepted', 'Outlier'])plt.xlabel('normal res error')plt.ylabel('frequency')plt.savefig('outliers.png')return outliers# get training data
from sklearn.linear_model import Ridge
X_train, X_valid,y_train,y_valid = get_training_data()
# find and remove outliers using a Ridge model
outliers = find_outliers(Ridge(), X_train, y_train)

Remove these points permanently and train on the remaining data.

""" permanently remove these outliers from the data"""
X_t=X_train.drop(outliers)
y_t=y_train.drop(outliers)
#
def get_trainning_data_omitoutliers():
    y1 = y_t.copy()
    X1 = X_t.copy()
    return X1, y1

def train_model(model, param_grid, splits=5, repeats=5):
    X, y = get_trainning_data_omitoutliers()
    poly_trans = PolynomialFeatures(degree=2)
    X = poly_trans.fit_transform(X)
    X = MinMaxScaler().fit_transform(X)

    # create cross-validation method
    rkfold = RepeatedKFold(n_splits=splits, n_repeats=repeats)

    # perform a grid search if param_grid given
    if len(param_grid) > 0:
        # setup grid search parameters
        gsearch = GridSearchCV(model, param_grid, cv=rkfold,
                               scoring="neg_mean_squared_error",
                               verbose=1, return_train_score=True)
        # search the grid
        gsearch.fit(X, y)
        # extract best model from the grid
        model = gsearch.best_estimator_
        best_idx = gsearch.best_index_
        # get cv-scores for best model
        grid_results = pd.DataFrame(gsearch.cv_results_)
        cv_mean = abs(grid_results.loc[best_idx, 'mean_test_score'])
        cv_std = grid_results.loc[best_idx, 'std_test_score']
    # no grid search, just cross-val score for given model
    else:
        grid_results = []
        cv_results = cross_val_score(model, X, y, scoring="neg_mean_squared_error", cv=rkfold)
        cv_mean = abs(np.mean(cv_results))
        cv_std = np.std(cv_results)

    # combine mean and std cv-score into a pandas series
    cv_score = pd.Series({'mean': cv_mean, 'std': cv_std})

    # predict y using the fitted model
    y_pred = model.predict(X)

    # print stats on model performance
    print('----------------------')
    print(model)
    print('----------------------')
    print('score=', model.score(X, y))
    print('rmse=', rmse(y, y_pred))
    print('mse=', mse(y, y_pred))
    print('cross_val: mean=', cv_mean, ', std=', cv_std)

    return model, cv_score, grid_results
#
# places to store optimal models and scores
opt_models = dict()
score_models = pd.DataFrame(columns=['mean', 'std'])
# no. k-fold splits
splits = 5
# no. k-fold iterations
repeats = 5

print('=========Ridge model========================')
model = 'Ridge'
opt_models[model] = Ridge()
alph_range = np.arange(0.25, 6, 0.25)
param_grid = {'alpha': alph_range}
opt_models[model], cv_score, grid_results = train_model(opt_models[model], param_grid=param_grid,
                                                        splits=splits, repeats=repeats)
cv_score.name = model
score_models = score_models.append(cv_score)

plt.figure()
plt.errorbar(alph_range, abs(grid_results['mean_test_score']),abs(grid_results['std_test_score'])/np.sqrt(splits*repeats))
plt.xlabel('alpha')
plt.ylabel('score')
plt.show()
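
To read a concrete best value off that curve, the short sketch below (an addition, not part of the original code) pulls the top row out of the grid_results DataFrame returned by train_model above.

# Sketch, assuming grid_results is the DataFrame built from gsearch.cv_results_ above.
# mean_test_score holds negative MSE, so the maximum corresponds to the best alpha.
best_row = grid_results.loc[grid_results['mean_test_score'].idxmax()]
print('best alpha =', best_row['param_alpha'])
print('best CV MSE =', abs(best_row['mean_test_score']),
      '+/-', best_row['std_test_score'])
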
#print('===========RandomForest model================')
#model = 'RandomForest'
#opt_models[model] = RandomForestRegressor()
#param_grid = {'n_estimators':[100,150,200],'max_features':[8,12,16,20,24],'min_samples_split':[2,4,6]}
#opt_models[model], cv_score, grid_results = train_model(opt_models[model], param_grid=param_grid,splits=5, repeats=1)
#cv_score.name = model
#score_models = score_models.append(cv_score)
#print('score_models=',score_models)
#import pickle
#with open("prediction.pkl", "wb") as f:
#    pickle.dump(opt_models[model], f)

The score is clearly higher.

A random forest model was then trained and saved, as shown below.

The full train.py:

#coding:utf-8
"""
Created on Wed Jan 9 2019
@author: fzh
"""
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
plt.rcParams.update({'figure.max_open_warning': 0})
import seaborn as sns
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from sklearn.model_selection import GridSearchCV, RepeatedKFold, cross_val_score,cross_val_predict,KFold
from sklearn.metrics import make_scorer,mean_squared_error
from sklearn.linear_model import LinearRegression, Lasso, Ridge, ElasticNet
from sklearn.svm import LinearSVR, SVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor,AdaBoostRegressor
# from xgboost import XGBRegressor
from sklearn.preprocessing import PolynomialFeatures,MinMaxScaler,StandardScaler
import os
from scipy import stats
from sklearn.preprocessing import StandardScaler
from beautifultable import BeautifulTable
import pickle
from sklearn.ensemble import ExtraTreesRegressor,AdaBoostRegressor
from sklearn.ensemble import AdaBoostClassifier

def del_feature(data_train, data_test):
    data_train["oringin"] = "train"
    data_test["oringin"] = "test"
    data_all = pd.concat([data_train, data_test], axis=0, ignore_index=True)
    """Drop features "V5","V9","V11","V17","V22","V28": train and test distributions are inconsistent"""
    data_all.drop(["V5", "V9", "V11", "V17", "V22", "V28"], axis=1, inplace=True)
    # print('drop after data_all.shape=',data_all.shape)
    data_train = data_all[data_all["oringin"] == "train"].drop("oringin", axis=1)
    # print('drop after data_train.shape=',data_train.shape)
    """'V14', u'V21', u'V25', u'V26', u'V32', u'V33', u'V34'"""
    # Threshold for removing correlated variables
    threshold = 0.1
    # Absolute value correlation matrix
    corr_matrix = data_train.corr().abs()
    drop_col = corr_matrix[corr_matrix["target"] < threshold].index
    # print('drop_col=',drop_col)
    data_all.drop(drop_col, axis=1, inplace=True)
    # print('data_all.shape=',data_all.shape)
    return data_all

"""function to get training samples"""
def get_training_data(data_all):
    df_train = data_all[data_all["oringin"] == "train"]
    y = df_train.target
    X = df_train.drop(["oringin", "target"], axis=1)
    X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.3, random_state=100)
    return X_train, X_valid, y_train, y_valid

"""extract test data (without target)"""
def get_test_data(data_all):
    df_test = data_all[data_all["oringin"] == "test"].reset_index(drop=True)
    return df_test.drop(["oringin", "target"], axis=1)

"""metrics for evaluation"""
def rmse(y_true, y_pred):
    diff = y_pred - y_true
    sum_sq = sum(diff ** 2)
    n = len(y_pred)
    return np.sqrt(sum_sq / n)

def mse(y_true, y_pred):
    return mean_squared_error(y_true, y_pred)

"""function to detect outliers based on the predictions of a model"""
def find_outliers(model, X, y, sigma=3):
    # predict y values using model
    try:
        y_pred = pd.Series(model.predict(X), index=y.index)
    # if predicting fails, try fitting the model first
    except:
        model.fit(X, y)
        y_pred = pd.Series(model.predict(X), index=y.index)

    # calculate residuals between the model prediction and true y values
    resid = y - y_pred
    mean_resid = resid.mean()
    std_resid = resid.std()

    # calculate z statistic, define outliers to be where |z|>sigma
    z = (resid - mean_resid) / std_resid
    # indices of samples with |z| > sigma; these will be dropped
    outliers = z[abs(z) > sigma].index

    # print the results
    print('score=', model.score(X, y))
    print('rmse=', rmse(y, y_pred))
    print("mse=", mean_squared_error(y, y_pred))
    print('---------------------------------------')
    print('mean of residuals:', mean_resid)
    print('std of residuals:', std_resid)
    print('---------------------------------------')
    return outliers

def get_trainning_data_omitoutliers(X_t, y_t):
    y1 = y_t.copy()
    X1 = X_t.copy()
    return X1, y1

def scale_minmax(col):
    return (col - col.min()) / (col.max() - col.min())

def normal(data_all):
    """Min-max normalization"""
    cols_numeric = list(data_all.columns)
    cols_numeric.remove("oringin")
    scale_cols = [col for col in cols_numeric if col != 'target']
    print('scale_cols=', scale_cols)
    data_all[scale_cols] = data_all[scale_cols].apply(scale_minmax, axis=0)
    return data_all

if __name__ == '__main__':
    with open("data/zhengqi_train.txt") as fr:
        data_train = pd.read_table(fr, sep="\t")
    with open("data/zhengqi_test.txt") as fr_test:
        data_test = pd.read_table(fr_test, sep="\t")
    data_all = del_feature(data_train, data_test)
    print('clear data_all.shape', data_all.shape)

    data_all = normal(data_all)
    X_train, X_valid, y_train, y_valid = get_training_data(data_all)
    print('X_train.shape=', X_train.shape)
    print('X_valid.shape=', X_valid.shape)
    X_test = get_test_data(data_all)
    print('X_test.shape', X_test.shape)

    # find and remove outliers using a Ridge model
    outliers = find_outliers(Ridge(), X_train, y_train)
    """permanently remove these outliers from the data"""
    X_train, y_train = get_trainning_data_omitoutliers(X_train.drop(outliers), y_train.drop(outliers))

    X1 = pd.concat([X_train, y_train], axis=1)
    X2 = pd.concat([X_valid, y_valid], axis=1)
    X_all = pd.concat([X1, X2], axis=0)
    print(X_all)
    y = X_all['target']
    X = X_all.drop(["target"], axis=1)
    print(X.shape)
    X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.3, random_state=100)

    poly_trans = PolynomialFeatures(degree=2)
    X_train = poly_trans.fit_transform(X_train)
    print(X_train.shape)
    X_valid = poly_trans.fit_transform(X_valid)
    print(X_valid.shape)

    print('==============forest_model========================')
    forest_model = RandomForestRegressor(
        n_estimators=500,
        criterion='mse',
        max_depth=20,
        min_samples_leaf=3,
        max_features=0.4,
        random_state=1,
        bootstrap=False,
        n_jobs=-1)
    forest_model.fit(X_train, y_train)
    importance = forest_model.feature_importances_
    table = BeautifulTable()
    # table.column_headers = ["feature", "importance"]
    print('RF feature importance:')
    # print(data_all)
    for i, cols in enumerate(X_all.iloc[:, :-1]):
        table.append_row([cols, round(importance[i], 3)])
    print(table)

    y_pred = forest_model.predict(X_valid)
    y_valid_rmse = rmse(y_valid, y_pred)
    print('y_valid_rmse=', y_valid_rmse)
    y_valid_mse = mse(y_valid, y_pred)
    print('y_valid_mse=', y_valid_mse)
    y_valid_score = forest_model.score(X_valid, y_valid)
    print('y_valid_score=', y_valid_score)

    with open("forest_model.pkl", "wb") as f:
        pickle.dump(forest_model, f)
    with open("forest_model.pkl", "rb") as f:
        model = pickle.load(f)
    y_pred = model.predict(X_valid)
    y_valid_rmse = rmse(y_valid, y_pred)
    print('y_valid_rmse=', y_valid_rmse)
    y_valid_mse = mse(y_valid, y_pred)
    print('y_valid_mse=', y_valid_mse)
    y_valid_score = model.score(X_valid, y_valid)
    print('y_valid_score=', y_valid_score)

inference.py is as follows:

#coding:utf-8
"""
Created on Wed Jan 9 2019
@author: fzh
"""
import pickle
import numpy as np
import os
import pandas as pd
from sklearn.preprocessing import PolynomialFeatures,MinMaxScaler
def del_feature(data_train, data_test):
    data_train["oringin"] = "train"
    data_test["oringin"] = "test"
    data_all = pd.concat([data_train, data_test], axis=0, ignore_index=True)
    """Drop features "V5","V9","V11","V17","V22","V28": train and test distributions are inconsistent"""
    data_all.drop(["V5", "V9", "V11", "V17", "V22", "V28"], axis=1, inplace=True)
    # print('drop after data_all.shape=',data_all.shape)
    data_train = data_all[data_all["oringin"] == "train"].drop("oringin", axis=1)
    # print('drop after data_train.shape=',data_train.shape)
    """'V14', u'V21', u'V25', u'V26', u'V32', u'V33', u'V34'"""
    # Threshold for removing correlated variables
    threshold = 0.1
    # Absolute value correlation matrix
    corr_matrix = data_train.corr().abs()
    drop_col = corr_matrix[corr_matrix["target"] < threshold].index
    # print('drop_col=',drop_col)
    data_all.drop(drop_col, axis=1, inplace=True)
    # print('data_all.shape=',data_all.shape)
    return data_all

def scale_minmax(col):
    return (col - col.min()) / (col.max() - col.min())

def normal(data_all):
    """Min-max normalization"""
    cols_numeric = list(data_all.columns)
    cols_numeric.remove("oringin")
    scale_cols = [col for col in cols_numeric if col != 'target']
    print('scale_cols=', scale_cols)
    data_all[scale_cols] = data_all[scale_cols].apply(scale_minmax, axis=0)
    return data_all

"""extract test data (without target)"""
def get_test_data(data_all):
    df_test = data_all[data_all["oringin"] == "test"].reset_index(drop=True)
    return df_test.drop(["oringin", "target"], axis=1)

if __name__ == '__main__':
    with open("data/zhengqi_train.txt") as fr:
        data_train = pd.read_table(fr, sep="\t")
    with open("data/zhengqi_test.txt") as fr_test:
        data_test = pd.read_table(fr_test, sep="\t")
    data_all = del_feature(data_train, data_test)
    print('clear data_all.shape', data_all.shape)
    data_all = normal(data_all)
    X_test = get_test_data(data_all)
    print('X_test.shape', X_test.shape)

    poly_trans = PolynomialFeatures(degree=2)
    X_test = poly_trans.fit_transform(X_test)
    print(X_test.shape)

    with open("forest_model.pkl", "rb") as f:
        model = pickle.load(f)
    X_pre = model.predict(X_test)
    print(X_pre.shape)
    X_pre = list(map(lambda x: round(x, 3), X_pre))
    X_pre = np.reshape(X_pre, (-1, 1))
    print(X_pre.shape)
    X_pre = pd.DataFrame(X_pre)
    print(X_pre)
    X_pre.to_csv('result.txt', index=False, header=False)
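
Before uploading, it is worth a quick check that result.txt has exactly one prediction per test row (1,925 rows, no header or index, matching the to_csv call above). This is a small optional sketch, not part of the original inference.py:

# Optional sanity check (assumption, not in the original script): verify the submission shape.
import pandas as pd
sub = pd.read_csv('result.txt', header=None)
assert sub.shape[1] == 1, 'expected a single prediction column'
assert len(sub) == 1925, 'expected 1925 predictions, got %d' % len(sub)
print(sub[0].describe())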

The submission scored 0.1298, ranking around 100th. Not bad for a first attempt.
