# Implementation of Smooth Support Vector Regression

import numpy
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D   # registers the "3d" projection


numpy.random.seed(0)


def peaks(x1, x2):
    term1 = 3 * (1 - x1) ** 2 * numpy.exp(-x1 ** 2 - (x2 + 1) ** 2)
    term2 = -10 * (x1 / 5 - x1 ** 3 - x2 ** 5) * numpy.exp(-x1 ** 2 - x2 ** 2)
    term3 = -numpy.exp(-(x1 + 1) ** 2 - x2 ** 2) / 3
    val = term1 + term2 + term3
    return val


# generate the regression data
X1 = numpy.linspace(-3, 3, 30)
X2 = numpy.linspace(-3, 3, 30)
X1, X2 = numpy.meshgrid(X1, X2)
X = numpy.hstack((X1.reshape((-1, 1)), X2.reshape((-1, 1))))   # sample points of the data to regress
Y = peaks(X1, X2).reshape((-1, 1))
Yerr = Y + numpy.random.normal(0, 0.4, size=Y.shape)           # noisy observed values

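# The class below implements epsilon-insensitive Smooth SVR: the regressor
# h(x) = sum_j alpha_j * K(A_j, x) + b is found by minimizing the smoothed,
# twice-differentiable objective (a sketch of the formulation the code appears
# to follow, in the spirit of Lee, Hsieh and Huang's epsilon-SSVR):
#
#   J(alpha, b) = (1/2) * ||alpha||^2                                    (J1)
#               + (c/2) * sum_i p_eps(y_i - K(A, A_i)^T alpha - b)^2     (J2)
#
# with p_eps(x)^2 = p(x - epsilon)^2 + p(-x - epsilon)^2, where
# p(x) = log(1 + exp(beta * x)) / beta smooths the plus function max(x, 0),
# so an unconstrained Newton method can be applied directly.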
class SSVR(object):

    def __init__(self, X, Y_, c=50, mu=1, epsilon=0.5, beta=10):
        '''
        X: sample data set, one row per sample point
        Y_: observation data set, one row per observed value
        '''
        self.__X = X                 # sample points of the data to regress
        self.__Y_ = Y_               # observed values of the data to regress
        self.__c = c                 # weight of the error term
        self.__mu = mu               # Gaussian kernel parameter
        self.__epsilon = epsilon     # width of the insensitive tube
        self.__beta = beta           # smoothing parameter

        self.__A = X.T

    def get_estimation(self, x, alpha, b):
        '''
        get the estimated value at x
        '''
        A = self.__A
        mu = self.__mu

        x = numpy.array(x).reshape((-1, 1))
        KAx = self.__get_KAx(A, x, mu)
        regVal = self.__calc_hVal(KAx, alpha, b)
        return regVal

    def get_MAE(self, alpha, b):
        '''
        get the mean absolute error over the training samples
        '''
        X = self.__X
        Y_ = self.__Y_

        Y = numpy.array([self.get_estimation(x, alpha, b) for x in X]).reshape((-1, 1))
        RES = Y_ - Y
        MAE = numpy.linalg.norm(RES, ord=1) / alpha.shape[0]
        return MAE

    def get_GE(self):
        '''
        get the generalization error (leave-one-out cross-validation)
        '''
        X = self.__X
        Y_ = self.__Y_

        cnt = 0
        GE = 0
        for idx in range(X.shape[0]):
            x = X[idx:idx+1, :]
            y_ = Y_[idx, 0]

            # refit with sample idx held out
            self.__X = numpy.vstack((X[:idx, :], X[idx+1:, :]))
            self.__Y_ = numpy.vstack((Y_[:idx, :], Y_[idx+1:, :]))
            self.__A = self.__X.T
            alpha, b, tab = self.optimize()
            if not tab:
                continue
            cnt += 1
            y = self.get_estimation(x, alpha, b)
            GE += (y_ - y) ** 2
        GE /= cnt

        # restore the full data set
        self.__X = X
        self.__Y_ = Y_
        self.__A = X.T
        return GE

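    # optimize() runs a damped Newton iteration on J(alpha, b): at each step it
    # forms the gradient and Hessian, takes the direction d = -Hess^{-1} * grad,
    # chooses the step size by Armijo backtracking, and stops as soon as either
    # the gradient norm or the latest update becomes negligible.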
    def optimize(self, maxIter=1000, EPSILON=1.e-9):
        '''
        maxIter: maximum number of iterations
        EPSILON: convergence criterion -- converged once the gradient approaches 0
        '''
        A, Y_ = self.__A, self.__Y_
        c = self.__c
        mu = self.__mu
        epsilon = self.__epsilon
        beta = self.__beta

        alpha, b = self.__init_alpha_b((A.shape[1], 1))
        KAA = self.__get_KAA(A, mu)
        JVal = self.__calc_JVal(KAA, Y_, c, epsilon, beta, alpha, b)
        grad = self.__calc_grad(KAA, Y_, c, epsilon, beta, alpha, b)
        Hess = self.__calc_Hess(KAA, Y_, c, epsilon, beta, alpha, b)

        for i in range(maxIter):
            print("iterCnt: {:3d}, JVal: {}".format(i, JVal))
            if self.__converged1(grad, EPSILON):
                return alpha, b, True

            dCurr = -numpy.matmul(numpy.linalg.inv(Hess), grad)
            ALPHA = self.__calc_ALPHA_by_ArmijoRule(alpha, b, JVal, grad, dCurr, KAA, Y_, c, epsilon, beta)

            delta = ALPHA * dCurr
            alphaNew = alpha + delta[:-1, :]
            bNew = b + delta[-1, -1]
            JValNew = self.__calc_JVal(KAA, Y_, c, epsilon, beta, alphaNew, bNew)
            if self.__converged2(delta, JValNew - JVal, EPSILON):
                return alphaNew, bNew, True

            alpha, b, JVal = alphaNew, bNew, JValNew
            grad = self.__calc_grad(KAA, Y_, c, epsilon, beta, alpha, b)
            Hess = self.__calc_Hess(KAA, Y_, c, epsilon, beta, alpha, b)
        else:
            if self.__converged1(grad, EPSILON):
                return alpha, b, True
        return alpha, b, False

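    # Note: dCurr = -numpy.linalg.solve(Hess, grad) would yield the same Newton
    # direction without forming an explicit inverse, which is both cheaper and
    # numerically safer; inv() is kept above to mirror the original listing.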
    def __converged2(self, delta, JValDelta, EPSILON):
        val1 = numpy.linalg.norm(delta)
        val2 = numpy.linalg.norm(JValDelta)
        if val1 <= EPSILON or val2 <= EPSILON:
            return True
        return False

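    # Backtracking line search: accept the largest step ALPHA = v**i satisfying
    # the Armijo sufficient-decrease condition
    #   J(x + ALPHA * d) <= J(x) + C * ALPHA * grad^T d,
    # halving the candidate step (v = 0.5) until the test passes.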
    def __calc_ALPHA_by_ArmijoRule(self, alphaCurr, bCurr, JCurr, gCurr, dCurr, KAA, Y_, c, epsilon, beta, C=1.e-4, v=0.5):
        i = 0
        ALPHA = v ** i
        delta = ALPHA * dCurr
        alphaNext = alphaCurr + delta[:-1, :]
        bNext = bCurr + delta[-1, -1]
        JNext = self.__calc_JVal(KAA, Y_, c, epsilon, beta, alphaNext, bNext)
        while True:
            if JNext <= JCurr + C * ALPHA * numpy.matmul(dCurr.T, gCurr)[0, 0]:
                break
            i += 1
            ALPHA = v ** i
            delta = ALPHA * dCurr
            alphaNext = alphaCurr + delta[:-1, :]
            bNext = bCurr + delta[-1, -1]
            JNext = self.__calc_JVal(KAA, Y_, c, epsilon, beta, alphaNext, bNext)
        return ALPHA

    def __converged1(self, grad, EPSILON):
        if numpy.linalg.norm(grad) <= EPSILON:
            return True
        return False

    def __calc_Hess(self, KAA, Y_, c, epsilon, beta, alpha, b):
        Hess_J1 = self.__calc_Hess_J1(alpha)
        Hess_J2 = self.__calc_Hess_J2(KAA, Y_, c, epsilon, beta, alpha, b)
        Hess = Hess_J1 + Hess_J2
        return Hess

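    # The Hessian is assembled over the stacked variable (alpha, b), an
    # (n + 1) x (n + 1) bordered matrix:
    #   [ d2J/dalpha dalpha   d2J/dalpha db ]
    #   [ d2J/db dalpha       d2J/db db     ]
    # J1 contributes the identity on the alpha block; the J2 block is
    # accumulated sample by sample below.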
    def __calc_Hess_J2(self, KAA, Y_, c, epsilon, beta, alpha, b):
        Hess_J2_alpha_alpha = numpy.zeros((KAA.shape[0], KAA.shape[0]))
        Hess_J2_alpha_b = numpy.zeros((KAA.shape[0], 1))
        Hess_J2_b_b = 0

        z = Y_ - numpy.matmul(KAA, alpha) - b
        term1 = z - epsilon
        term2 = -z - epsilon
        for i in range(z.shape[0]):
            term3 = self.__s(term1[i, 0], beta) ** 2 + self.__p(term1[i, 0], beta) * self.__d(term1[i, 0], beta)
            term4 = self.__s(term2[i, 0], beta) ** 2 + self.__p(term2[i, 0], beta) * self.__d(term2[i, 0], beta)
            term5 = term3 + term4
            Hess_J2_alpha_alpha += term5 * numpy.matmul(KAA[:, i:i+1], KAA[i:i+1, :])
            Hess_J2_alpha_b += term5 * KAA[:, i:i+1]
            Hess_J2_b_b += term5
        Hess_J2_alpha_alpha *= c
        Hess_J2_alpha_b *= c
        Hess_J2_b_alpha = Hess_J2_alpha_b.reshape((1, -1))
        Hess_J2_b_b *= c

        Hess_J2_upper = numpy.hstack((Hess_J2_alpha_alpha, Hess_J2_alpha_b))
        Hess_J2_lower = numpy.hstack((Hess_J2_b_alpha, [[Hess_J2_b_b]]))
        Hess_J2 = numpy.vstack((Hess_J2_upper, Hess_J2_lower))
        return Hess_J2

    def __calc_Hess_J1(self, alpha):
        I = numpy.identity(alpha.shape[0])
        term = numpy.hstack((I, numpy.zeros((I.shape[0], 1))))
        Hess_J1 = numpy.vstack((term, numpy.zeros((1, term.shape[1]))))
        return Hess_J1

    def __calc_grad(self, KAA, Y_, c, epsilon, beta, alpha, b):
        grad_J1 = self.__calc_grad_J1(alpha)
        grad_J2 = self.__calc_grad_J2(KAA, Y_, c, epsilon, beta, alpha, b)
        grad = grad_J1 + grad_J2
        return grad

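    # With z_i = y_i - K(A, A_i)^T alpha - b, the J2 part of the gradient is
    #   dJ2/dalpha = -c * sum_i [p(z_i - eps) * s(z_i - eps) - p(-z_i - eps) * s(-z_i - eps)] * KAA[:, i]
    #   dJ2/db     = -c * sum_i [p(z_i - eps) * s(z_i - eps) - p(-z_i - eps) * s(-z_i - eps)]
    # which follows from the chain rule on p(.)^2 together with p' = s.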
    def __calc_grad_J2(self, KAA, Y_, c, epsilon, beta, alpha, b):
        grad_J2_alpha = numpy.zeros((KAA.shape[0], 1))
        grad_J2_b = 0

        z = Y_ - numpy.matmul(KAA, alpha) - b
        term1 = z - epsilon
        term2 = -z - epsilon
        for i in range(z.shape[0]):
            term3 = self.__p(term1[i, 0], beta) * self.__s(term1[i, 0], beta) - self.__p(term2[i, 0], beta) * self.__s(term2[i, 0], beta)
            grad_J2_alpha += term3 * KAA[:, i:i+1]
            grad_J2_b += term3
        grad_J2_alpha *= -c
        grad_J2_b *= -c

        grad_J2 = numpy.vstack((grad_J2_alpha, [[grad_J2_b]]))
        return grad_J2

    def __calc_grad_J1(self, alpha):
        grad_J1 = numpy.vstack((alpha, [[0]]))
        return grad_J1

    def __calc_JVal(self, KAA, Y_, c, epsilon, beta, alpha, b):
        J1 = self.__calc_J1(alpha)
        J2 = self.__calc_J2(KAA, Y_, c, epsilon, beta, alpha, b)
        JVal = J1 + J2
        return JVal

    def __calc_J2(self, KAA, Y_, c, epsilon, beta, alpha, b):
        z = Y_ - numpy.matmul(KAA, alpha) - b
        # a list comprehension here avoids the deprecated numpy.sum(generator)
        term2 = numpy.sum([self.__p_epsilon_square(item[0], epsilon, beta) for item in z])
        J2 = term2 * c / 2
        return J2

    def __calc_J1(self, alpha):
        J1 = numpy.sum(alpha * alpha) / 2
        return J1

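    # Smoothing helpers: __p is the softplus approximation of the plus function
    # max(x, 0), __s is the logistic sigmoid, and __d is the sigmoid's
    # derivative. They satisfy p' = s and s' = d, the identities the gradient
    # and Hessian above rely on; as beta grows, p(x, beta) -> max(x, 0).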
    def __p(self, x, beta):
        val = numpy.log(numpy.exp(beta * x) + 1) / beta
        return val

    def __s(self, x, beta):
        val = 1 / (numpy.exp(-beta * x) + 1)
        return val

    def __d(self, x, beta):
        term = numpy.exp(beta * x)
        val = beta * term / (term + 1) ** 2
        return val

    def __p_epsilon_square(self, x, epsilon, beta):
        term1 = self.__p(x - epsilon, beta) ** 2
        term2 = self.__p(-x - epsilon, beta) ** 2
        val = term1 + term2
        return val

    def __get_KAA(self, A, mu):
        KAA = numpy.zeros((A.shape[1], A.shape[1]))
        for rowIdx in range(KAA.shape[0]):
            for colIdx in range(rowIdx + 1):
                x1 = A[:, rowIdx:rowIdx+1]
                x2 = A[:, colIdx:colIdx+1]
                val = self.__calc_gaussian(x1, x2, mu)
                KAA[rowIdx, colIdx] = KAA[colIdx, rowIdx] = val
        return KAA

    def __calc_gaussian(self, x1, x2, mu):
        val = numpy.exp(-mu * numpy.linalg.norm(x1 - x2) ** 2)
        # val = numpy.sum(x1 * x2)    # linear kernel alternative
        return val

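    # A vectorized sketch for building the same Gaussian kernel matrix,
    # assuming scipy is available (an alternative, not used by this class):
    #
    #   from scipy.spatial.distance import cdist
    #   KAA = numpy.exp(-mu * cdist(A.T, A.T, "sqeuclidean"))
    #
    # This would replace the O(n^2) Python loop in __get_KAA with one library call.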
    def __init_alpha_b(self, shape):
        '''
        initialization of alpha and b
        '''
        alpha, b = numpy.zeros(shape), 0
        return alpha, b

    def __calc_hVal(self, KAx, alpha, b):
        hVal = numpy.matmul(alpha.T, KAx)[0, 0] + b
        return hVal

    def __get_KAx(self, A, x, mu):
        KAx = numpy.zeros((A.shape[1], 1))
        for rowIdx in range(KAx.shape[0]):
            x1 = A[:, rowIdx:rowIdx+1]
            val = self.__calc_gaussian(x1, x, mu)
            KAx[rowIdx, 0] = val
        return KAx

class PeaksPlot(object):

    def peaks_plot(self, X, Y_, ssvrObj, alpha, b):
        surfX1 = numpy.linspace(numpy.min(X[:, 0]), numpy.max(X[:, 0]), 50)
        surfX2 = numpy.linspace(numpy.min(X[:, 1]), numpy.max(X[:, 1]), 50)
        surfX1, surfX2 = numpy.meshgrid(surfX1, surfX2)
        surfY_ = peaks(surfX1, surfX2)

        surfY = numpy.zeros(surfX1.shape)
        for rowIdx in range(surfY.shape[0]):
            for colIdx in range(surfY.shape[1]):
                surfY[rowIdx, colIdx] = ssvrObj.get_estimation((surfX1[rowIdx, colIdx], surfX2[rowIdx, colIdx]), alpha, b)

        fig = plt.figure(figsize=(10, 3))
        ax1 = plt.subplot(1, 2, 1, projection="3d")
        ax2 = plt.subplot(1, 2, 2, projection="3d")

        norm = plt.Normalize(surfY_.min(), surfY_.max())
        colors = plt.cm.rainbow(norm(surfY_))
        surf = ax1.plot_surface(surfX1, surfX2, surfY_, facecolors=colors, shade=True, alpha=0.5)
        surf.set_facecolor("white")
        ax1.scatter3D(X[:, 0:1], X[:, 1:2], Y_, s=1, c="r")
        ax1.set(xlabel="$x_1$", ylabel="$x_2$", zlabel="$f(x_1, x_2)$", xlim=(-3, 3), ylim=(-3, 3), zlim=(-8, 8))
        ax1.set(title="Original Function")
        ax1.view_init(0, 0)

        norm2 = plt.Normalize(surfY.min(), surfY.max())
        colors2 = plt.cm.rainbow(norm2(surfY))
        surf2 = ax2.plot_surface(surfX1, surfX2, surfY, facecolors=colors2, shade=True, alpha=0.5)
        surf2.set_facecolor("white")
        ax2.scatter3D(X[:, 0:1], X[:, 1:2], Y_, s=1, c="r")
        ax2.set(xlabel="$x_1$", ylabel="$x_2$", zlabel="$f(x_1, x_2)$", xlim=(-3, 3), ylim=(-3, 3), zlim=(-8, 8))
        ax2.set(title="Estimated Function")
        ax2.view_init(0, 0)

        fig.tight_layout()
        fig.savefig("peaks_plot.png", dpi=100)

if __name__ == "__main__":
    ssvrObj = SSVR(X, Yerr, c=50, mu=1, epsilon=0.5)
    alpha, b, tab = ssvrObj.optimize()

    ppObj = PeaksPlot()
    ppObj.peaks_plot(X, Yerr, ssvrObj, alpha, b)
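    # Optional diagnostics (a usage sketch; both methods are defined on SSVR above):
    #   print("training MAE:", ssvrObj.get_MAE(alpha, b))
    #   print("leave-one-out GE:", ssvrObj.get_GE())   # refits n times -- slow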
