Solving the Poisson equation with Dirichlet boundary conditions on a hexagonal domain
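
The script below is read here as solving the Dirichlet problem (this summary is inferred from FF, which returns -UU_xx - UU_yy, and from BDSET.Dright, which stores the exact boundary values; the notation is ours, not the author's):

    -\Delta u = f   in \Omega  (a regular hexagonal domain centered at the origin),
           u = g    on \partial\Omega,

where f and g are manufactured from the exact solution UU selected by prob. Lossf combines the interior residual with a penalized boundary misfit (weight beta = 5e2) into a single loss, which is minimized with an LBFGS-type optimizer.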

import torch
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import torch.nn as nn
import bfgs
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
np.random.seed(1234)
torch.manual_seed(1234)

def UU(X, order, prob):
    if prob == 1:
        temp = 10*(X[:,0]+X[:,1])**2 + (X[:,0]-X[:,1])**2 + 0.5
        if order[0]==0 and order[1]==0:
            return torch.log(temp)
        if order[0]==1 and order[1]==0:
            return temp**(-1) * (20*(X[:,0]+X[:,1]) + 2*(X[:,0]-X[:,1]))
        if order[0]==0 and order[1]==1:
            return temp**(-1) * (20*(X[:,0]+X[:,1]) - 2*(X[:,0]-X[:,1]))
        if order[0]==2 and order[1]==0:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) ** 2 + temp**(-1) * (22)
        if order[0]==1 and order[1]==1:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) * (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) + temp**(-1) * (18)
        if order[0]==0 and order[1]==2:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) ** 2 + temp**(-1) * (22)
    if prob == 2:
        if order[0]==0 and order[1]==0:
            return (X[:,0]*X[:,0]*X[:,0]-X[:,0]) * 0.5*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
        if order[0]==1 and order[1]==0:
            return (3*X[:,0]*X[:,0]-1) * 0.5*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
        if order[0]==0 and order[1]==1:
            return (X[:,0]*X[:,0]*X[:,0]-X[:,0]) * (torch.exp(2*X[:,1])-torch.exp(-2*X[:,1]))
        if order[0]==2 and order[1]==0:
            return (6*X[:,0]) * 0.5*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
        if order[0]==1 and order[1]==1:
            return (3*X[:,0]*X[:,0]-1) * (torch.exp(2*X[:,1])-torch.exp(-2*X[:,1]))
        if order[0]==0 and order[1]==2:
            return (X[:,0]*X[:,0]*X[:,0]-X[:,0]) * 2*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
    if prob == 3:
        temp1 = X[:,0]*X[:,0] - X[:,1]*X[:,1]
        temp2 = X[:,0]*X[:,0] + X[:,1]*X[:,1] + 0.1
        if order[0]==0 and order[1]==0:
            return temp1 * temp2**(-1)
        if order[0]==1 and order[1]==0:
            return (2*X[:,0]) * temp2**(-1) + temp1 * (-1)*temp2**(-2) * (2*X[:,0])
        if order[0]==0 and order[1]==1:
            return (-2*X[:,1]) * temp2**(-1) + temp1 * (-1)*temp2**(-2) * (2*X[:,1])
        if order[0]==2 and order[1]==0:
            return (2) * temp2**(-1) + 2 * (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,0]) + temp1 * (2)*temp2**(-3) * (2*X[:,0])**2 + temp1 * (-1)*temp2**(-2) * (2)
        if order[0]==1 and order[1]==1:
            return (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,1]) + (-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,0]) + temp1 * (2)*temp2**(-3) * (2*X[:,0]) * (2*X[:,1])
        if order[0]==0 and order[1]==2:
            return (-2) * temp2**(-1) + 2 * (-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,1]) + temp1 * (2)*temp2**(-3) * (2*X[:,1])**2 + temp1 * (-1)*temp2**(-2) * (2)
    if prob == 4:
        temp = torch.exp(-4*X[:,1]*X[:,1])
        if order[0]==0 and order[1]==0:
            ind = (X[:,0]<=0).float()
            return ind * ((X[:,0]+1)**4-1) * temp + (1-ind) * (-(-X[:,0]+1)**4+1) * temp
        if order[0]==1 and order[1]==0:
            ind = (X[:,0]<=0).float()
            return ind * (4*(X[:,0]+1)**3) * temp + (1-ind) * (4*(-X[:,0]+1)**3) * temp
        if order[0]==0 and order[1]==1:
            ind = (X[:,0]<=0).float()
            return ind * ((X[:,0]+1)**4-1) * (temp*(-8*X[:,1])) + (1-ind) * (-(-X[:,0]+1)**4+1) * (temp*(-8*X[:,1]))
        if order[0]==2 and order[1]==0:
            ind = (X[:,0]<=0).float()
            return ind * (12*(X[:,0]+1)**2) * temp + (1-ind) * (-12*(-X[:,0]+1)**2) * temp
        if order[0]==1 and order[1]==1:
            ind = (X[:,0]<=0).float()
            return ind * (4*(X[:,0]+1)**3) * (temp*(-8*X[:,1])) + (1-ind) * (4*(-X[:,0]+1)**3) * (temp*(-8*X[:,1]))
        if order[0]==0 and order[1]==2:
            ind = (X[:,0]<=0).float()
            return ind * ((X[:,0]+1)**4-1) * (temp*(64*X[:,1]*X[:,1]-8)) + (1-ind) * (-(-X[:,0]+1)**4+1) * (temp*(64*X[:,1]*X[:,1]-8))
def FF(X, prob):
    return -UU(X,[2,0],prob) - UU(X,[0,2],prob)

class INSET():
    def __init__(self, radius, nx, prob):
        self.dim = 2
        self.radius = radius
        self.nx = nx
        self.hx = self.radius/(nx*np.sqrt(3.0))
        self.size = 6 * (self.nx - 1)*(self.nx + 0) + 1
        self.X = torch.zeros(self.size,self.dim)
        m = 0
        for i in range(self.nx - 1):
            for j in range(self.nx + 0):
                self.X[m,0] = (1 + i + 0.5*j) * self.hx
                self.X[m,1] = j * 0.5*3**0.5*self.hx
                m = m + 1
        th = np.pi/3
        RM = torch.Tensor([np.cos(th),np.sin(th),-np.sin(th),np.cos(th)]).reshape(2,2)
        for k in range(5):
            ind0 = k*(self.nx - 1)*(self.nx + 0)
            ind1 = (k+1)*(self.nx - 1)*(self.nx + 0)
            ind2 = (k+2)*(self.nx - 1)*(self.nx + 0)
            self.X[ind1:ind2,:] = self.X[ind0:ind1,:].matmul(RM)
        self.right = FF(self.X,prob).reshape(-1,1)
        self.u_acc = UU(self.X,[0,0],prob).reshape(-1,1)
class BDSET():
    def __init__(self, radius, nx, prob):
        self.dim = 2
        self.radius = radius
        self.nx = nx
        self.hx = self.radius/(nx*np.sqrt(3.0))
        self.lenth = 12*self.nx*self.hx
        self.size = 12*self.nx
        self.X = torch.zeros(self.size,self.dim)
        self.n = torch.zeros(self.size,self.dim)
        m = 0
        for i in range(self.nx):
            self.X[m,0] = -(i+0.5) * 0.5*self.hx
            self.X[m,1] = self.radius - (i+0.5) * 0.5*3**0.5*self.hx
            self.n[m,0] = -0.5*3**0.5
            self.n[m,1] = 0.5
            m = m+1
        for i in range(self.nx):
            self.X[m,0] = -0.5*self.radius/3**0.5 - (i+0.5) * self.hx
            self.X[m,1] = 0.5*self.radius
            self.n[m,0] = 0.0
            self.n[m,1] = 1.0
            m = m+1
        th = np.pi/3
        RM = torch.Tensor([np.cos(th),np.sin(th),-np.sin(th),np.cos(th)]).reshape(2,2)
        for k in range(5):
            ind0 = k*2*self.nx; ind1 = (k+1)*2*self.nx; ind2 = (k+2)*2*self.nx
            self.X[ind1:ind2,:] = self.X[ind0:ind1,:].matmul(RM)
            self.n[ind1:ind2,:] = self.n[ind0:ind1,:].matmul(RM)
        self.Dright = UU(self.X,[0,0],prob).reshape(-1,1)
class TESET():
    def __init__(self, radius, nx, prob):
        self.dim = 2
        self.radius = radius
        self.nx = nx
        self.hx = self.radius/(nx*np.sqrt(3.0))
        self.size = 6 * (self.nx - 1)*(self.nx + 0) + 1
        self.X = torch.zeros(self.size,self.dim)
        m = 0
        for i in range(self.nx - 1):
            for j in range(self.nx + 0):
                self.X[m,0] = (1 + i + 0.5*j) * self.hx
                self.X[m,1] = j * 0.5*3**0.5*self.hx
                m = m + 1
        th = np.pi/3
        RM = torch.Tensor([np.cos(th),np.sin(th),-np.sin(th),np.cos(th)]).reshape(2,2)
        for k in range(5):
            ind0 = k*(self.nx - 1)*(self.nx + 0)
            ind1 = (k+1)*(self.nx - 1)*(self.nx + 0)
            ind2 = (k+2)*(self.nx - 1)*(self.nx + 0)
            self.X[ind1:ind2,:] = self.X[ind0:ind1,:].matmul(RM)
        self.u_acc = UU(self.X,[0,0],prob).reshape(-1,1)
class LEN():
    def __init__(self, bdset):
        self.n = bdset.nx
        self.X = bdset.X
        self.radius = bdset.radius
        self.dim = 2
        self.mu = 1
        self.num = 6
        self.ee = 1e-1
        self.L_max = 1.0
        self.Nodes()
        self.xt = torch.tensor([[0,0.0]])
        self.L_max = max(self.forward(self.xt))
    def Nodes(self):
        self.inp = []
        for i in range(self.num):
            nodes = torch.zeros(2*(2*self.n),self.dim)
            inds = (i*2*self.n)%(12*self.n); inde = inds + 2*self.n
            nodes[:2*self.n,:] = self.X[inds:inde,:]
            inds = (i*2*self.n + 6*self.n)%(12*self.n); inde = inds + 2*self.n
            nodes[2*self.n:,:] = self.X[inds:inde,:]
            self.inp.append(nodes)
    def phi(self, X, Y):
        dist = ((X - Y)**2).sum(1)
        return (dist + self.ee)**(-0.5)
    def coef(self, k):
        nodes = self.inp[k]
        size = nodes.shape[0] + nodes.shape[1] + 1
        A = torch.zeros(size,size)
        N = nodes.shape[0]
        for i in range(N):
            A[0:N,i] = self.phi(nodes,nodes[i,:])
        A[:N,N:size - 1] = nodes
        A[N:(size - 1),:N] = nodes.t()
        A[:N,-1] = torch.ones(N)
        A[-1,:N] = torch.ones(N)
        b = torch.zeros(size,1)
        b[:2*self.n,:] = torch.ones(2*self.n,1)
        xishu,un = torch.solve(b,A)
        #xishu = torch.linalg.solve(A,b)  # use this on newer PyTorch releases (e.g. on the server), where torch.solve has been removed
        return xishu
    def lk(self, k, X):
        nodes = self.inp[k]  # [60,2]
        size = nodes.shape[0] + nodes.shape[1] + 1
        N = nodes.shape[0]
        value = torch.zeros(X.shape[0])
        xishu = self.coef(k)
        for i in range(N):
            value += xishu[i]*self.phi(X,nodes[i,:])
        for j in range(nodes.shape[1]):
            value += xishu[N + j]*X[:,j]
        return value + xishu[-1]
    def forward(self, X):
        L = 1.0
        for i in range(self.num):
            L = L*(1 - (1 - self.lk(i,X))**self.mu)
        return (L/self.L_max).view(-1,1)

class Net(torch.nn.Module):
    def __init__(self, layers, dtype):
        super(Net, self).__init__()
        self.layers = layers
        self.layers_hid_num = len(layers)-2
        self.device = device
        self.dtype = dtype
        fc = []
        for i in range(self.layers_hid_num):
            fc.append(torch.nn.Linear(self.layers[i],self.layers[i+1]))
            fc.append(torch.nn.Linear(self.layers[i+1],self.layers[i+1]))
        fc.append(torch.nn.Linear(self.layers[-2],self.layers[-1]))
        self.fc = torch.nn.Sequential(*fc)
        for i in range(self.layers_hid_num):
            self.fc[2*i].weight.data = self.fc[2*i].weight.data.type(dtype)
            self.fc[2*i].bias.data = self.fc[2*i].bias.data.type(dtype)
            self.fc[2*i + 1].weight.data = self.fc[2*i + 1].weight.data.type(dtype)
            self.fc[2*i + 1].bias.data = self.fc[2*i + 1].bias.data.type(dtype)
        self.fc[-1].weight.data = self.fc[-1].weight.data.type(dtype)
        self.fc[-1].bias.data = self.fc[-1].bias.data.type(dtype)
    def forward(self, x):
        dev = x.device
        for i in range(self.layers_hid_num):
            h = torch.sin(self.fc[2*i](x))
            h = torch.sin(self.fc[2*i+1](h))
            temp = torch.eye(x.shape[-1],self.layers[i+1],dtype = self.dtype,device = dev)
            x = h + x@temp
        return self.fc[-1](x)

def pred(netf, X):
    return netf.forward(X)

def error(u_pred, u_acc):
    u_pred = u_pred.to(device)
    u_acc = u_acc.to(device)
    #return (((u_pred-u_acc)**2).sum()/(u_acc**2).sum()) ** (0.5)
    return (((u_pred-u_acc)**2).mean()) ** (0.5)
# ----------------------------------------------------------------------------------------------------
def loadtype(inset, bdset, teset, dtype):
    inset.X = inset.X.type(dtype)
    inset.right = inset.right.type(dtype)
    inset.u_acc = inset.u_acc.type(dtype)
    bdset.X = bdset.X.type(dtype)
    bdset.Dright = bdset.Dright.type(dtype)
    teset.X = teset.X.type(dtype)
    teset.u_acc = teset.u_acc.type(dtype)

def loadcuda(inset, bdset, teset, netf):
    netf = netf.to(device)
    if inset.X.requires_grad == False:
        inset.X.requires_grad = True
    inset.X = inset.X.to(device)
    inset.u_acc = inset.u_acc.to(device)
    bdset.X = bdset.X.to(device)
    inset.right = inset.right.to(device)
    bdset.Dright = bdset.Dright.to(device)
    teset.X = teset.X.to(device)
    teset.u_acc = teset.u_acc.to(device)

def loadcpu(inset, bdset, teset, netf):
    netf = netf.to('cpu')
    #inset.X.requires_grad = True
    inset.X = inset.X.to('cpu')
    inset.u_acc = inset.u_acc.to('cpu')
    bdset.X = bdset.X.to('cpu')
    inset.right = inset.right.to('cpu')
    bdset.Dright = bdset.Dright.to('cpu')
    teset.X = teset.X.to('cpu')
    teset.u_acc = teset.u_acc.to('cpu')

def Lossf(netf, inset, bdset):
    if inset.X.requires_grad is not True:
        inset.X.requires_grad = True
    insetF = netf.forward(inset.X)
    # print('insetF device: {}'.format(insetF.device))
    insetFx, = torch.autograd.grad(insetF, inset.X, create_graph=True, retain_graph=True, grad_outputs=torch.ones(inset.size,1).to(device))
    u_in = insetF
    ux = insetFx
    taux, = torch.autograd.grad(ux[:,0:1], inset.X, create_graph=True, retain_graph=True, grad_outputs=torch.ones(inset.size,1).to(device))
    tauy, = torch.autograd.grad(ux[:,1:2], inset.X, create_graph=True, retain_graph=True, grad_outputs=torch.ones(inset.size,1).to(device))
    out_in = ((taux[:,0:1] + tauy[:,1:2] + inset.right)**2 + (taux[:,1:2] - tauy[:,0:1])**2).mean()
    ub = netf.forward(bdset.X)
    out_b = ((ub - bdset.Dright)**2).mean()
    beta = 5e2
    loss = out_in + beta*out_b
    loss = torch.sqrt(loss)
    return loss

# Train neural network f
def Trainf(netf, inset, bdset, optimf, epochf):
    print('train neural network f')
    ERROR,BUZHOU = [],[]
    lossf = Lossf(netf,inset,bdset)
    lossoptimal = lossf
    trainerror = error(netf.forward(inset.X), inset.u_acc)
    print('epoch: %d, loss: %.3e, trainerror: %.3e'%(0, lossf.item(), trainerror.item()))
    torch.save(netf.state_dict(),'best_netf.pkl')
    cycle = 100
    for i in range(epochf):
        st = time.time()
        '''
        for j in range(cycle):
            optimf.zero_grad()
            lossf = Lossf(netf,inset)
            lossf.backward()
            optimf.step()
        '''
        def closure():
            optimf.zero_grad()
            lossf = Lossf(netf,inset,bdset)
            lossf.backward()
            return lossf
        optimf.step(closure)
        lossf = Lossf(netf,inset,bdset)
        if lossf < lossoptimal:
            lossoptimal = lossf
            torch.save(netf.state_dict(),'best_netf.pkl')
        ela = time.time() - st
        trainerror = error(netf.forward(inset.X), inset.u_acc)
        ERROR.append(trainerror.item())
        BUZHOU.append((i + 1)*cycle)
        print('epoch:%d,lossf:%.3e,Linf error:%.3e,train error:%.3e,time:%.2f'%((i + 1)*cycle,lossf.item(),max(abs(netf.forward(inset.X) - inset.u_acc)),trainerror,ela))
    return ERROR,BUZHOU
prob = 3
radius = 2.0
nx_tr = 40
nx_te = 100
epochf = 20
lr = 1e0
tests_num = 1
#dtype = torch.float32
dtype = torch.float64
# ------------------------------------------------------------------------------------------------
testerror = torch.zeros(tests_num)
inset = INSET(radius,nx_tr,prob)
bdset = BDSET(radius,nx_te,prob)
teset = TESET(radius,nx_te,prob)
loadtype(inset,bdset,teset,dtype)
lay = [2,20,20,1];netf = Net(lay,dtype)
loadcuda(inset,bdset,teset,netf)

for it in range(tests_num):
    '''
    optimf = torch.optim.LBFGS(netf.parameters(), lr=lr,max_iter = 100,history_size=2500,line_search_fn = 'strong_wolfe')
    '''
    optimf = bfgs.BFGS(netf.parameters(), lr=lr,max_iter = 100,history_size=2500,line_search_fn = 'strong_wolfe')
    start_time = time.time()
    ERROR,BUZHOU = Trainf(netf, inset, bdset, optimf, epochf)
    elapsed = time.time() - start_time
    print('Train time: %.2f' %(elapsed))
    netf.load_state_dict(torch.load('best_netf.pkl'))
    te_U = pred(netf,teset.X)
    testerror[it] = error(te_U, teset.u_acc)
    print('prob:%d,Linf error:%.4e,testerror = %.3e\n' %(prob,max(abs(te_U - teset.u_acc)),testerror[it].item()))
print(testerror.data)
testerror_mean = testerror.mean()
testerror_std = testerror.std()
print('testerror_mean = %.3e, testerror_std = %.3e'%(testerror_mean.item(),testerror_std.item()))
loadcpu(inset,bdset,teset,netf)
torch.cuda.empty_cache()
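
A note on the bfgs import: it appears to be a local module whose BFGS class mirrors the interface of torch.optim.LBFGS (the commented-out block above passes identical arguments). If that module is not available, the stock PyTorch optimizer is a reasonable drop-in substitute, e.g.

optimf = torch.optim.LBFGS(netf.parameters(), lr=lr, max_iter=100,
                           history_size=2500, line_search_fn='strong_wolfe')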

Solving the Poisson equation with PFNN

Because the length factor is hard to choose here, the solution quality is poor; readers will need to tune the parameters themselves.
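
For orientation, the PFNN construction implemented in pred, Lossg and Lossf below can be summarized as (our notation, read off from the code)

    u(x) ≈ netg(x) + ℓ(x) · netf(x),

where netg is fitted to the Dirichlet data on the boundary (Lossg), the length factor ℓ (class LEN) vanishes on the boundary, and netf is trained on the interior residual only (Lossf), so no boundary penalty weight is needed. The accuracy therefore depends strongly on how well ℓ and its first and second derivatives behave, which is the tuning difficulty mentioned above.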

import torch
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import torch.nn as nn
import torch.nn.functional as F
import bfgs
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#device = 'cpu'
np.random.seed(1234)
torch.manual_seed(1234)

def UU(X, order, prob):
    if prob == 1:
        temp = 10*(X[:,0]+X[:,1])**2 + (X[:,0]-X[:,1])**2 + 0.5
        if order[0]==0 and order[1]==0:
            return torch.log(temp)
        if order[0]==1 and order[1]==0:
            return temp**(-1) * (20*(X[:,0]+X[:,1]) + 2*(X[:,0]-X[:,1]))
        if order[0]==0 and order[1]==1:
            return temp**(-1) * (20*(X[:,0]+X[:,1]) - 2*(X[:,0]-X[:,1]))
        if order[0]==2 and order[1]==0:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) ** 2 + temp**(-1) * (22)
        if order[0]==1 and order[1]==1:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) * (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) + temp**(-1) * (18)
        if order[0]==0 and order[1]==2:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) ** 2 + temp**(-1) * (22)
    if prob == 2:
        if order[0]==0 and order[1]==0:
            return (X[:,0]*X[:,0]*X[:,0]-X[:,0]) * 0.5*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
        if order[0]==1 and order[1]==0:
            return (3*X[:,0]*X[:,0]-1) * 0.5*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
        if order[0]==0 and order[1]==1:
            return (X[:,0]*X[:,0]*X[:,0]-X[:,0]) * (torch.exp(2*X[:,1])-torch.exp(-2*X[:,1]))
        if order[0]==2 and order[1]==0:
            return (6*X[:,0]) * 0.5*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
        if order[0]==1 and order[1]==1:
            return (3*X[:,0]*X[:,0]-1) * (torch.exp(2*X[:,1])-torch.exp(-2*X[:,1]))
        if order[0]==0 and order[1]==2:
            return (X[:,0]*X[:,0]*X[:,0]-X[:,0]) * 2*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
    if prob == 3:
        temp1 = X[:,0]*X[:,0] - X[:,1]*X[:,1]
        temp2 = X[:,0]*X[:,0] + X[:,1]*X[:,1] + 0.1
        if order[0]==0 and order[1]==0:
            return temp1 * temp2**(-1)
        if order[0]==1 and order[1]==0:
            return (2*X[:,0]) * temp2**(-1) + temp1 * (-1)*temp2**(-2) * (2*X[:,0])
        if order[0]==0 and order[1]==1:
            return (-2*X[:,1]) * temp2**(-1) + temp1 * (-1)*temp2**(-2) * (2*X[:,1])
        if order[0]==2 and order[1]==0:
            return (2) * temp2**(-1) + 2 * (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,0]) + temp1 * (2)*temp2**(-3) * (2*X[:,0])**2 + temp1 * (-1)*temp2**(-2) * (2)
        if order[0]==1 and order[1]==1:
            return (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,1]) + (-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,0]) + temp1 * (2)*temp2**(-3) * (2*X[:,0]) * (2*X[:,1])
        if order[0]==0 and order[1]==2:
            return (-2) * temp2**(-1) + 2 * (-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,1]) + temp1 * (2)*temp2**(-3) * (2*X[:,1])**2 + temp1 * (-1)*temp2**(-2) * (2)
    if prob == 4:
        temp = torch.exp(-4*X[:,1]*X[:,1])
        if order[0]==0 and order[1]==0:
            ind = (X[:,0]<=0).float()
            return ind * ((X[:,0]+1)**4-1) * temp + (1-ind) * (-(-X[:,0]+1)**4+1) * temp
        if order[0]==1 and order[1]==0:
            ind = (X[:,0]<=0).float()
            return ind * (4*(X[:,0]+1)**3) * temp + (1-ind) * (4*(-X[:,0]+1)**3) * temp
        if order[0]==0 and order[1]==1:
            ind = (X[:,0]<=0).float()
            return ind * ((X[:,0]+1)**4-1) * (temp*(-8*X[:,1])) + (1-ind) * (-(-X[:,0]+1)**4+1) * (temp*(-8*X[:,1]))
        if order[0]==2 and order[1]==0:
            ind = (X[:,0]<=0).float()
            return ind * (12*(X[:,0]+1)**2) * temp + (1-ind) * (-12*(-X[:,0]+1)**2) * temp
        if order[0]==1 and order[1]==1:
            ind = (X[:,0]<=0).float()
            return ind * (4*(X[:,0]+1)**3) * (temp*(-8*X[:,1])) + (1-ind) * (4*(-X[:,0]+1)**3) * (temp*(-8*X[:,1]))
        if order[0]==0 and order[1]==2:
            ind = (X[:,0]<=0).float()
            return ind * ((X[:,0]+1)**4-1) * (temp*(64*X[:,1]*X[:,1]-8)) + (1-ind) * (-(-X[:,0]+1)**4+1) * (temp*(64*X[:,1]*X[:,1]-8))
def FF(X, prob):
    return -UU(X,[2,0],prob) - UU(X,[0,2],prob)

class INSET():
    def __init__(self, radius, nx, prob):
        self.dim = 2
        self.radius = radius
        self.nx = nx
        self.hx = self.radius/(nx*np.sqrt(3.0))
        self.size = 6 * (self.nx - 1)*(self.nx + 0) + 1
        self.X = torch.zeros(self.size,self.dim)
        m = 0
        for i in range(self.nx - 1):
            for j in range(self.nx + 0):
                self.X[m,0] = (1 + i + 0.5*j) * self.hx
                self.X[m,1] = j * 0.5*3**0.5*self.hx
                m = m + 1
        th = np.pi/3
        RM = torch.Tensor([np.cos(th),np.sin(th),-np.sin(th),np.cos(th)]).reshape(2,2)
        for k in range(5):
            ind0 = k*(self.nx - 1)*(self.nx + 0)
            ind1 = (k+1)*(self.nx - 1)*(self.nx + 0)
            ind2 = (k+2)*(self.nx - 1)*(self.nx + 0)
            self.X[ind1:ind2,:] = self.X[ind0:ind1,:].matmul(RM)
        self.right = FF(self.X,prob).reshape(-1,1)
        self.u_acc = UU(self.X,[0,0],prob).reshape(-1,1)
class BDSET():
    def __init__(self, radius, nx, prob):
        self.dim = 2
        self.radius = radius
        self.nx = nx
        self.hx = self.radius/(nx*np.sqrt(3.0))
        self.lenth = 12*self.nx*self.hx
        self.size = 12*self.nx
        self.X = torch.zeros(self.size,self.dim)
        self.n = torch.zeros(self.size,self.dim)
        m = 0
        for i in range(self.nx):
            self.X[m,0] = -(i+0.5) * 0.5*self.hx
            self.X[m,1] = self.radius - (i+0.5) * 0.5*3**0.5*self.hx
            self.n[m,0] = -0.5*3**0.5
            self.n[m,1] = 0.5
            m = m+1
        for i in range(self.nx):
            self.X[m,0] = -0.5*self.radius/3**0.5 - (i+0.5) * self.hx
            self.X[m,1] = 0.5*self.radius
            self.n[m,0] = 0.0
            self.n[m,1] = 1.0
            m = m+1
        th = np.pi/3
        RM = torch.Tensor([np.cos(th),np.sin(th),-np.sin(th),np.cos(th)]).reshape(2,2)
        for k in range(5):
            ind0 = k*2*self.nx; ind1 = (k+1)*2*self.nx; ind2 = (k+2)*2*self.nx
            self.X[ind1:ind2,:] = self.X[ind0:ind1,:].matmul(RM)
            self.n[ind1:ind2,:] = self.n[ind0:ind1,:].matmul(RM)
        self.Dright = UU(self.X,[0,0],prob).reshape(-1,1)
class TESET():
    def __init__(self, radius, nx, prob):
        self.dim = 2
        self.radius = radius
        self.nx = nx
        self.hx = self.radius/(nx*np.sqrt(3.0))
        self.size = 6 * (self.nx - 1)*(self.nx + 0) + 1
        self.X = torch.zeros(self.size,self.dim)
        m = 0
        for i in range(self.nx - 1):
            for j in range(self.nx + 0):
                self.X[m,0] = (1 + i + 0.5*j) * self.hx
                self.X[m,1] = j * 0.5*3**0.5*self.hx
                m = m + 1
        th = np.pi/3
        RM = torch.Tensor([np.cos(th),np.sin(th),-np.sin(th),np.cos(th)]).reshape(2,2)
        for k in range(5):
            ind0 = k*(self.nx - 1)*(self.nx + 0)
            ind1 = (k+1)*(self.nx - 1)*(self.nx + 0)
            ind2 = (k+2)*(self.nx - 1)*(self.nx + 0)
            self.X[ind1:ind2,:] = self.X[ind0:ind1,:].matmul(RM)
        self.u_acc = UU(self.X,[0,0],prob).reshape(-1,1)
'''
class LEN():
    def __init__(self, bdset, n_b, n_i, mu):  # n_b = 3, n_i = 4
        self.n = bdset.nx
        self.X = bdset.X
        self.radius = bdset.radius
        self.dim = 2
        self.mu = mu
        self.n_i = n_i
        self.n_b = n_b
        self.ee = 1e-4
        self.L_max = 1.0
        self.Nodes()
        self.xt = torch.tensor([[0,0.0]])
    def Nodes(self):
        self.inp = []
        for i in range(self.n_i):
            nodes = torch.zeros(2*(self.n_b*self.n),self.dim)
            inds = (i*self.n_b*self.n)%(12*self.n); inde = inds + self.n_b*self.n
            nodes[:self.n_b*self.n,:] = self.X[inds:inde,:]
            inds = (i*self.n_b*self.n + 6*self.n)%(12*self.n); inde = inds + self.n_b*self.n
            nodes[self.n_b*self.n:,:] = self.X[inds:inde,:]
            self.inp.append(nodes)
    def phi(self, X, Y):
        dist = ((X - Y)**2).sum(1)
        return (dist + self.ee)**(-0.5)
    def coef(self, k):
        nodes = self.inp[k]
        size = nodes.shape[0] + nodes.shape[1] + 1
        A = torch.zeros(size,size)
        N = nodes.shape[0]
        for i in range(N):
            A[0:N,i] = self.phi(nodes,nodes[i,:])
        A[:N,N:size - 1] = nodes
        A[N:(size - 1),:N] = nodes.t()
        A[:N,-1] = torch.ones(N)
        A[-1,:N] = torch.ones(N)
        b = torch.zeros(size,1)
        b[:self.n_b*self.n,:] = torch.ones(self.n_b*self.n,1)
        xishu,un = torch.solve(b,A)
        #xishu = torch.linalg.solve(A,b)
        return xishu
    def lk(self, k, X):
        dev = X.device
        nodes = self.inp[k].to(dev)
        size = nodes.shape[0] + nodes.shape[1] + 1
        N = nodes.shape[0]
        value = torch.zeros(X.shape[0],device = dev)
        xishu = self.coef(k).to(dev)
        for i in range(N):
            value += xishu[i]*self.phi(X,nodes[i,:])
        for j in range(nodes.shape[1]):
            value += xishu[N + j]*X[:,j]
        tmp = value + xishu[-1]
        return tmp
    def forward(self, X):
        L = 1.0
        h = 1e-1
        dev = X.device
        for i in range(self.n_i):
            tmp = (1 - (1 - self.lk(i,X))**self.mu)/h
            temp = (1 - (1 - self.lk(i,self.xt.to(dev)))**self.mu)/h
            L = L*(tmp)**2
        return (L/self.L_max).to(dev).view(-1,1)
'''
class LEN():
    def __init__(self, mu):
        self.mu = mu
        self.xt = torch.tensor([[0,0.0]])
    def phi(self, k, order, X):
        if k == 0:
            ind00 = (X[:,1] > 0); ind01 = (X[:,1] > 1)
            ind10 = (np.sqrt(3)*X[:,0] - X[:,1] > 0); ind11 = (np.sqrt(3)*X[:,0] - X[:,1] - 2 > 0)
            ind = (ind00*~ind01*ind10*~ind11).float()
            if order == [0,0]:
                return ind*((X[:,1] - 1)*(np.sqrt(3)*X[:,0] - X[:,1] - 2))**2
            elif order == [1,0]:
                return ind*2*(X[:,1] - 1)*(np.sqrt(3)*X[:,0] - X[:,1] - 2)*np.sqrt(3)*(X[:,1] - 1)
            elif order == [0,1]:
                return ind*2*(X[:,1] - 1)*(np.sqrt(3)*X[:,0] - X[:,1] - 2)*(np.sqrt(3)*X[:,0] - 2)
        elif k == 1:
            ind00 = (X[:,1] > 0); ind01 = (X[:,1] > - 1)
            ind10 = (np.sqrt(3)*X[:,0] + X[:,1] > 0); ind11 = (np.sqrt(3)*X[:,0] + X[:,1] - 2 > 0)
            ind = (~ind00*ind01*ind10*~ind11).float()
            if order == [0,0]:
                return ind*((X[:,1] + 1)*(np.sqrt(3)*X[:,0] + X[:,1] - 2))**2
            elif order == [1,0]:
                return ind*2*(X[:,1] + 1)*(np.sqrt(3)*X[:,0] + X[:,1] - 2)*np.sqrt(3)*(X[:,1] + 1)
            elif order == [0,1]:
                return ind*2*(X[:,1] + 1)*(np.sqrt(3)*X[:,0] + X[:,1] - 2)*(np.sqrt(3)*X[:,0] + 2*X[:,1] - 2)
        elif k == 2:
            ind00 = (np.sqrt(3)*X[:,0] + X[:,1] > 0); ind01 = (np.sqrt(3)*X[:,0] + X[:,1] + 2 > 0)
            ind10 = (np.sqrt(3)*X[:,0] - X[:,1] - 2 > 0); ind11 = (np.sqrt(3)*X[:,0] - X[:,1] > 0)
            ind = (~ind00*ind01*~ind10*ind11).float()
            if order == [0,0]:
                return ind*((np.sqrt(3)*X[:,0] - X[:,1] - 2)*(np.sqrt(3)*X[:,0] + X[:,1] + 2))**2
            elif order == [1,0]:
                return ind*2*(np.sqrt(3)*X[:,0] - X[:,1] - 2)*(np.sqrt(3)*X[:,0] + X[:,1] + 2)*6*X[:,0]
            elif order == [0,1]:
                return ind*2*(np.sqrt(3)*X[:,0] - X[:,1] - 2)*(np.sqrt(3)*X[:,0] + X[:,1] + 2)*(-2*X[:,1] - 4)
        elif k == 3:
            ind00 = (X[:,1] > 0); ind01 = (X[:,1] > - 1)
            ind10 = (np.sqrt(3)*X[:,0] - X[:,1] > 0); ind11 = (np.sqrt(3)*X[:,0] - X[:,1] + 2 > 0)
            ind = (~ind00*ind01*~ind10*ind11).float()
            if order == [0,0]:
                return ind*((X[:,1] + 1)*(np.sqrt(3)*X[:,0] - X[:,1] + 2))**2
            elif order == [1,0]:
                return ind*2*(X[:,1] + 1)*(np.sqrt(3)*X[:,0] - X[:,1] + 2)*np.sqrt(3)*(X[:,1] + 1)
            elif order == [0,1]:
                return ind*2*(X[:,1] + 1)*(np.sqrt(3)*X[:,0] - X[:,1] + 2)*(np.sqrt(3)*X[:,0] - 2*X[:,1] + 2)
        elif k == 4:
            ind00 = (X[:,1] > 0); ind01 = (X[:,1] > 1)
            ind10 = (np.sqrt(3)*X[:,0] + X[:,1] > 0); ind11 = (np.sqrt(3)*X[:,0] + X[:,1] + 2 > 0)
            ind = (ind00*~ind01*~ind10*ind11).float()
            if order == [0,0]:
                return ind*((X[:,1] - 1)*(np.sqrt(3)*X[:,0] + X[:,1] + 2))**2
            elif order == [1,0]:
                return ind*2*(X[:,1] - 1)*(np.sqrt(3)*X[:,0] + X[:,1] + 2)*2*(X[:,1] - 1)
            elif order == [0,1]:
                return ind*2*(X[:,1] - 1)*(np.sqrt(3)*X[:,0] + X[:,1] + 2)*(np.sqrt(3)*X[:,0] + 2)
        elif k == 5:
            ind00 = (np.sqrt(3)*X[:,0] + X[:,1] > 0); ind01 = (np.sqrt(3)*X[:,0] + X[:,1] - 2 > 0)
            ind10 = (np.sqrt(3)*X[:,0] - X[:,1] + 2 > 0); ind11 = (np.sqrt(3)*X[:,0] - X[:,1] > 0)
            ind = (ind00*~ind01*ind10*~ind11).float()
            if order == [0,0]:
                return ind*((np.sqrt(3)*X[:,0] - X[:,1] + 2)*(np.sqrt(3)*X[:,0] + X[:,1] - 2))**2
            elif order == [1,0]:
                return ind*2*(np.sqrt(3)*X[:,0] - X[:,1] + 2)*(np.sqrt(3)*X[:,0] + X[:,1] - 2)*6*X[:,0]
            elif order == [0,1]:
                return ind*2*(np.sqrt(3)*X[:,0] - X[:,1] + 2)*(np.sqrt(3)*X[:,0] + X[:,1] - 2)*(-2*X[:,1] + 4)
    def forward(self, order, X):
        L = 0.0
        L_max = 0.0
        dev = X.device
        for i in range(6):
            L = L + self.phi(i,order,X)
            L_max = L_max + self.phi(i,order,self.xt.to(X.device))
        return (L/L_max).reshape(-1,1)
class Net(torch.nn.Module):
    def __init__(self, layers, dtype):
        super(Net, self).__init__()
        self.layers = layers
        self.layers_hid_num = len(layers)-2
        self.device = device
        self.dtype = dtype
        fc = []
        for i in range(self.layers_hid_num):
            fc.append(torch.nn.Linear(self.layers[i],self.layers[i+1]))
            fc.append(torch.nn.Linear(self.layers[i+1],self.layers[i+1]))
        fc.append(torch.nn.Linear(self.layers[-2],self.layers[-1]))
        self.fc = torch.nn.Sequential(*fc)
        for i in range(self.layers_hid_num):
            self.fc[2*i].weight.data = self.fc[2*i].weight.data.type(dtype)
            self.fc[2*i].bias.data = self.fc[2*i].bias.data.type(dtype)
            self.fc[2*i + 1].weight.data = self.fc[2*i + 1].weight.data.type(dtype)
            self.fc[2*i + 1].bias.data = self.fc[2*i + 1].bias.data.type(dtype)
        self.fc[-1].weight.data = self.fc[-1].weight.data.type(dtype)
        self.fc[-1].bias.data = self.fc[-1].bias.data.type(dtype)
    def forward(self, x):
        dev = x.device
        for i in range(self.layers_hid_num):
            h = torch.sin(self.fc[2*i](x))
            h = torch.sin(self.fc[2*i+1](h))
            temp = torch.eye(x.shape[-1],self.layers[i+1],dtype = self.dtype,device = dev)
            x = h + x@temp
        return self.fc[-1](x)

def pred(netf, netg, lenth, X):
    return netf.forward(X)*lenth.forward([0,0],X) + netg.forward(X)
'''
def pred(netf, netg, lenth, X):
    return netf.forward(X)*lenth.forward(X) + netg.forward(X)
'''

def error(u_pred, u_acc):
    u_pred = u_pred.to(device)
    u_acc = u_acc.to(device)
    #return (((u_pred-u_acc)**2).sum()/(u_acc**2).sum()) ** (0.5)
    return (((u_pred-u_acc)**2).mean()) ** (0.5)
# ----------------------------------------------------------------------------------------------------
def loadtype(inset, bdset, teset, dtype):
    inset.X = inset.X.type(dtype)
    inset.right = inset.right.type(dtype)
    inset.u_acc = inset.u_acc.type(dtype)
    bdset.X = bdset.X.type(dtype)
    bdset.Dright = bdset.Dright.type(dtype)
    teset.X = teset.X.type(dtype)
    teset.u_acc = teset.u_acc.type(dtype)

def loadcuda(inset, bdset, teset, netf, netg):
    netf = netf.to(device)
    netg = netg.to(device)
    if inset.X.requires_grad == False:
        inset.X.requires_grad = True
    inset.X = inset.X.to(device)
    inset.u_acc = inset.u_acc.to(device)
    bdset.X = bdset.X.to(device)
    inset.right = inset.right.to(device)
    bdset.Dright = bdset.Dright.to(device)
    teset.X = teset.X.to(device)
    teset.u_acc = teset.u_acc.to(device)

def loadcpu(inset, bdset, teset, netf, netg):
    netf = netf.to('cpu')
    netg = netg.to('cpu')
    #inset.X.requires_grad = True
    inset.X = inset.X.to('cpu')
    inset.u_acc = inset.u_acc.to('cpu')
    bdset.X = bdset.X.to('cpu')
    inset.right = inset.right.to('cpu')
    bdset.Dright = bdset.Dright.to('cpu')
    teset.X = teset.X.to('cpu')
    teset.u_acc = teset.u_acc.to('cpu')

def Lossg(netg, bdset):
    ub = netg.forward(bdset.X)
    out_b = (ub - bdset.Dright)**2
    return torch.sqrt(out_b.mean())

def Lossf(netf, inset):
    if inset.X.requires_grad is not True:
        inset.X.requires_grad = True
    insetF = netf.forward(inset.X)
    # print('insetF device: {}'.format(insetF.device))
    insetFx, = torch.autograd.grad(insetF, inset.X, create_graph=True, retain_graph=True, grad_outputs=torch.ones(inset.size,1).to(device))
    taux, = torch.autograd.grad(insetFx[:,0:1], inset.X, create_graph=True, retain_graph=True, grad_outputs=torch.ones(inset.size,1).to(device))
    tauy, = torch.autograd.grad(insetFx[:,1:2], inset.X, create_graph=True, retain_graph=True, grad_outputs=torch.ones(inset.size,1).to(device))
    uxx = taux[:,0:1]*inset.L + 2*insetFx[:,0:1]*inset.Lx + inset.Lxx*insetF + inset.Gxx
    uyy = tauy[:,1:2]*inset.L + 2*insetFx[:,1:2]*inset.Ly + inset.Lyy*insetF + inset.Gyy
    inset.res = (uxx + uyy + inset.right)**2 + inset.de
    out_in = (inset.res).mean()
    return (out_in)

def Traing(netg, bdset, optimg, max_epoch):
    print('train neural network g')
    lossbest = Lossg(netg,bdset)
    for sub_epoch in range(max_epoch):
        def closure():
            loss = Lossg(netg,bdset)
            optimg.zero_grad()
            loss.backward()
            return loss
        optimg.step(closure)
        loss = Lossg(netg,bdset)
        if loss < lossbest:
            lossbest = loss
            torch.save(netg.state_dict(),'best_netg.pkl')
        print('epoch:%d,loss_u:%.3e'%(sub_epoch + 1,loss.item()))

# Train neural network f
def Trainf(netf, netg, inset, lenth, optimf, epochf):
    print('train neural network f')
    ERROR,BUZHOU = [],[]
    lossf = Lossf(netf,inset)
    lossoptimal = lossf
    trainerror = error(pred(netf,netg,lenth,inset.X), inset.u_acc)
    print('epoch: %d, loss: %.3e, trainerror: %.3e'%(0, lossf.item(), trainerror.item()))
    torch.save(netf.state_dict(),'best_netf.pkl')
    cycle = 100
    for i in range(epochf):
        st = time.time()
        def closure():
            optimf.zero_grad()
            lossf = Lossf(netf,inset)
            lossf.backward()
            return lossf
        optimf.step(closure)
        lossf = Lossf(netf,inset)
        if lossf < lossoptimal:
            lossoptimal = lossf
            torch.save(netf.state_dict(),'best_netf.pkl')
        ela = time.time() - st
        trainerror = error(pred(netf,netg,lenth,inset.X), inset.u_acc)
        ERROR.append(trainerror.item())
        BUZHOU.append((i + 1)*cycle)
        print('epoch:%d,lossf:%.3e,Linf error:%.3e,train error:%.3e,time:%.2f'%((i + 1)*cycle,lossf.item(),max(abs(pred(netf,netg,lenth,inset.X) - inset.u_acc)),trainerror,ela))
    return ERROR,BUZHOU
def Train(netf, netg, inset, bdset, lenth, optimf, optimg, epochf, max_epoch):
    if inset.X.requires_grad is not True:
        inset.X.requires_grad = True
    #inset.L = lenth.forward(inset.X)
    inset.L = lenth.forward([0,0],inset.X)
    inset.dL, = torch.autograd.grad(inset.L, inset.X, create_graph=True, retain_graph=True, grad_outputs=torch.ones(inset.size,1).to(device))
    inset.Lx = inset.dL[:,0:1]
    inset.Ly = inset.dL[:,1:2]
    inset.dLxx, = torch.autograd.grad(inset.Lx, inset.X, create_graph=True, retain_graph=True, grad_outputs=torch.ones(inset.size,1).to(device))
    inset.dLyy, = torch.autograd.grad(inset.Ly, inset.X, create_graph=True, retain_graph=True, grad_outputs=torch.ones(inset.size,1).to(device))
    inset.Lxx = inset.dLxx[:,0:1]; inset.Lyy = inset.dLyy[:,1:2]
    inset.L = inset.L.data
    inset.Lx = inset.Lx.data; inset.Ly = inset.Ly.data
    inset.Lxx = inset.Lxx.data; inset.Lyy = inset.Lyy.data
    inset.dL = inset.dL.data; inset.dLxx = inset.dLxx.data; inset.dLyy = inset.dLyy.data
    Traing(netg,bdset,optimg,max_epoch)
    netg.load_state_dict(torch.load('best_netg.pkl'))
    err = pred(netf,netg,lenth,bdset.X) - bdset.Dright
    print(max(abs(err)))
    err_g = netg.forward(bdset.X) - bdset.Dright
    print(max(abs(err_g)))
    inset.G = netg.forward(inset.X)
    inset.Gx, = torch.autograd.grad(inset.G, inset.X, create_graph=True, retain_graph=True, grad_outputs=torch.ones(inset.size,1).to(device))
    inset.dGxx, = torch.autograd.grad(inset.Gx[:,0:1], inset.X, create_graph=True, retain_graph=True, grad_outputs=torch.ones(inset.size,1).to(device))
    inset.dGyy, = torch.autograd.grad(inset.Gx[:,1:2], inset.X, create_graph=True, retain_graph=True, grad_outputs=torch.ones(inset.size,1).to(device))
    inset.Gxx = inset.dGxx[:,0:1]; inset.Gyy = inset.dGyy[:,1:2]
    inset.Gxx = inset.Gxx.data; inset.Gyy = inset.Gyy.data
    inset.dGxx = inset.dGxx.data; inset.dGyy = inset.dGyy.data
    inset.de = (inset.dLxx[:,1:2] - inset.dLyy[:,0:1])**2 + (inset.dGxx[:,1:2] - inset.dGyy[:,0:1])**2
    inset.de = inset.de.data
    ERROR,BUZHOU = [],[]
    ERROR,BUZHOU = Trainf(netf,netg, inset, lenth,optimf, epochf)
    netf.load_state_dict(torch.load('best_netf.pkl'))
    return ERROR,BUZHOU

prob = 3
radius = 2.0
nx_tr = 40
nx_te = 100
max_epoch = 20
epochf = 20
lr_g = 1e0
lr_f = 1e0
tests_num = 1
#dtype = torch.float32
dtype = torch.float64
# ------------------------------------------------------------------------------------------------
testerror = torch.zeros(tests_num)
inset = INSET(radius,nx_tr,prob)
bdset = BDSET(radius,nx_te,prob)
teset = TESET(radius,nx_te,prob)
loadtype(inset,bdset,teset,dtype)
layg = [2,20,20,1];netg = Net(layg,dtype)
layf = [2,20,20,1];netf = Net(layf,dtype)
loadcuda(inset,bdset,teset,netf,netg)
'''
n_b = 3;n_i = 4;mu = 1
lenth = LEN(bdset,n_b,n_i,mu)
'''
mu = 1
lenth = LEN(mu)
st = time.time()
for it in range(tests_num):
    '''
    optimg = torch.optim.LBFGS(netg.parameters(), lr=lr_g,max_iter = 100,history_size=2500,line_search_fn = 'strong_wolfe')
    optimf = torch.optim.LBFGS(netf.parameters(), lr=lr_f,max_iter = 100,history_size=2500,line_search_fn = 'strong_wolfe')
    '''
    optimg = bfgs.BFGS(netg.parameters(), lr=lr_g,max_iter = 100,history_size=2500,line_search_fn = 'strong_wolfe')
    optimf = bfgs.BFGS(netf.parameters(), lr=lr_f,max_iter = 100,history_size=2500,line_search_fn = 'strong_wolfe')
    start_time = time.time()
    ERROR,BUZHOU = Train(netf,netg, inset, bdset, lenth,optimf, optimg,epochf,max_epoch)
    elapsed = time.time() - start_time
    print('Train time: %.2f' %(elapsed))
    te_U = pred(netf,netg,lenth,teset.X)
    testerror[it] = error(te_U, teset.u_acc)
    print('prob:%d,Linf error:%.4e,testerror = %.3e\n' %(prob,max(abs(te_U - teset.u_acc)),testerror[it].item()))
print(testerror.data)
testerror_mean = testerror.mean()
testerror_std = testerror.std()
print('testerror_mean = %.3e, testerror_std = %.3e'%(testerror_mean.item(),testerror_std.item()))
loadcpu(inset,bdset,teset,netf,netg)
torch.cuda.empty_cache()
ela = time.time() - st
print('train time:%.2f'%(ela))

Solving the minimal surface equation with mixed boundary conditions
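
The loss used in this example (Lossf below) discretizes the energy of the mixed boundary-value problem for the minimal surface equation, roughly (our transcription of the code)

    J(u) = \int_\Omega \sqrt{1 + |\nabla u|^2} \, dA - \int_{\Gamma_N} g_N \, u \, ds,

with u = netg + ℓ · netf as in the PFNN construction above: netg matches the Dirichlet data on \Gamma_D, the length factor ℓ (class LEN) vanishes on the Dirichlet portion of the boundary, and g_N is the Neumann data supplied by RR_N. The exact solution UU is a catenoid-like surface parametrized by lamda.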


import torch
import time
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
# ----------------------------------------------------------------------------------------------------
# Generate Data
# Solution
def UU(X, lamda, order):
    temp = lamda * 0.5*(torch.exp(X[:,0]/lamda)+torch.exp(-X[:,0]/lamda))
    if order[0]==0 and order[1]==0:
        return temp * torch.sin(torch.acos(X[:,1]/temp))
    if order[0]==1 and order[1]==0:
        temp_x = 0.5*(torch.exp(X[:,0]/lamda)-torch.exp(-X[:,0]/lamda))
        return temp_x * torch.sin(torch.acos(X[:,1]/temp)) + temp * torch.cos(torch.acos(X[:,1]/temp)) * (-(1-(X[:,1]/temp)**2)**(-0.5)) * (-X[:,1]/temp**2) * temp_x
    if order[0]==0 and order[1]==1:
        return temp * torch.cos(torch.acos(X[:,1]/temp)) * (-(1-(X[:,1]/temp)**2)**(-0.5)) * (1/temp)

# Right hand side of the Neumann boundary
def RR_N(X, n, lamda):
    u_x1 = UU(X,lamda,[1,0]); u_x2 = UU(X,lamda,[0,1])
    return 1/(1+u_x1*u_x1+u_x2*u_x2)**0.5 * (u_x1*n[:,0]+u_x2*n[:,1])

# ----------------------------------------------------------------------------------------------------
# Inner Set
class INSET():
    def __init__(self, radius, nx, lamda):
        self.dim = 2
        self.radius = radius
        self.nx = nx
        self.hx = self.radius/3**0.5/self.nx
        self.area = 6 * self.radius/3**0.5 * self.radius/2
        self.size = 6 * 2*self.nx**2
        self.X = torch.zeros(self.size,self.dim)
        m = 0
        for i in range(self.nx):
            for j in range(self.nx):
                self.X[2*m,0] = (0.5+i+0.5*j) * self.hx
                self.X[2*m,1] = (j+1/3) * 0.5*3**0.5*self.hx
                self.X[2*m+1,0] = (1+i+0.5*j) * self.hx
                self.X[2*m+1,1] = (j+2/3) * 0.5*3**0.5*self.hx
                m = m+1
        th = np.pi/3
        RM = torch.Tensor([np.cos(th),np.sin(th),-np.sin(th),np.cos(th)]).reshape(2,2)
        for k in range(5):
            ind0 = k*2*self.nx**2; ind1 = (k+1)*2*self.nx**2; ind2 = (k+2)*2*self.nx**2
            self.X[ind1:ind2,:] = self.X[ind0:ind1,:].matmul(RM)
        self.u_acc = UU(self.X,lamda,[0,0]).reshape(self.size,1)

# Boundary set
class BDSET():
    def __init__(self, radius, nx, lamda):
        self.dim = 2
        self.radius = radius
        self.nx = nx
        self.hx = self.radius/3**0.5/self.nx
        self.lenth = 12*self.nx*self.hx
        self.size = 12*self.nx
        self.X = torch.zeros(self.size,self.dim)
        self.n = torch.zeros(self.size,self.dim)
        m = 0
        for i in range(self.nx):
            self.X[m,0] = -(i+0.5) * 0.5*self.hx
            self.X[m,1] = self.radius - (i+0.5) * 0.5*3**0.5*self.hx
            self.n[m,0] = -0.5*3**0.5
            self.n[m,1] = 0.5
            m = m+1
        for i in range(self.nx):
            self.X[m,0] = -0.5*self.radius/3**0.5 - (i+0.5) * self.hx
            self.X[m,1] = 0.5*self.radius
            self.n[m,0] = 0.0
            self.n[m,1] = 1.0
            m = m+1
        th = np.pi/3
        RM = torch.Tensor([np.cos(th),np.sin(th),-np.sin(th),np.cos(th)]).reshape(2,2)
        for k in range(5):
            ind0 = k*2*self.nx; ind1 = (k+1)*2*self.nx; ind2 = (k+2)*2*self.nx
            self.X[ind1:ind2,:] = self.X[ind0:ind1,:].matmul(RM)
            self.n[ind1:ind2,:] = self.n[ind0:ind1,:].matmul(RM)
        self.DS = 6*nx
        self.Dlenth = self.DS*self.hx
        self.DX = self.X[:self.DS,:]
        self.Dn = self.n[:self.DS,:]
        self.NS = self.size - self.DS
        self.Nlenth = self.NS*self.hx
        self.NX = self.X[self.DS:,:]
        self.Nn = self.n[self.DS:,:]
        self.Dright = UU(self.DX,lamda,[0,0]).reshape(-1,1)
        self.Nright = RR_N(self.NX,self.Nn,lamda).reshape(-1,1)

# Test set
class TESET():
    def __init__(self, radius, nx, lamda):
        self.dim = 2
        self.radius = radius
        self.nx = nx
        self.hx = self.radius/3**0.5/self.nx
        self.size = 6 * self.nx*(self.nx+1) + 1
        self.X = torch.zeros(self.size,self.dim)
        m = 0
        for i in range(self.nx):
            for j in range(self.nx+1):
                self.X[m,0] = (1+i+0.5*j) * self.hx
                self.X[m,1] = j * 0.5*3**0.5*self.hx
                m = m+1
        th = np.pi/3
        RM = torch.Tensor([np.cos(th),np.sin(th),-np.sin(th),np.cos(th)]).reshape(2,2)
        for k in range(5):
            ind0 = k*self.nx*(self.nx+1)
            ind1 = (k+1)*self.nx*(self.nx+1)
            ind2 = (k+2)*self.nx*(self.nx+1)
            self.X[ind1:ind2,:] = self.X[ind0:ind1,:].matmul(RM)
        self.u_acc = UU(self.X,lamda,[0,0]).reshape(-1,1)
class LEN():
    def __init__(self, bdset):
        self.n = bdset.nx
        self.radius = bdset.radius
        self.dim = 2
        self.mu = 3
        self.num = 3
        self.ee = 1  #(1.25*self.radius)**2/(4*self.n)
        self.inp = []
        self.L_max = 1.0
        for i in range(self.num):
            node = torch.zeros(4*self.n,self.dim)
            node[0:2*self.n,:] = bdset.DX[2*i*self.n:2*(i + 1)*self.n,:]
            node[2*self.n:,:] = bdset.NX[2*i*self.n:2*(i + 1)*self.n,:]
            self.inp.append(node)
        self.L_max = max(self.forward(bdset.X))
    def dist(self, X, Y):
        d = ((X - Y)**2).sum(1)
        return (self.ee + d)**(-0.5)
    def coef(self, k):
        node = self.inp[k]  # [60,2]
        size = node.shape[0] + node.shape[1] + 1
        N = node.shape[0]
        A = torch.zeros(size,size)
        A[:N,N:size - 1] = node
        A[N:size - 1,:N] = node.t()
        A[:N,-1] = torch.ones(N)
        A[-1,:N] = torch.ones(N)
        for i in range(N):
            A[0:N,i] = self.dist(node,node[i,:])
            #print(A[0:N,0].shape,self.dist(node,node[0,:]).shape)
        b = torch.zeros(size,1)
        b[2*self.n:4*self.n,:] = torch.ones(2*self.n,1)
        xishu,unknow = torch.solve(b,A)
        #xishu = torch.linalg.solve(A,b)  # use this on newer PyTorch releases, where torch.solve has been removed
        return xishu
    def lk(self, k, X):
        node = self.inp[k]  # [60,2]
        size = node.shape[0] + node.shape[1] + 1
        N = node.shape[0]
        value = torch.zeros(X.shape[0])
        xishu = self.coef(k)
        for i in range(N):
            value += xishu[i]*self.dist(X,node[i,:])
        for j in range(node.shape[1]):
            value += xishu[N + j]*X[:,j]
        return value + xishu[-1]
    def forward(self, X):
        L = 1.0
        for i in range(self.num):
            L = L*(1 - (1 - self.lk(i,X))**self.mu)
        return (L/self.L_max).view(-1,1)
'''
nx = 4
lamda = 2.1
radius = 2
bdset = BDSET(radius, nx, lamda)
le = LEN(bdset)
i = 0
node = torch.zeros(4*nx,2)
node[0:2*nx,:] = bdset.DX[2*i*nx:2*(i + 1)*nx,:]
node[2*nx:,:] = bdset.NX[2*i*nx:2*(i + 1)*nx,:]
dx = bdset.DX[2*i*nx:2*(i + 1)*nx,:]
print(le.lk(i,node))
# test the length factor function
'''
np.random.seed(1234)
torch.manual_seed(1234)
class Net(torch.nn.Module):
    def __init__(self, layers):
        super(Net, self).__init__()
        self.layers = layers
        self.hidden_layers_num = len(layers)-2
        fc = []
        for i in range(self.hidden_layers_num):
            fc.append(torch.nn.Linear(self.layers[i],self.layers[i+1]))
            fc.append(torch.nn.Linear(self.layers[i+1],self.layers[i+1]))
        fc.append(torch.nn.Linear(self.layers[-2], self.layers[-1]))
        self.fc = torch.nn.Sequential(*fc)
    def forward(self, Input):
        for i in range(self.hidden_layers_num):
            Output = torch.sin(self.fc[2*i](Input))
            Output = torch.sin(self.fc[2*i+1](Output))
            Output[:,0:self.layers[i]] = Output[:,0:self.layers[i]] + Input
            Input = Output
        return self.fc[-1](Input)

def pred(netg, netf, lenth, X):
    return netg.forward(X) + lenth.forward(X)*netf.forward(X)
def error(u_pred, u_acc):
    fenzi = ((u_pred - u_acc)**2).sum()
    fenmu = (u_acc**2).sum()
    return (fenzi/fenmu)**(0.5)

def Lossg(netg, bdset):  # fit the Dirichlet boundary data
    ub = netg.forward(bdset.DX)
    return bdset.Dlenth * ((ub - bdset.Dright)**2).mean()

def Lossf(netf, inset, bdset):
    inset.X.requires_grad = True
    insetF = netf.forward(inset.X)
    insetFx, = torch.autograd.grad(insetF, inset.X, create_graph=True, retain_graph=True, grad_outputs=torch.ones(inset.size,1))
    u_in = inset.G + inset.L * insetF  # inset.G is netg evaluated at inset.X, supplied before training to speed up iteration
    ux = inset.Gx + inset.Lx*insetF + inset.L*insetFx  # chain rule on the composite function, for faster iteration
    ub = bdset.N_G + bdset.N_L * netf.forward(bdset.NX)
    return (inset.area*((1 + ux**2).sum(1))**0.5).mean() - bdset.Nlenth * (bdset.Nright*ub).mean()

def Traing(netg, bdset, optimg, epochg):
    print('train neural network g')
    lossg = Lossg(netg,bdset)
    lossbest = lossg
    print('epoch:%d,lossf:%.3e'%(0,lossg.item()))
    torch.save(netg.state_dict(),'best_netg.pkl')
    cycle = 100
    for i in range(epochg):
        st = time.time()
        for j in range(cycle):
            optimg.zero_grad()
            lossg = Lossg(netg,bdset)
            lossg.backward()
            optimg.step()
        if lossg < lossbest:
            lossbest = lossg
            torch.save(netg.state_dict(),'best_netg.pkl')
        ela = time.time() - st
        print('epoch:%d,lossg:%.3e,time:%.2f'%((i + 1)*cycle,lossg.item(),ela))

# Train neural network f
def Trainf(netf, inset, bdset, optimf, epochf):
    print('train neural network f')
    ERROR,BUZHOU = [],[]
    lossf = Lossf(netf,inset,bdset)
    lossoptimal = lossf
    trainerror = error(inset.G + inset.L * netf.forward(inset.X), inset.u_acc)
    print('epoch: %d, loss: %.3e, trainerror: %.3e'%(0, lossf.item(), trainerror.item()))
    torch.save(netf.state_dict(),'best_netf.pkl')
    cycle = 100
    for i in range(epochf):
        st = time.time()
        for j in range(cycle):
            optimf.zero_grad()
            lossf = Lossf(netf,inset,bdset)
            lossf.backward()
            optimf.step()
        if lossf < lossoptimal:
            lossoptimal = lossf
            torch.save(netf.state_dict(),'best_netf.pkl')
        ela = time.time() - st
        trainerror = error(inset.G + inset.L * netf.forward(inset.X), inset.u_acc)
        ERROR.append(trainerror.item())
        BUZHOU.append((i + 1)*cycle)
        print('epoch:%d,lossf:%.3e,train error:%.3e,time:%.2f'%((i + 1)*cycle,lossf.item(),trainerror,ela))
    return ERROR,BUZHOU

# Train neural network
def Train(netg, netf, lenth, inset, bdset, optimg, optimf, epochg, epochf):
    # Calculate the length factor
    inset.X.requires_grad = True
    inset.L = lenth.forward(inset.X)
    inset.Lx, = torch.autograd.grad(inset.L, inset.X, create_graph=True, retain_graph=True, grad_outputs=torch.ones(inset.size,1))  # gradient of the length factor w.r.t. the interior points
    bdset.N_L = lenth.forward(bdset.NX)  # length factor evaluated at the Neumann boundary sample points
    inset.L = inset.L.data; inset.Lx = inset.Lx.data; bdset.N_L = bdset.N_L.data
    # Train neural network g
    Traing(netg, bdset, optimg, epochg)
    netg.load_state_dict(torch.load('best_netg.pkl'))
    inset.X.requires_grad = True
    inset.G = netg.forward(inset.X)
    inset.Gx, = torch.autograd.grad(inset.G, inset.X, create_graph=True, retain_graph=True, grad_outputs=torch.ones(inset.size,1))
    bdset.N_G = netg.forward(bdset.NX)
    inset.G = inset.G.data; inset.Gx = inset.Gx.data; bdset.N_G = bdset.N_G.data
    # Train neural network f
    ERROR,BUZHOU = Trainf(netf, inset, bdset, optimf, epochf)
    return ERROR,BUZHOU
def main():
    # Configurations
    radius = 2.0
    lamda = 2.1
    nx_tr = 15
    nx_te = 30
    epochg = 4
    epochf = 4
    layer_g = [2,10,1]
    layer_f = [2,10,10,10,1]
    learning_rate = 0.01
    tests_num = 1
    # ------------------------------------------------------------------------------------------------
    # Tests
    testerror = torch.zeros(tests_num)
    for it in range(tests_num):
        # Prepare data set
        inset = INSET(radius, nx_tr, lamda)
        bdset = BDSET(radius, nx_tr, lamda)
        teset = TESET(radius, nx_te, lamda)
        # Construct length factor
        #lenth = lenthfactor(bdset)
        lenth = LEN(bdset)
        # Construct neural network
        netg = Net(layer_f)
        netf = Net(layer_g)
        optimg = torch.optim.Adam(netg.parameters(), lr=learning_rate)
        optimf = torch.optim.Adam(netf.parameters(), lr=learning_rate)
        # Train neural network
        start_time = time.time()
        ERROR,BUZHOU = Train(netg, netf, lenth, inset, bdset, optimg, optimf, epochg, epochf)
        print(ERROR,BUZHOU)
        elapsed = time.time() - start_time
        print('Train time: %.2f' %(elapsed))
        # Make prediction
        netg.load_state_dict(torch.load('best_netg.pkl'))
        netf.load_state_dict(torch.load('best_netf.pkl'))
        te_U = pred(netg, netf, lenth, teset.X)
        testerror[it] = error(te_U, teset.u_acc)
        print('testerror = %.3e\n' %(testerror[it].item()))
    # ------------------------------------------------------------------------------------------------
    print(testerror.data)
    errors_mean = testerror.mean()
    errors_std = testerror.std()
    print('test_error_mean = %.3e, test_error_std = %.3e'%(errors_mean.item(),errors_std.item()))

if __name__ == '__main__':
    main()
