Further Notes on Solving the Navier-Stokes Equations with Neural Networks

Choosing the test-function space

⚪ Basis functions

Using integration by parts (the divergence theorem), the pressure term $\int_{\Omega} v P_x \,dx\,dy$ can be turned into $-\int_{\Omega} P v_x \,dx\,dy$, since the test function $v$ vanishes on the boundary. This avoids differentiating the pressure output of the network and speeds up training, although the accuracy seems slightly worse. The code is as follows.
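For reference, the weak form that `Lossf` below discretizes is, as I read the code (with $\nu = 1/Re$, $f$ the forcing from `FF`, and $v$ a test function vanishing on $\partial\Omega$):

$$
\nu\int_{\Omega}\nabla u_k\cdot\nabla v\,dx\,dy-\int_{\Omega}P\,\frac{\partial v}{\partial x_k}\,dx\,dy+\int_{\Omega}\Big(u_1\frac{\partial u_k}{\partial x}+u_2\frac{\partial u_k}{\partial y}-f_k\Big)v\,dx\,dy=0,\qquad k=1,2,
$$

while the divergence $\partial_x u_1+\partial_y u_2$ is penalized pointwise at the quadrature points.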

import torch
import time
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn

def UU(X,order,prob):
    if prob == 1:
        Re = 1000
        lam = 1/2*Re - (1/4*Re**2 + 4*np.pi**2)**0.5;eta = 2*np.pi# Kovasznay-type velocity parameters
        if order == [0,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = 1 - torch.exp(lam*X[:,0])*torch.cos(X[:,1]*eta)
            tmp[:,1] = torch.exp(lam*X[:,0])*torch.sin(X[:,1]*eta)*lam/(eta)
            return tmp
        if order == [1,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = - torch.exp(lam*X[:,0])*torch.cos(X[:,1]*eta)*lam
            tmp[:,1] = torch.exp(lam*X[:,0])*torch.sin(X[:,1]*eta)*(lam**2)/(eta)
            return tmp
        if order == [0,1]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.exp(lam*X[:,0])*torch.sin(X[:,1]*eta)*(eta)
            tmp[:,1] = torch.exp(lam*X[:,0])*torch.cos(X[:,1]*eta)*lam
            return tmp
        if order == [2,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = - torch.exp(lam*X[:,0])*torch.cos(X[:,1]*eta)*lam*lam
            tmp[:,1] = torch.exp(lam*X[:,0])*torch.sin(X[:,1]*eta)*(lam**3)/(eta)
            return tmp
        if order == [0,2]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.exp(lam*X[:,0])*torch.cos(X[:,1]*eta)*(eta)**2
            tmp[:,1] = -torch.exp(lam*X[:,0])*torch.sin(X[:,1]*eta)*lam*(eta)
            return tmp
    if prob == 2:
        if order == [0,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.sin(X[:,0]) + torch.sin(X[:,1])
            tmp[:,1] = -torch.cos(X[:,0])*X[:,1]
            return tmp
        if order == [1,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.cos(X[:,0])
            tmp[:,1] = torch.sin(X[:,0])*X[:,1]
            return tmp
        if order == [0,1]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.cos(X[:,1])
            tmp[:,1] = -torch.cos(X[:,0])
            return tmp
        if order == [2,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = - torch.sin(X[:,0])
            tmp[:,1] = torch.cos(X[:,0])*X[:,1]
            return tmp
        if order == [0,2]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = -torch.sin(X[:,1])
            tmp[:,1] = 0*X[:,0]
            return tmp
    if prob == 3:
        if order == [0,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = X[:,0]**2 + torch.exp(X[:,1])
            tmp[:,1] = - 2*X[:,0]*X[:,1]
            return tmp
        if order == [1,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = 2*X[:,0]
            tmp[:,1] = -2*X[:,1]
            return tmp
        if order == [0,1]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.exp(X[:,1])
            tmp[:,1] = -2*X[:,0]
            return tmp
        if order == [2,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = 2*torch.ones_like(X[:,0])
            tmp[:,1] = 0*X[:,0]
            return tmp
        if order == [0,2]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.exp(X[:,1])
            tmp[:,1] = 0*X[:,1]
            return tmp
    
def Delta(X,prob):
    return UU(X,[2,0],prob) + UU(X,[0,2],prob)
def PP(X,order,prob):
    if prob==1:
        temp = 10*(X[:,0]+X[:,1])**2 + (X[:,0]-X[:,1])**2 + 0.5
        if order[0]==0 and order[1]==0:
            return torch.log(temp)
        if order[0]==1 and order[1]==0:
            return temp**(-1) * (20*(X[:,0]+X[:,1]) + 2*(X[:,0]-X[:,1]))
        if order[0]==0 and order[1]==1:
            return temp**(-1) * (20*(X[:,0]+X[:,1]) - 2*(X[:,0]-X[:,1]))
        if order[0]==2 and order[1]==0:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) ** 2 \
                   + temp**(-1) * (22)
        if order[0]==1 and order[1]==1:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) \
                   * (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) \
                   + temp**(-1) * (18)
        if order[0]==0 and order[1]==2:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) ** 2 \
                   + temp**(-1) * (22)
    if prob==2:
        temp1 = X[:,0]*X[:,0] - X[:,1]*X[:,1]
        temp2 = X[:,0]*X[:,0] + X[:,1]*X[:,1] + 0.1
        if order[0]==0 and order[1]==0:
            return temp1 * temp2**(-1)
        if order[0]==1 and order[1]==0:
            return (2*X[:,0]) * temp2**(-1) + \
                   temp1 * (-1)*temp2**(-2) * (2*X[:,0])
        if order[0]==0 and order[1]==1:
            return (-2*X[:,1]) * temp2**(-1) + \
                   temp1 * (-1)*temp2**(-2) * (2*X[:,1])
        if order[0]==2 and order[1]==0:
            return (2) * temp2**(-1) + \
                   2 * (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,0]) + \
                   temp1 * (2)*temp2**(-3) * (2*X[:,0])**2 + \
                   temp1 * (-1)*temp2**(-2) * (2)
        if order[0]==1 and order[1]==1:
            return (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,1]) + \
                   (-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,0]) + \
                   temp1 * (2)*temp2**(-3) * (2*X[:,0]) * (2*X[:,1])
        if order[0]==0 and order[1]==2:
            return (-2) * temp2**(-1) + \
                   2 * (-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,1]) + \
                   temp1 * (2)*temp2**(-3) * (2*X[:,1])**2 + \
                   temp1 * (-1)*temp2**(-2) * (2)
    if prob==3:
        temp = torch.exp(-4*X[:,1]*X[:,1])
        if order[0]==0 and order[1]==0:
            ind = (X[:,0]<=0).float()
            return ind * ((X[:,0]+1)**4-1) * temp + \
                   (1-ind) * (-(-X[:,0]+1)**4+1) * temp
        if order[0]==1 and order[1]==0:
            ind = (X[:,0]<=0).float()
            return ind * (4*(X[:,0]+1)**3) * temp + \
                   (1-ind) * (4*(-X[:,0]+1)**3) * temp
        if order[0]==0 and order[1]==1:
            ind = (X[:,0]<=0).float()
            return ind * ((X[:,0]+1)**4-1) * (temp*(-8*X[:,1])) + \
                   (1-ind) * (-(-X[:,0]+1)**4+1) * (temp*(-8*X[:,1]))
        if order[0]==2 and order[1]==0:
            ind = (X[:,0]<=0).float()
            return ind * (12*(X[:,0]+1)**2) * temp + \
                   (1-ind) * (-12*(-X[:,0]+1)**2) * temp
        if order[0]==1 and order[1]==1:
            ind = (X[:,0]<=0).float()
            return ind * (4*(X[:,0]+1)**3) * (temp*(-8*X[:,1])) + \
                   (1-ind) * (4*(-X[:,0]+1)**3) * (temp*(-8*X[:,1]))
        if order[0]==0 and order[1]==2:
            ind = (X[:,0]<=0).float()
            return ind * ((X[:,0]+1)**4-1) * (temp*(64*X[:,1]*X[:,1]-8)) + \
                   (1-ind) * (-(-X[:,0]+1)**4+1) * (temp*(64*X[:,1]*X[:,1]-8))
def FF(X,prob):# right-hand side f, with nu = 1/Re
    tmp = torch.zeros(X.shape[0],2)
    Re = 1000
    nu = 1/Re
    tmp[:,0] = -nu*Delta(X,prob)[:,0] + (UU(X,[0,0],prob)[:,0])*(UU(X,[1,0],prob)[:,0]) + \
    (UU(X,[0,0],prob)[:,1])*(UU(X,[0,1],prob)[:,0]) + PP(X,[1,0],prob)
    tmp[:,1] = -nu*Delta(X,prob)[:,1] + (UU(X,[0,0],prob)[:,0])*(UU(X,[1,0],prob)[:,1]) + \
    (UU(X,[0,0],prob)[:,1])*(UU(X,[0,1],prob)[:,1]) + PP(X,[0,1],prob)
    return tmp


# function definitions done
class INSET():
    def __init__(self,bound,nx,prob):
        self.dim = 2
        #self.area = (bound[0,1] - bound[0,0])*(bound[1,1] - bound[1,0])
        self.hx = [(bound[0,1] - bound[0,0])/nx[0],(bound[1,1] - bound[1,0])/nx[1]]
        self.size = (nx[0] - 1)*(nx[1] - 1)
        self.nx = nx
        self.bound = bound
        self.gp = np.sqrt(4/15)# offset (in local coordinates) of the sample point inside each of the 4 neighboring cells
        
        self.Node()# generate the grid nodes; each interior node carries a basis function
        self.cai()# sample the quadrature points X and record the indices of the interior nodes, so their basis functions are easy to fetch
        self.test()# evaluate each test function, and its partial derivatives, on the region where it is nonzero
        self.u_acc = torch.zeros(self.X.shape[0],self.X.shape[1],2)
        self.right = torch.zeros(self.X.shape[0],self.X.shape[1],2)
        for i in range(self.X.shape[0]):
            self.u_acc[i,:,:] = UU(self.X[i,:,:],[0,0],prob)
            self.right[i,:,:] = FF(self.X[i,:,:],prob)
    def Node(self):# generate the (M + 1)*(N + 1) grid nodes; note these are not the same points as X
        self.Nodes_size = (self.nx[0] + 1)*(self.nx[1] + 1)
        self.Nodes = torch.zeros(self.Nodes_size,self.dim)
        m = 0
        for i in range(self.nx[0] + 1):
            for j in range(self.nx[1] + 1):
                self.Nodes[m,0] = self.bound[0,0] + i*self.hx[0]
                self.Nodes[m,1] = self.bound[1,0] + j*self.hx[1]
                m = m + 1
    def cai(self):
        self.index = np.zeros([(self.nx[0] - 1)*(self.nx[1] - 1)],int)# indices of the interior grid nodes
        m = 0
        for i in range(1,self.nx[0]):
            for j in range(1,self.nx[1]):# the number of interior nodes is (nx[0] - 1)*(nx[1] - 1)
                self.index[m] = i*(self.nx[1] + 1) + j
                m = m + 1
        #ind = self.index
        #plt.scatter(self.Nodes[ind,0],self.Nodes[ind,1]) # uncomment to check that only interior nodes were picked
        self.X = torch.zeros((self.nx[0] - 1)*(self.nx[1] - 1),4,self.dim)
        # the basis function centered at an interior node is nonzero on the 4 surrounding cells; one point is sampled in each cell
        m = 0
        for i in range(1,self.nx[0]):
            for j in range(1,self.nx[1]):
                self.X[m,0,0] = self.bound[0,0] + (i - self.gp)*self.hx[0]
                self.X[m,0,1] = self.bound[1,0] + (j - self.gp)*self.hx[1]
                self.X[m,1,0] = self.bound[0,0] + (i + self.gp)*self.hx[0]
                self.X[m,1,1] = self.bound[1,0] + (j - self.gp)*self.hx[1]
                self.X[m,2,0] = self.bound[0,0] + (i + self.gp)*self.hx[0]
                self.X[m,2,1] = self.bound[1,0] + (j + self.gp)*self.hx[1]
                self.X[m,3,0] = self.bound[0,0] + (i - self.gp)*self.hx[0]
                self.X[m,3,1] = self.bound[1,0] + (j + self.gp)*self.hx[1]
                m = m + 1
    def phi(self,X,order):# basis function on [-1,1]*[-1,1]: equals 1 at the origin and 0 at the other grid nodes
        indx = (X[:,0] >= -1)*(X[:,0] <= 1)
        indy = (1 - X[:,0]**2 >= X[:,1]**2)
        #indy = (X[:,1] >= -1)*(X[:,1] <= 1)
        if order == [0,0]:
            return (indx*indy).float()*(1 - X[:,0]**2 - X[:,1]**2)
        if order == [1,0]:
            return (indx*indy).float()*(-2*X[:,0])
        if order == [0,1]:
            return (indx*indy).float()*(-2*X[:,1])
    def basic(self,X,order,i):# basis function attached to node i, following the node storage order
        temp = (X - self.Nodes[i,:])/torch.tensor([self.hx[0],self.hx[1]])
        if order == [0,0]:
            return self.phi(temp,order)
        if order == [1,0]:
            return self.phi(temp,order)/self.hx[0]
        if order == [0,1]:
            return self.phi(temp,order)/self.hx[1]
    def test(self):
        self.v = torch.zeros(self.size,4,1)
        # values of the test functions at the interior sample points: there are (nx[0] - 1)*(nx[1] - 1) test functions, each nonzero at only 4 sample points
        #print(self.v[0,:,0:1].shape,self.basic(self.X[0,:,:],[0,0],self.index[0]).shape)
        for i in range(self.size):
            self.v[i,:,0:1] = self.basic(self.X[i,:,:],[0,0],self.index[i]).view(-1,1)
        self.vx = torch.zeros(self.size,4,self.dim)# partial derivatives of the basis functions on their supports
        for i in range(self.size):
            self.vx[i,:,0] = self.basic(self.X[i,:,:],[1,0],self.index[i])
            self.vx[i,:,1] = self.basic(self.X[i,:,:],[0,1],self.index[i])
        

class BDSET():# boundary points and their Dirichlet values
    def __init__(self,bound,nx,prob):
        self.dim = 2
        self.DS = 2*(nx[0] + nx[1])
        self.Dlenth = 2*(bound[0,1] - bound[0,0]) + 2*(bound[1,1] - bound[1,0])
        self.hx = [(bound[0,1] - bound[0,0])/nx[0],(bound[1,1] - bound[1,0])/nx[1]]
        
        self.X = torch.zeros(self.DS,self.dim)# store the boundary points
        m = 0
        for i in range(nx[0]):
            self.X[m,0] = bound[0,0] + (i + 0.5)*self.hx[0]
            self.X[m,1] = bound[1,0] 
            m = m + 1
        for j in range(nx[1]):
            self.X[m,0] = bound[0,1]
            self.X[m,1] = bound[1,0] + (j + 0.5)*self.hx[1]
            m = m + 1
        for i in range(nx[0]):
            self.X[m,0] = bound[0,0] + (i + 0.5)*self.hx[0]
            self.X[m,1] = bound[1,1] 
            m = m + 1
        for j in range(nx[1]):
            self.X[m,0] = bound[0,0]
            self.X[m,1] = bound[1,0] + (j + 0.5)*self.hx[1]
            m = m + 1
        self.Dright = UU(self.X,[0,0],prob)
class TESET():
    def __init__(self,bound,nx,prob):
        self.dim = 2
        self.hx = [(bound[0,1] - bound[0,0])/nx[0],(bound[1,1] - bound[1,0])/nx[1]]
        M,N = nx[0] + 1,nx[1] + 1
        self.size = M*N
        self.X = torch.zeros(self.size,self.dim)# store all grid nodes of the domain, boundary included
        for j in range(N):
            for i in range(M):
                self.X[j*M + i,0] = bound[0,0] + i*self.hx[0]
                self.X[j*M + i,1] = bound[1,0] + j*self.hx[1]
        self.u_acc = UU(self.X,[0,0],prob)# exact solution at the grid nodes
# datasets done
np.random.seed(1234)
torch.manual_seed(1234)
class NETG(nn.Module):# u = netf*lengthfactor + netg; this is netg
    def __init__(self):
        super(NETG,self).__init__()
        self.fc1 = torch.nn.Linear(2,16)
        self.fc2 = torch.nn.Linear(16,16)
        self.fc3 = torch.nn.Linear(16,10)
        self.fc4 = torch.nn.Linear(10,2)
    def forward(self,x):
        out = torch.sin(self.fc1(x))
        out = torch.sin(self.fc2(out)) + x@torch.eye(x.size(-1),16)
        out = torch.sin(self.fc3(out)) + x@torch.eye(x.size(-1),10)
        return self.fc4(out)
    
class SIN(nn.Module):# u = netf*lengthfactor + netg; sin**order activation used by the Res blocks of netf
    def __init__(self,order):
        super(SIN,self).__init__()
        self.e = order
    def forward(self,x):
        return torch.sin(x)**self.e
class Res(nn.Module):
    def __init__(self,input_size,output_size):
        super(Res,self).__init__()
        self.model = nn.Sequential(
            nn.Linear(input_size,output_size),
            SIN(1)
            
        )
        self.input = input_size
        self.output = output_size
    def forward(self,x):
        x = self.model(x) + x@torch.eye(x.size(-1),self.output)# residual-style skip connection
        return x
class NETF(nn.Module):# u = netf*lengthfactor + netg; this is netf, which fits the interior values
    def __init__(self):
        super(NETF,self).__init__()
        self.model = nn.Sequential(
            Res(2,16),
            Res(16,16),
            Res(16,16)
            
        )
        self.fc = torch.nn.Linear(16,3)
    def forward(self,x):
        out = self.model(x)
        return self.fc(out)


class lenthfactor():
    def __init__(self,bound,mu):
        self.mu = mu
        self.bound = bound
        self.dx = bound[:,1] - bound[:,0]
    def forward(self,X):
        L = 1.0
        if X.ndim == 2:# boundary points, stored as [m,2]
            for i in range(2):
                L = L*(1 - (1 - (X[:,i] - self.bound[i,0])/self.dx[i])**self.mu)
                L = L*(1 - (1 - (self.bound[i,1] - X[:,i])/self.dx[i])**self.mu)
            return L.view(-1,1)
        elif X.ndim == 3:# interior points, stored as [m,4,2], m being the number of interior grid nodes
            for i in range(2):
                L = L*(1 - (1 - (X[:,:,i] - self.bound[i,0])/self.dx[i])**self.mu)
                L = L*(1 - (1 - (self.bound[i,1] - X[:,:,i])/self.dx[i])**self.mu)
            return L.view(X.shape[0],X.shape[1],1)  
def pred(netg,netf,lenth,X):
    if X.ndim == 2:
        #print(netg.forward(X).shape,lenth.forward(X).shape,netf.forward(X).shape)
        return netg.forward(X) + lenth.forward(X)*netf.forward(X)[:,0:2]
    if X.ndim == 3:
        #print(netg.forward(X).shape,lenth.forward(X).shape,netf.forward(X).shape)
        return netg.forward(X) + lenth.forward(X)*netf.forward(X)[:,:,0:2]
def error(u_pred,u_acc):
    tmp = (u_pred - u_acc).reshape(-1,2)
    return (((u_pred - u_acc)**2).mean())**(0.5)
    #return max(max(abs(tmp[:,0])),max(abs(tmp[:,1])))
    #return (((u_pred-u_acc)**2).sum() / (u_acc**2).sum()) ** (0.5)

# ----------------------------------------------------------------------------------------------------
def Lossg(netg,bdset):# fit the Dirichlet boundary data
    ub = netg.forward(bdset.X)
    return bdset.Dlenth * ((ub - bdset.Dright)**2).sum()

def Lossf(netf,inset):# the earlier version of the equation used mu = 0.5; here nu = 1/Re
    Re = 1000
    nu = 1/Re
    inset.X.requires_grad = True
    
    insetF = netf.forward(inset.X)[:,:,0:2]
    
    insetFx1, = torch.autograd.grad(insetF[:,:,0:1], inset.X, create_graph=True, retain_graph=True,
                                    grad_outputs=torch.ones(inset.size,4,1))# partial derivatives of u_1
    insetFx2, = torch.autograd.grad(insetF[:,:,1:2], inset.X, create_graph=True, retain_graph=True,
                                    grad_outputs=torch.ones(inset.size,4,1))# partial derivatives of u_2
    P = netf.forward(inset.X)[:,:,2:3]
    
    u1 = inset.G1 + inset.L*insetF[:,:,0:1]
    u2 = inset.G2 + inset.L*insetF[:,:,1:2]
    
    ux1 = inset.Gx1 + inset.Lx*insetF[:,:,0:1] + inset.L*insetFx1# chain rule for the composite u = G + L*F; precomputing G, L speeds up iteration
    ux2 = inset.Gx2 + inset.Lx*insetF[:,:,1:2] + inset.L*insetFx2# chain rule for the composite u = G + L*F; precomputing G, L speeds up iteration
    
    out1 = nu*(inset.vx*ux1).sum(2) - (inset.vx[:,:,0:1]*P).sum(2) + \
    ((u1*ux1[:,:,0:1] + u2*ux1[:,:,1:2] - inset.right[:,:,0:1])*inset.v).sum(2)
    out2 = nu*(inset.vx*ux2).sum(2) - (inset.vx[:,:,1:2]*P).sum(2) + \
    ((u1*ux2[:,:,0:1] + u2*ux2[:,:,1:2] - inset.right[:,:,1:2])*inset.v).sum(2)
    out_ = (ux1[:,:,0:1] + ux2[:,:,1:2]).sum(2)# divergence of u
    #out_ = -(inset.vx[:,:,0:1]*u1 + inset.vx[:,:,1:2]*u2).sum(2)
    out = (out1.sum(1))**2 + (out2.sum(1))**2 + (out_**2).sum(1)
    return out.sum()
    #return torch.log(out.mean())

def Traing(netg, bdset, optimg, epochg):
    print('train neural network g')
    lossg = Lossg(netg,bdset)
    lossbest = lossg
    print('epoch:%d,lossg:%.3e'%(0,lossg.item()))
    torch.save(netg.state_dict(),'best_netg.pkl')
    cycle = 100
    for i in range(epochg):
        st = time.time()
        '''
        for j in range(cycle):
            optimg.zero_grad()
            lossg = Lossg(netg,bdset)
            lossg.backward()
            optimg.step()
        '''
        def closure():
            optimg.zero_grad()
            lossg = Lossg(netg,bdset)
            lossg.backward()
            return lossg
        optimg.step(closure)
        
        lossg = Lossg(netg,bdset)

        if lossg < lossbest:
            lossbest = lossg
            torch.save(netg.state_dict(),'best_netg.pkl')
        ela = time.time() - st
        print('epoch:%d,lossg:%.3e,time:%.2f'%((i + 1)*cycle,lossg.item(),ela))

# Train neural network f
def Trainf(netf, inset, bdset, optimf, epochf):
    print('train neural network f')
    ERROR,BUZHOU = [],[]
    lossf = Lossf(netf,inset)
    lossoptimal = lossf
    trainerror = error(inset.G + inset.L * netf.forward(inset.X)[:,:,0:2], inset.u_acc)
    print('epoch: %d, loss: %.3e, trainerror: %.3e'
          %(0, lossf.item(), trainerror.item()))
    torch.save(netf.state_dict(),'best_netf.pkl')
    cycle = 100
    for i in range(epochf):
        st = time.time()
        '''
        for j in range(cycle):
            optimf.zero_grad()
            lossf = Lossf(netf,inset)
            lossf.backward()
            optimf.step()
        '''
        def closure():
            optimf.zero_grad()
            lossf = Lossf(netf,inset)
            lossf.backward()
            return lossf
        optimf.step(closure)
        lossf = Lossf(netf,inset)

        if lossf < lossoptimal:
            lossoptimal = lossf
            torch.save(netf.state_dict(),'best_netf.pkl')
        ela = time.time() - st
        trainerror = error(inset.G + inset.L * netf.forward(inset.X)[:,:,0:2], inset.u_acc)
        ERROR.append(trainerror.item())
        BUZHOU.append((i + 1)*cycle)
        print('epoch:%d,lossf:%.3e,train error:%.3e,time:%.2f'%
             ((i + 1)*cycle,lossf.item(),trainerror,ela))
    return ERROR,BUZHOU

# Train neural network
def Train(netg, netf, lenth, inset, bdset, optimg, optimf, epochg, epochf):
    
    # Calculate the length factor
    inset.X.requires_grad = True
    inset.L = lenth.forward(inset.X)
    inset.Lx, = torch.autograd.grad(inset.L, inset.X,# gradient of the length factor w.r.t. the interior inputs
                                    create_graph=True, retain_graph=True,
                                    grad_outputs=torch.ones(inset.size,4,1))
    
    
    inset.L = inset.L.data; inset.Lx = inset.Lx.data

    # Train neural network g
    Traing(netg, bdset, optimg, epochg)

    netg.load_state_dict(torch.load('best_netg.pkl'))
    inset.X.requires_grad = True
    inset.G = netg.forward(inset.X)
    inset.G1 = inset.G[:,:,0:1];inset.G2 = inset.G[:,:,1:2]
    inset.Gx1, = torch.autograd.grad(inset.G1, inset.X,# gradient of netg's first output w.r.t. the interior inputs
                                     create_graph=True, retain_graph=True,
                                     grad_outputs=torch.ones(inset.size,4,1))
    inset.Gx2, = torch.autograd.grad(inset.G2, inset.X,# gradient of netg's second output w.r.t. the interior inputs
                                     create_graph=True, retain_graph=True,
                                     grad_outputs=torch.ones(inset.size,4,1))
    inset.G = inset.G.data; inset.Gx1 = inset.Gx1.data;inset.Gx2 = inset.Gx2.data
    inset.G1 = inset.G1.data;inset.G2 = inset.G2.data

    # Train neural network f
    ERROR,BUZHOU = Trainf(netf, inset, bdset, optimf, epochf)
    return ERROR,BUZHOU



    
# Configurations
prob = 1
bounds = torch.tensor([[-0.5,1],[-0.5,1.5]]).float()
nx_tr = [51,51]# training-set mesh resolution
nx_te = [101,101]# test-set mesh resolution
epochg = 30
epochf = 30
lr = 1e-2
tests_num = 1

# ------------------------------------------------------------------------------------------------
testerror = torch.zeros(tests_num)
for it in range(tests_num):

    inset = INSET(bounds, nx_tr, prob)
    bdset = BDSET(bounds, nx_tr, prob)
    teset = TESET(bounds, nx_te, prob)

    lenth = lenthfactor(bounds, 1)

    netg = NETG()
    netf = NETF()
    #optimg = torch.optim.Adam(netg.parameters(), lr=lr)
    #optimf = torch.optim.Adam(netf.parameters(), lr=lr)
    optimg = torch.optim.LBFGS(netg.parameters(), lr=lr,max_iter = 100,line_search_fn = 'strong_wolfe')
    optimf = torch.optim.LBFGS(netf.parameters(), lr=lr,max_iter = 100,line_search_fn = 'strong_wolfe')


    start_time = time.time()
    ERROR,BUZHOU = Train(netg, netf, lenth, inset, bdset, optimg, optimf, epochg, epochf)
    #print(ERROR,BUZHOU)
    elapsed = time.time() - start_time
    print('Train time: %.2f' %(elapsed))

    netg.load_state_dict(torch.load('best_netg.pkl'))
    netf.load_state_dict(torch.load('best_netf.pkl'))
    te_U = pred(netg, netf, lenth, teset.X)
    testerror[it] = error(te_U, teset.u_acc)
    print('testerror = %.3e\n' %(testerror[it].item()))
    plt.plot(BUZHOU,ERROR,'r*')
    plt.plot(BUZHOU,ERROR)
    plt.title('absolute error vs. iteration for prob = %d'%(prob))
    plt.savefig('jvxing.jpg')
print(testerror.data)
testerror_mean = testerror.mean()
testerror_std = testerror.std()
print('testerror_mean = %.3e, testerror_std = %.3e'
      %(testerror_mean.item(),testerror_std.item()))
    


(Figure: training error over the iterations.)

Below is the normal circular basis-function code, where the pressure gradient is computed directly rather than moved onto the test function.
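The only substantive change is in `Lossf`: the pressure derivative `Px` is now obtained by autograd and tested directly against $v$, so (again, my reading of the code) the momentum residual reads

$$
\nu\int_{\Omega}\nabla u_k\cdot\nabla v\,dx\,dy+\int_{\Omega}\Big(u_1\frac{\partial u_k}{\partial x}+u_2\frac{\partial u_k}{\partial y}+\frac{\partial P}{\partial x_k}-f_k\Big)v\,dx\,dy=0,\qquad k=1,2,
$$

without shifting $\partial_{x_k}P$ onto $v$.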

import torch
import time
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn

def UU(X,order,prob):
    if prob == 1:
        Re = 1000
        lam = 1/2*Re - (1/4*Re**2 + 4*np.pi**2)**0.5;eta = 2*np.pi# Kovasznay-type velocity parameters
        if order == [0,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = 1 - torch.exp(lam*X[:,0])*torch.cos(X[:,1]*eta)
            tmp[:,1] = torch.exp(lam*X[:,0])*torch.sin(X[:,1]*eta)*lam/(eta)
            return tmp
        if order == [1,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = - torch.exp(lam*X[:,0])*torch.cos(X[:,1]*eta)*lam
            tmp[:,1] = torch.exp(lam*X[:,0])*torch.sin(X[:,1]*eta)*(lam**2)/(eta)
            return tmp
        if order == [0,1]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.exp(lam*X[:,0])*torch.sin(X[:,1]*eta)*(eta)
            tmp[:,1] = torch.exp(lam*X[:,0])*torch.cos(X[:,1]*eta)*lam
            return tmp
        if order == [2,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = - torch.exp(lam*X[:,0])*torch.cos(X[:,1]*eta)*lam*lam
            tmp[:,1] = torch.exp(lam*X[:,0])*torch.sin(X[:,1]*eta)*(lam**3)/(eta)
            return tmp
        if order == [0,2]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.exp(lam*X[:,0])*torch.cos(X[:,1]*eta)*(eta)**2
            tmp[:,1] = -torch.exp(lam*X[:,0])*torch.sin(X[:,1]*eta)*lam*(eta)
            return tmp
    if prob == 2:
        if order == [0,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.sin(X[:,0]) + torch.sin(X[:,1])
            tmp[:,1] = -torch.cos(X[:,0])*X[:,1]
            return tmp
        if order == [1,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.cos(X[:,0])
            tmp[:,1] = torch.sin(X[:,0])*X[:,1]
            return tmp
        if order == [0,1]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.cos(X[:,1])
            tmp[:,1] = -torch.cos(X[:,0])
            return tmp
        if order == [2,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = - torch.sin(X[:,0])
            tmp[:,1] = torch.cos(X[:,0])*X[:,1]
            return tmp
        if order == [0,2]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = -torch.sin(X[:,1])
            tmp[:,1] = 0*X[:,0]
            return tmp
    if prob == 3:
        if order == [0,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = X[:,0]**2 + torch.exp(X[:,1])
            tmp[:,1] = - 2*X[:,0]*X[:,1]
            return tmp
        if order == [1,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = 2*X[:,0]
            tmp[:,1] = -2*X[:,1]
            return tmp
        if order == [0,1]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.exp(X[:,1])
            tmp[:,1] = -2*X[:,0]
            return tmp
        if order == [2,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = 2*torch.ones_like(X[:,0])
            tmp[:,1] = 0*X[:,0]
            return tmp
        if order == [0,2]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.exp(X[:,1])
            tmp[:,1] = 0*X[:,1]
            return tmp
    
def Delta(X,prob):
    return UU(X,[2,0],prob) + UU(X,[0,2],prob)
def PP(X,order,prob):
    if prob==1:
        temp = 10*(X[:,0]+X[:,1])**2 + (X[:,0]-X[:,1])**2 + 0.5
        if order[0]==0 and order[1]==0:
            return torch.log(temp)
        if order[0]==1 and order[1]==0:
            return temp**(-1) * (20*(X[:,0]+X[:,1]) + 2*(X[:,0]-X[:,1]))
        if order[0]==0 and order[1]==1:
            return temp**(-1) * (20*(X[:,0]+X[:,1]) - 2*(X[:,0]-X[:,1]))
        if order[0]==2 and order[1]==0:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) ** 2 \
                   + temp**(-1) * (22)
        if order[0]==1 and order[1]==1:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) \
                   * (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) \
                   + temp**(-1) * (18)
        if order[0]==0 and order[1]==2:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) ** 2 \
                   + temp**(-1) * (22)
    if prob==2:
        temp1 = X[:,0]*X[:,0] - X[:,1]*X[:,1]
        temp2 = X[:,0]*X[:,0] + X[:,1]*X[:,1] + 0.1
        if order[0]==0 and order[1]==0:
            return temp1 * temp2**(-1)
        if order[0]==1 and order[1]==0:
            return (2*X[:,0]) * temp2**(-1) + \
                   temp1 * (-1)*temp2**(-2) * (2*X[:,0])
        if order[0]==0 and order[1]==1:
            return (-2*X[:,1]) * temp2**(-1) + \
                   temp1 * (-1)*temp2**(-2) * (2*X[:,1])
        if order[0]==2 and order[1]==0:
            return (2) * temp2**(-1) + \
                   2 * (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,0]) + \
                   temp1 * (2)*temp2**(-3) * (2*X[:,0])**2 + \
                   temp1 * (-1)*temp2**(-2) * (2)
        if order[0]==1 and order[1]==1:
            return (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,1]) + \
                   (-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,0]) + \
                   temp1 * (2)*temp2**(-3) * (2*X[:,0]) * (2*X[:,1])
        if order[0]==0 and order[1]==2:
            return (-2) * temp2**(-1) + \
                   2 * (-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,1]) + \
                   temp1 * (2)*temp2**(-3) * (2*X[:,1])**2 + \
                   temp1 * (-1)*temp2**(-2) * (2)
    if prob==3:
        temp = torch.exp(-4*X[:,1]*X[:,1])
        if order[0]==0 and order[1]==0:
            ind = (X[:,0]<=0).float()
            return ind * ((X[:,0]+1)**4-1) * temp + \
                   (1-ind) * (-(-X[:,0]+1)**4+1) * temp
        if order[0]==1 and order[1]==0:
            ind = (X[:,0]<=0).float()
            return ind * (4*(X[:,0]+1)**3) * temp + \
                   (1-ind) * (4*(-X[:,0]+1)**3) * temp
        if order[0]==0 and order[1]==1:
            ind = (X[:,0]<=0).float()
            return ind * ((X[:,0]+1)**4-1) * (temp*(-8*X[:,1])) + \
                   (1-ind) * (-(-X[:,0]+1)**4+1) * (temp*(-8*X[:,1]))
        if order[0]==2 and order[1]==0:
            ind = (X[:,0]<=0).float()
            return ind * (12*(X[:,0]+1)**2) * temp + \
                   (1-ind) * (-12*(-X[:,0]+1)**2) * temp
        if order[0]==1 and order[1]==1:
            ind = (X[:,0]<=0).float()
            return ind * (4*(X[:,0]+1)**3) * (temp*(-8*X[:,1])) + \
                   (1-ind) * (4*(-X[:,0]+1)**3) * (temp*(-8*X[:,1]))
        if order[0]==0 and order[1]==2:
            ind = (X[:,0]<=0).float()
            return ind * ((X[:,0]+1)**4-1) * (temp*(64*X[:,1]*X[:,1]-8)) + \
                   (1-ind) * (-(-X[:,0]+1)**4+1) * (temp*(64*X[:,1]*X[:,1]-8))
def FF(X,prob):# right-hand side f, with nu = 1/Re
    tmp = torch.zeros(X.shape[0],2)
    Re = 1000
    nu = 1/Re
    tmp[:,0] = -nu*Delta(X,prob)[:,0] + (UU(X,[0,0],prob)[:,0])*(UU(X,[1,0],prob)[:,0]) + \
    (UU(X,[0,0],prob)[:,1])*(UU(X,[0,1],prob)[:,0]) + PP(X,[1,0],prob)
    tmp[:,1] = -nu*Delta(X,prob)[:,1] + (UU(X,[0,0],prob)[:,0])*(UU(X,[1,0],prob)[:,1]) + \
    (UU(X,[0,0],prob)[:,1])*(UU(X,[0,1],prob)[:,1]) + PP(X,[0,1],prob)
    return tmp


# function definitions done
class INSET():
    def __init__(self,bound,nx,prob):
        self.dim = 2
        #self.area = (bound[0,1] - bound[0,0])*(bound[1,1] - bound[1,0])
        self.hx = [(bound[0,1] - bound[0,0])/nx[0],(bound[1,1] - bound[1,0])/nx[1]]
        self.size = (nx[0] - 1)*(nx[1] - 1)
        self.nx = nx
        self.bound = bound
        self.gp = np.sqrt(4/15)# offset (in local coordinates) of the sample point inside each of the 4 neighboring cells
        
        self.Node()# generate the grid nodes; each interior node carries a basis function
        self.cai()# sample the quadrature points X and record the indices of the interior nodes, so their basis functions are easy to fetch
        self.test()# evaluate each test function, and its partial derivatives, on the region where it is nonzero
        self.u_acc = torch.zeros(self.X.shape[0],self.X.shape[1],2)
        self.right = torch.zeros(self.X.shape[0],self.X.shape[1],2)
        for i in range(self.X.shape[0]):
            self.u_acc[i,:,:] = UU(self.X[i,:,:],[0,0],prob)
            self.right[i,:,:] = FF(self.X[i,:,:],prob)
    def Node(self):# generate the (M + 1)*(N + 1) grid nodes; note these are not the same points as X
        self.Nodes_size = (self.nx[0] + 1)*(self.nx[1] + 1)
        self.Nodes = torch.zeros(self.Nodes_size,self.dim)
        m = 0
        for i in range(self.nx[0] + 1):
            for j in range(self.nx[1] + 1):
                self.Nodes[m,0] = self.bound[0,0] + i*self.hx[0]
                self.Nodes[m,1] = self.bound[1,0] + j*self.hx[1]
                m = m + 1
    def cai(self):
        self.index = np.zeros([(self.nx[0] - 1)*(self.nx[1] - 1)],int)# indices of the interior grid nodes
        m = 0
        for i in range(1,self.nx[0]):
            for j in range(1,self.nx[1]):# the number of interior nodes is (nx[0] - 1)*(nx[1] - 1)
                self.index[m] = i*(self.nx[1] + 1) + j
                m = m + 1
        #ind = self.index
        #plt.scatter(self.Nodes[ind,0],self.Nodes[ind,1]) # uncomment to check that only interior nodes were picked
        self.X = torch.zeros((self.nx[0] - 1)*(self.nx[1] - 1),4,self.dim)
        # the basis function centered at an interior node is nonzero on the 4 surrounding cells; one point is sampled in each cell
        m = 0
        for i in range(1,self.nx[0]):
            for j in range(1,self.nx[1]):
                self.X[m,0,0] = self.bound[0,0] + (i - self.gp)*self.hx[0]
                self.X[m,0,1] = self.bound[1,0] + (j - self.gp)*self.hx[1]
                self.X[m,1,0] = self.bound[0,0] + (i + self.gp)*self.hx[0]
                self.X[m,1,1] = self.bound[1,0] + (j - self.gp)*self.hx[1]
                self.X[m,2,0] = self.bound[0,0] + (i + self.gp)*self.hx[0]
                self.X[m,2,1] = self.bound[1,0] + (j + self.gp)*self.hx[1]
                self.X[m,3,0] = self.bound[0,0] + (i - self.gp)*self.hx[0]
                self.X[m,3,1] = self.bound[1,0] + (j + self.gp)*self.hx[1]
                m = m + 1
    def phi(self,X,order):# basis function on [-1,1]*[-1,1]: equals 1 at the origin and 0 at the other grid nodes
        indx = (X[:,0] >= -1)*(X[:,0] <= 1)
        indy = (1 - X[:,0]**2 >= X[:,1]**2)
        #indy = (X[:,1] >= -1)*(X[:,1] <= 1)
        if order == [0,0]:
            return (indx*indy).float()*(1 - X[:,0]**2 - X[:,1]**2)
        if order == [1,0]:
            return (indx*indy).float()*(-2*X[:,0])
        if order == [0,1]:
            return (indx*indy).float()*(-2*X[:,1])
    def basic(self,X,order,i):# basis function attached to node i, following the node storage order
        temp = (X - self.Nodes[i,:])/torch.tensor([self.hx[0],self.hx[1]])
        if order == [0,0]:
            return self.phi(temp,order)
        if order == [1,0]:
            return self.phi(temp,order)/self.hx[0]
        if order == [0,1]:
            return self.phi(temp,order)/self.hx[1]
    def test(self):
        self.v = torch.zeros(self.size,4,1)
        # values of the test functions at the interior sample points: there are (nx[0] - 1)*(nx[1] - 1) test functions, each nonzero at only 4 sample points
        #print(self.v[0,:,0:1].shape,self.basic(self.X[0,:,:],[0,0],self.index[0]).shape)
        for i in range(self.size):
            self.v[i,:,0:1] = self.basic(self.X[i,:,:],[0,0],self.index[i]).view(-1,1)
        self.vx = torch.zeros(self.size,4,self.dim)# partial derivatives of the basis functions on their supports
        for i in range(self.size):
            self.vx[i,:,0] = self.basic(self.X[i,:,:],[1,0],self.index[i])
            self.vx[i,:,1] = self.basic(self.X[i,:,:],[0,1],self.index[i])
        

class BDSET():# boundary points and their Dirichlet values
    def __init__(self,bound,nx,prob):
        self.dim = 2
        self.DS = 2*(nx[0] + nx[1])
        self.Dlenth = 2*(bound[0,1] - bound[0,0]) + 2*(bound[1,1] - bound[1,0])
        self.hx = [(bound[0,1] - bound[0,0])/nx[0],(bound[1,1] - bound[1,0])/nx[1]]
        
        self.X = torch.zeros(self.DS,self.dim)# store the boundary points
        m = 0
        for i in range(nx[0]):
            self.X[m,0] = bound[0,0] + (i + 0.5)*self.hx[0]
            self.X[m,1] = bound[1,0] 
            m = m + 1
        for j in range(nx[1]):
            self.X[m,0] = bound[0,1]
            self.X[m,1] = bound[1,0] + (j + 0.5)*self.hx[1]
            m = m + 1
        for i in range(nx[0]):
            self.X[m,0] = bound[0,0] + (i + 0.5)*self.hx[0]
            self.X[m,1] = bound[1,1] 
            m = m + 1
        for j in range(nx[1]):
            self.X[m,0] = bound[0,0]
            self.X[m,1] = bound[1,0] + (j + 0.5)*self.hx[1]
            m = m + 1
        self.Dright = UU(self.X,[0,0],prob)
class TESET():
    def __init__(self,bound,nx,prob):
        self.dim = 2
        self.hx = [(bound[0,1] - bound[0,0])/nx[0],(bound[1,1] - bound[1,0])/nx[1]]
        M,N = nx[0] + 1,nx[1] + 1
        self.size = M*N
        self.X = torch.zeros(self.size,self.dim)# store all grid nodes of the domain, boundary included
        for j in range(N):
            for i in range(M):
                self.X[j*M + i,0] = bound[0,0] + i*self.hx[0]
                self.X[j*M + i,1] = bound[1,0] + j*self.hx[1]
        self.u_acc = UU(self.X,[0,0],prob)# exact solution at the grid nodes
# datasets done
np.random.seed(1234)
torch.manual_seed(1234)
class NETG(nn.Module):# u = netf*lengthfactor + netg; this is netg
    def __init__(self):
        super(NETG,self).__init__()
        self.fc1 = torch.nn.Linear(2,16)
        self.fc2 = torch.nn.Linear(16,16)
        self.fc3 = torch.nn.Linear(16,10)
        self.fc4 = torch.nn.Linear(10,2)
    def forward(self,x):
        out = torch.sin(self.fc1(x))
        out = torch.sin(self.fc2(out)) + x@torch.eye(x.size(-1),16)
        out = torch.sin(self.fc3(out)) + x@torch.eye(x.size(-1),10)
        return self.fc4(out)
    
class SIN(nn.Module):# u = netf*lengthfactor + netg; sin**order activation used by the Res blocks of netf
    def __init__(self,order):
        super(SIN,self).__init__()
        self.e = order
    def forward(self,x):
        return torch.sin(x)**self.e
class Res(nn.Module):
    def __init__(self,input_size,output_size):
        super(Res,self).__init__()
        self.model = nn.Sequential(
            nn.Linear(input_size,output_size),
            SIN(1)
            
        )
        self.input = input_size
        self.output = output_size
    def forward(self,x):
        x = self.model(x) + x@torch.eye(x.size(-1),self.output)# residual-style skip connection
        return x
class NETF(nn.Module):# u = netf*lengthfactor + netg; this is netf, which fits the interior values
    def __init__(self):
        super(NETF,self).__init__()
        self.model = nn.Sequential(
            Res(2,16),
            Res(16,16),
            Res(16,16)
            
        )
        self.fc = torch.nn.Linear(16,3)
    def forward(self,x):
        out = self.model(x)
        return self.fc(out)


class lenthfactor():
    def __init__(self,bound,mu):
        self.mu = mu
        self.bound = bound
        self.dx = bound[:,1] - bound[:,0]
    def forward(self,X):
        L = 1.0
        if X.ndim == 2:# boundary points, stored as [m,2]
            for i in range(2):
                L = L*(1 - (1 - (X[:,i] - self.bound[i,0])/self.dx[i])**self.mu)
                L = L*(1 - (1 - (self.bound[i,1] - X[:,i])/self.dx[i])**self.mu)
            return L.view(-1,1)
        elif X.ndim == 3:# interior points, stored as [m,4,2], m being the number of interior grid nodes
            for i in range(2):
                L = L*(1 - (1 - (X[:,:,i] - self.bound[i,0])/self.dx[i])**self.mu)
                L = L*(1 - (1 - (self.bound[i,1] - X[:,:,i])/self.dx[i])**self.mu)
            return L.view(X.shape[0],X.shape[1],1)  
def pred(netg,netf,lenth,X):
    if X.ndim == 2:
        #print(netg.forward(X).shape,lenth.forward(X).shape,netf.forward(X).shape)
        return netg.forward(X) + lenth.forward(X)*netf.forward(X)[:,0:2]
    if X.ndim == 3:
        #print(netg.forward(X).shape,lenth.forward(X).shape,netf.forward(X).shape)
        return netg.forward(X) + lenth.forward(X)*netf.forward(X)[:,:,0:2]
def error(u_pred,u_acc):
    tmp = (u_pred - u_acc).reshape(-1,2)
    return (((u_pred - u_acc)**2).mean())**(0.5)
    #return max(max(abs(tmp[:,0])),max(abs(tmp[:,1])))
    #return (((u_pred-u_acc)**2).sum() / (u_acc**2).sum()) ** (0.5)

# ----------------------------------------------------------------------------------------------------
def Lossg(netg,bdset):# fit the Dirichlet boundary data
    ub = netg.forward(bdset.X)
    return bdset.Dlenth * ((ub - bdset.Dright)**2).sum()

def Lossf(netf,inset):# the earlier version of the equation used mu = 0.5; here nu = 1/Re
    Re = 1000
    nu = 1/Re
    inset.X.requires_grad = True
    
    insetF = netf.forward(inset.X)[:,:,0:2]
    
    insetFx1, = torch.autograd.grad(insetF[:,:,0:1], inset.X, create_graph=True, retain_graph=True,
                                    grad_outputs=torch.ones(inset.size,4,1))# partial derivatives of u_1
    insetFx2, = torch.autograd.grad(insetF[:,:,1:2], inset.X, create_graph=True, retain_graph=True,
                                    grad_outputs=torch.ones(inset.size,4,1))# partial derivatives of u_2
    P = netf.forward(inset.X)[:,:,2:3]
    Px, = torch.autograd.grad(P, inset.X, create_graph=True, retain_graph=True,
                              grad_outputs=torch.ones(inset.size,4,1))
    u1 = inset.G1 + inset.L*insetF[:,:,0:1]
    u2 = inset.G2 + inset.L*insetF[:,:,1:2]
    
    ux1 = inset.Gx1 + inset.Lx*insetF[:,:,0:1] + inset.L*insetFx1# chain rule for the composite u = G + L*F; precomputing G, L speeds up iteration
    ux2 = inset.Gx2 + inset.Lx*insetF[:,:,1:2] + inset.L*insetFx2# chain rule for the composite u = G + L*F; precomputing G, L speeds up iteration
    
    out1 = nu*(inset.vx*ux1).sum(2) + ((u1*ux1[:,:,0:1] + u2*ux1[:,:,1:2] + Px[:,:,0:1] - inset.right[:,:,0:1])*inset.v).sum(2)
    out2 = nu*(inset.vx*ux2).sum(2) + ((u1*ux2[:,:,0:1] + u2*ux2[:,:,1:2] + Px[:,:,1:2] - inset.right[:,:,1:2])*inset.v).sum(2)
    out_ = (ux1[:,:,0:1] + ux2[:,:,1:2]).sum(2)# divergence of u
    
    out = (out1.sum(1))**2 + (out2.sum(1))**2 + (out_**2).sum(1)
    return out.sum()
    #return torch.log(out.mean())

def Traing(netg, bdset, optimg, epochg):
    print('train neural network g')
    lossg = Lossg(netg,bdset)
    lossbest = lossg
    print('epoch:%d,lossg:%.3e'%(0,lossg.item()))
    torch.save(netg.state_dict(),'best_netg.pkl')
    cycle = 100
    for i in range(epochg):
        st = time.time()
        '''
        for j in range(cycle):
            optimg.zero_grad()
            lossg = Lossg(netg,bdset)
            lossg.backward()
            optimg.step()
        '''
        def closure():
            optimg.zero_grad()
            lossg = Lossg(netg,bdset)
            lossg.backward()
            return lossg
        optimg.step(closure)
        
        lossg = Lossg(netg,bdset)

        if lossg < lossbest:
            lossbest = lossg
            torch.save(netg.state_dict(),'best_netg.pkl')
        ela = time.time() - st
        print('epoch:%d,lossg:%.3e,time:%.2f'%((i + 1)*cycle,lossg.item(),ela))

# Train neural network f
def Trainf(netf, inset, bdset, optimf, epochf):
    print('train neural network f')
    ERROR,BUZHOU = [],[]
    lossf = Lossf(netf,inset)
    lossoptimal = lossf
    trainerror = error(inset.G + inset.L * netf.forward(inset.X)[:,:,0:2], inset.u_acc)
    print('epoch: %d, loss: %.3e, trainerror: %.3e'
          %(0, lossf.item(), trainerror.item()))
    torch.save(netf.state_dict(),'best_netf.pkl')
    cycle = 100
    for i in range(epochf):
        st = time.time()
        '''
        for j in range(cycle):
            optimf.zero_grad()
            lossf = Lossf(netf,inset)
            lossf.backward()
            optimf.step()
        '''
        def closure():
            optimf.zero_grad()
            lossf = Lossf(netf,inset)
            lossf.backward()
            return lossf
        optimf.step(closure)
        lossf = Lossf(netf,inset)

        if lossf < lossoptimal:
            lossoptimal = lossf
            torch.save(netf.state_dict(),'best_netf.pkl')
        ela = time.time() - st
        trainerror = error(inset.G + inset.L * netf.forward(inset.X)[:,:,0:2], inset.u_acc)
        ERROR.append(trainerror.item())
        BUZHOU.append((i + 1)*cycle)
        print('epoch:%d,lossf:%.3e,train error:%.3e,time:%.2f'%
             ((i + 1)*cycle,lossf.item(),trainerror,ela))
    return ERROR,BUZHOU

# Train neural network
def Train(netg, netf, lenth, inset, bdset, optimg, optimf, epochg, epochf):
    
    # Calculate the length factor
    inset.X.requires_grad = True
    inset.L = lenth.forward(inset.X)
    inset.Lx, = torch.autograd.grad(inset.L, inset.X,# gradient of the length factor w.r.t. the interior inputs
                                    create_graph=True, retain_graph=True,
                                    grad_outputs=torch.ones(inset.size,4,1))
    
    
    inset.L = inset.L.data; inset.Lx = inset.Lx.data

    # Train neural network g
    Traing(netg, bdset, optimg, epochg)

    netg.load_state_dict(torch.load('best_netg.pkl'))
    inset.X.requires_grad = True
    inset.G = netg.forward(inset.X)
    inset.G1 = inset.G[:,:,0:1];inset.G2 = inset.G[:,:,1:2]
    inset.Gx1, = torch.autograd.grad(inset.G1, inset.X,# gradient of netg's first output w.r.t. the interior inputs
                                     create_graph=True, retain_graph=True,
                                     grad_outputs=torch.ones(inset.size,4,1))
    inset.Gx2, = torch.autograd.grad(inset.G2, inset.X,# gradient of netg's second output w.r.t. the interior inputs
                                     create_graph=True, retain_graph=True,
                                     grad_outputs=torch.ones(inset.size,4,1))
    inset.G = inset.G.data; inset.Gx1 = inset.Gx1.data;inset.Gx2 = inset.Gx2.data
    inset.G1 = inset.G1.data;inset.G2 = inset.G2.data

    # Train neural network f
    ERROR,BUZHOU = Trainf(netf, inset, bdset, optimf, epochf)
    return ERROR,BUZHOU



    
# Configurations
prob = 1
bounds = torch.tensor([[-0.5,1],[-0.5,1.5]]).float()
nx_tr = [51,51]# training-set mesh resolution
nx_te = [101,101]# test-set mesh resolution
epochg = 30
epochf = 50
lr = 1e-2
tests_num = 1

# ------------------------------------------------------------------------------------------------
testerror = torch.zeros(tests_num)
for it in range(tests_num):

    inset = INSET(bounds, nx_tr, prob)
    bdset = BDSET(bounds, nx_tr, prob)
    teset = TESET(bounds, nx_te, prob)

    lenth = lenthfactor(bounds, 1)

    netg = NETG()
    netf = NETF()
    #optimg = torch.optim.Adam(netg.parameters(), lr=lr)
    #optimf = torch.optim.Adam(netf.parameters(), lr=lr)
    optimg = torch.optim.LBFGS(netg.parameters(), lr=lr,max_iter = 100,line_search_fn = 'strong_wolfe')
    optimf = torch.optim.LBFGS(netf.parameters(), lr=lr,max_iter = 100,line_search_fn = 'strong_wolfe')


    start_time = time.time()
    ERROR,BUZHOU = Train(netg, netf, lenth, inset, bdset, optimg, optimf, epochg, epochf)
    #print(ERROR,BUZHOU)
    elapsed = time.time() - start_time
    print('Train time: %.2f' %(elapsed))

    netg.load_state_dict(torch.load('best_netg.pkl'))
    netf.load_state_dict(torch.load('best_netf.pkl'))
    te_U = pred(netg, netf, lenth, teset.X)
    testerror[it] = error(te_U, teset.u_acc)
    print('testerror = %.3e\n' %(testerror[it].item()))
    plt.plot(BUZHOU,ERROR,'r*')
    plt.plot(BUZHOU,ERROR)
    plt.title('absolute error vs. iteration for prob = %d'%(prob))
    plt.savefig('jvxing.jpg')
print(testerror.data)
testerror_mean = testerror.mean()
testerror_std = testerror.std()
print('testerror_mean = %.3e, testerror_std = %.3e'
      %(testerror_mean.item(),testerror_std.item()))
    

(Figure: training error over the iterations.)

The test functions used above for the NS equations were all obtained by rescaling a single template basis function. If you print all of inset.v and inset.vx you will find that the entries are identical, which is easy to understand: after rescaling, the four quadrature points sit at the same positions relative to their grid node, so the values must coincide. With this observation, we can instead generate a set of random numbers and treat them as the values of our test functions. To make this concrete, here is an example:
$$\psi(x,y)=\begin{cases}(1-x)(1-y), & 0\le x\le 1,\ 0\le y\le 1,\\(1+x)(1-y), & -1\le x\le 0,\ 0\le y\le 1,\\(1+x)(1+y), & -1\le x\le 0,\ -1\le y\le 0,\\(1-x)(1+y), & 0\le x\le 1,\ -1\le y\le 0.\end{cases}$$
We set $\varphi_{i,j} = \psi\left(\frac{x - x_i}{h_x},\frac{y - y_j}{h_y}\right)$; with our choice of sample points, the local coordinates satisfy $\frac{x - x_i}{h_x} = \pm 0.5$ and $\frac{y - y_j}{h_y} = \pm 0.5$ (take the point at $(0.5, 0.5)$ below).
Now consider the following modification:
$$\psi(x,y)=\begin{cases}(a-x)(1-y), & 0\le x\le 1,\ 0\le y\le 1,\\(a+x)(1-y), & -1\le x\le 0,\ 0\le y\le 1,\\(a+x)(1+y), & -1\le x\le 0,\ -1\le y\le 0,\\(a-x)(1+y), & 0\le x\le 1,\ -1\le y\le 0.\end{cases}$$
For any prescribed value $\gamma$, we require $\gamma = \varphi_{i,j} = \psi\left(\frac{x - x_i}{h_x},\frac{y - y_j}{h_y}\right) = (a - 0.5)\cdot 0.5$, which gives $a = 2\gamma + 0.5$; the corresponding gradient is then determined by the computed $a$. Doing this increases the diversity of the data.
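A minimal sketch of this construction (random_test_functions, size, hx and scale are names I made up for illustration; in the full code below the same logic lives in INSET.test):

import torch

def random_test_functions(size, hx, scale=0.25):
    # gamma: one random test-function value per interior node, shared by its 4 sample points
    v = scale*torch.rand(size, 1, 1).repeat(1, 4, 1)
    # with a = 2*gamma + 0.5, at the local sample points (+-0.5, +-0.5):
    #   dpsi/dx = +-0.5                   (sign flips across x = 0)
    #   dpsi/dy = +-(a - 0.5) = +-2*gamma (sign flips across y = 0)
    # the 1/h factors come from the chain rule of the rescaling (x - x_i)/h_x
    vx = torch.zeros(size, 4, 2)
    vx[:, 0, 0] =  0.5/hx[0]; vx[:, 0, 1] =  2*v[:, 0, 0]/hx[1]  # point (-0.5, -0.5)
    vx[:, 1, 0] = -0.5/hx[0]; vx[:, 1, 1] =  2*v[:, 1, 0]/hx[1]  # point (+0.5, -0.5)
    vx[:, 2, 0] = -0.5/hx[0]; vx[:, 2, 1] = -2*v[:, 2, 0]/hx[1]  # point (+0.5, +0.5)
    vx[:, 3, 0] =  0.5/hx[0]; vx[:, 3, 1] = -2*v[:, 3, 0]/hx[1]  # point (-0.5, +0.5)
    return v, vx

v, vx = random_test_functions(size=3, hx=[0.1, 0.1])
print(v.shape, vx.shape)  # torch.Size([3, 4, 1]) torch.Size([3, 4, 2])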

import torch
import time
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn

def UU(X,order,prob):
    if prob == 1:
        lam = 0.5;eta = 2*np.pi
        if order == [0,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = 1 - torch.exp(lam*X[:,0])*torch.cos(X[:,1]*eta)
            tmp[:,1] = torch.exp(lam*X[:,0])*torch.sin(X[:,1]*eta)*lam/(eta)
            return tmp
        if order == [1,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = - torch.exp(lam*X[:,0])*torch.cos(X[:,1]*eta)*lam
            tmp[:,1] = torch.exp(lam*X[:,0])*torch.sin(X[:,1]*eta)*(lam**2)/(eta)
            return tmp
        if order == [0,1]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.exp(lam*X[:,0])*torch.sin(X[:,1]*eta)*(eta)
            tmp[:,1] = torch.exp(lam*X[:,0])*torch.cos(X[:,1]*eta)*lam
            return tmp
        if order == [2,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = - torch.exp(lam*X[:,0])*torch.cos(X[:,1]*eta)*lam*lam
            tmp[:,1] = torch.exp(lam*X[:,0])*torch.sin(X[:,1]*eta)*(lam**3)/(eta)
            return tmp
        if order == [0,2]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.exp(lam*X[:,0])*torch.cos(X[:,1]*eta)*(eta)**2
            tmp[:,1] = -torch.exp(lam*X[:,0])*torch.sin(X[:,1]*eta)*lam*(eta)
            return tmp
    if prob == 2:
        if order == [0,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.sin(X[:,0]) + torch.sin(X[:,1])
            tmp[:,1] = -torch.cos(X[:,0])*X[:,1]
            return tmp
        if order == [1,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.cos(X[:,0])
            tmp[:,1] = torch.sin(X[:,0])*X[:,1]
            return tmp
        if order == [0,1]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.cos(X[:,1])
            tmp[:,1] = -torch.cos(X[:,0])
            return tmp
        if order == [2,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = - torch.sin(X[:,0])
            tmp[:,1] = torch.cos(X[:,0])*X[:,1]
            return tmp
        if order == [0,2]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = -torch.sin(X[:,1])
            tmp[:,1] = 0*X[:,0]
            return tmp
    if prob == 3:
        if order == [0,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = X[:,0]**2 + torch.exp(X[:,1])
            tmp[:,1] = - 2*X[:,0]*X[:,1]
            return tmp
        if order == [1,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = 2*X[:,0]
            tmp[:,1] = -2*X[:,1]
            return tmp
        if order == [0,1]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.exp(X[:,1])
            tmp[:,1] = -2*X[:,0]
            return tmp
        if order == [2,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = 2*torch.ones_like(X[:,0])
            tmp[:,1] = 0*X[:,0]
            return tmp
        if order == [0,2]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.exp(X[:,1])
            tmp[:,1] = 0*X[:,1]
            return tmp
    
def Delta(X,prob):
    return UU(X,[2,0],prob) + UU(X,[0,2],prob)
def PP(X,order,prob):
    if prob==1:
        temp = 10*(X[:,0]+X[:,1])**2 + (X[:,0]-X[:,1])**2 + 0.5
        if order[0]==0 and order[1]==0:
            return torch.log(temp)
        if order[0]==1 and order[1]==0:
            return temp**(-1) * (20*(X[:,0]+X[:,1]) + 2*(X[:,0]-X[:,1]))
        if order[0]==0 and order[1]==1:
            return temp**(-1) * (20*(X[:,0]+X[:,1]) - 2*(X[:,0]-X[:,1]))
        if order[0]==2 and order[1]==0:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) ** 2 \
                   + temp**(-1) * (22)
        if order[0]==1 and order[1]==1:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) \
                   * (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) \
                   + temp**(-1) * (18)
        if order[0]==0 and order[1]==2:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) ** 2 \
                   + temp**(-1) * (22)
    if prob==2:
        temp1 = X[:,0]*X[:,0] - X[:,1]*X[:,1]
        temp2 = X[:,0]*X[:,0] + X[:,1]*X[:,1] + 0.1
        if order[0]==0 and order[1]==0:
            return temp1 * temp2**(-1)
        if order[0]==1 and order[1]==0:
            return (2*X[:,0]) * temp2**(-1) + \
                   temp1 * (-1)*temp2**(-2) * (2*X[:,0])
        if order[0]==0 and order[1]==1:
            return (-2*X[:,1]) * temp2**(-1) + \
                   temp1 * (-1)*temp2**(-2) * (2*X[:,1])
        if order[0]==2 and order[1]==0:
            return (2) * temp2**(-1) + \
                   2 * (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,0]) + \
                   temp1 * (2)*temp2**(-3) * (2*X[:,0])**2 + \
                   temp1 * (-1)*temp2**(-2) * (2)
        if order[0]==1 and order[1]==1:
            return (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,1]) + \
                   (-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,0]) + \
                   temp1 * (2)*temp2**(-3) * (2*X[:,0]) * (2*X[:,1])
        if order[0]==0 and order[1]==2:
            return (-2) * temp2**(-1) + \
                   2 * (-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,1]) + \
                   temp1 * (2)*temp2**(-3) * (2*X[:,1])**2 + \
                   temp1 * (-1)*temp2**(-2) * (2)
    if prob==3:
        temp = torch.exp(-4*X[:,1]*X[:,1])
        if order[0]==0 and order[1]==0:
            ind = (X[:,0]<=0).float()
            return ind * ((X[:,0]+1)**4-1) * temp + \
                   (1-ind) * (-(-X[:,0]+1)**4+1) * temp
        if order[0]==1 and order[1]==0:
            ind = (X[:,0]<=0).float()
            return ind * (4*(X[:,0]+1)**3) * temp + \
                   (1-ind) * (4*(-X[:,0]+1)**3) * temp
        if order[0]==0 and order[1]==1:
            ind = (X[:,0]<=0).float()
            return ind * ((X[:,0]+1)**4-1) * (temp*(-8*X[:,1])) + \
                   (1-ind) * (-(-X[:,0]+1)**4+1) * (temp*(-8*X[:,1]))
        if order[0]==2 and order[1]==0:
            ind = (X[:,0]<=0).float()
            return ind * (12*(X[:,0]+1)**2) * temp + \
                   (1-ind) * (-12*(-X[:,0]+1)**2) * temp
        if order[0]==1 and order[1]==1:
            ind = (X[:,0]<=0).float()
            return ind * (4*(X[:,0]+1)**3) * (temp*(-8*X[:,1])) + \
                   (1-ind) * (4*(-X[:,0]+1)**3) * (temp*(-8*X[:,1]))
        if order[0]==0 and order[1]==2:
            ind = (X[:,0]<=0).float()
            return ind * ((X[:,0]+1)**4-1) * (temp*(64*X[:,1]*X[:,1]-8)) + \
                   (1-ind) * (-(-X[:,0]+1)**4+1) * (temp*(64*X[:,1]*X[:,1]-8))
def FF(X,prob):# right-hand side f, with nu = 0.5
    tmp = torch.zeros(X.shape[0],2)
    nu = 0.5
    tmp[:,0] = -nu*Delta(X,prob)[:,0] + (UU(X,[0,0],prob)[:,0])*(UU(X,[1,0],prob)[:,0]) + \
    (UU(X,[0,0],prob)[:,1])*(UU(X,[0,1],prob)[:,0]) + PP(X,[1,0],prob)
    tmp[:,1] = -nu*Delta(X,prob)[:,1] + (UU(X,[0,0],prob)[:,0])*(UU(X,[1,0],prob)[:,1]) + \
    (UU(X,[0,0],prob)[:,1])*(UU(X,[0,1],prob)[:,1]) + PP(X,[0,1],prob)
    return tmp


# function definitions done
class INSET():
    def __init__(self,bound,nx,prob):
        self.dim = 2
        #self.area = (bound[0,1] - bound[0,0])*(bound[1,1] - bound[1,0])
        self.hx = [(bound[0,1] - bound[0,0])/nx[0],(bound[1,1] - bound[1,0])/nx[1]]
        self.size = (nx[0] - 1)*(nx[1] - 1)
        self.nx = nx
        self.bound = bound
        
        self.Node()# generate the grid nodes; each interior node carries a basis function
        self.cai()# sample the quadrature points X and record the indices of the interior nodes, so their basis functions are easy to fetch
        self.test()# generate the random test-function values and the matching partial derivatives
        self.u_acc = torch.zeros(self.X.shape[0],self.X.shape[1],2)
        self.right = torch.zeros(self.X.shape[0],self.X.shape[1],2)
        for i in range(self.X.shape[0]):
            self.u_acc[i,:,:] = UU(self.X[i,:,:],[0,0],prob)
            self.right[i,:,:] = FF(self.X[i,:,:],prob)
    def Node(self):# generate the (M + 1)*(N + 1) grid nodes; note these are not the same points as X
        self.Nodes_size = (self.nx[0] + 1)*(self.nx[1] + 1)
        self.Nodes = torch.zeros(self.Nodes_size,self.dim)
        m = 0
        for i in range(self.nx[0] + 1):
            for j in range(self.nx[1] + 1):
                self.Nodes[m,0] = self.bound[0,0] + i*self.hx[0]
                self.Nodes[m,1] = self.bound[1,0] + j*self.hx[1]
                m = m + 1
    def cai(self):
        self.index = np.zeros([(self.nx[0] - 1)*(self.nx[1] - 1)],int)# indices of the interior grid nodes
        m = 0
        for i in range(1,self.nx[0]):
            for j in range(1,self.nx[1]):# the number of interior nodes is (nx[0] - 1)*(nx[1] - 1)
                self.index[m] = i*(self.nx[1] + 1) + j
                m = m + 1
        #ind = self.index
        #plt.scatter(self.Nodes[ind,0],self.Nodes[ind,1]) # uncomment to check that only interior nodes were picked
        self.X = torch.zeros((self.nx[0] - 1)*(self.nx[1] - 1),4,self.dim)
        # the basis function centered at an interior node is nonzero on the 4 surrounding cells; one point is sampled in each cell
        m = 0
        for i in range(1,self.nx[0]):
            for j in range(1,self.nx[1]):
                self.X[m,0,0] = self.bound[0,0] + (i - 0.5)*self.hx[0]
                self.X[m,0,1] = self.bound[1,0] + (j - 0.5)*self.hx[1]
                self.X[m,1,0] = self.bound[0,0] + (i + 0.5)*self.hx[0]
                self.X[m,1,1] = self.bound[1,0] + (j - 0.5)*self.hx[1]
                self.X[m,2,0] = self.bound[0,0] + (i + 0.5)*self.hx[0]
                self.X[m,2,1] = self.bound[1,0] + (j + 0.5)*self.hx[1]
                self.X[m,3,0] = self.bound[0,0] + (i - 0.5)*self.hx[0]
                self.X[m,3,1] = self.bound[1,0] + (j + 0.5)*self.hx[1]
                m = m + 1
    def phi(self,X,order):#basis on [-1,1]*[-1,1]: equals 1 at the origin and 0 at all other grid nodes
        indx = (X[:,0] >= -1)*(X[:,0] <= 1)
        indy = (1 - X[:,0]**2 >= X[:,1]**2)
        #indy = (X[:,1] >= -1)*(X[:,1] <= 1)
        if order == [0,0]:
            return (indx*indy).float()*(1 - X[:,0]**2 - X[:,1]**2)
        if order == [1,0]:
            return (indx*indy).float()*(-2*X[:,0])
        if order == [0,1]:
            return (indx*indy).float()*(-2*X[:,1])
    def basic(self,X,order,i):#following the storage order of the grid nodes, pick the basis function attached to node i
        temp = (X - self.Nodes[i,:])/torch.tensor([self.hx[0],self.hx[1]])
        if order == [0,0]:
            return self.phi(temp,order)
        if order == [1,0]:
            return self.phi(temp,order)/self.hx[0]
        if order == [0,1]:
            return self.phi(temp,order)/self.hx[1]
    def test(self):
        self.v = 0.25*torch.rand(self.size,1,1).repeat(1,4,1)
        
        self.vx = torch.zeros(self.size,4,self.dim)#partial derivatives of the test functions on their supports
        for i in range(self.size):
            self.vx[i,0,0] = 0.5/self.hx[0]
            self.vx[i,0,1] = 2*self.v[i,0,0]/self.hx[1]
            self.vx[i,1,0] = - 0.5/self.hx[0]
            self.vx[i,1,1] = 2*self.v[i,1,0]/self.hx[1]
            self.vx[i,2,0] = - 0.5/self.hx[0]
            self.vx[i,2,1] = - 2*self.v[i,2,0]/self.hx[1]
            self.vx[i,3,0] = 0.5/self.hx[0]
            self.vx[i,3,1] = - 2*self.v[i,3,0]/self.hx[1]


class BDSET():#boundary data
    def __init__(self,bound,nx,prob):
        self.dim = 2
        self.DS = 2*(nx[0] + nx[1])
        self.Dlenth = 2*(bound[0,1] - bound[0,0]) + 2*(bound[1,1] - bound[1,0])
        self.hx = [(bound[0,1] - bound[0,0])/nx[0],(bound[1,1] - bound[1,0])/nx[1]]
        
        self.X = torch.zeros(self.DS,self.dim)#store boundary points
        m = 0
        for i in range(nx[0]):
            self.X[m,0] = bound[0,0] + (i + 0.5)*self.hx[0]
            self.X[m,1] = bound[1,0] 
            m = m + 1
        for j in range(nx[1]):
            self.X[m,0] = bound[0,1]
            self.X[m,1] = bound[1,0] + (j + 0.5)*self.hx[1]
            m = m + 1
        for i in range(nx[0]):
            self.X[m,0] = bound[0,0] + (i + 0.5)*self.hx[0]
            self.X[m,1] = bound[1,1] 
            m = m + 1
        for j in range(nx[1]):
            self.X[m,0] = bound[0,0]
            self.X[m,1] = bound[1,0] + (j + 0.5)*self.hx[1]
            m = m + 1
        self.Dright = UU(self.X,[0,0],prob)
class TESET():
    def __init__(self,bound,nx,prob):
        self.dim = 2
        self.hx = [(bound[0,1] - bound[0,0])/nx[0],(bound[1,1] - bound[1,0])/nx[1]]
        M,N = nx[0] + 1,nx[1] + 1
        self.size = M*N
        self.X = torch.zeros(self.size,self.dim)#store all grid nodes of the domain, boundary included
        for j in range(N):
            for i in range(M):
                self.X[j*M + i,0] = bound[0,0] + i*self.hx[0]
                self.X[j*M + i,1] = bound[1,0] + j*self.hx[1]
        self.u_acc = UU(self.X,[0,0],prob)#exact solution at the grid nodes
#data sets complete
np.random.seed(1234)
torch.manual_seed(1234)
class NETG(nn.Module):#u = netg + lenthfactor*netf; this is netg
    def __init__(self):
        super(NETG,self).__init__()
        self.fc1 = torch.nn.Linear(2,10)
        self.fc2 = torch.nn.Linear(10,10)
        self.fc3 = torch.nn.Linear(10,2)
    def forward(self,x):
        out = torch.sin(self.fc1(x))
        out = torch.sin(self.fc2(out)) + x@torch.eye(x.size(-1),10)
        return self.fc3(out)
    
class SIN(nn.Module):#the sin^order activation used inside NETF's Res blocks
    def __init__(self,order):
        super(SIN,self).__init__()
        self.e = order
    def forward(self,x):
        return torch.sin(x)**self.e
class Res(nn.Module):
    def __init__(self,input_size,output_size):
        super(Res,self).__init__()
        self.model = nn.Sequential(
            nn.Linear(input_size,output_size),
            SIN(1),
            nn.Linear(output_size,output_size),
            SIN(1)
        )
        self.input = input_size
        self.output = output_size
    def forward(self,x):
        x = self.model(x) + x@torch.eye(x.size(-1),self.output)#residual connection: identity mapped up to the output width
        return x
class NETF(nn.Module):#u = netg + lenthfactor*netf; this is netf, which fits the interior values (outputs u1, u2, p)
    def __init__(self):
        super(NETF,self).__init__()
        self.model = nn.Sequential(
            Res(2,10),
            Res(10,10),
            Res(10,10)
        )
        self.fc = torch.nn.Linear(10,3)
    def forward(self,x):
        out = self.model(x)
        return self.fc(out)


class lenthfactor():
    def __init__(self,bound,mu):
        self.mu = mu
        self.bound = bound
        self.dx = bound[:,1] - bound[:,0]
    def forward(self,X):
        L = 1.0
        if X.ndim == 2:#boundary points, stored as [m,2]
            for i in range(2):
                L = L*(1 - (1 - (X[:,i] - self.bound[i,0])/self.dx[i])**self.mu)
                L = L*(1 - (1 - (self.bound[i,1] - X[:,i])/self.dx[i])**self.mu)
            return L.view(-1,1)
        elif X.ndim == 3:#interior points, stored as [m,4,2], m = number of interior grid nodes
            for i in range(2):
                L = L*(1 - (1 - (X[:,:,i] - self.bound[i,0])/self.dx[i])**self.mu)
                L = L*(1 - (1 - (self.bound[i,1] - X[:,:,i])/self.dx[i])**self.mu)
            return L.view(X.shape[0],X.shape[1],1)  
def pred(netg,netf,lenth,X):
    if X.ndim == 2:
        #print(netg.forward(X).shape,lenth.forward(X).shape,netf.forward(X).shape)
        return netg.forward(X) + lenth.forward(X)*netf.forward(X)[:,0:2]
    if X.ndim == 3:
        #print(netg.forward(X).shape,lenth.forward(X).shape,netf.forward(X).shape)
        return netg.forward(X) + lenth.forward(X)*netf.forward(X)[:,:,0:2]
def error(u_pred,u_acc):
    tmp = (u_pred - u_acc).reshape(-1,2)
    return max(max(abs(tmp[:,0])),max(abs(tmp[:,1])))
    #return (((u_pred-u_acc)**2).sum() / (u_acc**2).sum()) ** (0.5)

# ----------------------------------------------------------------------------------------------------
def Lossg(netg,bdset):#fit the Dirichlet boundary data
    ub = netg.forward(bdset.X)
    out = bdset.Dlenth * ((ub - bdset.Dright)**2).sum()
    return out
    #return torch.log(out)
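#Weak (Petrov-Galerkin) residual assembled in Lossf: for each test function v
#attached to an interior node, require
#    nu*(grad u, grad v) + ((u·∇)u + ∇p - f, v) ≈ 0   and   (∇·u)*v ≈ 0,
#with the integrals approximated by the four sample points stored in
#inset.v / inset.vx.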
def Lossf(netf,inset):#the equation carries a viscosity nu = 0.5
    nu = 0.5
    inset.X.requires_grad = True
    
    insetF = netf.forward(inset.X)[:,:,0:2]
    
    insetFx1, = torch.autograd.grad(insetF[:,:,0:1], inset.X, create_graph=True, retain_graph=True,
                                    grad_outputs=torch.ones(inset.size,4,1))#partial derivatives of u_1
    insetFx2, = torch.autograd.grad(insetF[:,:,1:2], inset.X, create_graph=True, retain_graph=True,
                                    grad_outputs=torch.ones(inset.size,4,1))#partial derivatives of u_2
    P = netf.forward(inset.X)[:,:,2:3]
    Px, = torch.autograd.grad(P, inset.X, create_graph=True, retain_graph=True,
                              grad_outputs=torch.ones(inset.size,4,1))
    u1 = inset.G1 + inset.L*insetF[:,:,0:1]
    u2 = inset.G2 + inset.L*insetF[:,:,1:2]
    
    ux1 = inset.Gx1 + inset.Lx*insetF[:,:,0:1] + inset.L*insetFx1#chain rule with cached G and L terms to speed up iterations
    ux2 = inset.Gx2 + inset.Lx*insetF[:,:,1:2] + inset.L*insetFx2#chain rule with cached G and L terms to speed up iterations
    
    out1 = nu*(inset.vx*ux1).sum(2) + ((u1*ux1[:,:,0:1] + u2*ux1[:,:,1:2] + Px[:,:,0:1] - inset.right[:,:,0:1])*inset.v).sum(2)
    out2 = nu*(inset.vx*ux2).sum(2) + ((u1*ux2[:,:,0:1] + u2*ux2[:,:,1:2] + Px[:,:,1:2] - inset.right[:,:,1:2])*inset.v).sum(2)
    out_ = ((ux1[:,:,0:1] + ux2[:,:,1:2])*inset.v).sum(2)#divergence constraint ∇·u = 0 tested against v
    
    out = (out1.sum(1))**2 + (out2.sum(1))**2 + (out_**2).sum(1)
    return out.mean()
    #return torch.log(out.mean())

def Traing(netg, bdset, optimg, epochg):
    print('train neural network g')
    lossg = Lossg(netg,bdset)
    lossbest = lossg
    print('epoch:%d,lossg:%.3e'%(0,lossg.item()))
    torch.save(netg.state_dict(),'best_netg.pkl')
    cycle = 100
    for i in range(epochg):
        st = time.time()
        '''
        for j in range(cycle):
            optimg.zero_grad()
            lossg = Lossg(netg,bdset)
            lossg.backward()
            optimg.step()
        '''
        def closure():
            optimg.zero_grad()
            lossg = Lossg(netg,bdset)
            lossg.backward()
            return lossg
        optimg.step(closure)
        
        lossg = Lossg(netg,bdset)

        if lossg < lossbest:
            lossbest = lossg
            torch.save(netg.state_dict(),'best_netg.pkl')
        ela = time.time() - st
        print('epoch:%d,lossg:%.3e,time:%.2f'%((i + 1)*cycle,lossg.item(),ela))

# Train neural network f
def Trainf(netf, inset, bdset, optimf, epochf):
    print('train neural network f')
    ERROR,BUZHOU = [],[]
    lossf = Lossf(netf,inset)
    lossoptimal = lossf
    trainerror = error(inset.G + inset.L * netf.forward(inset.X)[:,:,0:2], inset.u_acc)
    print('epoch: %d, loss: %.3e, trainerror: %.3e'
          %(0, lossf.item(), trainerror.item()))
    torch.save(netf.state_dict(),'best_netf.pkl')
    cycle = 100
    for i in range(epochf):
        st = time.time()
        '''
        for j in range(cycle):
            optimf.zero_grad()
            lossf = Lossf(netf,inset)
            lossf.backward()
            optimf.step()
        '''
        def closure():
            optimf.zero_grad()
            lossf = Lossf(netf,inset)
            lossf.backward()
            return lossf
        optimf.step(closure)
        lossf = Lossf(netf,inset)

        if lossf < lossoptimal:
            lossoptimal = lossf
            torch.save(netf.state_dict(),'best_netf.pkl')
        ela = time.time() - st
        trainerror = error(inset.G + inset.L * netf.forward(inset.X)[:,:,0:2], inset.u_acc)
        ERROR.append(trainerror.item())
        BUZHOU.append((i + 1)*cycle)
        print('epoch:%d,lossf:%.3e,train error:%.3e,time:%.2f'%
             ((i + 1)*cycle,lossf.item(),trainerror.item(),ela))
    return ERROR,BUZHOU

# Train neural network
def Train(netg, netf, lenth, inset, bdset, optimg, optimf, epochg, epochf):
    
    # Calculate the length factor
    inset.X.requires_grad = True
    inset.L = lenth.forward(inset.X)
    inset.Lx, = torch.autograd.grad(inset.L, inset.X,#gradient of the length factor w.r.t. the interior inputs
                                    create_graph=True, retain_graph=True,
                                    grad_outputs=torch.ones(inset.size,4,1))
    
    
    inset.L = inset.L.data; inset.Lx = inset.Lx.data

    # Train neural network g
    Traing(netg, bdset, optimg, epochg)

    netg.load_state_dict(torch.load('best_netg.pkl'))
    inset.X.requires_grad = True
    inset.G = netg.forward(inset.X)
    inset.G1 = inset.G[:,:,0:1];inset.G2 = inset.G[:,:,1:2]
    inset.Gx1, = torch.autograd.grad(inset.G1, inset.X,#gradient of netg's first output w.r.t. the interior inputs
                                     create_graph=True, retain_graph=True,
                                     grad_outputs=torch.ones(inset.size,4,1))
    inset.Gx2, = torch.autograd.grad(inset.G2, inset.X,#gradient of netg's second output w.r.t. the interior inputs
                                     create_graph=True, retain_graph=True,
                                     grad_outputs=torch.ones(inset.size,4,1))
    inset.G = inset.G.data; inset.Gx1 = inset.Gx1.data;inset.Gx2 = inset.Gx2.data
    inset.G1 = inset.G1.data;inset.G2 = inset.G2.data

    # Train neural network f
    ERROR,BUZHOU = Trainf(netf, inset, bdset, optimf, epochf)
    return ERROR,BUZHOU


def main():
    
    # Configurations
    prob = 1
    bounds = torch.tensor([[-0.5,1],[-0.5,1.5]]).float()
    nx_tr = [20,20]#training-set partition
    nx_te = [101,101]#test-set partition
    epochg = 10
    epochf = 10
    lr = 1e-2
    tests_num = 1

    # ------------------------------------------------------------------------------------------------
    testerror = torch.zeros(tests_num)
    for it in range(tests_num):

        inset = INSET(bounds, nx_tr, prob)
        bdset = BDSET(bounds, nx_tr, prob)
        teset = TESET(bounds, nx_te, prob)

        lenth = lenthfactor(bounds, 3)

        netg = NETG()
        netf = NETF()
        #optimg = torch.optim.Adam(netg.parameters(), lr=lr)
        #optimf = torch.optim.Adam(netf.parameters(), lr=lr)
        optimg = torch.optim.LBFGS(netg.parameters(), lr=lr,max_iter = 100,line_search_fn = 'strong_wolfe')
        optimf = torch.optim.LBFGS(netf.parameters(), lr=lr,max_iter = 100,line_search_fn = 'strong_wolfe')


        start_time = time.time()
        ERROR,BUZHOU = Train(netg, netf, lenth, inset, bdset, optimg, optimf, epochg, epochf)
        #print(ERROR,BUZHOU)
        elapsed = time.time() - start_time
        print('Train time: %.2f' %(elapsed))

        netg.load_state_dict(torch.load('best_netg.pkl'))
        netf.load_state_dict(torch.load('best_netf.pkl'))
        te_U = pred(netg, netf, lenth, teset.X)
        testerror[it] = error(te_U, teset.u_acc)
        print('testerror = %.3e\n' %(testerror[it].item()))
        plt.plot(BUZHOU,ERROR,'r*')
        plt.plot(BUZHOU,ERROR)
        plt.title('the absolute error for example of prob = %d at iteration'%(prob))
        plt.savefig('jvxing.jpg')
    print(testerror.data)
    testerror_mean = testerror.mean()
    testerror_std = testerror.std()
    print('testerror_mean = %.3e, testerror_std = %.3e'
          %(testerror_mean.item(),testerror_std.item()))
    
if __name__ == '__main__':
    main()

(Figure: absolute training error over the iterations for prob = 1.)

Enlarging the support

In the NS solver above, my test function space had one defining feature: very small supports, with each function nonzero only in a small neighborhood of a grid node. In fact, by the Galerkin principle we only need the test functions to vanish on the boundary (our length-factor function, for instance, does exactly that). So here we instead define the test functions as
$\sin\left(m\pi\,\frac{x - x_a}{x_b - x_a}\right)\cos\left(n\pi\,\frac{y - y_a}{y_b - y_a}\right)\cdot \mathrm{lenth}$
where lenth is the length-factor function; see my earlier posts.
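As a quick standalone illustration (a minimal sketch; the names length_factor and test_fn are mine, not part of the solver below), one such test function vanishes on the boundary of the rectangle by construction:

import torch
import numpy as np

def length_factor(X, bound, mu=1):
    # vanishes on the boundary of the rectangle bound = [[xa,xb],[ya,yb]]
    L = 1.0
    for i in range(2):
        dx = bound[i,1] - bound[i,0]
        L = L*(1 - (1 - (X[:,i] - bound[i,0])/dx)**mu)
        L = L*(1 - (1 - (bound[i,1] - X[:,i])/dx)**mu)
    return L

def test_fn(X, bound, m, n):
    # sin(m*pi*x_hat)*cos(n*pi*y_hat)*length factor, with x_hat, y_hat in [0,1]
    x_hat = (X[:,0] - bound[0,0])/(bound[0,1] - bound[0,0])
    y_hat = (X[:,1] - bound[1,0])/(bound[1,1] - bound[1,0])
    return torch.sin(np.pi*m*x_hat)*torch.cos(np.pi*n*y_hat)*length_factor(X, bound)

bound = torch.tensor([[-0.5,1.0],[-0.5,1.5]])
X = torch.tensor([[-0.5,0.0],[0.25,0.5],[1.0,1.5]])  # two boundary points, one interior
print(test_fn(X, bound, m=1, n=2))  # zero at the boundary points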

import numpy as np
import matplotlib.pyplot as plt
import time
import torch
import torch.nn as nn
def UU(X,order,prob):
    if prob == 1:
        lam = 0.5;eta = 2*np.pi
        if order == [0,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = 1 - torch.exp(lam*X[:,0])*torch.cos(X[:,1]*eta)
            tmp[:,1] = torch.exp(lam*X[:,0])*torch.sin(X[:,1]*eta)*lam/(eta)
            return tmp
        if order == [1,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = - torch.exp(lam*X[:,0])*torch.cos(X[:,1]*eta)*lam
            tmp[:,1] = torch.exp(lam*X[:,0])*torch.sin(X[:,1]*eta)*(lam**2)/(eta)
            return tmp
        if order == [0,1]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.exp(lam*X[:,0])*torch.sin(X[:,1]*eta)*(eta)
            tmp[:,1] = torch.exp(lam*X[:,0])*torch.cos(X[:,1]*eta)*lam
            return tmp
        if order == [2,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = - torch.exp(lam*X[:,0])*torch.cos(X[:,1]*eta)*lam*lam
            tmp[:,1] = torch.exp(lam*X[:,0])*torch.sin(X[:,1]*eta)*(lam**3)/(eta)
            return tmp
        if order == [0,2]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.exp(lam*X[:,0])*torch.cos(X[:,1]*eta)*(eta)**2
            tmp[:,1] = -torch.exp(lam*X[:,0])*torch.sin(X[:,1]*eta)*lam*(eta)
            return tmp
    if prob == 2:
        if order == [0,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.sin(X[:,0]) + torch.sin(X[:,1])
            tmp[:,1] = -torch.cos(X[:,0])*X[:,1]
            return tmp
        if order == [1,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.cos(X[:,0])
            tmp[:,1] = torch.sin(X[:,0])*X[:,1]
            return tmp
        if order == [0,1]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.cos(X[:,1])
            tmp[:,1] = -torch.cos(X[:,0])
            return tmp
        if order == [2,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = - torch.sin(X[:,0])
            tmp[:,1] = torch.cos(X[:,0])*X[:,1]
            return tmp
        if order == [0,2]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = -torch.sin(X[:,1])
            tmp[:,1] = 0*X[:,0]
            return tmp
    if prob == 3:
        if order == [0,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = X[:,0]**2 + torch.exp(X[:,1])
            tmp[:,1] = - 2*X[:,0]*X[:,1]
            return tmp
        if order == [1,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = 2*X[:,0]
            tmp[:,1] = -2*X[:,1]
            return tmp
        if order == [0,1]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.exp(X[:,1])
            tmp[:,1] = -2*X[:,0]
            return tmp
        if order == [2,0]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = 2*torch.ones_like(X[:,0])
            tmp[:,1] = 0*X[:,0]
            return tmp
        if order == [0,2]:
            tmp = torch.zeros(X.shape[0],2)
            tmp[:,0] = torch.exp(X[:,1])
            tmp[:,1] = 0*X[:,1]
            return tmp
    
def Delta(X,prob):
    return UU(X,[2,0],prob) + UU(X,[0,2],prob)
def PP(X,order,prob):
    if prob==1:
        temp = 10*(X[:,0]+X[:,1])**2 + (X[:,0]-X[:,1])**2 + 0.5
        if order[0]==0 and order[1]==0:
            return torch.log(temp)
        if order[0]==1 and order[1]==0:
            return temp**(-1) * (20*(X[:,0]+X[:,1]) + 2*(X[:,0]-X[:,1]))
        if order[0]==0 and order[1]==1:
            return temp**(-1) * (20*(X[:,0]+X[:,1]) - 2*(X[:,0]-X[:,1]))
        if order[0]==2 and order[1]==0:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) ** 2 \
                   + temp**(-1) * (22)
        if order[0]==1 and order[1]==1:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) \
                   * (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) \
                   + temp**(-1) * (18)
        if order[0]==0 and order[1]==2:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) ** 2 \
                   + temp**(-1) * (22)
    if prob==2:
        temp1 = X[:,0]*X[:,0] - X[:,1]*X[:,1]
        temp2 = X[:,0]*X[:,0] + X[:,1]*X[:,1] + 0.1
        if order[0]==0 and order[1]==0:
            return temp1 * temp2**(-1)
        if order[0]==1 and order[1]==0:
            return (2*X[:,0]) * temp2**(-1) + \
                   temp1 * (-1)*temp2**(-2) * (2*X[:,0])
        if order[0]==0 and order[1]==1:
            return (-2*X[:,1]) * temp2**(-1) + \
                   temp1 * (-1)*temp2**(-2) * (2*X[:,1])
        if order[0]==2 and order[1]==0:
            return (2) * temp2**(-1) + \
                   2 * (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,0]) + \
                   temp1 * (2)*temp2**(-3) * (2*X[:,0])**2 + \
                   temp1 * (-1)*temp2**(-2) * (2)
        if order[0]==1 and order[1]==1:
            return (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,1]) + \
                   (-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,0]) + \
                   temp1 * (2)*temp2**(-3) * (2*X[:,0]) * (2*X[:,1])
        if order[0]==0 and order[1]==2:
            return (-2) * temp2**(-1) + \
                   2 * (-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,1]) + \
                   temp1 * (2)*temp2**(-3) * (2*X[:,1])**2 + \
                   temp1 * (-1)*temp2**(-2) * (2)
    if prob==3:
        temp = torch.exp(-4*X[:,1]*X[:,1])
        if order[0]==0 and order[1]==0:
            ind = (X[:,0]<=0).float()
            return ind * ((X[:,0]+1)**4-1) * temp + \
                   (1-ind) * (-(-X[:,0]+1)**4+1) * temp
        if order[0]==1 and order[1]==0:
            ind = (X[:,0]<=0).float()
            return ind * (4*(X[:,0]+1)**3) * temp + \
                   (1-ind) * (4*(-X[:,0]+1)**3) * temp
        if order[0]==0 and order[1]==1:
            ind = (X[:,0]<=0).float()
            return ind * ((X[:,0]+1)**4-1) * (temp*(-8*X[:,1])) + \
                   (1-ind) * (-(-X[:,0]+1)**4+1) * (temp*(-8*X[:,1]))
        if order[0]==2 and order[1]==0:
            ind = (X[:,0]<=0).float()
            return ind * (12*(X[:,0]+1)**2) * temp + \
                   (1-ind) * (-12*(-X[:,0]+1)**2) * temp
        if order[0]==1 and order[1]==1:
            ind = (X[:,0]<=0).float()
            return ind * (4*(X[:,0]+1)**3) * (temp*(-8*X[:,1])) + \
                   (1-ind) * (4*(-X[:,0]+1)**3) * (temp*(-8*X[:,1]))
        if order[0]==0 and order[1]==2:
            ind = (X[:,0]<=0).float()
            return ind * ((X[:,0]+1)**4-1) * (temp*(64*X[:,1]*X[:,1]-8)) + \
                   (1-ind) * (-(-X[:,0]+1)**4+1) * (temp*(64*X[:,1]*X[:,1]-8))
def FF(X,prob):#right-hand side f = -nu*Delta(u) + (u·∇)u + ∇p, with nu = 0.5
    tmp = torch.zeros(X.shape[0],2)
    nu = 0.5
    tmp[:,0] = -nu*Delta(X,prob)[:,0] + (UU(X,[0,0],prob)[:,0])*(UU(X,[1,0],prob)[:,0]) + \
    (UU(X,[0,0],prob)[:,1])*(UU(X,[0,1],prob)[:,0]) + PP(X,[1,0],prob)
    tmp[:,1] = -nu*Delta(X,prob)[:,1] + (UU(X,[0,0],prob)[:,0])*(UU(X,[1,0],prob)[:,1]) + \
    (UU(X,[0,0],prob)[:,1])*(UU(X,[0,1],prob)[:,1]) + PP(X,[0,1],prob)
    return tmp
class INSET():#interior collocation points and test-function data
    def __init__(self,bound,nx,prob):
        self.dim = 2
        self.bound = bound
        self.hx = [(bound[0,1] - bound[0,0])/nx[0],(bound[1,1] - bound[1,0])/nx[1]]
        self.size = nx[0]*nx[1]
        self.X = torch.zeros(self.size,self.dim)#store interior points
        self.nx = nx
        self.dx = bound[:,1] - bound[:,0]
        for i in range(nx[0]):
            for j in range(nx[1]):
                self.X[i*nx[1] + j,0] = bound[0,0] + (i + 0.5)*self.hx[0]
                self.X[i*nx[1] + j,1] = bound[1,0] + (j + 0.5)*self.hx[1]
        self.u_acc = UU(self.X,[0,0],prob)
        self.right = FF(self.X,prob)
        self.cal()
    def Len(self,X,order):
        mu = 1
        L = 1.0
        x = (X[:,0] - self.bound[0,0])/self.dx[0];y = (X[:,1] - self.bound[1,0])/self.dx[1]
        Lx1 = (1 - (1 - x)**mu);Lx2 = (1 - (1 - (self.bound[0,1] - X[:,0])/self.dx[0])**mu)
        Ly1 = (1 - (1 - y)**mu);Ly2 = (1 - (1 - (self.bound[1,1] - X[:,1])/self.dx[1])**mu)
        if order == [0,0]:
            L = L*Lx1*Lx2*Ly1*Ly2
            
        if order == [1,0]:
            L = L*(mu/self.dx[0])*(Lx2*(1 - x)**(mu - 1) - Lx1*(1 - (self.bound[0,1] - X[:,0])/self.dx[0])**(mu - 1))
            L = L*Ly1*Ly2
            
        if order == [0,1]:
            L = L*(mu/self.dx[1])*(Ly2*(1 - y)**(mu - 1) - Ly1*(1 - (self.bound[1,1] - X[:,1])/self.dx[1])**(mu - 1))
            L = L*Lx1*Lx2
        return L
    def phi(self,X,order,m,n):
        x = (X[:,0] - self.bound[0,0])/self.dx[0];y = (X[:,1] - self.bound[1,0])/self.dx[1]
        if order == [0,0]:
            return torch.sin(np.pi*m*x)*torch.cos(np.pi*n*y)
        if order == [1,0]:
            return torch.cos(np.pi*m*x)*torch.cos(np.pi*n*y)*np.pi*m/self.dx[0]
        if order == [0,1]:
            return -torch.sin(np.pi*m*x)*torch.sin(np.pi*n*y)*np.pi*n/self.dx[1]
    def cal(self):
        self.v = torch.zeros(2*self.size,self.size,1)
        self.vx = torch.zeros(2*self.size,self.size,2)
        
        m = 0
        for i in range(2*self.nx[0]):
            for j in range(self.nx[1]):
                self.v[m,:,0] = self.Len(self.X,[0,0])*self.phi(self.X,[0,0],i + 1,j + 1)
                self.vx[m,:,0] = self.Len(self.X,[0,0])*self.phi(self.X,[1,0],i + 1,j + 1) + \
                self.Len(self.X,[1,0])*self.phi(self.X,[0,0],i + 1,j + 1)
                self.vx[m,:,1] = self.Len(self.X,[0,0])*self.phi(self.X,[0,1],i + 1,j + 1) + \
                self.Len(self.X,[0,1])*self.phi(self.X,[0,0],i + 1,j + 1)
                m = m + 1
class BDSET():#boundary data
    def __init__(self,bound,nx,prob):
        self.dim = 2
        #self.area = (bound[0,1] - bound[0,0])*(bound[1,1] - bound[1,0])
        self.hx = [(bound[0,1] - bound[0,0])/nx[0],(bound[1,1] - bound[1,0])/nx[1]]
        self.size = 2*(nx[0] + nx[1])
        self.X = torch.zeros(self.size,self.dim)#store boundary points
        m = 0
        for i in range(nx[0]):
            self.X[m,0] = bound[0,0] + (i + 0.5)*self.hx[0]
            self.X[m,1] = bound[1,0] 
            m = m + 1
        for j in range(nx[1]):
            self.X[m,0] = bound[0,1]
            self.X[m,1] = bound[1,0] + (j + 0.5)*self.hx[1]
            m = m + 1
        for i in range(nx[0]):
            self.X[m,0] = bound[0,0] + (i + 0.5)*self.hx[0]
            self.X[m,1] = bound[1,1] 
            m = m + 1
        for j in range(nx[1]):
            self.X[m,0] = bound[0,0]
            self.X[m,1] = bound[1,0] + (j + 0.5)*self.hx[1]
            m = m + 1
        #plt.scatter(self.X[:,0],self.X[:,1])
        self.u_acc = UU(self.X,[0,0],prob)#exact solution at the boundary points
class TESET():
    def __init__(self, bound, nx,prob):
        self.bound = bound
        self.nx = nx
        self.hx = [(self.bound[0,1]-self.bound[0,0])/self.nx[0],
                   (self.bound[1,1]-self.bound[1,0])/self.nx[1]]
        self.prob = prob
        self.size = (self.nx[0] + 1)*(self.nx[1] + 1)
        self.X = torch.zeros(self.size,2)
        m = 0
        for i in range(self.nx[0] + 1):
            for j in range(self.nx[1] + 1):
                self.X[m,0] = self.bound[0,0] + i*self.hx[0]
                self.X[m,1] = self.bound[1,0] + j*self.hx[1]
                m = m + 1
        #plt.scatter(self.X[:,0],self.X[:,1])
        self.u_acc = UU(self.X,[0,0],prob) 
        self.pp = PP(self.X,[0,0],prob) 
class LEN():
    def __init__(self,bound,mu):
        self.mu = mu
        self.bound = bound
        self.hx = bound[:,1] - bound[:,0]
    def forward(self,X):
        L = 1.0
        for i in range(2):
            L = L*(1 - (1 - (X[:,i] - self.bound[i,0])/self.hx[i])**self.mu)
            L = L*(1 - (1 - (self.bound[i,1] - X[:,i])/self.hx[i])**self.mu)
        return L.view(-1,1)
        

np.random.seed(1234)
torch.manual_seed(1234)
class NETG(nn.Module):#u = netg + lenthfactor*netf; this is netg
    def __init__(self):
        super(NETG,self).__init__()
        self.fc1 = torch.nn.Linear(2,10)
        self.fc2 = torch.nn.Linear(10,10)
        self.fc3 = torch.nn.Linear(10,2)
    def forward(self,x):
        out = torch.sin(self.fc1(x)) + x@torch.eye(x.size(-1),10)
        out = torch.sin(self.fc2(out)) + out@torch.eye(out.size(-1),10)
        #note x.size(-1): it picks the trailing dimension (2) whether samples come in as [m,2] (BDSET/TESET) or [m,4,2] (INSET)
        return self.fc3(out)
    
class SIN(nn.Module):#the sin^order activation used inside NETF's Res blocks
    def __init__(self,order):
        super(SIN,self).__init__()
        self.e = order
    def forward(self,x):
        return torch.sin(x)**self.e
class Res(nn.Module):
    def __init__(self,input_size,output_size):
        super(Res,self).__init__()
        self.model = nn.Sequential(
            nn.Linear(input_size,output_size),
            SIN(1),
            nn.Linear(output_size,output_size),
            SIN(1)
        )
        self.input = input_size
        self.output = output_size
    def forward(self,x):
        out = self.model(x) + x@torch.eye(x.size(-1),self.output)#residual connection: identity mapped up to the output width
        #note x.size(-1): it picks the trailing dimension (2) whether samples come in as [m,2] (BDSET/TESET) or [m,4,2] (INSET)
        return out
class NETF(nn.Module):#u = netg + lenthfactor*netf; this is netf, which fits the interior values (outputs u1, u2, p)
    def __init__(self):
        super(NETF,self).__init__()
        self.model = nn.Sequential(
            Res(2,10),
            Res(10,10),
            Res(10,10),
            Res(10,10)
        )
        self.fc = torch.nn.Linear(10,3)
    def forward(self,x):
        out = self.model(x)
        return self.fc(out)
def pred(netg,netf,lenth,X):
    return netg.forward(X) + lenth.forward(X)*netf.forward(X)[:,0:2]
    
def error(u_pred,u_acc):
    tmp = (u_pred - u_acc).reshape(-1,2)
    return max(max(abs(tmp[:,0])),max(abs(tmp[:,1])))
    
def Lossg(netg,bdset):#fit the Dirichlet boundary data; a plain boundary loss
    ub = netg.forward(bdset.X)
    return ((ub - bdset.u_acc)**2).mean()
    
def Traing(netg, bdset, optimg, epochg):
    print('train neural network g')
    lossbest = Lossg(netg,bdset)
    
    print('epoch:%d,lossg:%.3e'%(0,lossbest.item()))
    torch.save(netg.state_dict(),'best_netg.pkl')
    cycle = 100
    for i in range(epochg):
        st = time.time()
        '''
        for j in range(cycle):
            optimg.zero_grad()
            lossg = Lossg(netg,bdset)
            lossg.backward()
            optimg.step()
        '''
        def closure():
            optimg.zero_grad()
            lossg = Lossg(netg,bdset)
            lossg.backward()
            return lossg
        optimg.step(closure)
        
        lossg = Lossg(netg,bdset)
        if lossg < lossbest:
            lossbest = lossg
            torch.save(netg.state_dict(),'best_netg.pkl')
        ela = time.time() - st
        print('epoch:%d,lossg:%.3e,time:%.2f'%((i + 1)*cycle,lossg.item(),ela))
#----------------------
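#Lossf below assembles the same weak residual, but now against the
#K = 2*nx[0]*nx[1] global test functions v = sin(m*pi*x_hat)*cos(n*pi*y_hat)*lenth
#precomputed in inset.v / inset.vx; u, grad u and grad p are broadcast to shape
#(K, size, ·) so all K residuals are formed in one tensor expression.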
def Lossf(netf,inset):#the equation carries a viscosity nu = 0.5
    nu = 0.5
    inset.X.requires_grad = True
    
    insetF = netf.forward(inset.X)[:,0:2]
    
    insetFx1, = torch.autograd.grad(insetF[:,0:1], inset.X, create_graph=True, retain_graph=True,
                                    grad_outputs=torch.ones(inset.size,1))#partial derivatives of u_1
    insetFx2, = torch.autograd.grad(insetF[:,1:2], inset.X, create_graph=True, retain_graph=True,
                                    grad_outputs=torch.ones(inset.size,1))#partial derivatives of u_2
    P = netf.forward(inset.X)[:,2:3]
    PX, = torch.autograd.grad(P, inset.X, create_graph=True, retain_graph=True,
                              grad_outputs=torch.ones(inset.size,1))
    U1 = inset.G1 + inset.L*insetF[:,0:1]
    U2 = inset.G2 + inset.L*insetF[:,1:2]
    
    UX1 = inset.Gx1 + inset.Lx*insetF[:,0:1] + inset.L*insetFx1#chain rule with cached G and L terms to speed up iterations
    UX2 = inset.Gx2 + inset.Lx*insetF[:,1:2] + inset.L*insetFx2#chain rule with cached G and L terms to speed up iterations
    
    ux1 = UX1.reshape(1,UX1.shape[0],UX1.shape[1]);ux2 = UX2.reshape(1,UX2.shape[0],UX2.shape[1])
    u1 = U1.reshape(1,U1.shape[0],U1.shape[1]);u2 = U2.reshape(1,U2.shape[0],U2.shape[1])
    
    Px = PX.reshape(1,PX.shape[0],PX.shape[1]);right = inset.right.reshape(1,inset.right.shape[0],inset.right.shape[1])
    
    K = inset.vx.shape[0]
    ux1 = ux1.repeat(K,1,1);ux2 = ux2.repeat(K,1,1)
    u1 = u1.repeat(K,1,1);u2 = u2.repeat(K,1,1)
    Px = Px.repeat(K,1,1);right = right.repeat(K,1,1)
    out1 = nu*(inset.vx*ux1).sum(2) + ((u1*ux1[:,:,0:1] + u2*ux1[:,:,1:2] + Px[:,:,0:1] - right[:,:,0:1])*inset.v).sum(2)
    out2 = nu*(inset.vx*ux2).sum(2) + ((u1*ux2[:,:,0:1] + u2*ux2[:,:,1:2] + Px[:,:,1:2] - right[:,:,1:2])*inset.v).sum(2)
    out_ = (ux1[:,:,0:1] + ux2[:,:,1:2]).sum(2)#divergence ∇·u (unlike the first version, not weighted by inset.v)
    
    out = (out1.sum(1))**2 + (out2.sum(1))**2 + (out_**2).sum(1)
    return out.mean()

    
def Trainf(netf, inset,optimf, epochf):
    print('train neural network f')
    ERROR,BUZHOU = [],[]
    lossoptimal = Lossf(netf,inset).data
    trainerror = error(pred(netg,netf,lenth,inset.X),inset.u_acc)#relies on the module-level netg and lenth defined below
    print('epoch: %d, loss: %.3e, trainerror: %.3e'
          %(0, lossoptimal.item(), trainerror.item()))
    torch.save(netf.state_dict(),'best_netf.pkl')
    cycle = 100
    for i in range(epochf):
        st = time.time()
        '''
        for j in range(cycle):
            optimf.zero_grad()
            lossf = Lossf(netf,inset)
            lossf.backward()
            optimf.step()
        '''
        def closure():
            optimf.zero_grad()
            lossf = Lossf(netf,inset)
            lossf.backward()
            return lossf
        optimf.step(closure)
        lossf = Lossf(netf,inset)
        if lossf < lossoptimal:
            lossoptimal = lossf
            torch.save(netf.state_dict(),'best_netf.pkl')
        ela = time.time() - st
        trainerror = error(pred(netg,netf,lenth,inset.X),inset.u_acc)
        ERROR.append(trainerror.item())
        BUZHOU.append((i + 1)*cycle)
        print('epoch:%d,lossf:%.3e,train error:%.3e,time:%.2f'%
             ((i + 1)*cycle,lossf.item(),trainerror.item(),ela))
    return ERROR,BUZHOU
def Train(netg, netf, lenth, inset, bdset, optimg, optimf, epochg, epochf):
    # Train neural network g
    Traing(netg, bdset, optimg, epochg)

    netg.load_state_dict(torch.load('best_netg.pkl'))
    
    # Calculate the length factor
    inset.X.requires_grad = True
    inset.L = lenth.forward(inset.X)
    inset.Lx, = torch.autograd.grad(inset.L, inset.X,#gradient of the length factor w.r.t. the interior inputs
                                    create_graph=True, retain_graph=True,
                                    grad_outputs=torch.ones(inset.size,1))
    
    
    inset.L = inset.L.data; inset.Lx = inset.Lx.data
    
    
    print(inset.L.shape)
    
    inset.G = netg.forward(inset.X)
    inset.G1 = inset.G[:,0:1];inset.G2 = inset.G[:,1:2]
    inset.Gx1, = torch.autograd.grad(inset.G1, inset.X,#gradient of netg's first output w.r.t. the interior inputs
                                     create_graph=True, retain_graph=True,
                                     grad_outputs=torch.ones(inset.size,1))
    inset.Gx2, = torch.autograd.grad(inset.G2, inset.X,#gradient of netg's second output w.r.t. the interior inputs
                                     create_graph=True, retain_graph=True,
                                     grad_outputs=torch.ones(inset.size,1))
    inset.G = inset.G.data; inset.Gx1 = inset.Gx1.data;inset.Gx2 = inset.Gx2.data
    inset.G1 = inset.G1.data;inset.G2 = inset.G2.data
    # Train neural network f
    ERROR,BUZHOU = Trainf(netf, inset, optimf, epochf)
    return ERROR,BUZHOU


bound = torch.tensor([[-0.5,1],[-0.5,1.5]]).float()
nx = [10,10]
nx_te = [40,40]
prob = 1
mu = 4
lr = 1e-3

inset = INSET(bound,nx,prob)
bdset = BDSET(bound,nx,prob)
teset = TESET(bound,nx_te,prob)

lenth = LEN(bound,mu)
netg = NETG()
netf = NETF()

#optimg = torch.optim.Adam(netg.parameters(), lr=lr)
#optimf = torch.optim.Adam(netf.parameters(), lr=lr)
optimg = torch.optim.LBFGS(netg.parameters(), lr=lr,max_iter = 100,line_search_fn = 'strong_wolfe')
optimf = torch.optim.LBFGS(netf.parameters(), lr=lr,max_iter = 100,line_search_fn = 'strong_wolfe')
epochg = 10
epochf = 10
start_time = time.time()
ERROR,BUZHOU = Train(netg, netf, lenth, inset, bdset, optimg, optimf, epochg, epochf)
#print(ERROR,BUZHOU)
elapsed = time.time() - start_time
print('Train time: %.2f' %(elapsed))

netg.load_state_dict(torch.load('best_netg.pkl'))
netf.load_state_dict(torch.load('best_netf.pkl'))
te_U = pred(netg, netf, lenth, teset.X)
testerror = error(te_U, teset.u_acc)
print('testerror = %.3e\n' %(testerror.item()))

plt.plot(BUZHOU,ERROR,'r*')
plt.plot(BUZHOU,ERROR)
plt.title('the absolute error for example of prob = %d at iteration,petrov'%(prob))
plt.savefig('pe.jpg')


(Figure: absolute training error over the iterations for prob = 1, Petrov test functions.)

Extension

Borrowing the idea of WAN, we can build a network net with 2 input nodes and m output nodes, which yields m test functions at once; plugging them into the Petrov scheme above, we can obtain our solution without ever training net. (The code gets awkward, though: feeding in data of shape (100,2) produces outputs of shape (100,m), and we then need to differentiate to get an (m,100,2) tensor of the corresponding gradients; PyTorch offers no single function for this vector-to-vector differentiation. A sketch of one workaround follows below.)
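Here is a sketch under the shapes described above (TestNet and its layer sizes are hypothetical names of mine, not from WAN): since each sample passes through the network independently, one autograd.grad call per output channel recovers the whole (m, 100, 2) gradient tensor.

import torch
import torch.nn as nn

class TestNet(nn.Module):  # hypothetical net: 2 inputs -> m test functions
    def __init__(self, m):
        super(TestNet, self).__init__()
        self.fc1 = nn.Linear(2, 20)
        self.fc2 = nn.Linear(20, m)
    def forward(self, x):
        return self.fc2(torch.sin(self.fc1(x)))

m, N = 8, 100
net = TestNet(m)
X = torch.rand(N, 2, requires_grad=True)
V = net(X)                                   # shape (N, m): m test functions at N points
grads = torch.zeros(m, N, 2)
for k in range(m):                           # one backward pass per test function
    gk, = torch.autograd.grad(V[:, k].sum(), X, retain_graph=True)
    grads[k] = gk                            # dV_k/dX at every sample point
print(grads.shape)                           # torch.Size([8, 100, 2])

Newer PyTorch versions also provide torch.autograd.functional.jacobian, but applied naively it returns the full (N, m, N, 2) Jacobian, most of which is zero here because the samples are independent.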

Reposted from blog.csdn.net/forrestguang/article/details/115247291