Physics-Informed Neural Networks

Here we show how to implement some of the ideas presented in [3].

Example - 1D Poisson (DNN3 \(\sigma = \tanh\))

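For reference, the boundary-value problem solved below is the 1D Poisson equation with homogeneous Dirichlet boundary conditions,

\[ -u''(x) = \pi^2 \sin(\pi x), \quad x \in (-1,1), \qquad u(-1) = u(1) = 0, \]

whose exact solution is \(u(x) = \sin(\pi x)\); this is the curve plotted against the network output at the end of the example.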
[90]:
import tensorflow as tf
from tensorflow.keras.constraints import min_max_norm
import logging
tf.get_logger().setLevel(logging.ERROR)
import numpy as np
import matplotlib.pyplot as plt
from numsa.TFHessian import *
from random import *
from tqdm.notebook import tqdm


tf.random.set_seed(7)
############| SETTINGS |############
a = -1; b = 1;
N = 50; #Neurons in each hidden layer
N_EvEl = 101; #Number of interior collocation (evaluation) points
itmax = 10000;
step = 1e-3;
tol=1e-32
gamma = tf.Variable(1e0,dtype=np.float32)
def f(x):
    return (np.pi**2)*np.sin(np.pi*x);
####################################

#Pnts = np.linspace(a,b,N_EvEl)


Pnts = np.array([0.0])
Pnts = np.append(Pnts,np.random.uniform(a,b,N_EvEl))
Pnts = np.append(Pnts,1.0)
#Pnts = np.random.uniform(a,b,N_EvEl)

Pnts = np.sort(Pnts)

u = tf.keras.Sequential([
    tf.keras.layers.Dense(N,input_dim=1, activation='tanh'),#Hidden Layer
    tf.keras.layers.Dense(N,activation="tanh"),
    tf.keras.layers.Dense(N,activation="tanh"),
    tf.keras.layers.Dense(1,use_bias=False)#Output layer (no bias).
])

#We create an evaluation mesh
mesh = tf.Variable([[point] for point in Pnts]);
F = tf.cast(tf.Variable([[-f(point)] for point in Pnts]),dtype=np.float32);
#Defining the loss function
def ColEnergy(weights):
    with tf.GradientTape() as gtape2:
        with tf.GradientTape() as gtape:
            uh = u(mesh, training=True)
        du_dx = gtape.gradient(uh, [mesh])
    #We use TF Gradient Tape to compute grad uh
    d2u_dx2 = gtape2.gradient(du_dx,[mesh])
    Delta = (d2u_dx2-F)
    Energy = (1/N_EvEl)*tf.reduce_sum(tf.square(Delta));
    #Adding the boundary conditions via a penalty term;
    Energy = Energy + (gamma/N_EvEl)*(tf.square(u(tf.Variable([[a]])))+tf.square(u(tf.Variable([[b]]))))
    return Energy;
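In terms of the network \(u_\theta\) and the collocation points \(x_i\), the loss assembled above is (here with \(\gamma = 1\))

\[ L(\theta) = \frac{1}{N_{\mathrm{EvEl}}} \sum_i \big(u_\theta''(x_i) + f(x_i)\big)^2 + \frac{\gamma}{N_{\mathrm{EvEl}}} \big(u_\theta(a)^2 + u_\theta(b)^2\big), \]

i.e. a mean-square collocation residual plus a penalty term enforcing the homogeneous Dirichlet boundary conditions.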
[91]:
print(u(mesh).shape)
print(ColEnergy(u.trainable_weights))
(103, 1)
tf.Tensor([[40.050835]], shape=(1, 1), dtype=float32)
[92]:
optimizer = tf.keras.optimizers.Adamax(learning_rate=step)

# Iterate over the batches of a dataset.
for it in tqdm(range(itmax)):
    # Open a GradientTape.
    with tf.GradientTape() as tape:
        Loss = ColEnergy(u.trainable_weights)
    # Get gradients of loss wrt the weights.
    gradients = tape.gradient(Loss, u.trainable_weights)
    res = sum([tf.norm(grad) for grad in gradients]);
    # Update the weights of the model.
    optimizer.apply_gradients(zip(gradients, u.trainable_weights))
    if res <= tol:
        break;
    if it%(itmax/10) == 0:
        print("[It. {}] Loss function value {} and residual {}.".format(it,Loss,res))

print("Loss function value {} and residual {}.".format(Loss,res))
[It. 0] Loss function value [[40.050835]] and residual 29.165576934814453.
[It. 1000] Loss function value [[0.00762097]] and residual 0.15168429911136627.
[It. 2000] Loss function value [[1.4110279e-05]] and residual 0.11412914842367172.
[It. 3000] Loss function value [[5.9321637e-06]] and residual 0.16223815083503723.
[It. 4000] Loss function value [[3.0792385e-06]] and residual 0.0004416355805005878.
[It. 5000] Loss function value [[2.884495e-06]] and residual 0.0009915133705362678.
[It. 6000] Loss function value [[2.7776434e-06]] and residual 0.008474547415971756.
[It. 7000] Loss function value [[3.0952278e-06]] and residual 0.05875520408153534.
[It. 8000] Loss function value [[1.87274e-05]] and residual 0.35876917839050293.
[It. 9000] Loss function value [[2.5348052e-06]] and residual 0.003511270973831415.
Loss function value [[0.00036402]] and residual 2.1709649562835693.
[93]:
plt.plot(mesh.numpy(),u(mesh).numpy(),"r*")
plt.plot(mesh.numpy(),np.sin(np.pi*mesh.numpy()),"b-")
[93]:
[<matplotlib.lines.Line2D at 0x7fe6286fbb50>]
../_images/TensorFlow_PDE-NN_5_1.png
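A quick accuracy check one could run in a further cell is to compare the network output with the exact solution at the collocation points (a minimal sketch, not part of the original notebook):

err = np.max(np.abs(u(mesh).numpy().flatten() - np.sin(np.pi*Pnts)))
print("Max pointwise error:", err)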

Example - 1D Poisson (DNN3 \(\sigma = \tanh\), more collocation points)

[42]:
import tensorflow as tf
from tensorflow.keras.constraints import min_max_norm
import logging
tf.get_logger().setLevel(logging.ERROR)
import numpy as np
import matplotlib.pyplot as plt
from numsa.TFHessian import *
from random import *
from tqdm.notebook import tqdm


tf.random.set_seed(7)
############| SETTINGS |############
a = -1; b = 1;
N = 50; #Neurons in each hidden layer
N_EvEl = 501; #Number of interior collocation (evaluation) points
itmax = 50000;
step = 1e-3;
tol=1e-32
gamma = tf.Variable(1e0,dtype=np.float32)
def f(x):
    return (np.pi**2)*np.sin(np.pi*x);
####################################

#Pnts = np.linspace(a,b,N_EvEl)


Pnts = np.array([0.0])
Pnts = np.append(Pnts,np.random.uniform(a,b,N_EvEl))
Pnts = np.append(Pnts,1.0)
#Pnts = np.random.uniform(a,b,N_EvEl)

Pnts = np.sort(Pnts)

u = tf.keras.Sequential([
    tf.keras.layers.Dense(N,input_dim=1, activation='tanh'),#Hidden Layer
    tf.keras.layers.Dense(N,activation="tanh"),
    tf.keras.layers.Dense(N,activation="tanh"),
    tf.keras.layers.Dense(1,use_bias=False)
])
#We create an evaluation mesh
mesh = tf.Variable([[point] for point in Pnts]);
F = tf.cast(tf.Variable([[-f(point)] for point in Pnts]),dtype=np.float32);
#Defining the loss function
def ColEnergy(weights):
    with tf.GradientTape() as gtape2:
        with tf.GradientTape() as gtape:
            uh = u(mesh, training=True)
        du_dx = gtape.gradient(uh, [mesh])
    #We use TF Gradient Tape to compute grad uh
    d2u_dx2 = gtape2.gradient(du_dx,[mesh])
    Delta = (d2u_dx2-F)
    Energy = (1/N_EvEl)*tf.reduce_sum(tf.square(Delta));
    #Adding the boundary conditions via a penalty term;
    Energy = Energy + (gamma/N_EvEl)*(tf.square(u(tf.Variable([[a]])))+tf.square(u(tf.Variable([[b]]))))
    return Energy;
optimizer = tf.keras.optimizers.Adamax(learning_rate=step)

def Residual(u,F,mesh):
    with tf.GradientTape() as gtape2:
        with tf.GradientTape() as gtape:
            uh = u(mesh, training=True)
        du_dx = gtape.gradient(uh, [mesh])
    #We use TF Gradient Tape to compute grad uh
    d2u_dx2 = gtape2.gradient(du_dx,[mesh])
    Delta = (d2u_dx2-F)
    return tf.abs(Delta)

# Iterate over the batches of a dataset.
for it in tqdm(range(itmax)):
    # Open a GradientTape.
    with tf.GradientTape() as tape:
        Loss = ColEnergy(u.trainable_weights)
    # Get gradients of loss wrt the weights.
    gradients = tape.gradient(Loss, u.trainable_weights)
    res = sum([tf.norm(grad) for grad in gradients]);
    # Update the weights of the model.
    optimizer.apply_gradients(zip(gradients, u.trainable_weights))
    if res <= tol:
        break;
    if it%(itmax/10) == 0:
        print("[It. {}] Loss function value {} and residual {}.".format(it,Loss,res))

print("Loss function value {} and residual {}.".format(Loss,res))
[It. 0] Loss function value [[48.291153]] and residual 30.685426712036133.
[It. 5000] Loss function value [[2.1055244e-05]] and residual 0.34498804807662964.
[It. 10000] Loss function value [[2.187425e-05]] and residual 0.4546234905719757.
[It. 15000] Loss function value [[9.026974e-06]] and residual 0.26926928758621216.
[It. 20000] Loss function value [[1.0501252e-06]] and residual 0.027121542021632195.
[It. 25000] Loss function value [[1.2005267e-05]] and residual 0.3423703610897064.
[It. 30000] Loss function value [[1.8846424e-06]] and residual 0.10099022090435028.
[It. 35000] Loss function value [[1.5217504e-05]] and residual 0.37315642833709717.
[It. 40000] Loss function value [[9.6387965e-05]] and residual 0.9605128169059753.
[It. 45000] Loss function value [[0.00021875]] and residual 1.4392529726028442.
Loss function value [[1.8259313e-06]] and residual 0.09424027800559998.
[43]:
print(np.sort(u.trainable_weights[1]))
[-0.35823503 -0.33831617 -0.33742264 -0.26369816 -0.25181732 -0.21676466
 -0.15891671 -0.15383002 -0.14794554 -0.1464389  -0.1438641  -0.14049067
 -0.1305898  -0.12797573 -0.12470637 -0.1222436  -0.12012127 -0.10821651
 -0.10432903 -0.0843064  -0.0820904  -0.06586344 -0.05771144 -0.0130109
  0.00157357  0.00846939  0.00968238  0.01053899  0.02348686  0.02790974
  0.04256134  0.06551021  0.07652889  0.07799305  0.07947817  0.09196392
  0.10043197  0.10841352  0.11284401  0.11348713  0.11466438  0.13370815
  0.15069737  0.18803813  0.1919146   0.20009439  0.21880934  0.22043915
  0.25118658  0.3693546 ]
[44]:
plt.figure()
plt.plot(mesh.numpy(),u(mesh).numpy(),"r*")
plt.plot(np.sort(u.trainable_weights[1]),np.zeros((len(np.sort(u.trainable_weights[1])),1)),"g*")
plt.plot(mesh.numpy(),np.sin(np.pi*mesh.numpy()),"b-")
plt.legend(["Bias Point","Numerical Solution","Exact Solution"])
plt.figure()
plt.plot(mesh.numpy(),Residual(u,F,mesh)[0].numpy(),"b-")
plt.legend(["Residual"])
[44]:
<matplotlib.legend.Legend at 0x7f84bc2aaf40>
../_images/TensorFlow_PDE-NN_9_1.png
../_images/TensorFlow_PDE-NN_9_2.png
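The curve plotted as the residual above is the pointwise strong-form residual at the collocation points,

\[ r(x_i) = \big| u_\theta''(x_i) + f(x_i) \big|, \]

which is exactly what the Residual helper returns.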

Example - 2D Poisson (DNN1 \(\sigma = \tanh\))

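Here the problem is the 2D Poisson equation on the unit square with homogeneous Dirichlet boundary conditions,

\[ -\Delta u(x,y) = 2\pi^2 \sin(\pi x)\sin(\pi y) \quad \text{in } (0,1)^2, \qquad u = 0 \ \text{on the boundary}, \]

whose exact solution is \(u(x,y) = \sin(\pi x)\sin(\pi y)\).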
[1]:
import tensorflow as tf
from tensorflow.keras.constraints import min_max_norm
import logging
tf.get_logger().setLevel(logging.ERROR)
import numpy as np
import matplotlib.pyplot as plt
from numsa.TFHessian import *
from random import *
from tqdm.notebook import tqdm
import itertools


tf.random.set_seed(7)
############| SETTINGS |############
a = 0; b = 0;
c = 1; d= 1;
N = 50; #Neurons in the hidden layer
N_EvEl = 1001; #Number of interior collocation (evaluation) points
N_EvEl_BC = 20; #Number of boundary collocation points per edge
itmax = 50000;
step = 1e-3;
tol=1e-32
data_type="float32"
gamma = tf.Variable(1e0,dtype=np.float32)
def f(x,y):
    return (2*np.pi**2)*np.sin(np.pi*x)*np.sin(np.pi*y);
####################################

#Pnts = np.linspace(a,b,N_EvEl)


xy_min = [a, b]
xy_max = [c, d]
Pnts = np.random.uniform(low=xy_min, high=xy_max, size=(N_EvEl,2))
BNDPnts = [[P,b] for P in np.random.uniform(a,c,N_EvEl_BC)]
BNDPnts = BNDPnts + [[c,P] for P in np.random.uniform(b,d,N_EvEl_BC)]
BNDPnts = BNDPnts + [[P,d] for P in np.random.uniform(a,c,N_EvEl_BC)]
BNDPnts = BNDPnts + [[a,P] for P in np.random.uniform(b,d,N_EvEl_BC)]
plt.figure();
plt.scatter([P[0] for P in Pnts],[P[1] for P in Pnts]);
plt.scatter([P[0] for P in BNDPnts],[P[1] for P in BNDPnts])
plt.title("Evaluation Points")

u = tf.keras.Sequential([
    tf.keras.layers.Dense(N,input_dim=2, activation='tanh'),#Hidden Layer
    #tf.keras.layers.Dense(N,activation="tanh"),
    #tf.keras.layers.Dense(N,activation="tanh"),
    tf.keras.layers.Dense(1,use_bias=False)
])
#We create an evaluation mesh
mesh = np.array([[point[0],point[1]] for point in Pnts],data_type);
mesh = tf.Variable(mesh)
BNDMesh = np.array([[point[0],point[1]] for point in BNDPnts],data_type);
BNDMesh = tf.Variable(BNDMesh)
F = tf.cast(tf.Variable([[-f(point[0],point[1])] for point in Pnts]),dtype=np.float32);
#Defining the loss function
def ColEnergy(weights):
    v = tf.Variable([[1.0,0.0] for i in range(N_EvEl)])
    with tf.autodiff.ForwardAccumulator(mesh,v) as acc:
        with tf.GradientTape() as tape:
            uh = u(mesh, training=True)
        gradu = tape.gradient(uh,mesh);
    d2u_dx2=acc.jvp(gradu)[:,0]
    v = tf.Variable([[0.0,1.0] for i in range(N_EvEl)])
    with tf.autodiff.ForwardAccumulator(mesh,v) as acc:
        with tf.GradientTape() as tape:
            uh = u(mesh, training=True)
        gradu = tape.gradient(uh,mesh);
    d2u_dy2=acc.jvp(gradu)[:,1]
    Delta = (d2u_dx2+d2u_dy2-tf.reshape(F,(N_EvEl,)))
    Energy = (1/(N_EvEl))*tf.reduce_sum(tf.square(Delta));
    #Adding the boundary conditions via a penalty term;
    Energy = Energy + (gamma/N_EvEl_BC)*tf.reduce_sum((tf.square(u(BNDMesh))))
    return Energy;


LossHistory = [];


optimizer = tf.keras.optimizers.Adamax(learning_rate=step)

# Iterate over the batches of a dataset.
for it in tqdm(range(itmax)):
    # Open a GradientTape.
    with tf.GradientTape() as tape:
        Loss = ColEnergy(u.trainable_weights)
    # Get gradients of loss wrt the weights.
    gradients = tape.gradient(Loss, u.trainable_weights)
    res = sum([tf.norm(grad) for grad in gradients]);
    # Update the weights of the model.
    optimizer.apply_gradients(zip(gradients, u.trainable_weights))
    if res <= tol:
        break;
    if it%(itmax/10) == 0:
        print("[It. {}] Loss function value {} and residual {}.".format(it,Loss,res))
    LossHistory = LossHistory + [Loss];

[It. 0] Loss function value 100.23746490478516 and residual 13.624043464660645.
[It. 5000] Loss function value 0.0036094917450100183 and residual 0.07286364585161209.
[It. 10000] Loss function value 0.00031002325704321265 and residual 0.0010910315904766321.
[It. 15000] Loss function value 0.00019777187844738364 and residual 0.0006636990001425147.
[It. 20000] Loss function value 0.00013671928900294006 and residual 0.0018985089845955372.
[It. 25000] Loss function value 0.00010311126243323088 and residual 0.0003722708497662097.
[It. 30000] Loss function value 8.403413085034117e-05 and residual 0.0022913324646651745.
[It. 35000] Loss function value 7.178398664109409e-05 and residual 0.0007917672046460211.
[It. 40000] Loss function value 6.29963687970303e-05 and residual 0.00032822656794451177.
[It. 45000] Loss function value 5.6171134929172695e-05 and residual 0.00020835785835515708.
../_images/TensorFlow_PDE-NN_11_2.png
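In 2D the second derivatives are computed with a forward-over-reverse pattern: an inner GradientTape (reverse mode) produces \(\nabla u\), and an outer ForwardAccumulator with tangent \(e_x = (1,0)\) (resp. \(e_y = (0,1)\)) takes a Jacobian-vector product of that gradient; its first (resp. second) component is \(\partial^2 u/\partial x^2\) (resp. \(\partial^2 u/\partial y^2\)). A minimal standalone sketch of the same pattern in 1D, using only TensorFlow and not part of the original notebook:

import tensorflow as tf

x = tf.Variable([[0.0], [0.5], [1.0]])             # a few evaluation points
v = tf.ones_like(x)                                # tangent direction along x
with tf.autodiff.ForwardAccumulator(x, v) as acc:  # forward mode on the outside
    with tf.GradientTape() as tape:                # reverse mode on the inside
        y = tf.sin(x)
    dy_dx = tape.gradient(y, x)                    # first derivative, cos(x)
d2y_dx2 = acc.jvp(dy_dx)                           # second derivative, approx. -sin(x)
print(d2y_dx2.numpy())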
[6]:
from scipy.interpolate import griddata
grid_x, grid_y = np.mgrid[0:1:100j, 0:1:100j]
def Residual(u,F,mesh):
    v = tf.Variable([[1.0,0.0] for i in range(N_EvEl)])
    with tf.autodiff.ForwardAccumulator(mesh,v) as acc:
        with tf.GradientTape() as tape:
            uh = u(mesh, training=True)
        gradu = tape.gradient(uh,mesh);
    d2u_dx2=acc.jvp(gradu)[:,0]
    v = tf.Variable([[0.0,1.0] for i in range(N_EvEl)])
    with tf.autodiff.ForwardAccumulator(mesh,v) as acc:
        with tf.GradientTape() as tape:
            uh = u(mesh, training=True)
        gradu = tape.gradient(uh,mesh);
    d2u_dy2=acc.jvp(gradu)[:,1]
    Delta = (d2u_dx2+d2u_dy2-tf.reshape(F,(N_EvEl,)))
    return tf.abs(Delta)


grid_u = griddata(mesh.numpy(), u(mesh).numpy().reshape(N_EvEl,),(grid_x, grid_y), method='nearest')
plt.figure()
plt.imshow(grid_u.T, extent=(0,1,0,1), origin='lower',cmap="jet")
plt.colorbar()
plt.title("Solution At Evaluation Point")

grid_res = griddata(mesh.numpy(), Residual(u,F,mesh).numpy().reshape(N_EvEl,),(grid_x, grid_y), method='nearest')
plt.figure()
plt.imshow(grid_res.T, extent=(0,1,0,1), origin='lower',cmap="jet")
plt.colorbar()
plt.title("Residual At Evaluation Point")
[6]:
Text(0.5, 1.0, 'Residual At Evaluation Point')
../_images/TensorFlow_PDE-NN_12_1.png
../_images/TensorFlow_PDE-NN_12_2.png
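As a rough accuracy check one could compare the interpolated solution with the exact solution on the same grid (a minimal sketch, not part of the original notebook):

exact = np.sin(np.pi*grid_x)*np.sin(np.pi*grid_y)
print("Max pointwise error on the grid:", np.max(np.abs(grid_u - exact)))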
[7]:
x = np.arange(0.0, 1.0, 0.01)
y = np.arange(0.0, 1.0, 0.01)
X, Y = np.meshgrid(x, y)
Z = 1*X;
for i in range(len(X)):
    for j in range(len(Y)):
        Z[i,j] = u(np.array([[X[i,j],Y[i,j]]]));
import matplotlib.ticker as plticker
fig,ax = plt.subplots()
loc = plticker.MultipleLocator(base=0.25)
ax.xaxis.set_major_locator(loc)
ax.yaxis.set_major_locator(loc)
plt.imshow(Z, cmap='jet',extent=(0,1,0,1))
plt.colorbar()
plt.grid(color='white', linestyle='-', linewidth=4)
../_images/TensorFlow_PDE-NN_13_0.png
[8]:
import matplotlib.ticker as plticker
fig,ax = plt.subplots()
ax.scatter([P[0] for P in Pnts],[P[1] for P in Pnts]);
ax.scatter([P[0] for P in BNDPnts],[P[1] for P in BNDPnts])
ax.set_title("Evaluation Points")
ax.set_aspect('equal', 'box')
loc = plticker.MultipleLocator(base=0.25)
ax.xaxis.set_major_locator(loc)
ax.yaxis.set_major_locator(loc)
plt.grid(color='red', which='major',linestyle='-', linewidth=2)
../_images/TensorFlow_PDE-NN_14_0.png
[9]:
print("Number of NN parameters: ",util_shape_product([l.shape  for l in u.trainable_weights]))
print([l.shape for l in u.trainable_weights])
Number of NN parameters:  200
[TensorShape([2, 50]), TensorShape([50]), TensorShape([50, 1])]
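The count agrees with the architecture: the 2→50 dense layer contributes 2·50 weights plus 50 biases, and the 50→1 output layer (no bias) contributes 50 more, for 200 in total. The same number is returned by Keras directly, e.g.

print(u.count_params())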

Example - 2D Poisson (DNN1 \(\sigma = \mathrm{sigmoid}\))

[10]:
import tensorflow as tf
from tensorflow.keras.constraints import min_max_norm
import logging
tf.get_logger().setLevel(logging.ERROR)
import numpy as np
import matplotlib.pyplot as plt
from numsa.TFHessian import *
from random import *
from tqdm.notebook import tqdm
import itertools


tf.random.set_seed(7)
############| SETTINGS |############
a = 0; b = 0;
c = 1; d= 1;
N = 50; #Neurons in the hidden layer
N_EvEl = 1001; #Number of interior collocation (evaluation) points
N_EvEl_BC = 20; #Number of boundary collocation points per edge
itmax = 50000;
step = 1e-3;
tol=1e-32
data_type="float32"
gamma = tf.Variable(1e0,dtype=np.float32)
def f(x,y):
    return (2*np.pi**2)*np.sin(np.pi*x)*np.sin(np.pi*y);
####################################

#Pnts = np.linspace(a,b,N_EvEl)


xy_min = [a, b]
xy_max = [c, d]
Pnts = np.random.uniform(low=xy_min, high=xy_max, size=(N_EvEl,2))
BNDPnts = [[P,b] for P in np.random.uniform(a,c,N_EvEl_BC)]
BNDPnts = BNDPnts + [[c,P] for P in np.random.uniform(b,d,N_EvEl_BC)]
BNDPnts = BNDPnts + [[P,d] for P in np.random.uniform(a,c,N_EvEl_BC)]
BNDPnts = BNDPnts + [[a,P] for P in np.random.uniform(b,d,N_EvEl_BC)]
plt.figure();
plt.scatter([P[0] for P in Pnts],[P[1] for P in Pnts]);
plt.scatter([P[0] for P in BNDPnts],[P[1] for P in BNDPnts])
plt.title("Evaluation Points")

u = tf.keras.Sequential([
    tf.keras.layers.Dense(N,input_dim=2, activation='sigmoid'),
    tf.keras.layers.Dense(1,use_bias=False)
])
#We create an evaluation mesh
mesh = np.array([[point[0],point[1]] for point in Pnts],data_type);
mesh = tf.Variable(mesh)
BNDMesh = np.array([[point[0],point[1]] for point in BNDPnts],data_type);
BNDMesh = tf.Variable(BNDMesh)
F = tf.cast(tf.Variable([[-f(point[0],point[1])] for point in Pnts]),dtype=np.float32);
#Defining the loss function
def ColEnergy(weights):
    v = tf.Variable([[1.0,0.0] for i in range(N_EvEl)])
    with tf.autodiff.ForwardAccumulator(mesh,v) as acc:
        with tf.GradientTape() as tape:
            uh = u(mesh, training=True)
        gradu = tape.gradient(uh,mesh);
    d2u_dx2=acc.jvp(gradu)[:,0]
    v = tf.Variable([[0.0,1.0] for i in range(N_EvEl)])
    with tf.autodiff.ForwardAccumulator(mesh,v) as acc:
        with tf.GradientTape() as tape:
            uh = u(mesh, training=True)
        gradu = tape.gradient(uh,mesh);
    d2u_dy2=acc.jvp(gradu)[:,1]
    Delta = (d2u_dx2+d2u_dy2-tf.reshape(F,(N_EvEl,)))
    Energy = (1/(N_EvEl))*tf.reduce_sum(tf.square(Delta));
    #Adding the boundary conditions via a penalty term;
    Energy = Energy + (gamma/N_EvEl_BC)*tf.reduce_sum((tf.square(u(BNDMesh))))
    return Energy;


LossHistory = [];


optimizer = tf.keras.optimizers.Adamax(learning_rate=step)

# Iterate over the batches of a dataset.
for it in tqdm(range(itmax)):
    # Open a GradientTape.
    with tf.GradientTape() as tape:
        Loss = ColEnergy(u.trainable_weights)
    # Get gradients of loss wrt the weights.
    gradients = tape.gradient(Loss, u.trainable_weights)
    res = sum([tf.norm(grad) for grad in gradients]);
    # Update the weights of the model.
    optimizer.apply_gradients(zip(gradients, u.trainable_weights))
    if res <= tol:
        break;
    if it%(itmax/10) == 0:
        print("[It. {}] Loss function value {} and residual {}.".format(it,Loss,res))
    LossHistory = LossHistory + [Loss];

[It. 0] Loss function value 100.32574462890625 and residual 32.43754959106445.
[It. 5000] Loss function value 0.16919295489788055 and residual 0.14852739870548248.
[It. 10000] Loss function value 0.00048557115951552987 and residual 0.0019718895200639963.
[It. 15000] Loss function value 0.00021293295139912516 and residual 0.007478305138647556.
[It. 20000] Loss function value 0.00013567889982368797 and residual 0.00391441211104393.
[It. 25000] Loss function value 0.00010585739801172167 and residual 0.024814754724502563.
[It. 30000] Loss function value 8.735847222851589e-05 and residual 0.012798226438462734.
[It. 35000] Loss function value 7.515062316088006e-05 and residual 0.005434154532849789.
[It. 40000] Loss function value 6.622089131269604e-05 and residual 0.0051046437583863735.
[It. 45000] Loss function value 5.938777030678466e-05 and residual 0.00028884882340207696.
../_images/TensorFlow_PDE-NN_17_2.png
[11]:
from scipy.interpolate import griddata
grid_x, grid_y = np.mgrid[0:1:100j, 0:1:100j]
def Residual(u,F,mesh):
    v = tf.Variable([[1.0,0.0] for i in range(N_EvEl)])
    with tf.autodiff.ForwardAccumulator(mesh,v) as acc:
        with tf.GradientTape() as tape:
            uh = u(mesh, training=True)
        gradu = tape.gradient(uh,mesh);
    d2u_dx2=acc.jvp(gradu)[:,0]
    v = tf.Variable([[0.0,1.0] for i in range(N_EvEl)])
    with tf.autodiff.ForwardAccumulator(mesh,v) as acc:
        with tf.GradientTape() as tape:
            uh = u(mesh, training=True)
        gradu = tape.gradient(uh,mesh);
    d2u_dy2=acc.jvp(gradu)[:,1]
    Delta = (d2u_dx2+d2u_dy2-tf.reshape(F,(N_EvEl,)))
    return tf.abs(Delta)


grid_u = griddata(mesh.numpy(), u(mesh).numpy().reshape(N_EvEl,),(grid_x, grid_y), method='nearest')
plt.figure()
plt.imshow(grid_u.T, extent=(0,1,0,1), origin='lower',cmap="jet")
plt.colorbar()
plt.title("Soltuion At Evaluation Point")

grid_res = griddata(mesh.numpy(), Residual(u,F,mesh).numpy().reshape(N_EvEl,),(grid_x, grid_y), method='nearest')
plt.figure()
plt.imshow(grid_res.T, extent=(0,1,0,1), origin='lower',cmap="jet")
plt.colorbar()
plt.title("Residual At Evaluation Point")
[11]:
Text(0.5, 1.0, 'Residual At Evaluation Point')
../_images/TensorFlow_PDE-NN_18_1.png
../_images/TensorFlow_PDE-NN_18_2.png
[16]:
x = np.arange(0.0, 1.0, 0.01)
y = np.arange(0.0, 1.0, 0.01)
X, Y = np.meshgrid(x, y)
Z = 1*X;
for i in range(len(X)):
    for j in range(len(Y)):
        Z[i,j] = u(np.array([[X[i,j],Y[i,j]]]));
import matplotlib.ticker as plticker
fig,ax = plt.subplots()
plt.imshow(Z, cmap='jet',extent=(0,1,0,1))
plt.colorbar()
[16]:
<matplotlib.colorbar.Colorbar at 0x7f1a1ad8f370>
../_images/TensorFlow_PDE-NN_19_1.png
[15]:
import matplotlib.ticker as plticker
fig,ax = plt.subplots()
ax.scatter([P[0] for P in Pnts],[P[1] for P in Pnts]);
ax.scatter([P[0] for P in BNDPnts],[P[1] for P in BNDPnts])
ax.set_title("Evaluation Points")
ax.set_aspect('equal', 'box')
../_images/TensorFlow_PDE-NN_20_0.png
[14]:
print("Number of NN parameters: ",util_shape_product([l.shape  for l in u.trainable_weights]))
print([l.shape for l in u.trainable_weights])
Number of NN parameters:  200
[TensorShape([2, 50]), TensorShape([50]), TensorShape([50, 1])]