Physics-informed Neural Networks (PINN)
Table of Contents
Why do data-driven ‘black-box’ methods fail?
Physics-Informed Neural Networks (PINNs)
For example, consider the one-dimensional diffusion (heat) equation with diffusivity $\lambda$:
$$
\frac{\partial u}{\partial t} - \lambda \frac{\partial^2 u}{\partial x^2} = 0
$$
A PINN is trained by minimizing a weighted sum of a data-misfit term (evaluated where measurements $u_i$ are available) and a PDE-residual term (evaluated at collocation points):
$$
\begin{align*}
\mathcal{L} &= \omega_{\text{data}} \mathcal{L}_{\text{data}} + \omega_{\text{PDE}} \mathcal{L}_{\text{PDE}}\\
\mathcal{L}_{\text{data}} &= \frac{1}{N_{\text{data}}} \sum_{i=1}^{N_{\text{data}}} \left(u(x_i,t_i) - u_i \right)^2 \\
\mathcal{L}_{\text{PDE}} &= \frac{1}{N_{\text{PDE}}} \sum_{j=1}^{N_{\text{PDE}}} \left(\frac{\partial u}{\partial t} - \lambda \frac{\partial^2 u}{\partial x^2} \right)^2 \Bigg|_{(x_j,\,t_j)}
\end{align*}
$$
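The residual in $\mathcal{L}_{\text{PDE}}$ is computed with automatic differentiation. Below is a minimal sketch (not code from this notebook) of how it could be evaluated in TensorFlow for the diffusion equation above; `u_net` is assumed to be a Keras model taking a single $(x, t)$ input of shape $(N, 2)$, and `lam` is a placeholder diffusivity.
import tensorflow as tf

lam = 0.1  # placeholder diffusivity (not specified in the text)

def pde_residual(u_net, x, t):
    # Residual u_t - lam * u_xx at collocation points; x and t are (N, 1) tensors
    with tf.GradientTape(persistent = True) as tape2:
        tape2.watch([x, t])
        with tf.GradientTape(persistent = True) as tape1:
            tape1.watch([x, t])
            u = u_net(tf.concat([x, t], axis = 1))
        u_t = tape1.gradient(u, t)   # first derivative in t
        u_x = tape1.gradient(u, x)   # first derivative in x
    u_xx = tape2.gradient(u_x, x)    # second derivative in x
    return u_t - lam * u_xx

# The PDE part of the loss is then the mean squared residual:
# L_PDE = tf.reduce_mean(tf.square(pde_residual(u_net, x, t)))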
The idea of solving differential equations with neural networks goes back to Lagaris et al. (1998): represent the solution by a neural network and train that network to satisfy the conditions required by the differential equation.
Consider a system of ordinary differential equations
$$ {u^\prime} = f(u,t) $$with $t \in [0, 1]$ and a known initial condition $u(0) = u_0$.
To solve this, we approximate the solution by a neural network:
$$ {\text{NN}(t)} \approx {u(t)} $$If $\text{NN}(t)$ were the true solution, it would satisfy $\text{NN}'(t) = f\left(\text{NN}(t),t \right)$ for all $t$. We turn this condition into our loss function:
$${L(\omega)} = \sum_{i} \left(\frac{d \text{NN}(t_i)}{dt}-f\left(\text{NN}(t_i),t_i \right)\right)^2$$
The collocation points $t_i$ can be chosen in many ways: sampled at random, placed on a grid, etc. In any case, when this loss function is minimized (with gradients computed by standard reverse-mode automatic differentiation), we have $\frac{d \text{NN}(t_i)}{dt} \approx f(\text{NN}(t_i),t_i)$, and thus $\text{NN}(t)$ approximately solves the differential equation.
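For instance, two common ways to pick the collocation points on $[0, 1]$ (purely illustrative, not from the original text):
import numpy as np

n = 100
t_random = np.random.rand(n)        # n points sampled uniformly at random in [0, 1]
t_grid = np.linspace(0.0, 1.0, n)   # n evenly spaced grid points in [0, 1]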
Note that we still have to handle the initial condition. One simple way to do this is to add an initial condition term to the cost function.
$${L(\omega)} = \sum_{i} \left(\frac{d \text{NN}(t_i)}{dt}-f(\text{NN}(t_i),t_i)\right)^2 + (\text{NN}(0)-u_0)^2 $$
where $\omega$ are the parameters that define the neural network $\text{NN}$ approximating $u$. Once again, the problem reduces to finding the weights that minimize a loss function!
As a concrete example, consider the initial value problem
$$\frac{du}{dt} = \cos2\pi t, \qquad u(0) = 1,$$
whose exact solution is
$$u(t) = \frac{1}{2\pi}\sin2\pi t + 1$$
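This closed form follows directly by integrating both sides and applying $u(0) = 1$:
$$
u(t) = u(0) + \int_0^t \cos 2\pi s \, ds = 1 + \frac{1}{2\pi}\sin 2\pi t
$$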
# !pip install tensorflow-probability
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import random
NN = tf.keras.models.Sequential([
    tf.keras.layers.Input((1,)),
    tf.keras.layers.Dense(units = 32, activation = 'tanh'),
    tf.keras.layers.Dense(units = 1)
])
NN.summary()
optm = tf.keras.optimizers.Adam(learning_rate = 0.001)
The training loss combines the mean squared ODE residual at the collocation points with the initial-condition penalty:
$$L_{\text{ODE}} = \frac{1}{n}\sum_{i=1}^{n} \left(\frac{d \text{NN}(t_i)}{dt}- \cos2\pi t_i\right)^2$$
$$L_{\text{IC}} = \left({\text{NN}(0)}- 1\right)^2$$
$$L_{\text{Total}} = L_{\text{ODE}} + L_{\text{IC}}$$
def ode_system(t, net):
    # Collocation points as a column tensor
    t = t.reshape(-1,1)
    t = tf.constant(t, dtype = tf.float32)
    t_0 = tf.zeros((1,1))   # initial point t = 0
    one = tf.ones((1,1))    # initial value u(0) = 1

    with tf.GradientTape() as tape:
        tape.watch(t)
        u = net(t)
    u_t = tape.gradient(u, t)                 # dNN/dt via automatic differentiation

    ode_loss = u_t - tf.math.cos(2*np.pi*t)   # ODE residual
    IC_loss = net(t_0) - one                  # initial-condition residual

    square_loss = tf.square(ode_loss) + tf.square(IC_loss)
    total_loss = tf.reduce_mean(square_loss)

    return total_loss
train_t = (np.random.rand(30)*2).reshape(-1, 1)   # 30 random collocation points in [0, 2]
train_loss_record = []
for itr in range(6000):
    with tf.GradientTape() as tape:
        train_loss = ode_system(train_t, NN)
    train_loss_record.append(train_loss.numpy())

    # Gradient of the loss with respect to the network weights
    grad_w = tape.gradient(train_loss, NN.trainable_variables)
    optm.apply_gradients(zip(grad_w, NN.trainable_variables))

    if itr % 1000 == 0:
        print(train_loss.numpy())
plt.figure(figsize = (10,8))
plt.plot(train_loss_record)
plt.show()
test_t = np.linspace(0, 2, 100)
train_u = np.sin(2*np.pi*train_t)/(2*np.pi) + 1
true_u = np.sin(2*np.pi*test_t)/(2*np.pi) + 1
pred_u = NN.predict(test_t.reshape(-1, 1)).ravel()
plt.figure(figsize = (10,8))
plt.plot(train_t, train_u, 'ok', label = 'Train')
plt.plot(test_t, true_u, '-k',label = 'True')
plt.plot(test_t, pred_u, '--r', label = 'Prediction')
plt.legend(fontsize = 15)
plt.xlabel('t', fontsize = 15)
plt.ylabel('u', fontsize = 15)
plt.show()
# !pip install deepxde
from deepxde.backend.set_default_backend import set_default_backend
set_default_backend("tensorflow")
We now solve the same initial value problem with the DeepXDE library.
$$\frac{du}{dt} = \cos2\pi t$$
$$u(0) = 1$$
$$u(t) = \frac{1}{2\pi}\sin2\pi t + 1$$
import tensorflow as tf
import deepxde as dde
import numpy as np
import matplotlib.pyplot as plt
import math as m
pi = tf.constant(m.pi)
def ode_system(t, u):
    # Residual of du/dt - cos(2*pi*t)
    du_t = dde.grad.jacobian(u, t)
    return du_t - tf.math.cos(2*pi*t)
def boundary(t, on_initial):
    return on_initial and np.isclose(t[0], 0)
geom = dde.geometry.TimeDomain(0, 2)
ic = dde.IC(geom, lambda t: 1, boundary)
# Reference solution to compute the error
def true_solution(t):
    return np.sin(2*np.pi*t)/(2*np.pi) + 1
data = dde.data.PDE(geom,
ode_system,
ic,
num_domain = 30,
num_boundary = 2,
solution = true_solution,
num_test = 100)
layer_size = [1] + [32] + [1]
activation = "tanh"
initializer = "Glorot uniform"
NN = dde.maps.FNN(layer_size, activation, initializer)
model = dde.Model(data, NN)
model.compile("adam", lr = 0.001)
losshistory, train_state = model.train(epochs = 6000)
dde.saveplot(losshistory, train_state, issave = False, isplot = True)
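As an additional sanity check (not part of the original notebook), the trained model can be evaluated with `model.predict` and compared against the reference solution:
# Compare the DeepXDE prediction with the reference solution on [0, 2]
t = np.linspace(0, 2, 100).reshape(-1, 1)
plt.figure(figsize = (10,8))
plt.plot(t, true_solution(t), '-k', label = 'True')
plt.plot(t, model.predict(t), '--r', label = 'PINN prediction')
plt.legend(fontsize = 15)
plt.xlabel('t', fontsize = 15)
plt.ylabel('u', fontsize = 15)
plt.show()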
Next, consider a fourth-order boundary value problem on $x \in [0,1]$:
$${\partial^4 y \over \partial x^4} + 1 = 0, \qquad \text{where} \quad x \in [0,1]$$
with boundary conditions
$$y(0) = 0$$
$$y'(0) = 0$$
$$y''(1) = 0, \quad y'''(1) = 0$$
and exact solution
$$y(x) = -{1 \over 24}x^4 + {1 \over 6}x^3 - {1 \over 4}x^2$$
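The reference solution can be recovered by integrating the equation four times and fixing the constants with the boundary conditions:
$$
\begin{align*}
y''' &= -x + C_1, & y'''(1) = 0 \;\Rightarrow\; C_1 &= 1\\
y'' &= -\tfrac{x^2}{2} + x + C_2, & y''(1) = 0 \;\Rightarrow\; C_2 &= -\tfrac{1}{2}\\
y' &= -\tfrac{x^3}{6} + \tfrac{x^2}{2} - \tfrac{x}{2} + C_3, & y'(0) = 0 \;\Rightarrow\; C_3 &= 0\\
y &= -\tfrac{x^4}{24} + \tfrac{x^3}{6} - \tfrac{x^2}{4} + C_4, & y(0) = 0 \;\Rightarrow\; C_4 &= 0
\end{align*}
$$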
import deepxde as dde
import numpy as np
def ddy(x, y):
    return dde.grad.hessian(y, x)

def dddy(x, y):
    return dde.grad.jacobian(ddy(x, y), x)

def pde(x, y):
    dy_xx = ddy(x, y)
    dy_xxxx = dde.grad.hessian(dy_xx, x)
    return dy_xxxx + 1
def boundary_left(x, on_boundary):
    return on_boundary and np.isclose(x[0], 0)

def boundary_right(x, on_boundary):
    return on_boundary and np.isclose(x[0], 1)
geom = dde.geometry.Interval(0, 1)
bc1 = dde.DirichletBC(geom, lambda x: 0, boundary_left) # u(0) = 0
bc2 = dde.NeumannBC(geom, lambda x: 0, boundary_left) # u'(0) = 0
bc3 = dde.OperatorBC(geom, lambda x, y, _: ddy(x, y), boundary_right) # u''(1) = 0
bc4 = dde.OperatorBC(geom, lambda x, y, _: dddy(x, y), boundary_right) # u'''(1) = 0
# Reference solution to compute the error
def true_solution(x):
    return -(x ** 4) / 24 + x ** 3 / 6 - x ** 2 / 4
data = dde.data.PDE(geom,
pde,
[bc1, bc2, bc3, bc4],
num_domain = 10,
num_boundary = 2,
solution = true_solution,
num_test = 100)
layer_size = [1] + [20] * 3 + [1]
activation = "tanh"
initializer = "Glorot uniform"
net = dde.maps.FNN(layer_size, activation, initializer)
model = dde.Model(data, net)
model.compile("adam", lr = 0.001)
losshistory, train_state = model.train(epochs = 5000)
dde.saveplot(losshistory, train_state, issave = False, isplot = True)
# !pip install deepxde
import tensorflow as tf
import deepxde as dde
import numpy as np
import matplotlib.pyplot as plt
from deepxde.backend.set_default_backend import set_default_backend
set_default_backend("tensorflow")
Finally, consider steady, incompressible, laminar flow between two parallel plates (channel flow) with the following properties:
$$\rho = 1\operatorname{kg/m^3}, \quad \mu = 1\operatorname{N\cdot s/m^2}, \quad D = 2h = 1\operatorname{m}, \quad
L = 2\operatorname{m}, \quad u_{in} = 1\operatorname{m/s}, \quad \nu = \frac{\mu}{\rho}$$
The hydraulic diameter and Reynolds number are
$$D_h = \lim\limits_{b\to\infty} {4(2bh) \over {2b+4h}} = 4h = 2\operatorname{m}$$
$$Re = \frac{\rho u_{in} D_h}{\mu} = 2 $$
The flow is governed by the steady incompressible Navier–Stokes equations (x-momentum, y-momentum, and continuity):
$$
\begin{align*}
u{\partial u \over \partial x} + v{\partial u \over \partial y} + {1 \over \rho}{\partial p \over \partial x} - \nu \ \left({\partial^2 u \over {\partial x^2}} + {\partial^2 u \over {\partial y^2}}\right) &= 0\\
u{\partial v \over \partial x} + v{\partial v \over \partial y} + {1 \over \rho}{\partial p \over \partial y} - \nu \ \left({\partial^2 v \over {\partial x^2}} + {\partial^2 v \over {\partial y^2}}\right) &= 0\\
{\partial u \over \partial x} + {\partial v \over \partial y} &= 0
\end{align*}
$$
The boundary conditions are no-slip at the walls, a uniform velocity at the inlet, and zero pressure at the outlet:
$$u(x,y) = 0, \quad v(x,y) = 0 \qquad \text{at} \quad y = \frac{D}{2} \ \; \text{or} \; -\frac{D}{2}$$
$$u(-1,y) = u_{\text{in}}, \quad v(-1,y) = 0$$
$$p(1,y) = 0, \quad v(1,y) = 0$$
# Properties
rho = 1
mu = 1
u_in = 1
D = 1
L = 2
def boundary_wall(X, on_boundary):
    on_wall = np.logical_and(np.logical_or(np.isclose(X[1], -D/2), np.isclose(X[1], D/2)), on_boundary)
    return on_wall

def boundary_inlet(X, on_boundary):
    return on_boundary and np.isclose(X[0], -L/2)

def boundary_outlet(X, on_boundary):
    return on_boundary and np.isclose(X[0], L/2)
def pde(X, Y):
    # Y = (u, v, p); X = (x, y)
    du_x = dde.grad.jacobian(Y, X, i = 0, j = 0)
    du_y = dde.grad.jacobian(Y, X, i = 0, j = 1)
    dv_x = dde.grad.jacobian(Y, X, i = 1, j = 0)
    dv_y = dde.grad.jacobian(Y, X, i = 1, j = 1)
    dp_x = dde.grad.jacobian(Y, X, i = 2, j = 0)
    dp_y = dde.grad.jacobian(Y, X, i = 2, j = 1)
    du_xx = dde.grad.hessian(Y, X, i = 0, j = 0, component = 0)
    du_yy = dde.grad.hessian(Y, X, i = 1, j = 1, component = 0)
    dv_xx = dde.grad.hessian(Y, X, i = 0, j = 0, component = 1)
    dv_yy = dde.grad.hessian(Y, X, i = 1, j = 1, component = 1)

    # x-momentum, y-momentum, and continuity residuals
    pde_u = Y[:,0:1]*du_x + Y[:,1:2]*du_y + 1/rho * dp_x - (mu/rho)*(du_xx + du_yy)
    pde_v = Y[:,0:1]*dv_x + Y[:,1:2]*dv_y + 1/rho * dp_y - (mu/rho)*(dv_xx + dv_yy)
    pde_cont = du_x + dv_y

    return [pde_u, pde_v, pde_cont]
geom = dde.geometry.Rectangle(xmin=[-L/2, -D/2], xmax=[L/2, D/2])
bc_wall_u = dde.DirichletBC(geom, lambda X: 0., boundary_wall, component = 0)
bc_wall_v = dde.DirichletBC(geom, lambda X: 0., boundary_wall, component = 1)
bc_inlet_u = dde.DirichletBC(geom, lambda X: u_in, boundary_inlet, component = 0)
bc_inlet_v = dde.DirichletBC(geom, lambda X: 0., boundary_inlet, component = 1)
bc_outlet_p = dde.DirichletBC(geom, lambda X: 0., boundary_outlet, component = 2)
bc_outlet_v = dde.DirichletBC(geom, lambda X: 0., boundary_outlet, component = 1)
data = dde.data.PDE(geom,
pde,
[bc_wall_u, bc_wall_v, bc_inlet_u, bc_inlet_v, bc_outlet_p, bc_outlet_v],
num_domain = 2000,
num_boundary = 200,
num_test = 100)
plt.figure(figsize = (10,8))
plt.scatter(data.train_x_all[:,0], data.train_x_all[:,1], s = 0.5)
plt.xlabel('x-direction length')
plt.ylabel('Distance from the middle of plates (m)')
plt.show()
layer_size = [2] + [64] * 5 + [3]
activation = "tanh"
initializer = "Glorot uniform"
net = dde.maps.FNN(layer_size, activation, initializer)
model = dde.Model(data, net)
model.compile("adam", lr = 1e-3)
losshistory, train_state = model.train(epochs = 10000)
dde.saveplot(losshistory, train_state, issave = False, isplot = True)
samples = geom.random_points(500000)
result = model.predict(samples)
color_legend = [[0, 1.5], [-0.3, 0.3], [0, 35]]
# Plot the predicted u, v, and p fields
for idx in range(3):
    plt.figure(figsize = (20, 4))
    plt.scatter(samples[:, 0],
                samples[:, 1],
                c = result[:, idx],
                cmap = 'jet',
                s = 2)
    plt.colorbar()
    plt.clim(color_legend[idx])
    plt.xlim((0-L/2, L-L/2))
    plt.ylim((0-D/2, D-D/2))
    plt.tight_layout()
    plt.show()
dde.optimizers.config.set_LBFGS_options(maxiter = 3000)
model.compile("L-BFGS")
losshistory, train_state = model.train()
dde.saveplot(losshistory, train_state, issave = False, isplot = True)
samples = geom.random_points(500000)
result = model.predict(samples)
color_legend = [[0, 1.5], [-0.3, 0.3], [0, 35]]
for idx in range(3):
    plt.figure(figsize = (20, 4))
    plt.scatter(samples[:, 0],
                samples[:, 1],
                c = result[:, idx],
                cmap = 'jet',
                s = 2)
    plt.colorbar()
    plt.clim(color_legend[idx])
    plt.xlim((0-L/2, L-L/2))
    plt.ylim((0-D/2, D-D/2))
    plt.tight_layout()
    plt.show()
For fully developed laminar flow between parallel plates, the velocity profile at the outlet is parabolic:
$$u(y) = {3V_{avg} \over 2} \left[ 1- \left( \frac{y}{h} \right)^2 \right]$$
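The factor $3/2$ follows from mass conservation: the average of the parabolic profile over the channel height must equal the (uniform) inlet velocity, so the centerline velocity is $1.5\,u_{in}$:
$$
V_{avg} = \frac{1}{2h}\int_{-h}^{h} u_{max}\left[1 - \left(\frac{y}{h}\right)^2\right] dy = \frac{2}{3}\,u_{max}
\quad\Rightarrow\quad u_{max} = \frac{3}{2}\,V_{avg} = \frac{3}{2}\,u_{in}
$$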
# Analytic solution
x = np.ones([1000,1])
y = np.linspace(-0.5, 0.5, 1000).reshape(1000,1)
outlet = np.hstack([x, y])
analytic_solution = u_in * 1.5 * (1 - ((y)/(D/2))**2)
PINN_solution = model.predict(outlet)
plt.figure(figsize = (10,8))
plt.plot(y, PINN_solution[:, 0], c = 'tomato', linewidth = 3, label = 'PINN solution')
plt.plot(y, analytic_solution, c = 'k', linestyle = '--', label = 'Analytic solution')
plt.xticks(np.linspace(-0.5, 0.5, 5), fontsize = 15)
plt.yticks(np.linspace(0, 1.5, 11), fontsize = 15)
plt.legend(fontsize = 12)
plt.xlabel('Distance from the middle of plates (m)', fontsize = 15)
plt.ylabel('Velocity ($u$)', fontsize = 15)
plt.show()