Physics-informed Neural Networks (PINN)
Table of Contents
Advantages of data-driven approaches
Why do data-driven ‘black-box’ methods fail?
Limitations of pure data-driven approaches
Physics-Informed Neural Networks (PINNs)
NN as a universal function approximator
This result is originally due to Lagaris et al. (1998). The idea is to solve differential equations using neural networks: represent the solution by a neural network, and train the network to satisfy the conditions required by the differential equation.
Consider a system of ordinary differential equations
$$ u^\prime = f(u,t) $$ with $t \in [0, 1]$ and a known initial condition $u(0) = u_0$.
To solve this, we approximate the solution by a neural network:
$$ \text{NN}(t) \approx u(t) $$ If $\text{NN}(t)$ were the true solution, it would satisfy $\text{NN}'(t) = f\left(\text{NN}(t),t \right)$ for all $t$. We turn this condition into our loss function:
$$ L(\omega) = \sum_{i} \left( \frac{d\,\text{NN}(t_i)}{dt} - f\left(\text{NN}(t_i), t_i\right) \right)^2 $$
The points $t_i$ can be chosen in many ways: randomly, on a grid, etc. In any case, when this loss function is minimized (with gradients computed by standard reverse-mode automatic differentiation), we have $\frac{d\,\text{NN}(t_i)}{dt} \approx f(\text{NN}(t_i),t_i)$, and thus $\text{NN}(t)$ approximately solves the differential equation.
Note that we still have to handle the initial condition. One simple way to do this is to add an initial-condition term to the loss:
$$ L(\omega) = \sum_{i} \left( \frac{d\,\text{NN}(t_i)}{dt} - f\left(\text{NN}(t_i), t_i\right) \right)^2 + \left( \text{NN}(0) - u_0 \right)^2 $$
where $\omega$ are the parameters that define the neural network $\text{NN}$ that approximates $u$. Thus the problem reduces, once again, to simply finding weights that minimize a loss function!
Setup
Using TensorFlow 2
Install and Setup
# !pip install tensorflow-probability
Import Library
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import random
Define Network and Hyper-parameters
NN = tf.keras.models.Sequential([
    tf.keras.layers.Input((1,)),
    tf.keras.layers.Dense(units = 32, activation = 'tanh'),
    tf.keras.layers.Dense(units = 1)
])
NN.summary()
optm = tf.keras.optimizers.Adam(learning_rate = 0.001)
Define ODE System
As a concrete example, we solve $u'(t) = \cos(2\pi t)$ with $u(0) = 1$ on $t \in [0, 2]$, whose exact solution is $u(t) = \frac{\sin(2\pi t)}{2\pi} + 1$.
def ode_system(t, net):
    t = t.reshape(-1, 1)
    t = tf.constant(t, dtype = tf.float32)
    t_0 = tf.zeros((1, 1))
    one = tf.ones((1, 1))

    with tf.GradientTape() as tape:
        tape.watch(t)
        u = net(t)
    u_t = tape.gradient(u, t)

    ode_loss = u_t - tf.math.cos(2*np.pi*t)    # residual of u' = cos(2*pi*t)
    IC_loss = net(t_0) - one                   # residual of u(0) = 1

    square_loss = tf.square(ode_loss) + tf.square(IC_loss)
    total_loss = tf.reduce_mean(square_loss)
    return total_loss
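Before training, it helps to sanity-check that the loss evaluates on a batch of points (a quick usage sketch; the sample points below are arbitrary):
sample_t = np.linspace(0, 2, 10)
print(ode_system(sample_t, NN).numpy())    # loss of the untrained network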
Train & Prediction
train_t = (np.random.rand(30)*2).reshape(-1, 1)    # 30 random collocation points in [0, 2]
train_loss_record = []

for itr in range(6000):
    with tf.GradientTape() as tape:
        train_loss = ode_system(train_t, NN)
        train_loss_record.append(train_loss.numpy())

    grad_w = tape.gradient(train_loss, NN.trainable_variables)
    optm.apply_gradients(zip(grad_w, NN.trainable_variables))

    if itr % 1000 == 0:
        print(train_loss.numpy())
plt.figure(figsize = (10,8))
plt.plot(train_loss_record)
plt.show()
test_t = np.linspace(0, 2, 100).reshape(-1, 1)
train_u = np.sin(2*np.pi*train_t)/(2*np.pi) + 1    # exact solution at the training points
true_u = np.sin(2*np.pi*test_t)/(2*np.pi) + 1      # exact solution on the test grid
pred_u = NN.predict(test_t).ravel()
plt.figure(figsize = (10,8))
plt.plot(train_t, train_u, 'ok', label = 'Train')
plt.plot(test_t, true_u, '-k',label = 'True')
plt.plot(test_t, pred_u, '--r', label = 'Prediction')
plt.legend(fontsize = 15)
plt.xlabel('t', fontsize = 15)
plt.ylabel('u', fontsize = 15)
plt.show()
Using DeepXDE
Install and Setup
# !pip install deepxde
from deepxde.backend.set_default_backend import set_default_backend
set_default_backend("tensorflow")
Import Library
import tensorflow as tf
import deepxde as dde
import numpy as np
import matplotlib.pyplot as plt
import math as m
Define ODE System
pi = tf.constant(m.pi)
def ode_system(t, u):
    du_t = dde.grad.jacobian(u, t)
    return du_t - tf.math.cos(2*pi*t)
Define Initial Condition
def boundary(t, on_initial):
    return on_initial and np.isclose(t[0], 0)
Define Geometry & Implement Initial Condition
geom = dde.geometry.TimeDomain(0, 2)
ic = dde.IC(geom, lambda t: 1, boundary)
# Reference solution to compute the error
def true_solution(t):
    return np.sin(2*np.pi*t)/(2*np.pi) + 1
data = dde.data.PDE(geom,
                    ode_system,
                    ic,
                    num_domain = 30,
                    num_boundary = 2,
                    solution = true_solution,
                    num_test = 100)
Define Network and Hyper-parameters
layer_size = [1] + [32] + [1]
activation = "tanh"
initializer = "Glorot uniform"
NN = dde.maps.FNN(layer_size, activation, initializer)
model = dde.Model(data, NN)
model.compile("adam", lr = 0.001)
Train & Prediction
losshistory, train_state = model.train(epochs = 6000)
dde.saveplot(losshistory, train_state, issave = False, isplot = True)
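To compare against the closed-form solution, we can also query the trained DeepXDE model directly (a short sketch using `dde.Model.predict`, which accepts an (N, 1) NumPy array):
test_t = np.linspace(0, 2, 100).reshape(-1, 1)
pred_u = model.predict(test_t)                          # network prediction on the test grid
plt.plot(test_t, true_solution(test_t), '-k', label = 'True')
plt.plot(test_t, pred_u, '--r', label = 'Prediction')
plt.legend()
plt.show()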
Using PyTorch
Next we solve the deflection $u(x)$ of a cantilever beam under a uniformly distributed load, governed by the Euler-Bernoulli equation $EI\,u'''' = -q$; with $E = I = q = l = 1$ this reduces to $u'''' + q = 0$ on $x \in [0, 1]$. Mount Google Drive to access the pre-trained models used later.
from google.colab import drive
drive.mount('/content/drive')
Import Library
import numpy as np
import torch
import matplotlib.pyplot as plt
import os
import warnings
warnings.filterwarnings("ignore")
Define Parameters
E = 1    # Young's modulus
I = 1    # second moment of area
q = 1    # uniformly distributed load
l = 1    # beam length
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device)
Define Collocation Points
x = np.linspace(0, l, 25)
y = np.zeros_like(x)
x0 = x[0] # Left boundary of x = 0
x1 = x[-1] # Right boundary of x = 1
plt.figure(figsize = (6, 3))
plt.scatter(x, y, s = 10)
plt.scatter(x0, 0, label = 'x = 0', s = 10, c = 'red')
plt.scatter(x1, 0, label = 'x = 1', s = 10, c = 'red')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.title('Collocation Points with Boundaries')
plt.show()
Define Network and Hyper-parameters
import torch.nn as nn
from torch.nn import Linear, Tanh
class PINN(nn.Module):
    def __init__(self):
        super(PINN, self).__init__()
        self.net = nn.Sequential(
            Linear(1, 32),
            Tanh(),
            Linear(32, 32),
            Tanh(),
            Linear(32, 32),
            Tanh(),
            Linear(32, 32),
            Tanh(),
            Linear(32, 1),
            Tanh()
        )

    def forward(self, x):
        return self.net(x)
model = PINN().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr = 1e-3)
loss_fn = nn.MSELoss()
Define PDE System
def derivative(f, t):
    # df/dt via reverse-mode autodiff; create_graph = True enables higher-order derivatives
    return torch.autograd.grad(f,
                               t,
                               grad_outputs = torch.ones_like(f),
                               create_graph = True)[0]

def PDE(model, x):
    u = model(x)
    u_x = derivative(u, x)
    u_xx = derivative(u_x, x)
    u_xxx = derivative(u_xx, x)
    u_xxxx = derivative(u_xxx, x)
    pde = u_xxxx + q    # residual of u'''' + q = 0
    return pde
Define Boundary Conditions
def BC_left(model, x_0):
    # x = 0
    u_0 = model(x_0)
    bc1 = u_0
    u_x_0 = derivative(u_0, x_0)
    bc2 = u_x_0
    return bc1, bc2

def BC_right(model, x_f):
    # x = 1
    u_f = model(x_f)
    u_x_f = derivative(u_f, x_f)
    u_xx_f = derivative(u_x_f, x_f)
    bc3 = u_xx_f
    u_xxx_f = derivative(u_xx_f, x_f)
    bc4 = u_xxx_f
    return bc3, bc4
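These are the cantilever boundary conditions: at the clamped end, $u(0) = 0$ and $u'(0) = 0$; at the free end, zero bending moment and zero shear force, $u''(1) = 0$ and $u'''(1) = 0$.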
Train
def require_grad(x, device = device):
    return torch.tensor(x,
                        device = device,
                        dtype = torch.float32,
                        requires_grad = True)
epochs = 3001

x = require_grad(x.reshape(-1, 1))
x0 = require_grad(x0.reshape(-1, 1))
x1 = require_grad(x1.reshape(-1, 1))

for epoch in range(epochs):
    pde_term = PDE(model, x)
    bc1, bc2 = BC_left(model, x0)
    bc3, bc4 = BC_right(model, x1)

    loss_pde = loss_fn(pde_term, torch.zeros_like(pde_term).to(device))
    loss_bc1 = loss_fn(bc1, torch.zeros_like(bc1).to(device))
    loss_bc2 = loss_fn(bc2, torch.zeros_like(bc2).to(device))
    loss_bc3 = loss_fn(bc3, torch.zeros_like(bc3).to(device))
    loss_bc4 = loss_fn(bc4, torch.zeros_like(bc4).to(device))
    loss_bc = loss_bc1 + loss_bc2 + loss_bc3 + loss_bc4
    loss = loss_pde + loss_bc

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if epoch % 1000 == 0:
        print('Epoch: {} Loss: {:.4f} PDELoss: {:.4f} BCLoss: {:.4f}'.format(
            epoch, loss.item(), loss_pde.item(), loss_bc.item()))
Define Exact Solution
def exact_solution(x):
    return -(x ** 4) / 24 + x ** 3 / 6 - x ** 2 / 4
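This closed form follows from integrating $u'''' = -q$ four times and applying the four boundary conditions above; in general $u(x) = -\frac{q x^2}{24 EI}\left(x^2 - 4lx + 6l^2\right)$, which reduces to the expression in the code for $E = I = q = l = 1$.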
Result
x_ = np.linspace(0, l, 100)
x_ = torch.tensor(x_.reshape(-1, 1), dtype=torch.float32)
with torch.no_grad():
    model.cpu()
    model.eval()
    u_ = model(x_)
plt.figure(figsize = (6, 4))
plt.plot(x_, exact_solution(x_), c = 'k', label = 'Exact Solution', alpha = 0.5)
plt.plot(x_, u_.detach().numpy(), c = 'r', label = 'Predicted u', linestyle = '--', linewidth = 3)
plt.legend(fontsize = 10)
plt.xticks(fontsize = 10)
plt.yticks(fontsize = 10)
plt.xlabel('x', fontsize = 10)
plt.ylabel('Displacement, y', fontsize = 10)
plt.show()
We will now solve the thin-plate equations to find the displacement and stress distributions of a thin plate in 2D.
Based on Kirchhoff-Love plate theory, three hypotheses are used: straight lines normal to the mid-surface remain straight after deformation, they remain normal to the mid-surface, and the plate thickness does not change.
A non-uniform stretching force is applied to a square elastic plate.
Only one quarter of the plate is considered, since the geometry and the in-plane forces are symmetric.
Numerical Solution (= Exact Solution)
The numerical solution of this problem is illustrated in the figures below:
$x$- and $y$-direction displacements $u$ and $v$
Stresses $\sigma_{xx}$ and $\sigma_{yy}$
We will solve this problem using a PINN and then compare it with the numerical solution.
Import Library & Define Properties
import torch
import matplotlib.pyplot as plt
import numpy as np
import os
import warnings
warnings.filterwarnings("ignore")
# properties
E = 50.0    # Young's modulus
nu = 0.3    # Poisson's ratio
w = 10.0    # width of the quarter plate
f = 1.0     # magnitude of the applied traction
h = 1.0     # plate thickness
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
Define Collocation Points
Nx = 50 # Number of samples
Ny = 50 # Number of samples
x = torch.linspace(0, w, Nx) # Input data for x (Nx x 1)
y = torch.linspace(0, w, Ny) # Input data for y (Ny x 1)
xy = torch.meshgrid(x, y)
XY_domain = torch.cat([xy[0].reshape(-1, 1), xy[1].reshape(-1, 1)], dim=1)
XY_left = XY_domain[XY_domain[:, 0] == 0]
XY_right = XY_domain[XY_domain[:, 0] == w]
XY_top = XY_domain[XY_domain[:, 1] == w]
XY_bottom = XY_domain[XY_domain[:, 1] == 0]
plt.figure(figsize = (5, 5))
plt.scatter(XY_domain[:, 0], XY_domain[:, 1], s = 4, c = 'gray', alpha = 0.3)
plt.scatter(XY_bottom[:, 0], XY_bottom[:, 1], s = 4, c = 'red')
plt.scatter(XY_top[:, 0], XY_top[:, 1], s = 4, c = 'blue')
plt.scatter(XY_left[:, 0], XY_left[:, 1], s = 4, c = 'green')
plt.scatter(XY_right[:, 0], XY_right[:, 1], s = 4, c = 'purple')
plt.title('Collocation Points')
plt.xlabel('X')
plt.ylabel('Y')
plt.grid(True)
plt.show()
Define Network and Hyper-parameters
import torch.nn as nn
from torch.nn import Linear, Tanh
class PINN(nn.Module):
    def __init__(self):
        super(PINN, self).__init__()
        self.net = nn.Sequential(
            Linear(2, 64),
            Tanh(),
            Linear(64, 64),
            Tanh(),
            Linear(64, 64),
            Tanh(),
            Linear(64, 64),
            Tanh(),
            Linear(64, 64),
            Tanh(),
            Linear(64, 64),
            Tanh(),
            Linear(64, 2)
        )
        self._initialize_weights()

    '''
    Xavier initializer (Glorot initialization):
    Initializes the weights to keep the variance consistent across layers,
    preventing issues like vanishing or exploding gradients. Weights are
    drawn from a distribution with variance 2/(fan_in + fan_out).
    Biases are set to zero.
    '''
    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight)
                nn.init.zeros_(m.bias)

    def forward(self, x):
        x = x.float()
        output = self.net(x)
        u, v = output[:, 0:1], output[:, 1:2]
        return u, v
model = PINN().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
loss_fn = nn.MSELoss()
Define PDE with Boundary Conditions
def derivative(y, t):
    df = torch.autograd.grad(y, t, grad_outputs = torch.ones_like(y).to(device), create_graph = True)[0]
    df_x = df[:, 0:1]
    df_y = df[:, 1:2]
    return df_x, df_y
def PDE(model, XY_domain):
    u, v = model(XY_domain)

    du_x, du_y = derivative(u, XY_domain)
    dv_x, dv_y = derivative(v, XY_domain)
    du_xx, du_xy = derivative(du_x, XY_domain)
    _, du_yy = derivative(du_y, XY_domain)
    dv_xx, dv_xy = derivative(dv_x, XY_domain)
    _, dv_yy = derivative(dv_y, XY_domain)

    factor = E / (1 - nu ** 2)
    force_eq_x = factor * (du_xx + (1 - nu) / 2 * du_yy + (1 + nu) / 2 * dv_xy)
    force_eq_y = factor * (dv_yy + (1 - nu) / 2 * dv_xx + (1 + nu) / 2 * du_xy)
    return force_eq_x.float(), force_eq_y.float()
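Written out, the residuals enforced in `PDE` are the plane-stress equilibrium (Navier) equations in displacement form, with body forces neglected:
$$\frac{E}{1-\nu^2}\left(u_{xx} + \frac{1-\nu}{2}\,u_{yy} + \frac{1+\nu}{2}\,v_{xy}\right) = 0, \qquad \frac{E}{1-\nu^2}\left(v_{yy} + \frac{1-\nu}{2}\,v_{xx} + \frac{1+\nu}{2}\,u_{xy}\right) = 0$$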
def BC_left(model, left):
    u_left, _ = model(left)
    return u_left

def BC_bottom(model, bottom):
    _, v_bottom = model(bottom)
    return v_bottom

def BC_top(model, top):
    # Top edge
    u_top, v_top = model(top)
    u_top_x, u_top_y = derivative(u_top, top)
    v_top_x, v_top_y = derivative(v_top, top)
    sigma_yy = E / (1 - nu ** 2) * (v_top_y + nu * u_top_x)
    sigma_yx = E / 2 / (1 + nu) * (v_top_x + u_top_y)
    return sigma_yy, sigma_yx

def BC_right(model, right):
    # Right edge
    u_right, v_right = model(right)
    u_right_x, u_right_y = derivative(u_right, right)
    v_right_x, v_right_y = derivative(v_right, right)
    sigma_xx = E / (1 - nu ** 2) * (u_right_x + nu * v_right_y)
    sigma_xy = E / 2 / (1 + nu) * (v_right_x + u_right_y)
    sigma_ex = h * f * torch.cos(np.pi * right[:, 1:] / (2 * w))
    return sigma_xx, sigma_ex, sigma_xy
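To summarize the quarter-plate boundary conditions: symmetry on the left and bottom edges ($u = 0$ on $x = 0$, $v = 0$ on $y = 0$), a traction-free top edge ($\sigma_{yy} = \sigma_{yx} = 0$ on $y = w$), and the non-uniform stretching traction on the right edge, $\sigma_{xx} = h f \cos\left(\frac{\pi y}{2w}\right)$ with $\sigma_{xy} = 0$ on $x = w$.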
Train
def require_grad(x):
    return torch.tensor(x, dtype = torch.float32, requires_grad = True).to(device)
XY_domain = require_grad(XY_domain)
XY_bottom = require_grad(XY_bottom)
XY_top = require_grad(XY_top)
XY_left = require_grad(XY_left)
XY_right = require_grad(XY_right)
epochs = 10001
best_loss = np.inf
for epoch in range(epochs):
    force_eq_x, force_eq_y = PDE(model, XY_domain)
    loss_PDE_x = loss_fn(force_eq_x, torch.zeros_like(force_eq_x))
    loss_PDE_y = loss_fn(force_eq_y, torch.zeros_like(force_eq_y))
    loss_PDE = loss_PDE_x + loss_PDE_y

    bottom_v = BC_bottom(model, XY_bottom)
    left_u = BC_left(model, XY_left)
    top_sigyy, top_sigyx = BC_top(model, XY_top)
    right_sigxx, right_sigex, right_sigxy = BC_right(model, XY_right)

    loss_BC_bottom = loss_fn(bottom_v, torch.zeros_like(bottom_v))
    loss_BC_left = loss_fn(left_u, torch.zeros_like(left_u))
    loss_BC_top_1 = loss_fn(top_sigyy, torch.zeros_like(top_sigyy))
    loss_BC_top_2 = loss_fn(top_sigyx, torch.zeros_like(top_sigyx))
    loss_BC_top = loss_BC_top_1 + loss_BC_top_2
    loss_BC_right_1 = loss_fn(right_sigxx, right_sigex)
    loss_BC_right_2 = loss_fn(right_sigxy, torch.zeros_like(right_sigxy))
    loss_BC_right = loss_BC_right_1 + loss_BC_right_2
    loss_BC = loss_BC_bottom + loss_BC_left + loss_BC_top + loss_BC_right

    loss = loss_PDE + loss_BC

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if epoch % 1000 == 0:
        print('Epoch: {} Loss: {:.4f} PDELoss: {:.4f} BCLoss: {:.4f}'.format(epoch, loss.item(), loss_PDE.item(), loss_BC.item()))
Load the model parameters
The trained model will be used to avoid the long training time.
# GPU available
if torch.cuda.is_available():
    model = torch.load('/content/drive/MyDrive/DL/DL_data/lab2_adam_model.pt')
Plot Displacement and Stress
def check_stress(model, xy):
    u, v = model(xy)
    u_x, _ = derivative(u, xy)
    _, v_y = derivative(v, xy)
    sig_xx = E / (1 - nu ** 2) * (u_x + nu * v_y)
    sig_yy = E / (1 - nu ** 2) * (v_y + nu * u_x)
    return sig_xx, sig_yy
def plot_results(data, color_legend, titles, w):
    plt.figure(figsize = (8, 3))
    for idx in range(2):
        plt.subplot(1, 2, idx + 1)
        plt.scatter(test[:, 0], test[:, 1],
                    c = data[idx].detach().cpu().numpy(),
                    cmap = 'rainbow',
                    s = 5)
        plt.clim(color_legend[idx])
        plt.title(titles[idx])
        plt.axis('square')
        plt.xlim((0, w))
        plt.ylim((0, w))
        plt.colorbar()
    plt.tight_layout()
    plt.show()
Nx = 100 # Number of samples
Ny = 100 # Number of samples
x_test = torch.linspace(0, w, Nx) # Input data for x (Nx x 1)
y_test = torch.linspace(0, w, Ny) # Input data for y (Ny x 1)
xy_test = torch.meshgrid(x_test, y_test)
test = torch.cat([xy_test[0].reshape(-1, 1), xy_test[1].reshape(-1, 1)], dim = 1)
test_tensor = require_grad(test)
model.eval()
with torch.no_grad():
    result = model(test_tensor)

# check_stress uses autograd, so it must run outside torch.no_grad()
sigma_xx, sigma_yy = check_stress(model, test_tensor)
sigma = [sigma_xx, sigma_yy]
titles = ['x-displacement ($u$)', 'y-displacement ($v$)', '$\sigma_{xx}$', '$\sigma_{yy}$']
color_legend = [[0, 0.182], [-0.06, 0.011], [-0.0022, 1.0], [-0.15, 0.45]]
plot_results(result, color_legend[:2], titles[:2], w)
plot_results(sigma, color_legend[2:], titles[2:], w)
Comparison with FEM data
Plate_data = np.load('/content/drive/MyDrive/DL/DL_data/lab2_Plate_data.npy')
loc = Plate_data[:, 0:2]       # x, y coordinates
u = Plate_data[:, 2:3]         # x-displacement
v = Plate_data[:, 3:4]         # y-displacement
stress = Plate_data[:, 4:6]    # sigma_xx, sigma_yy
def RESULT(test_model, diag):
    diag = require_grad(diag)
    pde_u, pde_v = test_model(diag)
    pde_disp = np.hstack([pde_u.cpu().detach().numpy(), pde_v.cpu().detach().numpy()])
    sig_x, sig_y = check_stress(test_model, diag)
    pde_sig = np.hstack([sig_x.cpu().detach().numpy(), sig_y.cpu().detach().numpy()])
    return pde_disp, pde_sig
diag_ = [i for i in range(u.shape[0]) if loc[i, 0] + loc[i, 1] == 10]    # FEM nodes on the diagonal x + y = 10
diag_x = np.linspace(0, 10, 101).reshape(-1, 1)
diag_y = -diag_x + 10
diag = np.concatenate((diag_x, diag_y), 1)
model_Adam = torch.load('/content/drive/MyDrive/DL/DL_data/lab2_adam_model.pt')
results = {
    "FEM": [u[diag_], v[diag_], stress[diag_, 0], stress[diag_, 1]],
    "PINN": RESULT(model_Adam, diag),
}

for key in ["PINN"]:
    disp, sig = results[key]
    results[key] = [disp[:, 0], disp[:, 1], sig[:, 0], sig[:, 1]]
titles = ['x-displacement ($u$)', 'y-displacement ($v$)', '$\sigma_{xx}$', '$\sigma_{yy}$']
line_styles = {'FEM': 'k', 'PINN': '--'}
plt.figure(figsize = (7, 6))
for idx, title in enumerate(titles):
    plt.subplot(2, 2, idx + 1)
    for label, result in results.items():
        plt.plot(diag[:, 0], result[idx], line_styles[label], linewidth = 3, label = label)
    plt.xlabel('x')
    plt.ylabel(title)
    plt.legend()
plt.tight_layout()
plt.show()
Define Sample Data
x_values = [0, 2, 5, 10]
tr_sample = np.where(np.isin(loc[:, 0], x_values))[0]
x_sample = loc[tr_sample, :]
u_sample = u[tr_sample, :]
v_sample = v[tr_sample, :]
stress_sample = stress[tr_sample, :]
data = np.concatenate([x_sample, u_sample, v_sample, stress_sample], axis=1)
plt.figure(figsize = (5, 5))
plt.scatter(x_sample[:, 0], x_sample[:, 1], s = 1)
plt.xlim([-0.5, 10.5])
plt.xlabel('x')
plt.ylabel('y')
plt.show()
Define Data-Driven Loss term
def DATA(model, loc):
    u, v = model(loc)
    u_x, _ = derivative(u, loc)
    _, v_y = derivative(v, loc)
    sigma_xx = E / (1 - nu ** 2) * (u_x + nu * v_y)
    sigma_yy = E / (1 - nu ** 2) * (v_y + nu * u_x)
    sigma = torch.cat((sigma_xx, sigma_yy), dim = 1)
    return u, v, sigma
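Note that the observed stresses can supervise the network even though it only outputs displacements: under plane stress, $\sigma_{xx} = \frac{E}{1-\nu^2}(u_x + \nu v_y)$ and $\sigma_{yy} = \frac{E}{1-\nu^2}(v_y + \nu u_x)$, so `DATA` differentiates the predicted displacements and applies this constitutive law before comparing with the measurements.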
Train
model = PINN().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr = 1e-3)
loss_fn = nn.MSELoss()
epochs = 10001
best_loss = np.inf
loc = require_grad(data[:, 0:2])
gt_u = require_grad(data[:, 2:3])
gt_v = require_grad(data[:, 3:4])
gt_sigma = require_grad(data[:, 4:6])
for epoch in range(epochs):
    # PDE
    force_eq_x, force_eq_y = PDE(model, XY_domain)
    loss_PDE_x = loss_fn(force_eq_x, torch.zeros_like(force_eq_x))
    loss_PDE_y = loss_fn(force_eq_y, torch.zeros_like(force_eq_y))
    loss_PDE = loss_PDE_x + loss_PDE_y

    # BC
    bottom_v = BC_bottom(model, XY_bottom)
    left_u = BC_left(model, XY_left)
    top_sigyy, top_sigyx = BC_top(model, XY_top)
    right_sigxx, right_sigex, right_sigxy = BC_right(model, XY_right)
    loss_BC_bottom = loss_fn(bottom_v, torch.zeros_like(bottom_v))
    loss_BC_left = loss_fn(left_u, torch.zeros_like(left_u))
    loss_BC_top_1 = loss_fn(top_sigyy, torch.zeros_like(top_sigyy))
    loss_BC_top_2 = loss_fn(top_sigyx, torch.zeros_like(top_sigyx))
    loss_BC_top = loss_BC_top_1 + loss_BC_top_2
    loss_BC_right_1 = loss_fn(right_sigxx, right_sigex)
    loss_BC_right_2 = loss_fn(right_sigxy, torch.zeros_like(right_sigxy))
    loss_BC_right = loss_BC_right_1 + loss_BC_right_2
    loss_BC = loss_BC_bottom + loss_BC_left + loss_BC_top + loss_BC_right

    # DATA
    data_u, data_v, data_sigma = DATA(model, loc)
    loss_Data_u = loss_fn(data_u, gt_u)
    loss_Data_v = loss_fn(data_v, gt_v)
    loss_Data_sigma = loss_fn(data_sigma, gt_sigma)
    loss_Data = loss_Data_u + loss_Data_v + loss_Data_sigma

    # Total
    loss = loss_PDE + loss_BC + loss_Data

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if epoch % 1000 == 0:
        print('Epoch: {} Loss: {:.4f} PDELoss: {:.4f} BCLoss: {:.4f} DATALoss: {:.4f}'.format(epoch, loss.item(), loss_PDE.item(), loss_BC.item(), loss_Data.item()))
Load the model parameters
Plot Results
model = torch.load('/content/drive/MyDrive/DL/DL_data/lab2_data_model.pt')
with torch.no_grad():
    result = model(test_tensor)

# check_stress uses autograd, so it must run outside torch.no_grad()
sigma_xx, sigma_yy = check_stress(model, test_tensor)
sigma = [sigma_xx, sigma_yy]

plot_results(result, color_legend[:2], titles[:2], w)
plot_results(sigma, color_legend[2:], titles[2:], w)
Comparison with FEM data
loc = Plate_data[:, 0:2]
u = Plate_data[:, 2:3]
v = Plate_data[:, 3:4]
stress = Plate_data[:, 4:6]
def RESULT(test_model, diag):
    diag = require_grad(diag)
    pde_u, pde_v = test_model(diag)
    pde_disp = np.hstack([pde_u.cpu().detach().numpy(), pde_v.cpu().detach().numpy()])
    sig_x, sig_y = check_stress(test_model, diag)
    pde_sig = np.hstack([sig_x.cpu().detach().numpy(), sig_y.cpu().detach().numpy()])
    return pde_disp, pde_sig
diag_ = [i for i in range(u.shape[0]) if loc[i, 0] + loc[i, 1] == 10]
diag_x = np.linspace(0, 10, 101).reshape(-1, 1)
diag_y = -diag_x + 10
diag = np.concatenate((diag_x, diag_y), 1)
model_Adam = torch.load('/content/drive/MyDrive/DL/DL_data/lab2_adam_model.pt')
model_Data = torch.load('/content/drive/MyDrive/DL/DL_data/lab2_data_model.pt')
results = {
    "FEM": [u[diag_], v[diag_], stress[diag_, 0], stress[diag_, 1]],
    "PINN": RESULT(model_Adam, diag),
    "PINN + data": RESULT(model_Data, diag)
}

for key in ["PINN", "PINN + data"]:
    disp, sig = results[key]
    results[key] = [disp[:, 0], disp[:, 1], sig[:, 0], sig[:, 1]]
titles = ['x-displacement ($u$)', 'y-displacement ($v$)', '$\sigma_{xx}$', '$\sigma_{yy}$']
line_styles = {'FEM': 'k', 'PINN': '--', 'PINN + data': ':'}
plt.figure(figsize = (7, 6))
for idx, title in enumerate(titles):
    plt.subplot(2, 2, idx + 1)
    for label, result in results.items():
        plt.plot(diag[:, 0], result[idx], line_styles[label], linewidth = 3, label = label)
    plt.xlabel('x')
    plt.ylabel(title)
    plt.xlim((0, 10))
    plt.legend()
plt.show()
Next, we use a PINN to solve an inverse problem: identifying the unknown density $\rho$ and viscosity $\mu$ of a steady 2D flow past a cylinder from sparse observations of the flow field.
Import Library
import torch
import matplotlib.pyplot as plt
import numpy as np
import warnings
warnings.filterwarnings("ignore")
# true values of the unknown parameters (density and viscosity)
C1true = 1.0
C2true = 0.01
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device)
Load the Data
xy_domain = np.load('/content/drive/MyDrive/DL/DL_data/lab3_XY_domain.npy')
gt_domain = np.load('/content/drive/MyDrive/DL/DL_data/lab3_gt_domain.npy')
print(xy_domain.shape)
print(gt_domain.shape)
Define Collocation Points
# 'Boundary Conditions'
bc_top_x = np.linspace(-0.5, 1.5, 200).reshape(-1, 1)
bc_top_y = 0.5 * np.ones_like(bc_top_x).reshape(-1, 1)
bc_bottom_x = np.linspace(-0.5, 1.5, 200).reshape(-1, 1)
bc_bottom_y = -0.5 * np.ones_like(bc_bottom_x).reshape(-1, 1)
bc_inlet_y = np.linspace(-0.5, 0.5, 100).reshape(-1, 1)
bc_inlet_x = -0.5 * np.ones_like(bc_inlet_y).reshape(-1, 1)
bc_outlet_y = np.linspace(-0.5, 0.5, 100).reshape(-1, 1)
bc_outlet_x = 1.5 * np.ones_like(bc_outlet_y).reshape(-1, 1)
xy_top = np.concatenate((bc_top_x, bc_top_y), 1)
xy_bottom = np.concatenate((bc_bottom_x, bc_bottom_y), 1)
xy_inlet = np.concatenate((bc_inlet_x, bc_inlet_y), 1)
xy_outlet = np.concatenate((bc_outlet_x, bc_outlet_y), 1)
radius = 0.05
theta = np.linspace(0, 2 * np.pi, 200)
bc_cylinder_x = (0 + radius * np.cos(theta)).reshape(-1, 1)
bc_cylinder_y = (0 + radius * np.sin(theta)).reshape(-1, 1)
xy_cylinder = np.concatenate((bc_cylinder_x, bc_cylinder_y), 1)
xy_wall = np.concatenate((xy_top, xy_bottom, xy_cylinder))
plt.scatter(xy_domain[:, 0], xy_domain[:, 1], s = 1, label = 'domain')
plt.scatter(xy_wall[:, 0], xy_wall[:, 1], s = 5, label = 'wall')
plt.scatter(xy_inlet[:, 0], xy_inlet[:, 1], s = 5, label = 'inlet')
plt.scatter(xy_outlet[:, 0], xy_outlet[:, 1], s = 5, label = 'outlet')
plt.axis('scaled')
plt.legend()
plt.show()
Define Observation Points
idx_obs = np.random.choice(len(xy_domain), int(0.0025 * len(xy_domain)), replace = False)
xy_obs = xy_domain[idx_obs]
gt_obs = gt_domain[idx_obs]
print('XY obs: {}'.format(xy_obs.shape))
print('GT obs: {}'.format(gt_obs.shape))
plt.scatter(xy_domain[:, 0], xy_domain[:, 1], c = gt_domain[:, 0], s = 1, cmap = 'jet')
plt.scatter(xy_obs[:, 0], xy_obs[:, 1], marker = 'x', linewidths = 1, s = 20, c = 'k')
plt.axis('scaled')
plt.show()
Define PINN Network
import torch.nn as nn
from torch.nn import Linear, Tanh
class PINN(nn.Module):
    def __init__(self):
        super(PINN, self).__init__()
        self.net = nn.Sequential(
            Linear(2, 256),
            Tanh(),
            Linear(256, 256),
            Tanh(),
            Linear(256, 256),
            Tanh(),
            Linear(256, 256),
            Tanh(),
            Linear(256, 3),
        )
        self._initialize_weights()

    '''
    Xavier initializer (Glorot initialization):
    Initializes the weights to keep the variance consistent across layers,
    preventing issues like vanishing or exploding gradients. Weights are
    drawn from a distribution with variance 2/(fan_in + fan_out).
    Biases are set to zero.
    '''
    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight)
                nn.init.zeros_(m.bias)

    def forward(self, x):
        x = x.float()
        output = self.net(x)
        u, v, p = output[:, 0:1], output[:, 1:2], output[:, 2:3]
        return u, v, p
from IPython.display import clear_output

def PLOT(model, XY_domain, rho_list, vis_list):
    pred = model(XY_domain)

    plt.figure(figsize = (9, 2))
    plt.subplot(1, 3, 1)
    plt.title('CFD')
    plt.scatter(XY_domain[:, 0].detach().cpu().numpy(),
                XY_domain[:, 1].detach().cpu().numpy(),
                c = gt_domain[:, 0],
                s = 0.1, cmap = 'jet')
    plt.axis('scaled')
    plt.subplot(1, 3, 2)
    plt.title('Prediction')
    plt.scatter(XY_domain[:, 0].detach().cpu().numpy(),
                XY_domain[:, 1].detach().cpu().numpy(),
                c = pred[0].detach().cpu().numpy(),
                s = 0.1, cmap = 'jet')
    plt.axis('scaled')
    plt.subplot(1, 3, 3)
    plt.title('Error')
    plt.scatter(XY_domain[:, 0].detach().cpu().numpy(),
                XY_domain[:, 1].detach().cpu().numpy(),
                c = np.abs(pred[0].detach().cpu().numpy() - gt_domain[:, 0:1]),
                s = 0.1, cmap = 'jet')
    plt.axis('scaled')
    plt.tight_layout()
    plt.show()

    plt.figure(figsize = (6, 2))
    plt.subplot(1, 2, 1)
    plt.title('Rho Prediction')
    plt.plot(rho_list)
    plt.subplot(1, 2, 2)
    plt.title('Vis Prediction')
    plt.plot(vis_list)
    plt.tight_layout()
    plt.show()
    clear_output(wait = True)
Define Governing Equation and Boundary Conditions
def derivative(y, t):
    df = torch.autograd.grad(y, t, grad_outputs = torch.ones_like(y).to(device), create_graph = True)[0]
    df_x = df[:, 0:1]
    df_y = df[:, 1:2]
    return df_x, df_y
def PDE(model, XY_domain):
    u, v, p = model(XY_domain)

    du_x, du_y = derivative(u, XY_domain)
    dv_x, dv_y = derivative(v, XY_domain)
    dp_x, dp_y = derivative(p, XY_domain)
    du_xx, _ = derivative(du_x, XY_domain)
    _, du_yy = derivative(du_y, XY_domain)
    dv_xx, _ = derivative(dv_x, XY_domain)
    _, dv_yy = derivative(dv_y, XY_domain)

    pde_u = rho * (u * du_x + v * du_y) + dp_x - vis * (du_xx + du_yy)
    pde_v = rho * (u * dv_x + v * dv_y) + dp_y - vis * (dv_xx + dv_yy)
    pde_cont = du_x + dv_y
    return pde_u, pde_v, pde_cont
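These residuals are the steady 2D incompressible Navier-Stokes momentum equations and the continuity equation, with $\rho$ and $\mu$ treated as unknowns:
$$\rho\left(u u_x + v u_y\right) + p_x - \mu\left(u_{xx} + u_{yy}\right) = 0, \qquad \rho\left(u v_x + v v_y\right) + p_y - \mu\left(v_{xx} + v_{yy}\right) = 0, \qquad u_x + v_y = 0$$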
def BC_wall(model, XY_wall):
    u_wall, v_wall, _ = model(XY_wall)
    return u_wall, v_wall

def BC_inlet(model, XY_inlet):
    u_inlet, v_inlet, _ = model(XY_inlet)
    u_inlet = u_inlet - torch.ones_like(u_inlet).to(device)
    return u_inlet, v_inlet

def BC_outlet(model, XY_outlet):
    _, v_outlet, p_outlet = model(XY_outlet)
    return v_outlet, p_outlet
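The boundary conditions encoded here are: no-slip on the channel walls and the cylinder surface ($u = v = 0$), a uniform inlet velocity ($u = 1$, $v = 0$), and an outlet with $v = 0$ and $p = 0$.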
Numpy to Tensor
def require_grad(x):
    return torch.tensor(x, dtype = torch.float32, requires_grad = True).to(device)
XY_domain = require_grad(xy_domain)
XY_wall = require_grad(xy_wall)
XY_inlet = require_grad(xy_inlet)
XY_outlet = require_grad(xy_outlet)
XY_obs = require_grad(xy_obs)
gt_obs = require_grad(gt_obs)
'''
Initialize the unknown parameters rho and mu (vis) as trainable torch tensors
with initial value 1; they will be optimized jointly with the network weights.
'''
rho = torch.ones(1).to(device).requires_grad_(True).float()
vis = torch.ones(1).to(device).requires_grad_(True).float()
model = PINN().to(device)

# rho and vis are registered as extra parameter groups, so Adam updates them alongside the weights
optimizer = torch.optim.Adam([{'params': model.parameters(), 'lr': 1e-4},
                              {'params': rho, 'lr': 1e-4},
                              {'params': vis, 'lr': 1e-4}])
loss_fn = nn.MSELoss()
Train and Save the model
epoch = 0
best_loss = np.inf
loss_list = []
rho_list, vis_list = [], []
while epoch < 50001:
    PDE_u, PDE_v, PDE_cont = PDE(model, XY_domain)
    loss_PDE_u = loss_fn(PDE_u, torch.zeros_like(PDE_u))
    loss_PDE_v = loss_fn(PDE_v, torch.zeros_like(PDE_v))
    loss_PDE_cont = loss_fn(PDE_cont, torch.zeros_like(PDE_cont))
    loss_pde = loss_PDE_u + loss_PDE_v + loss_PDE_cont

    u_wall, v_wall = BC_wall(model, XY_wall)
    u_inlet, v_inlet = BC_inlet(model, XY_inlet)
    v_outlet, p_outlet = BC_outlet(model, XY_outlet)
    loss_BC_wall_u = loss_fn(u_wall, torch.zeros_like(u_wall))
    loss_BC_wall_v = loss_fn(v_wall, torch.zeros_like(v_wall))
    loss_BC_inlet_u = loss_fn(u_inlet, torch.zeros_like(u_inlet))
    loss_BC_inlet_v = loss_fn(v_inlet, torch.zeros_like(v_inlet))
    loss_BC_outlet_v = loss_fn(v_outlet, torch.zeros_like(v_outlet))
    loss_BC_outlet_p = loss_fn(p_outlet, torch.zeros_like(p_outlet))
    loss_bc = loss_BC_wall_u + loss_BC_wall_v + loss_BC_inlet_u + loss_BC_inlet_v + loss_BC_outlet_v + loss_BC_outlet_p

    u_obs, v_obs, p_obs = model(XY_obs)
    loss_data_u = loss_fn(u_obs, gt_obs[:, 0:1])
    loss_data_v = loss_fn(v_obs, gt_obs[:, 1:2])
    loss_data_p = loss_fn(p_obs, gt_obs[:, 2:3])
    loss_data = loss_data_u + loss_data_v + loss_data_p

    loss = loss_pde + loss_bc + 10 * loss_data    # the observation loss is weighted more heavily

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    loss_list.append(loss.item())
    rho_list.append(rho.item())
    vis_list.append(vis.item())

    if epoch % 1000 == 0:
        print('EPOCH : %6d/%6d | Loss_PDE : %5.4f | Loss_BC : %5.4f | Loss_DATA : %5.4f | RHO : %.4f | VIS : %.4f' \
              %(epoch, 50000, loss_pde.item(), loss_bc.item(), loss_data.item(), rho.item(), vis.item()))
        PLOT(model, XY_domain, rho_list, vis_list)

    epoch += 1
Result
rho_list = np.load('/content/drive/MyDrive/DL/DL_data/rho_list.npy', allow_pickle = True).tolist()
vis_list = np.load('/content/drive/MyDrive/DL/DL_data/vis_list.npy', allow_pickle = True).tolist()
with torch.no_grad():
    print('Predicted RHO: {:.4f}, Predicted VIS: {:.4f}'.format(rho_list[-1], vis_list[-1]))
    model = torch.load('/content/drive/MyDrive/DL/DL_data/weight/lab3.pt')
    PLOT(model, XY_domain, rho_list, vis_list)
Finally, we solve a steady heat conduction problem on a square domain where no boundary condition is imposed on the top edge, and study how adding sparse measurement data and prior knowledge improves the PINN solution.
Import Library
import torch
import matplotlib.pyplot as plt
import numpy as np
import warnings
warnings.filterwarnings("ignore")
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device)
Load the Data
gt_domain = np.load('/content/drive/MyDrive/DL/DL_data/lab4_gt_domain.npy')
Define Observation Points
plt.figure(figsize = (5, 4))
x = np.linspace(-1, 1, 100)
y = np.linspace(-1, 1, 100)
x, y = np.meshgrid(x, y)
idx_xs = np.array([15, 50, 85])
idx_ys = np.array([15, 50, 85])
xy_obs = []
gt_obs = []
for idx_x in idx_xs:
    for idx_y in idx_ys:
        xy_obs.append(np.concatenate((x[idx_x, idx_y].reshape(-1, 1),
                                      y[idx_x, idx_y].reshape(-1, 1)), 1)[0, :])
        gt_obs.append(gt_domain[idx_x, idx_y])
xy_obs = np.array(xy_obs)
gt_obs = np.array(gt_obs).reshape(-1, 1)
plt.pcolormesh(x, y, gt_domain, cmap = 'jet')
plt.colorbar()
plt.scatter(xy_obs[:, 0], xy_obs[:, 1], s = 30, c = 'r', cmap = 'jet')
plt.title('FDM')
plt.xlabel('x')
plt.ylabel('y')
plt.axis("square")
plt.show()
Define Collocation Points
# 'Domain Points'
x = np.linspace(-1, 1, 100)
y = np.linspace(-1, 1, 100)
x, y = np.meshgrid(x, y)
xy_domain = np.concatenate((x.reshape(-1, 1), y.reshape(-1, 1)), 1)
# 'Boundary Conditions'
bc_top_x = np.linspace(-1, 1, 100).reshape(-1, 1)
bc_top_y = np.ones_like(bc_top_x)
bc_bottom_x = np.linspace(-1, 1, 100).reshape(-1, 1)
bc_bottom_y = -1 * np.ones_like(bc_bottom_x)
bc_right_y = np.linspace(-1, 1, 100).reshape(-1, 1)
bc_right_x = np.ones_like(bc_right_y)
bc_left_y = np.linspace(-1, 1, 100).reshape(-1, 1)
bc_left_x = -1 * np.ones_like(bc_left_y)
xy_top = np.concatenate((bc_top_x, bc_top_y), 1)
xy_bottom = np.concatenate((bc_bottom_x, bc_bottom_y), 1)
xy_right = np.concatenate((bc_right_x, bc_right_y), 1)
xy_left = np.concatenate((bc_left_x, bc_left_y), 1)
plt.scatter(xy_domain[:, 0], xy_domain[:, 1], s = 1, alpha = 0.5)
plt.scatter(xy_top[:, 0], xy_top[:, 1], s = 5)
plt.scatter(xy_bottom[:, 0], xy_bottom[:, 1], s = 5)
plt.scatter(xy_right[:, 0], xy_right[:, 1], s = 5)
plt.scatter(xy_left[:, 0], xy_left[:, 1], s = 5)
plt.title('Collocation Points')
plt.axis('scaled')
plt.show()
Define PINN Network
import torch.nn as nn
from torch.nn import Linear, Tanh
class PINN(nn.Module):
    def __init__(self):
        super(PINN, self).__init__()
        self.net = nn.Sequential(
            Linear(2, 256),
            Tanh(),
            Linear(256, 256),
            Tanh(),
            Linear(256, 256),
            Tanh(),
            Linear(256, 256),
            Tanh(),
            Linear(256, 1),
        )
        self._initialize_weights()

    '''
    Xavier initializer (Glorot initialization):
    Initializes the weights to keep the variance consistent across layers,
    preventing issues like vanishing or exploding gradients. Weights are
    drawn from a distribution with variance 2/(fan_in + fan_out).
    Biases are set to zero.
    '''
    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight)
                nn.init.zeros_(m.bias)

    def forward(self, x):
        x = x.float()
        return self.net(x)
model = PINN().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
loss_fn = nn.MSELoss()
from IPython.display import clear_output

def PLOT(model_test, XY_domain):
    pred = model_test(XY_domain)

    plt.figure(figsize = (9, 2.3))
    plt.subplot(1, 3, 1)
    plt.title('FDM')
    plt.scatter(XY_domain[:, 0].detach().cpu().numpy(),
                XY_domain[:, 1].detach().cpu().numpy(),
                c = gt_domain.detach().cpu().numpy().reshape(-1, 1),
                vmin = 0, vmax = 1, s = 1, cmap = 'jet')
    plt.colorbar()
    plt.axis('scaled')
    plt.subplot(1, 3, 2)
    plt.title('Prediction')
    plt.scatter(XY_domain[:, 0].detach().cpu().numpy(),
                XY_domain[:, 1].detach().cpu().numpy(),
                c = pred.detach().cpu().numpy(),
                vmin = 0, vmax = 1, s = 1, cmap = 'jet')
    plt.colorbar()
    plt.axis('scaled')
    plt.subplot(1, 3, 3)
    plt.title('Error')
    plt.scatter(XY_domain[:, 0].detach().cpu().numpy(),
                XY_domain[:, 1].detach().cpu().numpy(),
                c = np.abs(pred.detach().cpu().numpy() - gt_domain.detach().cpu().numpy().reshape(-1, 1)),
                vmin = 0, vmax = 0.2, s = 1, cmap = 'jet')
    plt.colorbar()
    plt.axis('scaled')
    plt.tight_layout()
    plt.show()
    clear_output(wait = True)
Define Governing Equation and Boundary Conditions
def derivative(y, t):
    df = torch.autograd.grad(y, t,
                             grad_outputs = torch.ones_like(y).to(device),
                             create_graph = True)[0]
    df_x = df[:, 0:1]
    df_y = df[:, 1:2]
    return df_x, df_y
def PDE(model, XY_domain):
    T = model(XY_domain)
    dT_x, dT_y = derivative(T, XY_domain)
    dT_xx, _ = derivative(dT_x, XY_domain)
    _, dT_yy = derivative(dT_y, XY_domain)
    pde = 1 * (dT_xx + dT_yy)
    return pde
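The governing equation is steady-state heat conduction with constant conductivity, i.e., Laplace's equation:
$$T_{xx} + T_{yy} = 0$$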
def BC_bottom(model, XY_bottom):
    # T = 0 on the bottom edge
    T_bottom = model(XY_bottom)
    return T_bottom

def BC_right(model, XY_right):
    # T = 1 on the right edge
    T_right = model(XY_right)
    T_right = T_right - torch.ones_like(T_right).to(device)
    return T_right

def BC_left(model, XY_left):
    # T = 0.5 on the left edge
    T_left = model(XY_left)
    T_left = T_left - 0.5 * torch.ones_like(T_left).to(device)
    return T_left

# Note: no boundary condition is imposed on the top edge
Numpy to Tensor
def require_grad(x):
    return torch.tensor(x, dtype = torch.float32, requires_grad = True).to(device)
XY_domain = require_grad(xy_domain)
XY_obs = require_grad(xy_obs)
XY_bottom = require_grad(xy_bottom)
XY_right = require_grad(xy_right)
XY_left = require_grad(xy_left)
XY_top = require_grad(xy_top)
gt_domain = require_grad(gt_domain)
gt_obs = require_grad(gt_obs)
Train and Save the model
epochs = 20001
best_loss = np.inf
for epoch in range(epochs):
    pde_term = PDE(model, XY_domain)
    T_bottom = BC_bottom(model, XY_bottom)
    T_right = BC_right(model, XY_right)
    T_left = BC_left(model, XY_left)

    loss_pde = loss_fn(pde_term, torch.zeros_like(pde_term))
    loss_BC_bottom = loss_fn(T_bottom, torch.zeros_like(T_bottom))
    loss_BC_right = loss_fn(T_right, torch.zeros_like(T_right))
    loss_BC_left = loss_fn(T_left, torch.zeros_like(T_left))
    loss_bc = loss_BC_bottom + loss_BC_right + loss_BC_left

    loss = loss_pde + loss_bc

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if epoch % 1000 == 0:
        print('EPOCH : %6d/%6d| Loss : %5.4f | Loss_PDE : %5.4f | Loss_BC : %5.4f' \
              %(epoch, epochs-1, loss.item(), loss_pde.item(), loss_bc.item()))
        PLOT(model, XY_domain)
Result
Load the model parameters
with torch.no_grad():
    model = torch.load('/content/drive/MyDrive/DL/DL_data/lab4_wo_data.pt')
    PLOT(model, XY_domain)
Train with data
model = PINN().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr = 1e-4)
loss_fn = nn.MSELoss()
epochs = 20001
best_loss = np.inf
for epoch in range(epochs):
    pde_term = PDE(model, XY_domain)
    T_bottom = BC_bottom(model, XY_bottom)
    T_right = BC_right(model, XY_right)
    T_left = BC_left(model, XY_left)

    loss_pde = loss_fn(pde_term, torch.zeros_like(pde_term))
    loss_BC_bottom = loss_fn(T_bottom, torch.zeros_like(T_bottom))
    loss_BC_right = loss_fn(T_right, torch.zeros_like(T_right))
    loss_BC_left = loss_fn(T_left, torch.zeros_like(T_left))
    loss_bc = loss_BC_bottom + loss_BC_right + loss_BC_left

    # Data loss
    T_obs = model(XY_obs)
    loss_data = loss_fn(T_obs, gt_obs)

    loss = loss_pde + loss_bc + loss_data

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if epoch % 1000 == 0:
        print('EPOCH : %6d/%6d| Loss : %5.4f | Loss_PDE : %5.4f | Loss_BC : %5.4f | Loss_DATA : %5.4f' \
              %(epoch, epochs-1, loss.item(), loss_pde.item(), loss_bc.item(), loss_data.item()))
        PLOT(model, XY_domain)
Result
Load the model parameters
with torch.no_grad():
    model = torch.load('/content/drive/MyDrive/DL/DL_data/lab4_w_data.pt')
    PLOT(model, XY_domain)
Train again using data and prior knowledge
model = PINN().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr = 1e-4)
loss_fn = nn.MSELoss()
epochs = 20001
best_loss = np.inf
for epoch in range(epochs):
    # PDE loss
    pde_term = PDE(model, XY_domain)
    loss_pde = loss_fn(pde_term, torch.zeros_like(pde_term))

    # BC loss
    T_bottom = BC_bottom(model, XY_bottom)
    T_right = BC_right(model, XY_right)
    T_left = BC_left(model, XY_left)
    loss_BC_bottom = loss_fn(T_bottom, torch.zeros_like(T_bottom))
    loss_BC_right = loss_fn(T_right, torch.zeros_like(T_right))
    loss_BC_left = loss_fn(T_left, torch.zeros_like(T_left))
    loss_bc = loss_BC_bottom + loss_BC_right + loss_BC_left

    # Data loss
    T_obs = model(XY_obs)
    loss_data = loss_fn(T_obs, gt_obs)

    # Prior knowledge: the top-edge temperature is unknown but spatially uniform,
    # so we penalize deviation of T_top from its own mean
    T_top = model(XY_top)
    mean_value = torch.mean(T_top)
    loss_prior = loss_fn(T_top, mean_value)

    loss = loss_pde + loss_bc + loss_data + loss_prior

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if epoch % 1000 == 0:
        print('EPOCH : %6d/%6d| Loss : %5.4f | Loss_PDE : %5.4f | Loss_BC : %5.4f | Loss_DATA : %5.4f | Loss_PRIOR : %5.4f' \
              %(epoch, epochs - 1, loss.item(), loss_pde.item(), loss_bc.item(), loss_data.item(), loss_prior.item()))
        PLOT(model, XY_domain)
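Note that `loss_prior = loss_fn(T_top, mean_value)` is exactly the variance of the predicted top-edge temperature, so minimizing it drives the top edge toward a spatially uniform value without specifying what that value is.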
Load the model parameters
with torch.no_grad():
    model = torch.load('/content/drive/MyDrive/DL/DL_data/lab4_w_prior.pt')
    PLOT(model, XY_domain)
with torch.no_grad():
    model_wo_data = torch.load('/content/drive/MyDrive/DL/DL_data/lab4_wo_data.pt')
    model_w_data = torch.load('/content/drive/MyDrive/DL/DL_data/lab4_w_data.pt')
    model_w_prior = torch.load('/content/drive/MyDrive/DL/DL_data/lab4_w_prior.pt')
    T_wo_data = model_wo_data(XY_top)
    T_w_data = model_w_data(XY_top)
    T_w_prior = model_w_prior(XY_top)
plt.figure(figsize = (6, 4))
plt.plot(xy_top[:, 0], np.zeros_like(xy_top[:, 0]), c = 'k', linestyle = '--', linewidth = 2, label = 'GT')
plt.plot(xy_top[:, 0], T_wo_data.detach().cpu().numpy(), c = 'b', linestyle = '--', linewidth = 2, label = 'w/o data')
plt.plot(xy_top[:, 0], T_w_data.detach().cpu().numpy(), c = 'g', linestyle = '--', linewidth = 2, label = 'w/ data')
plt.plot(xy_top[:, 0], T_w_prior.detach().cpu().numpy(), c = 'r', linewidth = 2, label = 'w/ prior')
plt.legend()
plt.grid(True)
plt.show()