PINN with Data

Fluid Mechanics Example

By Prof. Seungchul Lee
http://iai.postech.ac.kr/
Industrial AI Lab at POSTECH

1. Data-Driven Approach with Big Data

In [2]:
import deepxde as dde
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

Using backend: pytorch


In [3]:
# Load the measured flow-field samples.
# Presumably each row is [x, y, u, v, p] (2 coordinates + 3 field values,
# matching the three output components used below) -- confirm against the data file.
fluid_bigdata = np.load('./data_files/fluid_bigdata.npy')

# Split coordinates (first two columns) from the observed field values.
observe_x, observe_y = fluid_bigdata[:, :2], fluid_bigdata[:, 2:]

In [4]:
# One point-set observation per network output component
# (0 -> u, 1 -> v, 2 -> p), anchoring predictions to the data at observe_x.
observe_u, observe_v, observe_p = (
    dde.icbc.PointSetBC(observe_x, observe_y[:, i].reshape(-1, 1), component=i)
    for i in range(3)
)


1.2. Define Parameters

In [5]:
# Flow and geometry properties (values of 1 suggest a nondimensionalized
# problem -- confirm units against the data source).
rho = 1   # fluid density
mu = 1    # dynamic viscosity (unused in this data-only section)
u_in = 1  # inlet velocity (unused in this data-only section)
D = 1     # channel height: distance between the plates (y-extent)
L = 2     # channel length (x-extent)


1.3. Define Geometry

In [6]:
# Rectangular channel centered at the origin: x in [-L/2, L/2], y in [-D/2, D/2].
geom = dde.geometry.Rectangle(xmin=[-L / 2, -D / 2], xmax=[L / 2, D / 2])

# Purely data-driven setup: no PDE residual (None) and no collocation points,
# so the only training signal is the three point-set observations.
data = dde.data.PDE(
    geom,
    None,
    [observe_u, observe_v, observe_p],
    num_domain=0,
    num_boundary=0,
    num_test=100,
)

Warning: 100 points required, but 120 points sampled.

In [7]:
# Overlay the observation points (colored by the measured u-velocity)
# on top of the sampled training points.
plt.figure(figsize=(20, 4))
plt.scatter(data.train_x_all[:, 0], data.train_x_all[:, 1], s=0.5)
plt.scatter(observe_x[:, 0], observe_x[:, 1], c=observe_y[:, 0], s=6.5, cmap='jet')
plt.scatter(observe_x[:, 0], observe_x[:, 1], s=0.5, color='k', alpha=0.5)
plt.xlim((-L / 2, L / 2))
plt.ylim((-D / 2, D / 2))
plt.xlabel('x-direction length (m)')
plt.ylabel('Distance from middle of plates (m)')
plt.title('Velocity (u)')
plt.show()


1.4. Define Network and Hyper-parameters

In [8]:
# Fully connected network: 2 inputs (x, y) -> 5 hidden layers of width 64
# with tanh activation -> 3 outputs (u, v, p).
layer_size = [2] + [64] * 5 + [3]
activation = "tanh"
initializer = "Glorot uniform"

# dde.maps is a deprecated alias of dde.nn in DeepXDE; use the current
# namespace (the file already uses the modern dde.icbc API, so dde.nn exists).
net = dde.nn.FNN(layer_size, activation, initializer)

model = dde.Model(data, net)

Compiling model...
'compile' took 0.000181 s



1.5. Train (Adam Optimizer)

In [9]:
# First optimization stage: Adam for 10,000 steps, fitting the observations.
losshistory, train_state = model.train(epochs=10000)
# Record the loss history without saving files or plotting here.
dde.saveplot(losshistory, train_state, issave=False, isplot=False)

Training model...

Step      Train loss                        Test loss                         Test metric
0         [1.20e+00, 3.17e-02, 2.00e+02]    [1.20e+00, 3.17e-02, 2.00e+02]    []
1000      [1.83e-01, 5.27e-03, 6.67e-01]    [1.83e-01, 5.27e-03, 6.67e-01]    []
2000      [6.29e-03, 4.43e-03, 6.54e-02]    [6.29e-03, 4.43e-03, 6.54e-02]    []
3000      [1.79e-03, 1.70e-03, 2.05e-02]    [1.79e-03, 1.70e-03, 2.05e-02]    []
4000      [4.26e-04, 5.19e-04, 8.69e-03]    [4.26e-04, 5.19e-04, 8.69e-03]    []
5000      [2.89e-04, 2.47e-04, 1.84e-03]    [2.89e-04, 2.47e-04, 1.84e-03]    []
6000      [1.51e-04, 1.03e-04, 3.99e-04]    [1.51e-04, 1.03e-04, 3.99e-04]    []
7000      [8.78e-05, 5.29e-05, 2.20e-03]    [8.78e-05, 5.29e-05, 2.20e-03]    []
8000      [5.93e-05, 4.02e-05, 1.15e-03]    [5.93e-05, 4.02e-05, 1.15e-03]    []
9000      [4.22e-05, 2.70e-05, 1.23e-04]    [4.22e-05, 2.70e-05, 1.23e-04]    []
10000     [3.40e-05, 2.28e-05, 1.09e-04]    [3.40e-05, 2.28e-05, 1.09e-04]    []

Best model at step 10000:
train loss: 1.65e-04
test loss: 1.65e-04
test metric: []

'train' took 75.887699 s



1.6. Train More (L-BFGS Optimizer)

In [10]:
# Second optimization stage: refine with L-BFGS starting from the Adam result.
dde.optimizers.config.set_LBFGS_options()
model.compile("L-BFGS")
losshistory, train_state = model.train()
# This time also plot the combined loss history.
dde.saveplot(losshistory, train_state, issave=False, isplot=True)

Compiling model...
'compile' took 0.000401 s

Training model...

Step      Train loss                        Test loss                         Test metric
10000     [3.40e-05, 2.28e-05, 1.09e-04]    [3.40e-05, 2.28e-05, 1.09e-04]    []
11000     [3.84e-06, 3.34e-06, 4.07e-05]    [3.84e-06, 3.34e-06, 4.07e-05]    []
12000     [3.61e-06, 1.97e-06, 2.23e-05]    [3.61e-06, 1.97e-06, 2.23e-05]    []
13000     [3.14e-06, 9.65e-07, 1.29e-05]    [3.14e-06, 9.65e-07, 1.29e-05]    []
14000     [1.86e-06, 1.24e-06, 5.42e-06]    [1.86e-06, 1.24e-06, 5.42e-06]    []
15000     [1.34e-06, 4.84e-07, 2.77e-06]    [1.34e-06, 4.84e-07, 2.77e-06]    []
16000     [9.03e-07, 3.72e-07, 1.46e-06]    [9.03e-07, 3.72e-07, 1.46e-06]    []
17000     [8.04e-07, 3.14e-07, 1.21e-06]    [8.04e-07, 3.14e-07, 1.21e-06]    []
18000     [8.02e-07, 3.12e-07, 1.20e-06]    [8.02e-07, 3.12e-07, 1.20e-06]    []
19000     [8.00e-07, 3.11e-07, 1.20e-06]    [8.00e-07, 3.11e-07, 1.20e-06]    []
20000     [7.99e-07, 3.09e-07, 1.20e-06]    [7.99e-07, 3.09e-07, 1.20e-06]    []
21000     [7.98e-07, 3.08e-07, 1.19e-06]    [7.98e-07, 3.08e-07, 1.19e-06]    []
22000     [7.97e-07, 3.08e-07, 1.19e-06]    [7.97e-07, 3.08e-07, 1.19e-06]    []
23000     [7.97e-07, 3.07e-07, 1.19e-06]    [7.97e-07, 3.07e-07, 1.19e-06]    []
24000     [7.96e-07, 3.06e-07, 1.19e-06]    [7.96e-07, 3.06e-07, 1.19e-06]    []
25000     [7.95e-07, 3.05e-07, 1.18e-06]    [7.95e-07, 3.05e-07, 1.18e-06]    []

Best model at step 25000:
train loss: 2.28e-06
test loss: 2.28e-06
test metric: []

'train' took 219.993909 s



1.7. Plot Results (Adam + L-BFGS)

In [11]:
# Evaluate the trained network on dense random points and plot each output.
# NOTE: the notebook export had lost the for-loop indentation (the body was
# at column 0, an IndentationError); restored here.
samples = geom.random_points(500000)
result = model.predict(samples)

# Color-scale limits per component: u, v, p.
color_legend = [[0, 1.5], [-0.3, 0.3], [0, 35]]

for idx in range(3):
    plt.figure(figsize=(20, 4))
    plt.scatter(samples[:, 0],
                samples[:, 1],
                c=result[:, idx],
                s=2,
                cmap='jet')
    plt.colorbar()
    plt.clim(color_legend[idx])
    plt.xlim((0 - L / 2, L - L / 2))
    plt.ylim((0 - D / 2, D - D / 2))
    plt.tight_layout()
    plt.show()