PINN with Data

Fluid Mechanics Example

By Prof. Seungchul Lee
http://iai.postech.ac.kr/
Industrial AI Lab at POSTECH

# 1. Data-driven Approach with Big Data

## 1.1. Load and Sample Data

In [ ]:
import deepxde as dde
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

In [ ]:
# Mount Google Drive in the Colab runtime so the training data (.npy file
# loaded in the next cell) is accessible under /content/drive/.
from google.colab import drive
drive.mount('/content/drive/')

Drive already mounted at /content/drive/; to attempt to forcibly remount, call drive.mount("/content/drive/", force_remount=True).

In [ ]:
# Load the precomputed flow-field dataset. Columns 0-1 hold the (x, y)
# coordinates; the remaining columns hold the observed field values.
fluid_bigdata = np.load('/content/drive/MyDrive/Colab Notebooks/data_files/fluid_bigdata.npy')

# Split off the coordinate columns from the observation columns in one call.
observe_x, observe_y = np.split(fluid_bigdata, [2], axis=1)

In [ ]:
# One point-set "boundary condition" per observed field (u, v, p): each ties
# network output component i to the measured column i of observe_y at the
# measurement locations observe_x.
observe_u, observe_v, observe_p = (
    dde.icbc.PointSetBC(observe_x, observe_y[:, i:i + 1], component=i)
    for i in range(3)
)


## 1.2. Define Parameters

In [ ]:
# Properties (nondimensional values)
rho = 1     # density; not used in the cells shown here -- presumably for a PDE residual elsewhere
mu = 1      # dynamic viscosity; likewise unused in this data-only section
u_in = 1    # inlet velocity; likewise unused in this data-only section
D = 1       # plate spacing: sets the y-extent of the rectangular domain below
L = 2       # channel length: sets the x-extent of the rectangular domain below


## 1.3. Define Geometry

In [ ]:
# Rectangular channel centered at the origin: x in [-L/2, L/2], y in [-D/2, D/2].
geom = dde.geometry.Rectangle(xmin = [-L/2, -D/2], xmax = [L/2, D/2])
# Purely data-driven setup: no PDE residual (None) and no collocation points
# (num_domain = 0, num_boundary = 0); the only training signal is the three
# point-set observations on u, v and p. num_test points are sampled for the
# reported test loss (the runtime warning shows 120 were actually sampled).
data = dde.data.PDE(geom,
None,
[observe_u, observe_v, observe_p],
num_domain = 0,
num_boundary = 0,
num_test = 100)

Warning: 100 points required, but 120 points sampled.

In [ ]:
# Visualize the training-point cloud together with the observation points,
# colored by the measured u-velocity.
fig, ax = plt.subplots(figsize = (20, 4))
ax.scatter(data.train_x_all[:, 0], data.train_x_all[:, 1], s = 0.5)
ax.scatter(observe_x[:, 0], observe_x[:, 1], c = observe_y[:, 0], s = 6.5, cmap = 'jet')
ax.scatter(observe_x[:, 0], observe_x[:, 1], s = 0.5, color = 'k', alpha = 0.5)
ax.set_xlim((-L/2, L/2))
ax.set_ylim((-D/2, D/2))
ax.set_xlabel('x-direction length (m)')
ax.set_ylabel('Distance from middle of plates (m)')
ax.set_title('Velocity (u)')
plt.show()


## 1.4. Define Network and Hyper-parameters

In [ ]:
# Fully-connected network: 2 inputs (x, y) -> five hidden layers of 64 tanh
# units -> 3 outputs (u, v, p), Glorot-uniform initialized.
layer_size = [2] + [64] * 5 + [3]
activation = "tanh"
initializer = "Glorot uniform"

net = dde.maps.FNN(layer_size, activation, initializer)

model = dde.Model(data, net)

# FIX: model.train() below requires a prior model.compile() call; the captured
# output ("Compiling model...") shows the call existed but was lost in this
# export. lr = 1e-3 is the conventional Adam learning rate -- confirm against
# the original notebook if available.
model.compile("adam", lr = 1e-3)

Compiling model...
Building feed-forward neural network...
'build' took 0.104355 s


/usr/local/lib/python3.7/dist-packages/deepxde/nn/tensorflow_compat_v1/fnn.py:110: UserWarning: tf.layers.dense is deprecated and will be removed in a future version. Please use tf.keras.layers.Dense instead.
kernel_constraint=self.kernel_constraint,
/usr/local/lib/python3.7/dist-packages/keras/legacy_tf_layers/core.py:261: UserWarning: layer.apply is deprecated and will be removed in a future version. Please use layer.__call__ method instead.
return layer.apply(inputs)

'compile' took 0.378851 s



In [ ]:
# First training stage: 10,000 Adam steps. The three reported losses are the
# point-set data misfits on u, v and p respectively.
losshistory, train_state = model.train(epochs = 10000)
# Summarize the run without writing files or opening figures.
dde.saveplot(losshistory, train_state, issave = False, isplot = False)

Initializing variables...
Training model...

0         [1.17e+00, 6.43e-03, 2.02e+02]    [1.17e+00, 6.43e-03, 2.02e+02]    []
1000      [1.83e-01, 5.70e-03, 6.76e-01]    [1.83e-01, 5.70e-03, 6.76e-01]    []
2000      [7.91e-03, 4.33e-03, 8.52e-02]    [7.91e-03, 4.33e-03, 8.52e-02]    []
3000      [1.68e-03, 2.46e-03, 2.26e-02]    [1.68e-03, 2.46e-03, 2.26e-02]    []
4000      [5.00e-04, 6.59e-04, 1.58e-02]    [5.00e-04, 6.59e-04, 1.58e-02]    []
5000      [2.51e-04, 3.36e-04, 9.71e-03]    [2.51e-04, 3.36e-04, 9.71e-03]    []
6000      [2.00e-04, 1.78e-04, 3.42e-03]    [2.00e-04, 1.78e-04, 3.42e-03]    []
7000      [1.65e-04, 1.25e-04, 8.15e-03]    [1.65e-04, 1.25e-04, 8.15e-03]    []
8000      [1.24e-04, 1.07e-04, 1.52e-02]    [1.24e-04, 1.07e-04, 1.52e-02]    []
9000      [7.33e-05, 5.47e-05, 1.16e-04]    [7.33e-05, 5.47e-05, 1.16e-04]    []
10000     [5.68e-05, 4.45e-05, 1.30e-04]    [5.68e-05, 4.45e-05, 1.30e-04]    []

Best model at step 10000:
train loss: 2.31e-04
test loss: 2.31e-04
test metric: []

'train' took 18.869112 s



## 1.6. Train More (L-BFGS Optimizer)

In [ ]:
# Second training stage: refine the Adam solution with L-BFGS, capped at
# 3000 iterations. As the captured output shows, this drives the data-fit
# losses roughly an order of magnitude lower.
dde.optimizers.config.set_LBFGS_options(maxiter=3000)
model.compile("L-BFGS")
losshistory, train_state = model.train()
# This time also plot the loss history (isplot = True).
dde.saveplot(losshistory, train_state, issave = False, isplot = True)

Compiling model...
'compile' took 0.375285 s

Training model...

Step      Train loss                        Test loss                         Test metric
10000     [5.68e-05, 4.45e-05, 1.30e-04]    [5.68e-05, 4.45e-05, 1.30e-04]    []
11000     [4.70e-06, 2.77e-06, 2.75e-05]
12000     [3.22e-06, 9.92e-07, 1.41e-05]
13000     [2.41e-06, 4.91e-07, 9.34e-06]
INFO:tensorflow:Optimization terminated with:
Message: b'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT'
Objective function value: 0.000011
Number of iterations: 3000
Number of functions evaluations: 3189
13189     [2.15e-06, 4.98e-07, 8.57e-06]    [2.15e-06, 4.98e-07, 8.57e-06]    []

Best model at step 13189:
train loss: 1.12e-05
test loss: 1.12e-05
test metric: []

'train' took 109.374331 s



## 1.7. Plot Results (Adam + L-BFGS)

In [ ]:
# Evaluate the trained surrogate densely over the whole channel.
samples = geom.random_points(500000)
result = model.predict(samples)
# Fixed color ranges per output field: u in [0, 1.5], v in [-0.3, 0.3], p in [0, 35].
color_legend = [[0, 1.5], [-0.3, 0.3], [0, 35]]

# FIX: the loop body below lost its indentation in this export, which makes
# the cell an IndentationError as written; restored here.
for idx in range(3):
    plt.figure(figsize = (20, 4))
    plt.scatter(samples[:, 0],
                samples[:, 1],
                c = result[:, idx],
                s = 2,
                cmap = 'jet')
    plt.colorbar()
    plt.clim(color_legend[idx])
    plt.xlim((0-L/2, L-L/2))
    plt.ylim((0-D/2, D-D/2))
    plt.tight_layout()
    plt.show()