AI for Mechanical Engineering: Fluid Mechanics
http://iailab.kaist.ac.kr/
Industrial AI Lab at KAIST
We want to design a Venturi meter using cvxpy to maximize the pressure at Point 1. Given the constraints on a, b, and c, find the maximum pressure at Point 1 and the corresponding values of a, b, and c. (Take the specific weight of JP-4 fuel to be γ = 9.807.)
$$ \begin{array}{l} 10 \leq a-c \leq 30 \\ 15 \leq a+b \leq 25 \\ 25 \leq 3b+c \leq 45 \\ a, b \geq 1 \\ c \geq 6 \end{array} $$
$$ \begin{array}{ll} \max & \gamma a + \gamma b \sin(30^\circ) + \frac{\gamma V^2 \left(1-\frac{c^2}{16}\right)}{2g} \\ \text{subject to} & 10 \leq a-c \leq 30 \\ & 15 \leq a+b \leq 25 \\ & 25 \leq 3b+c \leq 45 \\ & a, b \geq 1 \\ & c \geq 6 \end{array} \quad\implies\quad \begin{array}{ll} \min & -\gamma\left(a + b \sin(30^\circ) + \frac{V^2 \left(1-\frac{c^2}{16}\right)}{2g}\right) \\ \text{subject to} & 10 \leq a-c \leq 30 \\ & 15 \leq a+b \leq 25 \\ & 25 \leq 3b+c \leq 45 \\ & a, b \geq 1 \\ & c \geq 6 \end{array} $$
$$ \begin{array}{ll} \min & -\gamma\left(a + b \sin(30^\circ) + \frac{V^2 \left(1-\frac{c^2}{16}\right)}{2g}\right) \\ \text{subject to} & 10 \leq a-c \leq 30 \\ & 15 \leq a+b \leq 25 \\ & 25 \leq 3b+c \leq 45 \\ & a, b \geq 1 \\ & c \geq 6 \end{array} \quad\implies\quad \begin{array}{ll} \min\limits_{X} & -\gamma\left(X^T HX + f^T X\right) \\ \text{subject to} & AX \leq b \\ & AX \geq b_{\text{lower}} \\ & X \geq lb \end{array} $$
import cvxpy as cvx
import numpy as np
x = cvx.Variable(3)
# Linear part of the objective: a + b*sin(30 deg); gamma is factored out and re-applied after solving
f = np.array([[1], [np.sin(np.deg2rad(30))], [0]])
# Quadratic part of the objective: -V^2 c^2 / (16 * 2g), with V = 10 and g = 9.8 as used below
H = np.array([
    [0, 0, 0],
    [0, 0, 0],
    [0, 0, -10**2 / (16 * 2 * 9.8)]
])
# Minimize the negative pressure head; the constant V^2/(2g) does not change the optimizer
objective = cvx.Minimize(-(cvx.quad_form(x, H) + f.T @ x + 10**2/(2*9.8)))
# Linear inequality constraints: b_lower <= A x <= b, and x >= lb
A = np.array([
    [1, 0, -1],
    [1, 1, 0],
    [0, 3, 1]
])
b = np.array([30, 25, 45])
b_lower = np.array([10, 15, 25])
lb = np.array([1, 1, 6])
constraints = [A @ x <= b, A @ x >= b_lower, lb <= x ]
prob = cvx.Problem(objective, constraints)
result = prob.solve()
print("The optimal value is", result * - 9.807)
print("A solution [a, b, c] is")
print(x.value)
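As a quick sanity check (a minimal sketch, assuming the same V = 10 and g = 9.8 that appear in the code above), the pressure at Point 1 can be recomputed directly from the optimal a, b, and c:
# Sanity check: recompute the pressure at Point 1 from the optimal a, b, c
a_opt, b_opt, c_opt = x.value
gamma, V, g = 9.807, 10, 9.8   # specific weight, velocity, and gravity as assumed in the code above
p1 = gamma * (a_opt + b_opt * np.sin(np.deg2rad(30)) + V**2 * (1 - c_opt**2 / 16) / (2 * g))
print("Recomputed pressure at Point 1:", p1)   # should match -result * 9.807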
2. Dimensionality Reduction¶
2.1. SVD¶
Data Description
CYLINDER_ALL.mat - 151 snapshots of u velocity, v velocity, and vorticity (UALL, VALL, VORTALL).
Data set for fluid flow past a cylinder at Re = 100
Download data here
Given the data above, perform SVD to obtain U, S, and V. Then perform truncated SVD to obtain a rank-5 approximation and compare the result with the original data. The matrix used for the SVD is 'flows_mat', and the code to construct it is provided below.
- Input shape: (89351 x 151)
- 449 x 199 dimensions for 151 time steps
Data Preprocessing and Visualization
from google.colab import drive
drive.mount('/content/drive')
from scipy import io
import numpy as np
flows = io.loadmat('/content/drive/MyDrive/Data/CYLINDER_ALL.mat')
flows_mat_u = flows['UALL']
flows_mat_v = flows['VALL']
flows_mat_vort = flows['VORTALL']
flows_mat_u.shape
nx = 449
ny = 199
# Check that a single snapshot reshapes to the (nx, ny) grid
flows_mat_u[:,0].reshape(nx, ny)
import matplotlib.pyplot as plt
plt.subplot(1,3,1)
plt.imshow(flows_mat_u[:,150].reshape(nx, ny), 'gray')
plt.title('u velocity')
plt.axis('off')
plt.subplot(1,3,2)
plt.imshow(flows_mat_v[:,150].reshape(nx, ny), 'gray')
plt.title('v velocity')
plt.axis('off')
plt.subplot(1,3,3)
plt.imshow(flows_mat_vort[:,150].reshape(nx, ny), 'gray')
plt.title('vorticity')
plt.axis('off')
plt.show()
from matplotlib.animation import FuncAnimation
import time
from IPython import display
fig = plt.figure()
ax1 = fig.add_subplot(1, 3, 1)
ax2 = fig.add_subplot(1, 3, 2)
ax3 = fig.add_subplot(1, 3, 3)
def animate(i):
    ax1.clear()
    ax1.imshow(flows_mat_u[:,i].reshape(nx, ny), 'gray')
    ax1.set_title('u velocity')
    ax1.axis('off')
    ax2.clear()
    ax2.imshow(flows_mat_v[:,i].reshape(nx, ny), 'gray')
    ax2.set_title('v velocity')
    ax2.axis('off')
    ax3.clear()
    ax3.imshow(flows_mat_vort[:,i].reshape(nx, ny), 'gray')
    ax3.set_title('vorticity')
    ax3.axis('off')
ani = FuncAnimation(fig, animate, frames = int(150/1), interval = 10)
video = ani.to_html5_video()
html = display.HTML(video)
display.display(html)
plt.close()
flows_mat_vort_mean = flows_mat_vort.mean(axis = 1)
flows_mat_vort_centered = flows_mat_vort - flows_mat_vort_mean[:, None]
flows_mat_vort_centered_normalized = (flows_mat_vort_centered - flows_mat_vort_centered.min()) / (flows_mat_vort_centered.max() - flows_mat_vort_centered.min())
plt.imshow(flows_mat_vort_mean.reshape(nx, ny), 'gray')
plt.title('vorticity')
plt.axis('off')
plt.show()
flows_mat = flows_mat_vort_centered_normalized
Perform SVD
U, S, VT = np.linalg.svd(flows_mat, full_matrices = False)
print(U.shape)
print(S.shape)
print(VT.shape)
plt.semilogy(S, '-o')
plt.ylabel('Singular Value', fontsize = 15)
plt.xlabel('Index', fontsize = 15)
plt.show()
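To decide how many modes to keep, a short sketch like the one below can plot the cumulative energy ratio of the singular values.
# Cumulative energy ratio of the singular values (helps justify the rank-5 truncation)
energy = np.cumsum(S**2) / np.sum(S**2)
plt.plot(np.arange(1, len(S) + 1), energy, '-o')
plt.axvline(5, color = 'r', linestyle = '--', label = 'k = 5')
plt.xlabel('Number of Modes', fontsize = 15)
plt.ylabel('Cumulative Energy Ratio', fontsize = 15)
plt.legend()
plt.show()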
plt.subplot(1,4,1)
plt.imshow(U[:,0].reshape(nx, ny), 'gray')
plt.title('POD Mode 1')
plt.axis('off')
plt.subplot(1,4,2)
plt.imshow(U[:,1].reshape(nx, ny), 'gray')
plt.title('POD Mode 2')
plt.axis('off')
plt.subplot(1,4,3)
plt.imshow(U[:,4].reshape(nx, ny), 'gray')
plt.title('POD Mode 5')
plt.axis('off')
plt.subplot(1,4,4)
plt.imshow(U[:,12].reshape(nx, ny), 'gray')
plt.title('POD Mode 13')
plt.axis('off')
plt.show()
U = np.asmatrix(U)
diagS = np.asmatrix(np.diag(S))
VT = np.asmatrix(VT)
print(U.shape, diagS.shape, VT.shape, '\n')
Perform Truncated SVD with a Rank-5 Approximation
k = 5
low_U = U[:, :k]
low_S = diagS[:k, :k]
low_VT = VT[:k, :]
low_flows_mat = low_U*low_S*low_VT
print(low_U.shape, low_S.shape, low_VT.shape, '\n')
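To quantify how close the rank-5 approximation is to the original snapshots, a relative Frobenius-norm error can be computed (a small illustrative sketch):
# Relative Frobenius-norm error of the rank-5 approximation
rel_err = np.linalg.norm(flows_mat - low_flows_mat, 'fro') / np.linalg.norm(flows_mat, 'fro')
print('Relative reconstruction error (k = 5):', rel_err)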
from matplotlib.animation import FuncAnimation
import time
from IPython import display
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2)
def animate(i):
    ax1.clear()
    ax1.imshow(flows_mat[:,i].reshape(nx, ny), 'gray')
    ax1.axis('off')
    ax2.clear()
    ax2.imshow(low_flows_mat[:,i].reshape(nx, ny), 'gray')
    ax2.axis('off')
ani = FuncAnimation(fig, animate, frames = int(150/1), interval = 10)
video = ani.to_html5_video()
html = display.HTML(video)
display.display(html)
plt.close()
2.2. Autoencoder¶
- Latent space dimensions: 30, 10, 5, 2
- Train each autoencoder separately with the same training dataset
- The data to be used will be constructed based on the u velocity values from the SVD example.
Data Preprocessing and Visualization
import numpy as np
from numpy import linalg
import matplotlib.pyplot as plt
%matplotlib inline
flows_mat_u_mean = flows_mat_u.mean(axis = 1)
flows_mat_u_centered = flows_mat_u - flows_mat_u_mean[:, None]
print(flows_mat_u_centered.shape)
nx = 449
ny = 199
plt.imshow(flows_mat_u_mean.reshape(nx, ny), 'gray')
plt.title('u velocity')
plt.axis('off')
flows_mat_u_centered_normalized = (flows_mat_u_centered - flows_mat_u_centered.min()) / (flows_mat_u_centered.max() - flows_mat_u_centered.min())
train_x = flows_mat_u_centered_normalized.T.reshape(-1, nx, ny, 1)
Define Function to Create and Train Autoencoder Models with Different Latent Dimensions
import tensorflow as tf
def create_and_train_model(lat_dim):
    input_shape = (449, 199, 1)
    # Encoder: flatten a snapshot and compress it to a lat_dim-dimensional latent vector
    encoder = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape = input_shape),
        tf.keras.layers.Dense(256, activation = 'relu'),
        tf.keras.layers.Dense(64, activation = 'relu'),
        tf.keras.layers.Dense(lat_dim, activation = None)
    ])
    # Decoder: reconstruct the snapshot from the latent vector
    decoder = tf.keras.models.Sequential([
        tf.keras.layers.Dense(64, activation = 'relu', input_shape = (lat_dim,)),
        tf.keras.layers.Dense(256, activation = 'relu'),
        tf.keras.layers.Dense(449*199, activation = None),
        tf.keras.layers.Reshape(input_shape)
    ])
    # Autoencoder: encoder followed by decoder
    model = tf.keras.Model(inputs = encoder.input, outputs = decoder(encoder.output))
    model.compile(optimizer = 'adam', loss = 'mean_squared_error')
    model.fit(train_x, train_x, epochs = 500, verbose = 0)
    return model, encoder
Train Autoencoders with Different Latent Dimensions
model30, encoder30 = create_and_train_model(30)
model10, encoder10 = create_and_train_model(10)
model5, encoder5 = create_and_train_model(5)
model2, encoder2 = create_and_train_model(2)
Encode the Dataset into the Latent Space and Reconstruct the Original Images with the Decoders
predict30 = model30.predict(train_x)
predict30 = predict30.reshape(151, 89351)
train_30 = train_x.reshape(151, 89351)
predict10 = model10.predict(train_x)
predict10 = predict10.reshape(151, 89351)
train_10 = train_x.reshape(151, 89351)
predict5 = model5.predict(train_x)
predict5 = predict5.reshape(151, 89351)
train_5 = train_x.reshape(151, 89351)
predict2 = model2.predict(train_x)
predict2 = predict2.reshape(151, 89351)
train_2 = train_x.reshape(151, 89351)
Visualize Reconstruction Results from the Latent Space
- Only the 30-, 10-, 5-, or 2-dimensional latent representation and the decoder are needed to reconstruct the original image with 89,351 dimensions (a quantitative comparison is sketched right below)
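The following sketch computes the mean squared reconstruction error between the original snapshots and each autoencoder's reconstruction:
# Mean squared reconstruction error for each latent dimension (illustrative sketch)
for dim, truth, pred in [(30, train_30, predict30), (10, train_10, predict10),
                         (5, train_5, predict5), (2, train_2, predict2)]:
    mse = np.mean((truth - pred)**2)
    print('Latent dim {}: reconstruction MSE = {:.6f}'.format(dim, mse))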
Latent Space with 30 Dimensions
# Let's see the flow in time
from matplotlib.animation import FuncAnimation
import time
from IPython import display
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2)
def animate(i):
    ax1.clear()
    ax1.imshow(train_30[i,:].reshape(nx, ny), 'gray')
    ax1.set_title('Ground Truth')
    ax1.axis('off')
    ax2.clear()
    ax2.imshow(predict30[i,:].reshape(nx, ny), 'gray')
    ax2.set_title('Reconstructed')
    ax2.axis('off')
ani = FuncAnimation(fig, animate, frames = int(150/1), interval = 10)
video = ani.to_html5_video()
html = display.HTML(video)
display.display(html)
plt.close()
Latent Space with 10 Dimensions
# Let's see the flow in time
from matplotlib.animation import FuncAnimation
import time
from IPython import display
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2)
def animate(i):
    ax1.clear()
    ax1.imshow(train_10[i,:].reshape(nx, ny), 'gray')
    ax1.set_title('Ground Truth')
    ax1.axis('off')
    ax2.clear()
    ax2.imshow(predict10[i,:].reshape(nx, ny), 'gray')
    ax2.set_title('Reconstructed')
    ax2.axis('off')
ani = FuncAnimation(fig, animate, frames = int(150/1), interval = 10)
video = ani.to_html5_video()
html = display.HTML(video)
display.display(html)
plt.close()
Latent Space with 5 Dimensions
# Let's see the flow in time
from matplotlib.animation import FuncAnimation
import time
from IPython import display
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2)
def animate(i):
    ax1.clear()
    ax1.imshow(train_5[i,:].reshape(nx, ny), 'gray')
    ax1.set_title('Ground Truth')
    ax1.axis('off')
    ax2.clear()
    ax2.imshow(predict5[i,:].reshape(nx, ny), 'gray')
    ax2.set_title('Reconstructed')
    ax2.axis('off')
ani = FuncAnimation(fig, animate, frames = int(150/1), interval = 10)
video = ani.to_html5_video()
html = display.HTML(video)
display.display(html)
plt.close()
Latent Space with 2 Dimensions
- With the help of the latent space, we can easily see that the cylinder flow has a period of about 30 time steps (a numerical check follows the animation below).
- Rotation around the circle in the latent space takes 30 time steps.
# Let's see the flow in time
from matplotlib.animation import FuncAnimation
import time
from IPython import display
fig = plt.figure(figsize = (8, 10))
ax1 = fig.add_subplot(2, 2, 1)
ax2 = fig.add_subplot(2, 2, 2)
ax3 = fig.add_subplot(2, 1, 2)
lat = encoder2.predict(train_x)
lat = lat.reshape(151, 2)
def animate(i):
    ax1.clear()
    ax1.imshow(train_2[i,:].reshape(nx, ny), 'gray')
    ax1.set_title('Ground Truth')
    ax1.axis('off')
    ax2.clear()
    ax2.imshow(predict2[i,:].reshape(nx, ny), 'gray')
    ax2.set_title('Reconstructed Time Step: {}'.format(i))
    ax2.axis('off')
    ax3.clear()
    ax3.scatter(lat[:,0], lat[:,1], alpha = 0.3)
    ax3.scatter(lat[i,0], lat[i,1], color = 'r', s = 50)
    ax3.set_title('Latent Space Visualization (Time Step {})'.format(i))
    ax3.set_xticks([])
    ax3.set_yticks([])
    #ax3.axis('off')
ani = FuncAnimation(fig, animate, frames = int(150/1), interval = 100)
video = ani.to_html5_video()
html = display.HTML(video)
display.display(html)
plt.close()
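The period of roughly 30 time steps noted above can also be checked numerically from the 2D latent trajectory (a small sketch; the shift that minimizes the mean displacement estimates the period):
# Estimate the flow period from the 2D latent trajectory (illustrative sketch)
# For a roughly periodic trajectory, each latent point returns near its position one period later
shifts = np.arange(5, 60)   # skip very small shifts, where neighboring points are trivially close
dists = [np.mean(np.linalg.norm(lat[:-s] - lat[s:], axis = 1)) for s in shifts]
print('Estimated period (time steps):', shifts[np.argmin(dists)])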
3. Prediction¶
3.1 Polynomial Regression¶
Let's assume the following scenario. We have data from measuring P2 while varying Q1. However, we don't have sensors or devices to measure A1 and P1. In this case, let's use the relationship between P2 and Q1 to infer A1 and P1 based on the data. Additionally, assume that the flow rate divides evenly between pipes (2) and (3). (Density of the fluid is 1000)
- Objective: determine constant pressure (P1) and area of the first pipe (A1)
- Given: measured P2 while varying Q1
- Download data here
$$ P_2 = \frac{\rho}{2} \left(\left(\frac{1}{A_1}\right)^2 + \left(\frac{0.5}{0.8 \cdot A_1}\right)^2\right) Q_1^2 + P_1$$
Data Visualization
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data = pd.read_csv('/content/drive/MyDrive/Data/pressure_flowrate.csv')
x = data['Q1'].values
y = data['P2'].values.reshape(-1, 1)
plt.figure(figsize=(8, 6))
plt.plot(x, y, 'o', markersize=7, label='Measured data')
plt.xlabel('Q1 (Flow rate)', fontsize = 12)
plt.ylabel('P2 (Pressure)',fontsize = 12)
plt.xlim([np.min(x), np.max(x)])
plt.grid(alpha=0.3)
plt.legend()
plt.show()
Perform Polynomial Regression
A = np.vstack([np.ones_like(x), x**2]).T
A = np.asmatrix(A)
theta = (A.T * A).I * A.T * y
print('Coefficients:\n', theta)
xp = np.linspace(np.min(x), np.max(x), 100)
yp = theta[0, 0] + theta[1, 0] * xp**2
# Plotting
plt.figure(figsize=(8, 6))
plt.plot(x, y, 'o', markersize=7, label='Actual Data')
plt.plot(xp, yp, 'r', linewidth=3, label='Polynomial Fit')
plt.xlabel('Q1 (Flow rate)')
plt.ylabel('P2 (Pressure)')
plt.xlim([np.min(x), np.max(x)])
plt.grid(alpha=0.3)
plt.legend()
plt.show()
print('P1:', theta[0, 0])
print('A1:', np.sqrt(1000/2 * (1 + (0.5/0.8)**2) / theta[1, 0]))
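The same coefficients can be cross-checked with NumPy's built-in least-squares solver (a sketch using the same model P2 = theta0 + theta1 * Q1^2):
# Cross-check the normal-equation solution with np.linalg.lstsq
A_ls = np.vstack([np.ones_like(x), x**2]).T
theta_ls, *_ = np.linalg.lstsq(A_ls, y, rcond = None)
print('P1 (lstsq):', theta_ls[0, 0])
print('A1 (lstsq):', np.sqrt(1000/2 * (1 + (0.5/0.8)**2) / theta_ls[1, 0]))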
3.2 ANN Regression¶
A source of strength $m$ is located a distance $l$ from a vertical solid wall, as shown in the figure below. To obtain the velocity profile, sensors were placed to measure the $x$-direction velocity ($u$) and the $y$-direction velocity ($v$) at all $x$ and $y$ coordinates. We then adjusted $m$ and $l$ and recorded the values of $u$ and $v$ at each coordinate. However, when $l = 2$, $m = 2$, and $x = 1$, the sensors failed, resulting in the loss of all $u$ values at every $y$-coordinate. Instead of conducting a new experiment, we plan to predict the missing $u$ values using an ANN regressor. The data only cover the cases where $x$ is not equal to $l$. (All variables except $u$ and $v$ are integers.)
Load Data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
train_data = pd.read_csv('/content/drive/MyDrive/Data/ann_train.csv')
test_data = pd.read_csv('/content/drive/MyDrive/Data/ann_test.csv')
X_train = train_data[['x', 'y', 'l', 'm', 'v']].values
y_train = train_data['u'].values
X_test = test_data[['x', 'y', 'l', 'm', 'v']].values
y_coords_test = test_data['y'].values  # y-coordinates of the test points (used for plotting)
Define and Train ANN Model
model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(input_shape = (5,), units = 30, activation = 'relu'),
    tf.keras.layers.Dense(units = 100, activation = 'relu'),
    tf.keras.layers.Dense(units = 100, activation = 'relu'),
    tf.keras.layers.Dense(units = 30, activation = 'relu'),
    tf.keras.layers.Dense(units = 1)
])
model.compile(optimizer = 'adam', loss = 'mean_squared_error')
model.fit(X_train, y_train, epochs = 100, batch_size = 16, verbose = 1)
Make Predictions on Test Dataset
u_predicted = model.predict(X_test)
Compare Prediction Result with Actual Velocity Profile
pi = np.pi
def calculate_u(x, y, l, m):
    # Analytic u for a source near a wall (provided only for comparison with the ANN prediction)
    if x != l:
        return m / (4 * pi) * (2 * (x - l) / ((x - l)**2 + y**2) + 2 * (x + l) / ((x + l)**2 + y**2))
    return None
u_actual = [calculate_u(row['x'], row['y'], row['l'], row['m']) for index, row in test_data.iterrows()]
plt.figure(figsize=(10, 6))
plt.plot(y_coords_test, u_actual, label='Actual u', marker='o')
plt.plot(y_coords_test, u_predicted, label='Predicted u', linestyle='--', marker='x')
plt.title('Comparison of Actual and Predicted u Values by y')
plt.xlabel('y Value')
plt.ylabel('Velocity u')
plt.legend()
plt.show()
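To summarize the comparison with a single number, the mean absolute error between the analytic and predicted u values can be computed (a short sketch):
# Mean absolute error between the analytic and ANN-predicted u values
mae_u = np.mean(np.abs(np.array(u_actual) - u_predicted.flatten()))
print('MAE of predicted u:', mae_u)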
3.3 CNN Regression¶
As we learned in fluid dynamics, the drag coefficient depends on the shape of the body, so it is usually measured through wind-tunnel experiments. Before that, however, we use simulations to estimate the drag coefficient, and, due to time and resource limitations, only the shapes with the best results are fabricated and tested. Running simulations for every shape is computationally expensive, requiring significant time and computing resources. To address this problem, we plan to build a CNN (Convolutional Neural Network) model that can act as a surrogate for the simulations. This approach is referred to as surrogate modeling.
Conventionally, we would construct a mesh as shown in the figure below and compute the drag coefficient with a computational fluid dynamics program.
Instead, we are going to build a CNN model that predicts the drag coefficient of a given shape directly.
Load Data
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error, r2_score
import matplotlib.pyplot as plt
imgs = np.load('/content/drive/MyDrive/Data/imgs.npy')
sol = np.load('/content/drive/MyDrive/Data/labels.npy')
X_train, X_test, y_train, y_test = train_test_split(imgs, sol, test_size = 0.2, random_state = 42)
X_train.shape
X_test.shape
Define and Train CNN Model
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3, 3), activation = 'relu', padding = 'SAME', input_shape = (128, 128, 1)),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Conv2D(64, (3, 3), activation = 'relu', padding = 'SAME'),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Conv2D(128, (3, 3), activation = 'relu', padding = 'SAME'),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation = 'relu'),
    tf.keras.layers.Dense(1)
])
model.compile(optimizer = 'adam', loss = 'mean_squared_error')
history = model.fit(X_train, y_train, epochs = 10, batch_size = 32, validation_split = 0.1)
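Since model.fit returns a History object, the training and validation losses can be plotted to check convergence (a short sketch):
# Plot the training and validation loss curves
plt.figure(figsize = (8, 5))
plt.plot(history.history['loss'], label = 'Train Loss')
plt.plot(history.history['val_loss'], label = 'Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('MSE Loss')
plt.legend()
plt.grid(alpha = 0.3)
plt.show()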
Compare Prediction Result on Test Dataset with Actual Drag Coefficient
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
print(f"Mean Absolute Error: {mae}")
print(f"R^2 Score: {r2}")
plt.figure(figsize = (10, 10))
plt.scatter(y_test, y_pred, alpha = 0.7, color = 'r')
plt.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'k--', lw = 3)
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.title('Predicted vs Actual')
plt.show()
Visualize Test Result
idx = 0
test_img = X_test[idx:idx + 1]
test_label = y_test[idx]
predict = model.predict(test_img, verbose = 0)
predicted_drag = predict[0][0]
plt.figure(figsize = (9, 4))
plt.imshow(test_img.reshape(128, 128), cmap = 'gray')
plt.axis('off')
plt.title('Test Image')
plt.show()
print(f'Predicted Drag Coefficient: [{predicted_drag:.3f}]')
print(f'Actual Drag Coefficient: {test_label}')
idx = 200
test_img = X_test[idx:idx + 1]
test_label = y_test[idx]
predict = model.predict(test_img, verbose = 0)
predicted_drag = predict[0][0]
plt.figure(figsize = (9, 4))
plt.imshow(test_img.reshape(128, 128), cmap = 'gray')
plt.axis('off')
plt.title('Test Image')
plt.show()
print(f'Predicted Drag Coefficient: [{predicted_drag:.3f}]')
print(f'Actual Drag Coefficient: {test_label}')
idx = 300
test_img = X_test[idx:idx + 1]
test_label = y_test[idx]
predict = model.predict(test_img, verbose = 0)
predicted_drag = predict[0][0]
plt.figure(figsize = (9, 4))
plt.imshow(test_img.reshape(128, 128), cmap = 'gray')
plt.axis('off')
plt.title('Test Image')
plt.show()
print(f'Predicted Drag Coefficient: [{predicted_drag:.3f}]')
print(f'Actual Drag Coefficient: {test_label}')
3.4. PINN with Data¶
- We will solve the 2D Navier-Stokes equations to find the velocity profile for flow between infinite parallel plates
- Any fluid flowing between the plates has to enter at some location. The region of flow near where the fluid enters the plates is termed the entrance region and is illustrated in the figure below
- The fluid typically enters the plates with a nearly uniform velocity profile
- As the fluid moves between the plates, viscous effects cause it to stick to the plate walls (no-slip boundary condition)
- Thus, a boundary layer is produced along the plate walls such that the initial velocity profile changes with distance along the plates, $x$, until the fluid reaches the end of the hydrodynamic entrance region (also called the entrance length), beyond which the velocity profile no longer varies with $x$
- Problem properties
$$\rho = 1\operatorname{kg/m^3}, \quad \mu = 1\operatorname{N\cdot s/m^2}, \quad D = 2h = 1\operatorname{m}, \quad L = 2\operatorname{m}, \quad u_{in} = 1\operatorname{m/s}, \quad \nu = \frac{\mu}{\rho}$$
- Hydraulic diameter is
$$\quad D_h = \lim\limits_{b\to\infty} {4(2bh) \over {2b+4h}} = 4h = 2\operatorname{m}$$
- So, the Reynolds number of this system is
$$Re = \frac{\rho u_{in} D_h}{\mu} = 2 $$
- 2D Navier-Stokes Equations & boundary conditions (for steady state)
$$ \begin{align*} u{\partial u \over \partial x} + v{\partial u \over \partial y} + {1 \over \rho}{\partial p \over \partial x} - \nu \ \left({\partial^2 u \over {\partial x^2}} + {\partial^2 u \over {\partial y^2}}\right) &= 0\\\\ u{\partial v \over \partial x} + v{\partial v \over \partial y} + {1 \over \rho}{\partial p \over \partial y} - \nu \ \left({\partial^2 v \over {\partial x^2}} + {\partial^2 v \over {\partial y^2}}\right) &= 0\\\\ {\partial u \over \partial x} + {\partial v \over \partial y} &= 0 \end{align*} $$
- Two Dirichlet boundary conditions on the plate boundary (no-slip condition),
$$u(x,y) = 0, \quad v(x,y) = 0 \qquad \text{at} \quad y = \frac{D}{2} \ \; \text{or} \; -\frac{D}{2}$$
- Two Dirichlet boundary conditions at the inlet boundary
$$u(-1,y) = u_{\text{in}}, \quad v(-1,y) = 0$$
- Two Dirichlet boundary conditions at the outlet boundary
$$p(1,y) = 0, \quad v(1,y) = 0$$
- This time, we will attempt two approaches
- First, we will build a model based on an ANN whose inputs and outputs are presented below. The ANN will be trained with a large amount of data.
- Next, we will build a model based on a PINN, applying both the PDE loss and the data loss as described below.
- The PINN will be trained with only a small amount of data.
- Despite using less data, we will see that the PINN, which exploits the PDE, yields better results than the ANN.
!pip install deepxde
import deepxde as dde
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from deepxde.backend.set_default_backend import set_default_backend
set_default_backend("tensorflow")
3.4.1 ANN with Big Data¶
Load Data
fluid_bigdata = np.load('/content/drive/MyDrive/Data/fluid_bigdata.npy')
observe_x = fluid_bigdata[:, :2]
observe_y = fluid_bigdata[:, 2:]
observe_u = dde.icbc.PointSetBC(observe_x, observe_y[:, 0].reshape(-1, 1), component=0)
observe_v = dde.icbc.PointSetBC(observe_x, observe_y[:, 1].reshape(-1, 1), component=1)
observe_p = dde.icbc.PointSetBC(observe_x, observe_y[:, 2].reshape(-1, 1), component=2)
Define Parameters
# Properties
rho = 1
mu = 1
u_in = 1
D = 1
L = 2
Define Geometry
geom = dde.geometry.Rectangle(xmin = [-L/2, -D/2], xmax = [L/2, D/2])
data = dde.data.PDE(geom,
None,
[observe_u, observe_v, observe_p],
num_domain = 0,
num_boundary = 0,
num_test = 100)
plt.figure(figsize = (20,4))
plt.scatter(data.train_x_all[:,0], data.train_x_all[:,1], s = 0.5)
plt.scatter(observe_x[:, 0], observe_x[:, 1], c = observe_y[:, 0], s = 6.5, cmap = 'jet')
plt.scatter(observe_x[:, 0], observe_x[:, 1], s = 0.5, color='k', alpha = 0.5)
plt.xlim((0-L/2, L-L/2))
plt.ylim((0-D/2, D-D/2))
plt.xlabel('x-direction length (m)')
plt.ylabel('Distance from middle of plates (m)')
plt.title('Velocity (u)')
plt.show()
Define Network and Hyper-parameters
layer_size = [2] + [64] * 5 + [3]
activation = "tanh"
initializer = "Glorot uniform"
net = dde.maps.FNN(layer_size, activation, initializer)
model = dde.Model(data, net)
model.compile("adam", lr = 1e-3)
Train (Adam Optimizer)
losshistory, train_state = model.train(epochs = 10000)
dde.saveplot(losshistory, train_state, issave = False, isplot = False)
Train More (L-BFGS Optimizer)
dde.optimizers.config.set_LBFGS_options(maxiter=3000)
model.compile("L-BFGS")
losshistory, train_state = model.train()
dde.saveplot(losshistory, train_state, issave = False, isplot = True)
Plot Results (Adam + L-BFGS)
samples = geom.random_points(500000)
result = model.predict(samples)
color_legend = [[0, 1.5], [-0.3, 0.3], [0, 35]]
for idx in range(3):
    plt.figure(figsize = (20, 4))
    plt.scatter(samples[:, 0],
                samples[:, 1],
                c = result[:, idx],
                s = 2,
                cmap = 'jet')
    plt.colorbar()
    plt.clim(color_legend[idx])
    plt.xlim((0-L/2, L-L/2))
    plt.ylim((0-D/2, D-D/2))
    plt.tight_layout()
    plt.show()
3.4.2 PINN with Small Data¶
Load and Sample Data
- Download data here
fluid_smalldata = np.load('/content/drive/MyDrive/Data/fluid_smalldata.npy')
observe_x = fluid_smalldata[:, :2]
observe_y = fluid_smalldata[:, 2:]
observe_u = dde.icbc.PointSetBC(observe_x, observe_y[:, 0].reshape(-1, 1), component=0)
observe_v = dde.icbc.PointSetBC(observe_x, observe_y[:, 1].reshape(-1, 1), component=1)
observe_p = dde.icbc.PointSetBC(observe_x, observe_y[:, 2].reshape(-1, 1), component=2)
Define PDE and Boundary Conditions
def boundary_wall(X, on_boundary):
    on_wall = np.logical_and(np.logical_or(np.isclose(X[1], -D/2), np.isclose(X[1], D/2)), on_boundary)
    return on_wall

def boundary_inlet(X, on_boundary):
    return on_boundary and np.isclose(X[0], -L/2)

def boundary_outlet(X, on_boundary):
    return on_boundary and np.isclose(X[0], L/2)
def pde(X, Y):
    # First derivatives of u, v, p with respect to x and y
    du_x = dde.grad.jacobian(Y, X, i = 0, j = 0)
    du_y = dde.grad.jacobian(Y, X, i = 0, j = 1)
    dv_x = dde.grad.jacobian(Y, X, i = 1, j = 0)
    dv_y = dde.grad.jacobian(Y, X, i = 1, j = 1)
    dp_x = dde.grad.jacobian(Y, X, i = 2, j = 0)
    dp_y = dde.grad.jacobian(Y, X, i = 2, j = 1)
    # Second derivatives of u and v
    du_xx = dde.grad.hessian(Y, X, i = 0, j = 0, component = 0)
    du_yy = dde.grad.hessian(Y, X, i = 1, j = 1, component = 0)
    dv_xx = dde.grad.hessian(Y, X, i = 0, j = 0, component = 1)
    dv_yy = dde.grad.hessian(Y, X, i = 1, j = 1, component = 1)
    # x-momentum, y-momentum, and continuity residuals
    pde_u = Y[:,0:1] * du_x + Y[:,1:2] * du_y + 1/rho * dp_x - (mu/rho) * (du_xx + du_yy)
    pde_v = Y[:,0:1] * dv_x + Y[:,1:2] * dv_y + 1/rho * dp_y - (mu/rho) * (dv_xx + dv_yy)
    pde_cont = du_x + dv_y
    return [pde_u, pde_v, pde_cont]
Define Geometry and Implement Boundary Conditions
geom = dde.geometry.Rectangle(xmin=[-L/2, -D/2], xmax=[L/2, D/2])
bc_wall_u = dde.DirichletBC(geom, lambda X: 0., boundary_wall, component = 0)
bc_wall_v = dde.DirichletBC(geom, lambda X: 0., boundary_wall, component = 1)
bc_inlet_u = dde.DirichletBC(geom, lambda X: u_in, boundary_inlet, component = 0)
bc_inlet_v = dde.DirichletBC(geom, lambda X: 0., boundary_inlet, component = 1)
bc_outlet_p = dde.DirichletBC(geom, lambda X: 0., boundary_outlet, component = 2)
bc_outlet_v = dde.DirichletBC(geom, lambda X: 0., boundary_outlet, component = 1)
data = dde.data.PDE(geom,
pde,
[bc_wall_u, bc_wall_v, bc_inlet_u, bc_inlet_v, bc_outlet_p, bc_outlet_v, observe_u, observe_v, observe_p],
num_domain = 1000,
num_boundary = 500,
num_test = 1000,
train_distribution = 'LHS')
plt.figure(figsize = (20,4))
plt.scatter(data.train_x_all[:,0], data.train_x_all[:,1], s = 0.5)
plt.scatter(observe_x[:, 0], observe_x[:, 1], c = observe_y[:, 0], s = 6.5, cmap = 'jet')
plt.scatter(observe_x[:, 0], observe_x[:, 1], s = 0.5, color='k', alpha = 0.5)
plt.xlim((0-L/2, L-L/2))
plt.ylim((0-D/2, D-D/2))
plt.xlabel('x-direction length (m)')
plt.ylabel('Distance from middle of plates (m)')
plt.title('Velocity (u)')
plt.show()
Define Network and Hyper-parameters
layer_size = [2] + [64] * 5 + [3]
activation = "tanh"
initializer = "Glorot uniform"
net = dde.maps.FNN(layer_size, activation, initializer)
model = dde.Model(data, net)
model.compile("adam", lr = 1e-3, loss_weights = [1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 9, 9])
Train (Adam Optimizer)
losshistory, train_state = model.train(epochs = 10000)
dde.saveplot(losshistory, train_state, issave = False, isplot = False)
Train More (L-BFGS Optimizer)
dde.optimizers.config.set_LBFGS_options(maxiter=3000)
model.compile("L-BFGS", loss_weights = [1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 9, 9])
losshistory, train_state = model.train()
dde.saveplot(losshistory, train_state, issave = False, isplot = True)
Plot Results (Adam + L-BFGS)
samples = geom.random_points(500000)
result = model.predict(samples)
color_legend = [[0, 1.5], [-0.3, 0.3], [0, 35]]
for idx in range(3):
    plt.figure(figsize = (20, 4))
    plt.scatter(samples[:, 0],
                samples[:, 1],
                c = result[:, idx],
                s = 2,
                cmap = 'jet')
    plt.colorbar()
    plt.clim(color_legend[idx])
    plt.xlim((0-L/2, L-L/2))
    plt.ylim((0-D/2, D-D/2))
    plt.tight_layout()
    plt.show()
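As an additional check (a sketch assuming the flow is essentially fully developed near the outlet), the predicted u profile at the outlet can be compared with the analytic laminar profile between parallel plates, u(y) = 1.5 * u_in * (1 - (2y/D)^2):
# Compare the PINN-predicted outlet velocity profile with the analytic fully developed laminar profile
y_line = np.linspace(-D/2, D/2, 100).astype(np.float32)
outlet_pts = np.vstack([np.full_like(y_line, L/2), y_line]).T   # points on the outlet boundary
u_pred = model.predict(outlet_pts)[:, 0]
u_analytic = 1.5 * u_in * (1 - (2 * y_line / D)**2)             # parabolic profile with mean velocity u_in

plt.plot(y_line, u_pred, label = 'PINN prediction')
plt.plot(y_line, u_analytic, '--', label = 'Analytic (fully developed)')
plt.xlabel('y (m)')
plt.ylabel('u (m/s)')
plt.legend()
plt.show()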