problems in solving 2D Allen Cahn equation
liu-ziyuan-math opened this issue · comments
Hi Mikael and sorry to bother you again, I try solving 2D Allen Cahn equation but the solution shenfun returned seems not correct, since generally the solution of ACE will converge to 1 and -1 after sufficient time. Below is the toy experiment.
import matplotlib
matplotlib.use('TkAgg')
from time import time
import numpy as np
import matplotlib.pyplot as plt
import sympy as sp
from shenfun import *
from copy import deepcopy
# Use sympy to set up initial condition
x, y = sp.symbols("x, y")
u0 = sp.sin(sp.pi*x)*sp.sin(sp.pi*y)/2
# parameters for AC equation
eps = 0.02
Nx = 100 + 1
CX = FunctionSpace(Nx, 'C', quad='GL', bc={'left':('D',0), 'right':('D',0)})
CY = FunctionSpace(Nx, 'C', quad='GL', bc={'left':('D',0), 'right':('D',0)})
C = TensorProductSpace(comm, (CX, CY))
u = TrialFunction(C)
v = TestFunction(C)
def LinearRHS(self, u, **par):
return eps * div(grad(u)) #+ u
def NonlinearRHS(self, u, u_hat, rhs, **params):
# u-u**3
rhs.fill(0)
ub = u_hat.backward(padding_factor=2)
ub[:] = -ub**3 + ub
rhs = ub.forward(rhs)
return rhs
u_ = Array(C, buffer=u0)
u_hat = Function(C)
u_hat = u_.forward(u_hat)
def update(self, u, u_hat, t, tstep, **kw):
if tstep % 100 == 0:
print(tstep)
print(np.max(np.abs(u_hat.backward())))
# fig, ax = plt.subplots()
# X, Y = np.meshgrid(*C.mesh())
# cs = ax.contourf(X, Y, u_hat.backward(), cmap=plt.get_cmap('Spectral'))
# # ax.plot_surface(-XCGL, -YCGL, u0.backward())
# cbar = fig.colorbar(cs)
# plt.show()
X, Y = np.meshgrid(*C.mesh())
fig = plt.figure()
plt.pcolor(X, Y, u_hat.backward(), shading='auto', cmap="jet")
plt.colorbar()
plt.show()
#dt = 1/Nx
dt = 0.001
end_time = 10
#integrator = BackwardEuler(C, L=LinearRHS, N=NonlinearRHS, update=update)
integrator = IRK3(C, L=LinearRHS, N=NonlinearRHS, update=update)
integrator.setup(dt)
u_hat = integrator.solve(u_, u_hat, dt, (0, end_time))
Hi
As far as I can tell you solve a problem with homogeneous Dirichlet boundary conditions and the solution should then converge to 0 and not +1 or -1?
There is an error in the NonlinearRHS, that needs to be a scalar product and not a complete forward transform. The following should work:
def NonlinearRHS(self, u, u_hat, rhs, **params):
# u-u**3
rhs.fill(0)
ub = u_hat.backward(padding_factor=2)
uh = (-ub**3 + ub).forward()
rhs = inner(v, uh, output_array=rhs)
return rhs