Experimentally verifying order of convergence of SDE

214 Views Asked by At

I am trying to experimentally verify strong and weak orders of convergence of Euler-Maruyama and Milstein methods for the following SDE.

$$ \begin{cases} \begin{align} dX_t&=9.5(Y_t-X_t)dt+(Y_t-X_t)dW_t\\ dY_t&=(28X_t-Y_t-X_tZ_t)dt\\ dZ_t&=(X_tY_t-\frac{8}{3}Z_t)dt \end{align} \end{cases} $$

I got

$$ \begin{cases} \begin{align} X_{n+1}&=X_n+9.5(Y_n-X_n)\Delta t+(Y_n-X_n)\Delta W_n\\ Y_{n+1}&=Y_n+(28X_n-Y_n-X_nZ_n)\Delta t\\ Z_{n+1}&=Z_n+(X_nY_n-\frac{8}{3}Z_n)\Delta t \end{align} \end{cases} $$

for Euler-Maruyama method and

$$ \begin{cases} \begin{align} X_{n+1}&=X_n+9.5(Y_n-X_n)\Delta t+(Y_n-X_n)\Delta W_n-0.5(Y_n-X_n)(\Delta W_n^2-\Delta t)\\ Y_{n+1}&=Y_n+(28X_n-Y_n-X_nZ_n)\Delta t\\ Z_{n+1}&=Z_n+(X_nY_n-\frac{8}{3}Z_n)\Delta t \end{align} \end{cases} $$

for Milstein method.

I have set $[0,1]$ as the interval and $3.125\times10^{-4}$ as a reference $dt$ for approximating the exact solution since I do not think there is an analytic solution to the SDE above.

I have written following Python code to run experiments and plot graphs.

import numpy as np
import matplotlib.pyplot as plt

# Initial condition of the Lorenz-type system.
x0 = 1.508870
y0 = -1.531271
z0 = 25.246091

# Integrate over [0, t] with the fine reference step dt.
t = 1
dt = 3.125e-4
n = int(t / dt)  # number of fine steps
# Coarse step sizes: 32, 16, 8, 4 and 2 times the reference dt.
sample_dts = dt * 2.0 ** np.arange(5, 0, -1)
number_of_samples = 10000

def euler_maruyama(a, dt, dw):
    """One Euler-Maruyama step for the system; a is the state (x, y, z).

    Only the x-component carries noise, with diffusion coefficient (y - x);
    dw is the Brownian increment over the step of length dt.
    """
    x, y, z = a
    new_x = x + 9.5 * (y - x) * dt + (y - x) * dw
    new_y = y + (28 * x - y - x * z) * dt
    new_z = z + (x * y - 8 / 3 * z) * dt
    return np.array([new_x, new_y, new_z])

def milstein(a, dt, dw):
    """One Milstein step for the system; a is the state (x, y, z).

    The diffusion coefficient is b(x, y) = y - x with db/dx = -1, so the
    Milstein correction 0.5 * b * b' * (dw^2 - dt) equals
    -0.5 * (y - x) * (dw^2 - dt).  The y and z components are noise-free
    and step exactly as in Euler-Maruyama.
    """
    x, y, z = a
    diffusion = y - x
    correction = -0.5 * diffusion * (dw * dw - dt)
    new_x = x + 9.5 * diffusion * dt + diffusion * dw + correction
    new_y = y + (28 * x - y - x * z) * dt
    new_z = z + (x * y - 8 / 3 * z) * dt
    return np.array([new_x, new_y, new_z])

def simulate(initial_condition, step_function, weak=False, *,
             dt=None, n=None, sample_dts=None, number_of_samples=None):
    """Estimate strong or weak errors of `step_function` at the final time.

    A reference solution is computed with the fine step dt; the same scheme
    is then run with each coarse step in sample_dts, and the error at the
    final time is averaged over number_of_samples independent paths.

    The keyword parameters default to the module-level globals of the same
    name, so existing call sites are unchanged.

    Returns an array of shape (len(sample_dts), len(initial_condition)).
    """
    # Backward-compatible fallback to the module-level experiment setup.
    g = globals()
    dt = g["dt"] if dt is None else dt
    n = g["n"] if n is None else n
    sample_dts = g["sample_dts"] if sample_dts is None else sample_dts
    number_of_samples = g["number_of_samples"] if number_of_samples is None else number_of_samples

    y = np.tile(initial_condition, (1, number_of_samples))
    x = np.tile(initial_condition, (len(sample_dts), 1, number_of_samples))
    if not weak:
        # Strong error: fine and coarse paths must be driven by the SAME
        # Brownian motion, so draw all fine increments up front.
        dw = np.random.normal(scale=np.sqrt(dt), size=(n, number_of_samples))
        for i in range(n):
            y = step_function(y, dt, dw[i])
            for j, sample_dt in enumerate(sample_dts):
                scale = int(round(sample_dt / dt))  # fine steps per coarse step
                if i % scale == 0:
                    # BUG FIX: i is already a fine-step index, so the coarse
                    # Brownian increment over [i*dt, (i+scale)*dt] is the sum
                    # of dw[i:i+scale].  The original dw[i*scale:(i+1)*scale]
                    # read a wrong (and mostly out-of-range, hence empty)
                    # window, decoupling the coarse paths from the reference.
                    x[j] = step_function(x[j], sample_dt, np.sum(dw[i:i + scale], axis=0))
        errors = np.mean(np.abs(y - x), axis=2)
    else:
        # Weak error: only distributions matter, so the coarse paths may be
        # driven by Brownian motions independent of the reference path.
        for i in range(n):
            y = step_function(y, dt, np.random.normal(scale=np.sqrt(dt), size=number_of_samples))
        for i, sample_dt in enumerate(sample_dts):
            scale = int(round(sample_dt / dt))
            # n // scale coarse steps span the same interval [0, n*dt];
            # the original int(1 / sample_dt) hard-coded a horizon of t = 1.
            for j in range(n // scale):
                x[i] = step_function(x[i], sample_dt, np.sqrt(sample_dt) * np.random.randn(number_of_samples))
        errors = np.abs(np.mean(y, axis=1) - np.mean(x, axis=2))
    return errors

# Euler-Maruyama: estimate strong and weak errors for every coarse step size.
euler_maruyama_strong_errors = simulate(
    np.array([x0, y0, z0]).reshape((3, 1)), euler_maruyama
)
euler_maruyama_weak_errors = simulate(
    np.array([x0, y0, z0]).reshape((3, 1)), euler_maruyama, weak=True
)

# One figure per component: strong errors on top, weak errors below.
for component in range(3):
    strong_axes = plt.subplot(2, 1, 1)
    strong_axes.plot(sample_dts, euler_maruyama_strong_errors[:, component])
    weak_axes = plt.subplot(2, 1, 2)
    weak_axes.plot(sample_dts, euler_maruyama_weak_errors[:, component])
    plt.tight_layout()
    plt.show()

# Milstein: estimate strong and weak errors for every coarse step size.
milstein_strong_errors = simulate(
    np.array([x0, y0, z0]).reshape((3, 1)), milstein
)
milstein_weak_errors = simulate(
    np.array([x0, y0, z0]).reshape((3, 1)), milstein, weak=True
)

# One figure per component: strong errors on top, weak errors below.
for component in range(3):
    strong_axes = plt.subplot(2, 1, 1)
    strong_axes.plot(sample_dts, milstein_strong_errors[:, component])
    weak_axes = plt.subplot(2, 1, 2)
    weak_axes.plot(sample_dts, milstein_weak_errors[:, component])
    plt.tight_layout()
    plt.show()

def order_of_convergence(dts, errors):
    """Least-squares slope of log(errors) against log(dts).

    Fits log(error) = c + p * log(dt) and returns p, the empirical order of
    convergence.  `errors` may be 1-D or 2-D (one column per component).
    """
    log_dts = np.log(dts)
    design = np.column_stack((np.ones_like(log_dts), log_dts))
    coefficients, *_ = np.linalg.lstsq(design, np.log(errors), rcond=None)
    return coefficients[1]

I have learned that Euler-Maruyama method has strong order of convergence $\sqrt{\Delta t}$ and weak order of convergence $\Delta t$ and Milstein method has both strong and weak order of convergence $\Delta t$. I have also learned that Milstein method is better than Euler-Maruyama method in terms of magnitude of errors. And I am using following definitions for strong and weak order of convergence.

$$ E\{|X_T-X_N|\}\le K(\Delta t)^j\\ |E\{X_T\}-E\{X_N\}|\le K(\Delta t)^j $$

However, running the code above and analyzing order of convergence using order_of_convergence function returns the following results.

$x$ $y$ $z$
Euler-Maruyama strong order 0.4126 0.3708 0.3154
Euler-Maruyama weak order 1.1720 1.1252 1.7638
Milstein strong order 0.4156 0.3686 0.3162
Milstein weak order 1.2197 1.1134 1.4276

Values do not match expectations and Milstein method is almost identical to Euler-Maruyama method and does not yield smaller magnitude of errors.

Is my Milstein equation wrong or experiment setup wrong?