diff --git a/Week 5/feedback/2022-03-08 22:12:52.822426 UTC/9 Ordinary Differential Equations.html b/Week 5/feedback/2022-03-08 22:12:52.822426 UTC/9 Ordinary Differential Equations.html
new file mode 100644
index 0000000..980bbcd
--- /dev/null
+++ b/Week 5/feedback/2022-03-08 22:12:52.822426 UTC/9 Ordinary Differential Equations.html
@@ -0,0 +1,14005 @@
+ + +See lecture notes and documentation on Brightspace for Python and Jupyter basics. If you are stuck, try to google or get in touch via Discord.
+Solutions must be submitted via the Jupyter Hub.
+Make sure you fill in any place that says YOUR CODE HERE or "YOUR ANSWER HERE".
In the following you will implement your own easy-to-extend ordinary differential equation (ODE) solver, which can be used to solve first order ODE systems of the form
+\begin{align*}
+\vec{y}'(x) = \vec{f}(x, \vec{y}),
+\end{align*}
+for $x = x_0, x_1, \dots, x_n$ with $x_i = i h$, step size $h$, and an initial condition of the form $\vec{y}(x=0)$.
+The solver will be capable of using single-step as well as multi-step approaches.
+ +import numpy as np
+from matplotlib import pyplot as plt
+from matplotlib import gridspec
+Implement the Euler method in a general Python function $\text{integrator(x, y0, f, phi)}$, which takes as input a one-dimensional array $x$ of size $n+1$, the initial condition $y_0$, the callable function $f(x, y)$, and the integration scheme $\Phi(x, y, f, i)$. It should return the approximated function $\tilde{y}(x)$.
+The integration scheme $\text{phi}$ is itself a callable Python function, for example $\text{phi_euler(x, y, f, i)}$ below, where $i$ is the step number. In this way we will be able to easily extend the ODE solver to different methods.
+ +def integrator(x, y0, f, phi):
+ """
+ Numerically solves the initial value problem (IVP) given by ordinary differential
+ equation (ODE) f(x, y) = y' with initial value y0 using the integration scheme
+ provided by phi.
+
+ Args:
+ x: size n + 1 numerical array of x values
+ y0: an initial value to the function f
+ f: a callable function with signature (x, y), with x and y the current state
+ of the system, and f such that f(x, y) = y' in the ODE
+ phi: a callable function with signature (x, y, f, i), with x and y the current state
+ of the system, i the step number, and f as above, representing the integration
+ scheme to use
+
+ Returns:
+ An n + 1 numerical array representing an approximate solution to y in y' = f(x, y)
+ given initial value y0, steps from x, and using integration scheme phi.
+ """
+
+ eta = np.zeros(((len(x),) + np.shape(y0)))
+ eta[0] = y0
+
+ for i in range(1, len(eta)):
+ h = x[i] - x[i - 1]
+ eta[i] = eta[i - 1] + h*phi(x, eta, f, i)
+
+ return eta
+def phi_euler(x, y, f, i):
+ """
+    Returns the Euler scheme increment phi(x, y, f, i) = f(x[i-1], y[i-1]) used to
+    solve the ODE y' = f(x, y).
+
+ Args:
+ x: size n + 1 numerical array of x values
+ y: size n + 1 numerical array of y values with estimates up to index i - 1
+ f: a callable function with signature (x, y), with x and y the current state
+ variables of the system, and f such that f(x, y) = y' in the ODE
+ i: the current step number to calculate phi for
+
+ Returns:
+        The increment phi(x, y, f, i) = f(x[i-1], y[i-1]) of the Euler scheme.
+ """
+
+ return f(x[i - 1], y[i - 1])
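+As a quick, hand-checkable sanity test (a small sketch beyond what the task requires): a single Euler step for $y' = y$ with $y(0) = 1$ and $h = 0.1$ must give $y_1 = y_0 + h\,f(x_0, y_0) = 1.1$.
+ +# Minimal sanity check of integrator + phi_euler (not part of the task):
+# one Euler step of y' = y starting from y(0) = 1 with h = 0.1 gives 1 + 0.1*1 = 1.1.
+x_check = np.array([0.0, 0.1])
+eta_check = integrator(x_check, 1.0, lambda x, y: y, phi_euler)
+assert np.isclose(eta_check[1], 1.1)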
+Debug your implementation by applying it to the following ODE
+\begin{align*}
+\vec{y}'(x) = y - x^2 + 1.0,
+\end{align*}
+with initial condition $y(x=0) = 0.5$. To this end, define the right-hand side of the ODE as a Python function $\text{ODEF(x, y)}$, which in this case returns $f(x,y) = y - x^2 + 1.0$. You can then hand over the function $\text{ODEF}$ to the argument $\text{f}$ of your $\text{integrator}$ function.
+Plot the solution you found with the Euler method together with the exact solution $y(x) = (x+1)^2 - 0.5 e^x$.
+Then implement a unit test for your $\text{integrator}$ function using this system.
+ +# This block is for debugging. We keep it here, since debugging was part of the task.
+
+def ODEF(x, y):
+ return y - x**2 + 1.0
+
+x = np.linspace(0, 10, 3000)
+y0 = .5
+
+eta = integrator(x, y0, ODEF, phi_euler)
+y_exact = (x + 1)**2 - 0.5*np.exp(x)
+
+fig = plt.figure(figsize=(12, 8))
+gs = gridspec.GridSpec(3, 1, height_ratios=[2, 1, 1])
+
+# Above plots
+ax0 = plt.subplot(gs[0])
+ax0.plot(x, eta, label="Euler method solution")
+ax0.plot(x, y_exact, label="exact solution")
+ax0.legend()
+ax0.set_title("Solutions to $y' = y - x^2 + 1.0$")
+ax0.set_ylabel("y(x)")
+
+# Relative absolute residue plots
+ax1 = plt.subplot(gs[1])
+ax1.plot(x, np.abs(eta/y_exact - 1), label="relative absolute residue")
+ax1.legend()
+ax1.set_ylabel(r"$|\eta(x)/y(x) - 1|$")
+
+# Absolute residue plots
+ax2 = plt.subplot(gs[2])
+ax2.plot(x, np.abs(eta - y_exact), label="absolute residue")
+ax2.legend()
+ax2.set_ylabel(r"$|\eta(x) - y(x)|$")
+ax2.set_xlabel("x")
+
+plt.show()
+
+print("""
+    For the initial value problem y' = y - x^2 + 1.0 with y(0) = 0.5, taking x from 0 to 10 with 3000 points,
+    the relative error blows up near the point where the exact solution y(x) crosses zero, simply because we
+    divide by values close to zero there. Using the absolute difference instead would avoid this, but since
+    both the exact and the numerical solution diverge exponentially for large x, the absolute error also
+    becomes very large further on.
+    """)
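+As a small illustration of this point (a sketch only; it reuses eta and y_exact from the cell above, and the tolerance values are hypothetical), a mixed absolute/relative error measure in the spirit of the usual atol/rtol tolerances stays finite where the exact solution crosses zero.
+ +# Sketch: mixed absolute/relative error, which does not blow up where y_exact ~ 0.
+atol, rtol = 1e-8, 1e-4   # hypothetical tolerances, chosen only for illustration
+mixed_error = np.abs(eta - y_exact) / (atol + rtol*np.abs(y_exact))
+print("maximum mixed error:", np.max(mixed_error))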
+def test_integrator():
+ """
+ Tests the Euler algorithm defined by phi_euler and the integrator
+ using y' = y - x^2 + 1.0 with y0 = 0.5 for 0 <= x <= 3.
+ """
+
+ def ODEF(x, y):
+ return y - x**2 + 1.0
+
+ x = np.linspace(0, 3, 100000)
+ y0 = .5
+
+ eta = integrator(x, y0, ODEF, phi_euler)
+ y_exact = (x + 1)**2 - 0.5*np.exp(x)
+
+ # Assert the relative error of the approximation is lower than TOL.
+ TOL = 1e-4
+ assert np.all(np.abs(eta/y_exact - 1) < TOL)
+
+test_integrator()
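+As an additional check (a sketch, not required by the task), the integrator can already be tested on a vector-valued ODE, which will be needed for the pendulum problem later. The linear oscillator $y_0' = y_1$, $y_1' = -y_0$ with $\vec{y}(0) = (1, 0)^T$ has the exact solution $(\cos x, -\sin x)^T$.
+ +def test_integrator_vector():
+    """
+    Sketch of an optional extra test: checks that the integrator also handles
+    vector-valued ODEs, using the linear oscillator y0' = y1, y1' = -y0.
+    """
+
+    def ODEF_osc(x, y):
+        return np.array([y[1], -y[0]])
+
+    x = np.linspace(0, 2*np.pi, 60000)
+    eta = integrator(x, np.array([1.0, 0.0]), ODEF_osc, phi_euler)
+    y_exact = np.stack([np.cos(x), -np.sin(x)], axis=1)
+
+    # The Euler method is only first order, so use a fairly loose absolute tolerance.
+    assert np.max(np.abs(eta - y_exact)) < 1e-2
+
+test_integrator_vector()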
+Extend the set of possible single-step integration schemes to the modified Euler (Collatz), Heun, and 4th-order Runge-Kutta approaches by implementing the functions $\text{phi_euler_modified(x, y, f, i)}$, $\text{phi_heun(x, y, f, i)}$, and $\text{phi_rk4(x, y, f, i)}$. These can be used in your $\text{integrator}$ function instead of $\text{phi_euler}$.
+ +def phi_euler_modified(x, y, f, i):
+ """
+ Function that implements the modified Euler (Collatz) method to calculate phi at step i.
+
+ Args:
+ x: an array of x-values
+ y: an array of y-values
+ f: a callable function with scalar arguments x, y
+ i: the step i at which phi should be calculated
+
+ Returns:
+ phi: Phi calculated at step i for values x and y with function f
+ """
+
+    h = x[i] - x[i-1]
+    phi = f(x[i-1] + 0.5*h, y[i-1] + 0.5*h*f(x[i-1], y[i-1]))
+    return phi
+
+def phi_heun(x, y, f, i):
+ """
+ Function that implements the Heun method to calculate phi at step i.
+
+ Args:
+ x: an array of x-values
+ y: an array of y-values
+ f: a callable function with scalar arguments x, y
+ i: the step i at which phi should be calculated
+
+ Returns:
+ phi: Phi calculated at step i for values x and y with function f
+ """
+
+    h = x[i] - x[i-1]
+    phi = 0.5*(f(x[i-1], y[i-1]) + f(x[i-1] + h, y[i-1] + h*f(x[i-1], y[i-1])))
+    return phi
+
+def phi_rk4(x, y, f, i):
+ """
+ Function that implements the 4th order Runge-Kutta method to calculate phi at step i.
+
+ Args:
+ x: an array of x-values
+ y: an array of y-values
+ f: a callable function with scalar arguments x, y
+ i: the step i at which phi should be calculated
+
+ Returns:
+ phi: Phi calculated at step i for values x and y with function f
+ """
+
+    h = x[i] - x[i-1]
+    k1 = f(x[i-1], y[i-1])
+    k2 = f(x[i-1] + 0.5*h, y[i-1] + 0.5*h*k1)
+    k3 = f(x[i-1] + 0.5*h, y[i-1] + 0.5*h*k2)
+    k4 = f(x[i-1] + h, y[i-1] + h*k3)
+    phi = (1/6)*(k1 + 2*k2 + 2*k3 + k4)
+    return phi
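+A quick consistency check of the new schemes (a sketch, not required by the task): on the same grid, each higher-order scheme should reproduce the exact solution of the task-2 ODE at least as accurately as the plain Euler method, and the 4th-order Runge-Kutta result should be the most accurate of all.
+ +def test_single_step_schemes():
+    """
+    Sketch of a consistency check for the single-step schemes on the task-2 ODE.
+    """
+
+    def ODEF(x, y):
+        return y - x**2 + 1.0
+
+    x = np.linspace(0, 2, 201)   # step size h = 0.01
+    y_exact = (x + 1)**2 - 0.5*np.exp(x)
+
+    errors = {}
+    for phi, name in [(phi_euler, "Euler"), (phi_euler_modified, "Collatz"),
+                      (phi_heun, "Heun"), (phi_rk4, "RK4")]:
+        errors[name] = np.max(np.abs(integrator(x, 0.5, ODEF, phi) - y_exact))
+
+    assert errors["Collatz"] < errors["Euler"]
+    assert errors["Heun"] < errors["Euler"]
+    assert errors["RK4"] < min(errors["Collatz"], errors["Heun"])
+
+test_single_step_schemes()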
+Add the possibility to also handle the following multi-step integration schemes:
+\begin{align*}
+\Phi_{AB3}(x, y, f, i) = \frac{1}{12} \left[ 23 f(x_i, y_i) - 16 f(x_{i-1}, y_{i-1}) + 5 f(x_{i-2}, y_{i-2}) \right]
+\end{align*}
+and
+\begin{align*}
+\Phi_{AB4}(x, y, f, i) = \frac{1}{24} \left[ 55 f(x_i, y_i) - 59 f(x_{i-1}, y_{i-1}) + 37 f(x_{i-2}, y_{i-2}) - 9 f(x_{i-3}, y_{i-3}) \right]
+\end{align*}
+In these cases the initial condition $\text{y0}$ must be an array consisting of several initial values corresponding to $y_0$, $y_1$, $y_2$ (and $y_3$). Use the Runge-Kutta method to calculate all of the initial values before you start the AB3 and AB4 integrations.
+ +def phi_ab3(x, y, f, i):
+ """
+ Function that implements the AB3 method to calculate phi at step i, using the
+ 4th order Runge-Kutta method for the first three values.
+
+ Args:
+ x: an array of x-values
+ y: an array of y-values
+ f: a callable function with scalar arguments x, y
+ i: the step i at which phi should be calculated
+
+ Returns:
+ phi: Phi calculated at step i for values x and y with function f
+ """
+
+    # The first three y values (y_0, y_1, y_2) are calculated using the Runge-Kutta method.
+ if i < 3:
+ return phi_rk4(x, y, f, i)
+
+ phi = (1/12)*( 23*f(x[i - 1], y[i - 1]) - 16*f(x[i - 2], y[i - 2]) + 5*f(x[i - 3], y[i - 3]) )
+ return phi
+
+def phi_ab4(x, y, f, i):
+ """
+ Function that implements the AB4 method to calculate phi at step i, using the
+ 4th order Runge-Kutta method for the first four values.
+
+ Args:
+ x: an array of x-values
+ y: an array of y-values
+ f: a callable function with scalar arguments x, y
+ i: the step i at which phi should be calculated
+
+ Returns:
+ phi: Phi calculated at step i for values x and y with function f
+ """
+
+    # The first four y values (y_0, ..., y_3) are calculated using the Runge-Kutta method.
+ if i < 4:
+ return phi_rk4(x, y, f, i)
+
+ phi = (1/24)*( 55*f(x[i - 1], y[i - 1]) - 59*f(x[i - 2], y[i - 2]) + 37*f(x[i - 3], y[i - 3]) - 9*f(x[i - 4], y[i - 4]) )
+ return phi
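+The same kind of quick check works for the multi-step schemes (again only a sketch): the start-up increments must coincide with the RK4 increments by construction, and on the task-2 ODE both AB3 and AB4 should clearly beat the plain Euler method.
+ +def test_adams_bashforth():
+    """
+    Sketch of a consistency check for the AB3 and AB4 schemes on the task-2 ODE.
+    """
+
+    def ODEF(x, y):
+        return y - x**2 + 1.0
+
+    x = np.linspace(0, 2, 201)   # step size h = 0.01
+    y_exact = (x + 1)**2 - 0.5*np.exp(x)
+
+    eta_euler = integrator(x, 0.5, ODEF, phi_euler)
+    eta_ab3 = integrator(x, 0.5, ODEF, phi_ab3)
+    eta_ab4 = integrator(x, 0.5, ODEF, phi_ab4)
+
+    # During start-up the AB schemes fall back to RK4 increments by construction.
+    assert phi_ab3(x, eta_ab3, ODEF, 2) == phi_rk4(x, eta_ab3, ODEF, 2)
+    assert phi_ab4(x, eta_ab4, ODEF, 3) == phi_rk4(x, eta_ab4, ODEF, 3)
+
+    assert np.max(np.abs(eta_ab3 - y_exact)) < np.max(np.abs(eta_euler - y_exact))
+    assert np.max(np.abs(eta_ab4 - y_exact)) < np.max(np.abs(eta_euler - y_exact))
+
+test_adams_bashforth()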
+Plot the absolute errors $\delta(x) = |\tilde{y}(x) - y(x)|$ with a $y$-log scale for all approaches with $0 \leq x \leq 2$ and a step size of $h=0.02$ for the ODE from task 2.
+ +algos = [
+ (phi_euler, "Euler"),
+ (phi_euler_modified, "modified Euler (Collatz)"),
+ (phi_heun, "Heun"),
+ (phi_rk4, "4th order Runge-Kutta"),
+ (phi_ab3, "AB3"),
+ (phi_ab4, "AB4"),
+]
+
+def ODEF(x, y):
+ return y - x**2 + 1.0
+
+h = .02
+x = np.arange(0, 2 + h/2, h)  # include the endpoint x = 2
+y = (x + 1)**2 - 0.5*np.exp(x)
+y0 = .5
+eta = np.zeros(len(x))
+
+fig = plt.figure(figsize=(12, 12))
+gs = gridspec.GridSpec(2, 1, height_ratios=[2, 2])
+
+ax0 = plt.subplot(gs[0])
+ax0.plot(x, y, label="exact solution", color="springgreen")
+ax0.set_yscale("log")
+ax0.set_title("Solutions to $y' = y - x^2 + 1.0$")
+
+ax1 = plt.subplot(gs[1])
+ax1.set_ylabel(r"$\delta(x) = |\tilde{y}(x) - y(x)|$")
+ax1.set_yscale("log")
+
+for alg, alg_name in algos:
+ eta = integrator(x, y0, ODEF, alg)
+ ax0.plot(x, eta, label=alg_name)
+ ax1.plot(x, np.abs(eta - y), label=alg_name)
+
+ax0.legend()
+plt.xlabel("x")
+ax0.set_ylabel("y(x)")
+ax1.legend()
+plt.show()
+Study the accuracies of all approaches as a function of the step size $h$. To this end use your implementations to solve
+\begin{align*}
+\vec{y}'(x) = y
+\end{align*}
+with $y(x=0) = 1.0$ for $0 \leq x \leq 2$.
+Plot $\delta(2) = |\tilde{y}(2) - y(2)|$ as a function of $h$ for each integration scheme.
+ +# We reuse the algos list and the plotting imports from the previous blocks.
+# The task asks for the ODE y' = y with y(0) = 1, so we define its right-hand side here.
+
+def ODEF_exp(x, y):
+    return y
+
+h_list = np.logspace(0, -6, 7)
+delta2 = np.zeros(len(h_list))
+y0 = 1.0
+
+plt.figure(figsize=(12, 8))
+plt.ylabel(r"$\delta(2) = |\tilde{y}(2) - y(2)|$")
+
+for alg, alg_name in algos:
+    for i in range(len(h_list)):
+        # Include the endpoint x = 2, so that eta[-1] approximates y(2).
+        x = np.arange(0, 2 + h_list[i]/2, h_list[i])
+
+        eta = integrator(x, y0, ODEF_exp, alg)
+        delta2[i] = np.abs(eta[-1] - np.exp(x[-1]))
+    plt.loglog(h_list, delta2, label=alg_name)
+
+plt.xlabel("h")
+plt.minorticks_on()
+plt.legend()
+plt.show()
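+To make this plot more quantitative (a sketch, not part of the task; it assumes algos, integrator, ODEF_exp and y0 from the cell above are still defined), the empirical order of convergence of each scheme can be estimated from the slope of $\log \delta(2)$ versus $\log h$. The slopes should come out close to the theoretical orders (1 for Euler, 2 for the modified Euler and Heun methods, 3 for AB3, and 4 for RK4 and AB4), provided the step sizes stay away from the round-off-dominated regime.
+ +# Sketch: empirical convergence orders from a straight-line fit in log-log space.
+# Only moderate step sizes are used, to stay away from round-off effects at very small h.
+h_fit = np.logspace(-1, -3, 3)
+for alg, alg_name in algos:
+    err = np.zeros(len(h_fit))
+    for i, h in enumerate(h_fit):
+        x = np.arange(0, 2 + h/2, h)
+        eta = integrator(x, y0, ODEF_exp, alg)
+        err[i] = np.abs(eta[-1] - np.exp(x[-1]))
+    slope, _ = np.polyfit(np.log(h_fit), np.log(err), 1)
+    print(f"{alg_name}: empirical order of convergence ~ {slope:.1f}")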
+Apply the Euler and the Runge-Kutta methods to solve the pendulum problem given by
+\begin{align*}
+\vec{y}'(x) = \vec{f}(x, \vec{y})
+\quad \leftrightarrow \quad
+\begin{pmatrix} y_0'(x) \\ y_1'(x) \end{pmatrix}
+=
+\begin{pmatrix} y_1(x) \\ -\sin\left[y_0(x)\right] \end{pmatrix}
+\end{align*}
+To this end the quantities $\text{x}$ and $\text{y0}$ in your $\text{integrator}$ function must become vectors. Depending on your implementation in task 1 you might have to define a new $\text{integrator}$ function that can handle this type of input.
+Plot $y_0(x)$ for several oscillation periods. Does the Euler method behave physically?
+ +def ODEF_pendulum(x, y):
+ return np.array([y[1], -np.sin(y[0])])
+
+# The settings we can alter:
+h = .01
+periods = 28
+y0 = np.array([1., 1.])
+
+x = np.arange(0, periods*2*np.pi, h)
+eta = np.zeros((len(x), 2))
+
+fig = plt.figure(figsize=(16, 24))
+gs = gridspec.GridSpec(3, 1, height_ratios=[4, 2, 2])
+
+# Plotting the trajectories
+ax0 = plt.subplot(gs[0])
+ax0.set_title("Trajectories of pendulum with initial values $\\vec{y}(x=0) = (1.,1.)^T$, $h = .01$, for 28 periods")
+ax0.set_xlabel("$y_0(x)$")
+ax0.set_ylabel("$y_1(x)$")
+
+# Plotting eta_0 (or estimated y_0)
+ax1 = plt.subplot(gs[1])
+ax1.set_title("$y_0$ of pendulum with initial values $\\vec{y}(x=0) = (1.,1.)^T$, $h = .01$, for 28 periods")
+ax1.set_xlabel("$x$")
+ax1.set_ylabel("$y_0$")
+
+# Plotting eta_01 (or estimated y_1)
+ax2 = plt.subplot(gs[2])
+ax2.set_title("$y_1$ of pendulum with initial values $\\vec{y}(x=0) = (1.,1.)^T$, $h = .01$, for 28 periods")
+ax2.set_xlabel("$x$")
+ax2.set_ylabel("$y_1$")
+
+for alg, alg_name in [(phi_euler, "Euler"), (phi_rk4, "4th order Runge-Kutta")]:
+ eta = integrator(x, y0, ODEF_pendulum, alg)
+ ax0.plot(eta[:, 0], eta[:, 1], label=alg_name)
+ ax1.plot(x, eta[:, 0], label=alg_name)
+ ax2.plot(x, eta[:, 1], label=alg_name)
+
+ax0.legend()
+ax1.legend()
+ax2.legend()
+plt.show()
+
+print("""
+    The solutions differ greatly. The Euler method solution keeps gaining amplitude, i.e. energy:
+    its trajectory spirals outwards in the (y_0, y_1) plane, which is unphysical for an undamped,
+    undriven pendulum. The 4th order Runge-Kutta solution stays on a closed trajectory with an
+    essentially constant amplitude over all 28 periods.
+    """)
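+A complementary, more quantitative check (a sketch beyond what the task asks for, reusing x, y0 and ODEF_pendulum from above): the pendulum conserves the energy $E = \tfrac{1}{2} y_1^2 - \cos y_0$ exactly, so plotting $E$ along each numerical solution shows the problem directly. The Euler energy is expected to drift upwards, while the Runge-Kutta energy should stay essentially constant.
+ +# Sketch: energy E = y1^2/2 - cos(y0) along each numerical solution. E is an exact
+# invariant of the pendulum, so any drift in it is a purely numerical artefact.
+plt.figure(figsize=(12, 6))
+for alg, alg_name in [(phi_euler, "Euler"), (phi_rk4, "4th order Runge-Kutta")]:
+    eta = integrator(x, y0, ODEF_pendulum, alg)
+    energy = 0.5*eta[:, 1]**2 - np.cos(eta[:, 0])
+    plt.plot(x, energy, label=alg_name)
+plt.xlabel("$x$")
+plt.ylabel(r"$E = \frac{1}{2} y_1^2 - \cos y_0$")
+plt.title("Energy along the numerical pendulum solutions")
+plt.legend()
+plt.show()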
+