diff --git a/Week 3/7 Linear Equation Systems.ipynb b/Week 3/7 Linear Equation Systems.ipynb
index d2daf7b..0ef5a4f 100644
--- a/Week 3/7 Linear Equation Systems.ipynb
+++ b/Week 3/7 Linear Equation Systems.ipynb
@@ -574,7 +574,8 @@
     "    assert len(A.shape) == 2 and A.shape[0] == A.shape[1]\n",
     "    \n",
     "    n = len(A)\n",
-    "    x_cur = np.zeros(n)\n",
+    "    \n",
+    "    x_cur = np.zeros(n)\n",
     "    x_prev = np.zeros(n)\n",
     "    \n",
     "    k = 0\n",
@@ -583,7 +584,7 @@
     "        x_prev = x_cur.copy()\n",
     "        \n",
     "        v = b - A @ x_prev\n",
-    "        t = np.dot(v,v)/np.dot(v, A @ v)\n",
+    "        t = np.dot(v, v)/np.dot(v, A @ v)\n",
     "        x_cur = x_prev.copy() + t*v\n",
     "        \n",
     "    return x_cur, k"
@@ -705,7 +706,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 9,
    "metadata": {
     "deletable": false,
     "nbgrader": {
@@ -722,14 +723,41 @@
    },
    "outputs": [],
    "source": [
-    "def CG(A, b, eps):\n",
-    "    # YOUR CODE HERE\n",
-    "    raise NotImplementedError()"
+    "def CG(A, b, eps, k_max = 10000):\n",
+    "    \"\"\"\n",
+    "    Return the Conjugate Gradient algorithm estimate solution x to the problem\n",
+    "    Ax = b and the number of iterations k it took to decrease maximum\n",
+    "    norm error below eps or to exceed iteration maximum k_max.\n",
+    "    \"\"\"\n",
+    "    \n",
+    "    # Assert n by n matrix.\n",
+    "    assert len(A.shape) == 2 and A.shape[0] == A.shape[1]\n",
+    "    \n",
+    "    n = len(A)\n",
+    "    \n",
+    "    x_cur = np.zeros(n)\n",
+    "    x_prev = x_cur.copy()\n",
+    "    r_cur = b - A @ x_cur\n",
+    "    v = r_cur\n",
+    "    \n",
+    "    k = 0\n",
+    "    while diff(x_cur, x_prev) > eps and k < k_max or k == 0:\n",
+    "        k += 1\n",
+    "        x_prev = x_cur.copy()\n",
+    "        r_prev = r_cur\n",
+    "        \n",
+    "        t = np.dot(r_prev, r_prev)/np.dot(v, A @ v)\n",
+    "        x_cur = x_prev + t*v\n",
+    "        r_cur = r_prev - t*A @ v\n",
+    "        s = np.dot(r_cur, r_cur)/np.dot(r_prev, r_prev)\n",
+    "        v = r_cur + s*v\n",
+    "        \n",
+    "    return x_cur, k"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 10,
    "metadata": {
     "deletable": false,
     "nbgrader": {
@@ -744,11 +772,56 @@
      "task": false
     }
    },
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Starting with A =\n",
+      "[[10 -1  2  0]\n",
+      " [-1 11 -1  3]\n",
+      " [ 2 -1 10 -1]\n",
+      " [ 0  3 -1  8]]\n",
+      "and b = [  6  25 -11  15]\n",
+      "We apply the Conjugate Gradient algorithm to solve Ax = b.\n",
+      "\n",
+      "For eps = 1e-01\tafter k = 4\t iterations:\n",
+      "x =\t\t\t [ 1.  2. -1.  1.]\n",
+      "Ax =\t\t\t [  6.  25. -11.  15.]\n",
+      "diff(Ax, b) =\t\t 1.7763568394002505e-15\n",
+      "diff(x, x_exact) =\t 2.220446049250313e-16\n",
+      "\n",
+      "For eps = 1e-02\tafter k = 5\t iterations:\n",
+      "x =\t\t\t [ 1.  2. -1.  1.]\n",
+      "Ax =\t\t\t [  6.  25. -11.  15.]\n",
+      "diff(Ax, b) =\t\t 1.7763568394002505e-15\n",
+      "diff(x, x_exact) =\t 2.220446049250313e-16\n",
+      "\n",
+      "For eps = 1e-03\tafter k = 5\t iterations:\n",
+      "x =\t\t\t [ 1.  2. -1.  1.]\n",
+      "Ax =\t\t\t [  6.  25. -11.  15.]\n",
+      "diff(Ax, b) =\t\t 1.7763568394002505e-15\n",
+      "diff(x, x_exact) =\t 2.220446049250313e-16\n",
+      "\n",
+      "For eps = 1e-04\tafter k = 5\t iterations:\n",
+      "x =\t\t\t [ 1.  2. -1.  1.]\n",
+      "Ax =\t\t\t [  6.  25. -11.  15.]\n",
+      "diff(Ax, b) =\t\t 1.7763568394002505e-15\n",
+      "diff(x, x_exact) =\t 2.220446049250313e-16\n",
+      "\n"
+     ]
+    }
+   ],
    "source": [
     "def test_CG():\n",
-    "    # YOUR CODE HERE\n",
-    "    raise NotImplementedError()\n",
+    "    \"\"\"\n",
+    "    Check that CG returns solutions for the example system Ax = b\n",
+    "    within the error defined by the same eps as used for the iteration.\n",
+    "    \"\"\"\n",
+    "    \n",
+    "    # TODO: Do we need to plot the error over epsilon like in task 4?\n",
+    "    \n",
+    "    return test_alg(CG, \"Conjugate Gradient\")\n",
     "    \n",
     "test_CG()"
    ]