diff --git a/Week 4/8 Eigenvalues and Eigenvectors.ipynb b/Week 4/8 Eigenvalues and Eigenvectors.ipynb
new file mode 100644
index 0000000..c3a2616
--- /dev/null
+++ b/Week 4/8 Eigenvalues and Eigenvectors.ipynb
@@ -0,0 +1,645 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": false,
+    "editable": false,
+    "nbgrader": {
+     "cell_type": "markdown",
+     "checksum": "4ec40081b048ce2f34f3f4fedbb0be10",
+     "grade": false,
+     "grade_id": "cell-98f724ece1aacb67",
+     "locked": true,
+     "schema_version": 3,
+     "solution": false,
+     "task": false
+    }
+   },
+   "source": [
+    "# CDS: Numerical Methods Assignments\n",
+    "\n",
+    "- See lecture notes and documentation on Brightspace for Python and Jupyter basics. If you are stuck, try googling or get in touch via Discord.\n",
+    "\n",
+    "- Solutions must be submitted via the Jupyter Hub.\n",
+    "\n",
+    "- Make sure you fill in any place that says `YOUR CODE HERE` or \"YOUR ANSWER HERE\".\n",
+    "\n",
+    "## Submission\n",
+    "\n",
+    "1. Name all team members in the cell below\n",
+    "2. Make sure everything runs as expected\n",
+    "3. **Restart the kernel** (in the menubar, select Kernel$\\rightarrow$Restart)\n",
+    "4. **Run all cells** (in the menubar, select Cell$\\rightarrow$Run All)\n",
+    "5. Check all outputs (Out[\\*]) for errors and **resolve them if necessary**\n",
+    "6. Submit your solutions **in time (before the deadline)**"
+   ]
+  },
+  {
+   "cell_type": "raw",
+   "metadata": {},
+   "source": [
+    "team_members = \"Kees van Kempen, Koen Vendrig\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": false,
+    "editable": false,
+    "nbgrader": {
+     "cell_type": "markdown",
+     "checksum": "26a1ebc121c0cc223b0079b3fcb1d606",
+     "grade": false,
+     "grade_id": "cell-79ad6e3d4ad2eb15",
+     "locked": true,
+     "schema_version": 3,
+     "solution": false,
+     "task": false
+    }
+   },
+   "source": [
+    "## Eigenvalues and Eigenvectors\n",
+    "\n",
+    "In the following you will implement your own eigenvalue / eigenvector calculation routines based on the inverse power method and the iterated QR decomposition."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "deletable": false,
+    "nbgrader": {
+     "cell_type": "code",
+     "checksum": "5727ddddb338b9fd153d42af584e4e92",
+     "grade": true,
+     "grade_id": "cell-c6c3a2556ae5174d",
+     "locked": false,
+     "points": 0,
+     "schema_version": 3,
+     "solution": true,
+     "task": false
+    }
+   },
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "import math as m"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": false,
+    "editable": false,
+    "nbgrader": {
+     "cell_type": "markdown",
+     "checksum": "1bcdeef3b38d48f61ea06b7c63fed895",
+     "grade": false,
+     "grade_id": "cell-ed71eb4882ee221a",
+     "locked": true,
+     "schema_version": 3,
+     "solution": false,
+     "task": false
+    }
+   },
+   "source": [
+    "### Task 1\n",
+    "We start by implementing the inverse power method, which calculates the eigenvector corresponding to the eigenvalue closest to a given parameter $\\sigma$. In detail, you should implement a Python function $\\text{inversePower(A, sigma, eps)}$, which takes as input the $n \\times n$ square matrix $A$, the parameter $\\sigma$, as well as some accuracy $\\varepsilon$. It should return the eigenvector $\\mathbf{v}$ (corresponding to the eigenvalue closest to $\\sigma$) and the number of iteration steps needed.\n",
+    "\t\n",
+    "To do so, implement the following algorithm.\n",
+    "Start by setting up the needed input:\n",
+    "\n",
+    "\\begin{align}\n",
+    "    B &= \\left( A - \\sigma \\mathbf{1} \\right)^{-1} \\\\\n",
+    "    \\mathbf{b}^{(0)} &= (1,1,1,...)\n",
+    "\\end{align}\n",
+    "\n",
+    "where $\\mathbf{b}^{(0)}$ is a vector with $n$ entries. Then repeat the following and increase $k$ each iteration until the error $e$ is smaller than $\\varepsilon$:\n",
+    "\n",
+    "\\begin{align}\n",
+    "    \\mathbf{b}^{(k)} &= B \\cdot \\mathbf{b}^{(k-1)} \\\\\n",
+    "    \\mathbf{b}^{(k)} &= \\frac{\\mathbf{b}^{(k)}}{|\\mathbf{b}^{(k)}|} \\\\\n",
+    "    e &= \\sqrt{ \\sum_{i=0}^{n-1} \\left(|b_i^{(k-1)}| - |b_i^{(k)}|\\right)^2 }\n",
+    "\\end{align}\n",
+    "\n",
+    "Return the last vector $\\mathbf{b}^{(k)}$ and the number of iterations $k$ needed.\n",
+    "\n",
+    "Verify your implementation by calculating all the eigenvectors of the matrix below and comparing them to the ones calculated by $\\text{numpy.linalg.eig()}$. Then implement a unit test for your function.\n",
+    "\n",
+    "\\begin{align*}\n",
+    "    A = \\begin{pmatrix}\n",
+    "    3 & 2 & 1\\\\ \n",
+    "    2 & 3 & 2\\\\\n",
+    "    1 & 2 & 3\n",
+    "    \\end{pmatrix}.\n",
+    "\\end{align*}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "deletable": false,
+    "nbgrader": {
+     "cell_type": "code",
+     "checksum": "fbd7848114b10e527cfdadae70432b75",
+     "grade": true,
+     "grade_id": "cell-876c5ab872a98046",
+     "locked": false,
+     "points": 3,
+     "schema_version": 3,
+     "solution": true,
+     "task": false
+    }
+   },
+   "outputs": [],
+   "source": [
+    "def inversePower(A, sigma, eps):\n",
+    "    \"\"\"\n",
+    "    Estimates an eigenvector of matrix A by the inverse power method.\n",
+    "\n",
+    "    Args:\n",
+    "        A: an n x n matrix\n",
+    "        sigma: a shift close to the desired eigenvalue\n",
+    "        eps: the desired accuracy\n",
+    "\n",
+    "    Returns:\n",
+    "        A tuple (b, k), containing:\n",
+    "        b: the eigenvector corresponding to the eigenvalue\n",
+    "           closest to sigma after k iterations\n",
+    "        k: the number of iterations done\n",
+    "\n",
+    "    See also:\n",
+    "        https://www.sciencedirect.com/topics/mathematics/inverse-power-method\n",
+    "    \"\"\"\n",
+    "\n",
+    "    # A should be n x n.\n",
+    "    assert len(A.shape) == 2 and A.shape[0] == A.shape[1]\n",
+    "    n = len(A)\n",
+    "\n",
+    "    # Set up the initial values: B = (A - sigma*1)^-1 and b = (1, 1, ..., 1).\n",
+    "    B = np.linalg.inv(A - sigma*np.eye(n))\n",
+    "    b = np.ones(n)\n",
+    "    k = 0\n",
+    "    e = eps + 1  # ensure the loop body runs at least once\n",
+    "\n",
+    "    while e > eps:\n",
+    "        b_prev = b.copy()\n",
+    "        k += 1\n",
+    "\n",
+    "        b = B @ b\n",
+    "        b /= np.sqrt(b @ b)  # equivalently: b /= np.linalg.norm(b)\n",
+    "        e = np.sqrt(np.sum((np.abs(b_prev) - np.abs(b))**2))\n",
+    "\n",
+    "    return b, k"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "deletable": false,
+    "nbgrader": {
+     "cell_type": "code",
+     "checksum": "d12ec4311abc0ec7ad2f4e7c12b6a247",
+     "grade": true,
+     "grade_id": "cell-e12bd2509520938e",
+     "locked": false,
+     "points": 0,
+     "schema_version": 3,
+     "solution": true,
+     "task": false
+    }
+   },
+   "outputs": [],
+   "source": [
+    "# Use this cell for your own testing ...\n",
+    "\n",
+    "A = np.array([[3, 2, 1], [2, 3, 2], [1, 2, 3]])\n",
+    "\n",
+    "# One shift near each eigenvalue of A (approx. 0.628, 6.372 and 2).\n",
+    "sigma_list = [.03, 6., 1.99999989999999]\n",
+    "\n",
+    "for sigma in sigma_list:\n",
+    "    print(\"For sigma =\", sigma)\n",
+    "    b, k = inversePower(A, sigma, 1e-6)\n",
+    "    print(\"b =\", b)\n",
+    "    # Elementwise quotient (A b) / b; all entries should equal the eigenvalue.\n",
+    "    lam = np.dot(A, b) / b\n",
+    "    print(\"lam =\", lam)\n",
+    "    print()\n",
+    "\n",
+    "print(np.linalg.eig(A)[0])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "deletable": false,
+    "nbgrader": {
+     "cell_type": "code",
+     "checksum": "4848b41fcb6770679285941320a2de3b",
+     "grade": true,
+     "grade_id": "cell-f458054aeff75cc9",
+     "locked": false,
+     "points": 3,
+     "schema_version": 3,
+     "solution": true,
+     "task": false
+    }
+   },
+   "outputs": [],
+   "source": [
+    "def test_inversePower():\n",
+    "    A = np.array([[3, 2, 1], [2, 3, 2], [1, 2, 3]])\n",
+    "    lams, vecs = np.linalg.eig(A)\n",
+    "    for i, lam in enumerate(lams):\n",
+    "        b, k = inversePower(A, lam + 1e-4, 1e-8)\n",
+    "        assert abs(b @ vecs[:, i]) > 1 - 1e-6  # b is defined up to sign\n",
+    "\n",
+    "test_inversePower()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": false,
+    "editable": false,
+    "nbgrader": {
+     "cell_type": "markdown",
+     "checksum": "4c2230bb7698096ad982513c84b1b013",
+     "grade": false,
+     "grade_id": "cell-d25e9b53cd92f821",
+     "locked": true,
+     "schema_version": 3,
+     "solution": false,
+     "task": false
+    }
+   },
+   "source": [
+    "### Task 2\n",
+    "\n",
+    "Next you will need to implement the tri-diagonalization scheme following Householder. To this end implement a Python function $\\text{tridiagonalize(A)}$ which takes a symmetric matrix $A$ as input and returns a tridiagonal matrix $T$ of the same dimension. Your algorithm should execute the following steps:\n",
+    "\t\n",
+    "First use an assertion to make sure $A$ is symmetric. Then let $k$ run from $0$ to $n-1$ and repeat the following:\n",
+    "\n",
+    "\\begin{align}\n",
+    "    q &= \\sqrt{ \\sum_{j=k+1}^n \\left(A_{j,k}\\right)^2 } \\\\\n",
+    "    \\alpha &= -\\operatorname{sgn}(A_{k+1,k}) \\cdot q \\\\\n",
+    "    r &= \\sqrt{ \\frac{ \\alpha^2 - A_{k+1,k} \\cdot \\alpha }{2} } \\\\\n",
+    "    \\mathbf{v} &= \\mathbf{0} \\qquad \\text{... vector of dimension } n \\\\\n",
+    "    v_{k+1} &= \\frac{A_{k+1,k} - \\alpha}{2r} \\\\\n",
+    "    v_{k+j} &= \\frac{A_{k+j,k}}{2r} \\quad \\text{for } j=2,3,\\dots,n \\\\\n",
+    "    P &= \\mathbf{1} - 2 \\mathbf{v}\\mathbf{v}^T \\\\\n",
+    "    A &= P \\cdot A \\cdot P\n",
+    "\\end{align}\n",
+    "\n",
+    "At the end return $A$ as $T$.\n",
+    "\n",
+    "Apply your routine to the matrix $A$ defined in task 1 as well as to a few random (but symmetric) matrices of different dimension $n$.\n",
+    "\n",
+    "Hint: Use $\\text{np.outer()}$ to calculate the *matrix* $\\mathbf{v}\\mathbf{v}^T$ needed in the definition of the Householder transformation matrix $P$.\n",
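+    "\n",
+    "Note that $r$ is chosen precisely so that $\\mathbf{v}^T \\mathbf{v} = 1$, which makes $P$ symmetric and orthogonal ($P^2 = \\mathbf{1}$). Hence $A \\mapsto P A P$ is a similarity transform, and the resulting tridiagonal matrix $T$ has the same eigenvalues as the original $A$."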
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "deletable": false, + "nbgrader": { + "cell_type": "code", + "checksum": "5d057c8a5c00f59b856d55de8b2e5923", + "grade": true, + "grade_id": "cell-0cbf043c8cbc0601", + "locked": false, + "points": 3, + "schema_version": 3, + "solution": true, + "task": false + } + }, + "outputs": [], + "source": [ + "def tridiagonalize(A):\n", + " \"\"\"\n", + " Tridiagonalizes a given matrix following the Householder method.\n", + " Args:\n", + " A: NxN matrix\n", + " Returns:\n", + " The matrix A, but now tridiagonalized\n", + " \"\"\"\n", + " assert A.shape[0] == A.shape[1] and len(A.shape) == 2\n", + " n = len(A)\n", + " for k in range(n-1):\n", + " q = m.sqrt(np.sum(A[k+1:n,k]**2))\n", + " alpha = -1*np.sign(A[k+1,k])*q\n", + " r = m.sqrt((alpha**2 - A[k+1,k]*alpha)/2)\n", + " v = np.zeros(n)\n", + " v[k+1] = (A[k+1,k]-alpha)/(2*r)\n", + " print(np.shape(v),np.shape(A),np.shape(r),np.shape(A[k+2:n]/(2*r)))\n", + " v[k+2:n] = A[k+2:n,k]/(2*r)\n", + " P = np.identity(n)-(2*np.outer(v,v))\n", + " A = np.dot(np.dot(P,A),P)\n", + " return A" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "deletable": false, + "nbgrader": { + "cell_type": "code", + "checksum": "3eab15fa13ffbb2cd9383870f305e206", + "grade": true, + "grade_id": "cell-460dfaeef80dd101", + "locked": false, + "points": 0, + "schema_version": 3, + "solution": true, + "task": false + } + }, + "outputs": [], + "source": [ + "B = np.array([[3,2,1],[2,3,2],[1,2,3]])\n", + "\n", + "tridiagonalize(B)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "editable": false, + "nbgrader": { + "cell_type": "markdown", + "checksum": "b847d4c4fa2311288742cf0badc5a053", + "grade": false, + "grade_id": "cell-e1a2cdfe8e8c0bd6", + "locked": true, + "schema_version": 3, + "solution": false, + "task": false + } + }, + "source": [ + "### Task 3\n", + "\n", + "Implement the $QR$ decomposition based diagonalization routine for tri-diagonal matrices $T$ in Python as a function $\\text{QREig(T, eps)}$. It should take a tri-diagonal matrix $T$ and some accuracy $\\varepsilon$ as inputs and should return all eigenvalues in the form of a vector $\\mathbf{d}$. By making use of the $QR$ decomposition as implemented in Numpy's $\\text{numpy.linalg.qr()}$ the algorithm is very simple and reads:\n", + "\n", + "Repeat the following until the error $e$ is smaller than $\\varepsilon$:\n", + "\n", + "\\begin{align}\n", + " Q \\cdot R &= T^{(k)} \\qquad \\text{... do this decomposition with the help of Numpy!} \\\\\n", + " T^{(k+1)} &= R \\cdot Q \\\\\n", + " e &= |\\mathbf{d_1}| \n", + "\\end{align}\n", + "\n", + "where $T^{(0)} = T$ and $\\mathbf{d_1}$ is the first sub-diagonal of $T^{(k+1)}$ at each iteration step $k$. At the end return the main-diagonal of $T^{(k+1)}$ as $\\mathbf{d}$. \n", + "\n", + "Implement a unit test for your function based on the matrix $A$ defined in task 1. You will need to tri-diagonalize it first." 
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "deletable": false,
+    "nbgrader": {
+     "cell_type": "code",
+     "checksum": "bb2814035b4f1e593517cb2a54411b38",
+     "grade": true,
+     "grade_id": "cell-17fe57c93a9ad465",
+     "locked": false,
+     "points": 3,
+     "schema_version": 3,
+     "solution": true,
+     "task": false
+    }
+   },
+   "outputs": [],
+   "source": [
+    "def QREig(T, eps):\n",
+    "    \"\"\"\n",
+    "    Diagonalizes a tridiagonal matrix T by iterated QR decomposition;\n",
+    "    after convergence the diagonal elements are the eigenvalues.\n",
+    "    Args:\n",
+    "        T: an already tridiagonalized matrix\n",
+    "        eps: the desired accuracy\n",
+    "    Returns:\n",
+    "        A one-dimensional array with the eigenvalues of the matrix T\n",
+    "    \"\"\"\n",
+    "    e = eps + 1\n",
+    "    while e > eps:\n",
+    "        Q, R = np.linalg.qr(T)\n",
+    "        T = np.matmul(R, Q)\n",
+    "        # 1-norm of the sub-diagonal (equal to the super-diagonal, T is symmetric).\n",
+    "        e = np.sum(np.absolute(np.diag(T, k=1)))\n",
+    "    return np.diag(T)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "deletable": false,
+    "nbgrader": {
+     "cell_type": "code",
+     "checksum": "a2ce6f6cdaa5b7e83b5bdb2fe3b12370",
+     "grade": true,
+     "grade_id": "cell-19e1c89cfea6b4ca",
+     "locked": false,
+     "points": 0,
+     "schema_version": 3,
+     "solution": true,
+     "task": false
+    }
+   },
+   "outputs": [],
+   "source": [
+    "A_tridiag = tridiagonalize(A)\n",
+    "print(A_tridiag)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "deletable": false,
+    "nbgrader": {
+     "cell_type": "code",
+     "checksum": "0bc7766605e0934f89b9df038f82dc51",
+     "grade": true,
+     "grade_id": "cell-e27978374106d1a4",
+     "locked": false,
+     "points": 3,
+     "schema_version": 3,
+     "solution": true,
+     "task": false
+    }
+   },
+   "outputs": [],
+   "source": [
+    "def test_QREig():\n",
+    "    \"\"\"\n",
+    "    Tests the QREig function on A_tridiag (defined in task 2) with eps = 0.001.\n",
+    "    \"\"\"\n",
+    "    eps = 0.001\n",
+    "    d = QREig(A_tridiag, eps)\n",
+    "    expected = np.linalg.eigvals(np.array([[3, 2, 1], [2, 3, 2], [1, 2, 3]]))\n",
+    "    # Compare the sorted spectra; QREig returns them in no particular order.\n",
+    "    assert np.allclose(np.sort(d), np.sort(expected), atol=0.01)\n",
+    "\n",
+    "test_QREig()\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": false,
+    "editable": false,
+    "nbgrader": {
+     "cell_type": "markdown",
+     "checksum": "f1725d3ecdd5358e4129b9006a4b8bc1",
+     "grade": false,
+     "grade_id": "cell-c530435b67593096",
+     "locked": true,
+     "schema_version": 3,
+     "solution": false,
+     "task": false
+    }
+   },
+   "source": [
+    "### Task 4\n",
+    "\n",
+    "With the help of $\\text{QREig(T, eps)}$ you can now calculate all eigenvalues. Then you can calculate all corresponding eigenvectors with the help of $\\text{inversePower(A, sigma, eps)}$, by setting $\\sigma$ to approximately the eigenvalues you found (you should add some small random noise to $\\sigma$ in order to avoid singularity issues in the inversion needed for the inverse power method).\n",
+    "\n",
+    "Apply this combination of functions to calculate all eigenvalues and eigenvectors of the matrix $A$ defined in task 1.\n",
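+    "\n",
+    "(Why the noise is needed: $B = (A - \\sigma \\mathbf{1})^{-1}$ has eigenvalues $1/(\\lambda_i - \\sigma)$, so if $\\sigma$ hits an eigenvalue $\\lambda_i$ of $A$ exactly, $A - \\sigma \\mathbf{1}$ is singular and the inversion fails. A small perturbation keeps the matrix invertible while still selecting the intended eigenpair.)"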
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "deletable": false,
+    "nbgrader": {
+     "cell_type": "code",
+     "checksum": "8792c616fbe2b5159caed5e873050243",
+     "grade": true,
+     "grade_id": "cell-bb8315bb7895761f",
+     "locked": false,
+     "points": 3,
+     "schema_version": 3,
+     "solution": true,
+     "task": false
+    }
+   },
+   "outputs": [],
+   "source": [
+    "\"\"\"\n",
+    "Eigenvalues and eigenvectors of the matrix A from task 1, calculated numerically\n",
+    "using the functions from the previous tasks.\n",
+    "\"\"\"\n",
+    "eps = 0.001\n",
+    "eigenvalues = QREig(A_tridiag, eps)\n",
+    "for lam in eigenvalues:\n",
+    "    # Small random noise keeps A - sigma*1 invertible even if lam is (nearly) exact.\n",
+    "    sigma = lam + np.random.normal(0, 0.01)\n",
+    "    # Use A itself here (not A_tridiag): we want the eigenvectors of A.\n",
+    "    b, k = inversePower(A, sigma, eps)\n",
+    "    print(\"lambda =\", lam, \" v =\", b, \" (\", k, \"iterations )\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "deletable": false,
+    "editable": false,
+    "nbgrader": {
+     "cell_type": "markdown",
+     "checksum": "1ea6ea8324c01aee6a8e3225cbcd98a5",
+     "grade": false,
+     "grade_id": "cell-a1926d9a9975679b",
+     "locked": true,
+     "schema_version": 3,
+     "solution": false,
+     "task": false
+    }
+   },
+   "source": [
+    "### Task 5\n",
+    "\n",
+    "Test your eigenvalue / eigenvector algorithm for larger random (but symmetric) matrices."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "deletable": false,
+    "nbgrader": {
+     "cell_type": "code",
+     "checksum": "42126fc8bf851a1b021f8ecf7cb2bc6f",
+     "grade": true,
+     "grade_id": "cell-9be1ee8fbf7ec7a8",
+     "locked": false,
+     "points": 2,
+     "schema_version": 3,
+     "solution": true,
+     "task": false
+    }
+   },
+   "outputs": [],
+   "source": [
+    "\"\"\"\n",
+    "Two 'random' (in truth hand-made) matrices C and D are symmetrized, tridiagonalized,\n",
+    "and their eigenvalues and eigenvectors are calculated.\n",
+    "\"\"\"\n",
+    "C = np.array([[5,4,3,2,1],\n",
+    "              [3,4,5,1,2],\n",
+    "              [5,1,4,2,3],\n",
+    "              [3,2,1,5,1],\n",
+    "              [5,4,3,2,1]])\n",
+    "C = (C + C.T)/2  # tridiagonalize() requires a symmetric matrix\n",
+    "C_tridiag = tridiagonalize(C)\n",
+    "for lam in QREig(C_tridiag, eps):\n",
+    "    sigma = lam + np.random.normal(0, 0.01)\n",
+    "    b, k = inversePower(C, sigma, eps)\n",
+    "    print(b, k)\n",
+    "\n",
+    "D = np.array([[6,4,3,2,1],\n",
+    "              [3,3,5,1,2],\n",
+    "              [5,1,4,2,2],\n",
+    "              [3,1,1,5,6],\n",
+    "              [1,4,7,2,3]])\n",
+    "D = (D + D.T)/2\n",
+    "D_tridiag = tridiagonalize(D)  # note: D here, not C\n",
+    "for lam in QREig(D_tridiag, eps):\n",
+    "    sigma = lam + np.random.normal(0, 0.01)\n",
+    "    b, k = inversePower(D, sigma, eps)\n",
+    "    print(b, k)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.10"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}