Merge branch 'KW23'
Commit 0fd8914a46 · LinAlg2.tex: 274 additions and 23 deletions

@@ -3714,12 +3714,14 @@
\begin{lemma}
	Let $\operatorname{char}(\K) \neq 2$. Then quadratic forms and symmetric
	bilinear forms correspond to one another bijectively.
\end{lemma}
\begin{proof}
	If $\rho$ is a quadratic form, then $\sigma(u, v) := \rho(u + v) - \rho(u) - \rho(v)$ is a symmetric
	bilinear form. \\
	Conversely, let $\sigma$ be a symmetric bilinear form and set
	$\rho(v) := \underset{\mathclap{\substack{\rotatebox{90}{$\to$}\\\operatorname{char}(\K) \neq 2}}}
	{\frac 12} \sigma(v, v)$.
	\begin{align*}
		\rho(\lambda v) = \frac 12 \sigma(\lambda v, \lambda v) & = \lambda^2 \frac 12 \sigma(v, v) =
		\lambda^2 \rho(v) \implies \text{a)} \\
@@ -3733,7 +3735,8 @@
\end{proof}
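
For instance (an added check of this correspondence, not part of the script): on $\R^2$ take $\rho(x) = x_1^2 + x_1 x_2 + x_2^2$. Then
\[
	\sigma(u, v) = \rho(u+v) - \rho(u) - \rho(v) = 2 u_1 v_1 + (u_1 v_2 + u_2 v_1) + 2 u_2 v_2
\]
is a symmetric bilinear form, and indeed $\frac 12 \sigma(v, v) = v_1^2 + v_1 v_2 + v_2^2 = \rho(v)$.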
\begin{defin}
	\label{theo:3.5.6}
	Let $V$ be a $\C$-vector space. $\rho: V \to \R$ is called a \underline{hermitian form} if $\forall u, v \in V, \lambda \in \C$:
\begin{enumerate}[label=\alph*)]
		\item $\rho(\lambda v) = \abs{\lambda}^2 \rho(v)$
		\item $\rho(u+v) + \rho(u -v) = 2(\rho(u) + \rho(v))$
@@ -3743,15 +3746,36 @@
\end{defin}
\begin{lemma}
	Hermitian forms and hermitian sesquilinear forms correspond to one another bijectively.
\end{lemma}
\begin{proof}
	For a hermitian form $\rho$, Definition \ref{theo:3.5.6} c) defines a hermitian sesquilinear form. \\
	Conversely, let $\sigma$ be a hermitian sesquilinear form. Then $\rho(v) := \sigma(v, v)$ is a hermitian
	form:
\begin{enumerate}[label=\alph*)]
		\item \checkmark
		\item \begin{align*}
			      \rho(u+v) + \rho(u - v) &= \sigma(u+v, u+v) + \sigma(u-v, u-v) \\
			      &\begin{multlined}= \sigma(u, u) + \sigma(v, v) + \sigma(u, v) + \sigma(v, u)
				      + \sigma(u, u)\\ + \sigma(v, v) - \sigma(u, v) - \sigma(v, u)
			      \end{multlined} \\
			      &= 2\sigma(u, u) + 2\sigma(v, v) \\
			      &= 2(\rho(u) + \rho(v))
		      \end{align*}
		\item \begin{align*}
			      \frac12 (\rho(u+v) + i\rho(u+iv)
			      & - (1+i)(\rho(u)+\rho(v))) = \\
			      & \begin{multlined}
				      = \frac12 \big( \sigma(u+v, u+v) + i \sigma(u+iv,u+iv) \\ - \sigma(u, u) - \sigma(v, v) - i\sigma(u, u) -
				      i \sigma(v, v) \big) \end{multlined} \\
			      & = \frac12 \left( \sigma(u, v) + \sigma(v, u) + i \sigma(iv, u) + i \sigma(u, iv) \right) \\
			      & = \frac12 \left( \sigma(u, v) + \overline{\sigma(u, v)} + i \overline{\sigma(u, iv)} + \sigma(u, v) \right) \\
			      & = \frac12 \left( \sigma(u, v) + \overline{\sigma(u, v)} + i \cdot \overline{\overline{i}} \cdot
			      \overline{\sigma(u, v)} + \sigma(u, v) \right) \\
			      & = \frac12 \cdot 2 \sigma(u, v) = \sigma(u, v)
		      \end{align*}
\end{enumerate}
\end{proof}
\subsubsection{Remark}
$\sigma$ is called the polar form of $\rho$.

@@ -3759,17 +3783,20 @@
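As a concrete illustration (an added example): on $V = \C$ take $\sigma(u, v) = u \overline v$, so $\rho(v) = \sigma(v, v) = \abs{v}^2$. Since $\abs{u+v}^2 = \abs u^2 + \abs v^2 + u \overline v + \overline u v$ and $i\abs{u+iv}^2 = i(\abs u^2 + \abs v^2) + u \overline v - \overline u v$, the polar form recovers
\[
	\frac12 \left( \rho(u+v) + i\rho(u+iv) - (1+i)(\rho(u)+\rho(v)) \right) = u \overline v = \sigma(u, v).
\]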
\section[The Singular Value Decomposition and the Pseudoinverse]{The Singular Value Decomposition and the \\Pseudoinverse}

We now want to derive, for two Euclidean vector spaces $V, W$, a suitable normal form with respect to orthonormal bases. The polar decomposition yields, for $\alpha \in \Hom(V, V)$, orthonormal bases $B, B'$ of $V$ with
\[
	{}_{B'} M(\alpha)_B = \begin{pmatrix}
		s_1 \\
		& \ddots \\
		& & s_r \\
		& & & 0 \\
		& & & & \ddots \\
		& & & & & 0
	\end{pmatrix}, \quad s_1, \dots, s_r > 0
\]
That is, $\alpha$ can be composed of orthogonal endomorphisms and a scaling.

\begin{satz}
	[Singular value decomposition]
@@ -3781,22 +3808,24 @@
	& & s_r \\
	& & & 0 \\
	& & & & \ddots \\
	& & & & & 0\end{pmatrix}}_{\K^{m \times n}} \underbrace{V}_{\K^{n \times n}}
	\]
	$s_1, \dots, s_r$ are called the \underline{singular values} of $A$.
\end{satz}
\begin{proof}
	\begin{itemize}
		\item $A^* A \in \K^{\nxn}$ is self-adjoint and positive semi-definite.
		      Its eigenvalues $\lambda_1, \dots, \lambda_n$ lie in $[0, \infty)$, with an orthonormal basis $b_1, \dots, b_n$ of eigenvectors.
		      Let $\lambda_1, \dots, \lambda_r \in (0, \infty), \lambda_{r+1} = \dots = \lambda_n = 0$
		      and set $s_i := \sqrt{\lambda_i}, i\in [n]$.
		\item It holds that $\overbrace{\frac 1{s_1} A b_1}^{b_1'}, \dots, \overbrace{\frac 1{s_r} A b_r}^{b_r'}$
		      is an orthonormal system in $\K^m$:
		      \begin{align*}
			      \overline{\inner{Ab_i}{Ab_j}}_{\K^m} & = \overline{b_i^T A^T \overline A \,\overline{b_j}}
			      = \overline{b_i}^T A^* A b_j = \lambda_j \overline{b_i}^T b_j \\
			      & = \lambda_j \overline{\inner{b_i}{b_j}}_{\K^n}
			      = \lambda_j \delta_{ij} \in \R
		      \end{align*}
		\item Extend $b_1', \dots, b_r'$ to an orthonormal basis $b_1', \dots, b_r', \dots, b_m'$ of $\K^m$. \\
		      Let $\varphi_A: x \mapsto A\cdot x \implies {}_{B'} M(\varphi_A)_B = \left( \begin{smallmatrix}
				      s_1 \\
@@ -3810,4 +3839,226 @@
\end{itemize}
\end{proof}
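
As a worked example of the construction in the proof (added for illustration): for
\[
	A = \begin{pmatrix} 1 & 1 \\ 1 & 1 \\ 0 & 0 \end{pmatrix}, \qquad
	A^* A = \begin{pmatrix} 2 & 2 \\ 2 & 2 \end{pmatrix}
\]
we get $\lambda_1 = 4, \lambda_2 = 0$, hence $r = 1, s_1 = 2$, with eigenvectors
$b_1 = \frac1{\sqrt 2}(1, 1)^T, b_2 = \frac1{\sqrt 2}(1, -1)^T$ and
$b_1' = \frac1{s_1} A b_1 = \frac1{\sqrt 2}(1, 1, 0)^T$. Extending by
$b_2' = \frac1{\sqrt 2}(1, -1, 0)^T, b_3' = (0, 0, 1)^T$ yields $A = U^* \Sigma V$ with
\[
	U^* = \begin{pmatrix}
		\frac1{\sqrt2} & \frac1{\sqrt2}  & 0 \\
		\frac1{\sqrt2} & -\frac1{\sqrt2} & 0 \\
		0              & 0               & 1
	\end{pmatrix}, \quad
	\Sigma = \begin{pmatrix} 2 & 0 \\ 0 & 0 \\ 0 & 0 \end{pmatrix}, \quad
	V = \begin{pmatrix} \frac1{\sqrt2} & \frac1{\sqrt2} \\ \frac1{\sqrt2} & -\frac1{\sqrt2} \end{pmatrix}.
\]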

Using the singular value decomposition we can compute a generalized inverse for every matrix (or linear map).

Let ${}_{B'} M(\alpha)_B = \begin{pmatrix}s_1 \\
	& \ddots \\
	& & s_r \\
	& & & 0 \\
	& & & & \ddots \\
	& & & & & 0\end{pmatrix}$ \\
$\implies \ker(\alpha) = \langle b_{r+1}, \dots, b_n \rangle_V, \im(\alpha) = \langle b'_1, \dots, b_r' \rangle_W,
\ker(\alpha)^\bot = \langle b_1, \dots, b_r \rangle_V$

\begin{align*}
	\alpha: V & \to
	\ker(\alpha)^{\bot} & & \overset{\beta}{\to}
	\im(\alpha) & & \to
	W \\
	\sum_{i=1}^n \lambda_i b_i & \mapsto
	\sum_{i=1}^r \lambda_i b_i & & \mapsto
	\sum_{i=1}^r s_i \lambda_i b_i' & & \mapsto
	\sum_{i=1}^r s_i \lambda_i b_i' \\
	\left(\begin{smallmatrix}x_1 \\ \vdots \\ x_n\end{smallmatrix}\right) & \mapsto
	\left(\begin{smallmatrix}x_1 \\ \vdots \\ x_r\end{smallmatrix}\right) & & \mapsto
	\left(\begin{smallmatrix}s_1 x_1 \\ \vdots \\ s_r x_r\end{smallmatrix}\right) & & \mapsto
	\left.\left(\begin{smallmatrix}
			s_1 x_1 \\ \vdots \\ s_r x_r \\ 0 \\ \vdots \\ 0
		\end{smallmatrix}\right)\right\} m \\
	\sum_{i=1}^r \frac{\mu_i}{s_i} b_i & \mapsfrom
	\sum_{i=1}^r \frac{1}{s_i} \mu_i b_i & & \underset{\beta^{-1}}{\mapsfrom}
	\sum_{i=1}^r \mu_i b_i' & & \mapsfrom
	\sum_{i=1}^m \mu_i b_i' = w
\end{align*}
Reading the bottom row from right to left, the composition of orthogonal projection onto $\im(\alpha)$, $\beta^{-1}$, and inclusion into $V$ defines $\alpha^{+}: W \to V$.

\subsubsection{Remark}
If $\alpha$ is invertible and $V=W$, then $n = m = r \implies \alpha^+ = \alpha^{-1}$. \\
So the pseudoinverse is a genuine generalization of the inverse.

\begin{defin}
	\leavevmode
	\begin{itemize}
		\item Let $A \in \K^{m \times n}, \K \in \{\R,\C\}$ with
		      \[
			      A = U^* \Sigma V, \quad \Sigma = \left( \begin{smallmatrix}
					      s_1 \\
					      & \ddots \\
					      & & s_r \\
					      & & & 0 \\
					      & & & & \ddots \\
					      & & & & & 0
				      \end{smallmatrix} \right)
		      \]
		      Then the matrix
		      \[
			      A^+ = V^* \Sigma^+ U \in \K^{n \times m}, \quad \Sigma^+ = \left( \begin{smallmatrix}
					      \frac1{s_1} \\
					      & \ddots \\
					      & & \frac1{s_r} \\
					      & & & 0 \\
					      & & & & \ddots \\
					      & & & & & 0
				      \end{smallmatrix} \right)
		      \]
		      is called the (Moore-Penrose) \underline{pseudoinverse} of $A$.
		\item Let $\alpha \in \homk(V, W), \dim(V), \dim(W) < \infty$, let $B, B'$ be orthonormal bases with
		      ${}_{B'} M(\alpha)_B = \Sigma$, and let $\alpha^+$ be such that ${}_B M(\alpha^+)_{B'} = \Sigma^+$.
		      Then $\alpha^+$ is called the (Moore-Penrose) \underline{pseudoinverse} of $\alpha$.
	\end{itemize}
\end{defin}
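
Continuing the added example from above: here
\[
	\Sigma^+ = \begin{pmatrix} \frac12 & 0 & 0 \\ 0 & 0 & 0 \end{pmatrix}, \qquad
	A^+ = V^* \Sigma^+ U = \frac14 \begin{pmatrix} 1 & 1 & 0 \\ 1 & 1 & 0 \end{pmatrix},
\]
and one checks directly that $A A^+ A = A$ and $A^+ A A^+ = A^+$.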

\begin{satz}
	\label{theo:3.6.3}
	Let $V, W$ be finite-dimensional Euclidean/unitary vector spaces and \\
	$\alpha \in \Hom(V, W)$. Then:
	\[
		\alpha^+ \text{ is the pseudoinverse} \iff \begin{aligned}
			 & \alpha \circ \alpha^+ \circ \alpha = \alpha \\
			 & \alpha^+ \circ \alpha \circ \alpha^+ = \alpha^+ \\
			 & \alpha \circ \alpha^+ \text{ self-adjoint} \\
			 & \alpha^+ \circ \alpha \text{ self-adjoint} \\
		\end{aligned}
	\]
\end{satz}
\begin{proof}
	We argue with matrices, which is equivalent; furthermore we show the $\implies$ direction only for $\R$.
\begin{itemize}
		\item[$\implies$:] $A = U^T \Sigma V, A^+ = V^T \Sigma^+ U$
		      \[
			      A A^+ = U^T \Sigma \underbrace{V V^T}_{=I} \Sigma^+ U = U^T \Sigma \Sigma^+ U =
			      U^T \left( \begin{smallmatrix} 1 \\ & \ddots \\ & & 1 \\ & & & 0 \\ & & & & \ddots \\
				      & & & & & 0 \end{smallmatrix} \right) U
		      \]
		      is self-adjoint, and analogously $A^+ A$.
		      \[
			      A A^+ A = U^T \Sigma \underbrace{V V^T}_I \Sigma^+ \underbrace{U U^T}_I \Sigma V
			      = U^T \underbrace{\Sigma \Sigma^+ \Sigma}_\Sigma V = U^T \Sigma V = A
		      \]
		      Similarly, $A^+ A A^+ = V^T \underbrace{\Sigma^+ \Sigma \Sigma^+}_{\Sigma^+} U = A^+$.
		\item[$\impliedby$:] Still outstanding.
		      % \begin{itemize}
		      % \item \begin{equation} \label{eq:3.6.3.1}
		      % \begin{aligned}
		      % \ker(\alpha) = \ker(\alpha^+ \circ \alpha) && \im(\alpha) = \im(\alpha \circ \alpha^+) \\
		      % \ker(\alpha^+) = \ker(\alpha \circ \alpha^+) && \im(\alpha^+) = \im(\alpha^+ \circ \alpha)
		      % \end{aligned}
		      % \end{equation}
		      % \tl UE\br\,:
		      % $\ker(\alpha) \subseteq \ker(\alpha^+ \circ \alpha) \subseteq \ker(\alpha \circ \alpha^+
		      % \circ \alpha) = \ker(\alpha) \implies \ker(\alpha) = \ker(\alpha \circ \alpha^+ \circ \alpha)$
		      % \item $\nu := \alpha^+ \circ \alpha$ ist Orthogonalprojektion auf $\ker(\alpha)^\bot$
		      % \item $\nu$ selbstadjungiert $\implies \ker(v) \bot \im(v)$
		      % \item $\nu \circ \nu = \alpha^+ \circ \underbrace{\alpha \circ \alpha^+ \circ \alpha}_\alpha
		      % = \alpha^+ \circ \alpha = \nu$
		      % \item $\forall u \in \im(\nu), v \in V$: \[
		      % \inner{\nu(v) - v}u = \inner{\nu(v) - v}{\nu(w)} =
		      % \inner{\nu^2(v) - \nu(v)}{w} = \inner{0}{w} = 0
		      % \]
		      % \end{itemize}
\end{itemize}
\end{proof}
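
For the example matrix $A$ from above, all four conditions can be verified directly:
$A A^+ = \frac12 \left(\begin{smallmatrix} 1 & 1 & 0 \\ 1 & 1 & 0 \\ 0 & 0 & 0 \end{smallmatrix}\right)$ and
$A^+ A = \frac12 \left(\begin{smallmatrix} 1 & 1 \\ 1 & 1 \end{smallmatrix}\right)$ are symmetric, and
$A A^+ A = A$, $A^+ A A^+ = A^+$.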
\begin{satz}
	Let $\alpha \in \Hom(V, W)$.
	\begin{itemize}
		\item $\alpha$ injective $\implies \alpha^+ = (\alpha^* \circ \alpha)^{-1} \circ \alpha^*$
		\item $\alpha$ surjective $\implies \alpha^+ = \alpha^* \circ (\alpha \circ \alpha^*)^{-1}$
	\end{itemize}
\end{satz}
\begin{proof}
	Let $\alpha$ be injective $\implies \alpha^* \circ \alpha$ is bijective: assume $\alpha^* \circ \alpha$ were not
	surjective $\implies$
	\begin{align*}
		\exists w \in V \setminus \{0\}: \forall v \in V: \inner{\alpha^* \circ \alpha(v)}{w} = 0 \\
		\implies \forall v: \inner{\alpha(v)}{\alpha(w)}_W = 0 \\
		\implies \alpha(w) \in \im(\alpha)^\bot \cap \im(\alpha) \implies \alpha(w) = 0 \\
		\overset{\alpha \text{ injective}}{\implies} w = 0 & \text{\Lightning}
	\end{align*}
	$\implies \beta:= (\alpha^* \circ \alpha)^{-1} \circ \alpha^*$ is well-defined.
	Now:
	\begin{itemize}
		\item $\alpha \circ \beta \circ \alpha = \alpha \circ (\alpha^* \circ \alpha)^{-1} \circ \alpha^* \circ
		      \alpha = \alpha$
		\item $\beta \circ \alpha \circ \beta = (\alpha^* \circ \alpha)^{-1} \circ \alpha^* \circ \alpha \circ
		      \beta = \beta$
		\item $\beta \circ \alpha, \alpha \circ \beta$ are self-adjoint.
	\end{itemize}
	$\underset{\text{Satz \ref{theo:3.6.3}}}{\implies} \beta = \alpha^+$
\end{proof}
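As a quick added check of the first formula: $A = \left(\begin{smallmatrix} 1 \\ 1 \end{smallmatrix}\right)$ is injective with $A^* A = (2)$, so
\[
	A^+ = (A^* A)^{-1} A^* = \tfrac12 \begin{pmatrix} 1 & 1 \end{pmatrix},
\]
matching the pseudoinverse obtained from the singular value decomposition.
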
\subsubsection{Application: Method of least squares}
Let $Ax = b$ be a linear system of equations with $L(A,b) = \emptyset$. We try to find an $x$ with
$\norm{Ax-b}_{\K^m}$ minimal; abstractly, a $v$ with $\norm{\alpha(v) - w}$ minimal.
Let $b_1, \dots, b_n$ be an orthonormal basis of $V$ and $b_1', \dots, b_m'$ an orthonormal basis of $W$ with
$\langle b_1, \dots, b_r \rangle = \ker(\alpha)^\bot, \langle b_1', \dots, b_r'\rangle = \im(\alpha)$. For
$v = \sum_{i=1}^n \lambda_i b_i$ we have $\alpha(v) = \sum_{i=1}^r s_i \lambda_i b_i'$; write
$w = \sum_{i=1}^m \mu_i b_i'$.

\[
	\norm{\alpha(v) - w}^2 = \norm{\sum_{i=1}^r s_i \lambda_i b_i' - \sum_{i=1}^m \mu_i b_i'}^2 =
	\sum_{i=1}^r (s_i \lambda_i - \mu_i)^2 + \sum_{i = r+1}^m \mu_i^2
\]
This becomes minimal when $\lambda_i = \frac{\mu_i}{s_i}, i \in [r]$, in particular for $v = \alpha^+(w)$.

\begin{satz}
	Let $\alpha \in \homk(V, W), \K \in \{\R,\C\}$, with $V, W$ finite-dimensional,
	and let $w \in W$. Then $v^+ = \alpha^+(w)$ satisfies
	\[
		\norm{\alpha(v^+)-w} = \min_{v\in V} \norm{\alpha(v) - w}
	\]
	All vectors with this property satisfy the\\ \underline{normal equations}
	$\ontop{\alpha^* \alpha(v) = \alpha^*(w)}{A^* A x = A^* b}$
\end{satz}
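
For instance (added numerical check): the inconsistent system $x = 1, x = 2$, i.e. $A = \left(\begin{smallmatrix} 1 \\ 1 \end{smallmatrix}\right), b = \left(\begin{smallmatrix} 1 \\ 2 \end{smallmatrix}\right)$, has the normal equation $A^* A x = A^* b$, i.e. $2x = 3$, so $x = \frac32 = A^+ b$, which indeed minimizes $(x-1)^2 + (x-2)^2$.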
\begin{satz}
	Let $\alpha \in \Hom(V, W)$ and $w \in \im(\alpha)$.
	Then $v^+ = \alpha^+ (w)$ satisfies:
	\[
		\norm{v^+} = \min\{\norm v: \alpha(v) = w \}
	\]
\end{satz}
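
Dually (added example): for the underdetermined system $x_1 + x_2 = 2$ with $A = \begin{pmatrix} 1 & 1 \end{pmatrix}$, surjectivity gives $A^+ = A^* (A A^*)^{-1} = \frac12 \left(\begin{smallmatrix} 1 \\ 1 \end{smallmatrix}\right)$, so $v^+ = A^+ \cdot 2 = (1, 1)^T$ with $\norm{v^+} = \sqrt 2$, the smallest norm among all solutions $(t, 2-t)^T$.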

%\subsubsection{Beispiel (lineare Regression)}
%\begin{tikzpicture}
%\end{tikzpicture}
%$(t_i, y_i)_{i=1}^m$.
%Suche $f: f(t_i) \sim y_i, \forall i \in [m]$
%$f(t) = a_0 + a_1 t + a_2 t^2$
%\[
%	\text{minimiere }
%	\sum_{i=1}^m (f(t_i) - y_i)^2 = \sum_{i=1}^m (a_0 + a_1 t_i + a_2 t_i^2 - y_i)^2 = \norm{A x -b}^2_{\K^m}
%\]

%\subsubsection{Anwendung: Ausgleichsquadrik}
%Problem: homogenes LGS $Ax=0$. Finde $x$ mit $\norm x = 1$ und $\norm{Ax}$ minimal. \\
%$b_1, \dots, b_n$ ONB aus EVen von $A^* A$ mit nichtnegativen EWen.
%\begin{align*}
%	X = \sum \lambda_i b_i \implies \norm{Ax}^2 & = \inner{Ax}{Ax} \\
%	& = \inner{A^* A x}{x} =
%	\inner{\sum s_i \lambda_i b_i}{\sum \lambda_j b_j} = \sum_{i=1}^n s_i \abs{\lambda_i}^2
%\end{align*}
%$s_1 \le s_2 \le \dots \le s_n, \norm x = \sum \abs{\lambda_i}^2$
%\[
%	\frac{\norm{Ax}}{\norm x} = \frac{\sum s_i \abs{\lambda_i}^2}{\sum \abs{\lambda_i}^2} \ge
%	\frac{s_1 \sum \abs{\lambda_i}^2}{\sum \abs{\lambda_i}^2} s_1
%\]
%$\norm x = 1 \implies \norm{Ax} \ge s_1$
%$\norm{b_i} \implies \lambda_1, \lambda_2 = \dots = \lambda_n = 0 \implies \norm{Ab_1} = s_1 \implies b_1$
%löst unser Minimierungsproblem. \\
%$Q = \{(x,y) \in \R^2: \psi(x, y) = 0\}, \psi(x, y):= a_1 x^2 + a_2 xy + a_3 y^2 a_4 x + a_5 y + a_6$
%Gegeben: $(x_i,y_i)^m_{i=1}$ Suche $x = (a_1, \dots, a_6)^T$ mit $\norm x = 1$ sodass
%\[
%	\sum_{i=1}^m (a_1 x_i^2 + a_2 x_i y_i + a_3 y_i^2 + a_4 x_i + a_5 y_y + a_6)^2
%\]
%minimal.
%$=\norm{Ax}^2, A = \begin{pmatrix}\end{pmatrix}$
%
%\begin{satz*}
%	Sei $A \in \K^{m \times n}$ und $b \in \K^n$ Eigenvektor von $A^* A$ zum kleinsten Eigenwert $r_1$.
%	Dann gilt
%	\[
%		\frac{\norm{Ab}}{\norm b} = \min\left\{\frac{\norm{Ax}}{\norm x}: x\in\R^n\right\} = \sqrt{r_1}
%	\]
%\end{satz*}

\end{document}