diff --git a/latex/thesis/chapters/lp_dec_using_admm.tex b/latex/thesis/chapters/lp_dec_using_admm.tex
index cb5b664..a936c87 100644
--- a/latex/thesis/chapters/lp_dec_using_admm.tex
+++ b/latex/thesis/chapters/lp_dec_using_admm.tex
@@ -880,7 +880,7 @@ A single optimal value giving optimal performance does not
 exist; rather, as long as the value is chosen within a certain range, the
 performance is approximately equally good.
 
-\begin{figure}[h]
+\begin{figure}[H]
     \centering
 
     \begin{subfigure}[c]{0.48\textwidth}
@@ -974,8 +974,9 @@ The values chosen for the rest of the parameters are the same as before.
 It is visible that choosing a large value for $\rho$ as well as a small value
 for $\mu$ minimizes the average number of iterations and thus the average
 run time of the decoding process.
+The same behavior can be observed when looking at various%
 %
-\begin{figure}[h]
+\begin{figure}[H]
     \centering
 
     \begin{tikzpicture}
@@ -1010,10 +1011,235 @@ run time of the decoding process.
     \label{fig:admm:mu_rho_iterations}
 \end{figure}%
 %
-The same behavior can be observed when looking at various different codes,
-as shown in figure \ref{fig:admm:mu_rho_multiple}.
+\noindent different codes, as shown in figure \ref{fig:admm:mu_rho_multiple}.
+
+To get an estimate for the maximum number of iterations $K$ necessary,
+the average error during decoding can be used.
+This is shown in figure \ref{fig:admm:avg_error} as an average of
+$\SI{100000}{}$ decodings.
+$\mu$ is set to $5$, $\rho$ to $1$, and the rest of the parameters are
+again chosen as $\epsilon_\text{pri}=10^{-5}$ and
+$\epsilon_\text{dual}=10^{-5}$.
+Similarly to the results in section \ref{subsec:prox:choice}, a dip is
+visible around the $20$ iteration mark.
+This is because, as the number of iterations increases,
+more and more decodings converge, leaving only the mistaken ones to be
+averaged.
+The point at which the wrong decodings start to become dominant and the
+decoding performance no longer increases is largely independent of
+the \ac{SNR}, allowing the maximum number of iterations to be chosen without
+considering it.
+
+\begin{figure}[H]
+    \centering
+
+    \begin{tikzpicture}
+        \begin{axis}[
+                grid=both,
+                width=0.6\textwidth,
+                height=0.45\textwidth,
+                xlabel={Iteration}, ylabel={Average $\left\Vert \hat{\boldsymbol{c}}
+                - \boldsymbol{c} \right\Vert$}
+            ]
+            \addplot[ForestGreen, line width=1pt]
+            table [col sep=comma, x=k, y=err,
+                discard if not={SNR}{1.0},
+                discard if gt={k}{100}]
+            {res/admm/avg_error_20433484.csv};
+            \addlegendentry{$E_b / N_0 = \SI{1}{dB}$}
+
+            \addplot[RedOrange, line width=1pt]
+            table [col sep=comma, x=k, y=err,
+                discard if not={SNR}{2.0},
+                discard if gt={k}{100}]
+            {res/admm/avg_error_20433484.csv};
+            \addlegendentry{$E_b / N_0 = \SI{2}{dB}$}
+
+            \addplot[NavyBlue, line width=1pt]
+            table [col sep=comma, x=k, y=err,
+                discard if not={SNR}{3.0},
+                discard if gt={k}{100}]
+            {res/admm/avg_error_20433484.csv};
+            \addlegendentry{$E_b / N_0 = \SI{3}{dB}$}
+
+            \addplot[RoyalPurple, line width=1pt]
+            table [col sep=comma, x=k, y=err,
+                discard if not={SNR}{4.0},
+                discard if gt={k}{100}]
+            {res/admm/avg_error_20433484.csv};
+            \addlegendentry{$E_b / N_0 = \SI{4}{dB}$}
+        \end{axis}
+    \end{tikzpicture}
+
+    \caption{Average error for $\SI{100000}{}$ decodings. 
(3,6)
+    regular \ac{LDPC} code with $n=204, k=102$ \cite[\text{204.33.484}]{mackay_enc}}
+    \label{fig:admm:avg_error}
+\end{figure}%
+
+The last two parameters remaining to be examined are the tolerances for the
+stopping criterion of the algorithm, $\epsilon_\text{pri}$ and
+$\epsilon_\text{dual}$.
+These are both set to the same value $\epsilon$.
+The effect of their value on the decoding performance is visualized in figure
+\ref{fig:admm:epsilon}.
+All parameters except $\epsilon_\text{pri}$ and $\epsilon_\text{dual}$ are
+kept constant, with $\mu=5$, $\rho=1$ and $E_b / N_0 = \SI{4}{dB}$, and a
+maximum of 200 iterations is performed.
+A lower value for the tolerance initially leads to a dramatic decrease in the
+\ac{FER}, an effect that fades as the tolerance is decreased further.
+
+\begin{figure}[H]
+    \centering
+
+    \begin{tikzpicture}
+        \begin{axis}[
+                grid=both,
+                xlabel={$\epsilon$}, ylabel={\acs{FER}},
+                ymode=log,
+                xmode=log,
+                x dir=reverse,
+                width=0.6\textwidth,
+                height=0.45\textwidth,
+            ]
+            \addplot[NavyBlue, line width=1pt, densely dashed, mark=*]
+            table [col sep=comma, x=epsilon, y=FER,
+                discard if not={SNR}{3.0},]
+            {res/admm/fer_epsilon_20433484.csv};
+        \end{axis}
+    \end{tikzpicture}
+
+    \caption{Effect of the value of the parameters $\epsilon_\text{pri}$ and
+    $\epsilon_\text{dual}$ on the \acs{FER}. (3,6) regular \ac{LDPC} code with
+    $n=204, k=102$ \cite[\text{204.33.484}]{mackay_enc}}
+    \label{fig:admm:epsilon}
+\end{figure}%
+
+In conclusion, the parameters $\mu$ and $\rho$ should be chosen comparatively
+small and large, respectively, to reduce the average run time of the decoding
+process, while keeping them within a certain range so as not to compromise the
+decoding performance.
+The maximum number of iterations performed can be chosen independently
+of the \ac{SNR}.
+Finally, small values should be given to the parameters
+$\epsilon_{\text{pri}}$ and $\epsilon_{\text{dual}}$ to achieve the lowest
+possible error rate.
+
+
+\subsection{Decoding Performance}
+
+In figure \ref{fig:admm:results}, the simulation results for the ``Margulis''
+\ac{LDPC} code ($n=2640$, $k=1320$) presented by Barman et al. in
+\cite{original_admm} are compared to the results from the simulations
+conducted in the context of this thesis.
+The parameters chosen were $\mu=3.3$, $\rho=1.9$, $K=1000$,
+$\epsilon_\text{pri}=10^{-5}$ and $\epsilon_\text{dual}=10^{-5}$,
+the same as in \cite{original_admm}.
+The two \ac{FER} curves are practically identical.
+Also shown is the curve resulting from \ac{BP} decoding with
+1000 iterations.
+The two algorithms perform relatively similarly, staying within $\SI{0.5}{dB}$
+of one another.
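+
+The tolerances $\epsilon_\text{pri}$ and $\epsilon_\text{dual}$ used here and
+in the previous experiments enter through the usual \ac{ADMM} residual test.
+As a sketch, assuming the generic residual definitions from the \ac{ADMM}
+literature rather than any implementation-specific variant, the iteration is
+stopped as soon as
+%
+\begin{equation*}
+    \left\Vert \boldsymbol{r}^{(k)} \right\Vert_2 \leq \epsilon_\text{pri}
+    \quad \text{and} \quad
+    \left\Vert \boldsymbol{s}^{(k)} \right\Vert_2 \leq \epsilon_\text{dual},
+\end{equation*}
+%
+where the primal residual $\boldsymbol{r}^{(k)}$ measures the remaining
+violation of the consensus constraints between $\tilde{\boldsymbol{c}}$ and
+the replicas $\boldsymbol{z}_j$, and the dual residual $\boldsymbol{s}^{(k)}$
+measures the change of the $\boldsymbol{z}_j$ between consecutive iterations,
+scaled by the penalty parameter.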
+
+\begin{figure}[H]
+    \centering
+
+    \begin{tikzpicture}
+        \begin{axis}[
+                grid=both,
+                xlabel={$E_b / N_0 \left( \text{dB} \right) $}, ylabel={\acs{FER}},
+                ymode=log,
+                width=0.6\textwidth,
+                height=0.45\textwidth,
+                legend style={at={(0.5,-0.57)},anchor=south},
+                legend cell align={left},
+            ]
+            \addplot[Turquoise, line width=1pt, mark=*]
+            table [col sep=comma, x=SNR, y=FER,
+                discard if gt={SNR}{2.2},
+            ]
+            {res/admm/fer_paper_margulis.csv};
+            \addlegendentry{\acs{ADMM} (Barman et al.)}
+            \addplot[NavyBlue, densely dashed, line width=1pt, mark=triangle]
+            table [col sep=comma, x=SNR, y=FER,]
+            {res/admm/ber_margulis264013203.csv};
+            \addlegendentry{\acs{ADMM} (Own results)}
+            \addplot[RoyalPurple, line width=1pt, mark=*]
+            table [col sep=comma, x=SNR, y=FER, discard if gt={SNR}{2.2},]
+            {res/generic/fer_bp_mackay_margulis.csv};
+            \addlegendentry{\acs{BP} (Barman et al.)}
+        \end{axis}
+    \end{tikzpicture}
+
+    \caption{Comparison of datapoints from Barman et al. with own simulation results.
+    ``Margulis'' \ac{LDPC} code with $n = 2640$, $k = 1320$
+    \cite[\text{Margulis2640.1320.3}]{mackay_enc}\protect\footnotemark{}}
+    \label{fig:admm:results}
+\end{figure}%
 %
+In figure \ref{fig:admm:bp_multiple}, \ac{FER} curves for \ac{LP} decoding
+using \ac{ADMM} and \ac{BP} are shown for various codes.
+To ensure comparability, in all cases the number of iterations was set to
+$K=200$.
+The values of the other parameters were chosen as $\mu = 5$, $\rho = 1$,
+$\epsilon_\text{pri} = 10^{-5}$ and $\epsilon_\text{dual}=10^{-5}$.
+Comparing the simulation results for the different codes, it is apparent that
+the difference in decoding performance between the two algorithms depends on
+the code being considered.
+For all codes considered here, however, the performance of \ac{LP} decoding
+using \ac{ADMM} comes close to that of \ac{BP}, again staying within
+approximately $\SI{0.5}{dB}$.
+
+\subsection{Computational Performance}
+\label{subsec:admm:comp_perf}
+
+In terms of time complexity, the three steps of the decoding algorithm
+in equations (\ref{eq:admm:c_update}) - (\ref{eq:admm:u_update}) have to be
+considered.
+The $\tilde{\boldsymbol{c}}$- and $\boldsymbol{u}_j$-update steps are
+$\mathcal{O}\left( n \right)$ \cite[Sec. III. C.]{original_admm}.
+The complexity of the $\boldsymbol{z}_j$-update step depends on the projection
+algorithm employed.
+Since the implementation developed for this work uses the projection algorithm
+presented in \cite{original_admm}, the $\boldsymbol{z}_j$-update step
+also has linear time complexity.
+
+\begin{figure}[H]
+    \centering
+
+    \begin{tikzpicture}
+        \begin{axis}[grid=both,
+                xlabel={$n$}, ylabel={Time per frame (s)},
+                width=0.6\textwidth,
+                height=0.45\textwidth,
+                legend style={at={(0.5,-0.42)},anchor=south},
+                legend cell align={left},]
+
+            \addplot[NavyBlue, only marks, mark=triangle*]
+            table [col sep=comma, x=n, y=spf]
+            {res/admm/fps_vs_n.csv};
+        \end{axis}
+    \end{tikzpicture}
+
+    \caption{Timing requirements of the implementation of \ac{LP} decoding
+    using \ac{ADMM}}
+    \label{fig:admm:time}
+\end{figure}%
+
+Simulation results from a range of different codes can be used to verify this
+analysis.
+Figure \ref{fig:admm:time} shows the average time needed to decode one
+frame as a function of its length.
+The codes used for this consideration are the same as in section
+\ref{subsec:prox:comp_perf}.
+The results are necessarily skewed because the codes vary not only
+in their length, but also in their construction scheme and rate.
+Additionally, different optimization opportunities arise depending on the
+length of a code, since for smaller codes dynamic memory allocation can be
+completely omitted.
+This may explain why the datapoint at $n=504$ is higher than would be expected
+with linear behavior.
+Nonetheless, the simulation results roughly match the expected behavior
+following from the theoretical considerations.
+
+\begin{figure}[H]
     \centering
 
     \begin{subfigure}[t]{0.48\textwidth}
@@ -1207,187 +1433,12 @@ as shown in figure \ref{fig:admm:mu_rho_multiple}.
 
     \end{subfigure}
 
-    \caption{Dependence of the \ac{BER} on the value of the parameter $\gamma$ for various codes}
+    \caption{Dependence of the average number of iterations required on the
+    parameters $\mu$ and $\rho$ for various codes}
     \label{fig:admm:mu_rho_multiple}
 \end{figure}
 
-To get an estimate for the maximum number of iterations $K$ necessary,
-the average error during decoding can be used.
-This is shown in figure \ref{fig:admm:avg_error} as an average of
-$\SI{100000}{}$ decodings.
-$\mu$ is set to 5 and $\rho$ is set to $1$ and the rest of the parameters are
-again chosen as $\epsilon_\text{pri}=10^{-5}$ and
-$\epsilon_\text{dual}=10^{-5}$.
-Similarly to the results in section \ref{subsec:prox:choice}, a dip is
-visible around the $20$ iteration mark.
-This is due to the fact that as the number of iterations increases,
-more and more decodings converge, leaving only the mistaken ones to be
-averaged.
-The point at which the wrong decodings start to become dominant and the
-decoding performance does not increase any longer is largely independent of
-the \ac{SNR}, allowing the maximum number of iterations to be chosen without
-considering the \ac{SNR}.
-
-\begin{figure}[h]
-    \centering
-
-    \begin{tikzpicture}
-        \begin{axis}[
-                grid=both,
-                width=0.6\textwidth,
-                height=0.45\textwidth,
-                xlabel={Iteration}, ylabel={Average $\left\Vert \hat{\boldsymbol{c}}
-                - \boldsymbol{c} \right\Vert$}
-            ]
-            \addplot[ForestGreen, line width=1pt]
-            table [col sep=comma, x=k, y=err,
-                discard if not={SNR}{1.0},
-                discard if gt={k}{100}]
-            {res/admm/avg_error_20433484.csv};
-            \addlegendentry{$E_b / N_0 = \SI{1}{dB}$}
-
-            \addplot[RedOrange, line width=1pt]
-            table [col sep=comma, x=k, y=err,
-                discard if not={SNR}{2.0},
-                discard if gt={k}{100}]
-            {res/admm/avg_error_20433484.csv};
-            \addlegendentry{$E_b / N_0 = \SI{2}{dB}$}
-
-            \addplot[NavyBlue, line width=1pt]
-            table [col sep=comma, x=k, y=err,
-                discard if not={SNR}{3.0},
-                discard if gt={k}{100}]
-            {res/admm/avg_error_20433484.csv};
-            \addlegendentry{$E_b / N_0 = \SI{3}{dB}$}
-
-            \addplot[RoyalPurple, line width=1pt]
-            table [col sep=comma, x=k, y=err,
-                discard if not={SNR}{4.0},
-                discard if gt={k}{100}]
-            {res/admm/avg_error_20433484.csv};
-            \addlegendentry{$E_b / N_0 = \SI{4}{dB}$}
-        \end{axis}
-    \end{tikzpicture}
-
-    \caption{Average error for $\SI{100000}{}$ decodings. (3,6)
-    regular \ac{LDPC} code with $n=204, k=102$ \cite[\text{204.33.484}]{mackay_enc}}
-    \label{fig:admm:avg_error}
-\end{figure}%
-
-The last two parameters remaining to be examined are the tolerances for the
-stopping criterion of the algorithm, $\epsilon_\text{pri}$ and
-$\epsilon_\text{dual}$.
-These are both set to the same value $\epsilon$.
-The effect of their value on the decoding performance is visualized in figure
-\ref{fig:admm:epsilon}.
-All parameters except $\epsilon_\text{pri}$ and $\epsilon_\text{dual}$ are
-kept constant, with $\mu=5$, $\rho=1$ and $E_b / N_0 = \SI{4}{dB}$ and
-performing a maximum of 200 iterations.
-A lower value for the tolerance initially leads to a dramatic decrease in the -\ac{FER}, this effect fading as the tolerance becomes increasingly lower. - -\begin{figure}[h] - \centering - - \begin{tikzpicture} - \begin{axis}[ - grid=both, - xlabel={$\epsilon$}, ylabel={\acs{FER}}, - ymode=log, - xmode=log, - x dir=reverse, - width=0.6\textwidth, - height=0.45\textwidth, - ] - \addplot[NavyBlue, line width=1pt, densely dashed, mark=*] - table [col sep=comma, x=epsilon, y=FER, - discard if not={SNR}{3.0},] - {res/admm/fer_epsilon_20433484.csv}; - \end{axis} - \end{tikzpicture} - - \caption{Effect of the value of the parameters $\epsilon_\text{pri}$ and - $\epsilon_\text{dual}$ on the \acs{FER}. (3,6) regular \ac{LDPC} code with - $n=204, k=102$ \cite[\text{204.33.484}]{mackay_enc}} - \label{fig:admm:epsilon} -\end{figure}% - -In conclusion, the parameters $\mu$ and $\rho$ should be chosen comparatively -small and large, respectively, to reduce the average runtime of the decoding -process, while keeping them within a certain range as to not compromise the -decoding performance. -The maximum number of iterations performed can be chosen independently -of the \ac{SNR}. -Finally, small values should be given to the parameters -$\epsilon_{\text{pri}}$ and $\epsilon_{\text{dual}}$ to achieve the lowest -possible error rate. - - -\subsection{Decoding Performance} - -In figure \ref{fig:admm:results}, the simulation results for the ``Margulis'' -\ac{LDPC} code ($n=2640$, $k=1320$) presented by Barman et al. in -\cite{original_admm} are compared to the results from the simulations -conducted in the context of this thesis. -The parameters chosen were $\mu=3.3$, $\rho=1.9$, $K=1000$, -$\epsilon_\text{pri}=10^{-5}$ and $\epsilon_\text{dual}=10^{-5}$, -the same as in \cite{original_admm}. -The two \ac{FER} curves are practically identical. -Also shown is the curve resulting from \ac{BP} decoding, performing -1000 iterations. -The two algorithms perform relatively similarly, staying within $\SI{0.5}{dB}$ -of one another. - -\begin{figure}[h] - \centering - - \begin{tikzpicture} - \begin{axis}[ - grid=both, - xlabel={$E_b / N_0 \left( \text{dB} \right) $}, ylabel={\acs{FER}}, - ymode=log, - width=0.6\textwidth, - height=0.45\textwidth, - legend style={at={(0.5,-0.57)},anchor=south}, - legend cell align={left}, - ] - \addplot[Turquoise, line width=1pt, mark=*] - table [col sep=comma, x=SNR, y=FER, - discard if gt={SNR}{2.2}, - ] - {res/admm/fer_paper_margulis.csv}; - \addlegendentry{\acs{ADMM} (Barman et al.)} - \addplot[NavyBlue, densely dashed, line width=1pt, mark=triangle] - table [col sep=comma, x=SNR, y=FER,] - {res/admm/ber_margulis264013203.csv}; - \addlegendentry{\acs{ADMM} (Own results)} - \addplot[RoyalPurple, line width=1pt, mark=*] - table [col sep=comma, x=SNR, y=FER, discard if gt={SNR}{2.2},] - {res/generic/fer_bp_mackay_margulis.csv}; - \addlegendentry{\acs{BP} (Barman et al.)} - \end{axis} - \end{tikzpicture} - - \caption{Comparison of datapoints from Barman et al. with own simulation results. - ``Margulis'' \ac{LDPC} code with $n = 2640$, $k = 1320$ - \cite[\text{Margulis2640.1320.3}]{mackay_enc}\protect\footnotemark{}} - \label{fig:admm:results} -\end{figure}% -% -In figure \ref{fig:admm:bp_multiple}, \ac{FER} curves for \ac{LP} decoding -using \ac{ADMM} and \ac{BP} are shown for various codes. -To ensure comparability, in all cases the number of iterations was set to -$K=200$. 
-The values of the other parameters were chosen as $\mu = 5$, $\rho = 1$, -$\epsilon_\text{pri} = 10^{-5}$ and $\epsilon_\text{dual}=10^{-5}$. -Comparing the simulation results for the different codes, it is apparent that -the difference in decoding performance depends on the code being -considered. -For all codes considered here, however, the performance of \ac{LP} decoding -using \ac{ADMM} comes close to that of \ac{BP}, again staying withing -approximately $\SI{0.5}{dB}$. - -\begin{figure}[h] +\begin{figure}[H] \centering \begin{subfigure}[t]{0.48\textwidth} @@ -1581,54 +1632,3 @@ approximately $\SI{0.5}{dB}$. and \ac{BP} for various codes} \label{fig:admm:bp_multiple} \end{figure} - -\subsection{Computational Performance} -\label{subsec:admm:comp_perf} - -In terms of time complexity, the three steps of the decoding algorithm -in equations (\ref{eq:admm:c_update}) - (\ref{eq:admm:u_update}) have to be -considered. -The $\tilde{\boldsymbol{c}}$- and $\boldsymbol{u}_j$-update steps are -$\mathcal{O}\left( n \right)$ \cite[Sec. III. C.]{original_admm}. -The complexity of the $\boldsymbol{z}_j$-update step depends on the projection -algorithm employed. -Since for the implementation completed for this work the projection algorithm -presented in \cite{original_admm} is used, the $\boldsymbol{z}_j$-update step -also has linear time complexity. - -\begin{figure}[h] - \centering - - \begin{tikzpicture} - \begin{axis}[grid=both, - xlabel={$n$}, ylabel={Time per frame (s)}, - width=0.6\textwidth, - height=0.45\textwidth, - legend style={at={(0.5,-0.42)},anchor=south}, - legend cell align={left},] - - \addplot[NavyBlue, only marks, mark=triangle*] - table [col sep=comma, x=n, y=spf] - {res/admm/fps_vs_n.csv}; - \end{axis} - \end{tikzpicture} - - \caption{Timing requirements of the \ac{LP} decoding using \ac{ADMM} implementation} - \label{fig:admm:time} -\end{figure}% - -Simulation results from a range of different codes can be used to verify this -analysis. -Figure \ref{fig:admm:time} shows the average time needed to decode one -frame as a function of its length. -The codes used for this consideration are the same as in section \ref{subsec:prox:comp_perf} -The results are necessarily skewed because these vary not only -in their length, but also in their construction scheme and rate. -Additionally, different optimization opportunities arise depending on the -length of a code, since for smaller codes dynamic memory allocation can be -completely omitted. -This may explain why the datapoint at $n=504$ is higher then would be expected -with linear behavior. -Nonetheless, the simulation results roughly match the expected behavior -following from the theoretical considerations. - diff --git a/latex/thesis/chapters/proximal_decoding.tex b/latex/thesis/chapters/proximal_decoding.tex index da4140b..efd5db0 100644 --- a/latex/thesis/chapters/proximal_decoding.tex +++ b/latex/thesis/chapters/proximal_decoding.tex @@ -355,8 +355,10 @@ $\gamma$ are shown, as well as the curve resulting from decoding using a \ac{BP} decoder, as a reference. The results from Wadayama et al. are shown with solid lines, while the newly generated ones are shown with dashed lines. +The parameters chosen are $K=200, \mu = 0.05$ and $\eta=1.5$. +It is noticeable that for a moderately chosen value of $\gamma$ % -\begin{figure}[h] +\begin{figure}[H] \centering \begin{tikzpicture} @@ -415,7 +417,6 @@ while the newly generated ones are shown with dashed lines. 
\label{fig:prox:results}
 \end{figure}
 %
-It is noticeable that for a moderately chosen value of $\gamma$
 ($\gamma = 0.05$) the decoding performance is better than for low
 ($\gamma = 0.01$) or high ($\gamma = 0.15$) values.
 The question arises whether there is some optimal value maximizing the decoding
@@ -431,8 +432,17 @@ The previously shown results are highlighted.
 Evidently, while the decoding performance does depend on the value of
 $\gamma$, there is no single optimal value offering optimal performance, but
 rather a certain interval in which it stays largely unchanged.
-%
-\begin{figure}[h]
+This indicates that while the choice of the parameter $\gamma$
+significantly affects the decoding performance, there is little to be
+gained from an extensive search for an exact optimum.
+Rather, a preliminary examination providing a rough window for $\gamma$ may
+be sufficient.
+Similar results can be observed when comparing several different codes, as
+shown in figure \ref{fig:prox:results_3d_multiple}.
+It is apparent that while the exact landscape of the graph depends on the code,
+the general behavior is the same for all codes analyzed in this thesis.
+
+\begin{figure}[H]
     \centering
 
     \begin{tikzpicture}
@@ -477,16 +487,6 @@ rather a certain interval in which it stays largely unchanged.
        $n=204, k=102$ \cite[\text{204.33.484}]{mackay_enc}}
     \label{fig:prox:results_3d}
 \end{figure}%
-%
-This indicates that while the choice of the parameter $\gamma$
-significantly affects the decoding performance, there is not much benefit
-attainable in undertaking an extensive search for an exact optimum.
-Rather, a preliminary examination providing a rough window for $\gamma$ may
-be sufficient.
-When examining a number of different codes, see figure
-\ref{fig:prox:results_3d_multiple}, it is apparent that while the exact
-landscape of the graph depends on the code, the general behavior is the same
-for all codes analyzed in this thesis.
 
 The parameter $\gamma$ describes the step-size for the optimization step
 dealing with the code-constraint polynomial;
@@ -505,7 +505,15 @@ similar values of the two step sizes.
 Again, this consideration applies to a multitude of different codes, as
 depicted in figure \ref{fig:prox:gamma_omega_multiple}.
 
-\begin{figure}[h]
+To better understand how to determine the optimal value for the parameter $K$,
+the average error is inspected.
+This time $\gamma$ and $\omega$ are held constant at $0.05$ and the average
+error is observed during each iteration of the decoding process, for several
+different \acp{SNR}.
+The plots have been generated by averaging the error over $\SI{500000}{}$
+decodings.%
+%
+\begin{figure}[H]
     \centering
 
     \begin{tikzpicture}
@@ -519,8 +527,8 @@ depicted in figure \ref{fig:prox:gamma_omega_multiple}.
             yticklabels={0, 0.05, 0.1, 0.15},
             xtick={0.05, 0.1, 0.15, 0.2},
             xticklabels={0.05, 0.1, 0.15, 0.2},
-            width=0.6\textwidth,
-            height=0.45\textwidth,
+            width=0.56\textwidth,
+            height=0.42\textwidth,
             point meta min=-5.7,
             point meta max=-0.5,
             colorbar style={
@@ -545,13 +553,6 @@ depicted in figure \ref{fig:prox:gamma_omega_multiple}.
 
 \end{figure}%
 %
-To better understand how to determine the optimal value for the parameter $K$,
-the average error is inspected.
-This time $\gamma$ and $\omega$ are held constant at $0.05$ and the average
-error is observed during each iteration of the decoding process, for several
-different \acp{SNR}.
-The plots have been generated by averaging the error over $\SI{500000}{}$
-decodings.
As some decodings go on for more iterations than others, the number of values which are averaged for each datapoints vary. This explains the dip visible in all curves around the 20th iteration, since @@ -568,7 +569,7 @@ Remarkably, the \ac{SNR} seems to not have any impact on the number of iterations necessary to reach the point at which the average error stabilizes. -\begin{figure}[h] +\begin{figure}[H] \centering \begin{tikzpicture} @@ -602,7 +603,6 @@ stabilizes. \caption{Average error for $\SI{500000}{}$ decodings. (3,6) regular \ac{LDPC} code with $n=204, k=102$ \cite[\text{204.33.484}]{mackay_enc}} \end{figure}% -% Changing the parameter $\eta$ does not appear to have a significant effect on the decoding performance when keeping the value within a reasonable window @@ -622,7 +622,23 @@ means to bring about numerical stability. \subsection{Decoding Performance} -\begin{figure}[h] +Until now, only the \ac{BER} has been considered to gauge the decoding +performance. +The \ac{FER}, however, shows considerably different behavior, as can be seen in +figure \ref{fig:prox:ber_fer_dfr}. +Besides the \ac{BER} and \ac{FER} curves, the figure also shows the +\textit{decoding failure rate}. +This is the rate at which the iterative process produces invalid codewords, +i.e., the stopping criterion (line 6 of algorithm \ref{alg:prox}) is never +satisfied and the maximum number of iterations $K$ is reached without +converging to a valid codeword. +Three lines are plotted in each case, corresponding to different values of +the parameter $\gamma$. +The values chosen are the same as in figure \ref{fig:prox:results}, as they +seem to adequately describe the behavior across a wide range of values +(see figure \ref{fig:prox:results_3d}). + +\begin{figure}[H] \centering \begin{tikzpicture} @@ -700,23 +716,6 @@ means to bring about numerical stability. \ac{LDPC} code with $n=204, k=102$ \cite[\text{204.33.484}]{mackay_enc}} \label{fig:prox:ber_fer_dfr} \end{figure}% -% - -Until now, only the \ac{BER} has been considered to gauge the decoding -performance. -The \ac{FER}, however, shows considerably different behavior, as can be seen in -figure \ref{fig:prox:ber_fer_dfr}. -Besides the \ac{BER} and \ac{FER} curves, the figure also shows the -\textit{decoding failure rate}. -This is the rate at which the iterative process produces invalid codewords, -i.e., the stopping criterion (line 6 of algorithm \ref{alg:prox}) is never -satisfied and the maximum number of iterations $K$ is reached without -converging to a valid codeword. -Three lines are plotted in each case, corresponding to different values of -the parameter $\gamma$. -The values chosen are the same as in figure \ref{fig:prox:results}, as they -seem to adequately describe the behavior across a wide range of values -(see figure \ref{fig:prox:results_3d}). It is apparent that the \ac{FER} and the decoding failure rate are extremely similar, especially for higher \acp{SNR}. @@ -724,7 +723,7 @@ This leads to the hypothesis that, at least for higher \acp{SNR}, frame errors arise mainly due to the non-convergence of the algorithm instead of convergence to the wrong codeword. This course of thought will be picked up in section -\ref{sec:prox:Improved Implementation} when proposing a method toimprove the +\ref{sec:prox:Improved Implementation} when proposing a method to improve the algorithm. 
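+
+In other words, denoting by $P_\text{nc}$ the probability that the algorithm
+fails to converge to a valid codeword and by $P_\text{wc}$ the probability
+that it converges to a wrong one (a decomposition introduced here purely for
+illustration), the frame error rate can be written as
+\begin{equation*}
+    \text{FER} = P_\text{nc} + P_\text{wc},
+\end{equation*}
+while the decoding failure rate equals $P_\text{nc}$ alone.
+The observed agreement between the two curves thus amounts to
+$P_\text{wc} \approx 0$ for higher \acp{SNR}.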
In summary, the \ac{BER} and \ac{FER} indicate dissimilar decoding @@ -749,7 +748,7 @@ estimates and their values are interwoven to obtain the shown result), as well as the gradients of the negative log-likelihood and the code-constraint polynomial, which influence the next estimate. % -\begin{figure}[h] +\begin{figure}[H] \begin{subfigure}[t]{0.48\textwidth} \centering \begin{tikzpicture} @@ -984,18 +983,17 @@ The gradient of the negative log-likelihood points towards the received codeword as can be seen in figure \ref{fig:prox:gradients:L}, since assuming \ac{AWGN} and no other information that is the estimate maximizing the likelihood. +Looking at figure \ref{fig:prox:gradients:h} it also becomes apparent why the +value of the parameter $\gamma$ has to be kept small, as mentioned in section +\ref{sec:prox:Decoding Algorithm}. +Local minima are introduced between the codewords, in the areas in which it is +not immediately clear which codeword is the most likely one. +Increasing the value of $\gamma$ results in +$h \left( \tilde{\boldsymbol{x}} \right)$ dominating the landscape of the +objective function, thereby introducing these local minima into the objective +function. -It is obvious that walking along the gradients in an alternating fashion will -produce a net movement in a certain direction, as long as they -have a common component. -As soon as this common component is exhausted, they will start pulling the -estimate in opposing directions, leading to an oscillation as illustrated -in figure \ref{fig:prox:convergence}. -Consequently, this oscillation is an intrinsic property of the structure of -the proximal decoding algorithm, where the two parts of the objective function -are minimized in an alternating manner by use of their gradients. -% -\begin{figure}[h] +\begin{figure}[H] \centering \begin{subfigure}[c]{0.5\textwidth} @@ -1073,7 +1071,18 @@ are minimized in an alternating manner by use of their gradients. polynomial} \label{fig:prox:gradients} \end{figure}% -% + +It is obvious that walking along the gradients in an alternating fashion will +produce a net movement in a certain direction, as long as they +have a common component. +As soon as this common component is exhausted, they will start pulling the +estimate in opposing directions, leading to an oscillation as illustrated +in figure \ref{fig:prox:convergence}. +Consequently, this oscillation is an intrinsic property of the structure of +the minimization process of the proximal decoding algorithm, +where the two parts of the objective function +are minimized in an alternating fashion by use of their gradients. + While the initial net movement is generally directed in the right direction owing to the gradient of the negative log-likelihood, the resulting oscillation may well take place in a segment of space not corresponding to a valid @@ -1089,16 +1098,6 @@ a valid codeword before the oscillation takes place. This explains why the decoding performance is significantly better for higher \acp{SNR}. -Looking at figure \ref{fig:prox:gradients:h} it also becomes apparent why the -value of the parameter $\gamma$ has to be kept small, as mentioned in section -\ref{sec:prox:Decoding Algorithm}. -Local minima are introduced between the codewords, in the areas in which it is -not immediately clear which codeword is the most likely one. 
-Increasing the value of $\gamma$ results in
-$h \left( \tilde{\boldsymbol{x}} \right)$ dominating the landscape of the
-objective function, thereby introducing these local minima into the objective
-function.
-
 When considering codes with larger $n$ the behavior generally stays the same,
 with some minor differences.
 In figure \ref{fig:prox:convergence_large_n} the decoding process is
@@ -1110,13 +1109,7 @@ However, in this case, the gradient of the code-constraint polynomial
 itself starts to oscillate, its average value being such that the effect of
 the gradient of the negative log-likelihood is counteracted.
 
-In conclusion, as a general rule, the proximal decoding algorithm reaches
-an oscillatory state which it cannot escape as a consequence of its structure.
-In this state the constraints may not be satisfied, leading to the algorithm
-exhausting its maximum number of iterations without converging and returning
-an invalid codeword.
-
-\begin{figure}[h]
+\begin{figure}[H]
     \centering
 
     \begin{tikzpicture}
@@ -1148,7 +1141,12 @@ an invalid codeword.
        with $n=204, k=102$ \cite[\text{204.33.484}]{mackay_enc}}
     \label{fig:prox:convergence_large_n}
 \end{figure}%
-%
+
+In conclusion, as a general rule, the proximal decoding algorithm reaches
+an oscillatory state which it cannot escape as a consequence of its structure.
+In this state the constraints may not be satisfied, leading to the algorithm
+exhausting its maximum number of iterations without converging and returning
+an invalid codeword.
 
 \subsection{Computational Performance}
 
@@ -1174,7 +1172,7 @@ practical since it is the same as that of $\ac{BP}$.
 
 This theoretical analysis is also corroborated by the practical results shown
 in figure \ref{fig:prox:time_comp}.
-The codes considered are the BCH(31, 11) and BCH(31, 26) codes, a number of (3, 6)
+The codes considered are the BCH(31, 11) and BCH(31, 26) codes, several (3, 6)
 regular \ac{LDPC} codes (\cite[\text{96.3.965, 204.33.484, 408.33.844}]{mackay_enc}),
 a (5,10) regular \ac{LDPC} code (\cite[\text{204.55.187}]{mackay_enc}) and a
 progressive edge growth construction code (\cite[\text{PEGReg252x504}]{mackay_enc}).
@@ -1186,7 +1184,7 @@ decode a received frame and the length $n$ of the frame can be observed.
 These results were generated on an Intel Core i7-7700HQ 4-core CPU, running at
 $\SI{2.80}{GHz}$ and utilizing all cores.
 
-\begin{figure}[h]
+\begin{figure}[H]
     \centering
 
     \begin{tikzpicture}
@@ -1237,8 +1235,23 @@ section \ref{subsec:prox:conv_properties} and shown in figure
 \ref{fig:prox:convergence_large_n}) and the probability of having a bit
 error are strongly correlated, a relationship being depicted in figure
 \ref{fig:prox:correlation}.
-%
-\begin{figure}[h]
+The x-axis depicts the variance in $\nabla h\left( \tilde{\boldsymbol{x}} \right)$
+after the 100th iteration, and the y-axis depicts whether there is a bit error.
+While this is not exactly the magnitude of the oscillation, it is
+proportional to it and easier to compute.
+The datapoints are taken from a single decoding operation.
+
+Using this observation as a rule to determine the $N\in\mathbb{N}$ most
+probably wrong bits, all variations of the estimate with those bits modified
+can be generated.
+An \ac{ML}-in-the-list step can then be performed to determine the
+most likely candidate.
+This process is outlined in algorithm \ref{alg:prox:improved}.
+Its only difference from algorithm \ref{alg:prox} is that instead of returning
+the last estimate when no valid result is reached, an ML-in-the-list step is
+performed.
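+
+As a rough formalization of this selection rule (the notation is illustrative
+and not taken from the implementation): for each bit $i$, the variance of the
+corresponding gradient component over the final iterations,
+%
+\begin{equation*}
+    \sigma_i^2 = \operatorname{Var}_{k > 100} \left( \left[ \nabla h \left(
+    \tilde{\boldsymbol{x}}^{(k)} \right) \right]_i \right),
+\end{equation*}
+%
+serves as a reliability score, and the $N$ bits with the largest $\sigma_i^2$
+are flagged as probably wrong.
+Modifying these bits in all possible ways yields $2^N$ candidate words
+$\hat{\boldsymbol{c}}_l$ (for instance, $2^{12} = 4096$ candidates for the
+value $N=12$ used below), of which only the valid ones, i.e., those
+satisfying all parity checks, enter the final selection.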
+ +\begin{figure}[H] \centering \begin{tikzpicture} @@ -1264,22 +1277,6 @@ error are strongly correlated, a relationship being depicted in figure (3,6) regular \ac{LDPC} code with $n=204, k=102$ \cite[\text{204.33.484}]{mackay_enc}} \label{fig:prox:correlation} \end{figure}% -% -The x-axis depicts the variance in $\nabla h\left( \tilde{\boldsymbol{x}} \right)$ -after the 100th iteration, and the y-axis depicts whether there is a bit error. -While this is not exactly the magnitude of the oscillation, it is -proportional and easier to compute. -The datapoints are taken from a single decoding operation. - -Using this observation as a rule to determine the $N\in\mathbb{N}$ most -probably wrong bits, all variations of the estimate with those bits modified -can be generated. -An \ac{ML}-in-the-list step can then be performed to determine the -most likely candidate. -This process is outlined in algorithm \ref{alg:prox:improved}. -Its only difference to algorithm \ref{alg:prox} is that instead of returning -the last estimate when no valid result is reached, an ML-in-the-list step is -performed. \begin{genericAlgorithm}[caption={Improved proximal decoding algorithm}, label={alg:prox:improved},] @@ -1294,15 +1291,13 @@ for $K$ iterations do end for $\textcolor{KITblue}{\text{Find }N\text{ most probably wrong bits}}$ $\textcolor{KITblue}{\text{Generate variations } \hat{\boldsymbol{c}}_l,\hspace{1mm} - l\in \mathbb{N}\text{ of } \hat{\boldsymbol{c}}\text{ with the }N\text{ bits modified}}$ + l=1,\ldots,2^N\text{ of } \hat{\boldsymbol{c}}\text{ with the }N\text{ bits modified}}$ $\textcolor{KITblue}{\text{Compute }d_H\left( \hat{\boldsymbol{c}}_l, \hat{\boldsymbol{c}} \right) \text{ for all valid codewords } \hat{\boldsymbol{c}}_l}$ $\textcolor{KITblue}{\text{Output }\hat{\boldsymbol{c}}_l\text{ with lowest } d_H\left( \hat{\boldsymbol{c}}_l, \hat{\boldsymbol{c}} \right)}$ \end{genericAlgorithm} -%\todo{Not hamming distance, correlation} - Figure \ref{fig:prox:improved_results} shows the gain that can be achieved when the number $N$ is chosen to be 12. Again, three values of gamma are chosen, for which the \ac{BER}, \ac{FER} @@ -1329,7 +1324,7 @@ Similar behavior can be observed in all cases, with varying improvement over standard proximal decoding. In some cases, a gain of up to $\SI{1}{dB}$ or higher can be achieved. -\begin{figure}[h] +\begin{figure}[H] \centering \begin{tikzpicture} @@ -1459,22 +1454,37 @@ In some cases, a gain of up to $\SI{1}{dB}$ or higher can be achieved. \label{fig:prox:improved_results} \end{figure} -Interestingly, the improved algorithm does not have much different time -complexity than proximal decoding. -This is the case, because the ML-in-the-list step is only performed when the +The average run time of the improved algorithm depends on the parameter $N$. +The time complexity, however, does not: it is still linear with respect to $n$. +Interestingly, the improved algorithm does not have a much different average +run time than proximal decoding, because the computationally expensive +ML-in-the-list step is only performed when the proximal decoding algorithm produces an invalid result, which in absolute terms happens relatively infrequently. This is illustrated in figure \ref{fig:prox:time_complexity_comp}, where the average time needed to decode a single received frame is visualized for proximal decoding as well as for the improved algorithm. 
+The same codes as before are considered, i.e., the BCH(31, 11) and BCH(31, 26) +codes, several (3, 6) regular \ac{LDPC} codes +(\cite[\text{96.3.965, 204.33.484, 408.33.844}]{mackay_enc}), +a (5,10) regular \ac{LDPC} code (\cite[\text{204.55.187}]{mackay_enc}) and a +progressive edge growth construction code (\cite[\text{PEGReg252x504}]{mackay_enc}). It should be noted that some variability in the data is to be expected, since the timing of the actual simulations depends on a multitude of other parameters such as the scheduling choices of the operating system as well as variations in the implementations themselves. -Nevertheless, the empirical data serves, at least in part, to validate the +Nevertheless, the empirical data, at least in part, supports the theoretical considerations. -\begin{figure}[h] +In conclusion, the decoding performance of proximal decoding can be improved +by appending an ML-in-the-list step when the algorithm does not produce a +valid result. +The gain can in some cases be as high as $\SI{1}{dB}$ and is achievable with +negligible computational performance penalty. +The improvement is mainly noticeable for higher \ac{SNR} values and depends on +the code as well as the chosen parameters. + +\begin{figure}[H] \centering \begin{tikzpicture} @@ -1500,14 +1510,774 @@ theoretical considerations. \caption{Comparison of the timing requirements of the implementations of proximal decoding and the improved algorithm} \label{fig:prox:time_complexity_comp} -\end{figure}% -% +\end{figure} -In conclusion, the decoding performance of proximal decoding can be improved -by appending an ML-in-the-list step when the algorithm does not produce a -valid result. -The gain can in some cases be as high as $\SI{1}{dB}$ and is achievable with -negligible computational performance penalty. -The improvement is mainly noticeable for higher \ac{SNR} values and depends on -the code as well as the chosen parameters. 
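+The reasoning can be made slightly more explicit (an illustrative
+decomposition, not taken from the implementation): the average time needed
+per frame is roughly
+%
+\begin{equation*}
+    T \approx T_\text{prox} + P_\text{nc} \cdot T_\text{ML},
+\end{equation*}
+%
+where $T_\text{prox}$ is the time spent on the proximal iterations,
+$P_\text{nc}$ the rate at which invalid results occur and $T_\text{ML}$ the
+cost of the ML-in-the-list step, which grows with $2^N$; since $P_\text{nc}$
+is small in absolute terms, the second term contributes little to the
+average.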
+ +\begin{figure}[H] + \centering + + \begin{subfigure}[t]{0.48\textwidth} + \centering + \begin{tikzpicture} + \begin{axis}[view={75}{30}, + zmode=log, + xlabel={$E_b / N_0$ (dB)}, + ylabel={$\gamma$}, + zlabel={BER}, + width=\textwidth, + height=0.75\textwidth,] + \addplot3[surf, + mesh/rows=17, mesh/cols=10, + colormap/viridis] table [col sep=comma, + x=SNR, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_963965.csv}; + \addplot3[RedOrange, line width=1.5] table[col sep=comma, + discard if not={gamma}{0.05}, + x=SNR, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_963965.csv}; + \addplot3[NavyBlue, line width=1.5] table[col sep=comma, + discard if not={gamma}{0.01}, + x=SNR, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_963965.csv}; + \addplot3[ForestGreen, line width=1.5] table[col sep=comma, + discard if not={gamma}{0.15}, + x=SNR, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_963965.csv}; + \end{axis} + \end{tikzpicture} + \caption{$\left( 3, 6 \right)$-regular \ac{LDPC} code with $n=96, k=48$ + \cite[\text{96.3.965}]{mackay_enc}} + \end{subfigure}% + \hfill + \begin{subfigure}[t]{0.48\textwidth} + \centering + \begin{tikzpicture} + \begin{axis}[view={75}{30}, + zmode=log, + xlabel={$E_b / N_0$ (dB)}, + ylabel={$\gamma$}, + zlabel={BER}, + width=\textwidth, + height=0.75\textwidth,] + \addplot3[surf, + mesh/rows=17, mesh/cols=10, + colormap/viridis] table [col sep=comma, + x=SNR, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_bch_31_26.csv}; + \addplot3[RedOrange, line width=1.5] table[col sep=comma, + discard if not={gamma}{0.05}, + x=SNR, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_bch_31_26.csv}; + \addplot3[NavyBlue, line width=1.5] table[col sep=comma, + discard if not={gamma}{0.01}, + x=SNR, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_bch_31_26.csv}; + \addplot3[ForestGreen, line width=1.5] table[col sep=comma, + discard if not={gamma}{0.15}, + x=SNR, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_bch_31_26.csv}; + \end{axis} + \end{tikzpicture} + \caption{BCH code with $n=31, k=26$\\[2\baselineskip]} + \end{subfigure} + + \vspace{3mm} + + \begin{subfigure}[t]{0.48\textwidth} + \centering + \begin{tikzpicture} + \begin{axis}[view={75}{30}, + zmode=log, + xlabel={$E_b/N_0$ (dB)}, + ylabel={$\gamma$}, + zlabel={BER}, + width=\textwidth, + height=0.75\textwidth,] + \addplot3[surf, + mesh/rows=17, mesh/cols=10, + colormap/viridis] table [col sep=comma, + x=SNR, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_20433484_fewer_SNR.csv}; + \addplot3[RedOrange, line width=1.5] table[col sep=comma, + discard if not={gamma}{0.05}, + x=SNR, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_20433484_fewer_SNR.csv}; + \addplot3[NavyBlue, line width=1.5] table[col sep=comma, + discard if not={gamma}{0.01}, + x=SNR, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_20433484_fewer_SNR.csv}; + \addplot3[ForestGreen, line width=1.5] table[col sep=comma, + discard if not={gamma}{0.15}, + x=SNR, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_20433484_fewer_SNR.csv}; + \end{axis} + \end{tikzpicture} + \caption{$\left( 3, 6 \right)$-regular \ac{LDPC} code with $n=204, k=102$ + \cite[\text{204.33.484}]{mackay_enc}} + \end{subfigure}% + \hfill + \begin{subfigure}[t]{0.48\textwidth} + \centering + \begin{tikzpicture} + \begin{axis}[view={75}{30}, + zmode=log, + xlabel={$E_b / N_0$ (dB)}, + ylabel={$\gamma$}, + zlabel={BER}, + width=\textwidth, + height=0.75\textwidth,] + \addplot3[surf, + mesh/rows=17, mesh/cols=10, + colormap/viridis] table [col sep=comma, + x=SNR, y=gamma, z=BER] + 
{res/proximal/2d_ber_fer_dfr_20455187.csv}; + \addplot3[RedOrange, line width=1.5] table[col sep=comma, + discard if not={gamma}{0.05}, + x=SNR, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_20455187.csv}; + \addplot3[NavyBlue, line width=1.5] table[col sep=comma, + discard if not={gamma}{0.01}, + x=SNR, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_20455187.csv}; + \addplot3[ForestGreen, line width=1.5] table[col sep=comma, + discard if not={gamma}{0.15}, + x=SNR, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_20455187.csv}; + \end{axis} + \end{tikzpicture} + \caption{$\left( 5, 10 \right)$-regular \ac{LDPC} code with $n=204, k=102$ + \cite[\text{204.55.187}]{mackay_enc}} + \end{subfigure}% + + \vspace{3mm} + + \begin{subfigure}[t]{0.48\textwidth} + \centering + \begin{tikzpicture} + \begin{axis}[view={75}{30}, + zmode=log, + xlabel={$E_b / N_0$ (dB)}, + ylabel={$\gamma$}, + zlabel={BER}, + width=\textwidth, + height=0.75\textwidth,] + \addplot3[surf, + mesh/rows=17, mesh/cols=10, + colormap/viridis] table [col sep=comma, + x=SNR, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_40833844.csv}; + \addplot3[RedOrange, line width=1.5] table[col sep=comma, + discard if not={gamma}{0.05}, + x=SNR, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_40833844.csv}; + \addplot3[NavyBlue, line width=1.5] table[col sep=comma, + discard if not={gamma}{0.01}, + x=SNR, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_40833844.csv}; + \addplot3[ForestGreen, line width=1.5] table[col sep=comma, + discard if not={gamma}{0.15}, + x=SNR, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_40833844.csv}; + \end{axis} + \end{tikzpicture} + \caption{$\left( 3, 6 \right)$-regular \ac{LDPC} code with $n=408, k=204$ + \cite[\text{408.33.844}]{mackay_enc}} + \end{subfigure}% + \hfill + \begin{subfigure}[t]{0.48\textwidth} + \centering + \begin{tikzpicture} + \begin{axis}[view={75}{30}, + zmode=log, + xlabel={$E_b / N_0$ (dB)}, + ylabel={$\gamma$}, + zlabel={BER}, + width=\textwidth, + height=0.75\textwidth,] + \addplot3[surf, + mesh/rows=17, mesh/cols=10, + colormap/viridis] table [col sep=comma, + x=SNR, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_pegreg252x504.csv}; + \addplot3[RedOrange, line width=1.5] table[col sep=comma, + discard if not={gamma}{0.05}, + x=SNR, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_pegreg252x504.csv}; + \addplot3[NavyBlue, line width=1.5] table[col sep=comma, + discard if not={gamma}{0.01}, + x=SNR, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_pegreg252x504.csv}; + \addplot3[ForestGreen, line width=1.5] table[col sep=comma, + discard if not={gamma}{0.15}, + x=SNR, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_pegreg252x504.csv}; + \end{axis} + \end{tikzpicture} + \caption{LDPC code (progressive edge growth construction) with $n=504, k=252$ + \cite[\text{PEGReg252x504}]{mackay_enc}} + \end{subfigure}% + + \vspace{5mm} + + \begin{subfigure}[t]{\textwidth} + \centering + \begin{tikzpicture} + \begin{axis}[hide axis, + xmin=10, xmax=50, + ymin=0, ymax=0.4, + legend style={draw=white!15!black,legend cell align=left}] + \addlegendimage{surf, colormap/viridis} + \addlegendentry{$\gamma = \left[ 0\text{ : }0.01\text{ : }0.16 \right] $}; + \addlegendimage{NavyBlue, line width=1.5pt} + \addlegendentry{$\gamma = 0.01$}; + \addlegendimage{RedOrange, line width=1.5pt} + \addlegendentry{$\gamma = 0.05$}; + \addlegendimage{ForestGreen, line width=1.5pt} + \addlegendentry{$\gamma = 0.15$}; + \end{axis} + \end{tikzpicture} + + \end{subfigure} + + \caption{Dependence of the \ac{BER} on the value of the 
parameter $\gamma$ for various codes} + \label{fig:prox:results_3d_multiple} +\end{figure} + +\begin{figure}[H] + \centering + + \begin{subfigure}[t]{0.48\textwidth} + \centering + + \begin{tikzpicture} + \begin{axis}[ + colormap/viridis, + xlabel={$\omega$}, ylabel={$\gamma$}, + at={(0,0)}, view={0}{90}, + zmode=log, + ytick={0, 0.05, 0.1, 0.15}, + yticklabels={0, 0.05, 0.1, 0.15}, + xtick={0.05, 0.1, 0.15, 0.2}, + xticklabels={0.05, 0.1, 0.15, 0.2}, + width=\textwidth, + height=0.75\textwidth, + point meta min=-5.7, + point meta max=-0.5, + ] + \addplot3[ + surf, + shader=flat, + mesh/rows=17, mesh/cols=10, + ] + table [col sep=comma, x=omega, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_gamma_omega_963965.csv}; + \end{axis} + \end{tikzpicture} + + \caption{$\left( 3, 6 \right)$-regular \ac{LDPC} code with $n=96, k=48$ + \cite[\text{96.3.965}]{mackay_enc}} + \end{subfigure}% + \hfill + \begin{subfigure}[t]{0.48\textwidth} + \centering + + \begin{tikzpicture} + \begin{axis}[ + colormap/viridis, + xlabel={$\omega$}, ylabel={$\gamma$}, + at={(0,0)}, view={0}{90}, + zmode=log, + ytick={0, 0.05, 0.1, 0.15}, + yticklabels={0, 0.05, 0.1, 0.15}, + xtick={0.05, 0.1, 0.15, 0.2}, + xticklabels={0.05, 0.1, 0.15, 0.2}, + width=\textwidth, + height=0.75\textwidth, + point meta min=-5.7, + point meta max=-0.5, + ] + \addplot3[ + surf, + shader=flat, + mesh/rows=17, mesh/cols=10, + ] + table [col sep=comma, x=omega, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_gamma_omega_bch_31_26.csv}; + \end{axis} + \end{tikzpicture} + + \caption{BCH code with $n=31, k=26$\\[2\baselineskip]} + \end{subfigure}% + + \vspace{3mm} + + \begin{subfigure}[t]{0.48\textwidth} + \centering + + \begin{tikzpicture} + \begin{axis}[ + colormap/viridis, + xlabel={$\omega$}, ylabel={$\gamma$}, + at={(0,0)}, view={0}{90}, + zmode=log, + ytick={0, 0.05, 0.1, 0.15}, + yticklabels={0, 0.05, 0.1, 0.15}, + xtick={0.05, 0.1, 0.15, 0.2}, + xticklabels={0.05, 0.1, 0.15, 0.2}, + width=\textwidth, + height=0.75\textwidth, + point meta min=-5.7, + point meta max=-0.5, + ] + \addplot3[ + surf, + shader=flat, + mesh/rows=17, mesh/cols=10, + ] + table [col sep=comma, x=omega, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_gamma_omega_20433484.csv}; + \end{axis} + \end{tikzpicture} + + \caption{$\left( 3, 6 \right)$-regular \ac{LDPC} code with $n=204, k=102$ + \cite[\text{204.33.484}]{mackay_enc}} + \end{subfigure}% + \hfill + \begin{subfigure}[t]{0.48\textwidth} + \centering + + \begin{tikzpicture} + \begin{axis}[ + colormap/viridis, + xlabel={$\omega$}, ylabel={$\gamma$}, + at={(0,0)}, view={0}{90}, + zmode=log, + ytick={0, 0.05, 0.1, 0.15}, + yticklabels={0, 0.05, 0.1, 0.15}, + xtick={0.05, 0.1, 0.15, 0.2}, + xticklabels={0.05, 0.1, 0.15, 0.2}, + width=\textwidth, + height=0.75\textwidth, + point meta min=-5.7, + point meta max=-0.5, + ] + \addplot3[ + surf, + shader=flat, + mesh/rows=17, mesh/cols=10, + ] + table [col sep=comma, x=omega, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_gamma_omega_20455187.csv}; + \end{axis} + \end{tikzpicture} + + \caption{$\left( 5, 10 \right)$-regular \ac{LDPC} code with $n=204, k=102$ + \cite[\text{204.55.187}]{mackay_enc}} + \end{subfigure}% + + \vspace{3mm} + + \begin{subfigure}[t]{0.48\textwidth} + \centering + + \begin{tikzpicture} + \begin{axis}[ + colormap/viridis, + xlabel={$\omega$}, ylabel={$\gamma$}, + at={(0,0)}, view={0}{90}, + zmode=log, + ytick={0, 0.05, 0.1, 0.15}, + yticklabels={0, 0.05, 0.1, 0.15}, + xtick={0.05, 0.1, 0.15, 0.2}, + xticklabels={0.05, 0.1, 0.15, 0.2}, + width=\textwidth, + 
height=0.75\textwidth, + point meta min=-5.7, + point meta max=-0.5, + ] + \addplot3[ + surf, + shader=flat, + mesh/rows=17, mesh/cols=10, + ] + table [col sep=comma, x=omega, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_gamma_omega_40833844.csv}; + \end{axis} + \end{tikzpicture} + + \caption{$\left( 3, 6 \right)$-regular \ac{LDPC} code with $n=408, k=204$ + \cite[\text{408.33.844}]{mackay_enc}} + \end{subfigure}% + \hfill + \begin{subfigure}[t]{0.48\textwidth} + \centering + + \begin{tikzpicture} + \begin{axis}[ + colormap/viridis, + xlabel={$\omega$}, ylabel={$\gamma$}, + at={(0,0)}, view={0}{90}, + zmode=log, + ytick={0, 0.05, 0.1, 0.15}, + yticklabels={0, 0.05, 0.1, 0.15}, + xtick={0.05, 0.1, 0.15, 0.2}, + xticklabels={0.05, 0.1, 0.15, 0.2}, + width=\textwidth, + height=0.75\textwidth, + point meta min=-5.7, + point meta max=-0.5, + ] + \addplot3[ + surf, + shader=flat, + mesh/rows=17, mesh/cols=10, + ] + table [col sep=comma, x=omega, y=gamma, z=BER] + {res/proximal/2d_ber_fer_dfr_gamma_omega_pegreg252x504.csv}; + \end{axis} + \end{tikzpicture} + + \caption{LDPC code (progressive edge growth construction) with $n=504, k=252$ + \cite[\text{PEGReg252x504}]{mackay_enc}} + \end{subfigure}% + + \vspace{5mm} + + \begin{subfigure}{\textwidth} + \centering + + \begin{tikzpicture} + \begin{axis}[ + hide axis, + scale only axis, + height=0pt, + width=0pt, + colormap/viridis, + colorbar horizontal, + point meta min=-5.7, + point meta max=-0.5, + colorbar style={ + title={BER}, + width=10cm, + xtick={-5,-4,...,-1}, + xticklabels={$10^{-5}$,$10^{-4}$,$10^{-3}$,$10^{-2}$,$10^{-1}$} + }] + \addplot [draw=none] coordinates {(0,0)}; + \end{axis} + \end{tikzpicture} + \end{subfigure}% + + \caption{The \ac{BER} as a function of $\gamma$ and $\omega$ for various codes} + \label{fig:prox:gamma_omega_multiple} +\end{figure} + +\begin{figure}[H] + \centering + + \begin{subfigure}[t]{0.48\textwidth} + \centering + + \begin{tikzpicture} + \begin{axis}[ + grid=both, + xlabel={$E_b / N_0$ (dB)}, ylabel={FER}, + ymode=log, + legend columns=1, + legend pos=outer north east, + ymax=1.5, ymin=8e-5, + width=\textwidth, + height=0.75\textwidth, + ] + + \addplot[ForestGreen, mark=*, solid] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.15}] + {res/proximal/2d_ber_fer_dfr_963965.csv}; + \addplot[Emerald, mark=triangle, densely dashed] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.15}] + {res/hybrid/2d_ber_fer_dfr_963965.csv}; + + \addplot[NavyBlue, mark=*, solid] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.01}] + {res/proximal/2d_ber_fer_dfr_963965.csv}; + \addplot[RoyalPurple, mark=triangle, densely dashed] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.01}] + {res/hybrid/2d_ber_fer_dfr_963965.csv}; + + \addplot[RedOrange, mark=*, solid] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.05}] + {res/proximal/2d_ber_fer_dfr_963965.csv}; + \addplot[red, mark=triangle, densely dashed] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.05}] + {res/hybrid/2d_ber_fer_dfr_963965.csv}; + \end{axis} + \end{tikzpicture} + + \caption{$\left( 3, 6 \right)$-regular \ac{LDPC} code with $n=96, k=48$ + \cite[\text{96.3.965}]{mackay_enc}} + \end{subfigure}% + \hfill% + \begin{subfigure}[t]{0.48\textwidth} + \centering + \begin{tikzpicture} + \begin{axis}[ + grid=both, + xlabel={$E_b / N_0$ (dB)}, ylabel={FER}, + ymode=log, + legend columns=1, + legend pos=outer north east, + %legend columns=2, + %legend style={at={(0.5,-0.45)},anchor=south}, 
+ ymax=1.5, ymin=8e-5, + width=\textwidth, + height=0.75\textwidth, + ] + + \addplot[ForestGreen, mark=*, solid] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.15}] + {res/proximal/2d_ber_fer_dfr_bch_31_26.csv}; + \addplot[Emerald, mark=triangle, densely dashed] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.15}] + {res/hybrid/2d_ber_fer_dfr_bch_31_26.csv}; + + \addplot[NavyBlue, mark=*, solid] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.01}] + {res/proximal/2d_ber_fer_dfr_bch_31_26.csv}; + \addplot[RoyalPurple, mark=triangle, densely dashed] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.01}] + {res/hybrid/2d_ber_fer_dfr_bch_31_26.csv}; + + \addplot[RedOrange, mark=*, solid] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.05}] + {res/proximal/2d_ber_fer_dfr_bch_31_26.csv}; + \addplot[red, mark=triangle, densely dashed] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.05}] + {res/hybrid/2d_ber_fer_dfr_bch_31_26.csv}; + \end{axis} + \end{tikzpicture} + + \caption{BCH code with $n=31, k=26$\\[\baselineskip]} + \end{subfigure}% + + \vspace{3mm} + + \begin{subfigure}[t]{0.48\textwidth} + \centering + \begin{tikzpicture} + \begin{axis}[ + grid=both, + xlabel={$E_b / N_0$ (dB)}, ylabel={FER}, + ymode=log, + legend columns=1, + legend pos=outer north east, + xmin=0.5, xmax=6, xtick={1, ..., 5}, + ymax=1.5, ymin=8e-5, + width=\textwidth, + height=0.75\textwidth, + ] + + \addplot[ForestGreen, mark=*, solid,] + table [x=SNR, y=FER, col sep=comma, + discard if not={gamma}{0.15}, + discard if gt={SNR}{5.5},] + {res/proximal/2d_ber_fer_dfr_20433484.csv}; + \addplot[Emerald, mark=triangle, densely dashed] + table [x=SNR, y=FER, col sep=comma, + discard if not={gamma}{0.15}, + discard if gt={SNR}{5.5},] + {res/hybrid/2d_ber_fer_dfr_20433484.csv}; + + \addplot[NavyBlue, mark=*, solid] + table [x=SNR, y=FER, col sep=comma, + discard if not={gamma}{0.01}, + discard if gt={SNR}{5.5},] + {res/proximal/2d_ber_fer_dfr_20433484.csv}; + \addplot[RoyalPurple, mark=triangle, densely dashed] + table [x=SNR, y=FER, col sep=comma, + discard if not={gamma}{0.01}, + discard if gt={SNR}{5.5},] + {res/hybrid/2d_ber_fer_dfr_20433484.csv}; + + \addplot[RedOrange, mark=*, solid] + table [x=SNR, y=FER, col sep=comma, + discard if not={gamma}{0.05}, + discard if gt={SNR}{5.5},] + {res/proximal/2d_ber_fer_dfr_20433484.csv}; + \addplot[red, mark=triangle, densely dashed] + table [x=SNR, y=FER, col sep=comma, + discard if not={gamma}{0.05}, + discard if gt={SNR}{5.5},] + {res/hybrid/2d_ber_fer_dfr_20433484.csv}; + \end{axis} + \end{tikzpicture} + + \caption{$\left( 3, 6 \right)$-regular \ac{LDPC} code with $n=204, k=102$ + \cite[\text{204.33.484}]{mackay_enc}} + \end{subfigure}% + \hfill% + \begin{subfigure}[t]{0.48\textwidth} + \centering + \begin{tikzpicture} + \begin{axis}[ + grid=both, + xlabel={$E_b / N_0$ (dB)}, ylabel={FER}, + ymode=log, + legend columns=1, + legend pos=outer north east, + %legend columns=2, + %legend style={at={(0.5,-0.45)},anchor=south}, + ymax=1.5, ymin=8e-5, + width=\textwidth, + height=0.75\textwidth, + ] + + \addplot[ForestGreen, mark=*, solid] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.15}] + {res/proximal/2d_ber_fer_dfr_20455187.csv}; + \addplot[Emerald, mark=triangle, densely dashed] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.15}] + {res/hybrid/2d_ber_fer_dfr_20455187.csv}; + + \addplot[NavyBlue, mark=*, solid] + table [x=SNR, y=FER, col sep=comma, discard if 
not={gamma}{0.01}] + {res/proximal/2d_ber_fer_dfr_20455187.csv}; + \addplot[RoyalPurple, mark=triangle, densely dashed] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.01}] + {res/hybrid/2d_ber_fer_dfr_20455187.csv}; + + \addplot[RedOrange, mark=*, solid] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.05}] + {res/proximal/2d_ber_fer_dfr_20455187.csv}; + \addplot[red, mark=triangle, densely dashed] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.05}] + {res/hybrid/2d_ber_fer_dfr_20455187.csv}; + \end{axis} + \end{tikzpicture} + + \caption{$\left( 5, 10 \right)$-regular \ac{LDPC} code with $n=204, k=102$ + \cite[\text{204.55.187}]{mackay_enc}} + \end{subfigure}% + + \vspace{3mm} + + \begin{subfigure}[t]{0.48\textwidth} + \centering + \begin{tikzpicture} + \begin{axis}[ + grid=both, + xlabel={$E_b / N_0$ (dB)}, ylabel={FER}, + ymode=log, + legend columns=1, + legend pos=outer north east, + %legend columns=2, + %legend style={at={(0.5,-0.45)},anchor=south}, + ymax=1.5, ymin=8e-5, + width=\textwidth, + height=0.75\textwidth, + ] + + \addplot[ForestGreen, mark=*, solid] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.15}] + {res/proximal/2d_ber_fer_dfr_40833844.csv}; + \addplot[Emerald, mark=triangle, densely dashed] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.15}] + {res/hybrid/2d_ber_fer_dfr_40833844.csv}; + + \addplot[NavyBlue, mark=*, solid] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.01}] + {res/proximal/2d_ber_fer_dfr_40833844.csv}; + \addplot[RoyalPurple, mark=triangle, densely dashed] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.01}] + {res/hybrid/2d_ber_fer_dfr_40833844.csv}; + + \addplot[RedOrange, mark=*, solid] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.05}] + {res/proximal/2d_ber_fer_dfr_40833844.csv}; + \addplot[red, mark=triangle, densely dashed] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.05}] + {res/hybrid/2d_ber_fer_dfr_40833844.csv}; + \end{axis} + \end{tikzpicture} + + \caption{$\left( 3, 6 \right)$-regular \ac{LDPC} code with $n=408, k=204$ + \cite[\text{408.33.844}]{mackay_enc}} + \end{subfigure}% + \hfill% + \begin{subfigure}[t]{0.48\textwidth} + \centering + \begin{tikzpicture} + \begin{axis}[ + grid=both, + xlabel={$E_b / N_0$ (dB)}, ylabel={FER}, + ymode=log, + legend columns=1, + legend pos=outer north east, + ymax=1.5, ymin=8e-5, + width=\textwidth, + height=0.75\textwidth, + ] + + \addplot[ForestGreen, mark=*, solid] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.15}] + {res/proximal/2d_ber_fer_dfr_pegreg252x504.csv}; + \addplot[Emerald, mark=triangle, densely dashed] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.15}] + {res/hybrid/2d_ber_fer_dfr_pegreg252x504.csv}; + + \addplot[NavyBlue, mark=*, solid] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.01}] + {res/proximal/2d_ber_fer_dfr_pegreg252x504.csv}; + \addplot[RoyalPurple, mark=triangle, densely dashed] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.01}] + {res/hybrid/2d_ber_fer_dfr_pegreg252x504.csv}; + + \addplot[RedOrange, mark=*, solid] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.05}] + {res/proximal/2d_ber_fer_dfr_pegreg252x504.csv}; + \addplot[red, mark=triangle, densely dashed] + table [x=SNR, y=FER, col sep=comma, discard if not={gamma}{0.05}] + {res/hybrid/2d_ber_fer_dfr_pegreg252x504.csv}; + \end{axis} + \end{tikzpicture}\\ + + \caption{\ac{LDPC} code (progressive 
edge growth construction) with $n=504, k=252$ + \cite[\text{PEGReg252x504}]{mackay_enc}} + \label{fig:prox:improved:comp:504} + \end{subfigure}% + + \vspace{5mm} + + \begin{subfigure}[t]{\textwidth} + \centering + + \begin{tikzpicture} + \begin{axis}[hide axis, + xmin=10, xmax=50, + ymin=0, ymax=0.4, + legend columns=3, + legend style={draw=white!15!black,legend cell align=left}] + \addlegendimage{ForestGreen, mark=*, solid} + \addlegendentry{proximal, $\gamma = 0.15$} + + \addlegendimage{NavyBlue, mark=*, solid} + \addlegendentry{proximal, $\gamma = 0.01$} + + \addlegendimage{RedOrange, mark=*, solid} + \addlegendentry{proximal, $\gamma = 0.05$} + + \addlegendimage{Emerald, mark=triangle, densely dashed} + \addlegendentry{improved, $\gamma = 0.15$} + + \addlegendimage{RoyalPurple, mark=triangle, densely dashed} + \addlegendentry{improved, $\gamma = 0.01$} + + \addlegendimage{red, mark=triangle, densely dashed} + \addlegendentry{improved, $\gamma = 0.05$} + \end{axis} + \end{tikzpicture} + \end{subfigure} + + \caption{Comparison of improvement in decoding performance for various + codes} + \label{fig:prox:improved:comp} +\end{figure} diff --git a/latex/thesis/thesis.tex b/latex/thesis/thesis.tex index ee23e1b..0e4990e 100644 --- a/latex/thesis/thesis.tex +++ b/latex/thesis/thesis.tex @@ -222,7 +222,7 @@ \include{chapters/comparison} % \include{chapters/discussion} \include{chapters/conclusion} - \include{chapters/appendix} +% \include{chapters/appendix} %\listoffigures