Reorganized 'utility' into its own package

This commit is contained in:
2022-11-08 00:52:43 +01:00
parent bbd9d9037b
commit 781ae1442d
9 changed files with 64 additions and 98 deletions

View File

@@ -1,24 +0,0 @@
import numpy as np
# TODO: Should the encoder be responsible for mapping the message from [0, 1]^n to [-1, 1]^n?
# (ie. should the encoder perform modulation?)
class Encoder:
    """Class implementing an encoder for binary linear block codes."""

    def __init__(self, G: np.array):
        """Construct a new Encoder object.

        :param G: Generator matrix (binary, entries in {0, 1})
        """
        self._G = G

    def encode(self, d: np.array) -> np.array:
        """Map a given dataword onto the corresponding codeword.

        The matrix product is reduced modulo 2 so the result is a valid
        binary codeword even when rows of G overlap; it is then mapped
        from [0, 1]^n onto [-1, 1]^n (0 -> -1, 1 -> +1).

        :param d: Dataword (element of [0, 1]^k)
        :return: Codeword (already element of [-1, 1]^n)
        """
        # Bug fix: linear block-code encoding is defined over GF(2). Without
        # the mod-2 reduction, np.dot can yield entries > 1 whenever a
        # dataword activates overlapping generator rows, producing values
        # outside [-1, 1] after the mapping.
        return (np.dot(d, self._G) % 2) * 2 - 1

View File

@@ -3,6 +3,7 @@ import numpy as np
import itertools
# TODO: Unify the interface regarding [0, 1]^n and [-1, 1]^n
class SoftDecisionDecoder:
"""This class naively implements a soft decision decoder. The decoder calculates
the correlation between the received signal and each codeword and then chooses the

View File

@@ -1,6 +1,7 @@
import numpy as np
# TODO: Unify the interface regarding [0, 1]^n and [-1, 1]^n
class ProximalDecoder:
"""Class implementing the Proximal Decoding algorithm. See "Proximal Decoding for LDPC Codes"
by Tadashi Wadayama, and Satoshi Takabe.
@@ -25,16 +26,8 @@ class ProximalDecoder:
self._gamma = gamma
self._eta = eta
self._A = []
self._B = []
for row in self._H:
A_k = np.argwhere(row == 1)
self._A.append(A_k[:, 0])
for column in self._H.T:
B_k = np.argwhere(column == 1)
self._B.append(B_k[:, 0])
self._k, self._n = self._H.shape
self._H_ne_0 = H != 0
@staticmethod
def _L_awgn(s: np.array, y: np.array) -> np.array:
@@ -47,10 +40,8 @@ class ProximalDecoder:
"""Gradient of the code-constraint polynomial. See 2.3, p. 2."""
# Pre-computations
k, _ = self._H.shape
A_prod_matrix = np.tile(x, (k, 1))
A_prods = np.prod(A_prod_matrix, axis=1, where=self._H > 0)
A_prod_matrix = np.tile(x, (self._k, 1))
A_prods = np.prod(A_prod_matrix, axis=1, where=self._H_ne_0)
# Calculate gradient
@@ -89,8 +80,8 @@ class ProximalDecoder:
and 'w' is noise)
:return: Most probably sent dataword (element of [0, 1]^k)
"""
s = np.zeros(y.size)
x_hat = np.zeros(y.size)
s = np.zeros(self._n)
x_hat = np.zeros(self._n)
for k in range(self._K):
r = s - self._step_size * self._L_awgn(s, y)

View File

@@ -1,96 +0,0 @@
"""This file contains various utility functions that can be used in combination with the decoders.
"""
import numpy as np
import typing
from tqdm import tqdm
def _get_noise_amp_from_SNR(SNR: float, signal_amp: float = 1) -> float:
"""Calculate the amplitude of the noise from an SNR and the signal amplitude.
:param SNR: Signal-to-Noise-Ratio in dB
:param signal_amp: Signal Amplitude (linear)
:return: Noise Amplitude (linear)
"""
SNR_linear = 10 ** (SNR / 10)
noise_amp = (1 / np.sqrt(SNR_linear)) * signal_amp
return noise_amp
def add_awgn(c: np.array, SNR: float, signal_amp: float = 1) -> np.array:
    """Add Additive White Gaussian Noise to a data vector.

    The noise is drawn freshly on every call, so repeated calls with the
    same input produce different outputs.

    :param c: Binary vector representing the data to be transmitted
    :param SNR: Signal-to-Noise-Ratio in dB
    :param signal_amp: Amplitude of the signal. Used for the noise amplitude calculation
    :return: Data vector with added noise
    """
    # Noise amplitude derivation inlined here: linear SNR is 10 ** (SNR / 10)
    # and the amplitude scales with its square root.
    noise_scale = (1 / np.sqrt(10 ** (SNR / 10))) * signal_amp
    return c + np.random.normal(scale=noise_scale, size=c.size)
def count_bit_errors(d: np.array, d_hat: np.array) -> int:
    """Count the number of wrong bits in a decoded codeword.

    :param d: Originally sent data
    :param d_hat: Received data
    :return: Number of bit errors (plain Python int)
    """
    # np.sum returns a NumPy scalar, which violates the annotated `int`
    # return type; np.count_nonzero counts the mismatches directly and the
    # cast produces a genuine Python int.
    return int(np.count_nonzero(d != d_hat))
def test_decoder(encoder: typing.Any,
                 decoder: typing.Any,
                 d: np.array,
                 SNRs: typing.Sequence[float] = np.linspace(1, 4, 7),
                 target_bit_errors: int = 100,
                 N_max: int = 10000) \
        -> typing.Tuple[np.array, np.array]:
    """Estimate the Bit Error Rate (BER) of a decoder over a range of SNRs.

    Progress is reported to stdout via nested tqdm progress bars.

    :param encoder: Instance of the encoder used to generate the codeword to transmit
    :param decoder: Instance of the decoder to be tested
    :param d: Dataword (element of [0, 1]^n)
    :param SNRs: List of SNRs for which the BER should be calculated
    :param target_bit_errors: Number of bit errors after which to stop the simulation
    :param N_max: Maximum number of iterations to perform for each SNR
    :return: Tuple of numpy arrays of the form (SNRs, BERs)
    """
    # The codeword only depends on the dataword, so encode once up front.
    x = encoder.encode(d)
    bit_error_rates = []
    snr_bar = tqdm(SNRs, desc="Calculating Bit-Error-Rates",
                   position=0, leave=False,
                   bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}")
    for SNR in snr_bar:
        errors = 0
        bits = 0
        trial_bar = tqdm(range(N_max), desc=f"Simulating for SNR = {SNR} dB",
                         position=1, leave=False,
                         bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}")
        for _ in trial_bar:
            # TODO: Is this a valid simulation? Can we just add AWGN to the
            # codeword, ignoring any modulation and (e.g. matched) filtering?
            y = add_awgn(x, SNR, signal_amp=np.sqrt(2))
            decoded = decoder.decode(y)
            errors += count_bit_errors(d, decoded)
            bits += d.size
            # Stop early once enough errors were observed for a stable estimate.
            if errors >= target_bit_errors:
                break
        bit_error_rates.append(errors / bits)
    return np.array(SNRs), np.array(bit_error_rates)