diff --git a/sw/decoders/naive_soft_decision.py b/sw/decoders/naive_soft_decision.py
index 9bb2ff9..8b76d1e 100644
--- a/sw/decoders/naive_soft_decision.py
+++ b/sw/decoders/naive_soft_decision.py
@@ -3,6 +3,7 @@
 import numpy as np
 import itertools
 
+# TODO: Unify the interface regarding [0, 1]^n and [-1, 1]^n
 class SoftDecisionDecoder:
     """This class naively implements a soft decision decoder. The decoder calculates the
     correlation between the received signal and each codeword and then chooses the
diff --git a/sw/decoders/proximal.py b/sw/decoders/proximal.py
index 954e59b..26f7fc6 100644
--- a/sw/decoders/proximal.py
+++ b/sw/decoders/proximal.py
@@ -1,6 +1,7 @@
 import numpy as np
 
 
+# TODO: Unify the interface regarding [0, 1]^n and [-1, 1]^n
 class ProximalDecoder:
     """Class implementing the Proximal Decoding algorithm. See "Proximal Decoding for
     LDPC Codes" by Tadashi Wadayama, and Satoshi Takabe.
@@ -25,16 +26,8 @@ class ProximalDecoder:
         self._gamma = gamma
         self._eta = eta
 
-        self._A = []
-        self._B = []
-
-        for row in self._H:
-            A_k = np.argwhere(row == 1)
-            self._A.append(A_k[:, 0])
-
-        for column in self._H.T:
-            B_k = np.argwhere(column == 1)
-            self._B.append(B_k[:, 0])
+        self._k, self._n = self._H.shape
+        self._H_ne_0 = H != 0
 
     @staticmethod
     def _L_awgn(s: np.array, y: np.array) -> np.array:
@@ -47,10 +40,8 @@ class ProximalDecoder:
         """Gradient of the code-constraint polynomial. See 2.3, p. 2."""
 
         # Pre-computations
-        k, _ = self._H.shape
-
-        A_prod_matrix = np.tile(x, (k, 1))
-        A_prods = np.prod(A_prod_matrix, axis=1, where=self._H > 0)
+        A_prod_matrix = np.tile(x, (self._k, 1))
+        A_prods = np.prod(A_prod_matrix, axis=1, where=self._H_ne_0)
 
 
         # Calculate gradient
@@ -89,8 +80,8 @@ class ProximalDecoder:
         and 'w' is noise)
         :return: Most probably sent dataword (element of [0, 1]^k)
         """
-        s = np.zeros(y.size)
-        x_hat = np.zeros(y.size)
+        s = np.zeros(self._n)
+        x_hat = np.zeros(self._n)
 
         for k in range(self._K):
             r = s - self._step_size * self._L_awgn(s, y)
diff --git a/sw/main.py b/sw/main.py
index 9c7a4c3..904c960 100644
--- a/sw/main.py
+++ b/sw/main.py
@@ -4,10 +4,8 @@
 import seaborn as sns
 import pandas as pd
 from timeit import default_timer as timer
-from decoders import proximal
-from decoders import naive_soft_decision
-from decoders import channel
-from decoders import utility
+from decoders import proximal, naive_soft_decision
+from utility import noise, simulations, encoders
 
 
 def main():
@@ -29,7 +27,7 @@ def main():
 
 
     # Define encoder and decoders
-    encoder = channel.Encoder(G)
+    encoder = encoders.Encoder(G)
 
     decoders = {"naive_soft_decision": naive_soft_decision.SoftDecisionDecoder(G, H, R),
                 "proximal_0_01": proximal.ProximalDecoder(H, R, K=100, gamma=0.01),
@@ -49,11 +47,11 @@ def main():
     for decoder_name in decoders:
         decoder = decoders[decoder_name]
 
-        _, BERs_sd = utility.test_decoder(encoder=encoder,
-                                          decoder=decoder,
-                                          d=d,
-                                          SNRs=SNRs,
-                                          N_max=2000)
+        _, BERs_sd = simulations.test_decoder(encoder=encoder,
+                                              decoder=decoder,
+                                              d=d,
+                                              SNRs=SNRs,
+                                              N_max=2000)
 
         data[f"BER_{decoder_name}"] = BERs_sd
 
diff --git a/sw/test/test_proximal.py b/sw/test/test_proximal.py
index 2e1572a..7319389 100644
--- a/sw/test/test_proximal.py
+++ b/sw/test/test_proximal.py
@@ -63,40 +63,6 @@ class GradientTestCase(unittest.TestCase):
 
         self.assertEqual(np.array_equal(grad_h, expected_grad_h), True)
 
-    def test_gen_A_B(self):
-        """Test the generation of the A and B sets used for the gradient calculation."""
-        # Hamming(7,4) code
-        G = np.array([[1, 1, 1, 0, 0, 0, 0],
-                      [1, 0, 0, 1, 1, 0, 0],
-                      [0, 1, 0, 1, 0, 1, 0],
-                      [1, 1, 0, 1, 0, 0, 1]])
-        H = np.array([[1, 0, 1, 0, 1, 0, 1],
-                      [0, 1, 1, 0, 0, 1, 1],
-                      [0, 0, 0, 1, 1, 1, 1]])
-        R = np.array([[0, 0, 1, 0, 0, 0, 0],
-                      [0, 0, 0, 0, 1, 0, 0],
-                      [0, 0, 0, 0, 0, 1, 0],
-                      [0, 0, 0, 0, 0, 0, 1]])
-
-        decoder = proximal.ProximalDecoder(H, R)
-
-        expected_A = [np.array([0, 2, 4, 6]),
-                      np.array([1, 2, 5, 6]),
-                      np.array([3, 4, 5, 6])]
-        expected_B = [np.array([0]),
-                      np.array([1]),
-                      np.array([0, 1]),
-                      np.array([2]),
-                      np.array([0, 2]),
-                      np.array([1, 2]),
-                      np.array([0, 1, 2])]
-
-        for A_i, expected_A_i in zip(decoder._A, expected_A):
-            self.assertEqual(np.array_equal(A_i, expected_A_i), True)
-
-        for B_k, expected_B_k in zip(decoder._B, expected_B):
-            self.assertEqual(np.array_equal(B_k, expected_B_k), True)
-
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/sw/test/test_utility.py b/sw/test/test_utility.py
index 2c47fc8..81b51b6 100644
--- a/sw/test/test_utility.py
+++ b/sw/test/test_utility.py
@@ -1,6 +1,8 @@
 import unittest
 import numpy as np
-from decoders import utility
+
+from utility import simulations
+from utility import noise
 
 
 class CountBitErrorsTestCase(unittest.TestCase):
@@ -15,9 +17,9 @@ class CountBitErrorsTestCase(unittest.TestCase):
         d3 = np.array([0, 0, 0, 0])
         y_hat3 = np.array([1, 1, 1, 1])
 
-        self.assertEqual(utility.count_bit_errors(d1, y_hat1), 2)
-        self.assertEqual(utility.count_bit_errors(d2, y_hat2), 0)
-        self.assertEqual(utility.count_bit_errors(d3, y_hat3), 4)
+        self.assertEqual(simulations.count_bit_errors(d1, y_hat1), 2)
+        self.assertEqual(simulations.count_bit_errors(d2, y_hat2), 0)
+        self.assertEqual(simulations.count_bit_errors(d3, y_hat3), 4)
 
 
 # TODO: Is this correct?
@@ -30,11 +32,11 @@ class NoiseAmpFromSNRTestCase(unittest.TestCase):
         SNR4 = -20
         SNR5 = 60
 
-        self.assertEqual(utility._get_noise_amp_from_SNR(SNR1, signal_amp=1), 1)
-        self.assertAlmostEqual(utility._get_noise_amp_from_SNR(SNR2, signal_amp=1), 0.5, places=2)
-        self.assertEqual(utility._get_noise_amp_from_SNR(SNR3, signal_amp=1), 0.1)
-        self.assertEqual(utility._get_noise_amp_from_SNR(SNR4, signal_amp=1), 10)
-        self.assertEqual(utility._get_noise_amp_from_SNR(SNR5, signal_amp=2), 0.002)
+        self.assertEqual(noise.get_noise_amp_from_SNR(SNR1, signal_amp=1), 1)
+        self.assertAlmostEqual(noise.get_noise_amp_from_SNR(SNR2, signal_amp=1), 0.5, places=2)
+        self.assertEqual(noise.get_noise_amp_from_SNR(SNR3, signal_amp=1), 0.1)
+        self.assertEqual(noise.get_noise_amp_from_SNR(SNR4, signal_amp=1), 10)
+        self.assertEqual(noise.get_noise_amp_from_SNR(SNR5, signal_amp=2), 0.002)
 
 
 if __name__ == '__main__':
diff --git a/sw/utility/__init__.py b/sw/utility/__init__.py
new file mode 100644
index 0000000..fabcde5
--- /dev/null
+++ b/sw/utility/__init__.py
@@ -0,0 +1 @@
+"""This package contains various utilities that can be used in combination with the decoders."""
diff --git a/sw/decoders/channel.py b/sw/utility/encoders.py
similarity index 92%
rename from sw/decoders/channel.py
rename to sw/utility/encoders.py
index 255ec1f..06cd1af 100644
--- a/sw/decoders/channel.py
+++ b/sw/utility/encoders.py
@@ -1,6 +1,7 @@
 import numpy as np
 
 
+# TODO: Unify the interface regarding [0, 1]^n and [-1, 1]^n
 # TODO: Should the encoder be responsible for mapping the message from [0, 1]^n to [-1, 1]^n?
 #       (ie. should the encoder perform modulation?)
 class Encoder:
diff --git a/sw/utility/noise.py b/sw/utility/noise.py
new file mode 100644
index 0000000..f7831cd
--- /dev/null
+++ b/sw/utility/noise.py
@@ -0,0 +1,31 @@
+"""Utility functions relating to noise and SNR calculations."""
+
+
+import numpy as np
+
+
+def get_noise_amp_from_SNR(SNR: float, signal_amp: float = 1) -> float:
+    """Calculate the amplitude of the noise from an SNR and the signal amplitude.
+
+    :param SNR: Signal-to-Noise-Ratio in dB
+    :param signal_amp: Signal Amplitude (linear)
+    :return: Noise Amplitude (linear)
+    """
+    SNR_linear = 10 ** (SNR / 10)
+    noise_amp = (1 / np.sqrt(SNR_linear)) * signal_amp
+
+    return noise_amp
+
+
+def add_awgn(c: np.array, SNR: float, signal_amp: float = 1) -> np.array:
+    """Add Additive White Gaussian Noise to a data vector. As this function adds random noise to
+    the input, the output changes, even if it is called multiple times with the same input.
+
+    :param c: Binary vector representing the data to be transmitted
+    :param SNR: Signal-to-Noise-Ratio in dB
+    :param signal_amp: Amplitude of the signal. Used for the noise amplitude calculation
+    :return: Data vector with added noise
+    """
+    noise_amp = get_noise_amp_from_SNR(SNR, signal_amp=signal_amp)
+    y = c + np.random.normal(scale=noise_amp, size=c.size)
+    return y
diff --git a/sw/decoders/utility.py b/sw/utility/simulations.py
similarity index 64%
rename from sw/decoders/utility.py
rename to sw/utility/simulations.py
index 83da734..938cfed 100644
--- a/sw/decoders/utility.py
+++ b/sw/utility/simulations.py
@@ -1,36 +1,11 @@
-"""This file contains various utility functions that can be used in combination with the decoders.
-"""
+"""This file contains utility functions relating to tests and simulations of the decoders."""
+
 import numpy as np
 import typing
 from tqdm import tqdm
 
 
-
-def _get_noise_amp_from_SNR(SNR: float, signal_amp: float = 1) -> float:
-    """Calculate the amplitude of the noise from an SNR and the signal amplitude.
-
-    :param SNR: Signal-to-Noise-Ratio in dB
-    :param signal_amp: Signal Amplitude (linear)
-    :return: Noise Amplitude (linear)
-    """
-    SNR_linear = 10 ** (SNR / 10)
-    noise_amp = (1 / np.sqrt(SNR_linear)) * signal_amp
-
-    return noise_amp
-
-
-def add_awgn(c: np.array, SNR: float, signal_amp: float = 1) -> np.array:
-    """Add Additive White Gaussian Noise to a data vector. As this function adds random noise to
-    the input, the output changes, even if it is called multiple times with the same input.
-
-    :param c: Binary vector representing the data to be transmitted
-    :param SNR: Signal-to-Noise-Ratio in dB
-    :param signal_amp: Amplitude of the signal. Used for the noise amplitude calculation
-    :return: Data vector with added noise
-    """
-    noise_amp = _get_noise_amp_from_SNR(SNR, signal_amp=signal_amp)
-    y = c + np.random.normal(scale=noise_amp, size=c.size)
-    return y
+from utility import noise
 
 
 def count_bit_errors(d: np.array, d_hat: np.array) -> int:
@@ -81,7 +56,7 @@ def test_decoder(encoder: typing.Any,
 
             # TODO: Is this a valid simulation? Can we just add AWGN to the codeword,
             #       ignoring and modulation and (e.g. matched) filtering?
-            y = add_awgn(x, SNR, signal_amp=np.sqrt(2))
+            y = noise.add_awgn(x, SNR, signal_amp=np.sqrt(2))
 
             y_hat = decoder.decode(y)
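
Note for review: the ProximalDecoder constructor now precomputes a boolean mask (self._H_ne_0) in place of the per-check index sets self._A / self._B, which is also why test_gen_A_B is deleted. Below is a minimal standalone sketch (not part of the patch) checking that both formulations yield the same per-check products; it reuses the Hamming(7,4) parity-check matrix from the removed test, while the vector x is an arbitrary made-up input.

    import numpy as np

    # Hamming(7,4) parity-check matrix, taken from the removed test_gen_A_B
    H = np.array([[1, 0, 1, 0, 1, 0, 1],
                  [0, 1, 1, 0, 0, 1, 1],
                  [0, 0, 0, 1, 1, 1, 1]])
    x = np.array([0.9, -0.8, 0.7, -1.0, 0.95, -0.85, 0.6])  # arbitrary test vector

    # Old formulation: explicit index sets A_k per parity check, product over each set
    A = [np.argwhere(row == 1)[:, 0] for row in H]
    old_prods = np.array([np.prod(x[A_k]) for A_k in A])

    # New formulation: one boolean mask, row-wise masked product
    k, n = H.shape
    new_prods = np.prod(np.tile(x, (k, 1)), axis=1, where=(H != 0))

    assert np.allclose(old_prods, new_prods)

The mask form keeps the gradient evaluation fully vectorised and avoids holding Python lists of index arrays, which appears to be the motivation for dropping the _A/_B attributes together with their dedicated test.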
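
Also for review, a rough usage sketch of the relocated helpers after the move into the new utility package (run from sw/ so that the package is importable). Only get_noise_amp_from_SNR, add_awgn and count_bit_errors are taken from this patch; the codeword c and the hard-decision step are illustrative assumptions, not code from the repository.

    import numpy as np

    from utility import noise, simulations

    c = np.array([1, -1, 1, 1, -1, 1, -1])          # illustrative bipolar codeword
    print(noise.get_noise_amp_from_SNR(6))          # ~0.5 for 6 dB at unit signal amplitude
    y = noise.add_awgn(c, SNR=6)                    # received vector with AWGN added
    d = (c < 0).astype(int)                         # reference bits (illustrative mapping)
    d_hat = (y < 0).astype(int)                     # hard decision on the noisy vector
    print(simulations.count_bit_errors(d, d_hat))   # number of positions that differ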