Now calculating the error rate based on the codewords, not datawords

This commit is contained in:
2022-11-08 20:03:48 +01:00
parent 4e0fcbcec8
commit 3a178f2d35
4 changed files with 32 additions and 36 deletions

View File

@@ -19,27 +19,25 @@ def count_bit_errors(d: np.array, d_hat: np.array) -> int:
# TODO: Fix uses of n, k, a everywhere
def test_decoder(encoder: typing.Any,
def test_decoder(x: np.array,
decoder: typing.Any,
d: np.array,
SNRs: typing.Sequence[float] = np.linspace(1, 4, 7),
target_bit_errors: int = 100,
N_max: int = 10000) \
-> typing.Tuple[np.array, np.array]:
"""Calculate the Bit Error Rate (BER) for a given decoder for a number of SNRs.
This function prints its progress to stdout.
This function assumes the all-zeros assumption holds. Progress is printed to stdout.
:param encoder: Instance of the encoder used to generate the codeword to transmit
:param x: Codeword to be sent (Element of [0, 1]^n)
:param decoder: Instance of the decoder to be tested
:param d: Dataword (element of [0, 1]^n)
:param SNRs: List of SNRs for which the BER should be calculated
:param target_bit_errors: Number of bit errors after which to stop the simulation
:param N_max: Maximum number of iterations to perform for each SNR
:return: Tuple of numpy arrays of the form (SNRs, BERs)
"""
x = encoder.encode(d)
x_bpsk = 1 - 2 * x # Map x from [0, 1]^n to [-1, 1]^n
BERs = []
for SNR in tqdm(SNRs, desc="Calculating Bit-Error-Rates",
@@ -55,14 +53,12 @@ def test_decoder(encoder: typing.Any,
leave=False,
bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}"):
# TODO: Is this a valid simulation? Can we just add AWGN to the codeword,
# ignoring and modulation and (e.g. matched) filtering?
y = noise.add_awgn(x, SNR, signal_amp=np.sqrt(2))
y = noise.add_awgn(x_bpsk, SNR, signal_amp=np.sqrt(2))
y_hat = decoder.decode(y)
total_bit_errors += count_bit_errors(d, y_hat)
total_bits += d.size
total_bit_errors += count_bit_errors(x, y_hat)
total_bits += x.size
if total_bit_errors >= target_bit_errors:
break