Added Encoder class and modified interface of utility.test_decoder()
This commit is contained in:
24
sw/decoders/channel.py
Normal file
24
sw/decoders/channel.py
Normal file
@@ -0,0 +1,24 @@
|
||||
import numpy as np
|
||||
|
||||
|
||||
# TODO: Should the encoder be responsible for mapping the message from [0, 1]^n to [-1, 1]^n?
# (ie. should the encoder perform modulation?)
class Encoder:
    """Encoder for linear block codes.

    Maps a dataword onto its codeword via the code's generator matrix and
    additionally maps the result from {0, 1}^n onto {-1, 1}^n.
    """

    def __init__(self, G: np.ndarray):
        """Construct a new Encoder object.

        :param G: Generator matrix of the block code (binary 0/1 matrix)
        """
        self._G = G

    def encode(self, d: np.ndarray) -> np.ndarray:
        """Map a given dataword onto the corresponding codeword.

        The returned codeword is mapped from {0, 1}^n onto {-1, 1}^n.

        :param d: Dataword (element of {0, 1}^n)
        :return: Codeword (already element of {-1, 1}^n)
        """
        # Reduce the matrix product modulo 2: codewords of a binary block
        # code live in GF(2). Without this, any generator-matrix column with
        # more than one contributing 1-bit yields a value >= 2, and the
        # subsequent mapping produces invalid symbols (e.g. 3 instead of 1).
        c = np.dot(d, self._G) % 2
        # Map 0 -> -1 and 1 -> +1.
        return c * 2 - 1
|
||||
@@ -43,26 +43,28 @@ def count_bit_errors(d: np.array, d_hat: np.array) -> int:
|
||||
return np.sum(d != d_hat)
|
||||
|
||||
|
||||
def test_decoder(decoder: typing.Any,
|
||||
def test_decoder(encoder: typing.Any,
|
||||
decoder: typing.Any,
|
||||
d: np.array,
|
||||
c: np.array,
|
||||
SNRs: typing.Sequence[float] = np.linspace(1, 4, 7),
|
||||
target_bit_errors=100,
|
||||
N_max=10000) \
|
||||
target_bit_errors: int = 100,
|
||||
N_max: int = 10000) \
|
||||
-> typing.Tuple[np.array, np.array]:
|
||||
"""Calculate the Bit Error Rate (BER) for a given decoder for a number of SNRs.
|
||||
|
||||
This function prints its progress to stdout.
|
||||
|
||||
:param encoder: Instance of the encoder used to generate the codeword to transmit
|
||||
:param decoder: Instance of the decoder to be tested
|
||||
:param d: Dataword (element of [0, 1]^n)
|
||||
:param c: Codeword whose transmission is to be simulated (element of [0, 1]^n)
|
||||
:param SNRs: List of SNRs for which the BER should be calculated
|
||||
:param target_bit_errors: Number of bit errors after which to stop the simulation
|
||||
:param N_max: Maximum number of iterations to perform for each SNR
|
||||
:return: Tuple of numpy arrays of the form (SNRs, BERs)
|
||||
"""
|
||||
x = c * 2 - 1 # Map the codeword from [0, 1]^n to [-1, 1]^n
|
||||
|
||||
x = encoder.encode(d)
|
||||
|
||||
BERs = []
|
||||
for SNR in tqdm(SNRs, desc="Calculating Bit-Error-Rates",
|
||||
position=0,
|
||||
@@ -79,10 +81,11 @@ def test_decoder(decoder: typing.Any,
|
||||
# TODO: Is this a valid simulation? Can we just add AWGN to the codeword, ignoring and modulation and (
|
||||
# e.g. matched) filtering?
|
||||
y = add_awgn(x, SNR, signal_amp=np.sqrt(2))
|
||||
|
||||
y_hat = decoder.decode(y)
|
||||
|
||||
total_bit_errors += count_bit_errors(d, y_hat)
|
||||
total_bits += c.size
|
||||
total_bits += x.size
|
||||
|
||||
if total_bit_errors >= target_bit_errors:
|
||||
break
|
||||
|
||||
Reference in New Issue
Block a user