Now calculating the error rate based on the codewords, not datawords
parent 4e0fcbcec8
commit 3a178f2d35
@@ -11,7 +11,7 @@ class SoftDecisionDecoder:
     """
 
     # TODO: Is 'R' actually called 'decoding matrix'?
-    def __init__(self, G: np.array, H: np.array, R: np.array):
+    def __init__(self, G: np.array, H: np.array):
         """Construct a new SoftDecisionDecoder object.
 
         :param G: Generator matrix
@@ -20,9 +20,8 @@ class SoftDecisionDecoder:
         """
         self._G = G
         self._H = H
-        self._R = R
         self._datawords, self._codewords = self._gen_codewords()
-        self._codewords_bpsk = self._codewords * 2 - 1  # The codewords, but mapped to [-1, 1]^n
+        self._codewords_bpsk = 1 - 2 * self._codewords  # The codewords, but mapped to [-1, 1]^n
 
     def _gen_codewords(self) -> np.array:
         """Generate a list of all possible codewords.
@@ -43,7 +42,7 @@ class SoftDecisionDecoder:
     def decode(self, y: np.array) -> np.array:
         """Decode a received signal.
 
-        This function assumes a BPSK-like modulated signal ([-1, 1]^n instead of [0, 1]^n).
+        This function assumes a BPSK modulated signal.
 
         :param y: Vector of received values. (y = x + w, where 'x' is element of [-1, 1]^n
             and 'w' is noise)
@@ -51,4 +50,4 @@ class SoftDecisionDecoder:
         """
         correlations = np.dot(self._codewords_bpsk, y)
 
-        return np.dot(self._R, self._codewords[numpy.argmax(correlations)])
+        return self._codewords[numpy.argmax(correlations)]
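For reference, a minimal stand-alone sketch of the rule SoftDecisionDecoder implements after this change: enumerate every codeword generated by G, map it to BPSK with 1 - 2*c, and return the codeword (rather than the dataword) with the largest correlation to the received vector. The (3, 2) generator matrix and the received vector below are made up for illustration:

import numpy as np
from itertools import product

def brute_force_soft_decode(G, y):
    """Return the most likely codeword for a received noisy BPSK vector y."""
    k, n = G.shape
    datawords = np.array(list(product([0, 1], repeat=k)))
    codewords = datawords @ G % 2          # all 2^k codewords
    codewords_bpsk = 1 - 2 * codewords     # 0 -> +1, 1 -> -1
    correlations = codewords_bpsk @ y      # inner product with each candidate
    return codewords[np.argmax(correlations)]

G = np.array([[1, 0, 1],
              [0, 1, 1]])                  # hypothetical (3, 2) code
y = np.array([0.9, -1.1, -0.8])            # noisy received BPSK samples
print(brute_force_soft_decode(G, y))       # -> [0 1 1]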
@@ -8,7 +8,7 @@ class ProximalDecoder:
     """
 
    # TODO: Is 'R' actually called 'decoding matrix'?
-    def __init__(self, H: np.array, R: np.array, K: int = 100, step_size: float = 0.1,
+    def __init__(self, H: np.array, K: int = 100, step_size: float = 0.1,
                 gamma: float = 0.05, eta: float = 1.5):
        """Construct a new ProximalDecoder Object.
 
@@ -20,7 +20,6 @@ class ProximalDecoder:
        :param eta: Positive constant slightly larger than one. See 3.2, p. 3
        """
        self._H = H
-        self._R = R
        self._K = K
        self._step_size = step_size
        self._gamma = gamma
@@ -51,7 +50,6 @@ class ProximalDecoder:
 
        return result
 
-    # TODO: Is the 'projection onto [-eta, eta]' actually just clipping?
    def _projection(self, x):
        """Project a vector onto [-eta, eta]^n in order to avoid numerical instability.
        Detailed in 3.2, p. 3 (Equation (15)).
@@ -73,12 +71,11 @@ class ProximalDecoder:
    def decode(self, y: np.array) -> np.array:
        """Decode a received signal. The algorithm is detailed in 3.2, p.3.
 
-        This function assumes a BPSK-like modulated signal ([-1, 1]^n instead of [0, 1]^n)
-        and an AWGN channel.
+        This function assumes a BPSK modulated signal and an AWGN channel.
 
        :param y: Vector of received values. (y = x + w, where 'x' is element of [-1, 1]^n
            and 'w' is noise)
-        :return: Most probably sent dataword (element of [0, 1]^k)
+        :return: Most probably sent codeword (element of [0, 1]^n)
        """
        s = np.zeros(self._n)
        x_hat = np.zeros(self._n)
@@ -89,9 +86,9 @@ class ProximalDecoder:
            s = self._projection(s)  # Equation (15)
 
            x_hat = np.sign(s)
-            x_hat = (x_hat == 1) * 1  # Map the codeword from [-1, 1]^n to [0, 1]^n
+            x_hat = (x_hat == -1) * 1  # Map the codeword from [-1, 1]^n to [0, 1]^n
 
            if self._check_parity(x_hat):
                break
 
-        return np.dot(self._R, x_hat)
+        return x_hat
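The tail of ProximalDecoder.decode now reduces to the pattern sketched below: hard-decide the state vector, map -1 to 1 and +1 to 0 (matching x_bpsk = 1 - 2*x), and return the codeword itself once the parity checks pass. The parity-check matrix and state vector here are hypothetical, and the explicit H @ x_hat test only illustrates what _check_parity presumably verifies:

import numpy as np

H = np.array([[1, 0, 1, 1, 1, 0, 0],       # hypothetical (7, 4) parity-check matrix
              [1, 1, 1, 0, 0, 1, 0],
              [0, 1, 1, 1, 0, 0, 1]])
s = np.array([-0.9, 1.2, -0.4, -1.1, -0.8, 0.7, 1.3])   # made-up decoder state

x_hat = (np.sign(s) == -1) * 1             # [-1, 1]^n -> [0, 1]^n
parity_ok = not (H @ x_hat % 2).any()      # all parity checks satisfied?

print(x_hat, parity_ok)                    # -> [1 0 1 1 1 0 0] True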
sw/main.py (28 changed lines)
@@ -6,29 +6,35 @@ import typing
 from pathlib import Path
 import os
 from itertools import chain
+from timeit import default_timer
 
 from decoders import proximal, naive_soft_decision
 from utility import simulations, encoders, codes
 
 
-def test_decoders(G, encoder, decoders: typing.List) -> pd.DataFrame:
+def test_decoders(G, decoders: typing.List) -> pd.DataFrame:
     k, n = G.shape
-    d = np.zeros(k)  # All-zeros assumption
+    x = np.zeros(n)  # All-zeros assumption
 
     SNRs = np.linspace(1, 8, 8)
     data = pd.DataFrame({"SNR": SNRs})
 
+    start_time = default_timer()
+
     for decoder_name in decoders:
         decoder = decoders[decoder_name]
-        _, BERs_sd = simulations.test_decoder(encoder=encoder,
+        _, BERs_sd = simulations.test_decoder(x,
                                               decoder=decoder,
-                                              d=d,
                                               SNRs=SNRs,
                                               target_bit_errors=100,
                                               N_max=30000)
 
         data[f"BER_{decoder_name}"] = BERs_sd
 
+    stop_time = default_timer()
+
+    print(f"Elapsed time: {stop_time-start_time:.2f}s")
+
     return data
 
 
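The updated test_decoders boils down to one BER column per decoder in a DataFrame indexed by SNR, wrapped in a default_timer measurement. A rough sketch, with a made-up fake_ber standing in for simulations.test_decoder:

import numpy as np
import pandas as pd
from timeit import default_timer

def fake_ber(snr_db):
    return 0.5 * np.exp(-snr_db / 2)       # placeholder curve, not a real simulation

SNRs = np.linspace(1, 8, 8)
data = pd.DataFrame({"SNR": SNRs})

start_time = default_timer()
for decoder_name in ["naive_soft_decision", "proximal_0_05"]:
    data[f"BER_{decoder_name}"] = [fake_ber(snr) for snr in SNRs]
stop_time = default_timer()

print(f"Elapsed time: {stop_time - start_time:.2f}s")
print(data)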
@@ -83,18 +89,16 @@ def main():
     for used_code in used_codes:
         G = codes.Gs[used_code]
         H = codes.get_systematic_H(G)
-        R = codes.get_systematic_R(G)
 
-        encoder = encoders.Encoder(G)
         decoders = {
-            "naive_soft_decision": naive_soft_decision.SoftDecisionDecoder(G, H, R),
-            "proximal_0_01": proximal.ProximalDecoder(H, R, K=100, gamma=0.01),
-            "proximal_0_05": proximal.ProximalDecoder(H, R, K=100, gamma=0.05),
-            "proximal_0_15": proximal.ProximalDecoder(H, R, K=100, gamma=0.15),
+            "naive_soft_decision": naive_soft_decision.SoftDecisionDecoder(G, H),
+            "proximal_0_01": proximal.ProximalDecoder(H, K=100, gamma=0.01),
+            "proximal_0_05": proximal.ProximalDecoder(H, K=100, gamma=0.05),
+            "proximal_0_15": proximal.ProximalDecoder(H, K=100, gamma=0.15),
         }
 
-        # data = test_decoders(G, encoder, decoders)
-        # data.to_csv(f"sim_results/{used_code}.csv")
+        data = test_decoders(G, decoders)
+        data.to_csv(f"sim_results/{used_code}.csv")
 
     plot_results()
 
 
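Why R can be dropped: for a systematic code with G = [I_k | P], the matrix from get_systematic_R presumably just slices the first k bits (the dataword) back out of a codeword, and that step is unnecessary once error rates are computed on codewords. A small sketch with a hypothetical (7, 4) code, not the project's actual matrices:

import numpy as np

P = np.array([[1, 1, 0],
              [0, 1, 1],
              [1, 1, 1],
              [1, 0, 1]])
k, m = P.shape                                       # k data bits, m = n - k parity bits
G = np.hstack([np.eye(k, dtype=int), P])             # systematic generator matrix
H = np.hstack([P.T, np.eye(m, dtype=int)])           # matching parity-check matrix
R = np.hstack([np.eye(k, dtype=int), np.zeros((k, m), dtype=int)])  # the removed dataword extractor

assert not (G @ H.T % 2).any()                       # H is a valid parity-check matrix for G

d = np.array([1, 0, 1, 1])
c = d @ G % 2                                        # systematic codeword: first k bits equal d
assert np.array_equal(R @ c % 2, d)                  # R only recovers d from c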
@@ -19,27 +19,25 @@ def count_bit_errors(d: np.array, d_hat: np.array) -> int:
 
 
 # TODO: Fix uses of n, k, a everywhere
-def test_decoder(encoder: typing.Any,
+def test_decoder(x: np.array,
                  decoder: typing.Any,
-                 d: np.array,
                  SNRs: typing.Sequence[float] = np.linspace(1, 4, 7),
                  target_bit_errors: int = 100,
                  N_max: int = 10000) \
        -> typing.Tuple[np.array, np.array]:
    """Calculate the Bit Error Rate (BER) for a given decoder for a number of SNRs.
 
-    This function prints its progress to stdout.
+    This function assumes the all-zeros assumption holds. Progress is printed to stdout.
 
-    :param encoder: Instance of the encoder used to generate the codeword to transmit
+    :param x: Codeword to be sent (Element of [0, 1]^n)
    :param decoder: Instance of the decoder to be tested
-    :param d: Dataword (element of [0, 1]^n)
    :param SNRs: List of SNRs for which the BER should be calculated
    :param target_bit_errors: Number of bit errors after which to stop the simulation
    :param N_max: Maximum number of iterations to perform for each SNR
    :return: Tuple of numpy arrays of the form (SNRs, BERs)
    """
 
-    x = encoder.encode(d)
+    x_bpsk = 1 - 2 * x  # Map x from [0, 1]^n to [-1, 1]^n
 
    BERs = []
    for SNR in tqdm(SNRs, desc="Calculating Bit-Error-Rates",
@@ -55,14 +53,12 @@ def test_decoder(encoder: typing.Any,
                    leave=False,
                    bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}"):
 
-            # TODO: Is this a valid simulation? Can we just add AWGN to the codeword,
-            # ignoring and modulation and (e.g. matched) filtering?
-            y = noise.add_awgn(x, SNR, signal_amp=np.sqrt(2))
+            y = noise.add_awgn(x_bpsk, SNR, signal_amp=np.sqrt(2))
 
            y_hat = decoder.decode(y)
 
-            total_bit_errors += count_bit_errors(d, y_hat)
-            total_bits += d.size
+            total_bit_errors += count_bit_errors(x, y_hat)
+            total_bits += x.size
 
            if total_bit_errors >= target_bit_errors:
                break
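Putting the pieces together, the BER loop under the all-zeros assumption looks roughly like the sketch below. add_awgn here is a stand-in using one common Es/N0-in-dB convention and is not the project's noise.add_awgn (which takes a signal_amp argument), and the per-bit hard decision stands in for a real decoder; bit errors are counted against the codeword x, as in the updated test_decoder:

import numpy as np

def add_awgn(x_bpsk, snr_db):
    # Assumed convention: snr_db is Es/N0 for unit-energy BPSK symbols.
    sigma = np.sqrt(10 ** (-snr_db / 10) / 2)
    return x_bpsk + sigma * np.random.randn(x_bpsk.size)

n = 7
x = np.zeros(n, dtype=int)                  # all-zeros codeword (all-zeros assumption)
x_bpsk = 1 - 2 * x                          # 0 -> +1, 1 -> -1: the sent signal is all +1

total_bit_errors, total_bits = 0, 0
for _ in range(1000):
    y = add_awgn(x_bpsk, snr_db=3.0)
    x_hat = (y < 0).astype(int)             # stand-in "decoder": per-bit hard decision
    total_bit_errors += int(np.sum(x != x_hat))   # errors counted against the codeword
    total_bits += x.size

print(f"BER: {total_bit_errors / total_bits:.4f}")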