changed test_decoder() to accept target frame errors instead of bit errors

Andreas Tsouchlos 2022-11-10 10:21:57 +01:00
parent bef9c8ee3a
commit a6849a1f7d
2 changed files with 8 additions and 6 deletions


@@ -16,7 +16,7 @@ def test_decoders(G, decoders: typing.List) -> pd.DataFrame:
     k, n = G.shape
     x = np.zeros(n)  # All-zeros assumption
-    SNRs = np.linspace(1, 8, 8)
+    SNRs = np.linspace(1, 7, 7)
     data = pd.DataFrame({"SNR": SNRs})
     start_time = default_timer()
@@ -26,7 +26,7 @@ def test_decoders(G, decoders: typing.List) -> pd.DataFrame:
         _, BERs_sd = simulations.test_decoder(x,
                                               decoder=decoder,
                                               SNRs=SNRs,
-                                              target_bit_errors=100,
+                                              target_frame_errors=100,
                                               N_max=50000)
         data[f"BER_{decoder_name}"] = BERs_sd
@@ -91,7 +91,7 @@ def main():
     H = codes.get_systematic_H(G)
     decoders = {
-        "naive_soft_decision": maximum_likelihood.MLDecoder(G, H),
+        "ML": maximum_likelihood.MLDecoder(G, H),
         "proximal_0_01": proximal.ProximalDecoder(H, gamma=0.01),
         "proximal_0_05": proximal.ProximalDecoder(H, gamma=0.05),
         "proximal_0_15": proximal.ProximalDecoder(H, gamma=0.15),


@@ -21,7 +21,7 @@ def count_bit_errors(d: np.array, d_hat: np.array) -> int:
 def test_decoder(x: np.array,
                  decoder: typing.Any,
                  SNRs: typing.Sequence[float] = np.linspace(1, 7, 7),
-                 target_bit_errors: int = 100,
+                 target_frame_errors: int = 100,
                  N_max: int = 10000) \
         -> typing.Tuple[np.array, np.array]:
     """Calculate the Bit Error Rate (BER) for a given decoder for a number of SNRs.
@@ -31,7 +31,7 @@ def test_decoder(x: np.array,
     :param x: Codeword to be sent (Element of [0, 1]^n)
     :param decoder: Instance of the decoder to be tested
     :param SNRs: List of SNRs for which the BER should be calculated
-    :param target_bit_errors: Number of bit errors after which to stop the simulation
+    :param target_frame_errors: Number of frame errors after which to stop the simulation
     :param N_max: Maximum number of iterations to perform for each SNR
     :return: Tuple of numpy arrays of the form (SNRs, BERs)
     """
@@ -46,6 +46,7 @@ def test_decoder(x: np.array,
         total_bit_errors = 0
         total_bits = 0
+        total_frame_errors = 0
         for n in tqdm(range(N_max), desc=f"Simulating for SNR = {SNR} dB",
                       position=1,
@@ -58,8 +59,9 @@ def test_decoder(x: np.array,
             total_bit_errors += count_bit_errors(x, x_hat)
             total_bits += x.size
+            total_frame_errors += 1 if total_bit_errors > 0 else 0
-            if total_bit_errors >= target_bit_errors:
+            if total_frame_errors >= target_frame_errors:
                 break
         BERs.append(total_bit_errors / total_bits)
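
For context, here is a minimal, self-contained sketch of the frame-error stopping rule this commit introduces. Everything in it (the BPSK/AWGN channel, the hard-decision stand-in for a decoder, and the estimate_ber name) is an illustrative assumption made for the sake of a runnable example, not the repository's actual implementation:

import numpy as np


def estimate_ber(n_bits: int = 7,
                 snr_db: float = 3.0,
                 target_frame_errors: int = 100,
                 n_max: int = 10000) -> float:
    """Estimate the BER at one SNR, stopping once enough frame errors were seen."""
    rng = np.random.default_rng(0)
    sigma = np.sqrt(1.0 / (2 * 10 ** (snr_db / 10)))  # AWGN std for unit-energy BPSK
    total_bit_errors = 0
    total_bits = 0
    total_frame_errors = 0
    for _ in range(n_max):
        x = np.zeros(n_bits, dtype=int)                 # all-zeros codeword assumption
        y = 1 - 2 * x + rng.normal(0.0, sigma, n_bits)  # BPSK modulation plus noise
        x_hat = (y < 0).astype(int)                     # hard decision instead of a real decoder
        frame_bit_errors = int(np.count_nonzero(x != x_hat))
        total_bit_errors += frame_bit_errors
        total_bits += n_bits
        total_frame_errors += 1 if frame_bit_errors > 0 else 0
        # Stop as soon as the target number of erroneous frames has been observed.
        if total_frame_errors >= target_frame_errors:
            break
    return total_bit_errors / total_bits

Targeting a fixed number of erroneous frames rather than erroneous bits keeps the number of independent error events roughly comparable across SNR points, which is the usual stopping rule for Monte Carlo BER simulations.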