Merge branch 'feature/multithreading'
Commit b6fe01e149
.gitignore (vendored): 4 changed lines
@@ -1,5 +1,9 @@
 latex/build/
 latex/tmp/
+sw/cpp_modules
+sw/cpp/build
+sw/cpp/cmake-build-debug
+sw/cpp/cmake-build-release
 sw/sim_saves/
 .idea
 __pycache__
@@ -2,6 +2,9 @@
 
 ## Software
 
+Warning: At least `Python 3.9` is required to properly run the
+scripts in this project.
+
 ### Create a python virtual environment
 
 In the root directory of this repository run
sw/cpp/.clang-format (new file, 28 lines)
@@ -0,0 +1,28 @@
BasedOnStyle: LLVM
Language: Cpp

IndentWidth: 4
UseTab: Never
#NamespaceIndentation: All

PointerAlignment: Left
AccessModifierOffset: -4
AlwaysBreakTemplateDeclarations: true
LambdaBodyIndentation: Signature

MaxEmptyLinesToKeep: 3
# ColumnLimit: 128

CompactNamespaces: true
FixNamespaceComments: true

AllowShortFunctionsOnASingleLine: false
AllowShortIfStatementsOnASingleLine: true

AlignConsecutiveAssignments: true
AlignConsecutiveBitFields: true
AlignConsecutiveDeclarations: true
AlignConsecutiveMacros: true

#BraceWrapping:
# BeforeElse: true
sw/cpp/CMakeLists.txt (new file, 37 lines)
@@ -0,0 +1,37 @@
cmake_minimum_required (VERSION 3.0)
project(cpp_decoders)


if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
    message(STATUS "Setting build type to 'Release' as none was specified.")
    set(CMAKE_BUILD_TYPE
        Release
        CACHE STRING "Choose the type of build." FORCE)
    set_property(
        CACHE CMAKE_BUILD_TYPE
        PROPERTY STRINGS
        "Debug"
        "Release")
endif()

set(CMAKE_CXX_STANDARD 23)

find_package(Eigen3 3.3 REQUIRED NO_MODULE)
find_package(pybind11 CONFIG REQUIRED)

include_directories(${pybind11_INCLUDE_DIRS})


find_package(OpenMP REQUIRED)

#add_compile_options(-ffast-math)

pybind11_add_module(cpp_decoders src/python_interface.cpp)
target_link_libraries(cpp_decoders PRIVATE Eigen3::Eigen OpenMP::OpenMP_CXX)

set(INSTALL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../cpp_modules)

install(TARGETS cpp_decoders ARCHIVE DESTINATION ${INSTALL_DIR}
        LIBRARY DESTINATION ${INSTALL_DIR}
        RUNTIME DESTINATION ${INSTALL_DIR})
sw/cpp/src/proximal.h (new file, 277 lines)
@@ -0,0 +1,277 @@
#pragma once

#include <Eigen/Dense>
#include <bit>
#include <cstdlib>
#include <iostream>
#include <stdexcept>

#include <pybind11/eigen.h>
#include <pybind11/stl.h>


/*
 *
 * Using declarations
 *
 */


template <int t_rows, int t_cols>
using MatrixiR = Eigen::Matrix<int, t_rows, t_cols, Eigen::RowMajor>;

template <int t_rows, int t_cols>
using MatrixdR = Eigen::Matrix<double, t_rows, t_cols, Eigen::RowMajor>;

template <int t_size>
using RowVectori = Eigen::RowVector<int, t_size>;

template <int t_size>
using RowVectord = Eigen::RowVector<double, t_size>;


/*
 *
 * Proximal decoder implementation
 *
 */


/**
 * @brief Class implementing the Proximal Decoding algorithm. See "Proximal
 * Decoding for LDPC Codes" by Tadashi Wadayama, and Satoshi Takabe.
 * @tparam t_m Number of rows of the H Matrix
 * @tparam t_n Number of columns of the H Matrix
 */
template <int t_m, int t_n>
class ProximalDecoder {
public:
    /**
     * @brief Constructor
     * @param H Parity-Check Matrix
     * @param K Number of iterations to run while decoding
     * @param omega Step size
     * @param gamma Positive constant. Arises in the approximation of the prior
     * PDF
     * @param eta Positive constant slightly larger than one. See 3.2, p. 3
     */
    ProximalDecoder(const Eigen::Ref<const MatrixiR<t_m, t_n>>& H, int K,
                    double omega, double gamma, double eta)
        : mK(K), mOmega(omega), mGamma(gamma), mEta(eta), mH(H),
          mH_zero_indices(find_zero(H)) {

        Eigen::setNbThreads(8);
    }

    /**
     * @brief Decode a received signal. The algorithm is detailed in 3.2, p. 3.
     * This function assumes a BPSK modulated signal and an AWGN channel.
     * @param y Vector of received values. (y = x + w, where 'x' is element of
     * [-1, 1]^n and 'w' is noise)
     * @return \b std::pair of the form (x_hat, num_iter), x_hat is the most
     * probably sent codeword and num_iter is the number of iterations that were
     * performed. If the parity check fails and no valid codeword is reached,
     * num_iter is -1
     */
    std::pair<RowVectori<t_n>, int>
    decode(const Eigen::Ref<const RowVectord<t_n>>& y) {
        if (y.size() != mH.cols())
            throw std::runtime_error("Length of vector must match H matrix");

        RowVectord<t_n> s = RowVectord<t_n>::Zero(t_n);
        RowVectori<t_n> x_hat;
        RowVectord<t_n> r;

        for (std::size_t i = 0; i < mK; ++i) {
            r = s - mOmega * L_awgn(s, y);

            s = projection(r - mGamma * grad_H(r));

            x_hat = s.unaryExpr([](double d) {
                        uint64_t bits = std::bit_cast<uint64_t>(d);
                        // Return the sign bit: 1 for negative, 0 for positive
                        return (bits >> 63);
                    }).template cast<int>();

            if (check_parity(x_hat)) {
                return {x_hat, i + 1};
            }
        }

        return {x_hat, -1};
    }

    /**
     * @brief Decode a received signal and measure the error value
     * (x - sign(x_hat))
     * @param x Transmitted word
     * @param y Received signal
     * @return \b std::vector of error values. Each element corresponds to one
     * iteration of the algorithm
     */
    std::vector<double>
    get_error_values(const Eigen::Ref<const RowVectori<t_n>>& x,
                     const Eigen::Ref<const RowVectord<t_n>>& y) {

        if (y.size() != mH.cols())
            throw std::runtime_error("Length of vector must match H matrix");

        std::vector<double> error_values;
        error_values.reserve(mK);

        RowVectord<t_n> s = RowVectord<t_n>::Zero(t_n);
        RowVectori<t_n> x_hat;
        RowVectord<t_n> r;

        for (std::size_t i = 0; i < mK; ++i) {
            r = s - mOmega * L_awgn(s, y);

            s = projection(r - mGamma * grad_H(r));

            x_hat = s.unaryExpr([](double d) {
                        uint64_t bits = std::bit_cast<uint64_t>(d);
                        // Return the sign bit: 1 for negative, 0 for positive
                        return (bits >> 63);
                    }).template cast<int>();

            RowVectord<t_n> x_hat_bpsk =
                -1 * ((2 * x_hat.template cast<double>()).array() - 1).matrix();
            error_values.push_back(
                (x.template cast<double>() - x_hat_bpsk).norm());

            if (check_parity(x_hat)) {
                break;
            }
        }

        return error_values;
    }

    /**
     * @brief Decode a received signal and measure the norm of the two gradients
     * at each iteration
     * @param y
     * @return \b std::vector of \b std::pair of gradient values. Each element
     * corresponds to one iteration. Result is of the form
     * [(grad_H_1, grad_L_1), ...]
     */
    std::vector<std::pair<double, double>>
    get_gradient_values(const Eigen::Ref<const RowVectord<t_n>>& y) {

        if (y.size() != mH.cols())
            throw std::runtime_error("Length of vector must match H matrix");

        std::vector<std::pair<double, double>> gradient_values;
        gradient_values.reserve(mK);

        RowVectord<t_n> s = RowVectord<t_n>::Zero(t_n);
        RowVectori<t_n> x_hat;
        RowVectord<t_n> r;

        for (std::size_t i = 0; i < mK; ++i) {
            RowVectord<t_n> gradl = L_awgn(s, y);
            r = s - mOmega * gradl;

            RowVectord<t_n> gradh = grad_H(r);
            s = projection(r - mGamma * gradh);

            gradient_values.push_back({gradh.norm(), gradl.norm()});

            x_hat = s.unaryExpr([](double d) {
                        uint64_t bits = std::bit_cast<uint64_t>(d);
                        // Return the sign bit: 1 for negative, 0 for positive
                        return (bits >> 63);
                    }).template cast<int>();

            if (check_parity(x_hat)) {
                break;
            }
        }

        return gradient_values;
    }

    /**
     * @brief Get the values of all member variables necessary to recreate an
     * exact copy of this class. Used for pickling
     * @return \b std::tuple
     */
    auto get_decoder_state() const {
        return std::tuple(mH, mK, mOmega, mGamma, mEta);
    }

private:
    const int    mK;
    const double mOmega;
    const double mGamma;
    const double mEta;

    const MatrixiR<t_m, t_n>        mH;
    const std::vector<Eigen::Index> mH_zero_indices;


    /**
     * Variation of the negative log-likelihood for the special case of AWGN
     * noise. See 4.1, p. 4.
     */
    static Eigen::RowVectorXd L_awgn(const RowVectord<t_n>& s,
                                     const RowVectord<t_n>& y) {
        return s.array() - y.array();
    }

    /**
     * @brief Find all indices of a matrix, where the corresponding value is
     * zero
     * @return \b std::vector of indices
     */
    static std::vector<Eigen::Index> find_zero(MatrixiR<t_m, t_n> mat) {
        std::vector<Eigen::Index> indices;

        for (Eigen::Index i = 0; i < mat.size(); ++i)
            if (mat(i) == 0) indices.push_back(i);

        return indices;
    }

    /**
     * Gradient of the code-constraint polynomial. See 2.3, p. 2.
     */
    RowVectord<t_n> grad_H(const RowVectord<t_n>& x) {
        MatrixdR<t_m, t_n> A_prod_matrix = x.replicate(t_m, 1);

        for (const auto& index : mH_zero_indices)
            A_prod_matrix(index) = 1;
        RowVectord<t_m> A_prods = A_prod_matrix.rowwise().prod();

        RowVectord<t_m> B_terms =
            (A_prods.array().pow(2) - A_prods.array()).matrix().transpose();

        RowVectord<t_n> B_sums = B_terms * mH.template cast<double>();

        RowVectord<t_n> result = 4 * (x.array().pow(2) - 1) * x.array() +
                                 (2 * x.array().inverse()) * B_sums.array();

        return result;
    }

    /**
     * Perform a parity check for a given codeword.
     * @param x_hat: codeword to be checked (element of [0, 1]^n)
     * @return \b True if the parity check passes, i.e. the codeword is valid.
     * False otherwise
     */
    bool check_parity(const RowVectori<t_n>& x_hat) {
        RowVectori<t_m> syndrome =
            (mH * x_hat.transpose()).unaryExpr([](int i) { return i % 2; });

        return !(syndrome.count() > 0);
    }

    /**
     * Project a vector onto [-eta, eta]^n in order to avoid numerical
     * instability. Detailed in 3.2, p. 3 (Equation (15)).
     * @param v Vector to project
     * @return v clipped to [-eta, eta]^n
     */
    RowVectord<t_n> projection(const RowVectord<t_n>& v) {
        return v.cwiseMin(mEta).cwiseMax(-mEta);
    }
};
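Written out in the notation of the constructor parameters (omega, gamma, eta, K) and the parity-check matrix H, the loop in decode() above performs the following per-iteration update. This is a sketch read directly off the code, so the symbols follow the implementation and not necessarily the paper's notation:

\[
  \nabla L(s) = s - y, \qquad r = s - \omega\,\nabla L(s),
\]
\[
  s \leftarrow \Pi_{[-\eta,\,\eta]}\bigl(r - \gamma\,\nabla h(r)\bigr), \qquad
  \hat{x}_j = \begin{cases} 1, & s_j < 0 \\ 0, & s_j \ge 0 \end{cases}
\]
\[
  \nabla h(x)_j = 4\,(x_j^2 - 1)\,x_j + \frac{2}{x_j}\sum_{i=1}^{m} H_{ij}\,\bigl(A_i^2 - A_i\bigr),
  \qquad A_i = \prod_{j:\,H_{ij}=1} x_j,
\]

where \(\Pi_{[-\eta,\eta]}\) is the componentwise clipping done in projection(), \(\nabla L\) is the AWGN-specific term computed by L_awgn(), and \(\nabla h\) is the code-constraint gradient computed by grad_H(). The loop returns early with the current \(\hat{x}\) and the iteration count as soon as check_parity() finds \(H\hat{x}^\top \equiv 0 \pmod{2}\); otherwise it reports -1 after K iterations.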
sw/cpp/src/python_interface.cpp (new file, 58 lines)
@@ -0,0 +1,58 @@
#define EIGEN_STACK_ALLOCATION_LIMIT 1048576

#include "proximal.h"

#include <pybind11/pybind11.h>


namespace py = pybind11;
using namespace pybind11::literals;


#define DEF_PROXIMAL_DECODER(name, m, n)                                     \
    py::class_<ProximalDecoder<m, n>>(proximal, name)                        \
        .def(py::init<MatrixiR<m, n>, int, double, double, double>(),        \
             "H"_a.noconvert(), "K"_a = 100, "omega"_a = 0.0002,             \
             "gamma"_a = .05, "eta"_a = 1.5)                                 \
        .def("decode", &ProximalDecoder<m, n>::decode, "y"_a.noconvert())    \
        .def("get_error_values", &ProximalDecoder<m, n>::get_error_values,   \
             "x"_a.noconvert(), "y"_a.noconvert())                           \
        .def("get_gradient_values",                                          \
             &ProximalDecoder<m, n>::get_gradient_values, "y"_a.noconvert()) \
        .def(py::pickle(                                                     \
            [](const ProximalDecoder<m, n>& a) {                             \
                auto state = a.get_decoder_state();                          \
                                                                             \
                MatrixiR<m, n> H     = std::get<0>(state);                   \
                int            K     = std::get<1>(state);                   \
                double         omega = std::get<2>(state);                   \
                double         gamma = std::get<3>(state);                   \
                double         eta   = std::get<4>(state);                   \
                                                                             \
                return py::make_tuple(H, K, omega, gamma, eta);              \
            },                                                               \
            [](py::tuple t) {                                                \
                return ProximalDecoder<m, n>{                                \
                    t[0].cast<MatrixiR<m, n>>(), t[1].cast<int>(),           \
                    t[2].cast<double>(), t[3].cast<double>(),                \
                    t[4].cast<double>()};                                    \
            }));


PYBIND11_MODULE(cpp_decoders, proximal) {
    proximal.doc() = "Proximal decoder";

    DEF_PROXIMAL_DECODER("ProximalDecoder_7_3", 3, 7)
    DEF_PROXIMAL_DECODER("ProximalDecoder_31_20", 20, 31)
    DEF_PROXIMAL_DECODER("ProximalDecoder_31_5", 5, 31)
    DEF_PROXIMAL_DECODER("ProximalDecoder_96_48", 48, 96)
    DEF_PROXIMAL_DECODER("ProximalDecoder_204_102", 102, 204)
    DEF_PROXIMAL_DECODER("ProximalDecoder_408_204", 204, 408)
    DEF_PROXIMAL_DECODER("ProximalDecoder_Dynamic", Eigen::Dynamic,
                         Eigen::Dynamic)

    py::register_exception<std::runtime_error>(
        proximal, "CppException: std::runtime_error");
    py::register_exception<std::bad_alloc>(proximal,
                                           "CppException: std::bad_alloc");
}
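A minimal usage sketch from the Python side (illustration only, not part of this commit): it assumes the module has been built and installed into sw/cpp_modules as configured by CMakeLists.txt above, that the interpreter runs from the sw/ directory, and it reuses the repository's own codes.read_alist_file and noise.add_awgn helpers with the existing res/96.3.965.alist code, which matches the (m, n) = (48, 96) instantiation bound above.

import numpy as np

from utility import codes, noise
from cpp_modules.cpp_decoders import ProximalDecoder_96_48

# 48 x 96 parity-check matrix; the bindings expect a row-major int32 array.
H = codes.read_alist_file("res/96.3.965.alist")
decoder = ProximalDecoder_96_48(H=H.astype('int32'), K=100,
                                omega=0.0002, gamma=0.05, eta=1.5)

n_min_k, n = H.shape
k = n - n_min_k

x_bpsk = np.zeros(n) + 1               # all-zero codeword mapped to +1
y = noise.add_awgn(x_bpsk, 3.0, n, k)  # received vector at 3 dB

# decode() returns (x_hat, num_iter); num_iter == -1 signals a decoding failure.
x_hat, num_iter = decoder.decode(y)
print(num_iter, x_hat[:8])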
@@ -7,7 +7,7 @@ class ProximalDecoder:
     by Tadashi Wadayama, and Satoshi Takabe.
     """
 
-    def __init__(self, H: np.array, K: int = 100, omega: float = 0.1,
+    def __init__(self, H: np.array, K: int = 1000, omega: float = 0.0002,
                  gamma: float = 0.05, eta: float = 1.5):
         """Construct a new ProximalDecoder Object.
 
@@ -75,7 +75,8 @@ class ProximalDecoder:
 
         :param y: Vector of received values. (y = x + w, where 'x' is
             element of [-1, 1]^n and 'w' is noise)
-        :return: Most probably sent codeword (element of [0, 1]^n)
+        :return: Most probably sent codeword (element of [0, 1]^n). If
+            decoding fails, the returned value is 'None'
         """
         s = np.zeros(self._n)
         x_hat = np.zeros(self._n)
@@ -86,11 +87,11 @@ class ProximalDecoder:
             s = self._projection(s)  # Equation (15)
 
             x_hat = np.sign(s)
 
             # Map the codeword from [-1, 1]^n to [0, 1]^n
             x_hat = (x_hat == -1) * 1
 
             if self._check_parity(x_hat):
-                break
+                return x_hat
 
-        return x_hat
+        return None
sw/main.py (deleted, 64 lines)
@@ -1,64 +0,0 @@
import numpy as np

from decoders import proximal, maximum_likelihood
from utility import simulation, codes


def start_new_simulation(sim_mgr: simulation.SimulationManager):
    sim_name = "test"

    # H = codes.read_alist_file("res/204.3.486.alist")
    # H = codes.read_alist_file("res/204.55.187.alist")
    H = codes.read_alist_file("res/96.3.965.alist")
    # H = codes.read_alist_file("res/408.33.844.alist")
    # H = codes.read_alist_file("res/PEGReg252x504.alist")
    # H = codes.read_alist_file("res/999.111.3.5543.alist")
    # H = codes.read_alist_file("res/999.111.3.5565.alist")
    # H = codes.read_alist_file("res/816.1A4.845.alist")
    k, n = H.shape

    decoders = [
        proximal.ProximalDecoder(H, gamma=0.01),
        proximal.ProximalDecoder(H, gamma=0.05),
        proximal.ProximalDecoder(H, gamma=0.15)
    ]

    labels = [
        "proximal $\\gamma = 0.01$",
        "proximal $\\gamma = 0.05$",
        "proximal $\\gamma = 0.15$"
    ]

    sim = simulation.Simulator(n=n, k=k, decoders=decoders,
                               target_frame_errors=100,
                               SNRs=np.arange(1, 6, 0.5))

    sim_mgr.configure_simulation(simulator=sim, name=sim_name,
                                 column_labels=labels)
    sim_mgr.simulate()


def main():
    # Perform necessary initialization

    results_dir = "sim_results"
    saves_dir = "sim_saves"

    sim_mgr = simulation.SimulationManager(results_dir=results_dir,
                                           saves_dir=saves_dir)

    # Calculate BERs

    unfinished_sims = sim_mgr.get_unfinished()

    if len(unfinished_sims) > 0:
        print("Found unfinished simulation. Picking up where it was left of")

        sim_mgr.load_unfinished(unfinished_sims[0])
        sim_mgr.simulate()
    else:
        start_new_simulation(sim_mgr)


if __name__ == "__main__":
    main()
@@ -3,7 +3,8 @@ import typing
 import matplotlib.pyplot as plt
 import seaborn as sns
 import os
-from utility import visualization, simulation
+from utility import visualization
+from utility.simulation import SimulationDeSerializer
 
 
 # TODO: This should be the responsibility of the DeSerializer
@@ -26,7 +27,7 @@ def plot_results() -> None:
 
     slugs = get_sim_slugs(results_dir)
 
-    deserializer = simulation.SimulationDeSerializer(save_dir=saves_dir,
+    deserializer = SimulationDeSerializer(save_dir=saves_dir,
                                           results_dir=results_dir)
 
     # Read data
sw/res/204.33.484.alist (new file, 310 lines)
@@ -0,0 +1,310 @@
|
|||||||
|
204 102
|
||||||
|
3 6
|
||||||
|
3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3
|
||||||
|
6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
|
||||||
|
73 84 81
|
||||||
|
63 45 26
|
||||||
|
17 77 55
|
||||||
|
74 47 24
|
||||||
|
9 10 52
|
||||||
|
62 63 44
|
||||||
|
38 39 35
|
||||||
|
60 4 100
|
||||||
|
82 98 63
|
||||||
|
40 80 68
|
||||||
|
91 81 18
|
||||||
|
86 88 99
|
||||||
|
77 71 65
|
||||||
|
29 9 33
|
||||||
|
15 41 34
|
||||||
|
75 11 22
|
||||||
|
48 24 95
|
||||||
|
22 44 60
|
||||||
|
5 19 41
|
||||||
|
31 22 43
|
||||||
|
21 18 56
|
||||||
|
83 51 49
|
||||||
|
79 7 88
|
||||||
|
36 67 5
|
||||||
|
84 75 32
|
||||||
|
1 79 38
|
||||||
|
43 82 75
|
||||||
|
2 1 23
|
||||||
|
33 61 83
|
||||||
|
69 3 30
|
||||||
|
28 5 77
|
||||||
|
8 56 4
|
||||||
|
53 76 36
|
||||||
|
96 28 102
|
||||||
|
44 17 48
|
||||||
|
92 26 74
|
||||||
|
56 69 11
|
||||||
|
18 68 50
|
||||||
|
72 34 37
|
||||||
|
25 37 76
|
||||||
|
23 30 21
|
||||||
|
7 29 40
|
||||||
|
71 78 39
|
||||||
|
13 2 96
|
||||||
|
55 33 51
|
||||||
|
49 64 16
|
||||||
|
37 66 54
|
||||||
|
50 36 89
|
||||||
|
45 15 57
|
||||||
|
88 35 15
|
||||||
|
57 102 78
|
||||||
|
98 27 71
|
||||||
|
27 52 94
|
||||||
|
68 20 87
|
||||||
|
89 38 8
|
||||||
|
24 94 53
|
||||||
|
11 50 19
|
||||||
|
14 83 98
|
||||||
|
76 55 2
|
||||||
|
41 70 62
|
||||||
|
78 86 14
|
||||||
|
3 21 59
|
||||||
|
64 23 46
|
||||||
|
47 43 45
|
||||||
|
95 31 1
|
||||||
|
85 59 84
|
||||||
|
94 54 101
|
||||||
|
93 6 31
|
||||||
|
59 95 61
|
||||||
|
58 73 47
|
||||||
|
54 87 7
|
||||||
|
52 97 79
|
||||||
|
12 90 28
|
||||||
|
30 99 97
|
||||||
|
42 100 73
|
||||||
|
101 62 20
|
||||||
|
80 96 85
|
||||||
|
100 58 91
|
||||||
|
19 16 6
|
||||||
|
99 49 72
|
||||||
|
70 92 17
|
||||||
|
90 93 69
|
||||||
|
10 40 25
|
||||||
|
39 72 82
|
||||||
|
67 74 12
|
||||||
|
4 65 27
|
||||||
|
20 85 90
|
||||||
|
102 13 92
|
||||||
|
46 8 9
|
||||||
|
35 46 10
|
||||||
|
97 53 93
|
||||||
|
66 48 42
|
||||||
|
87 89 64
|
||||||
|
26 25 58
|
||||||
|
6 57 67
|
||||||
|
65 12 3
|
||||||
|
61 91 66
|
||||||
|
81 101 29
|
||||||
|
51 60 86
|
||||||
|
16 14 80
|
||||||
|
34 32 70
|
||||||
|
32 42 13
|
||||||
|
72 17 45
|
||||||
|
52 14 22
|
||||||
|
49 53 2
|
||||||
|
95 76 60
|
||||||
|
39 57 26
|
||||||
|
32 60 48
|
||||||
|
2 78 8
|
||||||
|
92 3 54
|
||||||
|
71 55 42
|
||||||
|
23 5 62
|
||||||
|
35 99 50
|
||||||
|
37 31 18
|
||||||
|
24 84 12
|
||||||
|
14 97 56
|
||||||
|
94 40 44
|
||||||
|
22 74 63
|
||||||
|
63 49 28
|
||||||
|
42 96 78
|
||||||
|
101 27 85
|
||||||
|
55 75 99
|
||||||
|
40 9 67
|
||||||
|
61 28 97
|
||||||
|
76 101 16
|
||||||
|
88 95 33
|
||||||
|
5 33 37
|
||||||
|
68 61 27
|
||||||
|
65 52 38
|
||||||
|
20 42 15
|
||||||
|
60 23 84
|
||||||
|
67 73 52
|
||||||
|
25 66 53
|
||||||
|
51 77 21
|
||||||
|
81 62 1
|
||||||
|
66 21 96
|
||||||
|
75 56 83
|
||||||
|
8 13 19
|
||||||
|
30 19 51
|
||||||
|
102 90 58
|
||||||
|
97 11 68
|
||||||
|
89 15 98
|
||||||
|
44 20 35
|
||||||
|
74 35 5
|
||||||
|
26 72 59
|
||||||
|
10 88 94
|
||||||
|
46 69 100
|
||||||
|
16 54 77
|
||||||
|
91 6 74
|
||||||
|
13 64 101
|
||||||
|
47 32 40
|
||||||
|
50 24 90
|
||||||
|
38 48 46
|
||||||
|
86 85 47
|
||||||
|
6 51 34
|
||||||
|
7 68 36
|
||||||
|
56 7 31
|
||||||
|
69 92 61
|
||||||
|
48 39 43
|
||||||
|
31 71 64
|
||||||
|
33 8 93
|
||||||
|
79 4 17
|
||||||
|
98 37 95
|
||||||
|
77 87 25
|
||||||
|
19 67 49
|
||||||
|
87 2 69
|
||||||
|
1 22 10
|
||||||
|
64 29 88
|
||||||
|
70 91 65
|
||||||
|
84 25 102
|
||||||
|
99 45 66
|
||||||
|
58 18 57
|
||||||
|
4 1 70
|
||||||
|
12 30 72
|
||||||
|
82 89 32
|
||||||
|
15 47 55
|
||||||
|
18 59 24
|
||||||
|
21 81 13
|
||||||
|
54 100 29
|
||||||
|
57 50 4
|
||||||
|
90 94 23
|
||||||
|
34 10 79
|
||||||
|
93 44 89
|
||||||
|
73 102 41
|
||||||
|
80 26 82
|
||||||
|
11 65 92
|
||||||
|
45 41 87
|
||||||
|
28 83 71
|
||||||
|
85 34 39
|
||||||
|
9 82 14
|
||||||
|
3 36 20
|
||||||
|
83 86 76
|
||||||
|
29 70 80
|
||||||
|
43 12 9
|
||||||
|
78 63 73
|
||||||
|
27 46 30
|
||||||
|
59 16 86
|
||||||
|
62 58 6
|
||||||
|
41 98 91
|
||||||
|
96 93 11
|
||||||
|
53 43 81
|
||||||
|
36 79 75
|
||||||
|
17 38 3
|
||||||
|
100 80 7
|
||||||
|
26 167 28 173 65 135
|
||||||
|
28 109 44 166 59 105
|
||||||
|
62 191 30 110 96 203
|
||||||
|
86 173 8 162 32 180
|
||||||
|
19 127 31 112 24 144
|
||||||
|
95 155 68 149 79 198
|
||||||
|
42 156 23 157 71 204
|
||||||
|
32 138 89 161 55 109
|
||||||
|
5 190 14 123 89 194
|
||||||
|
83 146 5 182 90 167
|
||||||
|
57 186 16 141 37 200
|
||||||
|
73 174 96 194 85 115
|
||||||
|
44 150 88 138 102 178
|
||||||
|
58 116 100 104 61 190
|
||||||
|
15 176 49 142 50 130
|
||||||
|
100 148 79 197 46 125
|
||||||
|
3 203 35 103 81 162
|
||||||
|
38 177 21 172 11 114
|
||||||
|
79 165 19 139 57 138
|
||||||
|
87 130 54 143 76 191
|
||||||
|
21 178 62 136 41 134
|
||||||
|
18 118 20 167 16 104
|
||||||
|
41 112 63 131 28 181
|
||||||
|
56 115 17 152 4 177
|
||||||
|
40 133 94 170 83 164
|
||||||
|
94 145 36 185 2 107
|
||||||
|
53 196 52 121 86 128
|
||||||
|
31 188 34 124 73 119
|
||||||
|
14 193 42 168 98 179
|
||||||
|
74 139 41 174 30 196
|
||||||
|
20 160 65 114 68 157
|
||||||
|
102 108 101 151 25 175
|
||||||
|
29 161 45 127 14 126
|
||||||
|
101 182 39 189 15 155
|
||||||
|
90 113 50 144 7 143
|
||||||
|
24 202 48 191 33 156
|
||||||
|
47 114 40 163 39 127
|
||||||
|
7 153 55 203 26 129
|
||||||
|
84 107 7 159 43 189
|
||||||
|
10 123 83 117 42 151
|
||||||
|
60 199 15 187 19 184
|
||||||
|
75 120 102 130 92 111
|
||||||
|
27 194 64 201 20 159
|
||||||
|
35 143 18 183 6 117
|
||||||
|
49 187 2 171 64 103
|
||||||
|
89 147 90 196 63 153
|
||||||
|
64 151 4 176 70 154
|
||||||
|
17 159 92 153 35 108
|
||||||
|
46 105 80 119 22 165
|
||||||
|
48 152 57 180 38 113
|
||||||
|
99 134 22 155 45 139
|
||||||
|
72 104 53 129 5 132
|
||||||
|
33 201 91 105 56 133
|
||||||
|
71 179 67 148 47 110
|
||||||
|
45 122 59 111 3 176
|
||||||
|
37 157 32 137 21 116
|
||||||
|
51 180 95 107 49 172
|
||||||
|
70 172 78 198 94 140
|
||||||
|
69 197 66 177 62 145
|
||||||
|
8 131 99 108 18 106
|
||||||
|
97 124 29 128 69 158
|
||||||
|
6 198 76 135 60 112
|
||||||
|
2 119 6 195 9 118
|
||||||
|
63 168 46 150 93 160
|
||||||
|
96 129 86 186 13 169
|
||||||
|
92 136 47 133 97 171
|
||||||
|
85 132 24 165 95 123
|
||||||
|
54 128 38 156 10 141
|
||||||
|
30 158 37 147 82 166
|
||||||
|
81 169 60 193 101 173
|
||||||
|
43 111 13 160 52 188
|
||||||
|
39 103 84 145 80 174
|
||||||
|
1 184 70 132 75 195
|
||||||
|
4 144 85 118 36 149
|
||||||
|
16 137 25 122 27 202
|
||||||
|
59 125 33 106 40 192
|
||||||
|
13 164 3 134 31 148
|
||||||
|
61 195 43 109 51 120
|
||||||
|
23 162 26 202 72 182
|
||||||
|
77 185 10 204 100 193
|
||||||
|
98 135 11 178 1 201
|
||||||
|
9 175 27 190 84 185
|
||||||
|
22 192 58 188 29 137
|
||||||
|
25 170 1 115 66 131
|
||||||
|
66 189 87 154 77 121
|
||||||
|
12 154 61 192 99 197
|
||||||
|
93 166 71 164 54 187
|
||||||
|
50 126 12 146 23 168
|
||||||
|
55 142 93 175 48 183
|
||||||
|
82 181 73 140 87 152
|
||||||
|
11 149 97 169 78 199
|
||||||
|
36 110 81 158 88 186
|
||||||
|
68 183 82 200 91 161
|
||||||
|
67 117 56 181 53 146
|
||||||
|
65 106 69 126 17 163
|
||||||
|
34 200 77 120 44 136
|
||||||
|
91 141 72 116 74 124
|
||||||
|
52 163 9 199 58 142
|
||||||
|
80 171 74 113 12 122
|
||||||
|
78 204 75 179 8 147
|
||||||
|
76 121 98 125 67 150
|
||||||
|
88 140 51 184 34 170
|
||||||
sw/res/BCH_31_11.alist (new file, 55 lines)
@@ -0,0 +1,55 @@
|
|||||||
|
31 20
|
||||||
|
6 6
|
||||||
|
1 1 2 3 4 4 4 4 4 5 5 6 6 6 6 6 6 6 6 6 5 5 4 3 2 2 2 2 2 1 1
|
||||||
|
6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
|
||||||
|
1 0 0 0 0 0
|
||||||
|
2 0 0 0 0 0
|
||||||
|
1 3 0 0 0 0
|
||||||
|
1 2 4 0 0 0
|
||||||
|
1 2 3 5 0 0
|
||||||
|
2 3 4 6 0 0
|
||||||
|
3 4 5 7 0 0
|
||||||
|
4 5 6 8 0 0
|
||||||
|
5 6 7 9 0 0
|
||||||
|
1 6 7 8 10 0
|
||||||
|
2 7 8 9 11 0
|
||||||
|
1 3 8 9 10 12
|
||||||
|
2 4 9 10 11 13
|
||||||
|
3 5 10 11 12 14
|
||||||
|
4 6 11 12 13 15
|
||||||
|
5 7 12 13 14 16
|
||||||
|
6 8 13 14 15 17
|
||||||
|
7 9 14 15 16 18
|
||||||
|
8 10 15 16 17 19
|
||||||
|
9 11 16 17 18 20
|
||||||
|
10 12 17 18 19 0
|
||||||
|
11 13 18 19 20 0
|
||||||
|
12 14 19 20 0 0
|
||||||
|
13 15 20 0 0 0
|
||||||
|
14 16 0 0 0 0
|
||||||
|
15 17 0 0 0 0
|
||||||
|
16 18 0 0 0 0
|
||||||
|
17 19 0 0 0 0
|
||||||
|
18 20 0 0 0 0
|
||||||
|
19 0 0 0 0 0
|
||||||
|
20 0 0 0 0 0
|
||||||
|
1 3 4 5 10 12
|
||||||
|
2 4 5 6 11 13
|
||||||
|
3 5 6 7 12 14
|
||||||
|
4 6 7 8 13 15
|
||||||
|
5 7 8 9 14 16
|
||||||
|
6 8 9 10 15 17
|
||||||
|
7 9 10 11 16 18
|
||||||
|
8 10 11 12 17 19
|
||||||
|
9 11 12 13 18 20
|
||||||
|
10 12 13 14 19 21
|
||||||
|
11 13 14 15 20 22
|
||||||
|
12 14 15 16 21 23
|
||||||
|
13 15 16 17 22 24
|
||||||
|
14 16 17 18 23 25
|
||||||
|
15 17 18 19 24 26
|
||||||
|
16 18 19 20 25 27
|
||||||
|
17 19 20 21 26 28
|
||||||
|
18 20 21 22 27 29
|
||||||
|
19 21 22 23 28 30
|
||||||
|
20 22 23 24 29 31
|
||||||
sw/res/BCH_31_26.alist (new file, 40 lines)
@@ -0,0 +1,40 @@
|
|||||||
|
31 5
|
||||||
|
5 16
|
||||||
|
1 1 1 2 2 2 3 3 2 3 3 3 4 5 4 3 2 2 2 2 3 4 4 3 4 3 3 2 2 1 1
|
||||||
|
16 16 16 16 16
|
||||||
|
1 0 0 0 0
|
||||||
|
2 0 0 0 0
|
||||||
|
3 0 0 0 0
|
||||||
|
1 4 0 0 0
|
||||||
|
2 5 0 0 0
|
||||||
|
1 3 0 0 0
|
||||||
|
1 2 4 0 0
|
||||||
|
2 3 5 0 0
|
||||||
|
3 4 0 0 0
|
||||||
|
1 4 5 0 0
|
||||||
|
1 2 5 0 0
|
||||||
|
1 2 3 0 0
|
||||||
|
1 2 3 4 0
|
||||||
|
1 2 3 4 5
|
||||||
|
2 3 4 5 0
|
||||||
|
3 4 5 0 0
|
||||||
|
4 5 0 0 0
|
||||||
|
1 5 0 0 0
|
||||||
|
1 2 0 0 0
|
||||||
|
2 3 0 0 0
|
||||||
|
1 3 4 0 0
|
||||||
|
1 2 4 5 0
|
||||||
|
1 2 3 5 0
|
||||||
|
2 3 4 0 0
|
||||||
|
1 3 4 5 0
|
||||||
|
2 4 5 0 0
|
||||||
|
1 3 5 0 0
|
||||||
|
2 4 0 0 0
|
||||||
|
3 5 0 0 0
|
||||||
|
4 0 0 0 0
|
||||||
|
5 0 0 0 0
|
||||||
|
1 4 6 7 10 11 12 13 14 18 19 21 22 23 25 27
|
||||||
|
2 5 7 8 11 12 13 14 15 19 20 22 23 24 26 28
|
||||||
|
3 6 8 9 12 13 14 15 16 20 21 23 24 25 27 29
|
||||||
|
4 7 9 10 13 14 15 16 17 21 22 24 25 26 28 30
|
||||||
|
5 8 10 11 14 15 16 17 18 22 23 25 26 27 29 31
|
||||||
sw/res/BCH_7_4.alist (new file, 14 lines)
@@ -0,0 +1,14 @@
7 3
3 4
1 1 2 2 3 2 1
4 4 4
1 0 0
2 0 0
1 3 0
1 2 0
1 2 3
2 3 0
3 0 0
1 3 4 5
2 4 5 6
3 5 6 7
@ -1,11 +0,0 @@
|
|||||||
,SNR,BER_0,BER_1,BER_2
|
|
||||||
0,1.0,0.10563725490196078,0.09682585905649388,0.22583333333333333
|
|
||||||
1,1.5,0.08,0.0683067619571193,0.20122549019607844
|
|
||||||
2,2.0,0.06259803921568627,0.05018059855521156,0.18916666666666668
|
|
||||||
3,2.5,0.05205882352941176,0.029125518820666954,0.18099181420140872
|
|
||||||
4,3.0,0.039411764705882354,0.01665640078020737,0.16258169934640523
|
|
||||||
5,3.5,0.029266161910308678,0.007462921065862243,0.14732620320855616
|
|
||||||
6,4.0,0.022200226244343892,0.003201766930435059,0.12311618862421002
|
|
||||||
7,4.5,0.013597612958226769,0.0014110192617440731,0.11709803921568628
|
|
||||||
8,5.0,0.008648459383753502,0.00041590809215895537,0.10357142857142858
|
|
||||||
9,5.5,0.004875886524822695,0.00014872398470788968,0.10611709472521402
|
|
||||||
|
@ -1,11 +0,0 @@
|
|||||||
{
|
|
||||||
"duration": 0,
|
|
||||||
"name": "204.3.486",
|
|
||||||
"labels": [
|
|
||||||
"proximal $\\gamma = 0.01$",
|
|
||||||
"proximal $\\gamma = 0.05$",
|
|
||||||
"proximal $\\gamma = 0.15$"
|
|
||||||
],
|
|
||||||
"platform": "Linux-6.0.9-arch1-1-x86_64-with-glibc2.36",
|
|
||||||
"end_time": "2022-11-23 15:13:09.470306"
|
|
||||||
}
|
|
||||||
@ -1,11 +0,0 @@
|
|||||||
,SNR,BER_0,BER_1,BER_2
|
|
||||||
0,1.0,0.12745098039215685,0.1317156862745098,0.435343137254902
|
|
||||||
1,1.5,0.1032843137254902,0.1073577946029897,0.4167647058823529
|
|
||||||
2,2.0,0.0886764705882353,0.09187536400698894,0.4475
|
|
||||||
3,2.5,0.06348039215686274,0.06462063086104007,0.4189705882352941
|
|
||||||
4,3.0,0.055074971164936565,0.03978257969854609,0.44857843137254905
|
|
||||||
5,3.5,0.035550256138491436,0.018195755668522554,0.41799019607843135
|
|
||||||
6,4.0,0.023133814929480565,0.006259426847662142,0.4513235294117647
|
|
||||||
7,4.5,0.011700300558179477,0.00220798701994459,0.4648378582202112
|
|
||||||
8,5.0,0.005628177196804648,0.0005365073064232019,0.5115686274509804
|
|
||||||
9,5.5,0.0020973563554775145,8.043597037287447e-05,0.5982224665567162
|
|
||||||
|
@ -1,11 +0,0 @@
|
|||||||
{
|
|
||||||
"duration": 0,
|
|
||||||
"name": "204.55.187",
|
|
||||||
"labels": [
|
|
||||||
"proximal $\\gamma = 0.01$",
|
|
||||||
"proximal $\\gamma = 0.05$",
|
|
||||||
"proximal $\\gamma = 0.15$"
|
|
||||||
],
|
|
||||||
"platform": "Linux-6.0.9-arch1-1-x86_64-with-glibc2.36",
|
|
||||||
"end_time": "2022-11-23 15:13:09.470306"
|
|
||||||
}
|
|
||||||
@ -1,11 +0,0 @@
|
|||||||
,SNR,BER_0,BER_1,BER_2
|
|
||||||
0,1.0,0.10080882352941177,0.09544117647058824,0.1990686274509804
|
|
||||||
1,1.5,0.0811764705882353,0.06887254901960785,0.18088235294117647
|
|
||||||
2,2.0,0.06674019607843137,0.05043733179546328,0.15377450980392157
|
|
||||||
3,2.5,0.05235294117647059,0.030307315233785822,0.13629901960784313
|
|
||||||
4,3.0,0.03963235294117647,0.015560011883541296,0.11936274509803922
|
|
||||||
5,3.5,0.02784313725490196,0.006988436400201106,0.10667831489031256
|
|
||||||
6,4.0,0.019362745098039216,0.0030565167243367937,0.09600077654824306
|
|
||||||
7,4.5,0.014438943894389438,0.001095362298945249,0.1011764705882353
|
|
||||||
8,5.0,0.009734554199038107,0.0002930821517592325,0.0927732479130266
|
|
||||||
9,5.5,0.005115935262994087,0.0001041452669542788,0.08391574451562803
|
|
||||||
|
@ -1,11 +0,0 @@
|
|||||||
{
|
|
||||||
"duration": 0,
|
|
||||||
"name": "408.33.844",
|
|
||||||
"labels": [
|
|
||||||
"proximal $\\gamma = 0.01$",
|
|
||||||
"proximal $\\gamma = 0.05$",
|
|
||||||
"proximal $\\gamma = 0.15$"
|
|
||||||
],
|
|
||||||
"platform": "Linux-6.0.9-arch1-1-x86_64-with-glibc2.36",
|
|
||||||
"end_time": "2022-11-23 15:13:09.470306"
|
|
||||||
}
|
|
||||||
@ -1,11 +0,0 @@
|
|||||||
,SNR,BER_0,BER_1,BER_2
|
|
||||||
0,1.0,0.1615441176470588,0.16159313725490196,0.2829289215686275
|
|
||||||
1,1.5,0.14033088235294117,0.13708333333333333,0.26653186274509805
|
|
||||||
2,2.0,0.11881127450980392,0.10397058823529412,0.25346813725490197
|
|
||||||
3,2.5,0.09993872549019608,0.08181372549019608,0.22971813725490195
|
|
||||||
4,3.0,0.08060049019607843,0.05144607843137255,0.21645833333333334
|
|
||||||
5,3.5,0.06575980392156863,0.0249069535221496,0.19486519607843136
|
|
||||||
6,4.0,0.0488235294117647,0.009746588693957114,0.16463235294117648
|
|
||||||
7,4.5,0.035134803921568626,0.0021558361564521095,0.15033088235294118
|
|
||||||
8,5.0,0.022855392156862744,0.000351575056252009,0.137046568627451
|
|
||||||
9,5.5,0.014522058823529412,5.58038513435561e-05,0.12879901960784312
|
|
||||||
|
@ -1,11 +0,0 @@
|
|||||||
{
|
|
||||||
"duration": 0,
|
|
||||||
"name": "816.1A4.845",
|
|
||||||
"labels": [
|
|
||||||
"proximal $\\gamma = 0.01$",
|
|
||||||
"proximal $\\gamma = 0.05$",
|
|
||||||
"proximal $\\gamma = 0.15$"
|
|
||||||
],
|
|
||||||
"platform": "Linux-6.0.9-arch1-1-x86_64-with-glibc2.36",
|
|
||||||
"end_time": "2022-11-23 15:13:09.470306"
|
|
||||||
}
|
|
||||||
@ -1,11 +0,0 @@
|
|||||||
,SNR,BER_0,BER_1,BER_2
|
|
||||||
0,1.0,0.0999795751633987,0.08963373655913978,0.24563492063492062
|
|
||||||
1,1.5,0.08741830065359477,0.07678834808259587,0.2594246031746032
|
|
||||||
2,2.0,0.07170307443365696,0.047135416666666666,0.24610591900311526
|
|
||||||
3,2.5,0.056105610561056105,0.03345458553791887,0.2098731884057971
|
|
||||||
4,3.0,0.04147376543209876,0.02209486735870819,0.16597222222222222
|
|
||||||
5,3.5,0.028025793650793652,0.008191915011037528,0.13171296296296298
|
|
||||||
6,4.0,0.017744252873563217,0.0045138888888888885,0.09421202956989247
|
|
||||||
7,4.5,0.012328586497890296,0.0013146575646575647,0.08682983682983683
|
|
||||||
8,5.0,0.008111702127659574,0.000641025641025641,0.06716008771929824
|
|
||||||
9,5.5,0.005926089517078916,0.00029386036848723417,0.043951023391812866
|
|
||||||
|
@ -1,11 +0,0 @@
|
|||||||
{
|
|
||||||
"duration": 0,
|
|
||||||
"name":"96.3965",
|
|
||||||
"labels": [
|
|
||||||
"proximal $\\gamma = 0.01$",
|
|
||||||
"proximal $\\gamma = 0.05$",
|
|
||||||
"proximal $\\gamma = 0.15$"
|
|
||||||
],
|
|
||||||
"platform": "Linux-6.0.9-arch1-1-x86_64-with-glibc2.36",
|
|
||||||
"end_time": "2022-11-23 15:13:09.470306"
|
|
||||||
}
|
|
||||||
@ -1,11 +0,0 @@
|
|||||||
,SNR,BER_0,BER_1,BER_2
|
|
||||||
0,1.0,0.06862862862862863,0.0705005005005005,0.5143343343343343
|
|
||||||
1,1.5,0.05656656656656656,0.057967967967967965,0.5056556556556556
|
|
||||||
2,2.0,0.04464464464464465,0.04675675675675676,0.48372372372372374
|
|
||||||
3,2.5,0.034694694694694696,0.037237237237237236,0.463013013013013
|
|
||||||
4,3.0,0.024634634634634636,0.0277977977977978,0.42564564564564566
|
|
||||||
5,3.5,0.018068068068068068,0.01872872872872873,0.37803803803803804
|
|
||||||
6,4.0,0.012562562562562562,0.014474474474474475,0.31125125125125125
|
|
||||||
7,4.5,0.007379928948556399,0.009704148593037481,0.27387387387387385
|
|
||||||
8,5.0,0.0037327046672841063,0.008071063189173425,0.21937937937937937
|
|
||||||
9,5.5,0.0017655450726316868,0.0038553404890038553,0.1765260406037105
|
|
||||||
|
@ -1,11 +0,0 @@
|
|||||||
{
|
|
||||||
"duration": 0,
|
|
||||||
"name": "999.111.3.5543",
|
|
||||||
"labels": [
|
|
||||||
"proximal $\\gamma = 0.01$",
|
|
||||||
"proximal $\\gamma = 0.05$",
|
|
||||||
"proximal $\\gamma = 0.15$"
|
|
||||||
],
|
|
||||||
"platform": "Linux-6.0.9-arch1-1-x86_64-with-glibc2.36",
|
|
||||||
"end_time": "2022-11-23 15:13:09.470306"
|
|
||||||
}
|
|
||||||
@ -1,11 +0,0 @@
|
|||||||
,SNR,BER_0,BER_1,BER_2
|
|
||||||
0,1.0,0.0679079079079079,0.07043043043043043,0.4982982982982983
|
|
||||||
1,1.5,0.05665665665665666,0.05916916916916917,0.49123123123123125
|
|
||||||
2,2.0,0.045395395395395395,0.04723723723723724,0.4886886886886887
|
|
||||||
3,2.5,0.035115115115115114,0.03822822822822823,0.46526526526526524
|
|
||||||
4,3.0,0.026556556556556556,0.026096096096096096,0.4365265265265265
|
|
||||||
5,3.5,0.01785785785785786,0.02015015015015015,0.38725725725725724
|
|
||||||
6,4.0,0.011111111111111112,0.013983983983983985,0.3212812812812813
|
|
||||||
7,4.5,0.006565388918330094,0.012264601049647779,0.26955955955955957
|
|
||||||
8,5.0,0.0035591146702257815,0.008438125625625625,0.20996996996996997
|
|
||||||
9,5.5,0.0017769707692188313,0.004728132387706856,0.1771078008701771
|
|
||||||
|
@ -1,11 +0,0 @@
|
|||||||
{
|
|
||||||
"duration": 0,
|
|
||||||
"name": "999.111.3.5565",
|
|
||||||
"labels": [
|
|
||||||
"proximal $\\gamma = 0.01$",
|
|
||||||
"proximal $\\gamma = 0.05$",
|
|
||||||
"proximal $\\gamma = 0.15$"
|
|
||||||
],
|
|
||||||
"platform": "Linux-6.0.9-arch1-1-x86_64-with-glibc2.36",
|
|
||||||
"end_time": "2022-11-23 15:13:09.470306"
|
|
||||||
}
|
|
||||||
@ -1,11 +0,0 @@
|
|||||||
,SNR,BER_0,BER_1,BER_2
|
|
||||||
0,1.0,0.10359126984126985,0.0946626984126984,0.18404761904761904
|
|
||||||
1,1.5,0.08339285714285714,0.06678571428571428,0.16257936507936507
|
|
||||||
2,2.0,0.06956349206349206,0.04954365079365079,0.13936507936507936
|
|
||||||
3,2.5,0.05238095238095238,0.02796321020620086,0.1226984126984127
|
|
||||||
4,3.0,0.03801587301587302,0.015180146132527085,0.1051984126984127
|
|
||||||
5,3.5,0.028888888888888888,0.006950113378684807,0.08777777777777777
|
|
||||||
6,4.0,0.020873015873015873,0.001969749252357948,0.07857142857142857
|
|
||||||
7,4.5,0.013201320132013201,0.0008142205766395831,0.07563492063492064
|
|
||||||
8,5.0,0.008584787050133585,0.00018992133497197202,0.0662030488763162
|
|
||||||
9,5.5,0.004884004884004884,4.768522303060029e-05,0.07061157796451914
|
|
||||||
|
@ -1,11 +0,0 @@
|
|||||||
{
|
|
||||||
"duration": 0,
|
|
||||||
"name": "PEGReg252x504",
|
|
||||||
"labels": [
|
|
||||||
"proximal $\\gamma = 0.01$",
|
|
||||||
"proximal $\\gamma = 0.05$",
|
|
||||||
"proximal $\\gamma = 0.15$"
|
|
||||||
],
|
|
||||||
"platform": "Linux-6.0.9-arch1-1-x86_64-with-glibc2.36",
|
|
||||||
"end_time": "2022-11-23 15:13:09.470306"
|
|
||||||
}
|
|
||||||
sw/simulate_2d_BER.py (new file, 148 lines)
@@ -0,0 +1,148 @@
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import signal
from timeit import default_timer
from functools import partial

from utility import codes, noise, misc
from utility.simulation.simulators import GenericMultithreadedSimulator
from utility.simulation import SimulationManager

from cpp_modules.cpp_decoders import ProximalDecoder_204_102 as ProximalDecoder


def task_func(params):
    """Function called by the GenericMultithreadedSimulator instance.

    Calculate the BER, FER, and DFR for a given SNR and gamma.
    """
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    decoder = params["decoder"]
    max_iterations = params["max_iterations"]
    SNR = params["SNR"]
    n = params["n"]
    k = params["k"]

    c = np.zeros(n)
    x_bpsk = c + 1

    total_bit_errors = 0
    total_frame_errors = 0
    dec_fails = 0

    num_iterations = 0

    for i in range(max_iterations):
        x = noise.add_awgn(x_bpsk, SNR, n, k)
        x_hat, k_max = decoder.decode(x)

        bit_errors = misc.count_bit_errors(x_hat, c)
        if bit_errors > 0:
            total_bit_errors += bit_errors
            total_frame_errors += 1

        num_iterations += 1

        if k_max == -1:
            dec_fails += 1

        if total_frame_errors > 100:
            break

    BER = total_bit_errors / (num_iterations * n)
    FER = total_frame_errors / num_iterations
    DFR = dec_fails / (num_iterations + dec_fails)

    return {"BER": BER, "FER": FER, "DFR": DFR,
            "num_iterations": num_iterations}


def get_params(code_name: str):
    """In this function all parameters for the simulation are defined."""
    # Define global simulation parameters

    H_file = f"res/{code_name}.alist"

    H = codes.read_alist_file(H_file)
    n_min_k, n = H.shape
    k = n - n_min_k

    omega = 0.05
    K = 100
    gammas = np.arange(0.0, 0.17, 0.01)

    SNRs = np.arange(1, 6, 0.5)
    max_iterations = 20000

    # Define parameters different for each task

    task_params = []
    for i, SNR in enumerate(SNRs):
        for j, gamma in enumerate(gammas):
            decoder = ProximalDecoder(H=H.astype('int32'), K=K, omega=omega,
                                      gamma=gamma)

            task_params.append(
                {"decoder": decoder, "max_iterations": max_iterations,
                 "SNR": SNR, "gamma": gamma, "n": n, "k": k})

    return omega, K, task_params


def configure_new_simulation(sim_mgr: SimulationManager, code_name: str,
                             sim_name: str) -> None:
    sim = GenericMultithreadedSimulator()

    omega, K, task_params = get_params(code_name)

    sim.task_params = task_params
    sim.task_func = task_func
    sim.format_func = partial(misc.pgf_reformat_data_3d, x_param_name="SNR",
                              y_param_name="gamma",
                              z_param_names=["BER", "FER", "DFR",
                                             "num_iterations"])

    sim_mgr.configure_simulation(simulator=sim, name=sim_name,
                                 additional_metadata={"omega": omega, "K": K})


def main():
    # code_name = "BCH_7_4"
    # code_name = "BCH_31_11"
    # code_name = "BCH_31_26"
    # code_name = "96.3.965"
    # code_name = "204.33.486"
    code_name = "204.33.484"
    # code_name = "204.55.187"
    # code_name = "408.33.844"

    sim_name = f"2d_BER_FER_DFR_{misc.slugify(code_name)}"

    # Run simulation

    sim_mgr = SimulationManager(saves_dir="sim_saves",
                                results_dir="sim_results")

    unfinished_sims = sim_mgr.get_unfinished()
    if len(unfinished_sims) > 0:
        sim_mgr.load_unfinished(unfinished_sims[0])
    else:
        configure_new_simulation(sim_mgr=sim_mgr, code_name=code_name,
                                 sim_name=sim_name)

    sim_mgr.simulate()

    # Plot results

    sns.set_theme()
    ax = sns.lineplot(data=sim_mgr.get_current_results(), x="SNR", y="BER",
                      hue="gamma")
    ax.set_yscale('log')
    ax.set_ylim((5e-5, 2e-0))
    plt.show()


if __name__ == "__main__":
    main()
sw/simulate_2d_avg_error.py (new file, 144 lines)
@@ -0,0 +1,144 @@
|
|||||||
|
import numpy as np
|
||||||
|
import seaborn as sns
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
import signal
|
||||||
|
from timeit import default_timer
|
||||||
|
from functools import partial
|
||||||
|
import pandas as pd
|
||||||
|
|
||||||
|
from utility import codes, noise, misc
|
||||||
|
from utility.simulation.simulators import GenericMultithreadedSimulator
|
||||||
|
from utility.simulation import SimulationManager
|
||||||
|
|
||||||
|
from cpp_modules.cpp_decoders import ProximalDecoder_204_102 as ProximalDecoder
|
||||||
|
|
||||||
|
|
||||||
|
def task_func(params):
|
||||||
|
"""Function called by the GenericMultithreadedSimulator instance.
|
||||||
|
|
||||||
|
Calculate the average error over a number of iterations.
|
||||||
|
"""
|
||||||
|
signal.signal(signal.SIGINT, signal.SIG_IGN)
|
||||||
|
|
||||||
|
decoder = params["decoder"]
|
||||||
|
num_iterations = params["num_iterations"]
|
||||||
|
x_bpsk = params["x_bpsk"]
|
||||||
|
SNR = params["SNR"]
|
||||||
|
n = params["n"]
|
||||||
|
k = params["k"]
|
||||||
|
K = params["K"]
|
||||||
|
|
||||||
|
avg_error_values = np.zeros(K)
|
||||||
|
|
||||||
|
for i in range(num_iterations):
|
||||||
|
x = noise.add_awgn(x_bpsk, SNR, n, k)
|
||||||
|
|
||||||
|
error_values = decoder.get_error_values(x_bpsk.astype('int32'), x)
|
||||||
|
|
||||||
|
for j, val in enumerate(error_values):
|
||||||
|
avg_error_values[j] += val
|
||||||
|
|
||||||
|
avg_error_values = avg_error_values / num_iterations
|
||||||
|
|
||||||
|
return {"err": avg_error_values}
|
||||||
|
|
||||||
|
|
||||||
|
def get_params(code_name: str):
|
||||||
|
"""In this function all parameters for the simulation are defined."""
|
||||||
|
# Define global simulation parameters
|
||||||
|
|
||||||
|
H_file = f"res/{code_name}.alist"
|
||||||
|
H = codes.read_alist_file(H_file)
|
||||||
|
n_min_k, n = H.shape
|
||||||
|
k = n - n_min_k
|
||||||
|
|
||||||
|
SNR = 8
|
||||||
|
omegas = np.logspace(-0, -10, 40)
|
||||||
|
K = 200
|
||||||
|
|
||||||
|
num_iterations = 1000
|
||||||
|
x_bpsk = np.zeros(n) + 1
|
||||||
|
|
||||||
|
# Define parameters different for each task
|
||||||
|
|
||||||
|
task_params = []
|
||||||
|
for i, omega in enumerate(omegas):
|
||||||
|
decoder = ProximalDecoder(H=H.astype('int32'), K=K,
|
||||||
|
omega=omega)
|
||||||
|
task_params.append(
|
||||||
|
{"decoder": decoder, "num_iterations": num_iterations,
|
||||||
|
"x_bpsk": x_bpsk, "SNR": SNR, "n": n, "k": k, "K": K,
|
||||||
|
"omega": omega})
|
||||||
|
|
||||||
|
return SNR, K, task_params
|
||||||
|
|
||||||
|
|
||||||
|
def reformat_data(results):
|
||||||
|
"""Reformat the data obtained from the GenericMultithreadedSimulator to
|
||||||
|
be usable by pgfplots.
|
||||||
|
"""
|
||||||
|
K = 200
|
||||||
|
num_points = len(results) * K
|
||||||
|
|
||||||
|
x = np.zeros(num_points)
|
||||||
|
y = np.zeros(num_points)
|
||||||
|
z = np.zeros(num_points)
|
||||||
|
|
||||||
|
for i, (params, result) in enumerate(results.items()):
|
||||||
|
np.put(x, np.arange(i * K, (i + 1) * K), np.arange(1, K+1))
|
||||||
|
np.put(y, np.arange(i * K, (i + 1) * K), params["omega"])
|
||||||
|
np.put(z, np.arange(i * K, (i + 1) * K), result["err"])
|
||||||
|
|
||||||
|
x = x[::4]
|
||||||
|
y = y[::4]
|
||||||
|
z = z[::4]
|
||||||
|
|
||||||
|
df = pd.DataFrame({"k": x, "omega": y, "err": z}).sort_values(
|
||||||
|
by=['k', 'omega'], ascending=[True, False])
|
||||||
|
|
||||||
|
return df
|
||||||
|
|
||||||
|
|
||||||
|
def configure_new_simulation(sim_mgr: SimulationManager, code_name: str,
|
||||||
|
sim_name: str) -> None:
|
||||||
|
sim = GenericMultithreadedSimulator()
|
||||||
|
|
||||||
|
SNR, K, task_params = get_params(code_name)
|
||||||
|
|
||||||
|
sim.task_params = task_params
|
||||||
|
sim.task_func = task_func
|
||||||
|
sim.format_func = reformat_data
|
||||||
|
|
||||||
|
sim_mgr.configure_simulation(simulator=sim, name=sim_name,
|
||||||
|
additional_metadata={"SNR": SNR, "K": K})
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
# code_name = "BCH_7_4"
|
||||||
|
# code_name = "BCH_31_11"
|
||||||
|
# code_name = "BCH_31_26"
|
||||||
|
# code_name = "96.3.965"
|
||||||
|
# code_name = "204.33.486"
|
||||||
|
code_name = "204.33.484"
|
||||||
|
# code_name = "204.55.187"
|
||||||
|
# code_name = "408.33.844"
|
||||||
|
|
||||||
|
sim_name = f"2d_avg_error_{misc.slugify(code_name)}"
|
||||||
|
|
||||||
|
# Run simulation
|
||||||
|
|
||||||
|
sim_mgr = SimulationManager(saves_dir="sim_saves",
|
||||||
|
results_dir="sim_results")
|
||||||
|
|
||||||
|
unfinished_sims = sim_mgr.get_unfinished()
|
||||||
|
if len(unfinished_sims) > 0:
|
||||||
|
sim_mgr.load_unfinished(unfinished_sims[0])
|
||||||
|
else:
|
||||||
|
configure_new_simulation(sim_mgr=sim_mgr, code_name=code_name,
|
||||||
|
sim_name=sim_name)
|
||||||
|
|
||||||
|
sim_mgr.simulate()
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
sw/simulate_gradient.py (new file, 85 lines)
@@ -0,0 +1,85 @@
|
|||||||
|
import numpy as np
|
||||||
|
import pandas as pd
|
||||||
|
import seaborn as sns
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
import signal
|
||||||
|
from timeit import default_timer
|
||||||
|
from tqdm import tqdm
|
||||||
|
|
||||||
|
from utility import codes, noise, misc
|
||||||
|
from utility.simulation.simulators import GenericMultithreadedSimulator
|
||||||
|
|
||||||
|
# from cpp_modules.cpp_decoders import ProximalDecoder
|
||||||
|
from cpp_modules.cpp_decoders import ProximalDecoder_204_102 as ProximalDecoder
|
||||||
|
|
||||||
|
|
||||||
|
def simulate(H_file, SNR, omega, K, gamma):
|
||||||
|
H = codes.read_alist_file(f"res/{H_file}")
|
||||||
|
n_min_k, n = H.shape
|
||||||
|
k = n - n_min_k
|
||||||
|
|
||||||
|
decoder = ProximalDecoder(H.astype('int32'), K=K, omega=omega, gamma=gamma)
|
||||||
|
|
||||||
|
c = np.zeros(n)
|
||||||
|
x_bpsk = (c + 1)
|
||||||
|
|
||||||
|
avg_grad_values = np.zeros(shape=(K, 2))
|
||||||
|
|
||||||
|
for i in range(1000):
|
||||||
|
x = noise.add_awgn(x_bpsk, SNR, n, k)
|
||||||
|
grad_values = decoder.get_gradient_values(x)
|
||||||
|
|
||||||
|
for j, (val_h, val_l) in enumerate(grad_values):
|
||||||
|
avg_grad_values[j, 0] += val_h
|
||||||
|
avg_grad_values[j, 1] += val_l
|
||||||
|
|
||||||
|
avg_grad_values = avg_grad_values / 1000
|
||||||
|
|
||||||
|
return avg_grad_values
|
||||||
|
|
||||||
|
|
||||||
|
def reformat_data(results):
|
||||||
|
return pd.DataFrame({"k": np.arange(0, results.size // 2, 1), "grad_h": results[:, 0], "grad_l": results[:, 1]})
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
# Set up simulation params
|
||||||
|
|
||||||
|
sim_name = "avg_grad_1dB"
|
||||||
|
|
||||||
|
# H_file = "96.3.965.alist"
|
||||||
|
H_file = "204.33.486.alist"
|
||||||
|
# H_file = "204.33.484.alist"
|
||||||
|
# H_file = "204.55.187.alist"
|
||||||
|
# H_file = "408.33.844.alist"
|
||||||
|
# H_file = "BCH_7_4.alist"
|
||||||
|
# H_file = "BCH_31_11.alist"
|
||||||
|
# H_file = "BCH_31_26.alist"
|
||||||
|
|
||||||
|
SNR = 1
|
||||||
|
omega = 0.05
|
||||||
|
K = 100
|
||||||
|
gamma = 0.05
|
||||||
|
|
||||||
|
# Run simulation
|
||||||
|
|
||||||
|
start_time = default_timer()
|
||||||
|
results = simulate(H_file, SNR, omega, K, gamma)
|
||||||
|
end_time = default_timer()
|
||||||
|
|
||||||
|
print(f"duration: {end_time - start_time}")
|
||||||
|
|
||||||
|
df = reformat_data(results)
|
||||||
|
|
||||||
|
df.to_csv(
|
||||||
|
f"sim_results/{sim_name}_{misc.slugify(H_file)}.csv", index=False)
|
||||||
|
|
||||||
|
sns.set_theme()
|
||||||
|
sns.lineplot(data=df, x="k", y="grad_h")
|
||||||
|
sns.lineplot(data=df, x="k", y="grad_l")
|
||||||
|
|
||||||
|
plt.show()
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
@@ -5,6 +5,7 @@ from decoders import proximal
 
 class CheckParityTestCase(unittest.TestCase):
     """Test case for the check_parity function."""
+
     def test_check_parity(self):
         # Hamming(7,4) code
         G = np.array([[1, 1, 1, 0, 0, 0, 0],
@@ -19,7 +20,7 @@ class CheckParityTestCase(unittest.TestCase):
                       [0, 0, 0, 0, 0, 1, 0],
                       [0, 0, 0, 0, 0, 0, 1]])
 
-        decoder = proximal.ProximalDecoder(H, R)
+        decoder = proximal.ProximalDecoder(H)
 
         d1 = np.array([0, 1, 0, 1])
         c1 = np.dot(np.transpose(G), d1) % 2
@@ -39,7 +40,9 @@ class CheckParityTestCase(unittest.TestCase):
 
 
 class GradientTestCase(unittest.TestCase):
-    """Test case for the calculation of the gradient of the code-constraint-polynomial."""
+    """Test case for the calculation of the gradient of the
+    code-constraint-polynomial."""
+
     def test_grad_h(self):
         """Test the gradient of the code-constraint polynomial."""
         # Hamming(7,4) code
@@ -56,9 +59,10 @@ class GradientTestCase(unittest.TestCase):
                       [0, 0, 0, 0, 0, 0, 1]])
 
         x = np.array([1, 2, -1, -2, 2, 1, -1])  # Some randomly chosen vector
-        expected_grad_h = np.array([4, 26, -8, -36, 38, 28, -32])  # Manually calculated result
+        expected_grad_h = np.array(
+            [4, 26, -8, -36, 38, 28, -32])  # Manually calculated result
 
-        decoder = proximal.ProximalDecoder(H, R)
+        decoder = proximal.ProximalDecoder(H)
         grad_h = decoder._grad_h(x)
 
         self.assertEqual(np.array_equal(grad_h, expected_grad_h), True)
@ -1,25 +1,7 @@
|
|||||||
import unittest
|
import unittest
|
||||||
import numpy as np
|
import numpy as np
|
||||||
|
|
||||||
from utility import simulation, noise, codes
|
from utility import noise, codes
|
||||||
|
|
||||||
|
|
||||||
class CountBitErrorsTestCase(unittest.TestCase):
|
|
||||||
"""Test case for bit error counting."""
|
|
||||||
|
|
||||||
def test_count_bit_errors(self):
|
|
||||||
d1 = np.array([0, 0, 0, 0])
|
|
||||||
y_hat1 = np.array([0, 1, 0, 1])
|
|
||||||
|
|
||||||
d2 = np.array([0, 0, 0, 0])
|
|
||||||
y_hat2 = np.array([0, 0, 0, 0])
|
|
||||||
|
|
||||||
d3 = np.array([0, 0, 0, 0])
|
|
||||||
y_hat3 = np.array([1, 1, 1, 1])
|
|
||||||
|
|
||||||
self.assertEqual(simulation.count_bit_errors(d1, y_hat1), 2)
|
|
||||||
self.assertEqual(simulation.count_bit_errors(d2, y_hat2), 0)
|
|
||||||
self.assertEqual(simulation.count_bit_errors(d3, y_hat3), 4)
|
|
||||||
|
|
||||||
|
|
||||||
# TODO: Rewrite tests for new SNR calculation
|
# TODO: Rewrite tests for new SNR calculation
|
||||||
|
|||||||
@ -1,5 +1,8 @@
|
|||||||
import unicodedata
|
import unicodedata
|
||||||
import re
|
import re
|
||||||
|
import typing
|
||||||
|
import pandas as pd
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
|
||||||
def slugify(value, allow_unicode=False):
|
def slugify(value, allow_unicode=False):
|
||||||
@ -20,3 +23,50 @@ def slugify(value, allow_unicode=False):
|
|||||||
'ascii')
|
'ascii')
|
||||||
value = re.sub(r'[^\w\s-]', '', value.lower())
|
value = re.sub(r'[^\w\s-]', '', value.lower())
|
||||||
return re.sub(r'[-\s]+', '-', value).strip('-_')
|
return re.sub(r'[-\s]+', '-', value).strip('-_')
|
||||||
|
|
||||||
|
|
||||||
|
def pgf_reformat_data_3d(results: typing.Sequence, x_param_name: str,
|
||||||
|
y_param_name: str,
|
||||||
|
z_param_names: typing.Sequence[str]):
|
||||||
|
"""Reformat the results obtained from the GenericMultithreadedSimulator
|
||||||
|
into a form usable by pgfplots.
|
||||||
|
|
||||||
|
:param results: Results from GenericMultiThreadedSimulator
|
||||||
|
(dict of the form {params1: results1, params2: results2, ...}),
|
||||||
|
where resultsN and paramsN are themselves dicts:
|
||||||
|
paramsN = {param_name_1: val, param_name_2: val, ...}
|
||||||
|
resultsN = {result_name_1: val, result_name_2: val, ...}
|
||||||
|
:param x_param_name:
|
||||||
|
:param y_param_name:
|
||||||
|
:param z_param_names:
|
||||||
|
:return: pandas DataFrame of the following form:
|
||||||
|
{x_param_name: [x1, x1, x1, ..., x2, x2, x2, ...],
|
||||||
|
y_param_name: [y1, y2, y3, ..., y1, y2, y3, ...],
|
||||||
|
z_param_name: [z11, z21, z31, ..., z12, z22, z32, ...]}
|
||||||
|
"""
|
||||||
|
# Create result variables
|
||||||
|
x = np.zeros(len(results))
|
||||||
|
y = np.zeros(len(results))
|
||||||
|
zs = {name: np.zeros(len(results)) for name in z_param_names}
|
||||||
|
|
||||||
|
# Populate result variables
|
||||||
|
for i, (params, result) in enumerate(results.items()):
|
||||||
|
x_val = params[x_param_name]
|
||||||
|
y_val = params[y_param_name]
|
||||||
|
for z_param_name in z_param_names:
|
||||||
|
zs[z_param_name][i] = result[z_param_name]
|
||||||
|
|
||||||
|
x[i] = x_val
|
||||||
|
y[i] = y_val
|
||||||
|
|
||||||
|
# Create and return pandas DataFrame
|
||||||
|
df = pd.DataFrame({x_param_name: x, y_param_name: y})
|
||||||
|
for z_param_name in z_param_names:
|
||||||
|
df[z_param_name] = zs[z_param_name]
|
||||||
|
|
||||||
|
return df.sort_values(by=[x_param_name, y_param_name])
|
||||||
|
|
||||||
|
|
||||||
|
def count_bit_errors(x: np.array, x_hat: np.array) -> int:
|
||||||
|
"""Count the number of different bits between two words."""
|
||||||
|
return np.sum(x != x_hat)
|
||||||
|
|||||||
@ -1,468 +0,0 @@
|
|||||||
"""This file contains utility functions relating to tests and simulations of
|
|
||||||
the decoders."""
|
|
||||||
import json
|
|
||||||
import pandas as pd
|
|
||||||
import numpy as np
|
|
||||||
import typing
|
|
||||||
from tqdm import tqdm
|
|
||||||
import signal
|
|
||||||
import pickle
|
|
||||||
import os
|
|
||||||
from pathlib import Path
|
|
||||||
import platform
|
|
||||||
from datetime import datetime
|
|
||||||
import timeit
|
|
||||||
|
|
||||||
from utility import noise, misc
|
|
||||||
|
|
||||||
|
|
||||||
def count_bit_errors(d: np.array, d_hat: np.array) -> int:
|
|
||||||
"""Count the number of wrong bits in a decoded codeword.
|
|
||||||
|
|
||||||
:param d: Originally sent data
|
|
||||||
:param d_hat: Received data
|
|
||||||
:return: Number of bit errors
|
|
||||||
"""
|
|
||||||
return np.sum(d != d_hat)
|
|
||||||
|
|
||||||
|
|
||||||
# TODO: Write unit tests
|
|
||||||
class Simulator:
|
|
||||||
"""Class allowing for saving of simulations state.
|
|
||||||
|
|
||||||
Given a list of decoders, this class allows for simulating the
|
|
||||||
Bit-Error-Rates of each decoder for various SNRs.
|
|
||||||
|
|
||||||
The functionality implemented by this class could be achieved by a bunch
|
|
||||||
of loops and a function. However, storing the state of the simulation as
|
|
||||||
member variables allows for pausing and resuming the simulation at a
|
|
||||||
later time.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, n: int, k: int,
|
|
||||||
decoders: typing.Sequence[typing.Any],
|
|
||||||
SNRs: typing.Sequence[float],
|
|
||||||
target_frame_errors: int):
|
|
||||||
"""Construct and object of type simulator.
|
|
||||||
|
|
||||||
:param n: Number of bits in a codeword
|
|
||||||
:param k: Number of bits in a dataword
|
|
||||||
:param decoders: Sequence of decoders to test
|
|
||||||
:param SNRs: Sequence of SNRs for which the BERs should be calculated
|
|
||||||
:param target_frame_errors: Number of frame errors after which to
|
|
||||||
stop the simulation
|
|
||||||
"""
|
|
||||||
# Simulation parameters
|
|
||||||
|
|
||||||
self._n = n
|
|
||||||
self._k = k
|
|
||||||
self._decoders = decoders
|
|
||||||
self._SNRs = SNRs
|
|
||||||
self._target_frame_errors = target_frame_errors
|
|
||||||
|
|
||||||
self._x = np.zeros(self._n)
|
|
||||||
self._x_bpsk = 1 - 2 * self._x # Map x from [0, 1]^n to [-1, 1]^n
|
|
||||||
|
|
||||||
# Simulation state
|
|
||||||
|
|
||||||
self._current_decoder_index = 0
|
|
||||||
self._current_SNRs_index = 0
|
|
||||||
|
|
||||||
self._curr_num_frame_errors = 0
|
|
||||||
self._curr_num_bit_errors = 0
|
|
||||||
self._curr_num_iterations = 0
|
|
||||||
|
|
||||||
# Results & Miscellaneous
|
|
||||||
|
|
||||||
self._BERs = [[]]
|
|
||||||
|
|
||||||
self._create_pbars()
|
|
||||||
|
|
||||||
self._sim_running = False
|
|
||||||
|
|
||||||
def _create_pbars(self):
|
|
||||||
self._overall_pbar = tqdm(total=len(self._decoders),
|
|
||||||
desc="Calculating the answer to life, "
|
|
||||||
"the universe and everything",
|
|
||||||
leave=False,
|
|
||||||
bar_format="{l_bar}{bar}| {n_fmt}/{"
|
|
||||||
"total_fmt}")
|
|
||||||
|
|
||||||
decoder = self._decoders[self._current_decoder_index]
|
|
||||||
self._decoder_pbar = tqdm(total=len(self._SNRs),
|
|
||||||
desc=f"Calculatin"
|
|
||||||
f"g BERs"
|
|
||||||
f" for {decoder.__class__.__name__}",
|
|
||||||
leave=False,
|
|
||||||
bar_format="{l_bar}{bar}| {n_fmt}/{"
|
|
||||||
"total_fmt}")
|
|
||||||
|
|
||||||
self._snr_pbar = tqdm(total=self._target_frame_errors,
|
|
||||||
desc=f"Simulating for SNR = {self._SNRs[0]} dB",
|
|
||||||
leave=False,
|
|
||||||
bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} "
|
|
||||||
"[{elapsed}<{remaining}]")
|
|
||||||
|
|
||||||
def __getstate__(self) -> typing.Dict:
|
|
||||||
"""Custom serialization function called by the 'pickle' module
|
|
||||||
when saving the state of a currently running simulation
|
|
||||||
"""
|
|
||||||
state = self.__dict__.copy()
|
|
||||||
del state['_overall_pbar']
|
|
||||||
del state['_decoder_pbar']
|
|
||||||
del state['_snr_pbar']
|
|
||||||
return state
|
|
||||||
|
|
||||||
def __setstate__(self, state) -> None:
|
|
||||||
"""Custom deserialization function called by the 'pickle' module
|
|
||||||
when loading a previously saved simulation
|
|
||||||
|
|
||||||
:param state: Dictionary storing the serialized version of an object
|
|
||||||
of this class
|
|
||||||
"""
|
|
||||||
self.__dict__.update(state)
|
|
||||||
|
|
||||||
self._create_pbars()
|
|
||||||
|
|
||||||
self._overall_pbar.update(self._current_decoder_index)
|
|
||||||
self._decoder_pbar.update(self._current_SNRs_index)
|
|
||||||
self._snr_pbar.update(self._curr_num_frame_errors)
|
|
||||||
|
|
||||||
self._overall_pbar.refresh()
|
|
||||||
self._decoder_pbar.refresh()
|
|
||||||
self._snr_pbar.refresh()
|
|
||||||
|
|
||||||
def _simulate_transmission(self) -> int:
|
|
||||||
"""Simulate the transmission of a single codeword.
|
|
||||||
|
|
||||||
:return: Number of bit errors that occurred
|
|
||||||
"""
|
|
||||||
SNR = self._SNRs[self._current_SNRs_index]
|
|
||||||
decoder = self._decoders[self._current_decoder_index]
|
|
||||||
|
|
||||||
y = noise.add_awgn(self._x_bpsk, SNR, self._n, self._k)
|
|
||||||
x_hat = decoder.decode(y)
|
|
||||||
|
|
||||||
return count_bit_errors(self._x, x_hat)
|
|
||||||
|
|
||||||
def _update_statistics(self, bit_errors: int) -> None:
|
|
||||||
"""Update the statistics of the simulator.
|
|
||||||
|
|
||||||
:param bit_errors: Number of bit errors that occurred during the
|
|
||||||
last transmission
|
|
||||||
"""
|
|
||||||
self._curr_num_iterations += 1
|
|
||||||
|
|
||||||
if bit_errors > 0:
|
|
||||||
self._curr_num_frame_errors += 1
|
|
||||||
self._curr_num_bit_errors += bit_errors
|
|
||||||
|
|
||||||
self._snr_pbar.update(1)
|
|
||||||
|
|
||||||
def _advance_state(self) -> None:
|
|
||||||
"""Advance the state of the simulator.
|
|
||||||
|
|
||||||
This function also appends a new BER value to the self._BERs array
|
|
||||||
if the number of target frame errors has been reached
|
|
||||||
"""
|
|
||||||
if self._curr_num_frame_errors >= self._target_frame_errors:
|
|
||||||
self._BERs[self._current_decoder_index] \
|
|
||||||
.append(self._curr_num_bit_errors / (
|
|
||||||
self._curr_num_iterations * self._n))
|
|
||||||
|
|
||||||
self._curr_num_frame_errors = 0
|
|
||||||
self._curr_num_bit_errors = 0
|
|
||||||
self._curr_num_iterations = 0
|
|
||||||
|
|
||||||
if self._current_SNRs_index < len(self._SNRs) - 1:
|
|
||||||
self._current_SNRs_index += 1
|
|
||||||
|
|
||||||
self._snr_pbar.reset()
|
|
||||||
self._snr_pbar.set_description(
|
|
||||||
f"Simulating for SNR = "
|
|
||||||
f"{self._SNRs[self._current_SNRs_index]} dB")
|
|
||||||
self._decoder_pbar.update(1)
|
|
||||||
else:
|
|
||||||
if self._current_decoder_index < len(self._decoders) - 1:
|
|
||||||
self._current_decoder_index += 1
|
|
||||||
self._current_SNRs_index = 0
|
|
||||||
self._BERs.append([])
|
|
||||||
|
|
||||||
self._decoder_pbar.reset()
|
|
||||||
decoder = self._decoders[self._current_decoder_index]
|
|
||||||
self._decoder_pbar.set_description(
|
|
||||||
f"Calculating BERs for {decoder.__class__.__name__}")
|
|
||||||
self._overall_pbar.update(1)
|
|
||||||
else:
|
|
||||||
self._sim_running = False
|
|
||||||
|
|
||||||
self._snr_pbar.close()
|
|
||||||
self._decoder_pbar.close()
|
|
||||||
self._overall_pbar.close()
|
|
||||||
|
|
||||||
def start(self) -> None:
|
|
||||||
"""Start the simulation.
|
|
||||||
|
|
||||||
This is a blocking call. A call to the stop() function
|
|
||||||
from another thread will stop this function.
|
|
||||||
"""
|
|
||||||
self._sim_running = True
|
|
||||||
|
|
||||||
while self._sim_running:
|
|
||||||
bit_errors = self._simulate_transmission()
|
|
||||||
self._update_statistics(bit_errors)
|
|
||||||
self._advance_state()
|
|
||||||
|
|
||||||
def stop(self) -> None:
|
|
||||||
"""Stop the simulation."""
|
|
||||||
self._sim_running = False
|
|
||||||
|
|
||||||
def get_current_results(self) -> pd.DataFrame:
|
|
||||||
"""Get the current results.
|
|
||||||
|
|
||||||
If the simulation has not yet completed, the BERs which have not yet
|
|
||||||
been calculated are set to 0.
|
|
||||||
|
|
||||||
:return: pandas Dataframe with the columns ["SNR", "BER_1", "BER_2",
|
|
||||||
...]
|
|
||||||
"""
|
|
||||||
data = {"SNR": np.array(self._SNRs)}
|
|
||||||
|
|
||||||
# If the BERs of a decoder have not been calculated for all SNRs,
|
|
||||||
# fill the rest up with zeros to match the length of the 'SNRs' array
|
|
||||||
for i, decoder_BER_list in enumerate(self._BERs):
|
|
||||||
padded = np.pad(decoder_BER_list,
|
|
||||||
(0, len(self._SNRs) - len(decoder_BER_list)))
|
|
||||||
data[f"BER_{i}"] = padded
|
|
||||||
|
|
||||||
# If the BERs have not been calculated for all decoders, fill up the
|
|
||||||
# BERs list
|
|
||||||
# with zero-vectors to match the length of the 'decoders' list
|
|
||||||
for i in range(len(self._decoders), len(self._BERs)):
|
|
||||||
data[f"BER_{i}"] = np.zeros(len(self._SNRs))
|
|
||||||
|
|
||||||
return pd.DataFrame(data)
|
|
||||||
|
|
||||||
|
|
||||||
class SimulationDeSerializer:
|
|
||||||
"""Class responsible for file management, de- and serialization of
|
|
||||||
Simulator objects."""
|
|
||||||
|
|
||||||
def __init__(self, save_dir: str, results_dir: str):
|
|
||||||
self._saves_dir = save_dir
|
|
||||||
self._results_dir = results_dir
|
|
||||||
|
|
||||||
Path(self._saves_dir).mkdir(parents=True, exist_ok=True)
|
|
||||||
Path(self._results_dir).mkdir(parents=True, exist_ok=True)
|
|
||||||
|
|
||||||
def _get_savefile_path(self, sim_name):
|
|
||||||
return f"{self._saves_dir}/{misc.slugify(sim_name)}_state.pickle"
|
|
||||||
|
|
||||||
def _get_metadata_path(self, sim_name):
|
|
||||||
return f"{self._results_dir}/{misc.slugify(sim_name)}_metadata.json"
|
|
||||||
|
|
||||||
def _get_results_path(self, sim_name):
|
|
||||||
return f"{self._results_dir}/{misc.slugify(sim_name)}.csv"
|
|
||||||
|
|
||||||
def _read_metadata(self, sim_name) -> typing.Dict:
|
|
||||||
with open(self._get_metadata_path(sim_name), 'r',
|
|
||||||
encoding='utf-8') as f:
|
|
||||||
return json.load(f)
|
|
||||||
|
|
||||||
def _save_metadata(self, sim_name, metadata) -> None:
|
|
||||||
with open(self._get_metadata_path(sim_name), 'w+',
|
|
||||||
encoding='utf-8') as f:
|
|
||||||
json.dump(metadata, f, ensure_ascii=False, indent=4)
|
|
||||||
|
|
||||||
def unfinished_sim_present(self, sim_name: str):
|
|
||||||
"""Check if the savefile of a previously paused simulation is
|
|
||||||
present.
|
|
||||||
|
|
||||||
:param sim_name: Name
|
|
||||||
:return: True if a paused simulation with the given name is found
|
|
||||||
"""
|
|
||||||
return os.path.isfile(
|
|
||||||
self._get_savefile_path(sim_name)) and os.path.isfile(
|
|
||||||
self._get_metadata_path(sim_name))
|
|
||||||
|
|
||||||
# TODO: Make the directories configurable in the init function
|
|
||||||
def get_unfinished_sims(self) -> typing.List[str]:
|
|
||||||
"""Get a list unfinished simulations."""
|
|
||||||
save_files = [f for f in os.listdir(self._saves_dir) if
|
|
||||||
os.path.isfile(os.path.join(self._saves_dir, f))]
|
|
||||||
|
|
||||||
state_files = [f for f in save_files if f.endswith("_state.pickle")]
|
|
||||||
sim_slugs = [f.removesuffix("_state.pickle") for f in state_files]
|
|
||||||
|
|
||||||
sim_names = [self._read_metadata(slug)["name"] for slug in sim_slugs]
|
|
||||||
|
|
||||||
return sim_names
|
|
||||||
|
|
||||||
def remove_unfinished_sim(self, sim_name: str):
|
|
||||||
"""Remove the savefile of a previously paused simulation.
|
|
||||||
|
|
||||||
:param sim_name: Name of the simulation
|
|
||||||
"""
|
|
||||||
os.remove(self._get_savefile_path(sim_name))
|
|
||||||
# os.remove(self._get_metadata_path(sim_name))
|
|
||||||
|
|
||||||
def save_state(self, simulator: typing.Any, sim_name: str,
|
|
||||||
metadata: typing.Dict) -> None:
|
|
||||||
"""Save the state of a currently running simulation.
|
|
||||||
|
|
||||||
:param simulator: Simulator object
|
|
||||||
:param sim_name: Name of the simulation
|
|
||||||
:param metadata: Metadata to be saved besides the actual state
|
|
||||||
"""
|
|
||||||
# Save metadata
|
|
||||||
self._save_metadata(sim_name, metadata)
|
|
||||||
|
|
||||||
# Save simulation state
|
|
||||||
with open(self._get_savefile_path(sim_name), "wb") as file:
|
|
||||||
pickle.dump(simulator, file)
|
|
||||||
|
|
||||||
def read_state(self, sim_name: str) -> typing.Tuple[
|
|
||||||
typing.Any, typing.Dict]:
|
|
||||||
"""Read the saved state of a paused simulation.
|
|
||||||
|
|
||||||
:param sim_name: Name of the simulation
|
|
||||||
:return: Tuple of the form (simulator, metadata)
|
|
||||||
"""
|
|
||||||
# Read metadata
|
|
||||||
metadata = self._read_metadata(sim_name)
|
|
||||||
|
|
||||||
# Read simulation state
|
|
||||||
simulator = None
|
|
||||||
with open(self._get_savefile_path(sim_name), "rb") as file:
|
|
||||||
simulator = pickle.load(file)
|
|
||||||
|
|
||||||
return simulator, metadata
|
|
||||||
|
|
||||||
# TODO: Is the simulator object actually necessary here?
|
|
||||||
def save_results(self, simulator: typing.Any, sim_name: str,
|
|
||||||
metadata: typing.Dict) -> None:
|
|
||||||
"""Save simulation results to file.
|
|
||||||
|
|
||||||
:param simulator: Simulator object. Used to obtain the data
|
|
||||||
:param sim_name: Name of the simulation. Determines the filename
|
|
||||||
:param metadata: Metadata to be saved besides the actual simulation
|
|
||||||
results
|
|
||||||
"""
|
|
||||||
# Save metadata
|
|
||||||
self._save_metadata(sim_name, metadata)
|
|
||||||
|
|
||||||
# Save results
|
|
||||||
df = simulator.get_current_results()
|
|
||||||
df.to_csv(self._get_results_path(sim_name))
|
|
||||||
|
|
||||||
def read_results(self, sim_name: str) -> typing.Tuple[
|
|
||||||
pd.DataFrame, typing.Dict]:
|
|
||||||
"""Read simulation results from file.
|
|
||||||
|
|
||||||
:param sim_name: Name of the simulation.
|
|
||||||
:return: Tuple of the form (data, metadata), where data is a pandas
|
|
||||||
dataframe and metadata is a dict
|
|
||||||
"""
|
|
||||||
# Read metadata
|
|
||||||
metadata = self._read_metadata(sim_name)
|
|
||||||
|
|
||||||
# Read results
|
|
||||||
results = pd.read_csv(self._get_results_path(sim_name))
|
|
||||||
|
|
||||||
return results, metadata
|
|
||||||
|
|
||||||
|
|
||||||
# TODO: Autosave simulation every so often
|
|
||||||
# TODO: Comment explaining what a Simulator class is
|
|
||||||
class SimulationManager:
|
|
||||||
"""This class only contains functions relating to stopping and
|
|
||||||
restarting of simulations (and storing of the simulation state in a
|
|
||||||
file, to be resumed at a later date).
|
|
||||||
|
|
||||||
All actual work is outsourced to a provided simulator class.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, saves_dir: str, results_dir: str):
|
|
||||||
"""Construct a SimulationManager object.
|
|
||||||
|
|
||||||
:param saves_dir: Directory in which the simulation state of a paused
|
|
||||||
simulation should be stored
|
|
||||||
:param results_dir: Directory in which the results of the simulation
|
|
||||||
should be stored
|
|
||||||
"""
|
|
||||||
self._de_serializer = SimulationDeSerializer(saves_dir, results_dir)
|
|
||||||
|
|
||||||
self._simulator = None
|
|
||||||
self._sim_name = None
|
|
||||||
self._metadata = {"duration": 0}
|
|
||||||
self._sim_start_time = None
|
|
||||||
|
|
||||||
signal.signal(signal.SIGINT, self._exit_gracefully)
|
|
||||||
signal.signal(signal.SIGTERM, self._exit_gracefully)
|
|
||||||
signal.signal(signal.SIGHUP, self._exit_gracefully)
|
|
||||||
|
|
||||||
def _sim_configured(self) -> bool:
|
|
||||||
"""Check whether 'configure_simulation()' has been called."""
|
|
||||||
return (self._simulator is not None) and (
|
|
||||||
self._sim_name is not None) and (
|
|
||||||
self._metadata is not None)
|
|
||||||
|
|
||||||
def configure_simulation(self, simulator: typing.Any, name: str,
|
|
||||||
column_labels: typing.Sequence[str]) -> None:
|
|
||||||
"""Configure a new simulation."""
|
|
||||||
self._simulator = simulator
|
|
||||||
self._sim_name = name
|
|
||||||
self._metadata["name"] = name
|
|
||||||
self._metadata["labels"] = column_labels
|
|
||||||
self._metadata["platform"] = platform.platform()
|
|
||||||
|
|
||||||
def get_unfinished(self) -> typing.List[str]:
|
|
||||||
"""Get a list of names of all present unfinished simulations."""
|
|
||||||
return self._de_serializer.get_unfinished_sims()
|
|
||||||
|
|
||||||
def load_unfinished(self, sim_name: str) -> None:
|
|
||||||
"""Load the state of an unfinished simulation form its savefile.
|
|
||||||
|
|
||||||
Warning: This function deletes the savefile after loading.
|
|
||||||
"""
|
|
||||||
assert self._de_serializer.unfinished_sim_present(sim_name)
|
|
||||||
|
|
||||||
self._sim_name = sim_name
|
|
||||||
self._simulator, self._metadata = self._de_serializer.read_state(
|
|
||||||
sim_name)
|
|
||||||
|
|
||||||
self._de_serializer.remove_unfinished_sim(sim_name)
|
|
||||||
|
|
||||||
# TODO: Metadata is being written twice here. Should save_results() also
|
|
||||||
# save the metadata?
|
|
||||||
def _exit_gracefully(self, *args) -> None:
|
|
||||||
"""Handler called when the program is interrupted. Pauses and saves
|
|
||||||
the currently running simulation."""
|
|
||||||
if self._sim_configured():
|
|
||||||
self._simulator.stop()
|
|
||||||
|
|
||||||
self._metadata["end_time"] = f"{datetime.now(tz=None)}"
|
|
||||||
self._metadata["duration"] \
|
|
||||||
+= timeit.default_timer() - self._sim_start_time
|
|
||||||
|
|
||||||
self._de_serializer.save_state(self._simulator, self._sim_name,
|
|
||||||
self._metadata)
|
|
||||||
self._de_serializer.save_results(self._simulator, self._sim_name,
|
|
||||||
self._metadata)
|
|
||||||
|
|
||||||
exit()
|
|
||||||
|
|
||||||
def simulate(self) -> None:
|
|
||||||
"""Start the simulation. This is a blocking call."""
|
|
||||||
assert self._sim_configured()
|
|
||||||
|
|
||||||
self._sim_start_time = timeit.default_timer()
|
|
||||||
|
|
||||||
self._simulator.start()
|
|
||||||
|
|
||||||
self._metadata["end_time"] = f"{datetime.now(tz=None)}"
|
|
||||||
self._metadata["duration"] \
|
|
||||||
+= timeit.default_timer() - self._sim_start_time
|
|
||||||
|
|
||||||
self._de_serializer.save_results(self._simulator, self._sim_name,
|
|
||||||
self._metadata)
|
|
||||||
77
sw/utility/simulation/__init__.py
Normal file
77
sw/utility/simulation/__init__.py
Normal file
@ -0,0 +1,77 @@
|
|||||||
|
"""Simulation package.
|
||||||
|
|
||||||
|
This package provides a way to easily define simulations in such a way that
|
||||||
|
they can be paused and resumed.
|
||||||
|
|
||||||
|
General Structure
|
||||||
|
=================
|
||||||
|
The package consists of 3 main components:
|
||||||
|
- The 'SimulationDeSerializer': Responsible for file IO
|
||||||
|
- The 'Simulator': Responsible for the actual simulating
|
||||||
|
- The 'SimulationManager': Delegates work to the DeSerializer and the
|
||||||
|
Simulator
|
||||||
|
|
||||||
|
The Simulator Class
|
||||||
|
===================
|
||||||
|
For each new simulating task, a new 'Simulator' must be defined. The
|
||||||
|
requirements for this class are the following:
|
||||||
|
- Must define the 'start_or_continue()', 'stop()' and
|
||||||
|
'get_current_results()' functions
|
||||||
|
- Must be picklable in order to store the simulation state
|
||||||
|
|
||||||
|
An example simulator could look as follows:
|
||||||
|
----------------------------------------------------------------
|
||||||
|
class SomeSimulator:
|
||||||
|
def __init__(self, num_iterations):
|
||||||
|
self._num_iterations = num_iterations
|
||||||
|
self._current_iter = 0
|
||||||
|
|
||||||
|
self._simulation_running = False
|
||||||
|
|
||||||
|
self._results = pd.DataFrame()
|
||||||
|
|
||||||
|
def _perform_iteration(self):
|
||||||
|
# Perform iteration and append results
|
||||||
|
...
|
||||||
|
|
||||||
|
def start_or_continue(self) -> None:
|
||||||
|
self._simulation_running = True
|
||||||
|
|
||||||
|
while self._simulation_running and (
|
||||||
|
self._current_iter < self._num_iterations):
|
||||||
|
self._perform_iteration()
|
||||||
|
|
||||||
|
def stop(self) -> None:
|
||||||
|
self._simulation_running = False
|
||||||
|
|
||||||
|
def get_current_results(self) -> pd.DataFrame:
|
||||||
|
return self._results
|
||||||
|
----------------------------------------------------------------
|
||||||
|
|
||||||
|
Usage
|
||||||
|
=====
|
||||||
|
To start a new simulation:
|
||||||
|
----------------------------------------------------------------
|
||||||
|
sim_mgr = SimulationManager(results_dir="results", saves_dir="saves")
|
||||||
|
|
||||||
|
sim = SomeSimulator(num_iterations=100)
|
||||||
|
sim_mgr.configure_simulation(simulator=sim, name='Some Simulation', \
|
||||||
|
column_labels=['label1', 'label2'])
|
||||||
|
sim_mgr.start()
|
||||||
|
----------------------------------------------------------------
|
||||||
|
|
||||||
|
To check for a previously interrupted simulation and continue:
|
||||||
|
----------------------------------------------------------------
|
||||||
|
sim_mgr = SimulationManager(results_dir="results", saves_dir="saves")
|
||||||
|
|
||||||
|
unfinished_sims = sim_mgr.get_unfinished()
|
||||||
|
|
||||||
|
if len(unfinished_sims) > 0:
|
||||||
|
sim_mgr.load_unfinished(unfinished_sims[0])
|
||||||
|
sim_mgr.simulate()
|
||||||
|
----------------------------------------------------------------
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
from utility.simulation.management import SimulationManager, \
|
||||||
|
SimulationDeSerializer
|
||||||
239
sw/utility/simulation/management.py
Normal file
239
sw/utility/simulation/management.py
Normal file
@ -0,0 +1,239 @@
|
|||||||
|
import json
|
||||||
|
import pandas as pd
|
||||||
|
import typing
|
||||||
|
import signal
|
||||||
|
import pickle
|
||||||
|
import os
|
||||||
|
from pathlib import Path
|
||||||
|
import platform
|
||||||
|
from datetime import datetime
|
||||||
|
import timeit
|
||||||
|
import collections.abc
|
||||||
|
|
||||||
|
from utility import misc
|
||||||
|
|
||||||
|
|
||||||
|
class SimulationDeSerializer:
|
||||||
|
"""Class responsible for file management, de- and serialization of
|
||||||
|
Simulator objects."""
|
||||||
|
|
||||||
|
def __init__(self, save_dir: str, results_dir: str):
|
||||||
|
self._saves_dir = save_dir
|
||||||
|
self._results_dir = results_dir
|
||||||
|
|
||||||
|
Path(self._saves_dir).mkdir(parents=True, exist_ok=True)
|
||||||
|
Path(self._results_dir).mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
def _get_savefile_path(self, sim_name):
|
||||||
|
return f"{self._saves_dir}/{misc.slugify(sim_name)}_state.pickle"
|
||||||
|
|
||||||
|
def _get_metadata_path(self, sim_name):
|
||||||
|
return f"{self._results_dir}/{misc.slugify(sim_name)}_metadata.json"
|
||||||
|
|
||||||
|
def _get_results_path(self, sim_name):
|
||||||
|
return f"{self._results_dir}/{misc.slugify(sim_name)}.csv"
|
||||||
|
|
||||||
|
def _read_metadata(self, sim_name) -> typing.Dict:
|
||||||
|
with open(self._get_metadata_path(sim_name), 'r',
|
||||||
|
encoding='utf-8') as f:
|
||||||
|
return json.load(f)
|
||||||
|
|
||||||
|
def _save_metadata(self, sim_name, metadata) -> None:
|
||||||
|
with open(self._get_metadata_path(sim_name), 'w+',
|
||||||
|
encoding='utf-8') as f:
|
||||||
|
json.dump(metadata, f, ensure_ascii=False, indent=4)
|
||||||
|
|
||||||
|
def unfinished_sim_present(self, sim_name: str):
|
||||||
|
"""Check if the savefile of a previously paused simulation is
|
||||||
|
present.
|
||||||
|
|
||||||
|
:param sim_name: Name
|
||||||
|
:return: True if a paused simulation with the given name is found
|
||||||
|
"""
|
||||||
|
return os.path.isfile(
|
||||||
|
self._get_savefile_path(sim_name)) and os.path.isfile(
|
||||||
|
self._get_metadata_path(sim_name))
|
||||||
|
|
||||||
|
# TODO: Make the directories configurable in the init function
|
||||||
|
def get_unfinished_sims(self) -> typing.List[str]:
|
||||||
|
"""Get a list unfinished simulations."""
|
||||||
|
save_files = [f for f in os.listdir(self._saves_dir) if
|
||||||
|
os.path.isfile(os.path.join(self._saves_dir, f))]
|
||||||
|
|
||||||
|
state_files = [f for f in save_files if f.endswith("_state.pickle")]
|
||||||
|
sim_slugs = [f.removesuffix("_state.pickle") for f in state_files]
|
||||||
|
|
||||||
|
sim_names = [self._read_metadata(slug)["name"] for slug in sim_slugs]
|
||||||
|
|
||||||
|
return sim_names
|
||||||
|
|
||||||
|
def remove_unfinished_sim(self, sim_name: str):
|
||||||
|
"""Remove the savefile of a previously paused simulation.
|
||||||
|
|
||||||
|
:param sim_name: Name of the simulation
|
||||||
|
"""
|
||||||
|
os.remove(self._get_savefile_path(sim_name))
|
||||||
|
# os.remove(self._get_metadata_path(sim_name))
|
||||||
|
|
||||||
|
def save_state(self, simulator: typing.Any, sim_name: str,
|
||||||
|
metadata: typing.Dict) -> None:
|
||||||
|
"""Save the state of a currently running simulation.
|
||||||
|
|
||||||
|
:param simulator: Simulator object
|
||||||
|
:param sim_name: Name of the simulation
|
||||||
|
:param metadata: Metadata to be saved besides the actual state
|
||||||
|
"""
|
||||||
|
# Save metadata
|
||||||
|
self._save_metadata(sim_name, metadata)
|
||||||
|
|
||||||
|
# Save simulation state
|
||||||
|
with open(self._get_savefile_path(sim_name), "wb") as file:
|
||||||
|
pickle.dump(simulator, file)
|
||||||
|
|
||||||
|
def read_state(self, sim_name: str) -> typing.Tuple[
|
||||||
|
typing.Any, typing.Dict]:
|
||||||
|
"""Read the saved state of a paused simulation.
|
||||||
|
|
||||||
|
:param sim_name: Name of the simulation
|
||||||
|
:return: Tuple of the form (simulator, metadata)
|
||||||
|
"""
|
||||||
|
# Read metadata
|
||||||
|
metadata = self._read_metadata(sim_name)
|
||||||
|
|
||||||
|
# Read simulation state
|
||||||
|
simulator = None
|
||||||
|
with open(self._get_savefile_path(sim_name), "rb") as file:
|
||||||
|
simulator = pickle.load(file)
|
||||||
|
|
||||||
|
return simulator, metadata
|
||||||
|
|
||||||
|
# TODO: Is the simulator object actually necessary here?
|
||||||
|
def save_results(self, simulator: typing.Any, sim_name: str,
|
||||||
|
metadata: typing.Dict) -> None:
|
||||||
|
"""Save simulation results to file.
|
||||||
|
|
||||||
|
:param simulator: Simulator object. Used to obtain the data
|
||||||
|
:param sim_name: Name of the simulation. Determines the filename
|
||||||
|
:param metadata: Metadata to be saved besides the actual simulation
|
||||||
|
results
|
||||||
|
"""
|
||||||
|
# Save metadata
|
||||||
|
self._save_metadata(sim_name, metadata)
|
||||||
|
|
||||||
|
# Save current results
|
||||||
|
simulator.current_results.to_csv(self._get_results_path(sim_name),
|
||||||
|
index=False)
|
||||||
|
|
||||||
|
def read_results(self, sim_name: str) -> typing.Tuple[
|
||||||
|
pd.DataFrame, typing.Dict]:
|
||||||
|
"""Read simulation results from file.
|
||||||
|
|
||||||
|
:param sim_name: Name of the simulation.
|
||||||
|
:return: Tuple of the form (data, metadata), where data is a pandas
|
||||||
|
dataframe and metadata is a dict
|
||||||
|
"""
|
||||||
|
# Read metadata
|
||||||
|
metadata = self._read_metadata(sim_name)
|
||||||
|
|
||||||
|
# Read results
|
||||||
|
results = pd.read_csv(self._get_results_path(sim_name))
|
||||||
|
|
||||||
|
return results, metadata
|
||||||
|
|
||||||
|
|
||||||
|
# TODO: Autosave simulation every so often
|
||||||
|
# TODO: Comment explaining what a Simulator class is
|
||||||
|
class SimulationManager:
|
||||||
|
"""This class only contains functions relating to stopping and
|
||||||
|
restarting of simulations (and storing of the simulation state in a
|
||||||
|
file, to be resumed at a later date).
|
||||||
|
|
||||||
|
All actual work is outsourced to a provided simulator class.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, saves_dir: str, results_dir: str):
|
||||||
|
"""Construct a SimulationManager object.
|
||||||
|
|
||||||
|
:param saves_dir: Directory in which the simulation state of a paused
|
||||||
|
simulation should be stored
|
||||||
|
:param results_dir: Directory in which the results of the simulation
|
||||||
|
should be stored
|
||||||
|
"""
|
||||||
|
self._de_serializer = SimulationDeSerializer(saves_dir, results_dir)
|
||||||
|
|
||||||
|
self._simulator = None
|
||||||
|
self._sim_name = None
|
||||||
|
self._metadata = {"duration": 0}
|
||||||
|
self._sim_start_time = None
|
||||||
|
|
||||||
|
def _sim_configured(self) -> bool:
|
||||||
|
"""Check whether 'configure_simulation()' has been called."""
|
||||||
|
return (self._simulator is not None) and (
|
||||||
|
self._sim_name is not None) and (
|
||||||
|
self._metadata is not None)
|
||||||
|
|
||||||
|
def configure_simulation(self, simulator: typing.Any, name: str,
|
||||||
|
additional_metadata: dict = {}) -> None:
|
||||||
|
"""Configure a new simulation."""
|
||||||
|
self._simulator = simulator
|
||||||
|
self._sim_name = name
|
||||||
|
self._metadata["name"] = name
|
||||||
|
self._metadata["platform"] = platform.platform()
|
||||||
|
self._metadata.update(additional_metadata)
|
||||||
|
|
||||||
|
def get_unfinished(self) -> typing.List[str]:
|
||||||
|
"""Get a list of names of all present unfinished simulations."""
|
||||||
|
return self._de_serializer.get_unfinished_sims()
|
||||||
|
|
||||||
|
def load_unfinished(self, sim_name: str) -> None:
|
||||||
|
"""Load the state of an unfinished simulation form its savefile.
|
||||||
|
|
||||||
|
Warning: This function deletes the savefile after loading.
|
||||||
|
"""
|
||||||
|
assert self._de_serializer.unfinished_sim_present(sim_name)
|
||||||
|
|
||||||
|
self._sim_name = sim_name
|
||||||
|
self._simulator, self._metadata = self._de_serializer.read_state(
|
||||||
|
sim_name)
|
||||||
|
|
||||||
|
self._de_serializer.remove_unfinished_sim(sim_name)
|
||||||
|
|
||||||
|
# TODO: Metadata is being written twice here. Should save_results() also
|
||||||
|
# save the metadata?
|
||||||
|
def _exit_gracefully(self, *args) -> None:
|
||||||
|
"""Handler called when the program is interrupted. Pauses and saves
|
||||||
|
the currently running simulation."""
|
||||||
|
if self._sim_configured():
|
||||||
|
self._simulator.stop()
|
||||||
|
|
||||||
|
self._metadata["end_time"] = f"{datetime.now(tz=None)}"
|
||||||
|
self._metadata["duration"] \
|
||||||
|
+= timeit.default_timer() - self._sim_start_time
|
||||||
|
|
||||||
|
self._de_serializer.save_state(self._simulator, self._sim_name,
|
||||||
|
self._metadata)
|
||||||
|
self._de_serializer.save_results(self._simulator, self._sim_name,
|
||||||
|
self._metadata)
|
||||||
|
|
||||||
|
exit()
|
||||||
|
|
||||||
|
def simulate(self) -> None:
|
||||||
|
"""Start the simulation. This is a blocking call."""
|
||||||
|
assert self._sim_configured()
|
||||||
|
|
||||||
|
try:
|
||||||
|
self._sim_start_time = timeit.default_timer()
|
||||||
|
|
||||||
|
self._simulator.start_or_continue()
|
||||||
|
|
||||||
|
self._metadata["end_time"] = f"{datetime.now(tz=None)}"
|
||||||
|
self._metadata["duration"] \
|
||||||
|
+= timeit.default_timer() - self._sim_start_time
|
||||||
|
|
||||||
|
self._de_serializer.save_results(self._simulator, self._sim_name,
|
||||||
|
self._metadata)
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
self._exit_gracefully()
|
||||||
|
|
||||||
|
def get_current_results(self) -> pd.DataFrame:
|
||||||
|
return self._simulator.current_results
|
||||||
361
sw/utility/simulation/simulators.py
Normal file
361
sw/utility/simulation/simulators.py
Normal file
@ -0,0 +1,361 @@
|
|||||||
|
import pandas as pd
|
||||||
|
import numpy as np
|
||||||
|
import typing
|
||||||
|
from tqdm import tqdm
|
||||||
|
from concurrent.futures import ProcessPoolExecutor, process, wait
|
||||||
|
from functools import partial
|
||||||
|
from multiprocessing import Lock
|
||||||
|
|
||||||
|
from utility import noise
|
||||||
|
|
||||||
|
|
||||||
|
# TODO: Fix ProximalDecoder_Dynamic
|
||||||
|
# from cpp_modules.cpp_decoders import ProximalDecoder_Dynamic as
|
||||||
|
# ProximalDecoder
|
||||||
|
|
||||||
|
|
||||||
|
def count_bit_errors(d: np.array, d_hat: np.array) -> int:
|
||||||
|
"""Count the number of wrong bits in a decoded codeword.
|
||||||
|
|
||||||
|
:param d: Originally sent data
|
||||||
|
:param d_hat: Received data
|
||||||
|
:return: Number of bit errors
|
||||||
|
"""
|
||||||
|
return np.sum(d != d_hat)
|
||||||
|
|
||||||
|
|
||||||
|
# TODO: Write unit tests
|
||||||
|
class ProximalDecoderSimulator:
|
||||||
|
"""Class allowing for saving of simulations state.
|
||||||
|
|
||||||
|
Given a list of decoders, this class allows for simulating the
|
||||||
|
Bit-Error-Rates of each decoder for various SNRs.
|
||||||
|
|
||||||
|
The functionality implemented by this class could be achieved by a bunch
|
||||||
|
of loops and a function. However, storing the state of the simulation as
|
||||||
|
member variables allows for pausing and resuming the simulation at a
|
||||||
|
later time.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, n: int, k: int,
|
||||||
|
decoders: typing.Sequence[typing.Any],
|
||||||
|
SNRs: typing.Sequence[float],
|
||||||
|
target_frame_errors: int,
|
||||||
|
max_num_iterations: int):
|
||||||
|
"""Construct and object of type simulator.
|
||||||
|
|
||||||
|
:param n: Number of bits in a codeword
|
||||||
|
:param k: Number of bits in a dataword
|
||||||
|
:param decoders: Sequence of decoders to test
|
||||||
|
:param SNRs: Sequence of SNRs for which the BERs should be calculated
|
||||||
|
:param target_frame_errors: Number of frame errors after which to
|
||||||
|
stop the simulation
|
||||||
|
"""
|
||||||
|
# Simulation parameters
|
||||||
|
|
||||||
|
self._n = n
|
||||||
|
self._k = k
|
||||||
|
self._decoders = decoders
|
||||||
|
self._SNRs = SNRs
|
||||||
|
self._target_frame_errors = target_frame_errors
|
||||||
|
self._max_num_iterations = max_num_iterations
|
||||||
|
|
||||||
|
self._x = np.zeros(self._n)
|
||||||
|
self._x_bpsk = 1 - 2 * self._x # Map x from [0, 1]^n to [-1, 1]^n
|
||||||
|
|
||||||
|
# Simulation state
|
||||||
|
|
||||||
|
self._curr_decoder_index = 0
|
||||||
|
self._curr_SNRs_index = 0
|
||||||
|
|
||||||
|
self._curr_num_frame_errors = 0
|
||||||
|
self._curr_num_bit_errors = 0
|
||||||
|
self._curr_num_iterations = 0
|
||||||
|
self._curr_num_dec_fails = 0
|
||||||
|
|
||||||
|
# Results & Miscellaneous
|
||||||
|
|
||||||
|
self._BERs = [np.zeros(len(SNRs)) for i in range(len(decoders))]
|
||||||
|
self._dec_fails = [np.zeros(len(SNRs)) for i in range(len(decoders))]
|
||||||
|
self._avg_K = [np.zeros(len(SNRs)) for i in range(len(decoders))]
|
||||||
|
|
||||||
|
self._create_pbars()
|
||||||
|
|
||||||
|
self._sim_running = False
|
||||||
|
|
||||||
|
def _create_pbars(self):
|
||||||
|
self._overall_pbar = tqdm(total=len(self._decoders),
|
||||||
|
desc="Calculating the answer to life, "
|
||||||
|
"the universe and everything",
|
||||||
|
leave=False,
|
||||||
|
bar_format="{l_bar}{bar}| {n_fmt}/{"
|
||||||
|
"total_fmt} [{elapsed}]")
|
||||||
|
|
||||||
|
decoder = self._decoders[self._curr_decoder_index]
|
||||||
|
self._decoder_pbar = tqdm(total=len(self._SNRs),
|
||||||
|
desc=f"Calculating"
|
||||||
|
f"g BERs"
|
||||||
|
f" for {decoder.__class__.__name__}",
|
||||||
|
leave=False,
|
||||||
|
bar_format="{l_bar}{bar}| {n_fmt}/{"
|
||||||
|
"total_fmt}")
|
||||||
|
|
||||||
|
self._snr_pbar = tqdm(total=self._max_num_iterations,
|
||||||
|
desc=f"Simulating for SNR = {self._SNRs[0]} dB",
|
||||||
|
leave=False,
|
||||||
|
)
|
||||||
|
|
||||||
|
def __getstate__(self) -> typing.Dict:
|
||||||
|
"""Custom serialization function called by the 'pickle' module
|
||||||
|
when saving the state of a currently running simulation
|
||||||
|
"""
|
||||||
|
state = self.__dict__.copy()
|
||||||
|
del state['_overall_pbar']
|
||||||
|
del state['_decoder_pbar']
|
||||||
|
del state['_snr_pbar']
|
||||||
|
return state
|
||||||
|
|
||||||
|
def __setstate__(self, state) -> None:
|
||||||
|
"""Custom deserialization function called by the 'pickle' module
|
||||||
|
when loading a previously saved simulation
|
||||||
|
|
||||||
|
:param state: Dictionary storing the serialized version of an object
|
||||||
|
of this class
|
||||||
|
"""
|
||||||
|
self.__dict__.update(state)
|
||||||
|
|
||||||
|
self._create_pbars()
|
||||||
|
|
||||||
|
self._overall_pbar.update(self._curr_decoder_index)
|
||||||
|
self._decoder_pbar.update(self._curr_SNRs_index)
|
||||||
|
self._snr_pbar.update(self._curr_num_frame_errors)
|
||||||
|
|
||||||
|
self._overall_pbar.refresh()
|
||||||
|
self._decoder_pbar.refresh()
|
||||||
|
self._snr_pbar.refresh()
|
||||||
|
|
||||||
|
def _simulate_transmission(self) -> int:
|
||||||
|
"""Simulate the transmission of a single codeword.
|
||||||
|
|
||||||
|
:return: Number of bit errors that occurred
|
||||||
|
"""
|
||||||
|
SNR = self._SNRs[self._curr_SNRs_index]
|
||||||
|
decoder = self._decoders[self._curr_decoder_index]
|
||||||
|
|
||||||
|
y = noise.add_awgn(self._x_bpsk, SNR, self._n, self._k)
|
||||||
|
x_hat, K = decoder.decode(y)
|
||||||
|
|
||||||
|
# Handle decoding failure
|
||||||
|
if x_hat is not None:
|
||||||
|
self._avg_K[self._curr_decoder_index][self._curr_SNRs_index] += K
|
||||||
|
return count_bit_errors(self._x, x_hat)
|
||||||
|
else:
|
||||||
|
self._curr_num_dec_fails += 1
|
||||||
|
return 0
|
||||||
|
|
||||||
|
def _update_statistics(self, bit_errors: int) -> None:
|
||||||
|
"""Update the statistics of the simulator.
|
||||||
|
|
||||||
|
:param bit_errors: Number of bit errors that occurred during the
|
||||||
|
last transmission
|
||||||
|
"""
|
||||||
|
self._curr_num_iterations += 1
|
||||||
|
self._snr_pbar.update(1)
|
||||||
|
|
||||||
|
if bit_errors > 0:
|
||||||
|
self._curr_num_frame_errors += 1
|
||||||
|
self._curr_num_bit_errors += bit_errors
|
||||||
|
|
||||||
|
def _advance_state(self) -> None:
|
||||||
|
"""Advance the state of the simulator.
|
||||||
|
|
||||||
|
This function also handles setting the result arrays and progress bars.
|
||||||
|
"""
|
||||||
|
if (self._curr_num_frame_errors >= self._target_frame_errors) or (
|
||||||
|
self._curr_num_iterations > self._max_num_iterations):
|
||||||
|
|
||||||
|
# Adjust the number of iterations to ignore decoding failures
|
||||||
|
adj_num_iterations = self._curr_num_iterations - \
|
||||||
|
self._curr_num_dec_fails
|
||||||
|
|
||||||
|
if adj_num_iterations == 0:
|
||||||
|
self._BERs[self._curr_decoder_index][self._curr_SNRs_index] = 1
|
||||||
|
else:
|
||||||
|
self._BERs[self._curr_decoder_index][self._curr_SNRs_index] \
|
||||||
|
= self._curr_num_bit_errors / (
|
||||||
|
adj_num_iterations * self._n)
|
||||||
|
self._avg_K[self._curr_decoder_index][self._curr_SNRs_index] \
|
||||||
|
= \
|
||||||
|
self._avg_K[self._curr_decoder_index][
|
||||||
|
self._curr_SNRs_index] / adj_num_iterations
|
||||||
|
|
||||||
|
self._dec_fails[self._curr_decoder_index][self._curr_SNRs_index] \
|
||||||
|
= self._curr_num_dec_fails
|
||||||
|
|
||||||
|
self._curr_num_frame_errors = 0
|
||||||
|
self._curr_num_bit_errors = 0
|
||||||
|
self._curr_num_iterations = 0
|
||||||
|
self._curr_num_dec_fails = 0
|
||||||
|
|
||||||
|
if self._curr_SNRs_index < len(self._SNRs) - 1:
|
||||||
|
self._curr_SNRs_index += 1
|
||||||
|
|
||||||
|
self._snr_pbar.reset()
|
||||||
|
self._overall_pbar.refresh()
|
||||||
|
self._snr_pbar.set_description(
|
||||||
|
f"Simulating for SNR = "
|
||||||
|
f"{self._SNRs[self._curr_SNRs_index]} dB")
|
||||||
|
self._decoder_pbar.update(1)
|
||||||
|
else:
|
||||||
|
if self._curr_decoder_index < len(self._decoders) - 1:
|
||||||
|
self._curr_decoder_index += 1
|
||||||
|
self._curr_SNRs_index = 0
|
||||||
|
|
||||||
|
self._decoder_pbar.reset()
|
||||||
|
decoder = self._decoders[self._curr_decoder_index]
|
||||||
|
self._decoder_pbar.set_description(
|
||||||
|
f"Calculating BERs for {decoder.__class__.__name__}")
|
||||||
|
self._overall_pbar.update(1)
|
||||||
|
else:
|
||||||
|
self._sim_running = False
|
||||||
|
|
||||||
|
self._snr_pbar.close()
|
||||||
|
self._decoder_pbar.close()
|
||||||
|
self._overall_pbar.close()
|
||||||
|
|
||||||
|
def start_or_continue(self) -> None:
|
||||||
|
"""Start the simulation.
|
||||||
|
|
||||||
|
This is a blocking call. A call to the stop() function
|
||||||
|
from another thread will stop this function.
|
||||||
|
"""
|
||||||
|
self._sim_running = True
|
||||||
|
|
||||||
|
while self._sim_running:
|
||||||
|
bit_errors = self._simulate_transmission()
|
||||||
|
self._update_statistics(bit_errors)
|
||||||
|
self._advance_state()
|
||||||
|
|
||||||
|
def stop(self) -> None:
|
||||||
|
"""Stop the simulation."""
|
||||||
|
self._sim_running = False
|
||||||
|
|
||||||
|
def get_current_results(self) -> pd.DataFrame:
|
||||||
|
"""Get the current results.
|
||||||
|
|
||||||
|
If the simulation has not yet completed, the BERs which have not yet
|
||||||
|
been calculated are set to 0.
|
||||||
|
|
||||||
|
:return: pandas Dataframe with the columns ["SNR", "BER_1", "BER_2",
|
||||||
|
..., "DecFails_1", "DecFails_2", ...]
|
||||||
|
"""
|
||||||
|
data = {"SNR": np.array(self._SNRs)}
|
||||||
|
|
||||||
|
for i, decoder_BERs in enumerate(self._BERs):
|
||||||
|
data[f"BER_{i}"] = decoder_BERs
|
||||||
|
|
||||||
|
for i, decoder_dec_fails in enumerate(self._dec_fails):
|
||||||
|
data[f"DecFails_{i}"] = decoder_dec_fails
|
||||||
|
|
||||||
|
for i, avg_K in enumerate(self._avg_K):
|
||||||
|
data[f"AvgK_{i}"] = avg_K
|
||||||
|
|
||||||
|
return pd.DataFrame(data)
|
||||||
|
|
||||||
|
|
||||||
|
class HashableDict:
|
||||||
|
"""Class behaving like an immutable dict. More importantly it is
|
||||||
|
hashable and thus usable as a key type for another dict."""
|
||||||
|
|
||||||
|
def __init__(self, data_dict):
|
||||||
|
assert (isinstance(data_dict, dict))
|
||||||
|
for key, val in data_dict.items():
|
||||||
|
self.__dict__[key] = val
|
||||||
|
|
||||||
|
def __getitem__(self, item):
|
||||||
|
return self.__dict__[item]
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
return str(self.__dict__)
|
||||||
|
|
||||||
|
|
||||||
|
class GenericMultithreadedSimulator:
|
||||||
|
def __init__(self, max_workers=8):
|
||||||
|
self._format_func = None
|
||||||
|
self._task_func = None
|
||||||
|
self._task_params = None
|
||||||
|
self._max_workers = max_workers
|
||||||
|
|
||||||
|
self._results = {}
|
||||||
|
self._executor = None
|
||||||
|
|
||||||
|
@property
|
||||||
|
def task_params(self):
|
||||||
|
return self._task_params
|
||||||
|
|
||||||
|
@task_params.setter
|
||||||
|
def task_params(self, sim_params):
|
||||||
|
self._task_params = {HashableDict(iteration_params): iteration_params
|
||||||
|
for iteration_params in sim_params}
|
||||||
|
|
||||||
|
@property
|
||||||
|
def task_func(self):
|
||||||
|
return self._task_func
|
||||||
|
|
||||||
|
@task_func.setter
|
||||||
|
def task_func(self, func):
|
||||||
|
self._task_func = func
|
||||||
|
|
||||||
|
@property
|
||||||
|
def format_func(self):
|
||||||
|
return self._format_func
|
||||||
|
|
||||||
|
@format_func.setter
|
||||||
|
def format_func(self, func):
|
||||||
|
self._format_func = func
|
||||||
|
|
||||||
|
def start_or_continue(self):
|
||||||
|
assert self._task_func is not None
|
||||||
|
assert self._task_params is not None
|
||||||
|
assert self._format_func is not None
|
||||||
|
|
||||||
|
self._executor = ProcessPoolExecutor(max_workers=self._max_workers)
|
||||||
|
|
||||||
|
with tqdm(total=(len(self._task_params)), leave=False) as pbar:
|
||||||
|
def done_callback(key, f):
|
||||||
|
try:
|
||||||
|
pbar.update(1)
|
||||||
|
self._results[key] = f.result()
|
||||||
|
del self._task_params[key]
|
||||||
|
except process.BrokenProcessPool:
|
||||||
|
# This exception is thrown when the program is
|
||||||
|
# prematurely stopped with a KeyboardInterrupt
|
||||||
|
# TODO: Make sure task_params have not been removed
|
||||||
|
pass
|
||||||
|
|
||||||
|
futures = []
|
||||||
|
|
||||||
|
for key, params in list(self._task_params.items()):
|
||||||
|
future = self._executor.submit(self._task_func, params)
|
||||||
|
future.add_done_callback(partial(done_callback, key))
|
||||||
|
futures.append(future)
|
||||||
|
|
||||||
|
self._executor.shutdown(wait=True, cancel_futures=False)
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
assert self._executor is not None, "The simulation has to be started" \
|
||||||
|
" before it can be stopped"
|
||||||
|
self._executor.shutdown(wait=True, cancel_futures=True)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def current_results(self):
|
||||||
|
return self._format_func(self._results)
|
||||||
|
|
||||||
|
def __getstate__(self):
|
||||||
|
state = self.__dict__.copy()
|
||||||
|
state["_executor"] = None
|
||||||
|
return state
|
||||||
|
|
||||||
|
def __setstate__(self, state):
|
||||||
|
self.__dict__.update(state)
|
||||||
|
self._executor = ProcessPoolExecutor()
|
||||||
Loading…
Reference in New Issue
Block a user