Changed the way the codewords are mapped onto [0, 1]^n
This commit is contained in:
parent
6af243efba
commit
9348689292
@ -79,10 +79,9 @@ class ProximalDecoder:
|
||||
def _check_parity(self, y_hat: np.array) -> bool:
|
||||
"""Perform a parity check for a given codeword.
|
||||
|
||||
:param y_hat: codeword to be checked (element of [-1, 1]^n)
|
||||
:param y_hat: codeword to be checked (element of [0, 1]^n)
|
||||
:return: True if the parity check passes, i.e. the codeword is valid. False otherwise
|
||||
"""
|
||||
y_hat_binary = (y_hat == 1) * 1 # Map the codeword from [-1, 1]^n to [0, 1]^n
|
||||
syndrome = np.dot(self._H, y_hat) % 2
|
||||
return not np.any(syndrome)
|
||||
|
||||
@ -96,15 +95,16 @@ class ProximalDecoder:
|
||||
and 'w' is noise)
|
||||
:return: Most likely sent dataword (element of [0, 1]^k)
|
||||
"""
|
||||
s = 0
|
||||
x_hat = 0
|
||||
s = np.zeros(y.size)
|
||||
x_hat = np.zeros(y.size)
|
||||
for k in range(self._K):
|
||||
r = s - self._step_size * self._L_awgn(s, y)
|
||||
|
||||
s = r - self._gamma * self._grad_h(r)
|
||||
s = self._projection(s) # Equation (15)
|
||||
|
||||
x_hat = (np.sign(s) == 1) * 1
|
||||
x_hat = np.sign(s)
|
||||
x_hat = (x_hat == 1) * 1 # Map the codeword from [-1, 1]^n to [0, 1]^n
|
||||
|
||||
if self._check_parity(x_hat):
|
||||
break
|
||||
|
||||
Loading…
Reference in New Issue
Block a user