Exercises
ex-sp-ch01-01
Easy: Create a conda environment named test-env with Python 3.11, NumPy,
and SciPy. Export it to environment.yml, delete the environment,
and recreate it from the export file. Verify that import numpy
works in the recreated environment.
Use conda create -n test-env python=3.11 numpy scipy.
Export with conda env export > environment.yml.
Delete with conda env remove -n test-env, recreate with conda env create -f environment.yml.
Create and export
conda create -n test-env python=3.11 numpy scipy -y
conda activate test-env
conda env export > environment.yml
Delete and recreate
conda deactivate
conda env remove -n test-env
conda env create -f environment.yml
conda activate test-env
python -c "import numpy; print(numpy.__version__)"
ex-sp-ch01-02
Easy: Implement a __repr__ and __str__ for a SimResult dataclass that
stores method (str), snr_db (float), and ber (float).
__repr__ should be unambiguous; __str__ should be human-friendly.
Use @dataclass and implement the two methods.
__repr__ should look like SimResult(method='ZF', snr_db=10.0, ber=0.001).
__str__ could be ZF @ 10.0 dB: BER = 1.00e-03.
Implementation
from dataclasses import dataclass

@dataclass
class SimResult:
    method: str
    snr_db: float
    ber: float

    def __repr__(self) -> str:
        return (f"SimResult(method={self.method!r}, "
                f"snr_db={self.snr_db}, ber={self.ber})")

    def __str__(self) -> str:
        return f"{self.method} @ {self.snr_db:.1f} dB: BER = {self.ber:.2e}"

r = SimResult("ZF", 10.0, 1e-3)
print(repr(r))  # SimResult(method='ZF', snr_db=10.0, ber=0.001)
print(str(r))   # ZF @ 10.0 dB: BER = 1.00e-03
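A quick way to confirm the __repr__ is unambiguous is to round-trip it through eval; this sketch assumes the SimResult class above is in scope and relies on the __eq__ that @dataclass generates by default:
r = SimResult("ZF", 10.0, 1e-3)
r2 = eval(repr(r))   # reconstruct an object from its repr string
print(r2 == r)       # True: dataclass __eq__ compares all three fields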
ex-sp-ch01-03
Easy: Using collections.Counter, write a function that takes a string of
DNA bases (e.g., "ATCGATCGAATTCCGG") and returns a dictionary of
base frequencies as percentages.
Use Counter(dna_string) to count occurrences.
Divide each count by len(dna_string) and multiply by 100.
Implementation
from collections import Counter

def base_frequencies(dna: str) -> dict[str, float]:
    counts = Counter(dna.upper())
    total = len(dna)
    return {base: (count / total) * 100 for base, count in counts.items()}

print(base_frequencies("ATCGATCGAATTCCGG"))
# {'A': 25.0, 'T': 25.0, 'C': 25.0, 'G': 25.0}
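The solution divides by len(dna), so an empty string raises ZeroDivisionError, and characters outside A/T/C/G are silently counted. A defensive variant (a sketch, not required by the exercise; it reuses the Counter import above) could look like:
def base_frequencies_safe(dna: str) -> dict[str, float]:
    counts = Counter(b for b in dna.upper() if b in "ATCG")
    total = sum(counts.values())
    if total == 0:
        return {}  # empty input or no valid bases
    return {base: count / total * 100 for base, count in counts.items()}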
ex-sp-ch01-04
Easy: Write a generator function snr_range(start_db, stop_db, step_db)
that yields SNR values in dB from start_db to stop_db (inclusive
if exactly reached) with step step_db. Do not use range() or
numpy.
Use a while loop with a running variable.
Be careful with floating-point: use <= with a small epsilon for the stop condition.
Implementation
def snr_range(start_db: float, stop_db: float, step_db: float):
    current = start_db
    while current <= stop_db + 1e-10:
        yield current
        current += step_db

for snr in snr_range(-5.0, 20.0, 2.5):
    print(f"{snr:.1f} dB")
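Repeated addition accumulates floating-point error over long sweeps. A variant with the same generator contract (a sketch, not part of the exercise) computes each value as start_db + k * step_db so the error stays bounded:
def snr_range_stable(start_db: float, stop_db: float, step_db: float):
    k = 0
    while True:
        value = start_db + k * step_db
        if value > stop_db + 1e-10:
            break
        yield value
        k += 1

print([round(v, 1) for v in snr_range_stable(-5.0, 20.0, 2.5)])  # -5.0 ... 20.0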
ex-sp-ch01-05
Easy: Using a dict comprehension, convert a list of SNR values in dB
to a dictionary mapping each dB value to its linear equivalent:
snr_linear = 10^(snr_db / 10).
Use {db: 10**(db/10) for db in snr_list}.
Implementation
snr_list = [-5, 0, 5, 10, 15, 20, 25, 30]
snr_linear = {db: 10**(db / 10) for db in snr_list}

for db, lin in snr_linear.items():
    print(f"{db:>3d} dB -> {lin:.4f}")
#  -5 dB -> 0.3162
#   0 dB -> 1.0000
#   5 dB -> 3.1623
# ...
ex-sp-ch01-06
Easy: Write a function format_results_table(results) that takes a list
of dicts with keys method, ber, and runtime_s, and prints a
nicely formatted table with aligned columns. BER should be in
scientific notation with 2 decimal places; runtime in fixed-point
with 3 decimal places.
Use f-strings with alignment specs like {method:<12} and {ber:>10.2e}.
Print a header row first, then a separator line.
Implementation
def format_results_table(results: list[dict]) -> None:
    header = f"{'Method':<12} {'BER':>12} {'Runtime (s)':>12}"
    print(header)
    print("-" * len(header))
    for r in results:
        print(f"{r['method']:<12} {r['ber']:>12.2e} {r['runtime_s']:>12.3f}")

results = [
    {"method": "ZF", "ber": 3.45e-3, "runtime_s": 0.123},
    {"method": "MMSE", "ber": 1.87e-3, "runtime_s": 0.156},
]
format_results_table(results)
ex-sp-ch01-07
Medium: Implement a Matrix class that supports:
- __init__(self, rows: list[list[float]]): store a 2D matrix
- __repr__: unambiguous representation
- __getitem__(self, key): support both M[i] (row) and M[i, j] (element)
- __matmul__: matrix multiplication via @
- shape property returning (rows, cols)
For __getitem__, check if key is a tuple to distinguish M[i] from M[i, j].
For __matmul__, implement the triple loop: result[i][j] = sum(self[i, k] * other[k, j] for k in range(self.shape[1])).
The shape property can use @property decorator.
Implementation
class Matrix:
    def __init__(self, rows: list[list[float]]) -> None:
        self._data = [list(row) for row in rows]
        self._nrows = len(rows)
        self._ncols = len(rows[0]) if rows else 0

    @property
    def shape(self) -> tuple[int, int]:
        return (self._nrows, self._ncols)

    def __repr__(self) -> str:
        return f"Matrix({self._data})"

    def __getitem__(self, key):
        if isinstance(key, tuple):
            i, j = key
            return self._data[i][j]
        return self._data[key]

    def __matmul__(self, other: 'Matrix') -> 'Matrix':
        assert self._ncols == other._nrows
        result = [[sum(self[i, k] * other[k, j]
                       for k in range(self._ncols))
                   for j in range(other._ncols)]
                  for i in range(self._nrows)]
        return Matrix(result)

A = Matrix([[1, 2], [3, 4]])
B = Matrix([[5, 6], [7, 8]])
C = A @ B
print(C[0, 0])  # 19
print(C.shape)  # (2, 2)
ex-sp-ch01-08
Medium: Write a generator sliding_window(iterable, size) that yields
tuples of size consecutive elements from iterable.
For example, sliding_window([1,2,3,4,5], 3) yields
(1,2,3), (2,3,4), (3,4,5).
Use collections.deque internally.
Initialize a deque with maxlen=size.
Fill it with the first size elements, then yield and slide.
Implementation
from collections import deque
from typing import Iterator

def sliding_window(iterable, size: int) -> Iterator[tuple]:
    it = iter(iterable)
    window = deque(maxlen=size)
    for _ in range(size):
        try:
            window.append(next(it))
        except StopIteration:
            return  # iterable shorter than the window: yield nothing
    yield tuple(window)
    for item in it:
        window.append(item)
        yield tuple(window)

for w in sliding_window(range(10), 3):
    print(w)
# (0, 1, 2)
# (1, 2, 3)
# ...
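For the common size-2 case the standard library already provides itertools.pairwise (Python 3.10+), which behaves like sliding_window with size=2:
from itertools import pairwise  # Python 3.10+

print(list(pairwise(range(5))))  # [(0, 1), (1, 2), (2, 3), (3, 4)]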
ex-sp-ch01-09
Medium: Use itertools.product to generate all 2x2 matrices whose entries
are from {0, 1}. Count how many of them are invertible
(determinant != 0). Print each invertible matrix.
There are 2^4 = 16 such matrices.
The determinant of [[a, b], [c, d]] is a*d - b*c.
Implementation
import itertools

count = 0
for a, b, c, d in itertools.product([0, 1], repeat=4):
    det = a * d - b * c
    if det != 0:
        count += 1
        print(f"[[{a}, {b}], [{c}, {d}]] det = {det}")

print(f"\nInvertible matrices: {count} out of 16")
# Invertible matrices: 6 out of 16
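The same count can be obtained in a single expression, which is a handy cross-check on the loop above (it reuses the itertools import):
invertible = sum(
    1 for a, b, c, d in itertools.product([0, 1], repeat=4)
    if a * d - b * c != 0
)
print(invertible)  # 6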
ex-sp-ch01-10
Medium: Write a script that reads a CSV file with columns timestamp,
snr_db, ber, groups the data by SNR value using
collections.defaultdict, and prints the average BER for each
SNR level. Use f-strings with proper formatting.
Use csv.DictReader to read the file.
Use defaultdict(list) to group BER values by SNR.
Compute the average with sum(values) / len(values).
Implementation
import csv
from collections import defaultdict

def analyze_results(filepath: str) -> None:
    grouped = defaultdict(list)
    with open(filepath, 'r') as f:
        reader = csv.DictReader(f)
        for row in reader:
            snr = float(row['snr_db'])
            ber = float(row['ber'])
            grouped[snr].append(ber)

    print(f"{'SNR (dB)':>10} {'Avg BER':>12} {'Samples':>8}")
    print("-" * 32)
    for snr in sorted(grouped):
        values = grouped[snr]
        avg = sum(values) / len(values)
        print(f"{snr:>10.1f} {avg:>12.2e} {len(values):>8d}")

analyze_results('simulation_results.csv')
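To try the script end to end without real data, a throwaway simulation_results.csv with a few made-up rows can be written first; the timestamps and BER values below are purely illustrative:
sample = """timestamp,snr_db,ber
2024-01-01T00:00:00,0,1.2e-1
2024-01-01T00:01:00,0,1.4e-1
2024-01-01T00:02:00,5,3.1e-2
2024-01-01T00:03:00,5,2.9e-2
"""
with open('simulation_results.csv', 'w') as f:
    f.write(sample)

analyze_results('simulation_results.csv')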
ex-sp-ch01-11
Medium: Implement a @dataclass called AntennaArray with fields
n_elements (int), spacing_wavelengths (float, default 0.5),
and geometry (str, default "ula"). Add a method
steering_vector(theta) that returns the ULA steering vector
as a list of complex numbers, where d, the spacing in wavelengths, sets the phase shift between adjacent elements.
Import cmath for cmath.exp with complex arguments.
The k-th steering vector element is cmath.exp(1j * 2 * pi * k * d * sin(theta)).
Implementation
import cmath
import math
from dataclasses import dataclass

@dataclass
class AntennaArray:
    n_elements: int
    spacing_wavelengths: float = 0.5
    geometry: str = "ula"

    def steering_vector(self, theta: float) -> list[complex]:
        """ULA steering vector for angle theta (radians)."""
        d = self.spacing_wavelengths
        return [
            cmath.exp(1j * 2 * math.pi * k * d * math.sin(theta))
            for k in range(self.n_elements)
        ]

array = AntennaArray(n_elements=4)
sv = array.steering_vector(math.radians(30))
for k, val in enumerate(sv):
    print(f"a[{k}] = {val:.4f}")
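Each steering-vector element is a pure phase term, so its magnitude should be 1. A quick sanity check on the sv computed above:
assert all(abs(abs(v) - 1.0) < 1e-12 for v in sv)
print("all steering-vector elements have unit magnitude")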
ex-sp-ch01-12
Medium: Write a functools.lru_cache-decorated function factorial(n) that
computes n! recursively. Then write a function binomial(n, k)
that uses factorial to compute C(n, k) = n! / (k! * (n - k)!).
Print factorial.cache_info() to show cache hits.
Decorate factorial with @lru_cache(maxsize=None).
Computing multiple binomial coefficients will reuse cached factorials.
Implementation
from functools import lru_cache

@lru_cache(maxsize=None)
def factorial(n: int) -> int:
    if n <= 1:
        return 1
    return n * factorial(n - 1)

def binomial(n: int, k: int) -> int:
    return factorial(n) // (factorial(k) * factorial(n - k))

# Compute several binomial coefficients
for n in range(10):
    row = [binomial(n, k) for k in range(n + 1)]
    print(row)

print(f"\nCache: {factorial.cache_info()}")
# CacheInfo(hits=X, misses=Y, maxsize=None, currsize=Z)
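Since Python 3.8 the standard library has math.comb, which gives an independent cross-check of the binomial implementation above:
import math

assert all(
    binomial(n, k) == math.comb(n, k)
    for n in range(10) for k in range(n + 1)
)
print("binomial matches math.comb for n < 10")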
ex-sp-ch01-13
Medium: Write a YAML configuration file for a simulation with nested
parameters (channel model, SNR sweep, antenna config). Then write
Python code to load it, run a mock simulation for each parameter
combination (using itertools.product), and save results to a JSON
file with metadata including the timestamp and Python version.
Use yaml.safe_load() and json.dump().
Use itertools.product over the parameter lists from the config.
Include datetime.now().isoformat() and sys.version in metadata.
YAML config
# config.yaml
simulation:
  snr_db: [0, 5, 10, 15, 20]
  n_antennas: [2, 4, 8]
  channel: [rayleigh, ricean]
  n_realizations: 1000
Python implementation
import yaml, json, sys, itertools
from datetime import datetime

with open('config.yaml') as f:
    config = yaml.safe_load(f)

sim = config['simulation']
results = []
for snr, nt, ch in itertools.product(
    sim['snr_db'], sim['n_antennas'], sim['channel']
):
    # Mock simulation
    ber = 10 ** (-(snr + nt) / 10)
    results.append({
        'snr_db': snr, 'n_antennas': nt,
        'channel': ch, 'ber': ber,
    })

output = {
    'config': config,
    'results': results,
    'metadata': {
        'timestamp': datetime.now().isoformat(),
        'python_version': sys.version,
    },
}

with open('results.json', 'w') as f:
    json.dump(output, f, indent=2)
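Reloading the file is a cheap way to confirm the JSON round-trips; this short check (reusing the json import above) prints how many parameter combinations were recorded:
with open('results.json') as f:
    loaded = json.load(f)

print(len(loaded['results']))           # 5 SNRs * 3 antenna counts * 2 channels = 30
print(loaded['metadata']['timestamp'])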
ex-sp-ch01-14
Hard: Implement a SortedList class that maintains elements in sorted
order. It should support:
- add(value): insert maintaining sort order (use bisect)
- __contains__(value): membership test
- __getitem__(index): indexing
- __len__(): length
- __iter__(): iteration
- __repr__(): representation
Demonstrate that __contains__ is O(log n) while the equivalent
list search is O(n).
Use bisect.insort for maintaining sorted order on insert.
Use bisect.bisect_left for search in __contains__.
Time both approaches for large lists to demonstrate the difference.
Implementation
import bisect

class SortedList:
    def __init__(self):
        self._data = []

    def add(self, value):
        bisect.insort(self._data, value)

    def __contains__(self, value) -> bool:
        i = bisect.bisect_left(self._data, value)
        return i < len(self._data) and self._data[i] == value

    def __getitem__(self, index):
        return self._data[index]

    def __len__(self) -> int:
        return len(self._data)

    def __iter__(self):
        return iter(self._data)

    def __repr__(self) -> str:
        return f"SortedList({self._data})"

# Benchmark
import time

sl = SortedList()
plain = []
for x in range(100_000):
    sl.add(x)
    plain.append(x)

t0 = time.perf_counter()
for _ in range(10_000):
    99_999 in sl
t1 = time.perf_counter()
print(f"SortedList: {t1-t0:.4f}s")

t0 = time.perf_counter()
for _ in range(10_000):
    99_999 in plain
t1 = time.perf_counter()
print(f"Plain list: {t1-t0:.4f}s")
ex-sp-ch01-15
Hard: Write a generator infinite_primes() that yields prime numbers
indefinitely using the Sieve of Eratosthenes adapted for unbounded
generation. Use it to find the 10,000th prime number.
Maintain a dictionary mapping each upcoming composite to the primes that divide it.
For each candidate n, if it is not in the dictionary, it is prime.
When a prime p is found, mark p*p as composite with factor p.
Implementation
from typing import Iterator

def infinite_primes() -> Iterator[int]:
    """Yield primes indefinitely using an incremental sieve."""
    composites: dict[int, list[int]] = {}
    candidate = 2
    while True:
        if candidate not in composites:
            yield candidate
            composites[candidate * candidate] = [candidate]
        else:
            for prime in composites[candidate]:
                composites.setdefault(candidate + prime, []).append(prime)
            del composites[candidate]
        candidate += 1

import itertools

primes = infinite_primes()
tenth_k = next(itertools.islice(primes, 9999, 10000))
print(f"The 10,000th prime is {tenth_k}")
# The 10,000th prime is 104729
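A quick sanity check before asking for the 10,000th prime is to look at the first few values a fresh generator produces:
print(list(itertools.islice(infinite_primes(), 10)))
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]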
ex-sp-ch01-16
Hard: Implement __eq__, __lt__, and __hash__ for a ComplexNumber
class that stores real and imaginary parts. Use functools.total_ordering
to get all comparison operators from just __eq__ and __lt__.
Ordering should be by magnitude. Demonstrate that instances can
be used as dict keys and in sets.
Decorate the class with @functools.total_ordering.
Magnitude is sqrt(real**2 + imag**2).
__hash__ must be consistent with __eq__: equal objects must hash the same.
Implementation
import math
from functools import total_ordering

@total_ordering
class ComplexNumber:
    def __init__(self, real: float, imag: float) -> None:
        self.real = real
        self.imag = imag

    @property
    def magnitude(self) -> float:
        return math.sqrt(self.real**2 + self.imag**2)

    def __eq__(self, other) -> bool:
        if not isinstance(other, ComplexNumber):
            return NotImplemented
        return self.real == other.real and self.imag == other.imag

    def __lt__(self, other) -> bool:
        if not isinstance(other, ComplexNumber):
            return NotImplemented
        return self.magnitude < other.magnitude

    def __hash__(self) -> int:
        return hash((self.real, self.imag))

    def __repr__(self) -> str:
        return f"ComplexNumber({self.real}, {self.imag})"

# Can use in sets and as dict keys
s = {ComplexNumber(1, 0), ComplexNumber(0, 1), ComplexNumber(1, 0)}
print(len(s))  # 2 (duplicate removed)

# Sorting by magnitude
nums = [ComplexNumber(3, 4), ComplexNumber(1, 0), ComplexNumber(0, 2)]
print(sorted(nums))  # sorted by magnitude: 1.0, 2.0, 5.0
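The exercise also asks for dict-key usage; because __hash__ is consistent with __eq__, an equal-but-distinct instance finds the same entry (the key names below are just illustrative):
gains = {ComplexNumber(1, 0): "reference", ComplexNumber(0, 1): "quadrature"}
print(gains[ComplexNumber(1, 0)])  # reference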
ex-sp-ch01-17
Hard: Write a Pipeline class that chains generator functions together.
It should support a >> operator (using __rshift__) to compose
stages. Each stage is a function that takes an iterable and yields
transformed items.
Example usage:
pipeline = source >> filter_stage >> transform_stage >> sink_stage
pipeline.run()
Store stages as a list of callables.
Override __rshift__ to append a stage and return self.
run() chains the generators: stage2(stage1(source())).
Implementation
import itertools
from typing import Callable, Iterator

class Pipeline:
    def __init__(self, source: Callable[[], Iterator]):
        self._stages = [source]

    def __rshift__(self, stage: Callable) -> 'Pipeline':
        self._stages.append(stage)
        return self

    def run(self) -> list:
        it = self._stages[0]()
        for stage in self._stages[1:]:
            it = stage(it)
        return list(it)

# Define stages
def numbers():
    yield from range(100)

def evens(it):
    yield from (x for x in it if x % 2 == 0)

def squares(it):
    yield from (x**2 for x in it)

def first_10(it):
    yield from itertools.islice(it, 10)

result = (Pipeline(numbers) >> evens >> squares >> first_10).run()
print(result)  # [0, 4, 16, 36, 64, ...]
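One design choice worth noting: __rshift__ above mutates the pipeline and returns self, so composing further stages onto an already-built pipeline object extends the original. A possible immutable variant (a sketch, not required by the exercise) returns a fresh Pipeline instead:
class ImmutablePipeline(Pipeline):
    def __rshift__(self, stage):
        new = ImmutablePipeline(self._stages[0])
        new._stages = self._stages + [stage]  # copy the stage list, do not mutate self
        return new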
ex-sp-ch01-18
Hard: Implement a timed_generator(gen_func) wrapper that measures and
prints the total time to exhaust a generator, the number of items
yielded, and the average time per item. It should work as a
decorator. Test it on a generator that computes prime factors.
Use time.perf_counter() for high-resolution timing.
The wrapper should yield items from the original generator while counting.
Print the summary when the generator is exhausted (after the loop ends).
Implementation
import time
from functools import wraps

def timed_generator(gen_func):
    @wraps(gen_func)
    def wrapper(*args, **kwargs):
        gen = gen_func(*args, **kwargs)
        count = 0
        t0 = time.perf_counter()
        for item in gen:
            count += 1
            yield item
        elapsed = time.perf_counter() - t0
        avg = elapsed / count if count else 0
        print(f"[{gen_func.__name__}] {count} items in "
              f"{elapsed:.4f}s ({avg*1e6:.1f} us/item)")
    return wrapper

@timed_generator
def prime_factors(n: int):
    d = 2
    while d * d <= n:
        while n % d == 0:
            yield d
            n //= d
        d += 1
    if n > 1:
        yield n

# 2**31 - 1 = 2147483647 is prime, so exactly one factor is yielded
factors = list(prime_factors(2**31 - 1))
print(f"Factors: {factors}")
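Because 2**31 - 1 is itself prime, the run above yields a single item; a composite input shows the per-item timing more clearly (720 is just an arbitrary example):
print(list(prime_factors(720)))  # [2, 2, 2, 2, 3, 3, 5]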
ex-sp-ch01-19
Challenge: Implement a LazyMatrix class that represents a matrix without
storing it. Instead, it stores a function f(i, j) that computes
element (i, j) on demand. Support @ (matrix multiply by
computing elements of the result lazily), __getitem__, and a
to_list() method that materializes the full matrix.
Use this to represent an N x N DFT matrix F without storing N^2 complex numbers, and compute a single row of F @ F^H.
For __matmul__, return a new LazyMatrix whose element function computes the dot product of row i of self with column j of other.
The DFT matrix element F[k, n] is cmath.exp(-1j * 2 * pi * k * n / N).
Computing a single row of F @ F^H should give a row of N * I: the value N on the diagonal and approximately 0 elsewhere.
Implementation
import cmath
import math

class LazyMatrix:
    def __init__(self, rows: int, cols: int, func):
        self.rows = rows
        self.cols = cols
        self._func = func

    def __getitem__(self, key):
        i, j = key
        return self._func(i, j)

    def __matmul__(self, other: 'LazyMatrix') -> 'LazyMatrix':
        assert self.cols == other.rows
        K = self.cols
        def product_func(i, j):
            return sum(self[i, k] * other[k, j] for k in range(K))
        return LazyMatrix(self.rows, other.cols, product_func)

    def row(self, i: int) -> list:
        return [self[i, j] for j in range(self.cols)]

    def to_list(self) -> list[list]:
        # Materialize the full matrix (only sensible for small sizes)
        return [self.row(i) for i in range(self.rows)]

N = 1000
F = LazyMatrix(N, N, lambda k, n: cmath.exp(-1j * 2 * math.pi * k * n / N))
FH = LazyMatrix(N, N, lambda n, k: cmath.exp(1j * 2 * math.pi * k * n / N))
FFH = F @ FH

# Row 0 should be [N, 0, 0, ..., 0]
row0 = FFH.row(0)
print(f"FFH[0,0] = {abs(row0[0]):.1f}")  # 1000.0
print(f"FFH[0,1] = {abs(row0[1]):.2e}")  # ~0 (machine precision)
ex-sp-ch01-20
Challenge: Build a ConfigManager class that:
- Reads a YAML configuration file
- Allows dot-notation access (config.simulation.snr_db)
- Supports nested defaults via __getattr__
- Can be frozen (made immutable) after loading
- Provides a to_dict() method for serialization
- Is iterable (yields key-value pairs)
This pattern is used extensively in research code (similar to OmegaConf / Hydra configurations).
Use __getattr__ to convert dict keys to attribute access.
For nested dicts, recursively wrap them in ConfigManager.
For freezing, use a _frozen flag and override __setattr__.
Implementation
import yaml

class ConfigManager:
    def __init__(self, data: dict | None = None, frozen: bool = False):
        super().__setattr__('_data', data or {})
        super().__setattr__('_frozen', frozen)

    def __getattr__(self, key: str):
        try:
            val = self._data[key]
            if isinstance(val, dict):
                return ConfigManager(val, self._frozen)
            return val
        except KeyError:
            raise AttributeError(f"No config key: {key}")

    def __setattr__(self, key: str, value):
        if self._frozen:
            raise AttributeError("Config is frozen")
        self._data[key] = value

    def __iter__(self):
        return iter(self._data.items())

    def __repr__(self) -> str:
        return f"ConfigManager({self._data})"

    def to_dict(self) -> dict:
        return dict(self._data)

    def freeze(self) -> 'ConfigManager':
        return ConfigManager(self._data, frozen=True)

    @classmethod
    def from_yaml(cls, path: str) -> 'ConfigManager':
        with open(path) as f:
            return cls(yaml.safe_load(f))

# Usage
cfg = ConfigManager({
    'simulation': {
        'snr_db': [0, 5, 10, 15, 20],
        'n_antennas': 4,
        'channel': 'rayleigh',
    }
})

print(cfg.simulation.n_antennas)  # 4
print(cfg.simulation.snr_db)      # [0, 5, 10, 15, 20]

frozen = cfg.freeze()
# frozen.simulation.n_antennas = 8  # Raises AttributeError
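The prompt also asks for iteration and to_dict(); a short check using the cfg object above exercises both:
for key, value in cfg:
    print(key, "->", value)  # simulation -> {'snr_db': [...], 'n_antennas': 4, ...}

print(cfg.to_dict())         # plain dict, ready for json.dump or yaml.dump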